Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Kconfig | 23
-rw-r--r-- drivers/acpi/Makefile | 8
-rw-r--r-- drivers/acpi/acpi_configfs.c | 4
-rw-r--r-- drivers/acpi/acpi_lpss.c | 48
-rw-r--r-- drivers/acpi/acpi_platform.c | 43
-rw-r--r-- drivers/acpi/acpi_video.c | 8
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 2
-rw-r--r-- drivers/acpi/acpica/acstruct.h | 10
-rw-r--r-- drivers/acpi/acpica/acutils.h | 9
-rw-r--r-- drivers/acpi/acpica/dbconvert.c | 4
-rw-r--r-- drivers/acpi/acpica/dbdisply.c | 2
-rw-r--r-- drivers/acpi/acpica/dbfileio.c | 2
-rw-r--r-- drivers/acpi/acpica/dbinput.c | 36
-rw-r--r-- drivers/acpi/acpica/dbmethod.c | 4
-rw-r--r-- drivers/acpi/acpica/dbnames.c | 114
-rw-r--r-- drivers/acpi/acpica/dbobject.c | 1
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 12
-rw-r--r-- drivers/acpi/acpica/evgpeblk.c | 11
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 3
-rw-r--r-- drivers/acpi/acpica/evmisc.c | 12
-rw-r--r-- drivers/acpi/acpica/evregion.c | 4
-rw-r--r-- drivers/acpi/acpica/evrgnini.c | 1
-rw-r--r-- drivers/acpi/acpica/hwxfsleep.c | 3
-rw-r--r-- drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 6
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 4
-rw-r--r-- drivers/acpi/acpica/psobject.c | 7
-rw-r--r-- drivers/acpi/acpica/rscreate.c | 3
-rw-r--r-- drivers/acpi/acpica/tbdata.c | 3
-rw-r--r-- drivers/acpi/acpica/tbxfload.c | 40
-rw-r--r-- drivers/acpi/acpica/utbuffer.c | 52
-rw-r--r-- drivers/acpi/acpica/utids.c | 2
-rw-r--r-- drivers/acpi/acpica/uttrack.c | 2
-rw-r--r-- drivers/acpi/apei/apei-base.c | 44
-rw-r--r-- drivers/acpi/apei/einj.c | 4
-rw-r--r-- drivers/acpi/apei/erst-dbg.c | 5
-rw-r--r-- drivers/acpi/apei/ghes.c | 25
-rw-r--r-- drivers/acpi/apei/hest.c | 14
-rw-r--r-- drivers/acpi/battery.c | 2
-rw-r--r-- drivers/acpi/button.c | 139
-rw-r--r-- drivers/acpi/ec.c | 195
-rw-r--r-- drivers/acpi/hmat/Makefile | 2
-rw-r--r-- drivers/acpi/internal.h | 3
-rw-r--r-- drivers/acpi/numa/Kconfig (renamed from drivers/acpi/hmat/Kconfig) | 7
-rw-r--r-- drivers/acpi/numa/Makefile | 3
-rw-r--r-- drivers/acpi/numa/hmat.c (renamed from drivers/acpi/hmat/hmat.c) | 158
-rw-r--r-- drivers/acpi/numa/srat.c (renamed from drivers/acpi/numa.c) | 0
-rw-r--r-- drivers/acpi/osi.c | 6
-rw-r--r-- drivers/acpi/pmic/intel_pmic.c | 20
-rw-r--r-- drivers/acpi/pmic/intel_pmic_bytcrc.c (renamed from drivers/acpi/pmic/intel_pmic_crc.c) | 4
-rw-r--r-- drivers/acpi/pmic/intel_pmic_chtcrc.c | 44
-rw-r--r-- drivers/acpi/processor_idle.c | 21
-rw-r--r-- drivers/acpi/property.c | 48
-rw-r--r-- drivers/acpi/resource.c | 4
-rw-r--r-- drivers/acpi/scan.c | 1
-rw-r--r-- drivers/acpi/utils.c | 32
-rw-r--r-- drivers/android/binder.c | 6
-rw-r--r-- drivers/android/binder_alloc.c | 42
-rw-r--r-- drivers/ata/acard-ahci.c | 6
-rw-r--r-- drivers/ata/ahci.c | 13
-rw-r--r-- drivers/ata/ahci_tegra.c | 6
-rw-r--r-- drivers/ata/ata_piix.c | 14
-rw-r--r-- drivers/ata/libahci.c | 6
-rw-r--r-- drivers/ata/libata-core.c | 12
-rw-r--r-- drivers/ata/libata-sff.c | 12
-rw-r--r-- drivers/ata/pata_artop.c | 4
-rw-r--r-- drivers/ata/pata_falcon.c | 42
-rw-r--r-- drivers/ata/pata_macio.c | 6
-rw-r--r-- drivers/ata/pata_pxa.c | 8
-rw-r--r-- drivers/ata/pdc_adma.c | 7
-rw-r--r-- drivers/ata/sata_fsl.c | 4
-rw-r--r-- drivers/ata/sata_inic162x.c | 4
-rw-r--r-- drivers/ata/sata_mv.c | 34
-rw-r--r-- drivers/ata/sata_nv.c | 18
-rw-r--r-- drivers/ata/sata_promise.c | 6
-rw-r--r-- drivers/ata/sata_qstor.c | 8
-rw-r--r-- drivers/ata/sata_rcar.c | 6
-rw-r--r-- drivers/ata/sata_sil.c | 8
-rw-r--r-- drivers/ata/sata_sil24.c | 6
-rw-r--r-- drivers/ata/sata_sx4.c | 6
-rw-r--r-- drivers/atm/firestream.c | 2
-rw-r--r-- drivers/base/core.c | 308
-rw-r--r-- drivers/base/cpu.c | 17
-rw-r--r-- drivers/base/firmware_loader/Kconfig | 14
-rw-r--r-- drivers/base/firmware_loader/builtin/Makefile | 3
-rw-r--r-- drivers/base/firmware_loader/main.c | 9
-rw-r--r-- drivers/base/memory.c | 36
-rw-r--r-- drivers/base/platform.c | 393
-rw-r--r-- drivers/base/power/common.c | 20
-rw-r--r-- drivers/base/power/domain.c | 40
-rw-r--r-- drivers/base/power/power.h | 30
-rw-r--r-- drivers/base/power/wakeirq.c | 4
-rw-r--r-- drivers/base/property.c | 83
-rw-r--r-- drivers/base/regmap/regmap-w1.c | 4
-rw-r--r-- drivers/base/soc.c | 30
-rw-r--r-- drivers/base/swnode.c | 258
-rw-r--r-- drivers/bcma/driver_chipcommon_pmu.c | 24
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 13
-rw-r--r-- drivers/block/loop.c | 39
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 2
-rw-r--r-- drivers/block/nbd.c | 6
-rw-r--r-- drivers/block/null_blk.h | 19
-rw-r--r-- drivers/block/null_blk_main.c | 125
-rw-r--r-- drivers/block/null_blk_zoned.c | 87
-rw-r--r-- drivers/block/rbd.c | 2
-rw-r--r-- drivers/block/rsxx/core.c | 2
-rw-r--r-- drivers/bluetooth/Kconfig | 11
-rw-r--r-- drivers/bluetooth/Makefile | 1
-rw-r--r-- drivers/bluetooth/btbcm.c | 10
-rw-r--r-- drivers/bluetooth/btintel.c | 45
-rw-r--r-- drivers/bluetooth/btintel.h | 5
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 1
-rw-r--r-- drivers/bluetooth/btqca.c | 92
-rw-r--r-- drivers/bluetooth/btqca.h | 32
-rw-r--r-- drivers/bluetooth/btrtl.c | 4
-rw-r--r-- drivers/bluetooth/btusb.c | 57
-rw-r--r-- drivers/bluetooth/btwilink.c | 337
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 4
-rw-r--r-- drivers/bluetooth/hci_bcsp.c | 3
-rw-r--r-- drivers/bluetooth/hci_ll.c | 39
-rw-r--r-- drivers/bluetooth/hci_nokia.c | 2
-rw-r--r-- drivers/bluetooth/hci_qca.c | 278
-rw-r--r-- drivers/bus/Kconfig | 9
-rw-r--r-- drivers/bus/Makefile | 1
-rw-r--r-- drivers/bus/fsl-mc/dprc-driver.c | 6
-rw-r--r-- drivers/bus/fsl-mc/dprc.c | 53
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 43
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-private.h | 42
-rw-r--r-- drivers/bus/ti-pwmss.c (renamed from drivers/pwm/pwm-tipwmss.c) | 0
-rw-r--r-- drivers/cdrom/gdrom.c | 4
-rw-r--r-- drivers/char/Kconfig | 6
-rw-r--r-- drivers/char/agp/Kconfig | 2
-rw-r--r-- drivers/char/hw_random/Kconfig | 46
-rw-r--r-- drivers/char/hw_random/Makefile | 2
-rw-r--r-- drivers/char/hw_random/atmel-rng.c | 43
-rw-r--r-- drivers/char/hw_random/bcm2835-rng.c | 5
-rw-r--r-- drivers/char/hw_random/core.c | 66
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 4
-rw-r--r-- drivers/char/hw_random/hisi-rng.c | 4
-rw-r--r-- drivers/char/hw_random/hisi-trng-v2.c | 99
-rw-r--r-- drivers/char/hw_random/iproc-rng200.c | 9
-rw-r--r-- drivers/char/hw_random/ks-sa-rng.c | 44
-rw-r--r-- drivers/char/hw_random/meson-rng.c | 4
-rw-r--r-- drivers/char/hw_random/mtk-rng.c | 9
-rw-r--r-- drivers/char/hw_random/npcm-rng.c | 184
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 13
-rw-r--r-- drivers/char/hw_random/omap3-rom-rng.c | 168
-rw-r--r-- drivers/char/hw_random/pasemi-rng.c | 4
-rw-r--r-- drivers/char/hw_random/pic32-rng.c | 4
-rw-r--r-- drivers/char/hw_random/st-rng.c | 4
-rw-r--r-- drivers/char/hw_random/tx4939-rng.c | 4
-rw-r--r-- drivers/char/hw_random/xgene-rng.c | 4
-rw-r--r-- drivers/char/ipmi/Kconfig | 98
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 4
-rw-r--r-- drivers/char/ipmi/ipmb_dev_int.c | 37
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 55
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 40
-rw-r--r-- drivers/char/lp.c | 4
-rw-r--r-- drivers/char/ppdev.c | 16
-rw-r--r-- drivers/char/random.c | 4
-rw-r--r-- drivers/char/tpm/Kconfig | 7
-rw-r--r-- drivers/char/tpm/Makefile | 4
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 64
-rw-r--r-- drivers/char/tpm/tpm-sysfs.c | 45
-rw-r--r-- drivers/char/tpm/tpm.h | 248
-rw-r--r-- drivers/char/tpm/tpm1-cmd.c | 15
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 311
-rw-r--r-- drivers/char/tpm/tpm_crb.c | 123
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 2
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 79
-rw-r--r-- drivers/char/tpm/tpm_tis_spi.c | 143
-rw-r--r-- drivers/char/tpm/tpm_tis_spi.h | 53
-rw-r--r-- drivers/char/tpm/tpm_tis_spi_cr50.c | 322
-rw-r--r-- drivers/char/virtio_console.c | 28
-rw-r--r-- drivers/char/xillybus/xillybus_of.c | 5
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 154
-rw-r--r-- drivers/clocksource/samsung_pwm_timer.c | 3
-rw-r--r-- drivers/clocksource/sh_cmt.c | 13
-rw-r--r-- drivers/clocksource/sh_mtu2.c | 13
-rw-r--r-- drivers/clocksource/sh_tmu.c | 14
-rw-r--r-- drivers/clocksource/timer-riscv.c | 31
-rw-r--r-- drivers/counter/104-quad-8.c | 33
-rw-r--r-- drivers/counter/Kconfig | 11
-rw-r--r-- drivers/counter/Makefile | 1
-rw-r--r-- drivers/counter/counter.c | 101
-rw-r--r-- drivers/counter/ftm-quaddec.c | 14
-rw-r--r-- drivers/counter/stm32-lptimer-cnt.c | 7
-rw-r--r-- drivers/counter/stm32-timer-cnt.c | 23
-rw-r--r-- drivers/counter/ti-eqep.c | 466
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 12
-rw-r--r-- drivers/cpufreq/Makefile | 2
-rw-r--r-- drivers/cpufreq/arm_big_little.c | 658
-rw-r--r-- drivers/cpufreq/arm_big_little.h | 43
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 35
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r-- drivers/cpufreq/imx-cpufreq-dt.c | 20
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 30
-rw-r--r-- drivers/cpufreq/loongson2_cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 17
-rw-r--r-- drivers/cpufreq/s3c64xx-cpufreq.c | 7
-rw-r--r-- drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 25
-rw-r--r-- drivers/cpufreq/ti-cpufreq.c | 119
-rw-r--r-- drivers/cpufreq/vexpress-spc-cpufreq.c | 584
-rw-r--r-- drivers/cpuidle/cpuidle-powernv.c | 7
-rw-r--r-- drivers/cpuidle/cpuidle.c | 72
-rw-r--r-- drivers/cpuidle/driver.c | 72
-rw-r--r-- drivers/cpuidle/governor.c | 7
-rw-r--r-- drivers/cpuidle/governors/haltpoll.c | 7
-rw-r--r-- drivers/cpuidle/governors/ladder.c | 29
-rw-r--r-- drivers/cpuidle/governors/menu.c | 131
-rw-r--r-- drivers/cpuidle/governors/teo.c | 182
-rw-r--r-- drivers/cpuidle/poll_state.c | 2
-rw-r--r-- drivers/cpuidle/sysfs.c | 71
-rw-r--r-- drivers/crypto/Kconfig | 92
-rw-r--r-- drivers/crypto/Makefile | 3
-rw-r--r-- drivers/crypto/allwinner/Kconfig | 87
-rw-r--r-- drivers/crypto/allwinner/Makefile | 3
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/Makefile (renamed from drivers/crypto/sunxi-ss/Makefile) | 0
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-cipher.c) | 34
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-core.c) | 139
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-hash.c) | 47
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-prng.c) | 9
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h (renamed from drivers/crypto/sunxi-ss/sun4i-ss.h) | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/Makefile | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 438
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 676
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 254
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/Makefile | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 436
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 642
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 218
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 6
-rw-r--r-- drivers/crypto/amlogic/Kconfig | 24
-rw-r--r-- drivers/crypto/amlogic/Makefile | 2
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 382
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 332
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl.h | 161
-rw-r--r-- drivers/crypto/atmel-aes.c | 590
-rw-r--r-- drivers/crypto/atmel-authenc.h | 2
-rw-r--r-- drivers/crypto/atmel-sha.c | 4
-rw-r--r-- drivers/crypto/atmel-tdes.c | 469
-rw-r--r-- drivers/crypto/bcm/cipher.c | 373
-rw-r--r-- drivers/crypto/bcm/cipher.h | 10
-rw-r--r-- drivers/crypto/bcm/spu2.c | 6
-rw-r--r-- drivers/crypto/caam/Kconfig | 6
-rw-r--r-- drivers/crypto/caam/caampkc.c | 72
-rw-r--r-- drivers/crypto/caam/caampkc.h | 8
-rw-r--r-- drivers/crypto/caam/ctrl.c | 222
-rw-r--r-- drivers/crypto/caam/intern.h | 4
-rw-r--r-- drivers/crypto/caam/qi.c | 8
-rw-r--r-- drivers/crypto/caam/qi.h | 1
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_algs.c | 292
-rw-r--r-- drivers/crypto/cavium/nitrox/Kconfig | 2
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_aead.c | 39
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_dev.h | 15
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 9
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 8
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_req.h | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 134
-rw-r--r-- drivers/crypto/ccp/Kconfig | 2
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-galois.c | 7
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 94
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes.c | 169
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-des3.c | 100
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-main.c | 14
-rw-r--r-- drivers/crypto/ccp/ccp-crypto.h | 13
-rw-r--r-- drivers/crypto/ccp/ccp-dev-v5.c | 14
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 15
-rw-r--r-- drivers/crypto/ccp/ccp-dmaengine.c | 1
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 3
-rw-r--r-- drivers/crypto/ccp/psp-dev.c | 68
-rw-r--r-- drivers/crypto/ccp/psp-dev.h | 1
-rw-r--r-- drivers/crypto/ccree/cc_aead.c | 3
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 4
-rw-r--r-- drivers/crypto/chelsio/Kconfig | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 334
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.h | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_crypto.h | 16
-rw-r--r-- drivers/crypto/chelsio/chcr_ipsec.c | 27
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls.h | 5
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_io.c | 15
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_main.c | 20
-rw-r--r-- drivers/crypto/geode-aes.c | 433
-rw-r--r-- drivers/crypto/geode-aes.h | 15
-rw-r--r-- drivers/crypto/hifn_795x.c | 183
-rw-r--r-- drivers/crypto/hisilicon/Kconfig | 45
-rw-r--r-- drivers/crypto/hisilicon/Makefile | 6
-rw-r--r-- drivers/crypto/hisilicon/hpre/Makefile | 2
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 83
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 1137
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 1052
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 142
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 17
-rw-r--r-- drivers/crypto/hisilicon/sec2/Makefile | 2
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 156
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 889
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 198
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 1095
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 184
-rw-r--r-- drivers/crypto/hisilicon/sgl.h | 24
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 1
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_crypto.c | 46
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 294
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 329
-rw-r--r-- drivers/crypto/inside-secure/safexcel.h | 131
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 1506
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 1475
-rw-r--r-- drivers/crypto/inside-secure/safexcel_ring.c | 5
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 228
-rw-r--r-- drivers/crypto/marvell/cesa.h | 6
-rw-r--r-- drivers/crypto/marvell/cipher.c | 14
-rw-r--r-- drivers/crypto/mediatek/mtk-aes.c | 250
-rw-r--r-- drivers/crypto/mxs-dcp.c | 140
-rw-r--r-- drivers/crypto/n2_core.c | 206
-rw-r--r-- drivers/crypto/nx/nx-aes-cbc.c | 81
-rw-r--r-- drivers/crypto/nx/nx-aes-ccm.c | 45
-rw-r--r-- drivers/crypto/nx/nx-aes-ctr.c | 87
-rw-r--r-- drivers/crypto/nx/nx-aes-ecb.c | 76
-rw-r--r-- drivers/crypto/nx/nx-aes-gcm.c | 29
-rw-r--r-- drivers/crypto/nx/nx.c | 64
-rw-r--r-- drivers/crypto/nx/nx.h | 19
-rw-r--r-- drivers/crypto/nx/nx_debugfs.c | 18
-rw-r--r-- drivers/crypto/omap-aes.c | 209
-rw-r--r-- drivers/crypto/omap-aes.h | 4
-rw-r--r-- drivers/crypto/omap-des.c | 232
-rw-r--r-- drivers/crypto/padlock-aes.c | 157
-rw-r--r-- drivers/crypto/picoxcell_crypto.c | 386
-rw-r--r-- drivers/crypto/qat/Kconfig | 2
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 304
-rw-r--r-- drivers/crypto/qat/qat_common/qat_crypto.h | 4
-rw-r--r-- drivers/crypto/qce/Makefile | 2
-rw-r--r-- drivers/crypto/qce/cipher.h | 8
-rw-r--r-- drivers/crypto/qce/common.c | 12
-rw-r--r-- drivers/crypto/qce/common.h | 3
-rw-r--r-- drivers/crypto/qce/core.c | 2
-rw-r--r-- drivers/crypto/qce/dma.c | 4
-rw-r--r-- drivers/crypto/qce/sha.c | 2
-rw-r--r-- drivers/crypto/qce/skcipher.c (renamed from drivers/crypto/qce/ablkcipher.c) | 172
-rw-r--r-- drivers/crypto/rockchip/Makefile | 2
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.c | 8
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.h | 3
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 556
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 538
-rw-r--r-- drivers/crypto/s5p-sss.c | 187
-rw-r--r-- drivers/crypto/sahara.c | 156
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 338
-rw-r--r-- drivers/crypto/talitos.c | 314
-rw-r--r-- drivers/crypto/ux500/Kconfig | 2
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 371
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 3
-rw-r--r-- drivers/crypto/virtio/Kconfig | 2
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_algs.c | 192
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_common.h | 2
-rw-r--r-- drivers/crypto/vmx/Makefile | 6
-rw-r--r-- drivers/dax/Kconfig | 27
-rw-r--r-- drivers/dax/Makefile | 2
-rw-r--r-- drivers/dax/bus.c | 2
-rw-r--r-- drivers/dax/bus.h | 2
-rw-r--r-- drivers/dax/dax-private.h | 2
-rw-r--r-- drivers/dax/hmem.c | 56
-rw-r--r-- drivers/devfreq/devfreq.c | 33
-rw-r--r-- drivers/devfreq/event/exynos-ppmu.c | 1
-rw-r--r-- drivers/devfreq/governor.h | 3
-rw-r--r-- drivers/devfreq/tegra30-devfreq.c | 417
-rw-r--r-- drivers/dma-buf/dma-buf.c | 120
-rw-r--r-- drivers/dma-buf/dma-fence.c | 78
-rw-r--r-- drivers/edac/altera_edac.c | 152
-rw-r--r-- drivers/edac/amd64_edac.c | 217
-rw-r--r-- drivers/edac/amd64_edac.h | 2
-rw-r--r-- drivers/edac/aspeed_edac.c | 7
-rw-r--r-- drivers/edac/edac_device.c | 50
-rw-r--r-- drivers/edac/edac_device.h | 54
-rw-r--r-- drivers/edac/edac_mc.c | 138
-rw-r--r-- drivers/edac/edac_mc_sysfs.c | 49
-rw-r--r-- drivers/edac/ghes_edac.c | 128
-rw-r--r-- drivers/edac/i10nm_base.c | 3
-rw-r--r-- drivers/edac/i3200_edac.c | 3
-rw-r--r-- drivers/edac/i5000_edac.c | 5
-rw-r--r-- drivers/edac/i5100_edac.c | 16
-rw-r--r-- drivers/edac/i5400_edac.c | 18
-rw-r--r-- drivers/edac/i7300_edac.c | 7
-rw-r--r-- drivers/edac/i7core_edac.c | 3
-rw-r--r-- drivers/edac/ie31200_edac.c | 7
-rw-r--r-- drivers/edac/pnd2_edac.c | 4
-rw-r--r-- drivers/edac/sb_edac.c | 23
-rw-r--r-- drivers/edac/skx_base.c | 54
-rw-r--r-- drivers/edac/skx_common.c | 65
-rw-r--r-- drivers/edac/skx_common.h | 4
-rw-r--r-- drivers/edac/ti_edac.c | 2
-rw-r--r-- drivers/extcon/extcon-axp288.c | 38
-rw-r--r-- drivers/extcon/extcon-intel-cht-wc.c | 16
-rw-r--r-- drivers/extcon/extcon-sm5502.c | 6
-rw-r--r-- drivers/extcon/extcon-sm5502.h | 2
-rw-r--r-- drivers/firewire/net.c | 6
-rw-r--r-- drivers/firmware/arm_sdei.c | 12
-rw-r--r-- drivers/firmware/broadcom/Kconfig | 8
-rw-r--r-- drivers/firmware/broadcom/Makefile | 1
-rw-r--r-- drivers/firmware/broadcom/tee_bnxt_fw.c | 279
-rw-r--r-- drivers/firmware/efi/Kconfig | 21
-rw-r--r-- drivers/firmware/efi/Makefile | 5
-rw-r--r-- drivers/firmware/efi/apple-properties.c | 18
-rw-r--r-- drivers/firmware/efi/arm-init.c | 9
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 24
-rw-r--r-- drivers/firmware/efi/efi.c | 15
-rw-r--r-- drivers/firmware/efi/esrt.c | 3
-rw-r--r-- drivers/firmware/efi/fake_mem.c | 26
-rw-r--r-- drivers/firmware/efi/fake_mem.h | 10
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 5
-rw-r--r-- drivers/firmware/efi/libstub/arm-stub.c | 2
-rw-r--r-- drivers/firmware/efi/libstub/arm32-stub.c | 5
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 19
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 2
-rw-r--r-- drivers/firmware/efi/libstub/random.c | 27
-rw-r--r-- drivers/firmware/efi/x86_fake_mem.c | 69
-rw-r--r-- drivers/firmware/psci/psci.c | 24
-rw-r--r-- drivers/firmware/stratix10-rsu.c | 42
-rw-r--r-- drivers/firmware/stratix10-svc.c | 18
-rw-r--r-- drivers/fpga/Kconfig | 2
-rw-r--r-- drivers/fpga/dfl-fme-main.c | 385
-rw-r--r-- drivers/fpga/zynq-fpga.c | 4
-rw-r--r-- drivers/fsi/Kconfig | 8
-rw-r--r-- drivers/fsi/Makefile | 1
-rw-r--r-- drivers/fsi/fsi-core.c | 67
-rw-r--r-- drivers/fsi/fsi-master-aspeed.c | 544
-rw-r--r-- drivers/fsi/fsi-master-hub.c | 46
-rw-r--r-- drivers/fsi/fsi-master.h | 71
-rw-r--r-- drivers/gpio/Kconfig | 2
-rw-r--r-- drivers/gpio/gpio-bd70528.c | 6
-rw-r--r-- drivers/gpio/gpio-loongson.c | 2
-rw-r--r-- drivers/gpio/gpio-max77620.c | 6
-rw-r--r-- drivers/gpio/gpio-menz127.c | 1
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 19
-rw-r--r-- drivers/gpio/gpio-tegra186.c | 4
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 17
-rw-r--r-- drivers/gpio/gpiolib-devres.c | 33
-rw-r--r-- drivers/gpio/gpiolib.c | 48
-rw-r--r-- drivers/gpu/drm/Kconfig | 36
-rw-r--r-- drivers/gpu/drm/Makefile | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 102
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 77
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 147
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 289
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 214
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 214
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 176
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 86
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 274
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 79
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 306
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 216
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 177
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 109
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 70
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 101
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 53
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 497
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 87
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 659
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 43
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 209
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 99
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 230
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 158
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 318
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 92
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/arct_reg_init.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 67
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 44
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1398
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 151
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 474
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 380
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 214
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 108
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 44
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 258
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 161
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 183
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_0.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_0.h | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 139
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 272
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 108
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 26
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/Kconfig | 28
-rw-r--r-- drivers/gpu/drm/amd/display/Makefile | 7
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 383
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 346
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 66
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 59
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 153
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 186
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 304
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 38
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 281
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 345
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 101
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 74
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_ddc_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dsc.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 91
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 23
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 93
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 187
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 50
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 60
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 62
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 57
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 89
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 640
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 349
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 116
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 122
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c | 470
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h | 61
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 380
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 49
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 85
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hdcp/Makefile | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c | 324
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/include/ddc_service_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/include/hdcp_types.h | 96
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/Makefile | 32
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 426
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 442
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 531
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c | 307
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 305
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 163
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 139
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 328
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 272
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 289
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 98
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 93
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h | 10
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h | 18
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 18
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h | 12
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h | 49
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h | 92
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h | 176
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h | 12
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 27
-rw-r--r-- drivers/gpu/drm/amd/include/discovery.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h | 42
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 13
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 10
-rw-r--r-- drivers/gpu/drm/amd/include/renoir_ip_offset.h | 34
-rw-r--r-- drivers/gpu/drm/amd/include/vega10_enum.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 45
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 1142
-rw-r--r-- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 523
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c | 195
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h | 29
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c | 19
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h | 13
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c | 196
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h | 29
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c | 222
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h | 29
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c | 91
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h | 32
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 40
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c | 231
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h | 29
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 68
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c | 23
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 41
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 370
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h | 51
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 134
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 41
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 549
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.h | 11
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 483
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_internal.h | 204
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 370
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 153
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 134
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_drv.c | 16
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/arm/display/Kconfig | 6
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/Makefile | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/d71/d71_component.c | 221
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c | 41
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h | 9
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 105
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 77
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_dev.h | 20
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 30
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_event.c | 140
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.h | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h | 17
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 76
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 5
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 16
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.c | 9
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.h | 3
-rw-r--r-- drivers/gpu/drm/arm/malidp_regs.h | 10
-rw-r--r-- drivers/gpu/drm/ast/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 6
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 43
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 266
-rw-r--r-- drivers/gpu/drm/ast/ast_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 5
-rw-r--r-- drivers/gpu/drm/bochs/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/bochs/bochs.h | 1
-rw-r--r-- drivers/gpu/drm/bochs/bochs_drv.c | 7
-rw-r--r-- drivers/gpu/drm/bochs/bochs_kms.c | 26
-rw-r--r-- drivers/gpu/drm/bochs/bochs_mm.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/bridge/analogix-anx78xx.c | 110
-rw-r--r-- drivers/gpu/drm/bridge/analogix-anx78xx.h | 17
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/cdns-dsi.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/dumb-vga-dac.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/lvds-encoder.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/nxp-ptn3460.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/panel.c | 70
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8622.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/sii9234.c | 37
-rw-r--r-- drivers/gpu/drm/bridge/sil-sii8620.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 21
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 155
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 39
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/tc358764.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 66
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/ti-tfp410.c | 1
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus.c | 6
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 247
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 18
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c | 2
-rw-r--r-- drivers/gpu/drm/drm_blend.c | 7
-rw-r--r-- drivers/gpu/drm/drm_cache.c | 14
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c | 3
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 144
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 23
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper_internal.h | 3
-rw-r--r-- drivers/gpu/drm/drm_damage_helper.c | 8
-rw-r--r-- drivers/gpu/drm/drm_debugfs_crc.c | 8
-rw-r--r-- drivers/gpu/drm/drm_dp_cec.c | 29
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 177
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 1807
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology_internal.h | 24
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 17
-rw-r--r-- drivers/gpu/drm/drm_dsc.c | 23
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 222
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 2
-rw-r--r-- drivers/gpu/drm/drm_encoder.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 62
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 40
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 31
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c | 84
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 735
-rw-r--r-- drivers/gpu/drm/drm_memory.c | 1
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 11
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 36
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c | 2
-rw-r--r-- drivers/gpu/drm/drm_of.c | 5
-rw-r--r-- drivers/gpu/drm/drm_panel.c | 14
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 9
-rw-r--r-- drivers/gpu/drm/drm_print.c | 60
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 4
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 3
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 38
-rw-r--r-- drivers/gpu/drm/drm_trace.h | 14
-rw-r--r-- drivers/gpu/drm/drm_vblank.c | 60
-rw-r--r-- drivers/gpu/drm/drm_vram_helper_common.c | 8
-rw-r--r-- drivers/gpu/drm/drm_vram_mm_helper.c | 297
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_mic.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 32
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 4
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_dsi_output.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 14
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 6
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/i2c/sil164_drv.c | 2
-rw-r--r-- drivers/gpu/drm/i2c/tda9950.c | 12
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 10
-rw-r--r-- drivers/gpu/drm/i810/i810_dma.c | 4
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 18
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.debug | 143
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.profile | 49
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.unstable | 29
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 25
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 69
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 58
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 46
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 81
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 1220
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 611
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 839
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 2389
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 66
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 557
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 43
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 64
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 509
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 75
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 412
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 332
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h | 52
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 23
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 216
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 297
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 32
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 441
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 549
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 87
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 55
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 74
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vga.c | 160
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vga.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 621
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.h | 61
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 29
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 56
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 200
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 99
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 37
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 84
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 52
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 13
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 34
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 48
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 165
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_region.c | 174
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_region.h | 29
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 82
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 128
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 130
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 42
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 55
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 579
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 214
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 704
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 306
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 354
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 33
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 25
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 231
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 246
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 234
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h | 23
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pool.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pool.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 104
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_user.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 160
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 207
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 137
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.h | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 36
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 360
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_llc.c | 161
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_llc.h | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_llc_types.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 1500
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.h | 39
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 66
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c | 287
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c | 787
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.h | 28
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6_types.h | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_renderstate.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 172
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset_types.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.c | 323
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.h | 131
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c (renamed from drivers/gpu/drm/i915/gt/intel_ringbuffer.c) | 404
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_types.h | 51
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 1872
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.h | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps_types.h | 93
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.h | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c | 52
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline_types.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 67
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_context.c | 71
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 350
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 60
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 207
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_llc.c | 80
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_llc.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 1895
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_reset.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_timeline.c | 138
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 270
-rw-r--r-- drivers/gpu/drm/i915/gt/selftests/mock_timeline.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 185
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 56
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 41
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 76
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 46
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/execlist.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 23
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 48
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 389
-rw-r--r-- drivers/gpu/drm/i915/i915_active.h | 330
-rw-r--r-- drivers/gpu/drm/i915/i915_active_types.h | 34
-rw-r--r-- drivers/gpu/drm/i915/i915_buddy.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 435
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 522
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 288
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 649
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 422
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 58
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 104
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence_reg.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 413
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 77
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 150
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 839
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.h | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 80
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 1860
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.h | 32
-rw-r--r-- drivers/gpu/drm/i915/i915_perf_types.h | 435
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 313
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_priolist_types.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 306
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 886
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 237
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.c | 55
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.h | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler_types.h | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.c | 67
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 162
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h | 34
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 639
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 134
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 230
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.c | 272
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.h | 129
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 3065
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_region_lmem.c | 132
-rw-r--r-- drivers/gpu/drm/i915/intel_region_lmem.h | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 94
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 20
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_tgl.c | 121
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_tgl.h | 16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c90
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c46
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c143
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c404
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c217
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c502
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c23
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c624
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c56
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c53
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c60
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c1
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c5
-rw-r--r--drivers/gpu/drm/lima/Kconfig1
-rw-r--r--drivers/gpu/drm/lima/Makefile4
-rw-r--r--drivers/gpu/drm/lima/lima_device.c5
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c22
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c195
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h32
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c46
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c1
-rw-r--r--drivers/gpu/drm/lima/lima_object.c119
-rw-r--r--drivers/gpu/drm/lima/lima_object.h35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c87
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c4
-rw-r--r--drivers/gpu/drm/mediatek/Makefile2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c111
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c136
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c128
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c67
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c234
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c338
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.h49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c288
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c149
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c32
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c115
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c9
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c327
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h23
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c20
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c17
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c20
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c46
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.h4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c37
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c129
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c137
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c5
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c5
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c5
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c5
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c26
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c5
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c5
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c5
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c5
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c5
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c29
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c5
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c5
-rw-r--r--drivers/gpu/drm/panfrost/TODO2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c124
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c17
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c4
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c20
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c32
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c62
-rw-r--r--drivers/gpu/drm/radeon/cik.c12
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c29
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c12
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h3
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c19
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c169
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c48
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c66
-rw-r--r--drivers/gpu/drm/selftests/Makefile2
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c238
-rw-r--r--drivers/gpu/drm/selftests/test-drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c14
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c3
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c26
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c10
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c5
-rw-r--r--drivers/gpu/drm/stm/ltdc.c39
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c35
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h1
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c30
-rw-r--r--drivers/gpu/drm/tegra/dc.h2
-rw-r--r--drivers/gpu/drm/tegra/dp.c876
-rw-r--r--drivers/gpu/drm/tegra/dp.h177
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c208
-rw-r--r--drivers/gpu/drm/tegra/drm.c417
-rw-r--r--drivers/gpu/drm/tegra/drm.h13
-rw-r--r--drivers/gpu/drm/tegra/falcon.c64
-rw-r--r--drivers/gpu/drm/tegra/falcon.h16
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c81
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c12
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c12
-rw-r--r--drivers/gpu/drm/tegra/hub.c6
-rw-r--r--drivers/gpu/drm/tegra/output.c28
-rw-r--r--drivers/gpu/drm/tegra/plane.c104
-rw-r--r--drivers/gpu/drm/tegra/plane.h8
-rw-r--r--drivers/gpu/drm/tegra/sor.c1704
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tegra/vic.c138
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c190
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c27
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c69
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c57
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c7
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c5
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c55
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig2
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c19
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h27
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c149
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c119
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c138
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c4
-rw-r--r--drivers/gpu/drm/virtio/Kconfig2
-rw-r--r--drivers/gpu/drm/virtio/Makefile2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h135
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c183
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c228
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c270
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c61
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c305
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c227
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c15
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h6
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c7
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/cdma.c6
-rw-r--r--drivers/gpu/host1x/channel.c13
-rw-r--r--drivers/gpu/host1x/channel.h1
-rw-r--r--drivers/gpu/host1x/dev.c236
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/intr.c1
-rw-r--r--drivers/gpu/host1x/job.c91
-rw-r--r--drivers/gpu/host1x/job.h4
-rw-r--r--drivers/greybus/connection.c3
-rw-r--r--drivers/hv/hv.c4
-rw-r--r--drivers/hv/vmbus_drv.c30
-rw-r--r--drivers/hwmon/Kconfig41
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/ab8500.c65
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/applesmc.c38
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c7
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c115
-rw-r--r--drivers/hwmon/ina3221.c163
-rw-r--r--drivers/hwmon/ltc2947-core.c1183
-rw-r--r--drivers/hwmon/ltc2947-i2c.c49
-rw-r--r--drivers/hwmon/ltc2947-spi.c50
-rw-r--r--drivers/hwmon/ltc2947.h12
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c131
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c74
-rw-r--r--drivers/hwmon/tmp421.c3
-rw-r--r--drivers/hwmon/tmp513.c772
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c312
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c351
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h81
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c36
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c26
-rw-r--r--drivers/hwtracing/coresight/coresight.c51
-rw-r--r--drivers/hwtracing/intel_th/core.c8
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/policy.c4
-rw-r--r--drivers/i2c/i2c-core-acpi.c28
-rw-r--r--drivers/i2c/i2c-core-of.c5
-rw-r--r--drivers/i3c/master.c4
-rw-r--r--drivers/ide/falconide.c60
-rw-r--r--drivers/ide/tx4938ide.c2
-rw-r--r--drivers/ide/tx4939ide.c6
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/adc/Kconfig35
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c1218
-rw-r--r--drivers/iio/adc/ad7292.c350
-rw-r--r--drivers/iio/adc/ad7949.c33
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c3
-rw-r--r--drivers/iio/adc/aspeed_adc.c4
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c4
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c2
-rw-r--r--drivers/iio/adc/cc10001_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c20
-rw-r--r--drivers/iio/adc/exynos_adc.c6
-rw-r--r--drivers/iio/adc/hx711.c22
-rw-r--r--drivers/iio/adc/ingenic-adc.c153
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c262
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c4
-rw-r--r--drivers/iio/adc/max1027.c180
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/adc/meson_saradc.c4
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c4
-rw-r--r--drivers/iio/adc/npcm_adc.c4
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c4
-rw-r--r--drivers/iio/adc/sc27xx_adc.c16
-rw-r--r--drivers/iio/adc/spear_adc.c4
-rw-r--r--drivers/iio/adc/stm32-adc-core.c27
-rw-r--r--drivers/iio/adc/stm32-adc.c21
-rw-r--r--drivers/iio/adc/stmpe-adc.c2
-rw-r--r--drivers/iio/adc/twl4030-madc.c18
-rw-r--r--drivers/iio/adc/vf610_adc.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c8
-rw-r--r--drivers/iio/chemical/sgp30.c2
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/dac/Kconfig4
-rw-r--r--drivers/iio/dac/ad5446.c6
-rw-r--r--drivers/iio/dac/ad7303.c13
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c4
-rw-r--r--drivers/iio/dac/stm32-dac-core.c138
-rw-r--r--drivers/iio/dac/stm32-dac.c94
-rw-r--r--drivers/iio/dac/vf610_dac.c4
-rw-r--r--drivers/iio/gyro/adis16080.c8
-rw-r--r--drivers/iio/gyro/adis16130.c2
-rw-r--r--drivers/iio/gyro/adis16136.c24
-rw-r--r--drivers/iio/gyro/itg3200_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c1
-rw-r--r--drivers/iio/humidity/hdc100x.c19
-rw-r--r--drivers/iio/imu/Kconfig27
-rw-r--r--drivers/iio/imu/Makefile5
-rw-r--r--drivers/iio/imu/adis.c29
-rw-r--r--drivers/iio/imu/adis16400.c22
-rw-r--r--drivers/iio/imu/adis16460.c8
-rw-r--r--drivers/iio/imu/adis16480.c116
-rw-r--r--drivers/iio/imu/fxos8700.h10
-rw-r--r--drivers/iio/imu/fxos8700_core.c649
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c71
-rw-r--r--drivers/iio/imu/fxos8700_spi.c59
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile7
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c204
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h19
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c195
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c60
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h74
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c356
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h36
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c86
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h87
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c109
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c1056
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c45
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c10
-rw-r--r--drivers/iio/industrialio-core.c17
-rw-r--r--drivers/iio/light/Kconfig22
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adux1020.c849
-rw-r--r--drivers/iio/light/bh1750.c4
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/tcs3414.c30
-rw-r--r--drivers/iio/light/veml6030.c908
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c130
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c6
-rw-r--r--drivers/iio/pressure/bmp280-spi.c6
-rw-r--r--drivers/iio/pressure/bmp280.h1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c3
-rw-r--r--drivers/iio/pressure/st_pressure_core.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c16
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c5
-rw-r--r--drivers/iio/proximity/sx9500.c16
-rw-r--r--drivers/iio/temperature/Kconfig11
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/ltc2983.c1557
-rw-r--r--drivers/iio/temperature/max31856.c2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cache.c8
-rw-r--r--drivers/infiniband/core/cm.c66
-rw-r--r--drivers/infiniband/core/cm_msgs.h32
-rw-r--r--drivers/infiniband/core/cma.c107
-rw-r--r--drivers/infiniband/core/core_priv.h11
-rw-r--r--drivers/infiniband/core/counters.c40
-rw-r--r--drivers/infiniband/core/device.c50
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c335
-rw-r--r--drivers/infiniband/core/iwpm_util.h5
-rw-r--r--drivers/infiniband/core/mad.c31
-rw-r--r--drivers/infiniband/core/nldev.c141
-rw-r--r--drivers/infiniband/core/rdma_core.c1
-rw-r--r--drivers/infiniband/core/restrack.c20
-rw-r--r--drivers/infiniband/core/restrack.h1
-rw-r--r--drivers/infiniband/core/rw.c25
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c12
-rw-r--r--drivers/infiniband/core/umem.c12
-rw-r--r--drivers/infiniband/core/umem_odp.c38
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c84
-rw-r--r--drivers/infiniband/core/verbs.c12
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig12
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c143
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig19
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile7
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1312
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h204
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c344
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.h69
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h802
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c282
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h155
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2258
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h233
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c230
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c232
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c101
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1321
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h347
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1082
-rw-r--r--drivers/infiniband/hw/cxgb3/tcb.h632
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c7
-rw-r--r--drivers/infiniband/hw/efa/efa.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h29
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c40
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h19
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c17
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c370
-rw-r--r--drivers/infiniband/hw/hfi1/init.c1
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c17
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c2
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c16
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c57
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.h3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h5
-rw-r--r--drivers/infiniband/hw/hns/Kconfig17
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c300
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h55
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c38
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c21
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c69
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c88
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c30
-rw-r--r--drivers/infiniband/hw/mlx4/main.c18
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c37
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c25
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c29
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c24
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c124
-rw-r--r--drivers/infiniband/hw/mlx5/main.c178
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c199
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h73
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c177
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c989
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c60
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c90
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c74
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h11
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h2
-rw-r--r--drivers/infiniband/hw/qedr/main.c5
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h72
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c150
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c643
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c38
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c30
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h1
-rw-r--r--drivers/infiniband/sw/siw/siw.h31
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c45
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c35
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c338
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h34
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c5
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c72
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h8
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c47
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c247
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h58
-rw-r--r--drivers/input/ff-memless.c9
-rw-r--r--drivers/input/rmi4/rmi_f11.c9
-rw-r--r--drivers/input/rmi4/rmi_f12.c32
-rw-r--r--drivers/input/rmi4/rmi_f54.c5
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c7
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/msm8974.c784
-rw-r--r--drivers/iommu/amd_iommu.c30
-rw-r--r--drivers/irqchip/irq-gic-v3.c20
-rw-r--r--drivers/irqchip/irq-gic-v4.c7
-rw-r--r--drivers/irqchip/irq-sifive-plic.c11
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig2
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c16
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c11
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c8
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c12
-rw-r--r--drivers/isdn/mISDN/hwchannel.c7
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c14
-rw-r--r--drivers/macintosh/rack-meter.c7
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_pm72.c22
-rw-r--r--drivers/macintosh/windfarm_rm31.c6
-rw-r--r--drivers/mailbox/tegra-hsp.c4
-rw-r--r--drivers/mcb/mcb-core.c28
-rw-r--r--drivers/mcb/mcb-lpc.c1
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/mcb/mcb-pci.c1
-rw-r--r--drivers/md/Kconfig54
-rw-r--r--drivers/md/bcache/Makefile2
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/bset.c17
-rw-r--r--drivers/md/bcache/btree.c19
-rw-r--r--drivers/md/bcache/closure.c7
-rw-r--r--drivers/md/bcache/request.c12
-rw-r--r--drivers/md/bcache/super.c56
-rw-r--r--drivers/md/bcache/sysfs.c7
-rw-r--r--drivers/md/bcache/writeback.c4
-rw-r--r--drivers/md/dm-bio-prison-v1.c27
-rw-r--r--drivers/md/dm-bio-prison-v2.c26
-rw-r--r--drivers/md/dm-cache-target.c77
-rw-r--r--drivers/md/dm-clone-metadata.c29
-rw-r--r--drivers/md/dm-clone-metadata.h4
-rw-r--r--drivers/md/dm-clone-target.c62
-rw-r--r--drivers/md/dm-crypt.c9
-rw-r--r--drivers/md/dm-dust.c97
-rw-r--r--drivers/md/dm-flakey.c25
-rw-r--r--drivers/md/dm-integrity.c28
-rw-r--r--drivers/md/dm-linear.c22
-rw-r--r--drivers/md/dm-raid.c164
-rw-r--r--drivers/md/dm-stripe.c15
-rw-r--r--drivers/md/dm-table.c27
-rw-r--r--drivers/md/dm-thin.c118
-rw-r--r--drivers/md/dm-writecache.c5
-rw-r--r--drivers/md/dm-zoned-metadata.c166
-rw-r--r--drivers/md/dm-zoned-reclaim.c8
-rw-r--r--drivers/md/dm-zoned-target.c54
-rw-r--r--drivers/md/dm-zoned.h2
-rw-r--r--drivers/md/dm.c135
-rw-r--r--drivers/md/md-bitmap.c2
-rw-r--r--drivers/md/md-linear.c5
-rw-r--r--drivers/md/md-multipath.c5
-rw-r--r--drivers/md/md.c57
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/md/raid0.c7
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/cec/cec-adap.c12
-rw-r--r--drivers/media/cec/cec-api.c20
-rw-r--r--drivers/media/cec/cec-core.c5
-rw-r--r--drivers/media/cec/cec-notifier.c5
-rw-r--r--drivers/media/cec/cec-pin.c10
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsir.h2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c12
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c54
-rw-r--r--drivers/media/dvb-frontends/mt312.c13
-rw-r--r--drivers/media/dvb-frontends/si2168.h47
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h10
-rw-r--r--drivers/media/dvb-frontends/tc90522.c27
-rw-r--r--drivers/media/dvb-frontends/tc90522.h3
-rw-r--r--drivers/media/i2c/Kconfig80
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad5820.c35
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv7842.c4
-rw-r--r--drivers/media/i2c/bt819.c2
-rw-r--r--drivers/media/i2c/hi556.c1200
-rw-r--r--drivers/media/i2c/imx214.c9
-rw-r--r--drivers/media/i2c/imx290.c884
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max2175.h4
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/ov2659.c139
-rw-r--r--drivers/media/i2c/ov5640.c33
-rw-r--r--drivers/media/i2c/ov5695.c2
-rw-r--r--drivers/media/i2c/ov6650.c266
-rw-r--r--drivers/media/i2c/saa711x_regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c326
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h36
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h3
-rw-r--r--drivers/media/i2c/st-mipid02.c5
-rw-r--r--drivers/media/i2c/tda1997x_regs.h2
-rw-r--r--drivers/media/i2c/tvp5150_reg.h2
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/mc/mc-device.c65
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c5
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c43
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c1
-rw-r--r--drivers/media/pci/cx88/cx88-video.c11
-rw-r--r--drivers/media/pci/cx88/cx88.h1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c1
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c4
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c166
-rw-r--r--drivers/media/pci/smipcie/smipcie.h1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c2
-rw-r--r--drivers/media/platform/Kconfig17
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c861
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h43
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h10
-rw-r--r--drivers/media/platform/aspeed-video.c58
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c2
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c41
-rw-r--r--drivers/media/platform/coda/coda-common.c13
-rw-r--r--drivers/media/platform/coda/coda.h1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c6
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c7
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c38
-rw-r--r--drivers/media/platform/meson/ao-cec.c32
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c20
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h9
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c4
-rw-r--r--drivers/media/platform/qcom/venus/core.c56
-rw-r--r--drivers/media/platform/qcom/venus/core.h30
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c247
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c6
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c11
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c17
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c156
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h6
-rw-r--r--drivers/media/platform/rcar_drif.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c2
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c5
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c26
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c4
-rw-r--r--drivers/media/platform/sunxi/Makefile1
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c1028
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.h237
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c4
-rw-r--r--drivers/media/platform/ti-vpe/csc.c254
-rw-r--r--drivers/media/platform/ti-vpe/csc.h4
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c13
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h2
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h5
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c396
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c4
-rw-r--r--drivers/media/platform/vim2m.c8
-rw-r--r--drivers/media/platform/vimc/Makefile7
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c107
-rw-r--r--drivers/media/platform/vimc/vimc-common.c171
-rw-r--r--drivers/media/platform/vimc/vimc-common.h120
-rw-r--r--drivers/media/platform/vimc/vimc-core.c215
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c182
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c102
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c109
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c19
-rw-r--r--drivers/media/platform/vivid/Makefile2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c7
-rw-r--r--drivers/media/platform/vivid/vivid-core.c368
-rw-r--r--drivers/media/platform/vivid/vivid-core.h25
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c89
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c62
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c57
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.c201
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.h29
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.c174
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.h25
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h2
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/rc/imon.c64
-rw-r--r--drivers/media/rc/imon_raw.c22
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c6
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-gs1.c84
-rw-r--r--drivers/media/rc/keymaps/rc-vega-s9x.c54
-rw-r--r--drivers/media/rc/mceusb.c141
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-main.c1
-rw-r--r--drivers/media/rc/tango-ir.c14
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/si2157.h33
-rw-r--r--drivers/media/tuners/si2157_priv.h5
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c13
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c508
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c172
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c795
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h30
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c37
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c28
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c391
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h14
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c20
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c30
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/sq905.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-regs.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-usb-isoc.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000.h2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c29
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c10
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c4
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c199
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c112
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c190
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c6
-rw-r--r--drivers/memstick/core/Kconfig18
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/jmb38x_ms.c12
-rw-r--r--drivers/mfd/Kconfig7
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab8500-debugfs.c715
-rw-r--r--drivers/mfd/ab8500-gpadc.c1075
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c2
-rw-r--r--drivers/mfd/tps6105x.c34
-rw-r--r--drivers/misc/Kconfig17
-rw-r--r--drivers/misc/atmel_tclib.c4
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rts5260.c3
-rw-r--r--drivers/misc/cardreader/rts5261.c792
-rw-r--r--drivers/misc/cardreader/rts5261.h233
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c43
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/eeprom/eeprom.c4
-rw-r--r--drivers/misc/fastrpc.c209
-rw-r--r--drivers/misc/habanalabs/command_submission.c127
-rw-r--r--drivers/misc/habanalabs/debugfs.c112
-rw-r--r--drivers/misc/habanalabs/device.c18
-rw-r--r--drivers/misc/habanalabs/firmware_if.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c78
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c53
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/habanalabs.h171
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c73
-rw-r--r--drivers/misc/habanalabs/hw_queue.c249
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h7
-rw-r--r--drivers/misc/habanalabs/include/qman_if.h12
-rw-r--r--drivers/misc/habanalabs/memory.c392
-rw-r--r--drivers/misc/habanalabs/mmu.c204
-rw-r--r--drivers/misc/hpilo.h2
-rw-r--r--drivers/misc/ibmvmc.h4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c80
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h4
-rw-r--r--drivers/misc/lkdtm/refcount.c11
-rw-r--r--drivers/misc/mei/bus-fixup.c9
-rw-r--r--drivers/misc/mei/bus.c42
-rw-r--r--drivers/misc/mei/client.h36
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c45
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h17
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c74
-rw-r--r--drivers/misc/mei/hw-me.h12
-rw-r--r--drivers/misc/mei/hw-txe.c10
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/main.c24
-rw-r--r--drivers/misc/mei/mei_dev.h18
-rw-r--r--drivers/misc/mei/pci-me.c16
-rw-r--r--drivers/misc/mic/Kconfig16
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/ocxl/trace.h2
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c11
-rw-r--r--drivers/misc/sram.c28
-rw-r--r--drivers/misc/ti-st/st_core.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c67
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c7
-rw-r--r--drivers/mmc/core/block.c151
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c9
-rw-r--r--drivers/mmc/core/quirks.h7
-rw-r--r--drivers/mmc/core/sdio.c28
-rw-r--r--drivers/mmc/core/sdio_bus.c9
-rw-r--r--drivers/mmc/host/Kconfig21
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c13
-rw-r--r--drivers/mmc/host/bcm2835.c4
-rw-r--r--drivers/mmc/host/cavium-octeon.c15
-rw-r--r--drivers/mmc/host/dw_mmc.c14
-rw-r--r--drivers/mmc/host/jz4740_mmc.c41
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c198
-rw-r--r--drivers/mmc/host/mmci.h5
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c46
-rw-r--r--drivers/mmc/host/moxart-mmc.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c31
-rw-r--r--drivers/mmc/host/owl-mmc.c696
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c49
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h14
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c362
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c493
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c12
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c21
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c257
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c53
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci_am654.c71
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c26
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.h32
-rw-r--r--drivers/mmc/host/tmio_mmc.h1
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c12
-rw-r--r--drivers/mmc/host/vub300.c7
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c79
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c8
-rw-r--r--drivers/mtd/chips/cfi_util.c2
-rw-r--r--drivers/mtd/devices/mchp23k256.c20
-rw-r--r--drivers/mtd/devices/spear_smi.c42
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c1
-rw-r--r--drivers/mtd/maps/Kconfig11
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/l440gx.c2
-rw-r--r--drivers/mtd/maps/physmap-core.c5
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.c132
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.h17
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/mtdcore.c26
-rw-r--r--drivers/mtd/mtdswap.c8
-rw-r--r--drivers/mtd/nand/raw/Kconfig7
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c23
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c3030
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c59
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c1
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c4
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c1
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c4
-rw-r--r--drivers/mtd/nand/raw/nand_base.c8
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c8
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c5
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c4
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c23
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c58
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c23
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c6
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c58
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c25
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c23
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c1491
-rw-r--r--drivers/mtd/ubi/debug.c131
-rw-r--r--drivers/net/Kconfig64
-rw-r--r--drivers/net/bonding/bond_main.c138
-rw-r--r--drivers/net/caif/Kconfig46
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/can/c_can/c_can_platform.c21
-rw-r--r--drivers/net/can/dev.c5
-rw-r--r--drivers/net/can/flexcan.c131
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c4
-rw-r--r--drivers/net/can/m_can/m_can.c54
-rw-r--r--drivers/net/can/m_can/m_can_platform.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c25
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h3
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c4
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c4
-rw-r--r--drivers/net/can/rx-offload.c122
-rw-r--r--drivers/net/can/slcan.c1
-rw-r--r--drivers/net/can/spi/mcp251x.c75
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/ti_hecc.c26
-rw-r--r--drivers/net/can/xilinx_can.c102
-rw-r--r--drivers/net/dsa/Kconfig3
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c73
-rw-r--r--drivers/net/dsa/b53/b53_priv.h8
-rw-r--r--drivers/net/dsa/bcm_sf2.c37
-rw-r--r--drivers/net/dsa/bcm_sf2.h3
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/dsa_loop.c5
-rw-r--r--drivers/net/dsa/lan9303-core.c4
-rw-r--r--drivers/net/dsa/lantiq_gswip.c4
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c12
-rw-r--r--drivers/net/dsa/mt7530.c23
-rw-r--r--drivers/net/dsa/mv88e6060.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c519
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h25
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c60
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c37
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c13
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h25
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c37
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c13
-rw-r--r--drivers/net/dsa/ocelot/Kconfig11
-rw-r--r--drivers/net/dsa/ocelot/Makefile6
-rw-r--r--drivers/net/dsa/ocelot/felix.c530
-rw-r--r--drivers/net/dsa/ocelot/felix.h37
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c583
-rw-r--r--drivers/net/dsa/qca8k.c14
-rw-r--r--drivers/net/dsa/realtek-smi-core.c5
-rw-r--r--drivers/net/dsa/sja1105/Kconfig1
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h61
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c65
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c12
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c418
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c630
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h113
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c409
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c432
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.h27
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c5
-rw-r--r--drivers/net/dummy.c36
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c35
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c158
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c270
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c120
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c328
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h44
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c100
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_phy.c147
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_phy.h32
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c1392
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.h140
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c63
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c43
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c439
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c122
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h277
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c212
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h396
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c322
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c15
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c7
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c5
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c4
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h132
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c351
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c328
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c59
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c66
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h95
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c416
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c97
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c55
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig2
-rw-r--r--drivers/net/ethernet/cadence/macb.h9
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c491
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h129
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c120
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c796
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c52
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c354
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c650
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c131
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c265
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1036
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h50
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c5
-rw-r--r--drivers/net/ethernet/cortina/gemini.c1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c56
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c370
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h12
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c68
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c182
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h11
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c40
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c375
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h38
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h73
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.c183
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.h226
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c39
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h17
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c27
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h93
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c300
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c23
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c17
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Kconfig8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c93
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c588
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c188
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c563
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c100
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c47
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c114
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c30
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c26
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c203
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h12
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c276
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c48
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.h6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c71
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c118
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c41
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c61
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c93
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c859
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c205
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c313
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c933
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c524
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1327
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c810
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c1267
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c600
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h140
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h59
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c535
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c1181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c17
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c104
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c231
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c30
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c639
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c51
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c60
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h99
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h14918
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c116
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h221
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c130
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c1711
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c876
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c55
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c187
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h40
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c60
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c194
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c590
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c506
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h15
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c303
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h27
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1157
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h482
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c154
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c32
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c14
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c36
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_sys.h144
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c56
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/ni/nixge.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c59
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c60
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c9
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c128
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h196
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c41
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c24
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c290
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c11
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c19
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1030
-rw-r--r--drivers/net/ethernet/renesas/ravb.h3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c30
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c11
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c5
-rw-r--r--drivers/net/ethernet/sfc/ef10.c22
-rw-r--r--drivers/net/ethernet/sfc/efx.c283
-rw-r--r--drivers/net/ethernet/sfc/efx.h22
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c33
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h84
-rw-r--r--drivers/net/ethernet/sfc/ptp.c3
-rw-r--r--drivers/net/ethernet/sfc/rx.c220
-rw-r--r--drivers/net/ethernet/sfc/tx.c92
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c62
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c119
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c290
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c114
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig20
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1285
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c150
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h11
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c2048
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c1246
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h81
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c589
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.h15
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c5
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c18
-rw-r--r--drivers/net/hyperv/hyperv_net.h7
-rw-r--r--drivers/net/hyperv/netvsc.c38
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/hyperv/rndis_filter.c9
-rw-r--r--drivers/net/ieee802154/Kconfig12
-rw-r--r--drivers/net/ieee802154/cc2520.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/loopback.c38
-rw-r--r--drivers/net/macvlan.c3
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/bus.c1
-rw-r--r--drivers/net/netdevsim/dev.c393
-rw-r--r--drivers/net/netdevsim/fib.c176
-rw-r--r--drivers/net/netdevsim/health.c319
-rw-r--r--drivers/net/netdevsim/netdev.c10
-rw-r--r--drivers/net/netdevsim/netdevsim.h33
-rw-r--r--drivers/net/nlmon.c28
-rw-r--r--drivers/net/phy/Kconfig17
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/aquantia.h4
-rw-r--r--drivers/net/phy/at803x.c312
-rw-r--r--drivers/net/phy/bcm-phy-lib.h2
-rw-r--r--drivers/net/phy/broadcom.c89
-rw-r--r--drivers/net/phy/dp83640.c16
-rw-r--r--drivers/net/phy/dp83867.c152
-rw-r--r--drivers/net/phy/dp83869.c434
-rw-r--r--drivers/net/phy/marvell.c255
-rw-r--r--drivers/net/phy/marvell10g.c25
-rw-r--r--drivers/net/phy/mdio-cavium.h2
-rw-r--r--drivers/net/phy/mdio-i2c.h2
-rw-r--r--drivers/net/phy/mdio-sun4i.c3
-rw-r--r--drivers/net/phy/mdio-xgene.h2
-rw-r--r--drivers/net/phy/mdio_bus.c17
-rw-r--r--drivers/net/phy/mscc.c208
-rw-r--r--drivers/net/phy/phy-core.c44
-rw-r--r--drivers/net/phy/phy.c67
-rw-r--r--drivers/net/phy/phy_device.c220
-rw-r--r--drivers/net/phy/phylink.c95
-rw-r--r--drivers/net/phy/sfp-bus.c216
-rw-r--r--drivers/net/phy/sfp.c630
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c53
-rw-r--r--drivers/net/usb/aqc111.h4
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c35
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c1307
-rw-r--r--drivers/net/usb/usbnet.c9
-rw-r--r--drivers/net/veth.c43
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vsockmon.c31
-rw-r--r--drivers/net/vxlan.c29
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c6
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c6
-rw-r--r--drivers/net/wireless/ath/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ar5523/Kconfig14
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c38
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c188
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c387
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h30
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c82
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h68
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c98
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c3
-rw-r--r--drivers/net/wireless/ath/regd.c50
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/boot_loader.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/debug.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.h17
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c21
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h13
-rw-r--r--drivers/net/wireless/atmel/Kconfig42
-rw-r--r--drivers/net/wireless/atmel/atmel_cs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c53
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c81
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h2
-rw-r--r--drivers/net/wireless/cisco/Kconfig2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c287
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h514
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c811
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c891
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c392
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c625
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c180
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c16
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c51
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c33
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/airtime.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c194
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c141
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c187
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h57
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c44
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c58
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c198
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c151
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c47
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c85
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c35
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h133
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/switchdev.h24
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig44
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c9
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c42
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h93
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c9
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c509
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h619
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c189
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c400
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h92
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c174
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c227
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h80
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c138
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c263
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c320
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h239
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c236
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h16
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c171
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h30
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c191
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c469
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.c829
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c376
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c101
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c21
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c135
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c27
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c4
-rw-r--r--drivers/net/wireless/st/cw1200/fwio.c6
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c3
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c25
-rw-r--r--drivers/net/wireless/ti/wl12xx/Kconfig8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/virt_wifi.c4
-rw-r--r--drivers/net/xen-netback/interface.c114
-rw-r--r--drivers/nfc/nfcmrvl/Kconfig2
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c1
-rw-r--r--drivers/nfc/nxp-nci/i2c.c6
-rw-r--r--drivers/nfc/pn533/Kconfig11
-rw-r--r--drivers/nfc/pn533/Makefile2
-rw-r--r--drivers/nfc/pn533/i2c.c32
-rw-r--r--drivers/nfc/pn533/pn533.c287
-rw-r--r--drivers/nfc/pn533/pn533.h40
-rw-r--r--drivers/nfc/pn533/uart.c330
-rw-r--r--drivers/nfc/pn533/usb.c16
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c1
-rw-r--r--drivers/ntb/test/ntb_pingpong.c5
-rw-r--r--drivers/nubus/nubus.c2
-rw-r--r--drivers/nvdimm/Kconfig1
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/region_devs.c13
-rw-r--r--drivers/nvme/host/Kconfig10
-rw-r--r--drivers/nvme/host/Makefile1
-rw-r--r--drivers/nvme/host/core.c42
-rw-r--r--drivers/nvme/host/fc.c49
-rw-r--r--drivers/nvme/host/hwmon.c259
-rw-r--r--drivers/nvme/host/multipath.c13
-rw-r--r--drivers/nvme/host/nvme.h33
-rw-r--r--drivers/nvme/host/pci.c9
-rw-r--r--drivers/nvme/host/rdma.c16
-rw-r--r--drivers/nvme/host/tcp.c4
-rw-r--r--drivers/nvme/target/admin-cmd.c133
-rw-r--r--drivers/nvme/target/core.c20
-rw-r--r--drivers/nvme/target/discovery.c70
-rw-r--r--drivers/nvme/target/fabrics-cmd.c15
-rw-r--r--drivers/nvme/target/fc.c31
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c43
-rw-r--r--drivers/nvme/target/io-cmd-file.c20
-rw-r--r--drivers/nvme/target/loop.c7
-rw-r--r--drivers/nvme/target/nvmet.h10
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/nvme/target/tcp.c14
-rw-r--r--drivers/nvmem/Kconfig23
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/core.c61
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c120
-rw-r--r--drivers/nvmem/imx-ocotp.c4
-rw-r--r--drivers/nvmem/rockchip-otp.c268
-rw-r--r--drivers/nvmem/sc27xx-efuse.c13
-rw-r--r--drivers/nvmem/sprd-efuse.c424
-rw-r--r--drivers/of/fdt.c20
-rw-r--r--drivers/of/of_mdio.c4
-rw-r--r--drivers/of/of_net.c16
-rw-r--r--drivers/of/platform.c12
-rw-r--r--drivers/of/property.c332
-rw-r--r--drivers/opp/core.c69
-rw-r--r--drivers/oprofile/oprofile_perf.c8
-rw-r--r--drivers/parport/daisy.c40
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c26
-rw-r--r--drivers/pci/pci.c18
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/cistpl.c1
-rw-r--r--drivers/pcmcia/i82092.c34
-rw-r--r--drivers/pcmcia/i82092aa.h2
-rw-r--r--drivers/pcmcia/yenta_socket.c3
-rw-r--r--drivers/perf/arm-cci.c4
-rw-r--r--drivers/perf/arm-ccn.c4
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c5
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c124
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c26
-rw-r--r--drivers/perf/thunderx2_pmu.c267
-rw-r--r--drivers/perf/xgene_pmu.c14
-rw-r--r--drivers/phy/allwinner/Kconfig11
-rw-r--r--drivers/phy/allwinner/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c190
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c10
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c4
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c4
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c3
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c9
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c120
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h96
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c7
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c5
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c7
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c805
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c23
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c137
-rw-r--r--drivers/phy/tegra/xusb.c93
-rw-r--r--drivers/phy/tegra/xusb.h4
-rw-r--r--drivers/phy/ti/Kconfig4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c5
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c119
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c6
-rw-r--r--drivers/pinctrl/devicetree.c50
-rw-r--r--drivers/pinctrl/devicetree.h7
-rw-r--r--drivers/pinctrl/freescale/Kconfig12
-rw-r--r--drivers/pinctrl/intel/Kconfig7
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c119
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c171
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c454
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c4
-rw-r--r--drivers/pinctrl/meson/Kconfig6
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c942
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c38
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c1
-rw-r--r--drivers/pinctrl/mvebu/Kconfig10
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c40
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c12
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c81
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c32
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c65
-rw-r--r--drivers/pinctrl/pinctrl-at91.c55
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c4
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c54
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c4
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c944
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h144
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c50
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c23
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c29
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c29
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c30
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c382
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c8
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c8
-rw-r--r--drivers/pinctrl/pinctrl-rzn1.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c53
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c21
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c4
-rw-r--r--drivers/pinctrl/pinctrl-u300.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c4
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa25x.c13
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa27x.c13
-rw-r--r--drivers/pinctrl/qcom/Kconfig101
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1127
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c121
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c32
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c35
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c863
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c57
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c4
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h8
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c41
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c43
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c51
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c4
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c23
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c10
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c3
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c4
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c4
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c5
-rw-r--r--drivers/platform/goldfish/Kconfig3
-rw-r--r--drivers/platform/mips/Kconfig4
-rw-r--r--drivers/platform/mips/cpu_hwmon.c17
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/intel_oaktrail.c10
-rw-r--r--drivers/power/avs/smartreflex.c2
-rw-r--r--drivers/power/reset/at91-reset.c6
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c8
-rw-r--r--drivers/power/supply/Kconfig2
-rw-r--r--drivers/power/supply/ab8500_btemp.c50
-rw-r--r--drivers/power/supply/ab8500_charger.c83
-rw-r--r--drivers/power/supply/ab8500_fg.c49
-rw-r--r--drivers/power/supply/abx500_chargalg.c8
-rw-r--r--drivers/power/supply/axp20x_usb_power.c8
-rw-r--r--drivers/power/supply/bd70528-charger.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c271
-rw-r--r--drivers/power/supply/cpcap-charger.c222
-rw-r--r--drivers/power/supply/test_power.c61
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/ptp/Kconfig12
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/idt8a340_reg.h659
-rw-r--r--drivers/ptp/ptp_chardev.c20
-rw-r--r--drivers/ptp/ptp_clockmatrix.c1427
-rw-r--r--drivers/ptp/ptp_clockmatrix.h104
-rw-r--r--drivers/ptp/ptp_dte.c4
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/ab8500.c17
-rw-r--r--drivers/regulator/bd70528-regulator.c1
-rw-r--r--drivers/regulator/bd718x7-regulator.c1
-rw-r--r--drivers/regulator/core.c19
-rw-r--r--drivers/regulator/da9062-regulator.c63
-rw-r--r--drivers/regulator/da9063-regulator.c9
-rw-r--r--drivers/regulator/da9211-regulator.c12
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/internal.h1
-rw-r--r--drivers/regulator/max77686-regulator.c5
-rw-r--r--drivers/regulator/max8907-regulator.c15
-rw-r--r--drivers/regulator/pbias-regulator.c75
-rw-r--r--drivers/regulator/pcap-regulator.c4
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c62
-rw-r--r--drivers/regulator/qcom_smd-regulator.c92
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c41
-rw-r--r--drivers/regulator/rk808-regulator.c29
-rw-r--r--drivers/regulator/rn5t618-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c7
-rw-r--r--drivers/regulator/s5m8767.c7
-rw-r--r--drivers/regulator/slg51000-regulator.c13
-rw-r--r--drivers/regulator/stm32-vrefbuf.c4
-rw-r--r--drivers/regulator/stpmic1_regulator.c6
-rw-r--r--drivers/regulator/tps6105x-regulator.c2
-rw-r--r--drivers/regulator/tps65090-regulator.c26
-rw-r--r--drivers/regulator/tps65132-regulator.c17
-rw-r--r--drivers/regulator/uniphier-regulator.c4
-rw-r--r--drivers/regulator/vexpress-regulator.c5
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/cio/Makefile4
-rw-r--r--drivers/s390/cio/qdio.h27
-rw-r--r--drivers/s390/cio/qdio_main.c57
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c41
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c11
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h76
-rw-r--r--drivers/s390/crypto/pkey_api.c26
-rw-r--r--drivers/s390/net/ism.h2
-rw-r--r--drivers/s390/net/qeth_core.h20
-rw-r--r--drivers/s390/net/qeth_core_main.c200
-rw-r--r--drivers/s390/net/qeth_core_mpc.h5
-rw-r--r--drivers/s390/net/qeth_core_sys.c80
-rw-r--r--drivers/s390/net/qeth_ethtool.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c68
-rw-r--r--drivers/s390/net/qeth_l2_sys.c43
-rw-r--r--drivers/s390/net/qeth_l3.h27
-rw-r--r--drivers/s390/net/qeth_l3_main.c257
-rw-r--r--drivers/s390/net/qeth_l3_sys.c96
-rw-r--r--drivers/scsi/a3000.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c8
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/sd.c15
-rw-r--r--drivers/scsi/sd.h12
-rw-r--r--drivers/scsi/sd_zbc.c278
-rw-r--r--drivers/sh/intc/core.c4
-rw-r--r--drivers/soc/fsl/qbman/qman.c7
-rw-r--r--drivers/soundwire/Kconfig2
-rw-r--r--drivers/soundwire/bus.c7
-rw-r--r--drivers/soundwire/cadence_master.c292
-rw-r--r--drivers/soundwire/cadence_master.h39
-rw-r--r--drivers/soundwire/intel.c201
-rw-r--r--drivers/soundwire/intel_init.c1
-rw-r--r--drivers/soundwire/slave.c98
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/spi-at91-usart.c4
-rw-r--r--drivers/spi/spi-atmel.c219
-rw-r--r--drivers/spi/spi-axi-spi-engine.c16
-rw-r--r--drivers/spi/spi-bcm-qspi.c7
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c3
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-cavium.c3
-rw-r--r--drivers/spi/spi-dw-mmio.c6
-rw-r--r--drivers/spi/spi-dw-pci.c24
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-falcon.c2
-rw-r--r--drivers/spi/spi-fsl-cpm.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c43
-rw-r--r--drivers/spi/spi-fsl-espi.c19
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-fsl-qspi.c55
-rw-r--r--drivers/spi/spi-fsl-spi.c3
-rw-r--r--drivers/spi/spi-gpio.c9
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-lantiq-ssc.c10
-rw-r--r--drivers/spi/spi-loopback-test.c12
-rw-r--r--drivers/spi/spi-mem.c2
-rw-r--r--drivers/spi/spi-mpc512x-psc.c3
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c3
-rw-r--r--drivers/spi/spi-mt65xx.c23
-rw-r--r--drivers/spi/spi-mxic.c8
-rw-r--r--drivers/spi/spi-npcm-pspi.c3
-rw-r--r--drivers/spi/spi-nxp-fspi.c2
-rw-r--r--drivers/spi/spi-omap-100k.c7
-rw-r--r--drivers/spi/spi-omap2-mcspi.c105
-rw-r--r--drivers/spi/spi-orion.c9
-rw-r--r--drivers/spi/spi-pic32.c46
-rw-r--r--drivers/spi/spi-pl022.c29
-rw-r--r--drivers/spi/spi-pxa2xx.c95
-rw-r--r--drivers/spi/spi-qup.c4
-rw-r--r--drivers/spi/spi-rspi.c8
-rw-r--r--drivers/spi/spi-s3c64xx.c6
-rw-r--r--drivers/spi/spi-sc18is602.c3
-rw-r--r--drivers/spi/spi-sh-hspi.c3
-rw-r--r--drivers/spi/spi-sifive.c11
-rw-r--r--drivers/spi/spi-slave-mt27xx.c12
-rw-r--r--drivers/spi/spi-sprd-adi.c8
-rw-r--r--drivers/spi/spi-sprd.c15
-rw-r--r--drivers/spi/spi-st-ssc4.c3
-rw-r--r--drivers/spi/spi-stm32-qspi.c3
-rw-r--r--drivers/spi/spi-tegra114.c42
-rw-r--r--drivers/spi/spi-tegra20-sflash.c5
-rw-r--r--drivers/spi/spi-tegra20-slink.c8
-rw-r--r--drivers/spi/spi-topcliff-pch.c7
-rw-r--r--drivers/spi/spi-txx9.c78
-rw-r--r--drivers/spi/spi-xcomm.c3
-rw-r--r--drivers/spi/spi-xilinx.c7
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c10
-rw-r--r--drivers/spi/spi-zynq-qspi.c84
-rw-r--r--drivers/spi/spi.c332
-rw-r--r--drivers/spi/spidev.c9
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c301
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.txt18
-rw-r--r--drivers/staging/board/armadillo800eva.c12
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c4
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c21
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c27
-rw-r--r--drivers/staging/exfat/Kconfig9
-rw-r--r--drivers/staging/exfat/TODO61
-rw-r--r--drivers/staging/exfat/exfat.h186
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c28
-rw-r--r--drivers/staging/exfat/exfat_cache.c303
-rw-r--r--drivers/staging/exfat/exfat_core.c1938
-rw-r--r--drivers/staging/exfat/exfat_nls.c192
-rw-r--r--drivers/staging/exfat/exfat_super.c896
-rw-r--r--drivers/staging/fbtft/Kconfig21
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_seps525.c213
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c22
-rw-r--r--drivers/staging/fbtft/fbtft-core.c129
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h11
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c8
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c2
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c6
-rw-r--r--drivers/staging/fieldbus/dev_core.c3
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c50
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h5
-rw-r--r--drivers/staging/fwserial/Kconfig26
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c16
-rw-r--r--drivers/staging/hp/Kconfig (renamed from drivers/net/ethernet/hp/Kconfig)0
-rw-r--r--drivers/staging/hp/Makefile (renamed from drivers/net/ethernet/hp/Makefile)0
-rw-r--r--drivers/staging/hp/hp100.c (renamed from drivers/net/ethernet/hp/hp100.c)0
-rw-r--r--drivers/staging/hp/hp100.h (renamed from drivers/net/ethernet/hp/hp100.h)0
-rw-r--r--drivers/staging/iio/accel/adis16240.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c79
-rw-r--r--drivers/staging/iio/frequency/ad9834.c4
-rw-r--r--drivers/staging/isdn/avm/b1.c41
-rw-r--r--drivers/staging/isdn/gigaset/interface.c2
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c204
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c24
-rw-r--r--drivers/staging/media/allegro-dvt/nal-h264.c2
-rw-r--r--drivers/staging/media/hantro/hantro.h20
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c16
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c120
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h7
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c48
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c20
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c12
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c25
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c51
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c21
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c41
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c10
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c27
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c27
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c38
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c36
-rw-r--r--drivers/staging/media/ipu3/Makefile6
-rw-r--r--drivers/staging/media/ipu3/TODO5
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h5
-rw-r--r--drivers/staging/media/omap4iss/iss.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c64
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c147
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c616
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h318
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c102
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.h1
-rw-r--r--drivers/staging/most/Kconfig8
-rw-r--r--drivers/staging/most/cdev/cdev.c1
-rw-r--r--drivers/staging/most/configfs.c124
-rw-r--r--drivers/staging/most/core.c108
-rw-r--r--drivers/staging/most/core.h1
-rw-r--r--drivers/staging/most/net/net.c1
-rw-r--r--drivers/staging/most/sound/sound.c9
-rw-r--r--drivers/staging/most/video/video.c1
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c21
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c23
-rw-r--r--drivers/staging/netlogic/TODO2
-rw-r--r--drivers/staging/netlogic/xlr_net.c3
-rw-r--r--drivers/staging/nvec/Kconfig10
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c6
-rw-r--r--drivers/staging/octeon/ethernet.c6
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/octeon/octeon-stubs.h106
-rw-r--r--drivers/staging/olpc_dcon/Kconfig21
-rw-r--r--drivers/staging/olpc_dcon/Makefile4
-rw-r--r--drivers/staging/olpc_dcon/TODO1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c6
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h5
-rw-r--r--drivers/staging/pi433/Kconfig24
-rw-r--r--drivers/staging/qlge/TODO3
-rw-r--r--drivers/staging/qlge/qlge.h145
-rw-r--r--drivers/staging/qlge/qlge_dbg.c291
-rw-r--r--drivers/staging/qlge/qlge_main.c909
-rw-r--r--drivers/staging/qlge/qlge_mpi.c1
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c43
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c167
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c55
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c3
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h1
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c30
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c9
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c135
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c25
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c47
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c103
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c20
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c174
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c23
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c159
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c19
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c402
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c1076
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c77
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c41
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c127
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h26
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h21
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h7
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c56
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c59
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c23
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c136
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c8
-rw-r--r--drivers/staging/rts5208/ms.c86
-rw-r--r--drivers/staging/rts5208/ms.h70
-rw-r--r--drivers/staging/rts5208/rtsx.c3
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c4
-rw-r--r--drivers/staging/rts5208/sd.h2
-rw-r--r--drivers/staging/rts5208/xd.c8
-rw-r--r--drivers/staging/rts5208/xd.h6
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c41
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h18
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c4
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c16
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c28
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h11
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c94
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h83
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h17
-rw-r--r--drivers/staging/uwb/rsv.c4
-rw-r--r--drivers/staging/vboxsf/Kconfig10
-rw-r--r--drivers/staging/vboxsf/Makefile5
-rw-r--r--drivers/staging/vboxsf/TODO7
-rw-r--r--drivers/staging/vboxsf/dir.c418
-rw-r--r--drivers/staging/vboxsf/file.c370
-rw-r--r--drivers/staging/vboxsf/shfl_hostintf.h901
-rw-r--r--drivers/staging/vboxsf/super.c501
-rw-r--r--drivers/staging/vboxsf/utils.c551
-rw-r--r--drivers/staging/vboxsf/vboxsf_wrappers.c371
-rw-r--r--drivers/staging/vboxsf/vfsmod.h137
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig12
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c9
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h102
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h172
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h28
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c23
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c370
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c231
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h104
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h96
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c164
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c4
-rw-r--r--drivers/staging/vt6655/card.c24
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/device_main.c14
-rw-r--r--drivers/staging/vt6655/power.c10
-rw-r--r--drivers/staging/vt6655/rf.c5
-rw-r--r--drivers/staging/vt6655/rf.h19
-rw-r--r--drivers/staging/vt6655/rxtx.c5
-rw-r--r--drivers/staging/vt6656/main_usb.c9
-rw-r--r--drivers/staging/vt6656/rxtx.c8
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt97
-rw-r--r--drivers/staging/wfx/Kconfig8
-rw-r--r--drivers/staging/wfx/Makefile24
-rw-r--r--drivers/staging/wfx/TODO17
-rw-r--r--drivers/staging/wfx/bh.c321
-rw-r--r--drivers/staging/wfx/bh.h32
-rw-r--r--drivers/staging/wfx/bus.h36
-rw-r--r--drivers/staging/wfx/bus_sdio.c271
-rw-r--r--drivers/staging/wfx/bus_spi.c267
-rw-r--r--drivers/staging/wfx/data_rx.c213
-rw-r--r--drivers/staging/wfx/data_rx.h19
-rw-r--r--drivers/staging/wfx/data_tx.c837
-rw-r--r--drivers/staging/wfx/data_tx.h93
-rw-r--r--drivers/staging/wfx/debug.c311
-rw-r--r--drivers/staging/wfx/debug.h19
-rw-r--r--drivers/staging/wfx/fwio.c413
-rw-r--r--drivers/staging/wfx/fwio.h15
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h681
-rw-r--r--drivers/staging/wfx/hif_api_general.h437
-rw-r--r--drivers/staging/wfx/hif_api_mib.h557
-rw-r--r--drivers/staging/wfx/hif_rx.c364
-rw-r--r--drivers/staging/wfx/hif_rx.h18
-rw-r--r--drivers/staging/wfx/hif_tx.c493
-rw-r--r--drivers/staging/wfx/hif_tx.h68
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h293
-rw-r--r--drivers/staging/wfx/hwio.c352
-rw-r--r--drivers/staging/wfx/hwio.h80
-rw-r--r--drivers/staging/wfx/key.c268
-rw-r--r--drivers/staging/wfx/key.h22
-rw-r--r--drivers/staging/wfx/main.c491
-rw-r--r--drivers/staging/wfx/main.h47
-rw-r--r--drivers/staging/wfx/queue.c619
-rw-r--r--drivers/staging/wfx/queue.h61
-rw-r--r--drivers/staging/wfx/scan.c294
-rw-r--r--drivers/staging/wfx/scan.h42
-rw-r--r--drivers/staging/wfx/secure_link.h57
-rw-r--r--drivers/staging/wfx/sta.c1684
-rw-r--r--drivers/staging/wfx/sta.h103
-rw-r--r--drivers/staging/wfx/traces.h443
-rw-r--r--drivers/staging/wfx/wfx.h208
-rw-r--r--drivers/staging/wilc1000/Makefile8
-rw-r--r--drivers/staging/wilc1000/cfg80211.c (renamed from drivers/staging/wilc1000/wilc_wfi_cfgoperations.c)246
-rw-r--r--drivers/staging/wilc1000/cfg80211.h (renamed from drivers/staging/wilc1000/wilc_wfi_cfgoperations.h)8
-rw-r--r--drivers/staging/wilc1000/hif.c (renamed from drivers/staging/wilc1000/wilc_hif.c)43
-rw-r--r--drivers/staging/wilc1000/hif.h (renamed from drivers/staging/wilc1000/wilc_hif.h)6
-rw-r--r--drivers/staging/wilc1000/mon.c (renamed from drivers/staging/wilc1000/wilc_mon.c)4
-rw-r--r--drivers/staging/wilc1000/netdev.c (renamed from drivers/staging/wilc1000/wilc_netdev.c)146
-rw-r--r--drivers/staging/wilc1000/netdev.h (renamed from drivers/staging/wilc1000/wilc_wfi_netdevice.h)34
-rw-r--r--drivers/staging/wilc1000/sdio.c (renamed from drivers/staging/wilc1000/wilc_sdio.c)4
-rw-r--r--drivers/staging/wilc1000/spi.c (renamed from drivers/staging/wilc1000/wilc_spi.c)15
-rw-r--r--drivers/staging/wilc1000/wlan.c (renamed from drivers/staging/wilc1000/wilc_wlan.c)4
-rw-r--r--drivers/staging/wilc1000/wlan.h (renamed from drivers/staging/wilc1000/wilc_wlan.h)2
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.c (renamed from drivers/staging/wilc1000/wilc_wlan_cfg.c)30
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.h (renamed from drivers/staging/wilc1000/wilc_wlan_cfg.h)0
-rw-r--r--drivers/staging/wilc1000/wlan_if.h (renamed from drivers/staging/wilc1000/wilc_wlan_if.h)8
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h18
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c64
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c6
-rw-r--r--drivers/thunderbolt/cap.c6
-rw-r--r--drivers/thunderbolt/ctl.c8
-rw-r--r--drivers/thunderbolt/eeprom.c11
-rw-r--r--drivers/thunderbolt/icm.c157
-rw-r--r--drivers/thunderbolt/lc.c193
-rw-r--r--drivers/thunderbolt/path.c52
-rw-r--r--drivers/thunderbolt/switch.c586
-rw-r--r--drivers/thunderbolt/tb.c340
-rw-r--r--drivers/thunderbolt/tb.h81
-rw-r--r--drivers/thunderbolt/tb_msgs.h2
-rw-r--r--drivers/thunderbolt/tb_regs.h97
-rw-r--r--drivers/thunderbolt/tunnel.c364
-rw-r--r--drivers/thunderbolt/tunnel.h10
-rw-r--r--drivers/thunderbolt/xdomain.c5
-rw-r--r--drivers/tty/hvc/Kconfig2
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/sh-sci.c11
-rw-r--r--drivers/tty/tty_ldsem.c8
-rw-r--r--drivers/uio/uio_dmem_genirq.c14
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c236
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c79
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c22
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/udc.c75
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c31
-rw-r--r--drivers/usb/core/config.c12
-rw-r--r--drivers/usb/core/devio.c19
-rw-r--r--drivers/usb/core/hub.c201
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc3/Kconfig30
-rw-r--r--drivers/usb/dwc3/core.c37
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c28
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_acm.c21
-rw-r--r--drivers/usb/gadget/function/f_obex.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c21
-rw-r--r--drivers/usb/gadget/function/f_tcm.c13
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c516
-rw-r--r--drivers/usb/gadget/function/u_serial.h8
-rw-r--r--drivers/usb/gadget/legacy/Kconfig26
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c3
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c3
-rw-r--r--drivers/usb/gadget/legacy/serial.c49
-rw-r--r--drivers/usb/gadget/udc/Kconfig19
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c9
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c10
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c12
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h2
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c21
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c5
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c3810
-rw-r--r--drivers/usb/host/Kconfig106
-rw-r--r--drivers/usb/host/bcma-hcd.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c8
-rw-r--r--drivers/usb/host/imx21-dbg.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/ohci-at91.c8
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci-tegra.c126
-rw-r--r--drivers/usb/host/xhci-trace.h26
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h29
-rw-r--r--drivers/usb/image/microtek.c3
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c2
-rw-r--r--drivers/usb/misc/Kconfig22
-rw-r--r--drivers/usb/misc/appledisplay.c8
-rw-r--r--drivers/usb/misc/chaoskey.c24
-rw-r--r--drivers/usb/misc/ftdi-elan.c6
-rw-r--r--drivers/usb/misc/idmouse.c36
-rw-r--r--drivers/usb/misc/legousbtower.c303
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usb251xb.c66
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c35
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-keystone.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c5
-rw-r--r--drivers/usb/renesas_usbhs/common.h3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c19
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c12
-rw-r--r--drivers/usb/roles/class.c21
-rw-r--r--drivers/usb/serial/Kconfig48
-rw-r--r--drivers/usb/serial/ch341.c97
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c762
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/pl2303.c124
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/uas.c10
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/Kconfig11
-rw-r--r--drivers/usb/typec/Makefile1
-rw-r--r--drivers/usb/typec/class.c42
-rw-r--r--drivers/usb/typec/hd3ss3220.c269
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c135
-rw-r--r--drivers/usb/typec/tps6598x.c49
-rw-r--r--drivers/usb/typec/ucsi/displayport.c40
-rw-r--r--drivers/usb/typec/ucsi/trace.c11
-rw-r--r--drivers/usb/typec/ucsi/trace.h79
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c609
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h417
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c91
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c166
-rw-r--r--drivers/usb/usbip/Kconfig1
-rw-r--r--drivers/usb/usbip/stub_rx.c50
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/vhost/vsock.c102
-rw-r--r--drivers/video/console/vgacon.c6
-rw-r--r--drivers/video/fbdev/core/fbmem.c17
-rw-r--r--drivers/video/fbdev/sa1100fb.c13
-rw-r--r--drivers/video/hdmi.c8
-rw-r--r--drivers/virtio/virtio_balloon.c20
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/w1/masters/sgi_w1.c4
-rw-r--r--drivers/w1/slaves/Kconfig8
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2430.c295
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/xen/Kconfig63
-rw-r--r--drivers/xen/mcelog.c14
3921 files changed, 229776 insertions, 102123 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ebe1e9e5fd81..4fb97511a16f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -319,12 +319,6 @@ config ACPI_THERMAL
To compile this driver as a module, choose M here:
the module will be called thermal.
-config ACPI_NUMA
- bool "NUMA support"
- depends on NUMA
- depends on (X86 || IA64 || ARM64)
- default y if IA64 || ARM64
-
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
default ""
@@ -473,8 +467,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
source "drivers/acpi/nfit/Kconfig"
-source "drivers/acpi/hmat/Kconfig"
-
+source "drivers/acpi/numa/Kconfig"
source "drivers/acpi/apei/Kconfig"
source "drivers/acpi/dptf/Kconfig"
@@ -513,11 +506,19 @@ menuconfig PMIC_OPREGION
PMIC chip.
if PMIC_OPREGION
-config CRC_PMIC_OPREGION
- bool "ACPI operation region support for CrystalCove PMIC"
+config BYTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Bay Trail Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for the Bay Trail
+ version of the Crystal Cove PMIC.
+
+config CHTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC"
depends on INTEL_SOC_PMIC
help
- This config adds ACPI operation region support for CrystalCove PMIC.
+ This config adds ACPI operation region support for the Cherry Trail
+ version of the Crystal Cove PMIC.
config XPOWER_PMIC_OPREGION
bool "ACPI operation region support for XPower AXP288 PMIC"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 5d361e4e3405..33fdaf67454e 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -48,14 +48,13 @@ acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += power.o
acpi-y += event.o
-acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
+acpi-y += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
-acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
@@ -80,7 +79,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
-obj-$(CONFIG_ACPI_HMAT) += hmat/
+obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
@@ -109,7 +108,8 @@ obj-$(CONFIG_ACPI_APEI) += apei/
obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
-obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
+obj-$(CONFIG_BYTCRC_PMIC_OPREGION) += pmic/intel_pmic_bytcrc.o
+obj-$(CONFIG_CHTCRC_PMIC_OPREGION) += pmic/intel_pmic_chtcrc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 57d9d574d4dd..ece8c1a921cc 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -53,7 +53,7 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
if (!table->header)
return -ENOMEM;
- ret = acpi_load_table(table->header);
+ ret = acpi_load_table(table->header, &table->index);
if (ret) {
kfree(table->header);
table->header = NULL;
@@ -223,7 +223,7 @@ static void acpi_table_drop_item(struct config_group *group,
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
- acpi_tb_unload_table(table->index);
+ acpi_unload_table(table->index);
}
static struct configfs_group_operations acpi_table_group_ops = {
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 60bbc5090abe..70f740b09684 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
@@ -463,6 +464,18 @@ struct lpss_device_links {
const char *consumer_hid;
const char *consumer_uid;
u32 flags;
+ const struct dmi_system_id *dep_missing_ids;
+};
+
+/* Please keep this list sorted alphabetically by vendor and model */
+static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ },
+ {}
};
/*
@@ -473,36 +486,29 @@ struct lpss_device_links {
* the supplier is not enumerated until after the consumer is probed.
*/
static const struct lpss_device_links lpss_device_links[] = {
+ /* CHT External sdcard slot controller depends on PMIC I2C ctrl */
{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+ /* CHT iGPU depends on PMIC I2C controller */
{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
+ {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
+ i2c1_dep_missing_dmi_ids},
+ /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
+ {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};
-static bool hid_uid_match(struct acpi_device *adev,
- const char *hid2, const char *uid2)
-{
- const char *hid1 = acpi_device_hid(adev);
- const char *uid1 = acpi_device_uid(adev);
-
- if (strcmp(hid1, hid2))
- return false;
-
- if (!uid2)
- return true;
-
- return uid1 && !strcmp(uid1, uid2);
-}
-
static bool acpi_lpss_is_supplier(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
+ return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}
static bool acpi_lpss_is_consumer(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
+ return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}
struct hid_uid {
@@ -518,7 +524,7 @@ static int match_hid_uid(struct device *dev, const void *data)
if (!adev)
return 0;
- return hid_uid_match(adev, id->hid, id->uid);
+ return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
@@ -570,7 +576,8 @@ static void acpi_lpss_link_consumer(struct device *dev1,
if (!dev2)
return;
- if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
+ || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
device_link_add(dev2, dev1, link->flags);
put_device(dev2);
@@ -585,7 +592,8 @@ static void acpi_lpss_link_supplier(struct device *dev1,
if (!dev2)
return;
- if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
+ || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
device_link_add(dev1, dev2, link->flags);
put_device(dev2);
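The new dep_missing_ids field covers machines whose BIOS leaves the _DEP dependency out of the consumer's ACPI tables: when the running system matches one of the DMI entries, the device link is created unconditionally instead of being gated on acpi_lpss_dep(). A minimal sketch of that test in isolation, assuming only the dmi_check_system() API from <linux/dmi.h> (it returns the number of matching entries, so non-zero means the quirk applies):

	/* Sketch: mirrors the dep_missing_ids check added above. */
	static bool lpss_dep_known_missing(const struct lpss_device_links *link)
	{
		return link->dep_missing_ids &&
		       dmi_check_system(link->dep_missing_ids);
	}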
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 00ec4f2bf015..c05050f474cd 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -31,6 +31,44 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"", 0},
};
+static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev);
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int acpi_platform_device_remove_notify(struct notifier_block *nb,
+ unsigned long value, void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct platform_device *pdev;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ /* Nothing to do here */
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ pdev = acpi_platform_device_find_by_companion(adev);
+ if (!pdev)
+ break;
+
+ platform_device_unregister(pdev);
+ put_device(&pdev->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_platform_notifier = {
+ .notifier_call = acpi_platform_device_remove_notify,
+};
+
static void acpi_platform_fill_resource(struct acpi_device *adev,
const struct resource *src, struct resource *dest)
{
@@ -130,3 +168,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
return pdev;
}
EXPORT_SYMBOL_GPL(acpi_create_platform_device);
+
+void __init acpi_platform_init(void)
+{
+ acpi_reconfig_notifier_register(&acpi_platform_notifier);
+}
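The notifier above is what ties dynamic table unload to device teardown: when an SSDT goes away, every platform device whose ACPI companion came from that table is unregistered. A minimal sketch of another consumer of the same chain, assuming only the acpi_reconfig_notifier_register() API used in this hunk (the handler itself is hypothetical):

	static int demo_reconfig_notify(struct notifier_block *nb,
					unsigned long value, void *arg)
	{
		struct acpi_device *adev = arg;

		/* Log removals driven by dynamic table unload */
		if (value == ACPI_RECONFIG_DEVICE_REMOVE)
			dev_info(&adev->dev, "ACPI companion removed\n");
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_reconfig_notify,
	};

	/* registered once at init: acpi_reconfig_notifier_register(&demo_nb); */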
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4f325e47519f..2f380e7381d6 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -699,9 +699,13 @@ acpi_video_device_EDID(struct acpi_video_device *device,
* event notify code.
* lcd_flag :
* 0. The system BIOS should automatically control the brightness level
- * of the LCD when the power changes from AC to DC
+ * of the LCD when:
+ * - the power changes from AC to DC (ACPI appendix B)
+ * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* 1. The system BIOS should NOT automatically control the brightness
- * level of the LCD when the power changes from AC to DC.
+ * level of the LCD when:
+ * - the power changes from AC to DC (ACPI appendix B)
+ * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* Return Value:
* -EINVAL wrong arg.
*/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 32f2e38c7570..694cf206fa9a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -148,6 +148,8 @@ void acpi_db_find_references(char *object_arg);
void acpi_db_get_bus_info(void);
+acpi_status acpi_db_display_fields(u32 address_space_id);
+
/*
* dbdisply - debug display commands
*/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 218ff4c8b817..2043dff370b1 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -192,6 +192,16 @@ struct acpi_device_walk_info {
u32 num_INI;
};
+/* Info used by acpi_db_display_fields */
+
+struct acpi_region_walk_info {
+ u32 debug_level;
+ u32 count;
+ acpi_owner_id owner_id;
+ u8 display_type;
+ u32 address_space_id;
+};
+
/* TBD: [Restructure] Merge with struct above */
struct acpi_walk_info {
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 601808be86d1..5fb50634e08e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -142,10 +142,11 @@ struct acpi_pkg_info {
/* acpi_ut_dump_buffer */
-#define DB_BYTE_DISPLAY 1
-#define DB_WORD_DISPLAY 2
-#define DB_DWORD_DISPLAY 4
-#define DB_QWORD_DISPLAY 8
+#define DB_BYTE_DISPLAY 0x01
+#define DB_WORD_DISPLAY 0x02
+#define DB_DWORD_DISPLAY 0x04
+#define DB_QWORD_DISPLAY 0x08
+#define DB_DISPLAY_DATA_ONLY 0x10
/*
* utascii - ASCII utilities
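The existing width macros move to hex because DB_DISPLAY_DATA_ONLY is a modifier bit meant to be OR-ed with exactly one of them; the dbnames.c and utbuffer.c hunks later in this diff show both sides of that contract. A one-line sketch of a call site, assuming a u8 *data buffer in scope:

	/* Dump 16 bytes as raw hex only: no offset column, no ASCII column */
	acpi_ut_dump_buffer(data, 16, DB_DISPLAY_DATA_ONLY | DB_BYTE_DISPLAY, 0);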
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 9fd9a98a9cbe..2b84ac093698 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -106,6 +106,10 @@ acpi_db_convert_to_buffer(char *string, union acpi_object *object)
u8 *buffer;
acpi_status status;
+ /* Skip all preceding white space */
+
+ acpi_ut_remove_whitespace(&string);
+
/* Generate the final buffer length */
for (i = 0, length = 0; string[i];) {
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 30ab62b0fec8..f2df416d0d2d 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -513,7 +513,6 @@ void acpi_db_display_results(void)
return;
}
- obj_desc = walk_state->method_desc;
node = walk_state->method_node;
if (walk_state->results) {
@@ -565,7 +564,6 @@ void acpi_db_display_calling_tree(void)
return;
}
- node = walk_state->method_node;
acpi_os_printf("Current Control Method Call Tree\n");
while (walk_state) {
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index c6e25734dc5c..e1b6e54a96ac 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -93,7 +93,7 @@ acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head)
while (table_list_head) {
table = table_list_head->table;
- status = acpi_load_table(table);
+ status = acpi_load_table(table, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
acpi_os_printf
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 55a7e10998d8..e1632b340182 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -50,6 +50,7 @@ enum acpi_ex_debugger_commands {
CMD_EVALUATE,
CMD_EXECUTE,
CMD_EXIT,
+ CMD_FIELDS,
CMD_FIND,
CMD_GO,
CMD_HANDLERS,
@@ -127,6 +128,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"EVALUATE", 1},
{"EXECUTE", 1},
{"EXIT", 0},
+ {"FIELDS", 1},
{"FIND", 1},
{"GO", 0},
{"HANDLERS", 0},
@@ -200,6 +202,8 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
"Find ACPI name(s) with wildcards\n"},
{1, " Integrity", "Validate namespace integrity\n"},
{1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Fields <AddressSpaceId>",
+ "Display list of loaded field units by space ID\n"},
{1, " Namespace [Object] [Depth]",
"Display loaded namespace tree/subtree\n"},
{1, " Notify <Object> <Value>", "Send a notification on Object\n"},
@@ -507,6 +511,21 @@ char *acpi_db_get_next_token(char *string,
}
break;
+ case '{':
+
+ /* This is the start of a field unit, scan until closing brace */
+
+ string++;
+ start = string;
+ type = ACPI_TYPE_FIELD_UNIT;
+
+ /* Find the closing brace that ends the field unit */
+
+ while (*string && (*string != '}')) {
+ string++;
+ }
+ break;
+
case '[':
/* This is the start of a package, scan until closing bracket */
@@ -674,6 +693,7 @@ acpi_db_command_dispatch(char *input_buffer,
union acpi_parse_object *op)
{
u32 temp;
+ u64 temp64;
u32 command_index;
u32 param_count;
char *command_line;
@@ -689,7 +709,6 @@ acpi_db_command_dispatch(char *input_buffer,
param_count = acpi_db_get_line(input_buffer);
command_index = acpi_db_match_command(acpi_gbl_db_args[0]);
- temp = 0;
/*
* We don't want to add the !! command to the history buffer. It
@@ -790,6 +809,21 @@ acpi_db_command_dispatch(char *input_buffer,
status = acpi_db_find_name_in_namespace(acpi_gbl_db_args[1]);
break;
+ case CMD_FIELDS:
+
+ status = acpi_ut_strtoul64(acpi_gbl_db_args[1], &temp64);
+
+ if (ACPI_FAILURE(status)
+ || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
+ acpi_os_printf
+ ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ACPI_NUM_PREDEFINED_REGIONS - 1);
+ return (AE_OK);
+ }
+
+ status = acpi_db_display_fields((u32)temp64);
+ break;
+
case CMD_GO:
acpi_gbl_cm_single_step = FALSE;
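The argument to CMD_FIELDS is one of the predefined operation region space IDs (0 = SystemMemory, 1 = SystemIO, 2 = PCI_Config, 3 = EmbeddedControl, ...), which is what the range check against ACPI_NUM_PREDEFINED_REGIONS enforces. For a hypothetical "fields 3" typed at the debugger prompt, the dispatch above reduces to:

	status = acpi_db_display_fields(ACPI_ADR_SPACE_EC);	/* space ID 3 */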
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 76a15b6ffc5d..4e48a7de7413 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -321,6 +321,10 @@ acpi_status acpi_db_disassemble_method(char *name)
walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE;
status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
(void)acpi_dm_parse_deferred_ops(op);
/* Now we can disassemble the method */
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 63fe30e86807..3615e1a6efd8 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -10,6 +10,7 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "acpredef.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbnames")
@@ -504,6 +505,86 @@ acpi_db_walk_for_object_counts(acpi_handle obj_handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_db_walk_for_fields
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Display short info about field units in the requested
+ * address space
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_db_walk_for_fields(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ union acpi_object *ret_value;
+ struct acpi_region_walk_info *info =
+ (struct acpi_region_walk_info *)context;
+ struct acpi_buffer buffer;
+ acpi_status status;
+ struct acpi_namespace_node *node = acpi_ns_validate_handle(obj_handle);
+
+ if (!node) {
+ return (AE_OK);
+ }
+ if (node->object->field.region_obj->region.space_id !=
+ info->address_space_id) {
+ return (AE_OK);
+ }
+
+ info->count++;
+
+ /* Get and display the full pathname to this object */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could Not get pathname for object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
+
+ acpi_os_printf("%s ", (char *)buffer.pointer);
+ ACPI_FREE(buffer.pointer);
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+
+ /*
+ * Since this is a field unit, surround the output in braces
+ */
+ acpi_os_printf("{");
+
+ ret_value = (union acpi_object *)buffer.pointer;
+ switch (ret_value->type) {
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf("%8.8X%8.8X",
+ ACPI_FORMAT_UINT64(ret_value->integer.value));
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_ut_dump_buffer(ret_value->buffer.pointer,
+ ret_value->buffer.length,
+ DB_DISPLAY_DATA_ONLY | DB_BYTE_DISPLAY, 0);
+ break;
+
+ default:
+
+ break;
+ }
+ acpi_os_printf("}\n");
+
+ ACPI_FREE(buffer.pointer);
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_walk_for_specific_objects
*
* PARAMETERS: Callback from walk_namespace
@@ -630,6 +711,39 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_display_fields
+ *
+ * PARAMETERS: address_space_id - Address space of the field units to display
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Display the loaded field units whose operation regions are
+ * within the requested address space
+ *
+ ******************************************************************************/
+
+acpi_status acpi_db_display_fields(u32 address_space_id)
+{
+ struct acpi_region_walk_info info;
+
+ info.count = 0;
+ info.owner_id = ACPI_OWNER_ID_MAX;
+ info.debug_level = ACPI_UINT32_MAX;
+ info.display_type = ACPI_DISPLAY_SUMMARY | ACPI_DISPLAY_SHORT;
+ info.address_space_id = address_space_id;
+
+ /* Walk the namespace from the root */
+
+ (void)acpi_walk_namespace(ACPI_TYPE_LOCAL_REGION_FIELD,
+ ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ acpi_db_walk_for_fields, NULL, (void *)&info,
+ NULL);
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_integrity_walk
*
* PARAMETERS: Callback from walk_namespace
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index f9fc84bc3e84..4b4c530a0654 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -464,7 +464,6 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state)
u8 display_args = FALSE;
node = walk_state->method_node;
- obj_desc = walk_state->method_desc;
/* There are no arguments for the module-level code case */
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4847f89c678c..5034fab9cf69 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -85,7 +85,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
control_state->control.loop_timeout = acpi_os_get_timer() +
- (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
+ ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
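The only change in this hunk is where the cast sits, but it fixes a real truncation: in the old form the two 32-bit operands are multiplied in 32-bit arithmetic and the already-truncated product is then widened, while the new form widens one operand first so the multiply happens in 64 bits. A standalone illustration in plain C (representative values, not kernel code):

	uint32_t iters = 0xFFFF;	/* stand-in for acpi_gbl_max_loop_iterations */
	uint32_t per_sec = 10000000;	/* ACPI_100NSEC_PER_SEC: 100ns ticks per second */

	uint64_t truncated = (uint64_t)(iters * per_sec);	/* 32-bit multiply overflows first */
	uint64_t widened = (uint64_t)iters * per_sec;		/* full 64-bit product */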
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index cf4e061bb0f0..faa38a22263a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -149,7 +149,6 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
if (walk_state->deferred_node) {
node = walk_state->deferred_node;
- status = AE_OK;
} else {
/* Execute flag should always be set when this function is entered */
@@ -264,7 +263,6 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
union acpi_parse_object *child;
#ifdef ACPI_EXEC_APP
- u64 value = 0;
union acpi_operand_object *result_desc;
union acpi_operand_object *obj_desc;
char *name_path;
@@ -406,19 +404,17 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
name_path =
acpi_ns_get_external_pathname(info->
field_node);
- obj_desc =
- acpi_ut_create_integer_object
- (value);
if (ACPI_SUCCESS
(ae_lookup_init_file_entry
- (name_path, &value))) {
+ (name_path, &obj_desc))) {
acpi_ex_write_data_to_field
(obj_desc,
acpi_ns_get_attached_object
(info->field_node),
&result_desc);
+ acpi_ut_remove_reference
+ (obj_desc);
}
- acpi_ut_remove_reference(obj_desc);
ACPI_FREE(name_path);
#endif
}
@@ -636,8 +632,6 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
}
/* Name already exists, just ignore this error */
-
- status = AE_OK;
}
arg->common.node = node;
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fb15e9e2373b..9c7adaa7b582 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -110,6 +110,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
status =
acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
if (!gpe_block->previous && !gpe_block->next) {
@@ -359,10 +362,10 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
walk_info.gpe_device = gpe_device;
walk_info.execute_by_owner_id = FALSE;
- status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
- ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_match_gpe_method, NULL,
- &walk_info, NULL);
+ (void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method, NULL, &walk_info,
+ NULL);
/* Return the new block */
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index b04f982e59fa..70d21d5ec5f3 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -156,8 +156,6 @@ acpi_status acpi_ev_gpe_initialize(void)
* GPE0 and GPE1 do not have to be contiguous in the GPE number
* space. However, GPE0 always starts at GPE number zero.
*/
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
}
}
@@ -169,7 +167,6 @@ acpi_status acpi_ev_gpe_initialize(void)
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d45f7639f7ee..aa98fe07cd1b 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -230,11 +230,15 @@ void acpi_ev_terminate(void)
/* Disable all GPEs in all GPE blocks */
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not disable GPEs in GPE block"));
+ }
status = acpi_ev_remove_global_lock_handler();
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not remove Global Lock handler"));
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not remove Global Lock handler"));
}
acpi_gbl_events_initialized = FALSE;
@@ -250,6 +254,10 @@ void acpi_ev_terminate(void)
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not delete GPE handlers"));
+ }
/* Return to original mode if necessary */
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 45dc797df05d..1ff126460007 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -836,11 +836,11 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
objects[1].type = ACPI_TYPE_INTEGER;
objects[1].integer.value = ACPI_REG_CONNECT;
- status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+ (void)acpi_evaluate_object(reg_method, NULL, &args, NULL);
exit:
/* We ignore all errors from above, don't care */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47bbcd2a23..aee09640d710 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -198,7 +198,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
* root bridge. Still need to return a context object
* for the new PCI_Config operation region, however.
*/
- status = AE_OK;
} else {
ACPI_EXCEPTION((AE_INFO, status,
"Could not install PciConfig handler "
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index abbf9702aa7f..2919746c9041 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -166,6 +166,9 @@ acpi_status acpi_enter_sleep_state_s4bios(void)
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.s4_bios_request, 8);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
do {
acpi_os_stall(ACPI_USEC_PER_MSEC);
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 14cbf63f1991..c86d0770ed6e 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -486,5 +486,5 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
error_exit:
ACPI_FREE(name);
*return_object = new_object;
- return (AE_OK);
+ return (status);
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 9731d7cf1b83..9ad340f644a1 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -291,7 +291,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
for (i = 0;
(i < obj_desc->buffer.length
&& i < 12); i++) {
- acpi_os_printf(" %.2hX",
+ acpi_os_printf(" %2.2X",
obj_desc->buffer.
pointer[i]);
}
@@ -404,7 +404,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
- acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n",
+ acpi_os_printf(" Off %.3X Len %.2X Acc %.2X\n",
(obj_desc->common_field.
base_byte_offset * 8)
+
@@ -589,8 +589,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
goto cleanup;
}
-
- obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
}
cleanup:
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 55b4a5b3331f..161e60ddfb69 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -425,8 +425,8 @@ acpi_get_object_info(acpi_handle handle,
}
if (cls) {
- next_id_string = acpi_ns_copy_device_id(&info->class_code,
- cls, next_id_string);
+ (void)acpi_ns_copy_device_id(&info->class_code,
+ cls, next_id_string);
}
/* Copy the fixed-length data */
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 98e5c7400e54..ded2779fc8ea 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -481,8 +481,7 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
walk_state->opcode = (*op)->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, *op, status);
+ (void)acpi_ps_next_parse_state(walk_state, *op, status);
status2 = acpi_ps_complete_this_op(walk_state, *op);
if (ACPI_FAILURE(status2)) {
@@ -490,7 +489,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
}
}
- status = AE_OK;
break;
case AE_CTRL_BREAK:
@@ -512,14 +510,13 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
walk_state->opcode = (*op)->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
- status = acpi_ps_next_parse_state(walk_state, *op, status);
+ (void)acpi_ps_next_parse_state(walk_state, *op, status);
status2 = acpi_ps_complete_this_op(walk_state, *op);
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
- status = AE_OK;
break;
case AE_CTRL_TERMINATE:
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 570ea0df8a1b..c659b54985a5 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -312,6 +312,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
path_buffer.pointer = user_prt->source;
status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* +1 to include null terminator */
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 309440010ab2..2cf36451e46f 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -933,6 +933,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
}
status = acpi_ns_load_table(table_index, parent_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/*
* Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 86f1693f6d29..0782acf85722 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -268,6 +268,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
*
* PARAMETERS: table - Pointer to a buffer containing the ACPI
* table to be loaded.
+ * table_idx - Pointer to a u32 for storing the table
+ * index; may be NULL
*
* RETURN: Status
*
@@ -278,7 +280,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
* to ensure that the table is not deleted or unmapped.
*
******************************************************************************/
-acpi_status acpi_load_table(struct acpi_table_header *table)
+acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
{
acpi_status status;
u32 table_index;
@@ -297,6 +299,10 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
FALSE, &table_index);
+ if (table_idx) {
+ *table_idx = table_index;
+ }
+
if (ACPI_SUCCESS(status)) {
/* Complete the initialization/resolution of new objects */
@@ -390,3 +396,35 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
}
ACPI_EXPORT_SYMBOL(acpi_unload_parent_table)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_unload_table
+ *
+ * PARAMETERS: table_index - Index as returned by acpi_load_table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Unloads the table identified by table_index (an SSDT or
+ *              OEMx table) and deletes all namespace objects associated
+ *              with that table. Unloading of the DSDT is not allowed.
+ * Note: Mainly intended to support hotplug removal of SSDTs.
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_table(u32 table_index)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_unload_table);
+
+ if (table_index == 1) {
+
+ /* table_index==1 means DSDT is the owner. DSDT cannot be unloaded */
+
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ status = acpi_tb_unload_table(table_index);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_table)
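Taken together, the table_idx out-parameter added to acpi_load_table() and the new acpi_unload_table() give callers a load/unload pair for SSDT hotplug. A minimal usage sketch (error handling trimmed; the caller must keep the table buffer mapped while it is loaded):

	u32 ssdt_index;
	acpi_status status;

	status = acpi_load_table(ssdt, &ssdt_index);	/* ssdt: struct acpi_table_header * */
	if (ACPI_FAILURE(status))
		return status;

	/* ... table in use ... */

	status = acpi_unload_table(ssdt_index);		/* the DSDT is rejected with AE_TYPE */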
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 61db9967ebe4..db897af1de05 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -37,7 +37,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
u32 j;
u32 temp32;
u8 buf_char;
+ u32 display_data_only = display & DB_DISPLAY_DATA_ONLY;
+ display &= ~DB_DISPLAY_DATA_ONLY;
if (!buffer) {
acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
return;
@@ -53,7 +55,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
/* Print current offset */
- acpi_os_printf("%8.4X: ", (base_offset + i));
+ if (!display_data_only) {
+ acpi_os_printf("%8.4X: ", (base_offset + i));
+ }
/* Print 16 hex chars */
@@ -109,32 +113,34 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
* Print the ASCII equivalent characters but watch out for the bad
* unprintable ones (printable chars are 0x20 through 0x7E)
*/
- acpi_os_printf(" ");
- for (j = 0; j < 16; j++) {
- if (i + j >= count) {
- acpi_os_printf("\n");
- return;
+ if (!display_data_only) {
+ acpi_os_printf(" ");
+ for (j = 0; j < 16; j++) {
+ if (i + j >= count) {
+ acpi_os_printf("\n");
+ return;
+ }
+
+ /*
+ * Add comment characters so rest of line is ignored when
+ * compiled
+ */
+ if (j == 0) {
+ acpi_os_printf("// ");
+ }
+
+ buf_char = buffer[(acpi_size)i + j];
+ if (isprint(buf_char)) {
+ acpi_os_printf("%c", buf_char);
+ } else {
+ acpi_os_printf(".");
+ }
}
- /*
- * Add comment characters so rest of line is ignored when
- * compiled
- */
- if (j == 0) {
- acpi_os_printf("// ");
- }
+ /* Done with that line. */
- buf_char = buffer[(acpi_size)i + j];
- if (isprint(buf_char)) {
- acpi_os_printf("%c", buf_char);
- } else {
- acpi_os_printf(".");
- }
+ acpi_os_printf("\n");
}
-
- /* Done with that line. */
-
- acpi_os_printf("\n");
i += 16;
}
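The net effect: callers can now OR the new DB_DISPLAY_DATA_ONLY bit into the display argument to get bare hex words with no offset column and no ASCII/comment trailer. A sketch, assuming the usual ACPICA width selectors such as DB_BYTE_DISPLAY:

	acpi_ut_dump_buffer(buffer, length, DB_BYTE_DISPLAY | DB_DISPLAY_DATA_ONLY, 0);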
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index e805abdd95b8..30198c828ab6 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -289,9 +289,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
value);
length = ACPI_EISAID_STRING_SIZE;
} else { /* ACPI_TYPE_STRING */
-
/* Copy the String CID from the returned object */
-
strcpy(next_id_string, cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 8052f7ef5025..14de4d15e618 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -660,7 +660,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
case ACPI_DESC_TYPE_PARSER:
acpi_os_printf
- ("AmlOpcode 0x%04hX\n",
+ ("AmlOpcode 0x%04X\n",
descriptor->op.asl.
aml_opcode);
break;
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 131c35ee9ed3..e358d0046494 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -170,9 +170,9 @@ rewind:
if (ip == ctx->ip) {
if (entry->instruction >= ctx->instructions ||
!ctx->ins_table[entry->instruction].run) {
- pr_warning(FW_WARN APEI_PFX
- "Invalid action table, unknown instruction type: %d\n",
- entry->instruction);
+ pr_warn(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
return -EINVAL;
}
run = ctx->ins_table[entry->instruction].run;
@@ -211,9 +211,9 @@ static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
if (end)
*end = i;
if (ins >= ctx->instructions || !ins_table[ins].run) {
- pr_warning(FW_WARN APEI_PFX
- "Invalid action table, unknown instruction type: %d\n",
- ins);
+ pr_warn(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
return -EINVAL;
}
rc = func(ctx, entry, data);
@@ -579,18 +579,18 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
space_id = reg->space_id;
*paddr = get_unaligned(&reg->address);
if (!*paddr) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
if (access_size_code < 1 || access_size_code > 4) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
*access_bit_width = 1UL << (access_size_code + 2);
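For reference, the shift maps the GAR access_size_code straight to an access width in bits, so codes 1..4 select 8/16/32/64-bit accesses:

	/* 1UL << (1 + 2) ==  8,  1UL << (2 + 2) == 16,
	   1UL << (3 + 2) == 32,  1UL << (4 + 2) == 64 */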
@@ -604,19 +604,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
*access_bit_width = 64;
if ((bit_width + bit_offset) > *access_bit_width) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index e430cf4caec2..086373f8ccb1 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -172,7 +172,7 @@ static int einj_get_available_error_type(u32 *type)
static int einj_timedout(u64 *t)
{
if ((s64)*t < SPIN_UNIT) {
- pr_warning(FW_WARN "Firmware does not respond in time\n");
+ pr_warn(FW_WARN "Firmware does not respond in time\n");
return 1;
}
*t -= SPIN_UNIT;
@@ -312,7 +312,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
}
rc = einj_check_trigger_header(trigger_tab);
if (rc) {
- pr_warning(FW_BUG "Invalid trigger error action table.\n");
+ pr_warn(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index d0f3a46716e9..c740f0faad39 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -118,9 +118,8 @@ retry:
if (rc < 0)
goto out;
if (len > ERST_DBG_RECORD_LEN_MAX) {
- pr_warning(ERST_DBG_PFX
- "Record (ID: 0x%llx) length is too long: %zd\n",
- id, len);
+ pr_warn(ERST_DBG_PFX
+ "Record (ID: 0x%llx) length is too long: %zd\n", id, len);
rc = -EIO;
goto out;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 777f6f7122b4..8906c80175e6 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -235,10 +235,10 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
goto err_unmap_read_ack_addr;
error_block_length = generic->error_block_length;
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
- pr_warning(FW_WARN GHES_PFX
- "Error status block length is too long: %u for "
- "generic hardware error source: %d.\n",
- error_block_length, generic->header.source_id);
+ pr_warn(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
error_block_length = GHES_ESTATUS_MAX_SIZE;
}
ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
@@ -748,8 +748,8 @@ static void ghes_add_timer(struct ghes *ghes)
unsigned long expire;
if (!g->notify.poll_interval) {
- pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
- g->header.source_id);
+ pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
+ g->header.source_id);
return;
}
expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
@@ -1155,21 +1155,20 @@ static int ghes_probe(struct platform_device *ghes_dev)
}
break;
case ACPI_HEST_NOTIFY_LOCAL:
- pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
- generic->header.source_id);
+ pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
+ generic->header.source_id);
goto err;
default:
- pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
- generic->notify.type, generic->header.source_id);
+ pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
goto err;
}
rc = -EIO;
if (generic->error_block_length <
sizeof(struct acpi_hest_generic_status)) {
- pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
- generic->error_block_length,
- generic->header.source_id);
+ pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length, generic->header.source_id);
goto err;
}
ghes = ghes_new(generic);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 267bdbf6a7bf..822402480f7d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -92,15 +92,15 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
for (i = 0; i < hest_tab->error_source_count; i++) {
len = hest_esrc_len(hest_hdr);
if (!len) {
- pr_warning(FW_WARN HEST_PFX
- "Unknown or unused hardware error source "
- "type: %d for hardware error source: %d.\n",
- hest_hdr->type, hest_hdr->source_id);
+ pr_warn(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
return -EINVAL;
}
if ((void *)hest_hdr + len >
(void *)hest_tab + hest_tab->header.length) {
- pr_warning(FW_BUG HEST_PFX
+ pr_warn(FW_BUG HEST_PFX
"Table contents overflow for hardware error source: %d.\n",
hest_hdr->source_id);
return -EINVAL;
@@ -164,8 +164,8 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
ghes_dev = ghes_arr->ghes_devs[i];
hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
if (hdr->source_id == hest_hdr->source_id) {
- pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
- hdr->source_id);
+ pr_warn(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+ hdr->source_id);
return -EIO;
}
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 558fedf8a7a1..8f0e0c8d8c3d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1176,7 +1176,7 @@ static const struct file_operations acpi_battery_alarm_fops = {
static int acpi_battery_add_fs(struct acpi_device *device)
{
- pr_warning(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ pr_warn(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 4a2cde2c536a..d27b01c0323d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -44,9 +44,19 @@
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
-#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
-#define ACPI_BUTTON_LID_INIT_OPEN 0x01
-#define ACPI_BUTTON_LID_INIT_METHOD 0x02
+enum {
+ ACPI_BUTTON_LID_INIT_IGNORE,
+ ACPI_BUTTON_LID_INIT_OPEN,
+ ACPI_BUTTON_LID_INIT_METHOD,
+ ACPI_BUTTON_LID_INIT_DISABLED,
+};
+
+static const char * const lid_init_state_str[] = {
+ [ACPI_BUTTON_LID_INIT_IGNORE] = "ignore",
+ [ACPI_BUTTON_LID_INIT_OPEN] = "open",
+ [ACPI_BUTTON_LID_INIT_METHOD] = "method",
+ [ACPI_BUTTON_LID_INIT_DISABLED] = "disabled",
+};
#define _COMPONENT ACPI_BUTTON_COMPONENT
ACPI_MODULE_NAME("button");
@@ -65,18 +75,39 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
-/*
- * Some devices which don't even have a lid in anyway have a broken _LID
- * method (e.g. pointing to a floating gpio pin) causing spurious LID events.
- */
-static const struct dmi_system_id lid_blacklst[] = {
+/* Please keep this list sorted alphabetically by vendor and model */
+static const struct dmi_system_id dmi_lid_quirks[] = {
+ {
+ /*
+	 * Asus T200TA: _LID keeps reporting closed after every second
+	 * opening of the lid, causing an immediate re-suspend on every
+	 * other open. Using LID_INIT_OPEN fixes this.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{
- /* GP-electronic T701 */
+ /* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
},
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
+ },
+ {
+ /*
+	 * Medion Akoya E2215T: notification of the LID device only
+	 * happens on close, not on open, and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{}
};
@@ -116,9 +147,8 @@ struct acpi_button {
bool suspended;
};
-static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+static long lid_init_state = -1;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
@@ -146,7 +176,6 @@ static int acpi_lid_evaluate_state(struct acpi_device *device)
static int acpi_lid_notify_state(struct acpi_device *device, int state)
{
struct acpi_button *button = acpi_driver_data(device);
- int ret;
ktime_t next_report;
bool do_update;
@@ -223,18 +252,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
button->last_time = ktime_get();
}
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
- if (ret == NOTIFY_DONE)
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
- device);
- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
- /*
- * It is also regarded as success if the notifier_chain
- * returns NOTIFY_OK or NOTIFY_DONE.
- */
- ret = 0;
- }
- return ret;
+ return 0;
}
static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
@@ -331,18 +349,6 @@ static int acpi_button_remove_fs(struct acpi_device *device)
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
-int acpi_lid_notifier_register(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
-}
-EXPORT_SYMBOL(acpi_lid_notifier_register);
-
-int acpi_lid_notifier_unregister(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
-}
-EXPORT_SYMBOL(acpi_lid_notifier_unregister);
-
int acpi_lid_open(void)
{
if (!lid_device)
@@ -472,7 +478,8 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
- if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) &&
+ lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED)
return -ENODEV;
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
@@ -578,36 +585,30 @@ static int acpi_button_remove(struct acpi_device *device)
static int param_set_lid_init_state(const char *val,
const struct kernel_param *kp)
{
- int result = 0;
-
- if (!strncmp(val, "open", sizeof("open") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
- pr_info("Notify initial lid state as open\n");
- } else if (!strncmp(val, "method", sizeof("method") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
- pr_info("Notify initial lid state with _LID return value\n");
- } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
- pr_info("Do not notify initial lid state\n");
- } else
- result = -EINVAL;
- return result;
+ int i;
+
+ i = sysfs_match_string(lid_init_state_str, val);
+ if (i < 0)
+ return i;
+
+ lid_init_state = i;
+ pr_info("Initial lid state set to '%s'\n", lid_init_state_str[i]);
+ return 0;
}
-static int param_get_lid_init_state(char *buffer,
- const struct kernel_param *kp)
+static int param_get_lid_init_state(char *buf, const struct kernel_param *kp)
{
- switch (lid_init_state) {
- case ACPI_BUTTON_LID_INIT_OPEN:
- return sprintf(buffer, "open");
- case ACPI_BUTTON_LID_INIT_METHOD:
- return sprintf(buffer, "method");
- case ACPI_BUTTON_LID_INIT_IGNORE:
- return sprintf(buffer, "ignore");
- default:
- return sprintf(buffer, "invalid");
- }
- return 0;
+ int i, c = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lid_init_state_str); i++)
+ if (i == lid_init_state)
+ c += sprintf(buf + c, "[%s] ", lid_init_state_str[i]);
+ else
+ c += sprintf(buf + c, "%s ", lid_init_state_str[i]);
+
+ buf[c - 1] = '\n'; /* Replace the final space with a newline */
+
+ return c;
}
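With this rework, reading the parameter lists every state and brackets the active one; e.g., assuming the state is currently "open", /sys/module/button/parameters/lid_init_state reads:

	ignore [open] method disabled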
module_param_call(lid_init_state,
@@ -617,6 +618,16 @@ MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
static int acpi_button_register_driver(struct acpi_driver *driver)
{
+ const struct dmi_system_id *dmi_id;
+
+ if (lid_init_state == -1) {
+ dmi_id = dmi_first_match(dmi_lid_quirks);
+ if (dmi_id)
+ lid_init_state = (long)dmi_id->driver_data;
+ else
+ lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+ }
+
/*
* Modules such as nouveau.ko and i915.ko have a link time dependency
* on acpi_lid_open(), and would therefore not be loadable on ACPI
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index da1e5c5ce150..4fd84fbdac29 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -95,12 +95,12 @@ enum {
EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
- EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
+ EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
- EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
+ EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_GPE_MASKED, /* GPE masked */
+ EC_FLAGS_EVENTS_MASKED, /* Events masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -397,8 +397,8 @@ static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
ec->reference_count++;
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
- ec->reference_count == 1)
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
+ ec->gpe >= 0 && ec->reference_count == 1)
acpi_ec_enable_gpe(ec, true);
}
@@ -407,28 +407,36 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
bool flushed = false;
ec->reference_count--;
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
- ec->reference_count == 0)
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
+ ec->gpe >= 0 && ec->reference_count == 0)
acpi_ec_disable_gpe(ec, true);
flushed = acpi_ec_flushed(ec);
if (flushed)
wake_up(&ec->wait);
}
-static void acpi_ec_mask_gpe(struct acpi_ec *ec)
+static void acpi_ec_mask_events(struct acpi_ec *ec)
{
- if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
- acpi_ec_disable_gpe(ec, false);
+ if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
+ if (ec->gpe >= 0)
+ acpi_ec_disable_gpe(ec, false);
+ else
+ disable_irq_nosync(ec->irq);
+
ec_dbg_drv("Polling enabled");
- set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
+ set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
}
}
-static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
+static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
- if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
- clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
- acpi_ec_enable_gpe(ec, false);
+ if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
+ if (ec->gpe >= 0)
+ acpi_ec_enable_gpe(ec, false);
+ else
+ enable_irq(ec->irq);
+
ec_dbg_drv("Polling disabled");
}
}
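The mask/unmask helpers now hide which event source the EC uses. As a summary inferred from this patch (the code does not state it explicitly), after setup exactly one source is valid per EC:

	/* ec->gpe >= 0 : GPE events via acpi_ec_gpe_handler(),
	 *                masked with acpi_ec_disable_gpe()
	 * ec->irq >= 0 : GpioInt events via acpi_ec_irq_handler(),
	 *                masked with disable_irq_nosync()  */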
@@ -454,7 +462,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_mask_gpe(ec);
+ acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -470,7 +478,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_unmask_gpe(ec);
+ acpi_ec_unmask_events(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -648,7 +656,9 @@ static void advance_transaction(struct acpi_ec *ec)
* ensure a hardware STS 0->1 change after this clearing can always
* trigger a GPE interrupt.
*/
- acpi_ec_clear_gpe(ec);
+ if (ec->gpe >= 0)
+ acpi_ec_clear_gpe(ec);
+
status = acpi_ec_read_status(ec);
t = ec->curr;
/*
@@ -717,7 +727,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_mask_gpe(ec);
+ acpi_ec_mask_events(ec);
}
}
out:
@@ -815,7 +825,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_unmask_gpe(ec);
+ acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1275,18 +1285,28 @@ static void acpi_ec_event_handler(struct work_struct *work)
acpi_ec_check_event(ec);
}
-static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number, void *data)
+static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
unsigned long flags;
- struct acpi_ec *ec = data;
spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, void *data)
+{
+ acpi_ec_handle_interrupt(data);
return ACPI_INTERRUPT_HANDLED;
}
+static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
+{
+ acpi_ec_handle_interrupt(data);
+ return IRQ_HANDLED;
+}
+
/* --------------------------------------------------------------------------
* Address Space Management
* -------------------------------------------------------------------------- */
@@ -1359,6 +1379,8 @@ static struct acpi_ec *acpi_ec_alloc(void)
ec->timestamp = jiffies;
ec->busy_polling = true;
ec->polling_guard = 0;
+ ec->gpe = -1;
+ ec->irq = -1;
return ec;
}
@@ -1406,9 +1428,13 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
/* Get GPE bit assignment (EC events). */
/* TODO: Add support for _GPE returning a package */
status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return status;
- ec->gpe = tmp;
+ if (ACPI_SUCCESS(status))
+ ec->gpe = tmp;
+
+ /*
+ * Errors are non-fatal, allowing for ACPI Reduced Hardware
+ * platforms which use GpioInt instead of GPE.
+ */
}
/* Use the global lock for all EC transactions? */
tmp = 0;
@@ -1418,12 +1444,57 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
return AE_CTRL_TERMINATE;
}
+static void install_gpe_event_handler(struct acpi_ec *ec)
+{
+ acpi_status status =
+ acpi_install_gpe_raw_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler,
+ ec);
+ if (ACPI_SUCCESS(status)) {
+ /* This is not fatal as we can poll EC events */
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
+ }
+}
+
+/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
+static int install_gpio_irq_event_handler(struct acpi_ec *ec,
+ struct acpi_device *device)
+{
+ int irq = acpi_dev_gpio_irq_get(device, 0);
+ int ret;
+
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
+ "ACPI EC", ec);
+
+ /*
+	 * Unlike the GPE case, we treat errors here as fatal; we'll only
+	 * implement GPIO polling if we find a case that needs it.
+ */
+ if (ret < 0)
+ return ret;
+
+ ec->irq = irq;
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
+
+ return 0;
+}
+
/*
* Note: This function returns an error code only when the address space
* handler is not installed, which means "not able to handle
* transactions".
*/
-static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
+static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ bool handle_events)
{
acpi_status status;
@@ -1456,24 +1527,23 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
if (!handle_events)
return 0;
- if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods,
NULL, ec, NULL);
- set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
+ set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
- if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
- status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- /* This is not fatal as we can poll EC events */
- if (ACPI_SUCCESS(status)) {
- set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
- acpi_ec_leave_noirq(ec);
- if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
- acpi_ec_enable_gpe(ec, true);
+ if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0) {
+ install_gpe_event_handler(ec);
+ } else if (device) {
+ int ret = install_gpio_irq_event_handler(ec, device);
+
+ if (ret)
+ return ret;
+ } else { /* No GPE and no GpioInt? */
+ return -ENODEV;
}
}
/* EC is fully operational, allow queries */
@@ -1504,23 +1574,29 @@ static void ec_remove_handlers(struct acpi_ec *ec)
*/
acpi_ec_stop(ec, false);
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
- if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler)))
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0 &&
+ ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
pr_err("failed to remove gpe handler\n");
- clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+
+ if (ec->irq >= 0)
+ free_irq(ec->irq, ec);
+
+ clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
}
- if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
+ if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
acpi_ec_remove_query_handlers(ec, true, 0);
- clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
+ clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
}
-static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
+static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
+ bool handle_events)
{
int ret;
- ret = ec_install_handlers(ec, handle_events);
+ ret = ec_install_handlers(ec, device, handle_events);
if (ret)
return ret;
@@ -1531,8 +1607,8 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
- ec->gpe, ec->command_addr, ec->data_addr);
+ "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
return ret;
}
@@ -1596,7 +1672,7 @@ static int acpi_ec_add(struct acpi_device *device)
}
}
- ret = acpi_ec_setup(ec, true);
+ ret = acpi_ec_setup(ec, device, true);
if (ret)
goto err_query;
@@ -1716,7 +1792,7 @@ void __init acpi_ec_dsdt_probe(void)
* At this point, the GPE is not fully initialized, so do not to
* handle the events.
*/
- ret = acpi_ec_setup(ec, false);
+ ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1889,14 +1965,21 @@ void __init acpi_ec_ecdt_probe(void)
ec->command_addr = ecdt_ptr->control.address;
ec->data_addr = ecdt_ptr->data.address;
}
- ec->gpe = ecdt_ptr->gpe;
+
+ /*
+ * Ignore the GPE value on Reduced Hardware platforms.
+ * Some products have this set to an erroneous value.
+ */
+ if (!acpi_gbl_reduced_hardware)
+ ec->gpe = ecdt_ptr->gpe;
+
ec->handle = ACPI_ROOT_OBJECT;
/*
* At this point, the namespace is not initialized, so do not find
* the namespace objects, or handle the events.
*/
- ret = acpi_ec_setup(ec, false);
+ ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1928,7 +2011,7 @@ static int acpi_ec_suspend_noirq(struct device *dev)
* masked at the low level without side effects.
*/
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
+ ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
acpi_ec_enter_noirq(ec);
@@ -1943,7 +2026,7 @@ static int acpi_ec_resume_noirq(struct device *dev)
acpi_ec_leave_noirq(ec);
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
+ ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
return 0;
diff --git a/drivers/acpi/hmat/Makefile b/drivers/acpi/hmat/Makefile
deleted file mode 100644
index 1c20ef36a385..000000000000
--- a/drivers/acpi/hmat/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ACPI_HMAT) := hmat.o
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index afe6636f9ad3..3616daec650b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -165,7 +165,8 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- u32 gpe;
+ int gpe;
+ int irq;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/hmat/Kconfig b/drivers/acpi/numa/Kconfig
index 95a29964dbea..fcf2e556d69d 100644
--- a/drivers/acpi/hmat/Kconfig
+++ b/drivers/acpi/numa/Kconfig
@@ -1,8 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+config ACPI_NUMA
+ bool "NUMA support"
+ depends on NUMA
+ depends on (X86 || IA64 || ARM64)
+ default y if IA64 || ARM64
+
config ACPI_HMAT
bool "ACPI Heterogeneous Memory Attribute Table Support"
depends on ACPI_NUMA
select HMEM_REPORTING
+ select MEMREGION
help
If set, this option has the kernel parse and report the
platform's ACPI HMAT (Heterogeneous Memory Attributes Table),
diff --git a/drivers/acpi/numa/Makefile b/drivers/acpi/numa/Makefile
new file mode 100644
index 000000000000..517a6c689a94
--- /dev/null
+++ b/drivers/acpi/numa/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ACPI_NUMA) += srat.o
+obj-$(CONFIG_ACPI_HMAT) += hmat.o
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/numa/hmat.c
index 8b0de8a3c647..2c32cfb72370 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -8,12 +8,18 @@
* the applicable attributes with the node's interfaces.
*/
+#define pr_fmt(fmt) "acpi/hmat: " fmt
+#define dev_fmt(fmt) "acpi/hmat: " fmt
+
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <linux/list_sort.h>
+#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
@@ -49,6 +55,7 @@ struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
+ struct resource memregions;
struct node_hmem_attrs hmem_attrs;
struct list_head caches;
struct node_cache_attrs cache_attrs;
@@ -104,22 +111,36 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
list_add_tail(&initiator->node, &initiators);
}
-static __init void alloc_memory_target(unsigned int mem_pxm)
+static __init void alloc_memory_target(unsigned int mem_pxm,
+ resource_size_t start, resource_size_t len)
{
struct memory_target *target;
target = find_mem_target(mem_pxm);
- if (target)
- return;
-
- target = kzalloc(sizeof(*target), GFP_KERNEL);
- if (!target)
- return;
+ if (!target) {
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target)
+ return;
+ target->memory_pxm = mem_pxm;
+ target->processor_pxm = PXM_INVAL;
+ target->memregions = (struct resource) {
+ .name = "ACPI mem",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+ };
+ list_add_tail(&target->node, &targets);
+ INIT_LIST_HEAD(&target->caches);
+ }
- target->memory_pxm = mem_pxm;
- target->processor_pxm = PXM_INVAL;
- list_add_tail(&target->node, &targets);
- INIT_LIST_HEAD(&target->caches);
+ /*
+ * There are potentially multiple ranges per PXM, so record each
+ * in the per-target memregions resource tree.
+ */
+ if (!__request_region(&target->memregions, start, len, "memory target",
+ IORESOURCE_MEM))
+ pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
+ start, start + len, mem_pxm);
}
static __init const char *hmat_data_type(u8 type)
@@ -272,7 +293,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
u8 type, mem_hier;
if (hmat_loc->header.length < sizeof(*hmat_loc)) {
- pr_notice("HMAT: Unexpected locality header length: %d\n",
+ pr_notice("HMAT: Unexpected locality header length: %u\n",
hmat_loc->header.length);
return -EINVAL;
}
@@ -284,12 +305,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
sizeof(*inits) * ipds + sizeof(*targs) * tpds;
if (hmat_loc->header.length < total_size) {
- pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
+ pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
hmat_loc->header.length, total_size);
return -EINVAL;
}
- pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
+ pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
hmat_loc->flags, hmat_data_type(type), ipds, tpds,
hmat_loc->entry_base_unit);
@@ -302,7 +323,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%d-%d]:%d%s\n",
+ pr_info(" Initiator-Target[%u-%u]:%u%s\n",
inits[init], targs[targ], value,
hmat_data_type_suffix(type));
@@ -329,13 +350,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
- pr_notice("HMAT: Unexpected cache header length: %d\n",
+ pr_notice("HMAT: Unexpected cache header length: %u\n",
cache->header.length);
return -EINVAL;
}
attrs = cache->cache_attributes;
- pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
@@ -390,17 +411,17 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
struct memory_target *target = NULL;
if (p->header.length != sizeof(*p)) {
- pr_notice("HMAT: Unexpected address range header length: %d\n",
+ pr_notice("HMAT: Unexpected address range header length: %u\n",
p->header.length);
return -EINVAL;
}
if (hmat_revision == 1)
- pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->reserved3, p->reserved4, p->flags, p->processor_PD,
p->memory_PD);
else
- pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
@@ -417,7 +438,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_debug("HMAT: Invalid Processor Domain\n");
return -EINVAL;
}
- target->processor_pxm = p_node;
+ target->processor_pxm = p->processor_PD;
}
return 0;
@@ -452,7 +473,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
return -EINVAL;
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return 0;
- alloc_memory_target(ma->proximity_domain);
+ alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
return 0;
}
@@ -613,11 +634,92 @@ static void hmat_register_target_perf(struct memory_target *target)
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}
+static void hmat_register_target_device(struct memory_target *target,
+ struct resource *r)
+{
+ /* define a clean / non-busy resource for the platform device */
+ struct resource res = {
+ .start = r->start,
+ .end = r->end,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *pdev;
+ struct memregion_info info;
+ int rc, id;
+
+ rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
+ IORES_DESC_SOFT_RESERVED);
+ if (rc != REGION_INTERSECTS)
+ return;
+
+ id = memregion_alloc(GFP_KERNEL);
+ if (id < 0) {
+ pr_err("memregion allocation failure for %pr\n", &res);
+ return;
+ }
+
+ pdev = platform_device_alloc("hmem", id);
+ if (!pdev) {
+ pr_err("hmem device allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
+ info = (struct memregion_info) {
+ .target_node = acpi_map_pxm_to_node(target->memory_pxm),
+ };
+ rc = platform_device_add_data(pdev, &info, sizeof(info));
+ if (rc < 0) {
+ pr_err("hmem memregion_info allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ rc = platform_device_add_resources(pdev, &res, 1);
+ if (rc < 0) {
+ pr_err("hmem resource allocation failure for %pr\n", &res);
+ goto out_resource;
+ }
+
+ rc = platform_device_add(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "device add failed for %pr\n", &res);
+ goto out_resource;
+ }
+
+ return;
+
+out_resource:
+ put_device(&pdev->dev);
+out_pdev:
+ memregion_free(id);
+}
+
+static void hmat_register_target_devices(struct memory_target *target)
+{
+ struct resource *res;
+
+ /*
+ * Do not bother creating devices if no driver is available to
+ * consume them.
+ */
+ if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
+ return;
+
+ for (res = target->memregions.child; res; res = res->sibling)
+ hmat_register_target_device(target, res);
+}
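The "hmem" platform devices registered above are intended to be claimed by a matching platform driver (a dev_dax consumer in this series). A hedged consumer sketch; the probe body is assumed rather than taken from this diff:

	static int hmem_probe(struct platform_device *pdev)
	{
		struct memregion_info *info = dev_get_platdata(&pdev->dev);
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res || !info)
			return -ENODEV;
		/* hand res off to a device-dax region for info->target_node */
		return 0;
	}

	static struct platform_driver hmem_driver = {
		.probe	= hmem_probe,
		.driver = { .name = "hmem" },
	};
	builtin_platform_driver(hmem_driver);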
+
static void hmat_register_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Devices may belong to either an offline or online
+ * node, so unconditionally add them.
+ */
+ hmat_register_target_devices(target);
+
+ /*
* Skip offline nodes. This can happen when memory
* marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
@@ -677,11 +779,21 @@ static __init void hmat_free_structures(void)
struct target_cache *tcache, *cnext;
list_for_each_entry_safe(target, tnext, &targets, node) {
+ struct resource *res, *res_next;
+
list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
list_del(&tcache->node);
kfree(tcache);
}
+
list_del(&target->node);
+ res = target->memregions.child;
+ while (res) {
+ res_next = res->sibling;
+ __release_region(&target->memregions, res->start,
+ resource_size(res));
+ res = res_next;
+ }
kfree(target);
}
@@ -748,4 +860,4 @@ out_put:
acpi_put_table(tbl);
return 0;
}
-subsys_initcall(hmat_init);
+device_initcall(hmat_init);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa/srat.c
index eadbf90e65d1..eadbf90e65d1 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa/srat.c
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index bec0bebc7f52..9f6853809138 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -473,9 +473,9 @@ static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
*/
/*
- * Without this this EEEpc exports a non working WMI interface, with
- * this it exports a working "good old" eeepc_laptop interface, fixing
- * both brightness control, and rfkill not working.
+	 * Without this quirk the EEEpc exports a non-working WMI
+	 * interface; with it the EEEpc exports a working "good old"
+	 * eeepc_laptop interface, fixing both brightness control and
+	 * rfkill.
*/
{
.callback = dmi_enable_osi_linux,
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index 452041398b34..a371f273f99d 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -252,7 +252,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
struct regmap *regmap,
struct intel_pmic_opregion_data *d)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct intel_pmic_opregion *opregion;
int ret;
@@ -270,7 +270,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
opregion->regmap = regmap;
opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
- status = acpi_install_address_space_handler(handle,
+ if (d->power_table_count)
+ status = acpi_install_address_space_handler(handle,
PMIC_POWER_OPREGION_ID,
intel_pmic_power_handler,
NULL, opregion);
@@ -279,7 +280,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
goto out_error;
}
- status = acpi_install_address_space_handler(handle,
+ if (d->thermal_table_count)
+ status = acpi_install_address_space_handler(handle,
PMIC_THERMAL_OPREGION_ID,
intel_pmic_thermal_handler,
NULL, opregion);
@@ -301,12 +303,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
return 0;
out_remove_thermal_handler:
- acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID,
- intel_pmic_thermal_handler);
+ if (d->thermal_table_count)
+ acpi_remove_address_space_handler(handle,
+ PMIC_THERMAL_OPREGION_ID,
+ intel_pmic_thermal_handler);
out_remove_power_handler:
- acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
- intel_pmic_power_handler);
+ if (d->power_table_count)
+ acpi_remove_address_space_handler(handle,
+ PMIC_POWER_OPREGION_ID,
+ intel_pmic_power_handler);
out_error:
acpi_lpat_free_conversion_table(opregion->lpat_table);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index a0f411a6e5ac..2a692cc4b7ae 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Intel CrystalCove PMIC operation region driver
+ * Intel Bay Trail Crystal Cove PMIC operation region driver
*
* Copyright (C) 2014 Intel Corporation. All rights reserved.
*/
@@ -295,7 +295,7 @@ static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
static struct platform_driver intel_crc_pmic_opregion_driver = {
.probe = intel_crc_pmic_opregion_probe,
.driver = {
- .name = "crystal_cove_pmic",
+ .name = "byt_crystal_cove_pmic",
},
};
builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtcrc.c b/drivers/acpi/pmic/intel_pmic_chtcrc.c
new file mode 100644
index 000000000000..ebf8d3187df1
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_chtcrc.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail Crystal Cove PMIC operation region driver
+ *
+ * Copyright (C) 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "intel_pmic.h"
+
+/*
+ * We have no docs for the CHT Crystal Cove PMIC. The Asus Zenfone-2 kernel
+ * code has 2 Crystal Cove regulator drivers, one calls the PMIC a "Crystal
+ * code has 2 Crystal Cove regulator drivers; one calls the PMIC a "Crystal
+ * Cove Plus" PMIC and talks about Cherry Trail, so presumably that one
+ * implement regulator support in the future.
+ *
+ * For now the sole purpose of this driver is to make
+ * intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a
+ * CHT Crystal Cove PMIC.
+ */
+static struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
+ .pmic_i2c_address = 0x6e,
+};
+
+static int intel_chtcrc_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ return intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
+ &intel_chtcrc_pmic_opregion_data);
+}
+
+static struct platform_driver intel_chtcrc_pmic_opregion_driver = {
+ .probe = intel_chtcrc_pmic_opregion_probe,
+ .driver = {
+ .name = "cht_crystal_cove_pmic",
+ },
+};
+builtin_platform_driver(intel_chtcrc_pmic_opregion_driver);
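The MIPI PMIC sequence helper this driver exists for takes the PMIC's I2C address plus a register/value/mask triple; a sketch of the call that now works on CHT Crystal Cove systems (register, value and mask are illustrative; only the 0x6e address comes from the table above):

	ret = intel_soc_pmic_exec_mipi_pmic_seq_element(0x6e, 0x5e, 0x01, 0xff);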
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ed56c6d20b08..2ae95df2e74f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -642,6 +642,19 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
+static void wait_for_freeze(void)
+{
+#ifdef CONFIG_X86
+ /* No delay is needed if we are in guest */
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+#endif
+ /* Dummy wait op - must do something useless after P_LVL2 read
+ because chipsets cannot guarantee that STPCLK# signal
+ gets asserted in time to freeze execution properly. */
+ inl(acpi_gbl_FADT.xpm_timer_block.address);
+}
+
/**
* acpi_idle_do_entry - enter idle state using the appropriate method
* @cx: cstate data
@@ -658,10 +671,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else {
/* IO port based C-state */
inb(cx->address);
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
- inl(acpi_gbl_FADT.xpm_timer_block.address);
+ wait_for_freeze();
}
}
@@ -682,8 +692,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
- /* See comment in acpi_idle_do_entry() */
- inl(acpi_gbl_FADT.xpm_timer_block.address);
+ wait_for_freeze();
} else
return -ENODEV;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3eacf474e1e3..e601c4511a8b 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1317,6 +1317,52 @@ acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
args_count, args);
}
+static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct acpi_device *adev;
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "\\";
+
+ fwnode_handle_put(parent);
+
+ if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+
+ return dn->name;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (WARN_ON(!adev))
+ return NULL;
+
+ return acpi_device_bid(adev);
+}
+
+static const char *
+acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Is this 2nd node from the root? */
+ parent = fwnode_get_next_parent(parent);
+ if (!parent)
+ return "";
+
+ fwnode_handle_put(parent);
+
+ /* ACPI device or data node. */
+ return ".";
+}
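These two callbacks let generic fwnode code compose full node paths from per-node pieces: the root contributes "\", the first level below it gets an empty prefix, and every deeper node is joined with ".". An illustrative composition (node names made up):

	/* "\" (root)  +  "" "SPI1"  +  "." "port0"   ==>   "\SPI1.port0" */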
+
static struct fwnode_handle *
acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
{
@@ -1357,6 +1403,8 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_parent = acpi_node_get_parent, \
.get_next_child_node = acpi_get_next_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_name = acpi_fwnode_get_name, \
+ .get_name_prefix = acpi_fwnode_get_name_prefix, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
acpi_graph_get_next_endpoint, \
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 2a3e392751e0..3b4448972374 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -413,8 +413,8 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
if (triggering != trig || polarity != pol) {
- pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
- t ? "level" : "edge", p ? "low" : "high");
+ pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi,
+ t ? "level" : "edge", p ? "low" : "high");
triggering = trig;
polarity = pol;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index aad6be5c0af0..915650bf519f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2174,6 +2174,7 @@ int __init acpi_scan_init(void)
acpi_pci_root_init();
acpi_pci_link_init();
acpi_processor_init();
+ acpi_platform_init();
acpi_lpss_init();
acpi_apd_init();
acpi_cmos_rtc_init();
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index e3974a8f8fd4..804ac0df58ec 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -455,6 +455,7 @@ EXPORT_SYMBOL(acpi_evaluate_ost);
/**
* acpi_handle_path: Return the object path of handle
+ * @handle: ACPI device handle
*
* Caller must free the returned buffer
*/
@@ -473,6 +474,9 @@ static char *acpi_handle_path(acpi_handle handle)
/**
* acpi_handle_printk: Print message with ACPI prefix and object path
+ * @level: log level
+ * @handle: ACPI device handle
+ * @fmt: format string
*
* This function is called through acpi_handle_<level> macros and prints
* a message with ACPI prefix and object path. This function acquires
@@ -501,6 +505,9 @@ EXPORT_SYMBOL(acpi_handle_printk);
#if defined(CONFIG_DYNAMIC_DEBUG)
/**
* __acpi_handle_debug: pr_debug with ACPI prefix and object path
+ * @descriptor: Dynamic Debug descriptor
+ * @handle: ACPI device handle
+ * @fmt: format string
*
* This function is called through acpi_handle_debug macro and debug
* prints a message with ACPI prefix and object path. This function
@@ -695,6 +702,31 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2.
+ * Returns true if matches.
+ */
+bool acpi_dev_hid_uid_match(struct acpi_device *adev,
+ const char *hid2, const char *uid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+ const char *uid1 = acpi_device_uid(adev);
+
+ if (strcmp(hid1, hid2))
+ return false;
+
+ if (!uid2)
+ return true;
+
+ return uid1 && !strcmp(uid1, uid2);
+}
+EXPORT_SYMBOL(acpi_dev_hid_uid_match);
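A usage sketch; the HID/UID strings are illustrative, not taken from this patch:

	if (acpi_dev_hid_uid_match(adev, "INT33FE", "2"))
		; /* match HID and _UID */

	if (acpi_dev_hid_uid_match(adev, "INT33FE", NULL))
		; /* match HID, any _UID */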
+
+/**
* acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 265d9dd46a5e..a606262da9d6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -65,6 +65,7 @@
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
+#include <linux/sizes.h>
#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>
@@ -92,11 +93,6 @@ static atomic_t binder_last_id;
static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eb76a823fbb2..2d8b9b91dee0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -268,7 +268,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
- /* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_read(&mm->mmap_sem);
@@ -277,8 +276,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return 0;
free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
+ for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
bool ret;
size_t index;
@@ -291,6 +289,8 @@ free_range:
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
+ if (page_addr == start)
+ break;
continue;
err_vm_insert_page_failed:
@@ -298,7 +298,8 @@ err_vm_insert_page_failed:
page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
- ;
+ if (page_addr == start)
+ break;
}
err_no_vma:
if (mm) {
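The descending loop gains an explicit break because the old termination test cannot fire when start is page 0: with an unsigned cursor, "page_addr >= start" is always true and the decrement wraps. A minimal illustration of the hazard (an inference from the code; the diff carries no rationale):

	unsigned long page_addr = 0;	/* start == 0 */
	page_addr -= PAGE_SIZE;		/* wraps near ULONG_MAX */
	/* so "page_addr >= start" never becomes false */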
@@ -681,17 +682,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock);
- if (alloc->buffer) {
+ if (alloc->buffer_size) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
+ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+ SZ_4M);
+ mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer = (void __user *)vma->vm_start;
- mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
- SZ_4M);
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
@@ -722,8 +723,9 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer = NULL;
+ mutex_lock(&binder_alloc_mmap_lock);
+ alloc->buffer_size = 0;
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -841,14 +843,20 @@ void binder_alloc_print_pages(struct seq_file *m,
int free = 0;
mutex_lock(&alloc->mutex);
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
+ /*
+ * Make sure the binder_alloc is fully initialized, otherwise we might
+ * read inconsistent state.
+ */
+ if (binder_alloc_get_vma(alloc) != NULL) {
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
}
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 753985c01517..46dc54d18f0b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -56,7 +56,7 @@ struct acard_sg {
__le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
};
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
return si;
}
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 05c2b32dcc4d..ec6c64fce74a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -56,6 +56,7 @@ enum board_ids {
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
+ board_ahci_al,
board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
@@ -167,6 +168,13 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
/* by chipsets */
+ [board_ahci_al] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_avn] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
@@ -415,6 +423,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+ /* Amazon's Annapurna Labs support */
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031),
+ .class = PCI_CLASS_STORAGE_SATA_AHCI,
+ .class_mask = 0xffffff,
+ board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index e3163dae5e85..cb55ebc1725b 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -483,7 +483,6 @@ static int tegra_ahci_probe(struct platform_device *pdev)
struct tegra_ahci_priv *tegra;
struct resource *res;
int ret;
- unsigned int i;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
@@ -543,8 +542,9 @@ static int tegra_ahci_probe(struct platform_device *pdev)
if (!tegra->supplies)
return -ENOMEM;
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
ret = devm_regulator_bulk_get(&pdev->dev,
tegra->soc->num_supplies,
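The tegra change above swaps an open-coded loop for regulator_bulk_set_supply_names(). A rough equivalent of that helper, assuming the signature used in this diff (the canonical version lives in the regulator core):

/*
 * Rough equivalent of regulator_bulk_set_supply_names(): copy supply
 * name strings into a regulator_bulk_data array before handing it to
 * devm_regulator_bulk_get(). Sketch only.
 */
#include <linux/regulator/consumer.h>

static void supply_names_sketch(struct regulator_bulk_data *consumers,
				const char *const *supply_names,
				unsigned int num_supplies)
{
	unsigned int i;

	for (i = 0; i < num_supplies; i++)
		consumers[i].supply = supply_names[i];
}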
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e4da725381d3..3ca7720e7d8f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -841,6 +841,12 @@ static int piix_broken_suspend(void)
},
},
{
+ .ident = "TECRA M3",
+ .matches = {
+ DMI_MATCH(DMI_OEM_STRING, "Tecra M3,"),
+ },
+ },
+ {
.ident = "TECRA M4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
@@ -955,18 +961,10 @@ static int piix_broken_suspend(void)
{ } /* terminate list */
};
- static const char *oemstrs[] = {
- "Tecra M3,",
- };
- int i;
if (dmi_check_system(sysids))
return 1;
- for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
- if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
- return 1;
-
/* TECRA M4 sometimes forgets its identify and reports bogus
* DMI information. As the bogus information is a bit
* generic, match as many entries as possible. This manual
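The piix change above relies on dmi_check_system() handling DMI_OEM_STRING specially: dmi_matches() compares an OEM_STRING match against every DMI_DEV_TYPE_OEM_STRING entry, so the one-off dmi_find_device() scan can be folded into the quirk table. A sketch of the pattern with an illustrative table:

#include <linux/dmi.h>

/*
 * Illustrative quirk table; the OEM string entry matches against any of
 * the firmware's OEM strings, not a fixed DMI field.
 */
static const struct dmi_system_id sketch_sysids[] = {
	{
		.ident = "TECRA M3",
		.matches = {
			DMI_MATCH(DMI_OEM_STRING, "Tecra M3,"),
		},
	},
	{ }	/* terminate list */
};

static bool sketch_has_quirk(void)
{
	return dmi_check_system(sketch_sysids) != 0;
}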
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index bff369d9a1a7..ea5bf5f4cbed 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
return sata_pmp_qc_defer_cmd_switch(qc);
}
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static void ahci_fbs_dec_intr(struct ata_port *ap)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28c492be0a57..e9017c570bc5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4980,7 +4980,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
return ATA_DEFER_LINK;
}
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+ return AC_ERR_OK;
+}
/**
* ata_sg_init - Associate command with scatter-gather table.
@@ -5443,7 +5446,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- ap->ops->qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
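With qc_prep now returning enum ata_completion_errors, ata_qc_issue() folds the result into qc->err_mask and routes failures through normal error handling, so a driver can refuse an impossible request instead of hitting a BUG_ON (see the sata_mv hunk further down). A sketch of a qc_prep body under the new contract; foo_cmd_supported() and foo_fill_sg() are illustrative:

static enum ata_completion_errors foo_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;		/* nothing to prepare */

	if (!foo_cmd_supported(qc->tf.command))
		return AC_ERR_INVALID;		/* fail via EH, don't BUG() */

	foo_fill_sg(qc);			/* hypothetical S/G setup */

	return AC_ERR_OK;
}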
@@ -6708,6 +6713,9 @@ void ata_host_detach(struct ata_host *host)
{
int i;
+ /* Ensure ata_port probe has completed */
+ async_synchronize_full();
+
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 4ed682da52ae..038db94216a9 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg_dumb(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 3aa006c5ed0c..6bd2228bb6ff 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -100,7 +100,7 @@ static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev,
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
- const u16 timing[2][5] = {
+ static const u16 timing[2][5] = {
{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
@@ -154,7 +154,7 @@ static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
- const u8 timing[2][5] = {
+ static const u8 timing[2][5] = {
{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 41e0d6a6cd05..27b0952fde6b 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -33,7 +33,6 @@
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
-#define ATA_HD_BASE 0xfff00000
#define ATA_HD_CONTROL 0x39
static struct scsi_host_template pata_falcon_sht = {
@@ -120,24 +119,22 @@ static struct ata_port_operations pata_falcon_ops = {
.set_mode = pata_falcon_set_mode,
};
-static int pata_falcon_init_one(void)
+static int __init pata_falcon_init_one(struct platform_device *pdev)
{
+ struct resource *res;
struct ata_host *host;
struct ata_port *ap;
- struct platform_device *pdev;
void __iomem *base;
- if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
- return -ENODEV;
-
- pr_info(DRV_NAME ": Atari Falcon PATA controller\n");
+ dev_info(&pdev->dev, "Atari Falcon PATA controller\n");
- pdev = platform_device_register_simple(DRV_NAME, 0, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
- if (!devm_request_mem_region(&pdev->dev, ATA_HD_BASE, 0x40, DRV_NAME)) {
- pr_err(DRV_NAME ": resources busy\n");
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
@@ -152,7 +149,7 @@ static int pata_falcon_init_one(void)
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
ap->flags |= ATA_FLAG_PIO_POLLING;
- base = (void __iomem *)ATA_HD_BASE;
+ base = (void __iomem *)res->start;
ap->ioaddr.data_addr = base;
ap->ioaddr.error_addr = base + 1 + 1 * 4;
ap->ioaddr.feature_addr = base + 1 + 1 * 4;
@@ -174,9 +171,26 @@ static int pata_falcon_init_one(void)
return ata_host_activate(host, 0, NULL, 0, &pata_falcon_sht);
}
-module_init(pata_falcon_init_one);
+static int __exit pata_falcon_remove_one(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
+static struct platform_driver pata_falcon_driver = {
+ .remove = __exit_p(pata_falcon_remove_one),
+ .driver = {
+ .name = "atari-falcon-ide",
+ },
+};
+
+module_platform_driver_probe(pata_falcon_driver, pata_falcon_init_one);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:atari-falcon-ide");
MODULE_VERSION(DRV_VERSION);
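pata_falcon now registers through module_platform_driver_probe(), which is what allows the probe routine to stay __init: the driver is registered with platform_driver_probe(), binds at most once at init time, and never rebinds. Roughly, the macro expands to:

/*
 * Rough expansion of module_platform_driver_probe() from
 * include/linux/platform_device.h; a sketch, not the verbatim macro.
 */
static int __init pata_falcon_driver_init(void)
{
	return platform_driver_probe(&pata_falcon_driver,
				     pata_falcon_init_one);
}
module_init(pata_falcon_driver_init);

static void __exit pata_falcon_driver_exit(void)
{
	platform_driver_unregister(&pata_falcon_driver);
}
module_exit(pata_falcon_driver_exit);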
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 57f2ec71cfc3..1bfd0154dad5 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
return ATA_CBL_PATA40;
}
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
__func__, qc, qc->flags, write, qc->dev->devno);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
table = (struct dbdma_cmd *) priv->dma_table_cpu;
@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
table->command = cpu_to_le16(DBDMA_STOP);
dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+ return AC_ERR_OK;
}
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 4afcb8e63e21..41430f79663c 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d)
/*
* Prepare taskfile for submission.
*/
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
struct dma_async_tx_descriptor *tx;
enum dma_transfer_direction dir;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
DMA_PREP_INTERRUPT);
if (!tx) {
ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
- return;
+ return AC_ERR_OK;
}
tx->callback = pxa_ata_dma_irq;
tx->callback_param = pd;
pd->dma_cookie = dmaengine_submit(tx);
+
+ return AC_ERR_OK;
}
/*
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index cb490531b62e..5db55e1e2a61 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
return i;
}
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
struct adma_port_priv *pp = qc->ap->private_data;
u8 *buf = pp->pkt;
@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
printk("%s\n", obuf);
}
#endif
+ return AC_ERR_OK;
}
static inline void adma_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 8e9cb198fcd1..9239615d8a04 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
return num_prde;
}
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
+
+ return AC_ERR_OK;
}
static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 7f99e23bff88..a6b76cc12a66 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
prd[-1].flags |= PRD_END;
}
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
struct inic_port_priv *pp = qc->ap->private_data;
struct inic_pkt *pkt = pp->pkt;
@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
inic_fill_sg(prd, qc);
pp->cpb_tbl[0] = pp->pkt_dma;
+
+ return AC_ERR_OK;
}
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ad385a113391..277f11909fc1 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
- return;
+ return AC_ERR_OK;
/* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
- return;
+ return AC_ERR_OK;
default:
- return;
+ return AC_ERR_OK;
}
/* Fill in command request block
@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
- *
- * FIXME: modify libata to give qc_prep a return value and
- * return error here.
*/
- BUG_ON(tf->command);
- break;
+ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+ tf->command);
+ return AC_ERR_INVALID;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
- return;
+ return AC_ERR_OK;
if (tf->command == ATA_CMD_DSM)
- return; /* use bmdma for this */
+ return AC_ERR_OK; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 56946012d113..65ec8dff1c51 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
return 1;
}
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
cpb->ctl_flags = ctl_flags;
wmb();
cpb->resp_flags = 0;
+
+ return AC_ERR_OK;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
return 0;
}
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
nv_swncq_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5fd464765ddc..c451d7d1c817 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
{
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static int pdc_is_sataii_tx4(unsigned long flags)
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c53c5a47204d..ef00ab644afb 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
return si;
}
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
{
struct qs_port_priv *pp = qc->ap->private_data;
u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
nelem = qs_fill_sg(qc);
@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
/* frame information structure (FIS) */
ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+ return AC_ERR_OK;
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 3495e1733a8e..980aacdbcf3b 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
}
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sata_rcar_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index e6fbae2f645a..75321f1ceba5 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sil_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 7bef82de53ca..560070d4f1d0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
return ata_std_qc_defer(qc);
}
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_DMAMAP)
sil24_fill_sg(qc, sge);
+
+ return AC_ERR_OK;
}
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 2277ba0c9c7f..2c7b30c5ea3d 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 2bbab0230aeb..aad00d2b28f5 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1070,7 +1070,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
RC_FLAGS_BFPS_BFP * bfp |
RC_FLAGS_RXBM_PSB, 0, 0);
break;
- };
+ }
if (IS_FS50 (dev)) {
submit_command (dev, &dev->hp_txq,
QE_CMD_REG_WR | QE_CMD_IMM_INQ,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bd9cd366d41..42a672456432 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -45,6 +45,10 @@ early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
/* Device links support. */
+static LIST_HEAD(wait_for_suppliers);
+static DEFINE_MUTEX(wfs_lock);
+static LIST_HEAD(deferred_sync);
+static unsigned int defer_sync_state_count = 1;
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -127,6 +131,9 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
+
if (link->consumer == target)
return 1;
@@ -196,8 +203,11 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
- list_for_each_entry(link, &dev->links.consumers, s_node)
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
device_reorder_to_tail(link->consumer, NULL);
+ }
return 0;
}
@@ -224,7 +234,8 @@ void device_pm_move_to_tail(struct device *dev)
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
- DL_FLAG_AUTOPROBE_CONSUMER)
+ DL_FLAG_AUTOPROBE_CONSUMER | \
+ DL_FLAG_SYNC_STATE_ONLY)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -292,6 +303,8 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
+ (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ flags != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -312,11 +325,14 @@ struct device_link *device_link_add(struct device *consumer,
/*
* If the supplier has not been fully registered yet or there is a
- * reverse dependency between the consumer and the supplier already in
- * the graph, return NULL.
+ * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
+ * the supplier already in the graph, return NULL. If the link is a
+ * SYNC_STATE_ONLY link, we don't check for reverse dependencies
+ * because it only affects sync_state() callbacks.
*/
if (!device_pm_initialized(supplier)
- || device_is_dependent(consumer, supplier)) {
+ || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
+ device_is_dependent(consumer, supplier))) {
link = NULL;
goto out;
}
@@ -343,9 +359,14 @@ struct device_link *device_link_add(struct device *consumer,
}
if (flags & DL_FLAG_STATELESS) {
- link->flags |= DL_FLAG_STATELESS;
kref_get(&link->kref);
- goto out;
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(link->flags & DL_FLAG_STATELESS)) {
+ link->flags |= DL_FLAG_STATELESS;
+ goto reorder;
+ } else {
+ goto out;
+ }
}
/*
@@ -367,6 +388,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
+ goto reorder;
+ }
+
goto out;
}
@@ -406,6 +433,13 @@ struct device_link *device_link_add(struct device *consumer,
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
+ if (flags & DL_FLAG_SYNC_STATE_ONLY) {
+ dev_dbg(consumer,
+ "Linked as a sync state only consumer to %s\n",
+ dev_name(supplier));
+ goto out;
+ }
+reorder:
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
@@ -431,6 +465,70 @@ struct device_link *device_link_add(struct device *consumer,
}
EXPORT_SYMBOL_GPL(device_link_add);
+/**
+ * device_link_wait_for_supplier - Add device to wait_for_suppliers list
+ * @consumer: Consumer device
+ *
+ * Marks the @consumer device as waiting for suppliers to become available by
+ * adding it to the wait_for_suppliers list. The consumer device will never be
+ * probed until it's removed from the wait_for_suppliers list.
+ *
+ * The caller is responsible for adding the links to the supplier devices once
+ * they are available and removing the @consumer device from the
+ * wait_for_suppliers list once links to all the suppliers have been created.
+ *
+ * This function is NOT meant to be called from the probe function of the
+ * consumer but rather from code that creates/adds the consumer device.
+ */
+static void device_link_wait_for_supplier(struct device *consumer,
+ bool need_for_probe)
+{
+ mutex_lock(&wfs_lock);
+ list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
+ consumer->links.need_for_probe = need_for_probe;
+ mutex_unlock(&wfs_lock);
+}
+
+static void device_link_wait_for_mandatory_supplier(struct device *consumer)
+{
+ device_link_wait_for_supplier(consumer, true);
+}
+
+static void device_link_wait_for_optional_supplier(struct device *consumer)
+{
+ device_link_wait_for_supplier(consumer, false);
+}
+
+/**
+ * device_link_add_missing_supplier_links - Add links from consumer devices to
+ * supplier devices, leaving any
+ * consumer with inactive suppliers on
+ * the wait_for_suppliers list
+ *
+ * Loops through all consumers waiting on suppliers and tries to add all their
+ * supplier links. If that succeeds, the consumer device is removed from the
+ * wait_for_suppliers list. Otherwise, it is left on the wait_for_suppliers
+ * list. Devices left on the wait_for_suppliers list will not be probed.
+ *
+ * The fwnode add_links callback is expected to return 0 if it has found and
+ * added all the supplier links for the consumer device. It should return an
+ * error if it isn't able to do so.
+ *
+ * The caller of device_link_wait_for_supplier() is expected to call this once
+ * it's aware of potential suppliers becoming available.
+ */
+static void device_link_add_missing_supplier_links(void)
+{
+ struct device *dev, *tmp;
+
+ mutex_lock(&wfs_lock);
+ list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
+ links.needs_suppliers)
+ if (!fwnode_call_int_op(dev->fwnode, add_links, dev))
+ list_del_init(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
+}
+
static void device_link_free(struct device_link *link)
{
while (refcount_dec_not_one(&link->rpm_active))
@@ -565,10 +663,23 @@ int device_links_check_suppliers(struct device *dev)
struct device_link *link;
int ret = 0;
+ /*
+ * A device that is waiting for one of its suppliers to become available
+ * is not allowed to probe.
+ */
+ mutex_lock(&wfs_lock);
+ if (!list_empty(&dev->links.needs_suppliers) &&
+ dev->links.need_for_probe) {
+ mutex_unlock(&wfs_lock);
+ return -EPROBE_DEFER;
+ }
+ mutex_unlock(&wfs_lock);
+
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -585,6 +696,128 @@ int device_links_check_suppliers(struct device *dev)
}
/**
+ * __device_links_queue_sync_state - Queue a device for sync_state() callback
+ * @dev: Device to call sync_state() on
+ * @list: List head to queue the @dev on
+ *
+ * Queues a device so that its sync_state() callback is invoked later, once
+ * the device links write lock is no longer held. This allows the sync_state()
+ * execution flow to use device links APIs. The caller must ensure this
+ * function is called with device_links_write_lock() held.
+ *
+ * This function does a get_device() to make sure the device is not freed while
+ * on this list.
+ *
+ * So the caller must also ensure that device_links_flush_sync_list() is called
+ * as soon as the caller releases device_links_write_lock(). This is necessary
+ * to make sure the sync_state() is called in a timely fashion and the
+ * put_device() is called on this device.
+ */
+static void __device_links_queue_sync_state(struct device *dev,
+ struct list_head *list)
+{
+ struct device_link *link;
+
+ if (dev->state_synced)
+ return;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+ if (link->status != DL_STATE_ACTIVE)
+ return;
+ }
+
+ /*
+ * Set the flag here to avoid adding the same device to a list more
+ * than once. This can happen if new consumers get added to the device
+ * and probed before the list is flushed.
+ */
+ dev->state_synced = true;
+
+ if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+ return;
+
+ get_device(dev);
+ list_add_tail(&dev->links.defer_sync, list);
+}
+
+/**
+ * device_links_flush_sync_list - Call sync_state() on a list of devices
+ * @list: List of devices to call sync_state() on
+ *
+ * Calls sync_state() on all the devices that have been queued for it. This
+ * function is used in conjunction with __device_links_queue_sync_state().
+ */
+static void device_links_flush_sync_list(struct list_head *list)
+{
+ struct device *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+ list_del_init(&dev->links.defer_sync);
+
+ device_lock(dev);
+
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+
+ device_unlock(dev);
+
+ put_device(dev);
+ }
+}
+
+void device_links_supplier_sync_state_pause(void)
+{
+ device_links_write_lock();
+ defer_sync_state_count++;
+ device_links_write_unlock();
+}
+
+void device_links_supplier_sync_state_resume(void)
+{
+ struct device *dev, *tmp;
+ LIST_HEAD(sync_list);
+
+ device_links_write_lock();
+ if (!defer_sync_state_count) {
+ WARN(true, "Unmatched sync_state pause/resume!");
+ goto out;
+ }
+ defer_sync_state_count--;
+ if (defer_sync_state_count)
+ goto out;
+
+ list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+ /*
+ * Delete from deferred_sync list before queuing it to
+ * sync_list because defer_sync is used for both lists.
+ */
+ list_del_init(&dev->links.defer_sync);
+ __device_links_queue_sync_state(dev, &sync_list);
+ }
+out:
+ device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list);
+}
+
+static int sync_state_resume_initcall(void)
+{
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall(sync_state_resume_initcall);
+
+static void __device_links_supplier_defer_sync(struct device *sup)
+{
+ if (list_empty(&sup->links.defer_sync))
+ list_add_tail(&sup->links.defer_sync, &deferred_sync);
+}
+
+/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
@@ -598,6 +831,16 @@ int device_links_check_suppliers(struct device *dev)
void device_links_driver_bound(struct device *dev)
{
struct device_link *link;
+ LIST_HEAD(sync_list);
+
+ /*
+ * If a device probes successfully, it's expected to have created all
+ * the device links it needs, or to create new ones as it needs them.
+ * So, it no longer needs to wait on any suppliers.
+ */
+ mutex_lock(&wfs_lock);
+ list_del_init(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
device_links_write_lock();
@@ -628,11 +871,19 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(link->supplier);
+ else
+ __device_links_queue_sync_state(link->supplier,
+ &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list);
}
static void device_link_drop_managed(struct device_link *link)
@@ -744,6 +995,7 @@ void device_links_driver_cleanup(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
+ list_del_init(&dev->links.defer_sync);
__device_links_no_driver(dev);
device_links_write_unlock();
@@ -813,7 +1065,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
status = link->status;
@@ -849,6 +1102,10 @@ static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
+ mutex_lock(&wfs_lock);
+ list_del(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
+
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
@@ -1713,6 +1970,8 @@ void device_initialize(struct device *dev)
#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
+ INIT_LIST_HEAD(&dev->links.needs_suppliers);
+ INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -2101,7 +2360,7 @@ int device_add(struct device *dev)
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
- int error = -EINVAL;
+ int error = -EINVAL, fw_ret;
struct kobject *glue_dir = NULL;
dev = get_device(dev);
@@ -2199,6 +2458,32 @@ int device_add(struct device *dev)
BUS_NOTIFY_ADD_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+ if (dev->fwnode && !dev->fwnode->dev)
+ dev->fwnode->dev = dev;
+
+ /*
+ * Check if any of the other devices (consumers) have been waiting for
+ * this device (supplier) to be added so that they can create a device
+ * link to it.
+ *
+ * This needs to happen after device_pm_add() because device_link_add()
+ * requires the supplier be registered before it's called.
+ *
+ * But this also needs to happen before bus_probe_device() to make sure
+ * waiting consumers can link to it before the driver is bound to the
+ * device and the driver sync_state callback is called for this device.
+ */
+ device_link_add_missing_supplier_links();
+
+ if (fwnode_has_op(dev->fwnode, add_links)) {
+ fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (fw_ret == -ENODEV)
+ device_link_wait_for_mandatory_supplier(dev);
+ else if (fw_ret)
+ device_link_wait_for_optional_supplier(dev);
+ }
+
bus_probe_device(dev);
if (parent)
klist_add_tail(&dev->p->knode_parent,
@@ -2343,6 +2628,9 @@ void device_del(struct device *dev)
kill_device(dev);
device_unlock(dev);
+ if (dev->fwnode && dev->fwnode->dev == dev)
+ dev->fwnode->dev = NULL;
+
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
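Taken together, the core.c changes let a supplier defer configuration cleanup until every consumer has probed. A hedged usage sketch: the supplier driver provides a sync_state() callback, and a consumer that only cares about that callback (not probe ordering) links to the supplier with DL_FLAG_SYNC_STATE_ONLY, which the validation added above requires to be the sole flag. The foo_* names are illustrative:

static void foo_sync_state(struct device *dev)
{
	/* All consumers have probed: drop boot-time defaults here. */
}

static struct platform_driver foo_supplier_driver = {
	.driver = {
		.name		= "foo-supplier",
		.sync_state	= foo_sync_state,
	},
};

/* In code that sets up the consumer (not in its probe function): */
static int foo_link_to_supplier(struct device *consumer,
				struct device *supplier)
{
	if (!device_link_add(consumer, supplier, DL_FLAG_SYNC_STATE_ONLY))
		return -EINVAL;
	return 0;
}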
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index cc37511de866..6265871a4af2 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -554,12 +554,27 @@ ssize_t __weak cpu_show_mds(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -568,6 +583,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
&dev_attr_mds.attr,
+ &dev_attr_tsx_async_abort.attr,
+ &dev_attr_itlb_multihit.attr,
NULL
};
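The two new vulnerability files default to "Not affected" through the __weak stubs above; an affected architecture overrides them with strong definitions (x86 does so in arch/x86/kernel/cpu/bugs.c). A sketch of such an override; the status string is illustrative:

ssize_t cpu_show_itlb_multihit(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	/* Strong definition replaces the __weak default at link time. */
	return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
}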
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 3f9e274e2ed3..5b24f3959255 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -148,7 +148,7 @@ config FW_LOADER_USER_HELPER_FALLBACK
to be used for all firmware requests which explicitly do not disable
a fallback mechanism. The firmware call which does prohibit a fallback
mechanism is request_firmware_direct(). This option is kept for
- backward compatibility purposes given this precise mechanism can also
+ backward compatibility purposes given this precise mechanism can also
be enabled by setting the proc sysctl value to true:
/proc/sys/kernel/firmware_config/force_sysfs_fallback
@@ -169,5 +169,17 @@ config FW_LOADER_COMPRESS
be compressed with either none or crc32 integrity check type (pass
"-C crc32" option to xz command).
+config FW_CACHE
+ bool "Enable firmware caching during suspend"
+ depends on PM_SLEEP
+ default y if PM_SLEEP
+ help
+ Because firmware caching generates uevent messages that are sent
+ over a netlink socket, it can prevent suspend on many platforms.
+ It is also not always useful, so on such platforms we have the
+ option to disable the whole cache.
+
+ If unsure, say Y.
+
endif # FW_LOADER
endmenu
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 37e5ae387400..4a66888e7253 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -8,7 +8,8 @@ fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
-FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))
+comma := ,
+FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index bf44c79beae9..249add8c5e05 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
- * Please see Documentation/firmware_class/ for more information.
+ * Please see Documentation/driver-api/firmware/ for more information.
*
*/
@@ -51,7 +51,7 @@ struct firmware_cache {
struct list_head head;
int state;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
/*
* Names of firmware images which have been cached successfully
* will be added into the below list so that device uncache
@@ -504,6 +504,7 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
path);
continue;
}
+ dev_dbg(device, "Loading firmware from %s\n", path);
if (decompress) {
dev_dbg(device, "f/w decompressing %s\n",
fw_priv->fw_name);
@@ -556,7 +557,7 @@ static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
(unsigned int)fw_priv->size);
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
static void fw_name_devm_release(struct device *dev, void *res)
{
struct fw_name_devm *fwn = res;
@@ -1046,7 +1047,7 @@ request_firmware_nowait(
}
EXPORT_SYMBOL(request_firmware_nowait);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 55907c27075b..84c4e1f72cbd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
}
return ret;
}
+
+struct for_each_memory_block_cb_data {
+ walk_memory_blocks_func_t func;
+ void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ struct for_each_memory_block_cb_data *cb_data = data;
+
+ return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+ struct for_each_memory_block_cb_data cb_data = {
+ .func = func,
+ .arg = arg,
+ };
+
+ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+ for_each_memory_block_cb);
+}
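A usage sketch for the new walker: the callback runs once per present memory block and a non-zero return aborts the walk; here it just counts blocks. count_block() is illustrative:

static int count_block(struct memory_block *mem, void *arg)
{
	unsigned long *nr = arg;

	(*nr)++;
	return 0;	/* non-zero would abort the walk */
}

static unsigned long count_memory_blocks(void)
{
	unsigned long nr = 0;

	for_each_memory_block(&nr, count_block);
	return nr;
}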
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b230beb6ccb4..7c532548b0a6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -60,6 +60,7 @@ struct resource *platform_get_resource(struct platform_device *dev,
}
EXPORT_SYMBOL_GPL(platform_get_resource);
+#ifdef CONFIG_HAS_IOMEM
/**
* devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
* device
@@ -68,7 +69,6 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
* resource management
* @index: resource index
*/
-#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
@@ -78,9 +78,63 @@ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
+
+/**
+ * devm_platform_ioremap_resource_wc - write-combined variant of
+ * devm_platform_ioremap_resource()
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @index: resource index
+ */
+void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
+ unsigned int index)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ return devm_ioremap_resource_wc(&pdev->dev, res);
+}
+
+/**
+ * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
+ * a platform device, retrieve the
+ * resource by name
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @name: name of the resource
+ */
+void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ return devm_ioremap_resource(&pdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
-static int __platform_get_irq(struct platform_device *dev, unsigned int num)
+/**
+ * platform_get_irq_optional - get an optional IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device. Device drivers should check the return
+ * value for errors so as to not pass a negative integer value to the
+ * request_irq() APIs. This is the same as platform_get_irq(), except that it
+ * does not print an error message if an IRQ can not be obtained.
+ *
+ * Example:
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
@@ -89,9 +143,9 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
return dev->archdata.irqs[num];
#else
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- int ret;
+ int ret;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
@@ -100,8 +154,6 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
if (has_acpi_companion(&dev->dev)) {
if (r && r->flags & IORESOURCE_DISABLED) {
- int ret;
-
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
return ret;
@@ -134,8 +186,7 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
* allows a common code path across either kind of resource.
*/
if (num == 0 && has_acpi_companion(&dev->dev)) {
- int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
-
+ ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
return ret;
@@ -144,6 +195,7 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
return -ENXIO;
#endif
}
+EXPORT_SYMBOL_GPL(platform_get_irq_optional);
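A probe-time sketch combining the new byname ioremap helper with platform_get_irq_optional(); the driver, resource, and fallback behavior are illustrative:

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;
	int irq;

	regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq == -EPROBE_DEFER)
		return irq;
	if (irq < 0)
		irq = 0;	/* no interrupt: fall back to polling */

	/* ... register the device using regs and irq ... */
	return 0;
}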
/**
* platform_get_irq - get an IRQ for a device
@@ -165,7 +217,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
{
int ret;
- ret = __platform_get_irq(dev, num);
+ ret = platform_get_irq_optional(dev, num);
if (ret < 0 && ret != -EPROBE_DEFER)
dev_err(&dev->dev, "IRQ index %u not found\n", num);
@@ -174,29 +226,6 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
- *
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * Example:
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
- *
- * Return: IRQ number on success, negative error number on failure.
- */
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
-{
- return __platform_get_irq(dev, num);
-}
-EXPORT_SYMBOL_GPL(platform_get_irq_optional);
-
-/**
* platform_irq_count - Count the number of IRQs a platform device uses
* @dev: platform device
*
@@ -206,7 +235,7 @@ int platform_irq_count(struct platform_device *dev)
{
int ret, nr = 0;
- while ((ret = __platform_get_irq(dev, nr)) >= 0)
+ while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
nr++;
if (ret == -EPROBE_DEFER)
@@ -245,10 +274,9 @@ static int __platform_get_irq_byname(struct platform_device *dev,
const char *name)
{
struct resource *r;
+ int ret;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- int ret;
-
ret = of_irq_get_byname(dev->dev.of_node, name);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
@@ -1278,6 +1306,11 @@ struct bus_type platform_bus_type = {
};
EXPORT_SYMBOL_GPL(platform_bus_type);
+static inline int __platform_match(struct device *dev, const void *drv)
+{
+ return platform_match(dev, (struct device_driver *)drv);
+}
+
/**
* platform_find_device_by_driver - Find a platform device with a given
* driver.
@@ -1288,7 +1321,7 @@ struct device *platform_find_device_by_driver(struct device *start,
const struct device_driver *drv)
{
return bus_find_device(&platform_bus_type, start, drv,
- (void *)platform_match);
+ __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
@@ -1296,8 +1329,6 @@ int __init platform_bus_init(void)
{
int error;
- early_platform_cleanup();
-
error = device_register(&platform_bus);
if (error) {
put_device(&platform_bus);
@@ -1309,289 +1340,3 @@ int __init platform_bus_init(void)
of_platform_register_reconfig_notifier();
return error;
}
-
-static __initdata LIST_HEAD(early_platform_driver_list);
-static __initdata LIST_HEAD(early_platform_device_list);
-
-/**
- * early_platform_driver_register - register early platform driver
- * @epdrv: early_platform driver structure
- * @buf: string passed from early_param()
- *
- * Helper function for early_platform_init() / early_platform_init_buffer()
- */
-int __init early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf)
-{
- char *tmp;
- int n;
-
- /* Simply add the driver to the end of the global list.
- * Drivers will by default be put on the list in compiled-in order.
- */
- if (!epdrv->list.next) {
- INIT_LIST_HEAD(&epdrv->list);
- list_add_tail(&epdrv->list, &early_platform_driver_list);
- }
-
- /* If the user has specified device then make sure the driver
- * gets prioritized. The driver of the last device specified on
- * command line will be put first on the list.
- */
- n = strlen(epdrv->pdrv->driver.name);
- if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
- list_move(&epdrv->list, &early_platform_driver_list);
-
- /* Allow passing parameters after device name */
- if (buf[n] == '\0' || buf[n] == ',')
- epdrv->requested_id = -1;
- else {
- epdrv->requested_id = simple_strtoul(&buf[n + 1],
- &tmp, 10);
-
- if (buf[n] != '.' || (tmp == &buf[n + 1])) {
- epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
- n = 0;
- } else
- n += strcspn(&buf[n + 1], ",") + 1;
- }
-
- if (buf[n] == ',')
- n++;
-
- if (epdrv->bufsize) {
- memcpy(epdrv->buffer, &buf[n],
- min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
- epdrv->buffer[epdrv->bufsize - 1] = '\0';
- }
- }
-
- return 0;
-}
-
-/**
- * early_platform_add_devices - adds a number of early platform devices
- * @devs: array of early platform devices to add
- * @num: number of early platform devices in array
- *
- * Used by early architecture code to register early platform devices and
- * their platform data.
- */
-void __init early_platform_add_devices(struct platform_device **devs, int num)
-{
- struct device *dev;
- int i;
-
- /* simply add the devices to list */
- for (i = 0; i < num; i++) {
- dev = &devs[i]->dev;
-
- if (!dev->devres_head.next) {
- pm_runtime_early_init(dev);
- INIT_LIST_HEAD(&dev->devres_head);
- list_add_tail(&dev->devres_head,
- &early_platform_device_list);
- }
- }
-}
-
-/**
- * early_platform_driver_register_all - register early platform drivers
- * @class_str: string to identify early platform driver class
- *
- * Used by architecture code to register all early platform drivers
- * for a certain class. If omitted then only early platform drivers
- * with matching kernel command line class parameters will be registered.
- */
-void __init early_platform_driver_register_all(char *class_str)
-{
- /* The "class_str" parameter may or may not be present on the kernel
- * command line. If it is present then there may be more than one
- * matching parameter.
- *
- * Since we register our early platform drivers using early_param()
- * we need to make sure that they also get registered in the case
- * when the parameter is missing from the kernel command line.
- *
- * We use parse_early_options() to make sure the early_param() gets
- * called at least once. The early_param() may be called more than
- * once since the name of the preferred device may be specified on
- * the kernel command line. early_platform_driver_register() handles
- * this case for us.
- */
- parse_early_options(class_str);
-}
-
-/**
- * early_platform_match - find early platform device matching driver
- * @epdrv: early platform driver structure
- * @id: id to match against
- */
-static struct platform_device * __init
-early_platform_match(struct early_platform_driver *epdrv, int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id == id)
- return pd;
-
- return NULL;
-}
-
-/**
- * early_platform_left - check if early platform driver has matching devices
- * @epdrv: early platform driver structure
- * @id: return true if id or above exists
- */
-static int __init early_platform_left(struct early_platform_driver *epdrv,
- int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id >= id)
- return 1;
-
- return 0;
-}
-
-/**
- * early_platform_driver_probe_id - probe drivers matching class_str and id
- * @class_str: string to identify early platform driver class
- * @id: id to match against
- * @nr_probe: number of platform devices to successfully probe before exiting
- */
-static int __init early_platform_driver_probe_id(char *class_str,
- int id,
- int nr_probe)
-{
- struct early_platform_driver *epdrv;
- struct platform_device *match;
- int match_id;
- int n = 0;
- int left = 0;
-
- list_for_each_entry(epdrv, &early_platform_driver_list, list) {
- /* only use drivers matching our class_str */
- if (strcmp(class_str, epdrv->class_str))
- continue;
-
- if (id == -2) {
- match_id = epdrv->requested_id;
- left = 1;
-
- } else {
- match_id = id;
- left += early_platform_left(epdrv, id);
-
- /* skip requested id */
- switch (epdrv->requested_id) {
- case EARLY_PLATFORM_ID_ERROR:
- case EARLY_PLATFORM_ID_UNSET:
- break;
- default:
- if (epdrv->requested_id == id)
- match_id = EARLY_PLATFORM_ID_UNSET;
- }
- }
-
- switch (match_id) {
- case EARLY_PLATFORM_ID_ERROR:
- pr_warn("%s: unable to parse %s parameter\n",
- class_str, epdrv->pdrv->driver.name);
- /* fall-through */
- case EARLY_PLATFORM_ID_UNSET:
- match = NULL;
- break;
- default:
- match = early_platform_match(epdrv, match_id);
- }
-
- if (match) {
- /*
- * Set up a sensible init_name to enable
- * dev_name() and others to be used before the
- * rest of the driver core is initialized.
- */
- if (!match->dev.init_name && slab_is_available()) {
- if (match->id != -1)
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s.%d",
- match->name,
- match->id);
- else
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s",
- match->name);
-
- if (!match->dev.init_name)
- return -ENOMEM;
- }
-
- if (epdrv->pdrv->probe(match))
- pr_warn("%s: unable to probe %s early.\n",
- class_str, match->name);
- else
- n++;
- }
-
- if (n >= nr_probe)
- break;
- }
-
- if (left)
- return n;
- else
- return -ENODEV;
-}
-
-/**
- * early_platform_driver_probe - probe a class of registered drivers
- * @class_str: string to identify early platform driver class
- * @nr_probe: number of platform devices to successfully probe before exiting
- * @user_only: only probe user specified early platform devices
- *
- * Used by architecture code to probe registered early platform drivers
- * within a certain class. For probe to happen a registered early platform
- * device matching a registered early platform driver is needed.
- */
-int __init early_platform_driver_probe(char *class_str,
- int nr_probe,
- int user_only)
-{
- int k, n, i;
-
- n = 0;
- for (i = -2; n < nr_probe; i++) {
- k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
-
- if (k < 0)
- break;
-
- n += k;
-
- if (user_only)
- break;
- }
-
- return n;
-}
-
-/**
- * early_platform_cleanup - clean up early platform code
- */
-void __init early_platform_cleanup(void)
-{
- struct platform_device *pd, *pd2;
-
- /* clean up the devres list used to chain devices */
- list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
- dev.devres_head) {
- list_del(&pd->dev.devres_head);
- memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
- }
-}
-
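With platform_get_irq_optional() now the exported way to ask for an IRQ that may legitimately be absent, a consumer's probe path might look like the sketch below. This is an illustrative fragment, not part of the patch; the foo_* names are hypothetical and <linux/platform_device.h> plus <linux/interrupt.h> are assumed.

static irqreturn_t foo_irq(int irq, void *data)
{
	/* a real driver would acknowledge its hardware here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	/* No error is logged when the IRQ is simply not described. */
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* resource provider not ready yet */
	if (irq < 0)
		return 0;	/* genuinely optional: fall back to polling */

	return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				dev_name(&pdev->dev), pdev);
}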
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 8db98a1f83dc..bbddb267c2e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -188,6 +188,26 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that it's assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failure.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+ if (dev->pm_domain && dev->pm_domain->start)
+ return dev->pm_domain->start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
+
+/**
* dev_pm_domain_set - Set PM domain of a device.
* @dev: Device whose PM domain is to be set.
* @pd: PM domain to be set, or NULL.
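A subsystem that knows its device sits in an already-powered PM domain could call the new helper early during probe. A minimal sketch under that assumption (bar_probe is hypothetical; <linux/pm_domain.h> assumed):

static int bar_probe(struct device *dev)
{
	int ret;

	/*
	 * This only asks the domain to "start" the device (clocks etc.);
	 * powering on the domain itself must already have happened.
	 */
	ret = dev_pm_domain_start(dev);
	if (ret)
		return ret;

	/* remaining probe work goes here */
	return 0;
}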
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cc85e87eaf05..8e5725b11ee8 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -634,6 +634,13 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
return ret;
}
+static int genpd_dev_pm_start(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+ return genpd_start_dev(genpd, dev);
+}
+
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -922,24 +929,6 @@ static int __init genpd_power_off_unused(void)
}
late_initcall(genpd_power_off_unused);
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-
-static bool genpd_present(const struct generic_pm_domain *genpd)
-{
- const struct generic_pm_domain *gpd;
-
- if (IS_ERR_OR_NULL(genpd))
- return false;
-
- list_for_each_entry(gpd, &gpd_list, gpd_list_node)
- if (gpd == genpd)
- return true;
-
- return false;
-}
-
-#endif
-
#ifdef CONFIG_PM_SLEEP
/**
@@ -1354,8 +1343,8 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
{
struct generic_pm_domain *genpd;
- genpd = dev_to_genpd(dev);
- if (!genpd_present(genpd))
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
return;
if (suspend) {
@@ -1805,6 +1794,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
genpd->domain.ops.restore_noirq = genpd_restore_noirq;
genpd->domain.ops.complete = genpd_complete;
+ genpd->domain.start = genpd_dev_pm_start;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -2020,6 +2010,16 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
return 0;
}
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+ const struct generic_pm_domain *gpd;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+ if (gpd == genpd)
+ return true;
+ return false;
+}
+
/**
* of_genpd_add_provider_simple() - Register a simple PM domain provider
* @np: Device node pointer associated with the PM domain provider.
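Nothing changes for genpd providers at registration time; pm_genpd_init() now also wires up the ->start() callback that dev_pm_domain_start() ends up calling. A minimal provider setup still looks like this sketch (foo_pd is hypothetical):

static struct generic_pm_domain foo_pd = {
	.name = "foo-pd",
	/* .power_on / .power_off callbacks as before */
};

static int __init foo_pd_setup(void)
{
	/* domain.start is populated inside pm_genpd_init() after this patch */
	return pm_genpd_init(&foo_pd, NULL, true);
}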
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 39a06a0cfdaa..444f5c169a0b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -117,6 +117,13 @@ static inline bool device_pm_initialized(struct device *dev)
return dev->power.in_dpm_list;
}
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -141,6 +148,11 @@ static inline bool device_pm_initialized(struct device *dev)
return device_is_registered(dev);
}
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
@@ -149,21 +161,3 @@ static inline void device_pm_init(struct device *dev)
device_pm_sleep_init(dev);
pm_runtime_init(dev);
}
-
-#ifdef CONFIG_PM_SLEEP
-
-/* drivers/base/power/wakeup_stats.c */
-extern int wakeup_source_sysfs_add(struct device *parent,
- struct wakeup_source *ws);
-extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
-
-extern int pm_wakeup_source_sysfs_add(struct device *parent);
-
-#else /* !CONFIG_PM_SLEEP */
-
-static inline int pm_wakeup_source_sysfs_add(struct device *parent)
-{
- return 0;
-}
-
-#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5ce77d1ef9fc..8e021082dba8 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -272,7 +272,7 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
@@ -299,7 +299,7 @@ void dev_pm_disable_wake_irq_check(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 81bd01ed4042..511f6d7acdfe 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -557,6 +557,42 @@ int device_add_properties(struct device *dev,
EXPORT_SYMBOL_GPL(device_add_properties);
/**
+ * fwnode_get_name - Return the name of a node
+ * @fwnode: The firmware node
+ *
+ * Returns a pointer to the node name.
+ */
+const char *fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name);
+}
+
+/**
+ * fwnode_get_name_prefix - Return the prefix of a node for printing purposes
+ * @fwnode: The firmware node
+ *
+ * Returns the prefix of a node, intended to be printed right before the node.
+ * The prefix also works as a separator between the nodes.
+ */
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name_prefix);
+}
+
+/**
+ * fwnode_get_parent - Return parent firmware node
+ * @fwnode: Firmware whose parent is retrieved
+ *
+ * Return parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_parent);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_parent);
+
+/**
* fwnode_get_next_parent - Iterate to the node's parent
* @fwnode: Firmware whose parent is retrieved
*
@@ -578,17 +614,50 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_parent - Return parent firwmare node
- * @fwnode: Firmware whose parent is retrieved
+ * fwnode_count_parents - Return the number of parents a node has
+ * @fwnode: The node the parents of which are to be counted
*
- * Return parent firmware node of the given node if possible or %NULL if no
- * parent was available.
+ * Returns the number of parents a node has.
*/
-struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
- return fwnode_call_ptr_op(fwnode, get_parent);
+ struct fwnode_handle *__fwnode;
+ unsigned int count;
+
+ __fwnode = fwnode_get_parent(fwnode);
+
+ for (count = 0; __fwnode; count++)
+ __fwnode = fwnode_get_next_parent(__fwnode);
+
+ return count;
}
-EXPORT_SYMBOL_GPL(fwnode_get_parent);
+EXPORT_SYMBOL_GPL(fwnode_count_parents);
+
+/**
+ * fwnode_get_nth_parent - Return an nth parent of a node
+ * @fwnode: The node the parent of which is requested
+ * @depth: Distance of the parent from the node
+ *
+ * Returns the nth parent of a node. If there is no parent at the requested
+ * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
+ * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
+ *
+ * The caller is responsible for calling fwnode_handle_put() for the returned
+ * node.
+ */
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
+ unsigned int depth)
+{
+ unsigned int i;
+
+ fwnode_handle_get(fwnode);
+
+ for (i = 0; i < depth && fwnode; i++)
+ fwnode = fwnode_get_next_parent(fwnode);
+
+ return fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
* fwnode_get_next_child_node - Return the next child node handle for a node
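Together, the new helpers make it straightforward to walk and print a node's ancestry. A sketch, illustrative only; note that fwnode_get_nth_parent() returns a reference that must be dropped:

static void print_fwnode_path(struct fwnode_handle *fwnode)
{
	unsigned int depth = fwnode_count_parents(fwnode);

	while (depth) {
		struct fwnode_handle *p = fwnode_get_nth_parent(fwnode, depth);

		pr_cont("%s%s", fwnode_get_name_prefix(p) ?: "",
			fwnode_get_name(p) ?: "");
		fwnode_handle_put(p);
		depth--;
	}
	pr_cont("%s%s\n", fwnode_get_name_prefix(fwnode) ?: "",
		fwnode_get_name(fwnode) ?: "");
}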
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 3a7d30b8c3ac..1fbaaad71ca5 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -215,8 +215,6 @@ struct regmap *__regmap_init_w1(struct device *w1_dev,
return __regmap_init(w1_dev, bus, w1_dev, config,
lock_key, lock_name);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(__regmap_init_w1);
@@ -233,8 +231,6 @@ struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
return __devm_regmap_init(w1_dev, bus, w1_dev, config,
lock_key, lock_name);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 7c0c5ca5953d..4af11a423475 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -104,15 +104,12 @@ static const struct attribute_group soc_attr_group = {
.is_visible = soc_attribute_mode,
};
-static const struct attribute_group *soc_attr_groups[] = {
- &soc_attr_group,
- NULL,
-};
-
static void soc_release(struct device *dev)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+ kfree(soc_dev->dev.groups);
kfree(soc_dev);
}
@@ -121,6 +118,7 @@ static struct soc_device_attribute *early_soc_dev_attr;
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
struct soc_device *soc_dev;
+ const struct attribute_group **soc_attr_groups;
int ret;
if (!soc_bus_type.p) {
@@ -136,10 +134,18 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
goto out1;
}
+ soc_attr_groups = kcalloc(3, sizeof(*soc_attr_groups), GFP_KERNEL);
+ if (!soc_attr_groups) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+ soc_attr_groups[0] = &soc_attr_group;
+ soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
+
/* Fetch a unique (reclaimable) SOC ID. */
ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
- goto out2;
+ goto out3;
soc_dev->soc_dev_num = ret;
soc_dev->attr = soc_dev_attr;
@@ -150,15 +156,15 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
ret = device_register(&soc_dev->dev);
- if (ret)
- goto out3;
+ if (ret) {
+ put_device(&soc_dev->dev);
+ return ERR_PTR(ret);
+ }
return soc_dev;
out3:
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
- put_device(&soc_dev->dev);
- soc_dev = NULL;
+ kfree(soc_attr_groups);
out2:
kfree(soc_dev);
out1:
@@ -169,8 +175,6 @@ EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
-
device_unregister(&soc_dev->dev);
early_soc_dev_attr = NULL;
}
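With the attribute groups array now built per device, a platform can hand soc_device_register() its own sysfs attributes through soc_dev_attr->custom_attr_group. A hedged sketch (foo_* names hypothetical; <linux/sys_soc.h> and <linux/slab.h> assumed):

static struct attribute *foo_soc_attrs[] = {
	/* &dev_attr_foo.attr, */
	NULL,
};

static const struct attribute_group foo_soc_group = {
	.attrs = foo_soc_attrs,
};

static int __init foo_soc_init(void)
{
	struct soc_device_attribute *attr;
	struct soc_device *soc_dev;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->family = "Foo SoC";
	attr->custom_attr_group = &foo_soc_group;

	soc_dev = soc_device_register(attr);
	if (IS_ERR(soc_dev)) {
		kfree(attr);
		return PTR_ERR(soc_dev);
	}
	return 0;
}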
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index a1f3f0994f9f..d8d0dc0ca5ac 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -71,9 +71,9 @@ software_node_to_swnode(const struct software_node *node)
return swnode;
}
-const struct software_node *to_software_node(struct fwnode_handle *fwnode)
+const struct software_node *to_software_node(const struct fwnode_handle *fwnode)
{
- struct swnode *swnode = to_swnode(fwnode);
+ const struct swnode *swnode = to_swnode(fwnode);
return swnode ? swnode->node : NULL;
}
@@ -103,71 +103,15 @@ property_entry_get(const struct property_entry *prop, const char *name)
return NULL;
}
-static void
-property_set_pointer(struct property_entry *prop, const void *pointer)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- prop->pointer.u8_data = pointer;
- else
- prop->value.u8_data = *((u8 *)pointer);
- break;
- case DEV_PROP_U16:
- if (prop->is_array)
- prop->pointer.u16_data = pointer;
- else
- prop->value.u16_data = *((u16 *)pointer);
- break;
- case DEV_PROP_U32:
- if (prop->is_array)
- prop->pointer.u32_data = pointer;
- else
- prop->value.u32_data = *((u32 *)pointer);
- break;
- case DEV_PROP_U64:
- if (prop->is_array)
- prop->pointer.u64_data = pointer;
- else
- prop->value.u64_data = *((u64 *)pointer);
- break;
- case DEV_PROP_STRING:
- if (prop->is_array)
- prop->pointer.str = pointer;
- else
- prop->value.str = pointer;
- break;
- default:
- break;
- }
-}
-
static const void *property_get_pointer(const struct property_entry *prop)
{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- return prop->pointer.u8_data;
- return &prop->value.u8_data;
- case DEV_PROP_U16:
- if (prop->is_array)
- return prop->pointer.u16_data;
- return &prop->value.u16_data;
- case DEV_PROP_U32:
- if (prop->is_array)
- return prop->pointer.u32_data;
- return &prop->value.u32_data;
- case DEV_PROP_U64:
- if (prop->is_array)
- return prop->pointer.u64_data;
- return &prop->value.u64_data;
- case DEV_PROP_STRING:
- if (prop->is_array)
- return prop->pointer.str;
- return &prop->value.str;
- default:
+ if (!prop->length)
return NULL;
- }
+
+ if (prop->is_array)
+ return prop->pointer;
+
+ return &prop->value;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -187,66 +131,6 @@ static const void *property_entry_find(const struct property_entry *props,
return pointer;
}
-static int property_entry_read_u8_array(const struct property_entry *props,
- const char *propname,
- u8 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u16_array(const struct property_entry *props,
- const char *propname,
- u16 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u32_array(const struct property_entry *props,
- const char *propname,
- u32 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u64_array(const struct property_entry *props,
- const char *propname,
- u64 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
static int
property_entry_count_elems_of_size(const struct property_entry *props,
const char *propname, size_t length)
@@ -265,49 +149,45 @@ static int property_entry_read_int_array(const struct property_entry *props,
unsigned int elem_size, void *val,
size_t nval)
{
+ const void *pointer;
+ size_t length;
+
if (!val)
return property_entry_count_elems_of_size(props, name,
elem_size);
- switch (elem_size) {
- case sizeof(u8):
- return property_entry_read_u8_array(props, name, val, nval);
- case sizeof(u16):
- return property_entry_read_u16_array(props, name, val, nval);
- case sizeof(u32):
- return property_entry_read_u32_array(props, name, val, nval);
- case sizeof(u64):
- return property_entry_read_u64_array(props, name, val, nval);
- }
- return -ENXIO;
+ if (!is_power_of_2(elem_size) || elem_size > sizeof(u64))
+ return -ENXIO;
+
+ length = nval * elem_size;
+
+ pointer = property_entry_find(props, name, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(val, pointer, length);
+ return 0;
}
static int property_entry_read_string_array(const struct property_entry *props,
const char *propname,
const char **strings, size_t nval)
{
- const struct property_entry *prop;
const void *pointer;
- size_t array_len, length;
+ size_t length;
+ int array_len;
/* Find out the array length. */
- prop = property_entry_get(props, propname);
- if (!prop)
- return -EINVAL;
-
- if (prop->is_array)
- /* Find the length of an array. */
- array_len = property_entry_count_elems_of_size(props, propname,
- sizeof(const char *));
- else
- /* The array length for a non-array string property is 1. */
- array_len = 1;
+ array_len = property_entry_count_elems_of_size(props, propname,
+ sizeof(const char *));
+ if (array_len < 0)
+ return array_len;
/* Return how many there are if strings is NULL. */
if (!strings)
return array_len;
- array_len = min(nval, array_len);
+ array_len = min_t(size_t, nval, array_len);
length = array_len * sizeof(*strings);
pointer = property_entry_find(props, propname, length);
@@ -322,13 +202,15 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
const void *pointer = property_get_pointer(p);
+ const char * const *src_str;
size_t i, nval;
if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer.str) {
+ if (p->type == DEV_PROP_STRING && p->pointer) {
+ src_str = p->pointer;
nval = p->length / sizeof(const char *);
for (i = 0; i < nval; i++)
- kfree(p->pointer.str[i]);
+ kfree(src_str[i]);
}
kfree(pointer);
} else if (p->type == DEV_PROP_STRING) {
@@ -337,29 +219,29 @@ static void property_entry_free_data(const struct property_entry *p)
kfree(p->name);
}
-static int property_copy_string_array(struct property_entry *dst,
- const struct property_entry *src)
+static const char * const *
+property_copy_string_array(const struct property_entry *src)
{
const char **d;
+ const char * const *src_str = src->pointer;
size_t nval = src->length / sizeof(*d);
int i;
d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ return NULL;
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
- if (!d[i] && src->pointer.str[i]) {
+ d[i] = kstrdup(src_str[i], GFP_KERNEL);
+ if (!d[i] && src_str[i]) {
while (--i >= 0)
kfree(d[i]);
kfree(d);
- return -ENOMEM;
+ return NULL;
}
}
- dst->pointer.str = d;
- return 0;
+ return d;
}
static int property_entry_copy_data(struct property_entry *dst,
@@ -367,36 +249,35 @@ static int property_entry_copy_data(struct property_entry *dst,
{
const void *pointer = property_get_pointer(src);
const void *new;
- int error;
if (src->is_array) {
if (!src->length)
return -ENODATA;
if (src->type == DEV_PROP_STRING) {
- error = property_copy_string_array(dst, src);
- if (error)
- return error;
- new = dst->pointer.str;
+ new = property_copy_string_array(src);
+ if (!new)
+ return -ENOMEM;
} else {
new = kmemdup(pointer, src->length, GFP_KERNEL);
if (!new)
return -ENOMEM;
}
+
+ dst->is_array = true;
+ dst->pointer = new;
} else if (src->type == DEV_PROP_STRING) {
new = kstrdup(src->value.str, GFP_KERNEL);
if (!new && src->value.str)
return -ENOMEM;
+
+ dst->value.str = new;
} else {
- new = pointer;
+ dst->value = src->value;
}
dst->length = src->length;
- dst->is_array = src->is_array;
dst->type = src->type;
-
- property_set_pointer(dst, new);
-
dst->name = kstrdup(src->name, GFP_KERNEL);
if (!dst->name)
goto out_free_data;
@@ -515,12 +396,47 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
propname, val, nval);
}
+static const char *
+software_node_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct swnode *swnode = to_swnode(fwnode);
+
+ if (!swnode)
+ return "(null)";
+
+ return kobject_name(&swnode->kobj);
+}
+
+static const char *
+software_node_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ const char *prefix;
+
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Figure out the prefix from the parents. */
+ while (is_software_node(parent))
+ parent = fwnode_get_next_parent(parent);
+
+ prefix = fwnode_get_name_prefix(parent);
+ fwnode_handle_put(parent);
+
+ /* Guess something if prefix was NULL. */
+ return prefix ?: "/";
+}
+
static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
- return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
+ if (!swnode || !swnode->parent)
+ return NULL;
+
+ return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *
@@ -612,6 +528,8 @@ static const struct fwnode_operations software_node_ops = {
.property_present = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
+ .get_name = software_node_get_name,
+ .get_name_prefix = software_node_get_name_prefix,
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
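None of the property_entry rework above is visible to software-node users; entries are still declared with the PROPERTY_ENTRY_*() macros, which hide the pointer/value union behind the scenes. For illustration (a hypothetical node; <linux/property.h> assumed):

static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("linux,max-speed", 3000000),
	PROPERTY_ENTRY_STRING("label", "foo"),
	{ }
};

static const struct software_node foo_swnode = {
	.name = "foo",
	.properties = foo_props,
};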
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index f4161064365c..3056f81efca4 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -233,8 +233,10 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
- /* enable 12 mA drive strenth for 4313 and set chipControl
- register bit 1 */
+ /*
+ * enable 12 mA drive strength for 4313 and set chipControl
+ * register bit 1
+ */
bcma_chipco_chipctl_maskset(cc, 0,
~BCMA_CCTRL_4313_12MA_LED_DRIVE,
BCMA_CCTRL_4313_12MA_LED_DRIVE);
@@ -246,8 +248,10 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
break;
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43421:
- /* enable 12 mA drive strenth for 43224 and set chipControl
- register bit 15 */
+ /*
+ * enable 12 mA drive strength for 43224 and set chipControl
+ * register bit 15
+ */
if (bus->chipinfo.rev == 0) {
bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
~BCMA_CCTRL_43224_GPIO_TOGGLE,
@@ -500,8 +504,10 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
case BCMA_CHIP_ID_BCM53572:
/* 5357[ab]0, 43236[ab]0, and 6362b0 */
- /* BCM5357 needs to touch PLL1_PLLCTL[02],
- so offset PLL0_PLLCTL[02] by 6 */
+ /*
+ * BCM5357 needs to touch PLL1_PLLCTL[02],
+ * so offset PLL0_PLLCTL[02] by 6
+ */
phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
@@ -619,8 +625,10 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
case BCMA_CHIP_ID_BCM43228:
case BCMA_CHIP_ID_BCM43428:
/* LCNXN */
- /* PLL Settings for spur avoidance on/off mode,
- no on2 support for 43228A0 */
+ /*
+ * PLL Settings for spur avoidance on/off mode,
+ * no on2 support for 43228A0
+ */
if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x01100014);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 5d52a2d32155..de2f94d0103a 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -268,19 +268,18 @@ static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
/* some more paranoia, if the request was over-determined */
if (adm_ctx->device && adm_ctx->resource &&
adm_ctx->device->resource != adm_ctx->resource) {
- pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
- adm_ctx->minor, adm_ctx->resource->name,
- adm_ctx->device->resource->name);
+ pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
+ adm_ctx->minor, adm_ctx->resource->name,
+ adm_ctx->device->resource->name);
drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
return ERR_INVALID_REQUEST;
}
if (adm_ctx->device &&
adm_ctx->volume != VOLUME_UNSPECIFIED &&
adm_ctx->volume != adm_ctx->device->vnr) {
- pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
- adm_ctx->minor, adm_ctx->volume,
- adm_ctx->device->vnr,
- adm_ctx->device->resource->name);
+ pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+ adm_ctx->minor, adm_ctx->volume,
+ adm_ctx->device->vnr, adm_ctx->device->resource->name);
drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
return ERR_INVALID_REQUEST;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f6f77eaa7217..739b372a5112 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -417,18 +417,20 @@ out_free_page:
return ret;
}
-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ int mode)
{
/*
- * We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * We use fallocate to manipulate the space mappings used by the image
+ * a.k.a. discard/zerorange. However, we do not support this if
+ * encryption is enabled, because it may give an attacker useful
+ * information.
*/
struct file *file = lo->lo_backing_file;
- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
int ret;
+ mode |= FALLOC_FL_KEEP_SIZE;
+
if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
ret = -EOPNOTSUPP;
goto out;
@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
- case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- return lo_discard(lo, rq, pos);
+ /*
+ * If the caller doesn't want deallocation, call zeroout to
+ * write zeroes to the range. Otherwise, punch the blocks out.
+ */
+ return lo_fallocate(lo, rq, pos,
+ (rq->cmd_flags & REQ_NOUNMAP) ?
+ FALLOC_FL_ZERO_RANGE :
+ FALLOC_FL_PUNCH_HOLE);
+ case REQ_OP_DISCARD:
+ return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (lo->transfer)
return lo_write_transfer(lo, rq, pos);
@@ -630,7 +640,9 @@ static void loop_reread_partitions(struct loop_device *lo,
{
int rc;
- rc = blkdev_reread_part(bdev);
+ mutex_lock(&bdev->bd_mutex);
+ rc = bdev_disk_changed(bdev, false);
+ mutex_unlock(&bdev->bd_mutex);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -1154,10 +1166,11 @@ out_unlock:
* must be at least one and it can only become zero when the
* current holder is released.
*/
- if (release)
- err = __blkdev_reread_part(bdev);
- else
- err = blkdev_reread_part(bdev);
+ if (!release)
+ mutex_lock(&bdev->bd_mutex);
+ err = bdev_disk_changed(bdev, false);
+ if (!release)
+ mutex_unlock(&bdev->bd_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo_number, err);
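In userspace terms, the two fallocate modes the loop driver now issues on its backing file behave like the following standalone sketch (a userspace analogue, not kernel code):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

/* Zero a range without deallocating it (the REQ_NOUNMAP case). */
int zero_range(int fd, off_t off, off_t len)
{
	return fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			 off, len);
}

/* Deallocate a range (the discard case); file size is preserved. */
int punch_hole(int fd, off_t off, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 off, len);
}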
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 964f78cfffa0..f6bafa9a68b9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -129,7 +129,7 @@ struct mtip_compat_ide_task_request_s {
/*
* This function check_for_surprise_removal is called
* while card is removed from the system and it will
- * read the vendor id from the configration space
+ * read the vendor id from the configuration space
*
* @pdev Pointer to the pci_dev structure.
*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a94ee45440b3..57532465fb83 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -993,6 +993,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
+ sockfd_put(sock);
return NULL;
}
@@ -1031,14 +1032,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
sockfd_put(sock);
return -ENOMEM;
}
+
+ config->socks = socks;
+
nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
if (!nsock) {
sockfd_put(sock);
return -ENOMEM;
}
- config->socks = socks;
-
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index a235c45e22a7..bc837862b767 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -91,11 +91,13 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+int null_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors);
+size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector, unsigned int len);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
@@ -103,17 +105,18 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones)
-{
- return -EOPNOTSUPP;
-}
static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
+static inline size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector,
+ unsigned int len)
+{
+ return len;
+}
+#define null_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 0e7da5015ccd..795fda576824 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -227,7 +227,7 @@ static ssize_t nullb_device_uint_attr_store(unsigned int *val,
int result;
result = kstrtouint(page, 0, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -241,7 +241,7 @@ static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
unsigned long tmp;
result = kstrtoul(page, 0, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -255,7 +255,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
int result;
result = kstrtobool(page, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -263,7 +263,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
}
/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
@@ -274,31 +274,57 @@ static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
size_t count) \
{ \
- if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
- return -EBUSY; \
- return nullb_device_##TYPE##_attr_store( \
- &to_nullb_device(item)->NAME, page, count); \
+ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY; \
+ struct nullb_device *dev = to_nullb_device(item); \
+ TYPE new_value; \
+ int ret; \
+ \
+ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count); \
+ if (ret < 0) \
+ return ret; \
+ if (apply_fn) \
+ ret = apply_fn(dev, new_value); \
+ else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
+ ret = -EBUSY; \
+ if (ret < 0) \
+ return ret; \
+ dev->NAME = new_value; \
+ return count; \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
-NULLB_DEVICE_ATTR(size, ulong);
-NULLB_DEVICE_ATTR(completion_nsec, ulong);
-NULLB_DEVICE_ATTR(submit_queues, uint);
-NULLB_DEVICE_ATTR(home_node, uint);
-NULLB_DEVICE_ATTR(queue_mode, uint);
-NULLB_DEVICE_ATTR(blocksize, uint);
-NULLB_DEVICE_ATTR(irqmode, uint);
-NULLB_DEVICE_ATTR(hw_queue_depth, uint);
-NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(blocking, bool);
-NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
-NULLB_DEVICE_ATTR(memory_backed, bool);
-NULLB_DEVICE_ATTR(discard, bool);
-NULLB_DEVICE_ATTR(mbps, uint);
-NULLB_DEVICE_ATTR(cache_size, ulong);
-NULLB_DEVICE_ATTR(zoned, bool);
-NULLB_DEVICE_ATTR(zone_size, ulong);
-NULLB_DEVICE_ATTR(zone_nr_conv, uint);
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+{
+ struct nullb *nullb = dev->nullb;
+ struct blk_mq_tag_set *set;
+
+ if (!nullb)
+ return 0;
+
+ set = nullb->tag_set;
+ blk_mq_update_nr_hw_queues(set, submit_queues);
+ return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+}
+
+NULLB_DEVICE_ATTR(size, ulong, NULL);
+NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
+NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(home_node, uint, NULL);
+NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
+NULLB_DEVICE_ATTR(blocksize, uint, NULL);
+NULLB_DEVICE_ATTR(irqmode, uint, NULL);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
+NULLB_DEVICE_ATTR(index, uint, NULL);
+NULLB_DEVICE_ATTR(blocking, bool, NULL);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
+NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
+NULLB_DEVICE_ATTR(discard, bool, NULL);
+NULLB_DEVICE_ATTR(mbps, uint, NULL);
+NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zoned, bool, NULL);
+NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -467,7 +493,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_nr_conv\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -996,6 +1022,16 @@ next:
return 0;
}
+static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off)
+{
+ void *dst;
+
+ dst = kmap_atomic(page);
+ memset(dst + off, 0xFF, len);
+ kunmap_atomic(dst);
+}
+
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
size_t temp;
@@ -1036,10 +1072,24 @@ static int null_transfer(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off, bool is_write, sector_t sector,
bool is_fua)
{
+ struct nullb_device *dev = nullb->dev;
+ unsigned int valid_len = len;
int err = 0;
if (!is_write) {
- err = copy_from_nullb(nullb, page, off, sector, len);
+ if (dev->zoned)
+ valid_len = null_zone_valid_read_len(nullb,
+ sector, len);
+
+ if (valid_len) {
+ err = copy_from_nullb(nullb, page, off,
+ sector, valid_len);
+ off += valid_len;
+ len -= valid_len;
+ }
+
+ if (len)
+ nullb_fill_pattern(nullb, page, len, off);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
@@ -1418,20 +1468,9 @@ static void null_config_discard(struct nullb *nullb)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
-static int null_open(struct block_device *bdev, fmode_t mode)
-{
- return 0;
-}
-
-static void null_release(struct gendisk *disk, fmode_t mode)
-{
-}
-
-static const struct block_device_operations null_fops = {
- .owner = THIS_MODULE,
- .open = null_open,
- .release = null_release,
- .report_zones = null_zone_report,
+static const struct block_device_operations null_ops = {
+ .owner = THIS_MODULE,
+ .report_zones = null_report_zones,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1532,7 +1571,7 @@ static int null_gendisk_register(struct nullb *nullb)
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
- disk->fops = &null_fops;
+ disk->fops = &null_ops;
disk->private_data = nullb;
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
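For reference, the store method the reworked NULLB_DEVICE_ATTR() macro generates for "submit_queues" boils down to roughly this sketch, which shows how the new APPLY callback lets a value be validated and applied on a live device:

static ssize_t nullb_device_submit_queues_store(struct config_item *item,
						const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	unsigned int new_value;
	int ret;

	ret = nullb_device_uint_attr_store(&new_value, page, count);
	if (ret < 0)
		return ret;

	/* apply_fn is set, so the value may change even when configured */
	ret = nullb_apply_submit_queues(dev, new_value);
	if (ret < 0)
		return ret;

	dev->submit_queues = new_value;
	return count;
}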
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d7fdea872f8..d4d88b581822 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -66,22 +66,53 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+int null_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int zno, nrz = 0;
-
- zno = null_zone_no(dev, sector);
- if (zno < dev->nr_zones) {
- nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
- memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
+ unsigned int first_zone, i;
+ struct blk_zone zone;
+ int error;
+
+ first_zone = null_zone_no(dev, sector);
+ if (first_zone >= dev->nr_zones)
+ return 0;
+
+ nr_zones = min(nr_zones, dev->nr_zones - first_zone);
+ for (i = 0; i < nr_zones; i++) {
+ /*
+ * Stacked DM target drivers will remap the zone information by
+ * modifying the zone descriptor passed to the report callback.
+ * So use a local copy to avoid corruption of the device zone
+ * array.
+ */
+ memcpy(&zone, &dev->zones[first_zone + i],
+ sizeof(struct blk_zone));
+ error = cb(&zone, i, data);
+ if (error)
+ return error;
}
- *nr_zones = nrz;
+ return nr_zones;
+}
- return 0;
+size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector, unsigned int len)
+{
+ struct nullb_device *dev = nullb->dev;
+ struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
+ unsigned int nr_sectors = len >> SECTOR_SHIFT;
+
+ /* Read must be below the write pointer position */
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
+ sector + nr_sectors <= zone->wp)
+ return len;
+
+ if (sector > zone->wp)
+ return 0;
+
+ return (zone->wp - sector) << SECTOR_SHIFT;
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
@@ -118,14 +149,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_OK;
}
-static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+ sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zno = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zno];
+ struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
size_t i;
- switch (req_op(cmd->rq)) {
+ switch (op) {
case REQ_OP_ZONE_RESET_ALL:
for (i = 0; i < dev->nr_zones; i++) {
if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
@@ -141,6 +172,29 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
break;
+ case REQ_OP_ZONE_OPEN:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case REQ_OP_ZONE_FINISH:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->len;
+ break;
default:
return BLK_STS_NOTSUPP;
}
@@ -155,7 +209,10 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
return null_zone_write(cmd, sector, nr_sectors);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
- return null_zone_reset(cmd, sector);
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return null_zone_mgmt(cmd, op, sector);
default:
return BLK_STS_OK;
}
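Callers of ->report_zones() now supply a callback that is invoked once per zone with a private copy of the zone descriptor. A minimal consumer might look like this sketch (hypothetical helper):

static int count_seq_zones_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	unsigned int *nr_seq = data;

	if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
		(*nr_seq)++;
	return 0;	/* returning non-zero aborts the report */
}

/* e.g.: disk->fops->report_zones(disk, 0, UINT_MAX, count_seq_zones_cb, &n); */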
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 39136675dae5..13527a0b4e44 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_data *osd_data;
u64 objno;
- u8 state, new_state, current_state;
+ u8 state, new_state, uninitialized_var(current_state);
bool has_current_state;
void *p;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 76b73ddf8fd7..10f6368117d8 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
cancel_work_sync(&card->event_work);
+ destroy_workqueue(card->event_wq);
rsxx_destroy_dev(card);
rsxx_dma_destroy(card);
+ destroy_workqueue(card->creg_ctrl.creg_wq);
spin_lock_irqsave(&card->irq_lock, flags);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index aae665a3a254..f7aa2dc1ff85 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -380,17 +380,6 @@ config BT_ATH3K
Say Y here to compile support for "Atheros firmware download driver"
into the kernel or say M to compile it as module (ath3k).
-config BT_WILINK
- tristate "Texas Instruments WiLink7 driver"
- depends on TI_ST
- help
- This enables the Bluetooth driver for Texas Instrument's BT/FM/GPS
- combo devices. This makes use of shared transport line discipline
- core driver to communicate with the BT core of the combo chip.
-
- Say Y here to compile support for Texas Instrument's WiLink7 driver
- into the kernel or say M to compile it as module (btwilink).
-
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 34887b9b3a85..1a58a3ae142c 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_BT_INTEL) += btintel.o
obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
-obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o
obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 2d2e6d862068..8e05706fe5d9 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -23,6 +23,7 @@
#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
+#define BDADDR_BCM4334B0 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb0, 0x34, 0x43}})
#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}})
#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
@@ -74,6 +75,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
!bacmp(&bda->bdaddr, BDADDR_BCM2076B1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM4334B0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4345C5) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
@@ -326,6 +328,7 @@ struct bcm_subver_table {
static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
+ { 0x410d, "BCM4334B0" }, /* 002.001.013 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
@@ -339,6 +342,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
{ 0x4217, "BCM4329B1" }, /* 002.002.023 */
{ 0x6106, "BCM4359C0" }, /* 003.001.006 */
+ { 0x4106, "BCM4335A0" }, /* 002.001.006 */
{ }
};
@@ -440,6 +444,12 @@ int btbcm_finalize(struct hci_dev *hdev)
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ /* Some devices ship with the controller default address.
+ * Allow the bootloader to set a valid address through the
+ * device tree.
+ */
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_finalize);
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index bb99c8653aab..62e781a18bf0 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -709,6 +709,51 @@ done:
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
+void btintel_reset_to_bootloader(struct hci_dev *hdev)
+{
+ struct intel_reset params;
+ struct sk_buff *skb;
+
+ /* Send Intel Reset command. This will result in
+ * re-enumeration of BT controller.
+ *
+ * Intel Reset parameter description:
+ * reset_type : 0x00 (Soft reset),
+ * 0x01 (Hard reset)
+ * patch_enable : 0x00 (Do not enable),
+ * 0x01 (Enable)
+ * ddc_reload : 0x00 (Do not reload),
+ * 0x01 (Reload)
+ * boot_option: 0x00 (Current image),
+ * 0x01 (Specified boot address)
+ * boot_param: Boot address
+ *
+ */
+ params.reset_type = 0x01;
+ params.patch_enable = 0x01;
+ params.ddc_reload = 0x01;
+ params.boot_option = 0x00;
+ params.boot_param = cpu_to_le32(0x00000000);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ &params, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "FW download error recovery failed (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+ bt_dev_info(hdev, "Intel reset sent to retry FW download");
+ kfree_skb(skb);
+
+ /* Current Intel BT controllers (ThP/JfP) hold the USB reset
+ * lines for 2ms when they receive Intel Reset in bootloader mode,
+ * whereas upcoming Intel BT controllers will hold USB reset
+ * for 150ms. To keep the delay generic, 150ms is chosen here.
+ */
+ msleep(150);
+}
+EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 3d846190f2bf..a69ea8a87b9b 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -87,6 +87,7 @@ int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
+void btintel_reset_to_bootloader(struct hci_dev *hdev);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -181,4 +182,8 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
{
return -EOPNOTSUPP;
}
+
+static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
+{
+}
#endif
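A transport driver would typically call the new helper from its firmware-download error path, so the controller re-enumerates and probe can retry the download. An illustrative fragment (not taken verbatim from any driver; hdev, fw and boot_param are assumed context):

	err = btintel_download_firmware(hdev, fw, &boot_param);
	if (err < 0) {
		/* Reset to bootloader; the device re-enumerates afterwards. */
		btintel_reset_to_bootloader(hdev);
		release_firmware(fw);
		return err;
	}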
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 813338288453..519788c442ca 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -57,6 +57,7 @@ static const struct sdio_device_id btmtksdio_table[] = {
.driver_data = (kernel_ulong_t)&mt7668_data },
{ } /* Terminating entry */
};
+MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CHLPCR 0x4 /* W1S */
#define C_INT_EN_SET BIT(0)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8cc21ad7cf29..ec69e5dd7bd3 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -14,19 +14,33 @@
#define VERSION "0.1"
-int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type soc_type)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
- struct rome_version *ver;
+ struct qca_btsoc_version *ver;
char cmd;
int err = 0;
+ u8 event_type = HCI_EV_VENDOR;
+ u8 rlen = sizeof(*edl) + sizeof(*ver);
+ u8 rtype = EDL_APP_VER_RES_EVT;
bt_dev_dbg(hdev, "QCA Version Request");
+ /* Unlike other SoCs, which send the version command response as a
+ * payload to a VSE event, WCN3991 sends the version command response
+ * as a payload to a command complete event.
+ */
+ if (soc_type == QCA_WCN3991) {
+ event_type = 0;
+ rlen += 1;
+ rtype = EDL_PATCH_VER_REQ_CMD;
+ }
+
cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
- &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ &cmd, event_type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Reading QCA version information failed (%d)",
@@ -34,7 +48,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
return err;
}
- if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+ if (skb->len != rlen) {
bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
err = -EILSEQ;
goto out;
@@ -48,18 +62,21 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
}
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
- edl->rtype != EDL_APP_VER_RES_EVT) {
+ edl->rtype != rtype) {
bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
edl->rtype);
err = -EIO;
goto out;
}
- ver = (struct rome_version *)(edl->data);
+ if (soc_type == QCA_WCN3991)
+ memmove(&edl->data, &edl->data[1], sizeof(*ver));
+
+ ver = (struct qca_btsoc_version *)(edl->data);
BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
- BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+ BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rom_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
/* QCA chipset version can be decided by patch and SoC
@@ -67,7 +84,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
* and lower 2 bytes from patch will be used.
*/
*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
if (*soc_version == 0)
err = -EILSEQ;
@@ -121,7 +138,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
-static void qca_tlv_check_data(struct rome_config *config,
+static void qca_tlv_check_data(struct qca_fw_config *config,
const struct firmware *fw)
{
const u8 *data;
@@ -140,8 +157,8 @@ static void qca_tlv_check_data(struct rome_config *config,
BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
BT_DBG("Length\t\t : %d bytes", length);
- config->dnld_mode = ROME_SKIP_EVT_NONE;
- config->dnld_type = ROME_SKIP_EVT_NONE;
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
+ config->dnld_type = QCA_SKIP_EVT_NONE;
switch (config->type) {
case TLV_TYPE_PATCH:
@@ -223,31 +240,45 @@ static void qca_tlv_check_data(struct rome_config *config,
}
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
- const u8 *data, enum rome_tlv_dnld_mode mode)
+ const u8 *data, enum qca_tlv_dnld_mode mode,
+ enum qca_btsoc_type soc_type)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
struct tlv_seg_resp *tlv_resp;
u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
int err = 0;
+ u8 event_type = HCI_EV_VENDOR;
+ u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp));
+ u8 rtype = EDL_TVL_DNLD_RES_EVT;
cmd[0] = EDL_PATCH_TLV_REQ_CMD;
cmd[1] = seg_size;
memcpy(cmd + 2, data, seg_size);
- if (mode == ROME_SKIP_EVT_VSE_CC || mode == ROME_SKIP_EVT_VSE)
+ if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE)
return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
cmd);
+ /* Unlike other SoCs, which send the version command response as a
+ * payload to a VSE event, the WCN3991 sends the version command
+ * response as a payload to a command complete event.
+ */
+ if (soc_type == QCA_WCN3991) {
+ event_type = 0;
+ rlen = sizeof(*edl);
+ rtype = EDL_PATCH_TLV_REQ_CMD;
+ }
+
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ event_type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err;
}
- if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+ if (skb->len != rlen) {
bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ;
goto out;
@@ -260,13 +291,19 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
goto out;
}
- tlv_resp = (struct tlv_seg_resp *)(edl->data);
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
+ bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
+ edl->cresp, edl->rtype);
+ err = -EIO;
+ }
- if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
- edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+ if (soc_type == QCA_WCN3991)
+ goto out;
+
+ tlv_resp = (struct tlv_seg_resp *)(edl->data);
+ if (tlv_resp->result) {
bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
edl->cresp, edl->rtype, tlv_resp->result);
- err = -EIO;
}
out:
@@ -301,7 +338,8 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
}
static int qca_download_firmware(struct hci_dev *hdev,
- struct rome_config *config)
+ struct qca_fw_config *config,
+ enum qca_btsoc_type soc_type)
{
const struct firmware *fw;
const u8 *segment;
@@ -328,10 +366,10 @@ static int qca_download_firmware(struct hci_dev *hdev,
remain -= segsize;
/* The last segment is always acked regardless of download mode */
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
- config->dnld_mode = ROME_SKIP_EVT_NONE;
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
ret = qca_tlv_send_segment(hdev, segsize, segment,
- config->dnld_mode);
+ config->dnld_mode, soc_type);
if (ret)
goto out;
@@ -344,8 +382,8 @@ static int qca_download_firmware(struct hci_dev *hdev,
* decrease the BT initialization time. Here we will inject a command
* complete event to avoid a command timeout error message.
*/
- if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
- config->dnld_type == ROME_SKIP_EVT_VSE)
+ if (config->dnld_type == QCA_SKIP_EVT_VSE_CC ||
+ config->dnld_type == QCA_SKIP_EVT_VSE)
ret = qca_inject_cmd_complete_event(hdev);
out:
@@ -382,7 +420,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver,
const char *firmware_name)
{
- struct rome_config config;
+ struct qca_fw_config config;
int err;
u8 rom_ver = 0;
@@ -405,7 +443,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/rampatch_%08x.bin", soc_ver);
}
- err = qca_download_firmware(hdev, &config);
+ err = qca_download_firmware(hdev, &config, soc_type);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
@@ -426,7 +464,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
- err = qca_download_firmware(hdev, &config);
+ err = qca_download_firmware(hdev, &config, soc_type);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
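The WCN3991 branches in qca_read_soc_version() and qca_tlv_send_segment() above all make the same three-way choice up front — wait event type, expected response length, expected response type — and then share the common path. A condensed sketch of that selection, reusing names from the diff (the helper itself is an illustrative restructuring, not code from the driver):

/* Sketch: per-SoC choice of how the EDL response arrives. The helper
 * and struct wrapper are illustrative; the field values mirror the diff.
 */
struct edl_resp_params {
	u8 event_type;	/* 0 selects command-complete handling */
	u8 rlen;	/* expected response length */
	u8 rtype;	/* expected response type */
};

static void example_pick_ver_resp(enum qca_btsoc_type soc_type,
				  struct edl_resp_params *p)
{
	/* Default: the response arrives as a vendor-specific event */
	p->event_type = HCI_EV_VENDOR;
	p->rlen = sizeof(struct edl_event_hdr) +
		  sizeof(struct qca_btsoc_version);
	p->rtype = EDL_APP_VER_RES_EVT;

	/* WCN3991 replies via command complete, with one extra byte */
	if (soc_type == QCA_WCN3991) {
		p->event_type = 0;
		p->rlen += 1;
		p->rtype = EDL_PATCH_VER_REQ_CMD;
	}
}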
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 69c5315a65fd..f5795b1a3779 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -56,24 +56,24 @@ enum qca_baudrate {
QCA_BAUDRATE_RESERVED
};
-enum rome_tlv_dnld_mode {
- ROME_SKIP_EVT_NONE,
- ROME_SKIP_EVT_VSE,
- ROME_SKIP_EVT_CC,
- ROME_SKIP_EVT_VSE_CC
+enum qca_tlv_dnld_mode {
+ QCA_SKIP_EVT_NONE,
+ QCA_SKIP_EVT_VSE,
+ QCA_SKIP_EVT_CC,
+ QCA_SKIP_EVT_VSE_CC
};
-enum rome_tlv_type {
+enum qca_tlv_type {
TLV_TYPE_PATCH = 1,
TLV_TYPE_NVM
};
-struct rome_config {
+struct qca_fw_config {
u8 type;
char fwname[64];
uint8_t user_baud_rate;
- enum rome_tlv_dnld_mode dnld_mode;
- enum rome_tlv_dnld_mode dnld_type;
+ enum qca_tlv_dnld_mode dnld_mode;
+ enum qca_tlv_dnld_mode dnld_type;
};
struct edl_event_hdr {
@@ -82,10 +82,10 @@ struct edl_event_hdr {
__u8 data[0];
} __packed;
-struct rome_version {
+struct qca_btsoc_version {
__le32 product_id;
__le16 patch_ver;
- __le16 rome_ver;
+ __le16 rom_ver;
__le32 soc_id;
} __packed;
@@ -125,6 +125,7 @@ enum qca_btsoc_type {
QCA_AR3002,
QCA_ROME,
QCA_WCN3990,
+ QCA_WCN3991,
QCA_WCN3998,
};
@@ -134,12 +135,14 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver,
const char *firmware_name);
-int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
- return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
+ soc_type == QCA_WCN3998;
}
#else
@@ -155,7 +158,8 @@ static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return -EOPNOTSUPP;
}
-static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index bf3c02be6930..f838537f9f89 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -418,7 +418,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
if (IS_ERR(skb)) {
rtl_dev_err(hdev, "download fw command failed (%ld)",
PTR_ERR(skb));
- ret = -PTR_ERR(skb);
+ ret = PTR_ERR(skb);
goto out;
}
@@ -778,7 +778,7 @@ int btrtl_get_uart_settings(struct hci_dev *hdev,
rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)",
le16_to_cpu(entry->offset), entry->len);
break;
- };
+ }
i += sizeof(*entry) + entry->len;
}
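The one-character btrtl fix above matters because PTR_ERR() already yields a negative errno; negating it produced a positive value that callers testing for ret < 0 would miss. The ERR_PTR()/PTR_ERR() sign convention in isolation (example_alloc() is a stand-in for any pointer-or-errno function):

/* Sketch: the ERR_PTR()/PTR_ERR() sign convention the fix restores. */
#include <linux/err.h>
#include <linux/errno.h>

static void *example_alloc(bool fail)
{
	if (fail)
		return ERR_PTR(-ETIMEDOUT);	/* encodes a negative errno */
	return (void *)"ok";
}

static int example_caller(void)
{
	void *p = example_alloc(true);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* already -ETIMEDOUT; don't negate */
	return 0;
}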
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a9c35ebb30f8..70e385987d41 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1200,7 +1200,7 @@ static int btusb_open(struct hci_dev *hdev)
if (data->setup_on_usb) {
err = data->setup_on_usb(hdev);
if (err < 0)
- return err;
+ goto setup_fail;
}
data->intf->needs_remote_wakeup = 1;
@@ -1239,6 +1239,7 @@ done:
failed:
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+setup_fail:
usb_autopm_put_interface(data->intf);
return err;
}
@@ -2182,8 +2183,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* loaded.
*/
err = btintel_read_version(hdev, &ver);
- if (err)
+ if (err) {
+ bt_dev_err(hdev, "Intel Read version failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
return err;
+ }
/* The hardware platform number has a fixed value of 0x37 and
* for now only accept this single value.
@@ -2326,9 +2330,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Start firmware downloading and get boot parameter */
err = btintel_download_firmware(hdev, fw, &boot_param);
- if (err < 0)
+ if (err < 0) {
+ /* When FW download fails, send Intel Reset to retry
+ * FW download.
+ */
+ btintel_reset_to_bootloader(hdev);
goto done;
-
+ }
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
bt_dev_info(hdev, "Waiting for firmware download to complete");
@@ -2355,6 +2363,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (err) {
bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT;
+ btintel_reset_to_bootloader(hdev);
goto done;
}
@@ -2381,8 +2390,11 @@ done:
set_bit(BTUSB_BOOTING, &data->flags);
err = btintel_send_intel_reset(hdev, boot_param);
- if (err)
+ if (err) {
+ bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
return err;
+ }
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
@@ -2404,6 +2416,7 @@ done:
if (err) {
bt_dev_err(hdev, "Device boot timeout");
+ btintel_reset_to_bootloader(hdev);
return -ETIMEDOUT;
}
@@ -2432,6 +2445,13 @@ done:
*/
btintel_set_event_mask(hdev, false);
+ /* Read the Intel version information after loading the FW */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
return 0;
}
@@ -2489,8 +2509,6 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
return 0;
}
-#ifdef CONFIG_BT_HCIBTUSB_MTK
-
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
@@ -3051,7 +3069,6 @@ static int btusb_mtk_shutdown(struct hci_dev *hdev)
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
-#endif
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
@@ -3411,7 +3428,6 @@ static int btusb_setup_qca(struct hci_dev *hdev)
return 0;
}
-#ifdef CONFIG_BT_HCIBTUSB_BCM
static inline int __set_diag_interface(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -3498,7 +3514,6 @@ static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
return submit_or_queue_tx_urb(hdev, urb);
}
-#endif
#ifdef CONFIG_PM
static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
@@ -3724,8 +3739,8 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
-#ifdef CONFIG_BT_HCIBTUSB_BCM
- if (id->driver_info & BTUSB_BCM_PATCHRAM) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) &&
+ (id->driver_info & BTUSB_BCM_PATCHRAM)) {
hdev->manufacturer = 15;
hdev->setup = btbcm_setup_patchram;
hdev->set_diag = btusb_bcm_set_diag;
@@ -3735,7 +3750,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
}
- if (id->driver_info & BTUSB_BCM_APPLE) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) &&
+ (id->driver_info & BTUSB_BCM_APPLE)) {
hdev->manufacturer = 15;
hdev->setup = btbcm_setup_apple;
hdev->set_diag = btusb_bcm_set_diag;
@@ -3743,7 +3759,6 @@ static int btusb_probe(struct usb_interface *intf,
/* Broadcom LM_DIAG Interface numbers are hardcoded */
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
}
-#endif
if (id->driver_info & BTUSB_INTEL) {
hdev->manufacturer = 2;
@@ -3774,14 +3789,13 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
-#ifdef CONFIG_BT_HCIBTUSB_MTK
- if (id->driver_info & BTUSB_MEDIATEK) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) &&
+ (id->driver_info & BTUSB_MEDIATEK)) {
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
}
-#endif
if (id->driver_info & BTUSB_SWAVE) {
set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
@@ -3807,8 +3821,8 @@ static int btusb_probe(struct usb_interface *intf,
btusb_check_needs_reset_resume(intf);
}
-#ifdef CONFIG_BT_HCIBTUSB_RTL
- if (id->driver_info & BTUSB_REALTEK) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
+ (id->driver_info & BTUSB_REALTEK)) {
hdev->setup = btrtl_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
hdev->cmd_timeout = btusb_rtl_cmd_timeout;
@@ -3819,7 +3833,6 @@ static int btusb_probe(struct usb_interface *intf,
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
}
-#endif
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
@@ -3887,15 +3900,13 @@ static int btusb_probe(struct usb_interface *intf,
goto out_free_dev;
}
-#ifdef CONFIG_BT_HCIBTUSB_BCM
- if (data->diag) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
if (!usb_driver_claim_interface(&btusb_driver,
data->diag, data))
__set_diag_interface(hdev);
else
data->diag = NULL;
}
-#endif
if (enable_autosuspend)
usb_enable_autosuspend(data->udev);
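The btusb hunks above replace #ifdef fencing with IS_ENABLED() so every branch is compiled (and type-checked) in every configuration, with the optimizer discarding disabled ones; this relies on the guarded APIs providing declarations and stubs when their option is off, as the #else branch of btqca.h above does for its own consumers. A minimal sketch with a made-up config symbol:

/* Sketch: IS_ENABLED() keeps both branches visible to the compiler.
 * CONFIG_EXAMPLE_FEATURE is a placeholder Kconfig symbol.
 */
#include <linux/kconfig.h>
#include <linux/types.h>

static bool example_feature_wanted(unsigned long driver_info)
{
	/* Compiles in all configs; folds to 'false' when the option is
	 * disabled, so the feature code is dead-code-eliminated yet
	 * still type-checked.
	 */
	return IS_ENABLED(CONFIG_EXAMPLE_FEATURE) && (driver_info & 0x1);
}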
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
deleted file mode 100644
index e55f06e4270f..000000000000
--- a/drivers/bluetooth/btwilink.c
+++ /dev/null
@@ -1,337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Texas Instrument's Bluetooth Driver For Shared Transport.
- *
- * Bluetooth Driver acts as interface between HCI core and
- * TI Shared Transport Layer.
- *
- * Copyright (C) 2009-2010 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Pavan Savoy <pavan_savoy@ti.com>
- */
-
-#include <linux/platform_device.h>
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/hci.h>
-
-#include <linux/ti_wilink_st.h>
-#include <linux/module.h>
-
-/* Bluetooth Driver Version */
-#define VERSION "1.0"
-#define MAX_BT_CHNL_IDS 3
-
-/* Number of seconds to wait for registration completion
- * when ST returns PENDING status.
- */
-#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
-
-/**
- * struct ti_st - driver operation structure
- * @hdev: hci device pointer which binds to bt driver
- * @reg_status: ST registration callback status
- * @st_write: write function provided by the ST driver
- * to be used by the driver during send_frame.
- * @wait_reg_completion - completion sync between ti_st_open
- * and st_reg_completion_cb.
- */
-struct ti_st {
- struct hci_dev *hdev;
- int reg_status;
- long (*st_write) (struct sk_buff *);
- struct completion wait_reg_completion;
-};
-
-/* Increments HCI counters based on pocket ID (cmd,acl,sco) */
-static inline void ti_st_tx_complete(struct ti_st *hst, int pkt_type)
-{
- struct hci_dev *hdev = hst->hdev;
-
- /* Update HCI stat counters */
- switch (pkt_type) {
- case HCI_COMMAND_PKT:
- hdev->stat.cmd_tx++;
- break;
-
- case HCI_ACLDATA_PKT:
- hdev->stat.acl_tx++;
- break;
-
- case HCI_SCODATA_PKT:
- hdev->stat.sco_tx++;
- break;
- }
-}
-
-/* ------- Interfaces to Shared Transport ------ */
-
-/* Called by ST layer to indicate protocol registration completion
- * status.ti_st_open() function will wait for signal from this
- * API when st_register() function returns ST_PENDING.
- */
-static void st_reg_completion_cb(void *priv_data, int data)
-{
- struct ti_st *lhst = priv_data;
-
- /* Save registration status for use in ti_st_open() */
- lhst->reg_status = data;
- /* complete the wait in ti_st_open() */
- complete(&lhst->wait_reg_completion);
-}
-
-/* Called by Shared Transport layer when receive data is available */
-static long st_receive(void *priv_data, struct sk_buff *skb)
-{
- struct ti_st *lhst = priv_data;
- int err;
-
- if (!skb)
- return -EFAULT;
-
- if (!lhst) {
- kfree_skb(skb);
- return -EFAULT;
- }
-
- /* Forward skb to HCI core layer */
- err = hci_recv_frame(lhst->hdev, skb);
- if (err < 0) {
- BT_ERR("Unable to push skb to HCI core(%d)", err);
- return err;
- }
-
- lhst->hdev->stat.byte_rx += skb->len;
-
- return 0;
-}
-
-/* ------- Interfaces to HCI layer ------ */
-/* protocol structure registered with shared transport */
-static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
- {
- .chnl_id = HCI_EVENT_PKT, /* HCI Events */
- .hdr_len = sizeof(struct hci_event_hdr),
- .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
- .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
- .reserve = 8,
- },
- {
- .chnl_id = HCI_ACLDATA_PKT, /* ACL */
- .hdr_len = sizeof(struct hci_acl_hdr),
- .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
- .len_size = 2, /* sizeof(dlen) in struct hci_acl_hdr */
- .reserve = 8,
- },
- {
- .chnl_id = HCI_SCODATA_PKT, /* SCO */
- .hdr_len = sizeof(struct hci_sco_hdr),
- .offset_len_in_hdr = offsetof(struct hci_sco_hdr, dlen),
- .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
- .reserve = 8,
- },
-};
-
-/* Called from HCI core to initialize the device */
-static int ti_st_open(struct hci_dev *hdev)
-{
- unsigned long timeleft;
- struct ti_st *hst;
- int err, i;
-
- BT_DBG("%s %p", hdev->name, hdev);
-
- /* provide contexts for callbacks from ST */
- hst = hci_get_drvdata(hdev);
-
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
- ti_st_proto[i].priv_data = hst;
- ti_st_proto[i].max_frame_size = HCI_MAX_FRAME_SIZE;
- ti_st_proto[i].recv = st_receive;
- ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;
-
- /* Prepare wait-for-completion handler */
- init_completion(&hst->wait_reg_completion);
- /* Reset ST registration callback status flag,
- * this value will be updated in
- * st_reg_completion_cb()
- * function whenever it called from ST driver.
- */
- hst->reg_status = -EINPROGRESS;
-
- err = st_register(&ti_st_proto[i]);
- if (!err)
- goto done;
-
- if (err != -EINPROGRESS) {
- BT_ERR("st_register failed %d", err);
- return err;
- }
-
- /* ST is busy with either protocol
- * registration or firmware download.
- */
- BT_DBG("waiting for registration "
- "completion signal from ST");
- timeleft = wait_for_completion_timeout
- (&hst->wait_reg_completion,
- msecs_to_jiffies(BT_REGISTER_TIMEOUT));
- if (!timeleft) {
- BT_ERR("Timeout(%d sec),didn't get reg "
- "completion signal from ST",
- BT_REGISTER_TIMEOUT / 1000);
- return -ETIMEDOUT;
- }
-
- /* Is ST registration callback
- * called with ERROR status?
- */
- if (hst->reg_status != 0) {
- BT_ERR("ST registration completed with invalid "
- "status %d", hst->reg_status);
- return -EAGAIN;
- }
-
-done:
- hst->st_write = ti_st_proto[i].write;
- if (!hst->st_write) {
- BT_ERR("undefined ST write function");
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
- /* Undo registration with ST */
- err = st_unregister(&ti_st_proto[i]);
- if (err)
- BT_ERR("st_unregister() failed with "
- "error %d", err);
- hst->st_write = NULL;
- }
- return -EIO;
- }
- }
- return 0;
-}
-
-/* Close device */
-static int ti_st_close(struct hci_dev *hdev)
-{
- int err, i;
- struct ti_st *hst = hci_get_drvdata(hdev);
-
- for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
- err = st_unregister(&ti_st_proto[i]);
- if (err)
- BT_ERR("st_unregister(%d) failed with error %d",
- ti_st_proto[i].chnl_id, err);
- }
-
- hst->st_write = NULL;
-
- return err;
-}
-
-static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct ti_st *hst;
- long len;
- int pkt_type;
-
- hst = hci_get_drvdata(hdev);
-
- /* Prepend skb with frame type */
- memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
-
- BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
- skb->len);
-
- /* Insert skb to shared transport layer's transmit queue.
- * Freeing skb memory is taken care in shared transport layer,
- * so don't free skb memory here.
- */
- pkt_type = hci_skb_pkt_type(skb);
- len = hst->st_write(skb);
- if (len < 0) {
- BT_ERR("ST write failed (%ld)", len);
- /* Try Again, would only fail if UART has gone bad */
- return -EAGAIN;
- }
-
- /* ST accepted our skb. So, Go ahead and do rest */
- hdev->stat.byte_tx += len;
- ti_st_tx_complete(hst, pkt_type);
-
- return 0;
-}
-
-static int bt_ti_probe(struct platform_device *pdev)
-{
- struct ti_st *hst;
- struct hci_dev *hdev;
- int err;
-
- hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
- if (!hst)
- return -ENOMEM;
-
- /* Expose "hciX" device to user space */
- hdev = hci_alloc_dev();
- if (!hdev)
- return -ENOMEM;
-
- BT_DBG("hdev %p", hdev);
-
- hst->hdev = hdev;
- hdev->bus = HCI_UART;
- hci_set_drvdata(hdev, hst);
- hdev->open = ti_st_open;
- hdev->close = ti_st_close;
- hdev->flush = NULL;
- hdev->send = ti_st_send_frame;
-
- err = hci_register_dev(hdev);
- if (err < 0) {
- BT_ERR("Can't register HCI device error %d", err);
- hci_free_dev(hdev);
- return err;
- }
-
- BT_DBG("HCI device registered (hdev %p)", hdev);
-
- dev_set_drvdata(&pdev->dev, hst);
- return 0;
-}
-
-static int bt_ti_remove(struct platform_device *pdev)
-{
- struct hci_dev *hdev;
- struct ti_st *hst = dev_get_drvdata(&pdev->dev);
-
- if (!hst)
- return -EFAULT;
-
- BT_DBG("%s", hst->hdev->name);
-
- hdev = hst->hdev;
- ti_st_close(hdev);
- hci_unregister_dev(hdev);
-
- hci_free_dev(hdev);
-
- dev_set_drvdata(&pdev->dev, NULL);
- return 0;
-}
-
-static struct platform_driver btwilink_driver = {
- .probe = bt_ti_probe,
- .remove = bt_ti_remove,
- .driver = {
- .name = "btwilink",
- },
-};
-
-module_platform_driver(btwilink_driver);
-
-/* ------ Module Info ------ */
-
-MODULE_AUTHOR("Raja Mani <raja_mani@ti.com>");
-MODULE_DESCRIPTION("Bluetooth Driver for TI Shared Transport" VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 7646636f2d18..d2a6a4afdbbb 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -445,9 +445,11 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
+ hci_uart_set_flow_control(hu, true);
hu->init_speed = bcm->dev->init_speed;
hu->oper_speed = bcm->dev->oper_speed;
err = bcm_gpio_set_power(bcm->dev, true);
+ hci_uart_set_flow_control(hu, false);
if (err)
goto err_unset_hu;
}
@@ -1422,6 +1424,8 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt" },
+ { .compatible = "brcm,bcm43540-bt" },
+ { .compatible = "brcm,bcm4335a0" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
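The bcm_open() change brackets the power toggle with flow control so that whatever the controller emits while coming up is held off rather than hitting the line discipline mid-setup. Reduced to the bracket itself (bcm_gpio_set_power() and struct bcm_device are the driver's own; the wrapper function and trimmed error handling are illustrative):

/* Sketch: pausing the UART around a controller power toggle. */
static int example_power_on(struct hci_uart *hu, struct bcm_device *dev)
{
	int err;

	hci_uart_set_flow_control(hu, true);	/* hold off traffic */
	err = bcm_gpio_set_power(dev, true);	/* toggle power */
	hci_uart_set_flow_control(hu, false);	/* resume traffic */

	return err;
}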
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index fe2e307009f4..cf4a56095817 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (*ptr == 0xc0) {
BT_ERR("Short BCSP packet");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_START;
bcsp->rx_count = 0;
} else
@@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
@@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
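Each bcsp_recv() error path above now clears bcsp->rx_skb after freeing it. The pointer lives in per-link state that outlives the call, so a stale value would be freed a second time on the next receive. The idiom in isolation (struct example_state is hypothetical):

/* Sketch: clear a cached skb pointer after freeing it so a later call
 * into the same state machine cannot double-free the buffer.
 */
#include <linux/skbuff.h>

struct example_state {
	struct sk_buff *rx_skb;
	int rx_count;
};

static void example_drop_rx(struct example_state *st)
{
	kfree_skb(st->rx_skb);	/* kfree_skb(NULL) is a safe no-op */
	st->rx_skb = NULL;	/* forget the freed buffer */
	st->rx_count = 0;
}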
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 285706618f8a..d9a4c6c691e0 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
serdev_device_set_flow_control(serdev, true);
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
- else
- speed = 0;
-
do {
/* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
@@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
return err;
}
- if (speed) {
- __le32 speed_le = cpu_to_le32(speed);
- struct sk_buff *skb;
-
- skb = __hci_cmd_sync(hu->hdev,
- HCI_VS_UPDATE_UART_HCI_BAUDRATE,
- sizeof(speed_le), &speed_le,
- HCI_INIT_TIMEOUT);
- if (!IS_ERR(skb)) {
- kfree_skb(skb);
- serdev_device_set_baudrate(serdev, speed);
- }
- }
-
err = download_firmware(lldev);
if (!err)
break;
@@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
}
/* Operational speed if any */
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ else
+ speed = 0;
+
+ if (speed) {
+ __le32 speed_le = cpu_to_le32(speed);
+ struct sk_buff *skb;
+ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb)) {
+ kfree_skb(skb);
+ serdev_device_set_baudrate(serdev, speed);
+ }
+ }
return 0;
}
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 6463350b7977..05f7f6de6863 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -520,7 +520,7 @@ static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
err = skb_pad(skb, 1);
if (err)
return err;
- skb_put_u8(skb, 0x00);
+ skb_put(skb, 1);
}
skb_queue_tail(&btdev->txq, skb);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e3164c200eac..f10bdf8e1fc5 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -43,7 +43,8 @@
#define HCI_MAX_IBS_SIZE 10
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
-#define IBS_TX_IDLE_TIMEOUT_MS 2000
+#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
+#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
/* susclk rate */
@@ -55,6 +56,7 @@
enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
+ QCA_SUSPENDING,
};
/* HCI_IBS transmit side sleep protocol states */
@@ -100,6 +102,7 @@ struct qca_data {
struct work_struct ws_tx_vote_off;
unsigned long flags;
struct completion drop_ev_comp;
+ wait_queue_head_t suspend_wait_q;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -130,8 +133,6 @@ enum qca_speed_type {
*/
struct qca_vreg {
const char *name;
- unsigned int min_uV;
- unsigned int max_uV;
unsigned int load_uA;
};
@@ -146,8 +147,8 @@ struct qca_vreg_data {
*/
struct qca_power {
struct device *dev;
- const struct qca_vreg_data *vreg_data;
struct regulator_bulk_data *vreg_bulk;
+ int num_vregs;
bool vregs_on;
};
@@ -162,7 +163,8 @@ struct qca_serdev {
const char *firmware_name;
};
-static int qca_power_setup(struct hci_uart *hu, bool on);
+static int qca_regulator_enable(struct qca_serdev *qcadev);
+static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
@@ -438,6 +440,12 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
spin_lock_irqsave_nested(&qca->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
+ /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
/* No WAKE_ACK, retransmit WAKE */
@@ -497,6 +505,8 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+ init_waitqueue_head(&qca->suspend_wait_q);
+
qca->hu = hu;
init_completion(&qca->drop_ev_comp);
@@ -518,7 +528,7 @@ static int qca_open(struct hci_uart *hu)
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
- ret = qca_power_setup(hu, true);
+ ret = qca_regulator_enable(qcadev);
if (ret) {
destroy_workqueue(qca->workqueue);
kfree_skb(qca->rx_skb);
@@ -533,7 +543,7 @@ static int qca_open(struct hci_uart *hu)
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+ qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -648,6 +658,12 @@ static void device_want_to_wakeup(struct hci_uart *hu)
qca->ibs_recv_wakes++;
+ /* Don't wake the rx up when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->rx_ibs_state) {
case HCI_IBS_RX_ASLEEP:
/* Make sure clock is on - we may have turned clock off since
@@ -712,6 +728,8 @@ static void device_want_to_sleep(struct hci_uart *hu)
break;
}
+ wake_up_interruptible(&qca->suspend_wait_q);
+
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
@@ -729,6 +747,12 @@ static void device_woke_up(struct hci_uart *hu)
qca->ibs_recv_wacks++;
+ /* Don't react to the wake-up-acknowledgment when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_AWAKE:
/* Expect one if we send 2 WAKEs */
@@ -781,8 +805,10 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
+ * Don't wake the device up when suspending.
*/
- if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
+ test_bit(QCA_SUSPENDING, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
@@ -1188,7 +1214,7 @@ static int qca_wcn3990_init(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
if (!qcadev->bt_power->vregs_on) {
serdev_device_close(hu->serdev);
- ret = qca_power_setup(hu, true);
+ ret = qca_regulator_enable(qcadev);
if (ret)
return ret;
@@ -1262,7 +1288,7 @@ static int qca_setup(struct hci_uart *hu)
if (ret)
return ret;
- ret = qca_read_soc_version(hdev, &soc_ver);
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
} else {
@@ -1282,7 +1308,7 @@ static int qca_setup(struct hci_uart *hu)
if (!qca_is_wcn399x(soc_type)) {
/* Get QCA version information */
- ret = qca_read_soc_version(hdev, &soc_ver);
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
}
@@ -1332,10 +1358,21 @@ static const struct hci_uart_proto qca_proto = {
static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
- { "vddio", 1800000, 1900000, 15000 },
- { "vddxo", 1800000, 1900000, 80000 },
- { "vddrf", 1300000, 1350000, 300000 },
- { "vddch0", 3300000, 3400000, 450000 },
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
+ },
+ .num_vregs = 4,
+};
+
+static const struct qca_vreg_data qca_soc_data_wcn3991 = {
+ .soc_type = QCA_WCN3991,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
},
.num_vregs = 4,
};
@@ -1343,19 +1380,22 @@ static const struct qca_vreg_data qca_soc_data_wcn3990 = {
static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
- { "vddio", 1800000, 1900000, 10000 },
- { "vddxo", 1800000, 1900000, 80000 },
- { "vddrf", 1300000, 1352000, 300000 },
- { "vddch0", 3300000, 3300000, 450000 },
+ { "vddio", 10000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
},
.num_vregs = 4,
};
static void qca_power_shutdown(struct hci_uart *hu)
{
+ struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
* data in skb's.
@@ -1367,7 +1407,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
- qca_power_setup(hu, false);
+ qca_regulator_disable(qcadev);
}
static int qca_power_off(struct hci_dev *hdev)
@@ -1383,97 +1423,71 @@ static int qca_power_off(struct hci_dev *hdev)
return 0;
}
-static int qca_enable_regulator(struct qca_vreg vregs,
- struct regulator *regulator)
+static int qca_regulator_enable(struct qca_serdev *qcadev)
{
+ struct qca_power *power = qcadev->bt_power;
int ret;
- ret = regulator_set_voltage(regulator, vregs.min_uV,
- vregs.max_uV);
- if (ret)
- return ret;
+ /* Already enabled */
+ if (power->vregs_on)
+ return 0;
- if (vregs.load_uA)
- ret = regulator_set_load(regulator,
- vregs.load_uA);
+ BT_DBG("enabling %d regulators)", power->num_vregs);
+ ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
if (ret)
return ret;
- return regulator_enable(regulator);
-
-}
-
-static void qca_disable_regulator(struct qca_vreg vregs,
- struct regulator *regulator)
-{
- regulator_disable(regulator);
- regulator_set_voltage(regulator, 0, vregs.max_uV);
- if (vregs.load_uA)
- regulator_set_load(regulator, 0);
+ power->vregs_on = true;
+ return 0;
}
-static int qca_power_setup(struct hci_uart *hu, bool on)
+static void qca_regulator_disable(struct qca_serdev *qcadev)
{
- struct qca_vreg *vregs;
- struct regulator_bulk_data *vreg_bulk;
- struct qca_serdev *qcadev;
- int i, num_vregs, ret = 0;
+ struct qca_power *power;
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
- !qcadev->bt_power->vreg_bulk)
- return -EINVAL;
-
- vregs = qcadev->bt_power->vreg_data->vregs;
- vreg_bulk = qcadev->bt_power->vreg_bulk;
- num_vregs = qcadev->bt_power->vreg_data->num_vregs;
- BT_DBG("on: %d", on);
- if (on && !qcadev->bt_power->vregs_on) {
- for (i = 0; i < num_vregs; i++) {
- ret = qca_enable_regulator(vregs[i],
- vreg_bulk[i].consumer);
- if (ret)
- break;
- }
+ if (!qcadev)
+ return;
- if (ret) {
- BT_ERR("failed to enable regulator:%s", vregs[i].name);
- /* turn off regulators which are enabled */
- for (i = i - 1; i >= 0; i--)
- qca_disable_regulator(vregs[i],
- vreg_bulk[i].consumer);
- } else {
- qcadev->bt_power->vregs_on = true;
- }
- } else if (!on && qcadev->bt_power->vregs_on) {
- /* turn off regulator in reverse order */
- i = qcadev->bt_power->vreg_data->num_vregs - 1;
- for ( ; i >= 0; i--)
- qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
+ power = qcadev->bt_power;
- qcadev->bt_power->vregs_on = false;
- }
+ /* Already disabled? */
+ if (!power->vregs_on)
+ return;
- return ret;
+ regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
+ power->vregs_on = false;
}
static int qca_init_regulators(struct qca_power *qca,
const struct qca_vreg *vregs, size_t num_vregs)
{
+ struct regulator_bulk_data *bulk;
+ int ret;
int i;
- qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
- sizeof(struct regulator_bulk_data),
- GFP_KERNEL);
- if (!qca->vreg_bulk)
+ bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
+ if (!bulk)
return -ENOMEM;
for (i = 0; i < num_vregs; i++)
- qca->vreg_bulk[i].supply = vregs[i].name;
+ bulk[i].supply = vregs[i].name;
+
+ ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_vregs; i++) {
+ ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
+ if (ret)
+ return ret;
+ }
- return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
+ qca->vreg_bulk = bulk;
+ qca->num_vregs = num_vregs;
+
+ return 0;
}
static int qca_serdev_probe(struct serdev_device *serdev)
@@ -1500,7 +1514,6 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->bt_power->dev = &serdev->dev;
- qcadev->bt_power->vreg_data = data;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
if (err) {
@@ -1564,9 +1577,103 @@ static void qca_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&qcadev->serdev_hu);
}
+static int __maybe_unused qca_suspend(struct device *dev)
+{
+ struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ unsigned long flags;
+ int ret = 0;
+ u8 cmd;
+
+ set_bit(QCA_SUSPENDING, &qca->flags);
+
+ /* Device is downloading patch or doesn't support in-band sleep. */
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
+ return 0;
+
+ cancel_work_sync(&qca->ws_awake_device);
+ cancel_work_sync(&qca->ws_awake_rx);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_WAKING:
+ del_timer(&qca->wake_retrans_timer);
+ /* Fall through */
+ case HCI_IBS_TX_AWAKE:
+ del_timer(&qca->tx_idle_timer);
+
+ serdev_device_write_flush(hu->serdev);
+ cmd = HCI_IBS_SLEEP_IND;
+ ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
+
+ if (ret < 0) {
+ BT_ERR("Failed to send SLEEP to device");
+ break;
+ }
+
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->ibs_sent_slps++;
+
+ qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ break;
+
+ default:
+ BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ if (ret < 0)
+ goto error;
+
+ serdev_device_wait_until_sent(hu->serdev,
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+
+ /* Wait for the HCI_IBS_SLEEP_IND sent by the device to indicate its
+ * Tx is going to sleep, so that a late packet does not wake the system.
+ */
+
+ ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
+ qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
+ msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
+
+ if (ret > 0)
+ return 0;
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+error:
+ clear_bit(QCA_SUSPENDING, &qca->flags);
+
+ return ret;
+}
+
+static int __maybe_unused qca_resume(struct device *dev)
+{
+ struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ clear_bit(QCA_SUSPENDING, &qca->flags);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
@@ -1578,6 +1685,7 @@ static struct serdev_device_driver qca_serdev_driver = {
.driver = {
.name = "hci_uart_qca",
.of_match_table = qca_bluetooth_of_match,
+ .pm = &qca_pm_ops,
},
};
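qca_suspend() above coordinates with the IBS handlers through two pieces of state: the QCA_SUSPENDING bit, which the wake/sleep callbacks check and bail on, and suspend_wait_q, which device_want_to_sleep() wakes once the remote Rx is asleep. The synchronization core, reduced (names mirror the diff; transmission of the SLEEP_IND and most error paths are trimmed):

/* Sketch: the suspend handshake reduced to its wait. From the wait, a
 * return of 0 means timeout and a negative value means interrupted.
 */
static int example_suspend_wait(struct qca_data *qca)
{
	int ret;

	set_bit(QCA_SUSPENDING, &qca->flags);	/* freeze IBS handlers */

	/* device_want_to_sleep() wakes suspend_wait_q after setting
	 * rx_ibs_state to HCI_IBS_RX_ASLEEP.
	 */
	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
	if (ret > 0)
		return 0;	/* device acknowledged before timeout */

	clear_bit(QCA_SUSPENDING, &qca->flags);
	return ret ? ret : -ETIMEDOUT;
}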
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6b331061d34b..97ab5ad171d4 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,15 @@ config TEGRA_GMI
Driver for the Tegra Generic Memory Interface bus which can be used
to attach devices such as NOR, UART, FPGA and more.
+config TI_PWMSS
+ bool
+ default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM || TI_EQEP)
+ help
+ PWM Subsystem driver support for AM33xx SoC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
config TI_SYSC
bool "TI sysc interconnect target module driver"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 16b43d3468c6..1320bcf9fa9d 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
obj-$(CONFIG_TI_SYSC) += ti-sysc.o
obj-$(CONFIG_TS_NBUS) += ts-nbus.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52c7e15143d6..c8b1c3842c1a 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -104,10 +104,8 @@ static int __fsl_mc_device_match(struct device *dev, void *data)
return fsl_mc_device_match(mc_dev, obj_desc);
}
-static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
- *obj_desc,
- struct fsl_mc_device
- *mc_bus_dev)
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev)
{
struct device *dev;
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 0fe3f52ae0de..602f030d84eb 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -554,3 +554,56 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENOTCONN if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return -ENOTCONN;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
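A caller-side sketch of dprc_get_connection(), following the kernel-doc above; it assumes the declarations from fsl-mc-private.h, and the object type and id are placeholders:

/* Sketch: querying the peer of an object, per the kernel-doc above.
 * "dpni" and id 0 are placeholders, not real topology.
 */
static int example_query_peer(struct fsl_mc_io *mc_io, u16 token)
{
	struct dprc_endpoint ep1 = { 0 };
	struct dprc_endpoint ep2 = { 0 };
	int state, err;

	strcpy(ep1.type, "dpni");	/* placeholder endpoint type */
	ep1.id = 0;			/* placeholder endpoint id */

	err = dprc_get_connection(mc_io, 0, token, &ep1, &ep2, &state);
	if (err)
		return err;	/* -ENOTCONN when nothing is connected */

	/* state: 1 = link up, 0 = link down, -1 = no connection */
	return state == 1 ? 0 : -ENETDOWN;
}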
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5c9bf2e06552..a07cc19becdb 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -166,42 +166,52 @@ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
static struct device_type *fsl_mc_get_device_type(const char *type)
{
@@ -702,6 +712,39 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev, *endpoint;
+ struct fsl_mc_obj_desc endpoint_desc = { 0 };
+ struct dprc_endpoint endpoint1 = { 0 };
+ struct dprc_endpoint endpoint2 = { 0 };
+ int state, err;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ strcpy(endpoint1.type, mc_dev->obj_desc.type);
+ endpoint1.id = mc_dev->obj_desc.id;
+
+ err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+ mc_bus_dev->mc_handle,
+ &endpoint1, &endpoint2,
+ &state);
+
+ if (err == -ENOTCONN || state == -1)
+ return ERR_PTR(-ENOTCONN);
+
+ if (err < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ strcpy(endpoint_desc.type, endpoint2.type);
+ endpoint_desc.id = endpoint2.id;
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+ return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
+
static int parse_mc_ranges(struct device *dev,
int *paddr_cells,
int *mc_addr_cells,
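And the bus-level consumer view: fsl_mc_get_endpoint() above wraps the DPRC query and the device lookup, so a driver only has to handle an ERR_PTR. A hypothetical caller:

/* Sketch: resolving the connected peer with the new bus helper.
 * example_find_peer() is illustrative, not driver code.
 */
static int example_find_peer(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_device *peer;

	peer = fsl_mc_get_endpoint(mc_dev);
	if (IS_ERR(peer))
		return PTR_ERR(peer);	/* includes -ENOTCONN */

	/* peer is the fsl_mc_device at the other end of the link */
	return 0;
}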
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 020fcc04ec8b..21ca8c756ee7 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -105,6 +105,8 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
struct dprc_cmd_open {
__le32 container_id;
};
@@ -228,6 +230,22 @@ struct dprc_cmd_set_obj_irq {
u8 obj_type[16];
};
+struct dprc_cmd_get_connection {
+ __le32 ep1_id;
+ __le16 ep1_interface_id;
+ u8 pad[2];
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ __le64 pad[3];
+ __le32 ep2_id;
+ __le16 ep2_interface_id;
+ __le16 pad1;
+ u8 ep2_type[16];
+ __le32 state;
+};
+
/*
* DPRC API for managing and querying DPAA resources
*/
@@ -392,6 +410,27 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int *container_id);
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
/*
* Data Path Buffer Pool (DPBP) API
*/
@@ -574,4 +613,7 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
bool fsl_mc_is_root_dprc(struct device *dev);
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/bus/ti-pwmss.c
index e9c26c94251b..e9c26c94251b 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/bus/ti-pwmss.c
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 6626c84f66d1..5b21dc421c94 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -742,7 +742,7 @@ static int probe_gdrom(struct platform_device *devptr)
int err;
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
- pr_warning("ATA Probe for GDROM failed\n");
+ pr_warn("ATA Probe for GDROM failed\n");
return -ENODEV;
}
/* Print out firmware ID */
@@ -814,7 +814,7 @@ probe_fail_no_disk:
probe_fail_no_mem:
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
gdrom_major = 0;
- pr_warning("Probe failed - error is 0x%X\n", err);
+ pr_warn("Probe failed - error is 0x%X\n", err);
return err;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index df0fc997dc3e..26956c006987 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -439,8 +439,8 @@ config RAW_DRIVER
Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
See the raw(8) manpage for more details.
- Applications should preferably open the device (eg /dev/hda1)
- with the O_DIRECT flag.
+ Applications should preferably open the device (eg /dev/hda1)
+ with the O_DIRECT flag.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
@@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
device randomness. Say Y here to assume the entropy provided by the
bootloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool. \ No newline at end of file
+ only mixes the entropy pool.
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 812d6aa6e013..bc54235a7022 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -63,7 +63,7 @@ config AGP_AMD64
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
- K8T400M, SiS755. It may also support other AGP bridges when loaded
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
config AGP_INTEL
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 59f25286befe..8486c29d8324 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -308,6 +308,19 @@ config HW_RANDOM_HISI
If unsure, say Y.
+config HW_RANDOM_HISI_V2
+ tristate "HiSilicon True Random Number Generator V2 support"
+ depends on HW_RANDOM && ARM64 && ACPI
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator V2 hardware found on HiSilicon Hi1620 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi-trng-v2.
+
+ If unsure, say Y.
+
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
@@ -373,17 +386,17 @@ config HW_RANDOM_MESON
If unsure, say Y.
config HW_RANDOM_CAVIUM
- tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Cavium SoCs.
+ tristate "Cavium ThunderX Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Cavium SoCs.
- To compile this driver as a module, choose M here: the
- module will be called cavium_rng.
+ To compile this driver as a module, choose M here: the
+ module will be called cavium_rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
@@ -440,6 +453,19 @@ config HW_RANDOM_OPTEE
If unsure, say Y.
+config HW_RANDOM_NPCM
+ tristate "NPCM Random Number Generator support"
+ depends on ARCH_NPCM || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides support for the Random Number
+ Generator hardware available in Nuvoton NPCM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called npcm-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
@@ -458,7 +484,7 @@ config UML_RANDOM
/dev/hwrng and injects the entropy into /dev/random.
config HW_RANDOM_KEYSTONE
- depends on ARCH_KEYSTONE
+ depends on ARCH_KEYSTONE || COMPILE_TEST
default HW_RANDOM
tristate "TI Keystone NETCP SA Hardware random number generator"
help
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 7c9ef4a7667f..a7801b49ce6c 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI_V2) += hisi-trng-v2.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
@@ -39,3 +40,4 @@ obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
+obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index e55705745d5e..ecb71c4317a5 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -14,14 +14,22 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#define TRNG_CR 0x00
+#define TRNG_MR 0x04
#define TRNG_ISR 0x1c
#define TRNG_ODATA 0x50
#define TRNG_KEY 0x524e4700 /* RNG */
+#define TRNG_HALFR BIT(0) /* generate RN every 168 cycles */
+
+struct atmel_trng_data {
+ bool has_half_rate;
+};
+
struct atmel_trng {
struct clk *clk;
void __iomem *base;
@@ -62,21 +70,31 @@ static void atmel_trng_disable(struct atmel_trng *trng)
static int atmel_trng_probe(struct platform_device *pdev)
{
struct atmel_trng *trng;
- struct resource *res;
+ const struct atmel_trng_data *data;
int ret;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->base = devm_ioremap_resource(&pdev->dev, res);
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->base))
return PTR_ERR(trng->base);
trng->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(trng->clk))
return PTR_ERR(trng->clk);
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ if (data->has_half_rate) {
+ unsigned long rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
ret = clk_prepare_enable(trng->clk);
if (ret)
@@ -141,9 +159,24 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
};
#endif /* CONFIG_PM */
+static const struct atmel_trng_data at91sam9g45_config = {
+ .has_half_rate = false,
+};
+
+static const struct atmel_trng_data sam9x60_config = {
+ .has_half_rate = true,
+};
+
static const struct of_device_id atmel_trng_dt_ids[] = {
- { .compatible = "atmel,at91sam9g45-trng" },
- { /* sentinel */ }
+ {
+ .compatible = "atmel,at91sam9g45-trng",
+ .data = &at91sam9g45_config,
+ }, {
+ .compatible = "microchip,sam9x60-trng",
+ .data = &sam9x60_config,
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
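The atmel-rng conversion above is a textbook use of OF match data: each compatible string carries a pointer to a per-SoC config struct, and probe() branches on the struct instead of on the compatible string. The pattern with illustrative names (the compatibles, config struct and quirk below are placeholders):

/* Sketch: per-compatible configuration via of_device_get_match_data(). */
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_cfg {
	bool has_half_rate;	/* placeholder SoC quirk */
};

static const struct example_cfg old_soc_cfg = { .has_half_rate = false };
static const struct example_cfg new_soc_cfg = { .has_half_rate = true };

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,old-soc-trng", .data = &old_soc_cfg },
	{ .compatible = "vendor,new-soc-trng", .data = &new_soc_cfg },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_cfg *cfg;

	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;	/* bound without a match entry */

	if (cfg->has_half_rate)
		dev_info(&pdev->dev, "half-rate mode available\n");

	return 0;
}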
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index f759790c3cdb..d2a5791eb49f 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -142,7 +142,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
- struct resource *r;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -151,10 +150,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
/* map peripheral */
- priv->base = devm_ioremap_resource(dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 80b850ef1bf6..d2d7a42d7e0d 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
@@ -112,6 +111,14 @@ static void drop_current_rng(void)
}
/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng_nolock(void)
+{
+ if (current_rng)
+ kref_get(&current_rng->ref);
+
+ return current_rng;
+}
+
static struct hwrng *get_current_rng(void)
{
struct hwrng *rng;
@@ -119,9 +126,7 @@ static struct hwrng *get_current_rng(void)
if (mutex_lock_interruptible(&rng_mutex))
return ERR_PTR(-ERESTARTSYS);
- rng = current_rng;
- if (rng)
- kref_get(&rng->ref);
+ rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
return rng;
@@ -156,8 +161,6 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- add_early_randomness(rng);
-
current_quality = rng->quality ? : default_quality;
if (current_quality > 1024)
current_quality = 1024;
@@ -321,12 +324,13 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
const char *buf, size_t len)
{
int err = -ENODEV;
- struct hwrng *rng;
+ struct hwrng *rng, *old_rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
+ old_rng = current_rng;
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
} else {
@@ -338,9 +342,15 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
}
}
}
-
+ new_rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
+ if (new_rng) {
+ if (new_rng != old_rng)
+ add_early_randomness(new_rng);
+ put_rng(new_rng);
+ }
+
return err ? : len;
}
@@ -422,9 +432,7 @@ static int hwrng_fillfn(void *unused)
{
long rc;
- set_freezable();
-
- while (!kthread_freezable_should_stop(NULL)) {
+ while (!kthread_should_stop()) {
struct hwrng *rng;
rng = get_current_rng();
@@ -460,13 +468,15 @@ static void start_khwrngd(void)
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
- struct hwrng *old_rng, *tmp;
+ struct hwrng *tmp;
struct list_head *rng_list_ptr;
+ bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
mutex_lock(&rng_mutex);
+
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
@@ -485,10 +495,8 @@ int hwrng_register(struct hwrng *rng)
}
list_add_tail(&rng->list, rng_list_ptr);
- old_rng = current_rng;
- err = 0;
- if (!old_rng ||
- (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+ if (!current_rng ||
+ (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
@@ -497,19 +505,26 @@ int hwrng_register(struct hwrng *rng)
err = set_current_rng(rng);
if (err)
goto out_unlock;
+ /* to use current_rng in add_early_randomness() we need
+ * to take a ref
+ */
+ is_new_current = true;
+ kref_get(&rng->ref);
}
-
- if (old_rng && !rng->init) {
+ mutex_unlock(&rng_mutex);
+ if (is_new_current || !rng->init) {
/*
* Use a new device's input to add some randomness to
* the system. If this rng device isn't going to be
* used right away, its init function hasn't been
- * called yet; so only use the randomness from devices
- * that don't need an init callback.
+ * called yet by set_current_rng(); so only use the
+ * randomness from devices that don't need an init callback.
*/
add_early_randomness(rng);
}
-
+ if (is_new_current)
+ put_rng(rng);
+ return 0;
out_unlock:
mutex_unlock(&rng_mutex);
out:
@@ -519,10 +534,12 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
+ struct hwrng *old_rng, *new_rng;
int err;
mutex_lock(&rng_mutex);
+ old_rng = current_rng;
list_del(&rng->list);
if (current_rng == rng) {
err = enable_best_rng();
@@ -532,6 +549,7 @@ void hwrng_unregister(struct hwrng *rng)
}
}
+ new_rng = get_current_rng_nolock();
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
if (hwrng_fill)
@@ -539,6 +557,12 @@ void hwrng_unregister(struct hwrng *rng)
} else
mutex_unlock(&rng_mutex);
+ if (new_rng) {
+ if (old_rng != new_rng)
+ add_early_randomness(new_rng);
+ put_rng(new_rng);
+ }
+
wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
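Across these core.c hunks, the _nolock accessor lets code that already holds rng_mutex take a reference, while add_early_randomness() runs only after the mutex is dropped, since it may sleep in the RNG's read path. A reduced sketch of the lock/nolock split, with hypothetical foo names standing in for the hwrng globals:

#include <linux/kref.h>
#include <linux/mutex.h>

struct foo {                            /* stand-in for struct hwrng */
        struct kref ref;
};

static struct foo *cur_foo;             /* protected by foo_mutex */
static DEFINE_MUTEX(foo_mutex);

/* Caller must hold foo_mutex. */
static struct foo *get_cur_foo_nolock(void)
{
        if (cur_foo)
                kref_get(&cur_foo->ref);
        return cur_foo;
}

static struct foo *get_cur_foo(void)
{
        struct foo *f;

        mutex_lock(&foo_mutex);
        f = get_cur_foo_nolock();
        mutex_unlock(&foo_mutex);

        return f;       /* ref is held; safe to use after unlock */
}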
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index b4b52ab23b6b..8e1fe3f8dd2d 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -109,7 +109,6 @@ static int exynos_trng_init(struct hwrng *rng)
static int exynos_trng_probe(struct platform_device *pdev)
{
struct exynos_trng_dev *trng;
- struct resource *res;
int ret = -ENOMEM;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
@@ -128,8 +127,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, trng);
trng->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index c663d5dd85bb..6815e17a9834 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -73,7 +73,6 @@ static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
static int hisi_rng_probe(struct platform_device *pdev)
{
struct hisi_rng *rng;
- struct resource *res;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
@@ -82,8 +81,7 @@ static int hisi_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rng);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng->base = devm_ioremap_resource(&pdev->dev, res);
+ rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
diff --git a/drivers/char/hw_random/hisi-trng-v2.c b/drivers/char/hw_random/hisi-trng-v2.c
new file mode 100644
index 000000000000..6a65b8232ce0
--- /dev/null
+++ b/drivers/char/hw_random/hisi-trng-v2.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define HISI_TRNG_REG 0x00F0
+#define HISI_TRNG_BYTES 4
+#define HISI_TRNG_QUALITY 512
+#define SLEEP_US 10
+#define TIMEOUT_US 10000
+
+struct hisi_trng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct hisi_trng *trng;
+ int currsize = 0;
+ u32 val = 0;
+ u32 ret;
+
+ trng = container_of(rng, struct hisi_trng, rng);
+
+ do {
+ ret = readl_poll_timeout(trng->base + HISI_TRNG_REG, val,
+ val, SLEEP_US, TIMEOUT_US);
+ if (ret)
+ return currsize;
+
+ if (max - currsize >= HISI_TRNG_BYTES) {
+ memcpy(buf + currsize, &val, HISI_TRNG_BYTES);
+ currsize += HISI_TRNG_BYTES;
+ if (currsize == max)
+ return currsize;
+ continue;
+ }
+
+ /* copy remaining bytes */
+ memcpy(buf + currsize, &val, max - currsize);
+ currsize = max;
+ } while (currsize < max);
+
+ return currsize;
+}
+
+static int hisi_trng_probe(struct platform_device *pdev)
+{
+ struct hisi_trng *trng;
+ int ret;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ trng->rng.name = pdev->name;
+ trng->rng.read = hisi_trng_read;
+ trng->rng.quality = HISI_TRNG_QUALITY;
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register hwrng!\n");
+
+ return ret;
+}
+
+static const struct acpi_device_id hisi_trng_acpi_match[] = {
+ { "HISI02B3", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
+
+static struct platform_driver hisi_trng_driver = {
+ .probe = hisi_trng_probe,
+ .driver = {
+ .name = "hisi-trng-v2",
+ .acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
+ },
+};
+
+module_platform_driver(hisi_trng_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Weili Qian <qianweili@huawei.com>");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon true random number generator V2 driver");
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 92be1c0ab99f..899ff25f4f28 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -181,7 +181,6 @@ static void iproc_rng200_cleanup(struct hwrng *rng)
static int iproc_rng200_probe(struct platform_device *pdev)
{
struct iproc_rng200_dev *priv;
- struct resource *res;
struct device *dev = &pdev->dev;
int ret;
@@ -190,13 +189,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return -ENOMEM;
/* Map peripheral */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get rng resources\n");
- return -EINVAL;
- }
-
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(dev, "failed to remap rng regs\n");
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index a67430010aa6..e2330e757f1f 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
+#include <linux/timekeeping.h>
#define SA_CMD_STATUS_OFS 0x8
@@ -84,14 +85,37 @@ struct ks_sa_rng {
struct hwrng rng;
struct clk *clk;
struct regmap *regmap_cfg;
- struct trng_regs *reg_rng;
+ struct trng_regs __iomem *reg_rng;
+ u64 ready_ts;
+ unsigned int refill_delay_ns;
};
+static unsigned int cycles_to_ns(unsigned long clk_rate, unsigned int cycles)
+{
+ return DIV_ROUND_UP_ULL((TRNG_DEF_CLK_DIV_CYCLES + 1) * 1000000000ull *
+ cycles, clk_rate);
+}
+
+static unsigned int startup_delay_ns(unsigned long clk_rate)
+{
+ if (!TRNG_DEF_STARTUP_CYCLES)
+ return cycles_to_ns(clk_rate, BIT(24));
+ return cycles_to_ns(clk_rate, 256 * TRNG_DEF_STARTUP_CYCLES);
+}
+
+static unsigned int refill_delay_ns(unsigned long clk_rate)
+{
+ if (!TRNG_DEF_MAX_REFILL_CYCLES)
+ return cycles_to_ns(clk_rate, BIT(24));
+ return cycles_to_ns(clk_rate, 256 * TRNG_DEF_MAX_REFILL_CYCLES);
+}
+
static int ks_sa_rng_init(struct hwrng *rng)
{
u32 value;
struct device *dev = (struct device *)rng->priv;
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk);
/* Enable RNG module */
regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
@@ -120,6 +144,10 @@ static int ks_sa_rng_init(struct hwrng *rng)
value |= TRNG_CNTL_REG_TRNG_ENABLE;
writel(value, &ks_sa_rng->reg_rng->control);
+ ks_sa_rng->refill_delay_ns = refill_delay_ns(clk_rate);
+ ks_sa_rng->ready_ts = ktime_get_ns() +
+ startup_delay_ns(clk_rate);
+
return 0;
}
@@ -144,6 +172,7 @@ static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
data[1] = readl(&ks_sa_rng->reg_rng->output_h);
writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
+ ks_sa_rng->ready_ts = ktime_get_ns() + ks_sa_rng->refill_delay_ns;
return sizeof(u32) * 2;
}
@@ -152,10 +181,19 @@ static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
{
struct device *dev = (struct device *)rng->priv;
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ u64 now = ktime_get_ns();
u32 ready;
int j;
+ if (wait && now < ks_sa_rng->ready_ts) {
+ /* Max delay expected here is 81920000 ns */
+ unsigned long min_delay =
+ DIV_ROUND_UP((u32)(ks_sa_rng->ready_ts - now), 1000);
+
+ usleep_range(min_delay, min_delay + SA_RNG_DATA_RETRY_DELAY);
+ }
+
for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
ready = readl(&ks_sa_rng->reg_rng->status);
ready &= TRNG_STATUS_REG_READY;
@@ -174,7 +212,6 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
struct ks_sa_rng *ks_sa_rng;
struct device *dev = &pdev->dev;
int ret;
- struct resource *mem;
ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
if (!ks_sa_rng)
@@ -190,8 +227,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
};
ks_sa_rng->rng.priv = (unsigned long)dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
+ ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks_sa_rng->reg_rng))
return PTR_ERR(ks_sa_rng->reg_rng);
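The ready_ts/refill_delay_ns bookkeeping stops the driver from polling a FIFO that cannot possibly be full yet. As a sanity check of the magnitudes involved, here is the cycles_to_ns() math evaluated with assumed inputs (a 200 MHz clock and a zero divider are illustrative guesses, not values taken from the driver):

#include <linux/bits.h>
#include <linux/kernel.h>

/* cycles_to_ns() evaluated by hand with assumed inputs:
 * clk_rate = 200 MHz, TRNG_DEF_CLK_DIV_CYCLES = 0, cycles = BIT(24):
 *
 *   (0 + 1) * 1000000000 * 16777216 / 200000000 = 83886080 ns (~84 ms)
 *
 * i.e. the same order of magnitude as the "81920000 ns" worst case
 * quoted in ks_sa_rng_data_present() above.
 */
static unsigned int example_worst_case_ns(void)
{
        unsigned long clk_rate = 200000000UL;   /* assumed, illustrative */

        return DIV_ROUND_UP_ULL(1000000000ull * BIT(24), clk_rate);
}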
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 76e693da5dde..e446236e81f2 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -42,7 +42,6 @@ static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
- struct resource *res;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -51,8 +50,7 @@ static int meson_rng_probe(struct platform_device *pdev)
data->pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index e649be5a5f13..8ad7b515a51b 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -105,16 +105,9 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
static int mtk_rng_probe(struct platform_device *pdev)
{
- struct resource *res;
int ret;
struct mtk_rng *priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -135,7 +128,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
return ret;
}
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
new file mode 100644
index 000000000000..01d04404d8c0
--- /dev/null
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+
+#define NPCM_RNGCS_REG 0x00 /* Control and status register */
+#define NPCM_RNGD_REG 0x04 /* Data register */
+#define NPCM_RNGMODE_REG 0x08 /* Mode register */
+
+#define NPCM_RNG_CLK_SET_25MHZ GENMASK(4, 3) /* 20-25 MHz */
+#define NPCM_RNG_DATA_VALID BIT(1)
+#define NPCM_RNG_ENABLE BIT(0)
+#define NPCM_RNG_M1ROSEL BIT(1)
+
+#define NPCM_RNG_TIMEOUT_USEC 20000
+#define NPCM_RNG_POLL_USEC 1000
+
+#define to_npcm_rng(p) container_of(p, struct npcm_rng, rng)
+
+struct npcm_rng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int npcm_rng_init(struct hwrng *rng)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+
+ writel(NPCM_RNG_CLK_SET_25MHZ | NPCM_RNG_ENABLE,
+ priv->base + NPCM_RNGCS_REG);
+
+ return 0;
+}
+
+static void npcm_rng_cleanup(struct hwrng *rng)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+
+ writel(NPCM_RNG_CLK_SET_25MHZ, priv->base + NPCM_RNGCS_REG);
+}
+
+static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+ int retval = 0;
+ int ready;
+
+ pm_runtime_get_sync((struct device *)priv->rng.priv);
+
+ while (max >= sizeof(u32)) {
+ if (wait) {
+ if (readl_poll_timeout(priv->base + NPCM_RNGCS_REG,
+ ready,
+ ready & NPCM_RNG_DATA_VALID,
+ NPCM_RNG_POLL_USEC,
+ NPCM_RNG_TIMEOUT_USEC))
+ break;
+ } else {
+ if ((readl(priv->base + NPCM_RNGCS_REG) &
+ NPCM_RNG_DATA_VALID) == 0)
+ break;
+ }
+
+ *(u32 *)buf = readl(priv->base + NPCM_RNGD_REG);
+ retval += sizeof(u32);
+ buf += sizeof(u32);
+ max -= sizeof(u32);
+ }
+
+ pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+
+ return retval || !wait ? retval : -EIO;
+}
+
+static int npcm_rng_probe(struct platform_device *pdev)
+{
+ struct npcm_rng *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dev_set_drvdata(&pdev->dev, priv);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+#ifndef CONFIG_PM
+ priv->rng.init = npcm_rng_init;
+ priv->rng.cleanup = npcm_rng_cleanup;
+#endif
+ priv->rng.name = pdev->name;
+ priv->rng.read = npcm_rng_read;
+ priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 1000;
+
+ writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register rng device: %d\n",
+ ret);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int npcm_rng_remove(struct platform_device *pdev)
+{
+ struct npcm_rng *priv = platform_get_drvdata(pdev);
+
+ devm_hwrng_unregister(&pdev->dev, &priv->rng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int npcm_rng_runtime_suspend(struct device *dev)
+{
+ struct npcm_rng *priv = dev_get_drvdata(dev);
+
+ npcm_rng_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int npcm_rng_runtime_resume(struct device *dev)
+{
+ struct npcm_rng *priv = dev_get_drvdata(dev);
+
+ return npcm_rng_init(&priv->rng);
+}
+#endif
+
+static const struct dev_pm_ops npcm_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(npcm_rng_runtime_suspend,
+ npcm_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id rng_dt_id[] = {
+ { .compatible = "nuvoton,npcm750-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rng_dt_id);
+
+static struct platform_driver npcm_rng_driver = {
+ .driver = {
+ .name = "npcm-rng",
+ .pm = &npcm_rng_pm_ops,
+ .of_match_table = of_match_ptr(rng_dt_id),
+ },
+ .probe = npcm_rng_probe,
+ .remove = npcm_rng_remove,
+};
+
+module_platform_driver(npcm_rng_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM Random Number Generator Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b27f39688b5e..0ed07d16ec8e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -66,6 +66,13 @@
#define OMAP4_RNG_OUTPUT_SIZE 0x8
#define EIP76_RNG_OUTPUT_SIZE 0x10
+/*
+ * The EIP76 RNG takes approximately 700us to produce 16 bytes of output
+ * data, as measured in testing. To allow for udelay()'s imprecision, the
+ * total timeout is kept at 1000us (100 polling iterations of 10us each).
+ */
+#define RNG_DATA_FILL_TIMEOUT 100
+
enum {
RNG_OUTPUT_0_REG = 0,
RNG_OUTPUT_1_REG,
@@ -176,7 +183,7 @@ static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
if (max < priv->pdata->data_size)
return 0;
- for (i = 0; i < 20; i++) {
+ for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) {
present = priv->pdata->data_present(priv);
if (present || !wait)
break;
@@ -432,7 +439,6 @@ static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
static int omap_rng_probe(struct platform_device *pdev)
{
struct omap_rng_dev *priv;
- struct resource *res;
struct device *dev = &pdev->dev;
int ret;
@@ -449,8 +455,7 @@ static int omap_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_ioremap;
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 38b719017186..e08a8887e718 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -11,8 +11,6 @@
* warranty of any kind, whether express or implied.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
@@ -20,117 +18,159 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define RNG_RESET 0x01
#define RNG_GEN_PRNG_HW_INIT 0x02
#define RNG_GEN_HW 0x08
-/* param1: ptr, param2: count, param3: flag */
-static u32 (*omap3_rom_rng_call)(u32, u32, u32);
-
-static struct delayed_work idle_work;
-static int rng_idle;
-static struct clk *rng_clk;
+struct omap_rom_rng {
+ struct clk *clk;
+ struct device *dev;
+ struct hwrng ops;
+ u32 (*rom_rng_call)(u32 ptr, u32 count, u32 flag);
+};
-static void omap3_rom_rng_idle(struct work_struct *work)
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
{
+ struct omap_rom_rng *ddata;
+ u32 ptr;
int r;
- r = omap3_rom_rng_call(0, 0, RNG_RESET);
- if (r != 0) {
- pr_err("reset failed: %d\n", r);
- return;
+ ddata = (struct omap_rom_rng *)rng->priv;
+
+ r = pm_runtime_get_sync(ddata->dev);
+ if (r < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return r;
}
- clk_disable_unprepare(rng_clk);
- rng_idle = 1;
+
+ ptr = virt_to_phys(data);
+ r = ddata->rom_rng_call(ptr, 4, RNG_GEN_HW);
+ if (r != 0)
+ r = -EINVAL;
+ else
+ r = 4;
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return r;
}
-static int omap3_rom_rng_get_random(void *buf, unsigned int count)
+static int __maybe_unused omap_rom_rng_runtime_suspend(struct device *dev)
{
- u32 r;
- u32 ptr;
+ struct omap_rom_rng *ddata;
+ int r;
- cancel_delayed_work_sync(&idle_work);
- if (rng_idle) {
- r = clk_prepare_enable(rng_clk);
- if (r)
- return r;
-
- r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
- if (r != 0) {
- clk_disable_unprepare(rng_clk);
- pr_err("HW init failed: %d\n", r);
- return -EIO;
- }
- rng_idle = 0;
- }
+ ddata = dev_get_drvdata(dev);
- ptr = virt_to_phys(buf);
- r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
- schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
+ r = ddata->rom_rng_call(0, 0, RNG_RESET);
if (r != 0)
- return -EINVAL;
+ dev_err(dev, "reset failed: %d\n", r);
+
+ clk_disable_unprepare(ddata->clk);
+
return 0;
}
-static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
+static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev)
{
+ struct omap_rom_rng *ddata;
int r;
- r = omap3_rom_rng_get_random(data, 4);
+ ddata = dev_get_drvdata(dev);
+
+ r = clk_prepare_enable(ddata->clk);
if (r < 0)
return r;
- return 4;
+
+ r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+ if (r != 0) {
+ clk_disable_unprepare(ddata->clk);
+ dev_err(dev, "HW init failed: %d\n", r);
+
+ return -EIO;
+ }
+
+ return 0;
}
-static struct hwrng omap3_rom_rng_ops = {
- .name = "omap3-rom",
- .read = omap3_rom_rng_read,
-};
+static void omap_rom_rng_finish(void *data)
+{
+ struct omap_rom_rng *ddata = data;
+
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+}
static int omap3_rom_rng_probe(struct platform_device *pdev)
{
+ struct omap_rom_rng *ddata;
int ret = 0;
- pr_info("initializing\n");
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
- omap3_rom_rng_call = pdev->dev.platform_data;
- if (!omap3_rom_rng_call) {
- pr_err("omap3_rom_rng_call is NULL\n");
+ ddata->dev = &pdev->dev;
+ ddata->ops.priv = (unsigned long)ddata;
+ ddata->ops.name = "omap3-rom";
+ ddata->ops.read = of_device_get_match_data(&pdev->dev);
+ ddata->ops.quality = 900;
+ if (!ddata->ops.read) {
+ dev_err(&pdev->dev, "missing rom code handler\n");
+
+ return -ENODEV;
+ }
+ dev_set_drvdata(ddata->dev, ddata);
+
+ ddata->rom_rng_call = pdev->dev.platform_data;
+ if (!ddata->rom_rng_call) {
+ dev_err(ddata->dev, "rom_rng_call is NULL\n");
return -EINVAL;
}
- INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
- rng_clk = devm_clk_get(&pdev->dev, "ick");
- if (IS_ERR(rng_clk)) {
- pr_err("unable to get RNG clock\n");
- return PTR_ERR(rng_clk);
+ ddata->clk = devm_clk_get(ddata->dev, "ick");
+ if (IS_ERR(ddata->clk)) {
+ dev_err(ddata->dev, "unable to get RNG clock\n");
+ return PTR_ERR(ddata->clk);
}
- /* Leave the RNG in reset state. */
- ret = clk_prepare_enable(rng_clk);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ ret = devm_add_action_or_reset(ddata->dev, omap_rom_rng_finish,
+ ddata);
if (ret)
return ret;
- omap3_rom_rng_idle(0);
- return hwrng_register(&omap3_rom_rng_ops);
+ return devm_hwrng_register(ddata->dev, &ddata->ops);
}
-static int omap3_rom_rng_remove(struct platform_device *pdev)
-{
- cancel_delayed_work_sync(&idle_work);
- hwrng_unregister(&omap3_rom_rng_ops);
- clk_disable_unprepare(rng_clk);
- return 0;
-}
+static const struct of_device_id omap_rom_rng_match[] = {
+ { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, omap_rom_rng_match);
+
+static const struct dev_pm_ops omap_rom_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_rom_rng_runtime_suspend,
+ omap_rom_rng_runtime_resume, NULL)
+};
static struct platform_driver omap3_rom_rng_driver = {
.driver = {
.name = "omap3-rom-rng",
+ .of_match_table = omap_rom_rng_match,
+ .pm = &omap_rom_rng_pm_ops,
},
.probe = omap3_rom_rng_probe,
- .remove = omap3_rom_rng_remove,
};
module_platform_driver(omap3_rom_rng_driver);
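The probe above hands its runtime-PM teardown to devres with devm_add_action_or_reset(), so the disable path runs automatically on probe failure and on unbind, ordered correctly with respect to other devm releases. The general shape of the pattern, with hypothetical foo names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void foo_pm_disable(void *data)
{
        struct device *dev = data;

        pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
}

static int foo_probe_pm_setup(struct device *dev)
{
        pm_runtime_enable(dev);
        pm_runtime_set_autosuspend_delay(dev, 500);
        pm_runtime_use_autosuspend(dev);

        /* If registration fails the action runs immediately
         * ("or_reset"), so later error paths need no manual unwind.
         */
        return devm_add_action_or_reset(dev, foo_pm_disable, dev);
}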
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 24b1460b49d4..2498d4ef9fe2 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -86,10 +86,8 @@ static struct hwrng pasemi_rng = {
static int rng_probe(struct platform_device *pdev)
{
void __iomem *rng_regs;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng_regs = devm_ioremap_resource(&pdev->dev, res);
+ rng_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng_regs))
return PTR_ERR(rng_regs);
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 90f498c98947..81080cb2294e 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -70,7 +70,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
static int pic32_rng_probe(struct platform_device *pdev)
{
struct pic32_rng *priv;
- struct resource *res;
u32 v;
int ret;
@@ -78,8 +77,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 863448360a7d..783c24e3f8b7 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -72,7 +72,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int st_rng_probe(struct platform_device *pdev)
{
struct st_rng_data *ddata;
- struct resource *res;
struct clk *clk;
void __iomem *base;
int ret;
@@ -81,8 +80,7 @@ static int st_rng_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index 1093583b579c..c8bd34e740fd 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -107,14 +107,12 @@ static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
static int __init tx4939_rng_probe(struct platform_device *dev)
{
struct tx4939_rng *rngdev;
- struct resource *r;
int i;
rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
if (!rngdev)
return -ENOMEM;
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- rngdev->base = devm_ioremap_resource(&dev->dev, r);
+ rngdev->base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(rngdev->base))
return PTR_ERR(rngdev->base);
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 7e568db87ae2..d7516a446987 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -313,7 +313,6 @@ static struct hwrng xgene_rng_func = {
static int xgene_rng_probe(struct platform_device *pdev)
{
- struct resource *res;
struct xgene_rng_dev *ctx;
int rc = 0;
@@ -324,8 +323,7 @@ static int xgene_rng_probe(struct platform_device *pdev)
ctx->dev = &pdev->dev;
platform_set_drvdata(pdev, ctx);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->csr_base))
return PTR_ERR(ctx->csr_base);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 4bad0614109b..7dc2c3ec4051 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -4,38 +4,38 @@
#
menuconfig IPMI_HANDLER
- tristate 'IPMI top-level message handler'
- depends on HAS_IOMEM
- select IPMI_DMI_DECODE if DMI
- help
- This enables the central IPMI message handler, required for IPMI
- to work.
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ select IPMI_DMI_DECODE if DMI
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
- IPMI is a standard for managing sensors (temperature,
- voltage, etc.) in a system.
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
- See <file:Documentation/IPMI.txt> for more details on the driver.
+ See <file:Documentation/IPMI.txt> for more details on the driver.
- If unsure, say N.
+ If unsure, say N.
config IPMI_DMI_DECODE
- select IPMI_PLAT_DATA
- bool
+ select IPMI_PLAT_DATA
+ bool
config IPMI_PLAT_DATA
- bool
+ bool
if IPMI_HANDLER
config IPMI_PANIC_EVENT
- bool 'Generate a panic event to all BMCs on a panic'
- help
- When a panic occurs, this will cause the IPMI message handler to,
- by default, generate an IPMI event describing the panic to each
- interface registered with the message handler. This is always
- available, the module parameter for ipmi_msghandler named
- panic_op can be set to "event" to chose this value, this config
- simply causes the default value to be set to "event".
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+ available, the module parameter for ipmi_msghandler named
+ panic_op can be set to "event" to choose this value; this config
+ simply causes the default value to be set to "event".
config IPMI_PANIC_STRING
bool 'Generate OEM events containing the panic string'
@@ -54,43 +54,43 @@ config IPMI_PANIC_STRING
causes the default value to be set to "string".
config IPMI_DEVICE_INTERFACE
- tristate 'Device interface for IPMI'
- help
- This provides an IOCTL interface to the IPMI message handler so
- userland processes may use IPMI. It supports poll() and select().
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
config IPMI_SI
- tristate 'IPMI System Interface handler'
- select IPMI_PLAT_DATA
- help
- Provides a driver for System Interfaces (KCS, SMIC, BT).
- Currently, only KCS and SMIC are supported. If
- you are using IPMI, you should probably say "y" here.
+ tristate 'IPMI System Interface handler'
+ select IPMI_PLAT_DATA
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, BT).
+ Currently, only KCS and SMIC are supported. If
+ you are using IPMI, you should probably say "y" here.
config IPMI_SSIF
- tristate 'IPMI SMBus handler (SSIF)'
- select I2C
- help
- Provides a driver for a SMBus interface to a BMC, meaning that you
- have a driver that must be accessed over an I2C bus instead of a
- standard interface. This module requires I2C support.
+ tristate 'IPMI SMBus handler (SSIF)'
+ select I2C
+ help
+ Provides a driver for a SMBus interface to a BMC, meaning that you
+ have a driver that must be accessed over an I2C bus instead of a
+ standard interface. This module requires I2C support.
config IPMI_POWERNV
- depends on PPC_POWERNV
- tristate 'POWERNV (OPAL firmware) IPMI interface'
- help
- Provides a driver for OPAL firmware-based IPMI interfaces.
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
config IPMI_WATCHDOG
- tristate 'IPMI Watchdog Timer'
- help
- This enables the IPMI watchdog timer.
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
config IPMI_POWEROFF
- tristate 'IPMI Poweroff'
- help
- This enables a function to power off the system with IPMI if
- the IPMI management controller is capable of this.
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
endif # IPMI_HANDLER
@@ -126,7 +126,7 @@ config NPCM7XX_KCS_IPMI_BMC
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+ depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
tristate "BT IPMI bmc driver"
help
Provides a driver for the BT (Block Transfer) IPMI interface
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 40b9927c072c..d36aeacb290e 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -444,15 +444,13 @@ static int bt_bmc_probe(struct platform_device *pdev)
bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(bt_bmc->map)) {
- struct resource *res;
void __iomem *base;
/*
* Assume it's not the MFD-based devicetree description, in
* which case generate a regmap ourselves
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 285e0b8f9a97..1ff4fb1def7c 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -133,9 +133,6 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
netf_rq_lun = msg[NETFN_LUN_IDX];
- if (!(netf_rq_lun & NETFN_RSP_BIT_MASK))
- return -EINVAL;
-
/*
* subtract rq_sa and netf_rq_lun from the length of the msg passed to
* i2c_smbus_xfer
@@ -154,16 +151,16 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
return ret ? : count;
}
-static unsigned int ipmb_poll(struct file *file, poll_table *wait)
+static __poll_t ipmb_poll(struct file *file, poll_table *wait)
{
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
- unsigned int mask = POLLOUT;
+ __poll_t mask = EPOLLOUT;
mutex_lock(&ipmb_dev->file_mutex);
poll_wait(file, &ipmb_dev->wait_queue, wait);
if (atomic_read(&ipmb_dev->request_queue_len))
- mask |= POLLIN;
+ mask |= EPOLLIN;
mutex_unlock(&ipmb_dev->file_mutex);
return mask;
@@ -203,25 +200,16 @@ static u8 ipmb_verify_checksum1(struct ipmb_dev *ipmb_dev, u8 rs_sa)
ipmb_dev->request.checksum1);
}
-static bool is_ipmb_request(struct ipmb_dev *ipmb_dev, u8 rs_sa)
+/*
+ * Verify that the message has a proper IPMB header of at least the
+ * minimum length and with a correct checksum byte.
+ */
+static bool is_ipmb_msg(struct ipmb_dev *ipmb_dev, u8 rs_sa)
{
- if (ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) {
- if (ipmb_verify_checksum1(ipmb_dev, rs_sa))
- return false;
+ if ((ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) &&
+ (!ipmb_verify_checksum1(ipmb_dev, rs_sa)))
+ return true;
- /*
- * Check whether this is an IPMB request or
- * response.
- * The 6 MSB of netfn_rs_lun are dedicated to the netfn
- * while the remaining bits are dedicated to the lun.
- * If the LSB of the netfn is cleared, it is associated
- * with an IPMB request.
- * If the LSB of the netfn is set, it is associated with
- * an IPMB response.
- */
- if (!(ipmb_dev->request.netfn_rs_lun & NETFN_RSP_BIT_MASK))
- return true;
- }
return false;
}
@@ -273,8 +261,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
case I2C_SLAVE_STOP:
ipmb_dev->request.len = ipmb_dev->msg_idx;
-
- if (is_ipmb_request(ipmb_dev, GET_8BIT_ADDR(client->addr)))
+ if (is_ipmb_msg(ipmb_dev, GET_8BIT_ADDR(client->addr)))
ipmb_handle_request(ipmb_dev);
break;
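The ipmb_poll() hunk is part of the tree-wide move from the old unsigned int/POLL* return to the sparse-checked __poll_t/EPOLL* types. A minimal poll method in the new style (foo_dev and its fields are hypothetical):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct foo_dev {                        /* hypothetical device state */
        wait_queue_head_t wait_queue;
        bool have_data;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
        struct foo_dev *foo = file->private_data;
        __poll_t mask = EPOLLOUT;       /* always ready for writes */

        poll_wait(file, &foo->wait_queue, wait);
        if (foo->have_data)
                mask |= EPOLLIN;        /* data ready for reads */

        return mask;
}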
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2aab80e19ae0..cad9563f8f48 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -44,25 +44,6 @@ static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
-#ifdef DEBUG
-static void ipmi_debug_msg(const char *title, unsigned char *data,
- unsigned int len)
-{
- int i, pos;
- char buf[100];
-
- pos = snprintf(buf, sizeof(buf), "%s: ", title);
- for (i = 0; i < len; i++)
- pos += snprintf(buf + pos, sizeof(buf) - pos,
- " %2.2x", data[i]);
- pr_debug("%s\n", buf);
-}
-#else
-static void ipmi_debug_msg(const char *title, unsigned char *data,
- unsigned int len)
-{ }
-#endif
-
static bool initialized;
static bool drvregistered;
@@ -448,6 +429,8 @@ enum ipmi_stat_indexes {
#define IPMI_IPMB_NUM_SEQ 64
struct ipmi_smi {
+ struct module *owner;
+
/* What interface number are we? */
int intf_num;
@@ -1220,6 +1203,11 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
goto out_kfree;
+ if (!try_module_get(intf->owner)) {
+ rv = -ENODEV;
+ goto out_kfree;
+ }
+
/* Note that each existing user holds a refcount to the interface. */
kref_get(&intf->refcount);
@@ -1349,6 +1337,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
kref_put(&intf->refcount, intf_free);
+ module_put(intf->owner);
}
int ipmi_destroy_user(struct ipmi_user *user)
@@ -2267,7 +2256,7 @@ out_err:
ipmi_free_smi_msg(smi_msg);
ipmi_free_recv_msg(recv_msg);
} else {
- ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
+ pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
smi_send(intf, intf->handlers, smi_msg, priority);
}
@@ -2459,7 +2448,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
* been recently fetched, this will just use the cached data. Otherwise
* it will run a new fetch.
*
- * Except for the first time this is called (in ipmi_register_smi()),
+ * Except for the first time this is called (in ipmi_add_smi()),
* this will always return good data;
*/
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
@@ -3031,8 +3020,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bmc->pdev.name = "ipmi_bmc";
rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
- if (rv < 0)
+ if (rv < 0) {
+ kfree(bmc);
goto out;
+ }
+
bmc->pdev.dev.driver = &ipmidriver.driver;
bmc->pdev.id = rv;
bmc->pdev.dev.release = release_bmc_device;
@@ -3377,10 +3369,11 @@ static void redo_bmc_reg(struct work_struct *work)
kref_put(&intf->refcount, intf_free);
}
-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
- void *send_info,
- struct device *si_dev,
- unsigned char slave_addr)
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *si_dev,
+ unsigned char slave_addr)
{
int i, j;
int rv;
@@ -3406,7 +3399,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
return rv;
}
-
+ intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
mutex_init(&intf->bmc->dyn_mutex);
@@ -3514,7 +3507,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
return rv;
}
-EXPORT_SYMBOL(ipmi_register_smi);
+EXPORT_SYMBOL(ipmi_add_smi);
static void deliver_smi_err_response(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg,
@@ -3730,7 +3723,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
msg->data[10] = ipmb_checksum(&msg->data[6], 4);
msg->data_size = 11;
- ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
+ pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
rcu_read_lock();
if (!intf->in_shutdown) {
@@ -4217,7 +4210,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
int requeue;
int chan;
- ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
+ pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
@@ -4576,7 +4569,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
smi_msg->data_size = recv_msg->msg.data_len;
smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
- ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
+ pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
return smi_msg;
}
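The deleted ipmi_debug_msg() helper is replaced by the kernel's %*ph printf extension, which hex-dumps a short buffer (up to 64 bytes) without any hand-rolled snprintf loop. For example, as a sketch:

#include <linux/printk.h>
#include <linux/types.h>

static void show_msg(const u8 *data, unsigned int len)
{
        /* %*ph consumes (int length, const void *buf) and prints
         * the bytes as "de ad be ef ...".
         */
        pr_debug("Send: %*ph\n", (int)len, data);
}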
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6b9a0593d2eb..c7cc8538b84a 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -265,10 +265,10 @@ static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
- struct timespec t;
+ struct timespec64 t;
- ktime_get_ts(&t);
- pr_debug("**%s: %ld.%9.9ld\n", msg, (long) t.tv_sec, t.tv_nsec);
+ ktime_get_ts64(&t);
+ pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
@@ -935,38 +935,25 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
}
/*
- * Use -1 in the nsec value of the busy waiting timespec to tell that
- * we are spinning in kipmid looking for something and not delaying
- * between checks
+ * Use -1 as a special constant to indicate that we are spinning in
+ * kipmid looking for something and not delaying between checks
*/
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
-{
- ts->tv_nsec = -1;
-}
-static inline int ipmi_si_is_busy(struct timespec *ts)
-{
- return ts->tv_nsec != -1;
-}
-
+#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
const struct smi_info *smi_info,
- struct timespec *busy_until)
+ ktime_t *busy_until)
{
unsigned int max_busy_us = 0;
if (smi_info->si_num < num_max_busy_us)
max_busy_us = kipmid_max_busy_us[smi_info->si_num];
if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
- ipmi_si_set_not_busy(busy_until);
- else if (!ipmi_si_is_busy(busy_until)) {
- ktime_get_ts(busy_until);
- timespec_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
+ *busy_until = IPMI_TIME_NOT_BUSY;
+ else if (*busy_until == IPMI_TIME_NOT_BUSY) {
+ *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
} else {
- struct timespec now;
-
- ktime_get_ts(&now);
- if (unlikely(timespec_compare(&now, busy_until) > 0)) {
- ipmi_si_set_not_busy(busy_until);
+ if (unlikely(ktime_get() > *busy_until)) {
+ *busy_until = IPMI_TIME_NOT_BUSY;
return false;
}
}
@@ -988,9 +975,8 @@ static int ipmi_thread(void *data)
struct smi_info *smi_info = data;
unsigned long flags;
enum si_sm_result smi_result;
- struct timespec busy_until = { 0, 0 };
+ ktime_t busy_until = IPMI_TIME_NOT_BUSY;
- ipmi_si_set_not_busy(&busy_until);
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
int busy_wait;
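Converting kipmid's busy-until tracking from struct timespec to ktime_t is part of the y2038 cleanup, and it turns the busy-window test into plain 64-bit compares. The resulting control flow, reduced to a sketch (keep_spinning is an illustrative name and omits the SI_SM_CALL_WITH_DELAY handling):

#include <linux/ktime.h>

#define NOT_BUSY        ns_to_ktime(-1ull)      /* cf. IPMI_TIME_NOT_BUSY */

static bool keep_spinning(ktime_t *busy_until, unsigned int max_busy_us)
{
        if (!max_busy_us) {                     /* spinning disabled */
                *busy_until = NOT_BUSY;
                return false;
        }
        if (*busy_until == NOT_BUSY) {          /* open a busy window */
                *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
                return true;
        }
        if (ktime_get() > *busy_until) {        /* window expired */
                *busy_until = NOT_BUSY;
                return false;
        }
        return true;
}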
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 7c9269e3477a..bd95aba1f9fe 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
if (copy_from_user(karg, arg, sizeof(karg)))
return -EFAULT;
+ /* sparc64 suseconds_t is 32-bit only */
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ karg[1] >>= 32;
+
return lp_set_timeout(minor, karg[0], karg[1]);
}
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index c86f18aa8985..34bb88fe0b0a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -619,20 +619,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
+ if ((time32[0] < 0) || (time32[1] < 0))
+ return -EINVAL;
+
return pp_set_timeout(pp->pdev, time32[0], time32[1]);
case PPSETTIME64:
if (copy_from_user(time64, argp, sizeof(time64)))
return -EFAULT;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] >>= 32;
+
return pp_set_timeout(pp->pdev, time64[0], time64[1]);
case PPGETTIME32:
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time32[0] = ts.tv_sec;
time32[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time32[0] < 0) || (time32[1] < 0))
- return -EINVAL;
if (copy_to_user(argp, time32, sizeof(time32)))
return -EFAULT;
@@ -643,8 +650,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time64[0] = ts.tv_sec;
time64[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time64[0] < 0) || (time64[1] < 0))
- return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] <<= 32;
if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
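Both the lp and ppdev changes stem from the same quirk: sparc64 is the only 64-bit architecture whose native ABI keeps suseconds_t 32-bit, so in a 64-bit slot exchanged with non-compat userspace the microseconds occupy the upper half on this big-endian machine. An illustration of the two shift directions (helper names are hypothetical):

#include <linux/types.h>

/* Illustration only: on sparc64 (big-endian, 32-bit suseconds_t),
 * the microseconds of a {sec, usec} pair exchanged with native
 * userspace through a 64-bit slot land in the slot's upper half.
 */
static s64 usec_from_native_user(s64 slot)
{
        return slot >> 32;              /* PPSETTIME64 / lp ioctl path */
}

static s64 usec_to_native_user(s64 usec)
{
        return usec << 32;              /* PPGETTIME64 path */
}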
diff --git a/drivers/char/random.c b/drivers/char/random.c
index de434feb873a..01b8868b9bed 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,7 +327,6 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
-#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_freezable(random_write_wait,
- kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait, kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9c37047f4b56..aacdeed93320 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -67,6 +67,13 @@ config TCG_TIS_SPI
within Linux. To compile this driver as a module, choose M here;
the module will be called tpm_tis_spi.
+config TCG_TIS_SPI_CR50
+ bool "Cr50 SPI Interface"
+ depends on TCG_TIS_SPI
+ help
+ If you have an H1 secure module running Cr50 firmware on a SPI bus,
+ say Yes and it will be accessible from within Linux.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index c354cdff9c62..5a0d99d4fec0 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -21,7 +21,9 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
-obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o
+tpm_tis_spi_mod-y := tpm_tis_spi.o
+tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d7a3888ad80f..a438b1206fcb 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/tpm_eventlog.h>
@@ -394,7 +395,11 @@ int tpm_pm_suspend(struct device *dev)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
- return 0;
+ goto suspended;
+
+ if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
+ !pm_suspend_via_firmware())
+ goto suspended;
if (!tpm_chip_start(chip)) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -405,6 +410,7 @@ int tpm_pm_suspend(struct device *dev)
tpm_chip_stop(chip);
}
+suspended:
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
@@ -453,62 +459,6 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
}
EXPORT_SYMBOL_GPL(tpm_get_random);
-/**
- * tpm_seal_trusted() - seal a trusted key payload
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
- *
- * Note: only TPM 2.0 chip are supported. TPM 1.x implementation is located in
- * the keyring subsystem.
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
- return -ENODEV;
-
- rc = tpm2_seal_trusted(chip, payload, options);
-
- tpm_put_ops(chip);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_seal_trusted);
-
-/**
- * tpm_unseal_trusted() - unseal a trusted key
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
- *
- * Note: only TPM 2.0 chip are supported. TPM 1.x implementation is located in
- * the keyring subsystem.
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
- return -ENODEV;
-
- rc = tpm2_unseal_trusted(chip, payload, options);
-
- tpm_put_ops(chip);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
-
static int __init tpm_init(void)
{
int rc;
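The new check skips TPM_SaveState when the chip is flagged as firmware-power-managed and the transition will not pass through firmware (suspend-to-idle), since the TPM keeps power in that case. The gating predicate in isolation, as a sketch (tpm_needs_save_state is a hypothetical name; the flags are the ones this series uses):

#include <linux/suspend.h>
#include <linux/tpm.h>
#include <linux/types.h>

static bool tpm_needs_save_state(u32 chip_flags)
{
        if (chip_flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
                return false;           /* TPM never loses power */

        /* A firmware-power-managed TPM only loses power when the
         * suspend path goes through firmware (e.g. ACPI S3), not
         * on suspend-to-idle.
         */
        if ((chip_flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
            !pm_suspend_via_firmware())
                return false;

        return true;
}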
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index edfa89160010..3b53b3e5ec3e 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm1_version *version;
ssize_t rc = 0;
char *str = buf;
cap_t cap;
@@ -232,31 +233,31 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
- /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ /* TPM 1.2 */
+ if (!tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version",
- sizeof(cap.tpm_version_1_2));
- if (!rc) {
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major,
- cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
- } else {
- /* Otherwise just use TPM_STRUCT_VER */
- if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version",
- sizeof(cap.tpm_version)))
- goto out_ops;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major,
- cap.tpm_version.Minor,
- cap.tpm_version.revMajor,
- cap.tpm_version.revMinor);
+ sizeof(cap.version2))) {
+ version = &cap.version2.version;
+ goto out_print;
}
+
+ /* TPM 1.1 */
+ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1))) {
+ goto out_ops;
+ }
+
+ version = &cap.version1;
+
+out_print:
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ version->major, version->minor,
+ version->rev_major, version->rev_minor);
+
rc = str - buf;
+
out_ops:
tpm_put_ops(chip);
return rc;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7fea3e0ca86..b9e1547be6b5 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -25,7 +25,6 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/tpm.h>
-#include <linux/highmem.h>
#include <linux/tpm_eventlog.h>
#ifdef CONFIG_X86
@@ -58,123 +57,6 @@ enum tpm_addr {
#define TPM_ERR_DISABLED 0x7
#define TPM_ERR_INVALID_POSTINIT 38
-#define TPM_HEADER_SIZE 10
-
-enum tpm2_const {
- TPM2_PLATFORM_PCR = 24,
- TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
-};
-
-enum tpm2_timeouts {
- TPM2_TIMEOUT_A = 750,
- TPM2_TIMEOUT_B = 2000,
- TPM2_TIMEOUT_C = 200,
- TPM2_TIMEOUT_D = 30,
- TPM2_DURATION_SHORT = 20,
- TPM2_DURATION_MEDIUM = 750,
- TPM2_DURATION_LONG = 2000,
- TPM2_DURATION_LONG_LONG = 300000,
- TPM2_DURATION_DEFAULT = 120000,
-};
-
-enum tpm2_structures {
- TPM2_ST_NO_SESSIONS = 0x8001,
- TPM2_ST_SESSIONS = 0x8002,
-};
-
-/* Indicates from what layer of the software stack the error comes from */
-#define TSS2_RC_LAYER_SHIFT 16
-#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
-
-enum tpm2_return_codes {
- TPM2_RC_SUCCESS = 0x0000,
- TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
- TPM2_RC_HANDLE = 0x008B,
- TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
- TPM2_RC_FAILURE = 0x0101,
- TPM2_RC_DISABLED = 0x0120,
- TPM2_RC_COMMAND_CODE = 0x0143,
- TPM2_RC_TESTING = 0x090A, /* RC_WARN */
- TPM2_RC_REFERENCE_H0 = 0x0910,
- TPM2_RC_RETRY = 0x0922,
-};
-
-enum tpm2_command_codes {
- TPM2_CC_FIRST = 0x011F,
- TPM2_CC_HIERARCHY_CONTROL = 0x0121,
- TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129,
- TPM2_CC_CREATE_PRIMARY = 0x0131,
- TPM2_CC_SEQUENCE_COMPLETE = 0x013E,
- TPM2_CC_SELF_TEST = 0x0143,
- TPM2_CC_STARTUP = 0x0144,
- TPM2_CC_SHUTDOWN = 0x0145,
- TPM2_CC_NV_READ = 0x014E,
- TPM2_CC_CREATE = 0x0153,
- TPM2_CC_LOAD = 0x0157,
- TPM2_CC_SEQUENCE_UPDATE = 0x015C,
- TPM2_CC_UNSEAL = 0x015E,
- TPM2_CC_CONTEXT_LOAD = 0x0161,
- TPM2_CC_CONTEXT_SAVE = 0x0162,
- TPM2_CC_FLUSH_CONTEXT = 0x0165,
- TPM2_CC_VERIFY_SIGNATURE = 0x0177,
- TPM2_CC_GET_CAPABILITY = 0x017A,
- TPM2_CC_GET_RANDOM = 0x017B,
- TPM2_CC_PCR_READ = 0x017E,
- TPM2_CC_PCR_EXTEND = 0x0182,
- TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185,
- TPM2_CC_HASH_SEQUENCE_START = 0x0186,
- TPM2_CC_CREATE_LOADED = 0x0191,
- TPM2_CC_LAST = 0x0193, /* Spec 1.36 */
-};
-
-enum tpm2_permanent_handles {
- TPM2_RS_PW = 0x40000009,
-};
-
-enum tpm2_capabilities {
- TPM2_CAP_HANDLES = 1,
- TPM2_CAP_COMMANDS = 2,
- TPM2_CAP_PCRS = 5,
- TPM2_CAP_TPM_PROPERTIES = 6,
-};
-
-enum tpm2_properties {
- TPM_PT_TOTAL_COMMANDS = 0x0129,
-};
-
-enum tpm2_startup_types {
- TPM2_SU_CLEAR = 0x0000,
- TPM2_SU_STATE = 0x0001,
-};
-
-enum tpm2_cc_attrs {
- TPM2_CC_ATTR_CHANDLES = 25,
- TPM2_CC_ATTR_RHANDLE = 28,
-};
-
-#define TPM_VID_INTEL 0x8086
-#define TPM_VID_WINBOND 0x1050
-#define TPM_VID_STM 0x104A
-
-enum tpm_chip_flags {
- TPM_CHIP_FLAG_TPM2 = BIT(1),
- TPM_CHIP_FLAG_IRQ = BIT(2),
- TPM_CHIP_FLAG_VIRTUAL = BIT(3),
- TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
- TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
-};
-
-#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-
-struct tpm_header {
- __be16 tag;
- __be32 length;
- union {
- __be32 ordinal;
- __be32 return_code;
- };
-} __packed;
-
#define TPM_TAG_RQU_COMMAND 193
struct stclear_flags_t {
@@ -186,19 +68,16 @@ struct stclear_flags_t {
u8 bGlobalLock;
} __packed;
-struct tpm_version_t {
- u8 Major;
- u8 Minor;
- u8 revMajor;
- u8 revMinor;
+struct tpm1_version {
+ u8 major;
+ u8 minor;
+ u8 rev_major;
+ u8 rev_minor;
} __packed;
-struct tpm_version_1_2_t {
- __be16 tag;
- u8 Major;
- u8 Minor;
- u8 revMajor;
- u8 revMinor;
+struct tpm1_version2 {
+ __be16 tag;
+ struct tpm1_version version;
} __packed;
struct timeout_t {
@@ -243,8 +122,8 @@ typedef union {
struct stclear_flags_t stclear_flags;
__u8 owned;
__be32 num_pcrs;
- struct tpm_version_t tpm_version;
- struct tpm_version_1_2_t tpm_version_1_2;
+ struct tpm1_version version1;
+ struct tpm1_version2 version2;
__be32 manufacturer_id;
struct timeout_t timeout;
struct duration_t duration;
@@ -274,102 +153,6 @@ enum tpm_sub_capabilities {
* compiler warnings about stack frame size. */
#define TPM_MAX_RNG_DATA 128
-/* A string buffer type for constructing TPM commands. This is based on the
- * ideas of string buffer code in security/keys/trusted.h but is heap based
- * in order to keep the stack usage minimal.
- */
-
-enum tpm_buf_flags {
- TPM_BUF_OVERFLOW = BIT(0),
-};
-
-struct tpm_buf {
- struct page *data_page;
- unsigned int flags;
- u8 *data;
-};
-
-static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-}
-
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- buf->data_page = alloc_page(GFP_HIGHUSER);
- if (!buf->data_page)
- return -ENOMEM;
-
- buf->flags = 0;
- buf->data = kmap(buf->data_page);
- tpm_buf_reset(buf, tag, ordinal);
- return 0;
-}
-
-static inline void tpm_buf_destroy(struct tpm_buf *buf)
-{
- kunmap(buf->data_page);
- __free_page(buf->data_page);
-}
-
-static inline u32 tpm_buf_length(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be32_to_cpu(head->length);
-}
-
-static inline u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-static inline void tpm_buf_append(struct tpm_buf *buf,
- const unsigned char *new_data,
- unsigned int new_len)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- u32 len = tpm_buf_length(buf);
-
- /* Return silently if overflow has already happened. */
- if (buf->flags & TPM_BUF_OVERFLOW)
- return;
-
- if ((len + new_len) > PAGE_SIZE) {
- WARN(1, "tpm_buf: overflow\n");
- buf->flags |= TPM_BUF_OVERFLOW;
- return;
- }
-
- memcpy(&buf->data[len], new_data, new_len);
- head->length = cpu_to_be32(len + new_len);
-}
-
-static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
-{
- tpm_buf_append(buf, &value, 1);
-}
-
-static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
-{
- __be16 value2 = cpu_to_be16(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 2);
-}
-
-static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
-{
- __be32 value2 = cpu_to_be32(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 4);
-}
-
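/*
 * A minimal usage sketch, assuming only the tpm_buf API removed above;
 * the function name and the 0x46 ordinal are illustrative, not part of
 * the driver. It shows the pattern: init writes the header, appends grow
 * the big-endian length field, and overflow is latched rather than fatal.
 */
static int example_build_request(struct tpm_buf *buf)
{
	int rc;

	rc = tpm_buf_init(buf, TPM_TAG_RQU_COMMAND, 0x46 /* hypothetical */);
	if (rc)
		return rc;

	tpm_buf_append_u32(buf, 16);	/* body field, appended big endian */

	if (buf->flags & TPM_BUF_OVERFLOW) {	/* set once appends pass PAGE_SIZE */
		tpm_buf_destroy(buf);
		return -E2BIG;
	}
	return 0;
}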
extern struct class *tpm_class;
extern struct class *tpmrm_class;
extern dev_t tpm_devt;
@@ -429,11 +212,6 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline u32 tpm2_rc_value(u32 rc)
-{
- return (rc & BIT(7)) ? rc & 0xff : rc;
-}
-
int tpm2_get_timeouts(struct tpm_chip *chip);
int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest, u16 *digest_size_ptr);
@@ -441,12 +219,6 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
-int tpm2_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
-int tpm2_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index 149e953ca369..ca7158fa6e6c 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -343,6 +343,7 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
{
cap_t cap;
unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+ unsigned long durations[3];
ssize_t rc;
rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
@@ -427,6 +428,20 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */
+ /*
+ * Provide the ability for vendor overrides of duration values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_durations)
+ chip->ops->update_durations(chip, durations);
+
+ if (chip->duration_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported durations.");
+ chip->duration[TPM_SHORT] = durations[0];
+ chip->duration[TPM_MEDIUM] = durations[1];
+ chip->duration[TPM_LONG] = durations[2];
+ }
+
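/*
 * Contract sketched by the hunk above, assuming the new update_durations
 * op: the op fills durations[0..2] with jiffies values and sets
 * chip->duration_adjusted when it overrode the chip-reported figures;
 * only then are the three duration slots replaced.
 */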
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index ba9acae83bff..fdb457704aa7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -13,20 +13,6 @@
#include "tpm.h"
#include <crypto/hash_info.h>
-#include <keys/trusted-type.h>
-
-enum tpm2_object_attributes {
- TPM2_OA_USER_WITH_AUTH = BIT(6),
-};
-
-enum tpm2_session_attributes {
- TPM2_SA_CONTINUE_SESSION = BIT(0),
-};
-
-struct tpm2_hash {
- unsigned int crypto_id;
- unsigned int tpm_id;
-};
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
@@ -377,299 +363,6 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
tpm_buf_destroy(&buf);
}
-/**
- * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
- *
- * @buf: an allocated tpm_buf instance
- * @session_handle: session handle
- * @nonce: the session nonce, may be NULL if not used
- * @nonce_len: the session nonce length, may be 0 if not used
- * @attributes: the session attributes
- * @hmac: the session HMAC or password, may be NULL if not used
- * @hmac_len: the session HMAC or password length, may be 0 if not used
- */
-static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
- const u8 *nonce, u16 nonce_len,
- u8 attributes,
- const u8 *hmac, u16 hmac_len)
-{
- tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
- tpm_buf_append_u32(buf, session_handle);
- tpm_buf_append_u16(buf, nonce_len);
-
- if (nonce && nonce_len)
- tpm_buf_append(buf, nonce, nonce_len);
-
- tpm_buf_append_u8(buf, attributes);
- tpm_buf_append_u16(buf, hmac_len);
-
- if (hmac && hmac_len)
- tpm_buf_append(buf, hmac, hmac_len);
-}
-
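/*
 * Wire layout produced by tpm2_buf_append_auth() above, as implied by its
 * appends (sizes in bytes): u32 authorization size, u32 session handle,
 * u16 nonce length, nonce, u8 attributes, u16 hmac length, hmac. The size
 * field covers everything after itself, hence 9 + nonce_len + hmac_len.
 */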
-/**
- * tpm2_seal_trusted() - seal the payload of a trusted key
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- *
- * Return: < 0 on error and 0 on success.
- */
-int tpm2_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- unsigned int blob_len;
- struct tpm_buf buf;
- u32 hash;
- int i;
- int rc;
-
- for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
- if (options->hash == tpm2_hash_map[i].crypto_id) {
- hash = tpm2_hash_map[i].tpm_id;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(tpm2_hash_map))
- return -EINVAL;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, options->keyhandle);
- tpm2_buf_append_auth(&buf, TPM2_RS_PW,
- NULL /* nonce */, 0,
- 0 /* session_attributes */,
- options->keyauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- /* sensitive */
- tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
-
- tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
- tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
- tpm_buf_append_u16(&buf, payload->key_len + 1);
- tpm_buf_append(&buf, payload->key, payload->key_len);
- tpm_buf_append_u8(&buf, payload->migratable);
-
- /* public */
- tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
- tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
- tpm_buf_append_u16(&buf, hash);
-
- /* policy */
- if (options->policydigest_len) {
- tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->policydigest_len);
- tpm_buf_append(&buf, options->policydigest,
- options->policydigest_len);
- } else {
- tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
- tpm_buf_append_u16(&buf, 0);
- }
-
- /* public parameters */
- tpm_buf_append_u16(&buf, TPM_ALG_NULL);
- tpm_buf_append_u16(&buf, 0);
-
- /* outside info */
- tpm_buf_append_u16(&buf, 0);
-
- /* creation PCR */
- tpm_buf_append_u32(&buf, 0);
-
- if (buf.flags & TPM_BUF_OVERFLOW) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
- if (rc)
- goto out;
-
- blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
- if (blob_len > MAX_BLOB_SIZE) {
- rc = -E2BIG;
- goto out;
- }
- if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
- payload->blob_len = blob_len;
-
-out:
- tpm_buf_destroy(&buf);
-
- if (rc > 0) {
- if (tpm2_rc_value(rc) == TPM2_RC_HASH)
- rc = -EINVAL;
- else
- rc = -EPERM;
- }
-
- return rc;
-}
-
-/**
- * tpm2_load_cmd() - execute a TPM2_Load command
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- * @blob_handle: returned blob handle
- *
- * Return: 0 on success.
- * -E2BIG on wrong payload size.
- * -EPERM on tpm error status.
- * < 0 error from tpm_transmit_cmd.
- */
-static int tpm2_load_cmd(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 *blob_handle)
-{
- struct tpm_buf buf;
- unsigned int private_len;
- unsigned int public_len;
- unsigned int blob_len;
- int rc;
-
- private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
- if (private_len > (payload->blob_len - 2))
- return -E2BIG;
-
- public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
- blob_len = private_len + public_len + 4;
- if (blob_len > payload->blob_len)
- return -E2BIG;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, options->keyhandle);
- tpm2_buf_append_auth(&buf, TPM2_RS_PW,
- NULL /* nonce */, 0,
- 0 /* session_attributes */,
- options->keyauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- tpm_buf_append(&buf, payload->blob, blob_len);
-
- if (buf.flags & TPM_BUF_OVERFLOW) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
- if (!rc)
- *blob_handle = be32_to_cpup(
- (__be32 *) &buf.data[TPM_HEADER_SIZE]);
-
-out:
- tpm_buf_destroy(&buf);
-
- if (rc > 0)
- rc = -EPERM;
-
- return rc;
-}
-
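/*
 * Sealed blob layout assumed by the parsing in tpm2_load_cmd() below:
 * a be16 private length at offset 0, the private area, a be16 public
 * length at offset 2 + private_len, then the public area; blob_len is
 * therefore 4 + private_len + public_len and must fit payload->blob_len.
 */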
-/**
- * tpm2_unseal_cmd() - execute a TPM2_Unseal command
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- * @blob_handle: blob handle
- *
- * Return: 0 on success
- * -EPERM on tpm error status
- * < 0 error from tpm_transmit_cmd
- */
-static int tpm2_unseal_cmd(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 blob_handle)
-{
- struct tpm_buf buf;
- u16 data_len;
- u8 *data;
- int rc;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, blob_handle);
- tpm2_buf_append_auth(&buf,
- options->policyhandle ?
- options->policyhandle : TPM2_RS_PW,
- NULL /* nonce */, 0,
- TPM2_SA_CONTINUE_SESSION,
- options->blobauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
- if (rc > 0)
- rc = -EPERM;
-
- if (!rc) {
- data_len = be16_to_cpup(
- (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
- if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
- rc = -EFAULT;
- goto out;
- }
-
- if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
- rc = -EFAULT;
- goto out;
- }
- data = &buf.data[TPM_HEADER_SIZE + 6];
-
- memcpy(payload->key, data, data_len - 1);
- payload->key_len = data_len - 1;
- payload->migratable = data[data_len - 1];
- }
-
-out:
- tpm_buf_destroy(&buf);
- return rc;
-}
-
-/**
- * tpm2_unseal_trusted() - unseal the payload of a trusted key
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- *
- * Return: Same as with tpm_transmit_cmd.
- */
-int tpm2_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- u32 blob_handle;
- int rc;
-
- rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
- if (rc)
- return rc;
-
- rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
- tpm2_flush_context(chip, blob_handle);
- return rc;
-}
-
struct tpm2_get_cap_out {
u8 more_data;
__be32 subcap_id;
@@ -939,6 +632,10 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands,
GFP_KERNEL);
+ if (!chip->cc_attrs_tbl) {
+ rc = -ENOMEM;
+ goto out;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
if (rc)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index e59f1f91d7f3..a9dcf31eadd2 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -22,6 +22,7 @@
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
static const guid_t crb_acpi_start_guid =
GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
@@ -91,7 +92,6 @@ enum crb_status {
struct crb_priv {
u32 sm;
const char *hid;
- void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
u8 __iomem *cmd;
@@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
- struct resource *io_res = data;
+ struct resource *iores_array = data;
struct resource_win win;
struct resource *res = &(win.res);
+ int i;
if (acpi_dev_resource_memory(ares, res) ||
acpi_dev_resource_address_space(ares, &win)) {
- *io_res = *res;
- io_res->name = NULL;
+ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+ iores_array[i] = *res;
+ iores_array[i].name = NULL;
+ break;
+ }
+ }
}
return 1;
}
-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
- struct resource *io_res, u64 start, u32 size)
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
+ void __iomem **iobase_ptr, u64 start, u32 size)
{
struct resource new_res = {
.start = start,
@@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
if (start != new_res.start)
return (void __iomem *) ERR_PTR(-EINVAL);
- if (!resource_contains(io_res, &new_res))
+ if (!iores)
return devm_ioremap_resource(dev, &new_res);
- return priv->iobase + (new_res.start - io_res->start);
+ if (!*iobase_ptr) {
+ *iobase_ptr = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(*iobase_ptr))
+ return *iobase_ptr;
+ }
+
+ return *iobase_ptr + (new_res.start - iores->start);
}
/*
@@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct acpi_table_tpm2 *buf)
{
- struct list_head resources;
- struct resource io_res;
+ struct list_head acpi_resource_list;
+ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
struct device *dev = &device->dev;
+ struct resource *iores;
+ void __iomem **iobase_ptr;
+ int i;
u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
@@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size;
int ret;
- INIT_LIST_HEAD(&resources);
- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
- &io_res);
+ INIT_LIST_HEAD(&acpi_resource_list);
+ ret = acpi_dev_get_resources(device, &acpi_resource_list,
+ crb_check_resource, iores_array);
if (ret < 0)
return ret;
- acpi_dev_free_resource_list(&resources);
+ acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(&io_res) != IORESOURCE_MEM) {
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
}
- priv->iobase = devm_ioremap_resource(dev, &io_res);
- if (IS_ERR(priv->iobase))
- return PTR_ERR(priv->iobase);
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (buf->control_address >= iores_array[i].start &&
+ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+ iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+ sizeof(struct crb_regs_tail));
+
+ if (IS_ERR(priv->regs_t))
+ return PTR_ERR(priv->regs_t);
/* The ACPI IO region starts at the head area and continues to include
* the control area, as one nice sane region except for some older
@@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
(priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
- if (buf->control_address == io_res.start +
+ if (iores &&
+ buf->control_address == iores->start +
sizeof(*priv->regs_h))
- priv->regs_h = priv->iobase;
+ priv->regs_h = *iobase_ptr;
else
dev_warn(dev, FW_BUG "Bad ACPI memory layout");
}
@@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (ret)
return ret;
- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
- sizeof(struct crb_regs_tail));
- if (IS_ERR(priv->regs_t)) {
- ret = PTR_ERR(priv->regs_t);
- goto out_relinquish_locality;
- }
-
/*
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
cmd_pa = ((u64)pa_high << 32) | pa_low;
- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
- ioread32(&priv->regs_t->ctrl_cmd_size));
+ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; iores_array[i].end; ++i) {
+ if (cmd_pa >= iores_array[i].start &&
+ cmd_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
pa_high, pa_low, cmd_size);
- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
if (IS_ERR(priv->cmd)) {
ret = PTR_ERR(priv->cmd);
goto out;
@@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
rsp_pa = le64_to_cpu(__rsp_pa);
- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
- ioread32(&priv->regs_t->ctrl_rsp_size));
+ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (rsp_pa >= iores_array[i].start &&
+ rsp_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
if (cmd_pa != rsp_pa) {
- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+ rsp_pa, rsp_size);
ret = PTR_ERR_OR_ZERO(priv->rsp);
goto out;
}
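/*
 * The three containment searches above share one pattern; a helper along
 * these lines (hypothetical, shown only to illustrate the lookup) would
 * express it once:
 */
static struct resource *crb_find_resource(struct resource *iores_array,
					  u64 pa, u32 len)
{
	int i;

	/* The array carries a zeroed sentinel entry, so this terminates. */
	for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i)
		if (pa >= iores_array[i].start &&
		    pa + len - 1 <= iores_array[i].end)
			return iores_array + i;

	return NULL;
}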
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e4fdde93ed4c..e7df342a317d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -286,7 +286,7 @@ static int tpm_tis_plat_probe(struct platform_device *pdev)
}
tpm_info.res = *res;
- tpm_info.irq = platform_get_irq(pdev, 0);
+ tpm_info.irq = platform_get_irq_optional(pdev, 0);
if (tpm_info.irq <= 0) {
if (pdev != force_pdev)
tpm_info.irq = -1;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 270f43acbb77..8af2cee1a762 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -506,6 +506,84 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
return rc;
}
+struct tis_vendor_durations_override {
+ u32 did_vid;
+ struct tpm1_version version;
+ unsigned long durations[3];
+};
+
+static const struct tis_vendor_durations_override vendor_dur_overrides[] = {
+ /* STMicroelectronics 0x104a */
+ { 0x0000104a,
+ { 1, 2, 8, 28 },
+ { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } },
+};
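/*
 * Reading the entry above: an STMicroelectronics part (DID_VID
 * 0x0000104a) reporting TPM 1.2, firmware revision 8.28, has all three
 * duration classes (short/medium/long) overridden to two minutes.
 */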
+
+static void tpm_tis_update_durations(struct tpm_chip *chip,
+ unsigned long *duration_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ struct tpm1_version *version;
+ u32 did_vid;
+ int i, rc;
+ cap_t cap;
+
+ chip->duration_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid. %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Try to get a TPM version 1.2 or 1.1 TPM_CAP_VERSION_INFO */
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2));
+ if (!rc) {
+ version = &cap.version2.version;
+ } else {
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1));
+
+ if (rc)
+ goto out;
+
+ version = &cap.version1;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_dur_overrides); i++) {
+ if (vendor_dur_overrides[i].did_vid != did_vid)
+ continue;
+
+ if ((version->major ==
+ vendor_dur_overrides[i].version.major) &&
+ (version->minor ==
+ vendor_dur_overrides[i].version.minor) &&
+ (version->rev_major ==
+ vendor_dur_overrides[i].version.rev_major) &&
+ (version->rev_minor ==
+ vendor_dur_overrides[i].version.rev_minor)) {
+
+ memcpy(duration_cap,
+ vendor_dur_overrides[i].durations,
+ sizeof(vendor_dur_overrides[i].durations));
+
+ chip->duration_adjusted = true;
+ goto out;
+ }
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+}
+
struct tis_vendor_timeout_override {
u32 did_vid;
unsigned long timeout_us[4];
@@ -842,6 +920,7 @@ static const struct tpm_class_ops tpm_tis = {
.send = tpm_tis_send,
.cancel = tpm_tis_ready,
.update_timeouts = tpm_tis_update_timeouts,
+ .update_durations = tpm_tis_update_durations,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 19513e622053..d1754fd6c573 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -20,42 +20,64 @@
* Dorn and Kyleen Hall and Jarkko Sakkinen.
*/
+#include <linux/acpi.h>
+#include <linux/completion.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/acpi.h>
-#include <linux/freezer.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/tpm.h>
+
#include "tpm.h"
#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
-struct tpm_tis_spi_phy {
- struct tpm_tis_data priv;
- struct spi_device *spi_device;
- u8 *iobuf;
-};
-
-static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+/*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+ * keep trying to read from the device until MISO goes high indicating the
+ * wait state has ended.
+ *
+ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+ */
+static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
{
- return container_of(data, struct tpm_tis_spi_phy, priv);
+ struct spi_message m;
+ int ret, i;
+
+ if ((phy->iobuf[3] & 0x01) == 0) {
+ // handle SPI wait states
+ phy->iobuf[0] = 0;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ spi_xfer->len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+ if (phy->iobuf[0] & 0x01)
+ break;
+ }
+
+ if (i == TPM_RETRY)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *in, const u8 *out)
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
- int i;
struct spi_message m;
struct spi_transfer spi_xfer;
u8 transfer_len;
@@ -82,26 +104,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->iobuf[3] & 0x01) == 0) {
- // handle SPI wait states
- phy->iobuf[0] = 0;
-
- for (i = 0; i < TPM_RETRY; i++) {
- spi_xfer.len = 1;
- spi_message_init(&m);
- spi_message_add_tail(&spi_xfer, &m);
- ret = spi_sync_locked(phy->spi_device, &m);
- if (ret < 0)
- goto exit;
- if (phy->iobuf[0] & 0x01)
- break;
- }
-
- if (i == TPM_RETRY) {
- ret = -ETIMEDOUT;
- goto exit;
- }
- }
+ ret = phy->flow_control(phy, &spi_xfer);
+ if (ret < 0)
+ goto exit;
spi_xfer.cs_change = 0;
spi_xfer.len = transfer_len;
@@ -117,6 +122,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_message_init(&m);
spi_message_add_tail(&spi_xfer, &m);
+ reinit_completion(&phy->ready);
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
@@ -146,7 +152,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
-static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
__le16 result_le;
int rc;
@@ -159,7 +165,7 @@ static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
return rc;
}
-static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
__le32 result_le;
int rc;
@@ -172,7 +178,7 @@ static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
return rc;
}
-static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
__le32 value_le;
int rc;
@@ -184,6 +190,18 @@ static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
return rc;
}
+int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops)
+{
+ phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
+ phy->spi_device = spi;
+
+ return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
+}
+
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
.read_bytes = tpm_tis_spi_read_bytes,
.write_bytes = tpm_tis_spi_write_bytes,
@@ -202,11 +220,7 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy)
return -ENOMEM;
- phy->spi_device = dev;
-
- phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
- if (!phy->iobuf)
- return -ENOMEM;
+ phy->flow_control = tpm_tis_spi_flow_control;
/* If the SPI device has an IRQ then use that */
if (dev->irq > 0)
@@ -214,11 +228,27 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
else
irq = -1;
- return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
- NULL);
+ init_completion(&phy->ready);
+ return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
+}
+
+typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
+
+static int tpm_tis_spi_driver_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
+ tpm_tis_spi_probe_func probe_func;
+
+ probe_func = of_device_get_match_data(&spi->dev);
+ if (!probe_func && spi_dev_id)
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+
+ return probe_func(spi);
}
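/*
 * Dispatch note: OF matches carry the per-device probe function in
 * .data, while plain SPI ids carry it in driver_data, so the one
 * spi_driver below can bind both generic TIS parts and cr50.
 */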
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
static int tpm_tis_spi_remove(struct spi_device *dev)
{
@@ -230,15 +260,17 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
- {"tpm_tis_spi", 0},
+ { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "cr50", (unsigned long)cr50_spi_probe },
{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
static const struct of_device_id of_tis_spi_match[] = {
- { .compatible = "st,st33htpm-spi", },
- { .compatible = "infineon,slb9670", },
- { .compatible = "tcg,tpm_tis-spi", },
+ { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
+ { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "google,cr50", .data = cr50_spi_probe },
{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);
@@ -251,13 +283,12 @@ MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
static struct spi_driver tpm_tis_spi_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tpm_tis_spi",
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_spi_match),
.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
},
- .probe = tpm_tis_spi_probe,
+ .probe = tpm_tis_spi_driver_probe,
.remove = tpm_tis_spi_remove,
.id_table = tpm_tis_spi_id,
};
diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h
new file mode 100644
index 000000000000..bba73979c368
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ */
+
+#ifndef TPM_TIS_SPI_H
+#define TPM_TIS_SPI_H
+
+#include "tpm_tis_core.h"
+
+struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+ int (*flow_control)(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *xfer);
+ struct completion ready;
+ unsigned long wake_after;
+
+ u8 *iobuf;
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops);
+
+extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out);
+
+extern int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result);
+extern int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result);
+extern int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value);
+
+#ifdef CONFIG_TCG_TIS_SPI_CR50
+extern int cr50_spi_probe(struct spi_device *spi);
+#else
+static inline int cr50_spi_probe(struct spi_device *spi)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_TCG_TIS_SPI_CR50)
+extern int tpm_tis_spi_resume(struct device *dev);
+#else
+#define tpm_tis_spi_resume NULL
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
new file mode 100644
index 000000000000..37d72e818335
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This device driver implements a TCG PTP FIFO interface over SPI for chips
+ * with Cr50 firmware.
+ * It is based on tpm_tis_spi driver by Peter Huewe and Christophe Ricard.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+/*
+ * Cr50 timing constants:
+ * - may go to sleep after CR50_SLEEP_DELAY_MSEC of SPI inactivity.
+ * - needs up to CR50_WAKE_START_DELAY_USEC to wake after sleep.
+ * - requires waiting for a "ready" IRQ, if supported; or waiting for at least
+ *   CR50_NOIRQ_ACCESS_DELAY between transactions, if IRQs are not supported.
+ * - waits for up to CR50_FLOW_CONTROL for the flow control 'ready' indication.
+ */
+#define CR50_SLEEP_DELAY_MSEC 1000
+#define CR50_WAKE_START_DELAY_USEC 1000
+#define CR50_NOIRQ_ACCESS_DELAY msecs_to_jiffies(2)
+#define CR50_READY_IRQ_TIMEOUT msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define CR50_FLOW_CONTROL msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define MAX_IRQ_CONFIRMATION_ATTEMPTS 3
+
+#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
+#define TPM_CR50_MAX_FW_VER_LEN 64
+
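/*
 * A sketch of the jiffies bookkeeping these constants drive, using the
 * cr50_spi_phy/tpm_tis_spi_phy fields defined below:
 *
 *   allowed_access = last_access + access_delay;   earliest next transfer
 *   wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC);
 *
 * Transfers attempted before allowed_access wait for the ready IRQ or a
 * timeout; transfers attempted after wake_after first pulse CS and sleep
 * for CR50_WAKE_START_DELAY_USEC so the chip can wake.
 */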
+struct cr50_spi_phy {
+ struct tpm_tis_spi_phy spi_phy;
+
+ struct mutex time_track_mutex;
+ unsigned long last_access;
+
+ unsigned long access_delay;
+
+ unsigned int irq_confirmation_attempt;
+ bool irq_needs_confirmation;
+ bool irq_confirmed;
+};
+
+static inline struct cr50_spi_phy *to_cr50_spi_phy(struct tpm_tis_spi_phy *phy)
+{
+ return container_of(phy, struct cr50_spi_phy, spi_phy);
+}
+
+/*
+ * The cr50 interrupt handler just signals waiting threads that the
+ * interrupt was asserted. It does not do any processing triggered
+ * by interrupts but is instead used to avoid fixed delays.
+ */
+static irqreturn_t cr50_spi_irq_handler(int dummy, void *dev_id)
+{
+ struct cr50_spi_phy *cr50_phy = dev_id;
+
+ cr50_phy->irq_confirmed = true;
+ complete(&cr50_phy->spi_phy.ready);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Cr50 needs to have at least some delay between consecutive
+ * transactions. Make sure we wait.
+ */
+static void cr50_ensure_access_delay(struct cr50_spi_phy *phy)
+{
+ unsigned long allowed_access = phy->last_access + phy->access_delay;
+ unsigned long time_now = jiffies;
+ struct device *dev = &phy->spi_phy.spi_device->dev;
+
+ /*
+ * Note: there is a small chance, if Cr50 is not accessed for a few days,
+ * that time_in_range_open() will not provide the correct result after
+ * jiffies wraps around. In that case, we'll have an unneeded short delay,
+ * which is fine.
+ */
+ if (time_in_range_open(time_now, phy->last_access, allowed_access)) {
+ unsigned long remaining, timeout = allowed_access - time_now;
+
+ remaining = wait_for_completion_timeout(&phy->spi_phy.ready,
+ timeout);
+ if (!remaining && phy->irq_confirmed)
+ dev_warn(dev, "Timeout waiting for TPM ready IRQ\n");
+ }
+
+ if (phy->irq_needs_confirmation) {
+ unsigned int attempt = ++phy->irq_confirmation_attempt;
+
+ if (phy->irq_confirmed) {
+ phy->irq_needs_confirmation = false;
+ phy->access_delay = CR50_READY_IRQ_TIMEOUT;
+ dev_info(dev, "TPM ready IRQ confirmed on attempt %u\n",
+ attempt);
+ } else if (attempt > MAX_IRQ_CONFIRMATION_ATTEMPTS) {
+ phy->irq_needs_confirmation = false;
+ dev_warn(dev, "IRQ not confirmed - will use delays\n");
+ }
+ }
+}
+
+/*
+ * Cr50 might go to sleep if there is no SPI activity for some time and
+ * miss the first few bits/bytes on the bus. In such case, wake it up
+ * by asserting CS and give it time to start up.
+ */
+static bool cr50_needs_waking(struct cr50_spi_phy *phy)
+{
+ /*
+ * Note: there is a small chance, if Cr50 is not accessed for a few days,
+ * that time_in_range_open() will not provide the correct result after
+ * jiffies wraps around. In that case, we'll probably time out or read an
+ * incorrect value from TPM_STS and just retry the operation.
+ */
+ return !time_in_range_open(jiffies, phy->last_access,
+ phy->spi_phy.wake_after);
+}
+
+static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
+{
+ struct tpm_tis_spi_phy *phy = &cr50_phy->spi_phy;
+
+ if (cr50_needs_waking(cr50_phy)) {
+ /* Assert CS, wait 1 msec, deassert CS */
+ struct spi_transfer spi_cs_wake = { .delay_usecs = 1000 };
+
+ spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
+ /* Wait for it to fully wake */
+ usleep_range(CR50_WAKE_START_DELAY_USEC,
+ CR50_WAKE_START_DELAY_USEC * 2);
+ }
+
+ /* Reset the time when we need to wake Cr50 again */
+ phy->wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC);
+}
+
+/*
+ * Flow control: clock the bus and wait for cr50 to set the LSB before
+ * sending/receiving data. The TCG PTP spec allows this to happen during
+ * the last byte of the header, but cr50 never does that in practice,
+ * and earlier cr50 versions had a bug where it was set too early, so
+ * don't check for it during the header transfer.
+ */
+static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct device *dev = &phy->spi_device->dev;
+ unsigned long timeout = jiffies + CR50_FLOW_CONTROL;
+ struct spi_message m;
+ int ret;
+
+ spi_xfer->len = 1;
+
+ do {
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout during flow control\n");
+ return -EBUSY;
+ }
+ } while (!(phy->iobuf[0] & 0x01));
+
+ return 0;
+}
+
+static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct cr50_spi_phy *cr50_phy = to_cr50_spi_phy(phy);
+ int ret;
+
+ mutex_lock(&cr50_phy->time_track_mutex);
+ /*
+ * Do this outside of spi_bus_lock in case cr50 is not the
+ * only device on that spi bus.
+ */
+ cr50_ensure_access_delay(cr50_phy);
+ cr50_wake_if_needed(cr50_phy);
+
+ ret = tpm_tis_spi_transfer(data, addr, len, in, out);
+
+ cr50_phy->last_access = jiffies;
+ mutex_unlock(&cr50_phy->time_track_mutex);
+
+ return ret;
+}
+
+static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = {
+ .read_bytes = tpm_tis_spi_cr50_read_bytes,
+ .write_bytes = tpm_tis_spi_cr50_write_bytes,
+ .read16 = tpm_tis_spi_read16,
+ .read32 = tpm_tis_spi_read32,
+ .write32 = tpm_tis_spi_write32,
+};
+
+static void cr50_print_fw_version(struct tpm_tis_data *data)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int i, len = 0;
+ char fw_ver[TPM_CR50_MAX_FW_VER_LEN + 1];
+ char fw_ver_block[4];
+
+ /*
+ * Write anything to TPM_CR50_FW_VER to start from the beginning
+ * of the version string
+ */
+ tpm_tis_write8(data, TPM_CR50_FW_VER(data->locality), 0);
+
+ /* Read the string, 4 bytes at a time, until we get '\0' */
+ do {
+ tpm_tis_read_bytes(data, TPM_CR50_FW_VER(data->locality), 4,
+ fw_ver_block);
+ for (i = 0; i < 4 && fw_ver_block[i]; ++len, ++i)
+ fw_ver[len] = fw_ver_block[i];
+ } while (i == 4 && len < TPM_CR50_MAX_FW_VER_LEN);
+ fw_ver[len] = '\0';
+
+ dev_info(&phy->spi_device->dev, "Cr50 firmware version: %s\n", fw_ver);
+}
+
+int cr50_spi_probe(struct spi_device *spi)
+{
+ struct tpm_tis_spi_phy *phy;
+ struct cr50_spi_phy *cr50_phy;
+ int ret;
+ struct tpm_chip *chip;
+
+ cr50_phy = devm_kzalloc(&spi->dev, sizeof(*cr50_phy), GFP_KERNEL);
+ if (!cr50_phy)
+ return -ENOMEM;
+
+ phy = &cr50_phy->spi_phy;
+ phy->flow_control = cr50_spi_flow_control;
+ phy->wake_after = jiffies;
+ init_completion(&phy->ready);
+
+ cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
+ cr50_phy->last_access = jiffies;
+ mutex_init(&cr50_phy->time_track_mutex);
+
+ if (spi->irq > 0) {
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ cr50_spi_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "cr50_spi", cr50_phy);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&spi->dev, "Requesting IRQ %d failed: %d\n",
+ spi->irq, ret);
+ /*
+ * This is not fatal, the driver will fall back to
+ * delays automatically, since ready will never
+ * be completed without a registered irq handler.
+ * So, just fall through.
+ */
+ } else {
+ /*
+ * IRQ requested, let's verify that it is actually
+ * triggered, before relying on it.
+ */
+ cr50_phy->irq_needs_confirmation = true;
+ }
+ } else {
+ dev_warn(&spi->dev,
+ "No IRQ - will use delays between transactions.\n");
+ }
+
+ ret = tpm_tis_spi_init(spi, phy, -1, &tpm_spi_cr50_phy_ops);
+ if (ret)
+ return ret;
+
+ cr50_print_fw_version(&phy->priv);
+
+ chip = dev_get_drvdata(&spi->dev);
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_spi_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ /*
+ * Jiffies are not advanced during suspend, so we need to reset
+ * the time after which to wake Cr50 once we resume.
+ */
+ phy->wake_after = jiffies;
+
+ return tpm_tis_resume(dev);
+}
+#endif
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 7270e7b69262..3259426f01dc 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
port->cons.ws.ws_col = cols;
}
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
struct port_buffer *buf;
- unsigned int nr_added_bufs;
+ int nr_added_bufs;
int ret;
nr_added_bufs = 0;
do {
buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf)
- break;
+ return -ENOMEM;
spin_lock_irq(lock);
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
free_buf(buf, true);
- break;
+ return ret;
}
nr_added_bufs++;
spin_unlock_irq(lock);
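/*
 * With this change fill_queue() reports 0 or a negative errno instead of
 * a buffer count; -ENOSPC from add_inbuf() means the vq already holds
 * buffers, which the add_port() caller below deliberately tolerates.
 */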
@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id)
char debugfs_name[16];
struct port *port;
dev_t devt;
- unsigned int nr_added_bufs;
int err;
port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue);
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
+ /* We can safely ignore -ENOSPC because it means
+ * the queue already has buffers. Buffers are removed
+ * only by virtcons_remove(), not by unplug_port().
+ */
+ err = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (err < 0 && err != -ENOSPC) {
dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
goto free_device;
}
@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev)
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
- unsigned int nr_added_bufs;
-
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
- nr_added_bufs = fill_queue(portdev->c_ivq,
- &portdev->c_ivq_lock);
- if (!nr_added_bufs) {
+ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ if (err < 0) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
/*
@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev)
VIRTIO_CONSOLE_DEVICE_READY, 0);
/* Device was functional: we need full cleanup. */
virtcons_remove(vdev);
- return -ENOMEM;
+ return err;
}
} else {
/*
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index bfafd8f5e826..96b6de8a30e5 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -116,7 +116,6 @@ static int xilly_drv_probe(struct platform_device *op)
struct xilly_endpoint *endpoint;
int rc;
int irq;
- struct resource *res;
struct xilly_endpoint_hardware *ephw = &of_hw;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -129,9 +128,7 @@ static int xilly_drv_probe(struct platform_device *op)
dev_set_drvdata(dev, endpoint);
- res = platform_get_resource(op, IORESOURCE_MEM, 0);
- endpoint->registers = devm_ioremap_resource(dev, res);
-
+ endpoint->registers = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 2317d4e3daaf..287d8d58c21a 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -17,6 +17,7 @@
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
+#include <linux/cpuhotplug.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -30,6 +31,15 @@ static u64 hv_sched_clock_offset __ro_after_init;
* mechanism is used when running on older versions of Hyper-V
* that don't support Direct Mode. While Hyper-V provides
* four stimers per CPU, Linux uses only stimer0.
+ *
+ * Because Direct Mode does not require processing a VMbus
+ * message, stimer interrupts can be enabled earlier in the
+ * process of booting a CPU, and consistent with when timer
+ * interrupts are enabled for other clocksource drivers.
+ * However, for legacy versions of Hyper-V when Direct Mode
+ * is not enabled, setting up stimer interrupts must be
+ * delayed until VMbus is initialized and can process the
+ * interrupt message.
*/
static bool direct_mode_enabled;
@@ -102,17 +112,12 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
/*
* hv_stimer_init - Per-cpu initialization of the clockevent
*/
-void hv_stimer_init(unsigned int cpu)
+static int hv_stimer_init(unsigned int cpu)
{
struct clock_event_device *ce;
- /*
- * Synthetic timers are always available except on old versions of
- * Hyper-V on x86. In that case, just return as Linux will use a
- * clocksource based on emulated PIT or LAPIC timer hardware.
- */
- if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
- return;
+ if (!hv_clock_event)
+ return 0;
ce = per_cpu_ptr(hv_clock_event, cpu);
ce->name = "Hyper-V clockevent";
@@ -127,28 +132,55 @@ void hv_stimer_init(unsigned int cpu)
HV_CLOCK_HZ,
HV_MIN_DELTA_TICKS,
HV_MAX_MAX_DELTA_TICKS);
+ return 0;
}
-EXPORT_SYMBOL_GPL(hv_stimer_init);
/*
* hv_stimer_cleanup - Per-cpu cleanup of the clockevent
*/
-void hv_stimer_cleanup(unsigned int cpu)
+int hv_stimer_cleanup(unsigned int cpu)
{
struct clock_event_device *ce;
- /* Turn off clockevent device */
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
- ce = per_cpu_ptr(hv_clock_event, cpu);
+ if (!hv_clock_event)
+ return 0;
+
+ /*
+ * In the legacy case where Direct Mode is not enabled
+ * (which can only be on x86/64), stimer cleanup happens
+ * relatively early in the CPU offlining process. We
+ * must unbind the stimer-based clockevent device so
+ * that the LAPIC timer can take over until clockevents
+ * are no longer needed in the offlining process. Note
+ * that clockevents_unbind_device() eventually calls
+ * hv_ce_shutdown().
+ *
+ * The unbind should not be done when Direct Mode is
+ * enabled because we may be on an architecture where
+ * there are no other clockevent devices to fall back to.
+ */
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ if (direct_mode_enabled)
hv_ce_shutdown(ce);
- }
+ else
+ clockevents_unbind_device(ce, cpu);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
-int hv_stimer_alloc(int sint)
+int hv_stimer_alloc(void)
{
- int ret;
+ int ret = 0;
+
+ /*
+ * Synthetic timers are always available except on old versions of
+ * Hyper-V on x86. In that case, return an error, as Linux will use a
+ * clockevent based on emulated LAPIC timer hardware.
+ */
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
+ return -EINVAL;
hv_clock_event = alloc_percpu(struct clock_event_device);
if (!hv_clock_event)
@@ -159,22 +191,78 @@ int hv_stimer_alloc(int sint)
if (direct_mode_enabled) {
ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
hv_stimer0_isr);
- if (ret) {
- free_percpu(hv_clock_event);
- hv_clock_event = NULL;
- return ret;
- }
+ if (ret)
+ goto free_percpu;
+
+ /*
+ * Since we are in Direct Mode, stimer initialization
+ * can be done now with a CPUHP value in the same range
+ * as other clockevent devices.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
+ "clockevents/hyperv/stimer:starting",
+ hv_stimer_init, hv_stimer_cleanup);
+ if (ret < 0)
+ goto free_stimer0_irq;
}
+ return ret;
- stimer0_message_sint = sint;
- return 0;
+free_stimer0_irq:
+ hv_remove_stimer0_irq(stimer0_irq);
+ stimer0_irq = 0;
+free_percpu:
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+ return ret;
}
EXPORT_SYMBOL_GPL(hv_stimer_alloc);
+/*
+ * hv_stimer_legacy_init -- Called from the VMbus driver to handle
+ * the case when Direct Mode is not enabled, and the stimer
+ * must be initialized late in the CPU onlining process.
+ *
+ */
+void hv_stimer_legacy_init(unsigned int cpu, int sint)
+{
+ if (direct_mode_enabled)
+ return;
+
+ /*
+ * This function gets called by each vCPU, so setting the
+ * global stimer0_message_sint value each time is conceptually
+ * not ideal, but the value passed in is always the same and
+ * it avoids introducing yet another interface into this
+ * clocksource driver just to set the sint in the legacy case.
+ */
+ stimer0_message_sint = sint;
+ (void)hv_stimer_init(cpu);
+}
+EXPORT_SYMBOL_GPL(hv_stimer_legacy_init);
+
+/*
+ * hv_stimer_legacy_cleanup -- Called from the VMbus driver to
+ * handle the case when Direct Mode is not enabled, and the
+ * stimer must be cleaned up early in the CPU offlining
+ * process.
+ */
+void hv_stimer_legacy_cleanup(unsigned int cpu)
+{
+ if (direct_mode_enabled)
+ return;
+ (void)hv_stimer_cleanup(cpu);
+}
+EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
+
+
/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
void hv_stimer_free(void)
{
- if (direct_mode_enabled && (stimer0_irq != 0)) {
+ if (!hv_clock_event)
+ return;
+
+ if (direct_mode_enabled) {
+ cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
hv_remove_stimer0_irq(stimer0_irq);
stimer0_irq = 0;
}
@@ -190,14 +278,20 @@ EXPORT_SYMBOL_GPL(hv_stimer_free);
void hv_stimer_global_cleanup(void)
{
int cpu;
- struct clock_event_device *ce;
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
- for_each_present_cpu(cpu) {
- ce = per_cpu_ptr(hv_clock_event, cpu);
- clockevents_unbind_device(ce, cpu);
- }
+ /*
+ * hv_stimer_legacy_cleanup() will stop the stimer if Direct
+ * Mode is not enabled, and fall back to the LAPIC timer.
+ */
+ for_each_present_cpu(cpu) {
+ hv_stimer_legacy_cleanup(cpu);
}
+
+ /*
+ * If Direct Mode is enabled, the cpuhp teardown callback
+ * (hv_stimer_cleanup) will be run on all CPUs to stop the
+ * stimers.
+ */
hv_stimer_free();
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 895f53eb5771..dae1b2b5a0c5 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -430,8 +430,7 @@ static int __init samsung_pwm_alloc(struct device_node *np,
of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
if (val >= SAMSUNG_PWM_NUM) {
- pr_warning("%s: invalid channel index in samsung,pwm-outputs property\n",
- __func__);
+ pr_warn("%s: invalid channel index in samsung,pwm-outputs property\n", __func__);
continue;
}
pwm.variant.output_mask |= 1 << val;
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index ef773db080e9..9cde50cb3220 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -25,6 +25,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
struct sh_cmt_device;
/*
@@ -1052,7 +1056,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -1072,7 +1076,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -1109,7 +1113,10 @@ static void __exit sh_cmt_exit(void)
platform_driver_unregister(&sh_cmt_device_driver);
}
-early_platform_init("earlytimer", &sh_cmt_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
+#endif
+
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 62812f80b5cc..64526e50d471 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -23,6 +23,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
struct sh_mtu2_device;
struct sh_mtu2_channel {
@@ -448,7 +452,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -468,7 +472,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -517,7 +521,10 @@ static void __exit sh_mtu2_exit(void)
platform_driver_unregister(&sh_mtu2_device_driver);
}
-early_platform_init("earlytimer", &sh_mtu2_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_mtu2_device_driver);
+#endif
+
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 8c4f3753b36e..d49690d15536 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -24,6 +24,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
enum sh_tmu_model {
SH_TMU,
SH_TMU_SH3,
@@ -595,7 +599,7 @@ static int sh_tmu_probe(struct platform_device *pdev)
struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -615,7 +619,8 @@ static int sh_tmu_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -665,7 +670,10 @@ static void __exit sh_tmu_exit(void)
platform_driver_unregister(&sh_tmu_device_driver);
}
-early_platform_init("earlytimer", &sh_tmu_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
+#endif
+
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 470c7ef02ea4..4e54856ce2a5 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -3,9 +3,9 @@
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
- * All RISC-V systems have a timer attached to every hart. These timers can be
- * read from the "time" and "timeh" CSRs, and can use the SBI to setup
- * events.
+ * All RISC-V systems have a timer attached to every hart. These timers can
+ * either be read via the "time" and "timeh" CSRs, with the SBI used to
+ * set up events, or be accessed directly through MMIO registers.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -13,14 +13,29 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/sched_clock.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/smp.h>
#include <asm/sbi.h>
+u64 __iomem *riscv_time_cmp;
+u64 __iomem *riscv_time_val;
+
+static inline void mmio_set_timer(u64 val)
+{
+ void __iomem *r;
+
+ r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
+ writeq_relaxed(val, r);
+}
+
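/*
 * A sketch of the read side that complements mmio_set_timer() above,
 * assuming riscv_time_val has been mapped by the platform timer code;
 * the SBI configuration instead reads the "time" CSR via get_cycles64():
 */
static inline u64 mmio_get_time(void)
{
	return readq_relaxed(riscv_time_val);
}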
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
- csr_set(sie, SIE_STIE);
- sbi_set_timer(get_cycles64() + delta);
+ csr_set(CSR_IE, IE_TIE);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_set_timer(get_cycles64() + delta);
+ else
+ mmio_set_timer(get_cycles64() + delta);
return 0;
}
@@ -61,13 +76,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
- csr_set(sie, SIE_STIE);
+ csr_set(CSR_IE, IE_TIE);
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
- csr_clear(sie, SIE_STIE);
+ csr_clear(CSR_IE, IE_TIE);
return 0;
}
@@ -76,7 +91,7 @@ void riscv_timer_interrupt(void)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
- csr_clear(sie, SIE_STIE);
+ csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
}
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 00b113f4b958..17e67a84777d 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -562,11 +562,10 @@ static const struct iio_chan_spec quad8_channels[] = {
};
static int quad8_signal_read(struct counter_device *counter,
- struct counter_signal *signal, struct counter_signal_read_value *val)
+ struct counter_signal *signal, enum counter_signal_value *val)
{
const struct quad8_iio *const priv = counter->priv;
unsigned int state;
- enum counter_signal_level level;
/* Only Index signal levels can be read */
if (signal->id < 16)
@@ -575,22 +574,19 @@ static int quad8_signal_read(struct counter_device *counter,
state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
& BIT(signal->id - 16);
- level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
-
- counter_signal_read_value_set(val, COUNTER_SIGNAL_LEVEL, &level);
+ *val = (state) ? COUNTER_SIGNAL_HIGH : COUNTER_SIGNAL_LOW;
return 0;
}
static int quad8_count_read(struct counter_device *counter,
- struct counter_count *count, struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
const struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
- unsigned long position;
int i;
flags = inb(base_offset + 1);
@@ -598,36 +594,27 @@ static int quad8_count_read(struct counter_device *counter,
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
- position = (unsigned long)(borrow ^ carry) << 24;
+ *val = (unsigned long)(borrow ^ carry) << 24;
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
for (i = 0; i < 3; i++)
- position |= (unsigned long)inb(base_offset) << (8 * i);
-
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+ *val |= (unsigned long)inb(base_offset) << (8 * i);
return 0;
}
static int quad8_count_write(struct counter_device *counter,
- struct counter_count *count, struct counter_count_write_value *val)
+ struct counter_count *count, unsigned long val)
{
const struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
- int err;
- unsigned long position;
int i;
- err = counter_count_write_value_get(&position, COUNTER_COUNT_POSITION,
- val);
- if (err)
- return err;
-
/* Only 24-bit values are supported */
- if (position > 0xFFFFFF)
+ if (val > 0xFFFFFF)
return -EINVAL;
/* Reset Byte Pointer */
@@ -635,7 +622,7 @@ static int quad8_count_write(struct counter_device *counter,
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
- outb(position >> (8 * i), base_offset);
+ outb(val >> (8 * i), base_offset);
/* Transfer Preset Register to Counter */
outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
@@ -644,9 +631,9 @@ static int quad8_count_write(struct counter_device *counter,
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register back to original value */
- position = priv->preset[count->id];
+ val = priv->preset[count->id];
for (i = 0; i < 3; i++)
- outb(position >> (8 * i), base_offset);
+ outb(val >> (8 * i), base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
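
For readability, the 25-bit position that quad8_count_read() now assembles directly into *val is equivalent to the following stand-alone helper (a sketch only, assuming the flags and the three latched counter bytes have already been read from the device):

static unsigned long quad8_assemble_position(unsigned int borrow,
					     unsigned int carry,
					     const unsigned char bytes[3])
{
	/* Borrow XOR Carry supplies a 25th bit above the 24-bit counter */
	unsigned long position = (unsigned long)(borrow ^ carry) << 24;
	int i;

	for (i = 0; i < 3; i++)
		position |= (unsigned long)bytes[i] << (8 * i);

	return position;
}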
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 2967d0a9ff91..c80fa76bb531 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -49,6 +49,17 @@ config STM32_LPTIMER_CNT
To compile this driver as a module, choose M here: the
module will be called stm32-lptimer-cnt.
+config TI_EQEP
+ tristate "TI eQEP counter driver"
+ depends on (SOC_AM33XX || COMPILE_TEST)
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Quadrature
+ Encoder Pulse (eQEP) counter driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ti-eqep.
+
config FTM_QUADDEC
tristate "Flex Timer Module Quadrature decoder driver"
depends on HAS_IOMEM && OF
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 40d35522937d..55142d1f4c43 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_COUNTER) += counter.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
+obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
index 106bc7180cd8..6a683d086008 100644
--- a/drivers/counter/counter.c
+++ b/drivers/counter/counter.c
@@ -220,86 +220,6 @@ ssize_t counter_device_enum_available_read(struct counter_device *counter,
}
EXPORT_SYMBOL_GPL(counter_device_enum_available_read);
-static const char *const counter_signal_level_str[] = {
- [COUNTER_SIGNAL_LEVEL_LOW] = "low",
- [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
-};
-
-/**
- * counter_signal_read_value_set - set counter_signal_read_value data
- * @val: counter_signal_read_value structure to set
- * @type: property Signal data represents
- * @data: Signal data
- *
- * This function sets an opaque counter_signal_read_value structure with the
- * provided Signal data.
- */
-void counter_signal_read_value_set(struct counter_signal_read_value *const val,
- const enum counter_signal_value_type type,
- void *const data)
-{
- if (type == COUNTER_SIGNAL_LEVEL)
- val->len = sprintf(val->buf, "%s\n",
- counter_signal_level_str[*(enum counter_signal_level *)data]);
- else
- val->len = 0;
-}
-EXPORT_SYMBOL_GPL(counter_signal_read_value_set);
-
-/**
- * counter_count_read_value_set - set counter_count_read_value data
- * @val: counter_count_read_value structure to set
- * @type: property Count data represents
- * @data: Count data
- *
- * This function sets an opaque counter_count_read_value structure with the
- * provided Count data.
- */
-void counter_count_read_value_set(struct counter_count_read_value *const val,
- const enum counter_count_value_type type,
- void *const data)
-{
- switch (type) {
- case COUNTER_COUNT_POSITION:
- val->len = sprintf(val->buf, "%lu\n", *(unsigned long *)data);
- break;
- default:
- val->len = 0;
- }
-}
-EXPORT_SYMBOL_GPL(counter_count_read_value_set);
-
-/**
- * counter_count_write_value_get - get counter_count_write_value data
- * @data: Count data
- * @type: property Count data represents
- * @val: counter_count_write_value structure containing data
- *
- * This function extracts Count data from the provided opaque
- * counter_count_write_value structure and stores it at the address provided by
- * @data.
- *
- * RETURNS:
- * 0 on success, negative error number on failure.
- */
-int counter_count_write_value_get(void *const data,
- const enum counter_count_value_type type,
- const struct counter_count_write_value *const val)
-{
- int err;
-
- switch (type) {
- case COUNTER_COUNT_POSITION:
- err = kstrtoul(val->buf, 0, data);
- if (err)
- return err;
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(counter_count_write_value_get);
-
struct counter_attr_parm {
struct counter_device_attr_group *group;
const char *prefix;
@@ -369,6 +289,11 @@ struct counter_signal_unit {
struct counter_signal *signal;
};
+static const char *const counter_signal_value_str[] = {
+ [COUNTER_SIGNAL_LOW] = "low",
+ [COUNTER_SIGNAL_HIGH] = "high"
+};
+
static ssize_t counter_signal_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -377,13 +302,13 @@ static ssize_t counter_signal_show(struct device *dev,
const struct counter_signal_unit *const component = devattr->component;
struct counter_signal *const signal = component->signal;
int err;
- struct counter_signal_read_value val = { .buf = buf };
+ enum counter_signal_value val;
err = counter->ops->signal_read(counter, signal, &val);
if (err)
return err;
- return val.len;
+ return sprintf(buf, "%s\n", counter_signal_value_str[val]);
}
struct counter_name_unit {
@@ -788,13 +713,13 @@ static ssize_t counter_count_show(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
int err;
- struct counter_count_read_value val = { .buf = buf };
+ unsigned long val;
err = counter->ops->count_read(counter, count, &val);
if (err)
return err;
- return val.len;
+ return sprintf(buf, "%lu\n", val);
}
static ssize_t counter_count_store(struct device *dev,
@@ -806,9 +731,13 @@ static ssize_t counter_count_store(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
int err;
- struct counter_count_write_value val = { .buf = buf };
+ unsigned long val;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
- err = counter->ops->count_write(counter, count, &val);
+ err = counter->ops->count_write(counter, count, val);
if (err)
return err;
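
With the opaque read/write value structures removed, a driver's callbacks reduce to plain-value accessors. A minimal sketch of the new contract (all foo_* names are hypothetical):

static int foo_count_read(struct counter_device *counter,
			  struct counter_count *count, unsigned long *val)
{
	struct foo_priv *priv = counter->priv;

	*val = foo_hw_read_position(priv);	/* hypothetical register read */

	return 0;
}

static int foo_count_write(struct counter_device *counter,
			   struct counter_count *count, unsigned long val)
{
	struct foo_priv *priv = counter->priv;

	if (val > FOO_MAX_POSITION)	/* range checks stay in the driver */
		return -EINVAL;

	return foo_hw_write_position(priv, val);	/* hypothetical */
}

The sysfs formatting and parsing (sprintf()/kstrtoul()) now happen exactly once, in counter_count_show() and counter_count_store() above.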
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 4046aa9f9234..c2b3fdfd8b77 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -178,31 +178,25 @@ static const enum counter_count_function ftm_quaddec_count_functions[] = {
static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_read_value *val)
+ unsigned long *val)
{
struct ftm_quaddec *const ftm = counter->priv;
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cntval);
+ *val = cntval;
return 0;
}
static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_write_value *val)
+ const unsigned long val)
{
struct ftm_quaddec *const ftm = counter->priv;
- u32 cnt;
- int err;
- err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
- if (err)
- return err;
-
- if (cnt != 0) {
+ if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
return -EINVAL;
}
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index bbc930a5962c..8e276eb655f5 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -347,7 +347,7 @@ static const struct iio_chan_spec stm32_lptim_cnt_channels = {
};
/**
- * stm32_lptim_cnt_function - enumerates stm32 LPTimer counter & encoder modes
+ * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes
* @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
* @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
*/
@@ -377,8 +377,7 @@ static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
};
static int stm32_lptim_cnt_read(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
struct stm32_lptim_cnt *const priv = counter->priv;
u32 cnt;
@@ -388,7 +387,7 @@ static int stm32_lptim_cnt_read(struct counter_device *counter,
if (ret)
return ret;
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+ *val = cnt;
return 0;
}
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 644ba18a72ad..3eafccec3beb 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -28,7 +28,7 @@ struct stm32_timer_cnt {
};
/**
- * stm32_count_function - enumerates stm32 timer counter encoder modes
+ * enum stm32_count_function - enumerates stm32 timer counter encoder modes
* @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
* @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
* @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
@@ -48,34 +48,27 @@ static enum counter_count_function stm32_count_functions[] = {
};
static int stm32_count_read(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+ *val = cnt;
return 0;
}
static int stm32_count_write(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_write_value *val)
+ const unsigned long val)
{
struct stm32_timer_cnt *const priv = counter->priv;
- u32 cnt;
- int err;
-
- err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
- if (err)
- return err;
- if (cnt > priv->ceiling)
+ if (val > priv->ceiling)
return -EINVAL;
- return regmap_write(priv->regmap, TIM_CNT, cnt);
+ return regmap_write(priv->regmap, TIM_CNT, val);
}
static int stm32_count_function_get(struct counter_device *counter,
@@ -219,8 +212,8 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter,
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (!(cr1 & TIM_CR1_CEN))
- clk_enable(priv->clk);
+ if (!(cr1 & TIM_CR1_CEN))
+ clk_enable(priv->clk);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
new file mode 100644
index 000000000000..1ff07faef27f
--- /dev/null
+++ b/drivers/counter/ti-eqep.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 David Lechner <david@lechnology.com>
+ *
+ * Counter driver for Texas Instruments Enhanced Quadrature Encoder Pulse (eQEP)
+ */
+
+#include <linux/bitops.h>
+#include <linux/counter.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+/* 32-bit registers */
+#define QPOSCNT 0x0
+#define QPOSINIT 0x4
+#define QPOSMAX 0x8
+#define QPOSCMP 0xc
+#define QPOSILAT 0x10
+#define QPOSSLAT 0x14
+#define QPOSLAT 0x18
+#define QUTMR 0x1c
+#define QUPRD 0x20
+
+/* 16-bit registers */
+#define QWDTMR 0x0 /* 0x24 */
+#define QWDPRD 0x2 /* 0x26 */
+#define QDECCTL 0x4 /* 0x28 */
+#define QEPCTL 0x6 /* 0x2a */
+#define QCAPCTL 0x8 /* 0x2c */
+#define QPOSCTL 0xa /* 0x2e */
+#define QEINT 0xc /* 0x30 */
+#define QFLG 0xe /* 0x32 */
+#define QCLR 0x10 /* 0x34 */
+#define QFRC 0x12 /* 0x36 */
+#define QEPSTS 0x14 /* 0x38 */
+#define QCTMR 0x16 /* 0x3a */
+#define QCPRD 0x18 /* 0x3c */
+#define QCTMRLAT 0x1a /* 0x3e */
+#define QCPRDLAT 0x1c /* 0x40 */
+
+#define QDECCTL_QSRC_SHIFT 14
+#define QDECCTL_QSRC GENMASK(15, 14)
+#define QDECCTL_SOEN BIT(13)
+#define QDECCTL_SPSEL BIT(12)
+#define QDECCTL_XCR BIT(11)
+#define QDECCTL_SWAP BIT(10)
+#define QDECCTL_IGATE BIT(9)
+#define QDECCTL_QAP BIT(8)
+#define QDECCTL_QBP BIT(7)
+#define QDECCTL_QIP BIT(6)
+#define QDECCTL_QSP BIT(5)
+
+#define QEPCTL_FREE_SOFT GENMASK(15, 14)
+#define QEPCTL_PCRM GENMASK(13, 12)
+#define QEPCTL_SEI GENMASK(11, 10)
+#define QEPCTL_IEI GENMASK(9, 8)
+#define QEPCTL_SWI BIT(7)
+#define QEPCTL_SEL BIT(6)
+#define QEPCTL_IEL GENMASK(5, 4)
+#define QEPCTL_PHEN BIT(3)
+#define QEPCTL_QCLM BIT(2)
+#define QEPCTL_UTE BIT(1)
+#define QEPCTL_WDE BIT(0)
+
+/* EQEP Inputs */
+enum {
+ TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
+ TI_EQEP_SIGNAL_QEPB, /* QEPB/XDIR */
+};
+
+/* Position Counter Input Modes */
+enum {
+ TI_EQEP_COUNT_FUNC_QUAD_COUNT,
+ TI_EQEP_COUNT_FUNC_DIR_COUNT,
+ TI_EQEP_COUNT_FUNC_UP_COUNT,
+ TI_EQEP_COUNT_FUNC_DOWN_COUNT,
+};
+
+enum {
+ TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES,
+ TI_EQEP_SYNAPSE_ACTION_RISING_EDGE,
+ TI_EQEP_SYNAPSE_ACTION_NONE,
+};
+
+struct ti_eqep_cnt {
+ struct counter_device counter;
+ struct regmap *regmap32;
+ struct regmap *regmap16;
+};
+
+static int ti_eqep_count_read(struct counter_device *counter,
+ struct counter_count *count, unsigned long *val)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 cnt;
+
+ regmap_read(priv->regmap32, QPOSCNT, &cnt);
+ *val = cnt;
+
+ return 0;
+}
+
+static int ti_eqep_count_write(struct counter_device *counter,
+ struct counter_count *count, unsigned long val)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 max;
+
+ regmap_read(priv->regmap32, QPOSMAX, &max);
+ if (val > max)
+ return -EINVAL;
+
+ return regmap_write(priv->regmap32, QPOSCNT, val);
+}
+
+static int ti_eqep_function_get(struct counter_device *counter,
+ struct counter_count *count, size_t *function)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qdecctl;
+
+ regmap_read(priv->regmap16, QDECCTL, &qdecctl);
+ *function = (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT;
+
+ return 0;
+}
+
+static int ti_eqep_function_set(struct counter_device *counter,
+ struct counter_count *count, size_t function)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+
+ return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC,
+ function << QDECCTL_QSRC_SHIFT);
+}
+
+static int ti_eqep_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse, size_t *action)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ size_t function;
+ u32 qdecctl;
+ int err;
+
+ err = ti_eqep_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ switch (function) {
+ case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ /* In quadrature mode, the rising and falling edges of both
+ * QEPA and QEPB trigger QCLK.
+ */
+ *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ /* In direction-count mode, only the rising edge of QEPA is
+ * counted; QEPB gives the direction.
+ */
+ switch (synapse->signal->id) {
+ case TI_EQEP_SIGNAL_QEPA:
+ *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ default:
+ *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ break;
+ }
+ break;
+ case TI_EQEP_COUNT_FUNC_UP_COUNT:
+ case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ /* In up/down-count modes only QEPA is counted and QEPB is not
+ * used.
+ */
+ switch (synapse->signal->id) {
+ case TI_EQEP_SIGNAL_QEPA:
+ err = regmap_read(priv->regmap16, QDECCTL, &qdecctl);
+ if (err)
+ return err;
+
+ if (qdecctl & QDECCTL_XCR)
+ *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ else
+ *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ default:
+ *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops ti_eqep_counter_ops = {
+ .count_read = ti_eqep_count_read,
+ .count_write = ti_eqep_count_write,
+ .function_get = ti_eqep_function_get,
+ .function_set = ti_eqep_function_set,
+ .action_get = ti_eqep_action_get,
+};
+
+static ssize_t ti_eqep_position_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qposmax;
+
+ regmap_read(priv->regmap32, QPOSMAX, &qposmax);
+
+ return sprintf(buf, "%u\n", qposmax);
+}
+
+static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ u32 res;
+
+ err = kstrtouint(buf, 0, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write(priv->regmap32, QPOSMAX, res);
+
+ return len;
+}
+
+static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qposinit;
+
+ regmap_read(priv->regmap32, QPOSINIT, &qposinit);
+
+ return sprintf(buf, "%u\n", qposinit);
+}
+
+static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ u32 res;
+
+ err = kstrtouint(buf, 0, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write(priv->regmap32, QPOSINIT, res);
+
+ return len;
+}
+
+static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qepctl;
+
+ regmap_read(priv->regmap16, QEPCTL, &qepctl);
+
+ return sprintf(buf, "%u\n", !!(qepctl & QEPCTL_PHEN));
+}
+
+static ssize_t ti_eqep_position_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ bool res;
+
+ err = kstrtobool(buf, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, res ? -1 : 0);
+
+ return len;
+}
+
+static struct counter_count_ext ti_eqep_position_ext[] = {
+ {
+ .name = "ceiling",
+ .read = ti_eqep_position_ceiling_read,
+ .write = ti_eqep_position_ceiling_write,
+ },
+ {
+ .name = "floor",
+ .read = ti_eqep_position_floor_read,
+ .write = ti_eqep_position_floor_write,
+ },
+ {
+ .name = "enable",
+ .read = ti_eqep_position_enable_read,
+ .write = ti_eqep_position_enable_write,
+ },
+};
+
+static struct counter_signal ti_eqep_signals[] = {
+ [TI_EQEP_SIGNAL_QEPA] = {
+ .id = TI_EQEP_SIGNAL_QEPA,
+ .name = "QEPA"
+ },
+ [TI_EQEP_SIGNAL_QEPB] = {
+ .id = TI_EQEP_SIGNAL_QEPB,
+ .name = "QEPB"
+ },
+};
+
+static const enum counter_count_function ti_eqep_position_functions[] = {
+ [TI_EQEP_COUNT_FUNC_QUAD_COUNT] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+ [TI_EQEP_COUNT_FUNC_DIR_COUNT] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
+ [TI_EQEP_COUNT_FUNC_UP_COUNT] = COUNTER_COUNT_FUNCTION_INCREASE,
+ [TI_EQEP_COUNT_FUNC_DOWN_COUNT] = COUNTER_COUNT_FUNCTION_DECREASE,
+};
+
+static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = {
+ [TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ [TI_EQEP_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [TI_EQEP_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static struct counter_synapse ti_eqep_position_synapses[] = {
+ {
+ .actions_list = ti_eqep_position_synapse_actions,
+ .num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
+ .signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPA],
+ },
+ {
+ .actions_list = ti_eqep_position_synapse_actions,
+ .num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
+ .signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPB],
+ },
+};
+
+static struct counter_count ti_eqep_counts[] = {
+ {
+ .id = 0,
+ .name = "QPOSCNT",
+ .functions_list = ti_eqep_position_functions,
+ .num_functions = ARRAY_SIZE(ti_eqep_position_functions),
+ .synapses = ti_eqep_position_synapses,
+ .num_synapses = ARRAY_SIZE(ti_eqep_position_synapses),
+ .ext = ti_eqep_position_ext,
+ .num_ext = ARRAY_SIZE(ti_eqep_position_ext),
+ },
+};
+
+static const struct regmap_config ti_eqep_regmap32_config = {
+ .name = "32-bit",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x24,
+};
+
+static const struct regmap_config ti_eqep_regmap16_config = {
+ .name = "16-bit",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x1e,
+};
+
+static int ti_eqep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_eqep_cnt *priv;
+ void __iomem *base;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap32 = devm_regmap_init_mmio(dev, base,
+ &ti_eqep_regmap32_config);
+ if (IS_ERR(priv->regmap32))
+ return PTR_ERR(priv->regmap32);
+
+ priv->regmap16 = devm_regmap_init_mmio(dev, base + 0x24,
+ &ti_eqep_regmap16_config);
+ if (IS_ERR(priv->regmap16))
+ return PTR_ERR(priv->regmap16);
+
+ priv->counter.name = dev_name(dev);
+ priv->counter.parent = dev;
+ priv->counter.ops = &ti_eqep_counter_ops;
+ priv->counter.counts = ti_eqep_counts;
+ priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts);
+ priv->counter.signals = ti_eqep_signals;
+ priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals);
+ priv->counter.priv = priv;
+
+ platform_set_drvdata(pdev, priv);
+
+ /*
+ * Need to make sure power is turned on. On AM33xx, this comes from the
+ * parent PWMSS bus driver. On AM17xx, this comes from the PSC power
+ * domain.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = counter_register(&priv->counter);
+ if (err < 0) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ti_eqep_remove(struct platform_device *pdev)
+{
+ struct ti_eqep_cnt *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ counter_unregister(&priv->counter);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_eqep_of_match[] = {
+ { .compatible = "ti,am3352-eqep", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
+
+static struct platform_driver ti_eqep_driver = {
+ .probe = ti_eqep_probe,
+ .remove = ti_eqep_remove,
+ .driver = {
+ .name = "ti-eqep-cnt",
+ .of_match_table = ti_eqep_of_match,
+ },
+};
+module_platform_driver(ti_eqep_driver);
+
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI eQEP counter driver");
+MODULE_LICENSE("GPL v2");
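
One idiom in the new driver is worth spelling out: ti_eqep_position_enable_write() passes res ? -1 : 0 as the value to regmap_write_bits(), relying on the QEPCTL_PHEN mask to pick the one bit out of the all-ones word. An equivalent, more explicit form would be (a sketch, not part of the patch):

static int ti_eqep_set_phen(struct ti_eqep_cnt *priv, bool on)
{
	/* regmap_write_bits() only touches bits inside the mask */
	return regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN,
				 on ? QEPCTL_PHEN : 0);
}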
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a905796f7f85..3858d86cf409 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -49,14 +49,6 @@ config ARM_ARMADA_8K_CPUFREQ
If in doubt, say N.
-# big LITTLE core layer and glue drivers
-config ARM_BIG_LITTLE_CPUFREQ
- tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- select PM_OPP
- help
- This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
-
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
@@ -69,7 +61,9 @@ config ARM_SCPI_CPUFREQ
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ depends on ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on ARCH_VEXPRESS_SPC
+ select PM_OPP
help
This adds the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9a9f5ccd13d9..f6670c4abbb0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
-
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
deleted file mode 100644
index 7fe52fcddcf1..000000000000
--- a/drivers/cpufreq/arm_big_little.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * ARM big.LITTLE Platforms CPUFreq support
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/topology.h>
-#include <linux/types.h>
-
-#include "arm_big_little.h"
-
-/* Currently we support only two clusters */
-#define A15_CLUSTER 0
-#define A7_CLUSTER 1
-#define MAX_CLUSTERS 2
-
-#ifdef CONFIG_BL_SWITCHER
-#include <asm/bL_switcher.h>
-static bool bL_switching_enabled;
-#define is_bL_switching_enabled() bL_switching_enabled
-#define set_switching_enabled(x) (bL_switching_enabled = (x))
-#else
-#define is_bL_switching_enabled() false
-#define set_switching_enabled(x) do { } while (0)
-#define bL_switch_request(...) do { } while (0)
-#define bL_switcher_put_enabled() do { } while (0)
-#define bL_switcher_get_enabled() do { } while (0)
-#endif
-
-#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
-#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static const struct cpufreq_arm_bL_ops *arm_bL_ops;
-static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
-static atomic_t cluster_usage[MAX_CLUSTERS + 1];
-
-static unsigned int clk_big_min; /* (Big) clock frequencies */
-static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
-
-static DEFINE_PER_CPU(unsigned int, physical_cluster);
-static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-
-static struct mutex cluster_lock[MAX_CLUSTERS];
-
-static inline int raw_cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
-static inline int cpu_to_cluster(int cpu)
-{
- return is_bL_switching_enabled() ?
- MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
-}
-
-static unsigned int find_cluster_maxfreq(int cluster)
-{
- int j;
- u32 max_freq = 0, cpu_freq;
-
- for_each_online_cpu(j) {
- cpu_freq = per_cpu(cpu_last_req_freq, j);
-
- if ((cluster == per_cpu(physical_cluster, j)) &&
- (max_freq < cpu_freq))
- max_freq = cpu_freq;
- }
-
- pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
- max_freq);
-
- return max_freq;
-}
-
-static unsigned int clk_get_cpu_rate(unsigned int cpu)
-{
- u32 cur_cluster = per_cpu(physical_cluster, cpu);
- u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
-
- /* For switcher we use virtual A7 clock rates */
- if (is_bL_switching_enabled())
- rate = VIRT_FREQ(cur_cluster, rate);
-
- pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
- cur_cluster, rate);
-
- return rate;
-}
-
-static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
-{
- if (is_bL_switching_enabled()) {
- pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
- cpu));
-
- return per_cpu(cpu_last_req_freq, cpu);
- } else {
- return clk_get_cpu_rate(cpu);
- }
-}
-
-static unsigned int
-bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
-{
- u32 new_rate, prev_rate;
- int ret;
- bool bLs = is_bL_switching_enabled();
-
- mutex_lock(&cluster_lock[new_cluster]);
-
- if (bLs) {
- prev_rate = per_cpu(cpu_last_req_freq, cpu);
- per_cpu(cpu_last_req_freq, cpu) = rate;
- per_cpu(physical_cluster, cpu) = new_cluster;
-
- new_rate = find_cluster_maxfreq(new_cluster);
- new_rate = ACTUAL_FREQ(new_cluster, new_rate);
- } else {
- new_rate = rate;
- }
-
- pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
- __func__, cpu, old_cluster, new_cluster, new_rate);
-
- ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
- if (!ret) {
- /*
- * FIXME: clk_set_rate hasn't returned an error here however it
- * may be that clk_change_rate failed due to hardware or
- * firmware issues and wasn't able to report that due to the
- * current design of the clk core layer. To work around this
- * problem we will read back the clock rate and check it is
- * correct. This needs to be removed once clk core is fixed.
- */
- if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
- ret = -EIO;
- }
-
- if (WARN_ON(ret)) {
- pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
- new_cluster);
- if (bLs) {
- per_cpu(cpu_last_req_freq, cpu) = prev_rate;
- per_cpu(physical_cluster, cpu) = old_cluster;
- }
-
- mutex_unlock(&cluster_lock[new_cluster]);
-
- return ret;
- }
-
- mutex_unlock(&cluster_lock[new_cluster]);
-
- /* Recalc freq for old cluster when switching clusters */
- if (old_cluster != new_cluster) {
- pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
- __func__, cpu, old_cluster, new_cluster);
-
- /* Switch cluster */
- bL_switch_request(cpu, new_cluster);
-
- mutex_lock(&cluster_lock[old_cluster]);
-
- /* Set freq of old cluster if there are cpus left on it */
- new_rate = find_cluster_maxfreq(old_cluster);
- new_rate = ACTUAL_FREQ(old_cluster, new_rate);
-
- if (new_rate) {
- pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
- __func__, old_cluster, new_rate);
-
- if (clk_set_rate(clk[old_cluster], new_rate * 1000))
- pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
- __func__, ret, old_cluster);
- }
- mutex_unlock(&cluster_lock[old_cluster]);
- }
-
- return 0;
-}
-
-/* Set clock frequency */
-static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
- unsigned int freqs_new;
- int ret;
-
- cur_cluster = cpu_to_cluster(cpu);
- new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
-
- freqs_new = freq_table[cur_cluster][index].frequency;
-
- if (is_bL_switching_enabled()) {
- if ((actual_cluster == A15_CLUSTER) &&
- (freqs_new < clk_big_min)) {
- new_cluster = A7_CLUSTER;
- } else if ((actual_cluster == A7_CLUSTER) &&
- (freqs_new > clk_little_max)) {
- new_cluster = A15_CLUSTER;
- }
- }
-
- ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
-
- if (!ret) {
- arch_set_freq_scale(policy->related_cpus, freqs_new,
- policy->cpuinfo.max_freq);
- }
-
- return ret;
-}
-
-static inline u32 get_table_count(struct cpufreq_frequency_table *table)
-{
- int count;
-
- for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
- ;
-
- return count;
-}
-
-/* get the minimum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_min(struct cpufreq_frequency_table *table)
-{
- struct cpufreq_frequency_table *pos;
- uint32_t min_freq = ~0;
- cpufreq_for_each_entry(pos, table)
- if (pos->frequency < min_freq)
- min_freq = pos->frequency;
- return min_freq;
-}
-
-/* get the maximum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_max(struct cpufreq_frequency_table *table)
-{
- struct cpufreq_frequency_table *pos;
- uint32_t max_freq = 0;
- cpufreq_for_each_entry(pos, table)
- if (pos->frequency > max_freq)
- max_freq = pos->frequency;
- return max_freq;
-}
-
-static int merge_cluster_tables(void)
-{
- int i, j, k = 0, count = 1;
- struct cpufreq_frequency_table *table;
-
- for (i = 0; i < MAX_CLUSTERS; i++)
- count += get_table_count(freq_table[i]);
-
- table = kcalloc(count, sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- freq_table[MAX_CLUSTERS] = table;
-
- /* Add in reverse order to get freqs in increasing order */
- for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
- for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
- j++) {
- table[k].frequency = VIRT_FREQ(i,
- freq_table[i][j].frequency);
- pr_debug("%s: index: %d, freq: %d\n", __func__, k,
- table[k].frequency);
- k++;
- }
- }
-
- table[k].driver_data = k;
- table[k].frequency = CPUFREQ_TABLE_END;
-
- pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
-
- return 0;
-}
-
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-
- if (!freq_table[cluster])
- return;
-
- clk_put(clk[cluster]);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
- if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpumask);
- dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
-}
-
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
- int i;
-
- if (atomic_dec_return(&cluster_usage[cluster]))
- return;
-
- if (cluster < MAX_CLUSTERS)
- return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
-
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return;
- }
-
- _put_cluster_clk_and_freq_table(cdev, cpumask);
- }
-
- /* free virtual table */
- kfree(freq_table[cluster]);
-}
-
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
- int ret;
-
- if (freq_table[cluster])
- return 0;
-
- ret = arm_bL_ops->init_opp_table(cpumask);
- if (ret) {
- dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
- __func__, cpu_dev->id, ret);
- goto out;
- }
-
- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
- if (ret) {
- dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
- __func__, cpu_dev->id, ret);
- goto free_opp_table;
- }
-
- clk[cluster] = clk_get(cpu_dev, NULL);
- if (!IS_ERR(clk[cluster])) {
- dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
- __func__, clk[cluster], freq_table[cluster],
- cluster);
- return 0;
- }
-
- dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
- __func__, cpu_dev->id, cluster);
- ret = PTR_ERR(clk[cluster]);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-
-free_opp_table:
- if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpumask);
-out:
- dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
- cluster);
- return ret;
-}
-
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
- int i, ret;
-
- if (atomic_inc_return(&cluster_usage[cluster]) != 1)
- return 0;
-
- if (cluster < MAX_CLUSTERS) {
- ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
- if (ret)
- atomic_dec(&cluster_usage[cluster]);
- return ret;
- }
-
- /*
- * Get data for all clusters and fill virtual cluster with a merge of
- * both
- */
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return -ENODEV;
- }
-
- ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
- if (ret)
- goto put_clusters;
- }
-
- ret = merge_cluster_tables();
- if (ret)
- goto put_clusters;
-
- /* Assuming 2 cluster, set clk_big_min and clk_little_max */
- clk_big_min = get_table_min(freq_table[0]);
- clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
-
- pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
- __func__, cluster, clk_big_min, clk_little_max);
-
- return 0;
-
-put_clusters:
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return -ENODEV;
- }
-
- _put_cluster_clk_and_freq_table(cdev, cpumask);
- }
-
- atomic_dec(&cluster_usage[cluster]);
-
- return ret;
-}
-
-/* Per-CPU initialization */
-static int bL_cpufreq_init(struct cpufreq_policy *policy)
-{
- u32 cur_cluster = cpu_to_cluster(policy->cpu);
- struct device *cpu_dev;
- int ret;
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- if (cur_cluster < MAX_CLUSTERS) {
- int cpu;
-
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
-
- for_each_cpu(cpu, policy->cpus)
- per_cpu(physical_cluster, cpu) = cur_cluster;
- } else {
- /* Assumption: during init, we are always running on A15 */
- per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
- }
-
- ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
- if (ret)
- return ret;
-
- policy->freq_table = freq_table[cur_cluster];
- policy->cpuinfo.transition_latency =
- arm_bL_ops->get_transition_latency(cpu_dev);
-
- dev_pm_opp_of_register_em(policy->cpus);
-
- if (is_bL_switching_enabled())
- per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
-
- dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
- return 0;
-}
-
-static int bL_cpufreq_exit(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev;
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- if (cur_cluster < MAX_CLUSTERS) {
- cpufreq_cooling_unregister(cdev[cur_cluster]);
- cdev[cur_cluster] = NULL;
- }
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
- dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
-
- return 0;
-}
-
-static void bL_cpufreq_ready(struct cpufreq_policy *policy)
-{
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- /* Do not register a cpu_cooling device if we are in IKS mode */
- if (cur_cluster >= MAX_CLUSTERS)
- return;
-
- cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
-static struct cpufreq_driver bL_cpufreq_driver = {
- .name = "arm-big-little",
- .flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = bL_cpufreq_set_target,
- .get = bL_cpufreq_get_rate,
- .init = bL_cpufreq_init,
- .exit = bL_cpufreq_exit,
- .ready = bL_cpufreq_ready,
- .attr = cpufreq_generic_attr,
-};
-
-#ifdef CONFIG_BL_SWITCHER
-static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
- unsigned long action, void *_arg)
-{
- pr_debug("%s: action: %ld\n", __func__, action);
-
- switch (action) {
- case BL_NOTIFY_PRE_ENABLE:
- case BL_NOTIFY_PRE_DISABLE:
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- break;
-
- case BL_NOTIFY_POST_ENABLE:
- set_switching_enabled(true);
- cpufreq_register_driver(&bL_cpufreq_driver);
- break;
-
- case BL_NOTIFY_POST_DISABLE:
- set_switching_enabled(false);
- cpufreq_register_driver(&bL_cpufreq_driver);
- break;
-
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block bL_switcher_notifier = {
- .notifier_call = bL_cpufreq_switcher_notifier,
-};
-
-static int __bLs_register_notifier(void)
-{
- return bL_switcher_register_notifier(&bL_switcher_notifier);
-}
-
-static int __bLs_unregister_notifier(void)
-{
- return bL_switcher_unregister_notifier(&bL_switcher_notifier);
-}
-#else
-static int __bLs_register_notifier(void) { return 0; }
-static int __bLs_unregister_notifier(void) { return 0; }
-#endif
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
-{
- int ret, i;
-
- if (arm_bL_ops) {
- pr_debug("%s: Already registered: %s, exiting\n", __func__,
- arm_bL_ops->name);
- return -EBUSY;
- }
-
- if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
- !ops->get_transition_latency) {
- pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
- return -ENODEV;
- }
-
- arm_bL_ops = ops;
-
- set_switching_enabled(bL_switcher_get_enabled());
-
- for (i = 0; i < MAX_CLUSTERS; i++)
- mutex_init(&cluster_lock[i]);
-
- ret = cpufreq_register_driver(&bL_cpufreq_driver);
- if (ret) {
- pr_info("%s: Failed registering platform driver: %s, err: %d\n",
- __func__, ops->name, ret);
- arm_bL_ops = NULL;
- } else {
- ret = __bLs_register_notifier();
- if (ret) {
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- arm_bL_ops = NULL;
- } else {
- pr_info("%s: Registered platform driver: %s\n",
- __func__, ops->name);
- }
- }
-
- bL_switcher_put_enabled();
- return ret;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
-{
- if (arm_bL_ops != ops) {
- pr_err("%s: Registered with: %s, can't unregister, exiting\n",
- __func__, arm_bL_ops->name);
- return;
- }
-
- bL_switcher_get_enabled();
- __bLs_unregister_notifier();
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- bL_switcher_put_enabled();
- pr_info("%s: Un-registered platform driver: %s\n", __func__,
- arm_bL_ops->name);
- arm_bL_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
deleted file mode 100644
index 88a176e466c8..000000000000
--- a/drivers/cpufreq/arm_big_little.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ARM big.LITTLE platform's CPUFreq header file
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef CPUFREQ_ARM_BIG_LITTLE_H
-#define CPUFREQ_ARM_BIG_LITTLE_H
-
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct cpufreq_arm_bL_ops {
- char name[CPUFREQ_NAME_LEN];
-
- /*
- * This must set opp table for cpu_dev in a similar way as done by
- * dev_pm_opp_of_add_table().
- */
- int (*init_opp_table)(const struct cpumask *cpumask);
-
- /* Optional */
- int (*get_transition_latency)(struct device *cpu_dev);
- void (*free_opp_table)(const struct cpumask *cpumask);
-};
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
-
-#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bca8d1f47fd2..54bc76743b1f 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -86,7 +86,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "ti,omap2", },
- { .compatible = "ti,omap3", },
{ .compatible = "ti,omap4", },
{ .compatible = "ti,omap5", },
@@ -137,6 +136,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
+ { .compatible = "ti,omap3", },
{ }
};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 48a224a6b178..77114a3897fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -113,18 +113,21 @@ EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- u64 idle_time;
+ struct kernel_cpustat kcpustat;
u64 cur_wall_time;
+ u64 idle_time;
u64 busy_time;
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+ kcpustat_cpu_fetch(&kcpustat, cpu);
+
+ busy_time = kcpustat.cpustat[CPUTIME_USER];
+ busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat.cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat.cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat.cpustat[CPUTIME_NICE];
idle_time = cur_wall_time - busy_time;
if (wall)
@@ -933,6 +936,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
+ if (!fattr->show)
+ return -EIO;
+
down_read(&policy->rwsem);
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
@@ -947,6 +953,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+ if (!fattr->store)
+ return -EIO;
+
/*
* cpus_read_trylock() is used here to work around a circular lock
* dependency problem with respect to the cpufreq_register_driver().
@@ -2385,7 +2394,10 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
- /* verify the cpu speed can be set within this limit */
+ /*
+ * Verify that the CPU speed can be set within these limits and make sure
+ * that min <= max.
+ */
ret = cpufreq_driver->verify(new_policy);
if (ret)
return ret;
@@ -2628,6 +2640,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (cpufreq_disabled())
return -ENODEV;
+ /*
+ * The cpufreq core depends heavily on the availability of device
+ * structures; make sure they are available before proceeding further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 4bb054d0cb43..f99ae45efaea 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -105,7 +105,7 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
dbs_data->io_is_busy);
if (dbs_data->ignore_nice_load)
- j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
}
}
}
@@ -149,7 +149,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
- u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
j_cdbs->prev_cpu_nice = cur_nice;
@@ -530,7 +530,7 @@ int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
j_cdbs->prev_load = 0;
if (ignore_nice)
- j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
}
gov->start(policy);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 35db14cf3102..85a6efd6b68f 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
- * Early samples without fuses written report "0 0" which means
- * consumer segment and minimum speed grading.
- *
- * According to datasheet minimum speed grading is not supported for
- * consumer parts so clamp to 1 to avoid warning for "no OPPs"
+ * Early samples without fuses written report "0 0", which may not
+ * match any OPP defined in DT. So clamp to the minimum OPP defined
+ * in DT to avoid a warning for "no OPPs".
*
* Applies to i.MX8M series SoCs.
*/
- if (mkt_segment == 0 && speed_grade == 0 && (
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mq")))
- speed_grade = 1;
+ if (mkt_segment == 0 && speed_grade == 0) {
+ if (of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mq"))
+ speed_grade = 1;
+ if (of_machine_is_compatible("fsl,imx8mn"))
+ speed_grade = 0xb;
+ }
supported_hw[0] = BIT(speed_grade);
supported_hw[1] = BIT(mkt_segment);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8ab31702cf6a..d2fa3e9ccd97 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2662,21 +2662,21 @@ enum {
/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
- {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
- {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
+ {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{ } /* End */
};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 890813e0bb76..e9caa9586982 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -23,7 +23,7 @@
#include <asm/clock.h>
#include <asm/idle.h>
-#include <asm/mach-loongson64/loongson.h>
+#include <asm/mach-loongson2ef/loongson.h>
static uint nowait;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6061850e59c9..56f4bc0d209e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -1041,9 +1041,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
static int init_chip_info(void)
{
- unsigned int chip[256];
+ unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
@@ -1055,8 +1060,10 @@ static int init_chip_info(void)
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
+ if (!chips) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
@@ -1066,7 +1073,9 @@ static int init_chip_info(void)
per_cpu(chip_info, cpu) = &chips[i];
}
- return 0;
+free_and_return:
+ kfree(chip);
+ return ret;
}
static inline void clean_chip_info(void)
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index af0c00dabb22..c6bdfc308e99 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -19,7 +19,6 @@
static struct regulator *vddarm;
static unsigned long regulator_latency;
-#ifdef CONFIG_CPU_S3C6410
struct s3c64xx_dvfs {
unsigned int vddarm_min;
unsigned int vddarm_max;
@@ -48,7 +47,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 4, 800000 },
{ 0, 0, CPUFREQ_TABLE_END },
};
-#endif
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
@@ -149,11 +147,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -EINVAL;
- if (s3c64xx_freq_table == NULL) {
- pr_err("No frequency information for this CPU\n");
- return -ENODEV;
- }
-
policy->clk = clk_get(NULL, "armclk");
if (IS_ERR(policy->clk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2b51e0718c9f..20d1f85d5f5a 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -1,8 +1,6 @@
/*
* System Control and Power Interface (SCPI) based CPUFreq Interface driver
*
- * It provides necessary ops to arm_big_little cpufreq driver.
- *
* Copyright (C) 2015 ARM Ltd.
* Sudeep Holla <sudeep.holla@arm.com>
*
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index eca32e443716..9907a165135b 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -25,7 +25,7 @@
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
/**
- * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
* @versions: Set to the value parsed from efuse
*
 * Returns 0 on success.
@@ -69,21 +69,16 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return PTR_ERR(speedbin);
efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
- switch (efuse_value) {
- case 0b0001:
- *versions = 1;
- break;
- case 0b0011:
- *versions = 2;
- break;
- default:
- /*
- * For other situations, we treat it as bin0.
- * This vf table can be run for any good cpu.
- */
+
+ /*
+ * We treat unexpected efuse values as if the SoC were from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ *versions = efuse_value - 1;
+ else
*versions = 0;
- break;
- }
kfree(speedbin);
return 0;
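Read as a table, the new mapping above is (a hedged summary, not new behavior):

/* efuse_value -> *versions:
 *   1 -> 0 (slowest bin)    2 -> 1 (middle bin)    3 -> 2 (fastest bin)
 *   anything else -> 0, so an unknown part runs the always-safe slowest table
 */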
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index aeaa883a8c9d..557cb513bf7f 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -31,11 +31,17 @@
#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C
+#define OMAP3_CONTROL_IDCODE 0x4830A204
+#define OMAP34xx_ProdID_SKUID 0x4830A20C
+#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
struct ti_cpufreq_soc_data {
+ const char * const *reg_names;
unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
unsigned long efuse);
unsigned long efuse_fallback;
@@ -85,6 +91,13 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
return calculated_efuse;
}
+static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ /* OPP enable bit ("Speed Binned") */
+ return BIT(efuse);
+}
+
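A short note on omap3_efuse_xlate() above: returning BIT(efuse) turns the masked speed-grade value into a one-hot supported-hardware mask, which is how the OPP core matches opp-supported-hw entries. A hedged illustration:

/* omap3_efuse_xlate(opp_data, 0) == 0x1 -> only base OPPs enabled
 * omap3_efuse_xlate(opp_data, 1) == 0x2 -> speed-binned OPPs enabled */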
static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_xlate = amx3_efuse_xlate,
.efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -112,6 +125,74 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.multi_regulator = true,
};
+/*
+ * OMAP35x TRM (SPRUF98K):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * Control OMAP Status Register 15:0 (address 0x4800 244C)
+ * distinguishes between omap3503, omap3515, omap3525, omap3530
+ * and indicates feature presence.
+ * There are encodings for versions limited to 400/266MHz,
+ * but we ignore them.
+ * It is not clear if this also holds for omap34xx.
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP1_VDD1,
+ * are stored in the SYSCON register range.
+ * Register 0x4830A20C [ProdID.SKUID] [0:3]:
+ * 0x0 for a normal 600/430MHz device,
+ * 0x8 for a 720/520MHz device.
+ * It is not clear what the omap34xx value is.
+ */
+
+static struct ti_cpufreq_soc_data omap34xx_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP34xx_ProdID_SKUID - OMAP3_SYSCON_BASE,
+ .efuse_shift = 3,
+ .efuse_mask = BIT(3),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
+/*
+ * AM/DM37x TRM (SPRUGN4M):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * Control Device Status Register 15:0 (address 0x4800 244C)
+ * distinguishes between am3703, am3715, dm3725, dm3730
+ * and indicates feature presence.
+ * Speed Binned = bit 9:
+ *   0: 800/600 MHz
+ *   1: 1000/800 MHz
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP 1G_VDD1,
+ * are stored in the SYSCON register range.
+ * The 0x4830A20C [ProdID.SKUID] register exists, but it
+ * seems to always read as 0.
+ */
+
+static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+
+static struct ti_cpufreq_soc_data omap36xx_soc_data = {
+ .reg_names = omap3_reg_names,
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 9,
+ .efuse_mask = BIT(9),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = true,
+};
+
+/*
+ * AM3517 is quite similar to AM/DM37x, except that it has no
+ * high speed grade eFuse and no ABB LDO.
+ */
+
+static struct ti_cpufreq_soc_data am3517_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 0,
+ .efuse_mask = 0,
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
+
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
* @opp_data: pointer to ti_cpufreq_data context
@@ -128,7 +209,17 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
- if (ret) {
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->efuse_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ efuse = readl(regs);
+ iounmap(regs);
+ } else if (ret) {
dev_err(dev,
"Failed to read the efuse value from syscon: %d\n",
ret);
@@ -159,7 +250,17 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
- if (ret) {
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->rev_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ revision = readl(regs);
+ iounmap(regs);
+ } else if (ret) {
dev_err(dev,
"Failed to read the revision number from syscon: %d\n",
ret);
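Both hunks above add the same fallback shape; a consolidated sketch (hypothetical helper name, same error handling as the diff): when regmap_read() returns -EIO because the offset lies outside the syscon window, map the absolute physical address and do a raw 32-bit read instead:

static int omap3_read_reg(struct regmap *syscon, unsigned int offset,
			  unsigned int *val)
{
	void __iomem *regs;
	int ret = regmap_read(syscon, offset, val);

	if (ret != -EIO)
		return ret;		/* success, or a genuine error */

	/* Not a syscon register: fall back to a direct MMIO read. */
	regs = ioremap(OMAP3_SYSCON_BASE + offset, 4);
	if (!regs)
		return -ENOMEM;
	*val = readl(regs);
	iounmap(regs);
	return 0;
}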
@@ -189,8 +290,14 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+ { .compatible = "ti,am3517", .data = &am3517_soc_data, },
{ .compatible = "ti,am43", .data = &am4x_soc_data, },
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
+ { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ /* legacy */
+ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
{},
};
@@ -212,7 +319,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb"};
int ret;
match = dev_get_platdata(&pdev->dev);
@@ -268,9 +375,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
opp_data->opp_table = ti_opp_table;
if (opp_data->soc_data->multi_regulator) {
+ const char * const *reg_names = default_reg_names;
+
+ if (opp_data->soc_data->reg_names)
+ reg_names = opp_data->soc_data->reg_names;
ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
reg_names,
- ARRAY_SIZE(reg_names));
+ ARRAY_SIZE(default_reg_names));
if (IS_ERR(ti_opp_table)) {
dev_pm_opp_put_supported_hw(opp_data->opp_table);
ret = PTR_ERR(ti_opp_table);
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 53237289e606..506e3f2bf53a 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -1,61 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Versatile Express SPC CPUFreq Interface driver
*
- * It provides necessary ops to arm_big_little cpufreq driver.
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
*
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
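A worked example of the two macros above (rates in kHz): in switcher mode the A7 cluster is presented at half its hardware rate, so the virtual and actual views are related as follows, while A15 rates pass through unchanged:

/* VIRT_FREQ(A7_CLUSTER, 1000000)   == 500000    (hardware -> virtual)
 * ACTUAL_FREQ(A7_CLUSTER, 500000)  == 1000000   (virtual -> hardware)
 * VIRT_FREQ(A15_CLUSTER, 1000000)  == 1000000   (A15 is unscaled) */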
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min;	/* Minimum clock frequency (big) */
+static unsigned int clk_little_max;	/* Maximum clock frequency (little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if (cluster == per_cpu(physical_cluster, j) &&
+ max_freq < cpu_freq)
+ max_freq = cpu_freq;
+ }
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled())
+ return per_cpu(cpu_last_req_freq, cpu);
+ else
+ return clk_get_cpu_rate(cpu);
+}
+
+static unsigned int
+ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (!ret) {
+ /*
+ * FIXME: clk_set_rate hasn't returned an error here; however,
+ * clk_change_rate may have failed due to hardware or firmware
+ * issues and been unable to report that because of the current
+ * design of the clk core layer. To work around this problem we
+ * read back the clock rate and check that it is correct. This
+ * needs to be removed once the clk core is fixed.
+ */
+ if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+ ret = -EIO;
+ }
+
+ if (WARN_ON(ret)) {
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
+ if (ret)
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
+}
+
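The FIXME block above describes a set-and-verify workaround; reduced to its essence it is the following (a sketch under the same assumption that rates are in kHz at this layer):

static int set_rate_checked(struct clk *c, unsigned int rate_khz)
{
	int ret = clk_set_rate(c, rate_khz * 1000UL);

	/* The clk core may report success even if firmware rejected the
	 * rate, so read it back and treat a mismatch as -EIO. */
	if (!ret && clk_get_rate(c) != rate_khz * 1000UL)
		ret = -EIO;

	return ret;
}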
+/* Set clock frequency */
+static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+ int ret;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
+ new_cluster = A7_CLUSTER;
+ else if (actual_cluster == A7_CLUSTER &&
+ freqs_new > clk_little_max)
+ new_cluster = A15_CLUSTER;
+ }
+
+ ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+ freqs_new);
+
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freqs_new,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 min_freq = ~0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency < min_freq)
+ min_freq = pos->frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 max_freq = 0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency > max_freq)
+ max_freq = pos->frequency;
+ return max_freq;
+}
+
+static bool search_frequency(struct cpufreq_frequency_table *table, int size,
+ unsigned int freq)
+{
+ int count;
+
+ for (count = 0; count < size; count++) {
+ if (table[count].frequency == freq)
+ return true;
+ }
+
+ return false;
+}
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kcalloc(count, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ if (i == A15_CLUSTER &&
+ search_frequency(table, count, freq_table[i][j].frequency))
+ continue; /* skip duplicates */
+ table[k++].frequency =
+ VIRT_FREQ(i, freq_table[i][j].frequency);
+ }
+ }
+
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ return 0;
+}
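A worked example of merge_cluster_tables() above, with hypothetical virtual rates in kHz:

/* freq_table[A7] (already halved) = { 350000, 400000, 500000 }
 * freq_table[A15]                 = { 500000, 1000000 }
 * The reverse loop appends the A7 entries first (350000, 400000, 500000);
 * the A15 pass then skips its duplicate 500000 via search_frequency() and
 * appends 1000000, so the merged virtual table comes out in increasing
 * order: { 350000, 400000, 500000, 1000000, CPUFREQ_TABLE_END }. */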
-static int ve_spc_init_opp_table(const struct cpumask *cpumask)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+ int ret;
+
+ if (freq_table[cluster])
+ return 0;
+
/*
* platform specific SPC code must initialise the opp table
* so just check if the OPP count is non-zero
*/
- return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ if (ret)
+ goto out;
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (ret)
+ goto out;
+
+ clk[cluster] = clk_get(cpu_dev, NULL);
+ if (!IS_ERR(clk[cluster]))
+ return 0;
+
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+ __func__, cpu_dev->id, cluster);
+ ret = PTR_ERR(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+ dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+ cluster);
+ return ret;
}
-static int ve_spc_get_transition_latency(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
- return 1000000; /* 1 ms */
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill the virtual cluster with a merge
+ * of both.
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+ clk_little_max = VIRT_FREQ(A7_CLUSTER,
+ get_table_max(freq_table[A7_CLUSTER]));
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
}
-static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
- .name = "vexpress-spc",
- .get_transition_latency = ve_spc_get_transition_latency,
- .init_opp_table = ve_spc_init_opp_table,
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
+ ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ if (ret)
+ return ret;
+
+ policy->freq_table = freq_table[cur_cluster];
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) =
+ clk_get_cpu_rate(policy->cpu);
+
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+ return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpufreq_cooling_unregister(cdev[cur_cluster]);
+ cdev[cur_cluster] = NULL;
+ }
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+ return 0;
+}
+
+static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ /* Do not register a cpu_cooling device if we are in IKS mode */
+ if (cur_cluster >= MAX_CLUSTERS)
+ return;
+
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+ .name = "vexpress-spc",
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ve_spc_cpufreq_set_target,
+ .get = ve_spc_cpufreq_get_rate,
+ .init = ve_spc_cpufreq_init,
+ .exit = ve_spc_cpufreq_exit,
+ .ready = ve_spc_cpufreq_ready,
+ .attr = cpufreq_generic_attr,
};
+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+ return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+ return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
- return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+ int ret, i;
+
+ set_switching_enabled(bL_switcher_get_enabled());
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ve_spc_cpufreq_driver.name, ret);
+ } else {
+ ret = __bLs_register_notifier();
+ if (ret)
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ else
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ve_spc_cpufreq_driver.name);
+ }
+
+ bL_switcher_put_enabled();
+ return ret;
}
static int ve_spc_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ bL_switcher_get_enabled();
+ __bLs_unregister_notifier();
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ bL_switcher_put_enabled();
+ pr_info("%s: Un-registered platform driver: %s\n", __func__,
+ ve_spc_cpufreq_driver.name);
return 0;
}
@@ -68,4 +599,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
};
module_platform_driver(ve_spc_cpufreq_platdrv);
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 84b1ebe212b3..1b299e801f74 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -56,13 +56,10 @@ static u64 get_snooze_timeout(struct cpuidle_device *dev,
return default_snooze_timeout;
for (i = index + 1; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
-
- if (s->disabled || su->disable)
+ if (dev->states_usage[i].disable)
continue;
- return s->target_residency * tb_ticks_per_usec;
+ return drv->states[i].target_residency * tb_ticks_per_usec;
}
return default_snooze_timeout;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0895b988fa92..569dbac443bd 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -75,44 +75,45 @@ int cpuidle_play_dead(void)
static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
- unsigned int max_latency,
+ u64 max_latency_ns,
unsigned int forbidden_flags,
bool s2idle)
{
- unsigned int latency_req = 0;
+ u64 latency_req = 0;
int i, ret = 0;
for (i = 1; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable || s->exit_latency <= latency_req
- || s->exit_latency > max_latency
- || (s->flags & forbidden_flags)
- || (s2idle && !s->enter_s2idle))
+ if (dev->states_usage[i].disable ||
+ s->exit_latency_ns <= latency_req ||
+ s->exit_latency_ns > max_latency_ns ||
+ (s->flags & forbidden_flags) ||
+ (s2idle && !s->enter_s2idle))
continue;
- latency_req = s->exit_latency;
+ latency_req = s->exit_latency_ns;
ret = i;
}
return ret;
}
/**
- * cpuidle_use_deepest_state - Set/clear governor override flag.
- * @enable: New value of the flag.
+ * cpuidle_use_deepest_state - Set/unset governor override mode.
+ * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
*
- * Set/unset the current CPU to use the deepest idle state (override governors
- * going forward if set).
+ * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
+ * state with exit latency within @latency_limit_ns (override governors going
+ * forward), or do not override governors if it is zero.
*/
-void cpuidle_use_deepest_state(bool enable)
+void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
struct cpuidle_device *dev;
preempt_disable();
dev = cpuidle_get_device();
if (dev)
- dev->use_deepest_state = enable;
+ dev->forced_idle_latency_limit_ns = latency_limit_ns;
preempt_enable();
}
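A hedged usage sketch of the new semantics (the 1 ms budget is illustrative; callers such as forced-idle code are assumed to pass a latency limit rather than a flag):

cpuidle_use_deepest_state(1000 * NSEC_PER_USEC);	/* deepest state within 1 ms exit latency */
/* ... forced-idle section ... */
cpuidle_use_deepest_state(0);				/* back to normal governor selection */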
@@ -122,9 +123,10 @@ void cpuidle_use_deepest_state(bool enable)
* @dev: cpuidle device for the given CPU.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{
- return find_deepest_state(drv, dev, UINT_MAX, 0, false);
+ return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
}
#ifdef CONFIG_SUSPEND
@@ -180,7 +182,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
- index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
+ index = find_deepest_state(drv, dev, U64_MAX, 0, true);
if (index > 0)
enter_s2idle_proper(drv, dev, index);
@@ -209,7 +211,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* CPU as a broadcast timer, this call may fail if it is not available.
*/
if (broadcast && tick_broadcast_enter()) {
- index = find_deepest_state(drv, dev, target_state->exit_latency,
+ index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
CPUIDLE_FLAG_TIMER_STOP, false);
if (index < 0) {
default_idle_call();
@@ -247,7 +249,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
local_irq_enable();
if (entered_state >= 0) {
- s64 diff, delay = drv->states[entered_state].exit_latency;
+ s64 diff, delay = drv->states[entered_state].exit_latency_ns;
int i;
/*
@@ -255,18 +257,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* This can be moved to within driver enter routine,
* but that results in multiple copies of same code.
*/
- diff = ktime_us_delta(time_end, time_start);
- if (diff > INT_MAX)
- diff = INT_MAX;
+ diff = ktime_sub(time_end, time_start);
- dev->last_residency = (int)diff;
- dev->states_usage[entered_state].time += dev->last_residency;
+ dev->last_residency_ns = diff;
+ dev->states_usage[entered_state].time_ns += diff;
dev->states_usage[entered_state].usage++;
- if (diff < drv->states[entered_state].target_residency) {
+ if (diff < drv->states[entered_state].target_residency_ns) {
for (i = entered_state - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
/* Shallower states are enabled, so update. */
@@ -275,22 +274,21 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
} else if (diff > delay) {
for (i = entered_state + 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
/*
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency)
+ if (diff - delay >= drv->states[i].target_residency_ns)
dev->states_usage[entered_state].below++;
break;
}
}
} else {
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
}
return entered_state;
@@ -380,10 +378,10 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
limit_ns = TICK_NSEC;
for (i = 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
- limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+ limit_ns = (u64)drv->states[i].target_residency_ns;
}
dev->poll_limit_ns = limit_ns;
@@ -554,7 +552,7 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
dev->next_hrtimer = 0;
}
@@ -567,12 +565,16 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
*/
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
- int ret;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int i, ret;
if (!try_module_get(drv->owner))
return -EINVAL;
+ for (i = 0; i < drv->state_count; i++)
+ if (drv->states[i].disabled)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 80c1a830d991..c76423aaef4d 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -62,24 +62,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
- * For each CPU in the driver's cpumask, unset the registered driver per CPU
- * to @drv.
- *
- * Returns 0 on success, -EBUSY if the CPUs have driver(s) already.
+ * Returns 0 on success, -EBUSY if any CPU in the cpumask has a driver
+ * different from @drv already.
*/
static inline int __cpuidle_set_driver(struct cpuidle_driver *drv)
{
int cpu;
for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_driver *old_drv;
- if (__cpuidle_get_cpu_driver(cpu)) {
- __cpuidle_unset_driver(drv);
+ old_drv = __cpuidle_get_cpu_driver(cpu);
+ if (old_drv && old_drv != drv)
return -EBUSY;
- }
+ }
+ for_each_cpu(cpu, drv->cpumask)
per_cpu(cpuidle_drivers, cpu) = drv;
- }
return 0;
}
@@ -166,16 +165,27 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
if (!drv->cpumask)
drv->cpumask = (struct cpumask *)cpu_possible_mask;
- /*
- * Look for the timer stop flag in the different states, so that we know
- * if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually one of the deeper states have this flag set.
- */
- for (i = drv->state_count - 1; i >= 0 ; i--) {
- if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+
+ /*
+ * Look for the timer stop flag in the different states and if
+ * it is found, indicate that the broadcast timer has to be set
+ * up.
+ */
+ if (s->flags & CPUIDLE_FLAG_TIMER_STOP)
drv->bctimer = 1;
- break;
- }
+
+ /*
+ * The core will use the target residency and exit latency
+ * values in nanoseconds, but allow drivers to provide them in
+ * microseconds too.
+ */
+ if (s->target_residency > 0)
+ s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
+
+ if (s->exit_latency > 0)
+ s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
}
}
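For example, a driver that still fills in the microsecond fields is normalized once at init: .exit_latency = 10 becomes exit_latency_ns = 10 * NSEC_PER_USEC = 10000, and .target_residency = 100 becomes target_residency_ns = 100000, so the core can do all comparisons in nanoseconds.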
@@ -379,3 +389,31 @@ void cpuidle_driver_unref(void)
spin_unlock(&cpuidle_driver_lock);
}
+
+/**
+ * cpuidle_driver_state_disabled - Disable or enable an idle state
+ * @drv: cpuidle driver owning the state
+ * @idx: State index
+ * @disable: Whether or not to disable the state
+ */
+void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable)
+{
+ unsigned int cpu;
+
+ mutex_lock(&cpuidle_lock);
+
+ for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+
+ if (!dev)
+ continue;
+
+ if (disable)
+ dev->states_usage[idx].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ else
+ dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ }
+
+ mutex_unlock(&cpuidle_lock);
+}
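A usage sketch of the new helper (the state index 2 is hypothetical; a driver would typically call this from platform code that discovers a broken state at runtime):

cpuidle_driver_state_disabled(drv, 2, true);	/* mask off state 2 on all of drv's CPUs */
/* ... */
cpuidle_driver_state_disabled(drv, 2, false);	/* clears only the driver-set disable bit */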
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index e9801f26c732..e48271e117a3 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -107,11 +107,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
* cpuidle_governor_latency_req - Compute a latency constraint for CPU
* @cpu: Target CPU
*/
-int cpuidle_governor_latency_req(unsigned int cpu)
+s64 cpuidle_governor_latency_req(unsigned int cpu)
{
int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
- return device_req < global_req ? device_req : global_req;
+ if (device_req > global_req)
+ device_req = global_req;
+
+ return (s64)device_req * NSEC_PER_USEC;
}
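Worked example: with a global CPU-DMA latency request of 200 us and a per-device resume-latency request of 50 us, the tighter device constraint wins and the function returns 50 * NSEC_PER_USEC = 50000 ns.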
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
index 7a703d2e0064..cb2a96eafc02 100644
--- a/drivers/cpuidle/governors/haltpoll.c
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -49,7 +49,7 @@ static int haltpoll_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
bool *stop_tick)
{
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
if (!drv->state_count || latency_req == 0) {
*stop_tick = false;
@@ -75,10 +75,9 @@ static int haltpoll_select(struct cpuidle_driver *drv,
return 0;
}
-static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us)
+static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
{
unsigned int val;
- u64 block_ns = block_us*NSEC_PER_USEC;
/* Grow cpu_halt_poll_us if
* cpu_halt_poll_us < block_ns < guest_halt_poll_us
@@ -115,7 +114,7 @@ static void haltpoll_reflect(struct cpuidle_device *dev, int index)
dev->last_state_idx = index;
if (index != 0)
- adjust_poll_limit(dev, dev->last_residency);
+ adjust_poll_limit(dev, dev->last_residency_ns);
}
/**
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 428eeb832fe7..8e9058c4ea63 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -27,8 +27,8 @@ struct ladder_device_state {
struct {
u32 promotion_count;
u32 demotion_count;
- u32 promotion_time;
- u32 demotion_time;
+ u64 promotion_time_ns;
+ u64 demotion_time_ns;
} threshold;
struct {
int promotion_count;
@@ -68,9 +68,10 @@ static int ladder_select_state(struct cpuidle_driver *drv,
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
- int last_residency, last_idx = dev->last_state_idx;
+ int last_idx = dev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 last_residency;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
@@ -80,14 +81,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
+ last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
- !drv->states[last_idx + 1].disabled &&
!dev->states_usage[last_idx + 1].disable &&
- last_residency > last_state->threshold.promotion_time &&
- drv->states[last_idx + 1].exit_latency <= latency_req) {
+ last_residency > last_state->threshold.promotion_time_ns &&
+ drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -98,13 +98,12 @@ static int ladder_select_state(struct cpuidle_driver *drv,
/* consider demotion */
if (last_idx > first_idx &&
- (drv->states[last_idx].disabled ||
- dev->states_usage[last_idx].disable ||
- drv->states[last_idx].exit_latency > latency_req)) {
+ (dev->states_usage[last_idx].disable ||
+ drv->states[last_idx].exit_latency_ns > latency_req)) {
int i;
for (i = last_idx - 1; i > first_idx; i--) {
- if (drv->states[i].exit_latency <= latency_req)
+ if (drv->states[i].exit_latency_ns <= latency_req)
break;
}
ladder_do_selection(dev, ldev, last_idx, i);
@@ -112,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
}
if (last_idx > first_idx &&
- last_residency < last_state->threshold.demotion_time) {
+ last_residency < last_state->threshold.demotion_time_ns) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
@@ -152,9 +151,9 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
lstate->threshold.demotion_count = DEMOTION_COUNT;
if (i < drv->state_count - 1)
- lstate->threshold.promotion_time = state->exit_latency;
+ lstate->threshold.promotion_time_ns = state->exit_latency_ns;
if (i > first_idx)
- lstate->threshold.demotion_time = state->exit_latency;
+ lstate->threshold.demotion_time_ns = state->exit_latency_ns;
}
return 0;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e5a5d0c8d66b..b0a7ad566081 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,22 +19,12 @@
#include <linux/sched/stat.h>
#include <linux/math64.h>
-/*
- * Please note when changing the tuning values:
- * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
- * a scaling operation multiplication may overflow on 32 bit platforms.
- * In that case, #define RESOLUTION as ULL to get 64 bit result:
- * #define RESOLUTION 1024ULL
- *
- * The default values do not overflow.
- */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
-#define MAX_INTERESTING 50000
-
+#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
/*
* Concepts and ideas behind the menu governor
@@ -120,14 +110,14 @@ struct menu_device {
int needs_update;
int tick_wakeup;
- unsigned int next_timer_us;
+ u64 next_timer_ns;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
int interval_ptr;
};
-static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
{
int bucket = 0;
@@ -140,15 +130,15 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters
if (nr_iowaiters)
bucket = BUCKETS/2;
- if (duration < 10)
+ if (duration_ns < 10ULL * NSEC_PER_USEC)
return bucket;
- if (duration < 100)
+ if (duration_ns < 100ULL * NSEC_PER_USEC)
return bucket + 1;
- if (duration < 1000)
+ if (duration_ns < 1000ULL * NSEC_PER_USEC)
return bucket + 2;
- if (duration < 10000)
+ if (duration_ns < 10000ULL * NSEC_PER_USEC)
return bucket + 3;
- if (duration < 100000)
+ if (duration_ns < 100000ULL * NSEC_PER_USEC)
return bucket + 4;
return bucket + 5;
}
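The cut-offs above are the old microsecond thresholds expressed in nanoseconds; as a table (base bucket 0, plus BUCKETS/2 = 6 when there are I/O waiters):

/*   duration < 10 us   -> bucket 0        duration < 10 ms  -> bucket 3
 *   duration < 100 us  -> bucket 1        duration < 100 ms -> bucket 4
 *   duration < 1 ms    -> bucket 2        otherwise         -> bucket 5 */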
@@ -276,13 +266,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
- int i;
- int idx;
- unsigned int interactivity_req;
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
unsigned int predicted_us;
+ u64 predicted_ns;
+ u64 interactivity_req;
unsigned long nr_iowaiters;
ktime_t delta_next;
+ int i, idx;
if (data->needs_update) {
menu_update(drv, dev);
@@ -290,15 +280,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
/* determine the expected residency time, round up */
- data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
+ data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
nr_iowaiters = nr_iowait_cpu(dev->cpu);
- data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+ data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
- ((data->next_timer_us < drv->states[1].target_residency ||
- latency_req < drv->states[1].exit_latency) &&
- !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+ ((data->next_timer_ns < drv->states[1].target_residency_ns ||
+ latency_req < drv->states[1].exit_latency_ns) &&
+ !dev->states_usage[0].disable)) {
/*
* In this case state[0] will be used no matter what, so return
* it right away and keep the tick running if state[0] is a
@@ -308,18 +298,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- /*
- * Force the result of multiplication to be 64 bits even if both
- * operands are 32 bits.
- * Make sure to round up for half microseconds.
- */
- predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
- data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
- /*
- * Use the lowest expected idle interval to pick the idle state.
- */
- predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
+ /* Round up the result for half microseconds. */
+ predicted_us = div_u64(data->next_timer_ns *
+ data->correction_factor[data->bucket] +
+ (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
+ RESOLUTION * DECAY * NSEC_PER_USEC);
+ /* Use the lowest expected idle interval to pick the idle state. */
+ predicted_ns = (u64)min(predicted_us,
+ get_typical_interval(data, predicted_us)) *
+ NSEC_PER_USEC;
if (tick_nohz_tick_stopped()) {
/*
@@ -330,14 +317,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* the known time till the closest timer event for the idle
* state selection.
*/
- if (predicted_us < TICK_USEC)
- predicted_us = ktime_to_us(delta_next);
+ if (predicted_ns < TICK_NSEC)
+ predicted_ns = delta_next;
} else {
/*
* Use the performance multiplier and the user-configurable
* latency_req to determine the maximum exit latency.
*/
- interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
+ interactivity_req = div64_u64(predicted_ns,
+ performance_multiplier(nr_iowaiters));
if (latency_req > interactivity_req)
latency_req = interactivity_req;
}
@@ -349,27 +337,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = -1;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable)
+ if (dev->states_usage[i].disable)
continue;
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > predicted_us) {
+ if (s->target_residency_ns > predicted_ns) {
/*
* Use a physical idle state, not busy polling, unless
* a timer is going to trigger soon enough.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency <= latency_req &&
- s->target_residency <= data->next_timer_us) {
- predicted_us = s->target_residency;
+ s->exit_latency_ns <= latency_req &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
idx = i;
break;
}
- if (predicted_us < TICK_USEC)
+ if (predicted_ns < TICK_NSEC)
break;
if (!tick_nohz_tick_stopped()) {
@@ -379,7 +366,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick in that case and let the governor run
* again in the next iteration of the loop.
*/
- predicted_us = drv->states[idx].target_residency;
+ predicted_ns = drv->states[idx].target_residency_ns;
break;
}
@@ -389,13 +376,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* closest timer event, select this one to avoid getting
* stuck in the shallow one for too long.
*/
- if (drv->states[idx].target_residency < TICK_USEC &&
- s->target_residency <= ktime_to_us(delta_next))
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_next)
idx = i;
return idx;
}
- if (s->exit_latency > latency_req)
+ if (s->exit_latency_ns > latency_req)
break;
idx = i;
@@ -409,12 +396,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
- unsigned int delta_next_us = ktime_to_us(delta_next);
-
+ predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
- if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
+ if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
/*
* The tick is not going to be stopped and the target
* residency of the state to be returned is not within
@@ -422,12 +407,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick, so try to correct that.
*/
for (i = idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
idx = i;
- if (drv->states[i].target_residency <= delta_next_us)
+ if (drv->states[i].target_residency_ns <= delta_next)
break;
}
}
@@ -463,7 +447,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct menu_device *data = this_cpu_ptr(&menu_devices);
int last_idx = dev->last_state_idx;
struct cpuidle_state *target = &drv->states[last_idx];
- unsigned int measured_us;
+ u64 measured_ns;
unsigned int new_factor;
/*
@@ -481,7 +465,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* assume the state was never reached and the exit latency is 0.
*/
- if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+ if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
/*
* The nohz code said that there wouldn't be any events within
* the tick boundary (if the tick was stopped), but the idle
@@ -491,7 +475,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* have been idle long (but not forever) to help the idle
* duration predictor do a better job next time.
*/
- measured_us = 9 * MAX_INTERESTING / 10;
+ measured_ns = 9 * MAX_INTERESTING / 10;
} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
dev->poll_time_limit) {
/*
@@ -501,28 +485,29 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the CPU might have been woken up from idle by the next timer.
* Assume that to be the case.
*/
- measured_us = data->next_timer_us;
+ measured_ns = data->next_timer_ns;
} else {
/* measured value */
- measured_us = dev->last_residency;
+ measured_ns = dev->last_residency_ns;
/* Deduct exit latency */
- if (measured_us > 2 * target->exit_latency)
- measured_us -= target->exit_latency;
+ if (measured_ns > 2 * target->exit_latency_ns)
+ measured_ns -= target->exit_latency_ns;
else
- measured_us /= 2;
+ measured_ns /= 2;
}
/* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
+ if (measured_ns > data->next_timer_ns)
+ measured_ns = data->next_timer_ns;
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;
- if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
- new_factor += RESOLUTION * measured_us / data->next_timer_us;
+ if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
+ new_factor += div64_u64(RESOLUTION * measured_ns,
+ data->next_timer_ns);
else
/*
* we were idle so long that we count it as a perfect
@@ -542,7 +527,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
/* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = measured_us;
+ data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}
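A worked example of the update above: new_factor is a decaying average of RESOLUTION * measured_ns / next_timer_ns with weight 1/DECAY per sample, so if the CPU consistently sleeps exactly as long as predicted, the factor converges to the fixed point f = f - f/DECAY + RESOLUTION, i.e. f = DECAY * RESOLUTION = 8 * 1024 = 8192; menu_select() then divides by RESOLUTION * DECAY, recovering a correction ratio of exactly 1.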
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index b5a0e498f798..de7e706efd46 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -104,7 +104,7 @@ struct teo_cpu {
u64 sleep_length_ns;
struct teo_idle_state states[CPUIDLE_STATE_MAX];
int interval_idx;
- unsigned int intervals[INTERVALS];
+ u64 intervals[INTERVALS];
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -117,9 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
int i, idx_hit = -1, idx_timer = -1;
- unsigned int measured_us;
+ u64 measured_ns;
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
/*
@@ -127,23 +126,28 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* enough to the closest timer event expected at the idle state
* selection time to be discarded.
*/
- measured_us = UINT_MAX;
+ measured_ns = U64_MAX;
} else {
- unsigned int lat;
+ u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- lat = drv->states[dev->last_state_idx].exit_latency;
-
- measured_us = ktime_to_us(cpu_data->time_span_ns);
+ /*
+ * The computations below are to determine whether or not the
+ * (saved) time till the next timer event and the measured idle
+ * duration fall into the same "bin", so use last_residency_ns
+ * for that instead of time_span_ns which includes the cpuidle
+ * overhead.
+ */
+ measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
* executed by the CPU is not likely to be worst-case every
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_us >= lat)
- measured_us -= lat / 2;
+ if (measured_ns >= lat_ns)
+ measured_ns -= lat_ns / 2;
else
- measured_us /= 2;
+ measured_ns /= 2;
}
/*
@@ -155,9 +159,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
- if (drv->states[i].target_residency <= sleep_length_us) {
+ if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) {
idx_timer = i;
- if (drv->states[i].target_residency <= measured_us)
+ if (drv->states[i].target_residency_ns <= measured_ns)
idx_hit = i;
}
}
@@ -193,30 +197,35 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Save idle duration values corresponding to non-timer wakeups for
* pattern detection.
*/
- cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
 if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
+static bool teo_time_ok(u64 interval_ns)
+{
+ return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
+}
+
/**
* teo_find_shallower_state - Find shallower idle state matching given duration.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
- * @duration_us: Idle duration value to match.
+ * @duration_ns: Idle duration value to match.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- unsigned int duration_us)
+ u64 duration_ns)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
state_idx = i;
- if (drv->states[i].target_residency <= duration_us)
+ if (drv->states[i].target_residency_ns <= duration_ns)
break;
}
return state_idx;
@@ -232,9 +241,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
- unsigned int duration_us, count;
- int max_early_idx, constraint_idx, idx, i;
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ u64 duration_ns;
+ unsigned int hits, misses, early_hits;
+ int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
ktime_t delta_tick;
if (dev->last_state_idx >= 0) {
@@ -244,50 +254,92 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
cpu_data->time_span_ns = local_clock();
- cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
- duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+ duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+ cpu_data->sleep_length_ns = duration_ns;
- count = 0;
+ hits = 0;
+ misses = 0;
+ early_hits = 0;
max_early_idx = -1;
+ prev_max_early_idx = -1;
constraint_idx = drv->state_count;
idx = -1;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable) {
+ if (dev->states_usage[i].disable) {
+ /*
+ * Ignore disabled states with target residencies beyond
+ * the anticipated idle duration.
+ */
+ if (s->target_residency_ns > duration_ns)
+ continue;
+
+ /*
+ * This state is disabled, so the range of idle duration
+ * values corresponding to it is covered by the current
+ * candidate state, but still the "hits" and "misses"
+ * metrics of the disabled state need to be used to
+ * decide whether or not the state covering the range in
+ * question is good enough.
+ */
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
+
+ if (early_hits >= cpu_data->states[i].early_hits ||
+ idx < 0)
+ continue;
+
/*
- * If the "early hits" metric of a disabled state is
- * greater than the current maximum, it should be taken
- * into account, because it would be a mistake to select
- * a deeper state with lower "early hits" metric. The
- * index cannot be changed to point to it, however, so
- * just increase the max count alone and let the index
- * still point to a shallower idle state.
+ * If the current candidate state has been the one with
+ * the maximum "early hits" metric so far, the "early
+ * hits" metric of the disabled state replaces the
+ * current "early hits" count to avoid selecting a
+ * deeper state with lower "early hits" metric.
*/
- if (max_early_idx >= 0 &&
- count < cpu_data->states[i].early_hits)
- count = cpu_data->states[i].early_hits;
+ if (max_early_idx == idx) {
+ early_hits = cpu_data->states[i].early_hits;
+ continue;
+ }
+
+ /*
+ * The current candidate state is closer to the disabled
+ * one than the current maximum "early hits" state, so
+ * replace the latter with it, but in case the maximum
+ * "early hits" state index has not been set so far,
+ * check if the current candidate state is not too
+ * shallow for that role.
+ */
+ if (teo_time_ok(drv->states[idx].target_residency_ns)) {
+ prev_max_early_idx = max_early_idx;
+ early_hits = cpu_data->states[i].early_hits;
+ max_early_idx = idx;
+ }
continue;
}
- if (idx < 0)
+ if (idx < 0) {
idx = i; /* first enabled state */
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
+ }
- if (s->target_residency > duration_us)
+ if (s->target_residency_ns > duration_ns)
break;
- if (s->exit_latency > latency_req && constraint_idx > i)
+ if (s->exit_latency_ns > latency_req && constraint_idx > i)
constraint_idx = i;
idx = i;
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
- if (count < cpu_data->states[i].early_hits &&
- !(tick_nohz_tick_stopped() &&
- drv->states[i].target_residency < TICK_USEC)) {
- count = cpu_data->states[i].early_hits;
+ if (early_hits < cpu_data->states[i].early_hits &&
+ teo_time_ok(drv->states[i].target_residency_ns)) {
+ prev_max_early_idx = max_early_idx;
+ early_hits = cpu_data->states[i].early_hits;
max_early_idx = i;
}
}
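
To make the bookkeeping above concrete, here is a hedged worked example; the state numbering and counter values are hypothetical.

/*
 * Worked example (hypothetical numbers): states 0-2 enabled, state 3
 * disabled, all with target residencies below the measured sleep
 * length. The loop ends with idx == 2, but hits/misses now hold
 * state 3's counters, because the candidate state 2 covers the idle
 * duration range of the disabled state 3. If those counters say
 * hits <= misses, the code below demotes the selection to the best
 * "early hits" state instead.
 */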
@@ -300,10 +352,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* "early hits" metric, but if that cannot be determined, just use the
* state selected so far.
*/
- if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
- max_early_idx >= 0) {
- idx = max_early_idx;
- duration_us = drv->states[idx].target_residency;
+ if (hits <= misses) {
+ /*
+ * The current candidate state is not suitable, so take the one
+ * whose "early hits" metric is the maximum for the range of
+ * shallower states.
+ */
+ if (idx == max_early_idx)
+ max_early_idx = prev_max_early_idx;
+
+ if (max_early_idx >= 0) {
+ idx = max_early_idx;
+ duration_ns = drv->states[idx].target_residency_ns;
+ }
}
/*
@@ -316,18 +377,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx < 0) {
idx = 0; /* No states enabled. Must use 0. */
} else if (idx > 0) {
+ unsigned int count = 0;
u64 sum = 0;
- count = 0;
-
/*
* Count and sum the most recent idle duration values less than
* the current expected idle duration value.
*/
for (i = 0; i < INTERVALS; i++) {
- unsigned int val = cpu_data->intervals[i];
+ u64 val = cpu_data->intervals[i];
- if (val >= duration_us)
+ if (val >= duration_ns)
continue;
count++;
@@ -339,17 +399,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* values are in the interesting range.
*/
if (count > INTERVALS / 2) {
- unsigned int avg_us = div64_u64(sum, count);
+ u64 avg_ns = div64_u64(sum, count);
/*
* Avoid spending too much time in an idle state that
* would be too shallow.
*/
- if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
- duration_us = avg_us;
- if (drv->states[idx].target_residency > avg_us)
+ if (teo_time_ok(avg_ns)) {
+ duration_ns = avg_ns;
+ if (drv->states[idx].target_residency_ns > avg_ns)
idx = teo_find_shallower_state(drv, dev,
- idx, avg_us);
+ idx, avg_ns);
}
}
}
@@ -359,9 +419,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
- unsigned int delta_tick_us = ktime_to_us(delta_tick);
-
+ duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
/*
@@ -370,8 +428,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* till the closest timer including the tick, try to correct
* that.
*/
- if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+ if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
}
return idx;
@@ -415,7 +473,7 @@ static int teo_enable_device(struct cpuidle_driver *drv,
memset(cpu_data, 0, sizeof(*cpu_data));
for (i = 0; i < INTERVALS; i++)
- cpu_data->intervals[i] = UINT_MAX;
+ cpu_data->intervals[i] = U64_MAX;
return 0;
}
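
The hunks above call teo_time_ok(), which is defined earlier in the patch and not shown in this excerpt. Judging from the inline checks it replaces (tick already stopped and residency below TICK_USEC), it should look roughly like this sketch:

static bool teo_time_ok(u64 interval_ns)
{
	/* A duration is usable unless the tick is already stopped and
	 * the value is shorter than one tick period. */
	return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
}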
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index c8fa5f41dfc4..9f1ace9c53da 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -49,6 +49,8 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
state->exit_latency = 0;
state->target_residency = 0;
+ state->exit_latency_ns = 0;
+ state->target_residency_ns = 0;
state->power_usage = -1;
state->enter = poll_idle;
state->disabled = false;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 2bb2683b493c..38ef770be90d 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -255,25 +255,6 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%u\n", state->_name);\
}
-#define define_store_state_ull_function(_name) \
-static ssize_t store_state_##_name(struct cpuidle_state *state, \
- struct cpuidle_state_usage *state_usage, \
- const char *buf, size_t size) \
-{ \
- unsigned long long value; \
- int err; \
- if (!capable(CAP_SYS_ADMIN)) \
- return -EPERM; \
- err = kstrtoull(buf, 0, &value); \
- if (err) \
- return err; \
- if (value) \
- state_usage->_name = 1; \
- else \
- state_usage->_name = 0; \
- return size; \
-}
-
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
@@ -292,18 +273,60 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%s\n", state->_name);\
}
-define_show_state_function(exit_latency)
-define_show_state_function(target_residency)
+#define define_show_state_time_function(_name) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+}
+
+define_show_state_time_function(exit_latency)
+define_show_state_time_function(target_residency)
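
Even though latency and residency values now live in nanoseconds internally, the sysfs files keep reporting microseconds via ktime_to_us(), so the ABI is unchanged. An illustrative value:

/* exit_latency_ns == 50000 is still shown as "50" in
 * /sys/devices/system/cpu/cpuN/cpuidle/stateX/latency. */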
define_show_state_function(power_usage)
define_show_state_ull_function(usage)
-define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
-define_show_state_ull_function(disable)
-define_store_state_ull_function(disable)
define_show_state_ull_function(above)
define_show_state_ull_function(below)
+static ssize_t show_state_time(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+}
+
+static ssize_t show_state_disable(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
+}
+
+static ssize_t store_state_disable(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ const char *buf, size_t size)
+{
+ unsigned int value;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtouint(buf, 0, &value);
+ if (err)
+ return err;
+
+ if (value)
+ state_usage->disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ else
+ state_usage->disable &= ~CPUIDLE_STATE_DISABLED_BY_USER;
+
+ return size;
+}
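
The rewritten pair above treats "disable" as a bitmask rather than a plain boolean, masking only the user-controlled bit on both reads and writes. The flags are presumably defined alongside this patch in include/linux/cpuidle.h, together with a driver-owned counterpart, roughly:

#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)

With that layout, writing 0 to the sysfs file clears only the user bit, so a state that a driver has disabled stays disabled.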
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1fb622f2a87d..43ed1b621718 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -11,6 +11,8 @@ menuconfig CRYPTO_HW
if CRYPTO_HW
+source "drivers/crypto/allwinner/Kconfig"
+
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on X86 && !UML
@@ -26,7 +28,7 @@ config CRYPTO_DEV_PADLOCK
config CRYPTO_DEV_PADLOCK_AES
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
help
Use VIA PadLock for AES algorithm.
@@ -54,7 +56,7 @@ config CRYPTO_DEV_GEODE
tristate "Support for the Geode LX AES engine"
depends on X86_32 && PCI
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Say 'Y' here to use the AMD Geode LX processor on-board AES
engine for the CryptoAPI AES algorithm.
@@ -107,7 +109,7 @@ config CRYPTO_PAES_S390
depends on ZCRYPT
depends on PKEY
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms for use with protected key.
@@ -169,7 +171,7 @@ config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms"
depends on S390
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
This is the s390 hardware accelerated implementation of the
@@ -182,7 +184,7 @@ config CRYPTO_AES_S390
tristate "AES cipher algorithms"
depends on S390
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197).
@@ -236,7 +238,7 @@ config CRYPTO_DEV_MARVELL_CESA
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select SRAM
help
@@ -248,7 +250,7 @@ config CRYPTO_DEV_MARVELL_CESA
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
@@ -265,7 +267,7 @@ config CRYPTO_DEV_NIAGARA2
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI
depends on !ARCH_DMA_ADDR_T_64BIT
@@ -285,7 +287,7 @@ config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select HW_RANDOM
depends on FSL_SOC
@@ -323,7 +325,7 @@ config CRYPTO_DEV_IXP4XX
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
@@ -332,11 +334,12 @@ config CRYPTO_DEV_PPC4XX
depends on PPC && 4xx
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AES
select CRYPTO_LIB_AES
select CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_GCM
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -373,7 +376,7 @@ config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_CBC
select CRYPTO_ECB
@@ -387,7 +390,7 @@ config CRYPTO_DEV_OMAP_DES
tristate "Support for OMAP DES/3DES hw engine"
depends on ARCH_OMAP2PLUS
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
help
OMAP processors have DES/3DES module accelerator. Select this if you
@@ -403,7 +406,7 @@ config CRYPTO_DEV_PICOXCELL
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_CBC
select CRYPTO_ECB
@@ -418,7 +421,7 @@ config CRYPTO_DEV_PICOXCELL
config CRYPTO_DEV_SAHARA
tristate "Support for SAHARA crypto accelerator"
depends on ARCH_MXC && OF
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
help
@@ -445,7 +448,7 @@ config CRYPTO_DEV_S5P
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This option allows you to have support for S5P crypto acceleration.
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
@@ -489,11 +492,9 @@ if CRYPTO_DEV_UX500
endif # if CRYPTO_DEV_UX500
config CRYPTO_DEV_ATMEL_AUTHENC
- tristate "Support for Atmel IPSEC/SSL hw accelerator"
+ bool "Support for Atmel IPSEC/SSL hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
- select CRYPTO_AUTHENC
- select CRYPTO_DEV_ATMEL_AES
- select CRYPTO_DEV_ATMEL_SHA
+ depends on CRYPTO_DEV_ATMEL_AES
help
Some Atmel processors can combine the AES and SHA hw accelerators
to enhance support of IPSEC/SSL.
@@ -505,7 +506,9 @@ config CRYPTO_DEV_ATMEL_AES
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AUTHENC if CRYPTO_DEV_ATMEL_AUTHENC
+ select CRYPTO_DEV_ATMEL_SHA if CRYPTO_DEV_ATMEL_AUTHENC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for
@@ -518,7 +521,7 @@ config CRYPTO_DEV_ATMEL_TDES
tristate "Support for Atmel DES/TDES hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Some Atmel processors have DES/TDES hw accelerator.
Select this if you want to use the Atmel module for
@@ -590,7 +593,7 @@ config CRYPTO_DEV_MXS_DCP
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB
@@ -620,7 +623,7 @@ config CRYPTO_DEV_QCE
select CRYPTO_CBC
select CRYPTO_XTS
select CRYPTO_CTR
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This driver supports Qualcomm crypto engine accelerator
hardware. To compile this driver as a module, choose M here. The
@@ -657,31 +660,6 @@ config CRYPTO_DEV_IMGTEC_HASH
hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
hashing algorithms.
-config CRYPTO_DEV_SUN4I_SS
- tristate "Support for Allwinner Security System cryptographic accelerator"
- depends on ARCH_SUNXI && !64BIT
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_AES
- select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
- help
- Some Allwinner SoC have a crypto accelerator named
- Security System. Select this if you want to use it.
- The Security System handle AES/DES/3DES ciphers in CBC mode
- and SHA1 and MD5 hash algorithms.
-
- To compile this driver as a module, choose M here: the module
- will be called sun4i-ss.
-
-config CRYPTO_DEV_SUN4I_SS_PRNG
- bool "Support for Allwinner Security System PRNG"
- depends on CRYPTO_DEV_SUN4I_SS
- select CRYPTO_RNG
- help
- Select this option if you want to provide kernel-side support for
- the Pseudo-Random Number Generator found in the Security System.
-
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
@@ -691,7 +669,7 @@ config CRYPTO_DEV_ROCKCHIP
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This driver interfaces with the hardware crypto accelerator.
@@ -702,7 +680,7 @@ config CRYPTO_DEV_MEDIATEK
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CTR
select CRYPTO_SHA1
select CRYPTO_SHA256
@@ -730,7 +708,7 @@ config CRYPTO_DEV_BCM_SPU
select CRYPTO_SHA512
help
This driver provides support for Broadcom crypto acceleration using the
- Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
+ Secure Processing Unit (SPU). The SPU driver registers skcipher,
ahash, and aead algorithms with the kernel cryptographic API.
source "drivers/crypto/stm32/Kconfig"
@@ -740,7 +718,7 @@ config CRYPTO_DEV_SAFEXCEL
depends on OF || PCI || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_HASH
select CRYPTO_HMAC
@@ -748,6 +726,8 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_CHACHA20POLY1305
+ select CRYPTO_SHA3
help
This driver interfaces with the SafeXcel EIP-97 and EIP-197 cryptographic
engines designed by Inside Secure. It currently accelerates DES, 3DES and
@@ -762,7 +742,7 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CTR
select CRYPTO_HASH
select CRYPTO_SHA1
@@ -779,7 +759,7 @@ config CRYPTO_DEV_CCREE
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
default n
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -805,4 +785,6 @@ config CRYPTO_DEV_CCREE
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/amlogic/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index afc4753b5d28..40229d499476 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
@@ -39,7 +40,6 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
-obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
@@ -48,3 +48,4 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += hisilicon/
+obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
new file mode 100644
index 000000000000..12e7c6a85a02
--- /dev/null
+++ b/drivers/crypto/allwinner/Kconfig
@@ -0,0 +1,87 @@
+config CRYPTO_DEV_ALLWINNER
+ bool "Support for Allwinner cryptographic offloader"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y if ARCH_SUNXI
+ help
+ Say Y here to see options for Allwinner hardware crypto devices
+
+config CRYPTO_DEV_SUN4I_SS
+ tristate "Support for Allwinner Security System cryptographic accelerator"
+ depends on ARCH_SUNXI
+ depends on PM
+ depends on CRYPTO_DEV_ALLWINNER
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ help
+ Some Allwinner SoCs have a crypto accelerator named
+ Security System. Select this if you want to use it.
+ The Security System handles AES/DES/3DES ciphers in CBC mode
+ and the SHA1 and MD5 hash algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun4i-ss.
+
+config CRYPTO_DEV_SUN4I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN4I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
+config CRYPTO_DEV_SUN8I_CE
+ tristate "Support for Allwinner Crypto Engine cryptographic offloader"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ depends on PM
+ help
+ Select y here to have support for the Crypto Engine available on
+ Allwinner SoCs H2+, H3, H5, H6, R40 and A64.
+ The Crypto Engine handles AES/3DES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ce.
+
+config CRYPTO_DEV_SUN8I_CE_DEBUG
+ bool "Enable sun8i-ce stats"
+ depends on CRYPTO_DEV_SUN8I_CE
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ce debug stats.
+ This will create /sys/kernel/debug/sun8i-ce/stats for displaying
+ the number of requests per flow and per algorithm.
+
+config CRYPTO_DEV_SUN8I_SS
+ tristate "Support for Allwinner Security System cryptographic offloader"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ depends on PM
+ help
+ Select y here to have support for the Security System available on
+ Allwinner SoCs A80 and A83T.
+ The Security System handles AES/3DES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ss.
+
+config CRYPTO_DEV_SUN8I_SS_DEBUG
+ bool "Enable sun8i-ss stats"
+ depends on CRYPTO_DEV_SUN8I_SS
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ss debug stats.
+ This will create /sys/kernel/debug/sun8i-ss/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/allwinner/Makefile b/drivers/crypto/allwinner/Makefile
new file mode 100644
index 000000000000..6effe864d7ff
--- /dev/null
+++ b/drivers/crypto/allwinner/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss/
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce/
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss/
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/allwinner/sun4i-ss/Makefile
index c0a2797d3168..c0a2797d3168 100644
--- a/drivers/crypto/sunxi-ss/Makefile
+++ b/drivers/crypto/allwinner/sun4i-ss/Makefile
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 6536fd4bee65..cb2b0874f68f 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -72,7 +72,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
oi = 0;
oo = 0;
do {
- todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo) {
ileft -= todo;
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
@@ -87,7 +88,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
oleft -= todo;
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
@@ -239,7 +241,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* todo is the number of consecutive 4byte word that we
* can read from current SG
*/
- todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft / 4);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo && !ob) {
writesl(ss->base + SS_RXFIFO, mi.addr + oi,
todo);
@@ -253,8 +256,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we need to be able to write all buf in one
* pass, so it is why we min() with rx_cnt
*/
- todo = min3(rx_cnt * 4 - ob, ileft,
- mi.length - oi);
+ todo = min(rx_cnt * 4 - ob, ileft);
+ todo = min_t(size_t, todo, mi.length - oi);
memcpy(buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
@@ -274,7 +277,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
+ dev_dbg(ss->dev,
+ "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->cryptlen, rx_cnt,
oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
@@ -282,7 +286,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (!tx_cnt)
continue;
/* todo in 4bytes word */
- todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft / 4);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
@@ -308,7 +313,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* no more than remaining buffer
* no need to test against oleft
*/
- todo = min(mo.length - oo, obl - obo);
+ todo = min_t(size_t,
+ mo.length - oo, obl - obo);
memcpy(mo.addr + oo, bufo + obo, todo);
oleft -= todo;
obo += todo;
@@ -480,6 +486,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sun4i_ss_alg_template *algt;
const char *name = crypto_tfm_alg_name(tfm);
+ int err;
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
@@ -497,13 +504,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ goto error_pm;
+
return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
}
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put(op->ss->dev);
}
/* check and set the AES key, prepare the mode to be used */
@@ -524,7 +540,7 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keymode = SS_AES_256BITS;
break;
default:
- dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
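
The recurring min3()-to-min()/min_t() conversions in this file are type fixes rather than behavior changes: min() and min3() insist on arguments of a single type, and here u32 FIFO counters meet size_t scatterlist lengths. A minimal sketch of the idiom, with a hypothetical helper name:

static size_t fifo_todo(u32 rx_cnt, size_t ileft, size_t room)
{
	/* Compare the u32 count against a size_t length with an
	 * explicit min_t() cast, then reduce against the remaining
	 * room; no silent truncation either way. */
	size_t todo = min_t(size_t, rx_cnt, ileft);

	return min(todo, room);
}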
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 9aa6fe081a27..814cd12149a9 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -44,7 +44,8 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun4i_hash_crainit
+ .cra_init = sun4i_hash_crainit,
+ .cra_exit = sun4i_hash_craexit,
}
}
}
@@ -70,7 +71,8 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun4i_hash_crainit
+ .cra_init = sun4i_hash_crainit,
+ .cra_exit = sun4i_hash_craexit,
}
}
}
@@ -223,6 +225,82 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms registered by this driver.
+ */
+static int sun4i_ss_pm_suspend(struct device *dev)
+{
+ struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
+
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+
+ clk_disable_unprepare(ss->ssclk);
+ clk_disable_unprepare(ss->busclk);
+ return 0;
+}
+
+static int sun4i_ss_pm_resume(struct device *dev)
+{
+ struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
+
+ int err;
+
+ err = clk_prepare_enable(ss->busclk);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable busclk\n");
+ goto err_enable;
+ }
+
+ err = clk_prepare_enable(ss->ssclk);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable ssclk\n");
+ goto err_enable;
+ }
+
+ if (ss->reset) {
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
+ }
+ }
+
+ return err;
+err_enable:
+ sun4i_ss_pm_suspend(dev);
+ return err;
+}
+
+const struct dev_pm_ops sun4i_ss_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
+};
+
+/*
+ * When power management is enabled, this function enables runtime PM and
+ * sets the device as suspended.
+ * When power management is disabled, this function just enables the device.
+ */
+static int sun4i_ss_pm_init(struct sun4i_ss_ctx *ss)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ss->dev);
+ pm_runtime_set_autosuspend_delay(ss->dev, 2000);
+
+ err = pm_runtime_set_suspended(ss->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ss->dev);
+ return err;
+}
+
+static void sun4i_ss_pm_exit(struct sun4i_ss_ctx *ss)
+{
+ pm_runtime_disable(ss->dev);
+}
+
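
The net effect of this conversion is that the hardware is powered only while at least one TFM for the driver's algorithms exists: every init path takes a runtime-PM reference and every exit path drops it, with the 2000 ms autosuspend delay set above acting as a grace period. A minimal sketch of the pattern the following hunks apply:

static int example_tfm_init(struct sun4i_ss_ctx *ss)
{
	/* Resumes the device (clocks and reset) before first use. */
	int err = pm_runtime_get_sync(ss->dev);

	return err < 0 ? err : 0;
}

static void example_tfm_exit(struct sun4i_ss_ctx *ss)
{
	/* The last put lets the device autosuspend after ~2 seconds. */
	pm_runtime_put(ss->dev);
}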
static int sun4i_ss_probe(struct platform_device *pdev)
{
u32 v;
@@ -269,18 +347,6 @@ static int sun4i_ss_probe(struct platform_device *pdev)
ss->reset = NULL;
}
- /* Enable both clocks */
- err = clk_prepare_enable(ss->busclk);
- if (err) {
- dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
- return err;
- }
- err = clk_prepare_enable(ss->ssclk);
- if (err) {
- dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
- goto error_ssclk;
- }
-
/*
* Check that clock have the correct rates given in the datasheet
* Try to set the clock to the maximum allowed
@@ -288,16 +354,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
err = clk_set_rate(ss->ssclk, cr_mod);
if (err) {
dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
- goto error_clk;
- }
-
- /* Deassert reset if we have a reset control */
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(&pdev->dev, "Cannot deassert reset control\n");
- goto error_clk;
- }
+ return err;
}
/*
@@ -325,12 +382,26 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
cr, cr / 1000000, cr_mod);
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ spin_lock_init(&ss->slock);
+
+ err = sun4i_ss_pm_init(ss);
+ if (err)
+ return err;
+
/*
* Datasheet named it "Die Bonding ID"
* I expect to be a sort of Security System Revision number.
* Since the A80 seems to have an other version of SS
* this info could be useful
*/
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ goto error_pm;
+
writel(SS_ENABLED, ss->base + SS_CTL);
v = readl(ss->base + SS_CTL);
v >>= 16;
@@ -338,9 +409,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Die ID %d\n", v);
writel(0, ss->base + SS_CTL);
- ss->dev = &pdev->dev;
-
- spin_lock_init(&ss->slock);
+ pm_runtime_put_sync(ss->dev);
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
ss_algs[i].ss = ss;
@@ -370,7 +439,6 @@ static int sun4i_ss_probe(struct platform_device *pdev)
break;
}
}
- platform_set_drvdata(pdev, ss);
return 0;
error_alg:
i--;
@@ -387,12 +455,8 @@ error_alg:
break;
}
}
- if (ss->reset)
- reset_control_assert(ss->reset);
-error_clk:
- clk_disable_unprepare(ss->ssclk);
-error_ssclk:
- clk_disable_unprepare(ss->busclk);
+error_pm:
+ sun4i_ss_pm_exit(ss);
return err;
}
@@ -415,11 +479,7 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
}
- writel(0, ss->base + SS_CTL);
- if (ss->reset)
- reset_control_assert(ss->reset);
- clk_disable_unprepare(ss->busclk);
- clk_disable_unprepare(ss->ssclk);
+ sun4i_ss_pm_exit(ss);
return 0;
}
@@ -434,6 +494,7 @@ static struct platform_driver sun4i_ss_driver = {
.remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
+ .pm = &sun4i_ss_pm_ops,
.of_match_table = a20ss_crypto_of_match_table,
},
};
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fcffba5ef927..fdc0e6cdbb85 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -19,17 +19,29 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct sun4i_ss_alg_template *algt;
+ int err;
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
op->ss = algt->ss;
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ return err;
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sun4i_req_ctx));
return 0;
}
+void sun4i_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ pm_runtime_put(op->ss->dev);
+}
+
/* sun4i_hash_init: initialize request context */
int sun4i_hash_init(struct ahash_request *areq)
{
@@ -175,7 +187,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
unsigned int in_i = 0;
- u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
+ u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -184,6 +196,7 @@ static int sun4i_hash(struct ahash_request *areq)
struct sg_mapping_iter mi;
int in_r, err = 0;
size_t copied = 0;
+ __le32 wb = 0;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
@@ -215,7 +228,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
if (op->byte_count) {
ivmode = SS_IV_ARBITRARY;
- for (i = 0; i < 5; i++)
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
writel(op->hash[i], ss->base + SS_IV0 + i * 4);
}
/* Enable the device */
@@ -272,8 +285,8 @@ static int sun4i_hash(struct ahash_request *areq)
*/
while (op->len < 64 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, end - i,
- 64 - op->len);
+ in_r = min(end - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -293,8 +306,8 @@ static int sun4i_hash(struct ahash_request *areq)
}
if (mi.length - in_i > 3 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- ((mi.length - in_i) / 4) * 4);
+ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
+ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
/* how many bytes we can write in the device*/
todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
@@ -320,8 +333,8 @@ static int sun4i_hash(struct ahash_request *areq)
if ((areq->nbytes - i) < 64) {
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- 64 - op->len);
+ in_r = min(areq->nbytes - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -395,7 +408,7 @@ hash_final:
nbw = op->len - 4 * nwait;
if (nbw) {
- wb = *(u32 *)(op->buf + nwait * 4);
+ wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
wb &= GENMASK((nbw * 8) - 1, 0);
op->byte_count += nbw;
@@ -404,7 +417,7 @@ hash_final:
/* write the remaining bytes of the nbw buffer */
wb |= ((1 << 7) << (nbw * 8));
- bf[j++] = wb;
+ bf[j++] = le32_to_cpu(wb);
/*
* number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
@@ -423,13 +436,13 @@ hash_final:
/* write the length of data */
if (op->mode == SS_OP_SHA1) {
- __be64 bits = cpu_to_be64(op->byte_count << 3);
- bf[j++] = lower_32_bits(bits);
- bf[j++] = upper_32_bits(bits);
+ __be64 *bits = (__be64 *)&bf[j];
+ *bits = cpu_to_be64(op->byte_count << 3);
+ j += 2;
} else {
- __le64 bits = op->byte_count << 3;
- bf[j++] = lower_32_bits(bits);
- bf[j++] = upper_32_bits(bits);
+ __le64 *bits = (__le64 *)&bf[j];
+ *bits = cpu_to_le64(op->byte_count << 3);
+ j += 2;
}
writesl(ss->base + SS_RXFIFO, bf, j);
@@ -471,7 +484,7 @@ hash_final:
}
} else {
for (i = 0; i < 4; i++) {
- v = readl(ss->base + SS_MD0 + i * 4);
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 63d636424161..729aafdbea84 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -17,7 +17,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
{
struct sun4i_ss_alg_template *algt;
struct rng_alg *alg = crypto_rng_alg(tfm);
- int i;
+ int i, err;
u32 v;
u32 *data = (u32 *)dst;
const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
@@ -28,6 +28,10 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ return err;
+
spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
@@ -52,5 +56,8 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
writel(0, ss->base + SS_CTL);
spin_unlock_bh(&ss->slock);
+
+ pm_runtime_put(ss->dev);
+
return 0;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 35a27a7145f8..60425ac75d90 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/sha.h>
@@ -177,6 +178,7 @@ struct sun4i_req_ctx {
};
int sun4i_hash_crainit(struct crypto_tfm *tfm);
+void sun4i_hash_craexit(struct crypto_tfm *tfm);
int sun4i_hash_init(struct ahash_request *areq);
int sun4i_hash_update(struct ahash_request *areq);
int sun4i_hash_final(struct ahash_request *areq);
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
new file mode 100644
index 000000000000..08b68c3c1ca9
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
+sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
new file mode 100644
index 000000000000..37d0b6c386a0
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-cipher.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128, 192 and 256-bit keys
+ * in CBC and ECB mode.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ce.h"
+
+static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct scatterlist *sg;
+
+ if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
+ return true;
+
+ if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
+ return true;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ return true;
+
+ sg = areq->src;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
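
In short, the hardware path requires a DMA-friendly request: at most MAX_SG entries per list, a non-zero length that is a multiple of the 16-byte AES block, and word-sized, word-aligned scatterlist segments. Anything else goes to the software fallback. An illustrative case:

/* A 20-byte cbc(aes) request falls back: 20 % 16 != 0, so the CE
 * cannot process it, and the sync skcipher fallback runs instead.
 * (Hypothetical request size, for illustration.) */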
+
+static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+#endif
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & CE_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int sun8i_ce_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ struct scatterlist *sg;
+ unsigned int todo, len, offset, ivsize;
+ dma_addr_t addr_iv = 0, addr_key = 0;
+ void *backup_iv = NULL;
+ u32 common, sym;
+ int flow, i;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+
+ dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+#endif
+
+ flow = rctx->flow;
+
+ chan = &ce->chanlist[flow];
+
+ cet = chan->tl;
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->alg_cipher[algt->ce_algo_id];
+ common |= rctx->op_dir | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+ /* CTS and recent CE (H6) need the length in bytes, in words otherwise */
+ if (ce->variant->has_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(areq->cryptlen);
+ else
+ cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
+
+ sym = ce->variant->op_mode[algt->ce_blockmode];
+ len = op->keylen;
+ switch (len) {
+ case 128 / 8:
+ sym |= CE_AES_128BITS;
+ break;
+ case 192 / 8:
+ sym |= CE_AES_192BITS;
+ break;
+ case 256 / 8:
+ sym |= CE_AES_256BITS;
+ break;
+ }
+
+ cet->t_sym_ctl = cpu_to_le32(sym);
+ cet->t_asym_ctl = 0;
+
+ chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
+ chan->op_dir = rctx->op_dir;
+ chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
+ chan->keylen = op->keylen;
+
+ addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ cet->t_key = cpu_to_le32(addr_key);
+ if (dma_mapping_error(ce->dev, addr_key)) {
+ dev_err(ce->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ chan->ivlen = ivsize;
+ chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!chan->bounce_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ if (rctx->op_dir & CE_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ memcpy(chan->bounce_iv, areq->iv, ivsize);
+ addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
+ DMA_TO_DEVICE);
+ cet->t_iv = cpu_to_le32(addr_iv);
+ if (dma_mapping_error(ce->dev, addr_iv)) {
+ dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ }
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_src[i].len = cpu_to_le32(todo / 4);
+ dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(areq->dst, sg, nr_sgd, i) {
+ cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_dst[i].len = cpu_to_le32(todo / 4);
+ dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ chan->timeout = areq->cryptlen;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (areq->iv && ivsize > 0) {
+ if (addr_iv)
+ dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
+ DMA_TO_DEVICE);
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & CE_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kzfree(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(chan->bounce_iv);
+ }
+
+theend_key:
+ dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+ return err;
+}
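
One detail worth keeping in mind from the descriptor setup above: the units of t_dlen depend on the variant's has_t_dlen_in_bytes flag. As a worked example:

/* For a 32-byte request: cet->t_dlen = 32 on the H6 (bytes), but
 * 32 / 4 = 8 on the older engines (32-bit words). */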
+
+static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ce_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ce_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = CE_DECRYPTION;
+ if (sun8i_ce_cipher_need_fallback(areq))
+ return sun8i_ce_cipher_fallback(areq);
+
+ e = sun8i_ce_get_engine_number(op->ce);
+ rctx->flow = e;
+ engine = op->ce->chanlist[e].engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = CE_ENCRYPTION;
+ if (sun8i_ce_cipher_need_fallback(areq))
+ return sun8i_ce_cipher_fallback(areq);
+
+ e = sun8i_ce_get_engine_number(op->ce);
+ rctx->flow = e;
+ engine = op->ce->chanlist[e].engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ce_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ op->ce = algt->ce;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+
+ op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+
+ return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync_suspend(op->ce->dev);
+}
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = verify_skcipher_des3_key(tfm, key);
+ if (err)
+ return err;
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
new file mode 100644
index 000000000000..73a7649f915d
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-core.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the CryptoEngine.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ce.h"
+
+/*
+ * The mod clock is lower on the H3 than on other SoCs because DMA timeouts
+ * occur at higher rates.
+ * If you want to tune the mod clock, loading the driver and passing the
+ * selftests is insufficient; you need to test with a LUKS workload (mount
+ * it and write to it).
+ */
+static const struct ce_variant ce_h3_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 50000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_h5_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_h6_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .has_t_dlen_in_bytes = true,
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ { "ram", 0, 400000000 },
+ }
+};
+
+static const struct ce_variant ce_a64_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_r40_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+/*
+ * sun8i_ce_get_engine_number() gets the next channel slot.
+ * This is a simple round-robin way of getting the next channel.
+ */
+int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
+{
+ return atomic_inc_return(&ce->flow) % MAXFLOW;
+}
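
Since atomic_inc_return() gives each caller a unique, monotonically increasing value, the modulo spreads requests evenly over the flows without any locking. Assuming MAXFLOW == 4 and a counter starting at zero:

/* call #1 -> atomic_inc_return() == 1 -> flow 1
 * call #2 -> 2 -> flow 2
 * call #3 -> 3 -> flow 3
 * call #4 -> 4 -> flow 0, and so on round-robin. */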
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
+{
+ u32 v;
+ int err = 0;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->chanlist[flow].stat_req++;
+#endif
+
+ mutex_lock(&ce->mlock);
+
+ v = readl(ce->base + CE_ICR);
+ v |= 1 << flow;
+ writel(v, ce->base + CE_ICR);
+
+ reinit_completion(&ce->chanlist[flow].complete);
+ writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);
+
+ ce->chanlist[flow].status = 0;
+ /* Be sure all data is written before enabling the task */
+ wmb();
+
+ v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ writel(v, ce->base + CE_TLR);
+ mutex_unlock(&ce->mlock);
+
+ wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
+ msecs_to_jiffies(ce->chanlist[flow].timeout));
+
+ if (ce->chanlist[flow].status == 0) {
+ dev_err(ce->dev, "DMA timeout for %s\n", name);
+ err = -EFAULT;
+ }
+ /* No need to lock for this read: the channel is locked, so nothing
+ * can modify the error value for this channel
+ */
+ v = readl(ce->base + CE_ESR);
+ if (v) {
+ v >>= (flow * 4);
+ v &= 0xFF;
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ if (v & CE_ERR_ADDR_INVALID)
+ dev_err(ce->dev, "CE ERROR: address invalid\n");
+ }
+
+ return err;
+}
+
+static irqreturn_t ce_irq_handler(int irq, void *data)
+{
+ struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ce->base + CE_ISR);
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ce->base + CE_ISR);
+ ce->chanlist[flow].status = 1;
+ complete(&ce->chanlist[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ce_alg_template ce_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ce_dev *ce = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.skcipher.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ce_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ce_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(ce->chanlist[i].engine);
+ if (ce->chanlist[i].tl)
+ dma_free_coherent(ce->dev, sizeof(struct ce_task),
+ ce->chanlist[i].tl,
+ ce->chanlist[i].t_phy);
+ i--;
+ }
+}
+
+/*
+ * Allocate the channel list structure
+ */
+static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
+{
+ int i, err;
+
+ ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW,
+ sizeof(struct sun8i_ce_flow), GFP_KERNEL);
+ if (!ce->chanlist)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&ce->chanlist[i].complete);
+
+ ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true);
+ if (!ce->chanlist[i].engine) {
+ dev_err(ce->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ce->chanlist[i].engine);
+ if (err) {
+ dev_err(ce->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ ce->chanlist[i].tl = dma_alloc_coherent(ce->dev,
+ sizeof(struct ce_task),
+ &ce->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!ce->chanlist[i].tl) {
+ dev_err(ce->dev, "Cannot get DMA memory for task %d\n",
+ i);
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ sun8i_ce_free_chanlist(ce, i);
+ return err;
+}
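+
+/*
+ * Error-path note (editorial): on crypto_engine_alloc_init() failure
+ * above, i is decremented before the goto, so sun8i_ce_free_chanlist()
+ * only tears down slots whose engine was fully allocated; the tl check
+ * in the free path covers a slot whose DMA allocation failed mid-way.
+ */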
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms proposed by this driver.
+ */
+static int sun8i_ce_pm_suspend(struct device *dev)
+{
+ struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(ce->reset);
+ for (i = 0; i < CE_MAX_CLOCKS; i++)
+ clk_disable_unprepare(ce->ceclks[i]);
+ return 0;
+}
+
+static int sun8i_ce_pm_resume(struct device *dev)
+{
+ struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
+ int err, i;
+
+ for (i = 0; i < CE_MAX_CLOCKS; i++) {
+ if (!ce->variant->ce_clks[i].name)
+ continue;
+ err = clk_prepare_enable(ce->ceclks[i]);
+ if (err) {
+ dev_err(ce->dev, "Cannot prepare_enable %s\n",
+ ce->variant->ce_clks[i].name);
+ goto error;
+ }
+ }
+ err = reset_control_deassert(ce->reset);
+ if (err) {
+ dev_err(ce->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+ return 0;
+error:
+ sun8i_ce_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sun8i_ce_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL)
+};
+
+static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ce->dev);
+ pm_runtime_set_autosuspend_delay(ce->dev, 2000);
+
+ err = pm_runtime_set_suspended(ce->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ce->dev);
+ return err;
+}
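+
+/*
+ * A minimal usage sketch of the resulting lifecycle (editorial,
+ * assuming the cipher init/exit hooks pair pm_runtime_get_sync() with
+ * pm_runtime_put_sync(), as the sun8i_ss counterparts below do):
+ *
+ *	tfm = crypto_alloc_skcipher("cbc-aes-sun8i-ce", 0, 0);
+ *	// first TFM resumes the CE
+ *	crypto_free_skcipher(tfm);
+ *	// last TFM freed: CE autosuspends after the 2000 ms delay
+ */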
+
+static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
+{
+ pm_runtime_disable(ce->dev);
+}
+
+static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
+{
+ unsigned long cr;
+ int err, i;
+
+ for (i = 0; i < CE_MAX_CLOCKS; i++) {
+ if (!ce->variant->ce_clks[i].name)
+ continue;
+ ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name);
+ if (IS_ERR(ce->ceclks[i])) {
+ err = PTR_ERR(ce->ceclks[i]);
+ dev_err(ce->dev, "Cannot get %s CE clock err=%d\n",
+ ce->variant->ce_clks[i].name, err);
+ return err;
+ }
+ cr = clk_get_rate(ce->ceclks[i]);
+ if (!cr)
+ return -EINVAL;
+ if (ce->variant->ce_clks[i].freq > 0 &&
+ cr != ce->variant->ce_clks[i].freq) {
+ dev_info(ce->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n",
+ ce->variant->ce_clks[i].name,
+ ce->variant->ce_clks[i].freq,
+ ce->variant->ce_clks[i].freq / 1000000,
+ cr, cr / 1000000);
+ err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq);
+ if (err)
+ dev_err(ce->dev, "Fail to set %s clk speed to %lu hz\n",
+ ce->variant->ce_clks[i].name,
+ ce->variant->ce_clks[i].freq);
+ }
+ if (ce->variant->ce_clks[i].max_freq > 0 &&
+ cr > ce->variant->ce_clks[i].max_freq)
+ dev_warn(ce->dev, "Frequency for %s (%lu Hz) is higher than the datasheet's recommendation (%lu Hz)",
+ ce->variant->ce_clks[i].name, cr,
+ ce->variant->ce_clks[i].max_freq);
+ }
+ return 0;
+}
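+
+/*
+ * Hypothetical ce_clks entry consumed by the loop above (name and
+ * rates are illustrative only):
+ *	{ .name = "mod", .freq = 300000000, .max_freq = 300000000 }
+ * A zero .freq leaves the current rate untouched; a zero .max_freq
+ * disables the datasheet warning.
+ */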
+
+static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
+{
+ int ce_method, err, id, i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ ce_algs[i].ce = ce;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ce_algs[i].ce_algo_id;
+ ce_method = ce->variant->alg_cipher[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_dbg(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ id = ce_algs[i].ce_blockmode;
+ ce_method = ce->variant->op_mode[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ce->dev, "ERROR: Fail to register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ default:
+ ce_algs[i].ce = NULL;
+ dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int sun8i_ce_probe(struct platform_device *pdev)
+{
+ struct sun8i_ce_dev *ce;
+ int err, irq;
+ u32 v;
+
+ ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ ce->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ce);
+
+ ce->variant = of_device_get_match_data(&pdev->dev);
+ if (!ce->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ce->base))
+ return PTR_ERR(ce->base);
+
+ err = sun8i_ce_get_clks(ce);
+ if (err)
+ return err;
+
+ /* Get the Non-secure IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(ce->dev, "Cannot get CryptoEngine Non-secure IRQ\n");
+ return irq;
+ }
+
+ ce->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ce->reset)) {
+ if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ce->reset);
+ dev_err(&pdev->dev, "No reset control found\n");
+ return PTR_ERR(ce->reset);
+ }
+
+ mutex_init(&ce->mlock);
+
+ err = sun8i_ce_allocate_chanlist(ce);
+ if (err)
+ return err;
+
+ err = sun8i_ce_pm_init(ce);
+ if (err)
+ goto error_pm;
+
+ err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
+ "sun8i-ce-ns", ce);
+ if (err) {
+ dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
+ goto error_irq;
+ }
+
+ err = sun8i_ce_register_algs(ce);
+ if (err)
+ goto error_alg;
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0)
+ goto error_alg;
+
+ v = readl(ce->base + CE_CTR);
+ v >>= CE_DIE_ID_SHIFT;
+ v &= CE_DIE_ID_MASK;
+ dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v);
+
+ pm_runtime_put_sync(ce->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ /* Ignore debugfs errors */
+ ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+ ce->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ce->dbgfs_dir, ce,
+ &sun8i_ce_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ sun8i_ce_unregister_algs(ce);
+error_irq:
+ sun8i_ce_pm_exit(ce);
+error_pm:
+ sun8i_ce_free_chanlist(ce, MAXFLOW);
+ return err;
+}
+
+static int sun8i_ce_remove(struct platform_device *pdev)
+{
+ struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
+
+ sun8i_ce_unregister_algs(ce);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ debugfs_remove_recursive(ce->dbgfs_dir);
+#endif
+
+ sun8i_ce_free_chanlist(ce, MAXFLOW);
+
+ sun8i_ce_pm_exit(ce);
+ return 0;
+}
+
+static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-h3-crypto",
+ .data = &ce_h3_variant },
+ { .compatible = "allwinner,sun8i-r40-crypto",
+ .data = &ce_r40_variant },
+ { .compatible = "allwinner,sun50i-a64-crypto",
+ .data = &ce_a64_variant },
+ { .compatible = "allwinner,sun50i-h5-crypto",
+ .data = &ce_h5_variant },
+ { .compatible = "allwinner,sun50i-h6-crypto",
+ .data = &ce_h6_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
+
+static struct platform_driver sun8i_ce_driver = {
+ .probe = sun8i_ce_probe,
+ .remove = sun8i_ce_remove,
+ .driver = {
+ .name = "sun8i-ce",
+ .pm = &sun8i_ce_pm_ops,
+ .of_match_table = sun8i_ce_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ce_driver);
+
+MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
new file mode 100644
index 000000000000..43db49ceafe4
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ce.h - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6 SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+/* CE Registers */
+#define CE_TDQ 0x00
+#define CE_CTR 0x04
+#define CE_ICR 0x08
+#define CE_ISR 0x0C
+#define CE_TLR 0x10
+#define CE_TSR 0x14
+#define CE_ESR 0x18
+#define CE_CSSGR 0x1C
+#define CE_CDSGR 0x20
+#define CE_CSAR 0x24
+#define CE_CDAR 0x28
+#define CE_TPR 0x2C
+
+/* Used in struct ce_task */
+/* ce_task common */
+#define CE_ENCRYPTION 0
+#define CE_DECRYPTION BIT(8)
+
+#define CE_COMM_INT BIT(31)
+
+/* ce_task symmetric */
+#define CE_AES_128BITS 0
+#define CE_AES_192BITS 1
+#define CE_AES_256BITS 2
+
+#define CE_OP_ECB 0
+#define CE_OP_CBC (1 << 8)
+
+#define CE_ALG_AES 0
+#define CE_ALG_DES 1
+#define CE_ALG_3DES 2
+
+/* Used in ce_variant */
+#define CE_ID_NOTSUPP 0xFF
+
+#define CE_ID_CIPHER_AES 0
+#define CE_ID_CIPHER_DES 1
+#define CE_ID_CIPHER_DES3 2
+#define CE_ID_CIPHER_MAX 3
+
+#define CE_ID_OP_ECB 0
+#define CE_ID_OP_CBC 1
+#define CE_ID_OP_MAX 2
+
+/* Used in CE registers */
+#define CE_ERR_ALGO_NOTSUP BIT(0)
+#define CE_ERR_DATALEN BIT(1)
+#define CE_ERR_KEYSRAM BIT(2)
+#define CE_ERR_ADDR_INVALID BIT(5)
+#define CE_ERR_KEYLADDER BIT(6)
+
+#define CE_DIE_ID_SHIFT 16
+#define CE_DIE_ID_MASK 0x07
+
+#define MAX_SG 8
+
+#define CE_MAX_CLOCKS 3
+
+#define MAXFLOW 4
+
+/*
+ * struct ce_clock - Describe clocks used by sun8i-ce
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock (generally given by datasheet)
+ */
+struct ce_clock {
+ const char *name;
+ unsigned long freq;
+ unsigned long max_freq;
+};
+
+/*
+ * struct ce_variant - Describes the CE capabilities of each variant
+ * @alg_cipher: list of supported ciphers. For each CE_ID_CIPHER_XXX this
+ * gives the corresponding CE_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @has_t_dlen_in_bytes: whether the cipher request length is given to the
+ * hardware in bytes (true) or words (false)
+ * @ce_clks: list of clocks needed by this variant
+ */
+struct ce_variant {
+ char alg_cipher[CE_ID_CIPHER_MAX];
+ u32 op_mode[CE_ID_OP_MAX];
+ bool has_t_dlen_in_bytes;
+ struct ce_clock ce_clks[CE_MAX_CLOCKS];
+};
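+
+/*
+ * A hedged example of a variant definition using the fields above
+ * (values are illustrative, not taken from any datasheet):
+ *
+ * static const struct ce_variant ce_example_variant = {
+ *	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES },
+ *	.op_mode = { CE_OP_ECB, CE_OP_CBC },
+ *	.ce_clks = {
+ *		{ "bus", 0, 200000000 },
+ *		{ "mod", 300000000, 300000000 },
+ *	},
+ * };
+ */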
+
+struct sginfo {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+/*
+ * struct ce_task - CE task descriptor
+ * The layout of this descriptor is documented in the datasheet.
+ */
+struct ce_task {
+ __le32 t_id;
+ __le32 t_common_ctl;
+ __le32 t_sym_ctl;
+ __le32 t_asym_ctl;
+ __le32 t_key;
+ __le32 t_iv;
+ __le32 t_ctr;
+ __le32 t_dlen;
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ __le32 next;
+ __le32 reserved[3];
+} __packed __aligned(8);
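+
+/*
+ * All fields are little-endian as consumed by the engine, so writers
+ * are expected to go through cpu_to_le32(); @next presumably carries
+ * the physical address of the next chained task (editorial reading of
+ * the descriptor, to be confirmed against the datasheet).
+ */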
+
+/*
+ * struct sun8i_ce_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @bounce_iv: buffer which contains the IV
+ * @ivlen: size of bounce_iv
+ * @keylen: keylen for this flow operation
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @method: current method for flow
+ * @op_dir: direction (encrypt vs decrypt) of this flow
+ * @op_mode: op_mode for this flow
+ * @t_phy: Physical address of task
+ * @tl: pointer to the current ce_task for this flow
+ * @stat_req: number of requests handled by this flow
+ */
+struct sun8i_ce_flow {
+ struct crypto_engine *engine;
+ void *bounce_iv;
+ unsigned int ivlen;
+ unsigned int keylen;
+ struct completion complete;
+ int status;
+ u32 method;
+ u32 op_dir;
+ u32 op_mode;
+ dma_addr_t t_phy;
+ int timeout;
+ struct ce_task *tl;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ce_dev - main container for all this driver information
+ * @base: base address of CE
+ * @ceclks: clocks used by CE
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @chanlist: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ce_dev {
+ void __iomem *base;
+ struct clk *ceclks[CE_MAX_CLOCKS];
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ce_flow *chanlist;
+ atomic_t flow;
+ const struct ce_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: len of the key
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ce_dev *ce;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ce_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ce_algo_id: the CE_ID for this template
+ * @ce_blockmode: the type of block operation CE_ID
+ * @ce: pointer to the sun8i_ce_dev structure associated with
+ * this template
+ * @alg: union of algorithm sub-structs; exactly one is used per template
+ * @stat_req: number of requests handled via this template
+ * @stat_fb: number of requests which fell back to the software implementation
+ */
+struct sun8i_ce_alg_template {
+ u32 type;
+ u32 ce_algo_id;
+ u32 ce_blockmode;
+ struct sun8i_ce_dev *ce;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_skdecrypt(struct skcipher_request *areq);
+int sun8i_ce_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile
new file mode 100644
index 000000000000..add7b0543fd5
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o
+sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
new file mode 100644
index 000000000000..f222979a5623
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-cipher.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128/192/256-bit keys in
+ * CBC and ECB modes.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README.
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ss.h"
+
+static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
+{
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *sg;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ return true;
+
+ if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
+ return true;
+
+ sg = areq->src;
+ while (sg) {
+ if ((sg->length % 16) != 0)
+ return true;
+ if ((sg_dma_len(sg) % 16) != 0)
+ return true;
+ if (!IS_ALIGNED(sg->offset, 16))
+ return true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if ((sg->length % 16) != 0)
+ return true;
+ if ((sg_dma_len(sg) % 16) != 0)
+ return true;
+ if (!IS_ALIGNED(sg->offset, 16))
+ return true;
+ sg = sg_next(sg);
+ }
+
+ /* The SS requires the same number of SGs, with matching lengths, for source and destination */
+ in_sg = areq->src;
+ out_sg = areq->dst;
+ while (in_sg && out_sg) {
+ if (in_sg->length != out_sg->length)
+ return true;
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ }
+ if (in_sg || out_sg)
+ return true;
+ return false;
+}
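+
+/*
+ * Summary of the constraints checked above: cryptlen must be a
+ * non-zero multiple of 16, each side may have at most 8 SGs, every SG
+ * length must be a multiple of 16 with a 16-byte aligned offset, and
+ * the src/dst lists must pair up one-to-one with equal lengths;
+ * anything else goes to the software fallback.
+ */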
+
+static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int sun8i_ss_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+ struct scatterlist *sg;
+ unsigned int todo, len, offset, ivsize;
+ void *backup_iv = NULL;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+ int i;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+
+ dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+#endif
+
+ rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
+ rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
+ rctx->keylen = op->keylen;
+
+ rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, rctx->p_key)) {
+ dev_err(ss->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && ivsize > 0) {
+ rctx->ivlen = ivsize;
+ rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!rctx->biv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ if (rctx->op_dir & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ memcpy(rctx->biv, areq->iv, ivsize);
+ rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, rctx->p_iv)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ }
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->src;
+ while (i < nr_sgs && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgs_next;
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgs_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ss->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->dst;
+ while (i < nr_sgd && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgd_next;
+ rctx->t_dst[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_dst[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgd_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ss->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (rctx->p_iv)
+ dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
+ DMA_TO_DEVICE);
+
+ if (areq->iv && ivsize > 0) {
+ if (rctx->biv) {
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ memzero_explicit(backup_iv, ivsize);
+ kzfree(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(rctx->biv);
+ }
+ }
+
+theend_key:
+ dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+
+ return err;
+}
+
+static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ss_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ss_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
+ rctx->op_dir = SS_DECRYPTION;
+
+ if (sun8i_ss_need_fallback(areq))
+ return sun8i_ss_cipher_fallback(areq);
+
+ e = sun8i_ss_get_engine_number(op->ss);
+ engine = op->ss->flows[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ss_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
+ rctx->op_dir = SS_ENCRYPTION;
+
+ if (sun8i_ss_need_fallback(areq))
+ return sun8i_ss_cipher_fallback(areq);
+
+ e = sun8i_ss_get_engine_number(op->ss);
+ engine = op->ss->flows[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
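+
+/*
+ * After the fallback check, both entry points above only pick a flow
+ * and queue the request; the actual transfer runs later from the
+ * crypto engine thread via sun8i_ss_handle_cipher_request().
+ */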
+
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ op->ss = algt->ss;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ dev_info(op->ss->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+
+ op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0) {
+ dev_err(op->ss->dev, "pm error %d\n", err);
+ goto error_pm;
+ }
+
+ return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync(op->ss->dev);
+}
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+
+ if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+ dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
new file mode 100644
index 000000000000..90997cc509b8
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-core.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SecuritySystem
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ss.h"
+
+static const struct ss_variant ss_a80_variant = {
+ .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
+ },
+ .op_mode = { SS_OP_ECB, SS_OP_CBC,
+ },
+ .ss_clks = {
+ { "bus", 0, 300 * 1000 * 1000 },
+ { "mod", 0, 300 * 1000 * 1000 },
+ }
+};
+
+static const struct ss_variant ss_a83t_variant = {
+ .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
+ },
+ .op_mode = { SS_OP_ECB, SS_OP_CBC,
+ },
+ .ss_clks = {
+ { "bus", 0, 300 * 1000 * 1000 },
+ { "mod", 0, 300 * 1000 * 1000 },
+ }
+};
+
+/*
+ * sun8i_ss_get_engine_number() - get the next flow slot
+ * This is a simple round-robin way of picking the next flow.
+ */
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss)
+{
+ return atomic_inc_return(&ss->flow) % MAXFLOW;
+}
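+
+/* e.g. with MAXFLOW == 2, successive calls return 1, 0, 1, 0, ... */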
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx,
+ const char *name)
+{
+ int flow = rctx->flow;
+ u32 v = 1;
+ int i;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[flow].stat_req++;
+#endif
+
+ /* choose between stream0/stream1 */
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ v |= rctx->op_mode;
+ v |= rctx->method;
+
+ if (rctx->op_dir)
+ v |= SS_DECRYPTION;
+
+ switch (rctx->keylen) {
+ case 128 / 8:
+ v |= SS_AES_128BITS << 7;
+ break;
+ case 192 / 8:
+ v |= SS_AES_192BITS << 7;
+ break;
+ case 256 / 8:
+ v |= SS_AES_256BITS << 7;
+ break;
+ }
+
+ for (i = 0; i < MAX_SG; i++) {
+ if (!rctx->t_dst[i].addr)
+ break;
+
+ mutex_lock(&ss->mlock);
+ writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
+
+ if (i == 0) {
+ if (rctx->p_iv)
+ writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
+ } else {
+ if (rctx->biv) {
+ if (rctx->op_dir == SS_ENCRYPTION)
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ else
+ writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ }
+ }
+
+ dev_dbg(ss->dev,
+ "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n",
+ i, flow, name, v,
+ rctx->t_src[i].len, rctx->t_dst[i].len,
+ rctx->method, rctx->op_mode,
+ rctx->op_dir, rctx->t_src[i].len);
+
+ writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
+ writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
+ writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+ mutex_unlock(&ss->mlock);
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(2000));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
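+
+/*
+ * Editorial note on the IV handling in the loop above: the bounce IV
+ * is only programmed for the first SG; for later SGs the hardware is
+ * pointed at the last ivlen bytes of the previous segment on the
+ * ciphertext side (dst when encrypting, src when decrypting), which is
+ * exactly the CBC chaining value, so no extra copy is needed.
+ */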
+
+static irqreturn_t ss_irq_handler(int irq, void *data)
+{
+ struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ss->base + SS_INT_STA_REG);
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ss->base + SS_INT_STA_REG);
+ ss->flows[flow].status = 1;
+ complete(&ss->flows[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ss_alg_template ss_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_AES,
+ .ss_blockmode = SS_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ss_aes_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_AES,
+ .ss_blockmode = SS_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ss_aes_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_DES3,
+ .ss_blockmode = SS_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ss_des3_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_DES3,
+ .ss_blockmode = SS_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sun8i_ss_des3_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ss_dev *ss = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ss_algs[i].alg.skcipher.base.cra_driver_name,
+ ss_algs[i].alg.skcipher.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ss_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ss_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ss_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(ss->flows[i].engine);
+ i--;
+ }
+}
+
+/*
+ * Allocate the flow list structure
+ */
+static int allocate_flows(struct sun8i_ss_dev *ss)
+{
+ int i, err;
+
+ ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
+ GFP_KERNEL);
+ if (!ss->flows)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&ss->flows[i].complete);
+
+ ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
+ if (!ss->flows[i].engine) {
+ dev_err(ss->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ss->flows[i].engine);
+ if (err) {
+ dev_err(ss->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ sun8i_ss_free_flows(ss, i);
+ return err;
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms proposed by this driver.
+ */
+static int sun8i_ss_pm_suspend(struct device *dev)
+{
+ struct sun8i_ss_dev *ss = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(ss->reset);
+ for (i = 0; i < SS_MAX_CLOCKS; i++)
+ clk_disable_unprepare(ss->ssclks[i]);
+ return 0;
+}
+
+static int sun8i_ss_pm_resume(struct device *dev)
+{
+ struct sun8i_ss_dev *ss = dev_get_drvdata(dev);
+ int err, i;
+
+ for (i = 0; i < SS_MAX_CLOCKS; i++) {
+ if (!ss->variant->ss_clks[i].name)
+ continue;
+ err = clk_prepare_enable(ss->ssclks[i]);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable %s\n",
+ ss->variant->ss_clks[i].name);
+ goto error;
+ }
+ }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+ /* enable interrupts for all flows (MAXFLOW == 2) */
+ writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);
+
+ return 0;
+error:
+ sun8i_ss_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sun8i_ss_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL)
+};
+
+static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ss->dev);
+ pm_runtime_set_autosuspend_delay(ss->dev, 2000);
+
+ err = pm_runtime_set_suspended(ss->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ss->dev);
+ return err;
+}
+
+static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss)
+{
+ pm_runtime_disable(ss->dev);
+}
+
+static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
+{
+ int ss_method, err, id, i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ ss_algs[i].ss = ss;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ss_algs[i].ss_algo_id;
+ ss_method = ss->variant->alg_cipher[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+ dev_dbg(ss->dev,
+ "Algorithm %s not supported\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ id = ss_algs[i].ss_blockmode;
+ ss_method = ss->variant->op_mode[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+ dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ dev_info(ss->dev, "DEBUG: Register %s\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ return err;
+ }
+ break;
+ default:
+ ss_algs[i].ss = NULL;
+ dev_err(ss->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss)
+{
+ unsigned long cr;
+ int err, i;
+
+ for (i = 0; i < SS_MAX_CLOCKS; i++) {
+ if (!ss->variant->ss_clks[i].name)
+ continue;
+ ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name);
+ if (IS_ERR(ss->ssclks[i])) {
+ err = PTR_ERR(ss->ssclks[i]);
+ dev_err(ss->dev, "Cannot get %s SS clock err=%d\n",
+ ss->variant->ss_clks[i].name, err);
+ return err;
+ }
+ cr = clk_get_rate(ss->ssclks[i]);
+ if (!cr)
+ return -EINVAL;
+ if (ss->variant->ss_clks[i].freq > 0 &&
+ cr != ss->variant->ss_clks[i].freq) {
+ dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n",
+ ss->variant->ss_clks[i].name,
+ ss->variant->ss_clks[i].freq,
+ ss->variant->ss_clks[i].freq / 1000000,
+ cr, cr / 1000000);
+ err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq);
+ if (err)
+ dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n",
+ ss->variant->ss_clks[i].name,
+ ss->variant->ss_clks[i].freq);
+ }
+ if (ss->variant->ss_clks[i].max_freq > 0 &&
+ cr > ss->variant->ss_clks[i].max_freq)
+ dev_warn(ss->dev, "Frequency for %s (%lu Hz) is higher than the datasheet's recommendation (%lu Hz)",
+ ss->variant->ss_clks[i].name, cr,
+ ss->variant->ss_clks[i].max_freq);
+ }
+ return 0;
+}
+
+static int sun8i_ss_probe(struct platform_device *pdev)
+{
+ struct sun8i_ss_dev *ss;
+ int err, irq;
+ u32 v;
+
+ ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+ ss->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ss->base))
+ return PTR_ERR(ss->base);
+
+ err = sun8i_ss_get_clks(ss);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
+ return irq;
+ }
+
+ ss->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ss->reset)) {
+ if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ss->reset);
+ dev_err(&pdev->dev, "No reset control found\n");
+ return PTR_ERR(ss->reset);
+ }
+
+ mutex_init(&ss->mlock);
+
+ err = allocate_flows(ss);
+ if (err)
+ return err;
+
+ err = sun8i_ss_pm_init(ss);
+ if (err)
+ goto error_pm;
+
+ err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss);
+ if (err) {
+ dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err);
+ goto error_irq;
+ }
+
+ err = sun8i_ss_register_algs(ss);
+ if (err)
+ goto error_alg;
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ goto error_alg;
+
+ v = readl(ss->base + SS_CTL_REG);
+ v >>= SS_DIE_ID_SHIFT;
+ v &= SS_DIE_ID_MASK;
+ dev_info(&pdev->dev, "Security System Die ID %x\n", v);
+
+ pm_runtime_put_sync(ss->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ /* Ignore debugfs errors */
+ ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
+ ss->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ss->dbgfs_dir, ss,
+ &sun8i_ss_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ sun8i_ss_unregister_algs(ss);
+error_irq:
+ sun8i_ss_pm_exit(ss);
+error_pm:
+ sun8i_ss_free_flows(ss, MAXFLOW);
+ return err;
+}
+
+static int sun8i_ss_remove(struct platform_device *pdev)
+{
+ struct sun8i_ss_dev *ss = platform_get_drvdata(pdev);
+
+ sun8i_ss_unregister_algs(ss);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ debugfs_remove_recursive(ss->dbgfs_dir);
+#endif
+
+ sun8i_ss_free_flows(ss, MAXFLOW);
+
+ sun8i_ss_pm_exit(ss);
+
+ return 0;
+}
+
+static const struct of_device_id sun8i_ss_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a83t-crypto",
+ .data = &ss_a83t_variant },
+ { .compatible = "allwinner,sun9i-a80-crypto",
+ .data = &ss_a80_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
+
+static struct platform_driver sun8i_ss_driver = {
+ .probe = sun8i_ss_probe,
+ .remove = sun8i_ss_remove,
+ .driver = {
+ .name = "sun8i-ss",
+ .pm = &sun8i_ss_pm_ops,
+ .of_match_table = sun8i_ss_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
new file mode 100644
index 000000000000..b5f855f3de10
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ss.h - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+#define SS_ENCRYPTION 0
+#define SS_DECRYPTION BIT(6)
+
+#define SS_ALG_AES 0
+#define SS_ALG_DES (1 << 2)
+#define SS_ALG_3DES (2 << 2)
+
+#define SS_CTL_REG 0x00
+#define SS_INT_CTL_REG 0x04
+#define SS_INT_STA_REG 0x08
+#define SS_KEY_ADR_REG 0x10
+#define SS_IV_ADR_REG 0x18
+#define SS_SRC_ADR_REG 0x20
+#define SS_DST_ADR_REG 0x28
+#define SS_LEN_ADR_REG 0x30
+
+#define SS_ID_NOTSUPP 0xFF
+
+#define SS_ID_CIPHER_AES 0
+#define SS_ID_CIPHER_DES 1
+#define SS_ID_CIPHER_DES3 2
+#define SS_ID_CIPHER_MAX 3
+
+#define SS_ID_OP_ECB 0
+#define SS_ID_OP_CBC 1
+#define SS_ID_OP_MAX 2
+
+#define SS_AES_128BITS 0
+#define SS_AES_192BITS 1
+#define SS_AES_256BITS 2
+
+#define SS_OP_ECB 0
+#define SS_OP_CBC (1 << 13)
+
+#define SS_FLOW0 BIT(30)
+#define SS_FLOW1 BIT(31)
+
+#define MAX_SG 8
+
+#define MAXFLOW 2
+
+#define SS_MAX_CLOCKS 2
+
+#define SS_DIE_ID_SHIFT 20
+#define SS_DIE_ID_MASK 0x07
+
+/*
+ * struct ss_clock - Describe clocks used by sun8i-ss
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock
+ */
+struct ss_clock {
+ const char *name;
+ unsigned long freq;
+ unsigned long max_freq;
+};
+
+/*
+ * struct ss_variant - Describes the SS capabilities of each variant
+ * @alg_cipher: list of supported ciphers. For each SS_ID_CIPHER_XXX this
+ * gives the corresponding SS_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @ss_clks: list of clocks needed by this variant
+ */
+struct ss_variant {
+ char alg_cipher[SS_ID_CIPHER_MAX];
+ u32 op_mode[SS_ID_OP_MAX];
+ struct ss_clock ss_clks[SS_MAX_CLOCKS];
+};
+
+struct sginfo {
+ u32 addr;
+ u32 len;
+};
+
+/*
+ * struct sun8i_ss_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @stat_req: number of requests handled by this flow
+ */
+struct sun8i_ss_flow {
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ss_dev - main container for all this driver information
+ * @base: base address of SS
+ * @ssclks: clocks used by SS
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @flows: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ss_dev {
+ void __iomem *base;
+ struct clk *ssclks[SS_MAX_CLOCKS];
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ss_flow *flows;
+ atomic_t flow;
+ const struct ss_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IV
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of biv
+ * @keylen: keylen for this request
+ * @biv: buffer which contains the IV
+ */
+struct sun8i_cipher_req_ctx {
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ u32 p_key;
+ u32 p_iv;
+ u32 method;
+ u32 op_mode;
+ u32 op_dir;
+ int flow;
+ unsigned int ivlen;
+ unsigned int keylen;
+ void *biv;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: len of the key
+ * @ss: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ss_dev *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ss_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ss_algo_id: the SS_ID for this template
+ * @ss_blockmode: the type of block operation SS_ID
+ * @ss: pointer to the sun8i_ss_dev structure associated with
+ * this template
+ * @alg: union of algorithm sub-structs; exactly one is used per template
+ * @stat_req: number of requests handled via this template
+ * @stat_fb: number of requests which fell back to the software implementation
+ */
+struct sun8i_ss_alg_template {
+ u32 type;
+ u32 ss_algo_id;
+ u32 ss_blockmode;
+ struct sun8i_ss_dev *ss;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_skdecrypt(struct skcipher_request *areq);
+int sun8i_ss_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss);
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index de5e9352e920..7d6b695c4ab3 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
- if (!dev->scatter_buffer_va) {
- dma_free_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- dev->sdr, dev->sdr_pa);
+ if (!dev->scatter_buffer_va)
return -ENOMEM;
- }
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
new file mode 100644
index 000000000000..b90850d18965
--- /dev/null
+++ b/drivers/crypto/amlogic/Kconfig
@@ -0,0 +1,24 @@
+config CRYPTO_DEV_AMLOGIC_GXL
+ tristate "Support for amlogic cryptographic offloader"
+ default y if ARCH_MESON
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ help
+ Select y here to enable support for the cryptographic offloader
+ available on Amlogic GXL SoCs.
+ This hardware handles the AES cipher in ECB and CBC modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amlogic-gxl-crypto.
+
+config CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ bool "Enable amlogic stats"
+ depends on CRYPTO_DEV_AMLOGIC_GXL
+ depends on DEBUG_FS
+ help
+ Say y to enable amlogic-crypto debug stats.
+ This will create /sys/kernel/debug/gxl-crypto/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/amlogic/Makefile b/drivers/crypto/amlogic/Makefile
new file mode 100644
index 000000000000..39057e62c13e
--- /dev/null
+++ b/drivers/crypto/amlogic/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic-gxl-crypto.o
+amlogic-gxl-crypto-y := amlogic-gxl-core.o amlogic-gxl-cipher.o
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
new file mode 100644
index 000000000000..e589015aac1c
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * amlogic-gxl-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * This file adds support for the AES cipher with 128/192/256-bit keys in
+ * CBC and ECB modes.
+ */
+
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <crypto/internal/skcipher.h>
+#include "amlogic-gxl.h"
+
+static int get_engine_number(struct meson_dev *mc)
+{
+ return atomic_inc_return(&mc->flow) % MAXFLOW;
+}
+
+static bool meson_cipher_need_fallback(struct skcipher_request *areq)
+{
+ struct scatterlist *src_sg = areq->src;
+ struct scatterlist *dst_sg = areq->dst;
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if (sg_nents(src_sg) != sg_nents(dst_sg))
+ return true;
+
+ /* The KEY/IV descriptors use 3 of the MAXDESC slots */
+ if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
+ return true;
+
+ while (src_sg && dst_sg) {
+ if ((src_sg->length % 16) != 0)
+ return true;
+ if ((dst_sg->length % 16) != 0)
+ return true;
+ if (src_sg->length != dst_sg->length)
+ return true;
+ if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
+ return true;
+ if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
+ return true;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ return false;
+}
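+
+/*
+ * Summary of the constraints checked above: non-zero cryptlen, equal
+ * SG counts on both sides (each at most MAXDESC - 3, since the key/IV
+ * descriptors eat three slots), pairwise-equal SG lengths that are
+ * multiples of 16, and 32-bit aligned offsets; anything else uses the
+ * software fallback.
+ */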
+
+static int meson_cipher_do_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct meson_alg_template *algt;
+#endif
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(req, op->fallback_tfm);
+ skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir == MESON_DECRYPT)
+ err = crypto_skcipher_decrypt(req);
+ else
+ err = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return err;
+}
+
+static int meson_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct meson_dev *mc = op->mc;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct meson_alg_template *algt;
+ int flow = rctx->flow;
+ unsigned int todo, eat, len;
+ struct scatterlist *src_sg = areq->src;
+ struct scatterlist *dst_sg = areq->dst;
+ struct meson_desc *desc;
+ int nr_sgs, nr_sgd;
+ int i, err = 0;
+ unsigned int keyivlen, ivsize, offset, tloffset;
+ dma_addr_t phykeyiv;
+ void *backup_iv = NULL, *bkeyiv;
+ __le32 v;
+
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+
+ dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, crypto_skcipher_ivsize(tfm),
+ op->keylen, flow);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ algt->stat_req++;
+ mc->chanlist[flow].stat_req++;
+#endif
+
+ /*
+ * The hardware expects a list of meson_desc structures.
+ * The first two descriptors store the key, the third stores the IV.
+ */
+ bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
+ if (!bkeyiv)
+ return -ENOMEM;
+
+ memcpy(bkeyiv, op->key, op->keylen);
+ keyivlen = op->keylen;
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && ivsize > 0) {
+ if (ivsize > areq->cryptlen) {
+ dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
+ err = -EINVAL;
+ goto theend;
+ }
+ memcpy(bkeyiv + 32, areq->iv, ivsize);
+ keyivlen = 48;
+ if (rctx->op_dir == MESON_DECRYPT) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ }
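+ /* AES-192 keys are zero-padded to fill two 16-byte key descriptors */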
+ if (keyivlen == 24)
+ keyivlen = 32;
+
+ phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(mc->dev, phykeyiv);
+ if (err) {
+ dev_err(mc->dev, "Cannot DMA map key/IV\n");
+ goto theend;
+ }
+
+ tloffset = 0;
+ eat = 0;
+ i = 0;
+ while (keyivlen > eat) {
+ desc = &mc->chanlist[flow].tl[tloffset];
+ memset(desc, 0, sizeof(struct meson_desc));
+ todo = min(keyivlen - eat, 16u);
+ desc->t_src = cpu_to_le32(phykeyiv + i * 16);
+ desc->t_dst = cpu_to_le32(i * 16);
+ v = (MODE_KEY << 20) | DESC_OWN | 16;
+ desc->t_status = cpu_to_le32(v);
+
+ eat += todo;
+ i++;
+ tloffset++;
+ }
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs < 0) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend;
+ }
+ }
+
+ src_sg = areq->src;
+ dst_sg = areq->dst;
+ len = areq->cryptlen;
+ while (src_sg) {
+ desc = &mc->chanlist[flow].tl[tloffset];
+ memset(desc, 0, sizeof(struct meson_desc));
+
+ desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
+ desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
+ todo = min(len, sg_dma_len(src_sg));
+ v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
+ if (rctx->op_dir)
+ v |= DESC_ENCRYPTION;
+ len -= todo;
+
+ if (!sg_next(src_sg))
+ v |= DESC_LAST;
+ desc->t_status = cpu_to_le32(v);
+ tloffset++;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ reinit_completion(&mc->chanlist[flow].complete);
+ mc->chanlist[flow].status = 0;
+ writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
+ wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
+ msecs_to_jiffies(500));
+ if (mc->chanlist[flow].status == 0) {
+ dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
+ err = -EINVAL;
+ }
+
+ dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
+
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ if (rctx->op_dir == MESON_DECRYPT) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst,
+ areq->cryptlen - ivsize,
+ ivsize, 0);
+ }
+ }
+theend:
+ kzfree(bkeyiv);
+ kzfree(backup_iv);
+
+ return err;
+}
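A minimal sketch of the key/IV staging done at the top of meson_cipher(), with a hypothetical helper name; it assumes the definitions from amlogic-gxl.h and the layout built above: key at bytes 0-31 of the bounce buffer, IV at bytes 32-47, pushed as 16-byte MODE_KEY descriptors:

static void stage_keyiv(struct meson_desc *tl, dma_addr_t phykeyiv,
			unsigned int keyivlen)
{
	unsigned int i, eat;

	for (i = 0, eat = 0; eat < keyivlen; i++, eat += 16) {
		memset(&tl[i], 0, sizeof(tl[i]));
		tl[i].t_src = cpu_to_le32(phykeyiv + i * 16);
		/* t_dst appears to select the engine-internal key/IV slot */
		tl[i].t_dst = cpu_to_le32(i * 16);
		tl[i].t_status = cpu_to_le32((MODE_KEY << 20) | DESC_OWN | 16);
	}
}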
+
+static int meson_handle_cipher_request(struct crypto_engine *engine,
+ void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = meson_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int meson_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = MESON_DECRYPT;
+ if (meson_cipher_need_fallback(areq))
+ return meson_cipher_do_fallback(areq);
+ e = get_engine_number(op->mc);
+ engine = op->mc->chanlist[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int meson_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = MESON_ENCRYPT;
+ if (meson_cipher_need_fallback(areq))
+ return meson_cipher_do_fallback(areq);
+ e = get_engine_number(op->mc);
+ engine = op->mc->chanlist[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int meson_cipher_init(struct crypto_tfm *tfm)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct meson_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+
+ memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ op->mc = algt->mc;
+
+ sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ op->enginectx.op.do_one_request = meson_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+void meson_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
+int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_dev *mc = op->mc;
+
+ switch (keylen) {
+ case 128 / 8:
+ op->keymode = MODE_AES_128;
+ break;
+ case 192 / 8:
+ op->keymode = MODE_AES_192;
+ break;
+ case 256 / 8:
+ op->keymode = MODE_AES_256;
+ break;
+ default:
+ dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
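As a usage sketch, not part of the patch: once this driver is bound, kernel users reach it through the generic skcipher API, where the priority of 400 makes "cbc-aes-gxl" a likely pick for "cbc(aes)". Buffer contents and the function name are hypothetical:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int gxl_cbc_aes_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	static u8 buf[64];	/* multiple of AES_BLOCK_SIZE */
	u8 key[16] = {}, iv[16] = {};
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}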
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
new file mode 100644
index 000000000000..fa05fce1c0de
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * amlogic-gxl-core.c - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * Core file which registers crypto algorithms supported by the hardware.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/dma-mapping.h>
+
+#include "amlogic-gxl.h"
+
+static irqreturn_t meson_irq_handler(int irq, void *data)
+{
+ struct meson_dev *mc = (struct meson_dev *)data;
+ int flow;
+ u32 p;
+
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (mc->irqs[flow] == irq) {
+ p = readl(mc->base + ((0x04 + flow) << 2));
+ if (p) {
+ writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2));
+ mc->chanlist[flow].status = 1;
+ complete(&mc->chanlist[flow].complete);
+ return IRQ_HANDLED;
+ }
+ dev_err(mc->dev, "%s %d Got irq for flow %d but ctrl is empty\n", __func__, irq, flow);
+ }
+ }
+
+ dev_err(mc->dev, "%s %d from unknown irq\n", __func__, irq);
+ return IRQ_HANDLED;
+}
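The per-flow register usage implied by this handler and by the kick-off writel() in meson_cipher() can be summarized with hypothetical helpers; the offsets are inferred from the code, as no public datasheet exists:

static void meson_start_flow(struct meson_dev *mc, int flow)
{
	/* descriptor list base at offset flow * 4; bit 1 starts the flow */
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
}

static void meson_ack_flow(struct meson_dev *mc, int flow)
{
	/* status lives at offset 0x10 + flow * 4; writing 0xF acks it */
	writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2));
}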
+
+static struct meson_alg_template mc_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .blockmode = MESON_OPMODE_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-gxl",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = meson_cipher_init,
+ .cra_exit = meson_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = meson_aes_setkey,
+ .encrypt = meson_skencrypt,
+ .decrypt = meson_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .blockmode = MESON_OPMODE_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-gxl",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = meson_cipher_init,
+ .cra_exit = meson_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = meson_aes_setkey,
+ .encrypt = meson_skencrypt,
+ .decrypt = meson_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+static int meson_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct meson_dev *mc = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ mc_algs[i].alg.skcipher.base.cra_driver_name,
+ mc_algs[i].alg.skcipher.base.cra_name,
+ mc_algs[i].stat_req, mc_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int meson_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meson_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations meson_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = meson_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void meson_free_chanlist(struct meson_dev *mc, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(mc->chanlist[i].engine);
+ if (mc->chanlist[i].tl)
+ dma_free_coherent(mc->dev, sizeof(struct meson_desc) * MAXDESC,
+ mc->chanlist[i].tl,
+ mc->chanlist[i].t_phy);
+ i--;
+ }
+}
+
+/*
+ * Allocate the channel list structure
+ */
+static int meson_allocate_chanlist(struct meson_dev *mc)
+{
+ int i, err;
+
+ mc->chanlist = devm_kcalloc(mc->dev, MAXFLOW,
+ sizeof(struct meson_flow), GFP_KERNEL);
+ if (!mc->chanlist)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&mc->chanlist[i].complete);
+
+ mc->chanlist[i].engine = crypto_engine_alloc_init(mc->dev, true);
+ if (!mc->chanlist[i].engine) {
+ dev_err(mc->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(mc->chanlist[i].engine);
+ if (err) {
+ dev_err(mc->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ mc->chanlist[i].tl = dma_alloc_coherent(mc->dev,
+ sizeof(struct meson_desc) * MAXDESC,
+ &mc->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!mc->chanlist[i].tl) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ meson_free_chanlist(mc, i);
+ return err;
+}
+
+static int meson_register_algs(struct meson_dev *mc)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ mc_algs[i].mc = mc;
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(mc->dev, "Failed to register %s\n",
+ mc_algs[i].alg.skcipher.base.cra_name);
+ mc_algs[i].mc = NULL;
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void meson_unregister_algs(struct meson_dev *mc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ if (!mc_algs[i].mc)
+ continue;
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int meson_crypto_probe(struct platform_device *pdev)
+{
+ struct meson_dev *mc;
+ int err, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ mc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mc);
+
+ mc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mc->base)) {
+ err = PTR_ERR(mc->base);
+ dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
+ return err;
+ }
+ mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
+ if (IS_ERR(mc->busclk)) {
+ err = PTR_ERR(mc->busclk);
+ dev_err(&pdev->dev, "Cannot get bus clock err=%d\n", err);
+ return err;
+ }
+
+ mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
+ for (i = 0; i < MAXFLOW; i++) {
+ mc->irqs[i] = platform_get_irq(pdev, i);
+ if (mc->irqs[i] < 0) {
+ dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
+ return mc->irqs[i];
+ }
+
+ err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
+ "gxl-crypto", mc);
+ if (err < 0) {
+ dev_err(mc->dev, "Cannot request IRQ for flow %d\n", i);
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(mc->busclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+ return err;
+ }
+
+ err = meson_allocate_chanlist(mc);
+ if (err)
+ goto error_flow;
+
+ err = meson_register_algs(mc);
+ if (err)
+ goto error_alg;
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
+ debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ meson_unregister_algs(mc);
+error_flow:
+ meson_free_chanlist(mc, MAXFLOW);
+ clk_disable_unprepare(mc->busclk);
+ return err;
+}
+
+static int meson_crypto_remove(struct platform_device *pdev)
+{
+ struct meson_dev *mc = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ debugfs_remove_recursive(mc->dbgfs_dir);
+#endif
+
+ meson_unregister_algs(mc);
+
+ meson_free_chanlist(mc, MAXFLOW);
+
+ clk_disable_unprepare(mc->busclk);
+ return 0;
+}
+
+static const struct of_device_id meson_crypto_of_match_table[] = {
+ { .compatible = "amlogic,gxl-crypto", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
+
+static struct platform_driver meson_crypto_driver = {
+ .probe = meson_crypto_probe,
+ .remove = meson_crypto_remove,
+ .driver = {
+ .name = "gxl-crypto",
+ .of_match_table = meson_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(meson_crypto_driver);
+
+MODULE_DESCRIPTION("Amlogic GXL cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
new file mode 100644
index 000000000000..b7f2de91ab76
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * amlogic-gxl.h - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
+#define MODE_KEY 1
+#define MODE_AES_128 0x8
+#define MODE_AES_192 0x9
+#define MODE_AES_256 0xa
+
+#define MESON_DECRYPT 0
+#define MESON_ENCRYPT 1
+
+#define MESON_OPMODE_ECB 0
+#define MESON_OPMODE_CBC 1
+
+#define MAXFLOW 2
+
+#define MAXDESC 64
+
+#define DESC_LAST BIT(18)
+#define DESC_ENCRYPTION BIT(28)
+#define DESC_OWN BIT(31)
+
+/*
+ * struct meson_desc - Descriptor for DMA operations
+ * Note that without a datasheet, some fields are unknown
+ * @t_status: Descriptor of the cipher operation (see description below)
+ * @t_src: Physical address of data to read
+ * @t_dst: Physical address of data to write
+ * t_status is segmented like this:
+ * @len: 0-16 length of data to operate on
+ * @irq: 17 Ignored by hardware
+ * @eoc: 18 End of chain; set when the descriptor is the last
+ * @loop: 19 Unknown
+ * @mode: 20-23 Type of algorithm (AES, SHA)
+ * @begin: 24 Unknown
+ * @end: 25 Unknown
+ * @op_mode: 26-27 Blockmode (CBC, ECB)
+ * @enc: 28 0 means decryption, 1 means encryption
+ * @block: 29 Unknown
+ * @error: 30 Unknown
+ * @owner: 31 owner of the descriptor, 1 means owned by the HW
+ */
+struct meson_desc {
+ __le32 t_status;
+ __le32 t_src;
+ __le32 t_dst;
+};
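A hypothetical helper, mirroring how meson_cipher() composes the word, makes the t_status layout above concrete:

static inline __le32 meson_desc_status(u32 mode, u32 op_mode, bool enc,
				       bool last, u32 len)
{
	u32 v = (mode << 20) | (op_mode << 26) | DESC_OWN | len;

	if (enc)
		v |= DESC_ENCRYPTION;
	if (last)
		v |= DESC_LAST;
	return cpu_to_le32(v);
}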
+
+/*
+ * struct meson_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @keylen: keylen for this flow operation
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @t_phy: Physical address of the descriptor list
+ * @tl: pointer to the descriptor list for this flow
+ * @stat_req: number of requests done by this flow
+ */
+struct meson_flow {
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+ unsigned int keylen;
+ dma_addr_t t_phy;
+ struct meson_desc *tl;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct meson_dev - main container for all this driver information
+ * @base: base address of amlogic-crypto
+ * @busclk: bus clock for amlogic-crypto
+ * @dev: the platform device
+ * @chanlist: array of all flows
+ * @flow: flow to use in next request
+ * @irqs: IRQ numbers for amlogic-crypto
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ */
+struct meson_dev {
+ void __iomem *base;
+ struct clk *busclk;
+ struct device *dev;
+ struct meson_flow *chanlist;
+ atomic_t flow;
+ int *irqs;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct dentry *dbgfs_dir;
+#endif
+};
+
+/*
+ * struct meson_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct meson_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct meson_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @keymode: the keymode (type and size of key) associated with this TFM
+ * @mc: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct meson_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ u32 keymode;
+ struct meson_dev *mc;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct meson_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @blockmode: the type of block operation
+ * @mc: pointer to the meson_dev structure associated with this template
+ * @alg: union of algorithm definitions; exactly one sub-struct is used
+ * @stat_req: number of requests done on this template
+ * @stat_fb: number of requests which used the fallback on this template
+ */
+struct meson_alg_template {
+ u32 type;
+ u32 blockmode;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+ struct meson_dev *mc;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int meson_enqueue(struct crypto_async_request *areq, u32 type);
+
+int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int meson_cipher_init(struct crypto_tfm *tfm);
+void meson_cipher_exit(struct crypto_tfm *tfm);
+int meson_skdecrypt(struct skcipher_request *areq);
+int meson_skencrypt(struct skcipher_request *areq);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 026f193556f9..91092504bc96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
@@ -117,7 +118,7 @@ struct atmel_aes_ctx {
struct atmel_aes_ctr_ctx {
struct atmel_aes_base_ctx base;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
@@ -129,13 +130,13 @@ struct atmel_aes_gcm_ctx {
struct scatterlist src[2];
struct scatterlist dst[2];
- u32 j0[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 j0[AES_BLOCK_SIZE / sizeof(u32)];
u32 tag[AES_BLOCK_SIZE / sizeof(u32)];
- u32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
size_t textlen;
- const u32 *ghash_in;
- u32 *ghash_out;
+ const __be32 *ghash_in;
+ __be32 *ghash_out;
atmel_aes_fn_t ghash_resume;
};
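The u32 to __be32 conversions in this and the following hunks let sparse verify byte order; values are stored big-endian and converted explicitly at use, as the CTR path does with be32_to_cpu(ctx->iv[3]). A minimal sketch of that discipline:

#include <linux/types.h>
#include <asm/byteorder.h>

static __be32 be_ctr_increment(__be32 word)
{
	u32 v = be32_to_cpu(word);	/* to CPU order for arithmetic */

	return cpu_to_be32(v + 1);	/* back to big-endian storage */
}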
@@ -145,7 +146,7 @@ struct atmel_aes_xts_ctx {
u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_ctx {
struct atmel_aes_base_ctx base;
struct atmel_sha_authenc_ctx *auth;
@@ -154,10 +155,10 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
- u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
+ u8 lastc[AES_BLOCK_SIZE];
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_reqctx {
struct atmel_aes_reqctx base;
@@ -388,13 +389,13 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
}
static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
- u32 *value)
+ void *value)
{
atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
- const u32 *value)
+ const void *value)
{
atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
@@ -486,13 +487,36 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
return (dd->flags & AES_FLAGS_ENCRYPT);
}
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
#endif
+static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
+{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
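This helper restores the skcipher contract that req->iv holds the last ciphertext block after a CBC operation, so a follow-up request chains correctly. In-place decryption overwrites that block with plaintext, which is why atmel_aes_crypt() below saves it into rctx->lastc first; the pair of steps, sketched with hypothetical helpers:

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/string.h>

/* before an in-place CBC decrypt: save the last ciphertext block */
static void cbc_save_last_block(struct skcipher_request *req, u8 *lastc,
				unsigned int ivsize)
{
	scatterwalk_map_and_copy(lastc, req->src,
				 req->cryptlen - ivsize, ivsize, 0);
}

/* after the operation: hand the saved block back as the chaining IV */
static void cbc_restore_iv(struct skcipher_request *req, const u8 *lastc,
			   unsigned int ivsize)
{
	memcpy(req->iv, lastc, ivsize);
}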
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
#endif
@@ -500,26 +524,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead) {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher =
- crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-
- if (rctx->mode & AES_FLAGS_ENCRYPT) {
- scatterwalk_map_and_copy(req->info, req->dst,
- req->nbytes - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst) {
- memcpy(req->info, rctx->lastc, ivsize);
- } else {
- scatterwalk_map_and_copy(req->info, req->src,
- req->nbytes - ivsize, ivsize, 0);
- }
- }
- }
+ if (!dd->ctx->is_aead)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -530,7 +536,7 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
}
static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv, const u32 *key, int keylen)
+ const __be32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
@@ -561,7 +567,7 @@ static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
}
static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+ const __be32 *iv)
{
atmel_aes_write_ctrl_key(dd, use_dma, iv,
@@ -976,9 +982,9 @@ static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
dd->ctx->block_size != AES_BLOCK_SIZE);
int err;
@@ -988,12 +994,13 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- atmel_aes_write_ctrl(dd, use_dma, req->info);
+ atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
if (use_dma)
- return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_dma_start(dd, req->src, req->dst,
+ req->cryptlen,
atmel_aes_transfer_complete);
- return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
atmel_aes_transfer_complete);
}
@@ -1006,7 +1013,7 @@ atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
u32 ctr, blocks;
size_t datalen;
@@ -1014,11 +1021,11 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Check for transfer completion. */
ctx->offset += dd->total;
- if (ctx->offset >= req->nbytes)
+ if (ctx->offset >= req->cryptlen)
return atmel_aes_transfer_complete(dd);
/* Compute data length. */
- datalen = req->nbytes - ctx->offset;
+ datalen = req->cryptlen - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
if (dd->caps.has_ctr32) {
@@ -1071,8 +1078,8 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
int err;
atmel_aes_set_mode(dd, rctx);
@@ -1081,16 +1088,16 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
ctx->offset = 0;
dd->total = 0;
return atmel_aes_ctr_transfer(dd);
}
-static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_aes_reqctx *rctx;
struct atmel_aes_dev *dd;
@@ -1121,28 +1128,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
if (!dd)
return -ENODEV;
- rctx = ablkcipher_request_ctx(req);
+ rctx = skcipher_request_ctx(req);
rctx->mode = mode;
if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
- scatterwalk_map_and_copy(rctx->lastc, req->src,
- (req->nbytes - ivsize), ivsize, 0);
+ if (req->cryptlen >= ivsize)
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
}
return atmel_aes_handle_queue(dd, &req->base);
}
-static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1152,297 +1161,279 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_ECB);
}
-static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CBC);
}
-static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB);
}
-static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}
-static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}
-static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}
-static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}
-static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}
-static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
-static int atmel_aes_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_start;
return 0;
}
-static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_ctr_start;
return 0;
}
-static struct crypto_alg aes_algs[] = {
-{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "atmel-ecb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ecb_encrypt,
- .decrypt = atmel_aes_ecb_decrypt,
- }
+static struct skcipher_alg aes_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "atmel-ecb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ecb_encrypt,
+ .decrypt = atmel_aes_ecb_decrypt,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "atmel-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cbc_encrypt,
- .decrypt = atmel_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "atmel-cbc-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cbc_encrypt,
+ .decrypt = atmel_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "ofb(aes)",
- .cra_driver_name = "atmel-ofb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ofb_encrypt,
- .decrypt = atmel_aes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "atmel-ofb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ofb_encrypt,
+ .decrypt = atmel_aes_ofb_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb(aes)",
- .cra_driver_name = "atmel-cfb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb_encrypt,
- .decrypt = atmel_aes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "atmel-cfb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb_encrypt,
+ .decrypt = atmel_aes_cfb_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb32(aes)",
- .cra_driver_name = "atmel-cfb32-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb32_encrypt,
- .decrypt = atmel_aes_cfb32_decrypt,
- }
+ .base.cra_name = "cfb32(aes)",
+ .base.cra_driver_name = "atmel-cfb32-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB32_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb32_encrypt,
+ .decrypt = atmel_aes_cfb32_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb16(aes)",
- .cra_driver_name = "atmel-cfb16-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb16_encrypt,
- .decrypt = atmel_aes_cfb16_decrypt,
- }
+ .base.cra_name = "cfb16(aes)",
+ .base.cra_driver_name = "atmel-cfb16-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB16_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb16_encrypt,
+ .decrypt = atmel_aes_cfb16_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb8(aes)",
- .cra_driver_name = "atmel-cfb8-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb8_encrypt,
- .decrypt = atmel_aes_cfb8_decrypt,
- }
+ .base.cra_name = "cfb8(aes)",
+ .base.cra_driver_name = "atmel-cfb8-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB8_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb8_encrypt,
+ .decrypt = atmel_aes_cfb8_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "atmel-ctr-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_ctr_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ctr_encrypt,
- .decrypt = atmel_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "atmel-ctr-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_ctr_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ctr_encrypt,
+ .decrypt = atmel_aes_ctr_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
};
-static struct crypto_alg aes_cfb64_alg = {
- .cra_name = "cfb64(aes)",
- .cra_driver_name = "atmel-cfb64-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB64_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb64_encrypt,
- .decrypt = atmel_aes_cfb64_decrypt,
- }
+static struct skcipher_alg aes_cfb64_alg = {
+ .base.cra_name = "cfb64(aes)",
+ .base.cra_driver_name = "atmel-cfb64-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB64_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb64_encrypt,
+ .decrypt = atmel_aes_cfb64_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
};
@@ -1450,7 +1441,7 @@ static struct crypto_alg aes_cfb64_alg = {
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
const u32 *data, size_t datalen,
- const u32 *ghash_in, u32 *ghash_out,
+ const __be32 *ghash_in, __be32 *ghash_out,
atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
@@ -1471,7 +1462,7 @@ atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
const u32 *data, size_t datalen,
- const u32 *ghash_in, u32 *ghash_out,
+ const __be32 *ghash_in, __be32 *ghash_out,
atmel_aes_fn_t resume)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
@@ -1558,7 +1549,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
memcpy(data, iv, ivsize);
memset(data + ivsize, 0, padlen + sizeof(u64));
- ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
+ ((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
NULL, ctx->j0, atmel_aes_gcm_process);
@@ -1591,7 +1582,7 @@ static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
struct aead_request *req = aead_request_cast(dd->areq);
- u32 j0_lsw, *j0 = ctx->j0;
+ __be32 j0_lsw, *j0 = ctx->j0;
size_t padlen;
/* Write incr32(J0) into IV. */
@@ -1674,7 +1665,7 @@ static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
struct aead_request *req = aead_request_cast(dd->areq);
- u64 *data = dd->buf;
+ __be64 *data = dd->buf;
if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
@@ -1857,8 +1848,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
unsigned long flags;
int err;
@@ -1868,7 +1859,7 @@ static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- /* Compute the tweak value from req->info with ecb(aes). */
+ /* Compute the tweak value from req->iv with ecb(aes). */
flags = dd->flags;
dd->flags &= ~AES_FLAGS_MODE_MASK;
dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
@@ -1876,16 +1867,16 @@ static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
ctx->key2, ctx->base.keylen);
dd->flags = flags;
- atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
- static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
u8 *tweak_bytes = (u8 *)tweak;
int i;
@@ -1908,20 +1899,21 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
atmel_aes_write_block(dd, AES_TWR(0), tweak);
atmel_aes_write_block(dd, AES_ALPHAR(0), one);
if (use_dma)
- return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_dma_start(dd, req->src, req->dst,
+ req->cryptlen,
atmel_aes_transfer_complete);
- return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
atmel_aes_transfer_complete);
}
-static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
if (err)
return err;
@@ -1932,48 +1924,46 @@ static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_xts_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_xts_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_XTS);
}
-static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_xts_start;
return 0;
}
-static struct crypto_alg aes_xts_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "atmel-xts-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_xts_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_xts_setkey,
- .encrypt = atmel_aes_xts_encrypt,
- .decrypt = atmel_aes_xts_decrypt,
- }
+static struct skcipher_alg aes_xts_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "atmel-xts-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ .init = atmel_aes_xts_init_tfm,
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc aead functions */
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
@@ -2041,7 +2031,7 @@ static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
bool enc = atmel_aes_is_encrypt(dd);
struct scatterlist *src, *dst;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
u32 emr;
if (is_async)
@@ -2460,23 +2450,23 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc)
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
crypto_unregister_aead(&aes_authenc_algs[i]);
#endif
if (dd->caps.has_xts)
- crypto_unregister_alg(&aes_xts_alg);
+ crypto_unregister_skcipher(&aes_xts_alg);
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
if (dd->caps.has_cfb64)
- crypto_unregister_alg(&aes_cfb64_alg);
+ crypto_unregister_skcipher(&aes_cfb64_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -2484,13 +2474,13 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
- err = crypto_register_alg(&aes_cfb64_alg);
+ err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
@@ -2502,12 +2492,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
}
if (dd->caps.has_xts) {
- err = crypto_register_alg(&aes_xts_alg);
+ err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
}
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
err = crypto_register_aead(&aes_authenc_algs[i]);
@@ -2519,22 +2509,22 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
return 0;
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
for (j = 0; j < i; j++)
crypto_unregister_aead(&aes_authenc_algs[j]);
- crypto_unregister_alg(&aes_xts_alg);
+ crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
- crypto_unregister_alg(&aes_cfb64_alg);
+ crypto_unregister_skcipher(&aes_cfb64_alg);
err_aes_cfb64_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -2709,7 +2699,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
atmel_aes_get_cap(aes_dd);
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
goto iclk_unprepare;
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index cbd37a2edada..d6de810df44f 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -12,7 +12,7 @@
#ifndef __ATMEL_AUTHENC_H__
#define __ATMEL_AUTHENC_H__
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
#include <crypto/authenc.h>
#include <crypto/hash.h>
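The #ifdef to IS_ENABLED() conversions throughout this patch matter because IS_ENABLED(CONFIG_FOO) is true when the option is =y or =m, while #ifdef CONFIG_FOO only covers the built-in case:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* compiled when the option is built-in (=y) or modular (=m) */
#endif

#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
/* compiled only when the option is built-in (=y) */
#endif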
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 84cb8748a795..8ea0e4bcde0d 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -360,7 +360,7 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
unsigned int index, padlen;
- u64 bits[2];
+ __be64 bits[2];
u64 size[2];
size[0] = ctx->digcnt[0];
@@ -2212,7 +2212,7 @@ static struct ahash_alg sha_hmac_algs[] = {
},
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc functions */
static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 1a6c86ae6148..0c1f79b30fc1 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -36,6 +36,7 @@
#include <crypto/internal/des.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
@@ -72,7 +73,7 @@ struct atmel_tdes_ctx {
struct atmel_tdes_dev *dd;
int keylen;
- u32 key[3*DES_KEY_SIZE / sizeof(u32)];
+ u32 key[DES3_EDE_KEY_SIZE / sizeof(u32)];
unsigned long flags;
u16 block_size;
@@ -80,6 +81,7 @@ struct atmel_tdes_ctx {
struct atmel_tdes_reqctx {
unsigned long mode;
+ u8 lastc[DES_BLOCK_SIZE];
};
struct atmel_tdes_dma {
@@ -106,7 +108,7 @@ struct atmel_tdes_dev {
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
size_t total;
struct scatterlist *in_sg;
@@ -307,8 +309,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
dd->ctx->keylen >> 2);
if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
- atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
+ (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
}
return 0;
@@ -502,8 +504,8 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(
+ crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -571,19 +573,45 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
return err;
}
+static void
+atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
+{
+ struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & TDES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
+
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+
req->base.complete(&req->base, err);
}
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct crypto_async_request *async_req, *backlog;
struct atmel_tdes_ctx *ctx;
@@ -593,7 +621,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
+ ret = crypto_enqueue_request(&dd->queue, &req->base);
if (dd->flags & TDES_FLAGS_BUSY) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
@@ -610,18 +638,18 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
+ dd->total = req->cryptlen;
dd->in_offset = 0;
dd->in_sg = req->src;
dd->out_offset = 0;
dd->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= TDES_FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
dd->ctx = ctx;
@@ -665,32 +693,32 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
return err;
}
-static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
if (mode & TDES_FLAGS_CFB8) {
- if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB16) {
- if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB32) {
- if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
} else {
- if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
@@ -699,6 +727,15 @@ static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
+ if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ if (req->cryptlen >= ivsize)
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+
return atmel_tdes_handle_queue(ctx->dd, req);
}
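The two additions above implement the skcipher IV-chaining contract: when a CBC-style request completes, req->iv must contain the last ciphertext block so a follow-up request can continue the same stream. For in-place decryption (req->src == req->dst) that block is overwritten with plaintext during processing, which is why atmel_tdes_crypt() snapshots it into rctx->lastc first. A sketch of the caller-side behaviour this guarantees, assuming synchronous completion for brevity:

#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int tdes_cbc_two_chunks(struct skcipher_request *req,
                               struct scatterlist *sg1,
                               struct scatterlist *sg2,
                               u8 iv[DES_BLOCK_SIZE])
{
        int err;

        skcipher_request_set_crypt(req, sg1, sg1, 2 * DES_BLOCK_SIZE, iv);
        err = crypto_skcipher_decrypt(req);
        if (err)
                return err;

        /* iv now holds the last ciphertext block of the first chunk */
        skcipher_request_set_crypt(req, sg2, sg2, DES_BLOCK_SIZE, iv);
        return crypto_skcipher_decrypt(req);
}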
@@ -770,13 +807,13 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
dma_release_channel(dd->dma_lch_out.chan);
}
-static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des_key(tfm, key);
+ err = verify_skcipher_des_key(tfm, key);
if (err)
return err;
@@ -786,13 +823,13 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des3_key(tfm, key);
+ err = verify_skcipher_des3_key(tfm, key);
if (err)
return err;
@@ -802,84 +839,84 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
}
-static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, 0);
}
-static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
}
-static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
-static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
}
-static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
}
-static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB8);
}
-static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
}
-static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB16);
}
-static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
}
-static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB32);
}
-static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
}
-static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
}
-static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
-static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
+static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct atmel_tdes_dev *dd;
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
dd = atmel_tdes_find_dev(ctx);
if (!dd)
@@ -888,204 +925,184 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg tdes_algs[] = {
+static struct skcipher_alg tdes_algs[] = {
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "atmel-ecb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ecb_encrypt,
- .decrypt = atmel_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "atmel-ecb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "atmel-cbc-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cbc_encrypt,
- .decrypt = atmel_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "atmel-cbc-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
},
{
- .cra_name = "cfb(des)",
- .cra_driver_name = "atmel-cfb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(des)",
+ .base.cra_driver_name = "atmel-cfb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb_encrypt,
+ .decrypt = atmel_tdes_cfb_decrypt,
},
{
- .cra_name = "cfb8(des)",
- .cra_driver_name = "atmel-cfb8-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
- }
+ .base.cra_name = "cfb8(des)",
+ .base.cra_driver_name = "atmel-cfb8-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB8_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb8_encrypt,
+ .decrypt = atmel_tdes_cfb8_decrypt,
},
{
- .cra_name = "cfb16(des)",
- .cra_driver_name = "atmel-cfb16-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
- }
+ .base.cra_name = "cfb16(des)",
+ .base.cra_driver_name = "atmel-cfb16-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB16_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x1,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb16_encrypt,
+ .decrypt = atmel_tdes_cfb16_decrypt,
},
{
- .cra_name = "cfb32(des)",
- .cra_driver_name = "atmel-cfb32-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
- }
+ .base.cra_name = "cfb32(des)",
+ .base.cra_driver_name = "atmel-cfb32-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB32_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x3,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb32_encrypt,
+ .decrypt = atmel_tdes_cfb32_decrypt,
},
{
- .cra_name = "ofb(des)",
- .cra_driver_name = "atmel-ofb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "atmel-ofb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "atmel-ecb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ecb_encrypt,
- .decrypt = atmel_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "atmel-ecb-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "atmel-cbc-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cbc_encrypt,
- .decrypt = atmel_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "atmel-cbc-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
},
{
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "atmel-ofb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "atmel-ofb-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
},
};
@@ -1148,7 +1165,7 @@ static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
int i;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
- crypto_unregister_alg(&tdes_algs[i]);
+ crypto_unregister_skcipher(&tdes_algs[i]);
}
static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
@@ -1156,7 +1173,7 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
- err = crypto_register_alg(&tdes_algs[i]);
+ err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
}
@@ -1165,7 +1182,7 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
err_tdes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&tdes_algs[j]);
+ crypto_unregister_skcipher(&tdes_algs[j]);
return err;
}
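As an aside, the crypto core also provides array helpers that register a whole skcipher_alg array and unwind on failure; a hypothetical equivalent of the manual loops above:

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

static int atmel_tdes_register_algs_alt(void)
{
        /* registers all entries, unregistering earlier ones on failure */
        return crypto_register_skciphers(tdes_algs, ARRAY_SIZE(tdes_algs));
}

static void atmel_tdes_unregister_algs_alt(void)
{
        crypto_unregister_skciphers(tdes_algs, ARRAY_SIZE(tdes_algs));
}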
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index f85356a48e7e..1564a6f8c9cb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -110,8 +110,8 @@ static u8 select_channel(void)
}
/**
- * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
- * receive a SPU response message for an ablkcipher request. Includes buffers to
+ * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
+ * receive a SPU response message for an skcipher request. Includes buffers to
* catch SPU message headers and the response data.
* @mssg: mailbox message containing the receive sg
* @rctx: crypto request context
@@ -130,7 +130,7 @@ static u8 select_channel(void)
* < 0 if an error
*/
static int
-spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
+spu_skcipher_rx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 rx_frag_num,
unsigned int chunksize, u32 stat_pad_len)
@@ -179,8 +179,8 @@ spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
}
/**
- * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
- * send a SPU request message for an ablkcipher request. Includes SPU message
+ * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
+ * send a SPU request message for an skcipher request. Includes SPU message
* headers and the request data.
* @mssg: mailbox message containing the transmit sg
* @rctx: crypto request context
@@ -198,7 +198,7 @@ spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
* < 0 if an error
*/
static int
-spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
+spu_skcipher_tx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
@@ -283,7 +283,7 @@ static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
}
/**
- * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
+ * handle_skcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
* data.
* @rctx: Crypto request context
@@ -300,12 +300,12 @@ static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
* asynchronously
* Any other value indicates an error
*/
-static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
+static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req =
- container_of(areq, struct ablkcipher_request, base);
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
int err = 0;
@@ -468,7 +468,7 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
spu->spu_xts_tweak_in_payload())
rx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
+ err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
stat_pad_len);
if (err)
return err;
@@ -482,7 +482,7 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
spu->spu_xts_tweak_in_payload())
tx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
+ err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
pad_len);
if (err)
return err;
@@ -495,16 +495,16 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
}
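handle_skcipher_req() recovers the outer request from the embedded crypto_async_request via container_of(); skcipher_request_cast(), used in handle_skcipher_resp() below, packages the same idiom. A minimal sketch:

#include <crypto/skcipher.h>
#include <linux/kernel.h>

static struct skcipher_request *
to_skcipher_request(struct crypto_async_request *areq)
{
        /* valid only when areq is the ->base member of a skcipher_request */
        return container_of(areq, struct skcipher_request, base);
}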
/**
- * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
+ * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
* total received count for the request and updates global stats.
* @rctx: Crypto request context
*/
-static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
+static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
#endif
struct iproc_ctx_s *ctx = rctx->ctx;
u32 payload_len;
@@ -1685,8 +1685,8 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
/* Process the SPU response message */
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- handle_ablkcipher_resp(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ handle_skcipher_resp(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
handle_ahash_resp(rctx);
@@ -1708,8 +1708,8 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
spu_chunk_cleanup(rctx);
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = handle_ablkcipher_req(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = handle_skcipher_req(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = handle_ahash_req(rctx);
@@ -1739,7 +1739,7 @@ cb_finish:
/* ==================== Kernel Cryptographic API ==================== */
/**
- * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
+ * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
* @req: Crypto API request
* @encrypt: true if encrypting; false if decrypting
*
@@ -1747,11 +1747,11 @@ cb_finish:
* asynchronously
* < 0 if an error
*/
-static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
+static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
{
- struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
+ struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
struct iproc_ctx_s *ctx =
- crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int err;
flow_log("%s() enc:%u\n", __func__, encrypt);
@@ -1761,7 +1761,7 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
rctx->parent = &req->base;
rctx->is_encrypt = encrypt;
rctx->bd_suppress = false;
- rctx->total_todo = req->nbytes;
+ rctx->total_todo = req->cryptlen;
rctx->src_sent = 0;
rctx->total_sent = 0;
rctx->total_received = 0;
@@ -1782,15 +1782,15 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
ctx->cipher.mode == CIPHER_MODE_GCM ||
ctx->cipher.mode == CIPHER_MODE_CCM) {
rctx->iv_ctr_len =
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
+ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
+ memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
} else {
rctx->iv_ctr_len = 0;
}
/* Choose a SPU to process this request */
rctx->chan_idx = select_channel();
- err = handle_ablkcipher_req(rctx);
+ err = handle_skcipher_req(rctx);
if (err != -EINPROGRESS)
/* synchronous result */
spu_chunk_cleanup(rctx);
@@ -1798,13 +1798,13 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
return err;
}
-static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1812,13 +1812,13 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1826,10 +1826,10 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
if (ctx->cipher.mode == CIPHER_MODE_XTS)
/* XTS includes two keys of equal length */
@@ -1846,7 +1846,7 @@ static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -1854,10 +1854,10 @@ static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int i;
ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
@@ -1874,16 +1874,16 @@ static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
struct spu_cipher_parms cipher_parms;
u32 alloc_len = 0;
int err;
- flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
+ flow_log("skcipher_setkey() keylen: %d\n", keylen);
flow_dump(" key: ", key, keylen);
switch (ctx->cipher.alg) {
@@ -1926,7 +1926,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
cipher_parms.iv_buf = NULL;
- cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
+ cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
cipher_parms.alg = ctx->cipher.alg;
@@ -1950,17 +1950,17 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
+ flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
- return ablkcipher_enqueue(req, true);
+ return skcipher_enqueue(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
- return ablkcipher_enqueue(req, false);
+ flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
+ return skcipher_enqueue(req, false);
}
static int ahash_enqueue(struct ahash_request *req)
@@ -3585,18 +3585,16 @@ static struct iproc_alg_s driver_algs[] = {
.auth_first = 0,
},
-/* ABLKCIPHER algorithms. */
+/* SKCIPHER algorithms. */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ecb-arc4-iproc",
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(arc4)",
+ .base.cra_driver_name = "ecb-arc4-iproc",
+ .base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_RC4,
@@ -3608,16 +3606,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des)",
- .cra_driver_name = "ofb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "ofb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3629,16 +3625,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3650,16 +3644,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3671,16 +3663,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "ofb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "ofb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3692,16 +3682,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3713,16 +3701,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3734,16 +3720,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3755,16 +3739,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3776,16 +3758,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3797,16 +3777,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3818,16 +3796,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -4282,16 +4258,17 @@ static int generic_cra_init(struct crypto_tfm *tfm,
return 0;
}
-static int ablkcipher_cra_init(struct crypto_tfm *tfm)
+static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
struct iproc_alg_s *cipher_alg;
flow_log("%s()\n", __func__);
- tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
+ crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
- cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
+ cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
return generic_cra_init(tfm, cipher_alg);
}
@@ -4363,6 +4340,11 @@ static void generic_cra_exit(struct crypto_tfm *tfm)
atomic_dec(&iproc_priv.session_count);
}
+static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ generic_cra_exit(crypto_skcipher_tfm(tfm));
+}
+
static void aead_cra_exit(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
@@ -4524,10 +4506,10 @@ static void spu_counters_init(void)
atomic_set(&iproc_priv.bad_icv, 0);
}
-static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
+static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct crypto_alg *crypto = &driver_alg->alg.crypto;
+ struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
int err;
/* SPU2 does not support RC4 */
@@ -4535,26 +4517,23 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
(spu->spu_type == SPU_TYPE_SPU2))
return 0;
- crypto->cra_module = THIS_MODULE;
- crypto->cra_priority = cipher_pri;
- crypto->cra_alignmask = 0;
- crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
-
- crypto->cra_init = ablkcipher_cra_init;
- crypto->cra_exit = generic_cra_exit;
- crypto->cra_type = &crypto_ablkcipher_type;
- crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ crypto->base.cra_module = THIS_MODULE;
+ crypto->base.cra_priority = cipher_pri;
+ crypto->base.cra_alignmask = 0;
+ crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
+ crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
- crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ crypto->init = skcipher_init_tfm;
+ crypto->exit = skcipher_exit_tfm;
+ crypto->setkey = skcipher_setkey;
+ crypto->encrypt = skcipher_encrypt;
+ crypto->decrypt = skcipher_decrypt;
- err = crypto_register_alg(crypto);
+ err = crypto_register_skcipher(crypto);
/* Mark alg as having been registered, if successful */
if (err == 0)
driver_alg->registered = true;
- pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
+ pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
return err;
}
@@ -4649,8 +4628,8 @@ static int spu_algs_register(struct device *dev)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = spu_register_ablkcipher(&driver_algs[i]);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = spu_register_skcipher(&driver_algs[i]);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = spu_register_ahash(&driver_algs[i]);
@@ -4680,8 +4659,8 @@ err_algs:
if (!driver_algs[j].registered)
continue;
switch (driver_algs[j].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[j].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
driver_algs[j].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -4837,10 +4816,10 @@ static int bcm_spu_remove(struct platform_device *pdev)
continue;
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[i].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
dev_dbg(dev, " unregistered cipher %s\n",
- driver_algs[i].alg.crypto.cra_driver_name);
+ driver_algs[i].alg.skcipher.base.cra_driver_name);
driver_algs[i].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 766452b24d0a..b6d83e3aa46c 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -1,3 +1,4 @@
+
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2016 Broadcom
@@ -11,6 +12,7 @@
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
@@ -102,7 +104,7 @@ struct auth_op {
struct iproc_alg_s {
u32 type;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -149,7 +151,7 @@ struct spu_msg_buf {
u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];
union {
- /* Buffers only used for ablkcipher */
+ /* Buffers only used for skcipher */
struct {
/*
* Field used for either SUPDT when RC4 is used
@@ -214,7 +216,7 @@ struct iproc_ctx_s {
/*
* Buffer to hold SPU message header template. Template is created at
- * setkey time for ablkcipher requests, since most of the fields in the
+ * setkey time for skcipher requests, since most of the fields in the
* header are known at that time. At request time, just fill in a few
* missing pieces related to length of data in the request and IVs, etc.
*/
@@ -256,7 +258,7 @@ struct iproc_reqctx_s {
/* total todo, rx'd, and sent for this request */
unsigned int total_todo;
- unsigned int total_received; /* only valid for ablkcipher */
+ unsigned int total_received; /* only valid for skcipher */
unsigned int total_sent;
/*
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2add51024575..59abb5ecefa4 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -542,7 +542,7 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
- * subsequent ablkcipher requests for this context.
+ * subsequent skcipher requests for this context.
* @spu2_cipher_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
@@ -1107,13 +1107,13 @@ u32 spu2_create_request(u8 *spu_hdr,
}
/**
- * spu_cipher_req_init() - Build an ablkcipher SPU2 request message header,
+ * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
*
* Called at setkey time to initialize a msg header that can be reused for all
- * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
+ * subsequent skcipher requests. Construct the message starting at spu_hdr.
* Caller should allocate this buffer in DMA-able memory at least
* SPU_HEADER_ALLOC_LEN bytes long.
*
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 137ed3df0c74..87053e46c788 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -97,7 +97,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
Selecting this will offload crypto for users of the
@@ -110,7 +110,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
default y
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_DES
help
Selecting this will use CAAM Queue Interface (QI) for sending
@@ -158,7 +158,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
select CRYPTO_DEV_FSL_CAAM_COMMON
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_AEAD
select CRYPTO_HASH
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 83f96d4f86e0..6619c512ef1a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
- int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ int mapped_src_nents, mapped_dst_nents;
unsigned int diff_size = 0;
int lzeros;
@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
- if (!diff_size && src_nents == 1)
+ mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ goto src_fail;
+ }
+
+ if (!diff_size && mapped_src_nents == 1)
sec4_sg_len = 0; /* no need for an input hw s/g table */
else
- sec4_sg_len = src_nents + !!diff_size;
+ sec4_sg_len = mapped_src_nents + !!diff_size;
sec4_sg_index = sec4_sg_len;
- if (dst_nents > 1)
- sec4_sg_len += pad_sg_nents(dst_nents);
+
+ if (mapped_dst_nents > 1)
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
else
sec4_sg_len = pad_sg_nents(sec4_sg_len);
@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc)
- return ERR_PTR(-ENOMEM);
-
- sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(dev, "unable to map source\n");
- goto src_fail;
- }
-
- sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(dev, "unable to map destination\n");
goto dst_fail;
- }
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
if (diff_size)
@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
edesc->sec4_sg + !!diff_size, 0);
- if (dst_nents > 1)
+ if (mapped_dst_nents > 1)
sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!sec4_sg_bytes)
return edesc;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
+
edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
return edesc;
sec4_sg_fail:
- dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ kfree(edesc);
dst_fail:
- dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
- kfree(edesc);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
return ERR_PTR(-ENOMEM);
}
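The reordering above matters because dma_map_sg() may coalesce scatterlist entries (for instance through an IOMMU), so the mapped segment count it returns can be smaller than the s/w count from sg_nents_for_len(); hardware link tables must be sized from the mapped count. A sketch of the rule, with made-up names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int size_hw_link_table(struct device *dev, struct scatterlist *sg,
                              int sw_nents)
{
        int mapped_nents = dma_map_sg(dev, sg, sw_nents, DMA_TO_DEVICE);

        if (unlikely(!mapped_nents))
                return -ENOMEM;

        /* mapped_nents <= sw_nents: size the h/w descriptor from it */
        return mapped_nents;
}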
@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
return -ENOMEM;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -428,17 +433,18 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
return -ENOMEM;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
+
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -493,17 +499,17 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_tmp1;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -582,17 +588,17 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_tmp1;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 2c488c9a3812..c68fb4c03ee6 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx {
/**
* rsa_edesc - s/w-extended rsa descriptor
- * @src_nents : number of segments in input scatterlist
- * @dst_nents : number of segments in output scatterlist
+ * @src_nents : number of segments in input s/w scatterlist
+ * @dst_nents : number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes : length of h/w link table
* @sec4_sg_dma : dma address of h/w link table
* @sec4_sg : pointer to h/w link table
@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx {
struct rsa_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index db22777d59b4..d7c3c3805693 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -176,6 +176,73 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
}
/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+ * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+ * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+static void devm_deinstantiate_rng(void *data)
+{
+ struct device *ctrldev = data;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+}
+
+/*
* instantiate_rng - builds and executes a descriptor on DECO0,
* which initializes the RNG block.
* @ctrldev - pointer to device
@@ -247,99 +314,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
kfree(desc);
- return ret;
-}
-
-/*
- * deinstantiate_rng - builds and executes a descriptor on DECO0,
- * which deinitializes the RNG block.
- * @ctrldev - pointer to device
- * @state_handle_mask - bitmask containing the instantiation status
- * for the RNG4 state handles which exist in
- * the RNG4 block: 1 if it's been instantiated
- *
- * Return: - 0 if no error occurred
- * - -ENOMEM if there isn't enough memory to allocate the descriptor
- * - -ENODEV if DECO0 couldn't be acquired
- * - -EAGAIN if an error occurred when executing the descriptor
- */
-static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
-{
- u32 *desc, status;
- int sh_idx, ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
- /*
- * If the corresponding bit is set, then it means the state
- * handle was initialized by us, and thus it needs to be
- * deinitialized as well
- */
- if ((1 << sh_idx) & state_handle_mask) {
- /*
- * Create the descriptor for deinstantating this state
- * handle
- */
- build_deinstantiation_desc(desc, sh_idx);
-
- /* Try to run it through DECO0 */
- ret = run_descriptor_deco0(ctrldev, desc, &status);
-
- if (ret ||
- (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
- dev_err(ctrldev,
- "Failed to deinstantiate RNG4 SH%d\n",
- sh_idx);
- break;
- }
- dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
- }
- }
-
- kfree(desc);
+ if (!ret)
+ ret = devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng,
+ ctrldev);
return ret;
}
-static int caam_remove(struct platform_device *pdev)
-{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_ctrl __iomem *ctrl;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-
- /* Remove platform devices under the crypto node */
- of_platform_depopulate(ctrldev);
-
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qi_init)
- caam_qi_shutdown(ctrldev);
-#endif
-
- /*
- * De-initialize RNG state handles initialized by this driver.
- * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
- if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
- deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
-
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
- /* Unmap controller region */
- iounmap(ctrl);
-
- return 0;
-}
-
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
@@ -568,6 +549,13 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}
+#ifdef CONFIG_DEBUG_FS
+static void caam_remove_debugfs(void *root)
+{
+ debugfs_remove_recursive(root);
+}
+#endif
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -580,6 +568,7 @@ static int caam_probe(struct platform_device *pdev)
struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
+ struct dentry *dfs_root;
#endif
u32 scfgr, comp_params;
u8 rng_vid;
@@ -611,10 +600,11 @@ static int caam_probe(struct platform_device *pdev)
/* Get configuration properties from device tree */
/* First, get register page */
- ctrl = of_iomap(nprop, 0);
- if (!ctrl) {
+ ctrl = devm_of_iomap(dev, nprop, 0, NULL);
+ ret = PTR_ERR_OR_ZERO(ctrl);
+ if (ret) {
dev_err(dev, "caam: of_iomap() failed\n");
- return -ENOMEM;
+ return ret;
}
caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
@@ -632,22 +622,18 @@ static int caam_probe(struct platform_device *pdev)
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
if (!ret) {
- ret = -EPROBE_DEFER;
- goto iounmap_ctrl;
+ return -EPROBE_DEFER;
} else if (ret < 0) {
dev_err(dev, "failing probe due to qman probe error\n");
- ret = -ENODEV;
- goto iounmap_ctrl;
+ return -ENODEV;
}
ret = qman_portals_probed();
if (!ret) {
- ret = -EPROBE_DEFER;
- goto iounmap_ctrl;
+ return -EPROBE_DEFER;
} else if (ret < 0) {
dev_err(dev, "failing probe due to qman portals probe error\n");
- ret = -ENODEV;
- goto iounmap_ctrl;
+ return -ENODEV;
}
}
#endif
@@ -722,7 +708,7 @@ static int caam_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto iounmap_ctrl;
+ return ret;
}
ctrlpriv->era = caam_get_era(ctrl);
@@ -736,8 +722,12 @@ static int caam_probe(struct platform_device *pdev)
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+ dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root);
+ if (ret)
+ return ret;
+
+ ctrlpriv->ctl = debugfs_create_dir("ctl", dfs_root);
#endif
/* Check to see if (DPAA 1.x) QI present. If so, enable */
@@ -757,12 +747,6 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto shutdown_qi;
- }
-
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
@@ -779,8 +763,7 @@ static int caam_probe(struct platform_device *pdev)
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
- ret = -ENOMEM;
- goto caam_remove;
+ return -ENOMEM;
}
if (ctrlpriv->era < 10)
@@ -843,7 +826,7 @@ static int caam_probe(struct platform_device *pdev)
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
- goto caam_remove;
+ return ret;
}
/*
* Set handles init'ed by this module as the complement of the
@@ -916,19 +899,11 @@ static int caam_probe(struct platform_device *pdev)
debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
#endif
- return 0;
-caam_remove:
- caam_remove(pdev);
- return ret;
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ dev_err(dev, "JR platform devices creation error\n");
-shutdown_qi:
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qi_init)
- caam_qi_shutdown(dev);
-#endif
-iounmap_ctrl:
- iounmap(ctrl);
return ret;
}
@@ -938,7 +913,6 @@ static struct platform_driver caam_driver = {
.of_match_table = caam_match,
},
.probe = caam_probe,
- .remove = caam_remove,
};
module_platform_driver(caam_driver);
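
Taken together, the ctrl.c hunks move every teardown step that used to live in caam_remove() (RNG deinstantiation, QI shutdown, debugfs removal, controller unmap, child-device depopulation) into device-managed actions registered at the point the corresponding resource is created, which is why the .remove callback can be deleted outright. The core pattern, sketched with illustrative names:

	static void example_teardown(void *data)
	{
		struct device *dev = data;

		/* undo whatever the matching setup step did */
	}

	ret = devm_add_action_or_reset(dev, example_teardown, dev);
	if (ret)
		return ret;	/* callback already ran; nothing to unwind */

On success the action fires automatically, in reverse registration order, when the device is unbound; on failure devm_add_action_or_reset() invokes it immediately, so error paths after this point need no manual cleanup of earlier steps.
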
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 731b06becd9c..c7c10c90464b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -81,9 +81,6 @@ struct caam_drv_private {
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
-#ifdef CONFIG_CAAM_QI
- u8 qi_init; /* Nonzero if QI has been initialized */
-#endif
u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -102,7 +99,6 @@ struct caam_drv_private {
* variables at runtime.
*/
#ifdef CONFIG_DEBUG_FS
- struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 378f627e1d64..dacf2fa4aa8e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -500,9 +500,10 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-void caam_qi_shutdown(struct device *qidev)
+static void caam_qi_shutdown(void *data)
{
int i;
+ struct device *qidev = data;
struct caam_qi_priv *priv = &qipriv;
const cpumask_t *cpus = qman_affine_cpus();
@@ -761,7 +762,10 @@ int caam_qi_init(struct platform_device *caam_pdev)
&times_congested, &caam_fops_u64_ro);
#endif
- ctrlpriv->qi_init = 1;
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
+ if (err)
+ return err;
+
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index db0549549e3b..848958951f68 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -147,7 +147,6 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 596ce28b957d..1ad66677d88e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -92,15 +92,15 @@ static inline void update_output_data(struct cpt_request_info *req_info,
}
}
-static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
+static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct fc_context *fctx = &rctx->fctx;
u64 *offset_control = &rctx->control_word;
- u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+ u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct cpt_request_info *req_info = &rctx->cpt_req;
u64 *ctrl_flags = NULL;
@@ -115,7 +115,7 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
else
req_info->req.opcode.s.minor = 3;
- req_info->req.param1 = req->nbytes; /* Encryption Data length */
+ req_info->req.param1 = req->cryptlen; /* Encryption Data length */
req_info->req.param2 = 0; /*Auth data length */
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
@@ -147,32 +147,32 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
return 0;
}
-static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
+static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
create_ctx_hdr(req, enc, &argcnt);
- update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
- update_input_data(req_info, req->src, req->nbytes, &argcnt);
+ update_input_iv(req_info, req->iv, enc_iv_len, &argcnt);
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
req_info->incnt = argcnt;
return 0;
}
-static inline void store_cb_info(struct ablkcipher_request *req,
+static inline void store_cb_info(struct skcipher_request *req,
struct cpt_request_info *req_info)
{
req_info->callback = (void *)cvm_callback;
req_info->callback_arg = (void *)&req->base;
}
-static inline void create_output_list(struct ablkcipher_request *req,
+static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -184,16 +184,16 @@ static inline void create_output_list(struct ablkcipher_request *req,
* [ 16 Bytes/ [ Request Enc/Dec/ DATA Len AES CBC ]
*/
/* Reading IV information */
- update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
- update_output_data(req_info, req->dst, req->nbytes, &argcnt);
+ update_output_iv(req_info, req->iv, enc_iv_len, &argcnt);
+ update_output_data(req_info, req->dst, req->cryptlen, &argcnt);
req_info->outcnt = argcnt;
}
-static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
+static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
- u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct fc_context *fctx = &rctx->fctx;
struct cpt_request_info *req_info = &rctx->cpt_req;
void *cdev = NULL;
@@ -217,20 +217,20 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
return -EINPROGRESS;
}
-static int cvm_encrypt(struct ablkcipher_request *req)
+static int cvm_encrypt(struct skcipher_request *req)
{
return cvm_enc_dec(req, true);
}
-static int cvm_decrypt(struct ablkcipher_request *req)
+static int cvm_decrypt(struct skcipher_request *req)
{
return cvm_enc_dec(req, false);
}
-static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
const u8 *key1 = key;
@@ -284,10 +284,10 @@ static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
return -EINVAL;
}
-static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen, u8 cipher_type)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->cipher_type = cipher_type;
@@ -295,183 +295,159 @@ static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_ablkcipher_set_flags(cipher,
+ crypto_skcipher_set_flags(cipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
-static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CBC);
}
-static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_ECB);
}
-static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CFB);
}
-static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
+ return verify_skcipher_des3_key(cipher, key) ?:
cvm_setkey(cipher, key, keylen, DES3_CBC);
}
-static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
+ return verify_skcipher_des3_key(cipher, key) ?:
cvm_setkey(cipher, key, keylen, DES3_ECB);
}
-static int cvm_enc_dec_init(struct crypto_tfm *tfm)
+static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx));
+
return 0;
}
-static struct crypto_alg algs[] = { {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "xts(aes)",
- .cra_driver_name = "cavium-xts-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .setkey = cvm_xts_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+static struct skcipher_alg algs[] = { {
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cavium-xts-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = cvm_xts_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cavium-cbc-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cbc_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cavium-cbc-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_cbc_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "ecb(aes)",
- .cra_driver_name = "cavium-ecb-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_ecb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cavium-ecb-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_ecb_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cavium-cfb-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cfb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cavium-cfb-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_cfb_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_des3_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cavium-cbc-des3_ede",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = cvm_cbc_des3_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cavium-cbc-des3_ede",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = cvm_cbc_des3_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_des3_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "cavium-ecb-des3_ede",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = cvm_ecb_des3_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cavium-ecb-des3_ede",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = cvm_ecb_des3_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
} };
static inline int cav_register_algs(void)
{
int err = 0;
- err = crypto_register_algs(algs, ARRAY_SIZE(algs));
+ err = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
if (err)
return err;
@@ -480,7 +456,7 @@ static inline int cav_register_algs(void)
static inline void cav_unregister_algs(void)
{
- crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
int cvm_crypto_init(struct cpt_vf *cptvf)
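
The cptvf conversion is mechanical once the field mapping is known: ablkcipher_request's info and nbytes become skcipher_request's iv and cryptlen, per-request state comes from skcipher_request_ctx(), and the request-context size is declared through crypto_skcipher_set_reqsize() in .init instead of poking crt_ablkcipher.reqsize from cra_init. From a caller's point of view the converted algorithms behave like any other skcipher; a hedged usage sketch (error handling trimmed, done_cb/done_data illustrative):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      done_cb, done_data);
	skcipher_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
	err = crypto_skcipher_encrypt(req);	/* may return -EINPROGRESS */
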
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig
index 7b1e751bb9cd..7dc008332a81 100644
--- a/drivers/crypto/cavium/nitrox/Kconfig
+++ b/drivers/crypto/cavium/nitrox/Kconfig
@@ -4,7 +4,7 @@
#
config CRYPTO_DEV_NITROX
tristate
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_LIB_DES
select FW_LOADER
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index e4841eb2a09f..6f80cc3b5c84 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -74,6 +74,25 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}
+static int nitrox_aes_gcm_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return nitrox_aead_setauthsize(aead, authsize);
+}
+
static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
struct scatterlist *src, char *iv, int ivsize,
int buflen)
@@ -186,6 +205,14 @@ static void nitrox_aead_callback(void *arg, int err)
areq->base.complete(&areq->base, err);
}
+static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen)
+{
+ if (assoclen <= 512)
+ return true;
+
+ return false;
+}
+
static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
@@ -195,6 +222,9 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;
+ if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen))
+ return -EINVAL;
+
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
rctx->cryptlen = areq->cryptlen;
@@ -226,6 +256,9 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;
+ if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen))
+ return -EINVAL;
+
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
rctx->cryptlen = areq->cryptlen - aead->authsize;
@@ -492,13 +525,13 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_driver_name = "n5_aes_gcm",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.setkey = nitrox_aes_gcm_setkey,
- .setauthsize = nitrox_aead_setauthsize,
+ .setauthsize = nitrox_aes_gcm_setauthsize,
.encrypt = nitrox_aes_gcm_enc,
.decrypt = nitrox_aes_gcm_dec,
.init = nitrox_aes_gcm_init,
@@ -511,7 +544,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_driver_name = "n5_rfc4106",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
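
Two correctness points drive the nitrox_aead.c hunks. First, GCM behaves as a stream mode, so cra_blocksize must be 1 rather than AES_BLOCK_SIZE; advertising a 16-byte block misleads callers into partial-block handling the mode does not need. Second, GCM only defines tag lengths of 12 to 16 bytes plus the truncated 8- and 4-byte variants, which is exactly the set the new setauthsize callback admits; the switch reduces to (sketch, not the driver's code):

	static bool gcm_tag_len_ok(unsigned int authsize)
	{
		return authsize == 4 || authsize == 8 ||
		       (authsize >= 12 && authsize <= 16);
	}

The 512-byte assoclen guard is checked up front so associated data beyond what the engine handles fails with -EINVAL instead of producing a wrong tag.
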
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 2217a2736c8e..c2d0c23fb81b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -109,6 +109,13 @@ struct nitrox_q_vector {
};
};
+enum mcode_type {
+ MCODE_TYPE_INVALID,
+ MCODE_TYPE_AE,
+ MCODE_TYPE_SE_SSL,
+ MCODE_TYPE_SE_IPSEC,
+};
+
/**
* mbox_msg - Mailbox message data
* @type: message type
@@ -128,6 +135,14 @@ union mbox_msg {
u64 chipid: 8;
u64 vfid: 8;
} id;
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 count: 4;
+ u64 info: 40;
+ u64 next_se_grp: 3;
+ u64 next_ae_grp: 3;
+ } mcode_info;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index bc924980e10c..c4632d84c9a1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -103,8 +103,7 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
offset = UCD_UCODE_LOAD_BLOCK_NUM;
nitrox_write_csr(ndev, offset, block_num);
- code_size = ucode_size;
- code_size = roundup(code_size, 8);
+ code_size = roundup(ucode_size, 16);
while (code_size) {
data = ucode_data[i];
/* write 8 bytes at a time */
@@ -220,11 +219,11 @@ static int nitrox_load_fw(struct nitrox_device *ndev)
/* write block number and firmware length
* bit:<2:0> block number
- * bit:3 is set SE uses 32KB microcode
- * bit:3 is clear SE uses 64KB microcode
+ * bit:3 is set AE uses 32KB microcode
+ * bit:3 is clear AE uses 64KB microcode
*/
core_2_eid_val.value = 0ULL;
- core_2_eid_val.ucode_blk = 0;
+ core_2_eid_val.ucode_blk = 2;
if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
core_2_eid_val.ucode_len = 1;
else
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 02ee95064841..b51b0449b478 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -25,6 +25,7 @@ enum mbx_msg_opcode {
MSG_OP_VF_UP,
MSG_OP_VF_DOWN,
MSG_OP_CHIPID_VFID,
+ MSG_OP_MCODE_INFO = 11,
};
struct pf2vf_work {
@@ -73,6 +74,13 @@ static void pf2vf_send_response(struct nitrox_device *ndev,
vfdev->nr_queues = 0;
atomic_set(&vfdev->state, __NDEV_NOT_READY);
break;
+ case MSG_OP_MCODE_INFO:
+ msg.data = 0;
+ msg.mcode_info.count = 2;
+ msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
+ msg.mcode_info.next_se_grp = 1;
+ msg.mcode_info.next_ae_grp = 1;
+ break;
default:
msg.type = MBX_MSG_TYPE_NOP;
break;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index f69ba02c4d25..12282c1b14f5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -10,6 +10,8 @@
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define PRIO 4001
+typedef void (*sereq_completion_t)(void *req, int err);
+
/**
* struct gphdr - General purpose Header
* @param0: first parameter.
@@ -203,12 +205,14 @@ struct nitrox_crypto_ctx {
struct flexi_crypto_context *fctx;
} u;
struct crypto_ctx_hdr *chdr;
+ sereq_completion_t callback;
};
struct nitrox_kcrypt_request {
struct se_crypto_request creq;
u8 *src;
u8 *dst;
+ u8 *iv_out;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 3cdce1f0f257..97af4d50d003 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -6,6 +6,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/xts.h>
@@ -47,6 +48,63 @@ static enum flexi_cipher flexi_cipher_type(const char *name)
return cipher->value;
}
+static void free_src_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->dst);
+}
+
+static void nitrox_skcipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+
+ free_src_sglist(skreq);
+ free_dst_sglist(skreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ skcipher_request_complete(skreq, err);
+}
+
+static void nitrox_cbc_cipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ unsigned int start = skreq->cryptlen - ivsize;
+
+ if (err) {
+ nitrox_skcipher_callback(arg, err);
+ return;
+ }
+
+ if (nkreq->creq.ctrl.s.arg == ENCRYPT) {
+ scatterwalk_map_and_copy(skreq->iv, skreq->dst, start, ivsize,
+ 0);
+ } else {
+ if (skreq->src != skreq->dst) {
+ scatterwalk_map_and_copy(skreq->iv, skreq->src, start,
+ ivsize, 0);
+ } else {
+ memcpy(skreq->iv, nkreq->iv_out, ivsize);
+ kfree(nkreq->iv_out);
+ }
+ }
+
+ nitrox_skcipher_callback(arg, err);
+}
+
static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
@@ -63,6 +121,8 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
nitrox_put_device(nctx->ndev);
return -ENOMEM;
}
+
+ nctx->callback = nitrox_skcipher_callback;
nctx->chdr = chdr;
nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
sizeof(struct ctx_hdr));
@@ -71,6 +131,19 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
return 0;
}
+static int nitrox_cbc_init(struct crypto_skcipher *tfm)
+{
+ int err;
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+ err = nitrox_skcipher_init(tfm);
+ if (err)
+ return err;
+
+ nctx->callback = nitrox_cbc_cipher_callback;
+ return 0;
+}
+
static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
@@ -173,34 +246,6 @@ static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize)
return 0;
}
-static void free_src_sglist(struct skcipher_request *skreq)
-{
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
-
- kfree(nkreq->src);
-}
-
-static void free_dst_sglist(struct skcipher_request *skreq)
-{
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
-
- kfree(nkreq->dst);
-}
-
-static void nitrox_skcipher_callback(void *arg, int err)
-{
- struct skcipher_request *skreq = arg;
-
- free_src_sglist(skreq);
- free_dst_sglist(skreq);
- if (err) {
- pr_err_ratelimited("request failed status 0x%0x\n", err);
- err = -EINVAL;
- }
-
- skcipher_request_complete(skreq, err);
-}
-
static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
@@ -240,8 +285,28 @@ static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
}
/* send the crypto request */
- return nitrox_process_se_request(nctx->ndev, creq,
- nitrox_skcipher_callback, skreq);
+ return nitrox_process_se_request(nctx->ndev, creq, nctx->callback,
+ skreq);
+}
+
+static int nitrox_cbc_decrypt(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ gfp_t flags = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ unsigned int start = skreq->cryptlen - ivsize;
+
+ if (skreq->src != skreq->dst)
+ return nitrox_skcipher_crypt(skreq, false);
+
+ nkreq->iv_out = kmalloc(ivsize, flags);
+ if (!nkreq->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(nkreq->iv_out, skreq->src, start, ivsize, 0);
+ return nitrox_skcipher_crypt(skreq, false);
}
static int nitrox_aes_encrypt(struct skcipher_request *skreq)
@@ -340,8 +405,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
+ .decrypt = nitrox_cbc_decrypt,
+ .init = nitrox_cbc_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
@@ -428,7 +493,6 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
@@ -455,8 +519,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = nitrox_3des_setkey,
.encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
+ .decrypt = nitrox_cbc_decrypt,
+ .init = nitrox_cbc_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
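
The CBC-specific init/decrypt/callback trio exists because of the kernel's IV-chaining contract: when a request completes, req->iv must hold the IV for a follow-on request, i.e. the last ciphertext block. On encryption that block sits at the tail of dst; on decryption it sits at the tail of src, and an in-place decryption destroys it, so nitrox_cbc_decrypt() snapshots it into iv_out before submitting. The essential flow, sketched with the diff's own helpers:

	unsigned int off = skreq->cryptlen - ivsize;

	/* before submitting an in-place decryption */
	if (skreq->src == skreq->dst)
		scatterwalk_map_and_copy(iv_out, skreq->src, off, ivsize, 0);

	/* in the completion callback */
	if (encrypting)
		scatterwalk_map_and_copy(skreq->iv, skreq->dst, off, ivsize, 0);
	else if (skreq->src != skreq->dst)
		scatterwalk_map_and_copy(skreq->iv, skreq->src, off, ivsize, 0);
	else
		memcpy(skreq->iv, iv_out, ivsize);	/* saved copy */
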
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 8fec733f567f..e0a8bd15aa74 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_CCP_CRYPTO
depends on CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_SP_CCP
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_RSA
select CRYPTO_LIB_AES
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 94c1ad7eeddf..ff50ee80d223 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -172,14 +172,12 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
.cra_exit = ccp_aes_gcm_cra_exit,
.cra_module = THIS_MODULE,
},
@@ -229,11 +227,10 @@ static int ccp_register_aes_aead(struct list_head *head,
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->base.cra_blocksize = def->blocksize;
- alg->base.cra_ablkcipher.ivsize = def->ivsize;
ret = crypto_register_aead(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ pr_err("%s aead algorithm registration error (%d)\n",
alg->base.cra_name, ret);
kfree(ccp_aead);
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 8e4a531f4f70..04b2517df955 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -24,7 +24,7 @@ struct ccp_aes_xts_def {
const char *drv_name;
};
-static struct ccp_aes_xts_def aes_xts_algs[] = {
+static const struct ccp_aes_xts_def aes_xts_algs[] = {
{
.name = "xts(aes)",
.drv_name = "xts-aes-ccp",
@@ -61,26 +61,25 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
- struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int ccpversion = ccp_version();
int ret;
- ret = xts_check_key(xfm, key, key_len);
+ ret = xts_verify_key(tfm, key, key_len);
if (ret)
return ret;
@@ -102,11 +101,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
-static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+static int ccp_aes_xts_crypt(struct skcipher_request *req,
unsigned int encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
unsigned int ccpversion = ccp_version();
unsigned int fallback = 0;
unsigned int unit;
@@ -116,7 +116,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!ctx->u.aes.key_len)
return -EINVAL;
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
/* Check conditions under which the CCP can fulfill a request. The
@@ -127,7 +127,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
*/
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
- if (req->nbytes == xts_unit_sizes[unit].size) {
+ if (req->cryptlen == xts_unit_sizes[unit].size) {
unit_size = unit;
break;
}
@@ -155,14 +155,14 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return ret;
}
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -177,7 +177,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.u.xts.iv = &rctx->iv_sg;
rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
rctx->cmd.u.xts.src = req->src;
- rctx->cmd.u.xts.src_len = req->nbytes;
+ rctx->cmd.u.xts.src_len = req->cryptlen;
rctx->cmd.u.xts.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -185,19 +185,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
return ret;
}
-static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_encrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 1);
}
-static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_decrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 0);
}
-static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
@@ -212,14 +212,14 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
@@ -227,8 +227,8 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -239,30 +239,30 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_blocksize = AES_BLOCK_SIZE;
- alg->cra_ctxsize = sizeof(struct ccp_ctx);
- alg->cra_priority = CCP_CRA_PRIORITY;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
- alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
- alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
- alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
- alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
- alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
- alg->cra_init = ccp_aes_xts_cra_init;
- alg->cra_exit = ccp_aes_xts_cra_exit;
- alg->cra_module = THIS_MODULE;
-
- ret = crypto_register_alg(alg);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->base.cra_blocksize = AES_BLOCK_SIZE;
+ alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->base.cra_priority = CCP_CRA_PRIORITY;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->setkey = ccp_aes_xts_setkey;
+ alg->encrypt = ccp_aes_xts_encrypt;
+ alg->decrypt = ccp_aes_xts_decrypt;
+ alg->min_keysize = AES_MIN_KEY_SIZE * 2;
+ alg->max_keysize = AES_MAX_KEY_SIZE * 2;
+ alg->ivsize = AES_BLOCK_SIZE;
+ alg->init = ccp_aes_xts_init_tfm;
+ alg->exit = ccp_aes_xts_exit_tfm;
+
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
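
Note the key-checking swap in the setkey path: xts_check_key() wanted a bare crypto_tfm, forcing the crypto_ablkcipher_tfm() detour, while xts_verify_key() takes the crypto_skcipher handle directly. Its job is unchanged: reject odd-length keys and, when weak keys are forbidden (FIPS mode or CRYPTO_TFM_REQ_FORBID_WEAK_KEYS), keys whose two halves are identical. A hedged sketch of a setkey built on it (program_hw_keys is illustrative):

	static int example_xts_setkey(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
	{
		int err = xts_verify_key(tfm, key, keylen);

		if (err)
			return err;

		/* first keylen/2 bytes: cipher key; rest: tweak key */
		return program_hw_keys(tfm, key, keylen);
	}
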
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 58c6dddfc5e1..33328a153225 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -21,25 +21,24 @@
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
- struct ccp_crypto_ablkcipher_alg *alg =
- ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -52,7 +51,7 @@ static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
@@ -64,10 +63,11 @@ static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
@@ -77,14 +77,14 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
(ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
- (req->nbytes & (AES_BLOCK_SIZE - 1)))
+ (req->cryptlen & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = AES_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
@@ -102,7 +102,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
rctx->cmd.u.aes.iv = iv_sg;
rctx->cmd.u.aes.iv_len = iv_len;
rctx->cmd.u.aes.src = req->src;
- rctx->cmd.u.aes.src_len = req->nbytes;
+ rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -110,48 +110,44 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return ret;
}
-static int ccp_aes_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_encrypt(struct skcipher_request *req)
{
return ccp_aes_crypt(req, true);
}
-static int ccp_aes_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_decrypt(struct skcipher_request *req)
{
return ccp_aes_crypt(req, false);
}
-static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_aes_complete;
ctx->u.aes.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
/* Restore the original pointer */
- req->info = rctx->rfc3686_info;
+ req->iv = rctx->rfc3686_info;
return ccp_aes_complete(async_req, ret);
}
-static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -162,10 +158,11 @@ static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ccp_aes_setkey(tfm, key, key_len);
}
-static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
u8 *iv;
/* Initialize the CTR block */
@@ -173,84 +170,72 @@ static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
iv += CTR_RFC3686_NONCE_SIZE;
- memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);
iv += CTR_RFC3686_IV_SIZE;
*(__be32 *)iv = cpu_to_be32(1);
/* Point to the new IV */
- rctx->rfc3686_info = req->info;
- req->info = rctx->rfc3686_iv;
+ rctx->rfc3686_info = req->iv;
+ req->iv = rctx->rfc3686_iv;
return ccp_aes_crypt(req, encrypt);
}
-static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, true);
}
-static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, false);
}
-static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_aes_rfc3686_complete;
ctx->u.aes.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct crypto_alg ccp_aes_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_aes_cra_init,
- .cra_exit = ccp_aes_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_aes_setkey,
- .encrypt = ccp_aes_encrypt,
- .decrypt = ccp_aes_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
+static const struct skcipher_alg ccp_aes_defaults = {
+ .setkey = ccp_aes_setkey,
+ .encrypt = ccp_aes_encrypt,
+ .decrypt = ccp_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .init = ccp_aes_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
-static struct crypto_alg ccp_aes_rfc3686_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_aes_rfc3686_cra_init,
- .cra_exit = ccp_aes_rfc3686_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_aes_rfc3686_setkey,
- .encrypt = ccp_aes_rfc3686_encrypt,
- .decrypt = ccp_aes_rfc3686_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- },
+static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
+ .setkey = ccp_aes_rfc3686_setkey,
+ .encrypt = ccp_aes_rfc3686_encrypt,
+ .decrypt = ccp_aes_rfc3686_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .init = ccp_aes_rfc3686_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
struct ccp_aes_def {
@@ -260,7 +245,7 @@ struct ccp_aes_def {
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
- struct crypto_alg *alg_defaults;
+ const struct skcipher_alg *alg_defaults;
};
static struct ccp_aes_def aes_algs[] = {
@@ -323,8 +308,8 @@ static struct ccp_aes_def aes_algs[] = {
static int ccp_register_aes_alg(struct list_head *head,
const struct ccp_aes_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -338,16 +323,16 @@ static int ccp_register_aes_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index d2c49b2f0323..9c129defdb50 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -20,28 +20,27 @@
static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
- memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, DES3_EDE_BLOCK_SIZE);
return 0;
}
-static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
- struct ccp_crypto_ablkcipher_alg *alg =
- ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des3_key(tfm, key);
+ err = verify_skcipher_des3_key(tfm, key);
if (err)
return err;
@@ -58,10 +57,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
@@ -71,14 +71,14 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
(ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
- (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
+ (req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
- memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, DES3_EDE_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = DES3_EDE_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
@@ -97,7 +97,7 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
rctx->cmd.u.des3.iv = iv_sg;
rctx->cmd.u.des3.iv_len = iv_len;
rctx->cmd.u.des3.src = req->src;
- rctx->cmd.u.des3.src_len = req->nbytes;
+ rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -105,51 +105,43 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
return ret;
}
-static int ccp_des3_encrypt(struct ablkcipher_request *req)
+static int ccp_des3_encrypt(struct skcipher_request *req)
{
return ccp_des3_crypt(req, true);
}
-static int ccp_des3_decrypt(struct ablkcipher_request *req)
+static int ccp_des3_decrypt(struct skcipher_request *req)
{
return ccp_des3_crypt(req, false);
}
-static int ccp_des3_cra_init(struct crypto_tfm *tfm)
+static int ccp_des3_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_des3_complete;
ctx->u.des3.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_des3_req_ctx));
return 0;
}
-static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct crypto_alg ccp_des3_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_des3_cra_init,
- .cra_exit = ccp_des3_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_des3_setkey,
- .encrypt = ccp_des3_encrypt,
- .decrypt = ccp_des3_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
+static const struct skcipher_alg ccp_des3_defaults = {
+ .setkey = ccp_des3_setkey,
+ .encrypt = ccp_des3_encrypt,
+ .decrypt = ccp_des3_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .init = ccp_des3_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
struct ccp_des3_def {
@@ -159,10 +151,10 @@ struct ccp_des3_def {
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
- struct crypto_alg *alg_defaults;
+ const struct skcipher_alg *alg_defaults;
};
-static struct ccp_des3_def des3_algs[] = {
+static const struct ccp_des3_def des3_algs[] = {
{
.mode = CCP_DES3_MODE_ECB,
.version = CCP_VERSION(5, 0),
@@ -186,8 +178,8 @@ static struct ccp_des3_def des3_algs[] = {
static int ccp_register_des3_alg(struct list_head *head,
const struct ccp_des3_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -201,16 +193,16 @@ static int ccp_register_des3_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8ee4cb45a3f3..88275b4867ea 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
-static LIST_HEAD(cipher_algs);
+static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);
@@ -330,7 +330,7 @@ static int ccp_register_algs(void)
int ret;
if (!aes_disable) {
- ret = ccp_register_aes_algs(&cipher_algs);
+ ret = ccp_register_aes_algs(&skcipher_algs);
if (ret)
return ret;
@@ -338,7 +338,7 @@ static int ccp_register_algs(void)
if (ret)
return ret;
- ret = ccp_register_aes_xts_algs(&cipher_algs);
+ ret = ccp_register_aes_xts_algs(&skcipher_algs);
if (ret)
return ret;
@@ -348,7 +348,7 @@ static int ccp_register_algs(void)
}
if (!des3_disable) {
- ret = ccp_register_des3_algs(&cipher_algs);
+ ret = ccp_register_des3_algs(&skcipher_algs);
if (ret)
return ret;
}
@@ -371,7 +371,7 @@ static int ccp_register_algs(void)
static void ccp_unregister_algs(void)
{
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
- struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+ struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
@@ -381,8 +381,8 @@ static void ccp_unregister_algs(void)
kfree(ahash_alg);
}
- list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
- crypto_unregister_alg(&ablk_alg->alg);
+ list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&ablk_alg->alg);
list_del(&ablk_alg->entry);
kfree(ablk_alg);
}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 9015b5da6ba3..90a009e6b5c1 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -21,6 +21,7 @@
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/akcipher.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/rsa.h>
/* We want the module name in front of our messages */
@@ -31,12 +32,12 @@
#define CCP_CRA_PRIORITY 300
-struct ccp_crypto_ablkcipher_alg {
+struct ccp_crypto_skcipher_alg {
struct list_head entry;
u32 mode;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
};
struct ccp_crypto_aead {
@@ -66,12 +67,12 @@ struct ccp_crypto_akcipher_alg {
struct akcipher_alg alg;
};
-static inline struct ccp_crypto_ablkcipher_alg *
- ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
+static inline struct ccp_crypto_skcipher_alg *
+ ccp_crypto_skcipher_alg(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
+ return container_of(alg, struct ccp_crypto_skcipher_alg, alg);
}
static inline struct ccp_crypto_ahash_alg *
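
The accessor above leans on container_of() to get back from the embedded struct skcipher_alg to the driver wrapper that carries the mode. The same idiom in isolation (struct my_wrapper and to_my_wrapper are hypothetical):

#include <linux/kernel.h>
#include <crypto/skcipher.h>

struct my_wrapper {				/* hypothetical */
	u32 mode;
	struct skcipher_alg alg;
};

static inline struct my_wrapper *to_my_wrapper(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	return container_of(alg, struct my_wrapper, alg);
}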
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 57eb53b8ac21..82ac4c14c04c 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -789,6 +789,18 @@ static int ccp5_init(struct ccp_device *ccp)
/* Find available queues */
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ /*
+ * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and fail
+ * the initialization (but not the load, as the PSP could get
+ * properly initialized).
+ */
+ if (qmr == 0xffffffff) {
+ dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
+ return 1;
+ }
+
for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
if (!(qmr & (1 << i)))
continue;
@@ -854,7 +866,7 @@ static int ccp5_init(struct ccp_device *ccp)
if (ccp->cmd_q_count == 0) {
dev_notice(dev, "no command queues available\n");
- ret = -EIO;
+ ret = 1;
goto e_pool;
}
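
Reading all-ones from MMIO is the usual signature of a device whose register window has been blocked by firmware or that has dropped off the bus, so the probe bails out before touching anything else; the positive return value tells the caller to fail initialization without extra noise. A hedged sketch of just that guard (my_check_regs is hypothetical):

#include <linux/io.h>

/* Returns a positive value so the caller can fail quietly. */
static int my_check_regs(void __iomem *regs)	/* hypothetical */
{
	u32 val = ioread32(regs);

	/* all-ones: firmware blocked the BAR or the device is gone */
	if (val == 0xffffffff)
		return 1;
	return 0;
}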
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 73acf0fdb793..19ac509ed76e 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -641,18 +641,27 @@ int ccp_dev_init(struct sp_device *sp)
ccp->vdata->setup(ccp);
ret = ccp->vdata->perform->init(ccp);
- if (ret)
+ if (ret) {
+ /* A positive number means that the device cannot be initialized,
+ * but no additional message is required.
+ */
+ if (ret > 0)
+ goto e_quiet;
+
+ /* An unexpected problem occurred; report it in the log */
goto e_err;
+ }
dev_notice(dev, "ccp enabled\n");
return 0;
e_err:
- sp->ccp_data = NULL;
-
dev_notice(dev, "ccp initialization failed\n");
+e_quiet:
+ sp->ccp_data = NULL;
+
return ret;
}
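
The reshuffled labels give ccp_dev_init() a two-tier error path: the loud path logs and then falls through into the quiet one, which only clears the device pointer. The same layering in a stripped-down form (all names hypothetical):

#include <linux/printk.h>

struct my_dev { void *priv; };			/* hypothetical */

static int my_sub_init(struct my_dev *dev)
{
	return 0;	/* real code would probe the hardware here */
}

static int my_init(struct my_dev *dev)
{
	int ret = my_sub_init(dev);

	if (ret > 0)	/* device unusable, already diagnosed: stay quiet */
		goto e_quiet;
	if (ret < 0)	/* unexpected failure: say so */
		goto e_err;
	return 0;

e_err:
	pr_notice("init failed\n");
e_quiet:
	dev->priv = NULL;
	return ret;
}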
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index a54f9367a580..0770a83bf1a5 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
desc->ccp = chan->ccp;
+ INIT_LIST_HEAD(&desc->entry);
INIT_LIST_HEAD(&desc->pending);
INIT_LIST_HEAD(&desc->active);
desc->status = DMA_IN_PROGRESS;
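
The one-liner matters because a kzalloc()'d list_head holds NULL pointers, and a later list_del() on it would dereference them; INIT_LIST_HEAD() makes the node self-linked, so deletion is harmless even if the descriptor was never queued. In isolation (struct my_desc is hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct my_desc { struct list_head entry; };	/* hypothetical */

static struct my_desc *my_alloc(void)
{
	struct my_desc *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		INIT_LIST_HEAD(&d->entry); /* self-linked: list_del() is safe */
	return d;
}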
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c8da8eb160da..422193690fd4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1777,8 +1777,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
LSB_ITEM_SIZE);
break;
default:
+ kfree(hmac_buf);
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
memset(&hmac_cmd, 0, sizeof(hmac_cmd));
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 6b17d179ef8a..7ca2d3408e7a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -21,6 +21,8 @@
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <asm/smp.h>
+
#include "sp-dev.h"
#include "psp-dev.h"
@@ -235,6 +237,13 @@ static int __sev_platform_init_locked(int *error)
return rc;
psp->sev_state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
dev_dbg(psp->dev, "SEV firmware initialized\n");
return rc;
@@ -294,6 +303,9 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
{
int state, rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/*
* The SEV spec requires that FACTORY_RESET must be issued in
* UNINIT state. Before we go further lets check if any guest is
@@ -338,6 +350,9 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
{
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (psp_master->sev_state == SEV_STATE_UNINIT) {
rc = __sev_platform_init_locked(&argp->error);
if (rc)
@@ -354,6 +369,9 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
void *blob = NULL;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -540,6 +558,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
void *pek_blob, *oca_blob;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -695,6 +716,16 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
struct sev_data_pdh_cert_export *data;
int ret;
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -741,13 +772,6 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
data->cert_chain_len = input.cert_chain_len;
cmd:
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_cert;
- }
-
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -929,8 +953,22 @@ static int sev_misc_init(struct psp_device *psp)
static int psp_check_sev_support(struct psp_device *psp)
{
- /* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
+
+ /*
+ * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
+ */
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
+ return -ENODEV;
+ }
+
+ if (!(val & 1)) {
+ /* Device does not support the SEV feature */
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@@ -1064,6 +1102,18 @@ void psp_pci_init(void)
/* Initialize the platform */
rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sp->dev, "SEV: retrying INIT command");
+ rc = sev_platform_init(&error);
+ }
+
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
return;
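
The new capable() checks gate every state-changing SEV ioctl (factory reset, key generation, CSR, import) to privileged callers, while pure queries stay unprivileged; PDH export only needs the capability when it has to transition the platform to INIT first. The shape of the guard, reduced to a sketch (my_do_reset is hypothetical):

#include <linux/capability.h>
#include <linux/errno.h>

static int my_do_reset(void)	/* hypothetical state-changing handler */
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* issue the destructive firmware command here */
	return 0;
}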
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 82a084f02990..dd516b35ba86 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -23,6 +23,7 @@
#include <linux/dmaengine.h>
#include <linux/psp-sev.h>
#include <linux/miscdevice.h>
+#include <linux/capability.h>
#include "sp-dev.h"
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index d3e8faa03f15..64d318dc0d47 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -293,7 +293,8 @@ static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
return 4;
}
-static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+static unsigned int hmac_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 254b48797799..3112b58d0bb1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -16,7 +16,7 @@
#include "cc_cipher.h"
#include "cc_request_mgr.h"
-#define MAX_ABLKCIPHER_SEQ_LEN 6
+#define MAX_SKCIPHER_SEQ_LEN 6
#define template_skcipher template_u.skcipher
@@ -822,7 +822,7 @@ static int cc_cipher_process(struct skcipher_request *req,
void *iv = req->iv;
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct cc_hw_desc desc[MAX_SKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
int rc;
unsigned int seq_len = 0;
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 250150560e68..91e424378217 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -35,7 +35,7 @@ config CHELSIO_IPSEC_INLINE
config CRYPTO_DEV_CHELSIO_TLS
tristate "Chelsio Crypto Inline TLS Driver"
depends on CHELSIO_T4
- depends on TLS
+ depends on TLS_TOE
select CRYPTO_DEV_CHELSIO
---help---
Support Chelsio Inline TLS with Chelsio crypto accelerator.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 38ee38b37ae6..1b4a5664e604 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -93,7 +93,7 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
@@ -568,11 +568,11 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.crypto);
+ container_of(alg, struct chcr_alg_template, alg.skcipher);
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
@@ -757,14 +757,14 @@ static inline void create_wreq(struct chcr_context *ctx,
*/
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- struct chcr_blkcipher_req_ctx *reqctx =
- ablkcipher_request_ctx(wrparam->req);
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(wrparam->req);
unsigned int temp = 0, transhdr_len, dst_size;
int error;
int nents;
@@ -807,9 +807,9 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ (!(get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ (!(get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
@@ -843,7 +843,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
if (reqctx->op && (ablkctx->ciph_mode ==
CHCR_SCMD_CIPHER_MODE_AES_CBC))
sg_pcopy_to_buffer(wrparam->req->src,
- sg_nents(wrparam->req->src), wrparam->req->info, 16,
+ sg_nents(wrparam->req->src), wrparam->req->iv, 16,
reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
return skb;
@@ -866,11 +866,11 @@ static inline int chcr_keyctx_ck_size(unsigned int keylen)
return ck_size;
}
-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
@@ -886,7 +886,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
return err;
}
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -912,13 +912,13 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
}
-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -943,13 +943,13 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
}
-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -981,7 +981,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1017,12 +1017,12 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
u32 isfinal)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_aes_ctx aes;
int ret, i;
u8 *key;
@@ -1051,16 +1051,16 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
return 0;
}
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+static int chcr_update_cipher_iv(struct skcipher_request *req,
struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
+ ctr_add_iv(iv, req->iv, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
@@ -1071,7 +1071,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
/*Updated before sending last WR*/
- memcpy(iv, req->info, AES_BLOCK_SIZE);
+ memcpy(iv, req->iv, AES_BLOCK_SIZE);
else
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1085,16 +1085,16 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
* for subsequent update requests
*/
-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+static int chcr_final_cipher_iv(struct skcipher_request *req,
struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
@@ -1108,25 +1108,25 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct cipher_wr_param wrparam;
struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
goto unmap;
- if (req->nbytes == reqctx->processed) {
+ if (req->cryptlen == reqctx->processed) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
- err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+ err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
goto complete;
}
@@ -1134,13 +1134,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
else
bytes = rounddown(bytes, 16);
} else {
/* CTR mode counter overflow */
- bytes = req->nbytes - reqctx->processed;
+ bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
if (err)
@@ -1153,13 +1153,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
req->base.flags,
req->src,
req->dst,
- req->nbytes,
- req->info,
+ req->cryptlen,
+ req->iv,
reqctx->op);
goto complete;
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
@@ -1185,33 +1185,33 @@ complete:
return err;
}
-static int process_cipher(struct ablkcipher_request *req,
+static int process_cipher(struct skcipher_request *req,
unsigned short qid,
struct sk_buff **skb,
unsigned short op_type)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
reqctx->processed = 0;
- if (!req->info)
+ if (!req->iv)
goto error;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes == 0) ||
- (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+ (req->cryptlen == 0) ||
+ (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
- ablkctx->enckey_len, req->nbytes, ivsize);
+ ablkctx->enckey_len, req->cryptlen, ivsize);
goto error;
}
err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
if (err)
goto error;
- if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
/*Min dsgl size*/
@@ -1219,14 +1219,14 @@ static int process_cipher(struct ablkcipher_request *req,
/* Can be sent as Imm*/
unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
- dnents = sg_nents_xlen(req->dst, req->nbytes,
+ dnents = sg_nents_xlen(req->dst, req->cryptlen,
CHCR_DST_SG_SIZE, 0);
phys_dsgl = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+ reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
SGE_MAX_WR_LEN;
- bytes = IV + req->nbytes;
+ bytes = IV + req->cryptlen;
} else {
reqctx->imm = 0;
@@ -1236,21 +1236,21 @@ static int process_cipher(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
else
bytes = rounddown(bytes, 16);
} else {
- bytes = req->nbytes;
+ bytes = req->cryptlen;
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR) {
- bytes = adjust_ctr_overflow(req->info, bytes);
+ bytes = adjust_ctr_overflow(req->iv, bytes);
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
@@ -1259,7 +1259,7 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, IV);
+ memcpy(reqctx->iv, req->iv, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
@@ -1268,7 +1268,7 @@ static int process_cipher(struct ablkcipher_request *req,
req->base.flags,
req->src,
req->dst,
- req->nbytes,
+ req->cryptlen,
reqctx->iv,
op_type);
goto error;
@@ -1296,9 +1296,9 @@ error:
return err;
}
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+static int chcr_aes_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
@@ -1329,9 +1329,9 @@ error:
return err;
}
-static int chcr_aes_decrypt(struct ablkcipher_request *req)
+static int chcr_aes_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
@@ -1398,27 +1398,28 @@ out:
return err;
}
-static int chcr_cra_init(struct crypto_tfm *tfm)
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+
+ return chcr_device_init(ctx);
}
-static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
@@ -1427,17 +1428,17 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ return chcr_device_init(ctx);
}
-static void chcr_cra_exit(struct crypto_tfm *tfm)
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
crypto_free_sync_skcipher(ablkctx->sw_cipher);
@@ -2056,8 +2057,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ chcr_handle_cipher_resp(skcipher_request_cast(req),
input, err);
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -2148,7 +2149,7 @@ out:
return err;
}
-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int key_len)
{
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
@@ -2172,7 +2173,7 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -2576,12 +2577,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
u8 *buf = ulptx;
memcpy(buf, reqctx->iv, IV);
@@ -2599,13 +2600,13 @@ void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid)
{
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
@@ -2680,7 +2681,7 @@ void chcr_hash_dma_unmap(struct device *dev,
}
int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
int error;
@@ -2709,7 +2710,7 @@ err:
}
void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -3712,82 +3713,76 @@ static int chcr_aead_decrypt(struct aead_request *req)
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_cbc_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_cbc_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
- }
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = NULL,
- .cra_u .ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_xts_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_xts_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_ctr_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_ctr_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ .type = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_rfc3686_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = chcr_aes_rfc3686_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_rfc3686_init,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = chcr_aes_rfc3686_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
/* SHA */
@@ -4254,10 +4249,10 @@ static int chcr_unregister_alg(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
if (driver_algs[i].is_registered)
- crypto_unregister_alg(
- &driver_algs[i].alg.crypto);
+ crypto_unregister_skcipher(
+ &driver_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AEAD:
if (driver_algs[i].is_registered)
@@ -4293,21 +4288,20 @@ static int chcr_register_alg(void)
if (driver_algs[i].is_registered)
continue;
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- driver_algs[i].alg.crypto.cra_priority =
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ driver_algs[i].alg.skcipher.base.cra_priority =
CHCR_CRA_PRIORITY;
- driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
- driver_algs[i].alg.crypto.cra_flags =
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+ driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
+ driver_algs[i].alg.skcipher.base.cra_flags =
+ CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK;
- driver_algs[i].alg.crypto.cra_ctxsize =
+ driver_algs[i].alg.skcipher.base.cra_ctxsize =
sizeof(struct chcr_context) +
sizeof(struct ablk_ctx);
- driver_algs[i].alg.crypto.cra_alignmask = 0;
- driver_algs[i].alg.crypto.cra_type =
- &crypto_ablkcipher_type;
- err = crypto_register_alg(&driver_algs[i].alg.crypto);
- name = driver_algs[i].alg.crypto.cra_driver_name;
+ driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
+
+ err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
+ name = driver_algs[i].alg.skcipher.base.cra_driver_name;
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
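
chcr keeps a single driver_algs[] template table and branches on the type mask, stamping the driver-wide fields (priority, module, flags) into each skcipher template just before registration. A condensed sketch of that stamping step, assuming the kernel's skcipher registration API (my_register_template is hypothetical):

#include <linux/module.h>
#include <crypto/internal/skcipher.h>

static int my_register_template(struct skcipher_alg *alg)  /* hypothetical */
{
	alg->base.cra_priority = 300;
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_NEED_FALLBACK;

	return crypto_register_skcipher(alg);
}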
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index d1e6b51df0ce..f58c2b5c7fc5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -287,7 +287,7 @@ struct hash_wr_param {
};
struct cipher_wr_param {
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
char *iv;
int bytes;
unsigned short qid;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 993c97e70565..6db2df8c8a05 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -160,9 +160,9 @@ static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
return crypto_aead_ctx(tfm);
}
-static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+static inline struct chcr_context *c_ctx(struct crypto_skcipher *tfm)
{
- return crypto_ablkcipher_ctx(tfm);
+ return crypto_skcipher_ctx(tfm);
}
static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
@@ -285,7 +285,7 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
-struct chcr_blkcipher_req_ctx {
+struct chcr_skcipher_req_ctx {
struct sk_buff *skb;
struct scatterlist *dstsg;
unsigned int processed;
@@ -302,7 +302,7 @@ struct chcr_alg_template {
u32 type;
u32 is_registered;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -321,12 +321,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned short qid);
void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam);
-int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
-void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req);
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 24355680f30a..9da0f93a330b 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -673,16 +673,16 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ unsigned int last_desc, ndesc, flits = 0;
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
+ struct tx_sw_desc *sgl_sdesc;
int qidx, left, credits;
- unsigned int flits = 0, ndesc;
- struct adapter *adap;
+ bool immediate = false;
struct sge_eth_txq *q;
+ struct adapter *adap;
struct port_info *pi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
struct sec_path *sp;
- bool immediate = false;
if (!x->xso.offload_handle)
return NETDEV_TX_BUSY;
@@ -715,8 +715,14 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
if (!immediate &&
- unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
goto out_free;
}
@@ -742,17 +748,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
- int last_desc;
-
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
- 0, addr);
+ 0, sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
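
Hoisting last_desc to the top lets the DMA address array live in the per-descriptor software state (sgl_sdesc->addr) rather than on the stack; note the wraparound when the index runs past the end of the ring. The index arithmetic on its own (ring_last_desc is hypothetical):

/* Index of the last descriptor a request occupies in a ring of q_size
 * entries, wrapping past the end. */
static unsigned int ring_last_desc(unsigned int pidx, unsigned int ndesc,
				   unsigned int q_size)
{
	unsigned int last = pidx + ndesc - 1;

	if (last >= q_size)
		last -= q_size;
	return last;
}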
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 025c831d0899..d2bc655ab931 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -21,6 +21,7 @@
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
+#include <net/tls_toe.h>
#include "t4fw_api.h"
#include "t4_msg.h"
@@ -118,7 +119,7 @@ struct tls_scmd {
};
struct chtls_dev {
- struct tls_device tlsdev;
+ struct tls_toe_device tlsdev;
struct list_head list;
struct cxgb4_lld_info *lldi;
struct pci_dev *pdev;
@@ -362,7 +363,7 @@ enum {
#define TCP_PAGE(sk) (sk->sk_frag.page)
#define TCP_OFF(sk) (sk->sk_frag.offset)
-static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
+static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
{
return container_of(tlsdev, struct chtls_dev, tlsdev);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 98bc5a4cd5e7..5cf9b021220b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -97,7 +97,7 @@ static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
if (!skb)
return NULL;
- memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+ __skb_put_data(skb, flowc, flowclen);
skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
return skb;
@@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
@@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
break;
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
@@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1774,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
@@ -1841,8 +1841,7 @@ skip_copy:
tp->urg_data = 0;
if (avail + offset >= skb->len) {
- if (likely(skb))
- chtls_free_skb(sk, skb);
+ chtls_free_skb(sk, skb);
buffers_freed++;
if (copied >= target &&
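
sk_backlog.tail is updated by other CPUs under the socket lock but sampled here without it, so READ_ONCE() marks the access as a deliberate lockless read and stops the compiler from tearing or refetching it. A minimal illustration (backlog_pending is hypothetical):

#include <net/sock.h>

static bool backlog_pending(struct sock *sk)	/* hypothetical */
{
	/* annotated lockless read; writers hold the socket spinlock */
	return READ_ONCE(sk->sk_backlog.tail) != NULL;
}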
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index e6df5b95ed47..18996935d8ba 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -124,7 +124,7 @@ static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
mutex_unlock(&notify_mutex);
}
-static int chtls_inline_feature(struct tls_device *dev)
+static int chtls_inline_feature(struct tls_toe_device *dev)
{
struct net_device *netdev;
struct chtls_dev *cdev;
@@ -140,7 +140,7 @@ static int chtls_inline_feature(struct tls_device *dev)
return 0;
}
-static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
+static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
@@ -149,7 +149,7 @@ static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
return 0;
}
-static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
+static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
@@ -161,7 +161,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
{
int i;
- tls_unregister_device(&cdev->tlsdev);
+ tls_toe_unregister_device(&cdev->tlsdev);
kvfree(cdev->kmap.addr);
idr_destroy(&cdev->hwtid_idr);
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
@@ -173,27 +173,27 @@ static void chtls_free_uld(struct chtls_dev *cdev)
static inline void chtls_dev_release(struct kref *kref)
{
+ struct tls_toe_device *dev;
struct chtls_dev *cdev;
- struct tls_device *dev;
- dev = container_of(kref, struct tls_device, kref);
+ dev = container_of(kref, struct tls_toe_device, kref);
cdev = to_chtls_dev(dev);
chtls_free_uld(cdev);
}
static void chtls_register_dev(struct chtls_dev *cdev)
{
- struct tls_device *tlsdev = &cdev->tlsdev;
+ struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
+ strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
- TLS_DEVICE_NAME_MAX);
+ TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
tlsdev->release = chtls_dev_release;
kref_init(&tlsdev->kref);
- tls_register_device(tlsdev);
+ tls_toe_register_device(tlsdev);
cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
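
Apart from the tls_ to tls_toe_ renames, the registration flow is unchanged: the device's lifetime is still tied to the kref embedded in struct tls_toe_device, with ->release freeing the container. A compact sketch of the kref hookup, assuming the renamed TOE API (struct my_toe, my_release and my_register are hypothetical):

#include <linux/slab.h>
#include <net/tls_toe.h>

struct my_toe { struct tls_toe_device tlsdev; };	/* hypothetical */

static void my_release(struct kref *kref)
{
	struct tls_toe_device *dev =
		container_of(kref, struct tls_toe_device, kref);

	kfree(container_of(dev, struct my_toe, tlsdev));
}

static void my_register(struct my_toe *t)
{
	strlcpy(t->tlsdev.name, "mytoe", TLS_TOE_DEVICE_NAME_MAX);
	t->tlsdev.release = my_release;
	kref_init(&t->tlsdev.kref);
	tls_toe_register_device(&t->tlsdev);
}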
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index d81a1297cb9e..73a899e6f837 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -23,12 +24,12 @@ static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
static inline void
-_writefield(u32 offset, void *value)
+_writefield(u32 offset, const void *value)
{
int i;
for (i = 0; i < 4; i++)
- iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
+ iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}
/* Read a 128 bit field (either a writable key or IV) */
@@ -42,12 +43,12 @@ _readfield(u32 offset, void *value)
}
static int
-do_crypt(void *src, void *dst, int len, u32 flags)
+do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
u32 status;
u32 counter = AES_OP_TIMEOUT;
- iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
+ iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
iowrite32(len, _iobase + AES_LENA_REG);
@@ -64,16 +65,14 @@ do_crypt(void *src, void *dst, int len, u32 flags)
return counter ? 0 : 1;
}
-static unsigned int
-geode_aes_crypt(struct geode_aes_op *op)
+static void
+geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
+ void *dst, u32 len, u8 *iv, int mode, int dir)
{
u32 flags = 0;
unsigned long iflags;
int ret;
- if (op->len == 0)
- return 0;
-
/* If the source and destination are the same, then
* we need to turn on the coherent flags, otherwise
* we don't need to worry
@@ -81,32 +80,28 @@ geode_aes_crypt(struct geode_aes_op *op)
flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
- if (op->dir == AES_DIR_ENCRYPT)
+ if (dir == AES_DIR_ENCRYPT)
flags |= AES_CTRL_ENCRYPT;
/* Start the critical section */
spin_lock_irqsave(&lock, iflags);
- if (op->mode == AES_MODE_CBC) {
+ if (mode == AES_MODE_CBC) {
flags |= AES_CTRL_CBC;
- _writefield(AES_WRITEIV0_REG, op->iv);
+ _writefield(AES_WRITEIV0_REG, iv);
}
- if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
- flags |= AES_CTRL_WRKEY;
- _writefield(AES_WRITEKEY0_REG, op->key);
- }
+ flags |= AES_CTRL_WRKEY;
+ _writefield(AES_WRITEKEY0_REG, tctx->key);
- ret = do_crypt(op->src, op->dst, op->len, flags);
+ ret = do_crypt(src, dst, len, flags);
BUG_ON(ret);
- if (op->mode == AES_MODE_CBC)
- _readfield(AES_WRITEIV0_REG, op->iv);
+ if (mode == AES_MODE_CBC)
+ _readfield(AES_WRITEIV0_REG, iv);
spin_unlock_irqrestore(&lock, iflags);
-
- return op->len;
}
/* CRYPTO-API Functions */
@@ -114,13 +109,13 @@ geode_aes_crypt(struct geode_aes_op *op)
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- op->keylen = len;
+ tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
- memcpy(op->key, key, len);
+ memcpy(tctx->key, key, len);
return 0;
}
@@ -133,135 +128,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+ tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tctx->fallback.cip->base.crt_flags |=
+ (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+ ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
+ tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
}
return ret;
}
-static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
+static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
unsigned int ret;
- op->keylen = len;
+ tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
- memcpy(op->key, key, len);
+ memcpy(tctx->key, key, len);
return 0;
}
if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
- return ret;
-}
-
-static int fallback_blk_dec(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-
- tfm = desc->tfm;
- desc->tfm = op->fallback.blk;
-
- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
- return ret;
-}
-static int fallback_blk_enc(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-
- tfm = desc->tfm;
- desc->tfm = op->fallback.blk;
-
- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
+ crypto_skcipher_clear_flags(tctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(tctx->fallback.skcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(tctx->fallback.skcipher) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
- crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
return;
}
- op->src = (void *) in;
- op->dst = (void *) out;
- op->mode = AES_MODE_ECB;
- op->flags = 0;
- op->len = AES_BLOCK_SIZE;
- op->dir = AES_DIR_ENCRYPT;
-
- geode_aes_crypt(op);
+ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
+ AES_MODE_ECB, AES_DIR_ENCRYPT);
}
static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
- crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
return;
}
- op->src = (void *) in;
- op->dst = (void *) out;
- op->mode = AES_MODE_ECB;
- op->flags = 0;
- op->len = AES_BLOCK_SIZE;
- op->dir = AES_DIR_DECRYPT;
-
- geode_aes_crypt(op);
+ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
+ AES_MODE_ECB, AES_DIR_DECRYPT);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- op->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ tctx->fallback.cip = crypto_alloc_cipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(op->fallback.cip)) {
+ if (IS_ERR(tctx->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.cip);
+ return PTR_ERR(tctx->fallback.cip);
}
return 0;
@@ -269,10 +222,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(op->fallback.cip);
- op->fallback.cip = NULL;
+ crypto_free_cipher(tctx->fallback.cip);
}
static struct crypto_alg geode_alg = {
@@ -285,7 +237,7 @@ static struct crypto_alg geode_alg = {
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
+ .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -298,209 +250,126 @@ static struct crypto_alg geode_alg = {
}
};
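For orientation (a sketch, not part of the patch): geode_alg above remains a synchronous single-block "aes" cipher, so kernel callers reach it through the crypto_cipher API. A minimal example with a placeholder all-zero key and abbreviated error handling:

#include <crypto/aes.h>
#include <linux/crypto.h>

/* Sketch only: drive a single-block cipher such as "aes". */
static int demo_encrypt_one_block(void)
{
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 in[AES_BLOCK_SIZE] = { 0 }, out[AES_BLOCK_SIZE];
	struct crypto_cipher *cip;
	int ret;

	cip = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(cip))
		return PTR_ERR(cip);

	ret = crypto_cipher_setkey(cip, key, sizeof(key));
	if (!ret)
		crypto_cipher_encrypt_one(cip, out, in);	/* one 16-byte block */

	crypto_free_cipher(cip);
	return ret;
}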
-static int
-geode_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- op->iv = walk.iv;
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_CBC;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_DECRYPT;
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- ret = geode_aes_crypt(op);
-
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ tctx->fallback.skcipher =
+ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tctx->fallback.skcipher)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(tctx->fallback.skcipher);
}
- return err;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tctx->fallback.skcipher));
+ return 0;
}
-static int
-geode_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- op->iv = walk.iv;
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_CBC;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_ENCRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- return err;
+ crypto_free_skcipher(tctx->fallback.skcipher);
}
-static int fallback_init_blk(struct crypto_tfm *tfm)
+static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
+ if (dir == AES_DIR_DECRYPT)
+ return crypto_skcipher_decrypt(subreq);
+ else
+ return crypto_skcipher_encrypt(subreq);
+ }
- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ err = skcipher_walk_virt(&walk, req, false);
- if (IS_ERR(op->fallback.blk)) {
- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.blk);
+ while ((nbytes = walk.nbytes) != 0) {
+ geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ walk.iv, mode, dir);
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
- return 0;
+ return err;
}
-static void fallback_exit_blk(struct crypto_tfm *tfm)
+static int geode_cbc_encrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(op->fallback.blk);
- op->fallback.blk = NULL;
+ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}
-static struct crypto_alg geode_cbc_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-geode",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
- .cra_alignmask = 15,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = geode_setkey_blk,
- .encrypt = geode_cbc_encrypt,
- .decrypt = geode_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
-};
-
-static int
-geode_ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_cbc_decrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_ECB;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_DECRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
+ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}
-static int
-geode_ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_ecb_encrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_ECB;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_ENCRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
+}
- return err;
+static int geode_ecb_decrypt(struct skcipher_request *req)
+{
+ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}
-static struct crypto_alg geode_ecb_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-geode",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
- .cra_alignmask = 15,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = geode_setkey_blk,
- .encrypt = geode_ecb_encrypt,
- .decrypt = geode_ecb_decrypt,
- }
- }
+static struct skcipher_alg geode_skcipher_algs[] = {
+ {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-geode",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .base.cra_alignmask = 15,
+ .base.cra_module = THIS_MODULE,
+ .init = geode_init_skcipher,
+ .exit = geode_exit_skcipher,
+ .setkey = geode_setkey_skcipher,
+ .encrypt = geode_cbc_encrypt,
+ .decrypt = geode_cbc_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-geode",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .base.cra_alignmask = 15,
+ .base.cra_module = THIS_MODULE,
+ .init = geode_init_skcipher,
+ .exit = geode_exit_skcipher,
+ .setkey = geode_setkey_skcipher,
+ .encrypt = geode_ecb_encrypt,
+ .decrypt = geode_ecb_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
};
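As a usage sketch (illustrative, with hypothetical buffers): once these skciphers are registered, a caller drives them through the generic skcipher API; a 128-bit key stays on the Geode hardware path, while 192/256-bit keys transparently hit the fallback set up in geode_init_skcipher.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Sketch: in-place CBC-AES over a block-aligned buffer. */
static int demo_cbc_aes(u8 *buf, unsigned int len,
			const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_req;

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_req:
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}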
static void geode_aes_remove(struct pci_dev *dev)
{
crypto_unregister_alg(&geode_alg);
- crypto_unregister_alg(&geode_ecb_alg);
- crypto_unregister_alg(&geode_cbc_alg);
+ crypto_unregister_skciphers(geode_skcipher_algs,
+ ARRAY_SIZE(geode_skcipher_algs));
pci_iounmap(dev, _iobase);
_iobase = NULL;
@@ -538,20 +407,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (ret)
goto eiomap;
- ret = crypto_register_alg(&geode_ecb_alg);
+ ret = crypto_register_skciphers(geode_skcipher_algs,
+ ARRAY_SIZE(geode_skcipher_algs));
if (ret)
goto ealg;
- ret = crypto_register_alg(&geode_cbc_alg);
- if (ret)
- goto eecb;
-
dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
return 0;
- eecb:
- crypto_unregister_alg(&geode_ecb_alg);
-
ealg:
crypto_unregister_alg(&geode_alg);
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index 5c6e131a8f9d..6d0a0cdc7647 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -46,21 +46,10 @@
#define AES_OP_TIMEOUT 0x50000
-struct geode_aes_op {
-
- void *src;
- void *dst;
-
- u32 mode;
- u32 dir;
- u32 flags;
- int len;
-
+struct geode_aes_tfm_ctx {
u8 key[AES_KEYSIZE_128];
- u8 *iv;
-
union {
- struct crypto_blkcipher *blk;
+ struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
u32 keylen;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a18e62df68d9..4e7323884ae3 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -22,6 +22,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
static char hifn_pll_ref[sizeof("extNNN")] = "ext";
module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
@@ -596,7 +597,7 @@ struct hifn_crypt_result {
struct hifn_crypto_alg {
struct list_head entry;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
struct hifn_device *dev;
};
@@ -1404,7 +1405,7 @@ static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
w->num = 0;
}
-static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
+static int skcipher_add(unsigned int *drestp, struct scatterlist *dst,
unsigned int size, unsigned int *nbytesp)
{
unsigned int copy, drest = *drestp, nbytes = *nbytesp;
@@ -1433,11 +1434,11 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
-static int hifn_cipher_walk(struct ablkcipher_request *req,
+static int hifn_cipher_walk(struct skcipher_request *req,
struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
- unsigned int nbytes = req->nbytes, offset, copy, diff;
+ unsigned int nbytes = req->cryptlen, offset, copy, diff;
int idx, tidx, err;
tidx = idx = 0;
@@ -1459,7 +1460,7 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
t = &w->cache[idx];
- err = ablkcipher_add(&dlen, dst, slen, &nbytes);
+ err = skcipher_add(&dlen, dst, slen, &nbytes);
if (err < 0)
return err;
@@ -1498,7 +1499,7 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
dst = &req->dst[idx];
- err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
+ err = skcipher_add(&dlen, dst, nbytes, &nbytes);
if (err < 0)
return err;
@@ -1518,13 +1519,13 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
return tidx;
}
-static int hifn_setup_session(struct ablkcipher_request *req)
+static int hifn_setup_session(struct skcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
struct hifn_device *dev = ctx->dev;
unsigned long dlen, flags;
- unsigned int nbytes = req->nbytes, idx = 0;
+ unsigned int nbytes = req->cryptlen, idx = 0;
int err = -EINVAL, sg_num;
struct scatterlist *dst;
@@ -1563,7 +1564,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
goto err_out;
}
- err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
+ err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->cryptlen, req);
if (err)
goto err_out;
@@ -1610,7 +1611,7 @@ static int hifn_start_device(struct hifn_device *dev)
return 0;
}
-static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
+static int skcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
{
unsigned int srest = *srestp, nbytes = *nbytesp, copy;
@@ -1660,12 +1661,12 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i)
BUG_ON(dev->started < 0);
}
-static void hifn_process_ready(struct ablkcipher_request *req, int error)
+static void hifn_process_ready(struct skcipher_request *req, int error)
{
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
- unsigned int nbytes = req->nbytes;
+ unsigned int nbytes = req->cryptlen;
int idx = 0, err;
struct scatterlist *dst, *t;
void *saddr;
@@ -1688,7 +1689,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
saddr = kmap_atomic(sg_page(t));
- err = ablkcipher_get(saddr, &t->length, t->offset,
+ err = skcipher_get(saddr, &t->length, t->offset,
dst, nbytes, &nbytes);
if (err < 0) {
kunmap_atomic(saddr);
@@ -1910,7 +1911,7 @@ static void hifn_flush(struct hifn_device *dev)
{
unsigned long flags;
struct crypto_async_request *async_req;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int i;
@@ -1926,7 +1927,7 @@ static void hifn_flush(struct hifn_device *dev)
spin_lock_irqsave(&dev->lock, flags);
while ((async_req = crypto_dequeue_request(&dev->queue))) {
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
spin_unlock_irqrestore(&dev->lock, flags);
hifn_process_ready(req, -ENODEV);
@@ -1936,14 +1937,14 @@ static void hifn_flush(struct hifn_device *dev)
spin_unlock_irqrestore(&dev->lock, flags);
}
-static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int hifn_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_context *ctx = crypto_skcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1955,14 +1956,14 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int hifn_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_context *ctx = crypto_skcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1974,36 +1975,36 @@ static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int hifn_handle_req(struct ablkcipher_request *req)
+static int hifn_handle_req(struct skcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
int err = -EAGAIN;
- if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
+ if (dev->started + DIV_ROUND_UP(req->cryptlen, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
err = hifn_setup_session(req);
if (err == -EAGAIN) {
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
spin_unlock_irqrestore(&dev->lock, flags);
}
return err;
}
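hifn_handle_req parks requests on the device's software queue when the hardware ring is full; crypto_enqueue_request/crypto_dequeue_request implement the stock backlog discipline. Stripped of driver specifics (and of the dev->lock that hifn holds around these calls), the pattern is roughly:

#include <crypto/algapi.h>

static struct crypto_queue demo_queue;	/* crypto_init_queue(&demo_queue, 64) at init */

/* Submitter: returns -EINPROGRESS, or -EBUSY when backlogged. */
static int demo_submit(struct crypto_async_request *req)
{
	return crypto_enqueue_request(&demo_queue, req);
}

/* Dispatcher: wake a backlogged submitter before taking the next
 * request, as hifn_process_queue does below. */
static void demo_dispatch_one(void)
{
	struct crypto_async_request *async_req, *backlog;

	backlog = crypto_get_backlog(&demo_queue);
	async_req = crypto_dequeue_request(&demo_queue);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	if (async_req)
		async_req->complete(async_req, 0);	/* process, then complete */
}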
-static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
+static int hifn_setup_crypto_req(struct skcipher_request *req, u8 op,
u8 type, u8 mode)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
unsigned ivsize;
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
- if (req->info && mode != ACRYPTO_MODE_ECB) {
+ if (req->iv && mode != ACRYPTO_MODE_ECB) {
if (type == ACRYPTO_TYPE_AES_128)
ivsize = HIFN_AES_IV_LENGTH;
else if (type == ACRYPTO_TYPE_DES)
@@ -2022,7 +2023,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
rctx->op = op;
rctx->mode = mode;
rctx->type = type;
- rctx->iv = req->info;
+ rctx->iv = req->iv;
rctx->ivsize = ivsize;
/*
@@ -2037,7 +2038,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
static int hifn_process_queue(struct hifn_device *dev)
{
struct crypto_async_request *async_req, *backlog;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
unsigned long flags;
int err = 0;
@@ -2053,7 +2054,7 @@ static int hifn_process_queue(struct hifn_device *dev)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
err = hifn_handle_req(req);
if (err)
@@ -2063,7 +2064,7 @@ static int hifn_process_queue(struct hifn_device *dev)
return err;
}
-static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
+static int hifn_setup_crypto(struct skcipher_request *req, u8 op,
u8 type, u8 mode)
{
int err;
@@ -2083,22 +2084,22 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
/*
* AES encryption functions.
*/
-static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
@@ -2107,22 +2108,22 @@ static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
/*
* AES decryption functions.
*/
-static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
@@ -2131,22 +2132,22 @@ static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
/*
* DES encryption functions.
*/
-static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
@@ -2155,22 +2156,22 @@ static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
/*
* DES decryption functions.
*/
-static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
@@ -2179,44 +2180,44 @@ static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
/*
* 3DES encryption functions.
*/
-static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
/* 3DES decryption functions. */
-static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
@@ -2226,16 +2227,16 @@ struct hifn_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char drv_name[CRYPTO_MAX_ALG_NAME];
unsigned int bsize;
- struct ablkcipher_alg ablkcipher;
+ struct skcipher_alg skcipher;
};
-static struct hifn_alg_template hifn_alg_templates[] = {
+static const struct hifn_alg_template hifn_alg_templates[] = {
/*
* 3DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2245,7 +2246,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2255,7 +2256,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2266,7 +2267,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2280,7 +2281,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
*/
{
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2290,7 +2291,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2300,7 +2301,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2311,7 +2312,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2325,7 +2326,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2335,7 +2336,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_AES_IV_LENGTH,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -2346,7 +2347,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2356,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2366,18 +2367,19 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
};
-static int hifn_cra_init(struct crypto_tfm *tfm)
+static int hifn_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
- struct hifn_context *ctx = crypto_tfm_ctx(tfm);
+ struct hifn_context *ctx = crypto_skcipher_ctx(tfm);
ctx->dev = ha->dev;
- tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct hifn_request_context));
+
return 0;
}
-static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
+static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_template *t)
{
struct hifn_crypto_alg *alg;
int err;
@@ -2386,26 +2388,25 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
if (!alg)
return -ENOMEM;
- snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
- snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
+ alg->alg = t->skcipher;
+ alg->alg.init = hifn_init_tfm;
+
+ snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
+ snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
t->drv_name, dev->name);
- alg->alg.cra_priority = 300;
- alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
- alg->alg.cra_blocksize = t->bsize;
- alg->alg.cra_ctxsize = sizeof(struct hifn_context);
- alg->alg.cra_alignmask = 0;
- alg->alg.cra_type = &crypto_ablkcipher_type;
- alg->alg.cra_module = THIS_MODULE;
- alg->alg.cra_u.ablkcipher = t->ablkcipher;
- alg->alg.cra_init = hifn_cra_init;
+ alg->alg.base.cra_priority = 300;
+ alg->alg.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->alg.base.cra_blocksize = t->bsize;
+ alg->alg.base.cra_ctxsize = sizeof(struct hifn_context);
+ alg->alg.base.cra_alignmask = 0;
+ alg->alg.base.cra_module = THIS_MODULE;
alg->dev = dev;
list_add_tail(&alg->entry, &dev->alg_list);
- err = crypto_register_alg(&alg->alg);
+ err = crypto_register_skcipher(&alg->alg);
if (err) {
list_del(&alg->entry);
kfree(alg);
@@ -2420,7 +2421,7 @@ static void hifn_unregister_alg(struct hifn_device *dev)
list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
list_del(&a->entry);
- crypto_unregister_alg(&a->alg);
+ crypto_unregister_skcipher(&a->alg);
kfree(a);
}
}
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index ebaf91e0146d..c0e7a85fe129 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -2,7 +2,7 @@
config CRYPTO_DEV_HISI_SEC
tristate "Support for Hisilicon SEC crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select SG_SPLIT
@@ -14,26 +14,47 @@ config CRYPTO_DEV_HISI_SEC
To compile this as a module, choose M here: the module
will be called hisi_sec.
+config CRYPTO_DEV_HISI_SEC2
+ tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_DES
+ select CRYPTO_DEV_HISI_QM
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+ Support for the HiSilicon SEC engine (version 2) in the crypto
+ subsystem. It provides the AES, SM4 and 3DES algorithms in the
+ ECB, CBC and XTS cipher modes.
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec2.
+
config CRYPTO_DEV_HISI_QM
tristate
- depends on ARM64 && PCI && PCI_MSI
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI && PCI_MSI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine drivers may use this module.
-config CRYPTO_HISI_SGL
- tristate
- depends on ARM64
- help
- HiSilicon accelerator engines use a common hardware scatterlist
- interface for data format. Specific engine driver may use this
- module.
-
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HiSilicon ZIP accelerator"
- depends on ARM64 && PCI && PCI_MSI
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select CRYPTO_HISI_SGL
select SG_SPLIT
help
Support for HiSilicon ZIP Driver
+
+config CRYPTO_DEV_HISI_HPRE
+ tristate "Support for HISI HPRE accelerator"
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ select CRYPTO_DEV_HISI_QM
+ select CRYPTO_DH
+ select CRYPTO_RSA
+ help
+ Support for the HiSilicon HPRE (High Performance RSA Engine)
+ accelerator, which can accelerate RSA and DH algorithms.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 45a279741126..7f5f74c72baa 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
-obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o
-obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
+obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
+hisi_qm-objs = qm.o sgl.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
diff --git a/drivers/crypto/hisilicon/hpre/Makefile b/drivers/crypto/hisilicon/hpre/Makefile
new file mode 100644
index 000000000000..4fd32b789e1e
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hisi_hpre.o
+hisi_hpre-objs = hpre_main.o hpre_crypto.o
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
new file mode 100644
index 000000000000..ddf13ea9862a
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef __HISI_HPRE_H
+#define __HISI_HPRE_H
+
+#include <linux/list.h>
+#include "../qm.h"
+
+#define HPRE_SQE_SIZE sizeof(struct hpre_sqe)
+#define HPRE_PF_DEF_Q_NUM 64
+#define HPRE_PF_DEF_Q_BASE 0
+
+enum {
+ HPRE_CLUSTER0,
+ HPRE_CLUSTER1,
+ HPRE_CLUSTER2,
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM,
+};
+
+enum hpre_ctrl_dbgfs_file {
+ HPRE_CURRENT_QM,
+ HPRE_CLEAR_ENABLE,
+ HPRE_CLUSTER_CTRL,
+ HPRE_DEBUG_FILE_NUM,
+};
+
+#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
+
+struct hpre_debugfs_file {
+ int index;
+ enum hpre_ctrl_dbgfs_file type;
+ spinlock_t lock;
+ struct hpre_debug *debug;
+};
+
+/*
+ * One HPRE controller has one PF and multiple VFs. Some global
+ * configurations that only the PF owns live in this structure,
+ * so it is only relevant for the PF.
+ */
+struct hpre_debug {
+ struct dentry *debug_root;
+ struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
+};
+
+struct hpre {
+ struct hisi_qm qm;
+ struct list_head list;
+ struct hpre_debug debug;
+ u32 num_vfs;
+ unsigned long status;
+};
+
+enum hpre_alg_type {
+ HPRE_ALG_NC_NCRT = 0x0,
+ HPRE_ALG_NC_CRT = 0x1,
+ HPRE_ALG_KG_STD = 0x2,
+ HPRE_ALG_KG_CRT = 0x3,
+ HPRE_ALG_DH_G2 = 0x4,
+ HPRE_ALG_DH = 0x5,
+};
+
+struct hpre_sqe {
+ __le32 dw0;
+ __u8 task_len1;
+ __u8 task_len2;
+ __u8 mrttest_num;
+ __u8 resv1;
+ __le64 key;
+ __le64 in;
+ __le64 out;
+ __le16 tag;
+ __le16 resv2;
+#define _HPRE_SQE_ALIGN_EXT 7
+ __le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
+};
+
+struct hpre *hpre_find_device(int node);
+int hpre_algs_register(void);
+void hpre_algs_unregister(void);
+
+#endif
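One layout detail worth noting: dw0 (4 bytes), the four u8 fields (4), the three __le64 addresses (24), tag plus resv2 (4) and the seven-word pad (28) bring struct hpre_sqe to exactly 64 bytes, presumably the hardware's SQE slot size. A compile-time check could pin that assumption down; a sketch:

#include <linux/build_bug.h>

/* Sketch: assert the SQE matches the assumed 64-byte hardware slot. */
static inline void hpre_sqe_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct hpre_sqe) != 64);
}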
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
new file mode 100644
index 000000000000..98f037e6ea3e
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+#include <crypto/akcipher.h>
+#include <crypto/dh.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/kpp.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <linux/module.h>
+#include "hpre.h"
+
+struct hpre_ctx;
+
+#define HPRE_CRYPTO_ALG_PRI 1000
+#define HPRE_ALIGN_SZ 64
+#define HPRE_BITS_2_BYTES_SHIFT 3
+#define HPRE_RSA_512BITS_KSZ 64
+#define HPRE_RSA_1536BITS_KSZ 192
+#define HPRE_CRT_PRMS 5
+#define HPRE_CRT_Q 2
+#define HPRE_CRT_P 3
+#define HPRE_CRT_INV 4
+#define HPRE_DH_G_FLAG 0x02
+#define HPRE_TRY_SEND_TIMES 100
+#define HPRE_INVLD_REQ_ID (-1)
+#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev))
+
+#define HPRE_SQE_ALG_BITS 5
+#define HPRE_SQE_DONE_SHIFT 30
+#define HPRE_DH_MAX_P_SZ 512
+
+typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
+
+struct hpre_rsa_ctx {
+ /* low address: e--->n */
+ char *pubkey;
+ dma_addr_t dma_pubkey;
+
+ /* low address: d--->n */
+ char *prikey;
+ dma_addr_t dma_prikey;
+
+ /* low address: dq->dp->q->p->qinv */
+ char *crt_prikey;
+ dma_addr_t dma_crt_prikey;
+
+ struct crypto_akcipher *soft_tfm;
+};
+
+struct hpre_dh_ctx {
+ /*
+ * If base is g we compute the public key
+ * ya = g^xa mod p; [RFC2631 sec 2.1.1]
+ * else, if base is the peer's public key, we
+ * compute the shared secret
+ * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ */
+ char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+ dma_addr_t dma_xa_p;
+
+ char *g; /* m */
+ dma_addr_t dma_g;
+};
+
+struct hpre_ctx {
+ struct hisi_qp *qp;
+ struct hpre_asym_request **req_list;
+ spinlock_t req_lock;
+ unsigned int key_sz;
+ bool crt_g2_mode;
+ struct idr req_idr;
+ union {
+ struct hpre_rsa_ctx rsa;
+ struct hpre_dh_ctx dh;
+ };
+};
+
+struct hpre_asym_request {
+ char *src;
+ char *dst;
+ struct hpre_sqe req;
+ struct hpre_ctx *ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
+ int err;
+ int req_id;
+ hpre_cb cb;
+};
+
+static DEFINE_MUTEX(hpre_alg_lock);
+static unsigned int hpre_active_devs;
+
+static int hpre_alloc_req_id(struct hpre_ctx *ctx)
+{
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&ctx->req_lock, flags);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ spin_unlock_irqrestore(&ctx->req_lock, flags);
+
+ return id;
+}
+
+static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->req_lock, flags);
+ idr_remove(&ctx->req_idr, req_id);
+ spin_unlock_irqrestore(&ctx->req_lock, flags);
+}
+
+static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
+{
+ struct hpre_ctx *ctx;
+ int id;
+
+ ctx = hpre_req->ctx;
+ id = hpre_alloc_req_id(ctx);
+ if (id < 0)
+ return -EINVAL;
+
+ ctx->req_list[id] = hpre_req;
+ hpre_req->req_id = id;
+
+ return id;
+}
+
+static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ int id = hpre_req->req_id;
+
+ if (hpre_req->req_id >= 0) {
+ hpre_req->req_id = HPRE_INVLD_REQ_ID;
+ ctx->req_list[id] = NULL;
+ hpre_free_req_id(ctx, id);
+ }
+}
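These two helpers are the request-tracking half of the driver: an IDR hands out a small integer id that doubles as the SQE tag and as the index into req_list. Detached from the driver, the idiom reduces to the following sketch (depth and GFP flags are illustrative; hpre stores NULL in the IDR and keeps the pointer in its own array instead):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static DEFINE_IDR(demo_idr);

/* Bind @ptr to a free id in [0, depth); negative errno on failure. */
static int demo_track(void *ptr, int depth)
{
	int id;

	spin_lock(&demo_lock);
	id = idr_alloc(&demo_idr, ptr, 0, depth, GFP_ATOMIC);
	spin_unlock(&demo_lock);

	return id;
}

static void demo_untrack(int id)
{
	spin_lock(&demo_lock);
	idr_remove(&demo_idr, id);
	spin_unlock(&demo_lock);
}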
+
+static struct hisi_qp *hpre_get_qp_and_start(void)
+{
+ struct hisi_qp *qp;
+ struct hpre *hpre;
+ int ret;
+
+ /* find a suitable hpre device, preferably one on the current CPU's NUMA node */
+ hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
+ if (!hpre) {
+ pr_err("Can not find proper hpre device!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ qp = hisi_qm_create_qp(&hpre->qm, 0);
+ if (IS_ERR(qp)) {
+ pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0) {
+ hisi_qm_release_qp(qp);
+ pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return qp;
+}
+
+static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ enum dma_data_direction dma_dir;
+
+ if (is_src) {
+ hpre_req->src = NULL;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ hpre_req->dst = NULL;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+ *tmp = dma_map_single(dev, sg_virt(data),
+ len, dma_dir);
+ if (dma_mapping_error(dev, *tmp)) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ void *ptr;
+ int shift;
+
+ shift = ctx->key_sz - len;
+ if (shift < 0)
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ if (is_src) {
+ scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
+ hpre_req->src = ptr;
+ } else {
+ hpre_req->dst = ptr;
+ }
+
+ return 0;
+}
+
+static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ dma_addr_t tmp;
+ int ret;
+
+ /* A DH source operand must be bounced into a padded buffer;
+ * any other single, full-key-size SG entry is mapped directly.
+ */
+ if ((sg_is_last(data) && len == ctx->key_sz) &&
+ ((is_dh && !is_src) || !is_dh))
+ ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
+ else
+ ret = hpre_prepare_dma_buf(hpre_req, data, len,
+ is_src, &tmp);
+ if (ret)
+ return ret;
+
+ if (is_src)
+ msg->in = cpu_to_le64(tmp);
+ else
+ msg->out = cpu_to_le64(tmp);
+
+ return 0;
+}
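hpre_hw_data_init thus maps the caller's buffer directly only when it is a single SG entry of exactly key_sz bytes; everything else (and every DH source operand) is bounced through a zeroed coherent buffer so the operand ends up left-padded, as big-number engines expect. The padding step amounts to this sketch, with a hypothetical helper name:

#include <crypto/scatterwalk.h>
#include <linux/string.h>

/* Hypothetical helper: left-pad an SG payload of @len bytes into a
 * zeroed @buf_sz-byte operand. */
static int demo_pad_operand(void *buf, unsigned int buf_sz,
			    struct scatterlist *sg, unsigned int len)
{
	if (len > buf_sz)
		return -EINVAL;

	memset(buf, 0, buf_sz - len);
	scatterwalk_map_and_copy(buf + (buf_sz - len), sg, 0, len, 0);

	return 0;
}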
+
+static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst, struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t tmp;
+
+ tmp = le64_to_cpu(sqe->in);
+ if (!tmp)
+ return;
+
+ if (src) {
+ if (req->src)
+ dma_free_coherent(dev, ctx->key_sz,
+ req->src, tmp);
+ else
+ dma_unmap_single(dev, tmp,
+ ctx->key_sz, DMA_TO_DEVICE);
+ }
+
+ tmp = le64_to_cpu(sqe->out);
+ if (!tmp)
+ return;
+
+ if (req->dst) {
+ if (dst)
+ scatterwalk_map_and_copy(req->dst, dst, 0,
+ ctx->key_sz, 1);
+ dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
+ } else {
+ dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
+ }
+}
+
+static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
+ void **kreq)
+{
+ struct hpre_asym_request *req;
+ int err, id, done;
+
+#define HPRE_NO_HW_ERR 0
+#define HPRE_HW_TASK_DONE 3
+#define HPRE_HW_ERR_MASK 0x7ff
+#define HPRE_SQE_DONE_MASK 0x3
+ id = (int)le16_to_cpu(sqe->tag);
+ req = ctx->req_list[id];
+ hpre_rm_req_from_ctx(req);
+ *kreq = req;
+
+ err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
+ HPRE_HW_ERR_MASK;
+
+ done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
+ HPRE_SQE_DONE_MASK;
+
+ if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
+{
+ if (!ctx || !qp || qlen < 0)
+ return -EINVAL;
+
+ spin_lock_init(&ctx->req_lock);
+ ctx->qp = qp;
+
+ ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
+ if (!ctx->req_list)
+ return -ENOMEM;
+ ctx->key_sz = 0;
+ ctx->crt_g2_mode = false;
+ idr_init(&ctx->req_idr);
+
+ return 0;
+}
+
+static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ if (is_clear_all) {
+ idr_destroy(&ctx->req_idr);
+ kfree(ctx->req_list);
+ hisi_qm_release_qp(ctx->qp);
+ }
+
+ ctx->crt_g2_mode = false;
+ ctx->key_sz = 0;
+}
+
+static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_asym_request *req;
+ struct kpp_request *areq;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.dh;
+ areq->dst_len = ctx->key_sz;
+ hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+}
+
+static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_asym_request *req;
+ struct akcipher_request *areq;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.rsa;
+ areq->dst_len = ctx->key_sz;
+ hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ akcipher_request_complete(areq, ret);
+}
+
+static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
+{
+ struct hpre_ctx *ctx = qp->qp_ctx;
+ struct hpre_sqe *sqe = resp;
+
+ ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx)
+{
+ struct hisi_qp *qp;
+
+ qp = hpre_get_qp_and_start();
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ qp->qp_ctx = ctx;
+ qp->req_cb = hpre_alg_cb;
+
+ return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+}
+
+static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (is_rsa) {
+ struct akcipher_request *akreq = req;
+
+ if (akreq->dst_len < ctx->key_sz) {
+ akreq->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ tmp = akcipher_request_ctx(akreq);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_rsa_cb;
+ h_req->areq.rsa = akreq;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ } else {
+ struct kpp_request *kreq = req;
+
+ if (kreq->dst_len < ctx->key_sz) {
+ kreq->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ tmp = kpp_request_ctx(kreq);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_dh_cb;
+ h_req->areq.dh = kreq;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
+ }
+
+ msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+
+ return 0;
+}
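The task_len1 encoding above stores the operand width in 8-byte units, minus one: for a 2048-bit key, key_sz is 256 bytes and task_len1 = (256 >> 3) - 1 = 31. The decode direction, purely as an illustration:

/* Illustration only: recover the operand width from task_len1. */
static inline unsigned int demo_task_len1_to_bytes(u8 task_len1)
{
	return ((unsigned int)task_len1 + 1) << HPRE_BITS_2_BYTES_SHIFT;
}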
+
+#ifdef CONFIG_CRYPTO_DH
+static int hpre_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, false);
+ if (ret)
+ return ret;
+
+ if (req->src) {
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
+ if (ret)
+ goto clear_all;
+ }
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
+ if (ret)
+ goto clear_all;
+
+ if (ctx->crt_g2_mode && !req->src)
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_DH_G2);
+ else
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_DH);
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_is_dh_params_length_valid(unsigned int key_sz)
+{
+#define _HPRE_DH_GRP1 768
+#define _HPRE_DH_GRP2 1024
+#define _HPRE_DH_GRP5 1536
+#define _HPRE_DH_GRP14 2048
+#define _HPRE_DH_GRP15 3072
+#define _HPRE_DH_GRP16 4096
+ switch (key_sz) {
+ case _HPRE_DH_GRP1:
+ case _HPRE_DH_GRP2:
+ case _HPRE_DH_GRP5:
+ case _HPRE_DH_GRP14:
+ case _HPRE_DH_GRP15:
+ case _HPRE_DH_GRP16:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz;
+
+ if (params->p_size > HPRE_DH_MAX_P_SZ)
+ return -EINVAL;
+
+ if (hpre_is_dh_params_length_valid(params->p_size <<
+ HPRE_BITS_2_BYTES_SHIFT))
+ return -EINVAL;
+
+ sz = ctx->key_sz = params->p_size;
+ ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
+ if (!ctx->dh.xa_p)
+ return -ENOMEM;
+
+ memcpy(ctx->dh.xa_p + sz, params->p, sz);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
+ ctx->crt_g2_mode = true;
+ return 0;
+ }
+
+ ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
+ if (!ctx->dh.g) {
+ dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
+ ctx->dh.dma_xa_p);
+ ctx->dh.xa_p = NULL;
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
+
+ return 0;
+}
+
+static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (ctx->dh.g) {
+ memset(ctx->dh.g, 0, sz);
+ dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
+ ctx->dh.g = NULL;
+ }
+
+ if (ctx->dh.xa_p) {
+ memset(ctx->dh.xa_p, 0, sz);
+ dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
+ ctx->dh.dma_xa_p);
+ ctx->dh.xa_p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ hpre_dh_clear_ctx(ctx, false);
+
+ ret = hpre_dh_set_params(ctx, &params);
+ if (ret < 0)
+ goto err_clear_ctx;
+
+ memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+
+err_clear_ctx:
+ hpre_dh_clear_ctx(ctx, false);
+ return ret;
+}
+
+static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx);
+}
+
+static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_dh_clear_ctx(ctx, true);
+}
+#endif
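For reference, nothing calls hpre_dh_compute_value directly; it is reached through the generic kpp API once the "dh" algorithm is registered. A hedged caller-side sketch, with placeholder key material packed by crypto_dh_encode_key():

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/scatterlist.h>

/* Sketch: generate ya = g^xa mod p into @out. */
static int demo_dh_public_key(const void *packed_key, unsigned int key_len,
			      u8 *out, unsigned int out_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_kpp_set_secret(tfm, packed_key, key_len);
	if (ret)
		goto out_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&dst, out, out_len);
	kpp_request_set_input(req, NULL, 0);	/* no src: public-key generation */
	kpp_request_set_output(req, &dst, out_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				 crypto_req_done, &wait);
	ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);

	kpp_request_free(req);
out_tfm:
	crypto_free_kpp(tfm);
	return ret;
}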
+
+static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
+{
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
+
+static bool hpre_rsa_key_size_is_support(unsigned int len)
+{
+ unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
+
+#define _RSA_1024BITS_KEY_WDTH 1024
+#define _RSA_2048BITS_KEY_WDTH 2048
+#define _RSA_3072BITS_KEY_WDTH 3072
+#define _RSA_4096BITS_KEY_WDTH 4096
+
+ switch (bits) {
+ case _RSA_1024BITS_KEY_WDTH:
+ case _RSA_2048BITS_KEY_WDTH:
+ case _RSA_3072BITS_KEY_WDTH:
+ case _RSA_4096BITS_KEY_WDTH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int hpre_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ void *tmp = akcipher_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ /* For 512 and 1536 bits key size, use soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
+ ret = crypto_akcipher_encrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (!ctx->rsa.pubkey)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, true);
+ if (ret)
+ return ret;
+
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
+
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ if (ret)
+ goto clear_all;
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ if (ret)
+ goto clear_all;
+
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ void *tmp = akcipher_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ /* For 512 and 1536 bits key size, use soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
+ ret = crypto_akcipher_decrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (!ctx->rsa.prikey)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, true);
+ if (ret)
+ return ret;
+
+ if (ctx->crt_g2_mode) {
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_CRT);
+ } else {
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
+ }
+
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ if (ret)
+ goto clear_all;
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ if (ret)
+ goto clear_all;
+
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
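Likewise, hpre_rsa_enc/hpre_rsa_dec are only ever invoked through the akcipher interface. A minimal caller-side sketch (the DER key blob and buffers are placeholders):

#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int demo_rsa_encrypt(const void *der_pubkey, unsigned int keylen,
			    u8 *msg, unsigned int msg_len,
			    u8 *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_pubkey, keylen);
	if (ret)
		goto out_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
out_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}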
+
+static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
+ size_t vlen, bool private)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ ctx->key_sz = vlen;
+
+ /* if invalid key size provided, we use software tfm */
+ if (!hpre_rsa_key_size_is_support(ctx->key_sz))
+ return 0;
+
+ ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ &ctx->rsa.dma_pubkey,
+ GFP_KERNEL);
+ if (!ctx->rsa.pubkey)
+ return -ENOMEM;
+
+ if (private) {
+ ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ &ctx->rsa.dma_prikey,
+ GFP_KERNEL);
+ if (!ctx->rsa.prikey) {
+ dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.pubkey,
+ ctx->rsa.dma_pubkey);
+ ctx->rsa.pubkey = NULL;
+ return -ENOMEM;
+ }
+ memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
+ }
+ memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
+
+ /* Using hardware HPRE to do RSA */
+ return 1;
+}
+
+static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->rsa.pubkey = NULL;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
+
+ return 0;
+}
+
+static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ return -EINVAL;
+
+ memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
+
+ return 0;
+}
+
+static int hpre_crt_para_get(char *para, const char *raw,
+ unsigned int raw_sz, unsigned int para_size)
+{
+ const char *ptr = raw;
+ size_t len = raw_sz;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len || len > para_size)
+ return -EINVAL;
+
+ memcpy(para + para_size - len, ptr, len);
+
+ return 0;
+}
+
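+/*
+ * Pack the CRT private key into one DMA buffer of five half-key-size
+ * fields, placed at the slots named by the HPRE_CRT_* indices:
+ * dq, dp, q, p and qinv.
+ */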
+static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
+{
+ unsigned int hlf_ksz = ctx->key_sz >> 1;
+ struct device *dev = HPRE_DEV(ctx);
+ u64 offset;
+ int ret;
+
+ ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
+ &ctx->rsa.dma_crt_prikey,
+ GFP_KERNEL);
+ if (!ctx->rsa.crt_prikey)
+ return -ENOMEM;
+
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
+ rsa_key->dq_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
+ rsa_key->dp_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_Q;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_P;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_INV;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ ctx->crt_g2_mode = true;
+
+ return 0;
+
+free_key:
+ offset = hlf_ksz * HPRE_CRT_PRMS;
+ memset(ctx->rsa.crt_prikey, 0, offset);
+ dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
+ ctx->rsa.dma_crt_prikey);
+ ctx->rsa.crt_prikey = NULL;
+ ctx->crt_g2_mode = false;
+
+ return ret;
+}
+
+/* If is_clear_all is set, all resources of the QP will be released as well. */
+static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ unsigned int half_key_sz = ctx->key_sz >> 1;
+ struct device *dev = HPRE_DEV(ctx);
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (ctx->rsa.pubkey) {
+ dma_free_coherent(dev, ctx->key_sz << 1,
+ ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
+ ctx->rsa.pubkey = NULL;
+ }
+
+ if (ctx->rsa.crt_prikey) {
+ memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
+ ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
+ ctx->rsa.crt_prikey = NULL;
+ }
+
+ if (ctx->rsa.prikey) {
+ memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
+ ctx->rsa.dma_prikey);
+ ctx->rsa.prikey = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+/*
+ * Judge whether the key is in CRT form:
+ * CRT returns true, N-CRT returns false.
+ */
+static bool hpre_is_crt_key(struct rsa_key *key)
+{
+ u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
+ key->qinv_sz;
+
+#define LEN_OF_NCRT_PARA 5
+
+	/* An N-CRT key lacks the five CRT parameters, so their combined length is at most LEN_OF_NCRT_PARA */
+ return len > LEN_OF_NCRT_PARA;
+}
+
+static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct rsa_key rsa_key;
+ int ret;
+
+ hpre_rsa_clear_ctx(ctx, false);
+
+ if (private)
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ return ret;
+
+ ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
+ if (ret <= 0)
+ return ret;
+
+ if (private) {
+ ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+
+ if (hpre_is_crt_key(&rsa_key)) {
+ ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
+ if (ret < 0)
+ goto free;
+ }
+ }
+
+ ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+ if (ret < 0)
+ goto free;
+
+ if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+
+free:
+ hpre_rsa_clear_ctx(ctx, false);
+ return ret;
+}
+
+static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return hpre_rsa_setkey(ctx, key, keylen, false);
+}
+
+static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return hpre_rsa_setkey(ctx, key, keylen, true);
+}
+
+static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /* For 512 and 1536 bits key size, use soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
+ return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
+ if (IS_ERR(ctx->rsa.soft_tfm)) {
+ pr_err("Can not alloc_akcipher!\n");
+ return PTR_ERR(ctx->rsa.soft_tfm);
+ }
+
+ return hpre_ctx_init(ctx);
+}
+
+static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ hpre_rsa_clear_ctx(ctx, true);
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+}
+
+static struct akcipher_alg rsa = {
+ .sign = hpre_rsa_dec,
+ .verify = hpre_rsa_enc,
+ .encrypt = hpre_rsa_enc,
+ .decrypt = hpre_rsa_dec,
+ .set_pub_key = hpre_rsa_setpubkey,
+ .set_priv_key = hpre_rsa_setprivkey,
+ .max_size = hpre_rsa_max_size,
+ .init = hpre_rsa_init_tfm,
+ .exit = hpre_rsa_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "rsa",
+ .cra_driver_name = "hpre-rsa",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+#ifdef CONFIG_CRYPTO_DH
+static struct kpp_alg dh = {
+ .set_secret = hpre_dh_set_secret,
+ .generate_public_key = hpre_dh_compute_value,
+ .compute_shared_secret = hpre_dh_compute_value,
+ .max_size = hpre_dh_max_size,
+ .init = hpre_dh_init_tfm,
+ .exit = hpre_dh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "dh",
+ .cra_driver_name = "hpre-dh",
+ .cra_module = THIS_MODULE,
+ },
+};
+#endif
+
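+/*
+ * The algorithms are registered once, when the first device is probed,
+ * and unregistered again when the last one is removed; hpre_active_devs
+ * keeps the refcount under hpre_alg_lock.
+ */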
+int hpre_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&hpre_alg_lock);
+ if (++hpre_active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+#ifdef CONFIG_CRYPTO_DH
+ ret = crypto_register_kpp(&dh);
+ if (ret) {
+ crypto_unregister_akcipher(&rsa);
+ goto unlock;
+ }
+#endif
+ }
+
+unlock:
+ mutex_unlock(&hpre_alg_lock);
+ return ret;
+}
+
+void hpre_algs_unregister(void)
+{
+ mutex_lock(&hpre_alg_lock);
+ if (--hpre_active_devs == 0) {
+ crypto_unregister_akcipher(&rsa);
+#ifdef CONFIG_CRYPTO_DH
+ crypto_unregister_kpp(&dh);
+#endif
+ }
+ mutex_unlock(&hpre_alg_lock);
+}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
new file mode 100644
index 000000000000..34e0424410bf
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
+#include <linux/acpi.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/topology.h>
+#include "hpre.h"
+
+#define HPRE_VF_NUM 63
+#define HPRE_QUEUE_NUM_V2 1024
+#define HPRE_QM_ABNML_INT_MASK 0x100004
+#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HPRE_COMM_CNT_CLR_CE 0x0
+#define HPRE_CTRL_CNT_CLR_CE 0x301000
+#define HPRE_FSM_MAX_CNT 0x301008
+#define HPRE_VFG_AXQOS 0x30100c
+#define HPRE_VFG_AXCACHE 0x301010
+#define HPRE_RDCHN_INI_CFG 0x301014
+#define HPRE_AWUSR_FP_CFG 0x301018
+#define HPRE_BD_ENDIAN 0x301020
+#define HPRE_ECC_BYPASS 0x301024
+#define HPRE_RAS_WIDTH_CFG 0x301028
+#define HPRE_POISON_BYPASS 0x30102c
+#define HPRE_BD_ARUSR_CFG 0x301030
+#define HPRE_BD_AWUSR_CFG 0x301034
+#define HPRE_TYPES_ENB 0x301038
+#define HPRE_DATA_RUSER_CFG 0x30103c
+#define HPRE_DATA_WUSER_CFG 0x301040
+#define HPRE_INT_MASK 0x301400
+#define HPRE_INT_STATUS 0x301800
+#define HPRE_CORE_INT_ENABLE 0
+#define HPRE_CORE_INT_DISABLE 0x003fffff
+#define HPRE_RAS_ECC_1BIT_TH 0x30140c
+#define HPRE_RDCHN_INI_ST 0x301a00
+#define HPRE_CLSTR_BASE 0x302000
+#define HPRE_CORE_EN_OFFSET 0x04
+#define HPRE_CORE_INI_CFG_OFFSET 0x20
+#define HPRE_CORE_INI_STATUS_OFFSET 0x80
+#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c
+#define HPRE_CORE_IS_SCHD_OFFSET 0x90
+
+#define HPRE_RAS_CE_ENB 0x301410
+#define HPRE_HAC_RAS_CE_ENABLE 0x3f
+#define HPRE_RAS_NFE_ENB 0x301414
+#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0
+#define HPRE_RAS_FE_ENB 0x301418
+#define HPRE_HAC_RAS_FE_ENABLE 0
+
+#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
+#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
+#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
+#define HPRE_HAC_ECC1_CNT 0x301a04
+#define HPRE_HAC_ECC2_CNT 0x301a08
+#define HPRE_HAC_INT_STATUS 0x301800
+#define HPRE_HAC_SOURCE_INT 0x301600
+#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
+#define MASTER_TRANS_RETURN_RW 3
+#define HPRE_MASTER_TRANS_RETURN 0x300150
+#define HPRE_MASTER_GLOBAL_CTRL 0x300000
+#define HPRE_CLSTR_ADDR_INTRVL 0x1000
+#define HPRE_CLUSTER_INQURY 0x100
+#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
+#define HPRE_TIMEOUT_ABNML_BIT 6
+#define HPRE_PASID_EN_BIT 9
+#define HPRE_REG_RD_INTVRL_US 10
+#define HPRE_REG_RD_TMOUT_US 1000
+#define HPRE_DBGFS_VAL_MAX_LEN 20
+#define HPRE_PCI_DEVICE_ID 0xa258
+#define HPRE_PCI_VF_DEVICE_ID 0xa259
+#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_QM_USR_CFG_MASK 0xfffffffe
+#define HPRE_QM_AXI_CFG_MASK 0xffff
+#define HPRE_QM_VFG_AX_MASK 0xff
+#define HPRE_BD_USR_MASK 0x3
+#define HPRE_CLUSTER_CORE_MASK 0xf
+
+#define HPRE_VIA_MSI_DSM 1
+
+static LIST_HEAD(hpre_list);
+static DEFINE_MUTEX(hpre_list_lock);
+static const char hpre_name[] = "hisi_hpre";
+static struct dentry *hpre_debugfs_root;
+static const struct pci_device_id hpre_dev_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
+
+struct hpre_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const char * const hpre_debug_file_name[] = {
+ [HPRE_CURRENT_QM] = "current_qm",
+ [HPRE_CLEAR_ENABLE] = "rdclr_en",
+ [HPRE_CLUSTER_CTRL] = "cluster_ctrl",
+};
+
+static const struct hpre_hw_error hpre_hw_errors[] = {
+	{ .int_msk = BIT(0), .msg = "hpre_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
+ { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
+ { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
+ { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
+ { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
+ { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
+ { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
+ { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
+ { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
+ { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
+ { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { /* sentinel */ }
+};
+
+static const u64 hpre_cluster_offsets[] = {
+ [HPRE_CLUSTER0] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER1] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER2] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER3] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
+};
+
+static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
+ {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
+ {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
+ {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
+ {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
+ {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
+};
+
+static struct debugfs_reg32 hpre_com_dfx_regs[] = {
+ {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
+ {"AXQOS ", HPRE_VFG_AXQOS},
+ {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
+ {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1},
+ {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1},
+ {"BD_ENDIAN ", HPRE_BD_ENDIAN},
+ {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
+ {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
+ {"POISON_BYPASS ", HPRE_POISON_BYPASS},
+ {"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
+ {"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
+ {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
+ {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
+ {"INT_STATUS ", HPRE_INT_STATUS},
+};
+
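+/* Validate the requested PF queue number against the device's limit. */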
+static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
+ if (!pdev) {
+ q_num = HPRE_QUEUE_NUM_V2;
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ rev_id = pdev->revision;
+ if (rev_id != QM_HW_V2)
+ return -EINVAL;
+
+ q_num = HPRE_QUEUE_NUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || n == 0 || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops hpre_pf_q_num_ops = {
+ .set = hpre_pf_q_num_set,
+ .get = param_get_int,
+};
+
+static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
+module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
+MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of HPRE (1-1024)");
+
+static inline void hpre_add_to_list(struct hpre *hpre)
+{
+ mutex_lock(&hpre_list_lock);
+ list_add_tail(&hpre->list, &hpre_list);
+ mutex_unlock(&hpre_list_lock);
+}
+
+static inline void hpre_remove_from_list(struct hpre *hpre)
+{
+ mutex_lock(&hpre_list_lock);
+ list_del(&hpre->list);
+ mutex_unlock(&hpre_list_lock);
+}
+
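+/* Return the hpre device that is NUMA-closest to the given node. */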
+struct hpre *hpre_find_device(int node)
+{
+ struct hpre *hpre, *ret = NULL;
+ int min_distance = INT_MAX;
+ struct device *dev;
+ int dev_node = 0;
+
+ mutex_lock(&hpre_list_lock);
+ list_for_each_entry(hpre, &hpre_list, list) {
+ dev = &hpre->qm.pdev->dev;
+#ifdef CONFIG_NUMA
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+#endif
+ if (node_distance(dev_node, node) < min_distance) {
+ ret = hpre;
+ min_distance = node_distance(dev_node, node);
+ }
+ }
+ mutex_unlock(&hpre_list_lock);
+
+ return ret;
+}
+
+static int hpre_cfg_by_dsm(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ union acpi_object *obj;
+ guid_t guid;
+
+ if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
+ dev_err(dev, "Hpre GUID failed\n");
+ return -EINVAL;
+ }
+
+ /* Switch over to MSI handling due to non-standard PCI implementation */
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
+ 0, HPRE_VIA_MSI_DSM, NULL);
+ if (!obj) {
+ dev_err(dev, "ACPI handle failed!\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
+ int ret, i;
+ u32 val;
+
+ writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
+ writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
+ writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
+
+ /* disable FLR triggered by BME(bus master enable) */
+ writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+
+	/* HPRE needs more time, so disable this timeout interrupt */
+ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
+ writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+
+ writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
+ writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
+ writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
+ writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
+ writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
+ writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
+ writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
+
+ writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
+ writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
+ writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
+ val & BIT(0),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev, "read rd channel timeout fail!\n");
+ return -ETIMEDOUT;
+ }
+
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+		/* initialize the clusters */
+ writel(HPRE_CLUSTER_CORE_MASK,
+ HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
+ writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
+ HPRE_CORE_INI_STATUS), val,
+ ((val & HPRE_CLUSTER_CORE_MASK) ==
+ HPRE_CLUSTER_CORE_MASK),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev,
+ "cluster %d int st status timeout!\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ ret = hpre_cfg_by_dsm(qm);
+ if (ret)
+ dev_err(dev, "acpi_evaluate_dsm err.\n");
+
+ return ret;
+}
+
+static void hpre_cnt_regs_clear(struct hisi_qm *qm)
+{
+ unsigned long offset;
+ int i;
+
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear clusterX/cluster_ctrl */
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
+ writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+ }
+
+ /* clear rdclr_en */
+ writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
+
+ hisi_qm_debug_regs_clear(qm);
+}
+
+static void hpre_hw_error_disable(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ /* disable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+}
+
+static void hpre_hw_error_enable(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ /* enable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+}
+
+static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
+{
+ struct hpre *hpre = container_of(file->debug, struct hpre, debug);
+
+ return &hpre->qm;
+}
+
+static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ struct hpre_debug *debug = file->debug;
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ u32 num_vfs = hpre->num_vfs;
+ u32 vfq_num, tmp;
+
+ if (val > num_vfs)
+ return -EINVAL;
+
+	/* Calculate curr_qm_qp_num according to the PF or VF Dev ID and store it */
+ if (val == 0) {
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ } else {
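+		/*
+		 * Queues left over from the PF are shared evenly among the
+		 * VFs; the last VF also takes whatever remains.
+		 */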
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (val == num_vfs) {
+ qm->debug.curr_qm_qp_num =
+ qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
+ } else {
+ qm->debug.curr_qm_qp_num = vfq_num;
+ }
+ }
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+
+ return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
+ HPRE_CTRL_CNT_CLR_CE_BIT;
+}
+
+static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ u32 tmp;
+
+ if (val != 1 && val != 0)
+ return -EINVAL;
+
+ tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
+ ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
+ writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
+
+ return 0;
+}
+
+static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ int cluster_index = file->index - HPRE_CLUSTER_CTRL;
+ unsigned long offset = HPRE_CLSTR_BASE +
+ cluster_index * HPRE_CLSTR_ADDR_INTRVL;
+
+ return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
+}
+
+static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ int cluster_index = file->index - HPRE_CLUSTER_CTRL;
+ unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
+ HPRE_CLSTR_ADDR_INTRVL;
+
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+
+ return 0;
+}
+
+static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hpre_debugfs_file *file = filp->private_data;
+ char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
+ u32 val;
+ int ret;
+
+ spin_lock_irq(&file->lock);
+ switch (file->type) {
+ case HPRE_CURRENT_QM:
+ val = hpre_current_qm_read(file);
+ break;
+ case HPRE_CLEAR_ENABLE:
+ val = hpre_clear_enable_read(file);
+ break;
+ case HPRE_CLUSTER_CTRL:
+ val = hpre_cluster_inqry_read(file);
+ break;
+ default:
+ spin_unlock_irq(&file->lock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&file->lock);
+ ret = sprintf(tbuf, "%u\n", val);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hpre_debugfs_file *file = filp->private_data;
+ char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
+ unsigned long val;
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= HPRE_DBGFS_VAL_MAX_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
+ pos, buf, count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ spin_lock_irq(&file->lock);
+ switch (file->type) {
+ case HPRE_CURRENT_QM:
+ ret = hpre_current_qm_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case HPRE_CLEAR_ENABLE:
+ ret = hpre_clear_enable_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case HPRE_CLUSTER_CTRL:
+ ret = hpre_cluster_inqry_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_input;
+ }
+ spin_unlock_irq(&file->lock);
+
+ return count;
+
+err_input:
+ spin_unlock_irq(&file->lock);
+ return ret;
+}
+
+static const struct file_operations hpre_ctrl_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hpre_ctrl_debug_read,
+ .write = hpre_ctrl_debug_write,
+};
+
+static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
+ enum hpre_ctrl_dbgfs_file type, int indx)
+{
+ struct dentry *tmp, *file_dir;
+
+ if (dir)
+ file_dir = dir;
+ else
+ file_dir = dbg->debug_root;
+
+ if (type >= HPRE_DEBUG_FILE_NUM)
+ return -EINVAL;
+
+ spin_lock_init(&dbg->files[indx].lock);
+ dbg->files[indx].debug = dbg;
+ dbg->files[indx].type = type;
+ dbg->files[indx].index = indx;
+ tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
+ if (!tmp)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = hpre_com_dfx_regs;
+ regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
+ regset->base = qm->io_base;
+
+ tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ if (!tmp)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ char buf[HPRE_DBGFS_VAL_MAX_LEN];
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d, *tmp;
+ int i, ret;
+
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ sprintf(buf, "cluster%d", i);
+
+ tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ if (!tmp_d)
+ return -ENOENT;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = hpre_cluster_dfx_regs;
+ regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ regset->base = qm->io_base + hpre_cluster_offsets[i];
+
+ tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ if (!tmp)
+ return -ENOENT;
+ ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
+ i + HPRE_CLUSTER_CTRL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hpre_ctrl_debug_init(struct hpre_debug *debug)
+{
+ int ret;
+
+ ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM,
+ HPRE_CURRENT_QM);
+ if (ret)
+ return ret;
+
+ ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE,
+ HPRE_CLEAR_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = hpre_pf_comm_regs_debugfs_init(debug);
+ if (ret)
+ return ret;
+
+ return hpre_cluster_debugfs_init(debug);
+}
+
+static int hpre_debugfs_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct dentry *dir;
+ int ret;
+
+ dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
+ if (!dir)
+ return -ENOENT;
+
+ qm->debug.debug_root = dir;
+
+ ret = hisi_qm_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
+ if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
+ hpre->debug.debug_root = dir;
+ ret = hpre_ctrl_debug_init(&hpre->debug);
+ if (ret)
+ goto failed_to_create;
+ }
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(qm->debug.debug_root);
+ return ret;
+}
+
+static void hpre_debugfs_exit(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ debugfs_remove_recursive(qm->debug.debug_root);
+}
+
+static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ enum qm_hw_ver rev_id;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id < 0)
+ return -ENODEV;
+
+ if (rev_id == QM_HW_V1) {
+ pci_warn(pdev, "HPRE version 1 is not supported!\n");
+ return -EINVAL;
+ }
+
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+ qm->sqe_size = HPRE_SQE_SIZE;
+ qm->dev_name = hpre_name;
+ qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF;
+ if (pdev->is_physfn) {
+ qm->qp_base = HPRE_PF_DEF_Q_BASE;
+ qm->qp_num = hpre_pf_q_num;
+ }
+ qm->use_dma_api = true;
+
+ return 0;
+}
+
+static void hpre_hw_err_init(struct hpre *hpre)
+{
+ hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
+ 0, QM_DB_RANDOM_INVALID);
+ hpre_hw_error_enable(hpre);
+}
+
+static int hpre_pf_probe_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
+
+ ret = hpre_set_user_domain_and_cache(hpre);
+ if (ret)
+ return ret;
+
+ hpre_hw_err_init(hpre);
+
+ return 0;
+}
+
+static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_qm *qm;
+ struct hpre *hpre;
+ int ret;
+
+ hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
+ if (!hpre)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, hpre);
+
+ qm = &hpre->qm;
+ ret = hpre_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
+
+ ret = hisi_qm_init(qm);
+ if (ret)
+ return ret;
+
+ if (pdev->is_physfn) {
+ ret = hpre_pf_probe_init(hpre);
+ if (ret)
+ goto err_with_qm_init;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
+		/* v2 hardware starts to support getting the vft by mailbox */
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_with_qm_init;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ goto err_with_err_init;
+
+ ret = hpre_debugfs_init(hpre);
+ if (ret)
+ dev_warn(&pdev->dev, "init debugfs fail!\n");
+
+ hpre_add_to_list(hpre);
+
+ ret = hpre_algs_register();
+ if (ret < 0) {
+ hpre_remove_from_list(hpre);
+ pci_err(pdev, "fail to register algs to crypto!\n");
+ goto err_with_qm_start;
+ }
+ return 0;
+
+err_with_qm_start:
+ hisi_qm_stop(qm);
+
+err_with_err_init:
+ if (pdev->is_physfn)
+ hpre_hw_error_disable(hpre);
+
+err_with_qm_init:
+ hisi_qm_uninit(qm);
+
+ return ret;
+}
+
+static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ u32 qp_num = qm->qp_num;
+ int q_num, remain_q_num, i;
+ u32 q_base = qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qp_num;
+
+ /* If remaining queues are not enough, return error. */
+ if (remain_q_num < num_vfs)
+ return -EINVAL;
+
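+	/* Split the queues evenly; the last VF also takes the remainder. */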
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
+ if (ret)
+ return ret;
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int hpre_clear_vft_config(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ u32 num_vfs = hpre->num_vfs;
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= num_vfs; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ hpre->num_vfs = 0;
+
+ return 0;
+}
+
+static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, ret;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev,
+ "Can't enable VF. Please disable pre-enabled VFs!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
+ ret = hpre_vf_q_assign(hpre, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ hpre->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ hpre_clear_vft_config(hpre);
+ return ret;
+ }
+
+ return num_vfs;
+}
+
+static int hpre_sriov_disable(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
+ return -EPERM;
+ }
+
+	/* The .remove callback of hpre_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+
+ return hpre_clear_vft_config(hpre);
+}
+
+static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs)
+ return hpre_sriov_enable(pdev, num_vfs);
+ else
+ return hpre_sriov_disable(pdev);
+}
+
+static void hpre_remove(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ hpre_algs_unregister();
+ hpre_remove_from_list(hpre);
+ if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
+ ret = hpre_sriov_disable(pdev);
+ if (ret) {
+ pci_err(pdev, "Disable SRIOV fail!\n");
+ return;
+ }
+ }
+ if (qm->fun_type == QM_HW_PF) {
+ hpre_cnt_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ }
+
+ hpre_debugfs_exit(hpre);
+ hisi_qm_stop(qm);
+ if (qm->fun_type == QM_HW_PF)
+ hpre_hw_error_disable(hpre);
+ hisi_qm_uninit(qm);
+}
+
+static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
+{
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &hpre->qm.pdev->dev;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
+}
+
+static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
+ if (err_sts) {
+ hpre_log_hw_error(hpre, err_sts);
+
+ /* clear error interrupts */
+ writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, hpre_ret;
+
+ /* log qm error */
+ qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
+
+ /* log hpre error */
+ hpre_ret = hpre_hw_error_handle(hpre);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return hpre_process_hw_error(pdev);
+}
+
+static const struct pci_error_handlers hpre_err_handler = {
+ .error_detected = hpre_error_detected,
+};
+
+static struct pci_driver hpre_pci_driver = {
+ .name = hpre_name,
+ .id_table = hpre_dev_ids,
+ .probe = hpre_probe,
+ .remove = hpre_remove,
+ .sriov_configure = hpre_sriov_configure,
+ .err_handler = &hpre_err_handler,
+};
+
+static void hpre_register_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return;
+
+ hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
+ if (IS_ERR_OR_NULL(hpre_debugfs_root))
+ hpre_debugfs_root = NULL;
+}
+
+static void hpre_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hpre_debugfs_root);
+}
+
+static int __init hpre_init(void)
+{
+ int ret;
+
+ hpre_register_debugfs();
+
+ ret = pci_register_driver(&hpre_pci_driver);
+ if (ret) {
+ hpre_unregister_debugfs();
+ pr_err("hpre: can't register hisi hpre driver.\n");
+ }
+
+ return ret;
+}
+
+static void __exit hpre_exit(void)
+{
+ pci_unregister_driver(&hpre_pci_driver);
+ hpre_unregister_debugfs();
+}
+
+module_init(hpre_init);
+module_exit(hpre_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f975c393a603..b57da5ef8b5b 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -59,17 +59,17 @@
#define QM_CQ_PHASE_SHIFT 0
#define QM_CQ_FLAG_SHIFT 1
-#define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1)
+#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
-#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1)
+#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1)
+#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
#define QM_DOORBELL_CMD_SQ 0
@@ -169,17 +169,17 @@
#define QM_MK_SQC_DW3_V2(sqe_sz) \
((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define INIT_QC_COMMON(qc, base, pasid) do { \
- (qc)->head = 0; \
- (qc)->tail = 0; \
- (qc)->base_l = lower_32_bits(base); \
- (qc)->base_h = upper_32_bits(base); \
- (qc)->dw3 = 0; \
- (qc)->w8 = 0; \
- (qc)->rsvd0 = 0; \
- (qc)->pasid = pasid; \
- (qc)->w11 = 0; \
- (qc)->rsvd1 = 0; \
+#define INIT_QC_COMMON(qc, base, pasid) do { \
+ (qc)->head = 0; \
+ (qc)->tail = 0; \
+ (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
+ (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
+ (qc)->dw3 = 0; \
+ (qc)->w8 = 0; \
+ (qc)->rsvd0 = 0; \
+ (qc)->pasid = cpu_to_le16(pasid); \
+ (qc)->w11 = 0; \
+ (qc)->rsvd1 = 0; \
} while (0)
enum vft_type {
@@ -331,12 +331,18 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
unsigned long tmp0 = 0, tmp1 = 0;
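+	/*
+	 * Write the 16-byte mailbox in one shot: arm64 can use an
+	 * ldp/stp pair, other architectures fall back to memcpy_toio()
+	 * followed by a write barrier.
+	 */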
+ if (!IS_ENABLED(CONFIG_ARM64)) {
+ memcpy_toio(fun_base, src, 16);
+ wmb();
+ return;
+ }
+
asm volatile("ldp %0, %1, %3\n"
"stp %0, %1, %2\n"
"dsb sy\n"
: "=&r" (tmp0),
"=&r" (tmp1),
- "+Q" (*((char *)fun_base))
+ "+Q" (*((char __iomem *)fun_base))
: "Q" (*((char *)src))
: "memory");
}
@@ -350,12 +356,12 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
queue, cmd, (unsigned long long)dma_addr);
- mailbox.w0 = cmd |
+ mailbox.w0 = cpu_to_le16(cmd |
(op ? 0x1 << QM_MB_OP_SHIFT : 0) |
- (0x1 << QM_MB_BUSY_SHIFT);
- mailbox.queue_num = queue;
- mailbox.base_l = lower_32_bits(dma_addr);
- mailbox.base_h = upper_32_bits(dma_addr);
+ (0x1 << QM_MB_BUSY_SHIFT));
+ mailbox.queue_num = cpu_to_le16(queue);
+ mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
+ mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
mailbox.rsvd = 0;
mutex_lock(&qm->mailbox_lock);
@@ -442,7 +448,7 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
- u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK;
+ u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
return qm->qp_array[cqn];
}
@@ -464,7 +470,8 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
if (qp->req_cb) {
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
dma_rmb();
- qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head);
+ qp->req_cb(qp, qp->sqe + qm->sqe_size *
+ le16_to_cpu(cqe->sq_head));
qm_cq_head_update(qp);
cqe = qp->cqe + qp->qp_status.cq_head;
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
@@ -542,7 +549,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_NONE;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT;
+ type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
if (type < ARRAY_SIZE(qm_fifo_overflow))
dev_err(&qm->pdev->dev, "%s overflow\n",
qm_fifo_overflow[type]);
@@ -646,7 +653,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
- qp_status->cqc_phase = 1;
+ qp_status->cqc_phase = true;
qp_status->flags = 0;
}
@@ -963,13 +970,11 @@ static const struct file_operations qm_regs_fops = {
static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
- struct dentry *qm_d = qm->debug.qm_d, *tmp;
+ struct dentry *qm_d = qm->debug.qm_d;
struct debugfs_file *file = qm->debug.files + index;
- tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
- &qm_debug_fops);
- if (IS_ERR(tmp))
- return -ENOENT;
+ debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+ &qm_debug_fops);
file->index = index;
mutex_init(&file->lock);
@@ -981,9 +986,6 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi)
{
- dev_info(&qm->pdev->dev,
- "QM v%d does not support hw error handle\n", qm->ver);
-
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
@@ -1123,6 +1125,7 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
}
set_bit(qp_id, qm->qp_bitmap);
qm->qp_array[qp_id] = qp;
+ qm->qp_in_used++;
write_unlock(&qm->qps_lock);
@@ -1187,6 +1190,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
write_lock(&qm->qps_lock);
qm->qp_array[qp->qp_id] = NULL;
clear_bit(qp->qp_id, qm->qp_bitmap);
+ qm->qp_in_used--;
write_unlock(&qm->qps_lock);
kfree(qp);
@@ -1218,14 +1222,14 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
- sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size);
- sqc->w8 = QM_Q_DEPTH - 1;
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
+ sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
} else if (ver == QM_HW_V2) {
- sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size);
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
sqc->w8 = 0; /* rand_qc */
}
- sqc->cq_num = qp_id;
- sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type);
+ sqc->cq_num = cpu_to_le16(qp_id);
+ sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
@@ -1245,13 +1249,13 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
- cqc->dw3 = QM_MK_CQC_DW3_V1(0, 0, 0, 4);
- cqc->w8 = QM_Q_DEPTH - 1;
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
+ cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
} else if (ver == QM_HW_V2) {
- cqc->dw3 = QM_MK_CQC_DW3_V2(4);
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
cqc->w8 = 0;
}
- cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT;
+ cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
@@ -1392,6 +1396,24 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
}
/**
+ * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
+ * @qm: The qm to query for free qps.
+ *
+ * This function returns the number of free qps in the qm.
+ */
+int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
+{
+ int ret;
+
+ read_lock(&qm->qps_lock);
+ ret = qm->qp_num - qm->qp_in_used;
+ read_unlock(&qm->qps_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
*
@@ -1454,6 +1476,7 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
goto err_free_irq_vectors;
+ qm->qp_in_used = 0;
mutex_init(&qm->mailbox_lock);
rwlock_init(&qm->qps_lock);
@@ -1560,8 +1583,8 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm)
status->eq_head = 0;
status->aeq_head = 0;
- status->eqc_phase = 1;
- status->aeqc_phase = 1;
+ status->eqc_phase = true;
+ status->aeqc_phase = true;
}
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
@@ -1585,11 +1608,11 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- eqc->base_l = lower_32_bits(qm->eqe_dma);
- eqc->base_h = upper_32_bits(qm->eqe_dma);
+ eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
+ eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
- eqc->dw3 = QM_EQE_AEQE_SIZE;
- eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
+ eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
+ eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -1606,9 +1629,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- aeqc->base_l = lower_32_bits(qm->aeqe_dma);
- aeqc->base_h = upper_32_bits(qm->aeqe_dma);
- aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
+ aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
@@ -1780,12 +1803,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop);
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
- struct dentry *qm_d, *qm_regs;
+ struct dentry *qm_d;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
- if (IS_ERR(qm_d))
- return -ENOENT;
qm->debug.qm_d = qm_d;
/* only show this in PF */
@@ -1796,12 +1817,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
- &qm_regs_fops);
- if (IS_ERR(qm_regs)) {
- ret = -ENOENT;
- goto failed_to_create;
- }
+ debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
return 0;
@@ -1862,8 +1878,7 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi)
{
if (!qm->ops->hw_error_init) {
- dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n",
- qm->ver);
+ dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
@@ -1877,11 +1892,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
*
* Accelerators use this function to handle qm non-fatal hardware errors.
*/
-int hisi_qm_hw_error_handle(struct hisi_qm *qm)
+pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
- dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n",
- qm->ver);
+ dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
return PCI_ERS_RESULT_NONE;
}
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 70e672ae86bf..078b8f1f1b77 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -75,6 +75,8 @@
#define QM_Q_DEPTH 1024
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+
enum qp_state {
QP_STOP,
};
@@ -132,6 +134,7 @@ struct hisi_qm {
u32 sqe_size;
u32 qp_base;
u32 qp_num;
+ u32 qp_in_used;
u32 ctrl_qp_num;
struct qm_dma qdma;
@@ -204,12 +207,24 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi);
-int hisi_qm_hw_error_handle(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/Makefile b/drivers/crypto/hisilicon/sec2/Makefile
new file mode 100644
index 000000000000..b4f6cf14be3a
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += hisi_sec2.o
+hisi_sec2-objs = sec_main.o sec_crypto.o
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
new file mode 100644
index 000000000000..26754d0570ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
+
+#include <linux/list.h>
+
+#include "../qm.h"
+#include "sec_crypto.h"
+
+/* Cipher resource per hardware SEC queue */
+struct sec_cipher_res {
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+};
+
+/* Cipher request of SEC private */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request of Crypto */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+	/* Only cipher is supported at present */
+ struct sec_cipher_req c_req;
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ int fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @get_res: Get resources for TFM on the SEC device
+ * @resource_alloc: Allocate resources for queue context on the SEC device
+ * @resource_free: Free resources for queue context on the SEC device
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Call back for the request
+ * @process: Main processing logic of Skcipher
+ */
+struct sec_req_op {
+ int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
+ void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context which defines the cipher's related data */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
+
+/* SEC queue context which defines queue's relatives */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req **req_list;
+ struct idr req_idr;
+ void *alg_meta_data;
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+
+/* SEC crypto TFM context which defines queue, cipher and other related data */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+
+ /* Half queues for encipher, and half for decipher */
+ u32 hlf_q_num;
+
+	/* Fake-busy threshold; once crossed, -EBUSY is returned to the user */
+ u32 fake_req_limit;
+
+	/* Current cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+	/* Current cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
+ struct sec_cipher_ctx c_ctx;
+};
+
+enum sec_endian {
+ SEC_LE = 0,
+ SEC_32BE,
+ SEC_64BE
+};
+
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
+
+struct sec_dfx {
+ u64 send_cnt;
+ u64 recv_cnt;
+};
+
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
+ struct hisi_qm qm;
+ struct list_head list;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ u32 num_vfs;
+ unsigned long status;
+};
+
+struct sec_dev *sec_find_device(int node);
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
+#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
new file mode 100644
index 000000000000..dc1eb97d57f7
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+
+#include "sec.h"
+#include "sec_crypto.h"
+
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe(bd) bit operational relative MACRO */
+#define SEC_DE_OFFSET 1
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+
+static DEFINE_MUTEX(sec_algs_lock);
+static unsigned int sec_active_devs;
+
+/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
+static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
+ ctx->hlf_q_num;
+
+ return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
+
+static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
+
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ int req_id;
+
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (req_id < 0) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
+ }
+
+ req->qp_ctx = qp_ctx;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
+}
+
+static void sec_free_req_id(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ return;
+ }
+
+ qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
+
+ mutex_lock(&qp_ctx->req_lock);
+ idr_remove(&qp_ctx->req_idr, req_id);
+ mutex_unlock(&qp_ctx->req_lock);
+}
+
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
+{
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ u16 done, flag;
+ u8 type;
+ struct sec_req *req;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (type == SEC_BD_TYPE2) {
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (req->err_type || done != 0x1 || flag != 0x2)
+ dev_err(SEC_CTX_DEV(req->ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ } else {
+ pr_err("err bd type [%d]\n", type);
+ return;
+ }
+
+ __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+
+ req->ctx->req_op->buf_unmap(req->ctx, req);
+
+ req->ctx->req_op->callback(req->ctx, req);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ mutex_unlock(&qp_ctx->req_lock);
+ __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+
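+	/*
+	 * A genuinely full hardware queue is reported as -ENOBUFS, so
+	 * that -EBUSY stays reserved for the fake-busy backpressure
+	 * signal raised once the soft request limit is crossed.
+	 */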
+ if (ret == -EBUSY)
+ return -ENOBUFS;
+
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
+
+ return ret;
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
+ qp = hisi_qm_create_qp(qm, alg_type);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp->req_cb = sec_req_cb;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ mutex_init(&qp_ctx->req_lock);
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
+
+ qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
+ if (!qp_ctx->req_list)
+ goto err_destroy_idr;
+
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
+ SEC_SGL_SGE_NR);
+ if (!qp_ctx->c_in_pool) {
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_free_req_list;
+ }
+
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
+ SEC_SGL_SGE_NR);
+ if (!qp_ctx->c_out_pool) {
+ dev_err(dev, "fail to create sgl pool for output!\n");
+ goto err_free_c_in_pool;
+ }
+
+ ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ if (ret)
+ goto err_free_c_out_pool;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_queue_free;
+
+ return 0;
+
+err_queue_free:
+ ctx->req_op->resource_free(ctx, qp_ctx);
+err_free_c_out_pool:
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
+err_free_c_in_pool:
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ hisi_qm_release_qp(qp);
+
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ hisi_qm_stop_qp(qp_ctx->qp);
+ ctx->req_op->resource_free(ctx, qp_ctx);
+
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+
+ idr_destroy(&qp_ctx->req_idr);
+ kfree(qp_ctx->req_list);
+ hisi_qm_release_qp(qp_ctx->qp);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx;
+ struct sec_dev *sec;
+ struct device *dev;
+ struct hisi_qm *qm;
+ int i, ret;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+
+ sec = sec_find_device(cpu_to_node(smp_processor_id()));
+ if (!sec) {
+		pr_err("cannot find a Hisilicon SEC device!\n");
+ return -ENODEV;
+ }
+ ctx->sec = sec;
+ qm = &sec->qm;
+ dev = &qm->pdev->dev;
+ ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+
+	/* Half of the queue depth is used as the fake-busy request limit. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ if (ret)
+ goto err_sec_release_qp_ctx;
+ }
+
+ c_ctx = &ctx->c_ctx;
+ c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
+ if (c_ctx->ivsize > SEC_IV_SIZE) {
+		dev_err(dev, "invalid iv size!\n");
+ ret = -EINVAL;
+ goto err_sec_release_qp_ctx;
+ }
+ c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key) {
+ ret = -ENOMEM;
+ goto err_sec_release_qp_ctx;
+ }
+
+ return 0;
+
+err_sec_release_qp_ctx:
+ for (i = i - 1; i >= 0; i--)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+
+ kfree(ctx->qp_ctx);
+ return ret;
+}
+
+static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int i = 0;
+
+ if (c_ctx->c_key) {
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+ c_ctx->c_key = NULL;
+ }
+
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+
+ kfree(ctx->qp_ctx);
+}
+
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
+{
+ switch (keylen) {
+ case SEC_DES3_2KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
+ break;
+ case SEC_DES3_3KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
+{
+ if (c_mode == SEC_CMODE_XTS) {
+ switch (keylen) {
+ case SEC_XTS_MIN_KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case SEC_XTS_MAX_KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: xts mode key error!\n");
+ return -EINVAL;
+ }
+ } else {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aes key error!\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int ret;
+
+ if (c_mode == SEC_CMODE_XTS) {
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ return ret;
+ }
+ }
+
+ c_ctx->c_alg = c_alg;
+ c_ctx->c_mode = c_mode;
+
+ switch (c_alg) {
+ case SEC_CALG_3DES:
+ ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+ break;
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
+ ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ return ret;
+ }
+
+ memcpy(c_ctx->c_key, key, keylen);
+
+ return 0;
+}
+
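+/* Generate thin setkey wrappers that bind the algorithm and mode constants */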
+#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
+static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
+ u32 keylen) \
+{ \
+ return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
+}
+
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
+
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
+
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
+
+static int sec_skcipher_get_res(struct sec_ctx *ctx,
+ struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
+ struct sec_cipher_req *c_req = &req->c_req;
+ int req_id = req->req_id;
+
+ c_req->c_ivin = c_res[req_id].c_ivin;
+ c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
+
+ return 0;
+}
+
+static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_res *res;
+ int i;
+
+ res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin) {
+ kfree(res);
+ return -ENOMEM;
+ }
+
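+	/* Slice the single coherent IV buffer into per-request IV slots */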
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+ qp_ctx->alg_meta_data = res;
+
+ return 0;
+}
+
+static void sec_skcipher_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_cipher_res *res = qp_ctx->alg_meta_data;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (!res)
+ return;
+
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
+ kfree(res);
+}
+
+static int sec_skcipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
+
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ return PTR_ERR(c_req->c_in);
+ }
+
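+	/* In-place request: reuse the input mapping for the output */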
+ if (dst == src) {
+ c_req->c_out = c_req->c_in;
+ c_req->c_out_dma = c_req->c_in_dma;
+ } else {
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
+ if (IS_ERR(c_req->c_out)) {
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
+ }
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_cipher_req *c_req = &req->c_req;
+
+ return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
+ c_req->sk_req->src, c_req->sk_req->dst);
+}
+
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sk_req = c_req->sk_req;
+
+ if (sk_req->dst != sk_req->src)
+ hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+}
+
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ int ret;
+
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (ret)
+ return ret;
+
+ ctx->req_op->do_transfer(ctx, req);
+
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (ret)
+ goto unmap_req_buf;
+
+ return ret;
+
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
+
+ return ret;
+}
+
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
+}
+
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+
+ c_req->c_len = sk_req->cryptlen;
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+}
+
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 de = 0;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
+ else
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
+
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
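+	/* The DE bit marks that source and destination buffers differ */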
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
+
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
+
+ /* Just set DST address type */
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
+
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
+
+ return 0;
+}
+
+static void sec_update_iv(struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ size_t sz;
+
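+	/*
+	 * For CBC the next IV is the last ciphertext block: it is found in
+	 * the destination after encryption and in the source for decryption.
+	 */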
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
+ iv_size, sk_req->cryptlen - iv_size);
+ if (sz != iv_size)
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+}
+
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+
+	/* Update the output IV after CBC-mode encryption */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req);
+
+ if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+
+ sk_req->base.complete(&sk_req->base, req->err_type);
+}
+
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_put_queue_id(ctx, req);
+}
+
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int issue_id, ret;
+
+	/* Choose a queue context for load balancing */
+ issue_id = sec_get_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[issue_id];
+
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (req->req_id < 0) {
+ sec_put_queue_id(ctx, req);
+ return req->req_id;
+ }
+
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = 1;
+ else
+ req->fake_busy = 0;
+
+ ret = ctx->req_op->get_res(ctx, req);
+ if (ret) {
+		/* sec_request_uninit() drops the pending_reqs count taken above */
+ sec_request_uninit(ctx, req);
+ dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
+ }
+
+ return ret;
+}
+
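+/* Common request pipeline: init, transfer (map buffers, fill BD), then send */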
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_request_init(ctx, req);
+ if (ret)
+ return ret;
+
+ ret = sec_request_transfer(ctx, req);
+ if (ret)
+ goto err_uninit_req;
+
+	/* For CBC decryption, the output IV comes from the input ciphertext */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req);
+
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (ret != -EBUSY && ret != -EINPROGRESS) {
+ dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ goto err_send_req;
+ }
+
+ return ret;
+
+err_send_req:
+	/* On failure, restore the original IV to the user request */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
+ ctx->c_ctx.ivsize);
+
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
+
+ return ret;
+}
+
+static struct sec_req_op sec_req_ops_tbl = {
+ .get_res = sec_skcipher_get_res,
+ .resource_alloc = sec_skcipher_resource_alloc,
+ .resource_free = sec_skcipher_resource_free,
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->req_op = &sec_req_ops_tbl;
+
+ return sec_skcipher_init(tfm);
+}
+
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_exit(tfm);
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+ struct skcipher_request *sk_req)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (!sk_req->src || !sk_req->dst) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
+ }
+
+ if (c_alg == SEC_CALG_3DES) {
+ if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
+}
+
+static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ if (!sk_req->cryptlen)
+ return 0;
+
+ ret = sec_skcipher_param_check(ctx, sk_req);
+ if (ret)
+ return ret;
+
+ req->c_req.sk_req = sk_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
+{
+ return sec_skcipher_crypto(sk_req, true);
+}
+
+static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
+{
+ return sec_skcipher_crypto(sk_req, false);
+}
+
+#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_skcipher_decrypt,\
+ .encrypt = sec_skcipher_encrypt,\
+ .min_keysize = sec_min_key_size,\
+ .max_keysize = sec_max_key_size,\
+ .ivsize = iv_size,\
+},
+
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
+ max_key_size, blk_size, iv_size) \
+ SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
+
+static struct skcipher_alg sec_algs[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+};
+
+int sec_register_to_crypto(void)
+{
+ int ret = 0;
+
+	/* Register the algorithms only once across all SEC devices */
+ mutex_lock(&sec_algs_lock);
+ if (++sec_active_devs == 1)
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ mutex_unlock(&sec_algs_lock);
+
+ return ret;
+}
+
+void sec_unregister_from_crypto(void)
+{
+ mutex_lock(&sec_algs_lock);
+ if (--sec_active_devs == 0)
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ mutex_unlock(&sec_algs_lock);
+}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
new file mode 100644
index 000000000000..097dce828340
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
+
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_COMM_SCENE 0
+
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+struct sec_sqe_type2 {
+
+ /*
+ * mac_len: 0~5 bits
+ * a_key_len: 6~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+	 * flag: 7~10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+	 * auth: 6~7 bits
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+	 * src_addr_type: bit 7, combined with bits 0~1 of sdm_addr_type
+ */
+ __u8 sds_sa_type;
+
+ /*
+	 * src_addr_type: 0~1 bits, not used now;
+	 * set this field if PRP is supported, otherwise set it to zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ /* Just using type2 BD now */
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
+#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
new file mode 100644
index 000000000000..74f0654028c9
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <linux/acpi.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/topology.h>
+
+#include "sec.h"
+
+#define SEC_VF_NUM 63
+#define SEC_QUEUE_NUM_V1 4096
+#define SEC_QUEUE_NUM_V2 1024
+#define SEC_PF_PCI_DEVICE_ID 0xa255
+#define SEC_VF_PCI_DEVICE_ID 0xa256
+
+#define SEC_XTS_MIV_ENABLE_REG 0x301384
+#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
+#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0xfffff7fd
+#define SEC_BD_ERR_CHK_EN2 0xffffbfff
+
+#define SEC_SQE_SIZE 128
+#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
+#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_BASE 0
+#define SEC_CTX_Q_NUM_DEF 24
+
+#define SEC_CTRL_CNT_CLR_CE 0x301120
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define SEC_ENGINE_PF_CFG_OFF 0x300000
+#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CORE_INT_SOURCE 0x301010
+#define SEC_CORE_INT_MASK 0x301000
+#define SEC_CORE_INT_STATUS 0x301008
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
+#define SEC_CORE_INT_DISABLE 0x0
+#define SEC_CORE_INT_ENABLE 0x1ff
+
+#define SEC_RAS_CE_REG 0x50
+#define SEC_RAS_FE_REG 0x54
+#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_ENB_MSK 0x88
+#define SEC_RAS_FE_ENB_MSK 0x0
+#define SEC_RAS_NFE_ENB_MSK 0x177
+#define SEC_RAS_DISABLE 0x0
+#define SEC_MEM_START_INIT_REG 0x0100
+#define SEC_MEM_INIT_DONE_REG 0x0104
+#define SEC_QM_ABNORMAL_INT_MASK 0x100004
+
+#define SEC_CONTROL_REG 0x0200
+#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
+#define SEC_CLK_GATE_DISABLE (~BIT(3))
+#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
+#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
+
+#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
+#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_BD_ERR_CHK_EN_REG1 0x0384
+#define SEC_BD_ERR_CHK_EN_REG2 0x038c
+
+#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
+#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
+
+#define SEC_DELAY_10_US 10
+#define SEC_POLL_TIMEOUT_US 1000
+#define SEC_VF_CNT_MASK 0xffffffc0
+#define SEC_DBGFS_VAL_MAX_LEN 20
+
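+/* Translate an engine register offset to its mapped PF address */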
+#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
+ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
+
+struct sec_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const char sec_name[] = "hisi_sec2";
+static struct dentry *sec_debugfs_root;
+static LIST_HEAD(sec_list);
+static DEFINE_MUTEX(sec_list_lock);
+
+static const struct sec_hw_error sec_hw_errors[] = {
+ {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
+ {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
+ {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
+ {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
+ {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
+ {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
+ {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
+ {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
+ {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
+ { /* sentinel */ }
+};
+
+struct sec_dev *sec_find_device(int node)
+{
+#define SEC_NUMA_MAX_DISTANCE 100
+ int min_distance = SEC_NUMA_MAX_DISTANCE;
+ int dev_node = 0, free_qp_num = 0;
+ struct sec_dev *sec, *ret = NULL;
+ struct hisi_qm *qm;
+ struct device *dev;
+
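+	/* Prefer the NUMA-closest SEC that still has enough free queues */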
+ mutex_lock(&sec_list_lock);
+ list_for_each_entry(sec, &sec_list, list) {
+ qm = &sec->qm;
+ dev = &qm->pdev->dev;
+#ifdef CONFIG_NUMA
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+#endif
+ if (node_distance(dev_node, node) < min_distance) {
+ free_qp_num = hisi_qm_get_free_qp_num(qm);
+ if (free_qp_num >= sec->ctx_q_num) {
+ ret = sec;
+ min_distance = node_distance(dev_node, node);
+ }
+ }
+ }
+ mutex_unlock(&sec_list_lock);
+
+ return ret;
+}
+
+static const char * const sec_dbg_file_name[] = {
+ [SEC_CURRENT_QM] = "current_qm",
+ [SEC_CLEAR_ENABLE] = "clear_enable",
+};
+
+static struct debugfs_reg32 sec_dfx_regs[] = {
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_SAA_EN ", 0x301270},
+ {"SEC_BD_LATENCY_MIN ", 0x301600},
+ {"SEC_BD_LATENCY_MAX ", 0x301608},
+ {"SEC_BD_LATENCY_AVG ", 0x30160C},
+ {"SEC_BD_NUM_IN_SAA0 ", 0x301670},
+ {"SEC_BD_NUM_IN_SAA1 ", 0x301674},
+ {"SEC_BD_NUM_IN_SEC ", 0x301680},
+ {"SEC_ECC_1BIT_CNT ", 0x301C00},
+ {"SEC_ECC_1BIT_INFO ", 0x301C04},
+ {"SEC_ECC_2BIT_CNT ", 0x301C10},
+ {"SEC_ECC_2BIT_INFO ", 0x301C14},
+ {"SEC_BD_SAA0 ", 0x301C20},
+ {"SEC_BD_SAA1 ", 0x301C24},
+ {"SEC_BD_SAA2 ", 0x301C28},
+ {"SEC_BD_SAA3 ", 0x301C2C},
+ {"SEC_BD_SAA4 ", 0x301C30},
+ {"SEC_BD_SAA5 ", 0x301C34},
+ {"SEC_BD_SAA6 ", 0x301C38},
+ {"SEC_BD_SAA7 ", 0x301C3C},
+ {"SEC_BD_SAA8 ", 0x301C40},
+};
+
+static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ SEC_PF_PCI_DEVICE_ID, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
+		pr_info("No device found, assuming queue number is %d!\n", q_num);
+ } else {
+ rev_id = pdev->revision;
+
+ switch (rev_id) {
+ case QM_HW_V1:
+ q_num = SEC_QUEUE_NUM_V1;
+ break;
+ case QM_HW_V2:
+ q_num = SEC_QUEUE_NUM_V2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sec_pf_q_num_ops = {
+ .set = sec_pf_q_num_set,
+ .get = param_get_int,
+};
+static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
+
+static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 ctx_q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &ctx_q_num);
+ if (ret)
+ return -EINVAL;
+
+ if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
+ return -EINVAL;
+ }
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sec_ctx_q_num_ops = {
+ .set = sec_ctx_q_num_set,
+ .get = param_get_int,
+};
+static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
+module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
+MODULE_PARM_DESC(ctx_q_num, "Number of queues in ctx (2, 4, 6, ..., 1024)");
+
+static const struct pci_device_id sec_dev_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
+
+static inline void sec_add_to_list(struct sec_dev *sec)
+{
+ mutex_lock(&sec_list_lock);
+ list_add_tail(&sec->list, &sec_list);
+ mutex_unlock(&sec_list_lock);
+}
+
+static inline void sec_remove_from_list(struct sec_dev *sec)
+{
+ mutex_lock(&sec_list_lock);
+ list_del(&sec->list);
+ mutex_unlock(&sec_list_lock);
+}
+
+static u8 sec_get_endian(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 reg;
+
+ /*
+	 * A VF must not read engine registers, so the endian setting
+	 * cannot be queried this way on a VF
+ */
+ if (qm->pdev->is_virtfn) {
+ dev_err_ratelimited(&qm->pdev->dev,
+ "cannot access a register in VF!\n");
+ return SEC_LE;
+ }
+ reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
+ SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
+
+ /* BD little endian mode */
+ if (!(reg & BIT(0)))
+ return SEC_LE;
+
+ /* BD 32-bits big endian mode */
+ else if (!(reg & BIT(1)))
+ return SEC_32BE;
+
+ /* BD 64-bits big endian mode */
+ else
+ return SEC_64BE;
+}
+
+static int sec_engine_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+ u32 reg;
+
+ /* disable clock gate control */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg &= SEC_CLK_GATE_DISABLE;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+
+ ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ reg, reg & 0x1, SEC_DELAY_10_US,
+ SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ return ret;
+ }
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= (0x1 << SEC_TRNG_EN_SHIFT);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ reg |= SEC_USER1_SMMU_NORMAL;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ writel_relaxed(SEC_BD_ERR_CHK_EN2,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));
+
+ /* enable clock gate control */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= SEC_CLK_GATE_ENABLE;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* config endian */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= sec_get_endian(sec);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* Enable sm4 xts mode multiple iv */
+ writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
+ qm->io_base + SEC_XTS_MIV_ENABLE_REG);
+
+ return 0;
+}
+
+static int sec_set_user_domain_and_cache(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+
+ /* qm user domain */
+ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
+
+ /* qm cache */
+ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
+
+ /* disable FLR triggered by BME(bus master enable) */
+ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
+
+ /* enable sqc,cqc writeback */
+ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
+ CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
+ FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
+
+ return sec_engine_init(sec);
+}
+
+/* sec_debug_regs_clear() - clear the sec debug regs */
+static void sec_debug_regs_clear(struct hisi_qm *qm)
+{
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear rdclr_en */
+ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+
+ hisi_qm_debug_regs_clear(qm);
+}
+
+static void sec_hw_error_enable(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 val;
+
+ if (qm->ver == QM_HW_V1) {
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+		dev_info(&qm->pdev->dev, "V1 does not support hw error handling\n");
+ return;
+ }
+
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+
+	/* clear any pending SEC hw error sources */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* enable RAS int */
+ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+
+	/* enable SEC block master OOO when an m-bit error occurs */
+ val = val | SEC_AXI_SHUTDOWN_ENABLE;
+
+ writel(val, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_hw_error_disable(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 val;
+
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+
+ /* disable RAS int */
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+	/* disable SEC block master OOO when an m-bit error occurs */
+ val = val & SEC_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_hw_error_init(struct sec_dev *sec)
+{
+ if (sec->qm.fun_type == QM_HW_VF)
+ return;
+
+ hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
+ QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
+ | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
+ QM_DB_RANDOM_INVALID);
+ sec_hw_error_enable(sec);
+}
+
+static void sec_hw_error_uninit(struct sec_dev *sec)
+{
+ if (sec->qm.fun_type == QM_HW_VF)
+ return;
+
+ sec_hw_error_disable(sec);
+ writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
+}
+
+static u32 sec_current_qm_read(struct sec_debug_file *file)
+{
+ struct hisi_qm *qm = file->qm;
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
+{
+ struct hisi_qm *qm = file->qm;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
+ u32 vfq_num;
+ u32 tmp;
+
+ if (val > sec->num_vfs)
+ return -EINVAL;
+
+	/* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
+ if (!val) {
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ } else {
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;
+
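+		/* The last VF takes the queues left over after equal division */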
+ if (val == sec->num_vfs)
+ qm->debug.curr_qm_qp_num =
+ qm->ctrl_qp_num - qm->qp_num -
+ (sec->num_vfs - 1) * vfq_num;
+ else
+ qm->debug.curr_qm_qp_num = vfq_num;
+ }
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
+{
+ struct hisi_qm *qm = file->qm;
+
+ return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
+ SEC_CTRL_CNT_CLR_CE_BIT;
+}
+
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+{
+ struct hisi_qm *qm = file->qm;
+ u32 tmp;
+
+	if (val != 0 && val != 1)
+ return -EINVAL;
+
+ tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+
+ return 0;
+}
+
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct sec_debug_file *file = filp->private_data;
+ char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ u32 val;
+ int ret;
+
+ spin_lock_irq(&file->lock);
+
+ switch (file->index) {
+ case SEC_CURRENT_QM:
+ val = sec_current_qm_read(file);
+ break;
+ case SEC_CLEAR_ENABLE:
+ val = sec_clear_enable_read(file);
+ break;
+ default:
+ spin_unlock_irq(&file->lock);
+ return -EINVAL;
+ }
+
+ spin_unlock_irq(&file->lock);
+ ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct sec_debug_file *file = filp->private_data;
+ char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ unsigned long val;
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= SEC_DBGFS_VAL_MAX_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
+ pos, buf, count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ spin_lock_irq(&file->lock);
+
+ switch (file->index) {
+ case SEC_CURRENT_QM:
+ ret = sec_current_qm_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case SEC_CLEAR_ENABLE:
+ ret = sec_clear_enable_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_input;
+ }
+
+ spin_unlock_irq(&file->lock);
+
+ return count;
+
+ err_input:
+ spin_unlock_irq(&file->lock);
+ return ret;
+}
+
+static const struct file_operations sec_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
+};
+
+static int sec_core_debug_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct sec_dfx *dfx = &sec->debug.dfx;
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d;
+
+ tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+		return -ENOMEM;
+
+ regset->regs = sec_dfx_regs;
+ regset->nregs = ARRAY_SIZE(sec_dfx_regs);
+ regset->base = qm->io_base;
+
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
+
+ debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+
+ debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+
+ return 0;
+}
+
+static int sec_debug_init(struct sec_dev *sec)
+{
+ int i;
+
+ for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = &sec->qm;
+
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
+ sec->qm.debug.debug_root,
+ sec->debug.files + i,
+ &sec_dbg_fops);
+ }
+
+ return sec_core_debug_init(sec);
+}
+
+static int sec_debugfs_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
+ ret = hisi_qm_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ ret = sec_debug_init(sec);
+ if (ret)
+ goto failed_to_create;
+ }
+
+ return 0;
+
+failed_to_create:
+	debugfs_remove_recursive(qm->debug.debug_root);
+
+ return ret;
+}
+
+static void sec_debugfs_exit(struct sec_dev *sec)
+{
+ debugfs_remove_recursive(sec->qm.debug.debug_root);
+}
+
+static int sec_pf_probe_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
+ break;
+
+ case QM_HW_V2:
+ qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = sec_set_user_domain_and_cache(sec);
+ if (ret)
+ return ret;
+
+ sec_hw_error_init(sec);
+ sec_debug_regs_clear(qm);
+
+ return 0;
+}
+
+static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ enum qm_hw_ver rev_id;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id == QM_HW_UNKNOWN)
+ return -ENODEV;
+
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+ qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF;
+ qm->use_dma_api = true;
+
+ return hisi_qm_init(qm);
+}
+
+static void sec_qm_uninit(struct hisi_qm *qm)
+{
+ hisi_qm_uninit(qm);
+}
+
+static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
+{
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = SEC_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+
+ return sec_pf_probe_init(sec);
+ } else if (qm->fun_type == QM_HW_VF) {
+ /*
+		 * There is no way to get the qm configuration from a VF on
+		 * v1 hardware, so the PF is forced to use SEC_PF_DEF_Q_NUM
+		 * and only one VF may be triggered on v1 hardware.
+ * v2 hardware has no such problem.
+ */
+ if (qm->ver == QM_HW_V1) {
+ qm->qp_base = SEC_PF_DEF_Q_NUM;
+ qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
+ } else if (qm->ver == QM_HW_V2) {
+ /* v2 starts to support get vft by mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void sec_probe_uninit(struct sec_dev *sec)
+{
+ sec_hw_error_uninit(sec);
+}
+
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sec_dev *sec;
+ struct hisi_qm *qm;
+ int ret;
+
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, sec);
+
+ sec->ctx_q_num = ctx_q_num;
+
+ qm = &sec->qm;
+
+ ret = sec_qm_init(qm, pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to pre init qm!\n");
+ return ret;
+ }
+
+ ret = sec_probe_init(qm, sec);
+ if (ret) {
+ pci_err(pdev, "Failed to probe!\n");
+ goto err_qm_uninit;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start sec qm!\n");
+ goto err_probe_uninit;
+ }
+
+ ret = sec_debugfs_init(sec);
+ if (ret)
+ pci_warn(pdev, "Failed to init debugfs!\n");
+
+ sec_add_to_list(sec);
+
+ ret = sec_register_to_crypto();
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_remove_from_list;
+ }
+
+ return 0;
+
+err_remove_from_list:
+ sec_remove_from_list(sec);
+ sec_debugfs_exit(sec);
+ hisi_qm_stop(qm);
+
+err_probe_uninit:
+ sec_probe_uninit(sec);
+
+err_qm_uninit:
+ sec_qm_uninit(qm);
+
+ return ret;
+}
+
+/* Currently only equal queue assignment across VFs is supported */
+static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 qp_num = qm->qp_num;
+ u32 q_base = qp_num;
+ u32 q_num, remain_q_num;
+ int i, j, ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qp_num;
+ q_num = remain_q_num / num_vfs;
+
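+	/* The last VF also takes any remainder queues */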
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret) {
+ for (j = i; j > 0; j--)
+ hisi_qm_set_vft(qm, j, 0, 0);
+ return ret;
+ }
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int sec_clear_vft_config(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 num_vfs = sec->num_vfs;
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= num_vfs; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ sec->num_vfs = 0;
+
+ return 0;
+}
+
+static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ int pre_existing_vfs, ret;
+ u32 num_vfs;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+
+ if (pre_existing_vfs) {
+		pci_err(pdev, "Can't enable VFs, please disable existing VFs first!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
+
+ ret = sec_vf_q_assign(sec, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ sec->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ sec_clear_vft_config(sec);
+ return ret;
+ }
+
+ return num_vfs;
+}
+
+static int sec_sriov_disable(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
+ return -EPERM;
+ }
+
+	/* The remove callback of sec_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+
+ return sec_clear_vft_config(sec);
+}
+
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs)
+ return sec_sriov_enable(pdev, num_vfs);
+ else
+ return sec_sriov_disable(pdev);
+}
+
+static void sec_remove(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ struct hisi_qm *qm = &sec->qm;
+
+ sec_unregister_from_crypto();
+
+ sec_remove_from_list(sec);
+
+ if (qm->fun_type == QM_HW_PF && sec->num_vfs)
+ (void)sec_sriov_disable(pdev);
+
+ sec_debugfs_exit(sec);
+
+ (void)hisi_qm_stop(qm);
+
+ if (qm->fun_type == QM_HW_PF)
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(sec);
+
+ sec_qm_uninit(qm);
+}
+
+static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
+{
+ const struct sec_hw_error *errs = sec_hw_errors;
+ struct device *dev = &sec->qm.pdev->dev;
+ u32 err_val;
+
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
+
+ if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
+ err_val = readl(sec->qm.io_base +
+ SEC_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ dev_err(dev, "multi ecc sram addr=0x%x\n",
+ SEC_ECC_ADDR(err_val));
+ }
+ }
+ errs++;
+ }
+}
+
+static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
+ if (err_sts) {
+ sec_log_hw_error(sec, err_sts);
+
+ /* clear error interrupts */
+ writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, sec_ret;
+
+ if (!sec) {
+ pci_err(pdev, "Can't recover error during device init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* log qm error */
+ qm_ret = hisi_qm_hw_error_handle(&sec->qm);
+
+ /* log sec error */
+ sec_ret = sec_hw_error_handle(sec);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+	pci_info(pdev, "PCI error detected, state = %d!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return sec_process_hw_error(pdev);
+}
+
+static const struct pci_error_handlers sec_err_handler = {
+ .error_detected = sec_error_detected,
+};
+
+static struct pci_driver sec_pci_driver = {
+ .name = "hisi_sec2",
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
+};
+
+static void sec_register_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return;
+
+ sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
+}
+
+static void sec_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(sec_debugfs_root);
+}
+
+static int __init sec_init(void)
+{
+ int ret;
+
+ sec_register_debugfs();
+
+ ret = pci_register_driver(&sec_pci_driver);
+ if (ret < 0) {
+ sec_unregister_debugfs();
+ pr_err("Failed to register pci driver.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit sec_exit(void)
+{
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
+}
+
+module_init(sec_init);
+module_exit(sec_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
+MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index e083d172b618..012023c347b1 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -2,37 +2,13 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include "./sgl.h"
+#include <linux/slab.h>
+#include "qm.h"
#define HISI_ACC_SGL_SGE_NR_MIN 1
-#define HISI_ACC_SGL_SGE_NR_MAX 255
-#define HISI_ACC_SGL_SGE_NR_DEF 10
#define HISI_ACC_SGL_NR_MAX 256
#define HISI_ACC_SGL_ALIGN_SIZE 64
-
-static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp)
-{
- int ret;
- u32 n;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops acc_sgl_sge_ops = {
- .set = acc_sgl_sge_set,
- .get = param_get_int,
-};
-
-static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF;
-module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444);
-MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sge in sgl(1-255)");
+#define HISI_ACC_MEM_BLOCK_NR 5
struct acc_hw_sge {
dma_addr_t buf;
@@ -55,37 +31,91 @@ struct hisi_acc_hw_sgl {
struct acc_hw_sge sge_entries[];
} __aligned(1);
+struct hisi_acc_sgl_pool {
+ struct mem_block {
+ struct hisi_acc_hw_sgl *sgl;
+ dma_addr_t sgl_dma;
+ size_t size;
+ } mem_block[HISI_ACC_MEM_BLOCK_NR];
+ u32 sgl_num_per_block;
+ u32 block_num;
+ u32 count;
+ u32 sge_nr;
+ size_t sgl_size;
+};
+
/**
* hisi_acc_create_sgl_pool() - Create a hw sgl pool.
* @dev: The device which hw sgl pool belongs to.
- * @pool: Pointer of pool.
* @count: Count of hisi_acc_hw_sgl in pool.
+ * @sge_nr: The count of sge in hw_sgl
*
* This function creates a hw sgl pool, after this user can get hw sgl memory
* from it.
*/
-int hisi_acc_create_sgl_pool(struct device *dev,
- struct hisi_acc_sgl_pool *pool, u32 count)
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr)
{
- u32 sgl_size;
- u32 size;
+ u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+ struct hisi_acc_sgl_pool *pool;
+ struct mem_block *block;
+ u32 i, j;
- if (!dev || !pool || !count)
- return -EINVAL;
+ if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
+ return ERR_PTR(-EINVAL);
- sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr +
+ sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
- size = sgl_size * count;
+ block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1));
+ sgl_num_per_block = block_size / sgl_size;
+ block_num = count / sgl_num_per_block;
+ remain_sgl = count % sgl_num_per_block;
+
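+	/* The whole pool must fit in at most HISI_ACC_MEM_BLOCK_NR blocks */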
+ if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
+ (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
+ return ERR_PTR(-EINVAL);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ block = pool->mem_block;
+
+ for (i = 0; i < block_num; i++) {
+ block[i].sgl = dma_alloc_coherent(dev, block_size,
+ &block[i].sgl_dma,
+ GFP_KERNEL);
+ if (!block[i].sgl)
+ goto err_free_mem;
+
+ block[i].size = block_size;
+ }
+
+ if (remain_sgl > 0) {
+ block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
+ &block[i].sgl_dma,
+ GFP_KERNEL);
+ if (!block[i].sgl)
+ goto err_free_mem;
- pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL);
- if (!pool->sgl)
- return -ENOMEM;
+ block[i].size = remain_sgl * sgl_size;
+ }
- pool->size = size;
+ pool->sgl_num_per_block = sgl_num_per_block;
+ pool->block_num = remain_sgl ? block_num + 1 : block_num;
pool->count = count;
pool->sgl_size = sgl_size;
+ pool->sge_nr = sge_nr;
- return 0;
+ return pool;
+
+err_free_mem:
+ for (j = 0; j < i; j++) {
+ dma_free_coherent(dev, block_size, block[j].sgl,
+ block[j].sgl_dma);
+ memset(block + j, 0, sizeof(*block));
+ }
+ kfree(pool);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
@@ -98,38 +128,57 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
*/
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
- dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma);
- memset(pool, 0, sizeof(struct hisi_acc_sgl_pool));
+ struct mem_block *block;
+ int i;
+
+ if (!dev || !pool)
+ return;
+
+ block = pool->mem_block;
+
+ for (i = 0; i < pool->block_num; i++)
+ dma_free_coherent(dev, block[i].size, block[i].sgl,
+ block[i].sgl_dma);
+
+ kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
-struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index,
- dma_addr_t *hw_sgl_dma)
+static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma)
{
- if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl)
+ struct mem_block *block;
+ u32 block_index, offset;
+
+ if (!pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
- *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index;
- return (void *)pool->sgl + pool->sgl_size * index;
-}
+ block = pool->mem_block;
+ block_index = index / pool->sgl_num_per_block;
+ offset = index % pool->sgl_num_per_block;
-void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {}
+ *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
+ return (void *)block[block_index].sgl + pool->sgl_size * offset;
+}
static void sg_map_to_hw_sg(struct scatterlist *sgl,
struct acc_hw_sge *hw_sge)
{
- hw_sge->buf = sgl->dma_address;
- hw_sge->len = sgl->dma_length;
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
- hw_sgl->entry_sum_in_sgl++;
+ u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
+
+ var++;
+ hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}
static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
- hw_sgl->entry_sum_in_chain = sum;
+ hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
/**
@@ -153,10 +202,13 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int sg_n = sg_nents(sgl);
- int i, ret;
+ int i, ret, sg_n;
- if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr)
+ if (!dev || !sgl || !pool || !hw_sgl_dma)
+ return ERR_PTR(-EINVAL);
+
+ sg_n = sg_nents(sgl);
+ if (sg_n > pool->sge_nr)
return ERR_PTR(-EINVAL);
ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
@@ -164,11 +216,12 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
return ERR_PTR(-EINVAL);
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
- if (!curr_hw_sgl) {
- ret = -ENOMEM;
- goto err_unmap_sg;
+ if (IS_ERR(curr_hw_sgl)) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ return ERR_PTR(-ENOMEM);
}
- curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr;
+ curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
for_each_sg(sgl, sg, sg_n, i) {
@@ -177,14 +230,10 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sge++;
}
- update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr);
+ update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
-
-err_unmap_sg:
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
@@ -201,6 +250,9 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
struct hisi_acc_hw_sgl *hw_sgl)
{
+ if (!dev || !sgl || !hw_sgl)
+ return;
+
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
hw_sgl->entry_sum_in_chain = 0;
diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h
deleted file mode 100644
index 3ac8871c7acf..000000000000
--- a/drivers/crypto/hisilicon/sgl.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 HiSilicon Limited. */
-#ifndef HISI_ACC_SGL_H
-#define HISI_ACC_SGL_H
-
-struct hisi_acc_sgl_pool {
- struct hisi_acc_hw_sgl *sgl;
- dma_addr_t sgl_dma;
- size_t size;
- u32 count;
- size_t sgl_size;
-};
-
-struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
-void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
-int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool,
- u32 count);
-void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool);
-#endif
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index ffb00d987d02..79fc4dd3fe00 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -8,7 +8,6 @@
#include <linux/list.h>
#include "../qm.h"
-#include "../sgl.h"
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 59023545a1c4..795428c1d07e 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -22,6 +22,7 @@
#define HZIP_CTX_Q_NUM 2
#define HZIP_GZIP_HEAD_BUF 256
#define HZIP_ALG_PRIORITY 300
+#define HZIP_SGL_SGE_NR 10
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0,
@@ -41,7 +42,7 @@ enum hisi_zip_alg_type {
#define TO_HEAD(req_type) \
(((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \
- ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \
+ ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL) \
struct hisi_zip_req {
struct acomp_req *req;
@@ -67,7 +68,7 @@ struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
struct hisi_zip_sqe zip_sqe;
struct hisi_zip_req_q req_q;
- struct hisi_acc_sgl_pool sgl_pool;
+ struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
struct hisi_zip_ctx *ctx;
};
@@ -78,6 +79,30 @@ struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
};
+static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ u16 n;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou16(val, 10, &n);
+ if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sgl_sge_nr_ops = {
+ .set = sgl_sge_nr_set,
+ .get = param_get_int,
+};
+
+static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
+module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
+MODULE_PARM_DESC(sgl_sge_nr, "Number of SGEs in an SGL (1-255)");
+
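A userspace model of the range check the setter enforces, handy for sanity-testing accepted values without loading the module; SGE_NR_MAX stands in for HISI_ACC_SGL_SGE_NR_MAX and is an assumed bound:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define SGE_NR_MAX 255	/* stand-in for HISI_ACC_SGL_SGE_NR_MAX */

static int sgl_sge_nr_check(const char *val)
{
	char *end;
	long n;

	if (!val)
		return -EINVAL;
	n = strtol(val, &end, 10);
	if (*end || n < 1 || n > SGE_NR_MAX)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* prints "0 -22 -22": only the first value is accepted */
	printf("%d %d %d\n", sgl_sge_nr_check("64"),
	       sgl_sge_nr_check("0"), sgl_sge_nr_check("300"));
	return 0;
}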
static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
u32 val;
@@ -265,14 +290,15 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
struct hisi_zip_qp_ctx *tmp;
- int i, ret;
+ struct device *dev;
+ int i;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
- ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev,
- &tmp->sgl_pool,
- QM_Q_DEPTH << 1);
- if (ret < 0) {
+ dev = &tmp->qp->qm->pdev->dev;
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ sgl_sge_nr);
+ if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
goto err_free_sgl_pool0;
return -ENOMEM;
@@ -283,7 +309,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
err_free_sgl_pool0:
hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev,
- &ctx->qp_ctx[QPC_COMP].sgl_pool);
+ ctx->qp_ctx[QPC_COMP].sgl_pool);
return -ENOMEM;
}
@@ -293,7 +319,7 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
- &ctx->qp_ctx[i].sgl_pool);
+ ctx->qp_ctx[i].sgl_pool);
}
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
@@ -512,7 +538,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
- struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool;
+ struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
dma_addr_t input;
dma_addr_t output;
int ret;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 1b2ee96c888d..e1bab1a91333 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -79,47 +79,80 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 22
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-LIST_HEAD(hisi_zip_list);
-DEFINE_MUTEX(hisi_zip_list_lock);
+static LIST_HEAD(hisi_zip_list);
+static DEFINE_MUTEX(hisi_zip_list_lock);
-#ifdef CONFIG_NUMA
-static struct hisi_zip *find_zip_device_numa(int node)
+struct hisi_zip_resource {
+ struct hisi_zip *hzip;
+ int distance;
+ struct list_head list;
+};
+
+static void free_list(struct list_head *head)
{
- struct hisi_zip *zip = NULL;
- struct hisi_zip *hisi_zip;
- int min_distance = HZIP_NUMA_DISTANCE;
- struct device *dev;
+ struct hisi_zip_resource *res, *tmp;
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = &hisi_zip->qm.pdev->dev;
- if (node_distance(dev->numa_node, node) < min_distance) {
- zip = hisi_zip;
- min_distance = node_distance(dev->numa_node, node);
- }
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
}
-
- return zip;
}
-#endif
struct hisi_zip *find_zip_device(int node)
{
- struct hisi_zip *zip = NULL;
+ struct hisi_zip_resource *res, *tmp;
+ struct hisi_zip *ret = NULL;
+ struct hisi_zip *hisi_zip;
+ struct list_head *n;
+ struct device *dev;
+ LIST_HEAD(head);
mutex_lock(&hisi_zip_list_lock);
-#ifdef CONFIG_NUMA
- zip = find_zip_device_numa(node);
-#else
- zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
-#endif
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ goto err;
+
+ dev = &hisi_zip->qm.pdev->dev;
+ res->hzip = hisi_zip;
+ res->distance = node_distance(dev_to_node(dev), node);
+
+ n = &head;
+ list_for_each_entry(tmp, &head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
+ ret = tmp->hzip;
+ break;
+ }
+ }
+
+ free_list(&head);
+ } else {
+ ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
+ }
+
mutex_unlock(&hisi_zip_list_lock);
- return zip;
+ return ret;
+
+err:
+ free_list(&head);
+ mutex_unlock(&hisi_zip_list_lock);
+ return NULL;
}
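The selection logic distills to: order the candidates by ascending NUMA distance, then take the nearest one that still has free queue pairs. A self-contained model of that walk (arrays stand in for list_head, and the numbers are made up):

#include <stdio.h>

struct candidate { int distance; int free_qps; };

int main(void)
{
	struct candidate devs[] = {
		{ .distance = 20, .free_qps = 0 },	/* near but busy */
		{ .distance = 10, .free_qps = 0 },	/* nearest, busy */
		{ .distance = 40, .free_qps = 8 },	/* far but free */
	};
	int n = sizeof(devs) / sizeof(devs[0]);
	struct candidate *order[3];
	int i, j;

	/* insertion sort by distance, mirroring the list_add_tail() walk */
	for (i = 0; i < n; i++) {
		for (j = i; j > 0 && order[j - 1]->distance > devs[i].distance; j--)
			order[j] = order[j - 1];
		order[j] = &devs[i];
	}
	/* pick the nearest candidate with free queue pairs: distance 40 */
	for (i = 0; i < n; i++)
		if (order[i]->free_qps) {
			printf("picked distance %d\n", order[i]->distance);
			return 0;
		}
	return 1;
}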
struct hisi_zip_hw_error {
@@ -267,6 +300,10 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
static int uacce_mode;
module_param(uacce_mode, int, 0);
+static u32 vfs_num;
+module_param(vfs_num, uint, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63)");
+
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
@@ -335,8 +372,7 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n",
- qm->ver);
+ dev_info(&qm->pdev->dev, "Does not support hw error handling\n");
return;
}
@@ -511,7 +547,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
@@ -521,10 +557,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
else
sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
- if (!tmp_d)
- return -ENOENT;
-
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
return -ENOENT;
@@ -533,9 +565,8 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
}
return 0;
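Dropping these checks follows the debugfs convention: creation is fire-and-forget, a failure must never fail the driver, and the debugfs_create_*() helpers are meant to be called without inspecting the result. A minimal sketch of the same pattern (names are illustrative):

#include <linux/debugfs.h>

static struct dentry *demo_root;

static void demo_debugfs_init(u32 *counter)
{
	demo_root = debugfs_create_dir("demo", NULL);
	/* no error check: a debugfs failure is not fatal to the driver */
	debugfs_create_u32("counter", 0444, demo_root, counter);
}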
@@ -543,7 +574,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
- struct dentry *tmp;
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
@@ -551,11 +581,9 @@ static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
ctrl->files[i].ctrl = ctrl;
ctrl->files[i].index = i;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ ctrl->debug_root, ctrl->files + i,
+ &ctrl_debug_fops);
}
return hisi_zip_core_debug_init(ctrl);
@@ -569,8 +597,6 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
int ret;
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
- if (!dev_d)
- return -ENOENT;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
@@ -652,90 +678,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return 0;
}
-static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
- struct hisi_qm *qm;
- int ret;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
- hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
- if (!hisi_zip)
- return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
-
- qm = &hisi_zip->qm;
- qm->pdev = pdev;
- qm->ver = rev_id;
-
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- switch (uacce_mode) {
- case 0:
- qm->use_dma_api = true;
- break;
- case 1:
- qm->use_dma_api = false;
- break;
- case 2:
- qm->use_dma_api = true;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hisi_qm_init(qm);
- if (ret) {
- dev_err(&pdev->dev, "Failed to init qm!\n");
- return ret;
- }
-
- if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
- if (ret)
- return ret;
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- *
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = HZIP_PF_DEF_Q_NUM;
- qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2)
- /* v2 starts to support get vft by mailbox */
- hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- }
-
- ret = hisi_qm_start(qm);
- if (ret)
- goto err_qm_uninit;
-
- ret = hisi_zip_debugfs_init(hisi_zip);
- if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
-
- hisi_zip_add_to_list(hisi_zip);
-
- return 0;
-
-err_qm_uninit:
- hisi_qm_uninit(qm);
- return ret;
-}
-
/* Currently we only support equal assignment */
static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
{
@@ -832,6 +774,100 @@ static int hisi_zip_sriov_disable(struct pci_dev *pdev)
return hisi_zip_clear_vft_config(hisi_zip);
}
+static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_zip *hisi_zip;
+ enum qm_hw_ver rev_id;
+ struct hisi_qm *qm;
+ int ret;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id == QM_HW_UNKNOWN)
+ return -EINVAL;
+
+ hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
+ if (!hisi_zip)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, hisi_zip);
+
+ qm = &hisi_zip->qm;
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
+ QM_HW_VF;
+ switch (uacce_mode) {
+ case 0:
+ qm->use_dma_api = true;
+ break;
+ case 1:
+ qm->use_dma_api = false;
+ break;
+ case 2:
+ qm->use_dma_api = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init qm!\n");
+ return ret;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hisi_zip_pf_probe_init(hisi_zip);
+ if (ret)
+ return ret;
+
+ qm->qp_base = HZIP_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ } else if (qm->fun_type == QM_HW_VF) {
+ /*
+ * There is no way to get the qm configuration from within a VM on
+ * v1 hardware, so currently force the PF to use HZIP_PF_DEF_Q_NUM
+ * queues and allow only one VF to be triggered there.
+ *
+ * v2 hardware has no such problem.
+ */
+ if (qm->ver == QM_HW_V1) {
+ qm->qp_base = HZIP_PF_DEF_Q_NUM;
+ qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
+ } else if (qm->ver == QM_HW_V2)
+ /* v2 starts to support get vft by mailbox */
+ hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ ret = hisi_zip_debugfs_init(hisi_zip);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
+
+ hisi_zip_add_to_list(hisi_zip);
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_zip_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_remove_from_list;
+ }
+
+ return 0;
+
+err_remove_from_list:
+ hisi_zip_remove_from_list(hisi_zip);
+ hisi_zip_debugfs_exit(hisi_zip);
+ hisi_qm_stop(qm);
+err_qm_uninit:
+ hisi_qm_uninit(qm);
+ return ret;
+}
+
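The new error path keeps the usual probe discipline: each label undoes exactly the steps that completed before the failure, in reverse order (list removal, then debugfs, then stopping the qm, then uninit). Reduced to a skeleton with placeholder steps:

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }	/* force the unwind for the demo */
static void undo_a(void) { }

static int probe_model(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_undo_a;	/* later failures jump higher up */

	return 0;

err_undo_a:
	undo_a();			/* reverse of setup order */
	return ret;
}

int main(void) { return probe_model() ? 1 : 0; }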
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
@@ -945,7 +981,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
- hisi_zip_sriov_configure : 0,
+ hisi_zip_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};
@@ -955,8 +991,6 @@ static void hisi_zip_register_debugfs(void)
return;
hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
- if (IS_ERR_OR_NULL(hzip_debugfs_root))
- hzip_debugfs_root = NULL;
}
static void hisi_zip_unregister_debugfs(void)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4ab1bde8dd9b..64894d8b442a 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -75,9 +75,9 @@ static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
}
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
- int maxbanks, u32 probemask)
+ int maxbanks, u32 probemask, u32 stride)
{
- u32 val, addrhi, addrlo, addrmid;
+ u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
int actbank;
/*
@@ -87,32 +87,37 @@ static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
addrhi = 1 << (16 + maxbanks);
addrlo = 0;
actbank = min(maxbanks - 1, 0);
- while ((addrhi - addrlo) > 32) {
+ while ((addrhi - addrlo) > stride) {
/* write marker to lowest address in top half */
addrmid = (addrhi + addrlo) >> 1;
+ marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
- writel((addrmid | (addrlo << 16)) & probemask,
+ writel(marker,
priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
- /* write marker to lowest address in bottom half */
- eip197_trc_cache_banksel(priv, addrlo, &actbank);
- writel((addrlo | (addrhi << 16)) & probemask,
- priv->base + EIP197_CLASSIFICATION_RAMS +
- (addrlo & 0xffff));
+ /* write invalid markers to possible aliases */
+ delta = 1 << __fls(addrmid);
+ while (delta >= stride) {
+ addralias = addrmid - delta;
+ eip197_trc_cache_banksel(priv, addralias, &actbank);
+ writel(~marker,
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ (addralias & 0xffff));
+ delta >>= 1;
+ }
/* read back marker from top half */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
- if (val == ((addrmid | (addrlo << 16)) & probemask)) {
+ if ((val & probemask) == marker)
/* read back correct, continue with top half */
addrlo = addrmid;
- } else {
+ else
/* not read back correct, continue with bottom half */
addrhi = addrmid;
- }
}
return addrhi;
}
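The rewritten probe is a binary search over the address space: write a marker unique to this step at the midpoint, deliberately overwrite every power-of-two alias below it with the inverted marker, and see whether the original survives; if it does, the RAM extends at least to the midpoint. A self-contained userspace model with an assumed hidden size:

#include <stdint.h>
#include <stdio.h>

#define HIDDEN_SIZE 4096u	/* what the probe should discover */

static uint32_t ram[HIDDEN_SIZE];

/* model of aliasing: addresses beyond the real size wrap around */
static uint32_t *cell(uint32_t addr)
{
	return &ram[addr % HIDDEN_SIZE];
}

int main(void)
{
	uint32_t lo = 0, hi = 1u << 16, mid, marker, d;

	while (hi - lo > 1) {
		mid = (lo + hi) / 2;
		marker = mid ^ 0xabadbabe;	/* unique per step */
		*cell(mid) = marker;
		/* clobber every power-of-two alias below mid */
		for (d = 1u << 15; d; d >>= 1)
			if (d <= mid)
				*cell(mid - d) = ~marker;
		if (*cell(mid) == marker)
			lo = mid;	/* survived: RAM reaches mid */
		else
			hi = mid;	/* aliased away: RAM ends below */
	}
	printf("probed size: %u words\n", hi);	/* prints 4096 */
	return 0;
}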
@@ -150,7 +155,7 @@ static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
htable_offset + i * sizeof(u32));
}
-static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, dsize, asize;
int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
@@ -183,7 +188,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed data RAM size in bytes */
- dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);
+ dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
/*
* Now probe the administration RAM size pretty much the same way
@@ -196,11 +201,18 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed admin RAM size in admin words */
- asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;
+ asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
/* Clear any ECC errors detected while probing! */
writel(0, priv->base + EIP197_TRC_ECCCTRL);
+ /* Sanity check probing results */
+ if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
+ dev_err(priv->dev, "Record cache probing failed (%d,%d).",
+ dsize, asize);
+ return -ENODEV;
+ }
+
/*
* Determine optimal configuration from RAM sizes
* Note that we assume that the physical RAM configuration is sane
@@ -221,9 +233,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Step #3: Determine log2 of hash table size */
cs_ht_sz = __fls(asize - cs_rc_max) - 2;
/* Step #4: determine current size of hash table in dwords */
- cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */
+ cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
/* Step #5: add back excess words and see if we can fit more records */
- cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4));
+ cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
/* Clear the cache RAMs */
eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
@@ -251,6 +263,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
+ return 0;
}
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
@@ -298,13 +311,14 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
const struct firmware *fw)
{
- const u32 *data = (const u32 *)fw->data;
+ const __be32 *data = (const __be32 *)fw->data;
int i;
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
writel(be32_to_cpu(data[i]),
- priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ i * sizeof(__be32));
/* Exclude final 2 NOPs from size */
return i - EIP197_FW_TERMINAL_NOPS;
@@ -471,6 +485,14 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
cd_size_rnd) - 1;
}
+ /*
+ * Since we're using command descriptors far larger than formally
+ * specified, check whether we can fit even one for low-end EIP196s!
+ */
+ if (!cd_fetch_cnt) {
+ dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
+ return -ENODEV;
+ }
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
@@ -479,12 +501,12 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
- (cd_fetch_cnt * priv->config.cd_offset),
+ (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
@@ -527,13 +549,13 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
priv->config.rd_size,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((rd_fetch_cnt *
(rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
- (rd_fetch_cnt * priv->config.rd_offset),
+ (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
@@ -559,7 +581,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 val;
- int i, ret, pe;
+ int i, ret, pe, opbuflo, opbufhi;
dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
priv->config.pes, priv->config.rings);
@@ -595,8 +617,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(EIP197_DxE_THR_CTRL_RESET_PE,
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- if (priv->flags & SAFEXCEL_HW_EIP197)
- /* Reset HIA input interface arbiter (EIP197 only) */
+ if (priv->flags & EIP197_PE_ARB)
+ /* Reset HIA input interface arbiter (if present) */
writel(EIP197_HIA_RA_PE_CTRL_RESET,
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
@@ -639,9 +661,16 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
;
/* DMA transfer size to use */
+ if (priv->hwconfig.hwnumpes > 4) {
+ opbuflo = 9;
+ opbufhi = 10;
+ } else {
+ opbuflo = 7;
+ opbufhi = 8;
+ }
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
- EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
/* FIXME: instability issues can occur for EIP97 but disabling
@@ -655,8 +684,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
/* Configure the processing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
- EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
/* Processing Engine configuration */
@@ -696,7 +725,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(0,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
@@ -719,7 +748,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
@@ -736,19 +765,28 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->flags & SAFEXCEL_HW_EIP197) {
- eip197_trc_cache_init(priv);
- priv->flags |= EIP197_TRC_CACHE;
+ if (priv->flags & EIP197_SIMPLE_TRC) {
+ writel(EIP197_STRC_CONFIG_INIT |
+ EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
+ EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
+ priv->base + EIP197_STRC_CONFIG);
+ writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
+ EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
+ } else if (priv->flags & SAFEXCEL_HW_EIP197) {
+ ret = eip197_trc_cache_init(priv);
+ if (ret)
+ return ret;
+ }
+ if (priv->flags & EIP197_ICE) {
ret = eip197_load_firmwares(priv);
if (ret)
return ret;
}
- safexcel_hw_setup_cdesc_rings(priv);
- safexcel_hw_setup_rdesc_rings(priv);
-
- return 0;
+ return safexcel_hw_setup_cdesc_rings(priv) ?:
+ safexcel_hw_setup_rdesc_rings(priv);
}
/* Called with ring's lock taken */
@@ -836,20 +874,24 @@ finalize:
spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
- writel((rdesc * priv->config.rd_offset) << 2,
+ writel((rdesc * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
- writel((cdesc * priv->config.cd_offset) << 2,
+ writel((cdesc * priv->config.cd_offset),
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
- struct safexcel_result_desc *rdesc)
+ void *rdp)
{
- if (likely((!rdesc->descriptor_overflow) &&
- (!rdesc->buffer_overflow) &&
- (!rdesc->result_data.error_code)))
+ struct safexcel_result_desc *rdesc = rdp;
+ struct result_data_desc *result_data = rdp + priv->config.res_offset;
+
+ if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
+ ((!rdesc->descriptor_overflow) &&
+ (!rdesc->buffer_overflow) &&
+ (!result_data->error_code))))
return 0;
if (rdesc->descriptor_overflow)
@@ -858,13 +900,14 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
if (rdesc->buffer_overflow)
dev_err(priv->dev, "Buffer overflow detected");
- if (rdesc->result_data.error_code & 0x4066) {
+ if (result_data->error_code & 0x4066) {
/* Fatal error (bits 1,2,5,6 & 14) */
dev_err(priv->dev,
"result descriptor error (%x)",
- rdesc->result_data.error_code);
+ result_data->error_code);
+
return -EIO;
- } else if (rdesc->result_data.error_code &
+ } else if (result_data->error_code &
(BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
/*
* Give priority over authentication fails:
@@ -872,7 +915,7 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
* something wrong with the input!
*/
return -EINVAL;
- } else if (rdesc->result_data.error_code & BIT(9)) {
+ } else if (result_data->error_code & BIT(9)) {
/* Authentication failed */
return -EBADMSG;
}
@@ -1003,7 +1046,7 @@ handle_results:
acknowledge:
if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
- EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ (tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
/* If the number of requests overflowed the counter, try to proceed more
@@ -1120,6 +1163,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
irq_name, irq);
return irq;
}
+ } else {
+ return -ENXIO;
}
ret = devm_request_threaded_irq(dev, irq, handler,
@@ -1169,6 +1214,44 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
+ &safexcel_alg_crc32,
+ &safexcel_alg_cbcmac,
+ &safexcel_alg_xcbcmac,
+ &safexcel_alg_cmac,
+ &safexcel_alg_chacha20,
+ &safexcel_alg_chachapoly,
+ &safexcel_alg_chachapoly_esp,
+ &safexcel_alg_sm3,
+ &safexcel_alg_hmac_sm3,
+ &safexcel_alg_ecb_sm4,
+ &safexcel_alg_cbc_sm4,
+ &safexcel_alg_ofb_sm4,
+ &safexcel_alg_cfb_sm4,
+ &safexcel_alg_ctr_sm4,
+ &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
+ &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
+ &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
+ &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
+ &safexcel_alg_sha3_224,
+ &safexcel_alg_sha3_256,
+ &safexcel_alg_sha3_384,
+ &safexcel_alg_sha3_512,
+ &safexcel_alg_hmac_sha3_224,
+ &safexcel_alg_hmac_sha3_256,
+ &safexcel_alg_hmac_sha3_384,
+ &safexcel_alg_hmac_sha3_512,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des,
+ &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha256_cbc_des,
+ &safexcel_alg_authenc_hmac_sha224_cbc_des,
+ &safexcel_alg_authenc_hmac_sha512_cbc_des,
+ &safexcel_alg_authenc_hmac_sha384_cbc_des,
+ &safexcel_alg_rfc4106_gcm,
+ &safexcel_alg_rfc4543_gcm,
+ &safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -1238,30 +1321,28 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask = 0;
-
- val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
-
- /* Read number of PEs from the engine */
- if (priv->flags & SAFEXCEL_HW_EIP197)
- /* Wider field width for all EIP197 type engines */
- mask = EIP197_N_PES_MASK;
- else
- /* Narrow field width for EIP97 type engine */
- mask = EIP97_N_PES_MASK;
+ u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
- priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+ priv->config.pes = priv->hwconfig.hwnumpes;
+ priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
+ /* Cannot currently support more rings than we have ring AICs! */
+ priv->config.rings = min_t(u32, priv->config.rings,
+ priv->hwconfig.hwnumraic);
- priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
-
- val = (val & GENMASK(27, 25)) >> 25;
- mask = BIT(val) - 1;
-
- priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+ priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
- priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+ /* result token follows the descriptor; round its offset to bus width */
+ priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
+ /* descriptor size is this first part plus the result structure */
+ priv->config.rd_size = priv->config.res_offset +
+ EIP197_RD64_RESULT_SIZE;
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+
+ /* convert dwords to bytes */
+ priv->config.cd_offset *= sizeof(u32);
+ priv->config.rd_offset *= sizeof(u32);
+ priv->config.res_offset *= sizeof(u32);
}
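Converting cd_offset/rd_offset to bytes here is what lets later hunks drop the `<< 2` at each writel() and move the descriptor-offset field shift from 16 down to 14: a byte offset is four times the dword offset, so shifting two bits less lands the same value in the same register field. A one-assert check of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t cd_offset_dw = 12;		/* example, in dwords */
	uint32_t cd_offset_bytes = cd_offset_dw * 4;

	/* old: dword offset at bit 16; new: byte offset at bit 14 */
	assert((cd_offset_dw << 16) == (cd_offset_bytes << 14));
	return 0;
}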
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
@@ -1307,7 +1388,7 @@ static int safexcel_probe_generic(void *pdev,
int is_pci_dev)
{
struct device *dev = priv->dev;
- u32 peid, version, mask, val, hiaopt;
+ u32 peid, version, mask, val, hiaopt, hwopt, peopt;
int i, ret, hwctg;
priv->context_pool = dmam_pool_create("safexcel-context", dev,
@@ -1369,13 +1450,16 @@ static int safexcel_probe_generic(void *pdev,
*/
version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
if (((priv->flags & SAFEXCEL_HW_EIP197) &&
- (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
+ (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
+ (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
((!(priv->flags & SAFEXCEL_HW_EIP197) &&
(EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
/*
* We did not find the device that matched our initial probing
* (or our initial probing failed) Report appropriate error.
*/
+ dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
+ version);
return -ENODEV;
}
@@ -1383,6 +1467,14 @@ static int safexcel_probe_generic(void *pdev,
hwctg = version >> 28;
peid = version & 255;
+ /* Detect EIP206 processing pipe */
+ version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
+ dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
+
/* Detect EIP96 packet engine and version */
version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
@@ -1391,10 +1483,13 @@ static int safexcel_probe_generic(void *pdev,
}
priv->hwconfig.pever = EIP197_VERSION_MASK(version);
+ hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197 */
+ peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
+
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
EIP197_HWDATAW_MASK;
priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
@@ -1403,6 +1498,19 @@ static int safexcel_probe_generic(void *pdev,
priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
EIP197_RFSIZE_MASK) +
EIP197_RFSIZE_ADJUST;
+ priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
+ EIP197_N_PES_MASK;
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
+ EIP197_N_RINGS_MASK;
+ if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
+ priv->flags |= EIP197_PE_ARB;
+ if (EIP206_OPT_ICE_TYPE(peopt) == 1)
+ priv->flags |= EIP197_ICE;
+ /* If not a full TRC, then assume simple TRC */
+ if (!(hwopt & EIP197_OPT_HAS_TRC))
+ priv->flags |= EIP197_SIMPLE_TRC;
+ /* EIP197 always has SOME form of TRC */
+ priv->flags |= EIP197_TRC_CACHE;
} else {
/* EIP97 */
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
@@ -1411,6 +1519,23 @@ static int safexcel_probe_generic(void *pdev,
EIP97_CFSIZE_MASK;
priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
EIP97_RFSIZE_MASK;
+ priv->hwconfig.hwnumpes = 1; /* by definition */
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
+ EIP197_N_RINGS_MASK;
+ }
+
+ /* Scan for ring AIC's */
+ for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
+ version = readl(EIP197_HIA_AIC_R(priv) +
+ EIP197_HIA_AIC_R_VERSION(i));
+ if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
+ break;
+ }
+ priv->hwconfig.hwnumraic = i;
+ /* Low-end EIP196 may not have any ring AIC's ... */
+ if (!priv->hwconfig.hwnumraic) {
+ dev_err(priv->dev, "No ring interrupt controller present!\n");
+ return -ENODEV;
}
/* Get supported algorithms from EIP96 transform engine */
@@ -1418,10 +1543,12 @@ static int safexcel_probe_generic(void *pdev,
EIP197_PE_EIP96_OPTIONS(0));
/* Print single info line describing what we just detected */
- dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
- peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
- priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
- priv->hwconfig.hwrfsize, priv->hwconfig.pever,
+ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
+ peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
+ priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
+ priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
+ priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
+ priv->hwconfig.ppver, priv->hwconfig.pever,
priv->hwconfig.algo_flags);
safexcel_configure(priv);
@@ -1545,7 +1672,6 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
}
}
-#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */
static int safexcel_probe(struct platform_device *pdev)
@@ -1623,6 +1749,7 @@ static int safexcel_remove(struct platform_device *pdev)
safexcel_unregister_algorithms(priv);
safexcel_hw_reset_rings(priv);
+ clk_disable_unprepare(priv->reg_clk);
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1664,9 +1791,7 @@ static struct platform_driver crypto_safexcel = {
.of_match_table = safexcel_of_match_table,
},
};
-#endif
-#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */
static int safexcel_pci_probe(struct pci_dev *pdev,
@@ -1757,7 +1882,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev,
return rc;
}
-void safexcel_pci_remove(struct pci_dev *pdev)
+static void safexcel_pci_remove(struct pci_dev *pdev)
{
struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
int i;
@@ -1787,54 +1912,32 @@ static struct pci_driver safexcel_pci_driver = {
.probe = safexcel_pci_probe,
.remove = safexcel_pci_remove,
};
-#endif
-
-/* Unfortunately, we have to resort to global variables here */
-#if IS_ENABLED(CONFIG_PCI)
-int pcireg_rc = -EINVAL; /* Default safe value */
-#endif
-#if IS_ENABLED(CONFIG_OF)
-int ofreg_rc = -EINVAL; /* Default safe value */
-#endif
static int __init safexcel_init(void)
{
-#if IS_ENABLED(CONFIG_PCI)
+ int ret;
+
/* Register PCI driver */
- pcireg_rc = pci_register_driver(&safexcel_pci_driver);
-#endif
+ ret = pci_register_driver(&safexcel_pci_driver);
-#if IS_ENABLED(CONFIG_OF)
/* Register platform driver */
- ofreg_rc = platform_driver_register(&crypto_safexcel);
- #if IS_ENABLED(CONFIG_PCI)
- /* Return success if either PCI or OF registered OK */
- return pcireg_rc ? ofreg_rc : 0;
- #else
- return ofreg_rc;
- #endif
-#else
- #if IS_ENABLED(CONFIG_PCI)
- return pcireg_rc;
- #else
- return -EINVAL;
- #endif
-#endif
+ if (IS_ENABLED(CONFIG_OF) && !ret) {
+ ret = platform_driver_register(&crypto_safexcel);
+ if (ret)
+ pci_unregister_driver(&safexcel_pci_driver);
+ }
+
+ return ret;
}
static void __exit safexcel_exit(void)
{
-#if IS_ENABLED(CONFIG_OF)
/* Unregister platform driver */
- if (!ofreg_rc)
+ if (IS_ENABLED(CONFIG_OF))
platform_driver_unregister(&crypto_safexcel);
-#endif
-#if IS_ENABLED(CONFIG_PCI)
/* Unregister PCI driver if successfully registered before */
- if (!pcireg_rc)
- pci_unregister_driver(&safexcel_pci_driver);
-#endif
+ pci_unregister_driver(&safexcel_pci_driver);
}
module_init(safexcel_init);
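Replacing the #ifdef maze with IS_ENABLED() keeps both registration paths visible to the compiler (so they always at least build) while constant folding discards the dead branch; the unwind on partial failure follows the register-in-order, unregister-in-reverse pairing. A reduced model of that init/exit shape with placeholder registration functions:

#include <stdio.h>

#define IS_ENABLED_OF 1		/* stand-in for IS_ENABLED(CONFIG_OF) */

static int reg_pci(void)    { puts("pci registered");  return 0; }
static int reg_of(void)     { puts("of registered");   return 0; }
static void unreg_pci(void) { puts("pci unregistered"); }
static void unreg_of(void)  { puts("of unregistered"); }

static int demo_init(void)
{
	int ret = reg_pci();

	if (IS_ENABLED_OF && !ret) {
		ret = reg_of();
		if (ret)
			unreg_pci();	/* unwind the earlier step */
	}
	return ret;
}

static void demo_exit(void)
{
	if (IS_ENABLED_OF)
		unreg_of();
	unreg_pci();	/* reverse of registration order */
}

int main(void)
{
	if (!demo_init())
		demo_exit();
	return 0;
}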
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f85..b4624b5687ce 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -17,8 +17,11 @@
#define EIP197_HIA_VERSION_BE 0xca35
#define EIP197_HIA_VERSION_LE 0x35ca
#define EIP97_VERSION_LE 0x9e61
+#define EIP196_VERSION_LE 0x3bc4
#define EIP197_VERSION_LE 0x3ac5
#define EIP96_VERSION_LE 0x9f60
+#define EIP201_VERSION_LE 0x36c9
+#define EIP206_VERSION_LE 0x31ce
#define EIP197_REG_LO16(reg) (reg & 0xffff)
#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
@@ -26,12 +29,22 @@
((reg >> 4) & 0xf0) | \
((reg >> 12) & 0xf))
+/* EIP197 HIA OPTIONS ENCODING */
+#define EIP197_HIA_OPT_HAS_PE_ARB BIT(29)
+
+/* EIP206 OPTIONS ENCODING */
+#define EIP206_OPT_ICE_TYPE(n) (((n) >> 8) & 3)
+
+/* EIP197 OPTIONS ENCODING */
+#define EIP197_OPT_HAS_TRC BIT(31)
+
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 18
+#define EIP197_MAX_TOKENS 19
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
+#define EIP197_MAX_RING_AIC 14
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
@@ -138,6 +151,7 @@
#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_VERSION(r) (0xe01c - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
#define EIP197_HIA_AIC_G_ACK 0xf810
@@ -157,12 +171,16 @@
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL2(n) (0x102c + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
+#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n)))
+#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
+#define EIP197_OPTIONS 0xfff8
#define EIP197_VERSION 0xfffc
/* EIP197-specific registers, no indirection */
@@ -178,6 +196,7 @@
#define EIP197_TRC_ECCADMINSTAT 0xf0838
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
+#define EIP197_STRC_CONFIG 0xf43f0
#define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n)))
#define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n)))
#define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n)))
@@ -213,7 +232,6 @@
/* EIP197_HIA_xDR_PROC_COUNT */
#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
-#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -228,6 +246,8 @@
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
/* EIP197_HIA_OPTIONS */
+#define EIP197_N_RINGS_OFFSET 0
+#define EIP197_N_RINGS_MASK GENMASK(3, 0)
#define EIP197_N_PES_OFFSET 4
#define EIP197_N_PES_MASK GENMASK(4, 0)
#define EIP97_N_PES_MASK GENMASK(2, 0)
@@ -237,13 +257,13 @@
#define EIP197_CFSIZE_OFFSET 9
#define EIP197_CFSIZE_ADJUST 4
#define EIP97_CFSIZE_OFFSET 8
-#define EIP197_CFSIZE_MASK GENMASK(3, 0)
-#define EIP97_CFSIZE_MASK GENMASK(4, 0)
+#define EIP197_CFSIZE_MASK GENMASK(2, 0)
+#define EIP97_CFSIZE_MASK GENMASK(3, 0)
#define EIP197_RFSIZE_OFFSET 12
#define EIP197_RFSIZE_ADJUST 4
#define EIP97_RFSIZE_OFFSET 12
-#define EIP197_RFSIZE_MASK GENMASK(3, 0)
-#define EIP97_RFSIZE_MASK GENMASK(4, 0)
+#define EIP197_RFSIZE_MASK GENMASK(2, 0)
+#define EIP97_RFSIZE_MASK GENMASK(3, 0)
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
@@ -327,13 +347,21 @@
#define EIP197_ADDRESS_MODE BIT(8)
#define EIP197_CONTROL_MODE BIT(9)
+/* EIP197_PE_EIP96_TOKEN_CTRL2 */
+#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3)
+
+/* EIP197_STRC_CONFIG */
+#define EIP197_STRC_CONFIG_INIT BIT(31)
+#define EIP197_STRC_CONFIG_LARGE_REC(s) ((s) << 8)
+#define EIP197_STRC_CONFIG_SMALL_REC(s) ((s) << 0)
+
/* EIP197_FLUE_CONFIG */
#define EIP197_FLUE_CONFIG_MAGIC 0xc7000004
/* Context Control */
struct safexcel_context_record {
- u32 control0;
- u32 control1;
+ __le32 control0;
+ __le32 control1;
__le32 data[40];
} __packed;
@@ -358,10 +386,14 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 (0x8 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SM4 (0xd << 17)
+#define CONTEXT_CONTROL_DIGEST_INITIAL (0x0 << 21)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_CRC32 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
@@ -371,17 +403,25 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SM3 (0x7 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256 (0xb << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224 (0xc << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512 (0xd << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384 (0xe << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_POLY1305 (0xf << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
/* control1 */
#define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0)
+#define CONTEXT_CONTROL_CHACHA20_MODE_256_32 (2 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17))
+#define CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK (12 << 0)
#define CONTEXT_CONTROL_IV0 BIT(5)
#define CONTEXT_CONTROL_IV1 BIT(6)
#define CONTEXT_CONTROL_IV2 BIT(7)
@@ -394,6 +434,13 @@ struct safexcel_context_record {
#define EIP197_XCM_MODE_GCM 1
#define EIP197_XCM_MODE_CCM 2
+#define EIP197_AEAD_TYPE_IPSEC_ESP 2
+#define EIP197_AEAD_TYPE_IPSEC_ESP_GMAC 3
+#define EIP197_AEAD_IPSEC_IV_SIZE 8
+#define EIP197_AEAD_IPSEC_NONCE_SIZE 4
+#define EIP197_AEAD_IPSEC_COUNTER_SIZE 4
+#define EIP197_AEAD_IPSEC_CCM_NONCE_SIZE 3
+
/* The hash counter given to the engine in the context has a granularity of
* 64 bits.
*/
@@ -423,6 +470,8 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
+#define EIP197_MIN_DSIZE 1024
+#define EIP197_MIN_ASIZE 8
#define EIP197_CS_TRC_REC_WC 64
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
@@ -447,7 +496,7 @@ struct result_data_desc {
u16 application_id;
u16 rsvd1;
- u32 rsvd2;
+ u32 rsvd2[5];
} __packed;
@@ -465,16 +514,15 @@ struct safexcel_result_desc {
u32 data_lo;
u32 data_hi;
-
- struct result_data_desc result_data;
} __packed;
/*
* The EIP(1)97 only needs to fetch the descriptor part of
* the result descriptor, not the result token part!
*/
-#define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\
- sizeof(struct result_data_desc)) /\
+#define EIP197_RD64_FETCH_SIZE (sizeof(struct safexcel_result_desc) /\
+ sizeof(u32))
+#define EIP197_RD64_RESULT_SIZE (sizeof(struct result_data_desc) /\
sizeof(u32))
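With the result token split out of struct safexcel_result_desc, safexcel_configure() reassembles rd_size from the two halves, rounding the token offset to the bus width first. A worked example with assumed sizes (the real values come from sizeof() on the driver structs):

#include <stdio.h>

int main(void)
{
	/* illustrative sizes in dwords; real ones come from sizeof() */
	unsigned int fetch = 8, result = 10, mask = 3;	/* 4-dword bus */
	unsigned int res_offset = (fetch + mask) & ~mask;	/* 8 */
	unsigned int rd_size = res_offset + result;		/* 18 */
	unsigned int rd_offset = (rd_size + mask) & ~mask;	/* 20 */

	printf("res_offset=%u rd_size=%u rd_offset=%u dwords (%u bytes)\n",
	       res_offset, rd_size, rd_offset, rd_offset * 4);
	return 0;
}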
struct safexcel_token {
@@ -561,6 +609,9 @@ struct safexcel_command_desc {
struct safexcel_control_data_desc control_data;
} __packed;
+#define EIP197_CD64_FETCH_SIZE (sizeof(struct safexcel_command_desc) /\
+ sizeof(u32))
+
/*
* Internal structures & functions
*/
@@ -604,6 +655,7 @@ struct safexcel_config {
u32 rd_size;
u32 rd_offset;
+ u32 res_offset;
};
struct safexcel_work_data {
@@ -654,6 +706,12 @@ enum safexcel_eip_version {
/* Priority we use for advertising our algorithms */
#define SAFEXCEL_CRA_PRIORITY 300
+/* SM3 digest result for zero length message */
+#define EIP197_SM3_ZEROM_HASH "\x1A\xB2\x1D\x83\x55\xCF\xA1\x7F" \
+ "\x8E\x61\x19\x48\x31\xE8\x1A\x8F" \
+ "\x22\xBE\xC8\xC7\x28\xFE\xFB\x74" \
+ "\x7E\xD0\x35\xEB\x50\x82\xAA\x2B"
+
/* EIP algorithm presence flags */
enum safexcel_eip_algorithms {
SAFEXCEL_ALG_BC0 = BIT(5),
@@ -697,16 +755,23 @@ struct safexcel_register_offsets {
enum safexcel_flags {
EIP197_TRC_CACHE = BIT(0),
SAFEXCEL_HW_EIP197 = BIT(1),
+ EIP197_PE_ARB = BIT(2),
+ EIP197_ICE = BIT(3),
+ EIP197_SIMPLE_TRC = BIT(4),
};
struct safexcel_hwconfig {
enum safexcel_eip_algorithms algo_flags;
int hwver;
int hiaver;
+ int ppver;
int pever;
int hwdataw;
int hwcfsize;
int hwrfsize;
+ int hwnumpes;
+ int hwnumrings;
+ int hwnumraic;
};
struct safexcel_crypto_priv {
@@ -778,7 +843,7 @@ struct safexcel_inv_result {
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
- struct safexcel_result_desc *rdesc);
+ void *rdp);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
@@ -853,5 +918,43 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
+extern struct safexcel_alg_template safexcel_alg_crc32;
+extern struct safexcel_alg_template safexcel_alg_cbcmac;
+extern struct safexcel_alg_template safexcel_alg_xcbcmac;
+extern struct safexcel_alg_template safexcel_alg_cmac;
+extern struct safexcel_alg_template safexcel_alg_chacha20;
+extern struct safexcel_alg_template safexcel_alg_chachapoly;
+extern struct safexcel_alg_template safexcel_alg_chachapoly_esp;
+extern struct safexcel_alg_template safexcel_alg_sm3;
+extern struct safexcel_alg_template safexcel_alg_hmac_sm3;
+extern struct safexcel_alg_template safexcel_alg_ecb_sm4;
+extern struct safexcel_alg_template safexcel_alg_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_ofb_sm4;
+extern struct safexcel_alg_template safexcel_alg_cfb_sm4;
+extern struct safexcel_alg_template safexcel_alg_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_sha3_224;
+extern struct safexcel_alg_template safexcel_alg_sha3_256;
+extern struct safexcel_alg_template safexcel_alg_sha3_384;
+extern struct safexcel_alg_template safexcel_alg_sha3_512;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_rfc4106_gcm;
+extern struct safexcel_alg_template safexcel_alg_rfc4543_gcm;
+extern struct safexcel_alg_template safexcel_alg_rfc4309_ccm;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index ef51f8c2b473..c02995694b41 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -5,18 +5,22 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/chacha.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
+#include <crypto/poly1305.h>
#include <crypto/sha.h>
+#include <crypto/sm3.h>
+#include <crypto/sm4.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -33,6 +37,8 @@ enum safexcel_cipher_alg {
SAFEXCEL_DES,
SAFEXCEL_3DES,
SAFEXCEL_AES,
+ SAFEXCEL_CHACHA20,
+ SAFEXCEL_SM4,
};
struct safexcel_cipher_ctx {
@@ -41,8 +47,8 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- bool aead;
- int xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ char aead; /* !=0: AEAD, 2: IPsec ESP AEAD, 3: IPsec ESP GMAC */
+ char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
__le32 key[16];
u32 nonce;
@@ -51,10 +57,11 @@ struct safexcel_cipher_ctx {
/* All the below is AEAD specific */
u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ __be32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ __be32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
struct crypto_cipher *hkaes;
+ struct crypto_aead *fback;
};
struct safexcel_cipher_req {
@@ -70,24 +77,50 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
{
u32 block_sz = 0;
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] = cpu_to_be32(1);
+
+ if (ctx->alg == SAFEXCEL_CHACHA20 ||
+ ctx->xcm == EIP197_XCM_MODE_CCM) {
+ /* 32 bit counter, starting at 0 */
+ cdesc->control_data.token[3] = 0;
+ } else {
+ /* 32 bit counter, start at 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(1);
+ }
return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+ } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
+ (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 96 bit IV part */
memcpy(&cdesc->control_data.token[0], iv, 12);
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] = cpu_to_be32(1);
+
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, starting at 0 */
+ cdesc->control_data.token[3] = 0;
+ } else {
+ /* 32 bit counter, start at 1 (big endian!) */
+ *(__be32 *)&cdesc->control_data.token[3] =
+ cpu_to_be32(1);
+ }
+
+ return;
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+ /* 96 bit nonce part */
+ memcpy(&cdesc->control_data.token[0], &iv[4], 12);
+ /* 32 bit counter */
+ cdesc->control_data.token[3] = *(u32 *)iv;
return;
} else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
@@ -112,10 +145,16 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
block_sz = DES3_EDE_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
+ case SAFEXCEL_SM4:
+ block_sz = SM4_BLOCK_SIZE;
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
case SAFEXCEL_AES:
block_sz = AES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
break;
+ default:
+ break;
}
memcpy(cdesc->control_data.token, iv, block_sz);
}
@@ -153,72 +192,84 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
if (direction == SAFEXCEL_ENCRYPT) {
/* align end of instruction sequence to end of token */
token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 13);
+ EIP197_MAX_TOKENS - 14);
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[12].packet_length = digestsize;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[13].packet_length = digestsize;
+ token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
} else {
cryptlen -= digestsize;
/* align end of instruction sequence to end of token */
token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
+ EIP197_MAX_TOKENS - 15);
- token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[12].packet_length = digestsize;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ token[13].packet_length = digestsize;
+ token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[13].packet_length = digestsize |
+ token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ token[14].packet_length = digestsize |
EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
- if (likely(cryptlen)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[10].packet_length = cryptlen;
- token[10].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[10].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[6].packet_length = assoclen;
+ token[6].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[11].packet_length = cryptlen;
+ token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ /* Do not send to crypt engine in case of GMAC */
+ token[11].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ token[11].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
} else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
}
if (!ctx->xcm)
return;
- token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[8].packet_length = 0;
- token[8].instructions = AES_BLOCK_SIZE;
+ token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ token[9].packet_length = 0;
+ token[9].instructions = AES_BLOCK_SIZE;
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[9].packet_length = AES_BLOCK_SIZE;
- token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[10].packet_length = AES_BLOCK_SIZE;
+ token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
- if (ctx->xcm == EIP197_XCM_MODE_GCM) {
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
- } else {
+ if (ctx->xcm != EIP197_XCM_MODE_GCM) {
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
u8 *cbcmaciv = (u8 *)&token[1];
- u32 *aadlen = (u32 *)&token[5];
+ __le32 *aadlen = (__le32 *)&token[5];
/* Construct IV block B0 for the CBC-MAC */
token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -227,17 +278,18 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
EIP197_TOKEN_INS_TYPE_HASH;
/* Variable length IV part */
- memcpy(cbcmaciv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
/* fixup flags byte */
cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
/* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
/* insert lower 2 bytes of message length */
cbcmaciv[14] = cryptlen >> 8;
cbcmaciv[15] = cryptlen & 255;
if (assoclen) {
- *aadlen = cpu_to_le32(cpu_to_be16(assoclen));
+ *aadlen = cpu_to_le32((assoclen >> 8) |
+ ((assoclen & 0xff) << 8));
assoclen += 2;
}
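
The B0 block assembled above follows RFC 3610. A minimal userspace sketch of the same construction (assumptions: iv[0] already holds L' = L - 1, as the rfc4309 setkey later in this patch arranges, and cryptlen fits in 16 bits so only the low two length bytes are nonzero; names are illustrative, not driver API):

    #include <stdint.h>
    #include <string.h>

    static void build_ccm_b0(uint8_t b0[16], const uint8_t *iv,
                             unsigned int assoclen,
                             unsigned int digestsize,
                             unsigned int cryptlen)
    {
        /* Flags byte plus nonce: 15 - iv[0] = 16 - L bytes in total */
        memcpy(b0, iv, 15 - iv[0]);
        /* Adata bit, then M' = (M - 2) / 2 in bits 5..3, i.e. (M - 2) << 2 */
        b0[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
        /* Zero the upper bytes of the L-byte message-length field */
        memset(b0 + 15 - iv[0], 0, iv[0] - 1);
        /* Store the message length big-endian in the low two bytes */
        b0[14] = cryptlen >> 8;
        b0[15] = cryptlen & 255;
    }

The 2-byte AAD length prefix is big-endian, which is what the open-coded (assoclen >> 8) | ((assoclen & 0xff) << 8) achieves through the little-endian token store: e.g. assoclen = 0x0120 lands in memory as the bytes 0x01 0x20.
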
@@ -252,13 +304,13 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
/* Align crypto data towards hash engine */
- token[10].stat = 0;
+ token[11].stat = 0;
- token[11].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
cryptlen &= 15;
- token[11].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
+ token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[7].instructions = EIP197_TOKEN_INS_LAST |
@@ -284,7 +336,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -309,25 +361,29 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_authenc_keys keys;
struct crypto_aes_ctx aes;
- int err = -EINVAL;
+ int err = -EINVAL, i;
- if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+ if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
goto badkey;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
- /* Minimum keysize is minimum AES key size + nonce size */
- if (keys.enckeylen < (AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE))
+ /* Must have at least space for the nonce here */
+ if (unlikely(keys.enckeylen < CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
CTR_RFC3686_NONCE_SIZE);
/* exclude the nonce here */
- keys.enckeylen -= CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
}
/* Encryption key */
switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
+ if (unlikely(err))
+ goto badkey_expflags;
+ break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
@@ -338,14 +394,24 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
if (unlikely(err))
goto badkey;
break;
+ case SAFEXCEL_SM4:
+ if (unlikely(keys.enckeylen != SM4_KEY_SIZE))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
goto badkey;
}
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
- memcmp(ctx->key, keys.enckey, keys.enckeylen))
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < keys.enckeylen / sizeof(u32); i++) {
+ if (le32_to_cpu(ctx->key[i]) !=
+ ((u32 *)keys.enckey)[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+ }
+ }
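
The key-cache comparisons in this patch move from ctx->key[i] != cpu_to_le32(...) to le32_to_cpu(ctx->key[i]) != ...; both forms are equivalent at runtime, the new one just keeps the __le32 annotations sparse-clean. A userspace sketch of the idea (the le32 helper is a stand-in for the kernel macro, not the real one):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Userspace stand-in for the kernel's le32_to_cpu() */
    static uint32_t le32_load(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* True if the cached little-endian key differs from the fresh key */
    static bool key_changed(const uint8_t *cached_le,
                            const uint32_t *fresh, size_t words)
    {
        for (size_t i = 0; i < words; i++)
            if (le32_load(cached_le + 4 * i) != fresh[i])
                return true;
        return false;
    }
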
/* Auth key */
switch (ctx->hash_alg) {
@@ -374,6 +440,11 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
+ if (safexcel_hmac_setkey("safexcel-sm3", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -388,7 +459,8 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->base.needs_inv = true;
/* Now copy the keys into the context */
- memcpy(ctx->key, keys.enckey, keys.enckeylen);
+ for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
+ ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
ctx->key_len = keys.enckeylen;
memcpy(ctx->ipad, &istate.state, ctx->state_sz);
@@ -423,6 +495,17 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
CONTEXT_CONTROL_DIGEST_XCM |
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* Chacha20-Poly1305 */
+ cdesc->control_data.control0 =
+ CONTEXT_CONTROL_KEY_EN |
+ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 |
+ (sreq->direction == SAFEXCEL_ENCRYPT ?
+ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT :
+ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN) |
+ ctx->hash_alg |
+ CONTEXT_CONTROL_SIZE(ctrl_size);
+ return 0;
} else {
ctrl_size += ctx->state_sz / sizeof(u32) * 2;
cdesc->control_data.control0 =
@@ -431,17 +514,21 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
}
- if (sreq->direction == SAFEXCEL_ENCRYPT)
- cdesc->control_data.control0 |=
- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
- CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT :
- CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ if (sreq->direction == SAFEXCEL_ENCRYPT &&
+ (ctx->xcm == EIP197_XCM_MODE_CCM ||
+ ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC))
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT;
+ else if (sreq->direction == SAFEXCEL_ENCRYPT)
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ else if (ctx->xcm == EIP197_XCM_MODE_CCM)
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN;
else
cdesc->control_data.control0 |=
- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
- CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN :
- CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
} else {
if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 =
@@ -480,6 +567,12 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
ctx->key_len >> ctx->xts);
return -EINVAL;
}
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20;
+ } else if (ctx->alg == SAFEXCEL_SM4) {
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_CRYPTO_ALG_SM4;
}
return 0;
@@ -1295,7 +1388,7 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -1451,13 +1544,11 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
 	/* if context exists and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma) {
+ if (ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
- }
memcpy(ctx->key, key, len);
-
ctx->key_len = len;
return 0;
@@ -1777,6 +1868,312 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
},
};
+static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha256_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha224_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha512_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha384_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha1_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha1_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha256_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha224_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha512_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha384_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1972,7 +2369,7 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -1991,8 +2388,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i + keylen / sizeof(u32)] !=
- cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i + keylen / sizeof(u32)]) !=
+ aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2082,7 +2479,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2109,7 +2506,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) {
+ if (be32_to_cpu(ctx->ipad[i]) != hashkey[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2199,7 +2596,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2303,3 +2700,938 @@ struct safexcel_alg_template safexcel_alg_ccm = {
},
},
};
+
+static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
+ const u8 *key)
+{
+ struct safexcel_crypto_priv *priv = ctx->priv;
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, CHACHA_KEY_SIZE);
+ ctx->key_len = CHACHA_KEY_SIZE;
+}
+
+static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+
+ if (len != CHACHA_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ safexcel_chacha20_setkey(ctx, key);
+
+ return 0;
+}
+
+static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_chacha20 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_chacha20_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .base = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "safexcel-chacha20",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_chacha20_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_aead_ctx(ctfm);
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP &&
+ len > EIP197_AEAD_IPSEC_NONCE_SIZE) {
+ /* ESP variant has nonce appended to key */
+ len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
+ ctx->nonce = *(u32 *)(key + len);
+ }
+ if (len != CHACHA_KEY_SIZE) {
+ crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ safexcel_chacha20_setkey(ctx, key);
+
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_crypt(struct aead_request *req,
+ enum safexcel_cipher_direction dir)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+ u32 key[CHACHA_KEY_SIZE / sizeof(u32) + 1];
+ int ret = 0;
+
+ /*
+ * Instead of wasting time detecting umpteen silly corner cases,
+ * just dump all "small" requests to the fallback implementation.
+ * HW would not be faster on such small requests anyway.
+ */
+ if (likely((ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP ||
+ req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) &&
+ req->cryptlen > POLY1305_DIGEST_SIZE)) {
+ return safexcel_queue_req(&req->base, creq, dir);
+ }
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ memcpy(key, ctx->key, CHACHA_KEY_SIZE);
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* ESP variant has nonce appended to the key */
+ key[CHACHA_KEY_SIZE / sizeof(u32)] = ctx->nonce;
+ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
+ CHACHA_KEY_SIZE +
+ EIP197_AEAD_IPSEC_NONCE_SIZE);
+ } else {
+ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
+ CHACHA_KEY_SIZE);
+ }
+ if (ret) {
+ crypto_aead_clear_flags(aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aead, crypto_aead_get_flags(ctx->fback) &
+ CRYPTO_TFM_REQ_MASK);
+ return ret;
+ }
+
+ aead_request_set_tfm(subreq, ctx->fback);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return (dir == SAFEXCEL_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
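
When the request is bounced to the fallback in the ESP case, the rfc7539esp fallback expects the 4-byte nonce back at the tail of the key, so the code above rebuilds the 36-byte blob it was originally handed. A sketch of that layout (constants per RFC 7539 ESP usage; the helper is illustrative, not driver API):

    #include <stdint.h>
    #include <string.h>

    #define CHACHA_KEY_BYTES 32
    #define ESP_NONCE_BYTES   4

    /* Rebuild "key || nonce"; the nonce was read raw from the original
     * key bytes, so a raw copy restores them on any endianness. */
    static void pack_esp_key(uint8_t out[CHACHA_KEY_BYTES + ESP_NONCE_BYTES],
                             const uint8_t key[CHACHA_KEY_BYTES],
                             uint32_t nonce)
    {
        memcpy(out, key, CHACHA_KEY_BYTES);
        memcpy(out + CHACHA_KEY_BYTES, &nonce, ESP_NONCE_BYTES);
    }
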
+
+static int safexcel_aead_chachapoly_encrypt(struct aead_request *req)
+{
+ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_chachapoly_decrypt(struct aead_request *req)
+{
+ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_fallback_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *aead = __crypto_aead_cast(tfm);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+
+ /* Allocate fallback implementation */
+ ctx->fback = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fback))
+ return PTR_ERR(ctx->fback);
+
+ crypto_aead_set_reqsize(aead, max(sizeof(struct safexcel_cipher_req),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->fback)));
+
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_fallback_cra_init(tfm);
+ ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
+ CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
+ ctx->state_sz = 0; /* Precomputed by HW */
+ return 0;
+}
+
+static void safexcel_aead_fallback_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->fback);
+ safexcel_aead_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_chachapoly = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
+ .alg.aead = {
+ .setkey = safexcel_aead_chachapoly_setkey,
+ .setauthsize = safexcel_aead_chachapoly_setauthsize,
+ .encrypt = safexcel_aead_chachapoly_encrypt,
+ .decrypt = safexcel_aead_chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "safexcel-chacha20-poly1305",
+ /* +1 to put it above HW chacha + SW poly */
+ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_chachapoly_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_chachapoly_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
+ .alg.aead = {
+ .setkey = safexcel_aead_chachapoly_setkey,
+ .setauthsize = safexcel_aead_chachapoly_setauthsize,
+ .encrypt = safexcel_aead_chachapoly_encrypt,
+ .decrypt = safexcel_aead_chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE - EIP197_AEAD_IPSEC_NONCE_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "safexcel-chacha20-poly1305-esp",
+ /* +1 to put it above HW chacha + SW poly */
+ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_chachapolyesp_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+
+ if (len != SM4_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, SM4_KEY_SIZE))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, SM4_KEY_SIZE);
+ ctx->key_len = SM4_KEY_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sm4_blk_encrypt(struct skcipher_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_sm4_blk_decrypt(struct skcipher_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_sm4_blk_encrypt,
+ .decrypt = safexcel_sm4_blk_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "safexcel-ecb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ecb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_sm4_blk_encrypt,
+ .decrypt = safexcel_sm4_blk_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "safexcel-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_cbc_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ofb(sm4)",
+ .cra_driver_name = "safexcel-ofb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ofb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cfb(sm4)",
+ .cra_driver_name = "safexcel-cfb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_cfb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+ /* exclude the nonce here */
+ len -= CTR_RFC3686_NONCE_SIZE;
+
+ return safexcel_skcipher_sm4_setkey(ctfm, key, len);
+}
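
rfc3686 templates receive the nonce as the final 4 bytes of the key material, which is why the min/max_keysize below add CTR_RFC3686_NONCE_SIZE. The split, as a standalone sketch (names are illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define RFC3686_NONCE_BYTES 4

    /* Split "cipher key || 4-byte nonce" per the rfc3686 convention */
    static void rfc3686_split_key(const uint8_t *keymat, size_t len,
                                  uint32_t *nonce, size_t *cipher_len)
    {
        memcpy(nonce, keymat + len - RFC3686_NONCE_BYTES,
               RFC3686_NONCE_BYTES);
        *cipher_len = len - RFC3686_NONCE_BYTES;
    }
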
+
+static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4ctr_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ /* Add nonce size */
+ .min_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .base = {
+ .cra_name = "rfc3686(ctr(sm4))",
+ .cra_driver_name = "safexcel-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ctr_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4_blk_encrypt(struct aead_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, aead_request_ctx(req),
+ SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_sm4_blk_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, aead_request_ctx(req),
+ SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->state_sz = SHA1_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_sm4_blk_encrypt,
+ .decrypt = safexcel_aead_sm4_blk_decrypt,
+ .ivsize = SM4_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(sm4))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4cbc_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_fallback_setkey(struct crypto_aead *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Keep fallback cipher synchronized */
+ return crypto_aead_setkey(ctx->fback, (u8 *)key, len) ?:
+ safexcel_aead_setkey(ctfm, key, len);
+}
+
+static int safexcel_aead_fallback_setauthsize(struct crypto_aead *ctfm,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Keep fallback cipher synchronized */
+ return crypto_aead_setauthsize(ctx->fback, authsize);
+}
+
+static int safexcel_aead_fallback_crypt(struct aead_request *req,
+ enum safexcel_cipher_direction dir)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->fback);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return (dir == SAFEXCEL_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
+static int safexcel_aead_sm4cbc_sm3_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else if (req->cryptlen || req->assoclen) /* If input length > 0 only */
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ return safexcel_aead_fallback_crypt(req, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sm3_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else if (req->cryptlen > crypto_aead_authsize(tfm) || req->assoclen)
+ /* If input length > 0 only */
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ return safexcel_aead_fallback_crypt(req, SAFEXCEL_DECRYPT);
+}
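
On the decrypt path the input always carries the authsize-byte tag, so the real payload is cryptlen minus the tag; the fallback is taken only when both that payload and the AAD are empty. The condition, as a tiny predicate (a sketch, names illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    /* True when an AEAD decrypt is effectively empty: the buffer holds
     * only the tag and there is no associated data. */
    static bool aead_decrypt_is_empty(size_t cryptlen, size_t authsize,
                                      size_t assoclen)
    {
        return cryptlen <= authsize && assoclen == 0;
    }
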
+
+static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_fallback_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ ctx->state_sz = SM3_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
+ .alg.aead = {
+ .setkey = safexcel_aead_fallback_setkey,
+ .setauthsize = safexcel_aead_fallback_setauthsize,
+ .encrypt = safexcel_aead_sm4cbc_sm3_encrypt,
+ .decrypt = safexcel_aead_sm4cbc_sm3_decrypt,
+ .ivsize = SM4_BLOCK_SIZE,
+ .maxauthsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sm3),cbc(sm4))",
+ .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4cbc_sm3_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4ctr_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sm4cbc_sha1_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(sm4)))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4ctr_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4ctr_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sm4cbc_sm3_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sm3),rfc3686(ctr(sm4)))",
+ .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4ctr_sm3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_rfc4106_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+
+ len -= CTR_RFC3686_NONCE_SIZE;
+ return safexcel_aead_gcm_setkey(ctfm, key, len);
+}
+
+static int safexcel_rfc4106_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int safexcel_rfc4106_encrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ safexcel_aead_encrypt(req);
+}
+
+static int safexcel_rfc4106_decrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ safexcel_aead_decrypt(req);
+}
+
+static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_gcm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+ .alg.aead = {
+ .setkey = safexcel_rfc4106_gcm_setkey,
+ .setauthsize = safexcel_rfc4106_gcm_setauthsize,
+ .encrypt = safexcel_rfc4106_encrypt,
+ .decrypt = safexcel_rfc4106_decrypt,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "safexcel-rfc4106-gcm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4106_gcm_cra_init,
+ .cra_exit = safexcel_aead_gcm_cra_exit,
+ },
+ },
+};
+
+static int safexcel_rfc4543_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != GHASH_DIGEST_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int safexcel_rfc4543_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_gcm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP_GMAC;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+ .alg.aead = {
+ .setkey = safexcel_rfc4106_gcm_setkey,
+ .setauthsize = safexcel_rfc4543_gcm_setauthsize,
+ .encrypt = safexcel_rfc4106_encrypt,
+ .decrypt = safexcel_rfc4106_decrypt,
+ .ivsize = GCM_RFC4543_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "safexcel-rfc4543-gcm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4543_gcm_cra_init,
+ .cra_exit = safexcel_aead_gcm_cra_exit,
+ },
+ },
+};
+
+static int safexcel_rfc4309_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	/* First byte of the nonce = L' = L - 1 = always 3 for RFC4309 (4 byte ctr) */
+ *(u8 *)&ctx->nonce = EIP197_AEAD_IPSEC_COUNTER_SIZE - 1;
+ /* last 3 bytes of key are the nonce! */
+ memcpy((u8 *)&ctx->nonce + 1, key + len -
+ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE,
+ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE);
+
+ len -= EIP197_AEAD_IPSEC_CCM_NONCE_SIZE;
+ return safexcel_aead_ccm_setkey(ctfm, key, len);
+}
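
RFC 4309 fixes the CCM length field at 4 bytes, so the leading byte of the nonce is always L - 1 = 3, followed by the 3-byte salt taken from the tail of the key, as the setkey above does through ctx->nonce. The same packing as a sketch (layout inferred from the code; not driver API):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define RFC4309_SALT_BYTES 3

    /* Pack L' = 3 plus the 3-byte RFC 4309 salt into a 4-byte field */
    static void pack_rfc4309_nonce(uint8_t nonce[4], const uint8_t *key,
                                   size_t keylen)
    {
        nonce[0] = 3; /* L - 1, for the fixed 4-byte CCM counter */
        memcpy(nonce + 1, key + keylen - RFC4309_SALT_BYTES,
               RFC4309_SALT_BYTES);
    }
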
+
+static int safexcel_rfc4309_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ /* Borrowed from crypto/ccm.c */
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int safexcel_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Borrowed from crypto/ccm.c */
+ if (req->assoclen != 16 && req->assoclen != 20)
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Borrowed from crypto/ccm.c */
+ if (req->assoclen != 16 && req->assoclen != 20)
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_ccm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
+ .alg.aead = {
+ .setkey = safexcel_rfc4309_ccm_setkey,
+ .setauthsize = safexcel_rfc4309_ccm_setauthsize,
+ .encrypt = safexcel_rfc4309_ccm_encrypt,
+ .decrypt = safexcel_rfc4309_ccm_decrypt,
+ .ivsize = EIP197_AEAD_IPSEC_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "safexcel-rfc4309-ccm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4309_ccm_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2effb6d21e8b..2134daef24f6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -5,9 +5,13 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
+#include <crypto/sha3.h>
+#include <crypto/skcipher.h>
+#include <crypto/sm3.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
@@ -19,9 +23,19 @@ struct safexcel_ahash_ctx {
struct safexcel_crypto_priv *priv;
u32 alg;
-
- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 key_sz;
+ bool cbcmac;
+ bool do_fallback;
+ bool fb_init_done;
+ bool fb_do_setkey;
+
+ __le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
+ __le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
+
+ struct crypto_cipher *kaes;
+ struct crypto_ahash *fback;
+ struct crypto_shash *shpre;
+ struct shash_desc *shdesc;
};
struct safexcel_ahash_req {
@@ -31,6 +45,8 @@ struct safexcel_ahash_req {
bool needs_inv;
bool hmac_zlen;
bool len_is_le;
+ bool not_first;
+ bool xcbcmac;
int nents;
dma_addr_t result_dma;
@@ -39,7 +55,9 @@ struct safexcel_ahash_req {
u8 state_sz; /* expected state size, only set once */
u8 block_sz; /* block size, only set once */
- u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u8 digest_sz; /* output digest size, only set once */
+ __le32 state[SHA3_512_BLOCK_SIZE /
+ sizeof(__le32)] __aligned(sizeof(__le32));
u64 len;
u64 processed;
@@ -57,21 +75,31 @@ static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
- u32 input_length, u32 result_length)
+ u32 input_length, u32 result_length,
+ bool cbcmac)
{
struct safexcel_token *token =
(struct safexcel_token *)cdesc->control_data.token;
token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token[0].packet_length = input_length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[1].packet_length = result_length;
- token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ input_length &= 15;
+ if (unlikely(cbcmac && input_length)) {
+ token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[1].packet_length = 16 - input_length;
+ token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ } else {
+ token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ }
+
+ token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ token[2].packet_length = result_length;
+ token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
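
The reworked token above zero-pads CBC-MAC input to the 16-byte AES block before requesting the digest, and otherwise keeps the old two-token shape. The pad-length computation on its own (trivially runnable sketch):

    #include <stdint.h>

    /* Zero bytes needed to reach a 16-byte boundary; 0 when aligned */
    static uint32_t cbcmac_pad_len(uint32_t input_length)
    {
        uint32_t rem = input_length & 15;

        return rem ? 16 - rem : 0;
    }
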
@@ -82,29 +110,48 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_crypto_priv *priv = ctx->priv;
u64 count = 0;
- cdesc->control_data.control0 |= ctx->alg;
+ cdesc->control_data.control0 = ctx->alg;
/*
* Copy the input digest if needed, and setup the context
* fields. Do this now as we need it to setup the first command
* descriptor.
*/
- if (!req->processed) {
- /* First - and possibly only - block of basic hash only */
- if (req->finish) {
+ if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
+ if (req->xcbcmac)
+ memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
+ else
+ memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
+
+ if (!req->finish && req->xcbcmac)
cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_DIGEST_XCM |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_NO_FINISH_HASH |
+ CONTEXT_CONTROL_SIZE(req->state_sz /
+ sizeof(u32));
+ else
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_DIGEST_XCM |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_SIZE(req->state_sz /
+ sizeof(u32));
+ return;
+ } else if (!req->processed) {
+ /* First - and possibly only - block of basic hash only */
+ if (req->finish)
+ cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
 				/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
- } else {
- cdesc->control_data.control0 |=
+ else
+ cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
CONTEXT_CONTROL_NO_FINISH_HASH |
 				/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
- }
return;
}
@@ -204,7 +251,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
}
if (sreq->result_dma) {
- dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+ dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz,
DMA_FROM_DEVICE);
sreq->result_dma = 0;
}
@@ -223,7 +270,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
memcpy(sreq->cache, sreq->state,
crypto_ahash_digestsize(ahash));
- memcpy(sreq->state, ctx->opad, sreq->state_sz);
+ memcpy(sreq->state, ctx->opad, sreq->digest_sz);
sreq->len = sreq->block_sz +
crypto_ahash_digestsize(ahash);
@@ -238,8 +285,14 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
+ if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
+ /* Undo final XOR with 0xffffffff ...*/
+ *(__le32 *)areq->result = ~sreq->state[0];
+ } else {
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
+ }
}
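
Standard CRC32 finalizes by inverting the register (an XOR with 0xffffffff); the engine hands back the raw register, hence the bitwise NOT above. As a one-liner sketch: a raw register of 0xffffffff (the init value) finalizes to 0x00000000, the CRC32 of the empty message.

    #include <stdint.h>

    /* Undo the missing final inversion of the CRC32 register */
    static uint32_t crc32_finalize(uint32_t raw_state)
    {
        return ~raw_state;
    }
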
cache_len = safexcel_queued_len(sreq);
@@ -261,10 +314,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra = 0, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
+ u64 queued, len;
- queued = len = safexcel_queued_len(req);
+ queued = safexcel_queued_len(req);
if (queued <= HASH_CACHE_SIZE)
cache_len = queued;
else
@@ -287,15 +340,52 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
areq->nbytes - extra);
queued -= extra;
- len -= extra;
if (!queued) {
*commands = 0;
*results = 0;
return 0;
}
+
+ extra = 0;
}
+ if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) {
+ if (unlikely(cache_len < AES_BLOCK_SIZE)) {
+ /*
+ * Cache contains less than 1 full block, complete.
+ */
+ extra = AES_BLOCK_SIZE - cache_len;
+ if (queued > cache_len) {
+ /* More data follows: borrow bytes */
+ u64 tmp = queued - cache_len;
+
+ skip = min_t(u64, tmp, extra);
+ sg_pcopy_to_buffer(areq->src,
+ sg_nents(areq->src),
+ req->cache + cache_len,
+ skip, 0);
+ }
+ extra -= skip;
+ memset(req->cache + cache_len + skip, 0, extra);
+ if (!ctx->cbcmac && extra) {
+				// 10* padding for XCBC-MAC & CMAC
+ req->cache[cache_len + skip] = 0x80;
+				// HW will use K2 instead of K3 - compensate!
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+ ((__be32 *)req->cache)[i] ^=
+ cpu_to_be32(le32_to_cpu(
+ ctx->ipad[i] ^ ctx->ipad[i + 4]));
+ }
+ cache_len = AES_BLOCK_SIZE;
+ queued = queued + extra;
+ }
+
+ /* XCBC continue: XOR previous result into 1st word */
+ crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE);
+ }
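
XCBC-MAC and CMAC (RFC 3566, RFC 4493) pad a short final block as 10* (one 0x80 byte, then zeros) and mix in K3 instead of the K2 used for full blocks. The engine unconditionally applies K2, so the code above pre-XORs the cached block with K2 ^ K3 to land on K3 overall. A hedged sketch of that software fixup (the K2/K3 placement in ipad[] is inferred from the code, not confirmed elsewhere):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define AES_BLOCK_BYTES 16

    /* 10* pad, then swap the engine's K2 contribution for K3 */
    static void xcbc_pad_and_fix(uint8_t block[AES_BLOCK_BYTES], size_t used,
                                 const uint8_t k2[AES_BLOCK_BYTES],
                                 const uint8_t k3[AES_BLOCK_BYTES])
    {
        block[used] = 0x80;
        memset(block + used + 1, 0, AES_BLOCK_BYTES - used - 1);
        for (size_t i = 0; i < AES_BLOCK_BYTES; i++)
            block[i] ^= k2[i] ^ k3[i];
    }
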
+
+ len = queued;
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
@@ -306,8 +396,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
- req->cache_dma, cache_len, len,
- ctx->base.ctxr_dma);
+ req->cache_dma, cache_len,
+ len, ctx->base.ctxr_dma);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -319,10 +409,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
goto send_command;
}
- /* Skip descriptor generation for zero-length requests */
- if (!areq->nbytes)
- goto send_command;
-
/* Now handle the current ahash request buffer(s) */
req->nents = dma_map_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src,
@@ -336,26 +422,34 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
+ if (unlikely(sglen <= skip)) {
+ skip -= sglen;
+ continue;
+ }
+
/* Do not overflow the request */
- if (queued < sglen)
+ if ((queued + skip) <= sglen)
sglen = queued;
+ else
+ sglen -= skip;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
- sg_dma_address(sg),
- sglen, len, ctx->base.ctxr_dma);
+ sg_dma_address(sg) + skip, sglen,
+ len, ctx->base.ctxr_dma);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
}
- n_cdesc++;
- if (n_cdesc == 1)
+ if (!n_cdesc)
first_cdesc = cdesc;
+ n_cdesc++;
queued -= sglen;
if (!queued)
break;
+ skip = 0;
}
send_command:
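The skip/borrow arithmetic in the loop above is easier to see in isolation. Below is a small stand-alone sketch with hypothetical segment names, mimicking how the first `skip` bytes (already borrowed into the cache) are stepped over before descriptors are emitted.

#include <stdio.h>

struct seg { const char *name; int len; };

/* Consume 'queued' bytes starting 'skip' bytes into the segment list,
 * emitting one descriptor per chunk -- stand-in for the sg walk above. */
static void walk(struct seg *sg, int nseg, int queued, int skip)
{
        for (int i = 0; i < nseg; i++) {
                int sglen = sg[i].len;

                if (sglen <= skip) {         /* segment already consumed */
                        skip -= sglen;
                        continue;
                }
                if (queued + skip <= sglen)  /* do not overflow the request */
                        sglen = queued;
                else
                        sglen -= skip;

                printf("desc: %s +%d len %d\n", sg[i].name, skip, sglen);
                queued -= sglen;
                if (!queued)
                        break;
                skip = 0;        /* only the first chunk carries an offset */
        }
}

int main(void)
{
        struct seg sg[] = { { "sg0", 4 }, { "sg1", 16 }, { "sg2", 32 } };

        walk(sg, 3, 20, 6);      /* 6 bytes were borrowed into the cache */
        return 0;
}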
@@ -363,9 +457,9 @@ send_command:
safexcel_context_control(ctx, req, first_cdesc);
/* Add the token */
- safexcel_hash_token(first_cdesc, len, req->state_sz);
+ safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac);
- req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+ req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, req->result_dma)) {
ret = -EINVAL;
@@ -374,7 +468,7 @@ send_command:
/* Add a result descriptor */
rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
- req->state_sz);
+ req->digest_sz);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
goto unmap_result;
@@ -382,17 +476,20 @@ send_command:
safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
+ req->processed += len - extra;
*commands = n_cdesc;
*results = 1;
return 0;
unmap_result:
- dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+ dma_unmap_single(priv->dev, req->result_dma, req->digest_sz,
DMA_FROM_DEVICE);
unmap_sg:
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ if (req->nents) {
+ dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ req->nents = 0;
+ }
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
@@ -590,16 +687,12 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (ctx->base.ctxr) {
if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
- req->processed &&
- (/* invalidate for basic hash continuation finish */
- (req->finish &&
- (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
+ /* invalidate for *any* non-XCBC continuation */
+ ((req->not_first && !req->xcbcmac) ||
/* invalidate if (i)digest changed */
memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
- /* invalidate for HMAC continuation finish */
- (req->finish && (req->processed != req->block_sz)) ||
/* invalidate for HMAC finish with odigest changed */
- (req->finish &&
+ (req->finish && req->hmac &&
memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
ctx->opad, req->state_sz))))
/*
@@ -622,6 +715,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (!ctx->base.ctxr)
return -ENOMEM;
}
+ req->not_first = true;
ring = ctx->base.ring;
@@ -691,8 +785,34 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
memcpy(areq->result, sha512_zero_message_hash,
SHA512_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) {
+ memcpy(areq->result,
+ EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE);
+ }
return 0;
+ } else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM &&
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
+ req->len == sizeof(u32) && !areq->nbytes)) {
+ /* Zero length CRC32 */
+ memcpy(areq->result, ctx->ipad, sizeof(u32));
+ return 0;
+ } else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
+ !areq->nbytes)) {
+ /* Zero length CBC MAC */
+ memset(areq->result, 0, AES_BLOCK_SIZE);
+ return 0;
+ } else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE &&
+ !areq->nbytes)) {
+ /* Zero length (X)CBC/CMAC */
+ int i;
+
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+ ((__be32 *)areq->result)[i] =
+ cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4])); /* K3 */
+ areq->result[0] ^= 0x80; /* 10* padding */
+ crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
!areq->nbytes)) {
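The zero-length (X)CBC/CMAC fast path above is the RFC 3566 / NIST SP 800-38B math collapsed to a single block: the empty message becomes one 10*-padded block XORed with the final subkey, encrypted once. A hedged sketch, with aes_encrypt_block standing in for whatever single-block AES primitive is at hand (hypothetical name):

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical one-block AES primitive; any AES library will do. */
void aes_encrypt_block(const uint8_t key[16], const uint8_t in[16],
                       uint8_t out[16]);

/* tag = E_cipherkey(K_last ^ 10*-padding), with no data blocks at all.
 * cipher_key is K1 for XCBC, the original key K for CMAC;
 * k_last is K3 for XCBC, K2 for CMAC. */
void mac_zero_len(const uint8_t cipher_key[16], const uint8_t k_last[16],
                  uint8_t tag[AES_BLOCK_SIZE])
{
        memcpy(tag, k_last, AES_BLOCK_SIZE);
        tag[0] ^= 0x80;           /* 10* padding of the empty message */
        aes_encrypt_block(cipher_key, tag, tag);
}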
@@ -792,6 +912,7 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->base.send = safexcel_ahash_send;
ctx->base.handle_result = safexcel_handle_result;
+ ctx->fb_do_setkey = false;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
@@ -808,6 +929,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
+ req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
return 0;
@@ -889,6 +1011,7 @@ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
+ req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
req->hmac = true;
@@ -1125,6 +1248,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
@@ -1180,6 +1304,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
@@ -1248,6 +1373,7 @@ static int safexcel_hmac_sha224_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
@@ -1318,6 +1444,7 @@ static int safexcel_hmac_sha256_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
@@ -1375,6 +1502,7 @@ static int safexcel_sha512_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
@@ -1430,6 +1558,7 @@ static int safexcel_sha384_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
@@ -1498,6 +1627,7 @@ static int safexcel_hmac_sha512_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
@@ -1568,6 +1698,7 @@ static int safexcel_hmac_sha384_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
@@ -1625,6 +1756,7 @@ static int safexcel_md5_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
+ req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
return 0;
@@ -1686,6 +1818,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
+ req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
req->len_is_le = true; /* MD5 is little endian! ... */
req->hmac = true;
@@ -1740,3 +1873,1247 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
},
};
+
+static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret = safexcel_ahash_cra_init(tfm);
+
+ /* Default 'key' is all zeroes */
+ memset(ctx->ipad, 0, sizeof(u32));
+ return ret;
+}
+
+static int safexcel_crc32_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from loaded key */
+ req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
+ /* Set processed to non-zero to enable invalidation detection */
+ req->len = sizeof(u32);
+ req->processed = sizeof(u32);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
+ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
+ req->state_sz = sizeof(u32);
+ req->digest_sz = sizeof(u32);
+ req->block_sz = sizeof(u32);
+
+ return 0;
+}
+
+static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+ if (keylen != sizeof(u32)) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->ipad, key, sizeof(u32));
+ return 0;
+}
+
+static int safexcel_crc32_digest(struct ahash_request *areq)
+{
+ return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_crc32 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_crc32_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_crc32_digest,
+ .setkey = safexcel_crc32_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = sizeof(u32),
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "safexcel-crc32",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_crc32_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
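For completeness, a sketch of driving this "crc32" ahash from other kernel code; error handling is trimmed, and depending on priorities the name may just as well resolve to the generic software implementation. Note the buffer caveats in the comments.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

/* Minimal one-shot CRC32 via the ahash API -- illustrative sketch only.
 * NB: data must be linear kernel memory; stack buffers are not DMA-safe
 * for all drivers. */
static u32 crc32_oneshot(const void *data, unsigned int len)
{
        struct crypto_ahash *tfm = crypto_alloc_ahash("crc32", 0, 0);
        struct ahash_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        struct scatterlist sg;
        __le32 out;

        if (IS_ERR(tfm))
                return 0;
        req = ahash_request_alloc(tfm, GFP_KERNEL);
        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, (u8 *)&out, len);
        crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return le32_to_cpu(out);
}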
+
+static int safexcel_cbcmac_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from loaded keys */
+ memcpy(req->state, ctx->ipad, ctx->key_sz);
+ /* Set processed to non-zero to enable invalidation detection */
+ req->len = AES_BLOCK_SIZE;
+ req->processed = AES_BLOCK_SIZE;
+
+ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
+ req->state_sz = ctx->key_sz;
+ req->digest_sz = AES_BLOCK_SIZE;
+ req->block_sz = AES_BLOCK_SIZE;
+ req->xcbcmac = true;
+
+ return 0;
+}
+
+static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
+ for (i = 0; i < len / sizeof(u32); i++)
+ ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);
+
+ if (len == AES_KEYSIZE_192) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else if (len == AES_KEYSIZE_256) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ }
+ ctx->cbcmac = true;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
+
+static int safexcel_cbcmac_digest(struct ahash_request *areq)
+{
+ return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_cbcmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_cbcmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "cbcmac(aes)",
+ .cra_driver_name = "safexcel-cbcmac-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ /* precompute the XCBC key material */
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes, key, len);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+ for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
+ ctx->ipad[i] =
+ cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));
+
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ ctx->cbcmac = false;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
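The three crypto_cipher_encrypt_one() calls above are the RFC 3566 subkey derivation, stored as K2 | K3 | K1 because the engine consumes K2/K3 from the ipad area and uses K1 as the actual cipher key. A compact sketch, with aes_encrypt_block again a hypothetical single-block AES primitive:

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

void aes_encrypt_block(const uint8_t key[16], const uint8_t in[16],
                       uint8_t out[16]); /* hypothetical AES primitive */

/* RFC 3566 XCBC subkeys: Ki = E_K(i i i ... i), for i = 1, 2, 3 */
void xcbc_derive(const uint8_t key[16], uint8_t k1[16], uint8_t k2[16],
                 uint8_t k3[16])
{
        uint8_t c[AES_BLOCK_SIZE];

        memset(c, 0x01, sizeof(c)); aes_encrypt_block(key, c, k1);
        memset(c, 0x02, sizeof(c)); aes_encrypt_block(key, c, k2);
        memset(c, 0x03, sizeof(c)); aes_encrypt_block(key, c, k3);
}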
+
+static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_ahash_cra_init(tfm);
+ ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
+ return PTR_ERR_OR_ZERO(ctx->kaes);
+}
+
+static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(ctx->kaes);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_xcbcmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_xcbcmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "safexcel-xcbc-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_xcbcmac_cra_init,
+ .cra_exit = safexcel_xcbcmac_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ __be64 consts[4];
+ u64 _const[2];
+ u8 msb_mask, gfmask;
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < len / sizeof(u32); i++)
+ ctx->ipad[i + 8] =
+ cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));
+
+ /* precompute the CMAC key material */
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes, key, len);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ /* code below borrowed from crypto/cmac.c */
+ /* encrypt the zero block */
+ memset(consts, 0, AES_BLOCK_SIZE);
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+
+ gfmask = 0x87;
+ _const[0] = be64_to_cpu(consts[1]);
+ _const[1] = be64_to_cpu(consts[0]);
+
+ /* gf(2^128) multiply zero-ciphertext with u and u^2 */
+ for (i = 0; i < 4; i += 2) {
+ msb_mask = ((s64)_const[1] >> 63) & gfmask;
+ _const[1] = (_const[1] << 1) | (_const[0] >> 63);
+ _const[0] = (_const[0] << 1) ^ msb_mask;
+
+ consts[i + 0] = cpu_to_be64(_const[1]);
+ consts[i + 1] = cpu_to_be64(_const[0]);
+ }
+ /* end of code borrowed from crypto/cmac.c */
+
+ for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
+ ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);
+
+ if (len == AES_KEYSIZE_192) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else if (len == AES_KEYSIZE_256) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ }
+ ctx->cbcmac = false;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
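The doubling loop borrowed from crypto/cmac.c implements multiplication by x in GF(2^128) with the CMAC reduction constant 0x87: applied once to L = E_K(0^16) it yields K1, applied again it yields K2. Isolated as a sketch (hi/lo are the big-endian 64-bit halves of the 128-bit value):

#include <stdint.h>
#include <stdio.h>

/* Shift the 128-bit value left by one; if the MSB fell off, reduce by
 * XORing 0x87 into the low byte -- same arithmetic as the loop above. */
static void gf128_double(uint64_t *hi, uint64_t *lo)
{
        uint8_t carry = (uint8_t)(((int64_t)*hi >> 63) & 0x87);

        *hi = (*hi << 1) | (*lo >> 63);
        *lo = (*lo << 1) ^ carry;
}

int main(void)
{
        uint64_t hi = 0x8000000000000000ull, lo = 0; /* MSB set: carry path */

        gf128_double(&hi, &lo);
        printf("%016llx%016llx\n", (unsigned long long)hi,
               (unsigned long long)lo);              /* -> 00...0087 */
        return 0;
}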
+
+struct safexcel_alg_template safexcel_alg_cmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_cmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "safexcel-cmac-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_xcbcmac_cra_init,
+ .cra_exit = safexcel_xcbcmac_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sm3_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SM3_DIGEST_SIZE;
+ req->digest_sz = SM3_DIGEST_SIZE;
+ req->block_sz = SM3_BLOCK_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sm3_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sm3_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sm3 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SM3,
+ .alg.ahash = {
+ .init = safexcel_sm3_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sm3_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "safexcel-sm3",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3",
+ SM3_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sm3_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from ipad precompute */
+ memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
+ /* Already processed the key^ipad part now! */
+ req->len = SM3_BLOCK_SIZE;
+ req->processed = SM3_BLOCK_SIZE;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SM3_DIGEST_SIZE;
+ req->digest_sz = SM3_DIGEST_SIZE;
+ req->block_sz = SM3_BLOCK_SIZE;
+ req->hmac = true;
+
+ return 0;
+}
+
+static int safexcel_hmac_sm3_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sm3_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SM3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sm3_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sm3_digest,
+ .setkey = safexcel_hmac_sm3_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "safexcel-hmac-sm3",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_224_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_224_DIGEST_SIZE;
+ req->digest_sz = SHA3_224_DIGEST_SIZE;
+ req->block_sz = SHA3_224_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_fbcheck(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+ int ret = 0;
+
+ if (ctx->do_fallback) {
+ ahash_request_set_tfm(subreq, ctx->fback);
+ ahash_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(subreq, req->src, req->result,
+ req->nbytes);
+ if (!ctx->fb_init_done) {
+ if (ctx->fb_do_setkey) {
+ /* Set fallback cipher HMAC key */
+ u8 key[SHA3_224_BLOCK_SIZE];
+
+ memcpy(key, ctx->ipad,
+ crypto_ahash_blocksize(ctx->fback) / 2);
+ memcpy(key +
+ crypto_ahash_blocksize(ctx->fback) / 2,
+ ctx->opad,
+ crypto_ahash_blocksize(ctx->fback) / 2);
+ ret = crypto_ahash_setkey(ctx->fback, key,
+ crypto_ahash_blocksize(ctx->fback));
+ memzero_explicit(key,
+ crypto_ahash_blocksize(ctx->fback));
+ ctx->fb_do_setkey = false;
+ }
+ ret = ret ?: crypto_ahash_init(subreq);
+ ctx->fb_init_done = true;
+ }
+ }
+ return ret;
+}
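The fallback setkey path above rebuilds the HMAC key from the two half-blocksize slots it was parked in by safexcel_hmac_sha3_setkey() further down. Stripped of the crypto API plumbing, the operation is just the following (hypothetical buffer names):

#include <stdint.h>
#include <string.h>

/* Glue the key back together from its ipad/opad halves before handing
 * it to the software fallback. */
static void rebuild_fallback_key(uint8_t *key, const uint8_t *ipad,
                                 const uint8_t *opad, size_t blocksize)
{
        memcpy(key, ipad, blocksize / 2);
        memcpy(key + blocksize / 2, opad, blocksize / 2);
}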
+
+static int safexcel_sha3_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
+}
+
+static int safexcel_sha3_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
+}
+
+static int safexcel_sha3_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback |= !req->nbytes;
+ if (ctx->do_fallback)
+ /* An update or export/import happened, or len is 0: cannot use the HW */
+ return safexcel_sha3_fbcheck(req) ?:
+ crypto_ahash_finup(subreq);
+ else
+ return safexcel_ahash_finup(req);
+}
+
+static int safexcel_sha3_digest_fallback(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ ctx->fb_init_done = false;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq);
+}
+
+static int safexcel_sha3_224_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_sha3_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
+}
+
+static int safexcel_sha3_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
+}
+
+static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_ahash_cra_init(tfm);
+
+ /* Allocate fallback implementation */
+ ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fback))
+ return PTR_ERR(ctx->fback);
+
+ /* Update statesize from fallback algorithm! */
+ crypto_hash_alg_common(ahash)->statesize =
+ crypto_ahash_statesize(ctx->fback);
+ crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ctx->fback)));
+ return 0;
+}
+
+static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->fback);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_224_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_224_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "safexcel-sha3-224",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_256_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_256_DIGEST_SIZE;
+ req->digest_sz = SHA3_256_DIGEST_SIZE;
+ req->block_sz = SHA3_256_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_256_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_256_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_256_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "safexcel-sha3-256",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_384_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_384_DIGEST_SIZE;
+ req->digest_sz = SHA3_384_DIGEST_SIZE;
+ req->block_sz = SHA3_384_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_384_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_384_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_384_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "safexcel-sha3-384",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_512_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_512_DIGEST_SIZE;
+ req->digest_sz = SHA3_512_DIGEST_SIZE;
+ req->block_sz = SHA3_512_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_512_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_512_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_512_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "safexcel-sha3-512",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_sha3_cra_init(tfm);
+ if (ret)
+ return ret;
+
+ /* Allocate precalc basic digest implementation */
+ ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shpre))
+ return PTR_ERR(ctx->shpre);
+
+ ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) +
+ crypto_shash_descsize(ctx->shpre), GFP_KERNEL);
+ if (!ctx->shdesc) {
+ crypto_free_shash(ctx->shpre);
+ return -ENOMEM;
+ }
+ ctx->shdesc->tfm = ctx->shpre;
+ return 0;
+}
+
+static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->fback);
+ crypto_free_shash(ctx->shpre);
+ kfree(ctx->shdesc);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret = 0;
+
+ if (keylen > crypto_ahash_blocksize(tfm)) {
+ /*
+ * If the key is larger than the blocksize, then hash it
+ * first using our fallback hash.
+ */
+ ret = crypto_shash_digest(ctx->shdesc, key, keylen,
+ (u8 *)ctx->ipad);
+ keylen = crypto_shash_digestsize(ctx->shpre);
+
+ /*
+ * If the digest is larger than half the blocksize, we need to
+ * move the rest to opad due to the way our HMAC infra works.
+ */
+ if (keylen > crypto_ahash_blocksize(tfm) / 2)
+ /* Buffers overlap, need to use memmove instead of memcpy! */
+ memmove(ctx->opad,
+ (u8 *)ctx->ipad +
+ crypto_ahash_blocksize(tfm) / 2,
+ keylen - crypto_ahash_blocksize(tfm) / 2);
+ } else {
+ /*
+ * Copy the key to our ipad & opad buffers
+ * Note that ipad and opad each contain one half of the key,
+ * to match the existing HMAC driver infrastructure.
+ */
+ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
+ memcpy(ctx->ipad, key, keylen);
+ } else {
+ memcpy(ctx->ipad, key,
+ crypto_ahash_blocksize(tfm) / 2);
+ memcpy(ctx->opad,
+ key + crypto_ahash_blocksize(tfm) / 2,
+ keylen - crypto_ahash_blocksize(tfm) / 2);
+ }
+ }
+
+ /* Pad key with zeroes */
+ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
+ memset((u8 *)ctx->ipad + keylen, 0,
+ crypto_ahash_blocksize(tfm) / 2 - keylen);
+ memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
+ } else {
+ memset((u8 *)ctx->opad + keylen -
+ crypto_ahash_blocksize(tfm) / 2, 0,
+ crypto_ahash_blocksize(tfm) - keylen);
+ }
+
+ /* If doing fallback, still need to set the new key! */
+ ctx->fb_do_setkey = true;
+ return ret;
+}
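Condensed, the key preprocessing above is the usual HMAC rule - hash keys longer than the blocksize, zero-pad the rest - plus the driver-specific split across the ipad/opad halves. A hedged stand-alone sketch, with sha3_digest a hypothetical digest primitive standing in for ctx->shpre:

#include <stdint.h>
#include <string.h>

void sha3_digest(const uint8_t *in, size_t len, uint8_t *out, size_t dlen);

/* bs = blocksize, dlen = digest size of the underlying hash;
 * 144 bytes covers SHA3-224, the largest block handled here. */
static void hmac_prep_key(const uint8_t *key, size_t klen, size_t bs,
                          size_t dlen, uint8_t *ipad, uint8_t *opad)
{
        uint8_t buf[144];

        memset(buf, 0, bs);                /* zero padding up front */
        if (klen > bs) {
                sha3_digest(key, klen, buf, dlen); /* hash long keys down */
        } else {
                memcpy(buf, key, klen);
        }
        memcpy(ipad, buf, bs / 2);         /* first half of the key */
        memcpy(opad, buf + bs / 2, bs - bs / 2); /* second half */
}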
+
+static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_224_BLOCK_SIZE;
+ req->processed = SHA3_224_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_224_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_224_DIGEST_SIZE;
+ req->block_sz = SHA3_224_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_224_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_224_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-224");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_224_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_224_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-224)",
+ .cra_driver_name = "safexcel-hmac-sha3-224",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_224_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_256_BLOCK_SIZE;
+ req->processed = SHA3_256_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_256_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_256_DIGEST_SIZE;
+ req->block_sz = SHA3_256_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_256_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_256_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-256");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_256_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_256_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-256)",
+ .cra_driver_name = "safexcel-hmac-sha3-256",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_256_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_384_BLOCK_SIZE;
+ req->processed = SHA3_384_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_384_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_384_DIGEST_SIZE;
+ req->block_sz = SHA3_384_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_384_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_384_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-384");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_384_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_384_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-384)",
+ .cra_driver_name = "safexcel-hmac-sha3-384",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_384_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_512_BLOCK_SIZE;
+ req->processed = SHA3_512_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_512_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_512_DIGEST_SIZE;
+ req->block_sz = SHA3_512_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_512_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_512_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-512");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_512_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_512_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-512)",
+ .cra_driver_name = "safexcel-hmac-sha3-512",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_512_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 0f269b89cfd4..9237ba745c2f 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,7 +14,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
- cdr->offset = sizeof(u32) * priv->config.cd_offset;
+ cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
&cdr->base_dma, GFP_KERNEL);
@@ -24,7 +24,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
- rdr->offset = sizeof(u32) * priv->config.rd_offset;
+ rdr->offset = priv->config.rd_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
@@ -180,6 +180,7 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->first_seg = first;
rdesc->last_seg = last;
+ rdesc->result_size = EIP197_RD64_RESULT_SIZE;
rdesc->particle_size = len;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9181523ba760..391e3b4df364 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -23,6 +23,7 @@
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -137,7 +138,7 @@ struct crypt_ctl {
/* Used by Host: 4*4 bytes*/
unsigned ctl_flags;
union {
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *ablk_req;
struct aead_request *aead_req;
struct crypto_tfm *tfm;
} data;
@@ -186,7 +187,7 @@ struct ixp_ctx {
};
struct ixp_alg {
- struct crypto_alg crypto;
+ struct skcipher_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
@@ -239,17 +240,17 @@ static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
+ return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
+ return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}
static int setup_crypt_desc(void)
@@ -378,8 +379,8 @@ static void one_packet(dma_addr_t phys)
break;
}
case CTL_FLAG_PERFORM_ABLK: {
- struct ablkcipher_request *req = crypt->data.ablk_req;
- struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = crypt->data.ablk_req;
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
if (req_ctx->dst) {
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
@@ -571,10 +572,10 @@ static int init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static int init_tfm_ablk(struct crypto_tfm *tfm)
+static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
- return init_tfm(tfm);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
+ return init_tfm(crypto_skcipher_tfm(tfm));
}
static int init_tfm_aead(struct crypto_aead *tfm)
@@ -590,6 +591,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
free_sa_dir(&ctx->decrypt);
}
+static void exit_tfm_ablk(struct crypto_skcipher *tfm)
+{
+ exit_tfm(crypto_skcipher_tfm(tfm));
+}
+
static void exit_tfm_aead(struct crypto_aead *tfm)
{
exit_tfm(crypto_aead_tfm(tfm));
@@ -809,10 +815,10 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
return buf;
}
-static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
int ret;
@@ -845,17 +851,17 @@ out:
return ret;
}
-static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- return verify_ablkcipher_des3_key(tfm, key) ?:
+ return verify_skcipher_des3_key(tfm, key) ?:
ablk_setkey(tfm, key, key_len);
}
-static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
/* the nonce is stored in bytes at end of key */
if (key_len < CTR_RFC3686_NONCE_SIZE)
@@ -868,16 +874,16 @@ static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ablk_setkey(tfm, key, key_len);
}
-static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+static int ablk_perform(struct skcipher_request *req, int encrypt)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
- unsigned int nbytes = req->nbytes;
+ unsigned int nbytes = req->cryptlen;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
- struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct buffer_desc src_hook;
struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
@@ -902,8 +908,8 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->crypt_offs = 0;
crypt->crypt_len = nbytes;
- BUG_ON(ivsize && !req->info);
- memcpy(crypt->iv, req->info, ivsize);
+ BUG_ON(ivsize && !req->iv);
+ memcpy(crypt->iv, req->iv, ivsize);
if (req->src != req->dst) {
struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
@@ -941,22 +947,22 @@ free_buf_dest:
return -ENOMEM;
}
-static int ablk_encrypt(struct ablkcipher_request *req)
+static int ablk_encrypt(struct skcipher_request *req)
{
return ablk_perform(req, 1);
}
-static int ablk_decrypt(struct ablkcipher_request *req)
+static int ablk_decrypt(struct skcipher_request *req)
{
return ablk_perform(req, 0);
}
-static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
+static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 iv[CTR_RFC3686_BLOCK_SIZE];
- u8 *info = req->info;
+ u8 *info = req->iv;
int ret;
/* set up counter block */
@@ -967,9 +973,9 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- req->info = iv;
+ req->iv = iv;
ret = ablk_perform(req, 1);
- req->info = info;
+ req->iv = info;
return ret;
}
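The counter block assembled by ablk_rfc3686_crypt() follows the RFC 3686 layout exactly: 4-byte nonce, 8-byte per-request IV, 4-byte big-endian block counter starting at 1. As a stand-alone sketch:

#include <stdint.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE  4
#define CTR_RFC3686_IV_SIZE     8
#define CTR_RFC3686_BLOCK_SIZE  16

/* | nonce (4) | per-request IV (8) | counter (4), big endian, = 1 | */
static void rfc3686_iv(uint8_t blk[CTR_RFC3686_BLOCK_SIZE],
                       const uint8_t nonce[CTR_RFC3686_NONCE_SIZE],
                       const uint8_t iv[CTR_RFC3686_IV_SIZE])
{
        memcpy(blk, nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(blk + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
        blk[12] = blk[13] = blk[14] = 0;   /* big-endian counter = 1 */
        blk[15] = 1;
}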
@@ -1212,107 +1218,91 @@ static int aead_decrypt(struct aead_request *req)
static struct ixp_alg ixp4xx_algos[] = {
{
.crypto = {
- .cra_name = "cbc(des)",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "ecb(des)",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- }
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = ablk_des3_setkey,
- }
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = ablk_des3_setkey,
- }
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "cbc(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
- .cra_name = "ecb(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- }
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
.crypto = {
- .cra_name = "ctr(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_rfc3686_setkey,
- .encrypt = ablk_rfc3686_crypt,
- .decrypt = ablk_rfc3686_crypt }
- }
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_rfc3686_setkey,
+ .encrypt = ablk_rfc3686_crypt,
+ .decrypt = ablk_rfc3686_crypt,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
@@ -1421,10 +1411,10 @@ static int __init ixp_module_init(void)
return err;
}
for (i=0; i< num; i++) {
- struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
+ struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
- if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s"IXP_POSTFIX, cra->cra_name) >=
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
{
continue;
@@ -1434,26 +1424,24 @@ static int __init ixp_module_init(void)
}
/* block ciphers */
- cra->cra_type = &crypto_ablkcipher_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- if (!cra->cra_ablkcipher.setkey)
- cra->cra_ablkcipher.setkey = ablk_setkey;
- if (!cra->cra_ablkcipher.encrypt)
- cra->cra_ablkcipher.encrypt = ablk_encrypt;
- if (!cra->cra_ablkcipher.decrypt)
- cra->cra_ablkcipher.decrypt = ablk_decrypt;
- cra->cra_init = init_tfm_ablk;
-
- cra->cra_ctxsize = sizeof(struct ixp_ctx);
- cra->cra_module = THIS_MODULE;
- cra->cra_alignmask = 3;
- cra->cra_priority = 300;
- cra->cra_exit = exit_tfm;
- if (crypto_register_alg(cra))
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ if (!cra->setkey)
+ cra->setkey = ablk_setkey;
+ if (!cra->encrypt)
+ cra->encrypt = ablk_encrypt;
+ if (!cra->decrypt)
+ cra->decrypt = ablk_decrypt;
+ cra->init = init_tfm_ablk;
+ cra->exit = exit_tfm_ablk;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+ if (crypto_register_skcipher(cra))
printk(KERN_ERR "Failed to register '%s'\n",
- cra->cra_name);
+ cra->base.cra_name);
else
ixp4xx_algos[i].registered = 1;
}
@@ -1504,7 +1492,7 @@ static void __exit ixp_module_exit(void)
for (i=0; i< num; i++) {
if (ixp4xx_algos[i].registered)
- crypto_unregister_alg(&ixp4xx_algos[i].crypto);
+ crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
platform_device_unregister(pdev);
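
Every file in this series follows the same mechanical recipe, visible above in the ixp4xx conversion: the generic cra_* fields move under .base, the cra_u.ablkcipher members become top-level struct skcipher_alg members, cra_type and CRYPTO_ALG_TYPE_ABLKCIPHER disappear (the type is now implied by crypto_register_skcipher()), and cra_init/cra_exit become the type-safe .init/.exit hooks. A minimal sketch of the target shape, with hypothetical foo_* identifiers standing in for a real driver, not code from this patch:

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>

	struct foo_ctx { u8 key[AES_MAX_KEY_SIZE]; unsigned int keylen; };
	struct foo_reqctx { u32 flags; };

	/* Hypothetical per-request handlers; a real driver supplies these. */
	static int foo_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen);
	static int foo_encrypt(struct skcipher_request *req);
	static int foo_decrypt(struct skcipher_request *req);

	static int foo_init_tfm(struct crypto_skcipher *tfm)
	{
		/* Replaces the old "tfm->crt_ablkcipher.reqsize = ..." idiom. */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct foo_reqctx));
		return 0;
	}

	static struct skcipher_alg foo_cbc_aes_alg = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-foo",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct foo_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= foo_setkey,
		.encrypt		= foo_encrypt,
		.decrypt		= foo_decrypt,
		.init			= foo_init_tfm,
	};

Request state also moves: req->nbytes becomes req->cryptlen and req->info becomes req->iv, which is why the per-driver hunks below are so repetitive.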
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index d63a6ee905c9..f1ed3b85c0d2 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -232,13 +232,13 @@ struct mv_cesa_sec_accel_desc {
};
/**
- * struct mv_cesa_blkcipher_op_ctx - cipher operation context
+ * struct mv_cesa_skcipher_op_ctx - cipher operation context
* @key: cipher key
* @iv: cipher IV
*
* Context associated to a cipher operation.
*/
-struct mv_cesa_blkcipher_op_ctx {
+struct mv_cesa_skcipher_op_ctx {
u32 key[8];
u32 iv[4];
};
@@ -265,7 +265,7 @@ struct mv_cesa_hash_op_ctx {
struct mv_cesa_op_ctx {
struct mv_cesa_sec_accel_desc desc;
union {
- struct mv_cesa_blkcipher_op_ctx blkcipher;
+ struct mv_cesa_skcipher_op_ctx skcipher;
struct mv_cesa_hash_op_ctx hash;
} ctx;
};
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 84ceddfee76b..d8e8c857770c 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -209,7 +209,7 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
+ memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
} else {
memcpy_fromio(skreq->iv,
@@ -470,7 +470,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+ memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
@@ -523,7 +523,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
@@ -575,7 +575,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
+ memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
@@ -628,7 +628,7 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
@@ -694,7 +694,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
key = ctx->aes.key_enc;
for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
- tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);
+ tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);
if (ctx->aes.key_length == 24)
cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
@@ -755,7 +755,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90c9644fb8a8..90880a81c534 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -11,6 +11,7 @@
#include <crypto/aes.h>
#include <crypto/gcm.h>
+#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE 512
@@ -414,7 +415,7 @@ exit:
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
size_t len)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_info *info = &ctx->info;
u32 cnt = 0;
@@ -450,7 +451,7 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
return;
}
- mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+ mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
AES_BLOCK_SIZE);
ctr:
info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
@@ -552,13 +553,13 @@ static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
+ struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
aes->resume = mtk_aes_transfer_complete;
- return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
+ return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}
static inline struct mtk_aes_ctr_ctx *
@@ -571,7 +572,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
struct scatterlist *src, *dst;
u32 start, end, ctr, blocks;
size_t datalen;
@@ -579,11 +580,11 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Check for transfer completion. */
cctx->offset += aes->total;
- if (cctx->offset >= req->nbytes)
+ if (cctx->offset >= req->cryptlen)
return mtk_aes_transfer_complete(cryp, aes);
/* Compute data length. */
- datalen = req->nbytes - cctx->offset;
+ datalen = req->cryptlen - cctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(cctx->iv[3]);
@@ -591,7 +592,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
start = ctr;
end = start + blocks - 1;
if (end < start) {
- ctr |= 0xffffffff;
+ ctr = 0xffffffff;
datalen = AES_BLOCK_SIZE * -start;
fragmented = true;
}
@@ -620,12 +621,12 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
+ struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
- memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
cctx->offset = 0;
aes->total = 0;
aes->resume = mtk_aes_ctr_transfer;
@@ -634,10 +635,10 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
}
/* Check and set the AES key to transform state buffer */
-static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
+static int mtk_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
- struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (keylen) {
case AES_KEYSIZE_128:
@@ -651,7 +652,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
break;
default:
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -661,10 +662,10 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
return 0;
}
-static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
+static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct mtk_aes_reqctx *rctx;
struct mtk_cryp *cryp;
@@ -672,185 +673,168 @@ static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
if (!cryp)
return -ENODEV;
- rctx = ablkcipher_request_ctx(req);
+ rctx = skcipher_request_ctx(req);
rctx->mode = mode;
return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
&req->base);
}
-static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}
-static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ECB);
}
-static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}
-static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CBC);
}
-static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}
-static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CTR);
}
-static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}
-static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_OFB);
}
-static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}
-static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}
-static int mtk_aes_cra_init(struct crypto_tfm *tfm)
+static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_start;
return 0;
}
-static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
+static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
- struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_ctr_start;
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cbc_encrypt,
- .decrypt = mtk_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cbc_encrypt,
+ .decrypt = mtk_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ecb_encrypt,
- .decrypt = mtk_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ecb_encrypt,
+ .decrypt = mtk_aes_ecb_decrypt,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_ctr_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctr_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ctr_encrypt,
- .decrypt = mtk_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctr_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ctr_encrypt,
+ .decrypt = mtk_aes_ctr_decrypt,
+ .init = mtk_aes_ctr_init_tfm,
},
{
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ofb_encrypt,
- .decrypt = mtk_aes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ofb_encrypt,
+ .decrypt = mtk_aes_ofb_decrypt,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cfb_encrypt,
- .decrypt = mtk_aes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cfb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cfb_encrypt,
+ .decrypt = mtk_aes_cfb_decrypt,
+ .init = mtk_aes_init_tfm,
},
};
@@ -1259,7 +1243,7 @@ static void mtk_aes_unregister_algs(void)
crypto_unregister_aead(&aes_gcm_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
}
static int mtk_aes_register_algs(void)
@@ -1267,7 +1251,7 @@ static int mtk_aes_register_algs(void)
int err, i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1280,7 +1264,7 @@ static int mtk_aes_register_algs(void)
err_aes_algs:
for (; i--; )
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
return err;
}
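
The mediatek registration above shows the standard register-then-unwind idiom for an array of skcipher algorithms. A hedged sketch of the same pattern, assuming a hypothetical foo_algs[] array of struct skcipher_alg:

	static int foo_register_algs(void)
	{
		int err, i;

		for (i = 0; i < ARRAY_SIZE(foo_algs); i++) {
			err = crypto_register_skcipher(&foo_algs[i]);
			if (err)
				goto err_unwind;
		}
		return 0;

	err_unwind:
		/* Unregister only the entries registered before the failure. */
		while (i--)
			crypto_unregister_skcipher(&foo_algs[i]);
		return err;
	}

The crypto API also provides crypto_register_skciphers()/crypto_unregister_skciphers() helpers that implement exactly this loop; the mxs-dcp conversion below uses them directly.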
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index bf8d2197bc11..f438b425c655 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -211,11 +211,11 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
* Encryption (AES128)
*/
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
- struct ablkcipher_request *req, int init)
+ struct skcipher_request *req, int init)
{
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -274,9 +274,9 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
struct dcp *sdcp = global_sdcp;
- struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+ struct skcipher_request *req = skcipher_request_cast(arq);
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
@@ -305,7 +305,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
if (!rctx->ecb) {
/* Copy the CBC IV just past the key. */
- memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+ memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
/* CBC needs the INIT set. */
init = 1;
} else {
@@ -316,10 +316,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
- limit_hit = tlen > req->nbytes;
+ limit_hit = tlen > req->cryptlen;
if (limit_hit)
- len = req->nbytes - (tlen - len);
+ len = req->cryptlen - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -375,10 +375,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
/* Copy the IV for CBC for chaining */
if (!rctx->ecb) {
if (rctx->enc)
- memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+ memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
else
- memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+ memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
}
@@ -422,17 +422,17 @@ static int dcp_chan_thread_aes(void *data)
return 0;
}
-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (enc)
ret = crypto_skcipher_encrypt(subreq);
@@ -444,12 +444,12 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
return ret;
}
-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
struct dcp *sdcp = global_sdcp;
struct crypto_async_request *arq = &req->base;
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
if (unlikely(actx->key_len != AES_KEYSIZE_128))
@@ -468,30 +468,30 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
return ret;
}
-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 0, 1);
}
-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 1, 1);
}
-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 0, 0);
}
-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 1, 0);
}
-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
- struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
unsigned int ret;
/*
@@ -525,10 +525,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ret;
}
-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -536,13 +536,13 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
return PTR_ERR(blk);
actx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
return 0;
}
-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
- struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(actx->fallback);
}
@@ -854,54 +854,44 @@ static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
}
/* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-dcp",
- .cra_priority = 400,
- .cra_alignmask = 15,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-dcp",
+ .base.cra_priority = 400,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = mxs_dcp_aes_fallback_init,
- .cra_exit = mxs_dcp_aes_fallback_exit,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct dcp_async_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mxs_dcp_aes_setkey,
- .encrypt = mxs_dcp_aes_ecb_encrypt,
- .decrypt = mxs_dcp_aes_ecb_decrypt
- },
- },
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt,
+ .init = mxs_dcp_aes_fallback_init_tfm,
+ .exit = mxs_dcp_aes_fallback_exit_tfm,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-dcp",
- .cra_priority = 400,
- .cra_alignmask = 15,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-dcp",
+ .base.cra_priority = 400,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = mxs_dcp_aes_fallback_init,
- .cra_exit = mxs_dcp_aes_fallback_exit,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct dcp_async_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mxs_dcp_aes_setkey,
- .encrypt = mxs_dcp_aes_cbc_encrypt,
- .decrypt = mxs_dcp_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mxs_dcp_aes_fallback_init_tfm,
+ .exit = mxs_dcp_aes_fallback_exit_tfm,
},
};
@@ -1104,8 +1094,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
- ret = crypto_register_algs(dcp_aes_algs,
- ARRAY_SIZE(dcp_aes_algs));
+ ret = crypto_register_skciphers(dcp_aes_algs,
+ ARRAY_SIZE(dcp_aes_algs));
if (ret) {
/* Failed to register algorithm. */
dev_err(dev, "Failed to register AES crypto!\n");
@@ -1139,7 +1129,7 @@ err_unregister_sha1:
err_unregister_aes:
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
- crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+ crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
err_destroy_aes_thread:
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
@@ -1164,7 +1154,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
crypto_unregister_ahash(&dcp_sha1_alg);
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
- crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+ crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
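
The mxs-dcp fallback path above is the canonical sync-skcipher fallback idiom: key sizes the hardware cannot handle are bounced to a software implementation through an on-stack request. A sketch of the pattern in isolation, assuming a hypothetical foo_ctx that holds the fallback tfm allocated with CRYPTO_ALG_NEED_FALLBACK at init time:

	#include <crypto/skcipher.h>

	struct foo_ctx {
		struct crypto_sync_skcipher *fallback;
	};

	static int foo_fallback_crypt(struct skcipher_request *req, bool enc)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
		int ret;

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);

		ret = enc ? crypto_skcipher_encrypt(subreq)
			  : crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);	/* wipe the on-stack request */
		return ret;
	}

Because the fallback is a sync tfm, the sub-request lives entirely on the stack and no completion callback is needed.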
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index dc15b06e96ab..63bd565048f4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
@@ -381,8 +382,8 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
- pr_warning("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
+ pr_warn("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
err = PTR_ERR(fallback_tfm);
goto out;
}
@@ -418,16 +419,16 @@ static int n2_hmac_cra_init(struct crypto_tfm *tfm)
fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
- pr_warning("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
+ pr_warn("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
err = PTR_ERR(fallback_tfm);
goto out;
}
child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
if (IS_ERR(child_shash)) {
- pr_warning("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
+ pr_warn("Child shash '%s' could not be loaded!\n",
+ n2alg->child_alg);
err = PTR_ERR(child_shash);
goto out_free_fallback;
}
@@ -657,7 +658,7 @@ static int n2_hmac_async_digest(struct ahash_request *req)
ctx->hash_key_len);
}
-struct n2_cipher_context {
+struct n2_skcipher_context {
int key_len;
int enc_type;
union {
@@ -683,7 +684,7 @@ struct n2_crypto_chunk {
};
struct n2_request_context {
- struct ablkcipher_walk walk;
+ struct skcipher_walk walk;
struct list_head chunk_list;
struct n2_crypto_chunk chunk;
u8 temp_iv[16];
@@ -708,29 +709,29 @@ struct n2_request_context {
* is not a valid sequence.
*/
-struct n2_cipher_alg {
+struct n2_skcipher_alg {
struct list_head entry;
u8 enc_type;
- struct crypto_alg alg;
+ struct skcipher_alg skcipher;
};
-static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- return container_of(alg, struct n2_cipher_alg, alg);
+ return container_of(alg, struct n2_skcipher_alg, skcipher);
}
-struct n2_cipher_request_context {
- struct ablkcipher_walk walk;
+struct n2_skcipher_request_context {
+ struct skcipher_walk walk;
};
-static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
@@ -745,7 +746,7 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -754,15 +755,15 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(skcipher, key);
if (err)
return err;
@@ -773,15 +774,15 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(skcipher, key);
if (err)
return err;
@@ -792,12 +793,12 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
u8 *s = ctx->key.arc4;
u8 *x = s + 256;
u8 *y = x + 1;
@@ -822,7 +823,7 @@ static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
int this_len = nbytes;
@@ -830,10 +831,11 @@ static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
return this_len > (1 << 16) ? (1 << 16) : this_len;
}
-static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
+ struct n2_crypto_chunk *cp,
struct spu_queue *qp, bool encrypt)
{
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
struct cwq_initial_entry *ent;
bool in_place;
int i;
@@ -877,18 +879,17 @@ static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}
-static int n2_compute_chunks(struct ablkcipher_request *req)
+static int n2_compute_chunks(struct skcipher_request *req)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct ablkcipher_walk *walk = &rctx->walk;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct skcipher_walk *walk = &rctx->walk;
struct n2_crypto_chunk *chunk;
unsigned long dest_prev;
unsigned int tot_len;
bool prev_in_place;
int err, nbytes;
- ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
- err = ablkcipher_walk_phys(req, walk);
+ err = skcipher_walk_async(walk, req);
if (err)
return err;
@@ -910,12 +911,12 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
bool in_place;
int this_len;
- src_paddr = (page_to_phys(walk->src.page) +
- walk->src.offset);
- dest_paddr = (page_to_phys(walk->dst.page) +
- walk->dst.offset);
+ src_paddr = (page_to_phys(walk->src.phys.page) +
+ walk->src.phys.offset);
+ dest_paddr = (page_to_phys(walk->dst.phys.page) +
+ walk->dst.phys.offset);
in_place = (src_paddr == dest_paddr);
- this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+ this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
if (chunk->arr_len != 0) {
if (in_place != prev_in_place ||
@@ -946,7 +947,7 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
prev_in_place = in_place;
tot_len += this_len;
- err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+ err = skcipher_walk_done(walk, nbytes - this_len);
if (err)
break;
}
@@ -958,15 +959,14 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
return err;
}
-static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
struct n2_crypto_chunk *c, *tmp;
if (final_iv)
memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
- ablkcipher_walk_complete(&rctx->walk);
list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
list_del(&c->entry);
if (unlikely(c != &rctx->chunk))
@@ -975,10 +975,10 @@ static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
}
-static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct crypto_tfm *tfm = req->base.tfm;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
int err = n2_compute_chunks(req);
struct n2_crypto_chunk *c, *tmp;
unsigned long flags, hv_ret;
@@ -1017,20 +1017,20 @@ out:
return err;
}
-static int n2_encrypt_ecb(struct ablkcipher_request *req)
+static int n2_encrypt_ecb(struct skcipher_request *req)
{
return n2_do_ecb(req, true);
}
-static int n2_decrypt_ecb(struct ablkcipher_request *req)
+static int n2_decrypt_ecb(struct skcipher_request *req)
{
return n2_do_ecb(req, false);
}
-static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct crypto_tfm *tfm = req->base.tfm;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned long flags, hv_ret, iv_paddr;
int err = n2_compute_chunks(req);
struct n2_crypto_chunk *c, *tmp;
@@ -1107,32 +1107,32 @@ out:
return err;
}
-static int n2_encrypt_chaining(struct ablkcipher_request *req)
+static int n2_encrypt_chaining(struct skcipher_request *req)
{
return n2_do_chaining(req, true);
}
-static int n2_decrypt_chaining(struct ablkcipher_request *req)
+static int n2_decrypt_chaining(struct skcipher_request *req)
{
return n2_do_chaining(req, false);
}
-struct n2_cipher_tmpl {
+struct n2_skcipher_tmpl {
const char *name;
const char *drv_name;
u8 block_size;
u8 enc_type;
- struct ablkcipher_alg ablkcipher;
+ struct skcipher_alg skcipher;
};
-static const struct n2_cipher_tmpl cipher_tmpls[] = {
+static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
/* ARC4: only ECB is supported (chaining bits ignored) */
{ .name = "ecb(arc4)",
.drv_name = "ecb-arc4",
.block_size = 1,
.enc_type = (ENC_TYPE_ALG_RC4_STREAM |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 1,
.max_keysize = 256,
.setkey = n2_arc4_setkey,
@@ -1147,7 +1147,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = n2_des_setkey,
@@ -1160,7 +1160,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1174,7 +1174,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_CFB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = n2_des_setkey,
@@ -1189,7 +1189,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = n2_3des_setkey,
@@ -1202,7 +1202,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
@@ -1216,7 +1216,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_CFB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = n2_3des_setkey,
@@ -1230,7 +1230,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = n2_aes_setkey,
@@ -1243,7 +1243,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1257,7 +1257,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_COUNTER),
- .ablkcipher = {
+ .skcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1268,9 +1268,9 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
},
};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-static LIST_HEAD(cipher_algs);
+static LIST_HEAD(skcipher_algs);
struct n2_hash_tmpl {
const char *name;
@@ -1344,14 +1344,14 @@ static int algs_registered;
static void __n2_unregister_algs(void)
{
- struct n2_cipher_alg *cipher, *cipher_tmp;
+ struct n2_skcipher_alg *skcipher, *skcipher_tmp;
struct n2_ahash_alg *alg, *alg_tmp;
struct n2_hmac_alg *hmac, *hmac_tmp;
- list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
- crypto_unregister_alg(&cipher->alg);
- list_del(&cipher->entry);
- kfree(cipher);
+ list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&skcipher->skcipher);
+ list_del(&skcipher->entry);
+ kfree(skcipher);
}
list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
crypto_unregister_ahash(&hmac->derived.alg);
@@ -1365,44 +1365,42 @@ static void __n2_unregister_algs(void)
}
}
-static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
return 0;
}
-static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
- struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct crypto_alg *alg;
+ struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct skcipher_alg *alg;
int err;
if (!p)
return -ENOMEM;
- alg = &p->alg;
+ alg = &p->skcipher;
+ *alg = tmpl->skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->cra_priority = N2_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
- alg->cra_blocksize = tmpl->block_size;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+ alg->base.cra_priority = N2_CRA_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->base.cra_blocksize = tmpl->block_size;
p->enc_type = tmpl->enc_type;
- alg->cra_ctxsize = sizeof(struct n2_cipher_context);
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_u.ablkcipher = tmpl->ablkcipher;
- alg->cra_init = n2_cipher_cra_init;
- alg->cra_module = THIS_MODULE;
-
- list_add(&p->entry, &cipher_algs);
- err = crypto_register_alg(alg);
+ alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
+ alg->base.cra_module = THIS_MODULE;
+ alg->init = n2_skcipher_init_tfm;
+
+ list_add(&p->entry, &skcipher_algs);
+ err = crypto_register_skcipher(alg);
if (err) {
- pr_err("%s alg registration failed\n", alg->cra_name);
+ pr_err("%s alg registration failed\n", alg->base.cra_name);
list_del(&p->entry);
kfree(p);
} else {
- pr_info("%s alg registered\n", alg->cra_name);
+ pr_info("%s alg registered\n", alg->base.cra_name);
}
return err;
}
@@ -1517,7 +1515,7 @@ static int n2_register_algs(void)
}
}
for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_cipher(&cipher_tmpls[i]);
+ err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
if (err) {
__n2_unregister_algs();
goto out;
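
The n2 conversion above replaces ablkcipher_walk with skcipher_walk; n2 keeps walking physical pages via skcipher_walk_async() and the walk's src/dst.phys view, but most drivers use the simpler virtual-address variant. A hedged sketch of that canonical loop, where foo_hw_crypt() is a hypothetical hardware helper, not an API from this patch:

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>

	static int foo_walk_crypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		unsigned int nbytes;
		int err;

		err = skcipher_walk_virt(&walk, req, false);

		while ((nbytes = walk.nbytes) != 0) {
			/* The walker hands out whole blocks except possibly
			 * on the final chunk, so round down and return the
			 * remainder to skcipher_walk_done(). */
			foo_hw_crypt(walk.dst.virt.addr, walk.src.virt.addr,
				     nbytes & ~(AES_BLOCK_SIZE - 1), walk.iv);
			err = skcipher_walk_done(&walk,
						 nbytes & (AES_BLOCK_SIZE - 1));
		}
		return err;
	}

Unlike the old ablkcipher_walk, no explicit *_walk_complete() call is required, which is why n2_chunk_complete() above simply drops it.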
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index e631f9979127..92e921eceed7 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int cbc_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int cbc_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,11 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_cbc.iv);
+ rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
@@ -83,56 +82,46 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_encrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return cbc_aes_nx_crypt(req, 1);
}
-static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_decrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return cbc_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_cbc_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_aes_nx_set_key,
- .encrypt = cbc_aes_nx_encrypt,
- .decrypt = cbc_aes_nx_decrypt,
- }
+struct skcipher_alg nx_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_cbc_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_nx_set_key,
+ .encrypt = cbc_aes_nx_encrypt,
+ .decrypt = cbc_aes_nx_decrypt,
};
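
For context, this is how a kernel caller would exercise one of the "cbc(aes)" implementations registered above; a hedged sketch of a one-shot synchronous invocation, with foo_one_shot_cbc() a hypothetical wrapper:

	#include <linux/crypto.h>
	#include <crypto/aes.h>
	#include <crypto/skcipher.h>

	static int foo_one_shot_cbc(struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int len, const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* crypto_wait_req() turns the async completion into a
		 * synchronous wait for this simple caller. */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}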
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5be8f01c5da8..4c9362eebefd 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,67 +481,50 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc, req->assoclen);
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc, req->assoclen);
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
.base = {
.cra_name = "ccm(aes)",
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 191e226a11a1..6d5ce1a66f1e 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -19,11 +19,11 @@
#include "nx.h"
-static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -51,11 +51,11 @@ static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr3686_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -69,12 +69,10 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
return ctr_aes_nx_set_key(tfm, in_key, key_len);
}
-static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -83,10 +81,11 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_ctr.iv);
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
@@ -96,59 +95,51 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
u8 iv[16];
memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
- memcpy(iv + CTR_RFC3686_NONCE_SIZE,
- desc->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = iv;
-
- return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+ return ctr_aes_nx_crypt(req, iv);
}
-struct crypto_alg nx_ctr3686_aes_alg = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ctr_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ctr3686_aes_nx_set_key,
- .encrypt = ctr3686_aes_nx_crypt,
- .decrypt = ctr3686_aes_nx_crypt,
- }
+struct skcipher_alg nx_ctr3686_aes_alg = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ctr_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ctr3686_aes_nx_set_key,
+ .encrypt = ctr3686_aes_nx_crypt,
+ .decrypt = ctr3686_aes_nx_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
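
For orientation, this is what a kernel-side user of the converted transform looks like under the skcipher API. The algorithm name rfc3686(ctr(aes)) comes from the hunk above; the demo_ function, the synchronous crypto_wait plumbing, and the buffer handling are illustrative:

	#include <crypto/skcipher.h>
	#include <crypto/ctr.h>
	#include <linux/scatterlist.h>

	static int demo_ctr3686_encrypt(struct scatterlist *src,
					struct scatterlist *dst,
					unsigned int len,
					const u8 *key, unsigned int keylen,
					u8 iv[CTR_RFC3686_IV_SIZE])
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		/* run synchronously even if the backend is async */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}
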
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index c67570470c9d..77e338dc33f1 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ecb_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int ecb_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,10 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, NULL);
+ rc = nx_build_sg_lists(nx_ctx, NULL, req->dst, req->src,
+ &to_process, processed, NULL);
if (rc)
goto out;
@@ -83,7 +81,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
@@ -92,46 +90,36 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_encrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return ecb_aes_nx_crypt(req, 1);
}
-static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_decrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return ecb_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 0xf,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ecb_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ecb_aes_nx_set_key,
- .encrypt = ecb_aes_nx_encrypt,
- .decrypt = ecb_aes_nx_decrypt,
- }
+struct skcipher_alg nx_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_alignmask = 0xf,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ecb_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_nx_set_key,
+ .encrypt = ecb_aes_nx_encrypt,
+ .decrypt = ecb_aes_nx_decrypt,
};
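
The ECB hunks show the loop shape every helper in this series converges on: the request length comes from req->cryptlen and the sleep permission from req->base.flags, with no descriptor in between. Condensed into a sketch (nx_build_sg_lists() and nx_hcall_sync() follow the signatures visible in the hunks; the CSB and statistics bookkeeping is trimmed):

	static int demo_nx_crypt(struct skcipher_request *req, u8 *iv)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
		unsigned int processed = 0, to_process;
		int rc = 0;

		do {
			/* the hardware caps each pass; nx_build_sg_lists()
			 * may shrink to_process accordingly */
			to_process = req->cryptlen - processed;
			rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
					       &to_process, processed, NULL);
			if (rc)
				break;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
					   req->base.flags &
					   CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				break;

			processed += to_process;
		} while (processed < req->cryptlen);

		return rc;
	}
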
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 7d3d67871270..19c6ed5baea4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -166,8 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
return rc;
}
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
- unsigned int assoclen)
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -190,7 +189,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* Copy IV */
- memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
do {
/*
@@ -240,8 +239,7 @@ out:
return rc;
}
-static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
- int enc)
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -268,7 +266,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
len = AES_BLOCK_SIZE;
/* Encrypt the counter/IV */
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
&len, nx_ctx->ap->sglen);
if (len != AES_BLOCK_SIZE)
@@ -285,7 +283,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -313,7 +311,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
unsigned int processed = 0, to_process;
unsigned long irq_flags;
@@ -321,15 +318,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = rctx->iv;
/* initialize the counter */
- *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+ *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
if (nbytes == 0) {
if (assoclen == 0)
- rc = gcm_empty(req, &desc, enc);
+ rc = gcm_empty(req, rctx->iv, enc);
else
- rc = gmac(req, &desc, assoclen);
+ rc = gmac(req, rctx->iv, assoclen);
if (rc)
goto out;
else
@@ -358,7 +354,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
- rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+ rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
req->src, &to_process,
processed + req->assoclen,
csbcpb->cpb.aes_gcm.iv_or_cnt);
@@ -377,7 +373,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
if (rc)
goto out;
- memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_s0,
@@ -471,11 +467,6 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
.base = {
.cra_name = "gcm(aes)",
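
The counter initialization in the hunk above is the GCM J0 construction: for a 96-bit nonce, J0 is the nonce followed by a 32-bit block counter primed to 1. Assuming NX_GCM_CTR_OFFSET is 12 (its value is not visible in this diff), the request-context IV is laid out as sketched here:

	/* rctx->iv, 16 bytes, assuming NX_GCM_CTR_OFFSET == 12:
	 *   bytes  0..11  GCM nonce
	 *   bytes 12..15  32-bit block counter, primed to 1 (J0)
	 */
	static void demo_gcm_iv_init(u8 *iv)
	{
		*(u32 *)&iv[NX_GCM_CTR_OFFSET] = 1;
	}
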
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 28817880c76d..f03c238f5a31 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -243,25 +243,25 @@ static long int trim_sg_list(struct nx_sg *sg,
* scatterlists based on them.
*
* @nx_ctx: NX crypto context for the lists we're building
- * @desc: the block cipher descriptor for the operation
+ * @iv: iv data, if the algorithm requires it
* @dst: destination scatterlist
* @src: source scatterlist
* @nbytes: length of data described in the scatterlists
* @offset: number of bytes to fast-forward past at the beginning of
* scatterlists.
- * @iv: destination for the iv data, if the algorithm requires it
+ * @oiv: destination for the iv data, if the algorithm requires it
*
- * This is common code shared by all the AES algorithms. It uses the block
- * cipher walk routines to traverse input and output scatterlists, building
+ * This is common code shared by all the AES algorithms. It uses the crypto
+ * scatterlist walk routines to traverse input and output scatterlists, building
* corresponding NX scatterlists
*/
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
- struct blkcipher_desc *desc,
+ const u8 *iv,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int *nbytes,
unsigned int offset,
- u8 *iv)
+ u8 *oiv)
{
unsigned int delta = 0;
unsigned int total = *nbytes;
@@ -274,8 +274,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
- if (iv)
- memcpy(iv, desc->info, AES_BLOCK_SIZE);
+ if (oiv)
+ memcpy(oiv, iv, AES_BLOCK_SIZE);
*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
@@ -511,10 +511,10 @@ static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
return true;
}
-static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
- crypto_register_alg(alg) : 0;
+ crypto_register_skcipher(alg) : 0;
}
static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -531,10 +531,10 @@ static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
crypto_register_shash(alg) : 0;
}
-static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
if (nx_check_props(NULL, fc, mode))
- crypto_unregister_alg(alg);
+ crypto_unregister_skcipher(alg);
}
static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -573,15 +573,16 @@ static int nx_register_algs(void)
nx_driver.of.status = NX_OKAY;
- rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
if (rc)
goto out;
- rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
if (rc)
goto out_unreg_ecb;
- rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CTR);
if (rc)
goto out_unreg_cbc;
@@ -633,11 +634,11 @@ out_unreg_gcm4106:
out_unreg_gcm:
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
- nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
return rc;
}
@@ -704,21 +705,21 @@ int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
NX_MODE_AES_GCM);
}
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CTR);
}
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CBC);
}
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_ECB);
}
@@ -752,6 +753,11 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
nx_ctx->out_sg = NULL;
}
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
+}
+
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
@@ -798,10 +804,12 @@ static int nx_remove(struct vio_dev *viodev)
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_aead(&nx_gcm_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
- nx_unregister_alg(&nx_ctr3686_aes_alg,
- NX_FC_AES, NX_MODE_AES_CTR);
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
+ NX_MODE_AES_ECB);
}
return 0;
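
The register/unregister helpers keep the driver's capability gating intact across the API switch: an algorithm is registered only when nx_check_props() reports the function-code/mode pair as available from the hypervisor, and is skipped silently otherwise. The pattern, condensed (nx_driver and nx_check_props() as in the file above):

	static int demo_register(struct skcipher_alg *alg, u32 fc, u32 mode)
	{
		/* skip, without error, algs the firmware does not advertise */
		return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
		       crypto_register_skcipher(alg) : 0;
	}

	static void demo_unregister(struct skcipher_alg *alg, u32 fc, u32 mode)
	{
		if (nx_check_props(NULL, fc, mode))
			crypto_unregister_skcipher(alg);
	}
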
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 7ecca168f8c4..91c54289124a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -145,19 +145,20 @@ struct crypto_aead;
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
-int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- struct scatterlist *, struct scatterlist *, unsigned int *,
- unsigned int, u8 *);
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int *nbytes, unsigned int offset, u8 *oiv);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
unsigned int *);
@@ -175,11 +176,11 @@ void nx_debugfs_fini(struct nx_crypto_driver *);
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
-extern struct crypto_alg nx_cbc_aes_alg;
-extern struct crypto_alg nx_ecb_aes_alg;
+extern struct skcipher_alg nx_cbc_aes_alg;
+extern struct skcipher_alg nx_ecb_aes_alg;
extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr3686_aes_alg;
+extern struct skcipher_alg nx_ctr3686_aes_alg;
extern struct aead_alg nx_ccm_aes_alg;
extern struct aead_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index e0d44a5512ab..1975bcbee997 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -38,23 +38,23 @@ void nx_debugfs_init(struct nx_crypto_driver *drv)
drv->dfs_root = root;
debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.aes_ops);
+ root, &drv->stats.aes_ops.counter);
debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha256_ops);
+ root, &drv->stats.sha256_ops.counter);
debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha512_ops);
+ root, &drv->stats.sha512_ops.counter);
debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.aes_bytes);
+ root, &drv->stats.aes_bytes.counter);
debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha256_bytes);
+ root, &drv->stats.sha256_bytes.counter);
debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha512_bytes);
+ root, &drv->stats.sha512_bytes.counter);
debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.errors);
+ root, &drv->stats.errors.counter);
debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error);
+ root, &drv->stats.last_error.counter);
debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error_pid);
+ root, &drv->stats.last_error_pid.counter);
}
void
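
The debugfs change trades a pointer cast through the whole atomic for the atomic's integer member: the address is the same, but &stat.counter hands debugfs a genuine integer lvalue instead of a reinterpreted struct. A self-contained sketch with hypothetical counters:

	#include <linux/atomic.h>
	#include <linux/debugfs.h>

	static atomic_t demo_ops = ATOMIC_INIT(0);
	static atomic64_t demo_bytes = ATOMIC64_INIT(0);

	static void demo_debugfs(struct dentry *root)
	{
		/* was: (u32 *)&demo_ops — a cast over the whole atomic_t */
		debugfs_create_u32("demo_ops", 0444, root, &demo_ops.counter);
		debugfs_create_u64("demo_bytes", 0444, root,
				   &demo_bytes.counter);
	}
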
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 2f53fbb74100..a1fc03ed01f3 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -142,8 +142,8 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
- omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);
if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
rctx = aead_request_ctx(dd->aead_req);
@@ -382,11 +382,11 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -403,10 +403,10 @@ int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -414,10 +414,10 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
static int omap_aes_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
int ret;
u16 flags;
@@ -427,8 +427,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -469,8 +469,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
if (!dd)
@@ -505,26 +505,26 @@ static void omap_aes_done_task(unsigned long data)
pr_debug("exit\n");
}
-static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd;
int ret;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (req->nbytes < aes_fallback_sz) {
+ if (req->cryptlen < aes_fallback_sz) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
ret = crypto_skcipher_encrypt(subreq);
@@ -545,10 +545,10 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -571,32 +571,32 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, 0);
}
-static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CBC);
}
-static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}
-static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CTR);
}
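
The short-request fallback visible in the omap_aes_crypt() hunk above is a reusable pattern: requests below aes_fallback_sz are bounced to a software sync-skcipher via an on-stack request. Isolated into a sketch (the demo_ wrapper and its parameters are illustrative; the driver keeps the fallback tfm in ctx->fallback):

	static int demo_fallback(struct skcipher_request *req,
				 struct crypto_sync_skcipher *fallback,
				 bool enc)
	{
		int ret;

		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

		skcipher_request_set_sync_tfm(subreq, fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);

		ret = enc ? crypto_skcipher_encrypt(subreq)
			  : crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}
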
@@ -606,10 +606,10 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *req);
-static int omap_aes_cra_init(struct crypto_tfm *tfm)
+static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -618,7 +618,7 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
ctx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -657,9 +657,9 @@ static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
return 0;
}
-static void omap_aes_cra_exit(struct crypto_tfm *tfm)
+static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
crypto_free_sync_skcipher(ctx->fallback);
@@ -671,7 +671,10 @@ static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
{
struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- omap_aes_cra_exit(crypto_aead_tfm(tfm));
+ if (ctx->fallback)
+ crypto_free_sync_skcipher(ctx->fallback);
+
+ ctx->fallback = NULL;
if (ctx->ctr)
crypto_free_skcipher(ctx->ctr);
@@ -679,78 +682,69 @@ static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ecb_encrypt,
- .decrypt = omap_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ecb_encrypt,
+ .decrypt = omap_aes_ecb_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_cbc_encrypt,
- .decrypt = omap_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_cbc_encrypt,
+ .decrypt = omap_aes_cbc_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
}
};
-static struct crypto_alg algs_ctr[] = {
+static struct skcipher_alg algs_ctr[] = {
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ctr_encrypt,
- .decrypt = omap_aes_ctr_decrypt,
- }
-} ,
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+}
};
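
Structurally, the alg tables above also show where per-transform setup moved: cra_init/cra_exit become the typed .init/.exit hooks, and the per-request context size is declared with crypto_skcipher_set_reqsize() instead of writing tfm->crt_ablkcipher.reqsize. Skeleton (demo_reqctx is illustrative; omap_aes_init_tfm() additionally allocates the fallback):

	#include <crypto/internal/skcipher.h>

	struct demo_reqctx {
		unsigned long mode;	/* illustrative per-request state */
	};

	static int demo_init_tfm(struct crypto_skcipher *tfm)
	{
		crypto_skcipher_set_reqsize(tfm, sizeof(struct demo_reqctx));
		return 0;
	}
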
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
@@ -1121,7 +1115,7 @@ static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct aead_alg *aalg;
struct resource res;
int err = -ENOMEM, i, j, irq = -1;
@@ -1215,9 +1209,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1230,9 +1224,8 @@ static int omap_aes_probe(struct platform_device *pdev)
!dd->pdata->aead_algs_info->registered) {
for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- algp = &aalg->base;
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", aalg->base.cra_name);
err = crypto_register_aead(aalg);
if (err)
@@ -1257,7 +1250,7 @@ err_aead_algs:
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1290,7 +1283,7 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d4b1f87a1c9..2d3575231e31 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -112,7 +112,7 @@ struct omap_aes_reqctx {
#define OMAP_AES_CACHE_SIZE 0
struct omap_aes_algs_info {
- struct crypto_alg *algs_list;
+ struct skcipher_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -162,7 +162,7 @@ struct omap_aes_dev {
struct aead_queue aead_queue;
spinlock_t lock;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct aead_request *aead_req;
struct crypto_engine *engine;
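
With dd->req now a skcipher_request, the crypto-engine callbacks close the loop: the engine passes the request's embedded base back as a void pointer, the driver recovers the typed request with container_of, and completion goes through crypto_finalize_skcipher_request(). Sketch with illustrative names:

	#include <crypto/engine.h>
	#include <crypto/internal/skcipher.h>

	static int demo_crypt_req(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);

		/* ... program DMA from req->src, req->dst, req->cryptlen ... */
		return 0;
	}

	static void demo_done(struct crypto_engine *engine,
			      struct skcipher_request *req, int err)
	{
		/* completion side, typically from the done tasklet */
		crypto_finalize_skcipher_request(engine, req, err);
	}
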
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index b19d7e5d55ec..4c4dbc2b377e 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
@@ -98,7 +99,7 @@ struct omap_des_reqctx {
#define OMAP_DES_CACHE_SIZE 0
struct omap_des_algs_info {
- struct crypto_alg *algs_list;
+ struct skcipher_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -139,7 +140,7 @@ struct omap_des_dev {
struct tasklet_struct done_task;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct crypto_engine *engine;
/*
* total is used by PIO mode for book keeping so introduce
@@ -261,8 +262,8 @@ static int omap_des_write_ctrl(struct omap_des_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & FLAGS_CBC) && dd->req->info)
- omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ omap_des_write_n(dd, DES_REG_IV(dd, 0), (void *)dd->req->iv, 2);
if (dd->flags & FLAGS_CBC)
val |= DES_REG_CTRL_CBC;
@@ -456,8 +457,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(
+ crypto_skcipher_reqtfm(dd->req));
int err;
pr_debug("total: %d\n", dd->total);
@@ -491,11 +492,11 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
static void omap_des_finish_req(struct omap_des_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -514,10 +515,10 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
}
static int omap_des_handle_queue(struct omap_des_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -525,9 +526,9 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
static int omap_des_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
struct omap_des_reqctx *rctx;
int ret;
@@ -538,8 +539,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -568,8 +569,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
if (dd->out_sg_len < 0)
return dd->out_sg_len;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
@@ -582,9 +583,9 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
static int omap_des_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
if (!dd)
@@ -619,18 +620,18 @@ static void omap_des_done_task(unsigned long data)
pr_debug("exit\n");
}
-static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_des_reqctx *rctx = skcipher_request_ctx(req);
struct omap_des_dev *dd;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
@@ -646,15 +647,15 @@ static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int omap_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug("enter, keylen: %d\n", keylen);
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -664,15 +665,15 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int omap_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug("enter, keylen: %d\n", keylen);
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -682,22 +683,22 @@ static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_des_ecb_encrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_des_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_des_ecb_decrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, 0);
}
-static int omap_des_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_des_cbc_encrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_des_cbc_decrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_CBC);
}
@@ -707,13 +708,13 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
static int omap_des_crypt_req(struct crypto_engine *engine,
void *areq);
-static int omap_des_cra_init(struct crypto_tfm *tfm)
+static int omap_des_init_tfm(struct crypto_skcipher *tfm)
{
- struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm);
pr_debug("enter\n");
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx));
ctx->enginectx.op.prepare_request = omap_des_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -722,103 +723,78 @@ static int omap_des_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void omap_des_cra_exit(struct crypto_tfm *tfm)
-{
- pr_debug("enter\n");
-}
-
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- }
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- }
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- }
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- }
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
}
};
@@ -976,7 +952,7 @@ static int omap_des_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_des_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct resource *res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -1071,9 +1047,9 @@ static int omap_des_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1086,7 +1062,7 @@ static int omap_des_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1119,7 +1095,7 @@ static int omap_des_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
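
Beyond the mechanical renames, the DES setkey paths switch to the skcipher variants of the key checkers from crypto/internal/des.h, which keep the parity/weak-key policy the ablkcipher helpers provided. Condensed sketch with a hypothetical context:

	#include <crypto/internal/des.h>

	struct demo_des_ctx {
		u8 key[DES3_EDE_KEY_SIZE];
		unsigned int keylen;
	};

	static int demo_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int keylen)
	{
		struct demo_des_ctx *ctx = crypto_skcipher_ctx(tfm);
		int err;

		/* parity / weak-key policy checks for 3DES */
		err = verify_skcipher_des3_key(tfm, key);
		if (err)
			return err;

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
		return 0;
	}
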
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a0661250078..c5b60f50e1b5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -97,9 +98,9 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
return aes_ctx_common(crypto_tfm_ctx(tfm));
}
-static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
- return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+ return aes_ctx_common(crypto_skcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -162,6 +163,12 @@ ok:
return 0;
}
+static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
/* ====== Encryption/decryption routines ====== */
/* These are the real call to PadLock. */
@@ -338,25 +345,24 @@ static struct crypto_alg aes_alg = {
}
};
-static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -364,25 +370,24 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
return err;
}
-static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.decrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -390,48 +395,41 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
return err;
}
-static struct crypto_alg ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-padlock",
- .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_alignmask = PADLOCK_ALIGNMENT - 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = ecb_aes_encrypt,
- .decrypt = ecb_aes_decrypt,
- }
- }
+static struct skcipher_alg ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-padlock",
+ .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
};
-static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
walk.iv, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
memcpy(walk.iv, iv, AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.decrypt);
@@ -439,25 +437,24 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return err;
}
-static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -465,26 +462,20 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return err;
}
-static struct crypto_alg cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-padlock",
- .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_alignmask = PADLOCK_ALIGNMENT - 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = cbc_aes_encrypt,
- .decrypt = cbc_aes_decrypt,
- }
- }
+static struct skcipher_alg cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-padlock",
+ .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
};
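
The padlock functions above all reduce to the canonical skcipher_walk loop: the walk is initialized from the request itself (no separate dst/src/nbytes parameters), each iteration yields a virtually mapped chunk, and the sub-block tail is handed back to skcipher_walk_done(). Skeleton, with demo_xcrypt() standing in for the PadLock primitive:

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>

	static void demo_xcrypt(const u8 *in, u8 *out, unsigned int blocks);

	static int demo_ecb_encrypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		unsigned int nbytes;
		int err;

		err = skcipher_walk_virt(&walk, req, false);

		while ((nbytes = walk.nbytes) != 0) {
			demo_xcrypt(walk.src.virt.addr, walk.dst.virt.addr,
				    nbytes / AES_BLOCK_SIZE);
			/* return the sub-block tail; the walk advances */
			err = skcipher_walk_done(&walk,
						 nbytes & (AES_BLOCK_SIZE - 1));
		}

		return err;
	}
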
static const struct x86_cpu_id padlock_cpu_id[] = {
@@ -506,13 +497,13 @@ static int __init padlock_init(void)
return -ENODEV;
}
- if ((ret = crypto_register_alg(&aes_alg)))
+ if ((ret = crypto_register_alg(&aes_alg)) != 0)
goto aes_err;
- if ((ret = crypto_register_alg(&ecb_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
goto ecb_aes_err;
- if ((ret = crypto_register_alg(&cbc_aes_alg)))
+ if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
goto cbc_aes_err;
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
@@ -527,7 +518,7 @@ out:
return ret;
cbc_aes_err:
- crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
crypto_unregister_alg(&aes_alg);
aes_err:
@@ -537,8 +528,8 @@ aes_err:
static void __exit padlock_fini(void)
{
- crypto_unregister_alg(&cbc_aes_alg);
- crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_skcipher(&cbc_aes_alg);
+ crypto_unregister_skcipher(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
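
padlock_init() ends up mixing both registration families: the single-block AES cipher remains a crypto_alg while the ECB/CBC modes are skciphers, so the error-unwind ladder has to mirror the split. Condensed (alg names as in the file above):

	static int __init demo_padlock_init(void)
	{
		int ret;

		ret = crypto_register_alg(&aes_alg);	/* bare block cipher */
		if (ret)
			return ret;

		ret = crypto_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_aes;

		ret = crypto_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_ecb;

		return 0;

	out_ecb:
		crypto_unregister_skcipher(&ecb_aes_alg);
	out_aes:
		crypto_unregister_alg(&aes_alg);
		return ret;
	}
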
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 3cbefb41b099..29da449b3e9e 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -134,7 +134,7 @@ struct spacc_engine {
struct spacc_alg {
unsigned long ctrl_default;
unsigned long type;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
struct spacc_engine *engine;
struct list_head entry;
int key_offs;
@@ -173,7 +173,7 @@ struct spacc_aead_ctx {
static int spacc_ablk_submit(struct spacc_req *req);
-static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
+static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
{
return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}
@@ -733,13 +733,13 @@ static void spacc_aead_cra_exit(struct crypto_aead *tfm)
* Set the DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
-static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -753,13 +753,13 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the 3DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
-static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -773,15 +773,15 @@ static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.
*/
-static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -822,15 +822,15 @@ sw_setkey_failed:
return err;
}
-static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
+static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -844,12 +844,12 @@ out:
static int spacc_ablk_need_fallback(struct spacc_req *req)
{
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
struct spacc_ablk_ctx *ctx;
- struct crypto_tfm *tfm = req->req->tfm;
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- ctx = crypto_tfm_ctx(tfm);
+ ctx = crypto_skcipher_ctx(tfm);
return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
SPA_CTRL_CIPH_ALG_AES &&
@@ -859,39 +859,39 @@ static int spacc_ablk_need_fallback(struct spacc_req *req)
static void spacc_ablk_complete(struct spacc_req *req)
{
- struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
if (ablk_req->src != ablk_req->dst) {
spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
- ablk_req->nbytes, DMA_TO_DEVICE);
+ ablk_req->cryptlen, DMA_TO_DEVICE);
spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->nbytes, DMA_FROM_DEVICE);
+ ablk_req->cryptlen, DMA_FROM_DEVICE);
} else
spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->nbytes, DMA_BIDIRECTIONAL);
+ ablk_req->cryptlen, DMA_BIDIRECTIONAL);
req->req->complete(req->req, req->result);
}
static int spacc_ablk_submit(struct spacc_req *req)
{
- struct crypto_tfm *tfm = req->req->tfm;
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
struct spacc_engine *engine = ctx->generic.engine;
u32 ctrl;
req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
- ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
+ ctx->key_len, ablk_req->iv, alg->ivsize,
NULL, 0);
writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
- writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
@@ -907,11 +907,11 @@ static int spacc_ablk_submit(struct spacc_req *req)
return -EINPROGRESS;
}
-static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
+static int spacc_ablk_do_fallback(struct skcipher_request *req,
unsigned alg_type, bool is_encrypt)
{
struct crypto_tfm *old_tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
@@ -924,7 +924,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
@@ -932,12 +932,13 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
return err;
}
-static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
+static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
bool is_encrypt)
{
- struct crypto_alg *alg = req->base.tfm->__crt_alg;
- struct spacc_engine *engine = to_spacc_alg(alg)->engine;
- struct spacc_req *dev_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
+ struct spacc_req *dev_req = skcipher_request_ctx(req);
unsigned long flags;
int err = -ENOMEM;
@@ -956,17 +957,17 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
*/
if (req->src != req->dst) {
dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
- req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
+ req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
if (!dev_req->src_ddt)
goto out;
dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
+ req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
if (!dev_req->dst_ddt)
goto out_free_src;
} else {
dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
+ req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
if (!dev_req->dst_ddt)
goto out;
@@ -999,65 +1000,65 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
out_free_ddts:
spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
- req->nbytes, req->src == req->dst ?
+ req->cryptlen, req->src == req->dst ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
if (req->src != req->dst)
spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
- req->src, req->nbytes, DMA_TO_DEVICE);
+ req->src, req->cryptlen, DMA_TO_DEVICE);
out:
return err;
}
-static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
+static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
{
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
struct spacc_engine *engine = spacc_alg->engine;
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher = crypto_alloc_sync_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->cra_name);
+ alg->base.cra_name);
return PTR_ERR(ctx->sw_cipher);
}
}
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req));
return 0;
}
-static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
+static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->sw_cipher);
}
-static int spacc_ablk_encrypt(struct ablkcipher_request *req)
+static int spacc_ablk_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- return spacc_ablk_setup(req, alg->type, 1);
+ return spacc_ablk_setup(req, spacc_alg->type, 1);
}
-static int spacc_ablk_decrypt(struct ablkcipher_request *req)
+static int spacc_ablk_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- return spacc_ablk_setup(req, alg->type, 0);
+ return spacc_ablk_setup(req, spacc_alg->type, 0);
}
static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
@@ -1233,27 +1234,24 @@ static struct spacc_alg ipsec_engine_algs[] = {
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1261,25 +1259,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = AES_MAX_KEY_SIZE,
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1287,26 +1283,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
.alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1314,25 +1307,22 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1340,26 +1330,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
.alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-ede-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des3_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1367,25 +1354,22 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-ede-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des3_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
};
@@ -1581,25 +1565,23 @@ static struct spacc_alg l2_engine_algs[] = {
.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
SPA_CTRL_CIPH_MODE_F8,
.alg = {
- .cra_name = "f8(kasumi)",
- .cra_driver_name = "f8-kasumi-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 8,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_kasumi_f8_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = 16,
- .max_keysize = 16,
- .ivsize = 8,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "f8(kasumi)",
+ .base.cra_driver_name = "f8-kasumi-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 8,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_kasumi_f8_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = 16,
+ .max_keysize = 16,
+ .ivsize = 8,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
};
@@ -1721,7 +1703,7 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
- err = crypto_register_alg(&engine->algs[i].alg);
+ err = crypto_register_skcipher(&engine->algs[i].alg);
if (!err) {
list_add_tail(&engine->algs[i].entry,
&engine->registered_algs);
@@ -1729,10 +1711,10 @@ static int spacc_probe(struct platform_device *pdev)
}
if (err)
dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->algs[i].alg.cra_name);
+ engine->algs[i].alg.base.cra_name);
else
dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->algs[i].alg.cra_name);
+ engine->algs[i].alg.base.cra_name);
}
INIT_LIST_HEAD(&engine->registered_aeads);
@@ -1781,7 +1763,7 @@ static int spacc_remove(struct platform_device *pdev)
list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
list_del(&alg->entry);
- crypto_unregister_alg(&alg->alg);
+ crypto_unregister_skcipher(&alg->alg);
}
clk_disable_unprepare(engine->clk);
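
Note: the ipsec_engine_algs and l2_engine_algs rewrites above all follow the same shape: generic fields move under .base, the skcipher-specific ops become first-class members, and the old .cra_type / CRYPTO_ALG_TYPE_ABLKCIPHER markers are dropped. A hedged sketch with placeholder names:

#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

struct example_ctx { u8 key[AES_MAX_KEY_SIZE]; unsigned int keylen; };

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;	/* key handling elided */
}

static int example_encrypt(struct skcipher_request *req)
{
	return -ENOSYS;	/* hardware path elided */
}

static int example_decrypt(struct skcipher_request *req)
{
	return -ENOSYS;	/* hardware path elided */
}

static struct skcipher_alg example_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-example",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct example_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= example_setkey,
	.encrypt		= example_encrypt,
	.decrypt		= example_decrypt,
};

static int __init example_mod_init(void)
{
	/* crypto_register_alg() becomes crypto_register_skcipher() */
	return crypto_register_skcipher(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_skcipher(&example_alg);
}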
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 6ab7e5a88756..2006322345de 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -3,7 +3,7 @@ config CRYPTO_DEV_QAT
tristate
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
select CRYPTO_HMAC
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index b50eb55f8f57..35bca76b640f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -48,6 +48,7 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
@@ -122,7 +123,7 @@ struct qat_alg_aead_ctx {
char opad[SHA512_BLOCK_SIZE];
};
-struct qat_alg_ablkcipher_ctx {
+struct qat_alg_skcipher_ctx {
struct icp_qat_hw_cipher_algo_blk *enc_cd;
struct icp_qat_hw_cipher_algo_blk *dec_cd;
dma_addr_t enc_cd_paddr;
@@ -130,7 +131,7 @@ struct qat_alg_ablkcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -463,10 +464,10 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
return 0;
}
-static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
@@ -485,28 +486,28 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
-static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
-static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
@@ -577,21 +578,21 @@ error:
return -EFAULT;
}
-static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
- const uint8_t *key,
- unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+ const uint8_t *key,
+ unsigned int keylen,
+ int mode)
{
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
goto bad_key;
- qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
- qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -832,12 +833,12 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
areq->base.complete(&areq->base, res);
}
-static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
- struct qat_crypto_request *qat_req)
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
{
- struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
- struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+ struct skcipher_request *sreq = qat_req->skcipher_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -846,11 +847,11 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
- memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
+ memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
qat_req->iv_paddr);
- areq->base.complete(&areq->base, res);
+ sreq->base.complete(&sreq->base, res);
}
void qat_alg_callback(void *resp)
@@ -949,21 +950,21 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
@@ -990,7 +991,7 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
goto out_free_enc;
}
- ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
if (ret)
goto out_free_all;
@@ -1012,51 +1013,51 @@ out_free_instance:
return ret;
}
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->enc_cd)
- return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
else
- return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CBC_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CTR_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CTR_MODE);
}
-static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_XTS_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_XTS_MODE);
}
-static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
@@ -1073,17 +1074,17 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -1097,26 +1098,26 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_encrypt(req);
+ return qat_alg_skcipher_encrypt(req);
}
-static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
@@ -1133,17 +1134,17 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -1157,12 +1158,12 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_decrypt(req);
+ return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
@@ -1218,18 +1219,18 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
qat_crypto_put_instance(inst);
}
-static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
ctx->tfm = tfm;
return 0;
}
-static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
@@ -1308,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { {
.maxauthsize = SHA512_DIGEST_SIZE,
} };
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "qat_aes_cbc",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_cbc_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+static struct skcipher_alg qat_skciphers[] = { {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "qat_aes_cbc",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_cbc_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "qat_aes_ctr",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_ctr_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "qat_aes_ctr",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_ctr_setkey,
+ .decrypt = qat_alg_skcipher_decrypt,
+ .encrypt = qat_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "qat_aes_xts",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_xts_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "qat_aes_xts",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_xts_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
} };
int qat_algs_register(void)
{
- int ret = 0, i;
+ int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs != 1)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_skciphers(qat_skciphers,
+ ARRAY_SIZE(qat_skciphers));
if (ret)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
- qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
-
ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
if (ret)
goto unreg_algs;
@@ -1403,7 +1387,7 @@ unlock:
return ret;
unreg_algs:
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
goto unlock;
}
@@ -1414,7 +1398,7 @@ void qat_algs_unregister(void)
goto unlock;
crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
unlock:
mutex_unlock(&algs_lock);
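
Note: the qat hunks above also show the two per-request changes common to every driver in this series: reqsize is now set via a helper in the tfm init hook, and the request fields nbytes/info are renamed cryptlen/iv. A minimal sketch under assumed names:

#include <crypto/internal/skcipher.h>

struct example_reqctx {
	unsigned int processed;	/* arbitrary per-request driver state */
};

static int example_init_tfm(struct crypto_skcipher *tfm)
{
	/* replaces: tfm->crt_ablkcipher.reqsize = sizeof(...); */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct example_reqctx));
	return 0;
}

static int example_encrypt(struct skcipher_request *req)
{
	struct example_reqctx *rctx = skcipher_request_ctx(req);

	/* req->cryptlen and req->iv replace the old nbytes / info */
	rctx->processed = req->cryptlen;
	return 0;
}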
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index c77a80020cde..300bb919a33a 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -79,11 +79,11 @@ struct qat_crypto_request {
struct icp_qat_fw_la_bulk_req req;
union {
struct qat_alg_aead_ctx *aead_ctx;
- struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *skcipher_ctx;
};
union {
struct aead_request *aead_req;
- struct ablkcipher_request *ablkcipher_req;
+ struct skcipher_request *skcipher_req;
};
struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
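
Note: the qat_crypto_request change below keeps one driver request serving both the AEAD and skcipher front ends through unions, selected by whichever callback was installed. A sketch of that layout, names hypothetical:

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>

struct example_request {
	union {
		struct aead_request *aead_req;
		struct skcipher_request *skcipher_req;
	};
	void (*cb)(struct example_request *req, int err);
};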
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 19a7f899acff..8caa04e1ec43 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
common.o \
dma.o \
sha.o \
- ablkcipher.o
+ skcipher.o
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5cab8f0706a8..7770660bc853 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
};
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
}
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
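
Note: to_cipher_tmpl() above recovers the driver's template from the embedded algorithm with container_of(), replacing the old tfm->__crt_alg dereference. A self-contained sketch of the pattern, assuming a placeholder template type:

#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>

struct example_tmpl {
	unsigned long flags;
	struct skcipher_alg alg;	/* embedded, as in qce_alg_template */
};

static inline struct example_tmpl *to_example_tmpl(struct crypto_skcipher *tfm)
{
	/* crypto_skcipher_alg() replaces reaching into tfm->__crt_alg */
	return container_of(crypto_skcipher_alg(tfm), struct example_tmpl, alg);
}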
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 3fb510164326..da1188abc9ba 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -304,13 +304,13 @@ go_proc:
return 0;
}
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
default:
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 47fb523357ac..282d4317470d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -79,7 +80,7 @@ struct qce_alg_template {
unsigned long alg_flags;
const u32 *std_iv;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 08d4ce3bfddf..0a44a6eeacf5 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,7 +22,7 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
- &ablkcipher_ops,
+ &skcipher_ops,
&ahash_ops,
};
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 0984a719144d..40a59214d2e1 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -12,11 +12,11 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
- dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+ dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
return PTR_ERR(dma->txchan);
- dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+ dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
ret = PTR_ERR(dma->rxchan);
goto error_rx;
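
Note: dma_request_chan() (which replaces the dma_request_slave_channel_reason() macro above) returns an ERR_PTR on failure, including -EPROBE_DEFER, so the existing IS_ERR()/PTR_ERR() handling carries over unchanged. A sketch of the idiom with hypothetical names:

#include <linux/dmaengine.h>

static int example_request_channels(struct device *dev,
				    struct dma_chan **tx,
				    struct dma_chan **rx)
{
	*tx = dma_request_chan(dev, "tx");
	if (IS_ERR(*tx))
		return PTR_ERR(*tx);

	*rx = dma_request_chan(dev, "rx");
	if (IS_ERR(*rx)) {
		/* release the first channel on partial failure */
		dma_release_channel(*tx);
		return PTR_ERR(*rx);
	}
	return 0;
}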
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 0853e74583ad..95ab16fc8fd6 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -495,7 +495,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
- base->cra_flags = CRYPTO_ALG_ASYNC;
+ base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/skcipher.c
index 7a98bf5cc967..fee07323f8f9 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -12,14 +12,14 @@
#include "cipher.h"
-static LIST_HEAD(ablkcipher_algs);
+static LIST_HEAD(skcipher_algs);
-static void qce_ablkcipher_done(void *data)
+static void qce_skcipher_done(void *data)
{
struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
u32 status;
@@ -32,7 +32,7 @@ static void qce_ablkcipher_done(void *data)
error = qce_dma_terminate_all(&qce->dma);
if (error)
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
error);
if (diff_dst)
@@ -43,18 +43,18 @@ static void qce_ablkcipher_done(void *data)
error = qce_check_status(qce, &status);
if (error < 0)
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
qce->async_req_done(tmpl->qce, error);
}
static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
struct scatterlist *sg;
@@ -62,17 +62,17 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
gfp_t gfp;
int ret;
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (diff_dst)
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
else
rctx->dst_nents = rctx->src_nents;
if (rctx->src_nents < 0) {
@@ -125,13 +125,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
+ qce_skcipher_done, async_req);
if (ret)
goto error_unmap_src;
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
if (ret)
goto error_terminate;
@@ -149,10 +149,10 @@ error_free:
return ret;
}
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
@@ -177,13 +177,13 @@ fallback:
return ret;
}
-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- err = verify_ablkcipher_des_key(ablk, key);
+ err = verify_skcipher_des_key(ablk, key);
if (err)
return err;
@@ -192,13 +192,13 @@ static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
return 0;
}
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- err = verify_ablkcipher_des3_key(ablk, key);
+ err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
@@ -207,12 +207,11 @@ static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
return 0;
}
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
int ret;
@@ -227,7 +226,7 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
@@ -237,36 +236,36 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qce_skcipher_encrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 1);
+ return qce_skcipher_crypt(req, 1);
}
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qce_skcipher_decrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 0);
+ return qce_skcipher_crypt(req, 0);
}
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
-struct qce_ablkcipher_def {
+struct qce_skcipher_def {
unsigned long flags;
const char *name;
const char *drv_name;
@@ -276,7 +275,7 @@ struct qce_ablkcipher_def {
unsigned int max_keysize;
};
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
+static const struct qce_skcipher_def skcipher_def[] = {
{
.flags = QCE_ALG_AES | QCE_MODE_ECB,
.name = "ecb(aes)",
@@ -351,90 +350,91 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = {
},
};
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
struct qce_device *qce)
{
struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg;
int ret;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl)
return -ENOMEM;
- alg = &tmpl->alg.crypto;
+ alg = &tmpl->alg.skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
- IS_DES(def->flags) ? qce_des_setkey :
- qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+ IS_DES(def->flags) ? qce_des_setkey :
+ qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = qce_skcipher_init;
+ alg->exit = qce_skcipher_exit;
INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
tmpl->alg_flags = def->flags;
tmpl->qce = qce;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
kfree(tmpl);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
return ret;
}
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
return 0;
}
-static void qce_ablkcipher_unregister(struct qce_device *qce)
+static void qce_skcipher_unregister(struct qce_device *qce)
{
struct qce_alg_template *tmpl, *n;
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
list_del(&tmpl->entry);
kfree(tmpl);
}
}
-static int qce_ablkcipher_register(struct qce_device *qce)
+static int qce_skcipher_register(struct qce_device *qce)
{
int ret, i;
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
if (ret)
goto err;
}
return 0;
err:
- qce_ablkcipher_unregister(qce);
+ qce_skcipher_unregister(qce);
return ret;
}
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,
};
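For reference, a converted driver is reached through the skcipher request API rather than the removed ablkcipher one. Below is a minimal, illustrative sketch of a kernel-side caller (not part of the patch); the name "cbc(aes)" resolves to whichever registered implementation carries the highest cra_priority:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Illustrative only: exercise a converted driver through the skcipher API. */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Hardware drivers complete asynchronously; wait for the result. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}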
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
index 6e23764e6c8a..785277aca71e 100644
--- a/drivers/crypto/rockchip/Makefile
+++ b/drivers/crypto/rockchip/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
- rk3288_crypto_ablkcipher.o \
+ rk3288_crypto_skcipher.o \
rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index e5714ef24bf2..f385587f99af 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -264,8 +264,8 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- err = crypto_register_alg(
- &rk_cipher_algs[i]->alg.crypto);
+ err = crypto_register_skcipher(
+ &rk_cipher_algs[i]->alg.skcipher);
else
err = crypto_register_ahash(
&rk_cipher_algs[i]->alg.hash);
@@ -277,7 +277,7 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
- crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+ crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
}
@@ -290,7 +290,7 @@ static void rk_crypto_unregister(void)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+ crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 18e2b3f29336..2b49c677afdb 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
@@ -256,7 +257,7 @@ enum alg_type {
struct rk_crypto_tmp {
struct rk_crypto_info *dev;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
} alg;
enum alg_type type;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
deleted file mode 100644
index d0f4b2d18059..000000000000
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Crypto acceleration support for Rockchip RK3288
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
- *
- * Author: Zain Wang <zain.wang@rock-chips.com>
- *
- * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
- */
-#include "rk3288_crypto.h"
-
-#define RK_CRYPTO_DEC BIT(0)
-
-static void rk_crypto_complete(struct crypto_async_request *base, int err)
-{
- if (base->complete)
- base->complete(base, err);
-}
-
-static int rk_handle_req(struct rk_crypto_info *dev,
- struct ablkcipher_request *req)
-{
- if (!IS_ALIGNED(req->nbytes, dev->align_size))
- return -EINVAL;
- else
- return dev->enqueue(dev, &req->base);
-}
-
-static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
- return 0;
-}
-
-static int rk_des_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- int err;
-
- err = verify_ablkcipher_des_key(cipher, key);
- if (err)
- return err;
-
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
-}
-
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- int err;
-
- err = verify_ablkcipher_des3_key(cipher, key);
- if (err)
- return err;
-
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
-}
-
-static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_ECB_MODE;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_CBC_MODE;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = 0;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
- RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- u32 ivsize, block, conf_reg = 0;
-
- block = crypto_tfm_alg_blocksize(tfm);
- ivsize = crypto_ablkcipher_ivsize(cipher);
-
- if (block == DES_BLOCK_SIZE) {
- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
- RK_CRYPTO_TDES_BYTESWAP_KEY |
- RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
- conf_reg = RK_CRYPTO_DESSEL;
- } else {
- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
- RK_CRYPTO_AES_KEY_CHANGE |
- RK_CRYPTO_AES_BYTESWAP_KEY |
- RK_CRYPTO_AES_BYTESWAP_IV;
- if (ctx->keylen == AES_KEYSIZE_192)
- ctx->mode |= RK_CRYPTO_AES_192BIT_key;
- else if (ctx->keylen == AES_KEYSIZE_256)
- ctx->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
- }
- conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
- RK_CRYPTO_BYTESWAP_BRFIFO;
- CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
- CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
- RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
-}
-
-static void crypto_dma_start(struct rk_crypto_info *dev)
-{
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
- CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
- _SBF(RK_CRYPTO_BLOCK_START, 16));
-}
-
-static int rk_set_data_start(struct rk_crypto_info *dev)
-{
- int err;
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
- dev->sg_src->offset + dev->sg_src->length - ivsize;
-
- /* Store the iv that need to be updated in chain mode.
- * And update the IV buffer to contain the next IV for decryption mode.
- */
- if (ctx->mode & RK_CRYPTO_DEC) {
- memcpy(ctx->iv, src_last_blk, ivsize);
- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
- ivsize, dev->total - ivsize);
- }
-
- err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
- if (!err)
- crypto_dma_start(dev);
- return err;
-}
-
-static int rk_ablk_start(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- unsigned long flags;
- int err = 0;
-
- dev->left_bytes = req->nbytes;
- dev->total = req->nbytes;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->dst_nents = sg_nents(req->dst);
- dev->aligned = 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- rk_ablk_hw_init(dev);
- err = rk_set_data_start(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
- return err;
-}
-
-static void rk_iv_copyback(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
-
- /* Update the IV buffer to contain the next IV for encryption mode. */
- if (!(ctx->mode & RK_CRYPTO_DEC)) {
- if (dev->aligned) {
- memcpy(req->info, sg_virt(dev->sg_dst) +
- dev->sg_dst->length - ivsize, ivsize);
- } else {
- memcpy(req->info, dev->addr_vir +
- dev->count - ivsize, ivsize);
- }
- }
-}
-
-static void rk_update_iv(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- u8 *new_iv = NULL;
-
- if (ctx->mode & RK_CRYPTO_DEC) {
- new_iv = ctx->iv;
- } else {
- new_iv = page_address(sg_page(dev->sg_dst)) +
- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
- }
-
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
-}
-
-/* return:
- * true some err was occurred
- * fault no err, continue
- */
-static int rk_ablk_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
-
- dev->unload_data(dev);
- if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
- dev->addr_vir, dev->count,
- dev->total - dev->left_bytes -
- dev->count)) {
- err = -EINVAL;
- goto out_rx;
- }
- }
- if (dev->left_bytes) {
- rk_update_iv(dev);
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_err(dev->dev, "[%s:%d] Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
- dev->sg_dst = sg_next(dev->sg_dst);
- }
- err = rk_set_data_start(dev);
- } else {
- rk_iv_copyback(dev);
- /* here show the calculation is over without any err */
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
- }
-out_rx:
- return err;
-}
-
-static int rk_ablk_cra_init(struct crypto_tfm *tfm)
-{
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct rk_crypto_tmp *algt;
-
- algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
-
- ctx->dev = algt->dev;
- ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
- ctx->dev->start = rk_ablk_start;
- ctx->dev->update = rk_ablk_rx;
- ctx->dev->complete = rk_crypto_complete;
- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
-
- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
-}
-
-static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
-{
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- free_page((unsigned long)ctx->dev->addr_vir);
- ctx->dev->disable_clk(ctx->dev);
-}
-
-struct rk_crypto_tmp rk_ecb_aes_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = rk_aes_setkey,
- .encrypt = rk_aes_ecb_encrypt,
- .decrypt = rk_aes_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_aes_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = rk_aes_setkey,
- .encrypt = rk_aes_cbc_encrypt,
- .decrypt = rk_aes_cbc_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_ecb_des_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = rk_des_setkey,
- .encrypt = rk_des_ecb_encrypt,
- .decrypt = rk_des_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_des_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_des_setkey,
- .encrypt = rk_des_cbc_encrypt,
- .decrypt = rk_des_cbc_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-ede-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
- .encrypt = rk_des3_ede_ecb_encrypt,
- .decrypt = rk_des3_ede_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-ede-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
- .encrypt = rk_des3_ede_cbc_encrypt,
- .decrypt = rk_des3_ede_cbc_decrypt,
- }
- }
-};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
new file mode 100644
index 000000000000..ca4de4ddfe1f
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+#define RK_CRYPTO_DEC BIT(0)
+
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
+{
+ if (base->complete)
+ base->complete(base, err);
+}
+
+static int rk_handle_req(struct rk_crypto_info *dev,
+ struct skcipher_request *req)
+{
+ if (!IS_ALIGNED(req->cryptlen, dev->align_size))
+ return -EINVAL;
+ else
+ return dev->enqueue(dev, &req->base);
+}
+
+static int rk_aes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+ return 0;
+}
+
+static int rk_des_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des3_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = 0;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ u32 ivsize, block, conf_reg = 0;
+
+ block = crypto_tfm_alg_blocksize(tfm);
+ ivsize = crypto_skcipher_ivsize(cipher);
+
+ if (block == DES_BLOCK_SIZE) {
+ ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ RK_CRYPTO_TDES_BYTESWAP_KEY |
+ RK_CRYPTO_TDES_BYTESWAP_IV;
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
+ conf_reg = RK_CRYPTO_DESSEL;
+ } else {
+ ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ RK_CRYPTO_AES_KEY_CHANGE |
+ RK_CRYPTO_AES_BYTESWAP_KEY |
+ RK_CRYPTO_AES_BYTESWAP_IV;
+ if (ctx->keylen == AES_KEYSIZE_192)
+ ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ else if (ctx->keylen == AES_KEYSIZE_256)
+ ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
+ }
+ conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO;
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
+ RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+}
+
+static void crypto_dma_start(struct rk_crypto_info *dev)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+ _SBF(RK_CRYPTO_BLOCK_START, 16));
+}
+
+static int rk_set_data_start(struct rk_crypto_info *dev)
+{
+ int err;
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+ u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+ dev->sg_src->offset + dev->sg_src->length - ivsize;
+
+ /* Store the IV that needs to be updated in chain mode, and
+ * update the IV buffer to contain the next IV for decryption mode.
+ */
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ memcpy(ctx->iv, src_last_blk, ivsize);
+ sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
+ ivsize, dev->total - ivsize);
+ }
+
+ err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+ if (!err)
+ crypto_dma_start(dev);
+ return err;
+}
+
+static int rk_ablk_start(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ unsigned long flags;
+ int err = 0;
+
+ dev->left_bytes = req->cryptlen;
+ dev->total = req->cryptlen;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->src_nents = sg_nents(req->src);
+ dev->sg_dst = req->dst;
+ dev->dst_nents = sg_nents(req->dst);
+ dev->aligned = 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ rk_ablk_hw_init(dev);
+ err = rk_set_data_start(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return err;
+}
+
+static void rk_iv_copyback(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+
+ /* Update the IV buffer to contain the next IV for encryption mode. */
+ if (!(ctx->mode & RK_CRYPTO_DEC)) {
+ if (dev->aligned) {
+ memcpy(req->iv, sg_virt(dev->sg_dst) +
+ dev->sg_dst->length - ivsize, ivsize);
+ } else {
+ memcpy(req->iv, dev->addr_vir +
+ dev->count - ivsize, ivsize);
+ }
+ }
+}
+
+static void rk_update_iv(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+ u8 *new_iv = NULL;
+
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ new_iv = ctx->iv;
+ } else {
+ new_iv = page_address(sg_page(dev->sg_dst)) +
+ dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+ }
+
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+ else if (ivsize == AES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+}
+
+/* return:
+ * nonzero: an error occurred
+ * zero: no error, continue
+ */
+static int rk_ablk_rx(struct rk_crypto_info *dev)
+{
+ int err = 0;
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+
+ dev->unload_data(dev);
+ if (!dev->aligned) {
+ if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
+ dev->addr_vir, dev->count,
+ dev->total - dev->left_bytes -
+ dev->count)) {
+ err = -EINVAL;
+ goto out_rx;
+ }
+ }
+ if (dev->left_bytes) {
+ rk_update_iv(dev);
+ if (dev->aligned) {
+ if (sg_is_last(dev->sg_src)) {
+ dev_err(dev->dev, "[%s:%d] Lack of data\n",
+ __func__, __LINE__);
+ err = -ENOMEM;
+ goto out_rx;
+ }
+ dev->sg_src = sg_next(dev->sg_src);
+ dev->sg_dst = sg_next(dev->sg_dst);
+ }
+ err = rk_set_data_start(dev);
+ } else {
+ rk_iv_copyback(dev);
+ /* the calculation finished without any error */
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
+ }
+out_rx:
+ return err;
+}
+
+static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt;
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+ ctx->dev = algt->dev;
+ ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
+ ctx->dev->start = rk_ablk_start;
+ ctx->dev->update = rk_ablk_rx;
+ ctx->dev->complete = rk_crypto_complete;
+ ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+
+ return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+}
+
+static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ free_page((unsigned long)ctx->dev->addr_vir);
+ ctx->dev->disable_clk(ctx->dev);
+}
+
+struct rk_crypto_tmp rk_ecb_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_ecb_encrypt,
+ .decrypt = rk_aes_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_cbc_encrypt,
+ .decrypt = rk_aes_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_ecb_encrypt,
+ .decrypt = rk_des_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_cbc_encrypt,
+ .decrypt = rk_des_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_ecb_encrypt,
+ .decrypt = rk_des3_ede_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_cbc_encrypt,
+ .decrypt = rk_des3_ede_cbc_decrypt,
+ }
+};
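The rewrite above is the same mechanical mapping applied throughout this series. In brief (a rough correspondence, not an exhaustive list):

/*
 * ablkcipher -> skcipher conversion, in brief:
 *
 *   struct ablkcipher_request           -> struct skcipher_request
 *   req->nbytes                         -> req->cryptlen
 *   req->info                           -> req->iv
 *   ablkcipher_request_cast(areq)       -> skcipher_request_cast(areq)
 *   crypto_ablkcipher_reqtfm(req)       -> crypto_skcipher_reqtfm(req)
 *   crypto_ablkcipher_ctx(tfm)          -> crypto_skcipher_ctx(tfm)
 *   tfm->crt_ablkcipher.reqsize = n     -> crypto_skcipher_set_reqsize(tfm, n)
 *   cra_init/cra_exit on crypto_tfm     -> .init/.exit on crypto_skcipher
 *   cra_u.ablkcipher fields             -> top-level skcipher_alg fields,
 *                                          remaining cra_* fields move to .base
 *   crypto_register_alg()               -> crypto_register_skcipher()
 */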
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 010f1bb20dad..d66e20a2f54c 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -303,7 +303,7 @@ struct s5p_aes_dev {
void __iomem *aes_ioaddr;
int irq_fc;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct s5p_aes_ctx *ctx;
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
@@ -456,7 +456,7 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
if (!*sg)
return;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
free_pages((unsigned long)sg_virt(*sg), get_order(len));
kfree(*sg);
@@ -478,27 +478,27 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
- struct ablkcipher_request *req = dev->req;
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = dev->req;
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
if (dev->sg_dst_cpy) {
dev_dbg(dev->dev,
"Copying %d bytes of output data back to original place\n",
- dev->req->nbytes);
+ dev->req->cryptlen);
s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
- dev->req->nbytes, 1);
+ dev->req->cryptlen, 1);
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
if (reqctx->mode & FLAGS_AES_CBC)
- memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
else if (reqctx->mode & FLAGS_AES_CTR)
- memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}
/* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct ablkcipher_request *req, int err)
+static void s5p_aes_complete(struct skcipher_request *req, int err)
{
req->base.complete(&req->base, err);
}
@@ -523,7 +523,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
if (!*dst)
return -ENOMEM;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
if (!pages) {
kfree(*dst);
@@ -531,7 +531,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
return -ENOMEM;
}
- s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+ s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
sg_init_table(*dst, 1);
sg_set_buf(*dst, pages, len);
@@ -660,7 +660,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
@@ -1870,7 +1870,7 @@ static bool s5p_is_sg_aligned(struct scatterlist *sg)
}
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1897,7 +1897,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1925,7 +1925,7 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
- struct ablkcipher_request *req = dev->req;
+ struct skcipher_request *req = dev->req;
u32 aes_control;
unsigned long flags;
int err;
@@ -1938,12 +1938,12 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- iv = req->info;
+ iv = req->iv;
ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
iv = NULL;
- ctr = req->info;
+ ctr = req->iv;
} else {
iv = NULL; /* AES_ECB */
ctr = NULL;
@@ -2021,21 +2021,21 @@ static void s5p_tasklet_cb(unsigned long data)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- dev->req = ablkcipher_request_cast(async_req);
+ dev->req = skcipher_request_cast(async_req);
dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
- reqctx = ablkcipher_request_ctx(dev->req);
+ reqctx = skcipher_request_ctx(dev->req);
s5p_aes_crypt_start(dev, reqctx->mode);
}
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
unsigned long flags;
int err;
spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
return err;
@@ -2049,17 +2049,17 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
return err;
}
-static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
- struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!req->nbytes)
+ if (!req->cryptlen)
return 0;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -2070,10 +2070,10 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return s5p_aes_handle_req(dev, req);
}
-static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
+static int s5p_aes_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
@@ -2087,106 +2087,97 @@ static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, 0);
}
-static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}
-static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_CBC);
}
-static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
-static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_CTR);
}
-static int s5p_aes_cra_init(struct crypto_tfm *tfm)
+static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->dev = s5p_dev;
- tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
return 0;
}
-static struct crypto_alg algs[] = {
+static struct skcipher_alg algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_ecb_encrypt,
- .decrypt = s5p_aes_ecb_decrypt,
- }
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ecb_encrypt,
+ .decrypt = s5p_aes_ecb_decrypt,
+ .init = s5p_aes_init_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_cbc_encrypt,
- .decrypt = s5p_aes_cbc_decrypt,
- }
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_cbc_encrypt,
+ .decrypt = s5p_aes_cbc_decrypt,
+ .init = s5p_aes_init_tfm,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_ctr_crypt,
- .decrypt = s5p_aes_ctr_crypt,
- }
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ .init = s5p_aes_init_tfm,
},
};
@@ -2297,7 +2288,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
for (i = 0; i < ARRAY_SIZE(algs); i++) {
- err = crypto_register_alg(&algs[i]);
+ err = crypto_register_skcipher(&algs[i]);
if (err)
goto err_algs;
}
@@ -2334,11 +2325,11 @@ err_hash:
err_algs:
if (i < ARRAY_SIZE(algs))
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+ dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
err);
for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
+ crypto_unregister_skcipher(&algs[j]);
tasklet_kill(&pdata->tasklet);
@@ -2362,7 +2353,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_alg(&algs[i]);
+ crypto_unregister_skcipher(&algs[i]);
tasklet_kill(&pdata->tasklet);
if (pdata->use_hash) {
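The s5p conversion also shows the standard pairing of crypto_skcipher_set_reqsize() in the .init hook with skcipher_request_ctx() at request time: the core allocates the per-request context together with each skcipher_request. A minimal sketch of that pattern, with hypothetical my_* names standing in for the driver's:

#include <crypto/internal/skcipher.h>

/* Hypothetical skeleton of the reqsize/request-context pairing. */
struct my_reqctx {
	unsigned long mode;	/* per-request state, e.g. a decrypt flag */
};

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	/* Reserve room; the core then allocates a my_reqctx with every
	 * skcipher_request, so the driver never allocates it itself. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx));
	return 0;
}

static int my_encrypt(struct skcipher_request *req)
{
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	rctx->mode = 0;	/* stash flags for the interrupt/tasklet path */
	/* then queue it: crypto_enqueue_request(&dev->queue, &req->base) */
	return -EINPROGRESS;
}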
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8ac8ec6decd5..d4ea2f11ca68 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -547,7 +547,7 @@ unmap_in:
return -EINVAL;
}
-static int sahara_aes_process(struct ablkcipher_request *req)
+static int sahara_aes_process(struct skcipher_request *req)
{
struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
@@ -558,20 +558,20 @@ static int sahara_aes_process(struct ablkcipher_request *req)
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- req->nbytes, req->src, req->dst);
+ req->cryptlen, req->src, req->dst);
/* assign new request to device */
- dev->total = req->nbytes;
+ dev->total = req->cryptlen;
dev->in_sg = req->src;
dev->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
- if ((dev->flags & FLAGS_CBC) && req->info)
- memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+ if ((dev->flags & FLAGS_CBC) && req->iv)
+ memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
/* assign new context to device */
dev->ctx = ctx;
@@ -597,10 +597,10 @@ static int sahara_aes_process(struct ablkcipher_request *req)
return 0;
}
-static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
ctx->keylen = keylen;
@@ -630,16 +630,16 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ret;
}
-static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
int err = 0;
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
- req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+ req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
dev_err(dev->device,
"request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -648,7 +648,7 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
mutex_lock(&dev->queue_mutex);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
mutex_unlock(&dev->queue_mutex);
wake_up_process(dev->kthread);
@@ -656,10 +656,10 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return err;
}
-static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -669,7 +669,7 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -678,10 +678,10 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -691,7 +691,7 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -700,10 +700,10 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, 0);
}
-static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -713,7 +713,7 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -722,10 +722,10 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -735,7 +735,7 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -744,10 +744,10 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_CBC);
}
-static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
@@ -756,14 +756,14 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->fallback);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx));
return 0;
}
-static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
@@ -1071,8 +1071,8 @@ static int sahara_queue_manage(void *data)
ret = sahara_sha_process(req);
} else {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
+ struct skcipher_request *req =
+ skcipher_request_cast(async_req);
ret = sahara_aes_process(req);
}
@@ -1189,48 +1189,42 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "sahara-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_ecb_encrypt,
- .decrypt = sahara_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "sahara-ecb-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "sahara-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_cbc_encrypt,
- .decrypt = sahara_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "sahara-cbc-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
}
};
@@ -1318,7 +1312,7 @@ static int sahara_register_algs(struct sahara_dev *dev)
unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1348,7 +1342,7 @@ err_sha_v3_algs:
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -1358,7 +1352,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
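
(Editor's note, not part of the patch: the sahara conversion above follows the same recipe as the rest of this series. A minimal sketch of that recipe is shown below, under assumed placeholder names -- my_ctx, my_encrypt, my_run_hw and my_alg are illustrative, not symbols from this diff: handlers take a struct skcipher_request, req->nbytes/req->info become req->cryptlen/req->iv, and the algorithm is described by a struct skcipher_alg registered with crypto_register_skcipher().)

/* Illustrative sketch only; the my_* names are placeholders, not patch code. */
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

struct my_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

/* Placeholder for a driver's hardware submission path. */
static int my_run_hw(struct my_ctx *ctx, struct scatterlist *src,
		     struct scatterlist *dst, unsigned int len, u8 *iv);

static int my_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* cryptlen and iv replace the old ablkcipher nbytes and info */
	return my_run_hw(ctx, req->src, req->dst, req->cryptlen, req->iv);
}

static struct skcipher_alg my_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "my-cbc-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct my_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.encrypt		= my_encrypt,
};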
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index ba5ea6434f9c..d347a1d6e351 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -19,6 +19,7 @@
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#define DRIVER_NAME "stm32-cryp"
@@ -137,7 +138,7 @@ struct stm32_cryp {
struct crypto_engine *engine;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct aead_request *areq;
size_t authsize;
@@ -395,8 +396,8 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
{
- struct ablkcipher_request *req = cryp->req;
- u32 *tmp = req->info;
+ struct skcipher_request *req = cryp->req;
+ u32 *tmp = (void *)req->iv;
if (!tmp)
return;
@@ -616,7 +617,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
case CR_TDES_CBC:
case CR_AES_CBC:
case CR_AES_CTR:
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->iv);
break;
default:
@@ -667,7 +668,7 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
else
- crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
+ crypto_finalize_skcipher_request(cryp->engine, cryp->req,
err);
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
@@ -685,11 +686,11 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
void *areq);
-static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
@@ -714,11 +715,11 @@ static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
return 0;
}
-static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int stm32_cryp_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = skcipher_request_ctx(req);
struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
if (!cryp)
@@ -726,7 +727,7 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
- return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
}
static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
@@ -743,10 +744,10 @@ static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
-static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -754,7 +755,7 @@ static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -764,17 +765,17 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return stm32_cryp_setkey(tfm, key, keylen);
}
-static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return verify_ablkcipher_des_key(tfm, key) ?:
+ return verify_skcipher_des_key(tfm, key) ?:
stm32_cryp_setkey(tfm, key, keylen);
}
-static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return verify_ablkcipher_des3_key(tfm, key) ?:
+ return verify_skcipher_des3_key(tfm, key) ?:
stm32_cryp_setkey(tfm, key, keylen);
}
@@ -818,32 +819,32 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
}
-static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
}
-static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
@@ -868,47 +869,47 @@ static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
}
-static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
}
-static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
}
-static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
}
-static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
-static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
+static int stm32_cryp_prepare_req(struct skcipher_request *req,
struct aead_request *areq)
{
struct stm32_cryp_ctx *ctx;
@@ -919,7 +920,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!req && !areq)
return -EINVAL;
- ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
+ ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
crypto_aead_ctx(crypto_aead_reqtfm(areq));
cryp = ctx->cryp;
@@ -927,7 +928,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;
- rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
+ rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
ctx->cryp = cryp;
@@ -939,7 +940,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (req) {
cryp->req = req;
cryp->areq = NULL;
- cryp->total_in = req->nbytes;
+ cryp->total_in = req->cryptlen;
cryp->total_out = cryp->total_in;
} else {
/*
@@ -1016,8 +1017,8 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq,
- struct ablkcipher_request,
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
base);
return stm32_cryp_prepare_req(req, NULL);
@@ -1025,11 +1026,11 @@ static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
- struct ablkcipher_request *req = container_of(areq,
- struct ablkcipher_request,
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
base);
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
if (!cryp)
@@ -1724,150 +1725,129 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-static struct crypto_alg crypto_algs[] = {
-{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "stm32-ecb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ecb_encrypt,
- .decrypt = stm32_cryp_aes_ecb_decrypt,
- }
+static struct skcipher_alg crypto_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "stm32-ecb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "stm32-cbc-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_cbc_encrypt,
- .decrypt = stm32_cryp_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "stm32-cbc-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "stm32-ctr-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ctr_encrypt,
- .decrypt = stm32_cryp_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "stm32-ctr-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
},
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "stm32-ecb-des",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_ecb_encrypt,
- .decrypt = stm32_cryp_des_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "stm32-ecb-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "stm32-cbc-des",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_cbc_encrypt,
- .decrypt = stm32_cryp_des_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "stm32-cbc-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "stm32-ecb-des3",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_ecb_encrypt,
- .decrypt = stm32_cryp_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "stm32-ecb-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "stm32-cbc-des3",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_cbc_encrypt,
- .decrypt = stm32_cryp_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "stm32-cbc-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
},
};
@@ -2010,7 +1990,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine2;
}
- ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
if (ret) {
dev_err(dev, "Could not register algs\n");
goto err_algs;
@@ -2027,7 +2007,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return 0;
err_aead_algs:
- crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -2059,7 +2039,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
return ret;
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
- crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
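
(Editor's note, not part of the patch: the stm32 hunks above also rewire the crypto_engine hooks for the new API. The generic flow is roughly the sketch below -- my_device, my_ctx, my_reqctx and my_start_hw are assumed placeholders, not symbols from this diff: a request is queued with crypto_transfer_skcipher_request_to_engine() and, once processing ends, completed with crypto_finalize_skcipher_request().)

/* Hedged sketch of the skcipher + crypto_engine flow; my_* are placeholders. */
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct my_device { struct crypto_engine *engine; };
struct my_ctx { struct my_device *dev; };
struct my_reqctx { unsigned long mode; };

/* Placeholder that would start the hardware operation for @req. */
static int my_start_hw(struct skcipher_request *req);

static int my_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	rctx->mode = mode;
	/* serialize the request through the engine's queue */
	return crypto_transfer_skcipher_request_to_engine(ctx->dev->engine, req);
}

static int my_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err;

	err = my_start_hw(req);
	if (err)
		crypto_finalize_skcipher_request(engine, req, err);
	/* on success, the completion path (e.g. an IRQ handler) is assumed
	 * to call crypto_finalize_skcipher_request() when the hardware is done */
	return err;
}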
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 56e3068c9947..d71d65846e47 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -35,7 +35,7 @@
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
@@ -1490,10 +1490,10 @@ static int aead_decrypt(struct aead_request *req)
return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct device *dev = ctx->dev;
if (ctx->keylen)
@@ -1507,39 +1507,39 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- return verify_ablkcipher_des_key(cipher, key) ?:
- ablkcipher_setkey(cipher, key, keylen);
+ return verify_skcipher_des_key(cipher, key) ?:
+ skcipher_setkey(cipher, key, keylen);
}
-static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
- ablkcipher_setkey(cipher, key, keylen);
+ return verify_skcipher_des3_key(cipher, key) ?:
+ skcipher_setkey(cipher, key, keylen);
}
-static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
keylen == AES_KEYSIZE_256)
- return ablkcipher_setkey(cipher, key, keylen);
+ return skcipher_setkey(cipher, key, keylen);
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct ablkcipher_request *areq)
+ struct skcipher_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
if (edesc->dma_len)
@@ -1547,20 +1547,20 @@ static void common_nonsnoop_unmap(struct device *dev,
DMA_BIDIRECTIONAL);
}
-static void ablkcipher_done(struct device *dev,
+static void skcipher_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
- struct ablkcipher_request *areq = context;
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct skcipher_request *areq = context;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
- memcpy(areq->info, ctx->iv, ivsize);
+ memcpy(areq->iv, ctx->iv, ivsize);
kfree(edesc);
@@ -1568,17 +1568,17 @@ static void ablkcipher_done(struct device *dev,
}
static int common_nonsnoop(struct talitos_edesc *edesc,
- struct ablkcipher_request *areq,
+ struct skcipher_request *areq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->nbytes;
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ unsigned int cryptlen = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
int sg_count, ret;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
@@ -1638,65 +1638,65 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
return ret;
}
-static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
areq, bool encrypt)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- areq->info, 0, areq->nbytes, 0, ivsize, 0,
+ areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
areq->base.flags, encrypt);
}
-static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+static int skcipher_encrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct talitos_edesc *edesc;
unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+ crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
- if (!areq->nbytes)
+ if (!areq->cryptlen)
return 0;
- if (areq->nbytes % blocksize)
+ if (areq->cryptlen % blocksize)
return -EINVAL;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq, true);
+ edesc = skcipher_edesc_alloc(areq, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return common_nonsnoop(edesc, areq, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, skcipher_done);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+static int skcipher_decrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct talitos_edesc *edesc;
unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+ crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
- if (!areq->nbytes)
+ if (!areq->cryptlen)
return 0;
- if (areq->nbytes % blocksize)
+ if (areq->cryptlen % blocksize)
return -EINVAL;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq, false);
+ edesc = skcipher_edesc_alloc(areq, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- return common_nonsnoop(edesc, areq, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, skcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
@@ -1704,6 +1704,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
struct talitos_desc *desc = &edesc->desc;
@@ -1714,6 +1715,9 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+ if (req_ctx->last)
+ memcpy(areq->result, req_ctx->hw_context,
+ crypto_ahash_digestsize(tfm));
if (req_ctx->psrc)
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
@@ -1845,7 +1849,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (req_ctx->last)
map_single_talitos_ptr(dev, &desc->ptr[5],
crypto_ahash_digestsize(tfm),
- areq->result, DMA_FROM_DEVICE);
+ req_ctx->hw_context, DMA_FROM_DEVICE);
else
map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
req_ctx->hw_context_size,
@@ -2253,7 +2257,7 @@ struct talitos_alg_template {
u32 type;
u32 priority;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -2698,123 +2702,102 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
- /* ABLKCIPHER algorithms. */
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ /* SKCIPHER algorithms. */
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-talitos",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-talitos",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-talitos",
+ .base.cra_blocksize = 1,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-talitos",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = ablkcipher_des_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-talitos",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = skcipher_des_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-talitos",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = ablkcipher_des_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-talitos",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = skcipher_des_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = ablkcipher_des3_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-3des-talitos",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = skcipher_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_3DES,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = ablkcipher_des3_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-3des-talitos",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = skcipher_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -3032,46 +3015,45 @@ static int talitos_init_common(struct talitos_ctx *ctx,
return 0;
}
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
return talitos_init_common(ctx, talitos_alg);
}
-static int talitos_cra_init_aead(struct crypto_aead *tfm)
+static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
{
- struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.aead);
+ algt.alg.skcipher);
return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- talitos_cra_init(tfm);
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
ctx->keylen = 0;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct talitos_ahash_req_ctx));
- return 0;
+ return talitos_init_common(ctx, talitos_alg);
}
static void talitos_cra_exit(struct crypto_tfm *tfm)
@@ -3112,7 +3094,8 @@ static int talitos_remove(struct platform_device *ofdev)
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&t_alg->algt.alg.aead);
@@ -3156,15 +3139,14 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt = *template;
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg = &t_alg->algt.alg.crypto;
- alg->cra_init = talitos_cra_init;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ alg = &t_alg->algt.alg.skcipher.base;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
- ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
+ t_alg->algt.alg.skcipher.setkey =
+ t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
+ t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
+ t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
@@ -3461,10 +3443,10 @@ static int talitos_probe(struct platform_device *ofdev)
}
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = crypto_register_alg(
- &t_alg->algt.alg.crypto);
- alg = &t_alg->algt.alg.crypto;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = crypto_register_skcipher(
+ &t_alg->algt.alg.skcipher);
+ alg = &t_alg->algt.alg.skcipher.base;
break;
case CRYPTO_ALG_TYPE_AEAD:
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b1c6f739f77b..b731895aa241 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -8,7 +8,7 @@ config CRYPTO_DEV_UX500_CRYP
tristate "UX500 crypto driver for CRYP block"
depends on CRYPTO_DEV_UX500
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
This selects the crypto driver for the UX500_CRYP hardware. It supports
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 1628ae7a1467..95fb694a2667 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -30,6 +30,7 @@
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/platform_data/crypto-ux500.h>
@@ -828,10 +829,10 @@ static int get_nents(struct scatterlist *sg, int nbytes)
return nents;
}
-static int ablk_dma_crypt(struct ablkcipher_request *areq)
+static int ablk_dma_crypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
struct cryp_device_data *device_data;
int bytes_written = 0;
@@ -840,8 +841,8 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq)
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- ctx->datalen = areq->nbytes;
- ctx->outlen = areq->nbytes;
+ ctx->datalen = areq->cryptlen;
+ ctx->outlen = areq->cryptlen;
ret = cryp_get_device_data(ctx, &device_data);
if (ret)
@@ -885,11 +886,11 @@ out:
return 0;
}
-static int ablk_crypt(struct ablkcipher_request *areq)
+static int ablk_crypt(struct skcipher_request *areq)
{
- struct ablkcipher_walk walk;
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct skcipher_walk walk;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
struct cryp_device_data *device_data;
unsigned long src_paddr;
unsigned long dst_paddr;
@@ -902,21 +903,20 @@ static int ablk_crypt(struct ablkcipher_request *areq)
if (ret)
goto out;
- ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
- ret = ablkcipher_walk_phys(areq, &walk);
+ ret = skcipher_walk_async(&walk, areq);
if (ret) {
- pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ pr_err(DEV_DBG_NAME "[%s]: skcipher_walk_async() failed!",
__func__);
goto out;
}
while ((nbytes = walk.nbytes) > 0) {
ctx->iv = walk.iv;
- src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ src_paddr = (page_to_phys(walk.src.phys.page) + walk.src.phys.offset);
ctx->indata = phys_to_virt(src_paddr);
- dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ dst_paddr = (page_to_phys(walk.dst.phys.page) + walk.dst.phys.offset);
ctx->outdata = phys_to_virt(dst_paddr);
ctx->datalen = nbytes - (nbytes % ctx->blocksize);
@@ -926,11 +926,10 @@ static int ablk_crypt(struct ablkcipher_request *areq)
goto out;
nbytes -= ctx->datalen;
- ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ ret = skcipher_walk_done(&walk, nbytes);
if (ret)
goto out;
}
- ablkcipher_walk_complete(&walk);
out:
/* Release the device */
@@ -948,10 +947,10 @@ out:
return ret;
}
-static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -983,15 +982,15 @@ static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int des_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1002,15 +1001,15 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int des3_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1021,10 +1020,10 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int cryp_blk_encrypt(struct ablkcipher_request *areq)
+static int cryp_blk_encrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -1039,10 +1038,10 @@ static int cryp_blk_encrypt(struct ablkcipher_request *areq)
return ablk_crypt(areq);
}
-static int cryp_blk_decrypt(struct ablkcipher_request *areq)
+static int cryp_blk_decrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -1058,19 +1057,19 @@ static int cryp_blk_decrypt(struct ablkcipher_request *areq)
struct cryp_algo_template {
enum cryp_algo_mode algomode;
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
};
-static int cryp_cra_init(struct crypto_tfm *tfm)
+static int cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct cryp_algo_template *cryp_alg = container_of(alg,
struct cryp_algo_template,
- crypto);
+ skcipher);
ctx->config.algomode = cryp_alg->algomode;
- ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+ ctx->blocksize = crypto_skcipher_blocksize(tfm);
return 0;
}
@@ -1078,205 +1077,147 @@ static int cryp_cra_init(struct crypto_tfm *tfm)
static struct cryp_algo_template cryp_algs[] = {
{
.algomode = CRYP_ALGO_AES_ECB,
- .crypto = {
- .cra_name = "aes",
- .cra_driver_name = "aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
- },
- {
- .algomode = CRYP_ALGO_AES_ECB,
- .crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_AES_CBC,
- .crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
+ .ivsize = AES_BLOCK_SIZE,
}
},
{
.algomode = CRYP_ALGO_AES_CTR,
- .crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
}
},
{
.algomode = CRYP_ALGO_DES_ECB,
- .crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_TDES_ECB,
- .crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_DES_CBC,
- .crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_TDES_CBC,
- .crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .init = cryp_init_tfm,
}
}
};
@@ -1293,18 +1234,18 @@ static int cryp_algs_register_all(void)
pr_debug("[%s]", __func__);
for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
- ret = crypto_register_alg(&cryp_algs[i].crypto);
+ ret = crypto_register_skcipher(&cryp_algs[i].skcipher);
if (ret) {
count = i;
pr_err("[%s] alg registration failed",
- cryp_algs[i].crypto.cra_driver_name);
+ cryp_algs[i].skcipher.base.cra_driver_name);
goto unreg;
}
}
return 0;
unreg:
for (i = 0; i < count; i++)
- crypto_unregister_alg(&cryp_algs[i].crypto);
+ crypto_unregister_skcipher(&cryp_algs[i].skcipher);
return ret;
}
@@ -1318,7 +1259,7 @@ static void cryp_algs_unregister_all(void)
pr_debug(DEV_DBG_NAME " [%s]", __func__);
for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
- crypto_unregister_alg(&cryp_algs[i].crypto);
+ crypto_unregister_skcipher(&cryp_algs[i].skcipher);
}
static int ux500_cryp_probe(struct platform_device *pdev)
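
(Editor's note, not part of the patch: the ux500 hunk above replaces ablkcipher_walk with the physical-address variant of skcipher_walk. For drivers that touch data through virtual addresses, the more common shape of the new walk API is sketched below -- my_process and the choice of AES_BLOCK_SIZE are illustrative assumptions, not this driver's code.)

/* Sketch of a typical skcipher_walk loop; my_process is a placeholder. */
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

static void my_process(const u8 *src, u8 *dst, unsigned int len, u8 *iv);

static int my_walk_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		/* handle the full blocks mapped in this iteration */
		my_process(walk.src.virt.addr, walk.dst.virt.addr,
			   nbytes - (nbytes % AES_BLOCK_SIZE), walk.iv);
		/* report leftover bytes; the walk remaps them next round */
		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
		if (err)
			break;
	}

	return err;
}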
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c172a6953477..c24f2db8d5e8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -140,7 +140,6 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
{
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *channel = NULL;
- dma_cookie_t cookie;
if (direction != DMA_TO_DEVICE) {
dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
@@ -176,7 +175,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
desc->callback = hash_dma_callback;
desc->callback_param = ctx;
- cookie = dmaengine_submit(desc);
+ dmaengine_submit(desc);
dma_async_issue_pending(channel);
return 0;
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 01b625e4e5ad..fb294174e408 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -3,7 +3,7 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver"
depends on VIRTIO
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
default m
help
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 42d19205166b..4b71e80951b7 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -8,6 +8,7 @@
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
@@ -16,10 +17,10 @@
#include "virtio_crypto_common.h"
-struct virtio_crypto_ablkcipher_ctx {
+struct virtio_crypto_skcipher_ctx {
struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
@@ -30,8 +31,8 @@ struct virtio_crypto_sym_request {
/* Cipher or aead */
uint32_t type;
- struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
- struct ablkcipher_request *ablkcipher_req;
+ struct virtio_crypto_skcipher_ctx *skcipher_ctx;
+ struct skcipher_request *skcipher_req;
uint8_t *iv;
/* Encryption? */
bool encrypt;
@@ -41,7 +42,7 @@ struct virtio_crypto_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct crypto_alg algo;
+ struct skcipher_alg algo;
};
/*
@@ -49,9 +50,9 @@ struct virtio_crypto_algo {
 * and crypto algorithms registration.
*/
static DEFINE_MUTEX(algs_lock);
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+ struct skcipher_request *req,
int err);
static void virtio_crypto_dataq_sym_callback
@@ -59,7 +60,7 @@ static void virtio_crypto_dataq_sym_callback
{
struct virtio_crypto_sym_request *vc_sym_req =
container_of(vc_req, struct virtio_crypto_sym_request, base);
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *ablk_req;
int error;
/* Finish the encrypt or decrypt process */
@@ -79,8 +80,8 @@ static void virtio_crypto_dataq_sym_callback
error = -EIO;
break;
}
- ablk_req = vc_sym_req->ablkcipher_req;
- virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+ ablk_req = vc_sym_req->skcipher_req;
+ virtio_crypto_skcipher_finalize_req(vc_sym_req,
ablk_req, error);
}
}
@@ -105,15 +106,13 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
break;
default:
- pr_err("virtio_crypto: Unsupported key length: %d\n",
- key_len);
return -EINVAL;
}
return 0;
}
-static int virtio_crypto_alg_ablkcipher_init_session(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
uint32_t alg, const uint8_t *key,
unsigned int keylen,
int encrypt)
@@ -202,8 +201,8 @@ static int virtio_crypto_alg_ablkcipher_init_session(
return 0;
}
-static int virtio_crypto_alg_ablkcipher_close_session(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_close_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
@@ -263,8 +262,8 @@ static int virtio_crypto_alg_ablkcipher_close_session(
return 0;
}
-static int virtio_crypto_alg_ablkcipher_init_sessions(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_sessions(
+ struct virtio_crypto_skcipher_ctx *ctx,
const uint8_t *key, unsigned int keylen)
{
uint32_t alg;
@@ -280,30 +279,30 @@ static int virtio_crypto_alg_ablkcipher_init_sessions(
goto bad_key;
/* Create encryption session */
- ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 1);
if (ret)
return ret;
/* Create decryption session */
- ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 0);
if (ret) {
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
return ret;
}
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/* Note: kernel crypto API realization */
-static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
const uint8_t *key,
unsigned int keylen)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
uint32_t alg;
int ret;
@@ -325,11 +324,11 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
ctx->vcrypto = vcrypto;
} else {
/* Rekeying, we should close the created sessions previously */
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
- virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
}
- ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+ ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
if (ret) {
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
@@ -341,14 +340,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ struct skcipher_request *req,
struct data_queue *data_vq)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
@@ -361,7 +360,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int sg_total;
uint8_t *iv;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -398,7 +397,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
req_data->u.sym_req.u.cipher.para.src_data_len =
- cpu_to_le32(req->nbytes);
+ cpu_to_le32(req->cryptlen);
dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
if (unlikely(dst_len > U32_MAX)) {
@@ -408,9 +407,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
}
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
- req->nbytes, dst_len);
+ req->cryptlen, dst_len);
- if (unlikely(req->nbytes + dst_len + ivsize +
+ if (unlikely(req->cryptlen + dst_len + ivsize +
sizeof(vc_req->status) > vcrypto->max_size)) {
pr_err("virtio_crypto: The length is too big\n");
err = -EINVAL;
@@ -436,7 +435,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
err = -ENOMEM;
goto free;
}
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
+ if (!vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
vc_sym_req->iv = iv;
@@ -473,83 +477,93 @@ free:
return err;
}
-static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->ablkcipher_ctx = ctx;
- vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = true;
- return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
-static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->ablkcipher_ctx = ctx;
- vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = false;
- return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
-static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+ ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
-static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (!ctx->vcrypto)
return;
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
- virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
}
-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq)
{
- struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
+ struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
- ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
+ ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
@@ -558,12 +572,16 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0;
}
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+ struct skcipher_request *req,
int err)
{
- crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ if (vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
@@ -573,27 +591,21 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
.algo = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "virtio_crypto_aes_cbc",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = virtio_crypto_skcipher_init,
+ .exit = virtio_crypto_skcipher_exit,
+ .setkey = virtio_crypto_skcipher_setkey,
+ .decrypt = virtio_crypto_skcipher_decrypt,
+ .encrypt = virtio_crypto_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
} };
@@ -613,14 +625,14 @@ int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 0) {
- ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
- virtio_crypto_algs[i].algo.cra_name);
+ virtio_crypto_algs[i].algo.base.cra_name);
}
unlock:
@@ -644,7 +656,7 @@ void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 1)
- crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+ crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
virtio_crypto_algs[i].active_devs--;
}
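Beyond the rename, the two scatterwalk_map_and_copy() calls added above implement the skcipher API's CBC chaining contract: when a request completes, req->iv must contain the IV for a continuation of the operation, i.e. the last ciphertext block. On decryption that block has to be saved from req->src before the operation, since an in-place operation overwrites it; on encryption it is copied out of req->dst after completion. A condensed sketch of the pattern (names follow the patch; assumes cryptlen is a block multiple, which the new entry-point checks enforce):

	/* CBC decrypt: stash the last ciphertext block up front; an
	 * in-place operation would destroy it before completion.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	/* ... the device processes the request asynchronously ... */

	/* CBC encrypt: on completion the last ciphertext block becomes
	 * the next IV, so copy it from req->dst back into req->iv.
	 */
	if (encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);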
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 1c6e00da5a29..a24f85c589e7 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -112,7 +112,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node,
uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq);
void
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index cab32cfec9c4..709670d2b553 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-TARGET := linux-ppc64le
+override flavour := linux-ppc64le
else
-TARGET := linux-ppc64
+override flavour := linux-ppc64
endif
quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
targets += aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index f33c73e4af41..3b6c06f07326 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -32,19 +32,36 @@ config DEV_DAX_PMEM
Say M if unsure
+config DEV_DAX_HMEM
+ tristate "HMEM DAX: direct access to 'specific purpose' memory"
+ depends on EFI_SOFT_RESERVE
+ default DEV_DAX
+ help
+	  EFI 2.8 platforms, and others, may advertise 'specific purpose'
+	  memory, for example a high-bandwidth memory pool. The
+	  indication from platform firmware is meant to reserve the
+	  memory from typical usage by default. This driver creates
+	  device-dax instances for these memory ranges, and also enables
+	  assigning them to the DEV_DAX_KMEM driver to override the
+	  reservation and add them to the kernel's "System RAM" pool.
+
+ Say M if unsure.
+
config DEV_DAX_KMEM
tristate "KMEM DAX: volatile-use of persistent memory"
default DEV_DAX
depends on DEV_DAX
depends on MEMORY_HOTPLUG # for add_memory() and friends
help
- Support access to persistent memory as if it were RAM. This
- allows easier use of persistent memory by unmodified
- applications.
+	  Support access to persistent or other performance-differentiated
+	  memory as if it were System RAM. This allows easier use of
+	  persistent memory by unmodified applications, or adds core
+	  kernel memory services to heterogeneous memory types (HMEM)
+	  marked "reserved" by platform firmware.
To use this feature, a DAX device must be unbound from the
- device_dax driver (PMEM DAX) and bound to this kmem driver
- on each boot.
+ device_dax driver and bound to this kmem driver on each boot.
Say N if unsure.
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 81f7d54dadfb..80065b38b3c4 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -2,9 +2,11 @@
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
+obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
+dax_hmem-y := hmem.o
obj-y += pmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 8fafbeab510a..eccdda1f7b71 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align,
- unsigned long pfn_flags)
+ unsigned long long pfn_flags)
{
struct dax_region *dax_region;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 8619e3299943..9e4eba67e8b9 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -11,7 +11,7 @@ struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align,
- unsigned long flags);
+ unsigned long long flags);
enum dev_dax_subsys {
DEV_DAX_BUS,
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 6ccca3b890d6..3107ce80e809 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -32,7 +32,7 @@ struct dax_region {
struct device *dev;
unsigned int align;
struct resource res;
- unsigned long pfn_flags;
+ unsigned long long pfn_flags;
};
/**
diff --git a/drivers/dax/hmem.c b/drivers/dax/hmem.c
new file mode 100644
index 000000000000..fe7214daf62e
--- /dev/null
+++ b/drivers/dax/hmem.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/platform_device.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include "bus.h"
+
+static int dax_hmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pagemap pgmap = { };
+ struct dax_region *dax_region;
+ struct memregion_info *mri;
+ struct dev_dax *dev_dax;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ mri = dev->platform_data;
+ memcpy(&pgmap.res, res, sizeof(*res));
+
+ dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
+ PMD_SIZE, PFN_DEV|PFN_MAP);
+ if (!dax_region)
+ return -ENOMEM;
+
+ dev_dax = devm_create_dev_dax(dax_region, 0, &pgmap);
+ if (IS_ERR(dev_dax))
+ return PTR_ERR(dev_dax);
+
+ /* child dev_dax instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
+ return 0;
+}
+
+static int dax_hmem_remove(struct platform_device *pdev)
+{
+ /* devm handles teardown */
+ return 0;
+}
+
+static struct platform_driver dax_hmem_driver = {
+ .probe = dax_hmem_probe,
+ .remove = dax_hmem_remove,
+ .driver = {
+ .name = "hmem",
+ },
+};
+
+module_platform_driver(dax_hmem_driver);
+
+MODULE_ALIAS("platform:hmem*");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
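dax_hmem_probe() expects a struct memregion_info carrying the target NUMA node as platform data on a platform device named "hmem"; in this series those devices come from the EFI soft-reserve handling rather than from this file. A hedged sketch of how such a device could be registered (illustrative only; register_hmem_device() is a hypothetical helper, not code from this patch):

#include <linux/err.h>
#include <linux/memregion.h>
#include <linux/platform_device.h>

static int register_hmem_device(int id, struct resource *res, int node)
{
	struct memregion_info info = { .target_node = node };
	struct platform_device_info pinfo = {
		.name		= "hmem",
		.id		= id,
		.res		= res,		/* the soft-reserved range */
		.num_res	= 1,
		.data		= &info,	/* copied as platform_data */
		.size_data	= sizeof(info),
	};

	return PTR_ERR_OR_ZERO(platform_device_register_full(&pinfo));
}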
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 446490c9d635..f840e61e5a27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -160,6 +160,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
int lev, prev_lev, ret = 0;
unsigned long cur_time;
+ lockdep_assert_held(&devfreq->lock);
cur_time = jiffies;
/* Immediately exit if previous_freq is not initialized yet. */
@@ -409,6 +410,9 @@ static void devfreq_monitor(struct work_struct *work)
*/
void devfreq_monitor_start(struct devfreq *devfreq)
{
+ if (devfreq->governor->interrupt_driven)
+ return;
+
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
@@ -426,6 +430,9 @@ EXPORT_SYMBOL(devfreq_monitor_start);
*/
void devfreq_monitor_stop(struct devfreq *devfreq)
{
+ if (devfreq->governor->interrupt_driven)
+ return;
+
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -453,6 +460,10 @@ void devfreq_monitor_suspend(struct devfreq *devfreq)
devfreq_update_status(devfreq, devfreq->previous_freq);
devfreq->stop_polling = true;
mutex_unlock(&devfreq->lock);
+
+ if (devfreq->governor->interrupt_driven)
+ return;
+
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
@@ -473,11 +484,15 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
if (!devfreq->stop_polling)
goto out;
+ if (devfreq->governor->interrupt_driven)
+ goto out_update;
+
if (!delayed_work_pending(&devfreq->work) &&
devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
+out_update:
devfreq->last_stat_updated = jiffies;
devfreq->stop_polling = false;
@@ -509,6 +524,9 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
if (devfreq->stop_polling)
goto out;
+ if (devfreq->governor->interrupt_driven)
+ goto out;
+
/* if new delay is zero, stop polling */
if (!new_delay) {
mutex_unlock(&devfreq->lock);
@@ -625,7 +643,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq = find_device_devfreq(dev);
mutex_unlock(&devfreq_list_lock);
if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+ dev_err(dev, "%s: devfreq device already exists!\n",
__func__);
err = -EINVAL;
goto err_out;
@@ -1195,7 +1213,7 @@ static ssize_t available_governors_show(struct device *d,
* The devfreq with immutable governor (e.g., passive) shows
* only own governor.
*/
- if (df->governor->immutable) {
+ if (df->governor && df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
"%s ", df->governor_name);
/*
@@ -1397,12 +1415,17 @@ static ssize_t trans_stat_show(struct device *dev,
int i, j;
unsigned int max_state = devfreq->profile->max_state;
- if (!devfreq->stop_polling &&
- devfreq_update_status(devfreq, devfreq->previous_freq))
- return 0;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling &&
+ devfreq_update_status(devfreq, devfreq->previous_freq)) {
+ mutex_unlock(&devfreq->lock);
+ return 0;
+ }
+ mutex_unlock(&devfreq->lock);
+
len = sprintf(buf, " From : To\n");
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
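The lockdep_assert_held() added to devfreq_update_status() makes the locking rule explicit: callers must hold devfreq->lock, which is exactly what the trans_stat_show() fix above introduces. A minimal caller sketch under that rule:

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		err = devfreq_update_status(devfreq, devfreq->previous_freq);
	mutex_unlock(&devfreq->lock);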
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 87b42055e6bc..85c7a77bf3f0 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -673,7 +673,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
if (IS_ERR(edev[i])) {
- ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
return PTR_ERR(edev[i]);
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index bbe5ff9fcecf..dc7533ccc3db 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -31,6 +31,8 @@
* @name: Governor's name
* @immutable: Immutable flag for governor. If the value is 1,
 * this governor is never changeable to another governor.
+ * @interrupt_driven: Devfreq core won't schedule polling work for this
+ * governor if the value is set to 1.
* @get_target_freq: Returns desired operating frequency for the device.
* Basically, get_target_freq will run
* devfreq_dev_profile.get_dev_status() to get the
@@ -49,6 +51,7 @@ struct devfreq_governor {
const char name[DEVFREQ_NAME_LEN];
const unsigned int immutable;
+ const unsigned int interrupt_driven;
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
int (*event_handler)(struct devfreq *devfreq,
unsigned int event, void *data);
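A governor that sets interrupt_driven opts out of the devfreq core's polling workqueue entirely and is expected to trigger re-evaluation from its own interrupt path (calling update_devfreq() under devfreq->lock, as the tegra30 driver below does). A hedged sketch with hypothetical names:

static int my_get_target(struct devfreq *df, unsigned long *freq)
{
	*freq = df->previous_freq;	/* derived from hardware counters */
	return 0;
}

static int my_event_handler(struct devfreq *devfreq,
			    unsigned int event, void *data)
{
	return 0;	/* start/stop the interrupt source per event */
}

static struct devfreq_governor my_governor = {
	.name			= "my_actmon",
	.immutable		= true,
	.interrupt_driven	= true,	/* core skips the polling work */
	.get_target_freq	= my_get_target,
	.event_handler		= my_event_handler,
};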
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index a6ba75f4106d..0b65f89d74d5 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -11,11 +11,13 @@
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
#include "governor.h"
@@ -33,6 +35,8 @@
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
#define ACTMON_DEV_CTRL_ENB BIT(31)
+#define ACTMON_DEV_CTRL_STOP 0x00000000
+
#define ACTMON_DEV_UPPER_WMARK 0x4
#define ACTMON_DEV_LOWER_WMARK 0x8
#define ACTMON_DEV_INIT_AVG 0xc
@@ -68,6 +72,8 @@
#define KHZ 1000
+#define KHZ_MAX (ULONG_MAX / KHZ)
+
/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25
@@ -90,9 +96,10 @@ struct tegra_devfreq_device_config {
unsigned int boost_down_threshold;
/*
- * Threshold of activity (cycles) below which the CPU frequency isn't
- * to be taken into account. This is to avoid increasing the EMC
- * frequency when the CPU is very busy but not accessing the bus often.
+ * Threshold of activity (cycles translated to kHz) below which the
+ * CPU frequency isn't to be taken into account. This is to avoid
+ * increasing the EMC frequency when the CPU is very busy but not
+ * accessing the bus often.
*/
u32 avg_dependency_threshold;
};
@@ -102,7 +109,7 @@ enum tegra_actmon_device {
MCCPU,
};
-static struct tegra_devfreq_device_config actmon_device_configs[] = {
+static const struct tegra_devfreq_device_config actmon_device_configs[] = {
{
/* MCALL: All memory accesses (including from the CPUs) */
.offset = 0x1c0,
@@ -117,10 +124,10 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
.offset = 0x200,
.irq_mask = 1 << 25,
.boost_up_coeff = 800,
- .boost_down_coeff = 90,
+ .boost_down_coeff = 40,
.boost_up_threshold = 27,
.boost_down_threshold = 10,
- .avg_dependency_threshold = 50000,
+ .avg_dependency_threshold = 16000, /* 16MHz in kHz units */
},
};
@@ -156,11 +163,16 @@ struct tegra_devfreq {
struct clk *emc_clock;
unsigned long max_freq;
unsigned long cur_freq;
- struct notifier_block rate_change_nb;
+ struct notifier_block clk_rate_change_nb;
+
+ struct delayed_work cpufreq_update_work;
+ struct notifier_block cpu_rate_change_nb;
struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
- int irq;
+ unsigned int irq;
+
+ bool started;
};
struct tegra_actmon_emc_ratio {
@@ -168,8 +180,8 @@ struct tegra_actmon_emc_ratio {
unsigned long emc_freq;
};
-static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
- { 1400000, ULONG_MAX },
+static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+ { 1400000, KHZ_MAX },
{ 1200000, 750000 },
{ 1100000, 600000 },
{ 1000000, 500000 },
@@ -199,18 +211,26 @@ static void device_writel(struct tegra_devfreq_device *dev, u32 val,
writel_relaxed(val, dev->regs + offset);
}
-static unsigned long do_percent(unsigned long val, unsigned int pct)
+static unsigned long do_percent(unsigned long long val, unsigned int pct)
{
- return val * pct / 100;
+ val = val * pct;
+ do_div(val, 100);
+
+ /*
+	 * High freq + high boosting percent + large polling interval can
+	 * result in integer overflow when watermarks are calculated.
+ */
+ return min_t(u64, val, U32_MAX);
}
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- u32 avg = dev->avg_count;
u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
- u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+ u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
+ u32 avg;
+ avg = min(dev->avg_count, U32_MAX - band);
device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
avg = max(dev->avg_count, band);
@@ -220,7 +240,7 @@ static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
ACTMON_DEV_UPPER_WMARK);
@@ -229,12 +249,6 @@ static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
ACTMON_DEV_LOWER_WMARK);
}
-static void actmon_write_barrier(struct tegra_devfreq *tegra)
-{
- /* ensure the update has reached the ACTMON */
- readl(tegra->regs + ACTMON_GLB_STATUS);
-}
-
static void actmon_isr_device(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
@@ -256,10 +270,10 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- if (dev->boost_freq >= tegra->max_freq)
+ if (dev->boost_freq >= tegra->max_freq) {
+ dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
dev->boost_freq = tegra->max_freq;
- else
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+ }
} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
/*
* new_boost = old_boost * down_coef
@@ -270,31 +284,22 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
- if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
- dev->boost_freq = 0;
- else
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- }
-
- if (dev->config->avg_dependency_threshold) {
- if (dev->avg_count >= dev->config->avg_dependency_threshold)
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- else if (dev->boost_freq == 0)
+ if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ dev->boost_freq = 0;
+ }
}
device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
-
- actmon_write_barrier(tegra);
}
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned long cpu_freq)
{
unsigned int i;
- struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+ const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
if (cpu_freq >= ratio->cpu_freq) {
@@ -308,25 +313,37 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
return 0;
}
+static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
+{
+ unsigned int avg_sustain_coef;
+ unsigned long target_freq;
+
+ target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
+ avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+ target_freq = do_percent(target_freq, avg_sustain_coef);
+
+ return target_freq;
+}
+
static void actmon_update_target(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
unsigned long cpu_freq = 0;
unsigned long static_cpu_emc_freq = 0;
- unsigned int avg_sustain_coef;
- if (dev->config->avg_dependency_threshold) {
- cpu_freq = cpufreq_get(0);
- static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- }
+ dev->target_freq = actmon_device_target_freq(tegra, dev);
- dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
- avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
- dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
- dev->target_freq += dev->boost_freq;
+ if (dev->config->avg_dependency_threshold &&
+ dev->config->avg_dependency_threshold <= dev->target_freq) {
+ cpu_freq = cpufreq_quick_get(0);
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- if (dev->avg_count >= dev->config->avg_dependency_threshold)
+ dev->target_freq += dev->boost_freq;
dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+ } else {
+ dev->target_freq += dev->boost_freq;
+ }
}
static irqreturn_t actmon_thread_isr(int irq, void *data)
@@ -354,8 +371,8 @@ static irqreturn_t actmon_thread_isr(int irq, void *data)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
- unsigned long action, void *ptr)
+static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
{
struct clk_notifier_data *data = ptr;
struct tegra_devfreq *tegra;
@@ -365,7 +382,7 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
if (action != POST_RATE_CHANGE)
return NOTIFY_OK;
- tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
+ tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
tegra->cur_freq = data->new_rate / KHZ;
@@ -375,7 +392,79 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
tegra_devfreq_update_wmark(tegra, dev);
}
- actmon_write_barrier(tegra);
+ return NOTIFY_OK;
+}
+
+static void tegra_actmon_delayed_update(struct work_struct *work)
+{
+ struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
+ cpufreq_update_work.work);
+
+ mutex_lock(&tegra->devfreq->lock);
+ update_devfreq(tegra->devfreq);
+ mutex_unlock(&tegra->devfreq->lock);
+}
+
+static unsigned long
+tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
+ unsigned int cpu_freq)
+{
+ struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
+ unsigned long static_cpu_emc_freq, dev_freq;
+
+ dev_freq = actmon_device_target_freq(tegra, actmon_dev);
+
+ /* check whether CPU's freq is taken into account at all */
+ if (dev_freq < actmon_dev->config->avg_dependency_threshold)
+ return 0;
+
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+
+ if (dev_freq >= static_cpu_emc_freq)
+ return 0;
+
+ return static_cpu_emc_freq;
+}
+
+static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct cpufreq_freqs *freqs = ptr;
+ struct tegra_devfreq *tegra;
+ unsigned long old, new, delay;
+
+ if (action != CPUFREQ_POSTCHANGE)
+ return NOTIFY_OK;
+
+ tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
+
+ /*
+ * Quickly check whether CPU frequency should be taken into account
+ * at all, without blocking CPUFreq's core.
+ */
+ if (mutex_trylock(&tegra->devfreq->lock)) {
+ old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
+ new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
+ mutex_unlock(&tegra->devfreq->lock);
+
+ /*
+	 * If the CPU's frequency shouldn't be taken into account at
+	 * the moment, then there is no need to update devfreq's
+	 * state because the ISR will re-check the CPU's frequency on
+	 * the next interrupt.
+ */
+ if (old == new)
+ return NOTIFY_OK;
+ }
+
+ /*
+ * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
+ * to allow asynchronous notifications. This means we can't block
+ * here for too long, otherwise CPUFreq's core will complain with a
+ * warning splat.
+ */
+ delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
+ schedule_delayed_work(&tegra->cpufreq_update_work, delay);
return NOTIFY_OK;
}
@@ -385,9 +474,12 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
{
u32 val = 0;
+ /* reset boosting on governor's restart */
+ dev->boost_freq = 0;
+
dev->target_freq = tegra->cur_freq;
- dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
tegra_devfreq_update_avg_wmark(tegra, dev);
@@ -405,45 +497,116 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_ENB;
device_writel(dev, val, ACTMON_DEV_CTRL);
}
-static void tegra_actmon_start(struct tegra_devfreq *tegra)
+static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
{
+ struct tegra_devfreq_device *dev = tegra->devices;
unsigned int i;
- disable_irq(tegra->irq);
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
+ device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
+ device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
+ ACTMON_DEV_INTR_STATUS);
+ }
+}
- actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+static int tegra_actmon_resume(struct tegra_devfreq *tegra)
+{
+ unsigned int i;
+ int err;
+
+ if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+ return 0;
+
+ actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
ACTMON_GLB_PERIOD_CTRL);
+ /*
+ * CLK notifications are needed in order to reconfigure the upper
+	 * consecutive watermark in accordance with the actual clock rate
+ * to avoid unnecessary upper interrupts.
+ */
+ err = clk_notifier_register(tegra->emc_clock,
+ &tegra->clk_rate_change_nb);
+ if (err) {
+ dev_err(tegra->devfreq->dev.parent,
+ "Failed to register rate change notifier\n");
+ return err;
+ }
+
+ tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
tegra_actmon_configure_device(tegra, &tegra->devices[i]);
- actmon_write_barrier(tegra);
+ /*
+	 * We estimate the CPU's memory bandwidth requirement based on
+	 * the number of memory accesses and the system's load, judging
+	 * by the CPU's frequency. We also don't want to receive events
+	 * about CPU frequency transitions while the governor is stopped,
+	 * hence the notifier is registered dynamically.
+ */
+ err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (err) {
+ dev_err(tegra->devfreq->dev.parent,
+ "Failed to register rate change notifier: %d\n", err);
+ goto err_stop;
+ }
enable_irq(tegra->irq);
+
+ return 0;
+
+err_stop:
+ tegra_actmon_stop_devices(tegra);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+
+ return err;
}
-static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+static int tegra_actmon_start(struct tegra_devfreq *tegra)
{
- unsigned int i;
+ int ret = 0;
- disable_irq(tegra->irq);
+ if (!tegra->started) {
+ tegra->started = true;
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- device_writel(&tegra->devices[i], 0x00000000, ACTMON_DEV_CTRL);
- device_writel(&tegra->devices[i], ACTMON_INTR_STATUS_CLEAR,
- ACTMON_DEV_INTR_STATUS);
+ ret = tegra_actmon_resume(tegra);
+ if (ret)
+ tegra->started = false;
}
- actmon_write_barrier(tegra);
+ return ret;
+}
- enable_irq(tegra->irq);
+static void tegra_actmon_pause(struct tegra_devfreq *tegra)
+{
+ if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+ return;
+
+ disable_irq(tegra->irq);
+
+ cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ cancel_delayed_work_sync(&tegra->cpufreq_update_work);
+
+ tegra_actmon_stop_devices(tegra);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+}
+
+static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+{
+ tegra_actmon_pause(tegra);
+ tegra->started = false;
}
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
@@ -463,7 +626,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
rate = dev_pm_opp_get_freq(opp);
dev_pm_opp_put(opp);
- err = clk_set_min_rate(tegra->emc_clock, rate);
+ err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
if (err)
return err;
@@ -492,7 +655,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->private_data = tegra;
/* The below are to be used by the other governors */
- stat->current_frequency = cur_freq * KHZ;
+ stat->current_frequency = cur_freq;
actmon_dev = &tegra->devices[MCALL];
@@ -503,7 +666,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->busy_time *= 100 / BUS_SATURATION_RATIO;
/* Number of cycles in a sampling period */
- stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
+ stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
stat->busy_time = min(stat->busy_time, stat->total_time);
@@ -511,7 +674,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile tegra_devfreq_profile = {
- .polling_ms = 0,
+ .polling_ms = ACTMON_SAMPLING_PERIOD,
.target = tegra_devfreq_target,
.get_dev_status = tegra_devfreq_get_dev_status,
};
@@ -542,7 +705,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
target_freq = max(target_freq, dev->target_freq);
}
- *freq = target_freq * KHZ;
+ *freq = target_freq;
return 0;
}
@@ -551,11 +714,19 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
+ unsigned int *new_delay = data;
+ int ret = 0;
+
+ /*
+ * Couple devfreq-device with the governor early because it is
+ * needed at the moment of governor's start (used by ISR).
+ */
+ tegra->devfreq = devfreq;
switch (event) {
case DEVFREQ_GOV_START:
devfreq_monitor_start(devfreq);
- tegra_actmon_start(tegra);
+ ret = tegra_actmon_start(tegra);
break;
case DEVFREQ_GOV_STOP:
@@ -563,6 +734,21 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
+ case DEVFREQ_GOV_INTERVAL:
+ /*
+ * ACTMON hardware supports up to 256 milliseconds for the
+ * sampling period.
+ */
+ if (*new_delay > 256) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tegra_actmon_pause(tegra);
+ devfreq_interval_update(devfreq, new_delay);
+ ret = tegra_actmon_resume(tegra);
+ break;
+
case DEVFREQ_GOV_SUSPEND:
tegra_actmon_stop(tegra);
devfreq_monitor_suspend(devfreq);
@@ -570,11 +756,11 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_RESUME:
devfreq_monitor_resume(devfreq);
- tegra_actmon_start(tegra);
+ ret = tegra_actmon_start(tegra);
break;
}
- return 0;
+ return ret;
}
static struct devfreq_governor tegra_devfreq_governor = {
@@ -582,14 +768,16 @@ static struct devfreq_governor tegra_devfreq_governor = {
.get_target_freq = tegra_governor_get_target,
.event_handler = tegra_governor_event_handler,
.immutable = true,
+ .interrupt_driven = true,
};
static int tegra_devfreq_probe(struct platform_device *pdev)
{
- struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
+ struct tegra_devfreq *tegra;
+ struct devfreq *devfreq;
unsigned int i;
- unsigned long rate;
+ long rate;
int err;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
@@ -618,12 +806,22 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return PTR_ERR(tegra->emc_clock);
}
- tegra->irq = platform_get_irq(pdev, 0);
- if (tegra->irq < 0) {
- err = tegra->irq;
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
return err;
}
+ tegra->irq = err;
+
+ irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
+
+ err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
+ actmon_thread_isr, IRQF_ONESHOT,
+ "tegra-devfreq", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
+ return err;
+ }
reset_control_assert(tegra->reset);
@@ -636,8 +834,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
reset_control_deassert(tegra->reset);
- tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
- tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+ rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+ if (rate < 0) {
+ dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
+ return rate;
+ }
+
+ tegra->max_freq = rate / KHZ;
for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
dev = tegra->devices + i;
@@ -648,7 +851,14 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
rate = clk_round_rate(tegra->emc_clock, rate);
- err = dev_pm_opp_add(&pdev->dev, rate, 0);
+ if (rate < 0) {
+ dev_err(&pdev->dev,
+ "Failed to round clock rate: %ld\n", rate);
+ err = rate;
+ goto remove_opps;
+ }
+
+ err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
goto remove_opps;
@@ -657,49 +867,33 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tegra);
- tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
- err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to register rate change notifier\n");
- goto remove_opps;
- }
+ tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
+ tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
+
+ INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
+ tegra_actmon_delayed_update);
err = devfreq_add_governor(&tegra_devfreq_governor);
if (err) {
dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
- goto unreg_notifier;
+ goto remove_opps;
}
tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
- tegra->devfreq = devfreq_add_device(&pdev->dev,
- &tegra_devfreq_profile,
- "tegra_actmon",
- NULL);
- if (IS_ERR(tegra->devfreq)) {
- err = PTR_ERR(tegra->devfreq);
- goto remove_governor;
- }
+ tegra_devfreq_profile.initial_freq /= KHZ;
- err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
- actmon_thread_isr, IRQF_ONESHOT,
- "tegra-devfreq", tegra);
- if (err) {
- dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
- goto remove_devfreq;
+ devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
+ "tegra_actmon", NULL);
+ if (IS_ERR(devfreq)) {
+ err = PTR_ERR(devfreq);
+ goto remove_governor;
}
return 0;
-remove_devfreq:
- devfreq_remove_device(tegra->devfreq);
-
remove_governor:
devfreq_remove_governor(&tegra_devfreq_governor);
-unreg_notifier:
- clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
-
remove_opps:
dev_pm_opp_remove_all_dynamic(&pdev->dev);
@@ -716,7 +910,6 @@ static int tegra_devfreq_remove(struct platform_device *pdev)
devfreq_remove_device(tegra->devfreq);
devfreq_remove_governor(&tegra_devfreq_governor);
- clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
dev_pm_opp_remove_all_dynamic(&pdev->dev);
reset_control_reset(tegra->reset);
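For the do_percent() change above, a worked example of the overflow being fixed (numbers illustrative): on 32-bit ARM, unsigned long is 32 bits wide, so with a 900 MHz EMC clock and the new 256 ms maximum sampling period the old 32-bit multiply wraps:

/*
 *   val       = 900000 kHz * 256 ms =    230400000   (fits in 32 bits)
 *   val * pct = 230400000 * 800     = 184320000000   (wraps in 32 bits)
 *
 * Hence the widened unsigned long long math with do_div(), plus the
 * min_t(u64, val, U32_MAX) clamp for combinations that still exceed
 * the 32-bit watermark registers after the division by 100.
 */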
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 433d91d710e4..d377b4ca66bf 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
@@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
dmabuf->name = name;
out_unlock:
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return ret;
}
@@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
}
static const struct file_operations dma_buf_fops = {
@@ -525,6 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
+ if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ exp_info->ops->dynamic_mapping))
+ return ERR_PTR(-EINVAL);
+
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
@@ -645,10 +649,11 @@ void dma_buf_put(struct dma_buf *dmabuf)
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
- * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
* calls attach() of dma_buf_ops to allow device-specific attach functionality
- * @dmabuf: [in] buffer to attach device to.
- * @dev: [in] device to be attached.
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ * @dynamic_mapping: [in] calling convention for map/unmap
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -662,8 +667,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* accessible to @dev, and cannot be moved to a more suitable place. This is
* indicated with the error code -EBUSY.
*/
-struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping)
{
struct dma_buf_attachment *attach;
int ret;
@@ -677,25 +683,69 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
attach->dev = dev;
attach->dmabuf = dmabuf;
-
- mutex_lock(&dmabuf->lock);
+ attach->dynamic_mapping = dynamic_mapping;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
+ dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
+ dma_resv_unlock(dmabuf->resv);
- mutex_unlock(&dmabuf->lock);
+	/* When either the importer or the exporter can't handle dynamic
+	 * mappings, we cache the mapping here to avoid issues with the
+ * reservation object lock.
+ */
+ if (dma_buf_attachment_is_dynamic(attach) !=
+ dma_buf_is_dynamic(dmabuf)) {
+ struct sg_table *sgt;
+
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
+ sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ if (!sgt)
+ sgt = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_unlock;
+ }
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ attach->sgt = sgt;
+ attach->dir = DMA_BIDIRECTIONAL;
+ }
return attach;
err_attach:
kfree(attach);
- mutex_unlock(&dmabuf->lock);
+ return ERR_PTR(ret);
+
+err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return dma_buf_dynamic_attach(dmabuf, dev, false);
+}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
@@ -711,15 +761,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
- if (attach->sgt)
+ if (attach->sgt) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- mutex_lock(&dmabuf->lock);
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+
+ dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
+ dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
- mutex_unlock(&dmabuf->lock);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -749,6 +806,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt) {
/*
* Two mappings with different directions for the same
@@ -761,6 +821,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -793,9 +856,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt == sg_table)
return;
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -1171,13 +1240,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
- ret = mutex_lock_interruptible(&buf_obj->lock);
- if (ret) {
- seq_puts(s,
- "\tERROR locking buffer object: skipping\n");
- continue;
- }
+ ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+ if (ret)
+ goto error_unlock;
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
@@ -1223,19 +1289,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
attach_count++;
}
+ dma_resv_unlock(buf_obj->resv);
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
size += buf_obj->size;
- mutex_unlock(&buf_obj->lock);
}
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
mutex_unlock(&db_list.lock);
return 0;
+
+error_unlock:
+ mutex_unlock(&db_list.lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
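With dynamic mapping, the importer takes over the buffer's dma_resv lock around map/unmap, while dma_buf_attach() preserves the old static convention (the core caches one bidirectional mapping at attach time when importer and exporter disagree). A hedged importer-side sketch under the new rules:

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* importer that understands the dynamic calling convention */
	attach = dma_buf_dynamic_attach(dmabuf, dev, true);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}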
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 2c136aee3e79..052a41e2451c 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_fence_free);
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ bool was_set;
+
+ lockdep_assert_held(fence->lock);
+
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return false;
+
+ if (!was_set && fence->ops->enable_signaling) {
+ trace_dma_fence_enable_signal(fence);
+
+ if (!fence->ops->enable_signaling(fence)) {
+ dma_fence_signal_locked(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: the fence to enable
@@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
- fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- spin_lock_irqsave(fence->lock, flags);
-
- if (!fence->ops->enable_signaling(fence))
- dma_fence_signal_locked(fence);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return;
- spin_unlock_irqrestore(fence->lock, flags);
- }
+ spin_lock_irqsave(fence->lock, flags);
+ __dma_fence_enable_signaling(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
{
unsigned long flags;
int ret = 0;
- bool was_set;
if (WARN_ON(!fence || !func))
return -EINVAL;
@@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- ret = -ENOENT;
- else if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- ret = -ENOENT;
- }
- }
-
- if (!ret) {
+ if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
- } else
+ } else {
INIT_LIST_HEAD(&cb->node);
+ ret = -ENOENT;
+ }
+
spin_unlock_irqrestore(fence->lock, flags);
return ret;
@@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret;
@@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!__dma_fence_enable_signaling(fence))
goto out;
- if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- goto out;
- }
- }
-
if (!timeout) {
ret = 0;
goto out;
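
The refactor above pulls the open-coded enable-signaling sequence out of dma_fence_enable_sw_signaling(), dma_fence_add_callback() and dma_fence_default_wait() into one lock-held helper without changing the callers' contract. A hedged usage sketch of that contract (my_cb and my_arm_callback are hypothetical):

#include <linux/dma-fence.h>

static void my_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* Runs once, from the fence's signaling context. */
}

static void my_arm_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	int ret = dma_fence_add_callback(fence, cb, my_cb);

	/* -ENOENT means the fence already signaled: run the work inline. */
	if (ret == -ENOENT)
		my_cb(fence, cb);
}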
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index fbda4b876afd..e91cf1147a4e 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
+#include <linux/mfd/altera-sysmgr.h>
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of_address.h>
@@ -275,7 +276,6 @@ release:
return ret;
}
-static int socfpga_is_a10(void);
static int altr_sdram_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
@@ -399,7 +399,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
goto err;
/* Only the Arria10 has separate IRQs */
- if (socfpga_is_a10()) {
+ if (of_machine_is_compatible("altr,socfpga-arria10")) {
/* Arria10 specific initialization */
res = a10_init(mc_vbase);
if (res < 0)
@@ -502,68 +502,6 @@ module_platform_driver(altr_sdram_edac_driver);
#endif /* CONFIG_EDAC_ALTERA_SDRAM */
-/**************** Stratix 10 EDAC Memory Controller Functions ************/
-
-/**
- * s10_protected_reg_write
- * Write to a protected SMC register.
- * @context: Not used.
- * @reg: Address of register
- * @value: Value to write
- * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
- * INTEL_SIP_SMC_REG_ERROR on error
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
- */
-static int s10_protected_reg_write(void *context, unsigned int reg,
- unsigned int val)
-{
- struct arm_smccc_res result;
- unsigned long offset = (unsigned long)context;
-
- arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, offset + reg, val, 0, 0,
- 0, 0, 0, &result);
-
- return (int)result.a0;
-}
-
-/**
- * s10_protected_reg_read
- * Read the status of a protected SMC register
- * @context: Not used.
- * @reg: Address of register
- * @value: Value read.
- * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
- * INTEL_SIP_SMC_REG_ERROR on error
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
- */
-static int s10_protected_reg_read(void *context, unsigned int reg,
- unsigned int *val)
-{
- struct arm_smccc_res result;
- unsigned long offset = (unsigned long)context;
-
- arm_smccc_smc(INTEL_SIP_SMC_REG_READ, offset + reg, 0, 0, 0,
- 0, 0, 0, &result);
-
- *val = (unsigned int)result.a1;
-
- return (int)result.a0;
-}
-
-static const struct regmap_config s10_sdram_regmap_cfg = {
- .name = "s10_ddr",
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = 0xffd12228,
- .reg_read = s10_protected_reg_read,
- .reg_write = s10_protected_reg_write,
- .use_single_read = true,
- .use_single_write = true,
-};
-
-/************** </Stratix10 EDAC Memory Controller Functions> ***********/
-
/************************* EDAC Parent Probe *************************/
static const struct of_device_id altr_edac_device_of_match[];
@@ -1008,16 +946,6 @@ static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
return ret;
}
-static int socfpga_is_a10(void)
-{
- return of_machine_is_compatible("altr,socfpga-arria10");
-}
-
-static int socfpga_is_s10(void)
-{
- return of_machine_is_compatible("altr,socfpga-stratix10");
-}
-
static __init int __maybe_unused
altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
u32 ecc_ctrl_en_mask, bool dual_port)
@@ -1033,34 +961,10 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
/* Get the ECC Manager - parent of the device EDACs */
np_eccmgr = of_get_parent(np);
- if (socfpga_is_a10()) {
- ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
- "altr,sysmgr-syscon");
- } else {
- struct device_node *sysmgr_np;
- struct resource res;
- uintptr_t base;
-
- sysmgr_np = of_parse_phandle(np_eccmgr,
- "altr,sysmgr-syscon", 0);
- if (!sysmgr_np) {
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Unable to find altr,sysmgr-syscon\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(sysmgr_np, 0, &res)) {
- of_node_put(sysmgr_np);
- return -ENOMEM;
- }
+ ecc_mgr_map =
+ altr_sysmgr_regmap_lookup_by_phandle(np_eccmgr,
+ "altr,sysmgr-syscon");
- /* Need physical address for SMCC call */
- base = res.start;
-
- ecc_mgr_map = regmap_init(NULL, NULL, (void *)base,
- &s10_sdram_regmap_cfg);
- of_node_put(sysmgr_np);
- }
of_node_put(np_eccmgr);
if (IS_ERR(ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1125,9 +1029,6 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
int irq;
struct device_node *child, *np;
- if (!socfpga_is_a10() && !socfpga_is_s10())
- return -ENODEV;
-
np = of_find_compatible_node(NULL, NULL,
"altr,socfpga-a10-ecc-manager");
if (!np) {
@@ -2178,33 +2079,9 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->a10_ecc_devices);
- if (socfpga_is_a10()) {
- edac->ecc_mgr_map =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "altr,sysmgr-syscon");
- } else {
- struct device_node *sysmgr_np;
- struct resource res;
- uintptr_t base;
-
- sysmgr_np = of_parse_phandle(pdev->dev.of_node,
- "altr,sysmgr-syscon", 0);
- if (!sysmgr_np) {
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Unable to find altr,sysmgr-syscon\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(sysmgr_np, 0, &res))
- return -ENOMEM;
-
- /* Need physical address for SMCC call */
- base = res.start;
-
- edac->ecc_mgr_map = devm_regmap_init(&pdev->dev, NULL,
- (void *)base,
- &s10_sdram_regmap_cfg);
- }
+ edac->ecc_mgr_map =
+ altr_sysmgr_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon");
if (IS_ERR(edac->ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -2270,18 +2147,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
if (!of_device_is_available(child))
continue;
- if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-a10-ocram-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-eth-mac-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-nand-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
-#ifdef CONFIG_EDAC_ALTERA_SDRAM
- of_device_is_compatible(child, "altr,sdram-edac-s10") ||
-#endif
- of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
-
+ if (of_match_node(altr_edac_a10_device_of_match, child))
altr_edac_a10_device_add(edac, child);
#ifdef CONFIG_EDAC_ALTERA_SDRAM
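
Matching children against the altr_edac_a10_device_of_match table replaces the long of_device_is_compatible() chain removed above. A sketch of the shape such a table takes (entries abbreviated and assumed; the full list mirrors the compatibles that the chain used to test):

static const struct of_device_id altr_edac_a10_device_of_match[] = {
	{ .compatible = "altr,socfpga-a10-l2-ecc" },
	{ .compatible = "altr,socfpga-a10-ocram-ecc" },
	{ .compatible = "altr,socfpga-eth-mac-ecc" },
	/* ... one entry per supported ECC block ... */
	{ /* sentinel */ }
};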
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c1d4536ae466..428ce98f6776 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -16,12 +16,11 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
+static struct amd64_family_type *fam_type;
+
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
-/* Number of Unified Memory Controllers */
-static u8 num_umcs;
-
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -454,7 +453,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
#define for_each_umc(i) \
- for (i = 0; i < num_umcs; i++)
+ for (i = 0; i < fam_type->max_mcs; i++)
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
@@ -2224,6 +2223,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+ .max_mcs = 2,
.ops = {
.early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
@@ -2234,6 +2234,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F10h",
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2244,6 +2245,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F15h",
.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2254,6 +2256,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F15h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2264,6 +2267,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F15h_M60h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2274,6 +2278,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F16h",
.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2284,6 +2289,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F16h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2294,6 +2300,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h",
.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2303,6 +2310,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h_M10h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2312,6 +2320,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h_M30h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
+ .max_mcs = 8,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2321,6 +2330,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h_M70h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2838,8 +2848,6 @@ skip:
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
determine_ecc_sym_sz(pvt);
-
- dump_misc_regs(pvt);
}
/*
@@ -2936,6 +2944,7 @@ static int init_csrows_df(struct mem_ctl_info *mci)
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
+ dimm->grain = 64;
}
}
@@ -3012,6 +3021,7 @@ static int init_csrows(struct mem_ctl_info *mci)
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
+ dimm->grain = 64;
}
}
@@ -3178,43 +3188,27 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
amd64_warn("Error restoring NB MCGCTL settings!\n");
}
-/*
- * EDAC requires that the BIOS have ECC enabled before
- * taking over the processing of ECC errors. A command line
- * option allows to force-enable hardware ECC later in
- * enable_ecc_error_reporting().
- */
-static const char *ecc_msg =
- "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
- " Either enable ECC checking or force module loading by setting "
- "'ecc_enable_override'.\n"
- " (Note that use of the override may cause unknown side effects.)\n";
-
-static bool ecc_enabled(struct pci_dev *F3, u16 nid)
+static bool ecc_enabled(struct amd64_pvt *pvt)
{
+ u16 nid = pvt->mc_node_id;
bool nb_mce_en = false;
u8 ecc_en = 0, i;
u32 value;
if (boot_cpu_data.x86 >= 0x17) {
u8 umc_en_mask = 0, ecc_en_mask = 0;
+ struct amd64_umc *umc;
for_each_umc(i) {
- u32 base = get_umc_base(i);
+ umc = &pvt->umc[i];
/* Only check enabled UMCs. */
- if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
- continue;
-
- if (!(value & UMC_SDP_INIT))
+ if (!(umc->sdp_ctrl & UMC_SDP_INIT))
continue;
umc_en_mask |= BIT(i);
- if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
- continue;
-
- if (value & UMC_ECC_ENABLED)
+ if (umc->umc_cap_hi & UMC_ECC_ENABLED)
ecc_en_mask |= BIT(i);
}
@@ -3227,7 +3221,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
/* Assume UMC MCA banks are enabled. */
nb_mce_en = true;
} else {
- amd64_read_pci_cfg(F3, NBCFG, &value);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
ecc_en = !!(value & NBCFG_ECC_ENABLE);
@@ -3240,11 +3234,10 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
amd64_info("Node %d: DRAM ECC %s.\n",
nid, (ecc_en ? "enabled" : "disabled"));
- if (!ecc_en || !nb_mce_en) {
- amd64_info("%s", ecc_msg);
+ if (!ecc_en || !nb_mce_en)
return false;
- }
- return true;
+ else
+ return true;
}
static inline void
@@ -3278,8 +3271,7 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
}
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
- struct amd64_family_type *fam)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
@@ -3298,7 +3290,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = fam->ctl_name;
+ mci->ctl_name = fam_type->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
@@ -3312,8 +3304,6 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
*/
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
- struct amd64_family_type *fam_type = NULL;
-
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
@@ -3401,51 +3391,15 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
-/* Set the number of Unified Memory Controllers in the system. */
-static void compute_num_umcs(void)
-{
- u8 model = boot_cpu_data.x86_model;
-
- if (boot_cpu_data.x86 < 0x17)
- return;
-
- if (model >= 0x30 && model <= 0x3f)
- num_umcs = 8;
- else
- num_umcs = 2;
-
- edac_dbg(1, "Number of UMCs: %x", num_umcs);
-}
-
-static int init_one_instance(unsigned int nid)
+static int hw_info_get(struct amd64_pvt *pvt)
{
- struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
- struct amd64_family_type *fam_type = NULL;
- struct mem_ctl_info *mci = NULL;
- struct edac_mc_layer layers[2];
- struct amd64_pvt *pvt = NULL;
u16 pci_id1, pci_id2;
- int err = 0, ret;
-
- ret = -ENOMEM;
- pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
- if (!pvt)
- goto err_ret;
-
- pvt->mc_node_id = nid;
- pvt->F3 = F3;
-
- ret = -EINVAL;
- fam_type = per_family_init(pvt);
- if (!fam_type)
- goto err_free;
+ int ret = -EINVAL;
if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
- if (!pvt->umc) {
- ret = -ENOMEM;
- goto err_free;
- }
+ pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc)
+ return -ENOMEM;
pci_id1 = fam_type->f0_id;
pci_id2 = fam_type->f6_id;
@@ -3454,21 +3408,37 @@ static int init_one_instance(unsigned int nid)
pci_id2 = fam_type->f2_id;
}
- err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
- if (err)
- goto err_post_init;
+ ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
+ if (ret)
+ return ret;
read_mc_regs(pvt);
+ return 0;
+}
+
+static void hw_info_put(struct amd64_pvt *pvt)
+{
+ if (pvt->F0 || pvt->F1)
+ free_mc_sibling_devs(pvt);
+
+ kfree(pvt->umc);
+}
+
+static int init_one_instance(struct amd64_pvt *pvt)
+{
+ struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
+ int ret = -EINVAL;
+
/*
* We need to determine how many memory channels there are. Then use
* that information for calculating the size of the dynamic instance
* tables in the 'mci' structure.
*/
- ret = -EINVAL;
pvt->channel_count = pvt->ops->early_channel_count(pvt);
if (pvt->channel_count < 0)
- goto err_siblings;
+ return ret;
ret = -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
@@ -3480,24 +3450,18 @@ static int init_one_instance(unsigned int nid)
* Always allocate two channels since we can have setups with DIMMs on
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
- *
- * On Fam17h+, the number of controllers may be greater than two. So set
- * the size equal to the maximum number of UMCs.
*/
- if (pvt->fam >= 0x17)
- layers[1].size = num_umcs;
- else
- layers[1].size = 2;
+ layers[1].size = fam_type->max_mcs;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
if (!mci)
- goto err_siblings;
+ return ret;
mci->pvt_info = pvt;
mci->pdev = &pvt->F3->dev;
- setup_mci_misc_attrs(mci, fam_type);
+ setup_mci_misc_attrs(mci);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -3505,31 +3469,30 @@ static int init_one_instance(unsigned int nid)
ret = -ENODEV;
if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
edac_dbg(1, "failed edac_mc_add_mc()\n");
- goto err_add_mc;
+ edac_mc_free(mci);
+ return ret;
}
return 0;
+}
-err_add_mc:
- edac_mc_free(mci);
-
-err_siblings:
- free_mc_sibling_devs(pvt);
-
-err_post_init:
- if (pvt->fam >= 0x17)
- kfree(pvt->umc);
+static bool instance_has_memory(struct amd64_pvt *pvt)
+{
+ bool cs_enabled = false;
+ int cs = 0, dct = 0;
-err_free:
- kfree(pvt);
+ for (dct = 0; dct < fam_type->max_mcs; dct++) {
+ for_each_chip_select(cs, dct, pvt)
+ cs_enabled |= csrow_enabled(cs, dct, pvt);
+ }
-err_ret:
- return ret;
+ return cs_enabled;
}
static int probe_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+ struct amd64_pvt *pvt = NULL;
struct ecc_settings *s;
int ret;
@@ -3540,8 +3503,29 @@ static int probe_one_instance(unsigned int nid)
ecc_stngs[nid] = s;
- if (!ecc_enabled(F3, nid)) {
- ret = 0;
+ pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+ if (!pvt)
+ goto err_settings;
+
+ pvt->mc_node_id = nid;
+ pvt->F3 = F3;
+
+ fam_type = per_family_init(pvt);
+ if (!fam_type)
+ goto err_enable;
+
+ ret = hw_info_get(pvt);
+ if (ret < 0)
+ goto err_enable;
+
+ ret = 0;
+ if (!instance_has_memory(pvt)) {
+ amd64_info("Node %d: No DIMMs detected.\n", nid);
+ goto err_enable;
+ }
+
+ if (!ecc_enabled(pvt)) {
+ ret = -ENODEV;
if (!ecc_enable_override)
goto err_enable;
@@ -3556,7 +3540,7 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
- ret = init_one_instance(nid);
+ ret = init_one_instance(pvt);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
@@ -3566,9 +3550,15 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
+ dump_misc_regs(pvt);
+
return ret;
err_enable:
+ hw_info_put(pvt);
+ kfree(pvt);
+
+err_settings:
kfree(s);
ecc_stngs[nid] = NULL;
@@ -3595,14 +3585,13 @@ static void remove_one_instance(unsigned int nid)
restore_ecc_error_reporting(s, nid, F3);
- free_mc_sibling_devs(pvt);
-
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
/* Free the EDAC CORE resources */
mci->pvt_info = NULL;
+ hw_info_put(pvt);
kfree(pvt);
edac_mc_free(mci);
}
@@ -3668,8 +3657,6 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
- compute_num_umcs();
-
for (i = 0; i < amd_nb_num(); i++) {
err = probe_one_instance(i);
if (err) {
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8c3cda81e619..9be31688110b 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -479,6 +479,8 @@ struct low_ops {
struct amd64_family_type {
const char *ctl_name;
u16 f0_id, f1_id, f2_id, f6_id;
+ /* Maximum number of memory controllers per die/node. */
+ u8 max_mcs;
struct low_ops ops;
};
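
With max_mcs carried in the family table, both the UMC iteration and the per-instance allocations key off the CPU family instead of a runtime-computed global. A condensed sketch of the relationship, with values taken from the family_types[] hunks above:

/* F17h_M30h: 8 UMCs per die; every other entry in the table: 2. */
#define for_each_umc(i) \
	for (i = 0; i < fam_type->max_mcs; i++)

pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
layers[1].size = fam_type->max_mcs;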
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 5634437bb39d..09a9e3de9595 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -281,16 +281,11 @@ static int aspeed_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
- struct resource *res;
void __iomem *regs;
u32 reg04;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 65cf2b9355c4..8c4d947fb848 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -555,12 +555,16 @@ static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
return edac_dev->panic_on_ue;
}
-void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
+void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
+ if (!count)
+ return;
+
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: 'instance' out of range "
@@ -582,27 +586,31 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
if (instance->nr_blocks > 0) {
block = instance->blocks + block_nr;
- block->counters.ce_count++;
+ block->counters.ce_count += count;
}
/* Propagate the count up the 'totals' tree */
- instance->counters.ce_count++;
- edac_dev->counters.ce_count++;
+ instance->counters.ce_count += count;
+ edac_dev->counters.ce_count += count;
if (edac_device_get_log_ce(edac_dev))
edac_device_printk(edac_dev, KERN_WARNING,
- "CE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ "CE: %s instance: %s block: %s count: %d '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", count, msg);
}
-EXPORT_SYMBOL_GPL(edac_device_handle_ce);
+EXPORT_SYMBOL_GPL(edac_device_handle_ce_count);
-void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
+void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
+ if (!count)
+ return;
+
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: 'instance' out of range "
@@ -624,22 +632,22 @@ void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
if (instance->nr_blocks > 0) {
block = instance->blocks + block_nr;
- block->counters.ue_count++;
+ block->counters.ue_count += count;
}
/* Propagate the count up the 'totals' tree */
- instance->counters.ue_count++;
- edac_dev->counters.ue_count++;
+ instance->counters.ue_count += count;
+ edac_dev->counters.ue_count += count;
if (edac_device_get_log_ue(edac_dev))
edac_device_printk(edac_dev, KERN_EMERG,
- "UE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ "UE: %s instance: %s block: %s count: %d '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", count, msg);
if (edac_device_get_panic_on_ue(edac_dev))
- panic("EDAC %s: UE instance: %s block %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ panic("EDAC %s: UE instance: %s block %s count: %d '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", count, msg);
}
-EXPORT_SYMBOL_GPL(edac_device_handle_ue);
+EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
index 1aaba74ae411..c4c0e0bdce14 100644
--- a/drivers/edac/edac_device.h
+++ b/drivers/edac/edac_device.h
@@ -286,27 +286,60 @@ extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
/**
- * edac_device_handle_ue():
- * perform a common output and handling of an 'edac_dev' UE event
+ * edac_device_handle_ce_count(): Log correctable errors.
*
* @edac_dev: pointer to struct &edac_device_ctl_info
- * @inst_nr: number of the instance where the UE error happened
- * @block_nr: number of the block where the UE error happened
+ * @count: number of errors to log
+ * @inst_nr: number of the instance where the CE error happened
+ * @block_nr: number of the block where the CE error happened
+ * @msg: message to be printed
+ */
+void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg);
+
+/**
+ * edac_device_handle_ue_count(): Log uncorrectable errors.
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @count: number of errors to log
+ * @inst_nr: number of the instance where the UE error happened
+ * @block_nr: number of the block where the UE error happened
* @msg: message to be printed
*/
-extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg);
+void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg);
+
/**
- * edac_device_handle_ce():
- * perform a common output and handling of an 'edac_dev' CE event
+ * edac_device_handle_ce(): Log a single correctable error
*
* @edac_dev: pointer to struct &edac_device_ctl_info
* @inst_nr: number of the instance where the CE error happened
* @block_nr: number of the block where the CE error happened
* @msg: message to be printed
*/
-extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg);
+static inline void
+edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, int inst_nr,
+ int block_nr, const char *msg)
+{
+ edac_device_handle_ce_count(edac_dev, 1, inst_nr, block_nr, msg);
+}
+
+/**
+ * edac_device_handle_ue(): Log a single uncorrectable error
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the UE error happened
+ * @block_nr: number of the block where the UE error happened
+ * @msg: message to be printed
+ */
+static inline void
+edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, int inst_nr,
+ int block_nr, const char *msg)
+{
+ edac_device_handle_ue_count(edac_dev, 1, inst_nr, block_nr, msg);
+}
/**
* edac_device_alloc_index: Allocate a unique device index number
@@ -316,5 +349,4 @@ extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
*/
extern int edac_device_alloc_index(void);
extern const char *edac_layer_name[];
-
#endif
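
The count-based entry points let a driver fold a whole hardware error counter into a single log call, with the old single-error helpers preserved as inline wrappers. A hedged usage sketch (the device and the counter-read helper are hypothetical):

static void my_edac_check(struct edac_device_ctl_info *edac_dev)
{
	/* my_read_and_clear_ce_counter() stands in for real hardware access. */
	unsigned int ce = my_read_and_clear_ce_counter();

	/* A zero count returns early, so no pre-check is needed. */
	edac_device_handle_ce_count(edac_dev, ce, 0, 0, "bus ECC");
}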
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e6fd079783bd..7243b88f81d8 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -145,15 +145,18 @@ static void edac_mc_dump_channel(struct rank_info *chan)
edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}
-static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
char location[80];
+ if (!dimm->nr_pages)
+ return;
+
edac_dimm_info_location(dimm, location, sizeof(location));
edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
dimm->mci->csbased ? "rank" : "dimm",
- number, location, dimm->csrow, dimm->cschannel);
+ dimm->idx, location, dimm->csrow, dimm->cschannel);
edac_dbg(4, " dimm = %p\n", dimm);
edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
@@ -314,25 +317,28 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
struct dimm_info *dimm;
u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
unsigned int pos[EDAC_MAX_LAYERS];
- unsigned int size, tot_dimms = 1, count = 1;
+ unsigned int idx, size, tot_dimms = 1, count = 1;
unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
void *pvt, *p, *ptr = NULL;
- int i, j, row, chn, n, len, off;
+ int i, j, row, chn, n, len;
bool per_rank = false;
- BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+ if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
+ return NULL;
+
/*
* Calculate the total amount of dimms and csrows/cschannels while
* in the old API emulation mode
*/
- for (i = 0; i < n_layers; i++) {
- tot_dimms *= layers[i].size;
- if (layers[i].is_virt_csrow)
- tot_csrows *= layers[i].size;
+ for (idx = 0; idx < n_layers; idx++) {
+ tot_dimms *= layers[idx].size;
+
+ if (layers[idx].is_virt_csrow)
+ tot_csrows *= layers[idx].size;
else
- tot_channels *= layers[i].size;
+ tot_channels *= layers[idx].size;
- if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+ if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
per_rank = true;
}
@@ -425,19 +431,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
memset(&pos, 0, sizeof(pos));
row = 0;
chn = 0;
- for (i = 0; i < tot_dimms; i++) {
+ for (idx = 0; idx < tot_dimms; idx++) {
chan = mci->csrows[row]->channels[chn];
- off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
- if (off < 0 || off >= tot_dimms) {
- edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
- goto error;
- }
dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
if (!dimm)
goto error;
- mci->dimms[off] = dimm;
+ mci->dimms[idx] = dimm;
dimm->mci = mci;
+ dimm->idx = idx;
/*
* Copy DIMM location and initialize it.
@@ -714,6 +716,7 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
edac_mc_dump_mci(mci);
if (edac_debug_level >= 4) {
+ struct dimm_info *dimm;
int i;
for (i = 0; i < mci->nr_csrows; i++) {
@@ -730,9 +733,9 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
if (csrow->channels[j]->dimm->nr_pages)
edac_mc_dump_channel(csrow->channels[j]);
}
- for (i = 0; i < mci->tot_dimms; i++)
- if (mci->dimms[i]->nr_pages)
- edac_mc_dump_dimm(mci->dimms[i], i);
+
+ mci_for_each_dimm(mci, dimm)
+ edac_mc_dump_dimm(dimm);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -1055,6 +1058,21 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
{
char detail[80];
int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+ u8 grain_bits;
+
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+
+ grain_bits = fls_long(e->grain - 1);
+
+ /* Report the error via the trace interface */
+ if (IS_ENABLED(CONFIG_RAS))
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer,
+ e->low_layer,
+ (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
+ grain_bits, e->syndrome, e->other_detail);
/* Memory type dependent details about the error */
if (type == HW_EVENT_ERR_CORRECTED) {
@@ -1090,11 +1108,11 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *msg,
const char *other_detail)
{
+ struct dimm_info *dimm;
char *p;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
- u8 grain_bits;
struct edac_raw_error_desc *e = &mci->error_desc;
edac_dbg(3, "MC%d\n", mci->mc_idx);
@@ -1150,9 +1168,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
p = e->label;
*p = '\0';
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = mci->dimms[i];
-
+ mci_for_each_dimm(mci, dimm) {
if (top_layer >= 0 && top_layer != dimm->location[0])
continue;
if (mid_layer >= 0 && mid_layer != dimm->location[1])
@@ -1170,37 +1186,37 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* channel/memory controller/... may be affected.
* Also, don't show errors for empty DIMM slots.
*/
- if (e->enable_per_layer_report && dimm->nr_pages) {
- if (n_labels >= EDAC_MAX_LABELS) {
- e->enable_per_layer_report = false;
- break;
- }
- n_labels++;
- if (p != e->label) {
- strcpy(p, OTHER_LABEL);
- p += strlen(OTHER_LABEL);
- }
- strcpy(p, dimm->label);
- p += strlen(p);
- *p = '\0';
+ if (!e->enable_per_layer_report || !dimm->nr_pages)
+ continue;
- /*
- * get csrow/channel of the DIMM, in order to allow
- * incrementing the compat API counters
- */
- edac_dbg(4, "%s csrows map: (%d,%d)\n",
- mci->csbased ? "rank" : "dimm",
- dimm->csrow, dimm->cschannel);
- if (row == -1)
- row = dimm->csrow;
- else if (row >= 0 && row != dimm->csrow)
- row = -2;
-
- if (chan == -1)
- chan = dimm->cschannel;
- else if (chan >= 0 && chan != dimm->cschannel)
- chan = -2;
+ if (n_labels >= EDAC_MAX_LABELS) {
+ e->enable_per_layer_report = false;
+ break;
+ }
+ n_labels++;
+ if (p != e->label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
}
+ strcpy(p, dimm->label);
+ p += strlen(p);
+
+ /*
+ * get csrow/channel of the DIMM, in order to allow
+ * incrementing the compat API counters
+ */
+ edac_dbg(4, "%s csrows map: (%d,%d)\n",
+ mci->csbased ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
+ if (row == -1)
+ row = dimm->csrow;
+ else if (row >= 0 && row != dimm->csrow)
+ row = -2;
+
+ if (chan == -1)
+ chan = dimm->cschannel;
+ else if (chan >= 0 && chan != dimm->cschannel)
+ chan = -2;
}
if (!e->enable_per_layer_report) {
@@ -1234,20 +1250,6 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
if (p > e->location)
*(p - 1) = '\0';
- /* Sanity-check driver-supplied grain value. */
- if (WARN_ON_ONCE(!e->grain))
- e->grain = 1;
-
- grain_bits = fls_long(e->grain - 1);
-
- /* Report the error via the trace interface */
- if (IS_ENABLED(CONFIG_RAS))
- trace_mc_event(type, e->msg, e->label, e->error_count,
- mci->mc_idx, e->top_layer, e->mid_layer,
- e->low_layer,
- (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
- grain_bits, e->syndrome, e->other_detail);
-
edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
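
The trace-event reporting moved into edac_raw_mc_handle_error() computes grain_bits = fls_long(e->grain - 1), i.e. ceil(log2(grain)), after clamping a zero grain to 1. Worked examples:

	grain = 1            ->  fls_long(0)    = 0
	grain = 64 (DDR ECC) ->  fls_long(63)   = 6
	grain = 4096 (page)  ->  fls_long(4095) = 12

so the tracepoint always reports the error granularity as a power-of-two bit count, rounding up for drivers that pass a non-power-of-two grain.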
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 32d016f1ecd1..0367554e7437 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -557,14 +557,8 @@ static ssize_t dimmdev_ce_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
u32 count;
- int off;
-
- off = EDAC_DIMM_OFF(dimm->mci->layers,
- dimm->mci->n_layers,
- dimm->location[0],
- dimm->location[1],
- dimm->location[2]);
- count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][off];
+
+ count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][dimm->idx];
return sprintf(data, "%u\n", count);
}
@@ -574,14 +568,8 @@ static ssize_t dimmdev_ue_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
u32 count;
- int off;
-
- off = EDAC_DIMM_OFF(dimm->mci->layers,
- dimm->mci->n_layers,
- dimm->location[0],
- dimm->location[1],
- dimm->location[2]);
- count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][off];
+
+ count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][dimm->idx];
return sprintf(data, "%u\n", count);
}
@@ -633,8 +621,7 @@ static const struct device_type dimm_attr_type = {
/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
- struct dimm_info *dimm,
- int index)
+ struct dimm_info *dimm)
{
int err;
dimm->mci = mci;
@@ -644,9 +631,9 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dimm->dev.parent = &mci->dev;
if (mci->csbased)
- dev_set_name(&dimm->dev, "rank%d", index);
+ dev_set_name(&dimm->dev, "rank%d", dimm->idx);
else
- dev_set_name(&dimm->dev, "dimm%d", index);
+ dev_set_name(&dimm->dev, "dimm%d", dimm->idx);
dev_set_drvdata(&dimm->dev, dimm);
pm_runtime_forbid(&mci->dev);
@@ -928,7 +915,8 @@ static const struct device_type mci_attr_type = {
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
- int i, err;
+ struct dimm_info *dimm;
+ int err;
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
@@ -952,13 +940,12 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
/*
* Create the dimm/rank devices
*/
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = mci->dimms[i];
+ mci_for_each_dimm(mci, dimm) {
/* Only expose populated DIMMs */
if (!dimm->nr_pages)
continue;
- err = edac_create_dimm_object(mci, dimm, i);
+ err = edac_create_dimm_object(mci, dimm);
if (err)
goto fail_unregister_dimm;
}
@@ -973,12 +960,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
return 0;
fail_unregister_dimm:
- for (i--; i >= 0; i--) {
- struct dimm_info *dimm = mci->dimms[i];
- if (!dimm->nr_pages)
- continue;
-
- device_unregister(&dimm->dev);
+ mci_for_each_dimm(mci, dimm) {
+ if (device_is_registered(&dimm->dev))
+ device_unregister(&dimm->dev);
}
device_unregister(&mci->dev);
@@ -990,7 +974,7 @@ fail_unregister_dimm:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ struct dimm_info *dimm;
edac_dbg(0, "\n");
@@ -1001,8 +985,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
edac_delete_csrow_objects(mci);
#endif
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = mci->dimms[i];
+ mci_for_each_dimm(mci, dimm) {
if (dimm->nr_pages == 0)
continue;
edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
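
The sysfs paths above now walk DIMMs with mci_for_each_dimm() and identify each one by its stored dimm->idx rather than a caller-maintained loop index. A sketch of what such an iterator amounts to (assumed shape for illustration, not the exact edac.h definition):

#define mci_for_each_dimm(mci, dimm)					\
	for ((dimm) = (mci)->dimms[0];					\
	     (dimm);							\
	     (dimm) = ((dimm)->idx + 1 < (mci)->tot_dimms) ?		\
		      (mci)->dimms[(dimm)->idx + 1] : NULL)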
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 0bb62857ffb2..b99080d8a10c 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -21,14 +21,22 @@ struct ghes_edac_pvt {
struct mem_ctl_info *mci;
/* Buffers for the error handling routine */
- char detail_location[240];
- char other_detail[160];
+ char other_detail[400];
char msg[80];
};
-static atomic_t ghes_init = ATOMIC_INIT(0);
+static refcount_t ghes_refcount = REFCOUNT_INIT(0);
+
+/*
+ * Access to ghes_pvt must be protected by ghes_lock. The spinlock
+ * also provides the necessary (implicit) memory barrier for the SMP
+ * case to make the pointer visible on another CPU.
+ */
static struct ghes_edac_pvt *ghes_pvt;
+/* GHES registration mutex */
+static DEFINE_MUTEX(ghes_reg_mutex);
+
/*
* Sync with other, potentially concurrent callers of
* ghes_edac_report_mem_error(). We don't know what the
@@ -79,15 +87,15 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++;
}
-static int get_dimm_smbios_index(u16 handle)
+static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
{
- struct mem_ctl_info *mci = ghes_pvt->mci;
- int i;
+ struct dimm_info *dimm;
- for (i = 0; i < mci->tot_dimms; i++) {
- if (mci->dimms[i]->smbios_handle == handle)
- return i;
+ mci_for_each_dimm(mci, dimm) {
+ if (dimm->smbios_handle == handle)
+ return dimm->idx;
}
+
return -1;
}
@@ -98,9 +106,7 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
if (dh->type == DMI_ENTRY_MEM_DEVICE) {
struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
- struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers,
- dimm_fill->count, 0, 0);
+ struct dimm_info *dimm = edac_get_dimm(mci, dimm_fill->count, 0, 0);
u16 rdr_mask = BIT(7) | BIT(13);
if (entry->size == 0xffff) {
@@ -198,13 +204,9 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = ghes_pvt;
+ struct ghes_edac_pvt *pvt;
unsigned long flags;
char *p;
- u8 grain_bits;
-
- if (!pvt)
- return;
/*
* We can do the locking below because GHES defers error processing
@@ -216,12 +218,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
spin_lock_irqsave(&ghes_lock, flags);
+ pvt = ghes_pvt;
+ if (!pvt)
+ goto unlock;
+
mci = pvt->mci;
e = &mci->error_desc;
/* Cleans the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = 1;
+ e->grain = 1;
strcpy(e->label, "unknown label");
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
@@ -311,13 +318,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* Error address */
if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
- e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
- e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
+ e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
+ e->offset_in_page = offset_in_page(mem_err->physical_addr);
}
/* Error grain */
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+ e->grain = ~mem_err->physical_addr_mask + 1;
/* Memory error location, mapped on e->location */
p = e->location;
@@ -348,7 +355,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
- index = get_dimm_smbios_index(mem_err->mem_dev_handle);
+ index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
if (index >= 0) {
e->top_layer = index;
e->enable_per_layer_report = true;
@@ -360,6 +367,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
+ p += snprintf(p, sizeof(pvt->other_detail),
+ "APEI location: %s ", e->location);
if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
u64 status = mem_err->error_status;
@@ -433,16 +442,9 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
if (p > pvt->other_detail)
*(p - 1) = '\0';
- /* Generate the trace event */
- grain_bits = fls_long(e->grain);
- snprintf(pvt->detail_location, sizeof(pvt->detail_location),
- "APEI location: %s %s", e->location, e->other_detail);
- trace_mc_event(type, e->msg, e->label, e->error_count,
- mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
- (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
- grain_bits, e->syndrome, pvt->detail_location);
-
edac_raw_mc_handle_error(type, mci, e);
+
+unlock:
spin_unlock_irqrestore(&ghes_lock, flags);
}
@@ -457,10 +459,12 @@ static struct acpi_platform_list plat_list[] = {
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
bool fake = false;
- int rc, num_dimm = 0;
+ int rc = 0, num_dimm = 0;
struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt;
struct edac_mc_layer layers[1];
struct ghes_edac_dimm_fill dimm_fill;
+ unsigned long flags;
int idx = -1;
if (IS_ENABLED(CONFIG_X86)) {
@@ -472,11 +476,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
idx = 0;
}
+ /* finish another registration/unregistration instance first */
+ mutex_lock(&ghes_reg_mutex);
+
/*
* We have only one logical memory controller to which all DIMMs belong.
*/
- if (atomic_inc_return(&ghes_init) > 1)
- return 0;
+ if (refcount_inc_not_zero(&ghes_refcount))
+ goto unlock;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -494,12 +501,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto unlock;
}
- ghes_pvt = mci->pvt_info;
- ghes_pvt->ghes = ghes;
- ghes_pvt->mci = mci;
+ pvt = mci->pvt_info;
+ pvt->ghes = ghes;
+ pvt->mci = mci;
mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
@@ -527,8 +535,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
dimm_fill.mci = mci;
dmi_walk(ghes_edac_dmidecode, &dimm_fill);
} else {
- struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, 0, 0, 0);
+ struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);
dimm->nr_pages = 1;
dimm->grain = 128;
@@ -541,23 +548,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- return -ENODEV;
+ rc = -ENODEV;
+ goto unlock;
}
- return 0;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+ ghes_pvt = pvt;
+ spin_unlock_irqrestore(&ghes_lock, flags);
+
+ /* only set on success */
+ refcount_set(&ghes_refcount, 1);
+
+unlock:
+ mutex_unlock(&ghes_reg_mutex);
+
+ return rc;
}
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
+ unsigned long flags;
- if (!ghes_pvt)
- return;
+ mutex_lock(&ghes_reg_mutex);
- if (atomic_dec_return(&ghes_init))
- return;
+ if (!refcount_dec_and_test(&ghes_refcount))
+ goto unlock;
- mci = ghes_pvt->mci;
+ /*
+ * Wait for the irq handler to finish.
+ */
+ spin_lock_irqsave(&ghes_lock, flags);
+ mci = ghes_pvt ? ghes_pvt->mci : NULL;
ghes_pvt = NULL;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
+ spin_unlock_irqrestore(&ghes_lock, flags);
+
+ if (!mci)
+ goto unlock;
+
+ mci = edac_mc_del_mc(mci->pdev);
+ if (mci)
+ edac_mc_free(mci);
+
+unlock:
+ mutex_unlock(&ghes_reg_mutex);
}
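
Two of the fixes above deserve a worked example. The grain is now derived as e->grain = ~physical_addr_mask + 1: for a CPER record whose valid address bits are the upper ones, say physical_addr_mask = 0xfffffffffffff000, the complement is 0xfff and the grain becomes 0x1000 (one 4 KiB page), whereas the old ~(mask & ~PAGE_MASK) yields ~0 for that same mask. And registration is now serialized by ghes_reg_mutex with ghes_pvt only published under ghes_lock, so the irq-context error handler observes either a fully initialized private structure or NULL, never a half-built one.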
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index c370d5457e6b..059eccf0582b 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -154,8 +154,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
ndimms = 0;
for (j = 0; j < I10NM_NUM_DIMMS; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 299b441647cd..432b375a4075 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -392,8 +392,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
unsigned long nr_pages;
for (j = 0; j < nr_channels; j++) {
- struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i, j, 0);
+ struct dimm_info *dimm = edac_get_dimm(mci, i, j, 0);
nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
if (nr_pages == 0)
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 078a7351bf05..1a6f69c859ab 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1275,9 +1275,8 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- channel / MAX_BRANCHES,
- channel % MAX_BRANCHES, slot);
+ dimm = edac_get_dimm(mci, channel / MAX_BRANCHES,
+ channel % MAX_BRANCHES, slot);
csrow_megs = pvt->dimm_info[slot][channel].megabytes;
dimm->grain = 8;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 251f2b692785..0ddc41e47a96 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -713,7 +713,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
{
struct i5100_priv *priv = mci->pvt_info;
u16 w;
- unsigned long et;
pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
if (i5100_spddata_busy(w))
@@ -724,7 +723,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
0, 0));
/* wait up to 100ms */
- et = jiffies + HZ / 10;
udelay(100);
while (1) {
pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
@@ -848,21 +846,17 @@ static void i5100_init_interleaving(struct pci_dev *pdev,
static void i5100_init_csrows(struct mem_ctl_info *mci)
{
- int i;
struct i5100_priv *priv = mci->pvt_info;
+ struct dimm_info *dimm;
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm;
- const unsigned long npages = i5100_npages(mci, i);
- const unsigned int chan = i5100_csrow_to_chan(mci, i);
- const unsigned int rank = i5100_csrow_to_rank(mci, i);
+ mci_for_each_dimm(mci, dimm) {
+ const unsigned long npages = i5100_npages(mci, dimm->idx);
+ const unsigned int chan = i5100_csrow_to_chan(mci, dimm->idx);
+ const unsigned int rank = i5100_csrow_to_rank(mci, dimm->idx);
if (!npages)
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- chan, rank, 0);
-
dimm->nr_pages = npages;
dimm->grain = 32;
dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6f8bcdb9256a..f131c05ade9f 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -548,8 +548,8 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
+ edac_dbg(0, "\t\t%s DIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
+ type, rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
/* Only 1 bit will be on */
@@ -1054,8 +1054,6 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
u32 actual_tolm;
u16 limit;
int slot_row;
- int maxch;
- int maxdimmperch;
int way0, way1;
pvt = mci->pvt_info;
@@ -1065,9 +1063,6 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
&pvt->u.ambase_top);
- maxdimmperch = pvt->maxdimmperch;
- maxch = pvt->maxch;
-
edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
@@ -1170,17 +1165,13 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
struct dimm_info *dimm;
- int ndimms, channel_count;
- int max_dimms;
+ int ndimms;
int mtr;
int size_mb;
int channel, slot;
pvt = mci->pvt_info;
- channel_count = pvt->maxch;
- max_dimms = pvt->maxdimmperch;
-
ndimms = 0;
/*
@@ -1196,8 +1187,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- channel / 2, channel % 2, slot);
+ dimm = edac_get_dimm(mci, channel / 2, channel % 2, slot);
size_mb = pvt->dimm_info[slot][channel].megabytes;
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 7bf910d54d11..2e9bbe56cde9 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -580,7 +580,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
* @ch: Channel number within the branch (0 or 1)
* @branch: Branch number (0 or 1)
* @dinfo: Pointer to DIMM info where dimm size is stored
- * @p_csrow: Pointer to the struct csrow_info that corresponds to that element
+ * @dimm: Pointer to the struct dimm_info that corresponds to that element
*/
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
@@ -794,8 +794,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
for (ch = 0; ch < max_channel; ch++) {
int channel = to_channel(ch, branch);
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, branch, ch, slot);
+ dimm = edac_get_dimm(mci, branch, ch, slot);
dinfo = &pvt->dimm_info[slot][channel];
@@ -817,7 +816,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
/**
* decode_mir() - Decodes Memory Interleave Register (MIR) info
- * @int mir_no: number of the MIR register to decode
+ * @mir_no: number of the MIR register to decode
* @mir: array with the MIR data cached on the driver
*/
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index a71cca6eeb33..b3135b208f9a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -585,8 +585,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index d26300f9cb07..4f65073f230b 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -490,9 +490,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
if (dimm_info[j][i].dual_rank) {
nr_pages = nr_pages / 2;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, (i * 2) + 1,
- j, 0);
+ dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* just a guess */
@@ -503,8 +501,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i * 2, j, 0);
+ dimm = edac_get_dimm(mci, i * 2, j, 0);
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* same guess */
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index b1193be1ef1d..933f7722b893 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1231,7 +1231,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci)
if (!(chan_mask & BIT(i)))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
+ dimm = edac_get_dimm(mci, i, 0, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d\n", i);
continue;
@@ -1311,7 +1311,7 @@ static void dnv_get_dimm_config(struct mem_ctl_info *mci)
if (!ranks_of_dimm[j])
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
continue;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f743502ca9b7..4957e8ee1879 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
* FIXME: Implement the error count reads directly
*/
-static const u32 correrrcnt[] = {
- 0x104, 0x108, 0x10c, 0x110,
-};
-
#define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
+#if 0 /* Currently unused */
+static const u32 correrrcnt[] = {
+ 0x104, 0x108, 0x10c, 0x110,
+};
+
static const u32 correrrthrsld[] = {
0x11c, 0x120, 0x124, 0x128,
};
+#endif
#define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
@@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s)
*/
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
- u64 sad_base, sad_size, sad_limit = 0;
+ u64 sad_base, sad_limit = 0;
u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
int sad_rule = 0;
int tad_rule = 0;
@@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
edram_only = KNL_EDRAM_ONLY(dram_rule);
sad_limit = pvt->info.sad_limit(dram_rule)+1;
- sad_size = sad_limit - sad_base;
pci_read_config_dword(pvt->pci_sad0,
pvt->info.interleave_list[sad_rule], &interleave_reg);
@@ -1620,7 +1621,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
}
for (j = 0; j < max_dimms_per_channel; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
if (pvt->info.type == KNIGHTS_LANDING) {
pci_read_config_dword(pvt->knl.pci_channel[i],
knl_mtr_reg, &mtr);
@@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
enum hw_event_mc_err_type tp_event;
- char *type, *optype, msg[256];
+ char *optype, msg[256];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
} else {
- type = "NON_FATAL";
tp_event = HW_EVENT_ERR_UNCORRECTED;
}
} else {
- type = "CORRECTED";
tp_event = HW_EVENT_ERR_CORRECTED;
}
@@ -3200,7 +3198,6 @@ static struct notifier_block sbridge_mce_dec = {
static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci = sbridge_dev->mci;
- struct sbridge_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
@@ -3209,8 +3206,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
return;
}
- pvt = mci->pvt_info;
-
edac_dbg(0, "MC: mci = %p, dev = %p\n",
mci, &sbridge_dev->pdev[0]->dev);
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 0fcf3785e8f3..83545b4facb7 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -46,7 +46,8 @@ static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
}
enum munittype {
- CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+ CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD,
+ ERRCHAN0, ERRCHAN1, ERRCHAN2,
};
struct munit {
@@ -68,6 +69,9 @@ static const struct munit skx_all_munits[] = {
{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+ { 0x2043, { PCI_DEVFN(10, 3), PCI_DEVFN(12, 3) }, 2, 2, ERRCHAN0 },
+ { 0x2047, { PCI_DEVFN(10, 7), PCI_DEVFN(12, 7) }, 2, 2, ERRCHAN1 },
+ { 0x204b, { PCI_DEVFN(11, 3), PCI_DEVFN(13, 3) }, 2, 2, ERRCHAN2 },
{ 0x208e, { }, 1, 0, SAD },
{ }
};
@@ -104,10 +108,18 @@ static int get_all_munits(const struct munit *m)
}
switch (m->mtype) {
- case CHAN0: case CHAN1: case CHAN2:
+ case CHAN0:
+ case CHAN1:
+ case CHAN2:
pci_dev_get(pdev);
d->imc[i].chan[m->mtype].cdev = pdev;
break;
+ case ERRCHAN0:
+ case ERRCHAN1:
+ case ERRCHAN2:
+ pci_dev_get(pdev);
+ d->imc[i].chan[m->mtype - ERRCHAN0].edev = pdev;
+ break;
case SAD_ALL:
pci_dev_get(pdev);
d->sad_all = pdev;
@@ -177,8 +189,7 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
for (j = 0; j < SKX_NUM_DIMMS; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
@@ -216,6 +227,39 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
+static void skx_show_retry_rd_err_log(struct decoded_addr *res,
+ char *msg, int len)
+{
+ u32 log0, log1, log2, log3, log4;
+ u32 corr0, corr1, corr2, corr3;
+ struct pci_dev *edev;
+ int n;
+
+ edev = res->dev->imc[res->imc].chan[res->channel].edev;
+
+ pci_read_config_dword(edev, 0x154, &log0);
+ pci_read_config_dword(edev, 0x148, &log1);
+ pci_read_config_dword(edev, 0x150, &log2);
+ pci_read_config_dword(edev, 0x15c, &log3);
+ pci_read_config_dword(edev, 0x114, &log4);
+
+ n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x]",
+ log0, log1, log2, log3, log4);
+
+ pci_read_config_dword(edev, 0x104, &corr0);
+ pci_read_config_dword(edev, 0x108, &corr1);
+ pci_read_config_dword(edev, 0x10c, &corr2);
+ pci_read_config_dword(edev, 0x110, &corr3);
+
+ if (len - n > 0)
+ snprintf(msg + n, len - n,
+ " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
+ corr0 & 0xffff, corr0 >> 16,
+ corr1 & 0xffff, corr1 >> 16,
+ corr2 & 0xffff, corr2 >> 16,
+ corr3 & 0xffff, corr3 >> 16);
+}
+
static bool skx_sad_decode(struct decoded_addr *res)
{
struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list);
@@ -659,7 +703,7 @@ static int __init skx_init(void)
}
}
- skx_set_decode(skx_decode);
+ skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
if (nvdimm_count && skx_adxl_get() == -ENODEV)
skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
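skx_show_retry_rd_err_log() appends into a caller-provided buffer, so it must track how much room remains. A stand-alone sketch of the bounded-append pattern it uses (names here are illustrative, not from the driver):

	#include <stdio.h>

	/* Append two fields into buf without overrunning len bytes. */
	static void append_fields(char *buf, int len, unsigned int a, unsigned int b)
	{
		int n = snprintf(buf, len, " first[%x]", a);

		/* snprintf returns the would-be length; only append if room remains */
		if (n >= 0 && len - n > 0)
			snprintf(buf + n, len - n, " second[%x]", b);
	}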
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index d8ff63d91b86..95662a4ff4c4 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -37,6 +37,7 @@ static char *adxl_msg;
static char skx_msg[MSG_SIZE];
static skx_decode_f skx_decode;
+static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
@@ -100,6 +101,7 @@ void __exit skx_adxl_put(void)
static bool skx_adxl_decode(struct decoded_addr *res)
{
+ struct skx_dev *d;
int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
@@ -118,6 +120,24 @@ static bool skx_adxl_decode(struct decoded_addr *res)
res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+ if (res->imc > NUM_IMC - 1) {
+ skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
+ return false;
+ }
+
+ list_for_each_entry(d, &dev_edac_list, list) {
+ if (d->imc[0].src_id == res->socket) {
+ res->dev = d;
+ break;
+ }
+ }
+
+ if (!res->dev) {
+ skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
+ res->socket, res->imc);
+ return false;
+ }
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -131,9 +151,10 @@ static bool skx_adxl_decode(struct decoded_addr *res)
return true;
}
-void skx_set_decode(skx_decode_f decode)
+void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
skx_decode = decode;
+ skx_show_retry_rd_err_log = show_retry_log;
}
int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
@@ -452,34 +473,17 @@ static void skx_unregister_mci(struct skx_imc *imc)
edac_mc_free(mci);
}
-static struct mem_ctl_info *get_mci(int src_id, int lmc)
-{
- struct skx_dev *d;
-
- if (lmc > NUM_IMC - 1) {
- skx_printk(KERN_ERR, "Bad lmc %d\n", lmc);
- return NULL;
- }
-
- list_for_each_entry(d, &dev_edac_list, list) {
- if (d->imc[0].src_id == src_id)
- return d->imc[lmc].mci;
- }
-
- skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc);
- return NULL;
-}
-
static void skx_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m,
struct decoded_addr *res)
{
enum hw_event_mc_err_type tp_event;
- char *type, *optype;
+ char *optype;
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
bool recoverable;
+ int len;
u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
u32 mscod = GET_BITFIELD(m->status, 16, 31);
u32 errcode = GET_BITFIELD(m->status, 0, 15);
@@ -490,14 +494,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
} else {
- type = "NON_FATAL";
tp_event = HW_EVENT_ERR_UNCORRECTED;
}
} else {
- type = "CORRECTED";
tp_event = HW_EVENT_ERR_CORRECTED;
}
@@ -539,12 +540,12 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
}
}
if (adxl_component_count) {
- snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
+ len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
- snprintf(skx_msg, MSG_SIZE,
+ len = snprintf(skx_msg, MSG_SIZE,
"%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
@@ -553,6 +554,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
res->bank_group, res->bank_address, res->row, res->column);
}
+ if (skx_show_retry_rd_err_log)
+ skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len);
+
edac_dbg(0, "%s\n", skx_msg);
/* Call the helper to output message */
@@ -583,15 +587,12 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (adxl_component_count) {
if (!skx_adxl_decode(&res))
return NOTIFY_DONE;
-
- mci = get_mci(res.socket, res.imc);
- } else {
- if (!skx_decode || !skx_decode(&res))
- return NOTIFY_DONE;
-
- mci = res.dev->imc[res.imc].mci;
+ } else if (!skx_decode || !skx_decode(&res)) {
+ return NOTIFY_DONE;
}
+ mci = res.dev->imc[res.imc].mci;
+
if (!mci)
return NOTIFY_DONE;
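With skx_adxl_decode() now filling res->dev itself, both decode paths converge on the same controller lookup, which is why get_mci() could be deleted. Conceptually (an illustrative condensation, not driver code):

	/* After either decoder succeeds, res->dev and res->imc are valid,
	 * so the MCE notifier can resolve the controller uniformly. */
	static struct mem_ctl_info *resolve_mci(struct decoded_addr *res)
	{
		if (!res->dev || res->imc >= NUM_IMC)
			return NULL;

		return res->dev->imc[res->imc].mci;
	}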
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 08cc971a50ea..60d1ea669afd 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -64,6 +64,7 @@ struct skx_dev {
u8 src_id, node_id;
struct skx_channel {
struct pci_dev *cdev;
+ struct pci_dev *edev;
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -113,10 +114,11 @@ struct decoded_addr {
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci);
typedef bool (*skx_decode_f)(struct decoded_addr *res);
+typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
int __init skx_adxl_get(void);
void __exit skx_adxl_put(void);
-void skx_set_decode(skx_decode_f decode);
+void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_node_id(struct skx_dev *d, u8 *id);
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 6ac26d1b929f..8be3e89a510e 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -135,7 +135,7 @@ static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
u32 val;
u32 memsize;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+ dimm = edac_get_dimm(mci, 0, 0, 0);
val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 415afaf479e7..a7f216191493 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -322,6 +322,25 @@ static void axp288_put_role_sw(void *data)
usb_role_switch_put(info->role_sw);
}
+static int axp288_extcon_find_role_sw(struct axp288_extcon_info *info)
+{
+ const struct software_node *swnode;
+ struct fwnode_handle *fwnode;
+
+ if (!x86_match_cpu(cherry_trail_cpu_ids))
+ return 0;
+
+ swnode = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!swnode)
+ return -EPROBE_DEFER;
+
+ fwnode = software_node_fwnode(swnode);
+ info->role_sw = usb_role_switch_find_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return info->role_sw ? 0 : -EPROBE_DEFER;
+}
+
static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
@@ -343,9 +362,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- info->role_sw = usb_role_switch_get(dev);
- if (IS_ERR(info->role_sw))
- return PTR_ERR(info->role_sw);
+ ret = axp288_extcon_find_role_sw(info);
+ if (ret)
+ return ret;
+
if (info->role_sw) {
ret = devm_add_action_or_reset(dev, axp288_put_role_sw, info);
if (ret)
@@ -440,26 +460,14 @@ static struct platform_driver axp288_extcon_driver = {
},
};
-static struct device_connection axp288_extcon_role_sw_conn = {
- .endpoint[0] = "axp288_extcon",
- .endpoint[1] = "intel_xhci_usb_sw-role-switch",
- .id = "usb-role-switch",
-};
-
static int __init axp288_extcon_init(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_add(&axp288_extcon_role_sw_conn);
-
return platform_driver_register(&axp288_extcon_driver);
}
module_init(axp288_extcon_init);
static void __exit axp288_extcon_exit(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_remove(&axp288_extcon_role_sw_conn);
-
platform_driver_unregister(&axp288_extcon_driver);
}
module_exit(axp288_extcon_exit);
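The lookup above assumes some other driver has registered a software node named "intel-xhci-usb-sw" and attached its role switch to that node. A hedged sketch of the producer side (the node name matches the consumer here; the placement in a probe function is hypothetical):

	#include <linux/property.h>

	static const struct software_node intel_xhci_usb_node = {
		.name = "intel-xhci-usb-sw",
	};

	/* In the role-switch provider's probe (hypothetical placement): */
	ret = software_node_register(&intel_xhci_usb_node);
	if (ret)
		return ret;
	/* ...then register the usb_role_switch with its fwnode set to
	 * software_node_fwnode(&intel_xhci_usb_node) so the consumer's
	 * usb_role_switch_find_by_fwnode() lookup above succeeds. */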
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 9d32150e68db..771f6f4cf92e 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -338,6 +338,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
+ int pwrsrc_sts, id;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -387,8 +388,19 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
goto disable_sw_control;
}
- /* Route D+ and D- to PMIC for initial charger detection */
- cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
+ ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
+ if (ret) {
+ dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ /*
+ * If no USB host or device connected, route D+ and D- to PMIC for
+ * initial charger detection
+ */
+ id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
+ if (id != INTEL_USB_ID_GND)
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
/* Get initial state */
cht_wc_extcon_pwrsrc_event(ext);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index dc43847ad2b0..bcf65aaca5d2 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -65,6 +65,10 @@ struct sm5502_muic_info {
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
@@ -272,7 +276,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
/* Return cable type of attached or detached accessories */
static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
{
- unsigned int cable_type = -1, adc, dev_type1;
+ unsigned int cable_type, adc, dev_type1;
int ret;
/* Read ADC value according to external cable or button */
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index 9dbb634d213b..ce1f1ec310c4 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -237,6 +237,8 @@ enum sm5502_reg {
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define SM5502_REG_RESET_MASK (0x1)
+
/* SM5502 Interrupts */
enum sm5502_irq {
/* INT1 */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index b132ab9ad607..715e491dfbc3 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -250,7 +250,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
- hh->hh_len = FWNET_HLEN;
+
+ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
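smp_store_release() guarantees that the header bytes written just before it are visible to other CPUs before hh_len becomes non-zero; the readers in the neighbour code observe the length with READ_ONCE(). A hedged kernel-style sketch of the publish pattern (types and names are illustrative):

	struct cached_hdr {
		unsigned int hh_len;
		char hh_data[16];
	};

	static void publish(struct cached_hdr *hh, const char *hdr, unsigned int len)
	{
		memcpy(hh->hh_data, hdr, len);		/* fill the payload first */
		smp_store_release(&hh->hh_len, len);	/* then publish the length */
	}

	static bool try_consume(struct cached_hdr *hh, char *out)
	{
		unsigned int len = READ_ONCE(hh->hh_len);

		if (!len)
			return false;
		memcpy(out, hh->hh_data, len);	/* payload writes ordered before len */
		return true;
	}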
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 9cd70d1a5622..a479023fa036 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -967,29 +967,29 @@ static int sdei_get_conduit(struct platform_device *pdev)
if (np) {
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
- return CONDUIT_INVALID;
+ return SMCCC_CONDUIT_NONE;
}
if (!strcmp("hvc", method)) {
sdei_firmware_call = &sdei_smccc_hvc;
- return CONDUIT_HVC;
+ return SMCCC_CONDUIT_HVC;
} else if (!strcmp("smc", method)) {
sdei_firmware_call = &sdei_smccc_smc;
- return CONDUIT_SMC;
+ return SMCCC_CONDUIT_SMC;
}
pr_warn("invalid \"method\" property: %s\n", method);
} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
if (acpi_psci_use_hvc()) {
sdei_firmware_call = &sdei_smccc_hvc;
- return CONDUIT_HVC;
+ return SMCCC_CONDUIT_HVC;
} else {
sdei_firmware_call = &sdei_smccc_smc;
- return CONDUIT_SMC;
+ return SMCCC_CONDUIT_SMC;
}
}
- return CONDUIT_INVALID;
+ return SMCCC_CONDUIT_NONE;
}
static int sdei_probe(struct platform_device *pdev)
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
index d03ed8e43ad7..8e3d355a637a 100644
--- a/drivers/firmware/broadcom/Kconfig
+++ b/drivers/firmware/broadcom/Kconfig
@@ -22,3 +22,11 @@ config BCM47XX_SPROM
In case of SoC devices SPROM content is stored on a flash used by
bootloader firmware CFE. This driver provides method to ssb and bcma
drivers to read SPROM on SoC.
+
+config TEE_BNXT_FW
+ tristate "Broadcom BNXT firmware manager"
+ depends on (ARCH_BCM_IPROC && OPTEE) || (COMPILE_TEST && TEE)
+ default ARCH_BCM_IPROC
+ help
+	  This module helps manage firmware on Broadcom BNXT devices. The module
+	  registers on the TEE bus and invokes calls to manage firmware on the
+	  BNXT device.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
index 72c7fdc20c77..17c5061c47a7 100644
--- a/drivers/firmware/broadcom/Makefile
+++ b/drivers/firmware/broadcom/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o
obj-$(CONFIG_BCM47XX_SPROM) += bcm47xx_sprom.o
+obj-$(CONFIG_TEE_BNXT_FW) += tee_bnxt_fw.o
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
new file mode 100644
index 000000000000..5b7ef89eb701
--- /dev/null
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+
+#define MAX_SHM_MEM_SZ SZ_4M
+
+#define MAX_TEE_PARAM_ARRY_MEMB 4
+
+enum ta_cmd {
+ /*
+ * TA_CMD_BNXT_FASTBOOT - boot bnxt device by copying f/w into sram
+ *
+ * param[0] unused
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+	 * TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found in memory
+ */
+ TA_CMD_BNXT_FASTBOOT = 0,
+
+ /*
+ * TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
+ *
+ * param[0] (inout memref) - Coredump buffer memory reference
+ * param[1] (in value) - value.a: offset, data to be copied from
+ * value.b: size of data to be copied
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
+ */
+ TA_CMD_BNXT_COPY_COREDUMP = 3,
+};
+
+/**
+ * struct tee_bnxt_fw_private - OP-TEE bnxt private data
+ * @dev: OP-TEE based bnxt device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: TA session identifier.
+ * @fw_shm_pool: Memory pool shared with the TA for firmware/coredump data.
+ */
+struct tee_bnxt_fw_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *fw_shm_pool;
+};
+
+static struct tee_bnxt_fw_private pvt_data;
+
+static void prepare_args(int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = pvt_data.session_id;
+ arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
+
+ /* Fill invoke cmd params */
+ switch (cmd) {
+ case TA_CMD_BNXT_COPY_COREDUMP:
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data.fw_shm_pool;
+ param[0].u.memref.size = MAX_SHM_MEM_SZ;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ break;
+ case TA_CMD_BNXT_FASTBOOT:
+ default:
+ /* Nothing to do */
+ break;
+ }
+}
+
+/**
+ * tee_bnxt_fw_load() - Load the bnxt firmware
+ *
+ * Uses an OP-TEE call to start a secure boot process.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_fw_load(void)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_fw_load);
+
+/**
+ * tee_bnxt_copy_coredump() - Copy coredump from the allocated memory
+ * @buf: destination buffer where core dump is copied into
+ * @offset: offset from the base address of the core dump area
+ * @size: size of the dump
+ *
+ * Uses an OP-TEE call to copy the core dump into @buf.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
+{
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+ void *core_data;
+ u32 rbytes = size;
+ u32 nbytes = 0;
+ int ret = 0;
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
+
+ while (rbytes) {
+		nbytes = min_t(u32, rbytes, param[0].u.memref.size);
+
+ /* Fill additional invoke cmd params */
+ param[1].u.value.a = offset;
+ param[1].u.value.b = nbytes;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
+ if (IS_ERR(core_data)) {
+ dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
+ return PTR_ERR(core_data);
+ }
+
+ memcpy(buf, core_data, nbytes);
+
+ rbytes -= nbytes;
+ buf += nbytes;
+ offset += nbytes;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_copy_coredump);
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return (ver->impl_id == TEE_IMPL_ID_OPTEE);
+}
+
+static int tee_bnxt_fw_probe(struct device *dev)
+{
+ struct tee_client_device *bnxt_device = to_tee_client_device(dev);
+ int ret, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *fw_shm_pool;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with Bnxt load Trusted App */
+ memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if (ret < 0 || sess_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ pvt_data.dev = dev;
+
+ fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(fw_shm_pool)) {
+ tee_client_close_context(pvt_data.ctx);
+ dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+ err = PTR_ERR(fw_shm_pool);
+ goto out_sess;
+ }
+
+ pvt_data.fw_shm_pool = fw_shm_pool;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int tee_bnxt_fw_remove(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+
+ return 0;
+}
+
+static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
+ {UUID_INIT(0x6272636D, 0x2019, 0x0716,
+ 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
+
+static struct tee_client_driver tee_bnxt_fw_driver = {
+ .id_table = tee_bnxt_fw_id_table,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .bus = &tee_bus_type,
+ .probe = tee_bnxt_fw_probe,
+ .remove = tee_bnxt_fw_remove,
+ },
+};
+
+static int __init tee_bnxt_fw_mod_init(void)
+{
+ return driver_register(&tee_bnxt_fw_driver.driver);
+}
+
+static void __exit tee_bnxt_fw_mod_exit(void)
+{
+ driver_unregister(&tee_bnxt_fw_driver.driver);
+}
+
+module_init(tee_bnxt_fw_mod_init);
+module_exit(tee_bnxt_fw_mod_exit);
+
+MODULE_AUTHOR("Vikas Gupta <vikas.gupta@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
+MODULE_LICENSE("GPL v2");
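A hedged sketch of how a consumer (e.g. the bnxt_en Ethernet driver) might drive the two exported entry points; the function name and buffer handling here are hypothetical:

	#include <linux/firmware/broadcom/tee_bnxt_fw.h>
	#include <linux/mm.h>

	static int bnxt_fetch_coredump(void **dump, u32 len)
	{
		void *buf;
		int rc;

		rc = tee_bnxt_fw_load();	/* -ENODEV until the TA has probed */
		if (rc)
			return rc;

		buf = kvmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* copies in MAX_SHM_MEM_SZ (4M) chunks under the hood */
		rc = tee_bnxt_copy_coredump(buf, 0, len);
		if (rc) {
			kvfree(buf);
			return rc;
		}

		*dump = buf;
		return 0;
	}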
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index b248870a9806..bcc378c19ebe 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -75,6 +75,27 @@ config EFI_MAX_FAKE_MEM
Ranges can be set up to this value using comma-separated list.
The default value is 8.
+config EFI_SOFT_RESERVE
+ bool "Reserve EFI Specific Purpose Memory"
+ depends on EFI && EFI_STUB && ACPI_HMAT
+ default ACPI_HMAT
+ help
+ On systems that have mixed performance classes of memory EFI
+ may indicate specific purpose memory with an attribute (See
+ EFI_MEMORY_SP in UEFI 2.8). A memory range tagged with this
+ attribute may have unique performance characteristics compared
+ to the system's general purpose "System RAM" pool. On the
+	  expectation that such memory has application-specific usage, and
+	  that its base EFI memory type is "conventional", answer Y to
+	  arrange for the kernel to reserve it as a "Soft Reserved"
+	  resource set aside for direct access (device-dax) by default.
+	  The memory range can later be optionally assigned to
+ the page allocator by system administrator policy via the
+ device-dax kmem facility. Say N to have the kernel treat this
+ memory as "System RAM" by default.
+
+ If unsure, say Y.
+
config EFI_PARAMS_FROM_FDT
bool
help
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 4ac2de4dfa72..554d795270d9 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -20,13 +20,16 @@ obj-$(CONFIG_UEFI_CPER) += cper.o
obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
obj-$(CONFIG_EFI_STUB) += libstub/
-obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
+fake_map-y += fake_mem.o
+fake_map-$(CONFIG_X86) += x86_fake_mem.o
+
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 0e206c9e0d7a..5ccf39986a14 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -53,7 +53,8 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
for (i = 0; i < dev_header->prop_count; i++) {
int remaining = dev_header->len - (ptr - (void *)dev_header);
- u32 key_len, val_len;
+ u32 key_len, val_len, entry_len;
+ const u8 *entry_data;
char *key;
if (sizeof(key_len) > remaining)
@@ -85,17 +86,14 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
ucs2_as_utf8(key, ptr + sizeof(key_len),
key_len - sizeof(key_len));
- entry[i].name = key;
- entry[i].length = val_len - sizeof(val_len);
- entry[i].is_array = !!entry[i].length;
- entry[i].type = DEV_PROP_U8;
- entry[i].pointer.u8_data = ptr + key_len + sizeof(val_len);
-
+ entry_data = ptr + key_len + sizeof(val_len);
+ entry_len = val_len - sizeof(val_len);
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
if (dump_properties) {
- dev_info(dev, "property: %s\n", entry[i].name);
+ dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
- 16, 1, entry[i].pointer.u8_data,
- entry[i].length, true);
+ 16, 1, entry_data, entry_len, true);
}
ptr += key_len + val_len;
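PROPERTY_ENTRY_U8_ARRAY_LEN() packs the name/length/pointer bookkeeping that was previously open-coded across five field assignments. A hedged usage sketch (the property name and data are made up):

	#include <linux/property.h>

	static const u8 demo_data[] = { 0x01, 0x02, 0x03 };

	/* One-line replacement for the open-coded field assignments above */
	static const struct property_entry demo_entry =
		PROPERTY_ENTRY_U8_ARRAY_LEN("demo-bytes", demo_data,
					    sizeof(demo_data));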
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 311cd349a862..904fa09e6a6b 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -164,6 +164,15 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
+ * Special purpose memory is 'soft reserved', which means it
+ * is set aside initially, but can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
* them as System RAM if they can be mapped writeback cacheable.
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index e2ac5fa5531b..899b803842bb 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -121,6 +121,30 @@ static int __init arm_enable_runtime_services(void)
return 0;
}
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
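Once inserted, the range is discoverable through the resource tree. A hedged sketch of how a later consumer (device-dax kmem, for instance) could test a candidate range; region_intersects() and IORES_DESC_SOFT_RESERVED are existing kernel APIs, the helper itself is illustrative:

	#include <linux/ioport.h>

	static bool range_is_soft_reserved(resource_size_t start, resource_size_t size)
	{
		/* REGION_INTERSECTS: the whole range lies in "Soft Reserved" */
		return region_intersects(start, size, IORESOURCE_MEM,
					 IORES_DESC_SOFT_RESERVED) == REGION_INTERSECTS;
	}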
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e98bbf8e56d9..d101f072c8f8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -81,6 +81,11 @@ bool efi_runtime_disabled(void)
return disable_runtime;
}
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
+}
+
static int __init parse_efi_cmdline(char *str)
{
if (!str) {
@@ -94,6 +99,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "nosoftreserve"))
+ set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
+
return 0;
}
early_param("efi", parse_efi_cmdline);
@@ -296,7 +304,7 @@ static __init int efivar_ssdt_load(void)
goto free_data;
}
- ret = acpi_load_table(data);
+ ret = acpi_load_table(data, NULL);
if (ret) {
pr_err("failed to load table: %d\n", ret);
goto free_data;
@@ -842,15 +850,16 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
- EFI_MEMORY_NV |
+ EFI_MEMORY_NV | EFI_MEMORY_SP |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_SP ? "SP" : "",
attr & EFI_MEMORY_NV ? "NV" : "",
attr & EFI_MEMORY_XP ? "XP" : "",
attr & EFI_MEMORY_RP ? "RP" : "",
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index d6dd5f503fa2..2762e0662bf4 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -246,6 +246,9 @@ void __init efi_esrt_init(void)
int rc;
phys_addr_t end;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
pr_debug("esrt-init: loading.\n");
if (!esrt_table_exists())
return;
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 9501edc0fcfb..bb9fc70d0cfa 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -17,12 +17,10 @@
#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/sort.h>
-#include <asm/efi.h>
+#include "fake_mem.h"
-#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
-
-static struct efi_mem_range fake_mems[EFI_MAX_FAKEMEM];
-static int nr_fake_mem;
+struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+int nr_fake_mem;
static int __init cmp_fake_mem(const void *x1, const void *x2)
{
@@ -44,13 +42,13 @@ void __init efi_fake_memmap(void)
void *new_memmap;
int i;
- if (!nr_fake_mem)
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
return;
/* count up the number of EFI memory descriptor */
for (i = 0; i < nr_fake_mem; i++) {
for_each_efi_memory_desc(md) {
- struct range *r = &fake_mems[i].range;
+ struct range *r = &efi_fake_mems[i].range;
new_nr_map += efi_memmap_split_count(md, r);
}
@@ -70,7 +68,7 @@ void __init efi_fake_memmap(void)
}
for (i = 0; i < nr_fake_mem; i++)
- efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
+ efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
/* swap into new EFI memmap */
early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
@@ -104,22 +102,22 @@ static int __init setup_fake_mem(char *p)
if (nr_fake_mem >= EFI_MAX_FAKEMEM)
break;
- fake_mems[nr_fake_mem].range.start = start;
- fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
- fake_mems[nr_fake_mem].attribute = attribute;
+ efi_fake_mems[nr_fake_mem].range.start = start;
+ efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
+ efi_fake_mems[nr_fake_mem].attribute = attribute;
nr_fake_mem++;
if (*p == ',')
p++;
}
- sort(fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
+ sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
cmp_fake_mem, NULL);
for (i = 0; i < nr_fake_mem; i++)
pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
- fake_mems[i].attribute, fake_mems[i].range.start,
- fake_mems[i].range.end);
+ efi_fake_mems[i].attribute, efi_fake_mems[i].range.start,
+ efi_fake_mems[i].range.end);
return *p == '\0' ? 0 : -EINVAL;
}
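With the parsed ranges now exported as efi_fake_mems[]/nr_fake_mem, the early x86 pass added below can reuse them before memblock is up. As a usage example (the attribute value is the EFI_MEMORY_SP bit), a range is tagged on the kernel command line like:

	efi_fake_mem=4G@9G:0x40000

which marks 4G of memory starting at 9G as specific-purpose, letting efi_fake_memmap_early() promote it to E820_TYPE_SOFT_RESERVED ahead of e820__memblock_setup().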
diff --git a/drivers/firmware/efi/fake_mem.h b/drivers/firmware/efi/fake_mem.h
new file mode 100644
index 000000000000..d52791af4b18
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __EFI_FAKE_MEM_H__
+#define __EFI_FAKE_MEM_H__
+#include <asm/efi.h>
+
+#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+extern struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+extern int nr_fake_mem;
+#endif /* __EFI_FAKE_MEM_H__ */
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ee0661ddb25b..c35f893897e1 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -38,7 +38,8 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
+ random.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
@@ -47,7 +48,7 @@ arm-deps-$(CONFIG_ARM64) += sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
+lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps-y))
lib-$(CONFIG_ARM) += arm32-stub.o
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index c382a48c6678..817237ce2420 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -189,6 +189,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail_free_cmdline;
}
+ efi_retrieve_tpm2_eventlog(sys_table);
+
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation(sys_table);
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 41213bf5fcf5..4566640de650 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -146,6 +146,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
continue;
case EFI_CONVENTIONAL_MEMORY:
+ /* Skip soft reserved conventional memory */
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
/*
* Reserve the intersection between this entry and the
* region.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 35dbc2791c97..e02579907f2e 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,6 +32,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
static int __section(.data) __nokaslr;
static int __section(.data) __quiet;
static int __section(.data) __novamap;
+static bool __section(.data) efi_nosoftreserve;
int __pure nokaslr(void)
{
@@ -45,6 +46,10 @@ int __pure novamap(void)
{
return __novamap;
}
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_nosoftreserve;
+}
#define EFI_MMAP_NR_SLACK_SLOTS 8
@@ -211,6 +216,10 @@ again:
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
if (desc->num_pages < nr_pages)
continue;
@@ -305,6 +314,10 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
if (desc->num_pages < nr_pages)
continue;
@@ -484,6 +497,12 @@ efi_status_t efi_parse_options(char const *cmdline)
__novamap = 1;
}
+ if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
+ !strncmp(str, "nosoftreserve", 7)) {
+ str += strlen("nosoftreserve");
+ efi_nosoftreserve = 1;
+ }
+
/* Group words together, delimited by "," */
while (*str && *str != ' ' && *str != ',')
str++;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 7f1556fd867d..05739ae013c8 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -63,8 +63,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
-
void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
/* Helper macros for the usual case of using simple C variables: */
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index b4b1d1dcb5fd..35edd7cfb6a1 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -9,6 +9,18 @@
#include "efistub.h"
+typedef struct efi_rng_protocol efi_rng_protocol_t;
+
+typedef struct {
+ u32 get_info;
+ u32 get_rng;
+} efi_rng_protocol_32_t;
+
+typedef struct {
+ u64 get_info;
+ u64 get_rng;
+} efi_rng_protocol_64_t;
+
struct efi_rng_protocol {
efi_status_t (*get_info)(struct efi_rng_protocol *,
unsigned long *, efi_guid_t *);
@@ -28,7 +40,7 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
if (status != EFI_SUCCESS)
return status;
- return rng->get_rng(rng, NULL, size, out);
+ return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out);
}
/*
@@ -46,6 +58,10 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return 0;
+
region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
first_slot = round_up(md->phys_addr, align);
@@ -161,15 +177,16 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
if (status != EFI_SUCCESS)
return status;
- status = rng->get_rng(rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE,
- seed->bits);
+ status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
+
if (status == EFI_UNSUPPORTED)
/*
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = rng->get_rng(rng, NULL, EFI_RANDOM_SEED_SIZE,
- seed->bits);
+ status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
diff --git a/drivers/firmware/efi/x86_fake_mem.c b/drivers/firmware/efi/x86_fake_mem.c
new file mode 100644
index 000000000000..e5d6d5a1b240
--- /dev/null
+++ b/drivers/firmware/efi/x86_fake_mem.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
+#include <linux/efi.h>
+#include <asm/e820/api.h>
+#include "fake_mem.h"
+
+void __init efi_fake_memmap_early(void)
+{
+ int i;
+
+ /*
+ * The late efi_fake_mem() call can handle all requests if
+ * EFI_MEMORY_SP support is disabled.
+ */
+ if (!efi_soft_reserve_enabled())
+ return;
+
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ /*
+ * Given that efi_fake_memmap() needs to perform memblock
+ * allocations it needs to run after e820__memblock_setup().
+ * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given
+ * address range that potentially needs to mark the memory as
+ * reserved prior to e820__memblock_setup(). Update e820
+ * directly if EFI_MEMORY_SP is specified for an
+ * EFI_CONVENTIONAL_MEMORY descriptor.
+ */
+ for (i = 0; i < nr_fake_mem; i++) {
+ struct efi_mem_range *mem = &efi_fake_mems[i];
+ efi_memory_desc_t *md;
+ u64 m_start, m_end;
+
+ if ((mem->attribute & EFI_MEMORY_SP) == 0)
+ continue;
+
+ m_start = mem->range.start;
+ m_end = mem->range.end;
+ for_each_efi_memory_desc(md) {
+ u64 start, end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+			/* skip descriptors the fake range does not overlap */
+			if (m_start > end || m_end < start)
+				continue;
+
+ /*
+ * Trim the boundary of the e820 update to the
+ * descriptor in case the fake range overlaps
+ * !EFI_CONVENTIONAL_MEMORY
+ */
+ start = max(start, m_start);
+ end = min(end, m_end);
+
+ if (end <= start)
+ continue;
+ e820__range_update(start, end - start + 1, E820_TYPE_RAM,
+ E820_TYPE_SOFT_RESERVED);
+ e820__update_table(e820_table);
+ }
+ }
+}
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 84f4ff351c62..b3b6c15e7b36 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -53,10 +53,18 @@ bool psci_tos_resident_on(int cpu)
}
struct psci_operations psci_ops = {
- .conduit = PSCI_CONDUIT_NONE,
+ .conduit = SMCCC_CONDUIT_NONE,
.smccc_version = SMCCC_VERSION_1_0,
};
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+{
+ if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
+ return SMCCC_CONDUIT_NONE;
+
+ return psci_ops.conduit;
+}
+
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
@@ -212,13 +220,13 @@ static unsigned long psci_migrate_info_up_cpu(void)
0, 0, 0);
}
-static void set_conduit(enum psci_conduit conduit)
+static void set_conduit(enum arm_smccc_conduit conduit)
{
switch (conduit) {
- case PSCI_CONDUIT_HVC:
+ case SMCCC_CONDUIT_HVC:
invoke_psci_fn = __invoke_psci_fn_hvc;
break;
- case PSCI_CONDUIT_SMC:
+ case SMCCC_CONDUIT_SMC:
invoke_psci_fn = __invoke_psci_fn_smc;
break;
default:
@@ -240,9 +248,9 @@ static int get_set_conduit_method(struct device_node *np)
}
if (!strcmp("hvc", method)) {
- set_conduit(PSCI_CONDUIT_HVC);
+ set_conduit(SMCCC_CONDUIT_HVC);
} else if (!strcmp("smc", method)) {
- set_conduit(PSCI_CONDUIT_SMC);
+ set_conduit(SMCCC_CONDUIT_SMC);
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
@@ -583,9 +591,9 @@ int __init psci_acpi_init(void)
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
- set_conduit(PSCI_CONDUIT_HVC);
+ set_conduit(SMCCC_CONDUIT_HVC);
else
- set_conduit(PSCI_CONDUIT_SMC);
+ set_conduit(SMCCC_CONDUIT_SMC);
return psci_probe();
}
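arm_smccc_1_1_get_conduit() lets SMCCC 1.1 users pick the right invocation primitive without reaching into psci_ops. A hedged sketch of a typical caller (the probed function ID and error handling are illustrative):

	#include <linux/arm-smccc.h>

	static int probe_arch_feature(u32 fid)
	{
		struct arm_smccc_res res;

		switch (arm_smccc_1_1_get_conduit()) {
		case SMCCC_CONDUIT_HVC:
			arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, fid, &res);
			break;
		case SMCCC_CONDUIT_SMC:
			arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, fid, &res);
			break;
		default:
			return -ENXIO;	/* SMCCC_CONDUIT_NONE: no SMCCC 1.1 */
		}

		return (int)res.a0;
	}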
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index bb008c019920..f8533338b018 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -20,7 +20,6 @@
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
-#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
@@ -109,9 +108,12 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status != BIT(SVC_STATUS_RSU_OK))
- dev_err(client->dev, "RSU returned status is %i\n",
- data->status);
+ if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ dev_err(client->dev, "Failure, returned status is %lu\n",
+ BIT(data->status));
+
complete(&priv->completion);
}
@@ -133,9 +135,11 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_RSU_OK))
priv->retry_counter = *counter;
+ else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
- dev_err(client->dev, "Failed to get retry counter %i\n",
- data->status);
+ dev_err(client->dev, "Failed to get retry counter %lu\n",
+ BIT(data->status));
complete(&priv->completion);
}
@@ -333,15 +337,10 @@ static ssize_t notify_store(struct device *dev,
return ret;
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
- 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- return ret;
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ return ret;
}
return count;
@@ -413,15 +412,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
- rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
}
return ret;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b485321189e1..c6c31402848d 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -493,8 +493,24 @@ static int svc_normal_to_secure_thread(void *data)
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
- pr_warn("it shouldn't happen\n");
+ pr_warn("Secure firmware doesn't support...\n");
+
+ /*
+			 * be compatible with older firmware versions that
+			 * don't support RSU notify or retry
+ */
+ if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_NOTIFY)) {
+ cbdata->status =
+ BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(
+ pdata->chan->scl, cbdata);
+ }
break;
}
};
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 73c779e920ed..72380e1d31c7 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL
+ depends on FPGA_DFL && HWMON
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 4d78e182878f..7c930e6b314d 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -14,6 +14,8 @@
* Henry Mitchel <henry.mitchel@intel.com>
*/
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -181,6 +183,381 @@ static const struct dfl_feature_ops fme_hdr_ops = {
.ioctl = fme_hdr_ioctl,
};
+#define FME_THERM_THRESHOLD 0x8
+#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0)
+#define TEMP_THRESHOLD1_EN BIT_ULL(7)
+#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8)
+#define TEMP_THRESHOLD2_EN BIT_ULL(15)
+#define TRIP_THRESHOLD GENMASK_ULL(30, 24)
+#define TEMP_THRESHOLD1_STATUS BIT_ULL(32) /* threshold1 reached */
+#define TEMP_THRESHOLD2_STATUS BIT_ULL(33) /* threshold2 reached */
+/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
+#define TEMP_THRESHOLD1_POLICY BIT_ULL(44)
+
+#define FME_THERM_RDSENSOR_FMT1 0x10
+#define FPGA_TEMPERATURE GENMASK_ULL(6, 0)
+
+#define FME_THERM_CAP 0x20
+#define THERM_NO_THROTTLE BIT_ULL(0)
+
+#define MD_PRE_DEG
+
+static bool fme_thermal_throttle_support(void __iomem *base)
+{
+ u64 v = readq(base + FME_THERM_CAP);
+
+ return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
+}
+
+static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct dfl_feature *feature = drvdata;
+
+ /* temperature is always supported, and check hardware cap for others */
+ if (attr == hwmon_temp_input)
+ return 0444;
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
+}
+
+static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+ break;
+ case hwmon_temp_max:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+ break;
+ case hwmon_temp_crit:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+ break;
+ case hwmon_temp_emergency:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+ break;
+ case hwmon_temp_max_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_temp_crit_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops thermal_hwmon_ops = {
+ .is_visible = thermal_hwmon_attrs_visible,
+ .read = thermal_hwmon_read,
+};
+
+static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_hwmon_chip_info = {
+ .ops = &thermal_hwmon_ops,
+ .info = thermal_hwmon_info,
+};
+
+static ssize_t temp1_max_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
+}
+
+static DEVICE_ATTR_RO(temp1_max_policy);
+
+static struct attribute *thermal_extra_attrs[] = {
+ &dev_attr_temp1_max_policy.attr,
+ NULL,
+};
+
+static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
+}
+
+static const struct attribute_group thermal_extra_group = {
+ .attrs = thermal_extra_attrs,
+ .is_visible = thermal_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(thermal_extra);
+
+static int fme_thermal_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ /*
+	 * create hwmon to allow userspace to monitor temperature and other
+	 * threshold information.
+ *
+ * temp1_input -> FPGA device temperature
+ * temp1_max -> hardware threshold 1 -> 50% or 90% throttling
+ * temp1_crit -> hardware threshold 2 -> 100% throttling
+ * temp1_emergency -> hardware trip_threshold to shutdown FPGA
+ * temp1_max_alarm -> hardware threshold 1 alarm
+ * temp1_crit_alarm -> hardware threshold 2 alarm
+ *
+ * create device specific sysfs interfaces, e.g. read temp1_max_policy
+ * to understand the actual hardware throttling action (50% vs 90%).
+ *
+	 * If the hardware doesn't support automatic throttling per the
+	 * thresholds, then none of the above sysfs interfaces except
+	 * temp1_input (temperature) are visible.
+ */
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_thermal", feature,
+ &thermal_hwmon_chip_info,
+ thermal_extra_groups);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_THERMAL_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+};
+
+#define FME_PWR_STATUS 0x8
+#define FME_LATENCY_TOLERANCE BIT_ULL(18)
+#define PWR_CONSUMED GENMASK_ULL(17, 0)
+
+#define FME_PWR_THRESHOLD 0x10
+#define PWR_THRESHOLD1 GENMASK_ULL(6, 0) /* in Watts */
+#define PWR_THRESHOLD2 GENMASK_ULL(14, 8) /* in Watts */
+#define PWR_THRESHOLD_MAX 0x7f /* in Watts */
+#define PWR_THRESHOLD1_STATUS BIT_ULL(16)
+#define PWR_THRESHOLD2_STATUS BIT_ULL(17)
+
+#define FME_PWR_XEON_LIMIT 0x18
+#define XEON_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define XEON_PWR_EN BIT_ULL(15)
+#define FME_PWR_FPGA_LIMIT 0x20
+#define FPGA_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define FPGA_PWR_EN BIT_ULL(15)
+
+static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_power_input:
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+ break;
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+ break;
+ case hwmon_power_max_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_power_crit_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ int ret = 0;
+ u64 v;
+
+ val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+
+ mutex_lock(&pdata->lock);
+
+ switch (attr) {
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD1;
+ v |= FIELD_PREP(PWR_THRESHOLD1, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD2;
+ v |= FIELD_PREP(PWR_THRESHOLD2, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static umode_t power_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max_alarm:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_crit:
+ return 0644;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops power_hwmon_ops = {
+ .is_visible = power_hwmon_attrs_visible,
+ .read = power_hwmon_read,
+ .write = power_hwmon_write,
+};
+
+static const struct hwmon_channel_info *power_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MAX_ALARM |
+ HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info power_hwmon_chip_info = {
+ .ops = &power_hwmon_ops,
+ .info = power_hwmon_info,
+};
+
+static ssize_t power1_xeon_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 xeon_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
+
+ if (FIELD_GET(XEON_PWR_EN, v))
+ xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", xeon_limit * 100000);
+}
+
+static ssize_t power1_fpga_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 fpga_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
+
+ if (FIELD_GET(FPGA_PWR_EN, v))
+ fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", fpga_limit * 100000);
+}
+
+static ssize_t power1_ltr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
+}
+
+static DEVICE_ATTR_RO(power1_xeon_limit);
+static DEVICE_ATTR_RO(power1_fpga_limit);
+static DEVICE_ATTR_RO(power1_ltr);
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_xeon_limit.attr,
+ &dev_attr_power1_fpga_limit.attr,
+ &dev_attr_power1_ltr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(power_extra);
+
+static int fme_power_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_power", feature,
+ &power_hwmon_chip_info,
+ power_extra_groups);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "Failed to register power hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_POWER_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+};
+
static struct dfl_feature_driver fme_feature_drvs[] = {
{
.id_table = fme_hdr_id_table,
@@ -195,6 +572,14 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_global_err_ops,
},
{
+ .id_table = fme_thermal_mgmt_id_table,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .id_table = fme_power_mgmt_id_table,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
.ops = NULL,
},
};
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 31ef38e38537..ee7765049607 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -578,10 +578,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
init_completion(&priv->dma_done);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(dev, "No IRQ available\n");
+ if (priv->irq < 0)
return priv->irq;
- }
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index c612db7a914a..92ce6d85802c 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -53,6 +53,14 @@ config FSI_MASTER_AST_CF
lines driven by the internal ColdFire coprocessor. This requires
the corresponding machine specific ColdFire firmware to be available.
+config FSI_MASTER_ASPEED
+ tristate "FSI ASPEED master"
+ help
+ This option enables an FSI master that is present behind an OPB bridge
+ in the AST2600.
+
+ Enable it for your BMC kernel in an OpenPower or IBM Power system.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index e4a2ff043c32..da218a1ad8e1 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_ASPEED) += fsi-master-aspeed.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 1f76740f33b6..8244da8a7241 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -544,6 +544,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
return 0;
}
+static unsigned long aligned_access_size(size_t offset, size_t count)
+{
+ unsigned long offset_unit, count_unit;
+
+ /* Criteria:
+ *
+ * 1. Access size must be less than or equal to the maximum access
+ * width or the highest power-of-two factor of offset
+ * 2. Access size must be less than or equal to the amount specified by
+ * count
+ *
+ * The access width is optimal when criterion 1 is met with equality
+ * while criterion 2 is still satisfied.
+ */
+
+ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
+ offset_unit = BIT(__builtin_ctzl(offset | 4));
+
+ /* Find 2 by the top bit of count */
+ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
+
+ /* Constrain the maximum access width to the minimum of both criteria */
+ return BIT(__builtin_ctzl(offset_unit | count_unit));
+}
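The helper is easy to exercise in isolation. The sketch below mirrors it in userspace and walks a few (offset, count) pairs to show how the access width tracks both the address alignment and the remaining length; it is an illustration, not part of the patch:

	#include <stddef.h>
	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	static unsigned long aligned_access_size(size_t offset, size_t count)
	{
		unsigned long offset_unit, count_unit;

		/* Largest power-of-two factor of offset, capped at 4 bytes */
		offset_unit = BIT(__builtin_ctzl(offset | 4));
		/* Largest power of two not exceeding count */
		count_unit = BIT(8 * sizeof(unsigned long) - 1 -
				 __builtin_clzl(count));

		/* Take the smaller of the two constraints */
		return BIT(__builtin_ctzl(offset_unit | count_unit));
	}

	int main(void)
	{
		size_t cases[][2] = { {0, 8}, {2, 8}, {3, 8}, {4, 3}, {4, 1} };
		unsigned int i;

		for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
			printf("off=%zu count=%zu -> %lu-byte access\n",
			       cases[i][0], cases[i][1],
			       aligned_access_size(cases[i][0], cases[i][1]));
		return 0;	/* expect widths 4, 2, 1, 2, 1 */
	}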
+
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -559,8 +584,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
- read_len = min_t(size_t, count, 4);
- read_len -= off & 0x3;
+ read_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
if (rc)
@@ -587,8 +611,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
- write_len = min_t(size_t, count, 4);
- write_len -= off & 0x3;
+ write_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
if (rc)
@@ -1241,6 +1264,19 @@ static ssize_t master_break_store(struct device *dev,
static DEVICE_ATTR(break, 0200, NULL, master_break_store);
+static struct attribute *master_attrs[] = {
+ &dev_attr_break.attr,
+ &dev_attr_rescan.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(master);
+
+static struct class fsi_master_class = {
+ .name = "fsi-master",
+ .dev_groups = master_groups,
+};
+
int fsi_master_register(struct fsi_master *master)
{
int rc;
@@ -1249,6 +1285,7 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
+ master->dev.class = &fsi_master_class;
rc = device_register(&master->dev);
if (rc) {
@@ -1256,20 +1293,6 @@ int fsi_master_register(struct fsi_master *master)
return rc;
}
- rc = device_create_file(&master->dev, &dev_attr_rescan);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
- rc = device_create_file(&master->dev, &dev_attr_break);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
np = dev_of_node(&master->dev);
if (!of_property_read_bool(np, "no-scan-on-init")) {
mutex_lock(&master->scan_lock);
@@ -1350,8 +1373,15 @@ static int __init fsi_init(void)
rc = bus_register(&fsi_bus_type);
if (rc)
goto fail_bus;
+
+ rc = class_register(&fsi_master_class);
+ if (rc)
+ goto fail_class;
+
return 0;
+ fail_class:
+ bus_unregister(&fsi_bus_type);
fail_bus:
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
return rc;
@@ -1360,6 +1390,7 @@ postcore_initcall(fsi_init);
static void fsi_exit(void)
{
+ class_unregister(&fsi_master_class);
bus_unregister(&fsi_bus_type);
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
ida_destroy(&fsi_minor_ida);
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
new file mode 100644
index 000000000000..f49742b310c2
--- /dev/null
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2018
+// FSI master driver for AST2600
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+
+#include "fsi-master.h"
+
+struct fsi_master_aspeed {
+ struct fsi_master master;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+#define to_fsi_master_aspeed(m) \
+ container_of(m, struct fsi_master_aspeed, master)
+
+/* Control register (size 0x400) */
+static const u32 ctrl_base = 0x80000000;
+
+static const u32 fsi_base = 0xa0000000;
+
+#define OPB_FSI_VER 0x00
+#define OPB_TRIGGER 0x04
+#define OPB_CTRL_BASE 0x08
+#define OPB_FSI_BASE 0x0c
+#define OPB_CLK_SYNC 0x3c
+#define OPB_IRQ_CLEAR 0x40
+#define OPB_IRQ_MASK 0x44
+#define OPB_IRQ_STATUS 0x48
+
+#define OPB0_SELECT 0x10
+#define OPB0_RW 0x14
+#define OPB0_XFER_SIZE 0x18
+#define OPB0_FSI_ADDR 0x1c
+#define OPB0_FSI_DATA_W 0x20
+#define OPB0_STATUS 0x80
+#define OPB0_FSI_DATA_R 0x84
+
+#define OPB0_WRITE_ORDER1 0x4c
+#define OPB0_WRITE_ORDER2 0x50
+#define OPB1_WRITE_ORDER1 0x54
+#define OPB1_WRITE_ORDER2 0x58
+#define OPB0_READ_ORDER1 0x5c
+#define OPB1_READ_ORDER2 0x60
+
+#define OPB_RETRY_COUNTER 0x64
+
+/* OPBn_STATUS */
+#define STATUS_HALFWORD_ACK BIT(0)
+#define STATUS_FULLWORD_ACK BIT(1)
+#define STATUS_ERR_ACK BIT(2)
+#define STATUS_RETRY BIT(3)
+#define STATUS_TIMEOUT BIT(4)
+
+/* OPB_IRQ_MASK */
+#define OPB1_XFER_ACK_EN BIT(17)
+#define OPB0_XFER_ACK_EN BIT(16)
+
+/* OPB_RW */
+#define CMD_READ BIT(0)
+#define CMD_WRITE 0
+
+/* OPBx_XFER_SIZE */
+#define XFER_FULLWORD (BIT(1) | BIT(0))
+#define XFER_HALFWORD (BIT(0))
+#define XFER_BYTE (0)
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_aspeed.h>
+
+#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in ms */
+
+#define DEFAULT_DIVISOR 14
+#define OPB_POLL_TIMEOUT 10000
+
+static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
+ u32 val, u32 transfer_size)
+{
+ void __iomem *base = aspeed->base;
+ u32 reg, status;
+ int ret;
+
+ writel(CMD_WRITE, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(val, base + OPB0_FSI_DATA_W);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ trace_fsi_master_aspeed_opb_write(addr, val, transfer_size, status, reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ return 0;
+}
+
+static int opb_writeb(struct fsi_master_aspeed *aspeed, u32 addr, u8 val)
+{
+ return __opb_write(aspeed, addr, val, XFER_BYTE);
+}
+
+static int opb_writew(struct fsi_master_aspeed *aspeed, u32 addr, __be16 val)
+{
+ return __opb_write(aspeed, addr, (__force u16)val, XFER_HALFWORD);
+}
+
+static int opb_writel(struct fsi_master_aspeed *aspeed, u32 addr, __be32 val)
+{
+ return __opb_write(aspeed, addr, (__force u32)val, XFER_FULLWORD);
+}
+
+static int __opb_read(struct fsi_master_aspeed *aspeed, uint32_t addr,
+ u32 transfer_size, void *out)
+{
+ void __iomem *base = aspeed->base;
+ u32 result, reg;
+ int status, ret;
+
+ writel(CMD_READ, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ result = readl(base + OPB0_FSI_DATA_R);
+
+ trace_fsi_master_aspeed_opb_read(addr, transfer_size, result,
+ readl(base + OPB0_STATUS),
+ reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ if (out) {
+ switch (transfer_size) {
+ case XFER_BYTE:
+ *(u8 *)out = result;
+ break;
+ case XFER_HALFWORD:
+ *(u16 *)out = result;
+ break;
+ case XFER_FULLWORD:
+ *(u32 *)out = result;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int opb_readl(struct fsi_master_aspeed *aspeed, uint32_t addr, __be32 *out)
+{
+ return __opb_read(aspeed, addr, XFER_FULLWORD, out);
+}
+
+static int opb_readw(struct fsi_master_aspeed *aspeed, uint32_t addr, __be16 *out)
+{
+ return __opb_read(aspeed, addr, XFER_HALFWORD, (void *)out);
+}
+
+static int opb_readb(struct fsi_master_aspeed *aspeed, uint32_t addr, u8 *out)
+{
+ return __opb_read(aspeed, addr, XFER_BYTE, (void *)out);
+}
+
+static int check_errors(struct fsi_master_aspeed *aspeed, int err)
+{
+ int ret;
+
+ if (trace_fsi_master_aspeed_opb_error_enabled()) {
+ __be32 mresp0, mstap0, mesrb0;
+
+ opb_readl(aspeed, ctrl_base + FSI_MRESP0, &mresp0);
+ opb_readl(aspeed, ctrl_base + FSI_MSTAP0, &mstap0);
+ opb_readl(aspeed, ctrl_base + FSI_MESRB0, &mesrb0);
+
+ trace_fsi_master_aspeed_opb_error(
+ be32_to_cpu(mresp0),
+ be32_to_cpu(mstap0),
+ be32_to_cpu(mesrb0));
+ }
+
+ if (err == -EIO) {
+ /* Check MAEB (0x70) ? */
+
+ /* Then clear errors in master */
+ ret = opb_writel(aspeed, ctrl_base + FSI_MRESP0,
+ cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
+ if (ret) {
+ /* TODO: log? return different code? */
+ return ret;
+ }
+ /* TODO: confirm that 0x70 was okay */
+ }
+
+ /* This will pass through timeout errors */
+ return err;
+}
+
+static int aspeed_master_read(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_readb(aspeed, fsi_base + addr, val);
+ break;
+ case 2:
+ ret = opb_readw(aspeed, fsi_base + addr, val);
+ break;
+ case 4:
+ ret = opb_readl(aspeed, fsi_base + addr, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_write(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
+ break;
+ case 2:
+ ret = opb_writew(aspeed, fsi_base + addr, *(__be16 *)val);
+ break;
+ case 4:
+ ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_link_enable(struct fsi_master *master, int link)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int idx, bit, ret;
+ __be32 reg, result;
+
+ idx = link / 32;
+ bit = link % 32;
+
+ reg = cpu_to_be32(0x80000000 >> bit);
+
+ ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
+ if (ret)
+ return ret;
+
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ ret = opb_readl(aspeed, ctrl_base + FSI_MENP0 + (4 * idx), &result);
+ if (ret)
+ return ret;
+
+ if (result != reg) {
+ dev_err(aspeed->dev, "%s failed: %08x\n", __func__, result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x4;
+ cmd = cpu_to_be32(0xecc00000);
+
+ return aspeed_master_write(master, link, id, addr, &cmd, 4);
+}
+
+static int aspeed_master_break(struct fsi_master *master, int link)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x0;
+ cmd = cpu_to_be32(0xc0de0000);
+
+ return aspeed_master_write(master, link, 0, addr, &cmd, 4);
+}
+
+static void aspeed_master_release(struct device *dev)
+{
+ struct fsi_master_aspeed *aspeed =
+ to_fsi_master_aspeed(dev_to_fsi_master(dev));
+
+ kfree(aspeed);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+ return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+ return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
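For reference, the two encoders compose into the clock-rate-selection fields of the FSI_MMODE value written in aspeed_master_init(). A standalone sketch of that composition, using the constants from fsi-master.h and the driver's DEFAULT_DIVISOR:

	#include <stdint.h>
	#include <stdio.h>

	#define FSI_MMODE_CRS0SHFT	18
	#define FSI_MMODE_CRS0MASK	0x3ff
	#define FSI_MMODE_CRS1SHFT	8
	#define FSI_MMODE_CRS1MASK	0x3ff
	#define DEFAULT_DIVISOR		14

	static uint32_t fsi_mmode_crs0(uint32_t x)
	{
		return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
	}

	static uint32_t fsi_mmode_crs1(uint32_t x)
	{
		return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
	}

	int main(void)
	{
		/* Both clock-rate-selection fields carry the same divisor */
		uint32_t reg = fsi_mmode_crs0(DEFAULT_DIVISOR) |
			       fsi_mmode_crs1(DEFAULT_DIVISOR);

		printf("FSI_MMODE CRS fields: 0x%08x\n", reg); /* 0x00380e00 */
		return 0;
	}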
+
+static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
+{
+ __be32 reg;
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ /* Initialize the MFSI (hub master) engine */
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+ opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
+
+ reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
+ | fsi_mmode_crs0(DEFAULT_DIVISOR)
+ | fsi_mmode_crs1(DEFAULT_DIVISOR)
+ | FSI_MMODE_P8_TO_LSB);
+ opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
+
+ reg = cpu_to_be32(0xffff0000);
+ opb_writel(aspeed, ctrl_base + FSI_MDLYR, reg);
+
+ reg = cpu_to_be32(~0);
+ opb_writel(aspeed, ctrl_base + FSI_MSENP0, reg);
+
+ /* Leave enabled long enough for master logic to set up */
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ opb_writel(aspeed, ctrl_base + FSI_MCENP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MAEB, NULL);
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MLEVP0, NULL);
+
+ /* Reset the master bridge */
+ reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ return 0;
+}
+
+static int fsi_master_aspeed_probe(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed;
+ struct resource *res;
+ int rc, links, reg;
+ __be32 raw;
+
+ aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
+ if (!aspeed)
+ return -ENOMEM;
+
+ aspeed->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspeed->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aspeed->base))
+ return PTR_ERR(aspeed->base);
+
+ aspeed->clk = devm_clk_get(aspeed->dev, NULL);
+ if (IS_ERR(aspeed->clk)) {
+ dev_err(aspeed->dev, "couldn't get clock\n");
+ return PTR_ERR(aspeed->clk);
+ }
+ rc = clk_prepare_enable(aspeed->clk);
+ if (rc) {
+ dev_err(aspeed->dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ writel(0x1, aspeed->base + OPB_CLK_SYNC);
+ writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
+ aspeed->base + OPB_IRQ_MASK);
+
+ /* TODO: determine an appropriate value */
+ writel(0x10, aspeed->base + OPB_RETRY_COUNTER);
+
+ writel(ctrl_base, aspeed->base + OPB_CTRL_BASE);
+ writel(fsi_base, aspeed->base + OPB_FSI_BASE);
+
+ /* Set read data order */
+ writel(0x00030b1b, aspeed->base + OPB0_READ_ORDER1);
+
+ /* Set write data order */
+ writel(0x0011101b, aspeed->base + OPB0_WRITE_ORDER1);
+ writel(0x0c330f3f, aspeed->base + OPB0_WRITE_ORDER2);
+
+ /*
+ * Select OPB0 for all operations.
+ * Will need to be reworked when enabling DMA or anything that uses
+ * OPB1.
+ */
+ writel(0x1, aspeed->base + OPB0_SELECT);
+
+ rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to read hub version\n");
+ return rc;
+ }
+
+ reg = be32_to_cpu(raw);
+ links = (reg >> 8) & 0xff;
+ dev_info(&pdev->dev, "hub version %08x (%d links)\n", reg, links);
+
+ aspeed->master.dev.parent = &pdev->dev;
+ aspeed->master.dev.release = aspeed_master_release;
+ aspeed->master.dev.of_node = of_node_get(dev_of_node(&pdev->dev));
+
+ aspeed->master.n_links = links;
+ aspeed->master.read = aspeed_master_read;
+ aspeed->master.write = aspeed_master_write;
+ aspeed->master.send_break = aspeed_master_break;
+ aspeed->master.term = aspeed_master_term;
+ aspeed->master.link_enable = aspeed_master_link_enable;
+
+ dev_set_drvdata(&pdev->dev, aspeed);
+
+ aspeed_master_init(aspeed);
+
+ rc = fsi_master_register(&aspeed->master);
+ if (rc)
+ goto err_release;
+
+ /* At this point, fsi_master_register performs the device_initialize(),
+ * and holds the sole reference on master.dev. This means the device
+ * will be freed (via ->release) during any subsequent call to
+ * fsi_master_unregister. We add our own reference to it here, so we
+ * can perform cleanup (in _remove()) without it being freed before
+ * we're ready.
+ */
+ get_device(&aspeed->master.dev);
+ return 0;
+
+err_release:
+ clk_disable_unprepare(aspeed->clk);
+ return rc;
+}
+
+static int fsi_master_aspeed_remove(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed = platform_get_drvdata(pdev);
+
+ fsi_master_unregister(&aspeed->master);
+ clk_disable_unprepare(aspeed->clk);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_aspeed_match[] = {
+ { .compatible = "aspeed,ast2600-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_aspeed_driver = {
+ .driver = {
+ .name = "fsi-master-aspeed",
+ .of_match_table = fsi_master_aspeed_match,
+ },
+ .probe = fsi_master_aspeed_probe,
+ .remove = fsi_master_aspeed_remove,
+};
+
+module_platform_driver(fsi_master_aspeed_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index f158b1a88286..def35cf92571 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -13,53 +13,7 @@
#include "fsi-master.h"
-/* Control Registers */
-#define FSI_MMODE 0x0 /* R/W: mode */
-#define FSI_MDLYR 0x4 /* R/W: delay */
-#define FSI_MCRSP 0x8 /* R/W: clock rate */
-#define FSI_MENP0 0x10 /* R/W: enable */
-#define FSI_MLEVP0 0x18 /* R: plug detect */
-#define FSI_MSENP0 0x18 /* S: Set enable */
-#define FSI_MCENP0 0x20 /* C: Clear enable */
-#define FSI_MAEB 0x70 /* R: Error address */
-#define FSI_MVER 0x74 /* R: master version/type */
-#define FSI_MRESP0 0xd0 /* W: Port reset */
-#define FSI_MESRB0 0x1d0 /* R: Master error status */
-#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
-#define FSI_MECTRL 0x2e0 /* W: Error control */
-
-/* MMODE: Mode control */
-#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
-#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
-#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
-#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
- /* MSB=1, LSB=0 is 0.8 ms */
- /* MSB=0, LSB=1 is 0.9 ms */
-#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
-#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
-#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
-#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
-
-/* MRESB: Reset brindge */
-#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
-#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
-
-/* MRESB: Reset port */
-#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
-#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
-#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
-#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
-#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
-
-/* MECTRL: Error control */
-#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
- /* master 0 in error */
-#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
-
#define FSI_ENGID_HUB_MASTER 0x1c
-#define FSI_HUB_LINK_OFFSET 0x80000
-#define FSI_HUB_LINK_SIZE 0x80000
-#define FSI_HUB_MASTER_MAX_LINKS 8
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index c7174237e864..6e8d4d4d5149 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -12,6 +12,71 @@
#include <linux/device.h>
#include <linux/mutex.h>
+/*
+ * Master registers
+ *
+ * These are used by hardware masters, such as the ones in the FSP2 and
+ * AST2600, and the hub master in POWER processors.
+ */
+
+/* Control Registers */
+#define FSI_MMODE 0x0 /* R/W: mode */
+#define FSI_MDLYR 0x4 /* R/W: delay */
+#define FSI_MCRSP 0x8 /* R/W: clock rate */
+#define FSI_MENP0 0x10 /* R/W: enable */
+#define FSI_MLEVP0 0x18 /* R: plug detect */
+#define FSI_MSENP0 0x18 /* S: Set enable */
+#define FSI_MCENP0 0x20 /* C: Clear enable */
+#define FSI_MAEB 0x70 /* R: Error address */
+#define FSI_MVER 0x74 /* R: master version/type */
+#define FSI_MSTAP0 0xd0 /* R: Port status */
+#define FSI_MRESP0 0xd0 /* W: Port reset */
+#define FSI_MESRB0 0x1d0 /* R: Master error status */
+#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
+#define FSI_MSCSB0 0x1d4 /* R: Master sub command stack */
+#define FSI_MATRB0 0x1d8 /* R: Master address trace */
+#define FSI_MDTRB0 0x1dc /* R: Master data trace */
+#define FSI_MECTRL 0x2e0 /* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
+#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
+#define FSI_MMODE_RELA 0x20000000 /* Enable relative address commands */
+#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
+ /* MSB=1, LSB=0 is 0.8 ms */
+ /* MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
+#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
+ /* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
+
+#define FSI_HUB_LINK_OFFSET 0x80000
+#define FSI_HUB_LINK_SIZE 0x80000
+#define FSI_HUB_MASTER_MAX_LINKS 8
+
+/*
+ * Protocol definitions
+ *
+ * These are used by low-level masters that bit-bang out the protocol
+ */
+
/* Various protocol delays */
#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
#define FSI_SEND_DELAY_CLOCKS 16 /* Number clocks for send delay */
@@ -47,6 +112,12 @@
/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
+/*
+ * Structures and function prototypes
+ *
+ * These are common to all masters
+ */
+
struct fsi_master {
struct device dev;
int idx;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 38e096e6925f..92d0ff63b3ea 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -298,7 +298,7 @@ config GPIO_IXP4XX
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
- depends on CPU_LOONGSON2 || CPU_LOONGSON3
+ depends on CPU_LOONGSON2EF || CPU_LOONGSON64
help
driver for GPIO functionality on Loongson-2F/3A/3B processors.
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 0c1ead12d883..4ba4d4a67881 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
case 0:
val = BD70528_DEBOUNCE_DISABLE;
break;
- case 1 ... 15:
+ case 1 ... 15000:
val = BD70528_DEBOUNCE_15MS;
break;
- case 16 ... 30:
+ case 15001 ... 30000:
val = BD70528_DEBOUNCE_30MS;
break;
- case 31 ... 50:
+ case 30001 ... 50000:
val = BD70528_DEBOUNCE_50MS;
break;
default:
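The corrected ranges reflect that gpiolib passes the set_config() debounce period in microseconds, so the driver now buckets a microsecond value into the chip's 15/30/50 ms settings. A standalone sketch of the fixed mapping (the enum values are symbolic stand-ins for the BD70528_DEBOUNCE_* register constants):

	#include <stdio.h>

	enum { DEB_DISABLE, DEB_15MS, DEB_30MS, DEB_50MS, DEB_INVALID };

	static int debounce_to_reg(unsigned int usec)
	{
		if (usec == 0)
			return DEB_DISABLE;
		if (usec <= 15000)
			return DEB_15MS;
		if (usec <= 30000)
			return DEB_30MS;
		if (usec <= 50000)
			return DEB_50MS;
		return DEB_INVALID;	/* the driver returns -EINVAL here */
	}

	int main(void)
	{
		/* expect: 1 2 4 */
		printf("%d %d %d\n", debounce_to_reg(15000),
		       debounce_to_reg(15001), debounce_to_reg(60000));
		return 0;
	}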
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 00943170ce36..a42145873cc9 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -22,7 +22,7 @@
#define STLS2F_N_GPIO 4
#define STLS3A_N_GPIO 16
-#ifdef CONFIG_CPU_LOONGSON3
+#ifdef CONFIG_CPU_LOONGSON64
#define LOONGSON_N_GPIO STLS3A_N_GPIO
#else
#define LOONGSON_N_GPIO STLS2F_N_GPIO
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index faf86ea9c51a..642c6321c22a 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
- case 1000 ... 8000:
+ case 1 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
- case 9000 ... 16000:
+ case 8001 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
- case 17000 ... 32000:
+ case 16001 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index 70fdb42a8e88..1e21c661d79d 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -211,3 +211,4 @@ MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 6c0687694341..2f0f50336b9a 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -773,23 +773,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
- struct resource *res;
u32 set;
if (!of_device_is_compatible(mvchip->chip.of_node,
"marvell,armada-370-gpio"))
return 0;
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
- if (!res)
- return 0;
-
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
@@ -812,7 +801,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- mvpwm->membase = devm_ioremap_resource(dev, res);
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
if (IS_ERR(mvpwm->membase))
return PTR_ERR(mvpwm->membase);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index a9058fda187e..ef40fbe923cf 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -407,7 +407,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
unsigned int i, j, offset;
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
- struct resource *res;
char **names;
int err;
@@ -417,8 +416,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
- gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 609ed16ae933..59ccfd24627d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1304,11 +1304,28 @@ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
{
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+ * a non-existent micro-USB-B connector, which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
}
},
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler which,
+ * instead of controlling the actual micro-USB-B port, turns off
+ * the 5V boost for its USB-A connector. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ }
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 98e3c20d9730..4421be09b960 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -185,12 +185,11 @@ struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
/**
- * devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
- * device's child node
+ * devm_fwnode_gpiod_get_index - get a GPIO descriptor from a given node
* @dev: GPIO consumer
+ * @fwnode: firmware node containing GPIO reference
* @con_id: function within the GPIO consumer
* @index: index of the GPIO to obtain in the consumer
- * @child: firmware node (child of @dev)
* @flags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
@@ -200,35 +199,21 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
* On successful request the GPIO pin is configured in accordance with
* provided @flags.
*/
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
{
- char prop_name[32]; /* 32 is max size of property name */
struct gpio_desc **dr;
struct gpio_desc *desc;
- unsigned int i;
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id)
- snprintf(prop_name, sizeof(prop_name), "%s-%s",
- con_id, gpio_suffixes[i]);
- else
- snprintf(prop_name, sizeof(prop_name), "%s",
- gpio_suffixes[i]);
-
- desc = fwnode_get_named_gpiod(child, prop_name, index, flags,
- label);
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
- break;
- }
+ desc = fwnode_gpiod_get_index(fwnode, con_id, index, flags, label);
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
@@ -239,7 +224,7 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
return desc;
}
-EXPORT_SYMBOL_GPL(devm_fwnode_get_index_gpiod_from_child);
+EXPORT_SYMBOL_GPL(devm_fwnode_gpiod_get_index);
/**
* devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 104ed299d5ea..fb33ff6fc1a9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -4356,6 +4356,54 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
}
/**
+ * fwnode_gpiod_get_index - obtain a GPIO from firmware node
+ * @fwnode: handle of the firmware node
+ * @con_id: function within the GPIO consumer
+ * @index: index of the GPIO to obtain for the consumer
+ * @flags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * This function can be used for drivers that get their configuration
+ * from opaque firmware.
+ *
+ * The function properly finds the corresponding GPIO using whatever is the
+ * underlying firmware interface and then makes sure that the GPIO
+ * descriptor is requested before it is returned to the caller.
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @flags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+ char prop_name[32]; /* 32 is max size of property name */
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id)
+ snprintf(prop_name, sizeof(prop_name), "%s-%s",
+ con_id, gpio_suffixes[i]);
+ else
+ snprintf(prop_name, sizeof(prop_name), "%s",
+ gpio_suffixes[i]);
+
+ desc = fwnode_get_named_gpiod(fwnode, prop_name, index, flags,
+ label);
+ if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ break;
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index);
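A hypothetical consumer of the new helper could look like the following; the "reset" function name, the flags and the label are illustrative, and the helper expands "reset" into a "reset-gpios"-style property lookup via gpio_suffixes as shown above:

	/* Sketch of a driver acquiring a reset line through a firmware node */
	static int example_get_reset_gpio(struct device *dev,
					  struct fwnode_handle *fwnode)
	{
		struct gpio_desc *reset;

		reset = fwnode_gpiod_get_index(fwnode, "reset", 0,
					       GPIOD_OUT_LOW, "example-reset");
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		gpiod_set_value_cansleep(reset, 1);	/* assert reset */
		return 0;
	}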
+
+/**
* gpiod_count - return the number of GPIOs associated with a device / function
* or -ENOENT if no GPIO has been assigned to the requested function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e67c194c2aca..1168351267fd 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER
help
FBDEV helpers for KMS drivers.
+config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ bool "Enable refcount backtrace history in the DP MST helpers"
+ select STACKDEPOT
+ depends on DRM_KMS_HELPER
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debug tracing for topology refs in DRM's DP MST helpers. A
+ history of each topology reference/dereference will be printed to the
+ kernel log once a port or branch device's topology refcount reaches 0.
+
+ This has the potential to use a lot of memory and print some very
+ large kernel messages. If in doubt, say "N".
+
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
@@ -165,13 +179,26 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_TTM_DMA_PAGE_POOL
+ bool
+ depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
+ default y
+ help
+ Choose this if you need the TTM DMA page pool
+
config DRM_VRAM_HELPER
tristate
depends on DRM
- select DRM_TTM
help
Helpers for VRAM memory management
+config DRM_TTM_HELPER
+ tristate
+ depends on DRM
+ select DRM_TTM
+ help
+ Helpers for TTM-based GEM objects
+
config DRM_GEM_CMA_HELPER
bool
depends on DRM
@@ -226,9 +253,9 @@ config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_SCHED
- select DRM_TTM
+ select DRM_TTM
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -257,6 +284,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select CRC32
default n
help
Virtual Kernel Mode-Setting (VKMS) is used for testing or for
@@ -397,7 +425,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
- # !PREEMPT because of missing ioctl locking
+ # !PREEMPTION because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 82ff826b33cc..9f1c7c486f88 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,10 +33,12 @@ drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o \
- drm_vram_mm_helper.o
+ drm_vram_helper_common.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
+drm_ttm_helper-y := drm_gem_ttm_helper.o
+obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
+
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 00962a659009..ca0e435559d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+ amdgpu_umc.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
- arct_reg_init.o navi12_reg_init.o
+ arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
# add DF block
amdgpu-y += \
@@ -83,7 +84,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o
+ umc_v6_1.o umc_v6_0.o
# add IH block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0..bcc5d40a8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -106,6 +107,8 @@ struct amdgpu_mgpu_info
uint32_t num_apu;
};
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
+
/*
* Modules parameters.
*/
@@ -122,6 +125,7 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
@@ -135,6 +139,7 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
@@ -146,11 +151,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
+extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
@@ -167,6 +168,12 @@ extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+#else
+static const int sched_policy = KFD_SCHED_POLICY_HWS;
+#endif
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -283,6 +290,9 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
+#define HW_REV(_Major, _Minor, _Rev) \
+ ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
+
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
@@ -425,7 +435,6 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -477,7 +486,6 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
uint64_t bytes_moved_vis;
- struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
@@ -624,6 +632,11 @@ struct amdgpu_fw_vram_usage {
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
+
+ /* Offset on the top of VRAM, used as c2p write buffer.
+ */
+ u64 mem_train_fb_loc;
+ bool mem_train_support;
};
/*
@@ -644,71 +657,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
- u32 ref_and_mask_sdma2;
- u32 ref_and_mask_sdma3;
- u32 ref_and_mask_sdma4;
- u32 ref_and_mask_sdma5;
- u32 ref_and_mask_sdma6;
- u32 ref_and_mask_sdma7;
-};
-
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
};
-struct amdgpu_nbio_funcs {
- const struct nbio_hdp_flush_reg *hdp_flush_reg;
- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
- u32 (*get_rev_id)(struct amdgpu_device *adev);
- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
- u32 (*get_memsize)(struct amdgpu_device *adev);
- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index, int doorbell_size);
- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index, int instance);
- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*ih_doorbell_range)(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*ih_control)(struct amdgpu_device *adev);
- void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
- void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
struct amdgpu_df_funcs {
void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -813,6 +769,7 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
+ struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -921,6 +878,12 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -974,9 +937,7 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_nbio_funcs *nbio_funcs;
const struct amdgpu_df_funcs *df_funcs;
- const struct amdgpu_mmhub_funcs *mmhub_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -1009,8 +970,6 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
- bool in_baco_reset;
-
long gfx_timeout;
long sdma_timeout;
long video_timeout;
@@ -1018,6 +977,9 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
+
+ /* device pstate */
+ int pstate;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1032,6 +994,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 07eb29885372..d3da9dde4ee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -63,45 +63,10 @@ void amdgpu_amdkfd_fini(void)
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
- const struct kfd2kgd_calls *kfd2kgd;
-
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
- break;
-#endif
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
- break;
- case CHIP_ARCTURUS:
- kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
- break;
- default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
- return;
- }
+ bool vf = amdgpu_sriov_vf(adev);
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->pdev, adev->asic_type, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -165,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
- /* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.sched.ready)
- clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
- adev->gfx.kiq.ring.me - 1,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue),
- gpu_resources.queue_bitmap);
-
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant
*/
@@ -202,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
}
}
@@ -709,38 +666,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
return 0;
}
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
-{
- return NULL;
-}
-
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g)
+ unsigned int asic_type, bool vf)
{
return NULL;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e519df3fd2b6..069d5d230810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -57,7 +57,7 @@ struct kgd_mem {
unsigned int mapped_to_gpu_memory;
uint64_t va;
- uint32_t mapping_flags;
+ uint32_t alloc_flags;
atomic_t invalid;
struct amdkfd_process_info *process_info;
@@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
-
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
@@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
+/* Read user wptr from a specified user address space with page fault
+ * disabled. The memory must be pinned and mapped to the hardware when
+ * this is called in hqd_load functions, so it should never fault in
+ * the first place. This resolves a circular lock dependency involving
+ * four locks, including the DQM lock and mmap_sem.
+ */
#define read_user_wptr(mmptr, wptr, dst) \
({ \
bool valid = false; \
if ((mmptr) && (wptr)) { \
+ pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
} else if (current->mm == NULL) { \
@@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
valid = !get_user((dst), (wptr)); \
unuse_mm(mmptr); \
} \
+ pagefault_enable(); \
} \
valid; \
})
@@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g);
+ unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index c79aaebeeaf0..b6713e0ed1b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -19,10 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
@@ -69,11 +65,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[8] = {
+ uint32_t sdma_engine_reg_base[8] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
@@ -91,111 +87,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA7, 0,
mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
-static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
- u32 instance, u32 offset)
-{
- switch (instance) {
- case 0:
- return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
- case 1:
- return (adev->reg_offset[SDMA1_HWIP][0][1] + offset);
- case 2:
- return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
- case 3:
- return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
- case 4:
- return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
- case 5:
- return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
- case 6:
- return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
- case 7:
- return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
- default:
- break;
- }
- return 0;
-}
-
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev,
- m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
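
The renamed get_sdma_rlc_reg_offset() also obsoletes the removed sdma_v4_0_get_reg_offset() switch: each engine's entry already stores its RLC block base relative to mmSDMA0_RLC0_RB_CNTL, and queue N is reached by adding N times the RLC block stride. A toy userspace rendering of the same arithmetic, with invented register values (only the structure mirrors the driver):

#include <stdint.h>
#include <stdio.h>

#define RLC0_RB_CNTL 0x040	/* invented: RLC0 register enum */
#define RLC1_RB_CNTL 0x060	/* invented: next queue's copy */

static uint32_t rlc_reg_offset(const uint32_t *engine_base,
			       unsigned int engine, unsigned int queue)
{
	/* engine_base[] has RLC0_RB_CNTL pre-subtracted, so adding a
	 * register enum back later yields the absolute offset */
	return engine_base[engine] +
	       queue * (RLC1_RB_CNTL - RLC0_RB_CNTL);
}

int main(void)
{
	const uint32_t base[2] = { 0x1000 - RLC0_RB_CNTL,
				   0x2000 - RLC0_RB_CNTL };

	/* absolute offset of SDMA1 RLC3's RB_CNTL register */
	printf("0x%x\n", rlc_reg_offset(base, 1, 3) + RLC0_RB_CNTL);
	return 0;
}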
@@ -205,7 +172,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -215,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
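
HQD_N_REGS = 19+6+7+10 matches the four contiguous register ranges walked above, one addend per loop. DUMP_REG() (defined elsewhere in these files) presumably appends an (offset, value) row to the dump array; a self-contained sketch of that pattern under this assumption, with read_reg() standing in for RREG32:

#include <stdint.h>

extern uint32_t read_reg(uint32_t offset);	/* stand-in for RREG32 */

/* Walk one contiguous register range, recording an (offset, value)
 * row per register into the preallocated dump array. */
static void dump_range(uint32_t base, uint32_t first, uint32_t last,
		       uint32_t (*dump)[2], uint32_t *i)
{
	uint32_t reg;

	for (reg = first; reg <= last; reg++) {
		dump[*i][0] = base + reg;		/* where it was read */
		dump[*i][1] = read_reg(base + reg);	/* what it held */
		(*i)++;
	}
}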
@@ -235,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -255,40 +223,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -304,20 +274,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
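
Exporting the table directly as const struct kfd2kgd_calls arcturus_kfd2kgd (and likewise for the other ASIC files below) lets the ops live in rodata and removes both the accessor function and its const-discarding cast. A hedged sketch of how a caller might then pick a table per ASIC; the selector and the EXAMPLE_CHIP_* values are hypothetical, only the extern symbols come from this patch:

extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;

/* Hypothetical per-ASIC table selection. */
static const struct kfd2kgd_calls *pick_kfd2kgd(unsigned int asic_type)
{
	switch (asic_type) {
	case EXAMPLE_CHIP_KAVERI:	/* illustrative enum values */
		return &gfx_v7_kfd2kgd;
	case EXAMPLE_CHIP_ARCTURUS:
		return &arcturus_kfd2kgd;
	case EXAMPLE_CHIP_NAVI10:
		return &gfx_v10_kfd2kgd;
	default:
		return &gfx_v8_kfd2kgd;
	}
}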
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index d10f483f5e27..61cd707158e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,18 +19,9 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
@@ -42,6 +33,7 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
+#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -50,63 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-#if 0
-static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
-#endif
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -139,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -250,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
ATC_VMID0_PASID_MAPPING__VALID_MASK;
pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
- /*
- * need to do this twice, once for gfx and once for mmhub
- * for ATC add 16 to VMID for mmhub, for IH different registers.
- * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
- */
pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
@@ -306,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
/* On gfx10, mmSDMA1_xxx registers are defined NOT based
@@ -322,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -488,72 +387,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
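
The idle wait that now logs before returning -ETIME follows the standard bounded-polling shape: compute a jiffies deadline once, poll the status bit, and sleep between reads so the CPU is not burned while firmware drains the queue. A distilled sketch, assuming a hypothetical read_reg() stand-in for RREG32:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/printk.h>

extern u32 read_reg(u32 reg);	/* hypothetical register accessor */

/* Poll reg until idle_mask is set or timeout_ms elapses. */
static int poll_idle(u32 reg, u32 idle_mask, unsigned int timeout_ms)
{
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout_ms);

	while (true) {
		if (read_reg(reg) & idle_mask)
			return 0;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("engine not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);	/* yield between polls */
	}
}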
@@ -563,28 +457,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
- pr_debug("sdma base addr %x\n", sdma_base_addr);
-
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -618,14 +510,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -746,59 +638,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
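
Merging the two ATC queries into get_atc_vmid_pasid_mapping_info() means a single register read yields both outputs: the PASID through the out parameter and validity through the return value, so the pair can never come from two different reads of the register. A minimal standalone sketch of the merged accessor shape, with an invented field layout:

#include <stdbool.h>
#include <stdint.h>

#define PASID_MASK 0xffffU	/* invented layout, for illustration only */
#define VALID_MASK (1U << 31)

/* Decode one ATC_VMIDn_PASID_MAPPING-style value: PASID via the out
 * parameter, validity via the return value. */
static bool mapping_info(uint32_t reg_value, uint16_t *p_pasid)
{
	*p_pasid = reg_value & PASID_MASK;
	return !!(reg_value & VALID_MASK);
}

Callers then collapse the old nested test into one line, as the TLB-invalidation hunk below does: if (mapping_info(value, &pasid) && pasid == target) { ... }.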
@@ -830,6 +715,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
if (amdgpu_emu_mode == 0 && ring->sched.ready)
@@ -838,13 +725,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
- break;
- }
+
+ ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, 0);
+ break;
}
}
@@ -914,7 +801,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -922,18 +808,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ /* SDMA is on gfxhub as well for Navi1* series */
+ gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
+
+const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
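
Replacing the open-coded GCVM_CONTEXT0 writes with gfxhub_v2_0_setup_vm_pt_regs() centralizes per-VMID page-table programming in the gfxhub code, which also covers SDMA on Navi1x as the new comment notes. Judging from the removed block, the helper presumably programs the start/end window and the base address (and presumably absorbs the dropped AMDGPU_PTE_VALID OR-in); the sketch below is a reconstruction for illustration, not the actual gfxhub implementation, with vm_reg_write() standing in for the SOC15 WREG32 forms and abbreviated register names:

/* Abbreviations for the GCVM_CONTEXT0_PAGE_TABLE_* registers. */
enum { START_ADDR_LO32, START_ADDR_HI32, END_ADDR_LO32,
       END_ADDR_HI32, BASE_ADDR_LO32, BASE_ADDR_HI32 };

extern void vm_reg_write(u32 vmid, u32 which, u32 value);	/* stand-in */

static void setup_vm_pt_regs_sketch(struct amdgpu_device *adev,
				    u32 vmid, u64 pt_base)
{
	u64 end = adev->vm_manager.max_pfn - 1;

	vm_reg_write(vmid, START_ADDR_LO32, 0);	/* window starts at 0 */
	vm_reg_write(vmid, START_ADDR_HI32, 0);
	vm_reg_write(vmid, END_ADDR_LO32, lower_32_bits(end));
	vm_reg_write(vmid, END_ADDR_HI32, upper_32_bits(end));
	vm_reg_write(vmid, BASE_ADDR_LO32, lower_32_bits(pt_base));
	vm_reg_write(vmid, BASE_ADDR_HI32, upper_32_bits(pt_base));
}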
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5f459bf5f622..6e6f0a99ec06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
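
On CIK the SDMA engines and their queues sit at fixed strides, so the offset above is a plain multiply-add rather than a SOC15 lookup table. The same shape with invented stride values:

#include <stdint.h>

#define ENGINE_STRIDE 0x200	/* invented, plays SDMA1_REGISTER_OFFSET */
#define QUEUE_STRIDE  0x020	/* invented, plays KFD_CIK_SDMA_QUEUE_OFFSET */

static uint32_t cik_rlc_reg_offset(unsigned int engine, unsigned int queue)
{
	/* e.g. engine 1, queue 2 -> 0x200 + 0x40 = 0x240 */
	return engine * ENGINE_STRIDE + queue * QUEUE_STRIDE;
}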
@@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdma_rlc_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdma_rlc_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
@@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+
+const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6d2f61449606..bfbddedb2380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,9 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -44,62 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdmax_rlcx_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e262f2ac07a3..47c853ef1051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,17 +19,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -50,9 +43,6 @@
#include "gmc_v9_0.h"
-#define V9_PIPE_PER_MEC (4)
-#define V9_QUEUES_PER_PIPE_MEC (8)
-
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
@@ -226,22 +216,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
};
- uint32_t retval;
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
-
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -388,71 +377,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -462,7 +447,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -472,15 +458,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -514,14 +500,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -584,59 +570,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
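
For readers tracking the refactor: the two old helpers (validity check and PASID read) are folded into a single register read that returns the valid bit and hands the PASID back through an out-parameter. A minimal user-space sketch of the decode, with illustrative mask values standing in for the real ATC_VMID0_PASID_MAPPING masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: bits 15:0 hold the PASID, bit 31 is the valid flag. */
#define PASID_MASK	0x0000ffffu
#define VALID_MASK	0x80000000u

/* One read answers both questions: is the mapping valid, and for which PASID. */
static bool get_pasid_mapping_info(uint32_t value, uint16_t *p_pasid)
{
	*p_pasid = value & PASID_MASK;
	return !!(value & VALID_MASK);
}

int main(void)
{
	uint16_t pasid;
	bool valid = get_pasid_mapping_info(0x80000042u, &pasid);

	printf("valid=%d pasid=0x%x\n", valid, pasid); /* valid=1 pasid=0x42 */
	return 0;
}

The combined form also simplifies the caller in kgd_gfx_v9_invalidate_tlbs() below: one call plus a PASID comparison replaces the nested valid/pasid checks.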
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
@@ -671,6 +650,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid, i;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
uint32_t flush_type = 0;
@@ -686,14 +667,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- break;
- }
+
+ ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ break;
}
}
@@ -777,15 +758,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid)
-{
- /* No longer needed on GFXv9. The scratch base address is
- * passed to the shader by the CP. It's the user mode driver's
- * responsibility.
- */
-}
-
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
@@ -811,7 +783,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -827,19 +799,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 26d8879bff9d..d9e9ad22b2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid);
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6d021ecc8d59..ae6f5446262c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -19,9 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
@@ -33,11 +30,6 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
-/* Special VM and GART address alignment needed for VI pre-Fiji due to
- * a HW bug.
- */
-#define VI_BO_SIZE_ALIGN (0x8000)
-
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -349,13 +341,46 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_update_directories(adev, vm);
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
return ret;
return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ uint32_t mapping_flags;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev)
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else {
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
+ default:
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+
+ return amdgpu_gem_va_map_flags(adev, mapping_flags);
+}
+
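
The new get_pte_flags() helper computes the VM mapping flags at map time from the allocation flags, rather than caching them at allocation time; on Arcturus the memory type further depends on whether the BO's VRAM is local to the mapping device. A reduced sketch of that decision, with the type names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative memory types; the real AMDGPU_VM_MTYPE_* flags differ. */
enum mtype { MT_NC, MT_UC, MT_RW, MT_CC };

/* Mirror the Arcturus branch above: local VRAM gets cached types,
 * peer VRAM is uncached, and GTT follows the coherency request. */
static enum mtype pick_mtype(bool is_vram, bool same_device, bool coherent)
{
	if (is_vram)
		return same_device ? (coherent ? MT_CC : MT_RW) : MT_UC;
	return coherent ? MT_UC : MT_NC;
}

int main(void)
{
	printf("%d\n", pick_mtype(true, true, false));	/* MT_RW: local VRAM */
	printf("%d\n", pick_mtype(true, false, false));	/* MT_UC: peer VRAM */
	printf("%d\n", pick_mtype(false, false, true));	/* MT_UC: coherent GTT */
	return 0;
}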
/* add_bo_to_vm - Add a BO to a VM
*
 * Everything that needs to be done only once when a BO is first added
@@ -404,8 +429,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
}
bo_va_entry->va = va;
- bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
- mem->mapping_flags);
+ bo_va_entry->pte_flags = get_pte_flags(adev, mem);
bo_va_entry->kgd_dev = (void *)adev;
list_add(&bo_va_entry->bo_list, list_bo_va);
@@ -586,7 +610,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@@ -659,7 +683,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@@ -1079,10 +1103,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
- int byte_align;
u32 domain, alloc_domain;
u64 alloc_flags;
- uint32_t mapping_flags;
int ret;
/*
@@ -1135,25 +1157,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if ((*mem)->aql_queue)
size = size >> 1;
- /* Workaround for TLB bug on older VI chips */
- byte_align = (adev->family == AMDGPU_FAMILY_VI &&
- adev->asic_type != CHIP_FIJI &&
- adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12 &&
- adev->asic_type != CHIP_VEGAM) ?
- VI_BO_SIZE_ALIGN : 1;
-
- mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (flags & ALLOC_MEM_FLAGS_WRITABLE)
- mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
- mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- if (flags & ALLOC_MEM_FLAGS_COHERENT)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- (*mem)->mapping_flags = mapping_flags;
+ (*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
@@ -1168,7 +1172,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
memset(&bp, 0, sizeof(bp));
bp.size = size;
- bp.byte_align = byte_align;
+ bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
@@ -1626,9 +1630,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->mapping_flags =
- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+ (*mem)->alloc_flags =
+ ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
+ ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -1797,8 +1802,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
- true);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -1996,7 +2000,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save, true);
+ false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1c9d40f97a9b..72232fccf61a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ if (ret) {
+ DRM_ERROR("Failed to get mem train fb location.\n");
+ return ret;
+ }
} else {
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index daf687428cdb..ff4eb96bdfb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -27,6 +27,7 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
+#include "soc15_hw_ip.h"
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
@@ -120,65 +121,14 @@ union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
};
-/*
- * Return vram width from integrated system info table, if available,
- * or 0 if not.
- */
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
- u16 data_offset, size;
- union igp_info *igp_info;
- union vram_info *vram_info;
- u32 mem_channel_number;
- u32 mem_channel_width;
- u8 frev, crev;
-
- if (adev->flags & AMD_IS_APU)
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- integratedsysteminfo);
- else
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- vram_info);
- /* get any igp specific overrides */
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- return mem_channel_number * 64;
- default:
- return 0;
- }
- } else {
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 3:
- mem_channel_number = vram_info->v23.vram_module[0].channel_num;
- mem_channel_width = vram_info->v23.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- case 4:
- mem_channel_number = vram_info->v24.vram_module[0].channel_num;
- mem_channel_width = vram_info->v24.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- default:
- return 0;
- }
- }
- }
-
- return 0;
-}
+union vram_module {
+ struct atom_vram_module_v9 v9;
+ struct atom_vram_module_v10 v10;
+};
-static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
- int atom_mem_type)
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
{
int vram_type;
@@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
return vram_type;
}
-/*
- * Return vram type from either integrated system info table
- * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
- */
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+
+int
+amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
+ int index, i = 0;
u16 data_offset, size;
union igp_info *igp_info;
union vram_info *vram_info;
+ union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
if (adev->flags & AMD_IS_APU)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
else
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
@@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
- return 0;
+ return -EINVAL;
}
} else {
vram_info = (union vram_info *)
(mode_info->atom_context->bios + data_offset);
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
switch (crev) {
case 3:
- mem_type = vram_info->v23.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
case 4:
- mem_type = vram_info->v24.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
- return 0;
+ return -EINVAL;
}
}
+
}
return 0;
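
Both crev branches above select the vram_module entry named by the scratch-register module_id by walking a packed array of variable-size records, advancing a byte pointer by each record's own size field. A self-contained sketch of that walk over a made-up record layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical variable-size record; the size field plays the role of
 * vram_module_size in atom_vram_module_v9/v10. */
struct rec {
	uint16_t size;		/* total record size in bytes */
	uint8_t mem_type;
	/* ... record-specific trailing payload ... */
};

static const struct rec *nth_record(const uint8_t *base, unsigned int n)
{
	const struct rec *r = (const struct rec *)base;

	while (n--)		/* hop by each record's own length */
		r = (const struct rec *)((const uint8_t *)r + r->size);
	return r;
}

int main(void)
{
	uint8_t buf[32] = {0};
	struct rec a = { .size = 8, .mem_type = 1 };
	struct rec b = { .size = 8, .mem_type = 2 };

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 8, &b, sizeof(b));
	printf("%u\n", nth_record(buf, 1)->mem_type);	/* prints 2 */
	return 0;
}

The module_id clamp (falling back to entry 0 when the scratch register names a module past vram_module_num) guards against walking off the end of the table.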
@@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
}
return -EINVAL;
}
+
+/*
+ * Check if VBIOS supports GDDR6 training data save/restore
+ */
+static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+{
+ uint16_t data_offset;
+ int index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+ NULL, NULL, &data_offset)) {
+ struct atom_firmware_info_v3_1 *firmware_info =
+ (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+ data_offset);
+
+ DRM_DEBUG("atom firmware capability:0x%08x.\n",
+ le32_to_cpu(firmware_info->firmware_capability));
+
+ if (le32_to_cpu(firmware_info->firmware_capability) &
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
+ return true;
+ }
+
+ return false;
+}
+
+static int gddr6_mem_train_support(struct amdgpu_device *adev)
+{
+ int ret;
+ uint32_t major, minor, revision, hw_v;
+
+ if (gddr6_mem_train_vbios_support(adev)) {
+ amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
+ hw_v = HW_REV(major, minor, revision);
+ /*
+		 * Treat revision 0 as a special case: the MP0 and MMHUB registers
+		 * are missing on some Navi10 A0 parts, which prevents the driver
+		 * from discovering the hwip information. Since none of the
+		 * functions will be initialized in that case, it should not
+		 * cause any problems.
+ */
+ switch (hw_v) {
+ case HW_REV(11, 0, 0):
+ case HW_REV(11, 0, 5):
+ ret = 1;
+ break;
+ default:
+			DRM_ERROR("vbios supports memory training but psp hw(%08x)"
+				  " doesn't!\n", hw_v);
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = 0;
+ hw_v = -1;
+ }
+
+ DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
+ return ret;
+}
+
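
The two-stage gate above first checks a VBIOS firmware-capability bit, then matches the MP0 hardware revision reported by IP discovery against a short whitelist. A sketch of the packed-revision comparison, assuming a simple major/minor/revision byte packing (the kernel's HW_REV macro may pack differently):

#include <stdint.h>
#include <stdio.h>

#define HW_REV(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))

/* 1: supported, -1: vbios advertises it but the psp hw does not. */
static int mem_train_hw_ok(uint32_t hw_v)
{
	switch (hw_v) {
	case HW_REV(11, 0, 0):
	case HW_REV(11, 0, 5):
		return 1;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d\n", mem_train_hw_ok(HW_REV(11, 0, 5)));	/* 1 */
	printf("%d\n", mem_train_hw_ok(HW_REV(11, 0, 2)));	/* -1 */
	return 0;
}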
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ unsigned char *bios = ctx->bios;
+ struct vram_reserve_block *reserved_block;
+ int index, block_number;
+ uint8_t frev, crev;
+ uint16_t data_offset, size;
+ uint32_t start_address_in_kb;
+ uint64_t offset;
+ int ret;
+
+ adev->fw_vram_usage.mem_train_support = false;
+
+ if (adev->asic_type != CHIP_NAVI10 &&
+ adev->asic_type != CHIP_NAVI14)
+ return 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = gddr6_mem_train_support(adev);
+ if (ret == -1)
+ return -EINVAL;
+ else if (ret == 0)
+ return 0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ vram_usagebyfirmware);
+ ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset);
+ if (ret == 0) {
+ DRM_ERROR("parse data header failed.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
+ " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
+ /* only support 2.1+ */
+ if (((uint16_t)frev << 8 | crev) < 0x0201) {
+ DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
+ return -EINVAL;
+ }
+
+ reserved_block = (struct vram_reserve_block *)
+ (bios + data_offset + sizeof(struct atom_common_table_header));
+ block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
+ / sizeof(struct vram_reserve_block);
+ reserved_block += (block_number > 0) ? block_number-1 : 0;
+ DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
+ block_number,
+ le32_to_cpu(reserved_block->start_address_in_kb),
+ le16_to_cpu(reserved_block->used_by_firmware_in_kb),
+ le16_to_cpu(reserved_block->used_by_driver_in_kb));
+ if (reserved_block->used_by_firmware_in_kb > 0) {
+ start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
+ offset = (uint64_t)start_address_in_kb * ONE_KiB;
+		if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1)) {
+ offset -= ONE_MiB;
+ }
+
+ offset &= ~(ONE_MiB - 1);
+ adev->fw_vram_usage.mem_train_fb_loc = offset;
+ adev->fw_vram_usage.mem_train_support = true;
+ DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
+ ret = 0;
+ } else {
+ DRM_ERROR("used_by_firmware_in_kb is 0!\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
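
The final offset computation rounds the firmware-reserved start address down to a 1 MiB boundary, first stepping back a whole MiB when the address sits within the first 4 KiB above a boundary. A standalone sketch of the arithmetic, with ONE_KiB/ONE_MiB spelled out:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define KiB 1024ULL
#define MiB (1024ULL * KiB)

/* Mirror the rounding in amdgpu_atomfirmware_get_mem_train_fb_loc(). */
static uint64_t train_fb_loc(uint32_t start_address_in_kb)
{
	uint64_t offset = (uint64_t)start_address_in_kb * KiB;

	/* within 4 KiB above a 1 MiB boundary: back off one full MiB */
	if ((offset & (MiB - 1)) < 4 * KiB + 1)
		offset -= MiB;
	return offset & ~(MiB - 1);
}

int main(void)
{
	printf("0x%" PRIx64 "\n", train_fb_loc(8 * 1024 + 2));	/* 0x700000 */
	printf("0x%" PRIx64 "\n", train_fb_loc(8 * 1024 + 8));	/* 0x800000 */
	return 0;
}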
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 5ec6f92f353c..f871af5ea6f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -29,8 +29,9 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3e35a8f2c5e5..a97fb759e2f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void)
bool d3_supported = false;
struct pci_dev *parent_pdev;
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- vga_count++;
-
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
-
- parent_pdev = pci_upstream_bridge(pdev);
- d3_supported |= parent_pdev && parent_pdev->bridge_d3;
- amdgpu_atpx_get_quirks(pdev);
- }
-
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 649e68c4479b..d1495e1c9289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int i, r;
start_jiffies = jiffies;
@@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (r)
goto exit_do_move;
- dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
- if (fence)
- dma_fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ece55c8fa673..a62cbc8199de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -217,11 +217,10 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -236,9 +235,8 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -347,10 +345,9 @@ static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1022,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
*/
if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *list_amdgpu_connector;
- list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+
+ drm_connector_list_iter_begin(dev, &iter);
+		drm_for_each_connector_iter(list_connector, &iter) {
if (connector == list_connector)
continue;
list_amdgpu_connector = to_amdgpu_connector(list_connector);
@@ -1040,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
}
}
@@ -1065,9 +1067,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1117,9 +1118,8 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
	/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1271,9 +1271,8 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1292,10 +1291,9 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1501,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
@@ -1515,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
return;
/* see if we already added it */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
+ drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1533,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
}
+ drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82823d9a8ba8..a169ff16277f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
+#include "amdgpu_ras.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -449,75 +450,12 @@ retry:
return r;
}
-/* Last resort, try to evict something from the current working set */
-static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *validated)
-{
- uint32_t domain = validated->allowed_domains;
- struct ttm_operation_ctx ctx = { true, false };
- int r;
-
- if (!p->evictable)
- return false;
-
- for (;&p->evictable->tv.head != &p->validated;
- p->evictable = list_prev_entry(p->evictable, tv.head)) {
-
- struct amdgpu_bo_list_entry *candidate = p->evictable;
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool update_bytes_moved_vis;
- uint32_t other;
-
- /* If we reached our current BO we can forget it */
- if (bo == validated)
- break;
-
- /* We can't move pinned BOs here */
- if (bo->pin_count)
- continue;
-
- other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this BO is in one of the domains we need space for */
- if (!(other & domain))
- continue;
-
- /* Check if we can move this BO somewhere else */
- other = bo->allowed_domains & ~domain;
- if (!other)
- continue;
-
- /* Good we can try to move this BO somewhere else */
- update_bytes_moved_vis =
- !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_bo_placement_from_domain(bo, other);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- p->bytes_moved += ctx.bytes_moved;
- if (update_bytes_moved_vis)
- p->bytes_moved_vis += ctx.bytes_moved;
-
- if (unlikely(r))
- break;
-
- p->evictable = list_prev_entry(p->evictable, tv.head);
- list_move(&candidate->tv.head, &p->validated);
-
- return true;
- }
-
- return false;
-}
-
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -554,9 +492,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages);
}
- if (p->evictable == lobj)
- p->evictable = NULL;
-
r = amdgpu_cs_validate(p, bo);
if (r)
return r;
@@ -646,7 +581,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, false);
+ &duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -657,9 +592,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- p->evictable = list_last_entry(&p->validated,
- struct amdgpu_bo_list_entry,
- tv.head);
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
@@ -911,7 +843,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
if (r)
return r;
@@ -1355,6 +1287,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
bool reserved_buffers = false;
int i, r;
+ if (amdgpu_ras_intr_triggered())
+ return -EHWPOISON;
+
if (!adev->accel_working)
return -EBUSY;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 35a8d3c96fc9..08047bc4d588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5652cc72ed3a..8e6726e0d035 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
int r = 0, i;
+	/* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* hold on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
kthread_unpark(ring->sched.thread);
}
+ mutex_unlock(&adev->lock_reset);
+
return 0;
}
@@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
if (!fences)
return -ENOMEM;
+	/* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* stop the scheduler */
kthread_park(ring->sched.thread);
@@ -1075,10 +1083,11 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
+ mutex_unlock(&adev->lock_reset);
+
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (fences)
- kfree(fences);
+ kfree(fences);
return 0;
}
@@ -1090,8 +1099,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev->ddev->primary->debugfs_root,
- (void *)adev, &fops_ib_preempt);
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_ib_preempt);
if (!(adev->debugfs_preempt)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO;
@@ -1103,8 +1112,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
{
- if (adev->debugfs_preempt)
- debugfs_remove(adev->debugfs_preempt);
+ debugfs_remove(adev->debugfs_preempt);
}
#else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7a6c837c0a85..4f76beafb2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,8 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include <linux/suspend.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static const char *amdgpu_asic_name[] = {
+const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
@@ -151,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev)
return false;
}
+/**
+ * VRAM access helper functions.
+ *
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size in bytes; the buffer at @buf must hold at least @size bytes
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write)
+{
+ uint64_t last;
+ unsigned long flags;
+
+ last = size - 4;
+ for (last += pos; pos <= last; pos += 4) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (write)
+ WREG32_NO_KIQ(mmMM_DATA, *buf++);
+ else
+ *buf++ = RREG32_NO_KIQ(mmMM_DATA);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+}
+
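
The new amdgpu_device_vram_access() helper centralizes the index/data indirect-access idiom: program the low address bits (with bit 31 flagging the VRAM aperture) into MM_INDEX, the upper bits into MM_INDEX_HI, then move one dword through MM_DATA per iteration, all under the mmio_idx spinlock. A single-threaded sketch against a fake register file, locking omitted:

#include <stdint.h>
#include <stdio.h>

static uint32_t vram[16384];	/* fake 64 KiB of VRAM */
static uint64_t cur_index;	/* state held by the index registers */

static void wreg_index(uint32_t lo)	{ cur_index = lo & 0x7fffffffu; }
static void wreg_index_hi(uint32_t hi)	{ cur_index |= (uint64_t)hi << 31; }
static void wreg_data(uint32_t v)	{ vram[cur_index / 4] = v; }
static uint32_t rreg_data(void)		{ return vram[cur_index / 4]; }

/* One dword per iteration, mirroring amdgpu_device_vram_access(). */
static void vram_access(uint64_t pos, uint32_t *buf, size_t size, int write)
{
	for (uint64_t last = pos + size - 4; pos <= last; pos += 4) {
		wreg_index((uint32_t)pos | 0x80000000u);
		wreg_index_hi(pos >> 31);
		if (write)
			wreg_data(*buf++);
		else
			*buf++ = rreg_data();
	}
}

int main(void)
{
	uint32_t out[2], in[2] = { 0xdeadbeef, 0xcafef00d };

	vram_access(0x100, in, sizeof(in), 1);
	vram_access(0x100, out, sizeof(out), 0);
	printf("0x%x 0x%x\n", out[0], out[1]);
	return 0;
}

amdgpu_discovery_read_binary() further down is the first caller converted to this helper, replacing its open-coded copy of the same loop.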
/*
* MMIO register access helper functions.
*/
@@ -1023,12 +1055,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_device_check_block_size(adev);
- ret = amdgpu_device_get_job_timeout_settings(adev);
- if (ret) {
- dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return ret;
- }
-
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
return ret;
@@ -1469,6 +1495,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ goto parse_soc_bounding_box;
+
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1496,7 +1525,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->gfx.config.num_packer_per_sc =
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
+
+parse_soc_bounding_box:
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ /*
+	 * The soc bounding box info is not integrated in the discovery table,
+	 * so we always need to parse it from the gpu info firmware.
+ */
if (hdr->version_minor == 2) {
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
@@ -1613,6 +1648,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ amdgpu_discovery_get_gfx_info(adev);
+
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
@@ -1622,7 +1660,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1839,6 +1877,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
+ /*
+	 * Retired pages are loaded from eeprom and reserved here. This must be
+	 * called after amdgpu_device_ip_hw_init_phase2, since for some ASICs
+	 * the RAS EEPROM code relies on the SMU being fully functional for I2C
+	 * communication, which is only true at this point.
+	 * recovery_init may fail, but it frees all resources it allocated, and
+	 * its failure should not stop the amdgpu init process.
+	 *
+	 * Note: theoretically, this should be called before all vram
+	 * allocations, to protect retired pages from being reused.
+ */
+ amdgpu_ras_recovery_init(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
@@ -2006,6 +2057,7 @@ out:
*/
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
+ struct amdgpu_gpu_instance *gpu_instance;
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -2031,8 +2083,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
- /* set to low pstate by default */
- amdgpu_xgmi_set_pstate(adev, 0);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * Reset device p-state to low as this was booted with high.
+ *
+ * This should be performed only after all devices from the same
+ * hive get initialized.
+ *
+		 * However, the number of devices in a hive is not known in
+		 * advance; it is counted one by one as the devices initialize.
+		 *
+		 * So we wait until all XGMI-interlinked devices have
+		 * initialized. This may add some delay, as those devices may
+		 * come from different hives. But that should be OK.
+ */
+ if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev->flags & AMD_IS_APU)
+ continue;
+
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ if (r) {
+ DRM_ERROR("pstate setting failed (%d).\n", r);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+ }
return 0;
}
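
The comment block above reduces to a rendezvous: each device bumps a shared count as it registers, and only when the count reaches the hive's expected size does one device walk the list and drop every dGPU's p-state to low. A sketch of that last-arrival pattern, with the locking and error paths stripped:

#include <stdio.h>

#define HIVE_SIZE 4	/* stands in for gmc.xgmi.num_physical_nodes */

static int num_initialized;

/* Called once per device; only the last arrival acts for the whole hive. */
static void on_device_init(int id)
{
	if (++num_initialized == HIVE_SIZE)
		printf("device %d: all %d present, set p-state low\n",
		       id, HIVE_SIZE);
	else
		printf("device %d: waiting (%d/%d)\n",
		       id, num_initialized, HIVE_SIZE);
}

int main(void)
{
	for (int id = 0; id < HIVE_SIZE; id++)
		on_device_init(id);
	return 0;
}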
@@ -2220,6 +2303,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
+ /* PSP lost connection when err_event_athub occurs */
+ if (amdgpu_ras_intr_triggered() &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
+ }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -2231,17 +2320,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* handle putting the SMC in the appropriate state */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
if (is_support_sw_smu(adev)) {
- /* todo */
+ r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
} else if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_mp1_state) {
r = adev->powerplay.pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
- }
+ }
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
}
}
@@ -2556,6 +2645,73 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res, adev->ddev->unique);
}
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+	 * By default the timeout for non-compute jobs is 10000 ms, and no
+	 * timeout is enforced on compute jobs. In SR-IOV or passthrough
+	 * mode, the compute job timeout defaults to 10000 ms as well.
+ */
+ adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ else
+ adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+	 * If only one value was specified, it applies to all
+	 * non-compute jobs.
+ */
+ if (index == 1) {
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ }
+ }
+
+ return ret;
+}
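
The option parser walks a comma-separated string with strsep(), assigning values positionally (gfx, compute, sdma, video): 0 keeps the default, a negative value means no timeout, and a single value applies to all non-compute jobs. A user-space sketch of the same loop, using glibc's strsep():

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* defaults: gfx, compute, sdma, video (compute: no timeout) */
	long t[4] = { 10000, LONG_MAX, 10000, 10000 };
	char input[] = "7000,0,-1,3000";
	char *cur = input, *tok;
	int index = 0;

	while ((tok = strsep(&cur, ",")) && *tok) {
		long v = strtol(tok, NULL, 0);

		if (v == 0) {		/* zero: keep this slot's default */
			index++;
			continue;
		}
		if (v < 0)		/* negative: effectively no timeout */
			v = LONG_MAX;
		if (index < 4)
			t[index] = v;
		index++;
	}
	printf("gfx=%ld compute=%ld sdma=%ld video=%ld\n",
	       t[0], t[1], t[2], t[3]);
	return 0;
}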
/**
* amdgpu_device_init - initialize the driver
@@ -2583,7 +2739,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMD_ASIC_MASK;
+
+ if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+ adev->asic_type = amdgpu_force_asic_type;
+ else
+ adev->asic_type = flags & AMD_ASIC_MASK;
+
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
adev->usec_timeout *= 2;
@@ -2726,6 +2887,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ r = amdgpu_device_get_job_timeout_settings(adev);
+ if (r) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return r;
+ }
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -2942,7 +3109,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int r;
DRM_INFO("amdgpu: finishing device.\n");
+ flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -2960,7 +3129,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
- cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -3014,6 +3182,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -3036,9 +3205,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- }
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3089,15 +3260,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- pci_save_state(dev->pdev);
if (suspend) {
+ pci_save_state(dev->pdev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
- } else {
- r = amdgpu_asic_reset(adev);
- if (r)
- DRM_ERROR("amdgpu asic reset failed\n");
}
return 0;
@@ -3117,6 +3284,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
int r = 0;
@@ -3187,9 +3355,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* turn on display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
drm_modeset_unlock_all(dev);
}
amdgpu_fbdev_set_suspend(adev, 0);
@@ -3635,11 +3807,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
-
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- amdgpu_ras_reserve_bad_pages(tmp_adev);
- }
}
}
@@ -3743,25 +3910,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
adev->mp1_state = PP_MP1_STATE_NONE;
break;
}
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
}
-
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -3781,11 +3941,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ /*
+ * Flush RAM to disk so that after reboot
+	 * the user can read the log and see why the system rebooted.
+ */
+ if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
- dev_info(adev->dev, "GPU reset begin!\n");
+	dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop" : "reset");
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -3812,9 +3985,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
return 0;
}
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Build list of devices to reset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (!hive) {
+ /*unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_device_unlock_adev(adev);
return -ENODEV;
}
@@ -3830,17 +4010,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
- /*
- * Mark these ASICs to be reseted as untracked first
- * And add them back after reset completed
- */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
- amdgpu_unregister_gpu_instance(tmp_adev);
-
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev != adev) {
+ amdgpu_device_lock_adev(tmp_adev, false);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+ }
+
+ /*
+		 * Mark these ASICs to be reset as untracked first,
+		 * and add them back after the reset completes.
+ */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
/* disable ras on ALL IPs */
- if (amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3850,10 +4035,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+ if (in_ras_intr)
+ amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
}
+ if (in_ras_intr)
+ goto skip_sched_resume;
+
/*
* Must check guilty signal here since after this point all old
* HW fences are force signaled.
@@ -3864,9 +4055,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dma_fence_is_signaled(job->base.s_fence->parent))
job_signaled = true;
- if (!amdgpu_device_ip_need_full_reset(adev))
- device_list_handle = &device_list;
-
if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -3888,7 +4076,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (tmp_adev == adev)
continue;
- amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -3916,6 +4103,7 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -3937,12 +4125,18 @@ skip_hw_reset:
if (r) {
			/* bad news, how to tell it to userspace? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
}
+ }
+skip_sched_resume:
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /*unlock kfd: SRIOV would do it separately */
+ if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1481899f86c1..f95092741c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
- uint32_t *p = (uint32_t *)binary;
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - BINARY_MAX_SIZE;
- unsigned long flags;
-
- while (pos < vram_size) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
- *p++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- pos += 4;
- }
+ uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
return 0;
}
@@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
+ adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
if (!adev->discovery)
return -ENOMEM;
@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor)
+ int *major, int *minor, int *revision)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
*major = ip->major;
if (minor)
*minor = ip->minor;
+ if (revision)
+ *revision = ip->revision;
return 0;
}
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
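
With the extra out-parameter, callers can retrieve the per-IP revision alongside major/minor. A hedged example of such a lookup (GC_HWID follows the discovery hardware-ID naming convention; treat the exact constant and the caller shown here as assumptions, not code from this patch):

	int major, minor, revision;

	if (!amdgpu_discovery_get_ip_version(adev, GC_HWID,
					     &major, &minor, &revision))
		DRM_INFO("GC is IP v%d.%d.%d\n", major, minor, revision);
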
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 85b8c4d4d576..ba78e15d9b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,11 +24,13 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#define DISCOVERY_TMR_SIZE (64 << 10)
+
int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor);
+ int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1d4aaa9580f4..3cadb0b76f22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
+ drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
@@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
}
i++;
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -511,7 +514,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
	 * Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
*/
if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN &&
+ adev->asic_type < CHIP_RAVEN &&
(adev->flags & AMD_IS_APU) &&
(bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 61f108ec2b5c..e2eec7b66334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -34,27 +34,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
- * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
- * implementation
- * @obj: GEM buffer object (BO)
- *
- * Returns:
- * A scatter/gather table for the pinned pages of the BO's memory.
- */
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int npages = bo->tbo.num_pages;
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-}
-
-/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
*
@@ -179,92 +164,126 @@ err_fences_put:
}
/**
- * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
+ *
+ * @dmabuf: DMA-buf where we attach to
+ * @attach: attachment to add
+ *
+ * Add the attachment as a user of the exported DMA-buf.
+ */
+static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ if (attach->dev->driver == adev->dev->driver)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ /*
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
+ */
+ r = __dma_resv_make_exclusive(bo->tbo.base.resv);
+ if (r)
+ return r;
+
+ bo->prime_shared_count++;
+ amdgpu_bo_unreserve(bo);
+ return 0;
+}
+
+/**
+ * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
+ *
+ * @dmabuf: DMA-buf where we remove the attachment from
+ * @attach: the attachment to remove
+ *
+ * Called when an attachment is removed from the DMA-buf.
+ */
+static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
+ bo->prime_shared_count--;
+}
+
+/**
+ * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
+ * @dir: DMA direction
*
* Makes sure that the shared DMA buffer can be accessed by the target device.
* For now, simply pins it to the GTT domain, where it should be accessible by
* all DMA devices.
*
* Returns:
- * 0 on success or a negative error code on failure.
+ * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
+ * error code.
*/
-static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
+ struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct sg_table *sgt;
long r;
- r = drm_gem_map_attach(dma_buf, attach);
- if (r)
- return r;
-
- r = amdgpu_bo_reserve(bo, false);
- if (unlikely(r != 0))
- goto error_detach;
-
-
- if (attach->dev->driver != adev->dev->driver) {
- /*
- * We only create shared fences for internal use, but importers
- * of the dmabuf rely on exclusive fences for implicitly
- * tracking write hazards. As any of the current fences may
- * correspond to a write, we need to convert all existing
- * fences on the reservation object into a single exclusive
- * fence.
- */
- r = __dma_resv_make_exclusive(bo->tbo.base.resv);
- if (r)
- goto error_unreserve;
- }
-
- /* pin buffer into GTT */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
- goto error_unreserve;
+ return ERR_PTR(r);
- if (attach->dev->driver != adev->dev->driver)
- bo->prime_shared_count++;
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
-error_unreserve:
- amdgpu_bo_unreserve(bo);
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
-error_detach:
- if (r)
- drm_gem_map_detach(dma_buf, attach);
- return r;
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
}
/**
- * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
* @attach: DMA-buf attachment
+ * @sgt: sg_table to unmap
+ * @dir: DMA direction
*
* This is called when a shared DMA buffer no longer needs to be accessible by
* another device. For now, simply unpins the buffer from GTT.
*/
-static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
- struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int ret = 0;
-
- ret = amdgpu_bo_reserve(bo, true);
- if (unlikely(ret != 0))
- goto error;
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
amdgpu_bo_unpin(bo);
- if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
- bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
-
-error:
- drm_gem_map_detach(dma_buf, attach);
}
/**
@@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_dma_buf_map_attach,
- .detach = amdgpu_dma_buf_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .dynamic_mapping = true,
+ .attach = amdgpu_dma_buf_attach,
+ .detach = amdgpu_dma_buf_detach,
+ .map_dma_buf = amdgpu_dma_buf_map,
+ .unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
@@ -321,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
/**
* amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
- * @dev: DRM device
* @gobj: GEM BO
* @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
*
@@ -350,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
}
/**
- * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
- * implementation
+ * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
+ *
* @dev: DRM device
- * @attach: DMA-buf attachment
- * @sg: Scatter/gather table
+ * @dma_buf: DMA-buf
*
- * Imports shared DMA buffer memory exported by another device.
+ * Creates an empty SG BO for DMA-buf import.
*
* Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
*/
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+static struct drm_gem_object *
+amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
- struct dma_resv *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
memset(&bp, 0, sizeof(bp));
- bp.size = attach->dmabuf->size;
+ bp.size = dma_buf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
@@ -385,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto error;
- bo->tbo.sg = sg;
- bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
@@ -405,15 +419,15 @@ error:
* @dev: DRM device
* @dma_buf: Shared DMA buffer
*
- * The main work is done by the &drm_gem_prime_import helper, which in turn
- * uses &amdgpu_gem_prime_import_sg_table.
+ * Import a dma_buf into the driver and potentially create a new GEM object.
*
* Returns:
* GEM BO representing the shared DMA buffer for the given device.
*/
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
+ struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
@@ -428,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, dma_buf);
+ obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ if (IS_ERR(attach)) {
+ drm_gem_object_put(obj);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
}
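
Taken together, the import path above is the importer half of the dynamic dma-buf protocol: the exporter side (amdgpu_dma_buf_map()/amdgpu_dma_buf_unmap() earlier in this file) produces and tears down the device-mapped sg_table per attachment. A hedged sketch of the generic lifecycle a dynamic importer drives:

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dma_buf, dev, true); /* dynamic importer */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	/* ... issue DMA against sgt ... */
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma_buf, attach);
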
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index 5012e6ab58f1..ec447a7b6b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -25,11 +25,6 @@
#include <drm/drm_gem.h>
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 5803fcbae22f..9cc270efee7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
@@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 1c5c0fd76dbf..2cfb677272af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-#define amdgpu_smu_get_current_power_state(adev) \
- ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
-
-#define amdgpu_smu_set_power_state(adev) \
- ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
-
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e1c15721611a..0ffc9447b573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -43,6 +43,8 @@
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
@@ -82,13 +84,12 @@
* - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
+ * - 3.36.0 - Allow reading more status registers on si/cik
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
-#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
-
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
@@ -101,7 +102,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH];
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -128,11 +129,7 @@ char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive(bit 14) disabled by default*/
uint amdgpu_pp_feature_mask = 0xffffbfff;
-int amdgpu_ngg = 0;
-int amdgpu_prim_buf_per_se = 0;
-int amdgpu_pos_buf_per_se = 0;
-int amdgpu_cntl_sb_buf_per_se = 0;
-int amdgpu_param_buf_per_se = 0;
+uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
@@ -146,12 +143,13 @@ int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry = 1;
+int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xfffffffb;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -244,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444);
*
 * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, there can be one or
 * multiple values specified. 0 and negative values are invalid. They will be adjusted
- * to default timeout.
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX. The second one is for Compute.
- * And the third and fourth ones are for SDMA and Video.
+ * to the default timeout.
+ *
+ * - With one value specified, the setting will apply to all non-compute jobs.
+ * - With multiple values specified, the first one will be for GFX.
+ * The second one is for Compute. The third and fourth ones are
+ * for SDMA and Video.
+ *
 * By default (with no lockup_timeout settings), the timeout for all non-compute (GFX, SDMA and Video)
 * jobs is 10000, and there is no timeout enforced on compute jobs.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
+ "for passthrough or sriov, 10000 for all jobs."
" 0: keep default value. negative: infinity timeout), "
- "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
+ "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
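
The kernel-side parser that consumed this string is deleted from this file further down (see the removal of amdgpu_device_get_job_timeout_settings() below); since the parameter itself survives, the parsing has presumably been relocated rather than dropped. For reference, a hedged, userspace-compilable sketch of the same strsep()-based walk over a [GFX,Compute,SDMA,Video] value:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "10000,-1,10000,10000"; /* sample module option */
		char *input = buf, *tok;
		int index = 0;

		while ((tok = strsep(&input, ",")) != NULL) {
			long ms = strtol(tok, NULL, 0);
			/* negative: infinite timeout, 0: keep the default */
			printf("ring type %d -> %ld ms\n", index++, ms);
		}
		return 0;
	}
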
/**
@@ -392,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
/**
+ * DOC: forcelongtraining (uint)
+ * Force long memory training during resume.
+ * The default is zero, which indicates short training during resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force long memory training during resume");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+
+/**
* DOC: pcie_gen_cap (uint)
* Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
* The default is 0 (automatic for each asic).
@@ -449,42 +460,6 @@ MODULE_PARM_DESC(virtual_display,
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
/**
- * DOC: ngg (int)
- * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
- */
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
-
-/**
- * DOC: prim_buf_per_se (int)
- * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
-
-/**
- * DOC: pos_buf_per_se (int)
- * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
-
-/**
- * DOC: cntl_sb_buf_per_se (int)
- * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
-
-/**
- * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
- * The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
-
-/**
* DOC: job_hang_limit (int)
 * Set how long a job is allowed to hang without being dropped. The default is 0.
*/
@@ -616,6 +591,16 @@ MODULE_PARM_DESC(noretry,
"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
+/**
+ * DOC: force_asic_type (int)
+ * A non-negative value used to specify the ASIC type for all supported GPUs.
+ */
+MODULE_PARM_DESC(force_asic_type,
+	"A non-negative value used to specify the ASIC type for all supported GPUs");
+module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
#ifdef CONFIG_HSA_AMD
/**
* DOC: sched_policy (int)
@@ -1013,16 +998,17 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
- {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
{0, 0, 0}
};
@@ -1085,7 +1071,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
#endif
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
if (ret)
return ret;
@@ -1128,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
+#ifdef MODULE
+ if (THIS_MODULE->state != MODULE_STATE_GOING)
+#endif
+ DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
drm_dev_put(dev);
pci_disable_device(pdev);
@@ -1141,6 +1130,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
+ if (amdgpu_ras_intr_triggered())
+ return;
+
	/* if we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -1175,8 +1167,13 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int r;
- return amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, false, true);
+ if (r)
+ return r;
+ return amdgpu_asic_reset(adev);
}
static int amdgpu_pmops_thaw(struct device *dev)
@@ -1348,66 +1345,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
-{
- char *input = amdgpu_lockup_timeout;
- char *timeout_setting = NULL;
- int index = 0;
- long timeout;
- int ret = 0;
-
- /*
- * By default timeout for non compute jobs is 10000.
- * And there is no timeout enforced on compute jobs.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
-
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
-
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
-
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
- }
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1)
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- }
-
- return ret;
-}
-
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -1446,8 +1383,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 571a6dfb473e..61fcf247a638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ drm_connector_list_iter_begin(dev, &iter);
/* walk the list and link encoders to connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
@@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
amdgpu_connector->devices, encoder->encoder_type);
}
}
+ drm_connector_list_iter_end(&iter);
}
struct drm_connector *
@@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->active_device & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_connector *
@@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->devices & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
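
All of the helpers above move to the reference-counted iterator, which is safe against concurrent connector hotplug; the bare connector_list walk they replace was not. The pattern, as a hedged template:

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* connector may be inspected here; take a reference
		 * (drm_connector_get()) if it must outlive the loop */
	}
	drm_connector_list_iter_end(&iter);
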
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 23085b352cf2..377fe20bce23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
timeout = adev->gfx_timeout;
break;
case AMDGPU_RING_TYPE_COMPUTE:
- /*
- * For non-sriov case, no timeout enforce
- * on compute ring by default. Unless user
- * specifies a timeout for compute ring.
- *
- * For sriov case, always use the timeout
- * as gfx ring
- */
- if (!amdgpu_sriov_vf(ring->adev))
- timeout = adev->compute_timeout;
- else
- timeout = adev->gfx_timeout;
+ timeout = adev->compute_timeout;
break;
case AMDGPU_RING_TYPE_SDMA:
timeout = adev->sdma_timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5e8bdded265f..19705e399905 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+ struct page *dummy_page = ttm_bo_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8ceb44925947..4277125a79ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error;
}
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the hardware PTE flags corresponding to the given GEM UAPI flags on this ASIC.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+ uint64_t pte_flag = 0;
+
+ if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ pte_flag |= AMDGPU_PTE_EXECUTABLE;
+ if (flags & AMDGPU_VM_PAGE_READABLE)
+ pte_flag |= AMDGPU_PTE_READABLE;
+ if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+ pte_flag |= AMDGPU_PTE_WRITEABLE;
+ if (flags & AMDGPU_VM_PAGE_PRT)
+ pte_flag |= AMDGPU_PTE_PRT;
+
+ if (adev->gmc.gmc_funcs->map_mtype)
+ pte_flag |= amdgpu_gmc_map_mtype(adev,
+ flags & AMDGPU_VM_MTYPE_MASK);
+
+ return pte_flag;
+}
+
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -613,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
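
A hedged worked example of the translation amdgpu_gem_va_map_flags() performs for the two call sites above: a readable/writeable UAPI mapping yields the matching PTE bits, with the MTYPE portion delegated to the per-ASIC map_mtype hook when one is installed.

	uint64_t pte = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE |
						     AMDGPU_VM_PAGE_WRITEABLE);
	/* pte == AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE,
	 * plus ASIC-specific MTYPE bits, if any */
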
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 0b66d2e6b5d5..e0f025dd1b14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f9bef3154b99..e00b46180d2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
+#include "amdgpu_ras.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, me;
+ int i, queue, me;
for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
queue = i % adev->gfx.me.num_queue_per_pipe;
- pipe = (i / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
me = (i / adev->gfx.me.num_queue_per_pipe)
/ adev->gfx.me.num_pipe_per_me;
@@ -320,8 +319,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
}
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq)
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
@@ -456,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
ring = &adev->gfx.kiq.ring;
- if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring)
- kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
@@ -569,3 +565,102 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ if (!adev->gfx.ras_if) {
+ adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gfx.ras_if)
+ return -ENOMEM;
+ adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
+ adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if->sub_block_index = 0;
+ strcpy(adev->gfx.ras_if->name, "gfx");
+ }
+ fs_info.head = ih_info.head = *adev->gfx.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ /* free gfx ras_if if ras is not supported */
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
+free:
+ kfree(adev->gfx.ras_if);
+ adev->gfx.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+	/* TODO: a UE (uncorrectable error) will trigger an interrupt.
+	 *
+	 * When "Full RAS" is enabled, the per-IP interrupt sources should
+	 * be disabled and the driver should only look for the aggregated
+	 * interrupt via sync flood.
+	 */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->gfx.funcs->query_ras_error_count)
+ adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
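
The new helpers centralize the RAS boilerplate (ras_if allocation, feature check, IRQ wiring) that each gfx IP version previously open-coded. A hedged sketch of how an IP block's late_init callback would consume them (the function name is illustrative, not a symbol from this patch):

	static int gfx_vN_0_late_init(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		int r;

		r = amdgpu_gfx_ras_late_init(adev);
		if (r)
			return r;
		/* ... remaining per-ASIC late init ... */
		return 0;
	}
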
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6d19183b478b..0ae0a2715b0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs {
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
};
-struct amdgpu_ngg_buf {
- struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- uint32_t size;
- uint32_t bo_size;
-};
-
-enum {
- NGG_PRIM = 0,
- NGG_POS,
- NGG_CNTL,
- NGG_PARAM,
- NGG_BUF_MAX
-};
-
-struct amdgpu_ngg {
- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
- uint32_t gds_reserve_addr;
- uint32_t gds_reserve_size;
- bool init;
-};
-
struct sq_work {
struct work_struct work;
unsigned ih_data;
@@ -247,7 +225,7 @@ struct amdgpu_me {
uint32_t num_me;
uint32_t num_pipe_per_me;
uint32_t num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
@@ -312,9 +290,6 @@ struct amdgpu_gfx {
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- /* NGG */
- struct amdgpu_ngg ngg;
-
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
struct mutex gfx_off_mutex;
@@ -356,8 +331,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq);
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq);
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
@@ -385,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
-
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5790db61fa2c..a12f33c0f5df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
@@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
gmc->fault_hash[hash].idx = gmc->last_fault++;
return false;
}
+
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
+ r = adev->umc.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
+ r = adev->mmhub.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ return amdgpu_xgmi_ras_late_init(adev);
+}
+
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_umc_ras_fini(adev);
+ amdgpu_mmhub_ras_fini(adev);
+ amdgpu_xgmi_ras_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b6e1d98ef01e..b499a3de8bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_sem;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
@@ -99,12 +100,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
+ /* map mtype to hardware flags */
+ uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
+ /* get the pte flags to use for a BO VA mapping */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags);
};
struct amdgpu_xgmi {
@@ -120,21 +124,52 @@ struct amdgpu_xgmi {
/* gpu list in the same hive */
struct list_head head;
bool supported;
+ struct ras_common_if *ras_if;
};
struct amdgpu_gmc {
+	/* FB's physical address in MMIO space (for the CPU to
+	 * map the FB). This differs from the agp/gart/vram
+	 * start/end fields, as the latter are from the GPU's
+	 * view while aper_base is from the CPU's view.
+	 */
resource_size_t aper_size;
resource_size_t aper_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
+	/* AGP aperture start and end in MC address space.
+	 * The driver finds a hole in the MC address space
+	 * to place AGP by setting the MC_VM_AGP_BOT/TOP registers.
+	 * Under VMID0, logical address == MC address. The AGP
+	 * aperture maps to physical bus or IOVA addresses.
+	 * The AGP aperture is used to simulate FB in the ZFB case.
+	 * It is also used for page tables in system
+	 * memory (mainly for APUs).
+	 */
u64 agp_size;
u64 agp_start;
u64 agp_end;
+	/* GART aperture start and end in MC address space.
+	 * The driver finds a hole in the MC address space
+	 * to place GART by setting the VM_CONTEXT0_PAGE_TABLE_START/END_ADDR
+	 * registers.
+	 * Under VMID0, logical addresses inside the GART aperture are
+	 * translated through the gpuvm GART page table to access
+	 * paged system memory.
+	 */
u64 gart_size;
u64 gart_start;
u64 gart_end;
+	/* Frame buffer aperture of this GPU device. Different from
+	 * fb_start (see below), this only covers the local GPU device.
+	 * The driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by the
+	 * vbios) and calculates vram_start of this local device by adding
+	 * an offset inside the XGMI hive.
+	 * Under VMID0, logical address == MC address.
+	 */
u64 vram_start;
u64 vram_end;
	/* FB region, it's the same as the local vram region in a single GPU, in XGMI
@@ -153,6 +188,7 @@ struct amdgpu_gmc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
@@ -177,15 +213,14 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
- struct ras_common_if *umc_ras_if;
- struct ras_common_if *mmhub_ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -230,5 +265,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 53734da1c2df..6f9289735e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->entity.fence_context ||
+ if ((*id)->owner != vm->direct.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->entity.fence_context)
+ if ((*id)->owner != vm->direct.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->entity.fence_context;
+ id->owner = vm->direct.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 2a3f5ec298db..30d540d23b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -87,10 +87,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
struct drm_device *dev = adev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
- list_for_each_entry(connector, &mode_config->connector_list, head)
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -153,6 +156,20 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
+
+	/* For hardware that cannot enable the bif ring for both the
+	 * ras_controller_irq and the ras_err_event_athub_irq ih cookies,
+	 * the driver has to poll the status register to check whether the
+	 * interrupt was triggered, and properly ack it if it is there.
+	 */
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+
return ret;
}
@@ -228,10 +245,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
- int ret = pci_enable_msi(adev->pdev);
- if (!ret) {
+ int nvec = pci_msix_vec_count(adev->pdev);
+ unsigned int flags;
+
+ if (nvec <= 0) {
+ flags = PCI_IRQ_MSI;
+ } else {
+ flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ }
+ /* we only need one vector */
+ nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+ if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
}
}
@@ -254,7 +280,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
adev->irq.installed = true;
- r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
+ /* Use vector 0 for MSI-X */
+ r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
@@ -284,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
drm_irq_uninstall(adev->ddev);
adev->irq.installed = false;
if (adev->irq.msi_enabled)
- pci_disable_msi(adev->pdev);
+ pci_free_irq_vectors(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
}
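
The probe path now goes through the generic vector allocator, so MSI-X-only platforms work too, and teardown is symmetric via pci_free_irq_vectors(). The pairing, as a hedged template (irq is an illustrative variable):

	unsigned int flags = PCI_IRQ_MSI;
	int nvec;

	if (pci_msix_vec_count(pdev) > 0)
		flags |= PCI_IRQ_MSIX;

	nvec = pci_alloc_irq_vectors(pdev, 1, 1, flags); /* one vector suffices */
	if (nvec > 0)
		irq = pci_irq_vector(pdev, 0); /* Linux IRQ number for vector 0 */
	/* ... later, on teardown ... */
	pci_free_irq_vectors(pdev);
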
@@ -369,7 +396,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector pointer
+ * @ih: interrupt ring instance
*
* Dispatches IRQ to IP blocks.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 96b2a31ccfed..4fb20e870e63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -248,6 +248,44 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
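
amdgpu_job_stop_all_jobs_on_sched() backs the fatal-RAS path added to amdgpu_device_gpu_recover() earlier in this patch, where it is invoked per ring right after the scheduler is stopped:

	drm_sched_stop(&ring->sched, job ? &job->base : NULL);
	if (in_ras_intr)
		amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
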
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 51e62504c279..dc7ee9358dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a042ef471fbd..b6db28a570c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -583,9 +583,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ vram_gtt.vram_cpu_accessible_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ vram_gtt.vram_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
@@ -598,15 +601,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -649,15 +655,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -ENOMEM;
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
- for (i = 0; i < info->read_mmr_reg.count; i++)
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
&regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
+ amdgpu_gfx_off_ctrl(adev, true);
return -EFAULT;
}
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
return n ? -EFAULT : 0;
@@ -728,17 +738,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
-
- if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
- dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
- dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
- dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
- dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
- }
dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -967,6 +966,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->delayed_init_work);
+
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_ERROR("RAS Intr triggered, device disabled!!");
+ return -EHWPOISON;
+ }
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..676c48c02d77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "mmhub_err_count",
+ .debugfs_name = "mmhub_err_inject",
+ };
+
+ if (!adev->mmhub.ras_if) {
+ adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->mmhub.ras_if)
+ return -ENOMEM;
+ adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
+ adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if->sub_block_index = 0;
+ strcpy(adev->mmhub.ras_if->name, "mmhub");
+ }
+ ih_info.head = fs_info.head = *adev->mmhub.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) {
+ kfree(adev->mmhub.ras_if);
+ adev->mmhub.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+ adev->mmhub.ras_if) {
+ struct ras_common_if *ras_if = adev->mmhub.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 2d75ecfa199b..1cd78940cf82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -23,9 +23,17 @@
struct amdgpu_mmhub_funcs {
void (*ras_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
};
+struct amdgpu_mmhub {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mmhub_funcs *funcs;
+};
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 31d4deb5d294..392300f77b13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -136,6 +136,7 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
* amdgpu_mn_read_lock - take the read side lock for this notifier
*
* @amn: our notifier
+ * @blockable: is the notifier blockable
*/
static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
new file mode 100644
index 000000000000..7d5c3a9de9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "pcie_bif_err_count",
+ .debugfs_name = "pcie_bif_err_inject",
+ };
+
+ if (!adev->nbio.ras_if) {
+ adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->nbio.ras_if)
+ return -ENOMEM;
+ adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if->sub_block_index = 0;
+ strcpy(adev->nbio.ras_if->name, "pcie_bif");
+ }
+ ih_info.head = fs_info.head = *adev->nbio.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
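+	/* only request the RAS interrupts when the PCIE_BIF block is
+	 * actually supported; otherwise release the context again
+	 */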
+ if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
+ if (r)
+ goto late_fini;
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info);
+free:
+ kfree(adev->nbio.ras_if);
+ adev->nbio.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
+ adev->nbio.ras_if) {
+ struct ras_common_if *ras_if = adev->nbio.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
new file mode 100644
index 000000000000..919bd566ba3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_NBIO_H__
+#define __AMDGPU_NBIO_H__
+
+/*
+ * amdgpu nbio functions
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+ u32 ref_and_mask_sdma2;
+ u32 ref_and_mask_sdma3;
+ u32 ref_and_mask_sdma4;
+ u32 ref_and_mask_sdma5;
+ u32 ref_and_mask_sdma6;
+ u32 ref_and_mask_sdma7;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ struct amdgpu_irq_src ras_controller_irq;
+ struct amdgpu_irq_src ras_err_event_athub_irq;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_nbio_funcs *funcs;
+};
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7289e1b4fb60..e3f16b49e970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
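+ *
+ * Example (illustrative only; "reserved_offset" is a caller-chosen VRAM
+ * offset, not a name defined by this patch):
+ *
+ *	struct amdgpu_bo *bo;
+ *	void *cpu_addr;
+ *
+ *	r = amdgpu_bo_create_kernel_at(adev, reserved_offset, size,
+ *				       AMDGPU_GEM_DOMAIN_VRAM,
+ *				       &bo, &cpu_addr);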
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+ NULL, cpu_addr);
+ if (r)
+ return r;
+
+ /*
+	 * Remove the original mem node and create a new one at the requested
+	 * position.
+ */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.mem, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
+
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
+ amdgpu_bo_unref(bo_ptr);
+ return r;
+}
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -451,7 +515,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
{
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
- .no_wait_gpu = false,
+ .no_wait_gpu = bp->no_wait_gpu,
.resv = bp->resv,
.flags = bp->type != ttm_bo_type_kernel ?
TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
@@ -1059,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &bo->tbo);
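+	/* the fbdev BO must be mapped as a whole, starting at offset 0 */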
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ return ttm_bo_mmap_obj(vma, &bo->tbo);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 658f4c9779b7..7e99f6c58c48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,6 +41,7 @@ struct amdgpu_bo_param {
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
+ bool no_wait_gpu;
struct dma_resv *resv;
};
@@ -237,6 +238,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 03930313c263..f205f56e3358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
+ pm = smu_get_current_power_state(&adev->smu);
else
pm = adev->pm.dpm.user_state;
} else if (adev->powerplay.pp_funcs->get_current_power_state) {
@@ -805,8 +805,7 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
}
/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
- * pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -822,9 +821,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
- * Secondly,Enter a new value for each level by inputing a string that
+ * Secondly, enter a new value for each level by inputting a string that
* contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
+ * E.g.,
+ *
+ * .. code-block:: bash
+ *
+ * echo "4 5 6" > pp_dpm_sclk
+ *
+ * will enable sclk levels 4, 5, and 6.
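+ *
+ * A complete sequence, including the required switch to manual mode,
+ * could look like:
+ *
+ * .. code-block:: bash
+ *
+ *	echo manual > power_dpm_force_performance_level
+ *	echo "4 5 6" > pp_dpm_sclk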
*
* NOTE: change to the dcefclk max dpm level is not supported now
*/
@@ -902,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
@@ -949,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -989,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
@@ -1029,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
@@ -1069,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
@@ -1109,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
@@ -1301,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
if (!ret)
@@ -2010,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
uint32_t limit = 0;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
+ smu_get_power_limit(&adev->smu, &limit, true, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2028,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
uint32_t limit = 0;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
+ smu_get_power_limit(&adev->smu, &limit, false, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2196,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan1_input: fan speed in RPM
*
- * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
*
- * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
*
* hwmon interfaces for GPU clocks:
*
@@ -2825,6 +2830,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_dpm_sclk\n");
return ret;
}
+
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_mclk.store = NULL;
+
+ dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_socclk.store = NULL;
+
+ dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_fclk.store = NULL;
+ }
+
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
if (ret) {
DRM_ERROR("failed to create device file pp_dpm_mclk\n");
@@ -3008,7 +3026,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
smu_handle_task(&adev->smu,
smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ true);
} else {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4d71537a960d..2770cba56a6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -34,6 +34,8 @@
#include "psp_v11_0.h"
#include "psp_v12_0.h"
+#include "amdgpu_ras.h"
+
static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
@@ -88,6 +90,17 @@ static int psp_sw_init(void *handle)
return ret;
}
+ ret = psp_mem_training_init(psp);
+ if (ret) {
+ DRM_ERROR("Failed to initialize memory training!\n");
+ return ret;
+ }
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
return 0;
}
@@ -95,6 +108,7 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ psp_mem_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -151,10 +165,19 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
+ /*
+		 * Don't keep waiting for the timeout when err_event_athub
+		 * occurs, because the gpu reset thread has been triggered and
+		 * the lock must be released for the psp resume sequence.
+ */
+ if (amdgpu_ras_intr_triggered())
+ break;
msleep(1);
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
/* In some cases, psp response status is not 0 even there is no
@@ -168,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_WARN("psp command failed and response status is (0x%X)\n",
- psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+ DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -253,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp)
/* For ASICs support RLC autoload, psp will parse the toc
* and calculate the total size of TMR needed */
- if (psp->toc_start_addr &&
+ if (!amdgpu_sriov_vf(psp->adev) &&
+ psp->toc_start_addr &&
psp->toc_bin_size &&
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
@@ -287,15 +312,9 @@ static int psp_tmr_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
- if (ret)
- goto failed;
kfree(cmd);
- return 0;
-
-failed:
- kfree(cmd);
return ret;
}
@@ -548,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- if (!psp->adev->psp.ta_fw)
+ if (!psp->adev->psp.ta_fw ||
+ !psp->adev->psp.ta_xgmi_ucode_size ||
+ !psp->adev->psp.ta_xgmi_start_addr)
return -ENOENT;
if (!psp->xgmi_context.initialized) {
@@ -737,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->ras.ras_initialized)
return 0;
@@ -758,6 +785,18 @@ static int psp_ras_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_ras_ucode_size ||
+ !psp->adev->psp.ta_ras_start_addr) {
+ dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
@@ -772,6 +811,360 @@ static int psp_ras_initialize(struct psp_context *psp)
}
// ras end
+// HDCP start
+static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t hdcp_ta_mc,
+ uint64_t hdcp_mc_shared,
+ uint32_t hdcp_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_hdcp_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for hdcp ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return ret;
+}
+
+static int psp_hdcp_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
+ psp->ta_hdcp_ucode_size);
+
+ psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ PSP_HDCP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->hdcp_context.hdcp_initialized = 1;
+ psp->hdcp_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_hdcp_ucode_size ||
+ !psp->adev->psp.ta_hdcp_start_addr) {
+ dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ ret = psp_hdcp_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_hdcp_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
+}
+
+static int psp_hdcp_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.hdcp_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->hdcp_context.hdcp_initialized = 0;
+
+ /* free hdcp shared memory */
+ amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return 0;
+}
+// HDCP end
+
+// DTM start
+static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t dtm_ta_mc,
+ uint64_t dtm_mc_shared,
+ uint32_t dtm_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_dtm_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for dtm ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return ret;
+}
+
+static int psp_dtm_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+
+ psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->dtm_context.dtm_shared_mc_addr,
+ psp->ta_dtm_ucode_size,
+ PSP_DTM_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->dtm_context.dtm_initialized = 1;
+ psp->dtm_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_dtm_ucode_size ||
+ !psp->adev->psp.ta_dtm_start_addr) {
+ dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->dtm_context.dtm_initialized) {
+ ret = psp_dtm_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_dtm_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t dtm_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
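+/*
+ * Not part of the original patch: a DTM-specific unload helper, sketched
+ * here so that terminate releases the DTM session rather than the HDCP
+ * one. The unload-TA command layout is shared with HDCP; only the
+ * session id differs.
+ */
+static int psp_dtm_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the unloading in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+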
+static int psp_dtm_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.dtm_initialized)
+ return 0;
+
+	ret = psp_dtm_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->dtm_context.dtm_initialized = 0;
+
+	/* free dtm shared memory */
+ amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return 0;
+}
+// DTM end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -845,6 +1238,16 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
dev_err(psp->adev->dev,
"RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
}
return 0;
@@ -950,21 +1353,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
struct amdgpu_device *adev = psp->adev;
- const struct sdma_firmware_header_v1_0 *sdma_hdr =
- (const struct sdma_firmware_header_v1_0 *)
- adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
- const struct gfx_firmware_header_v1_0 *ce_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- const struct gfx_firmware_header_v1_0 *pfp_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- const struct gfx_firmware_header_v1_0 *me_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- const struct gfx_firmware_header_v1_0 *mec_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- const struct rlc_firmware_header_v2_0 *rlc_hdr =
- (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- const struct smc_firmware_header_v1_0 *smc_hdr =
- (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+ struct common_firmware_header *hdr;
switch (ucode->ucode_id) {
case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +1364,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
case AMDGPU_UCODE_ID_SDMA5:
case AMDGPU_UCODE_ID_SDMA6:
case AMDGPU_UCODE_ID_SDMA7:
- amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+ hdr = (struct common_firmware_header *)
+ adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+ amdgpu_ucode_print_sdma_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_CE:
- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_PFP:
- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_ME:
- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_MEC1:
- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_RLC_G:
- amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(hdr);
break;
case AMDGPU_UCODE_ID_SMC:
- amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
break;
default:
break;
@@ -1070,7 +1467,10 @@ out:
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
/*skip ucode loading in SRIOV VF */
continue;
@@ -1079,10 +1479,6 @@ out:
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
/* skip mec JT when autoload is enabled */
continue;
- /* Renoir only needs to load mec jump table one time */
- if (adev->asic_type == CHIP_RENOIR &&
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)
- continue;
psp_print_fw_hdr(psp, ucode);
@@ -1091,7 +1487,8 @@ out:
return ret;
/* Start rlc autoload after psp received all the gfx firmware */
- if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+ if (psp->autoload_supported && ucode->ucode_id ==
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -1216,8 +1613,11 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
- if (psp->adev->psp.ta_fw)
+ if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
+ }
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -1259,6 +1659,16 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate ras ta\n");
return ret;
}
+ ret = psp_hdcp_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate hdcp ta\n");
+ return ret;
+ }
+ ret = psp_dtm_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate dtm ta\n");
+ return ret;
+ }
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1278,6 +1688,12 @@ static int psp_resume(void *handle)
DRM_INFO("PSP is resuming...\n");
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
mutex_lock(&adev->firmware.mutex);
ret = psp_hw_start(psp);
@@ -1317,9 +1733,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index bc0947f6bc8a..09c5474ebcc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -37,6 +37,9 @@
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
+#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
+#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
struct psp_xgmi_node_info;
@@ -46,6 +49,8 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SYSDRV = 0x10000,
PSP_BL__LOAD_SOSDRV = 0x20000,
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+ PSP_BL__DRAM_LONG_TRAIN = 0x100000,
+ PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
};
enum psp_ring_type
@@ -108,6 +113,9 @@ struct psp_funcs
struct ta_ras_trigger_error_input *info);
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
int (*rlc_autoload_start)(struct psp_context *psp);
+ int (*mem_training_init)(struct psp_context *psp);
+ void (*mem_training_fini)(struct psp_context *psp);
+ int (*mem_training)(struct psp_context *psp, uint32_t ops);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -142,6 +150,65 @@ struct psp_ras_context {
struct amdgpu_ras *ras;
};
+struct psp_hdcp_context {
+ bool hdcp_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *hdcp_shared_bo;
+ uint64_t hdcp_shared_mc_addr;
+ void *hdcp_shared_buf;
+};
+
+struct psp_dtm_context {
+ bool dtm_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *dtm_shared_bo;
+ uint64_t dtm_shared_mc_addr;
+ void *dtm_shared_buf;
+};
+
+#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
+#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+
+enum psp_memory_training_init_flag {
+ PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+ PSP_MEM_TRAIN_SUPPORT = 0x1,
+ PSP_MEM_TRAIN_INIT_FAILED = 0x2,
+ PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
+ PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
+};
+
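+/*
+ * A cold boot needs the full (long) DRAM training sequence, while resume
+ * can replay the saved results with the short message; the COLD_BOOT and
+ * RESUME aliases below encode that mapping.
+ */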
+enum psp_memory_training_ops {
+ PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
+ PSP_MEM_TRAIN_SAVE = 0x2,
+ PSP_MEM_TRAIN_RESTORE = 0x4,
+ PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
+ PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
+ PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
+};
+
+struct psp_memory_training_context {
+	/* training data size */
+	u64 train_data_size;
+	/*
+	 * sys_cache: cpu virtual address of the system memory buffer
+	 * that is used to store the training data.
+	 */
+ void *sys_cache;
+
+	/* vram offset of the p2c training data */
+ u64 p2c_train_data_offset;
+ struct amdgpu_bo *p2c_bo;
+
+	/* vram offset of the c2p training data */
+ u64 c2p_train_data_offset;
+ struct amdgpu_bo *c2p_bo;
+
+ enum psp_memory_training_init_flag init;
+ u32 training_cnt;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -206,9 +273,21 @@ struct psp_context
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_ucode_size;
uint8_t *ta_ras_start_addr;
+
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_ucode_size;
+ uint8_t *ta_hdcp_start_addr;
+
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_ucode_size;
+ uint8_t *ta_dtm_start_addr;
+
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
+ struct psp_hdcp_context hdcp_context;
+ struct psp_dtm_context dtm_context;
struct mutex mutex;
+ struct psp_memory_training_context mem_train_ctx;
};
struct amdgpu_psp_funcs {
@@ -251,6 +330,12 @@ struct amdgpu_psp_funcs {
(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
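+/* the mem training callbacks are optional; a missing one reports success */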
+#define psp_mem_training_init(psp) \
+ ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
+#define psp_mem_training_fini(psp) \
+ ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
+#define psp_mem_training(psp, ops) \
+ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
@@ -279,6 +364,8 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 016ea274b955..404483437bd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -25,10 +25,13 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
"none",
@@ -65,11 +68,16 @@ const char *ras_block_string[] = {
/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr);
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr);
+enum amdgpu_ras_retire_page_reservation {
+ AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+ AMDGPU_RAS_RETIRE_PAGE_PENDING,
+ AMDGPU_RAS_RETIRE_PAGE_FAULT,
+};
+
+atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr);
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
@@ -189,6 +197,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
+
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -208,31 +220,44 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* As their names indicate, inject operation will write the
* value to the address.
*
- * Second member: struct ras_debug_if::op.
+ * The second member: struct ras_debug_if::op.
* It has three kinds of operations.
- * 0: disable RAS on the block. Take ::head as its data.
- * 1: enable RAS on the block. Take ::head as its data.
- * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * - 0: disable RAS on the block. Take ::head as its data.
+ * - 1: enable RAS on the block. Take ::head as its data.
+ * - 2: inject errors on the block. Take ::inject as its data.
*
* How to use the interface?
- * programs:
- * copy the struct ras_debug_if in your codes and initialize it.
- * write the struct to the control node.
*
- * bash:
- * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, smda, gfx, .........
- * see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block: sub block index, pass 0 if there is no sub block
+ * Programs
+ *
+ * Copy the struct ras_debug_if in your codes and initialize it.
+ * Write the struct to the control node.
+ *
+ * Shells
+ *
+ * .. code-block:: bash
+ *
+ * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ *
+ * Parameters:
+ *
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: error, address, value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ * sub_block:
+ * sub block index, pass 0 if there is no sub block
+ *
+ * Here are some examples of bash commands:
+ *
+ * .. code-block:: bash
*
- * here are some examples for bash commands,
* echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
@@ -245,8 +270,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* For inject, please check corresponding err count at
* /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
*
- * NOTE: operation is only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * .. note::
+ * Operations are only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * to see which blocks support RAS on a particular asic.
+ *
*/
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
@@ -276,6 +304,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
break;
}
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ amdgpu_ras_check_bad_page(adev, data.inject.address)) {
+ DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ data.inject.address);
+ break;
+ }
+
/* data.inject.address is offset instead of absolute gpu address */
ret = amdgpu_ras_error_inject(adev, &data.inject);
break;
@@ -290,6 +326,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
return size;
}
+/**
+ * DOC: AMDGPU RAS debugfs EEPROM table reset interface
+ *
+ * Some boards contain an EEPROM which is used to persistently store a list of
+ * bad pages which experience ECC errors in vram. This interface provides
+ * a way to reset the EEPROM, e.g., after testing error injection.
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo 1 > ../ras/ras_eeprom_reset
+ *
+ * will reset the EEPROM table to 0 entries.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+ ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+
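+	/* the reset helper reports success as 1; anything else becomes -EIO */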
+ return ret == 1 ? size : -EIO;
+}
+
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.owner = THIS_MODULE,
.read = NULL,
@@ -297,6 +360,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_eeprom_write,
+ .llseek = default_llseek
+};
+
+/**
+ * DOC: AMDGPU RAS sysfs Error Count Interface
+ *
+ * It allows the user to read the error count for each IP block on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * It outputs multiple lines which report the uncorrected (ue) and corrected
+ * (ce) error counts.
+ *
+ * The format of one line is as follows:
+ *
+ * [ce|ue]: count
+ *
+ * Example:
+ *
+ * .. code-block:: bash
+ *
+ * ue: 0
+ * ce: 1
+ *
+ */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -475,15 +566,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
return 0;
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
- if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
- if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ if (!amdgpu_ras_intr_triggered()) {
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ if (ret == TA_RAS_STATUS__RESET_NEEDED)
+ return -EAGAIN;
+ return -EINVAL;
+ }
}
/* setup the obj */
@@ -615,8 +708,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub_funcs->query_ras_error_count)
- adev->mmhub_funcs->query_ras_error_count(adev, &err_data);
+ if (adev->mmhub.funcs->query_ras_error_count)
+ adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ if (adev->nbio.funcs->query_ras_error_count)
+ adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
@@ -628,12 +725,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
- if (err_data.ce_count)
+ if (err_data.ce_count) {
dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
obj->err_data.ce_count, ras_block_str(info->head.block));
- if (err_data.ue_count)
+ }
+ if (err_data.ue_count) {
dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
obj->err_data.ue_count, ras_block_str(info->head.block));
+ }
return 0;
}
@@ -664,6 +763,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
default:
@@ -723,18 +824,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
switch (flags) {
- case 0:
+ case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
return "R";
- case 1:
+ case AMDGPU_RAS_RETIRE_PAGE_PENDING:
return "P";
- case 2:
+ case AMDGPU_RAS_RETIRE_PAGE_FAULT:
default:
return "F";
};
}
-/*
- * DOC: ras sysfs gpu_vram_bad_pages interface
+/**
+ * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
*
* It allows user to read the bad pages of vram on the gpu through
* /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
@@ -746,14 +847,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*
* gpu pfn and gpu page size are printed in hex format.
* flags can be one of below character,
+ *
* R: reserved, this gpu page is reserved and not able to use.
+ *
* P: pending for reserve, this gpu page is marked as bad, will be reserved
- * in next window of page_reserve.
+ * in next window of page_reserve.
+ *
* F: unable to reserve. this gpu page can't be reserved due to some reasons.
*
- * examples:
- * 0x00000001 : 0x00001000 : R
- * 0x00000002 : 0x00001000 : P
+ * Examples:
+ *
+ * .. code-block:: bash
+ *
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ *
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
@@ -927,6 +1035,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
}
/* sysfs end */
+/**
+ * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+ *
+ * Normally when there is an uncorrectable error, the driver will reset
+ * the GPU to recover. However, in the event of an unrecoverable error,
+ * the driver provides an interface to reboot the system automatically.
+ *
+ * The following file in debugfs provides that interface:
+ * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo true > .../ras/auto_reboot
+ *
+ */
/* debugfs begin */
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
@@ -934,8 +1060,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
con->dir = debugfs_create_dir("ras", minor->debugfs_root);
- con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_eeprom_ops);
+
+ /*
+ * After one uncorrectable error happens, usually GPU recovery will
+	 * be scheduled. But due to the known problem of GPU recovery failing
+	 * to bring the GPU back, the interface below provides the user a
+	 * direct way to reboot the system automatically when an
+	 * ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
+	 * routine will then never be called.
+ */
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
+ &con->reboot);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -980,10 +1119,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove(con->ent);
- debugfs_remove(con->dir);
+ debugfs_remove_recursive(con->dir);
con->dir = NULL;
- con->ent = NULL;
}
/* debugfs end */
@@ -1188,15 +1325,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
for (; i < data->count; i++) {
(*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].bp,
+ .bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
- .flags = 0,
+ .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
if (data->last_reserved <= i)
- (*bps)[i].flags = 1;
- else if (data->bps[i].bo == NULL)
- (*bps)[i].flags = 2;
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (data->bps_bo[i] == NULL)
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
*count = data->count;
@@ -1214,105 +1351,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
atomic_set(&ras->in_recovery, 0);
}
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr)
-{
- /* no need to free it actually. */
- amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
- return 0;
-}
-
-/* reserve vram with size@offset */
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- struct amdgpu_bo *bo;
-
- if (bo_ptr)
- *bo_ptr = NULL;
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
-
- r = amdgpu_bo_create(adev, &bp, &bo);
- if (r)
- return -EINVAL;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- goto error_reserve;
-
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- offset,
- offset + size);
- if (r)
- goto error_pin;
-
- if (bo_ptr)
- *bo_ptr = bo;
-
- amdgpu_bo_unreserve(bo);
- return r;
-
-error_pin:
- amdgpu_bo_unreserve(bo);
-error_reserve:
- amdgpu_bo_unref(&bo);
- return r;
-}
-
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
struct ras_err_handler_data *data, int pages)
{
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
- unsigned int align_space = ALIGN(new_space, 1024);
- void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
-
- if (!tmp)
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ struct amdgpu_bo **bps_bo =
+ kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
+
+ if (!bps || !bps_bo) {
+ kfree(bps);
+ kfree(bps_bo);
return -ENOMEM;
+ }
if (data->bps) {
- memcpy(tmp, data->bps,
+ memcpy(bps, data->bps,
data->count * sizeof(*data->bps));
kfree(data->bps);
}
+ if (data->bps_bo) {
+ memcpy(bps_bo, data->bps_bo,
+ data->count * sizeof(*data->bps_bo));
+ kfree(data->bps_bo);
+ }
- data->bps = tmp;
+ data->bps = bps;
+ data->bps_bo = bps_bo;
data->space_left += align_space - old_space;
return 0;
}
/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages)
+ struct eeprom_table_record *bps, int pages)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = pages;
int ret = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
@@ -1329,24 +1407,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- while (i--)
- data->bps[data->count++].bp = bps[i];
-
+ memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
+ data->count += pages;
data->space_left -= pages;
+
out:
mutex_unlock(&con->recovery_lock);
return ret;
}
+/*
+ * write the error record array to eeprom; the function should be
+ * protected by recovery_lock
+ */
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_ras_eeprom_control *control;
+ int save_count;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ control = &con->eeprom_control;
+ data = con->eh_data;
+ save_count = data->count - control->num_recs;
+ /* only new entries are saved */
+ if (save_count > 0)
+ if (amdgpu_ras_eeprom_process_recods(control,
+ &data->bps[control->num_recs],
+ true,
+ save_count)) {
+ DRM_ERROR("Failed to save EEPROM table data!");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * read the error record array from eeprom and reserve enough space
+ * for storing new bad pages
+ */
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras.ras->eeprom_control;
+ struct eeprom_table_record *bps = NULL;
+ int ret = 0;
+
+ /* no bad page records, skip eeprom access */
+ if (!control->num_recs)
+ return ret;
+
+ bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ if (amdgpu_ras_eeprom_process_recods(control, bps, false,
+ control->num_recs)) {
+ DRM_ERROR("Failed to load EEPROM table records!");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
+
+out:
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * check if an address belongs to a bad page
+ *
+ * Note: this check is only for the umc block
+ */
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i;
+ bool ret = false;
+
+ if (!con || !con->eh_data)
+ return ret;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page) {
+ ret = true;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
uint64_t bp;
- struct amdgpu_bo *bo;
- int i;
+ struct amdgpu_bo *bo = NULL;
+ int i, ret = 0;
if (!con || !con->eh_data)
return 0;
@@ -1357,18 +1531,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
goto out;
/* reserve vram at driver post stage. */
for (i = data->last_reserved; i < data->count; i++) {
- bp = data->bps[i].bp;
+ bp = data->bps[i].retired_page;
- if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
- PAGE_SIZE, &bo))
- DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+ /* There are two cases of reserve errors that should be ignored:
+ * 1) a ras bad page has been allocated (used by someone);
+ * 2) a ras bad page has been reserved (duplicate error injection
+ * for one page);
+ */
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL))
+ DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i + 1;
+ bo = NULL;
}
+
+ /* continue to save bad pages to eeprom even if reserve_vram fails */
+ ret = amdgpu_ras_save_bad_pages(adev);
out:
mutex_unlock(&con->recovery_lock);
- return 0;
+ return ret;
}
/* called on driver unload */
@@ -1388,11 +1573,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
goto out;
for (i = data->last_reserved - 1; i >= 0; i--) {
- bo = data->bps[i].bo;
+ bo = data->bps_bo[i];
- amdgpu_ras_release_vram(adev, &bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i;
}
out:
@@ -1400,41 +1585,54 @@ out:
return 0;
}
-static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * write the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * read the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data **data = &con->eh_data;
+ struct ras_err_handler_data **data;
+ int ret;
- *data = kmalloc(sizeof(**data),
- GFP_KERNEL|__GFP_ZERO);
- if (!*data)
- return -ENOMEM;
+ if (con)
+ data = &con->eh_data;
+ else
+ return 0;
+
+ *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- amdgpu_ras_load_bad_pages(adev);
- amdgpu_ras_reserve_bad_pages(adev);
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
+ if (ret)
+ goto free;
+
+ if (con->eeprom_control.num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ goto free;
+ ret = amdgpu_ras_reserve_bad_pages(adev);
+ if (ret)
+ goto release;
+ }
return 0;
+
+release:
+ amdgpu_ras_release_bad_pages(adev);
+free:
+ kfree((*data)->bps);
+ kfree((*data)->bps_bo);
+ kfree(*data);
+ con->eh_data = NULL;
+out:
+ DRM_WARN("Failed to initialize ras recovery!\n");
+
+ return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
@@ -1442,13 +1640,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data = con->eh_data;
+ /* recovery_init failed to init it, so there is nothing for fini to do */
+ if (!data)
+ return 0;
+
cancel_work_sync(&con->recovery_work);
- amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_release_bad_pages(adev);
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
+ kfree(data->bps_bo);
kfree(data);
mutex_unlock(&con->recovery_lock);
@@ -1500,6 +1702,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int r;
if (con)
return 0;
@@ -1527,31 +1730,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need to get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (amdgpu_ras_recovery_init(adev))
- goto recovery_out;
+ if (adev->nbio.funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (r)
+ return r;
+ }
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- /* ras init for each ras block */
- if (adev->umc.funcs->ras_init)
- adev->umc.funcs->ras_init(adev);
-
DRM_INFO("RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
fs_out:
- amdgpu_ras_recovery_fini(adev);
-recovery_out:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
return -EINVAL;
}
+/* helper function to handle common stuff in ip late init phase */
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info)
+{
+ int r;
+
+ /* disable RAS feature per IP block if it is not supported */
+ if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ return 0;
+ }
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+ /* request gpu reset. will run again */
+ amdgpu_ras_request_reset_on_boot(adev,
+ ras_block->block);
+ return 0;
+ } else if (adev->in_suspend || adev->in_gpu_reset) {
+ /* in the resume phase, if enabling ras fails,
+ * clean up all ras fs nodes and disable ras */
+ goto cleanup;
+ } else
+ return r;
+ }
+
+ /* in resume phase, no need to create ras fs node */
+ if (adev->in_suspend || adev->in_gpu_reset)
+ return 0;
+
+ if (ih_info->cb) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+ if (r)
+ goto interrupt;
+ }
+
+ amdgpu_ras_debugfs_create(adev, fs_info);
+
+ r = amdgpu_ras_sysfs_create(adev, fs_info);
+ if (r)
+ goto sysfs;
+
+ return 0;
+cleanup:
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+ return r;
+}
+
+/* helper function to remove ras fs node and interrupt handler */
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info)
+{
+ if (!ras_block || !ih_info)
+ return;
+
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+}
+
/* do some init work after IP late init as dependence.
* and it runs in resume/gpu reset/booting up cases.
*/
@@ -1645,3 +1923,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
return 0;
}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+{
+ uint32_t hw_supported, supported;
+
+ amdgpu_ras_check_supported(adev, &hw_supported, &supported);
+ if (!hw_supported)
+ return;
+
+ if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+
+ amdgpu_ras_reset_gpu(adev, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6c76bb2a6843..f80fd3428c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -317,8 +317,6 @@ struct amdgpu_ras {
struct list_head head;
/* debugfs */
struct dentry *dir;
- /* debugfs ctrl */
- struct dentry *ent;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -334,7 +332,7 @@ struct amdgpu_ras {
struct mutex recovery_lock;
uint32_t flags;
-
+ bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
};
@@ -347,15 +345,14 @@ struct ras_err_data {
unsigned long ue_count;
unsigned long ce_count;
unsigned long err_addr_cnt;
- uint64_t *err_addr;
+ struct eeprom_table_record *err_addr;
};
struct ras_err_handler_data {
- /* point to bad pages array */
- struct {
- unsigned long bp;
- struct amdgpu_bo *bo;
- } *bps;
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* point to reserved bo array */
+ struct amdgpu_bo **bps_bo;
/* the count of entries */
int count;
/* the space can place new entries */
@@ -365,7 +362,7 @@ struct ras_err_handler_data {
};
typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
struct ras_ih_data {
@@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
return ras && (ras->supported & (1 << block));
}
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
unsigned int block);
@@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages);
+ struct eeprom_table_record *bps, int pages);
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
@@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* save bad pages to eeprom before gpu reset;
+ * i2c may be unstable during gpu reset
+ */
+ if (in_task())
+ amdgpu_ras_reserve_bad_pages(adev);
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
return 0;
@@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
int amdgpu_ras_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info);
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
@@ -599,4 +610,14 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
+
+extern atomic_t amdgpu_ras_in_intr;
+
+static inline bool amdgpu_ras_intr_triggered(void)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 8a32b5c93778..7de16c0c2f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -100,7 +100,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
return ret;
}
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control);
+
+
+static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
+{
+ int i;
+ uint32_t tbl_sum = 0;
+
+ /* Header checksum, skip checksum field in the calculation */
+ for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
+ tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
+
+ return tbl_sum;
+}
+
+static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
+ int num)
+{
+ int i, j;
+ uint32_t tbl_sum = 0;
+
+ /* Records checksum */
+ for (i = 0; i < num; i++) {
+ struct eeprom_table_record *record = &records[i];
+
+ for (j = 0; j < sizeof(*record); j++) {
+ tbl_sum += *(((unsigned char *)record) + j);
+ }
+ }
+
+ return tbl_sum;
+}
+
+static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
+}
+
+/* Checksum = 256 - ((sum of all table entries) mod 256) */
+static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num,
+ uint32_t old_hdr_byte_sum)
+{
+ /*
+ * This will update the table sum with new records.
+ *
+ * TODO: What happens when the EEPROM table wraps around and old
+ * records at the start get overwritten?
+ */
+
+ /* need to recalculate updated header byte sum */
+ control->tbl_byte_sum -= old_hdr_byte_sum;
+ control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
+
+ control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
+}
+
+/* table sum mod 256 + checksum must equal 256 */
+static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
+
+ if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
+ DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
+ return false;
+ }
+
+ return true;
+}
+
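As a quick sanity check of the two checksum rules above, here is a standalone sketch with made-up byte values (in the driver the sum covers the header plus all records, per __calc_tbl_byte_sum()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t table[] = { 0xAA, 0x10, 0x03, 0x7F };	/* hypothetical bytes */
	uint32_t sum = 0, checksum;
	unsigned int i;

	for (i = 0; i < sizeof(table); i++)
		sum += table[i];
	checksum = 256 - (sum % 256);	/* rule used by __update_tbl_checksum() */
	/* invariant verified by __validate_tbl_checksum() */
	printf("valid: %d\n", checksum + (sum % 256) == 256);
	return 0;
}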
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ mutex_lock(&control->tbl_mutex);
+
+ hdr->header = EEPROM_TABLE_HDR_VAL;
+ hdr->version = EEPROM_TABLE_VER;
+ hdr->first_rec_offset = EEPROM_RECORD_START;
+ hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
+
+ control->tbl_byte_sum = 0;
+ __update_tbl_checksum(control, NULL, 0, 0);
+ control->next_addr = EEPROM_RECORD_START;
+
+ ret = __update_table_header(control, buff);
+
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+}
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
@@ -122,6 +216,10 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
break;
+ case CHIP_ARCTURUS:
+ ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
+ break;
+
default:
return 0;
}
@@ -143,25 +241,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
EEPROM_TABLE_RECORD_SIZE;
+ control->tbl_byte_sum = __calc_hdr_byte_sum(control);
+ control->next_addr = EEPROM_RECORD_START;
+
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs);
} else {
DRM_INFO("Creating new EEPROM table");
- hdr->header = EEPROM_TABLE_HDR_VAL;
- hdr->version = EEPROM_TABLE_VER;
- hdr->first_rec_offset = EEPROM_RECORD_START;
- hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
-
- adev->psp.ras.ras->eeprom_control.tbl_byte_sum =
- __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control);
- ret = __update_table_header(control, buff);
+ ret = amdgpu_ras_eeprom_reset_table(control);
}
- /* Start inserting records from here */
- adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START;
-
return ret == 1 ? 0 : -EIO;
}
@@ -173,6 +264,9 @@ void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
case CHIP_VEGA20:
smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
break;
+ case CHIP_ARCTURUS:
+ smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
+ break;
default:
return;
@@ -226,8 +320,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co
record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
i += 6;
- buff[i++] = record->mem_channel;
- buff[i++] = record->mcumc_id;
+ record->mem_channel = buff[i++];
+ record->mcumc_id = buff[i++];
memcpy(&tmp, buff + i, 6);
record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
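The decode hunk above fixes the copy direction: the old code wrote the (uninitialized) record fields into the read buffer instead of filling the record from it. A toy round-trip showing the intended symmetry, with the multi-byte fields omitted for brevity (field names are borrowed from eeprom_table_record; everything else here is an illustrative assumption):

#include <stdio.h>

struct rec {
	unsigned char mem_channel;
	unsigned char mcumc_id;
};

int main(void)
{
	struct rec in = { 3, 7 }, out = { 0, 0 };
	unsigned char buff[2];

	/* encode: record -> buffer (the write path) */
	buff[0] = in.mem_channel;
	buff[1] = in.mcumc_id;
	/* decode: buffer -> record, the direction this hunk restores */
	out.mem_channel = buff[0];
	out.mcumc_id = buff[1];
	printf("round-trip ok: %d\n",
	       out.mem_channel == in.mem_channel && out.mcumc_id == in.mcumc_id);
	return 0;
}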
@@ -266,87 +360,18 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
-
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
-{
- int i;
- uint32_t tbl_sum = 0;
-
- /* Header checksum, skip checksum field in the calculation */
- for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
- tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
-
- return tbl_sum;
-}
-
-static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
- int num)
-{
- int i, j;
- uint32_t tbl_sum = 0;
-
- /* Records checksum */
- for (i = 0; i < num; i++) {
- struct eeprom_table_record *record = &records[i];
-
- for (j = 0; j < sizeof(*record); j++) {
- tbl_sum += *(((unsigned char *)record) + j);
- }
- }
-
- return tbl_sum;
-}
-
-static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
-}
-
-/* Checksum = 256 -((sum of all table entries) mod 256) */
-static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num,
- uint32_t old_hdr_byte_sum)
-{
- /*
- * This will update the table sum with new records.
- *
- * TODO: What happens when the EEPROM table is to be wrapped around
- * and old records from start will get overridden.
- */
-
- /* need to recalculate updated header byte sum */
- control->tbl_byte_sum -= old_hdr_byte_sum;
- control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
-
- control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
-}
-
-/* table sum mod 256 + checksum must equals 256 */
-static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
-
- if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
- DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
- return false;
- }
-
- return true;
-}
-
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
int num)
{
int i, ret = 0;
- struct i2c_msg *msgs;
- unsigned char *buffs;
+ struct i2c_msg *msgs, *msg;
+ unsigned char *buffs, *buff;
+ struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control);
- if (adev->asic_type != CHIP_VEGA20)
+ if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
return 0;
buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -373,9 +398,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
* 256b
*/
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
- struct i2c_msg *msg = &msgs[i];
+ buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
+ msg = &msgs[i];
control->next_addr = __correct_eeprom_dest_address(control->next_addr);
@@ -415,8 +440,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
if (!write) {
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
+ buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
__decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 41f3fcb9a29b..622269957c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -79,6 +79,7 @@ struct eeprom_table_record {
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 5c13c503e61f..6010999d9020 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
return csa_mc_addr;
}
+
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info)
+{
+ int r, i;
+ struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+
+ if (!ih_info)
+ return -EINVAL;
+
+ if (!adev->sdma.ras_if) {
+ adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->sdma.ras_if)
+ return -ENOMEM;
+ adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
+ adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if->sub_block_index = 0;
+ strcpy(adev->sdma.ras_if->name, "sdma");
+ }
+ fs_info.head = ih_info->head = *adev->sdma.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
+ &fs_info, ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (r)
+ goto late_fini;
+ }
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
+free:
+ kfree(adev->sdma.ras_if);
+ adev->sdma.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ /* the cb member is not used by
+ * amdgpu_ras_interrupt_remove_handler; initialize it only
+ * to satisfy the check in ras_late_fini
+ */
+ .cb = amdgpu_sdma_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index a9ae0d8a0589..761ff8be6314 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -104,4 +104,13 @@ struct amdgpu_sdma_instance *
amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b66d29d5ffa2..b158230af8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 77674a7b9616..63e734a125fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(unsigned int, context)
__field(unsigned int, seqno)
__field(struct dma_fence *, fence)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__field(unsigned int, context)
__field(unsigned int, seqno)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint64_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
+ uint32_t incr, uint64_t flags, bool direct),
+ TP_ARGS(pe, addr, count, incr, flags, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
@@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
+ "direct=%d", __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count, __entry->direct)
);
TRACE_EVENT(amdgpu_vm_copy_ptes,
- TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
- TP_ARGS(pe, src, count),
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct),
+ TP_ARGS(pe, src, count, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, src)
__field(u32, count)
+ __field(bool, direct)
),
TP_fast_assign(
__entry->pe = pe;
__entry->src = src;
__entry->count = count;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, src=%010Lx, count=%u",
- __entry->pe, __entry->src, __entry->count)
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d",
+ __entry->pe, __entry->src, __entry->count,
+ __entry->direct)
);
TRACE_EVENT(amdgpu_vm_flush,
@@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __field(const char *,name)
+ __string(ring, sched_job->base.sched->name)
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
@@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __entry->name = sched_job->base.sched->name;
+ __assign_str(ring, sched_job->base.sched->name)
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __entry->name, __entry->id,
+ __get_str(ring), __entry->id,
__entry->fence, __entry->ctx,
__entry->seqno)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dff41d0a85fe..61d9b7774d42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
+#include <linux/dma-buf.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -54,6 +55,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
@@ -484,15 +486,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* create space/pages for new_mem in GTT space */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -543,15 +542,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* make space in GTT for old_mem buffer */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -763,6 +759,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
+ struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
@@ -1217,16 +1214,14 @@ static struct ttm_backend_func amdgpu_backend_func = {
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_ttm_adev(bo->bdev);
-
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
+ gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
@@ -1247,7 +1242,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
@@ -1260,7 +1254,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return 0;
}
- if (slave && ttm->sg) {
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
@@ -1287,9 +1293,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ struct amdgpu_device *adev;
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1298,7 +1303,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
}
- if (slave)
+ if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ return;
+ }
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
adev = amdgpu_ttm_adev(ttm->bdev);
@@ -1634,81 +1648,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- u64 vram_size = adev->gmc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->fw_vram_usage.size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ if (adev->fw_vram_usage.size == 0 ||
+ adev->fw_vram_usage.size > vram_size)
+ return 0;
- r = amdgpu_bo_create(adev, &bp,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->fw_vram_usage.start_offset,
+ adev->fw_vram_usage.size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+}
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
+/*
+ * Memory training reservation functions
+ */
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
+/**
+ * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the vram reserved for memory training, if it has been reserved.
+ */
+static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
+ ctx->c2p_bo = NULL;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size));
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
+ ctx->p2c_bo = NULL;
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_training_reserve_vram_init - create the vram reservation for memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Create the bo vram reservation used by memory training.
+ */
+static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+ if (!adev->fw_vram_usage.mem_train_support) {
+ DRM_DEBUG("memory training does not support!\n");
+ return 0;
}
- return r;
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
+ ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->p2c_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->p2c_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->c2p_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
+ return 0;
+
+Err_out:
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
+
/**
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1731,6 +1769,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
+ adev->ddev->vma_offset_manager,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1771,6 +1810,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /*
+ * The reserved vram for memory training must be pinned to the
+ * specified place in VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
@@ -1781,6 +1828,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
NULL, &stolen_vga_buf);
if (r)
return r;
+
+ /*
+ * reserve one 64K TMR at the top of VRAM, which holds the
+ * IP Discovery data and is protected by PSP.
+ */
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
+ DISCOVERY_TMR_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1856,7 +1917,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the IP Discovery TMR memory to VRAM */
+ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
@@ -1952,10 +2017,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- num_dw = adev->mman.buffer_funcs->copy_num_dw;
- while (num_dw & 0x7)
- num_dw++;
-
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
@@ -2015,11 +2077,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
- num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
-
- /* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3a6115ad0196..833fc4b68940 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_RAVEN:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
- case CHIP_ARCTURUS:
- return AMDGPU_FW_LOAD_DIRECT;
default:
DRM_ERROR("Unknown firmware load type\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index b34f00d42049..410587b950f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_offset_bytes;
uint32_t ta_ras_size_bytes;
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_offset_bytes;
+ uint32_t ta_hdcp_size_bytes;
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_offset_bytes;
+ uint32_t ta_dtm_size_bytes;
};
/* version_major=1, version_minor=0 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
new file mode 100644
index 000000000000..d4fb9cf27e21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras.h"
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ if (!adev->umc.ras_if) {
+ adev->umc.ras_if =
+ kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->umc.ras_if)
+ return -ENOMEM;
+ adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if->sub_block_index = 0;
+ strcpy(adev->umc.ras_if->name, "umc");
+ }
+ ih_info.head = fs_info.head = *adev->umc.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ /* ras init of specific umc version */
+ if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
+ adev->umc.funcs->err_cnt_init(adev);
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
+free:
+ kfree(adev->umc.ras_if);
+ adev->umc.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->umc.ras_if) {
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return AMDGPU_RAS_SUCCESS;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_count)
+ adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ }
+
+ /* only uncorrectable errors need a gpu reset */
+ if (err_data->ue_count) {
+ if (err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt))
+ DRM_WARN("Failed to add ras bad page!\n");
+
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 975afa04df09..3283032a78e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,7 +54,8 @@
adev->umc.funcs->disable_umc_index_mode(adev);
struct amdgpu_umc_funcs {
- void (*ras_init)(struct amdgpu_device *adev);
+ void (*err_cnt_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
@@ -62,6 +63,7 @@ struct amdgpu_umc_funcs {
void (*enable_umc_index_mode)(struct amdgpu_device *adev,
uint32_t umc_instance);
void (*disable_umc_index_mode)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
};
struct amdgpu_umc {
@@ -75,8 +77,17 @@ struct amdgpu_umc {
uint32_t channel_offs;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
+ struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
};
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2c364b8695f..e324bfe6c58f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -39,6 +39,8 @@
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"
+#include "amdgpu_ras.h"
+
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -297,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
@@ -372,7 +375,13 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ /* re-write 0 since err_event_athub will corrupt VCPU buffer */
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+ memset(adev->uvd.inst[j].saved_bo, 0, size);
+ } else {
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 65044b1b3d4c..46b590af2fd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
/**
* amdgpu_vce_init - allocate memory, load vce firmware
@@ -211,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
@@ -428,9 +434,9 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* Open up a stream for HW test
*/
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
@@ -508,8 +514,8 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index e802f7d9db0a..d6d83a3ec803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -58,11 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
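
The VCE hunks above demote amdgpu_vce_get_create_msg()/amdgpu_vce_get_destroy_msg() from exported functions to file-local helpers: the definitions gain static, forward declarations appear near the top of the file (the functions are called before they are defined), and the prototypes drop out of the header. A generic sketch of that C idiom (helper() and api_entry() are hypothetical names):

    static int helper(int x);               /* forward declaration: used before defined */

    int api_entry(void)
    {
            return helper(2);
    }

    static int helper(int x)                /* definition; no longer exposed in a header */
    {
            return x * 2;
    }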
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3199e4a5ff12..9d870444d7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
&adev->vcn.dpg_sram_gpu_addr,
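
UVD, VCE and VCN all gain the same teardown rule in this series: cancel the idle work synchronously before tearing down anything the work handler dereferences, otherwise a handler that is still running (or about to be queued) can touch freed memory. A hedged kernel-style sketch of the ordering (struct engine and engine_fini() are hypothetical):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct engine {
            struct delayed_work idle_work;
            void *vcpu_state;
    };

    static void engine_fini(struct engine *e)
    {
            /* returns only once the handler has finished and cannot rearm */
            cancel_delayed_work_sync(&e->idle_work);
            kfree(e->vcpu_state);           /* now safe: no handler can reach it */
    }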
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5251352f5922..598c24505c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
-/**
+/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
*/
struct amdgpu_vm_pt_cursor {
@@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
*
* @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
* @cursor: state to initialize
*
* Starts a depth-first traversal of the PD/PT tree.
@@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
amdgpu_vm_pt_ancestor(cursor);
}
-/**
+/*
* for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
*/
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
@@ -566,6 +568,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
}
+/**
+ * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
+ *
+ * @bo: BO which was removed from the LRU
+ *
+ * Make sure the bulk_moveable flag is updated when a BO is removed from the
+ * LRU.
+ */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_bo *abo;
@@ -600,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -624,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&vm->lru_bulk_move);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
vm->bulk_moveable = true;
}
@@ -693,6 +702,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
+ * @direct: use a direct update
*
* Root PD needs to be reserved when calling this.
*
@@ -701,7 +711,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo,
+ bool direct)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -760,6 +771,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
@@ -813,10 +825,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requesting vm
+ * @level: the page table level
+ * @direct: use a direct update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, struct amdgpu_bo_param *bp)
+ int level, bool direct,
+ struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -831,6 +846,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
+ bp->no_wait_gpu = direct;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -841,6 +857,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
+ * @direct: use a direct update
*
* Make sure a specific page table or directory is allocated.
*
@@ -850,7 +867,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool direct)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -871,7 +889,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -883,7 +901,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
if (r)
goto error_free_pt;
@@ -1020,7 +1038,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -1034,10 +1053,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
- bool pasid_mapping_needed = id->pasid != job->pasid ||
- !id->pasid_mapping ||
- !dma_fence_is_signaled(id->pasid_mapping);
struct dma_fence *fence = NULL;
+ bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
int r;
@@ -1047,6 +1064,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
pasid_mapping_needed = true;
}
+ mutex_lock(&id_mgr->lock);
+ if (id->pasid != job->pasid || !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping))
+ pasid_mapping_needed = true;
+ mutex_unlock(&id_mgr->lock);
+
gds_switch_needed &= !!ring->funcs->emit_gds_switch;
vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
@@ -1086,9 +1109,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
if (pasid_mapping_needed) {
+ mutex_lock(&id_mgr->lock);
id->pasid = job->pasid;
dma_fence_put(id->pasid_mapping);
id->pasid_mapping = dma_fence_get(fence);
+ mutex_unlock(&id_mgr->lock);
}
dma_fence_put(fence);
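
The flush hunks move both the pasid_mapping check and its later update under id_mgr->lock, so the read-and-decide step can no longer race against a concurrent flush on another ring updating the same VMID. A sketch of just the locked decision, with hypothetical types standing in for the VMID manager:

    #include <linux/mutex.h>
    #include <linux/dma-fence.h>

    struct vmid {
            unsigned int pasid;
            struct dma_fence *mapping;
    };

    static bool pasid_update_needed(struct mutex *lock, struct vmid *id,
                                    unsigned int job_pasid)
    {
            bool need;

            mutex_lock(lock);
            need = id->pasid != job_pasid || !id->mapping ||
                   !dma_fence_is_signaled(id->mapping);
            mutex_unlock(lock);
            return need;
    }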
@@ -1172,10 +1197,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/*
+/**
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @param: parameters for the update
+ * @params: parameters for the update
* @vm: requested vm
* @entry: entry to update
*
@@ -1199,7 +1224,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
-/*
+/**
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
*
* @adev: amdgpu_device pointer
@@ -1218,19 +1243,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
amdgpu_vm_bo_relocated(&entry->base);
}
-/*
- * amdgpu_vm_update_directories - make sure that all directories are valid
+/**
+ * amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @direct: submit directly to the paging queue
*
* Makes sure all directories are up to date.
*
* Returns:
* 0 for success, error for failure.
*/
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1241,6 +1267,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
if (r)
@@ -1268,7 +1295,7 @@ error:
return r;
}
-/**
+/*
* amdgpu_vm_update_flags - figure out flags for PTE updates
*
* Make sure to set the right flags for the PTEs at the desired level.
@@ -1391,7 +1418,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ /* make sure that the page tables covering the address range are
+ * actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
+ params->direct);
if (r)
return r;
@@ -1463,7 +1494,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Free all child entries */
+ /* Free all child entries.
+ * Update the tables with the flags and addresses and free up subsequent
+ * tables in the case of huge pages or freed-up areas.
+ * This is the maximum that can be freed; all other page tables are not
+ * completely covered by the range and so are potentially still in use.
+ */
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
@@ -1482,13 +1518,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
+ * @direct: direct submission in a page fault
+ * @exclusive: fence we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
* @addr: addr to set the area to
+ * @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
@@ -1497,11 +1534,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct,
struct dma_fence *exclusive,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
+ dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
@@ -1511,6 +1548,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
@@ -1569,27 +1607,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
- if (adev->asic_type >= CHIP_NAVI10) {
- flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
- } else {
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
- }
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- if (adev->asic_type >= CHIP_NAVI10) {
- flags |= AMDGPU_PTE_SNOOPED;
- flags |= AMDGPU_PTE_LOG;
- flags |= AMDGPU_PTE_SYSTEM;
- }
- flags &= ~AMDGPU_PTE_VALID;
- }
+ /* Apply ASIC-specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1633,7 +1652,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = count *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1642,9 +1662,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
start, last, flags, addr,
- fence);
+ dma_addr, fence);
if (r)
return r;
@@ -1672,8 +1692,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
+int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1700,7 +1719,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = dma_resv_get_excl(bo->tbo.base.resv);
+ exclusive = bo->tbo.moving;
}
if (bo) {
@@ -1731,12 +1750,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
return r;
}
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- }
-
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
@@ -1744,7 +1757,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -1938,9 +1952,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
mapping->start, mapping->last,
- init_pte_value, 0, &f);
+ init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2682,12 +2696,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
- /* create scheduler entity for page table updates */
- r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+ /* create scheduler entities for page table updates */
+ r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
adev->vm_manager.vm_pte_num_rqs, NULL);
if (r)
return r;
+ r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+ adev->vm_manager.vm_pte_num_rqs, NULL);
+ if (r)
+ goto error_free_direct;
+
vm->pte_support_ats = false;
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2702,7 +2721,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2711,12 +2731,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
r = amdgpu_bo_create(adev, &bp, &root);
if (r)
- goto error_free_sched_entity;
+ goto error_free_delayed;
r = amdgpu_bo_reserve(root, true);
if (r)
@@ -2728,7 +2748,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root);
+ r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
goto error_unreserve;
@@ -2759,8 +2779,11 @@ error_free_root:
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
-error_free_sched_entity:
- drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+ drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+ drm_sched_entity_destroy(&vm->direct);
return r;
}
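
With two scheduler entities instead of one, amdgpu_vm_init() also needs two unwind labels; the shape is the usual goto ladder, releasing resources in reverse order of acquisition. A self-contained sketch of the ladder (all names hypothetical):

    struct ctx;
    static int acquire_a(struct ctx *c) { (void)c; return 0; }
    static int acquire_b(struct ctx *c) { (void)c; return 0; }
    static void release_a(struct ctx *c) { (void)c; }

    static int init_both(struct ctx *c)
    {
            int r;

            r = acquire_a(c);
            if (r)
                    return r;

            r = acquire_b(c);
            if (r)
                    goto error_free_a;      /* b failed: undo a only */

            return 0;

    error_free_a:
            release_a(c);
            return r;
    }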
@@ -2801,6 +2824,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @pasid: pasid to use
*
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
@@ -2816,7 +2840,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* Returns:
* 0 for success, -errno for errors.
*/
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned int pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -2848,7 +2873,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
if (r)
goto free_idr;
}
@@ -2858,7 +2883,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2937,19 +2963,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- int i, r;
+ int i;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ vm->pasid = 0;
+ }
+
+ list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
+ if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+ amdgpu_vm_prt_fini(adev, vm);
+ prt_fini_needed = false;
+ }
+
+ list_del(&mapping->list);
+ amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- drm_sched_entity_destroy(&vm->entity);
+ amdgpu_vm_free_pts(adev, vm, NULL);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
+
+ drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2962,26 +3007,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
list_del(&mapping->list);
kfree(mapping);
}
- list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
- amdgpu_vm_prt_fini(adev, vm);
- prt_fini_needed = false;
- }
-
- list_del(&mapping->list);
- amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
- }
- root = amdgpu_bo_ref(vm->root.base.bo);
- r = amdgpu_bo_reserve(root, true);
- if (r) {
- dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
- } else {
- amdgpu_vm_free_pts(adev, vm, NULL);
- amdgpu_bo_unreserve(root);
- }
- amdgpu_bo_unref(&root);
- WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3065,8 +3091,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
- /* current, we only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
+ /* Currently we only need to reserve VMIDs from the gfxhub */
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
+ AMDGPU_GFXHUB_0);
if (r)
return r;
break;
@@ -3109,13 +3136,88 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
*/
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
- if (!vm->task_info.pid) {
- vm->task_info.pid = current->pid;
- get_task_comm(vm->task_info.task_name, current);
+ if (vm->task_info.pid)
+ return;
- if (current->group_leader->mm == current->mm) {
- vm->task_info.tgid = current->group_leader->pid;
- get_task_comm(vm->task_info.process_name, current->group_leader);
- }
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+}
+
+/**
+ * amdgpu_vm_handle_fault - graceful handling of VM faults.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @addr: Address of the fault
+ *
+ * Try to gracefully handle a VM fault. Return true if the fault was handled and
+ * shouldn't be reported any more.
+ */
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr)
+{
+ struct amdgpu_bo *root;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+ long r;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ else
+ root = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+
+ if (!root)
+ return false;
+
+ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_unref;
+
+ /* Double check that the VM still exists */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm && vm->root.base.bo != root)
+ vm = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ goto error_unlock;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+ flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
+ AMDGPU_PTE_SYSTEM;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ /* Redirect the access to the dummy page */
+ value = adev->dummy_page_addr;
+ flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
+ AMDGPU_PTE_WRITEABLE;
+ } else {
+ /* Let the hw retry silently on the PTE */
+ value = 0;
}
+
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
+ flags, value, NULL, NULL);
+ if (r)
+ goto error_unlock;
+
+ r = amdgpu_vm_update_pdes(adev, vm, true);
+
+error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+ DRM_ERROR("Can't handle page fault (%ld)\n", r);
+
+error_unref:
+ amdgpu_bo_unref(&root);
+
+ return false;
}
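
amdgpu_vm_handle_fault() above is built on a lookup/ref/revalidate idiom: find the VM by PASID under the spinlock, take a reference on its root BO so it cannot vanish, sleep while reserving that BO, then look the PASID up a second time to confirm the VM still exists and still owns the same root. A condensed, self-contained sketch of the idiom (all names hypothetical; the driver uses pasid_lock, an IDR and amdgpu_bo_reserve() in these roles):

    #include <stdbool.h>

    struct obj { struct obj *root; };

    static struct obj table[1] = { { &table[0] } };

    static struct obj *lookup(unsigned int id)  /* done under a lock in the driver */
    {
            return id == 0 ? &table[0] : NULL;
    }
    static struct obj *take(struct obj *o) { return o; }      /* grab a reference */
    static void drop(struct obj *o) { (void)o; }
    static void reserve(struct obj *o) { (void)o; }           /* may sleep */
    static void unreserve(struct obj *o) { (void)o; }

    static bool handle(unsigned int id)
    {
            struct obj *vm = lookup(id);
            struct obj *root = vm ? take(vm->root) : NULL;
            bool ok = false;

            if (!root)
                    return false;

            reserve(root);                  /* sleeping point */
            vm = lookup(id);                /* revalidate: the VM may have died */
            if (vm && vm->root == root)
                    ok = true;              /* safe to touch the page tables */
            unreserve(root);
            drop(root);
            return ok;
    }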
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2eda3a8c330d..4dbbe1b6b413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* Reserve 4MB VRAM for page tables */
+#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20)
+
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 3
#define AMDGPU_GFXHUB_0 0
@@ -199,6 +202,11 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
+ * @direct: if changes should be made directly
+ */
+ bool direct;
+
+ /**
* @pages_addr:
*
* DMA addresses to use for mapping
@@ -254,8 +262,9 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root;
struct dma_fence *last_update;
- /* Scheduler entity for page table updates */
- struct drm_sched_entity entity;
+ /* Scheduler entities for page table updates */
+ struct drm_sched_entity direct;
+ struct drm_sched_entity delayed;
unsigned int pasid;
/* dedicated to vm */
@@ -357,8 +366,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
@@ -404,6 +413,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info);
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 5222d165abfc..73fec7a0ced5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
{
int r;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
/* Wait for any BO move to be completed */
if (exclusive) {
r = dma_fence_wait(exclusive, true);
@@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
return r;
}
- return 0;
+ /* Don't wait for submissions during page fault */
+ if (p->direct)
+ return 0;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
}
/**
@@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 61fc584cbb1a..832db59f441e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
+ p->num_dw_left = ndw;
+
+ /* Wait for moves to be completed */
r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
if (r)
return r;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
- if (r)
- return r;
+ /* Don't wait for any submissions during page fault handling */
+ if (p->direct)
+ return 0;
- p->num_dw_left = ndw;
- return 0;
+ return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+ owner, false);
}
/**
@@ -95,22 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_bo *root = p->vm->root.base.bo;
struct amdgpu_ib *ib = p->job->ibs;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
struct dma_fence *f;
int r;
- ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+ entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, &p->vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error;
amdgpu_bo_fence(root, f, true);
- if (fence)
+ if (fence && !p->direct)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -120,7 +123,6 @@ error:
return r;
}
-
/**
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
*
@@ -141,7 +143,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -168,7 +170,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
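
amdgpu_vm_sdma_commit() now chooses the scheduler entity per submission: direct (page-fault) updates go to vm->direct, everything else to vm->delayed, and only delayed updates hand their fence back to the caller. Keeping the fault path on its own entity means it can never queue behind the normal update stream it may itself be unblocking. A small sketch of the selection (struct vm here is a stand-in, not the driver's):

    #include <drm/gpu_scheduler.h>

    struct vm {
            struct drm_sched_entity direct;         /* page-fault updates */
            struct drm_sched_entity delayed;        /* normal updates */
    };

    static struct drm_sched_entity *pick_entity(struct vm *vm, bool direct)
    {
            /* fault-path work must never wait behind delayed submissions */
            return direct ? &vm->direct : &vm->delayed;
    }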
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3a9d8c15fe9f..82a3299e53c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,9 @@
*/
#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
@@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ switch (adev->gmc.vram_vendor) {
+ case SAMSUNG:
+ return snprintf(buf, PAGE_SIZE, "samsung\n");
+ case INFINEON:
+ return snprintf(buf, PAGE_SIZE, "infineon\n");
+ case ELPIDA:
+ return snprintf(buf, PAGE_SIZE, "elpida\n");
+ case ETRON:
+ return snprintf(buf, PAGE_SIZE, "etron\n");
+ case NANYA:
+ return snprintf(buf, PAGE_SIZE, "nanya\n");
+ case HYNIX:
+ return snprintf(buf, PAGE_SIZE, "hynix\n");
+ case MOSEL:
+ return snprintf(buf, PAGE_SIZE, "mosel\n");
+ case WINBOND:
+ return snprintf(buf, PAGE_SIZE, "winbond\n");
+ case ESMT:
+ return snprintf(buf, PAGE_SIZE, "esmt\n");
+ case MICRON:
+ return snprintf(buf, PAGE_SIZE, "micron\n");
+ default:
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+ }
+}
+
static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
@@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+ amdgpu_mem_info_vram_vendor, NULL);
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
@@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
return ret;
}
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
+ return ret;
+ }
return 0;
}
@@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
return 0;
}
@@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
- uint64_t vis_usage = 0, mem_bytes;
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
unsigned i;
int r;
@@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
+ max_bytes = adev->gmc.mc_vram_size;
+ if (tbo->type != ttm_bo_type_kernel)
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
/* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
- if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
+ if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage);
mem->mm_node = NULL;
return 0;
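
AMDGPU_VM_RESERVED_VRAM (4 MB, added in the amdgpu_vm.h hunk above) is held back from non-kernel allocations so page tables always have VRAM to land in; the check is a speculative atomic add that is rolled back when it overshoots. A sketch of that accounting, assuming a plain atomic64_t usage counter:

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define RESERVED_BYTES  (4ULL << 20)    /* mirrors AMDGPU_VM_RESERVED_VRAM */

    static bool vram_account(atomic64_t *usage, u64 bytes, u64 vram_size,
                             bool kernel_bo)
    {
            u64 max = vram_size - (kernel_bo ? 0 : RESERVED_BYTES);

            if (atomic64_add_return(bytes, usage) > max) {
                    atomic64_sub(bytes, usage);     /* roll back the reservation */
                    return false;
            }
            return true;
    }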
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 65aae75f80fd..61d13d8b7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
+#include "amdgpu_ras.h"
#include "df/df_3_6_offset.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -273,22 +274,55 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_device *tmp_adev;
+ bool update_hive_pstate = true;
+ bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
if (!hive)
return 0;
- if (hive->pstate == pstate)
- return 0;
+ mutex_lock(&hive->hive_lock);
+
+ if (hive->pstate == pstate) {
+ adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ goto out;
+ }
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
if (is_support_sw_smu_xgmi(adev))
ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- if (ret)
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_xgmi_pstate)
+ ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+ pstate);
+
+ if (ret) {
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
+ goto out;
+ }
+
+ /* Update device pstate */
+ adev->pstate = pstate;
+
+ /*
+ * Update the hive pstate only if all devices of the hive
+ * are in the same pstate
+ */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ if (tmp_adev->pstate != adev->pstate) {
+ update_hive_pstate = false;
+ break;
+ }
+ }
+ if (update_hive_pstate || is_high_pstate)
+ hive->pstate = pstate;
+
+out:
+ mutex_unlock(&hive->hive_lock);
return ret;
}
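
The reworked amdgpu_xgmi_set_pstate() takes hive_lock for the whole operation and only promotes the hive-wide pstate once every device in the hive reports the new value. A sketch of the all-devices check (the node/pstate field names are stand-ins):

    #include <linux/list.h>
    #include <linux/types.h>

    struct node {
            int pstate;
            struct list_head head;
    };

    static bool hive_in_sync(struct list_head *devices, int pstate)
    {
            struct node *n;

            list_for_each_entry(n, devices, head)
                    if (n->pstate != pstate)
                            return false;   /* at least one device lags behind */
            return true;
    }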
@@ -363,6 +397,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
+ /* Set default device pstate */
+ adev->pstate = -1;
+
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -437,3 +474,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_unlock(&hive->hive_lock);
}
}
+
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "xgmi_wafl_err_count",
+ .debugfs_name = "xgmi_wafl_err_inject",
+ };
+
+ if (!adev->gmc.xgmi.supported ||
+ adev->gmc.xgmi.num_physical_nodes == 0)
+ return 0;
+
+ if (!adev->gmc.xgmi.ras_if) {
+ adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gmc.xgmi.ras_if)
+ return -ENOMEM;
+ adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if->sub_block_index = 0;
+ strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
+ }
+ ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
+ kfree(adev->gmc.xgmi.ras_if);
+ adev->gmc.xgmi.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
+ adev->gmc.xgmi.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
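
amdgpu_xgmi_ras_late_init() follows the allocate-once/roll-back-on-failure shape used by the other RAS blocks: reuse the ras_if if a previous init left one behind, otherwise allocate and fill it, and free it again if registration fails or the block is unsupported. A condensed sketch (register_block() is hypothetical and left undefined):

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct ras_if { int block; };
    struct state { struct ras_if *obj; };

    static int register_block(struct ras_if *obj);  /* hypothetical */

    static int ras_late_init(struct state *s)
    {
            int r;

            if (!s->obj) {
                    s->obj = kzalloc(sizeof(*s->obj), GFP_KERNEL);
                    if (!s->obj)
                            return -ENOMEM;
                    s->obj->block = 1;      /* fill in the block identity once */
            }

            r = register_block(s->obj);
            if (r) {
                    kfree(s->obj);          /* undo, so a retry starts clean */
                    s->obj = NULL;
            }
            return r;
    }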
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index fbcee31788c4..bbf504ff7051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
index 4853899b1824..fda99c958c3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "arct_ip_offset.h"
int arct_reg_base_init(struct amdgpu_device *adev)
@@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b81bb414fcb3..2d64d270725d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmGRBM_STATUS_SE2},
+ {mmGRBM_STATUS_SE3},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
+ {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
+ {mmCP_CPF_BUSY_STAT},
+ {mmCP_CPF_STALLED_STAT1},
+ {mmCP_CPF_STATUS},
+ {mmCP_CPC_BUSY_STAT},
+ {mmCP_CPC_STALLED_STAT1},
+ {mmCP_CPC_STATUS},
{mmGB_ADDR_CONFIG},
{mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
@@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/**
- * cik_asic_reset - soft reset GPU
+ * cik_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int cik_asic_reset(struct amdgpu_device *adev)
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -1294,7 +1313,45 @@ static int cik_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ /* disable baco reset until it works */
+ /* smu7_asic_get_baco_capability(adev, &baco_reset); */
+ baco_reset = false;
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = cik_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 cik_get_config_memsize(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 54c625a2e570..9870bf27870e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 645550e7caf5..40d2ac723dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
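
Every DCE variant below gets the same mechanical conversion: open-coded walks of dev->mode_config.connector_list become drm_connector_list_iter walks, which take the references needed to stay safe against concurrent connector hotplug. The begin/iterate/end shape (drm_connector_list_iter_begin/_end and drm_for_each_connector_iter are the real DRM helpers):

    #include <drm/drm_connector.h>
    #include <drm/drm_device.h>

    static void walk_connectors(struct drm_device *dev)
    {
            struct drm_connector *connector;
            struct drm_connector_list_iter iter;

            drm_connector_list_iter_begin(dev, &iter);
            drm_for_each_connector_iter(connector, &iter) {
                    /* connector is guaranteed to stay alive inside the loop */
            }
            drm_connector_list_iter_end(&iter);     /* drops the iterator's reference */
    }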
/**
@@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
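
The SAD hunks in these DCE files also separate "error" from "nothing to do": a negative return from drm_edid_to_sad() is a real parse failure worth a DRM_ERROR, while zero simply means the EDID carries no audio descriptors, so both cases bail out but only the first one logs. The resulting shape, as in the hunk above:

    sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
    if (sad_count < 0)              /* parse failure: log it */
            DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
    if (sad_count <= 0)             /* failure, or simply no audio blocks */
            return;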
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d9f470632b2c..898ef72d423c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -385,6 +387,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 3eb2e7429269..db15a112becc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
-
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1124,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int interlace = 0;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u8 *sadb = NULL;
int sad_count;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 tmp = 0;
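drm_edid_to_sad() follows the usual kernel return convention: a negative value is a real failure, zero means the EDID simply carries no short audio descriptors, and a positive value is the descriptor count. The reworked check above only logs an error for the negative case while still bailing out for both, so monitors without audio blocks no longer spam the log:

    sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
    if (sad_count < 0)   /* parse failure: worth reporting */
        DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
    if (sad_count <= 0)  /* failure or no SADs: nothing to program */
        return;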
@@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
int bpc = 8;
@@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a16c5e9e610e..f06c9022c1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp = 0, offset;
@@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
@@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c9608ae8643b..e4f94863332c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -260,15 +260,14 @@ static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
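drm_connector_for_each_possible_encoder() appears to have dropped its third argument (the index cursor it previously needed for the encoder-ID array), so callers now pass only the connector and the encoder cursor. A sketch of the two-argument usage shown in the hunk:

    struct drm_encoder *encoder;

    drm_connector_for_each_possible_encoder(connector, encoder) {
        if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
            return encoder;
    }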
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 844c03868248..d6221298b477 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
}
+static void df_v1_7_sw_fini(struct amdgpu_device *adev)
+{
+}
+
static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v1_7_funcs = {
.sw_init = df_v1_7_sw_init,
+ .sw_fini = df_v1_7_sw_fini,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
.get_fb_channel_number = df_v1_7_get_fb_channel_number,
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
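Adding .sw_fini gives the DF function table a teardown hook symmetric with .sw_init; even though the v1.7 implementation is empty, wiring it up lets common code invoke both unconditionally for every DF version. A hedged sketch of the calling convention (the caller shown here is assumed, not part of this patch):

    /* somewhere in the common init/fini paths */
    adev->df_funcs->sw_init(adev);
    /* ... */
    adev->df_funcs->sw_fini(adev);  /* safe: every table now provides it */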
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5850c8e34caa..16fbd2bc8ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
unsigned long flags, address, data;
uint32_t ficadl_val, ficadh_val;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
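The s/adev->nbio_funcs->/adev->nbio.funcs->/ substitutions reflect the NBIO callbacks and their HDP flush register block being folded into a single aggregate on the device. The layout implied by the accesses in this patch (field names inferred from usage, not from a header in this diff) would look roughly like:

    struct amdgpu_nbio {
        const struct nbio_hdp_flush_reg *hdp_flush_reg;
        const struct amdgpu_nbio_funcs *funcs;
        /* ... other per-NBIO state ... */
    };

    struct amdgpu_device {
        /* ... */
        struct amdgpu_nbio nbio;  /* was: const struct amdgpu_nbio_funcs *nbio_funcs */
    };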
@@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
adev->df_perfmon_config_assign_mask[i] = 0;
}
+static void df_v3_6_sw_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
+}
+
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
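The new df_v3_6_sw_fini() tears down the sysfs attribute exposed for the DF performance counters; device_remove_file() must mirror an earlier device_create_file() on the same attribute or the file would linger across driver unbind. A minimal sketch of the pairing (the create side is assumed to live in df_v3_6_sw_init()):

    /* init */
    ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
    if (ret)
        dev_err(adev->dev, "failed to create df_cntr_avail\n");

    /* fini */
    device_remove_file(adev->dev, &dev_attr_df_cntr_avail);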
@@ -537,6 +544,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
+ .sw_fini = df_v3_6_sw_fini,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 53090eae0082..ca5f0e7ea1ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
@@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};
static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
@@ -1469,7 +1469,7 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v10_0_pfp_fini(adev);
@@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
+static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
+ int r;
+
+ if (adev->in_gpu_reset) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (!r) {
+ adev->gfx.rlc.funcs->get_csb_buffer(adev,
+ adev->gfx.rlc.cs_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ }
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ if (r)
+ return r;
+ }
+
/* csib */
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+
+ return 0;
}
-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
+static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
{
int i;
+ int r;
- gfx_v10_0_init_csb(adev);
+ r = gfx_v10_0_init_csb(adev);
+ if (r)
+ return r;
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
/* TODO: init power gating */
- return;
+ return 0;
}
void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
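gfx_v10_0_init_csb() now regenerates the clear-state buffer contents on the GPU-reset path before re-programming the CSIB registers, since the buffer object survives the reset but its contents may not. The kernel-BO access it uses is the standard reserve/kmap/kunmap/unreserve sequence; a condensed sketch (regenerate_contents() is a hypothetical stand-in for get_csb_buffer):

    r = amdgpu_bo_reserve(bo, false);  /* take the BO reservation */
    if (r)
        return r;
    r = amdgpu_bo_kmap(bo, (void **)&cpu_ptr);
    if (!r) {
        regenerate_contents(cpu_ptr);  /* refill the CPU mapping */
        amdgpu_bo_kunmap(bo);
    }
    amdgpu_bo_unreserve(bo);           /* always drop the reservation */
    if (r)
        return r;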
@@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
return r;
- gfx_v10_0_init_pg(adev);
+
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
/* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
@@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- gfx_v10_0_init_pg(adev);
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
+
adev->gfx.rlc.funcs->start(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
@@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
+
+ return 0;
}
static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
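Instead of a fixed udelay(50) after toggling CP_ME_CNTL, the enable path above now polls CP_STAT until the command processor reports idle, bounded by adev->usec_timeout. This is the usual bounded-poll idiom for register handshakes; schematically:

    for (i = 0; i < timeout_us; i++) {
        if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
            break;          /* CP has drained */
        udelay(1);
    }
    if (i >= timeout_us)
        DRM_ERROR("CP did not reach idle within %u us\n", timeout_us);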
@@ -2470,7 +2511,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2540,7 +2581,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2609,7 +2650,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2930,7 +2971,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3114,6 +3155,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.gfx_ring[0];
if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
@@ -3125,14 +3167,15 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else if (adev->in_gpu_reset) {
/* reset mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
+ adev->wb.wb[ring->wptr_offs] = 0;
amdgpu_ring_clear_ring(ring);
#ifdef BRING_UP_DEBUG
mutex_lock(&adev->srbm_mutex);
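Indexing the MQD backup with ring - &adev->gfx.gfx_ring[0] restores each gfx ring's own backup instead of funnelling every ring through the single fixed AMDGPU_MAX_GFX_RINGS slot. Pointer subtraction between elements of the same array yields the element index, as this small self-contained demonstration shows:

    #include <stdio.h>

    struct ring { int id; };

    int main(void)
    {
        struct ring rings[4];
        struct ring *r = &rings[2];

        /* ptrdiff_t, printed with %td: the element index, 2 */
        printf("mqd_idx = %td\n", r - &rings[0]);
        return 0;
    }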
@@ -4384,7 +4427,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4404,8 +4447,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5331,15 +5374,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
- /* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- default:
- adev->gds.gds_size = 0x10000;
- adev->gds.gds_compute_max_wave_id = 0x4ff;
- break;
- }
+ unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines;
+ adev->gds.gds_size = 0x10000;
+ adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
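The GDS compute wave-ID limit is now derived from the CU count rather than hardcoded per ASIC: with 32 wavefronts per CU, the highest valid wave ID is total_cu * 32 - 1. For a fully enabled Navi10 part (40 CUs, assuming max_cu_per_sh * max_sh_per_se * max_shader_engines = 40), that gives 40 * 32 - 1 = 1279 = 0x4ff, matching the constant the hunk removes, while harvested or future parts now get the correct limit automatically.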
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 87dd55e9d72b..ffbde9136372 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2103,7 +2103,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dfca83a2de47..faf2ffce5837 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -131,6 +131,18 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+struct ras_gfx_subblock_reg {
+ const char *name;
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+};
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
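Each ras_gfx_subblock_reg entry bundles a counter register with the mask and shift of its SEC (single-error-corrected) and DED (double-error-detected) bit-fields, so one generic loop can extract both counts from any entry instead of open-coding per-register field macros. A hedged sketch of the extraction step (helper name hypothetical):

    static void get_counts(const struct ras_gfx_subblock_reg *e,
                           uint32_t value,
                           uint32_t *sec, uint32_t *ded)
    {
        *sec = (value & e->sec_count_mask) >> e->sec_count_shift;
        *ded = (value & e->ded_count_mask) >> e->ded_count_shift;
    }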
@@ -517,9 +529,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
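In the soc15 golden-register tables each entry is (hwip, inst, reg, and_mask, or_value), and the programming helper only applies or_value inside and_mask, roughly as below, assuming the common soc15_program_register_sequence() semantics:

    tmp = RREG32(reg);
    tmp &= ~entry->and_mask;
    tmp |= (entry->or_mask & entry->and_mask);  /* bits outside the mask are dropped */
    WREG32(reg, tmp);

With the old and_mask of 0x00000000 the 0x800/0x8000 bits were therefore never written; widening the mask to cover the bits being set is what makes these entries take effect.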
@@ -582,9 +594,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -676,9 +688,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
@@ -691,6 +703,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -1038,8 +1052,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- &&((adev->gfx.rlc_fw_version != 106 &&
+ /* Disable GFXOFF on original raven. There are combinations
+ * of sbios and platforms that are not stable.
+ */
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ &&((adev->gfx.rlc_fw_version != 106 &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
@@ -1337,7 +1356,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_RENOIR) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1969,190 +1989,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
- struct amdgpu_ngg_buf *ngg_buf,
- int size_se,
- int default_size_se)
-{
- int r;
-
- if (size_se < 0) {
- dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
- return -EINVAL;
- }
- size_se = size_se ? size_se : default_size_se;
-
- ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
- r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &ngg_buf->bo,
- &ngg_buf->gpu_addr,
- NULL);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
- return r;
- }
- ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
-
- return r;
-}
-
-static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < NGG_BUF_MAX; i++)
- amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
- &adev->gfx.ngg.buf[i].gpu_addr,
- NULL);
-
- memset(&adev->gfx.ngg.buf[0], 0,
- sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
-
- adev->gfx.ngg.init = false;
-
- return 0;
-}
-
-static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (!amdgpu_ngg || adev->gfx.ngg.init == true)
- return 0;
-
- /* GDS reserve memory: 64 bytes alignment */
- adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
- adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
- adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
-
- /* Primitive Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
- amdgpu_prim_buf_per_se,
- 64 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Primitive Buffer\n");
- goto err;
- }
-
- /* Position Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
- amdgpu_pos_buf_per_se,
- 256 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Position Buffer\n");
- goto err;
- }
-
- /* Control Sideband */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
- amdgpu_cntl_sb_buf_per_se,
- 256);
- if (r) {
- dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
- goto err;
- }
-
- /* Parameter Cache, not created by default */
- if (amdgpu_param_buf_per_se <= 0)
- goto out;
-
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
- amdgpu_param_buf_per_se,
- 512 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Parameter Cache\n");
- goto err;
- }
-
-out:
- adev->gfx.ngg.init = true;
- return 0;
-err:
- gfx_v9_0_ngg_fini(adev);
- return r;
-}
-
-static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
- int r;
- u32 data, base;
-
- if (!amdgpu_ngg)
- return 0;
-
- /* Program buffer size */
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_POS].size >> 8);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
-
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
-
- /* Program buffer base address */
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
-
- /* Clear GDS reserved memory */
- r = amdgpu_ring_alloc(ring, 17);
- if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
- ring->name, r);
- return r;
- }
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- (adev->gds.gds_size +
- adev->gfx.ngg.gds_reserve_size));
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
- amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
- PACKET3_DMA_DATA_DST_SEL(1) |
- PACKET3_DMA_DATA_SRC_SEL(2)));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
- adev->gfx.ngg.gds_reserve_size);
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
-
- amdgpu_ring_commit(ring);
-
- return 0;
-}
-
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
@@ -2320,10 +2156,6 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ngg_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -2333,19 +2165,7 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
- adev->gfx.ras_if) {
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_gfx_ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2353,11 +2173,10 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
- gfx_v9_0_ngg_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
@@ -2925,7 +2744,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- gfx_v9_1_init_rlc_save_restore_list(adev);
+ if (adev->asic_type == CHIP_VEGA12 ||
+ (adev->asic_type == CHIP_RAVEN &&
+ adev->rev_id >= 8))
+ gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -3896,12 +3718,6 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
- r = gfx_v9_0_ngg_en(adev);
- if (r)
- return r;
- }
-
return r;
}
@@ -3943,8 +3759,10 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
- /* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ /* DF freeze and KCQ disable will fail once a RAS fatal-error interrupt has fired */
+ if (!amdgpu_ras_intr_triggered())
+ /* disable the KCQ so the CPC stops touching memory that is no longer valid */
+ gfx_v9_0_kcq_disable(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -4080,9 +3898,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ uint32_t tmp, lsb, msb, i = 0;
+ do {
+ if (i != 0)
+ udelay(1);
+ tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
+ msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ i++;
+ } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
+ clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
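The SR-IOV path reads a free-running 64-bit timestamp through two 32-bit registers, so it uses the classic msb/lsb/msb consistent-read loop: sample the high half, the low half, then the high half again, and retry if the two high samples differ (the low word wrapped mid-read), giving up after usec_timeout iterations. Schematically (read_hi()/read_lo() are hypothetical 32-bit register reads):

    do {
        if (i != 0)
            udelay(1);
        hi1 = read_hi();
        lo  = read_lo();
        hi2 = read_hi();
        i++;
    } while (hi1 != hi2 && i < timeout);
    clock = ((uint64_t)hi2 << 32) | lo;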
@@ -4197,6 +4028,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
@@ -4216,6 +4048,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
int i, r;
+ /* apply the GDS EDC workaround only when GFX RAS is supported */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 7);
if (r) {
DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
@@ -4406,33 +4242,14 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry);
-
static int gfx_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .cb = gfx_v9_0_process_ras_data_cb,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__GFX,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "gfx",
- };
int r;
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
+ r = amdgpu_gfx_ras_late_init(adev);
+ if (r)
+ return r;
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
@@ -4443,72 +4260,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
- if (r)
- goto irq;
-
return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
}
static int gfx_v9_0_late_init(void *handle)
@@ -4573,16 +4325,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
{
amdgpu_gfx_rlc_enter_safe_mode(adev);
- if (is_support_sw_smu(adev) && !enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
-
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
} else {
gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
- gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
amdgpu_gfx_rlc_exit_safe_mode(adev);
@@ -4851,8 +4601,6 @@ static int gfx_v9_0_set_powergating_state(void *handle,
gfx_v9_0_enable_cp_power_gating(adev, false);
/* update gfx cgpg state */
- if (is_support_sw_smu(adev) && enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
/* update mgcg state */
@@ -4983,7 +4731,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -5003,8 +4751,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5736,313 +5484,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- /* TODO ue will trigger an interrupt. */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, err_data);
- amdgpu_ras_reset_gpu(adev, 0);
- return AMDGPU_RAS_SUCCESS;
-}
-static const struct {
- const char *name;
- uint32_t ip;
- uint32_t inst;
- uint32_t seg;
- uint32_t reg_offset;
- uint32_t per_se_instance;
- int32_t num_instance;
- uint32_t sec_count_mask;
- uint32_t ded_count_mask;
-} gfx_ras_edc_regs[] = {
- { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
- { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) },
- { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 },
- { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 },
- { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) },
- { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 },
- { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) },
- { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) },
- { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 },
- { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 },
- { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 },
- { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) },
- { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 },
+static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
+ },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
+ },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
+ 0, 0
+ },
+ { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
+ 0, 0
+ },
+ { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
+ },
+ { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
+ },
+ { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
+ 0, 0
+ },
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
- 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
+ 0, 0
+ },
{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
- { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1,
- REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 },
- { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
- { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 },
- { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 },
- { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 },
- { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 },
- { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 },
- { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 },
- { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
- { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
- { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
- { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
- { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
- { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 },
- { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 },
- { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 },
- { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 },
- { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 },
- { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 },
- { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 },
- { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 },
- { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
+ },
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
+ },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
+ },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
+ },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
+ },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
- 0 },
- { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
+ 0, 0
+ },
{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
- 0 },
- { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 },
- { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72,
- REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 },
- { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
- { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
- { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 },
- { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 },
- { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 },
- { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
- { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
- { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
- { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
- { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 },
- { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) },
- { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) },
- { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) },
- { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) },
- { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) },
- { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) },
- { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
+ },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
+ },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ 0, 0
+ },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
+ },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
+ },
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
+ },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
+ },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
+ },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
+ },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
+ },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
+ },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
+ },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
+ },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
+ },
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
- { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
- { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
- { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 },
- { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 },
- { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 },
- { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
- { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
- { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
- { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 },
- { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 },
- { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 },
- { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 },
- { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 },
- { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
+ },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
+ },
+ { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ 0, 0
+ }
};
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
@@ -6091,14 +5972,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
+static const char *vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+};
+
+static const char *vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_LOG_FIFO",
+};
+
+static const char *atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char *atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
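+	/*
+	 * Writing index 255 followed by a zero count appears to reset the EDC
+	 * counters of every memory instance before they are sampled below.
+	 */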
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
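+	/* walk the 16 VML2 bank cache memories named in vml2_mems above */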
+ for (i = 0; i < 16; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 7; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+
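+		/* the SEC count sits in bits [14:13] of the 2M EDC count register */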
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+
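+		/* SEC is held in bits [14:13] and DED in bits [16:15] of the 4K EDC count */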
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = (data & 0x00018000L) >> 0xf;
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
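+	/* leave the index registers back at 255, matching their initial state */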
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+
+ return 0;
+}
+
+static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id, uint32_t value,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
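+	/* match every table entry that describes a sub-block of this register instance */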
+ for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
+		if (ras_subblock_regs[i].reg_offset != reg->reg_offset ||
+		    ras_subblock_regs[i].seg != reg->seg ||
+		    ras_subblock_regs[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value &
+ ras_subblock_regs[i].sec_count_mask) >>
+ ras_subblock_regs[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ ras_subblock_regs[i].ded_count_mask) >>
+ ras_subblock_regs[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t sec_count, ded_count;
- uint32_t i;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
uint32_t reg_value;
- uint32_t se_id, instance_id;
if (adev->asic_type != CHIP_VEGA20)
return -EINVAL;
@@ -6107,71 +6191,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count = 0;
mutex_lock(&adev->grbm_idx_mutex);
- for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) {
- for (instance_id = 0; instance_id < 256; instance_id++) {
- for (i = 0;
- i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]);
- i++) {
- if (se_id != 0 &&
- !gfx_ras_edc_regs[i].per_se_instance)
- continue;
- if (instance_id >= gfx_ras_edc_regs[i].num_instance)
- continue;
-
- gfx_v9_0_select_se_sh(adev, se_id, 0,
- instance_id);
-
- reg_value = RREG32(
- adev->reg_offset[gfx_ras_edc_regs[i].ip]
- [gfx_ras_edc_regs[i].inst]
- [gfx_ras_edc_regs[i].seg] +
- gfx_ras_edc_regs[i].reg_offset);
- sec_count = reg_value &
- gfx_ras_edc_regs[i].sec_count_mask;
- ded_count = reg_value &
- gfx_ras_edc_regs[i].ded_count_mask;
- if (sec_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, SEC %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- sec_count);
- err_data->ce_count++;
- }
- if (ded_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, DED %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- ded_count);
- err_data->ue_count++;
- }
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
+ for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
+ for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0, k);
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
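+				/* only decode registers that report a non-zero count */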
+ if (reg_value)
+ __get_ras_error_count(&sec_ded_counter_registers[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return 0;
-}
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
-static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
- ih_data.head = *ras_if;
+ gfx_v9_0_query_utc_edc_status(adev, err_data);
- DRM_ERROR("CP ECC ERROR IRQ\n");
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -6338,7 +6380,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
.set = gfx_v9_0_set_cp_ecc_error_state,
- .process = gfx_v9_0_cp_ecc_error_irq,
+ .process = amdgpu_gfx_cp_ecc_error_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6ce37ce77d14..e91bd7945777 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -365,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index db10640a3b2f..b70c7b483c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* the stride between mmGCVM_CONTEXT0_* and mmGCVM_CONTEXT1_* registers is two */
+ int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
-
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -175,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}
@@ -350,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 06807940748b..392b8cd94fc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v2_0_init(struct amdgpu_device *adev);
u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5c7d5f73f54f..321f8a997be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
const unsigned eng = 17;
unsigned int i;
+ spin_lock(&adev->gmc.invalidate_lock);
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated state
+	 * and work around the issue.
+	 */
+
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+			/* a read return value of 1 means the semaphore was acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
udelay(1);
}
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+		 */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
+ spin_unlock(&adev->gmc.invalidate_lock);
+
if (i < adev->usec_timeout)
return;
@@ -278,7 +312,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
mutex_lock(&adev->mman.gtt_window_lock);
@@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated state
+	 * and work around the issue.
+	 */
+
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means the semaphore was acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+		 */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -396,43 +453,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 1 system
* 0 valid
*/
-static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+{
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -459,12 +496,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+ *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
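+	/*
+	 * PRT mappings are deliberately left invalid; the PRT, LOG and SNOOPED
+	 * bits presumably steer faulting accesses through the PRT path instead.
+	 */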
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags |= AMDGPU_PTE_SNOOPED;
+ *flags |= AMDGPU_PTE_LOG;
+ *flags |= AMDGPU_PTE_SYSTEM;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v10_0_get_vm_pde
+ .map_mtype = gmc_v10_0_map_mtype,
+ .get_vm_pde = gmc_v10_0_get_vm_pde,
+ .get_vm_pte = gmc_v10_0_get_vm_pte
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -518,8 +575,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (!amdgpu_sriov_vf(adev))
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = gfxhub_v2_0_get_fb_location(adev);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
@@ -539,24 +595,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
-
- if (!amdgpu_emu_mode)
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- else {
- /* hard code vram_width for emulation */
- chansize = 128;
- numchan = 1;
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* Could aper size report 0 ? */
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -635,7 +680,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v10_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v2_0_init(adev);
@@ -643,7 +688,15 @@ static int gmc_v10_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (!amdgpu_emu_mode)
+ adev->gmc.vram_width = vram_width;
+ else
+ adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -793,7 +846,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* Flush HDP after it is initialized */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fb1765e92d1..b205039350b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
@@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
- .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
+ .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c3d9bc3a641..f08e5330642d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v7_0_get_vm_pde
+ .get_vm_pde = gmc_v7_0_get_vm_pde,
+ .get_vm_pte = gmc_v7_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ea764dd9245d..6d96d40fbcb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v8_0_get_vm_pde
+ .get_vm_pde = gmc_v8_0_get_vm_pde,
+ .get_vm_pte = gmc_v8_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f91337030dc0..3c355fb5d2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -51,10 +51,12 @@
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
+#include "umc_v6_0.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
@@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, err_data);
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- if (adev->umc.funcs->query_ras_error_address)
- adev->umc.funcs->query_ras_error_address(adev, err_data);
-
- /* only uncorrectable error needs gpu reset */
- if (err_data->ue_count)
- amdgpu_ras_reset_gpu(adev, 0);
-
- return AMDGPU_RAS_SUCCESS;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
/* If it's the first fault for this address, process it normally */
+ if (retry_fault && !in_interrupt() &&
+ amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
.set = gmc_v9_0_ecc_interrupt_state,
- .process = gmc_v9_0_process_ecc_irq,
+ .process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -491,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
spin_lock(&adev->gmc.invalidate_lock);
+
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated state
+	 * and work around the issue.
+	 */
+
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (j = 0; j < adev->usec_timeout; j++) {
+			/* a read return value of 1 means the semaphore was acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (j >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -506,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
break;
udelay(1);
}
+
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+		 */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
spin_unlock(&adev->gmc.invalidate_lock);
+
if (j < adev->usec_timeout)
return;
@@ -521,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated state
+	 * and work around the issue.
+	 */
+
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means the semaphore was acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -531,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+		 */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -584,44 +609,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
+static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
-
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
+ case AMDGPU_VM_MTYPE_RW:
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -648,12 +654,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+ *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+
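+	/*
+	 * XGMI-mapped peer VRAM presumably needs the SNOOPED bit for coherent
+	 * access on Arcturus, hence the extra flag below.
+	 */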
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ !(*flags & AMDGPU_PTE_SYSTEM) &&
+ mapping->bo_va->is_xgmi)
+ *flags |= AMDGPU_PTE_SNOOPED;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v9_0_get_vm_pde
+ .map_mtype = gmc_v9_0_map_mtype,
+ .get_vm_pde = gmc_v9_0_get_vm_pde,
+ .get_vm_pte = gmc_v9_0_get_vm_pte
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -664,6 +692,9 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ adev->umc.funcs = &umc_v6_0_funcs;
+ break;
case CHIP_VEGA20:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
@@ -681,7 +712,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->mmhub_funcs = &mmhub_v1_0_funcs;
+ adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
default:
break;
@@ -762,140 +793,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
- struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = NULL;
- struct ras_ih_if ih_info = {
- .cb = gmc_v9_0_process_ras_data_cb,
- };
- int r;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- ras_if = &adev->gmc.umc_ras_if;
- else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
- ras_if = &adev->gmc.mmhub_ras_if;
- else
- BUG();
-
- if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
- amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = *ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info->head = **ras_if;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
- }
-
- amdgpu_ras_debugfs_create(adev, fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, fs_info);
- if (r)
- goto sysfs;
-resume:
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
-}
-
-static int gmc_v9_0_ecc_late_init(void *handle)
-{
- int r;
-
- struct ras_fs_if umc_fs_info = {
- .sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
- };
- struct ras_common_if umc_ras_block = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "umc",
- };
- struct ras_fs_if mmhub_fs_info = {
- .sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
- };
- struct ras_common_if mmhub_ras_block = {
- .block = AMDGPU_RAS_BLOCK__MMHUB,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "mmhub",
- };
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &umc_fs_info, &umc_ras_block);
- if (r)
- return r;
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &mmhub_fs_info, &mmhub_ras_block);
- return r;
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool r;
+ int r;
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
@@ -929,7 +830,7 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- r = gmc_v9_0_ecc_late_init(handle);
+ r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -970,33 +871,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
int r;
- if (amdgpu_sriov_vf(adev)) {
- /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
- * and DF related registers is not readable, seems hardcord is the
- * only way to set the correct vram_width
- */
- adev->gmc.vram_width = 2048;
- } else if (amdgpu_emu_mode != 1) {
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- }
-
- if (!adev->gmc.vram_width) {
- /* hbm memory channel size */
- if (adev->flags & AMD_IS_APU)
- chansize = 64;
- else
- chansize = 128;
-
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -1108,7 +987,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v1_0_init(adev);
@@ -1119,7 +998,32 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (amdgpu_sriov_vf(adev))
+		/* For Vega10 SR-IOV, vram_width can't be read from ATOM (same as
+		 * RAVEN), and the DF-related registers are not readable; hardcoding
+		 * seems to be the only way to set the correct vram_width.
+		 */
+ adev->gmc.vram_width = 2048;
+ else if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = vram_width;
+
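+ /* if atomfirmware couldn't report a vram width, derive it from the
+ * HBM channel count below
+ */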
+ if (!adev->gmc.vram_width) {
+ int chansize, numchan;
+
+ /* hbm memory channel size */
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
+
+ numchan = adev->df_funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_RAVEN:
adev->num_vmhubs = 2;
@@ -1240,33 +1144,7 @@ static int gmc_v9_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *stolen_vga_buf;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
- adev->gmc.umc_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /* remove fs first */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /* remove the IH */
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
- adev->gmc.mmhub_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
-
- /* remove fs and disable ras feature */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
+ amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1316,13 +1194,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
- int r, i;
- bool value;
- u32 tmp;
-
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
+ int r;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1332,15 +1204,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- /* TODO for renoir */
- mmhub_v1_0_update_power_gating(adev, true);
- break;
- default:
- break;
- }
-
r = gfxhub_v1_0_gart_enable(adev);
if (r)
return r;
@@ -1352,6 +1215,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v9_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool value;
+ int r, i;
+ u32 tmp;
+
+ /* The sequence of these two function calls matters. */
+ gmc_v9_0_init_golden_registers(adev);
+
+ if (adev->mode_info.num_crtc) {
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* Lock out access through the VGA aperture */
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ }
+ }
+
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* TODO for renoir */
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
@@ -1361,7 +1267,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
/* After HDP is initialized, flush HDP.*/
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -1377,28 +1283,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
- (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
- adev->gart.ready = true;
- return 0;
-}
-
-static int gmc_v9_0_hw_init(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* The sequence of these two function calls matters.*/
- gmc_v9_0_init_golden_registers(adev);
-
- if (adev->mode_info.num_crtc) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- }
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
r = gmc_v9_0_gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 04cd4b6f95d4..28105e4af507 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -206,6 +206,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -418,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
@@ -616,5 +620,6 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
}
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index b39bea6f54e9..a7cb185d639a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -31,20 +31,25 @@
#include "soc15_common.h"
-static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two-register stride between mmMMVM_CONTEXT0_* and mmMMVM_CONTEXT1_* */
+ int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -161,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
@@ -341,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index db16f3ece218..3ea4344f0315 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev);
int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 9ed178fa241c..66efe2f7bd76 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -249,6 +249,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
@@ -502,6 +504,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev)
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
new file mode 100644
index 000000000000..0d8767eb7a70
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+#include "soc15.h"
+#include "navi10_ih.h"
+#include "soc15_common.h"
+#include "mxgpu_nv.h"
+#include "mxgpu_ai.h"
+
+static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
+{
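+ /* writing 2 sets bit 1 of the RCV control byte, acking the received msg */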
+ WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
+}
+
+static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
+{
+ WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
+
+/*
+ * this peek_msg may *only* be called from an IRQ routine, because in the
+ * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
+ * already been set to 1 by the host.
+ *
+ * if called outside an IRQ routine, peek_msg is not guaranteed to return
+ * the correct value, since RCV_DW0 is only valid while RCV_MSG_VALID is
+ * set by the host.
+ */
+static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+}
+
+
+static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ if (reg != event)
+ return -ENOENT;
+
+ xgpu_nv_mailbox_send_ack(adev);
+
+ return 0;
+}
+
+static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
+{
+ return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
+static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
+{
+ int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
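+ /* poll the TRN_MSG_ACK bit every 5 ms until the host acks or we time out */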
+ do {
+ reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
+
+ mdelay(5);
+ timeout -= 5;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+
+ return -ETIME;
+}
+
+static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+{
+ int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+
+ do {
+ r = xgpu_nv_mailbox_rcv_msg(adev, event);
+ if (!r)
+ return 0;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
+}
+
+static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
+{
+ u32 reg;
+ int r;
+ uint8_t trn;
+
+ /* IMPORTANT:
+ * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; with the host's
+ * RCV_MSG_ACK cleared, hw automatically clears the VF's TRN_MSG_ACK.
+ * otherwise the xgpu_nv_poll_ack() below would return immediately
+ */
+ do {
+ xgpu_nv_mailbox_set_valid(adev, false);
+ trn = xgpu_nv_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ msleep(1);
+ }
+ } while (trn);
+
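+ /* pack the request id into TRN_DW0 and the payload into DW1..DW3 */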
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_nv_mailbox_set_valid(adev, true);
+
+ /* start to poll ack */
+ r = xgpu_nv_poll_ack(adev);
+ if (r)
+ pr_err("Doesn't get ack from pf, continue\n");
+
+ xgpu_nv_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ /* start to poll for the response msg if the request asks for GPU access */
+ if (req == IDH_REQ_GPU_INIT_ACCESS ||
+ req == IDH_REQ_GPU_FINI_ACCESS ||
+ req == IDH_REQ_GPU_RESET_ACCESS) {
+ r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ if (r) {
+ pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
+ return r;
+ }
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ }
+ }
+
+ return 0;
+}
+
+static int xgpu_nv_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
+static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+
+ req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
+ return xgpu_nv_send_access_requests(adev, req);
+}
+
+static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+ int r = 0;
+
+ req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
+ r = xgpu_nv_send_access_requests(adev, req);
+
+ return r;
+}
+
+static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+ /* block amdgpu_gpu_recover till the FLR COMPLETE msg is received,
+ * otherwise the mailbox msg will be ruined/reset by the VF FLR.
+ *
+ * we can unlock lock_reset to allow "amdgpu_job_timedout" to run
+ * gpu_recover() once FLR_NOTIFICATION_CMPL is received, which means
+ * the host side has finished this VF's FLR.
+ */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = 1;
+
+ do {
+ if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked) {
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
+ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_device_should_recover_gpu(adev))
+ amdgpu_device_gpu_recover(adev, NULL);
+}
+
+static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.flr_work);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+ * ignore it since the polling thread will handle it; other msgs,
+ * such as FLR complete, are not handled here.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_ack_irq,
+ .process = xgpu_nv_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_rcv_irq,
+ .process = xgpu_nv_mailbox_rcv_irq,
+};
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
+const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
+ .req_full_gpu = xgpu_nv_request_full_gpu_access,
+ .rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .reset_gpu = xgpu_nv_request_reset,
+ .wait_reset = NULL,
+ .trans_msg = xgpu_nv_mailbox_trans_msg,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
new file mode 100644
index 000000000000..99b15f6865cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MXGPU_NV_H__
+#define __MXGPU_NV_H__
+
+#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+
+extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
+
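+/* byte offsets into mmBIF_BX_PF_MAILBOX_CONTROL: byte 0 holds the TRN
+ * control bits, byte 1 the RCV control bits
+ */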
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9fe08408db58..9af73567e716 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
navi10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
index a56c93620e78..88efaecf9f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi10_ip_offset.h"
int navi10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
index cadc7603ca41..a786d159e5e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi12_ip_offset.h"
int navi12_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
index 3b5f0f65e096..4ea1e8fbb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi14_ip_offset.h"
int navi14_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index c05d78d4efc6..f3a3fe746222 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -27,11 +27,21 @@
#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+
+static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
+{
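+ /* point the HDP mem/reg flush controls at the KFD MMIO remap window,
+ * so that the flush registers can be exposed to process space
+ */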
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
@@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
- .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
.detect_hw_virt = nbio_v2_3_detect_hw_virt,
+ .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index 5ae52085f6b7..a43b60acf7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6590143c3f75..635d9e1fc0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
- .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 0743a6f016f3..6dc743b73218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 74eecb768a82..d6cbf26074bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
- .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 508d549c5029..e7aefb252550 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 910fffced43b..0db458f9fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -23,10 +23,12 @@
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
+#include "amdgpu_ras.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
@@ -266,7 +268,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -306,17 +308,208 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
- uint32_t def, data;
- def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
- data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+}
- if (def != data)
- WREG32_PCIE(smnPCIE_CI_CNTL, data);
+static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+
+static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in psp bl when it
+ * tries to enable the ras feature. Driver only needs to set the correct
+ * interrupt vector for the bare-metal and sriov use cases respectively
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set the interrupt vector select bit to 0 to select
+ * vector 1 for the bare metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for ras_controller_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a
+ * known bif ring hw bug, it has to be disabled. The process function
+ * will never be invoked, so just leave it as a dummy one.
+ */
+ return 0;
+}
+
+static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_err_event_athub_irq enablement should be done in psp bl when
+ * it tries to enable the ras feature. Driver only needs to set the
+ * correct interrupt vector for the bare-metal and sriov use cases
+ * respectively
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set the interrupt vector select bit to 0 to select
+ * vector 1 for the bare metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to the BIF ring instead of the general IV ring. However, due to a
+ * known bif ring hw bug, it has to be disabled. The process function
+ * will never be invoked, so just leave it as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
+ .set = nbio_v7_4_set_ras_controller_irq_state,
+ .process = nbio_v7_4_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_4_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_4_process_err_event_athub_irq,
+};
+
+static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_4_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_4_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ uint32_t global_sts, central_sts, int_eoi;
+ uint32_t corr, fatal, non_fatal;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
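+ /* the RAS global status register reports separate correctable, fatal
+ * and non-fatal parity error flags
+ */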
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
+ fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
+ non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
+ ParityErrNonFatal);
+
+ if (corr)
+ err_data->ce_count++;
+ if (fatal)
+ err_data->ue_count++;
+
+ if (corr || fatal || non_fatal) {
+ central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+ /* clear error status register */
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+
+ if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
+ BIFL_RasContller_Intr_Recv)) {
+ /* clear interrupt status register */
+ WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
+ int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
+ int_eoi = REG_SET_FIELD(int_eoi,
+ IOHC_INTERRUPT_EOI, SMI_EOI, 1);
+ WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
+ }
+ }
+}
+
+static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
- .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
@@ -330,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
+ .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
@@ -337,4 +531,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.init_registers = nbio_v7_4_init_registers,
.detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index c442865bac4f..b1ac82872752 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index de9b995b65b1..0ba66bef5746 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -40,12 +40,14 @@
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -53,6 +55,7 @@
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
+#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -63,8 +66,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -78,8 +81,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -119,7 +122,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -154,8 +157,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
- /* TODO: will implement it when SMU header is available */
- return false;
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
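+ /* the ROM data port returns one dword per read, so round the byte
+ * length up to dwords */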
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
+ /* set rom index to 0 */
+ WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+
+ return true;
}
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
@@ -176,6 +198,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -279,7 +302,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -296,7 +319,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
- if (smu_baco_is_support(smu))
+ if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
@@ -368,8 +391,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -423,9 +446,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
- adev->nbio_funcs = &nbio_v2_3_funcs;
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_nv_virt_ops;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -435,7 +462,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -446,7 +473,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (adev->enable_mes)
@@ -458,7 +485,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -469,7 +496,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
break;
@@ -482,12 +509,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -532,6 +559,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ /* TODO: dummy implementation for the pcie_replay_count sysfs interface */
+ return 0;
+}
+
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
@@ -579,12 +616,16 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_full_reset = &nv_need_full_reset,
.get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
+ .get_pcie_replay_count = &nv_get_pcie_replay_count,
};
static int nv_common_early_init(void *handle)
{
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
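+ /* the remap hole sits one page below the 512KB mark of the register aperture */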
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -667,16 +708,31 @@ static int nv_common_early_init(void *handle)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_setting(adev);
+ xgpu_nv_mailbox_set_irq_funcs(adev);
+ }
+
return 0;
}
static int nv_common_late_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_get_irq(adev);
+
return 0;
}
static int nv_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -694,7 +750,13 @@ static int nv_common_hw_init(void *handle)
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
+ /* remap HDP registers to a hole in mmio space,
+ * in order to expose those registers to process space
+ */
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
@@ -856,9 +918,9 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
nv_update_hdp_mem_power_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -886,7 +948,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_MGCG */
tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5d95e614369a..b345e69ba246 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -40,6 +40,9 @@
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
@@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
-
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
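+ /* the TA binary carries both the HDCP and DTM trusted applications;
+ * a load failure is treated as non-fatal and leaves ta_fw NULL
+ */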
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
out:
if (err) {
dev_err(adev->dev,
@@ -228,6 +269,7 @@ static int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 10166104b8a3..ffeaa2f5588d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
#define mmRLC_GPM_UCODE_DATA_NV10 0x5b62
#define mmSDMA0_UCODE_ADDR_NV10 0x5880
#define mmSDMA0_UCODE_DATA_NV10 0x5881
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
@@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -182,7 +186,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- case CHIP_ARCTURUS:
break;
default:
BUG();
@@ -205,18 +208,26 @@ out:
return err;
}
+static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
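+ /* C2PMSG_81 is the sOS sign-of-life register; it reads non-zero once
+ * sOS is up
+ */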
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check tOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -233,7 +244,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Copy PSP KDB binary to memory */
memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
- /* Provide the sys driver to bootloader */
+ /* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
@@ -252,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -296,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
@@ -398,6 +405,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
return false;
}
+static int psp_v11_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Write the ring destroy command*/
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+ /* there might be a handshake issue with the hardware, which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -407,6 +442,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
if (psp_v11_0_support_vmr_ring(psp)) {
+ ret = psp_v11_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -426,6 +467,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
0x80000000, 0x8000FFFF, false);
} else {
+ /* Wait for sOS to be ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -451,33 +500,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
- else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
- GFX_CTRL_CMD_ID_DESTROY_RINGS);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
-
- return ret;
-}
static int psp_v11_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
@@ -541,6 +563,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
@@ -889,6 +912,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
return psp_rlc_autoload_start(psp);
}
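+/* Hand a DRAM-training message to the bootloader: C2PMSG_36 takes the
+ * training-data offset (in units of 1MB, hence the >> 20) and C2PMSG_35
+ * takes the message; the PSP sets bit 31 of C2PMSG_35 when it is done.
+ */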
+static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
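+ /* each psp_wait_for() call polls for up to roughly adev->usec_timeout us,
+ * so max_wait iterations cap the total wait at about
+ * MEM_TRAIN_SEND_MSG_TIMEOUT_US
+ */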
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeeded" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+static void psp_v11_0_memory_training_fini(struct psp_context *psp)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ kfree(ctx->sys_cache);
+ ctx->sys_cache = NULL;
+}
+
+static int psp_v11_0_memory_training_init(struct psp_context *psp)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
+ DRM_DEBUG("memory training is not supported!\n");
+ return 0;
+ }
+
+ ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
+ if (ctx->sys_cache == NULL) {
+ DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ ret = -ENOMEM;
+ goto Err_out;
+ }
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+ ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
+ return 0;
+
+Err_out:
+ psp_v11_0_memory_training_fini(psp);
+ return ret;
+}
+
+/*
+ * save and restore process for memory training data
+ */
+static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ int ret;
+ uint32_t p2c_header[4];
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ DRM_DEBUG("Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ DRM_ERROR("Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v11_0_is_sos_alive(psp)) {
+ DRM_DEBUG("SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
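+ /* Resolve the implicit dependencies between the requested ops: a short
+ * train is only meaningful on restored data; a restore needs a valid
+ * sys_cache copy (otherwise save first); a save needs valid training
+ * data in VRAM (otherwise run a long train); and a long train forces a
+ * save and makes the short train redundant.
+ */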
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ DRM_DEBUG("Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ DRM_DEBUG("Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ return ret;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ DRM_ERROR("send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -909,6 +1088,9 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ras_trigger_error = psp_v11_0_ras_trigger_error,
.ras_cure_posion = psp_v11_0_ras_cure_posion,
.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
+ .mem_training_init = psp_v11_0_memory_training_init,
+ .mem_training_fini = psp_v11_0_memory_training_fini,
+ .mem_training = psp_v11_0_memory_training,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index c72e43f8e0be..8f553f6f92d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -378,6 +378,7 @@ static int psp_v12_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index d2c727f6a8bd..fdc00938327b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -454,6 +454,7 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4554e72c8378..4ef4d31f5231 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -747,13 +747,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
sdma_v4_0_wait_reg_mem(ring, 0, 1,
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}
@@ -1691,102 +1691,17 @@ static int sdma_v4_0_early_init(void *handle)
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->sdma.ras_if;
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- struct ras_fs_if fs_info = {
- .sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__SDMA,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "sdma",
- };
- int r, i;
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- r = 0;
- }
- goto feature;
- }
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
+ return amdgpu_sdma_ras_late_init(adev, &ih_info);
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1858,21 +1773,7 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
- adev->sdma.ras_if) {
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /*remove fs first*/
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /*remove the IH*/
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_sdma_ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -1892,7 +1793,7 @@ static int sdma_v4_0_hw_init(void *handle)
if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu) ||
- adev->asic_type == CHIP_RENOIR)
+ (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset))
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
if (!amdgpu_sriov_vf(adev))
@@ -2025,52 +1926,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry)
{
- uint32_t err_source;
int instance;
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood.
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
- return 0;
-
- switch (entry->src_id) {
- case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
- err_source = 0;
- break;
- case SDMA0_4_0__SRCID__SDMA_ECC:
- err_source = 1;
- break;
- default:
- return 0;
- }
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ goto out;
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+out:
return AMDGPU_RAS_SUCCESS;
}
-static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -2418,7 +2295,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.set = sdma_v4_0_set_ecc_irq_state,
- .process = sdma_v4_0_process_ecc_irq,
+ .process = amdgpu_sdma_process_ecc_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 8493bfbbc148..f4ad2990f973 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 493af42152f2..29024e64c886 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
{GB_ADDR_CONFIG},
{MC_ARB_RAMCFG},
{GB_TILE_MODE0},
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 57bb5f9e08b2..88ae27a5a03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
si_ih_disable_interrupts(adev);
- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4ccfcdf8f16a..8e1640bc07af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -58,6 +58,9 @@
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u64 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* read low 32 bit */
@@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* write low 32 bit */
@@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -336,6 +339,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -461,7 +465,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -475,42 +479,66 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
+ *cap = smu_baco_is_support(smu);
+ return 0;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ }
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
+ /* avoid the NBIF getting stuck when doing RAS recovery in BACO reset */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
+ dev_info(adev->dev, "GPU BACO reset\n");
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- dev_info(adev->dev, "GPU BACO reset\n");
+ if (smu_baco_reset(smu))
+ return -EIO;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- adev->in_baco_reset = 1;
+ if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+ }
+
+ /* re-enable doorbell interrupt after BACO exit */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
}
static int soc15_mode2_reset(struct amdgpu_device *adev)
{
+ if (is_support_sw_smu(adev))
+ return smu_mode2_reset(&adev->smu);
if (!adev->powerplay.pp_funcs ||
!adev->powerplay.pp_funcs->asic_reset_mode_2)
return -ENOENT;
@@ -525,6 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -626,8 +655,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -641,7 +670,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -667,13 +696,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->flags & AMD_IS_APU)
- adev->nbio_funcs = &nbio_v7_0_funcs;
- else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
- adev->nbio_funcs = &nbio_v7_4_funcs;
- else
- adev->nbio_funcs = &nbio_v6_1_funcs;
+ if (adev->flags & AMD_IS_APU) {
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ } else if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ } else {
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ }
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df_funcs = &df_v3_6_funcs;
@@ -681,7 +714,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df_funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -750,13 +783,26 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ }
+
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+
+ if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
break;
case CHIP_RENOIR:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
@@ -785,7 +831,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
@@ -1099,7 +1145,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
} else if (adev->pdev->device == 0x15d8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1142,7 +1190,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
}
break;
case CHIP_ARCTURUS:
@@ -1157,7 +1207,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
- AMD_CG_SUPPORT_MC_LS;
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_IH_CG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x32;
break;
@@ -1203,11 +1254,15 @@ static int soc15_common_early_init(void *handle)
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- return 0;
+ if (adev->nbio.funcs->ras_late_init)
+ r = adev->nbio.funcs->ras_late_init(adev);
+
+ return r;
}
static int soc15_common_sw_init(void *handle)
@@ -1224,6 +1279,10 @@ static int soc15_common_sw_init(void *handle)
static int soc15_common_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_nbio_ras_fini(adev);
+ adev->df_funcs->sw_fini(adev);
return 0;
}
@@ -1236,12 +1295,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
}
}
@@ -1255,13 +1314,13 @@ static int soc15_common_hw_init(void *handle)
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
* for the purpose of exposing those registers
* to process space
*/
- if (adev->nbio_funcs->remap_hdp_registers)
- adev->nbio_funcs->remap_hdp_registers(adev);
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -1284,6 +1343,14 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ if (adev->nbio.ras_if &&
+ amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ if (adev->nbio.funcs->init_ras_controller_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
+
return 0;
}
@@ -1424,9 +1491,9 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1441,9 +1508,9 @@ static int soc15_common_set_clockgating_state(void *handle,
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1472,7 +1539,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index a3dde0c31f57..57af489a5de3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,8 +28,8 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
-#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
extern const struct amd_ip_funcs soc15_common_ip_funcs;
@@ -67,6 +67,8 @@ struct soc15_allowed_register_entry {
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
new file mode 100644
index 000000000000..0d6b50528d76
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_0.h"
+#include "amdgpu.h"
+
+static void umc_v6_0_init_registers(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
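+ /* Walk what appear to be four UMC instances (0x100000 apart) and four
+ * channels within each (0x2000 apart); the offsets are byte addresses,
+ * so dividing by 4 yields the dword register index WREG32 expects. The
+ * register names behind these raw offsets are not spelled out here.
+ */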
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002);
+}
+
+const struct amdgpu_umc_funcs umc_v6_0_funcs = {
+ .init_registers = umc_v6_0_init_registers,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
new file mode 100644
index 000000000000..109f1a57a46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_0_H__
+#define __UMC_V6_0_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+extern const struct amdgpu_umc_funcs umc_v6_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 8502e736f721..47c4b96b14d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -75,6 +75,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
RSMU_UMC_INDEX_MODE_EN, 0);
}
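+/* Read which UMC instance is currently selected in the RSMU index register;
+ * used below to tag retired pages with the erring UMC (mcumc_id).
+ */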
+static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+{
+ uint32_t rsmu_umc_index;
+
+ rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ return REG_GET_FIELD(rsmu_umc_index,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_INSTANCE);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -165,7 +176,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
uint32_t umc_reg_offset, uint32_t channel_index)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr;
+ uint64_t mc_umc_status, err_addr, retired_page;
+ struct eeprom_table_record *err_rec;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -177,6 +189,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
/* calculate error address if ue/ce error is detected */
@@ -191,12 +204,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);
/* translate umc channel address to soc pa, 3 parts are included */
- err_data->err_addr[err_data->err_addr_cnt] =
- ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- err_data->err_addr_cnt++;
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save UE error information currently, CE is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+
+ err_data->err_addr_cnt++;
+ }
}
/* clear umc status */
@@ -209,7 +234,7 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
}
-static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
+static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t channel_index)
{
@@ -239,15 +264,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
}
-static void umc_v6_1_ras_init(struct amdgpu_device *adev)
+static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
void *ras_error_status = NULL;
- amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel);
+ amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
}
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
- .ras_init = umc_v6_1_ras_init,
+ .err_cnt_init = umc_v6_1_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 93b3500e522b..b4f84a820a44 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -202,7 +202,6 @@ static int vcn_v1_0_hw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 36ad0c0e8efb..38f787a560cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -244,33 +244,24 @@ static int vcn_v2_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst->ring_jpeg;
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 395c2259f979..93edf9193a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
@@ -255,32 +256,24 @@ static int vcn_v2_5_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[j].ring_dec;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, j);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
- ring->sched.ready = false;
- continue;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst[j].ring_jpeg;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
done:
if (!r)
@@ -300,7 +293,7 @@ static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i;
+ int i, j;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -312,8 +305,8 @@ static int vcn_v2_5_hw_fini(void *handle)
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[i].ring_enc[i];
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
@@ -423,7 +416,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Disable clock gating for VCN block
*/
@@ -542,7 +534,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Enable clock gating for VCN block
*/
@@ -716,6 +707,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -946,6 +940,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 9eae3536ddad..5cb7e231de5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
ih = &adev->irq.ih;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle)
return 0;
}
+static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ /*
+ * Vega10 does not have the IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
+ * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE fields.
+ */
+ if (adev->asic_type > CHIP_VEGA10) {
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ }
+
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
static int vega10_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega10_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
return 0;
}
static int vega10_ih_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index bd0580334f83..6b52a539d51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
int vega10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 587e33f5dcce..556f854e3551 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 5f8c8786cac5..78e5cdc0c058 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
+
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+}
+
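+/* BACO ("Bus Active, Chip Off") powers the GPU core down and back up while
+ * the PCIe bus stays active, which amounts to a full chip reset.
+ */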
+int smu7_asic_baco_reset(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ return 0;
+}
+
/**
- * vi_asic_reset - soft reset GPU
+ * vi_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int vi_asic_reset(struct amdgpu_device *adev)
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ smu7_asic_get_baco_capability(adev, &baco_reset);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * vi_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = vi_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 8de0772f986c..40d4174913a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 177d1e5329a5..9f59ba93cfe0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
- unsigned int vmid, pasid;
+ unsigned int vmid;
+ uint16_t pasid;
+ bool ret;
/* This workaround is due to HW/FW limitation on Hawaii that
* VMID and PASID are not written into ih_ring_entry
@@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*tmp_ihre = *ihre;
vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
- return (pasid != 0) &&
+ return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 901fe3590165..d3400da6ab64 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x7a5d0000, 0x807c817c,
0x807aff7a, 0x00000080,
0xbf0a717c, 0xbf85fff8,
- 0xbf820141, 0xbef4037e,
+ 0xbf820142, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304080,
0x725d0100, 0xe0304100,
0x725d0200, 0xe0304180,
- 0x725d0300, 0xbf820031,
+ 0x725d0300, 0xbf820032,
0xbef603ff, 0x01000000,
0xbef20378, 0x8078ff78,
0x00000400, 0xbefc0384,
@@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304100,
0x725d0100, 0xe0304200,
0x725d0200, 0xe0304300,
- 0x725d0300, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef603ff,
- 0x01000000, 0xbefc03ff,
- 0x0000006c, 0x80f89078,
- 0xf429003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc847c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0x80f8a078,
- 0xf42d003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc887c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0x80f8c078,
- 0xf431003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0xbe883108,
- 0xbe8a310a, 0xbe8c310c,
- 0xbe8e310e, 0xbf06807c,
- 0xbf84fff0, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0x725d0300, 0xbf8c3f70,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef603ff, 0x01000000,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0xbef603ff, 0x01000000,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbef2036d, 0x876dff72,
- 0x0000ffff, 0xbefc036f,
- 0xbefe037a, 0xbeff037b,
- 0x876f71ff, 0x000003ff,
- 0xb9ef4803, 0xb9f9f816,
- 0x876f71ff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0x876fff72,
- 0xfc000000, 0x906f9a6f,
- 0x8f6f906f, 0xbef30380,
+ 0xb9eef815, 0xbef2036d,
+ 0x876dff72, 0x0000ffff,
+ 0xbefc036f, 0xbefe037a,
+ 0xbeff037b, 0x876f71ff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f71ff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0x876fff72, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbef30380, 0x88736f73,
+ 0x876fff72, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
0x88736f73, 0x876fff72,
- 0x02000000, 0x906f996f,
- 0x8f6f8f6f, 0x88736f73,
- 0x876fff72, 0x01000000,
- 0x906f986f, 0x8f6f996f,
- 0x88736f73, 0x876fff70,
- 0x00800000, 0x906f976f,
- 0xb9f3f807, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9f0f802,
- 0xbf8a0000, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x88736f73,
+ 0x876fff70, 0x00800000,
+ 0x906f976f, 0xb9f3f807,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9f0f802, 0xbf8a0000,
+ 0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0x00000000,
};
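+/* The shifted words above (e.g. 0xbf820141 -> 0xbf820142, 0xbf820031 ->
+ * 0xbf820032) and the new 0xbf8c3f70 are the assembled effect of the single
+ * s_waitcnt instruction added in cwsr_trap_handler_gfx10.asm below.
+ */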
static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbf820001, 0xbf8202c4,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index cdaa523ce6be..4433bda2ce25 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -758,6 +758,7 @@ L_RESTORE_V0:
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
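+ // make sure the four VGPR loads above have returned before v0-v3 are used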
+ s_waitcnt vmcnt(0)
/* restore SGPRs */
//will be 2+8+16*6
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..9af45d07515b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -282,7 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
+ pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -332,7 +332,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid %d\n",
+ pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
@@ -378,7 +378,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid %d\n",
+ pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -855,7 +855,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
@@ -913,7 +913,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
uint32_t nodes = 0;
int ret;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
	/* Return number of nodes, so that user space can allocate
@@ -1128,7 +1128,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
- pdd->qpd.vmid != 0)
+ pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -1801,7 +1801,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
} else
goto err_i1;
- dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
if (IS_ERR(process)) {
@@ -1856,7 +1856,8 @@ err_i1:
kfree(kdata);
if (retcode)
- dev_dbg(kfd_device, "ret = %d\n", retcode);
+ dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
+ nr, arg, retcode);
return retcode;
}
@@ -1877,7 +1878,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("Process %d mapping mmio page\n"
+ pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 66387caf966e..de9f68d5c312 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
+#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
@@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
+ case CHIP_RENOIR:
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
@@ -703,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info,
cu_info,
mem_available,
- cu_info->cu_bitmap[i][j],
+ cu_info->cu_bitmap[i % 4][j + i / 4],
ct,
cu_processor_id,
k);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a3441b0e385b..d59f2cd056c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
+ uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
@@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
- (dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
- (dev->kgd, vmid) == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
- vmid, p->pasid);
- break;
- }
+ status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
+ (dev->kgd, vmid, &queried_pasid);
+
+ if (status && queried_pasid == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
+ vmid, p->pasid);
+ break;
}
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid %d\n", p->pasid);
+ pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 9d4af961c5d1..9bfa50633654 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
if (pmgr->pasid != 0) {
- pr_debug("H/W debugger is already active using pasid %d\n",
+ pr_debug("H/W debugger is already active using pasid 0x%x\n",
pmgr->pasid);
return -EBUSY;
}
@@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
- pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
p->pasid);
return -EINVAL;
}
@@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
wac_info->process->pasid);
return -EINVAL;
}
@@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
adw_info->process->pasid);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0dc1084b5e82..4fa8834ce7cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -39,6 +39,41 @@
*/
static atomic_t kfd_locked = ATOMIC_INIT(0);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
+#endif
+extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
+extern const struct kfd2kgd_calls arcturus_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
+
+static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
+ [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_TONGA] = &gfx_v8_kfd2kgd,
+ [CHIP_FIJI] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
+ [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
+ [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
+ [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
+};
+
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
@@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = {
.num_sdma_queues_per_engine = 8,
};
+static const struct kfd_device_info renoir_device_info = {
+ .asic_family = CHIP_RENOIR,
+ .asic_name = "renoir",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info navi10_device_info = {
.asic_family = CHIP_NAVI10,
.asic_name = "navi10",
@@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = {
.num_sdma_queues_per_engine = 8,
};
-struct kfd_deviceid {
- unsigned short did;
- const struct kfd_device_info *device_info;
+static const struct kfd_device_info navi12_device_info = {
+ .asic_family = CHIP_NAVI12,
+ .asic_name = "navi12",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info navi14_device_info = {
+ .asic_family = CHIP_NAVI14,
+ .asic_name = "navi14",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
};
-static const struct kfd_deviceid supported_devices[] = {
+/* For each entry, [0] is the regular device and [1] is the virtualisation device. */
+static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
- { 0x1304, &kaveri_device_info }, /* Kaveri */
- { 0x1305, &kaveri_device_info }, /* Kaveri */
- { 0x1306, &kaveri_device_info }, /* Kaveri */
- { 0x1307, &kaveri_device_info }, /* Kaveri */
- { 0x1309, &kaveri_device_info }, /* Kaveri */
- { 0x130A, &kaveri_device_info }, /* Kaveri */
- { 0x130B, &kaveri_device_info }, /* Kaveri */
- { 0x130C, &kaveri_device_info }, /* Kaveri */
- { 0x130D, &kaveri_device_info }, /* Kaveri */
- { 0x130E, &kaveri_device_info }, /* Kaveri */
- { 0x130F, &kaveri_device_info }, /* Kaveri */
- { 0x1310, &kaveri_device_info }, /* Kaveri */
- { 0x1311, &kaveri_device_info }, /* Kaveri */
- { 0x1312, &kaveri_device_info }, /* Kaveri */
- { 0x1313, &kaveri_device_info }, /* Kaveri */
- { 0x1315, &kaveri_device_info }, /* Kaveri */
- { 0x1316, &kaveri_device_info }, /* Kaveri */
- { 0x1317, &kaveri_device_info }, /* Kaveri */
- { 0x1318, &kaveri_device_info }, /* Kaveri */
- { 0x131B, &kaveri_device_info }, /* Kaveri */
- { 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
- { 0x9870, &carrizo_device_info }, /* Carrizo */
- { 0x9874, &carrizo_device_info }, /* Carrizo */
- { 0x9875, &carrizo_device_info }, /* Carrizo */
- { 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info }, /* Carrizo */
- { 0x15DD, &raven_device_info }, /* Raven */
- { 0x15D8, &raven_device_info }, /* Raven */
+ [CHIP_KAVERI] = {&kaveri_device_info, NULL},
+ [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
- { 0x67A0, &hawaii_device_info }, /* Hawaii */
- { 0x67A1, &hawaii_device_info }, /* Hawaii */
- { 0x67A2, &hawaii_device_info }, /* Hawaii */
- { 0x67A8, &hawaii_device_info }, /* Hawaii */
- { 0x67A9, &hawaii_device_info }, /* Hawaii */
- { 0x67AA, &hawaii_device_info }, /* Hawaii */
- { 0x67B0, &hawaii_device_info }, /* Hawaii */
- { 0x67B1, &hawaii_device_info }, /* Hawaii */
- { 0x67B8, &hawaii_device_info }, /* Hawaii */
- { 0x67B9, &hawaii_device_info }, /* Hawaii */
- { 0x67BA, &hawaii_device_info }, /* Hawaii */
- { 0x67BE, &hawaii_device_info }, /* Hawaii */
- { 0x6920, &tonga_device_info }, /* Tonga */
- { 0x6921, &tonga_device_info }, /* Tonga */
- { 0x6928, &tonga_device_info }, /* Tonga */
- { 0x6929, &tonga_device_info }, /* Tonga */
- { 0x692B, &tonga_device_info }, /* Tonga */
- { 0x6938, &tonga_device_info }, /* Tonga */
- { 0x6939, &tonga_device_info }, /* Tonga */
- { 0x7300, &fiji_device_info }, /* Fiji */
- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
- { 0x67C4, &polaris10_device_info }, /* Polaris10 */
- { 0x67C7, &polaris10_device_info }, /* Polaris10 */
- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
- { 0x67DF, &polaris10_device_info }, /* Polaris10 */
- { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
- { 0x67E3, &polaris11_device_info }, /* Polaris11 */
- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
- { 0x67EF, &polaris11_device_info }, /* Polaris11 */
- { 0x67FF, &polaris11_device_info }, /* Polaris11 */
- { 0x6980, &polaris12_device_info }, /* Polaris12 */
- { 0x6981, &polaris12_device_info }, /* Polaris12 */
- { 0x6985, &polaris12_device_info }, /* Polaris12 */
- { 0x6986, &polaris12_device_info }, /* Polaris12 */
- { 0x6987, &polaris12_device_info }, /* Polaris12 */
- { 0x6995, &polaris12_device_info }, /* Polaris12 */
- { 0x6997, &polaris12_device_info }, /* Polaris12 */
- { 0x699F, &polaris12_device_info }, /* Polaris12 */
- { 0x694C, &vegam_device_info }, /* VegaM */
- { 0x694E, &vegam_device_info }, /* VegaM */
- { 0x694F, &vegam_device_info }, /* VegaM */
- { 0x6860, &vega10_device_info }, /* Vega10 */
- { 0x6861, &vega10_device_info }, /* Vega10 */
- { 0x6862, &vega10_device_info }, /* Vega10 */
- { 0x6863, &vega10_device_info }, /* Vega10 */
- { 0x6864, &vega10_device_info }, /* Vega10 */
- { 0x6867, &vega10_device_info }, /* Vega10 */
- { 0x6868, &vega10_device_info }, /* Vega10 */
- { 0x6869, &vega10_device_info }, /* Vega10 */
- { 0x686A, &vega10_device_info }, /* Vega10 */
- { 0x686B, &vega10_device_info }, /* Vega10 */
- { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
- { 0x686D, &vega10_device_info }, /* Vega10 */
- { 0x686E, &vega10_device_info }, /* Vega10 */
- { 0x686F, &vega10_device_info }, /* Vega10 */
- { 0x687F, &vega10_device_info }, /* Vega10 */
- { 0x69A0, &vega12_device_info }, /* Vega12 */
- { 0x69A1, &vega12_device_info }, /* Vega12 */
- { 0x69A2, &vega12_device_info }, /* Vega12 */
- { 0x69A3, &vega12_device_info }, /* Vega12 */
- { 0x69AF, &vega12_device_info }, /* Vega12 */
- { 0x66a0, &vega20_device_info }, /* Vega20 */
- { 0x66a1, &vega20_device_info }, /* Vega20 */
- { 0x66a2, &vega20_device_info }, /* Vega20 */
- { 0x66a3, &vega20_device_info }, /* Vega20 */
- { 0x66a4, &vega20_device_info }, /* Vega20 */
- { 0x66a7, &vega20_device_info }, /* Vega20 */
- { 0x66af, &vega20_device_info }, /* Vega20 */
- { 0x738C, &arcturus_device_info }, /* Arcturus */
- { 0x7388, &arcturus_device_info }, /* Arcturus */
- { 0x738E, &arcturus_device_info }, /* Arcturus */
- { 0x7390, &arcturus_device_info }, /* Arcturus vf */
- { 0x7310, &navi10_device_info }, /* Navi10 */
- { 0x7312, &navi10_device_info }, /* Navi10 */
- { 0x7318, &navi10_device_info }, /* Navi10 */
- { 0x731a, &navi10_device_info }, /* Navi10 */
- { 0x731f, &navi10_device_info }, /* Navi10 */
+ [CHIP_HAWAII] = {&hawaii_device_info, NULL},
+ [CHIP_TONGA] = {&tonga_device_info, NULL},
+ [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
+ [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
+ [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
+ [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
+ [CHIP_VEGAM] = {&vegam_device_info, NULL},
+ [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
+ [CHIP_VEGA12] = {&vega12_device_info, NULL},
+ [CHIP_VEGA20] = {&vega20_device_info, NULL},
+ [CHIP_RENOIR] = {&renoir_device_info, NULL},
+ [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
+ [CHIP_NAVI10] = {&navi10_device_info, NULL},
+ [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
+ [CHIP_NAVI14] = {&navi14_device_info, NULL},
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-static const struct kfd_device_info *lookup_device_info(unsigned short did)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
- size_t i;
+ struct kfd_dev *kfd;
+ const struct kfd_device_info *device_info;
+ const struct kfd2kgd_calls *f2g;
- for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
- if (supported_devices[i].did == did) {
- WARN_ON(!supported_devices[i].device_info);
- return supported_devices[i].device_info;
- }
+ if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
+ || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
+ dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
+ return NULL; /* asic_type out of range */
}
- dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
- did);
+ device_info = kfd_supported_devices[asic_type][vf];
+ f2g = kfd2kgd_funcs[asic_type];
- return NULL;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
-{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info =
- lookup_device_info(pdev->device);
-
- if (!device_info) {
- dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ if (!device_info || !f2g) {
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[asic_type], vf ? "VF" : "");
return NULL;
}
@@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
+ kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
@@ -751,9 +730,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
return 0;
kgd2kfd_suspend(kfd);
- /* hold dqm->lock to prevent further execution*/
- dqm_lock(kfd->dqm);
-
kfd_signal_reset_event(kfd);
return 0;
}
@@ -771,8 +747,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
if (!kfd->init_complete)
return 0;
- dqm_unlock(kfd->dqm);
-
ret = kfd_resume(kfd);
if (ret)
return ret;
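
For context, the kfd_device.c hunks above replace the long PCI device-ID scan with two tables indexed by asic_type, each row holding a bare-metal/VF pair selected by the new vf argument to kgd2kfd_probe(). A minimal, self-contained C sketch of that lookup shape (toy names and table contents, not kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct device_info { const char *name; };

static const struct device_info vega10_info = { "vega10" };
static const struct device_info vega10_vf_info = { "vega10 vf" };
static const struct device_info navi10_info = { "navi10" };

/* [asic][0] = bare-metal device, [asic][1] = virtualisation (VF) device */
static const struct device_info *supported[][2] = {
	[0] = { &vega10_info, &vega10_vf_info },
	[1] = { &navi10_info, NULL },
};

static const struct device_info *lookup(size_t asic_type, bool vf)
{
	if (asic_type >= sizeof(supported) / sizeof(supported[0]))
		return NULL;			/* enum value out of range */
	return supported[asic_type][vf];	/* NULL => not supported */
}

int main(void)
{
	const struct device_info *info = lookup(0, true);

	printf("%s\n", info ? info->name : "not supported");
	return 0;
}

An absent row (or a NULL VF slot, as for most chips above) falls out of the same bounds-checked index, which is why the kernel hunk only needs the single !device_info || !f2g test after the lookup.
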
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d985e31fcc1e..984c2f2b24b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit, allocated_vmid;
+ int allocated_vmid = -1, i;
- if (dqm->vmid_bitmap == 0)
- return -ENOMEM;
+ for (i = dqm->dev->vm_info.first_vmid_kfd;
+ i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
+ if (!dqm->vmid_pasid[i]) {
+ allocated_vmid = i;
+ break;
+ }
+ }
+
+ if (allocated_vmid < 0) {
+ pr_err("no more vmid to allocate\n");
+ return -ENOSPC;
+ }
+
+ pr_debug("vmid allocated: %d\n", allocated_vmid);
+
+ dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
- bit = ffs(dqm->vmid_bitmap) - 1;
- dqm->vmid_bitmap &= ~(1 << bit);
+ set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
- allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
- pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
- dqm->dev->kfd2kgd->set_scratch_backing_va(
- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (dqm->dev->kfd2kgd->set_scratch_backing_va)
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
@@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
-
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
@@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+ dqm->vmid_pasid[qpd->vmid] = 0;
- dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
@@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
+ goto add_queue_to_list;
+ }
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
@@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
goto out_free_mqd;
}
+add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
@@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
+ return 0;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
+
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Update non-HWS queue while stopped\n");
+ goto out_unlock;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
+ dqm->queue_count--;
+
+ if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
+ continue;
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count--;
}
out:
@@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
+ dqm->queue_count++;
+
+ if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
+ continue;
+
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
if (retval && !ret)
@@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count++;
}
qpd->evicted = 0;
out:
@@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -879,7 +912,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
+ memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
+
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -902,12 +936,20 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
init_interrupts(dqm);
- return pm_init(&dqm->packets, dqm);
+
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ return pm_init(&dqm->packets, dqm);
+ dqm->sched_running = true;
+
+ return 0;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- pm_uninit(&dqm->packets);
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ pm_uninit(&dqm->packets);
+ dqm->sched_running = false;
+
return 0;
}
@@ -1058,6 +1100,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1073,6 +1116,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
{
dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
@@ -1259,9 +1303,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
return 0;
-
if (dqm->active_runlist)
return 0;
@@ -1283,6 +1328,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->is_hws_hang)
return -EIO;
if (!dqm->active_runlist)
@@ -1676,7 +1723,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- dev->device_info->num_sdma_engines *
+ (dev->device_info->num_sdma_engines +
+ dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
@@ -1786,10 +1834,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:
@@ -1883,6 +1934,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
int pipe, queue;
int r = 0;
+ if (!dqm->sched_running) {
+ seq_printf(m, " Device is stopped\n");
+
+ return 0;
+ }
+
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
@@ -1917,7 +1974,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
+ get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
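
Several of the kfd_device_queue_manager.c hunks above share one shape: software queue state (queue_count, is_active, the queue lists) is updated unconditionally, while the hardware MQD load/destroy is skipped once dqm->sched_running is false, e.g. between stop_cpsch() and start_cpsch() across a GPU reset. A hedged toy sketch of that guard (stand-alone C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct dqm {
	bool sched_running;	/* cleared by stop_*(), set by start_*() */
	int queue_count;
};

static void evict_queue(struct dqm *dqm)
{
	/* Bookkeeping always happens, so evict/restore stay balanced... */
	dqm->queue_count--;

	/* ...but hardware access is skipped while the device is stopped. */
	if (!dqm->sched_running) {
		fprintf(stderr, "evict while stopped: skipping MQD destroy\n");
		return;
	}
	/* destroy_mqd(...) would run here */
}

int main(void)
{
	struct dqm dqm = { .sched_running = false, .queue_count = 1 };

	evict_queue(&dqm);
	printf("queue_count = %d\n", dqm.queue_count);
	return 0;
}
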
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 90db2c9275f6..a8c37e6da027 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -32,6 +32,8 @@
#include "kfd_mqd_manager.h"
+#define VMID_NUM 16
+
struct device_process_node {
struct qcm_process_device *qpd;
struct list_head list;
@@ -185,7 +187,8 @@ struct device_queue_manager {
unsigned int *allocated_queues;
uint64_t sdma_bitmap;
uint64_t xgmi_sdma_bitmap;
- unsigned int vmid_bitmap;
+ /* the pasid mapping for each kfd vmid */
+ uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
@@ -198,6 +201,7 @@ struct device_queue_manager {
bool is_hws_hang;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
+ bool sched_running;
};
void device_queue_manager_init_cik(
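
The vmid_bitmap this replaces only recorded which VMIDs were busy; the new vmid_pasid[] table also records which PASID owns each VMID, so allocation becomes a scan for a zero slot and the interrupt path can translate VMID to PASID from memory instead of reading the ATC registers (see the kfd_int_process_v9.c hunk below). A simplified, self-contained sketch of the pattern (toy bounds, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define VMID_NUM 16

/* vmid_pasid[v] holds the PASID mapped to VMID v; 0 means "free". */
static uint16_t vmid_pasid[VMID_NUM];

/* Returns the VMID allocated from [first, last], or -1 if all are busy. */
static int allocate_vmid(uint16_t pasid, int first, int last)
{
	int i;

	for (i = first; i <= last; i++) {
		if (!vmid_pasid[i]) {
			vmid_pasid[i] = pasid;
			return i;
		}
	}
	return -1;
}

static void deallocate_vmid(int vmid)
{
	vmid_pasid[vmid] = 0;
}

int main(void)
{
	int vmid = allocate_vmid(0x8001, 8, 15);

	printf("pasid 0x8001 -> vmid %d\n", vmid);
	deallocate_vmid(vmid);
	return 0;
}
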
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..908081c85de1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -852,8 +852,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGSEGV to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -861,13 +861,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGTERM to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "HSA Process (PID %d) got unhandled exception",
- p->lead_thread->pid);
+ "Process %d (pasid 0x%x) got unhandled exception",
+ p->lead_thread->pid, p->pasid);
}
}
}
@@ -936,7 +936,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN) {
+ if (dev->device_info->asic_family != CHIP_RAVEN &&
+ dev->device_info->asic_family != CHIP_RENOIR) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 9dc4bff8085e..bb77b8890e77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
/*Iterating over all devices*/
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
- if (!dev) {
- id++; /* Skip non GPU devices */
+ if (!dev || kfd_devcgroup_check_permission(dev)) {
+			/* Skip non-GPU devices and devices to which the
+			 * current process has no access. Access can be
+			 * limited by placing the process in a specific
+			 * cgroup hierarchy.
+			 */
+ id++;
continue;
}
@@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kfd_init_apertures_v9(pdd, id);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 3ef67d2e0d9f..e05d75ecda21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
memcpy(patched_ihre, ih_ring_entry,
dev->device_info->ih_ring_entry_size);
- pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
- dev->kgd, vmid);
+ pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index c56ac47cd318..bc47f6a44456 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
}
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!kfd->ih_wq)) {
+ kfifo_free(&kfd->ih_fifo);
+ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..193e2835bd4d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -160,7 +160,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
if (!p)
return;
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
mutex_lock(kfd_get_dbgmgr_mutex());
@@ -194,7 +194,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
struct kfd_dev *dev;
dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
@@ -235,7 +235,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
+ pr_err("Unexpected pasid 0x%x binding failure\n",
p->pasid);
mutex_unlock(&p->mutex);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8b4564f71a7a..11d244891393 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -330,10 +330,13 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
kernel_queue_init_v9(&kq->ops_asic_specific);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kernel_queue_init_v10(&kq->ops_asic_specific);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 9cd3eb2d90bd..4a236b2c2354 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -69,35 +69,13 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
- int retval;
- struct kfd_mem_obj *mqd_mem_obj = NULL;
+ struct kfd_mem_obj *mqd_mem_obj;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
- */
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if (!mqd_mem_obj)
- return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
- &(mqd_mem_obj->gtt_mem),
- &(mqd_mem_obj->gpu_addr),
- (void *)&(mqd_mem_obj->cpu_ptr), true);
- } else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
- &mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
+ &mqd_mem_obj))
return NULL;
- }
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -250,14 +228,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- struct kfd_dev *kfd = mm->dev;
-
- if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
- kfree(mqd_mem_obj);
- } else {
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
- }
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 2c8624c5b42c..83ef4b3dd2fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -239,10 +239,13 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
pm->pmf = &kfd_v9_pm_funcs;
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pm->pmf = &kfd_v10_pm_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c89326125d71..060a9e8b301e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -36,6 +36,10 @@
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/device_cgroup.h>
+#include <drm/drm_file.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -179,10 +183,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11)
-#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \
- (chip) <= CHIP_NAVI10) || \
- (chip) == CHIP_HAWAII)
#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
@@ -230,6 +230,7 @@ struct kfd_dev {
const struct kfd_device_info *device_info;
struct pci_dev *pdev;
+ struct drm_device *ddev;
unsigned int id; /* topology stub index */
@@ -687,7 +688,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- unsigned int pasid;
+ uint16_t pasid;
unsigned int doorbell_index;
/*
@@ -1040,6 +1041,21 @@ bool kfd_is_locked(void);
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
+/* Cgroup Support */
+/* Check with device cgroup if @kfd device is accessible */
+static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+{
+#if defined(CONFIG_CGROUP_DEVICE)
+ struct drm_device *ddev = kfd->ddev;
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ ddev->render->index,
+ DEVCG_ACC_WRITE | DEVCG_ACC_READ);
+#else
+ return 0;
+#endif
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 40e3fc0c6942..10f9af5784f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
@@ -687,6 +687,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev->device_info->asic_family))
return 0;
@@ -698,14 +700,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= dev->shared_resources.non_cp_doorbells_start
- && i <= dev->shared_resources.non_cp_doorbells_end) {
+ if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
- i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
}
@@ -1020,7 +1024,7 @@ static void evict_process_worker(struct work_struct *work)
*/
flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid %d\n", p->pasid);
+ pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
@@ -1029,9 +1033,9 @@ static void evict_process_worker(struct work_struct *work)
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- pr_debug("Finished evicting pasid %d\n", p->pasid);
+ pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
@@ -1046,7 +1050,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid %d\n", p->pasid);
+ pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1062,7 +1066,7 @@ static void restore_process_worker(struct work_struct *work)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
- pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
@@ -1072,9 +1076,9 @@ static void restore_process_worker(struct work_struct *work)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid %d\n", p->pasid);
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1088,7 +1092,7 @@ void kfd_suspend_all_processes(void)
cancel_delayed_work_sync(&p->restore_work);
if (kfd_process_evict_queues(p))
- pr_err("Failed to suspend process %d\n", p->pasid);
+ pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
@@ -1171,7 +1175,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID %d:\n",
+ seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7e6c3ee82f5b..2659d226c056 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -298,7 +298,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -377,7 +377,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+ pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 7551761f2aa9..69bd0628fdc6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
@@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
+ if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, "flags", mem->flags);
@@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
+ if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
@@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
@@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev = container_of(attr, struct kfd_topology_device,
attr_name);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_str_val(buffer, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
@@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->gpu = dev->gpu;
+ list_for_each_entry(cache, &dev->cache_props, list)
+ cache->gpu = dev->gpu;
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ iolink->gpu = dev->gpu;
break;
}
}
@@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index d4718d58d0f2..15843e0fc756 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -102,6 +102,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -123,6 +124,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -141,6 +143,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 71991a28a775..313183b80032 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0
depends on DRM_AMD_DC && X86
depends on DRM_AMD_DC_DCN1_0
help
- Choose this option if you want to have
- Navi support for display engine
+ Choose this option if you want to have
+ Navi support for display engine
config DRM_AMD_DC_DCN2_1
- bool "DCN 2.1 family"
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN2_0
- help
- Choose this option if you want to have
- Renoir support for display engine
+ bool "DCN 2.1 family"
+ depends on DRM_AMD_DC && X86
+ depends on DRM_AMD_DC_DCN2_0
+ help
+ Choose this option if you want to have
+ Renoir support for display engine
config DRM_AMD_DC_DSC_SUPPORT
bool "DSC support"
@@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT
depends on DRM_AMD_DC_DCN1_0
depends on DRM_AMD_DC_DCN2_0
help
- Choose this option if you want to have
- Dynamic Stream Compression support
+ Choose this option if you want to have
+ Dynamic Stream Compression support
+
+config DRM_AMD_DC_HDCP
+ bool "Enable HDCP support in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option
+ if you want to support
+ HDCP authentication
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 496cee000f10..36b3d6a5d04d 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
+endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DAL_LIBS += modules/hdcp
+endif
+
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 94911871eb9b..9a3b7bf8ab0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
+ifdef CONFIG_DRM_AMD_DC_HDCP
+AMDGPUDM += amdgpu_dm_hdcp.o
+endif
+
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a52f0b13a2c8..7aac9568d3be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -37,6 +37,9 @@
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
#include "amdgpu_pm.h"
#include "amd_shared.h"
@@ -67,6 +70,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -143,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
/*
* dm_vblank_get_counter
*
@@ -263,6 +273,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -407,6 +424,13 @@ static void dm_vupdate_high_irq(void *interrupt_params)
}
}
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -646,11 +670,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct dc_callback_init init_params;
+#endif
+
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&init_params, 0, sizeof(init_params));
+#endif
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
@@ -688,7 +719,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
*/
if (adev->flags & AMD_IS_APU &&
adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN)
+ adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
@@ -697,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
init_data.flags.multi_mon_pp_mclk_switch = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
init_data.flags.power_down_display_on_boot = true;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
@@ -713,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ dc_hardware_init(adev->dm.dc);
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -723,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -764,6 +812,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc)
+ dc_deinit_callbacks(adev->dm.dc);
+#endif
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -897,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int ret = 0;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
- aconnector, aconnector->base.base.id);
+ aconnector,
+ aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to start MST\n");
- ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
- return ret;
- }
+ aconnector->dc_link->type =
+ dc_connection_single;
+ break;
}
+ }
}
+ drm_connector_list_iter_end(&iter);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
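This hunk, and the s3_handle_mst() one below, migrate connector walks from list_for_each_entry() under connection_mutex to the drm_connector_list_iter API, which takes its own per-connector references and removes the need to hold a modeset lock across the loop. A minimal sketch of the pattern (dev is any struct drm_device):

    struct drm_connector_list_iter iter;
    struct drm_connector *connector;

    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
            /* connector holds a reference here; sleeping is allowed */
    }
    drm_connector_list_iter_end(&iter);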
@@ -940,6 +1000,11 @@ static int dm_late_init(void *handle)
params.backlight_lut_array_size = 16;
params.backlight_lut_array = linear_lut;
+	/* Min backlight level after ABM reduction; don't allow below 1%:
+	 * 0xFFFF * 0.01 = 0x28F
+ */
+ params.min_abm_backlight = 0x28F;
+
/* todo will enable for navi10 */
if (adev->asic_type <= CHIP_RAVEN) {
ret = dmcu_load_iram(dmcu, params);
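Worked out, the cap above is: 0xFFFF = 65535, and 65535 * 0.01 = 655.35, which truncates to 655 = 0x28F, i.e. the floor is exactly one percent of the 16-bit backlight range.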
@@ -955,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
@@ -973,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
- ret = drm_dp_mst_topology_mgr_resume(mgr);
+ ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
@@ -989,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -1019,7 +1082,7 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
@@ -1163,6 +1226,7 @@ static int dm_resume(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state;
@@ -1185,17 +1249,18 @@ static int dm_resume(void *handle)
/* program HPD filter */
dc_resume(dm->dc);
- /* On resume we need to rewrite the MSTM control bits to enamble MST*/
- s3_handle_mst(ddev, false);
-
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
*/
amdgpu_dm_irq_resume_early(adev);
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
/* Do detection*/
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -1223,6 +1288,7 @@ static int dm_resume(void *handle)
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
}
+ drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
@@ -1438,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1452,6 +1523,9 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct amdgpu_device *adev = dev->dev_private;
+#endif
/*
* In case of failure or MST no need to update connector status or notify the OS
@@ -1459,6 +1533,10 @@ static void handle_hpd_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1577,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param)
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ union hpd_irq_data hpd_irq_data;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+#endif
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1586,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
+#else
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+#endif
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -1621,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param)
drm_kms_helper_hotplug_event(dev);
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
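The CP_IRQ bit tested above lives in the DPCD DEVICE_SERVICE_IRQ_VECTOR register (0x201). With the generic DP helpers the equivalent check would look like this sketch, where aux and the handler are stand-ins:

    u8 irq_vector;

    if (drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, &irq_vector) == 1 &&
        (irq_vector & DP_CP_IRQ))
            handle_cp_irq();        /* hypothetical: kick the HDCP state machine */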
@@ -2334,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
+ if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+ amdgpu_dm_set_psr_caps(link);
}
@@ -3311,8 +3406,12 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
- memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
@@ -3322,6 +3421,9 @@ static void fill_stream_properties_from_drm_display_mode(
if (drm_mode_is_420_only(info, mode_in)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -3346,6 +3448,13 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
}
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
timing_out->h_sync_width =
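The VIC plumbing added above reuses the infoframe helpers rather than re-deriving CEA mode numbers by hand; standalone, the same lookup is just (connector and mode assumed):

    struct hdmi_avi_infoframe frame;

    if (drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode) == 0)
            vic = frame.video_code;   /* CEA-861 VIC; 0 when the mode has none */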
@@ -3566,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->dm_stream_context = aconnector;
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -3621,8 +3733,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_link_get_link_cap(aconnector->dc_link));
if (dsc_caps.is_dsc_supported)
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg))
@@ -3639,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+ if (stream->link->psr_feature_enabled) {
+ struct dc *core_dc = stream->link->ctx->dc;
+
+ if (dc_is_dmcu_initialized(core_dc)) {
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ stream->psr_version = dmcu->dmcu_version.psr_version;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+ }
+ }
finish:
dc_sink_release(sink);
@@ -4114,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
result = MODE_OK;
else
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->vdisplay,
mode->hdisplay,
+ mode->vdisplay,
mode->clock,
dc_result);
@@ -4494,7 +4619,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
@@ -4837,7 +4962,13 @@ static int to_drm_connector_type(enum signal_type st)
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
@@ -5082,6 +5213,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ drm_connector_attach_content_protection_property(&aconnector->base, false);
+#endif
}
}
@@ -5324,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static bool is_content_protection_different(struct drm_connector_state *state,
+ const struct drm_connector_state *old_state,
+ const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	/* CP is being re-enabled, ignore this */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ return false;
+ }
+
+	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+	/* Check that something is connected and enabled; otherwise we would start
+	 * HDCP with nothing attached (hot-plug, headless S3, DPMS)
+ */
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
+ aconnector->dc_sink != NULL)
+ return true;
+
+ if (old_state->content_protection == state->content_protection)
+ return false;
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ return true;
+
+ return false;
+}
+
+static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
+ else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
+}
+#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
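In short, is_content_protection_different() above folds the uapi rules into three cases: ENABLED -> DESIRED from userspace is treated as a no-op re-enable, UNDESIRED -> ENABLED (the S3 restore shape) is downgraded to DESIRED, and only a real DESIRED or UNDESIRED request with a live, DPMS-on sink triggers HDCP work. From userspace the request side is a single property write; a hypothetical libdrm sketch, with prop_id assumed to be the id of the connector's "Content Protection" property:

    drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
                             prop_id, 1 /* "Desired" enum value */);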
@@ -5665,6 +5847,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
+ bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5710,6 +5893,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
+ if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+ swizzle = false;
+
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -5864,6 +6050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
+ bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -5899,14 +6086,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
-
mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
+
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->psr_version &&
+ !acrtc_state->stream->link->psr_feature_enabled)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_allow_active &&
+ swizzle) {
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ }
+
mutex_unlock(&dm->dc_lock);
}
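The net PSR policy in this function: any update heavier than UPDATE_TYPE_FAST disables PSR before the stream commit, link PSR setup is deferred until the first full update on a PSR-capable stream, and PSR is only re-armed on fast updates when every committed plane reports a non-zero gfx9 swizzle mode, so a linear scanout surface anywhere in the stack keeps PSR off.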
@@ -6215,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
crtc->hwmode = new_crtc_state->mode;
} else if (modereset_required(new_crtc_state)) {
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
-
/* i.e. reset mode */
- if (dm_old_crtc_state->stream)
+ if (dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
+
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
}
} /* for_each_crtc_in_state() */
@@ -6248,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ new_crtc_state = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ continue;
+ }
+
+ if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
+ }
+#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -6287,9 +6516,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
+ stream_update.stream = dm_new_crtc_state->stream;
if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ dm_new_con_state, dm_new_crtc_state->stream);
stream_update.src = dm_new_crtc_state->stream->src;
stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -7158,7 +7388,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
-
+ stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
@@ -7569,3 +7799,92 @@ update:
freesync_capable);
}
+static void amdgpu_dm_set_psr_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return;
+ if (link->type == dc_connection_none)
+ return;
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ dpcd_data, sizeof(dpcd_data))) {
+		link->psr_feature_enabled = dpcd_data[0] ? true : false;
+ DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ }
+}
+
+/*
+ * amdgpu_dm_link_setup_psr() - configure psr link
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+{
+ struct dc_link *link = NULL;
+ struct psr_config psr_config = {0};
+ struct psr_context psr_context = {0};
+ struct dc *dc = NULL;
+ bool ret = false;
+
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+ dc = link->ctx->dc;
+
+ psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+
+ if (psr_config.psr_version > 0) {
+ psr_config.psr_exit_link_training_required = 0x1;
+ psr_config.psr_frame_capture_indication_req = 0;
+ psr_config.psr_rfb_setup_time = 0x37;
+ psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+ psr_config.allow_smu_optimizations = 0x0;
+
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+ }
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+
+ return ret;
+}
+
+/*
+ * amdgpu_dm_psr_enable() - enable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+{
+ struct dc_link *link = stream->link;
+ struct dc_static_screen_events triggers = {0};
+
+ DRM_DEBUG_DRIVER("Enabling psr...\n");
+
+ triggers.cursor_update = true;
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ dc_stream_set_static_screen_events(link->ctx->dc,
+ &stream, 1,
+ &triggers);
+
+ return dc_link_set_psr_allow_active(link, true, false);
+}
+
+/*
+ * amdgpu_dm_psr_disable() - disable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+{
+ DRM_DEBUG_DRIVER("Disabling psr...\n");
+
+ return dc_link_set_psr_allow_active(stream->link, false, true);
+}
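For comparison, amdgpu_dm_set_psr_caps() above reads the same sink capability the generic helpers expose; a sketch using the common DP AUX API (aux assumed to be the connector's struct drm_dp_aux):

    u8 psr_caps[EDP_PSR_RECEIVER_CAP_SIZE];
    bool sink_has_psr = false;

    if (drm_dp_dpcd_read(aux, DP_PSR_SUPPORT, psr_caps,
                         sizeof(psr_caps)) == sizeof(psr_caps))
            sink_has_psr = psr_caps[0] != 0;  /* byte 0 is the sink PSR version */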
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index c8c525a2b505..77c5166e6b08 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps {
* @display_indexes_num: Max number of display streams supported
* @irq_handler_list_table_lock: Synchronizes access to IRQ tables
* @backlight_dev: Backlight control device
+ * @backlight_link: Link on which to control backlight
+ * @backlight_caps: Capabilities of the backlight device
+ * @freesync_module: Module handling freesync calculations
+ * @fw_dmcu: Reference to DMCU firmware
+ * @dmcu_fw_version: Version of the DMCU firmware
+ * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
@@ -128,7 +134,7 @@ struct amdgpu_display_manager {
u16 display_indexes_num;
/**
- * @atomic_obj
+ * @atomic_obj:
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
@@ -225,6 +231,9 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct hdcp_workqueue *hdcp_workqueue;
+#endif
struct drm_atomic_state *cached_state;
@@ -234,6 +243,8 @@ struct amdgpu_display_manager {
uint32_t dmcu_fw_version;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
+ * @soc_bounding_box:
+ *
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
@@ -287,6 +298,7 @@ struct amdgpu_dm_connector {
uint32_t debugfs_dpcd_address;
uint32_t debugfs_dpcd_size;
#endif
+ bool force_yuv420_output;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b43bb7f90e4e..2233d293a707 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
NULL);
+ dc_gamma_release(&gamma);
+
return res ? 0 : -ENOMEM;
}
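The one-line fix above closes a reference leak: the gamma created at the top of __set_legacy_tf() was never released once the regamma params were calculated. The pattern it restores, sketched with the helpers this file already uses (dc_gamma_create() assumed to be the no-argument DC allocator used elsewhere here; fill step abbreviated):

    struct dc_gamma *gamma = dc_gamma_create();    /* holds one reference */

    if (!gamma)
            return -ENOMEM;
    /* ... fill gamma from the DRM LUT ... */
    res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL);
    dc_gamma_release(&gamma);                      /* always pair with create */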
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index a549c7c717dd..eaad9099bc0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
}
/* Configure dithering */
- if (!dm_need_crc_dither(source))
+ if (!dm_need_crc_dither(source)) {
dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
- else
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_DISABLE);
+ } else {
dc_stream_set_dither_option(stream_state,
DITHER_OPTION_DEFAULT);
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_AUTO);
+ }
unlock:
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f3dfb2887ae0..bdb37e611015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -942,6 +942,52 @@ static const struct {
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+/*
+ * Force YUV420 output if available from the given mode
+ */
+static int force_yuv420_output_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ connector->force_yuv420_output = (bool)val;
+
+ return 0;
+}
+
+/*
+ * Check if YUV420 is forced when available from the given mode
+ */
+static int force_yuv420_output_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ *val = connector->force_yuv420_output;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
+ force_yuv420_output_set, "%llu\n");
+
+/*
+ * Read PSR state
+ */
+static int psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ uint32_t psr_state = 0;
+
+ dc_link_get_psr_state(link, &psr_state);
+
+ *val = psr_state;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -955,6 +1001,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
dp_debugfs_entries[i].fops);
}
}
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+
+ debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
+ &force_yuv420_output_fops);
}
/*
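Both new knobs land in the connector's debugfs directory once the card is up; the exact path depends on the DRM minor and connector name, e.g. (illustrative paths only) /sys/kernel/debug/dri/0/eDP-1/psr_state to read the PSR state and .../force_yuv420_output to toggle the YUV420 override.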
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
new file mode 100644
index 000000000000..77181ddf6c8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_hdcp.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dm_helpers.h"
+#include <drm/drm_hdcp.h>
+
+static bool
+lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+ struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
+}
+
+static bool
+lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
+}
+
+static void process_output(struct hdcp_workqueue *hdcp_work)
+{
+ struct mod_hdcp_output output = hdcp_work->output;
+
+ if (output.callback_stop)
+ cancel_delayed_work(&hdcp_work->callback_dwork);
+
+ if (output.callback_needed)
+ schedule_delayed_work(&hdcp_work->callback_dwork,
+ msecs_to_jiffies(output.callback_delay));
+
+ if (output.watchdog_timer_stop)
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ if (output.watchdog_timer_needed)
+ schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
+ msecs_to_jiffies(output.watchdog_timer_delay));
+}
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+ schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ schedule_work(&hdcp_w->cpirq_work);
+}
+
+static void event_callback(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
+ callback_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+static void event_property_update(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+ struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ long ret;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+ if (aconnector->base.state->commit) {
+ ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+
+ if (ret == 0) {
+			DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
+ hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+
+ if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ else
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void event_property_validate(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work =
+ container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
+ struct mod_hdcp_display_query query;
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+
+ mutex_lock(&hdcp_work->mutex);
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+
+ if (query.encryption_status != hdcp_work->encryption_status) {
+ hdcp_work->encryption_status = query.encryption_status;
+ schedule_work(&hdcp_work->property_update_work);
+ }
+
+ schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_watchdog_timer(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work),
+ struct hdcp_workqueue,
+ watchdog_timer_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_cpirq(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+
+void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+{
+ int i = 0;
+
+ for (i = 0; i < hdcp_work->max_link; i++) {
+ cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ }
+
+ kfree(hdcp_work);
+}
+
+static void update_config(void *handle, struct cp_psp_stream_config *config)
+{
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
+ int link_index = aconnector->dc_link->link_index;
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+
+ if (aconnector->dc_sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+
+ display->controller = CONTROLLER_ID_D0 + config->otg_inst;
+ display->dig_fe = config->stream_enc_inst;
+ link->dig_be = config->link_enc_inst;
+ link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
+ link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->adjust.hdcp2.disable = 1;
+}
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+{
+	int max_caps = dc->caps.max_links;
+	struct hdcp_workqueue *hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
+ int i = 0;
+
+ if (hdcp_work == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->max_link = max_caps;
+
+ for (i = 0; i < max_caps; i++) {
+ mutex_init(&hdcp_work[i].mutex);
+
+ INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
+ INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
+ INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
+ INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
+ INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
+
+ hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
+ hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ }
+
+ cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->handle = hdcp_work;
+
+ return hdcp_work;
+
+fail_alloc_context:
+ kfree(hdcp_work);
+
+ return NULL;
+}
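Every externally visible entry point in this new file follows the same shape: take the per-link mutex, feed the mod_hdcp state machine, then let process_output() arm or cancel the delayed works the module requested. Sketched (MOD_HDCP_EVENT_X is a placeholder event name):

    mutex_lock(&hdcp_w->mutex);
    mod_hdcp_process_event(&hdcp_w->hdcp, MOD_HDCP_EVENT_X, &hdcp_w->output);
    process_output(hdcp_w);   /* schedule/cancel callback and watchdog works */
    mutex_unlock(&hdcp_w->mutex);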
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
new file mode 100644
index 000000000000..d3ba505d0696
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
+#define AMDGPU_DM_AMDGPU_DM_HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp.h"
+#include "dc.h"
+#include "dm_cp_psp.h"
+
+struct mod_hdcp;
+struct mod_hdcp_link;
+struct mod_hdcp_display;
+struct cp_psp;
+
+struct hdcp_workqueue {
+ struct work_struct cpirq_work;
+ struct work_struct property_update_work;
+ struct delayed_work callback_dwork;
+ struct delayed_work watchdog_timer_dwork;
+ struct delayed_work property_validate_dwork;
+ struct amdgpu_dm_connector *aconnector;
+ struct mutex mutex;
+
+ struct mod_hdcp hdcp;
+ struct mod_hdcp_output output;
+ struct mod_hdcp_display display;
+ struct mod_hdcp_link link;
+
+ enum mod_hdcp_encryption_status encryption_status;
+ uint8_t max_link;
+};
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector);
+void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index);
+void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_destroy(struct hdcp_workqueue *work);
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ee1dc75f5ddc..11e5784aa62a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
- if (sad_count <= 0) {
- DRM_INFO("SADs count is: %d, don't need to read it\n",
- sad_count);
+ if (sad_count < 0)
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return result;
- }
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
@@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream)
{
@@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->mst_port)
- return false;
+ return ACT_FAILED;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
- return false;
+ return ACT_FAILED;
ret = drm_dp_check_act_status(mst_mgr);
if (ret)
- return false;
+ return ACT_FAILED;
- return true;
+ return ACT_SUCCESS;
}
bool dm_helpers_dp_mst_send_payload_allocation(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index fa5d503d379c..64445c4cc4c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
@@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
true);
}
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
@@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
false);
}
}
+ drm_connector_list_iter_end(&iter);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 16218a202b59..2bf8534c18fb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,7 +36,9 @@
#include "dc_link_ddc.h"
#include "i2caux_interface.h"
-
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
@@ -113,6 +115,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
@@ -123,31 +126,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
return result;
}
-static enum drm_connector_status
-dm_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
-
- enum drm_connector_status status =
- drm_dp_mst_detect_port(
- connector,
- &master->mst_mgr,
- aconnector->port);
-
- return status;
-}
-
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
- if (amdgpu_dm_connector->edid) {
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
- }
+ kfree(amdgpu_dm_connector->edid);
+ amdgpu_dm_connector->edid = NULL;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
@@ -163,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+ amdgpu_dm_connector->debugfs_dpcd_address = 0;
+ amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
return drm_dp_mst_connector_late_register(connector, port);
}
@@ -177,7 +169,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
}
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
- .detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset,
@@ -245,17 +236,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+dm_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+}
+
+static int
+dm_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
- return &amdgpu_dm_connector->mst_encoder->base;
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ aconnector->port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = dm_mst_best_encoder,
+ .atomic_best_encoder = dm_mst_atomic_best_encoder,
+ .detect_ctx = dm_dp_mst_detect,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
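The move from .detect to .detect_ctx matters for MST: the new hook receives the caller's acquire context, so drm_dp_mst_detect_port() can take the locks it needs without deadlocking against a concurrent modeset. The minimal shape of such a hook (a sketch, not this driver's code):

    static int my_detect_ctx(struct drm_connector *connector,
                             struct drm_modeset_acquire_ctx *ctx, bool force)
    {
            /* may grab further modeset locks through ctx; returning a negative
             * error (typically -EDEADLK) makes the probe helper back off and retry */
            return connector_status_connected;
    }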
@@ -416,7 +419,11 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
- aconnector->base.name, dm->adev->dev);
+ &aconnector->base);
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ return;
+
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index f4cfa0caeba8..55a520a63712 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -345,7 +345,7 @@ bool dm_pp_get_clock_levels_by_type(
/* Error in pplib. Provide default values. */
return true;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
if (smu_get_clock_by_type(&adev->smu,
dc_to_pp_clock_type(clk_type),
&pp_clks)) {
@@ -365,7 +365,7 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) {
if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
validation_clks.engine_max_clock = 72000;
@@ -506,8 +506,8 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
- else if (adev->smu.funcs &&
- adev->smu.funcs->display_clock_voltage_request)
+ else if (adev->smu.ppt_funcs &&
+ adev->smu.ppt_funcs->display_clock_voltage_request)
ret = smu_display_clock_voltage_request(&adev->smu,
&pp_clock_request);
if (ret)
@@ -527,7 +527,7 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else if (adev->smu.funcs &&
- adev->smu.funcs->set_watermarks_for_clock_ranges)
+ else
smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges);
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -604,7 +603,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
pp_funcs->notify_smu_enable_pwe(pp_handle);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
smu_notify_smu_enable_pwe(&adev->smu);
}
@@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
wm_with_clock_ranges.wm_dmif_clocks_ranges;
@@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- if (!smu->funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
-
- /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
- * 1: fail
- */
- if (smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges))
- return PP_SMU_RESULT_UNSUPPORTED;
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
return PP_SMU_RESULT_OK;
}
@@ -727,10 +717,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */
if (smu_set_azalia_d3_pme(smu))
return PP_SMU_RESULT_FAIL;
@@ -743,10 +733,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
if (smu_set_display_count(smu, count))
return PP_SMU_RESULT_FAIL;
@@ -759,10 +749,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */
+	/* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL; 1: fail */
if (smu_set_deep_sleep_dcefclk(smu, mhz))
return PP_SMU_RESULT_FAIL;
@@ -777,13 +767,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -799,13 +789,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_mem_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -835,7 +825,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
switch (clock_id) {
@@ -853,7 +843,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
}
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -869,13 +859,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc)
+ if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
+ if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -894,13 +884,97 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
if (!smu->ppt_funcs->get_uclk_dpm_states)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
+ if (!smu_get_uclk_dpm_states(smu,
clock_values_in_khz, num_states))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
}
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+enum pp_smu_status pp_rn_get_dpm_clock_table(
+ struct pp_smu *pp, struct dpm_clocks *clock_table)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu->ppt_funcs->get_dpm_clock_table)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu_get_dpm_clock_table(smu, clock_table))
+ return PP_SMU_RESULT_OK;
+
+ return PP_SMU_RESULT_FAIL;
+}
+
+enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+ wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+ wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_mhz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_mhz;
+ }
+
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+
+ return PP_SMU_RESULT_OK;
+}
+#endif
+
void dm_pp_get_funcs(
struct dc_context *ctx,
struct pp_smu_funcs *funcs)
@@ -945,6 +1019,15 @@ void dm_pp_get_funcs(
funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break;
#endif
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+ case DCN_VERSION_2_1:
+ funcs->ctx.ver = PP_SMU_VER_RN;
+ funcs->rn_funcs.pp_smu.dm = ctx;
+ funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
+ funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
+ break;
+#endif
default:
DRM_ERROR("smu version is not supported !\n");
break;
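
The new DCN_VERSION_2_1 case follows the same pattern as the existing branches: dm_pp_get_funcs() keys one switch on the DCN version and fills in the matching function table. A minimal sketch of that dispatch, with invented enum values and a trimmed-down table:

#include <stdio.h>

enum dcn_version { DCN_VERSION_2_0, DCN_VERSION_2_1 };
enum pp_smu_ver { PP_SMU_VER_NV, PP_SMU_VER_RN, PP_SMU_VER_UNSUPPORTED };

struct pp_smu_funcs {
        enum pp_smu_ver ver;
        void (*set_wm_ranges)(void);
};

static void rn_set_wm_ranges(void) { puts("RN wm ranges"); }
static void nv_set_wm_ranges(void) { puts("NV wm ranges"); }

static void get_funcs(enum dcn_version v, struct pp_smu_funcs *funcs)
{
        switch (v) {
        case DCN_VERSION_2_0:
                funcs->ver = PP_SMU_VER_NV;
                funcs->set_wm_ranges = nv_set_wm_ranges;
                break;
        case DCN_VERSION_2_1:           /* Renoir path added by this hunk */
                funcs->ver = PP_SMU_VER_RN;
                funcs->set_wm_ranges = rn_set_wm_ranges;
                break;
        default:
                funcs->ver = PP_SMU_VER_UNSUPPORTED;
                break;
        }
}

int main(void)
{
        struct pp_smu_funcs f = {0};

        get_funcs(DCN_VERSION_2_1, &f);
        if (f.ver == PP_SMU_VER_RN)
                f.set_wm_ranges();
        return 0;
}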
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 627982cb15d2..a160512a2f04 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -48,6 +48,10 @@ DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DC_LIBS += hdcp
+endif
+
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 221e0f56389f..823843cd2613 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info(
/* Sort voltage table from low to high*/
if (result == BP_RESULT_OK) {
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j].max_supported_clk <
info->disp_clk_voltage[j-1].max_supported_clk) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index dff65c0fe82f..7873abea4112 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
-
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j-1].max_supported_clk
) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
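
The two bios_parser hunks above replace open-coded three-line swaps with the kernel's swap() macro inside an insertion-style sort of the voltage table. A standalone sketch, with a userspace approximation of swap() and a simplified struct:

#include <stdio.h>
#include <stdint.h>

#define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct clock_voltage_caps {
        uint32_t max_supported_clk;
        uint32_t voltage;
};

int main(void)
{
        struct clock_voltage_caps v[] = { {600, 3}, {300, 1}, {450, 2} };
        const uint32_t n = sizeof(v) / sizeof(v[0]);
        uint32_t i, j;

        /* sort low to high, as in construct_integrated_info() */
        for (i = 1; i < n; i++)
                for (j = i; j > 0; j--)
                        if (v[j].max_supported_clk < v[j - 1].max_supported_clk)
                                swap(v[j - 1], v[j]);   /* one macro, no temp */

        for (i = 0; i < n; i++)
                printf("%u MHz @ level %u\n", v[i].max_supported_clk, v[i].voltage);
        return 0;
}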
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index c43797bea413..8828dd9c3783 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (dc->hwss.exit_optimized_pwr_state)
+ dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
+
+ if (edp_link) {
+ clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ dc_link_set_psr_allow_active(edp_link, false, false);
+ }
+
+}
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link)
+ dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false);
+
+ if (dc->hwss.optimize_pwr_state)
+ dc->hwss.optimize_pwr_state(dc, dc->current_state);
+
+}
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index c5c8c4901eed..26db1c5d4e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -239,7 +239,7 @@ int dce_set_clock(
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 64);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 64);
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
@@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
int i;
if (bp->integrated_info)
- clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
- clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0)
- clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
+ clk_mgr_dce->base.dentist_vco_freq_khz = 3600000;
}
/*update the maximum display clock for each power state*/
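
The dentist_vco_freq_khz accesses above move from clk_mgr_internal to the base struct, but the fallback chain is unchanged: BIOS integrated info first, then firmware info, then a hardcoded 3.6 GHz default. A standalone sketch of that chain (input values are illustrative):

#include <stdio.h>

static unsigned int pick_vco_khz(unsigned int integrated_info_khz,
                                 unsigned int fw_info_khz)
{
        unsigned int vco = integrated_info_khz; /* preferred source */

        if (vco == 0)
                vco = fw_info_khz;              /* fallback: firmware info */
        if (vco == 0)
                vco = 3600000;                  /* last resort: 3.6 GHz */
        return vco;
}

int main(void)
{
        printf("%u kHz\n", pick_vco_khz(0, 0));         /* 3600000 */
        printf("%u kHz\n", pick_vco_khz(0, 3500000));   /* 3500000 */
        return 0;
}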
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 7c746ef1e32e..a6c46e903ff9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 62);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr->dentist_vco_freq_khz / 62);
+ clk_mgr->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 47f529ce280a..3fab9296918a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
ASSERT(clk_mgr->pp_smu);
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_
clk_mgr->base.dprefclk_khz = 600000;
if (bp->integrated_info)
- clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) {
- clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
+ clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 3e8ac303bd52..25d7b7c6681c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
{
int i;
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ /* Loop index will match dpp->inst if resource exists,
+ * and we want to avoid dependency on dpp object
+ */
+ dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz, false);
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
-static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
{
int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
-
- uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
-
- REG_UPDATE(DENTIST_DISPCLK_CNTL,
- DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
-}
-
-static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
-{
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+ uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
+// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
}
-static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dispclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dispclk_khz = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-
- update_display_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-}
-
-static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dppclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dppclk_khz = khz;
- clk_mgr->dccg->ref_dppclk = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-
- update_global_dpp_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-}
void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
@@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc *dc = clk_mgr_base->ctx->dc;
struct pp_smu_funcs_nv *pp_smu = NULL;
int display_count;
+ bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
- int i;
if (dc->work_arounds.skip_clock_update)
return;
@@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
@@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
}
- if (dc->config.forced_clocks == false) {
- // First update display clock
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz))
- request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz);
-
- // Updating DPP clock requires some more logic
- if (!safe_to_lower) {
- // For pre-programming, we need to make sure any DPP clock that will go up has to go up
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
- // First raise the global reference if needed
- if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
- // Then raise any dividers that need raising
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
+ update_dppclk = true;
+ }
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ update_dispclk = true;
+ }
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true);
- }
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dentist(clk_mgr);
} else {
- // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs
-
- if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
-
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
-
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
-
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false);
- }
+ // if clock is being raised, increase refclk before lowering DTO
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ // always update DTOs unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
@@ -320,6 +264,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
@@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
- /* Both fclk and dppclk ref are run on the same scemi clock so we
- * need to keep the same value for both
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
*/
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+ // Both fclk and ref_dppclk run on the same scemi clock.
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
@@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
}
}
+static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->socclk_khz != b->socclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
static struct clk_mgr_funcs dcn2_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks,
.init_clocks = dcn2_init_clocks,
.enable_pme_wa = dcn2_enable_pme_wa,
.get_clock = dcn2_get_clock,
+ .are_clock_states_equal = dcn2_are_clock_states_equal,
};
@@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn2_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
} else {
/* DFS Slice 2 should be used for DPREFCLK */
@@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct(
pll_req = dc_fixpt_mul_int(pll_req, 100000);
/* integer part is now VCO frequency in kHz */
- clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
+ clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
/* Calculate the DPREFCLK in kHz.*/
clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
}
//Integrated_info table does not exist on dGPU projects so should not be referenced
//anywhere in code for dGPUs.
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index ac31a9787305..c9fd824f3c23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
#endif //__DCN20_CLK_MGR_H__
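
The dcn20_clk_mgr.c rework above replaces the per-direction voltage helpers with a single ordering rule: when dppclk is being lowered, program the per-pipe DTOs before the DENTIST divider; when it is raised, program the divider first. A sketch of that rule, assuming stub programming functions:

#include <stdbool.h>
#include <stdio.h>

static void update_dpp_dto(void) { puts("program per-pipe DTOs"); }
static void update_dentist(void) { puts("program DENTIST divider"); }

static void apply_dppclk(int cur_khz, int new_khz)
{
        bool lowering = new_khz < cur_khz;

        if (lowering) {
                /* shrink the DTO ratios before the refclk drops */
                update_dpp_dto();
                update_dentist();
        } else {
                /* raise the refclk before the DTOs catch up */
                update_dentist();
                update_dpp_dto();
        }
}

int main(void)
{
        apply_dppclk(800000, 600000);   /* lowering path */
        apply_dppclk(600000, 800000);   /* raising path */
        return 0;
}

Either order keeps the effective per-pipe rate from ever exceeding what the configuration was validated for while the two registers are out of sync.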
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 787f94d815f4..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -52,6 +52,45 @@
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
+int rn_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+ bool hdmi_present = false;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ hdmi_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /*
+ * Only notify active streams or virtual streams; virtual
+ * streams must be counted to work around the headless case,
+ * since HPD does not fire when the system is in S0i2.
+ */
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+
+ /* WA for hang on HDMI after display is turned off and back on */
+ if (display_count == 0 && hdmi_present)
+ display_count = 1;
+
+ return display_count;
+}
+
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
- bool enter_display_off = false;
bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- if (display_count == 0)
- enter_display_off = true;
+ if (dc->work_arounds.skip_clock_update)
+ return;
- if (enter_display_off == safe_to_lower) {
- rn_vbios_smu_set_display_count(clk_mgr, display_count);
+ /*
+ * If it is safe to lower but we are already in the lower state,
+ * we don't have to do anything; if safe to lower is false, we just
+ * go to the higher state.
+ */
+ if (safe_to_lower) {
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
+ } else {
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
}
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
@@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
- if (update_dppclk)
+ // always update DTOs unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
@@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
- s->dprefclk_khz = sb.dprefclk;
+ s->dprefclk_khz = sb.dprefclk * 1000;
}
void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -329,12 +388,96 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
+void rn_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+}
+
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ /* skip empty entries, the smu array has no holes */
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
+ ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on dcfclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* fclk will be used to select WM */
+
+ if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ }
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* Modify previous watermark range to cover up to max */
+ ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+ ranges->num_reader_wm_sets = num_valid_sets;
+
+ /* modify the min and max to make sure we cover the whole range */
+ ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* This is for writeback only; it does not matter currently as there is no writeback support */
+ ranges->num_writer_wm_sets = 1;
+ ranges->writer_wm_sets[0].wm_inst = WM_A;
+ ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+}
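
build_watermark_ranges() above derives non-overlapping fclk ranges from the DPM table (each pstate-change set starts 1 MHz above the previous level's fclk) and then widens the first and last sets to cover the whole axis. A worked sketch with an invented three-level table:

#include <stdio.h>

#define UNCONSTRAINED_MIN 0x0
#define UNCONSTRAINED_MAX 0xFFFF

struct wm_range { unsigned int min_mhz, max_mhz; };

int main(void)
{
        const unsigned int fclk_mhz[] = { 400, 800, 1200 };     /* DPM levels */
        struct wm_range r[3];
        unsigned int i, n = 3;

        for (i = 0; i < n; i++) {
                r[i].min_mhz = i ? fclk_mhz[i - 1] + 1 : 0;     /* +1: no overlap */
                r[i].max_mhz = fclk_mhz[i];
        }

        /* widen the ends so every possible fclk lands in some set */
        r[0].min_mhz = UNCONSTRAINED_MIN;
        r[n - 1].max_mhz = UNCONSTRAINED_MAX;

        for (i = 0; i < n; i++)
                printf("set %u: %u..%u MHz\n", i, r[i].min_mhz, r[i].max_mhz);
        return 0;
}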
+
+static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
+ struct pp_smu_wm_range_sets ranges = {0};
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
+
+ if (!debug->disable_pplib_wm_range) {
+ build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ }
+
+}
+
static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
- .init_clocks = dcn2_init_clocks,
+ .init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
- /* .dump_clk_registers = rn_dump_clk_registers */
+ /* .dump_clk_registers = rn_dump_clk_registers, */
+ .notify_wm_ranges = rn_notify_wm_ranges
};
struct clk_bw_params rn_bw_params = {
@@ -405,80 +548,50 @@ struct clk_bw_params rn_bw_params = {
}
};
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
- int i, num_valid_sets;
-
- num_valid_sets = 0;
-
- for (i = 0; i < WM_SET_COUNT; i++) {
- /* skip empty entries, the smu array has no holes*/
- if (!bw_params->wm_table.entries[i].valid)
- continue;
-
- ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
- ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
-
- if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
- if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
- else {
- /* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
- }
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
-
- } else {
- /* unconstrained for memory retraining */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ int i;
- /* Modify previous watermark range to cover up to max */
- ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- }
- num_valid_sets++;
+ for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+ if (clock_table->DcfClocks[i].Vol == voltage)
+ return clock_table->DcfClocks[i].Freq;
}
- ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
- ranges->num_reader_wm_sets = num_valid_sets;
-
- /* modify the min and max to make sure we cover the whole range*/
- ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
-
- /* This is for writeback only, does not matter currently as no writeback support*/
- ranges->num_writer_wm_sets = 1;
- ranges->writer_wm_sets[0].wm_inst = WM_A;
- ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
-
+ ASSERT(0);
+ return 0;
}
-void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
{
- int i;
+ int i, j;
+
+ j = -1;
ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
- for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
- if (clock_table->FClocks[i].Freq == 0)
+ /* Find the lowest DPM level; FCLK is filled in reverse order */
+
+ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+ if (clock_table->FClocks[i].Freq != 0) {
+ j = i;
break;
+ }
+ }
+
+ if (j == -1) {
+ /* clock table is all 0s, just use our own hardcoded defaults */
+ ASSERT(0);
+ return;
+ }
+
+ bw_params->clk_table.num_entries = j + 1;
- bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
- bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
- bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
+ for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
+ bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
}
- bw_params->clk_table.num_entries = i;
bw_params->vram_type = asic_id->vram_type;
bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
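
rn_clk_mgr_helper_populate_bw_params() above accounts for the SMU filling FClocks[] highest level first: it finds the last non-zero entry, then copies the table out in reverse while looking up each level's DCF clock by voltage. A standalone sketch of that reversal, with simplified structures and made-up levels:

#include <stdio.h>

#define NUM_LEVELS 4

struct lvl { unsigned int freq, vol; };

int main(void)
{
        /* reverse-ordered as delivered by firmware; trailing zeros unused */
        const struct lvl fclk[NUM_LEVELS] = { {1200, 3}, {800, 2}, {400, 1}, {0, 0} };
        struct lvl out[NUM_LEVELS];
        int i, j = -1, n;

        for (i = NUM_LEVELS - 1; i >= 0; i--)   /* lowest valid DPM level */
                if (fclk[i].freq != 0) {
                        j = i;
                        break;
                }
        if (j < 0)
                return 1;                       /* table all zeros: bail */

        n = j + 1;
        for (i = 0; i < n; i++, j--)            /* copy back in ascending order */
                out[i] = fclk[j];

        for (i = 0; i < n; i++)
                printf("entry %d: %u MHz @ vol %u\n", i, out[i].freq, out[i].vol);
        return 0;
}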
@@ -486,7 +599,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
- if (clock_table->FClocks[i].Freq == 0) {
+ if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
@@ -534,57 +647,42 @@ void rn_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
clk_mgr->base.dprefclk_khz = 600000;
} else {
struct clk_log_info log_info = {0};
/* TODO: Check we get what we expect during bringup */
- clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
- clk_mgr->base.dprefclk_khz = s.dprefclk;
-
- if (clk_mgr->base.dprefclk_khz != 600000) {
- clk_mgr->base.dprefclk_khz = 600000;
- ASSERT(1); //TODO: Renoir follow up.
- }
+ /* Convert dprefclk units from MHz to kHz */
+ /* Value already divided by 10, some resolution lost */
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
/* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dprefclk_khz == 0)
+ if (clk_mgr->base.dprefclk_khz == 0) {
+ ASSERT(clk_mgr->base.dprefclk_khz == 600000);
clk_mgr->base.dprefclk_khz = 600000;
+ }
}
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = &rn_bw_params;
- if (pp_smu) {
+ if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
- clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+ rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
}
- /*
- * Notify SMU which set of WM should be selected for different ranges of fclk
- * On Renoir there is a maximumum of 4 DF pstates supported, could be less
- * depending on DDR speed and fused maximum fclk.
- */
- if (!debug->disable_pplib_wm_range) {
- struct pp_smu_wm_range_sets ranges = {0};
-
- build_watermark_ranges(clk_mgr->base.bw_params, &ranges);
-
- /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
- if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
+ /* enable power features when display count goes to 0 */
+ rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
}
-
- /* enable powerfeatures when displaycount goes to 0 */
- if (!debug->disable_48mhz_pwrdwn)
- rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index aadec06fde10..e4322fa5475b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -26,11 +26,13 @@
#ifndef __RN_CLK_MGR_H__
#define __RN_CLK_MGR_H__
+#include "clk_mgr.h"
+#include "dm_pp_smu.h"
+
struct rn_clk_registers {
uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
};
-
void rn_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 50984c1811bb..cb7c0e8b7e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -33,7 +33,7 @@
#include "mp/mp_12_0_0_sh_mask.h"
#define REG(reg_name) \
- (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
@@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
int actual_dispclk_set_mhz = -1;
struct dc *core_dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- uint32_t clk = requested_dispclk_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
- clk);
+ requested_dispclk_khz / 1000);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
@@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
{
int actual_dcfclk_set_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_dcfclk_set_mhz;
actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int
{
int actual_min_ds_dcfclk_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_min_ds_dcfclk_mhz;
actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
@@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_
{
int actual_dppclk_set_mhz = -1;
- uint32_t clk = requested_dpp_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
-
actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
- clk);
+ requested_dpp_khz / 1000);
return actual_dppclk_set_mhz * 1000;
}
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count)
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
{
+ int disp_count;
+
+ if (state == DCN_PWR_STATE_LOW_POWER)
+ disp_count = 0;
+ else
+ disp_count = 1;
+
rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDisplayCount,
- display_count);
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayCount,
+ disp_count);
}
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr)
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
- 0);
+ enable);
}
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index da3a49487c6d..ccc01879c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count);
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state);
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
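
The version checks above replace the 0xFFFFFFFF placeholder: the hard-min and deep-sleep DCFCLK messages are only sent once the SMU firmware is at least 0x370c00, otherwise -1 is returned without touching the hardware. A minimal sketch of that guard; the message ID and versions below are illustrative:

#include <stdio.h>
#include <stdint.h>

static int send_msg(uint32_t smu_ver, uint32_t msg, uint32_t param)
{
        if (smu_ver < 0x370c00)
                return -1;      /* firmware too old for this message */
        printf("msg 0x%x param %u sent\n", msg, param);
        return (int)param;      /* pretend the SMU echoed the value back */
}

int main(void)
{
        printf("old fw: %d\n", send_msg(0x370b00, 0x0D, 600));
        printf("new fw: %d\n", send_msg(0x371500, 0x0D, 600));
        return 0;
}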
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4b8819c27fcd..32f31bf91915 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -194,7 +194,7 @@ static bool create_links(
}
}
- if (!should_destory_link) {
+ if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
return false;
}
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates*/
+ int i = 0;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
@@ -765,8 +786,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
- dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+#endif
}
current_ctx = dc->current_state;
@@ -789,9 +815,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (false == construct(dc, init_params))
goto construct_fail;
- /*TODO: separate HW and SW initialization*/
- dc->hwss.init_hw(dc);
-
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
full_pipe_count--;
@@ -824,9 +847,24 @@ alloc_fail:
return NULL;
}
+void dc_hardware_init(struct dc *dc)
+{
+ dc->hwss.init_hw(dc);
+}
+
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ dc->ctx->cp_psp = init_params->cp_psp;
+#endif
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+#endif
}
void dc_destroy(struct dc **dc)
@@ -905,15 +943,11 @@ static void program_timing_sync(
/* set first pipe with plane as master */
for (j = 0; j < group_size; j++) {
- struct pipe_ctx *temp;
-
if (pipe_set[j]->plane_state) {
if (j == 0)
break;
- temp = pipe_set[0];
- pipe_set[0] = pipe_set[j];
- pipe_set[j] = temp;
+ swap(pipe_set[0], pipe_set[j]);
break;
}
}
@@ -970,40 +1004,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_crtc_timing *crtc_timing)
{
struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
struct dc_link *link = sink->link;
- unsigned int enc_inst, tg_inst;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ // Seamless boot only supports a single DP or eDP port so far
+ if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
+ sink->sink_signal != SIGNAL_TYPE_EDP)
+ return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (enc_inst >= dc->res_pool->pipe_count)
+ if (enc_inst == ENGINE_ID_UNKNOWN)
return false;
- if (enc_inst >= dc->res_pool->stream_enc_count)
- return false;
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
+
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
- dc->res_pool->stream_enc[enc_inst]);
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
if (tg_inst >= dc->res_pool->timing_generator_count)
return false;
tg = dc->res_pool->timing_generators[tg_inst];
- if (!tg->funcs->is_matching_timing)
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
return false;
- if (!tg->funcs->is_matching_timing(tg, crtc_timing))
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
+
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ return false;
+
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ return false;
+
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
if (dc_is_dp_signal(link->connector_signal)) {
@@ -1016,6 +1097,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
}
return true;
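
The rewritten dc_validate_seamless_boot_timing() above reads the live CRTC timing from the OTG and compares it field by field against the requested mode instead of relying on is_matching_timing(). A compact sketch of the same comparison using a field-match macro (the struct is trimmed to four fields; the real check also covers borders, porches, sync widths, pixel clock and pixel format):

#include <stdbool.h>
#include <stdio.h>

struct crtc_timing {
        unsigned int h_total, h_addressable;
        unsigned int v_total, v_addressable;
};

#define FIELDS_MATCH(a, b, f) ((a)->f == (b)->f)

static bool timings_match(const struct crtc_timing *want,
                          const struct crtc_timing *hw)
{
        return FIELDS_MATCH(want, hw, h_total) &&
               FIELDS_MATCH(want, hw, h_addressable) &&
               FIELDS_MATCH(want, hw, v_total) &&
               FIELDS_MATCH(want, hw, v_addressable);
}

int main(void)
{
        struct crtc_timing fw = { 2200, 1920, 1125, 1080 };     /* firmware-lit mode */
        struct crtc_timing os = { 2200, 1920, 1125, 1080 };     /* mode we want */

        printf("seamless boot %s\n", timings_match(&os, &fw) ? "ok" : "no");
        return 0;
}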
@@ -1077,15 +1172,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mode_changed)
- continue;
+ if (dc->hwss.apply_ctx_for_surface)
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context); /* use new pipe config in new context */
- }
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1104,16 +1204,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
/*
* enable stereo
@@ -1140,15 +1245,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (!dc->optimize_seamless_boot)
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
-
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
- memset(&context->commit_hints, 0, sizeof(context->commit_hints));
-
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1496,20 +1595,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- update_flags->raw = 0; // Reset all flags
-
if (u->flip_addr)
update_flags->bits.addr_update = 1;
- if (!is_surface_in_context(context, u->surface)) {
- update_flags->bits.new_plane = 1;
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
}
- if (u->surface->force_full_update) {
- update_flags->bits.full_update = 1;
- return UPDATE_TYPE_FULL;
- }
+ update_flags->raw = 0; // Reset all flags
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1567,40 +1661,43 @@ static enum surface_update_type check_update_surfaces_for_stream(
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
if (stream_status == NULL || stream_status->plane_count != surface_count)
- return UPDATE_TYPE_FULL;
+ overall_type = UPDATE_TYPE_FULL;
/* some stream updates require passive update */
if (stream_update) {
- if ((stream_update->src.height != 0) &&
- (stream_update->src.width != 0))
- return UPDATE_TYPE_FULL;
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
- if ((stream_update->dst.height != 0) &&
- (stream_update->dst.width != 0))
- return UPDATE_TYPE_FULL;
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0))
+ su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.abm_level = 1;
if (stream_update->dpms_off)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.wb_update = 1;
#endif
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
}
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
det_surface_update(dc, &updates[i]);
- if (type == UPDATE_TYPE_FULL)
- return type;
-
elevate_update_type(&overall_type, type);
}
@@ -1622,16 +1719,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
int i;
enum surface_update_type type;
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL)
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
- if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
- dc->optimized_required = true;
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Else we fall back to a mem compare.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+ }
return type;
}
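
The UPDATE_TYPE_FAST branch above prefers the new are_clock_states_equal() callback and only falls back to memcmp() up to prev_p_state_change_support when no comparator is provided. A sketch of that callback-with-fallback pattern, assuming a simplified clock struct:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dc_clocks {
        int dispclk_khz;
        int dppclk_khz;
        bool prev_p_state_change_support;       /* excluded from the memcmp */
};

struct clk_mgr {
        bool (*are_clock_states_equal)(const struct dc_clocks *,
                                       const struct dc_clocks *);
        struct dc_clocks clks;
};

static bool clocks_changed(const struct clk_mgr *mgr,
                           const struct dc_clocks *wanted)
{
        if (mgr->are_clock_states_equal)
                return !mgr->are_clock_states_equal(&mgr->clks, wanted);

        /* fallback: compare everything before the excluded tail field */
        return memcmp(&mgr->clks, wanted,
                      offsetof(struct dc_clocks, prev_p_state_change_support)) != 0;
}

int main(void)
{
        struct clk_mgr mgr = { .clks = { 600000, 600000, true } };
        struct dc_clocks want = { 600000, 480000, false };

        printf("optimize required: %d\n", clocks_changed(&mgr, &want));
        return 0;
}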
@@ -1872,6 +1982,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
struct dc_state *context)
{
int j;
+ bool should_program_abm;
// Stream updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -1952,14 +2063,21 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
- if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
- // if otg funcs defined check if blanked before programming
- if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = true;
+
+ // if OTG funcs are defined, check whether blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ } else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
- } else
- pipe_ctx->stream_res.abm->funcs->set_abm_level(
- pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
}
}
}
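The rewritten ABM block reduces the nested conditionals to one should_program_abm flag: program by default, and back off only when an is_blanked hook exists and reports the OTG as blanked. The gating, restated as a self-contained sketch (types abbreviated):

    #include <stdbool.h>

    struct tg_funcs {
            /* optional; NULL on timing generators that do not implement it */
            bool (*is_blanked)(void *tg);
    };

    static bool abm_should_program(const struct tg_funcs *funcs, void *tg)
    {
            if (funcs->is_blanked && funcs->is_blanked(tg))
                    return false; /* blanked: skip ABM programming */
            return true;
    }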
@@ -2004,7 +2122,13 @@ static void commit_planes_for_stream(struct dc *dc,
* In case of turning off screen, no need to program front end a second time.
* just return after program blank.
*/
- dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
+
return;
}
@@ -2064,10 +2188,15 @@ static void commit_planes_for_stream(struct dc *dc,
stream_status =
stream_get_status(context, pipe_ctx->stream);
- dc->hwss.apply_ctx_for_surface(
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ca20b150afcc..12ba6fdf89b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -79,7 +79,6 @@ static void destruct(struct dc_link *link)
int i;
if (link->hpd_gpio != NULL) {
- dal_gpio_close(link->hpd_gpio);
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
-static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
@@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the edp link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
@@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
-bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_alt_mode(struct dc_link *link)
{
/**
@@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link)
* This does not create remote sinks but will trigger DM
* to start MST detection if a branch is detected.
*/
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+static bool dc_link_detect_helper(struct dc_link *link,
+ enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
@@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dpcd_caps prev_dpcd_caps;
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
+ bool perform_dp_seamless_boot = false;
+
DC_LOGGER_INIT(link->ctx->logger);
if (dc_is_virtual_signal(link->connector_signal))
@@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
- read_edp_current_link_settings_on_detect(link);
+ read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
- sink_caps.transaction_type =
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
+
/* workaround: HPD high coming too early */
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
@@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- link->verified_link_cap = link->reported_link_cap;
+ dp_verify_mst_link_cap(link);
+
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
+ // For seamless boot, skip link cap verification: read the UEFI-trained settings and treat them as verified.
+ if (reason == DETECT_REASON_BOOT &&
+ dc_ctx->dc->config.power_down_display_on_boot == false &&
+ link->link_status.link_active == true)
+ perform_dp_seamless_boot = true;
+
+ if (perform_dp_seamless_boot) {
+ read_current_link_settings_on_detect(link);
+ link->verified_link_cap = link->reported_link_cap;
+ }
+
break;
}
@@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* two link trainings
*/
- /* deal with non-mst cases */
- dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ // verify link cap for SST non-seamless boot
+ if (!perform_dp_seamless_boot)
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_sink_release(prev_sink);
return true;
+
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ const struct dc *dc = link->dc;
+ bool ret;
+
+ /* get out of low power state */
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
+ ret = dc_link_detect_helper(link, reason);
+
+ /* Go back to power optimized state */
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+
+ return ret;
}
bool dc_link_get_hpd_state(struct dc_link *dc_link)
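dc_link_detect() is now only a power-state bracket around the helper: leave the optimized state, detect, restore. Generalized as a sketch (run_at_full_power is a hypothetical name; the hunk's clk_mgr_exit_optimized_pwr_state/clk_mgr_optimize_pwr_state pair plays the role of the two hooks):

    #include <stdbool.h>

    struct pwr_ops {
            void (*exit_optimized)(void *ctx); /* get out of low power state */
            void (*optimize)(void *ctx);       /* go back to low power state */
    };

    static bool run_at_full_power(const struct pwr_ops *ops, void *ctx,
                                  bool (*fn)(void *arg), void *arg)
    {
            bool ret;

            ops->exit_optimized(ctx);
            ret = fn(arg);
            ops->optimize(ctx);
            return ret;
    }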
@@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- if (!apply_seamless_boot_optimization)
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
@@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_set_fec_ready(link, false);
}
#endif
- } else
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else {
+ if (signal != SIGNAL_TYPE_VIRTUAL)
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ }
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
/* MST disable link only when no stream use the link */
@@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing(
break;
}
- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
@@ -2381,17 +2421,206 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
return true;
}
-bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
- dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
+
+ link->psr_allow_active = allow_active;
return true;
}
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_feature_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+static inline enum physical_phy_id
+transmitter_to_phy_id(enum transmitter transmitter_value)
+{
+ switch (transmitter_value) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYLD_0;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYLD_1;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYLD_2;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYLD_3;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYLD_4;
+ case TRANSMITTER_UNIPHY_F:
+ return PHYLD_5;
+ case TRANSMITTER_NUTMEG_CRT:
+ return PHYLD_6;
+ case TRANSMITTER_TRAVIS_CRT:
+ return PHYLD_7;
+ case TRANSMITTER_TRAVIS_LCD:
+ return PHYLD_8;
+ case TRANSMITTER_UNIPHY_G:
+ return PHYLD_9;
+ case TRANSMITTER_COUNT:
+ return PHYLD_COUNT;
+ case TRANSMITTER_UNKNOWN:
+ return PHYLD_UNKNOWN;
+ default:
+ WARN_ONCE(1, "Unknown transmitter value %d\n",
+ transmitter_value);
+ return PHYLD_UNKNOWN;
+ }
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *core_dc;
+ struct dmcu *dmcu;
+ int i;
+ /* update sink PSR DPCD config */
+ union dpcd_psr_configuration psr_configuration;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ core_dc = link->ctx->dc;
+ dmcu = core_dc->res_pool->dmcu;
+
+ if (!dmcu)
+ return false;
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2 */
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368, /* DP_PSR_EN_CFG, DPCD address 0x170 */
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* DMCU internally subtracts 1 from all controller id values,
+ * therefore add 1 here
+ */
+ psr_context->controllerId =
+ core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be PCIe or Uniphy (or Unknown) */
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /* PhyId is associated with the transmitter id */
+ psr_context->smuPhyId =
+ transmitter_to_phy_id(link->link_enc->transmitter);
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only set to 1 on KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->timing_generator_count;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before entering PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* skip powering down the single pipe since it blocks the cstate */
+ if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+
+ /* psr_feature_enabled == 0 indicates setup_psr did not succeed, but this
+ * should not happen since firmware should be running at this point
+ */
+ if (link->psr_feature_enabled == 0)
+ ASSERT(0);
+
+ return true;
+}
+
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
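The vsyncRateHz computation in dc_link_setup_psr() above is refresh rate = pixel clock / (v_total * h_total), with the pixel clock stored in 100 Hz units. A worked check, assuming a standard 1080p60 timing (148.5 MHz pixel clock, 2200 x 1125 total):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units */
            uint64_t v_total = 1125, h_total = 2200;

            /* same order of operations as the hunk: (clk * 100) / v_total / h_total */
            uint64_t vsync_hz = ((pix_clk_100hz * 100) / v_total) / h_total;

            printf("%llu Hz\n", (unsigned long long)vsync_hz); /* prints 60 */
            return 0;
    }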
@@ -2510,7 +2739,7 @@ static void update_mst_stream_alloc_table(
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
*/
-static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
@@ -2521,6 +2750,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
uint8_t i;
+ enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
/* enable_link_dp_mst already check link->enabled_stream_count
@@ -2568,14 +2798,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&link->mst_stream_alloc_table);
/* send down message */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -2667,6 +2899,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ if (cp_psp && cp_psp->funcs.update_stream_config) {
+ struct cp_psp_stream_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dpms_off = dpms_off;
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+ }
+}
+#endif
void core_link_enable_stream(
struct dc_state *state,
@@ -2677,6 +2927,10 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
@@ -2727,6 +2981,9 @@ void core_link_enable_stream(
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2734,6 +2991,9 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2786,13 +3046,16 @@ void core_link_enable_stream(
#endif
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
+ dc_link_allocate_mst_payload(pipe_ctx);
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
@@ -2810,6 +3073,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+#endif
+
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 51991bf26a93..7f904d55c1bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -508,7 +508,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret;
+ bool ret = false;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -527,34 +527,32 @@ bool dal_ddc_service_query_ddc_data(
/* TODO: the payload length for I2C and AUX is a uint8_t,
* but we want to read 256 bytes over I2C. */
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
- struct aux_payload write_payload = {
- .i2c_over_aux = true,
- .write = true,
- .mot = true,
- .address = address,
- .length = write_size,
- .data = write_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- struct aux_payload read_payload = {
- .i2c_over_aux = true,
- .write = false,
- .mot = false,
- .address = address,
- .length = read_size,
- .data = read_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+ struct aux_payload payload;
+ bool read_available = true;
+
+ payload.i2c_over_aux = true;
+ payload.address = address;
+ payload.reply = NULL;
+ payload.defer_delay = get_defer_delay(ddc);
+
+ if (write_size != 0) {
+ payload.write = true;
+ payload.mot = false;
+ payload.length = write_size;
+ payload.data = write_buf;
+
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ read_available = ret;
+ }
- if (!ret)
- return false;
+ if (read_size != 0 && read_available) {
+ payload.write = false;
+ payload.mot = false;
+ payload.length = read_size;
+ payload.data = read_buf;
- ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ }
} else {
struct i2c_payloads *payloads =
dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -585,6 +583,41 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
+{
+ uint8_t retrieved = 0;
+ bool ret = false;
+
+ if (!ddc)
+ return false;
+
+ if (!payload)
+ return false;
+
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ payload->length;
+
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+ current_payload.mot = !is_end_of_payload;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
+
+ ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+ retrieved += current_payload.length;
+ } while (retrieved < payload->length && ret);
+
+ return ret;
+}
+
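dal_ddc_submit_aux_command() splits one logical payload into engine-sized transfers, keeping MOT (middle-of-transaction) set on every chunk whose end does not pass the payload length, so the sink sees a single I2C-over-AUX transaction. A standalone sketch of just the chunk math, assuming DEFAULT_AUX_MAX_DATA_SIZE is 16 bytes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define AUX_MAX_DATA 16 /* assumed engine limit per transfer */

    static void plan_chunks(uint32_t total)
    {
            uint32_t done = 0;

            while (done < total) {
                    bool last = (done + AUX_MAX_DATA) > total;
                    uint32_t len = last ? total - done : AUX_MAX_DATA;

                    printf("offset %3u len %2u mot=%d\n",
                           (unsigned)done, (unsigned)len, !last);
                    done += len;
            }
    }

    /* 20 bytes: one 16-byte chunk with MOT set, then a 4-byte tail with MOT clear */
    int main(void) { plan_chunks(20); return 0; }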
/* dc_link_aux_transfer_raw() - Attempt to transfer
* the given aux payload. This function does not perform
* retries or handle error states. The reply is returned
@@ -613,6 +646,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
return dce_aux_transfer_with_retries(ddc, payload);
}
+
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout)
+{
+ enum dc_status status = DC_OK;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL)
+ return DC_ERROR_UNEXPECTED;
+ if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout))
+ status = DC_ERROR_UNEXPECTED;
+ return status;
+}
+
/*test only function*/
void dal_ddc_service_set_ddc_pin(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index f5742719b5d9..0f59b68aa4c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (link->link_enc->funcs->get_max_link_cap)
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
max_link_cap.lane_count =
@@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries(
for (i = 0; i < attempts; i++) {
int fail_count = 0;
- enum dc_connection_type type;
+ enum dc_connection_type type = dc_connection_none;
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!dc_link_detect_sink(link, &type)) {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+ link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
&link->reported_link_cap,
@@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries(
return success;
}
+bool dp_verify_mst_link_cap(
+ struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+
+ return true;
+}
+
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
@@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_enabled)
+ if (!link->psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_enable(link, false, true);
- dc_link_set_psr_enable(link, true, true);
+ dc_link_set_psr_allow_active(link, false, true);
+ dc_link_set_psr_allow_active(link, true, true);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
enum dc_status result;
bool status = false;
+ struct pipe_ctx *pipe_ctx;
+ int i;
if (out_link_loss)
*out_link_loss = false;
@@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&link->cur_link_settings,
true, LINK_TRAINING_ATTEMPTS);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -2545,6 +2575,7 @@ static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
/* decode converter info*/
if (!ds_port.fields.PORT_PRESENT) {
@@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
* keep receiver powered all the time.*/
case DP_BRANCH_DEVICE_ID_0010FA:
case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
link->wa_flags.dp_keep_receiver_powered = true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 79438c4f1e20..a519dbc5ecb6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if (pipes[i].stream != NULL &&
!pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL) {
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->link == link) {
udelay(100);
pipes[i].stream_res.stream_enc->funcs->dp_blank(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f25ac17f47fa..37698305a2dc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
}
-static bool are_rect_integer_multiples(struct rect src, struct rect dest)
+static bool are_rects_integer_multiples(struct rect src, struct rect dest)
{
if (dest.width >= src.width && dest.width % src.width == 0 &&
dest.height >= src.height && dest.height % src.height == 0)
@@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, struct rect dest)
return false;
}
+
+static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
+ return;
+
+ // for centered mode
+ if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width &&
+ pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
+ // calculate the maximum number of whole copies of src that fit in the addressable timing
+ unsigned int integer_multiple = min(
+ pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
+ pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height);
+
+ // scale dst
+ pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width;
+ pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
+
+ // center dst within the addressable timing
+ pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
+ pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
+ }
+
+ // disable scaler taps if dst is an integer multiple of src
+ if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) {
+ pipe_ctx->plane_state->scaling_quality.v_taps = 1;
+ pipe_ctx->plane_state->scaling_quality.h_taps = 1;
+ pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
+ pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+ }
+}
+
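For the centered case the arithmetic above replicates the source as many whole times as fits in the addressable timing, then centers the result; taps then collapse to 1 because dst is an exact multiple of src. A worked example, assuming a 640x480 source on a 1920x1080 addressable timing:

    #include <stdio.h>

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

    int main(void)
    {
            unsigned src_w = 640, src_h = 480;     /* assumed source rect */
            unsigned addr_w = 1920, addr_h = 1080; /* addressable timing */

            unsigned m = min_u(addr_w / src_w, addr_h / src_h); /* min(3, 2) = 2 */
            unsigned dst_w = m * src_w, dst_h = m * src_h;      /* 1280 x 960 */

            printf("dst %ux%u at (%u,%u)\n", dst_w, dst_h,
                   (addr_w - dst_w) / 2, (addr_h - dst_h) / 2); /* (320, 60) */
            return 0;
    }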
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
+ calculate_integer_scaling(pipe_ctx);
+
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
@@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
- if (res &&
- plane_state->scaling_quality.integer_scaling &&
- are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport,
- pipe_ctx->plane_res.scl_data.recout)) {
- pipe_ctx->plane_res.scl_data.taps.v_taps = 1;
- pipe_ctx->plane_res.scl_data.taps.h_taps = 1;
- }
if (!res) {
/* Try 24 bpp linebuffer */
@@ -1635,7 +1662,8 @@ static int acquire_first_free_pipe(
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
- enum engine_id id)
+ enum engine_id id,
+ enum dce_version dc_version)
{
int i, available_audio_count;
@@ -1854,28 +1882,28 @@ static int acquire_resource_from_hw_enabled_state(
struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- unsigned int inst, tg_inst;
+ unsigned int i, inst, tg_inst = 0;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return -1;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (inst >= pool->pipe_count)
- return -1;
+ if (inst == ENGINE_ID_UNKNOWN)
+ return -1;
- if (inst >= pool->stream_enc_count)
- return -1;
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i]->id == inst) {
+ tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
+ pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]);
+ // tg_inst not found
+ if (i == pool->stream_enc_count)
+ return -1;
if (tg_inst >= pool->timing_generator_count)
return false;
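Rather than treating the DIG frontend index as a direct pipe index, the rewritten code looks the engine id up in the stream encoder pool and asks that encoder which OTG sources it. The lookup, reduced to a self-contained sketch (struct layout hypothetical):

    struct stream_enc {
            int id;
            unsigned (*dig_source_otg)(const struct stream_enc *enc);
    };

    /* return the OTG instance feeding DIG frontend `inst`, or -1 if absent */
    static int find_tg_inst(struct stream_enc *const *encs, int count, int inst)
    {
            int i;

            for (i = 0; i < count; i++)
                    if (encs[i]->id == inst)
                            return (int)encs[i]->dig_source_otg(encs[i]);
            return -1;
    }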
@@ -1971,7 +1999,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
- &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
/*
* Audio assigned in order first come first get.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bf1d7bb90e0f..bb09243758fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info);
+ dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
} else {
/* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info);
+ dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a82352a87808..0416a17b0897 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.48"
+#define DC_VER "3.2.56"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -111,19 +111,20 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ bool extended_aux_timeout_support;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hw_3d_lut;
#endif
struct dc_plane_cap planes[MAX_PLANES];
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa {
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool no_connect_phy_config;
bool dedcn20_305_wa;
+#endif
bool skip_clock_update;
};
-#endif
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -219,7 +220,9 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+ bool force_enum_edp;
bool forced_clocks;
+ bool disable_extended_timeout_support; // Disables the extended AUX timeout and the LTTPR feature as well
bool multi_mon_pp_mclk_switch;
};
@@ -227,6 +230,7 @@ enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
+ VISUAL_CONFIRM_MPCTREE = 4,
};
enum dcc_option {
@@ -245,6 +249,19 @@ enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
+enum dtm_pstate {
+ dtm_level_p0 = 0, /* highest voltage */
+ dtm_level_p1,
+ dtm_level_p2,
+ dtm_level_p3,
+ dtm_level_p4, /* when active_display_count = 0 */
+};
+
+enum dcn_pwr_state {
+ DCN_PWR_STATE_UNKNOWN = -1,
+ DCN_PWR_STATE_MISSION_MODE = 0,
+ DCN_PWR_STATE_LOW_POWER = 3,
+};
/*
* For any clocks that may differ per pipe
@@ -252,11 +269,7 @@ enum wm_report_mode {
*/
struct dc_clocks {
int dispclk_khz;
- int max_supported_dppclk_khz;
- int max_supported_dispclk_khz;
int dppclk_khz;
- int bw_dppclk_khz; /*a copy of dppclk_khz*/
- int bw_dispclk_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
@@ -264,12 +277,17 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
-
+ enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
* optimization required
*/
bool prev_p_state_change_support;
+ enum dtm_pstate dtm_level;
+ int max_supported_dppclk_khz;
+ int max_supported_dispclk_khz;
+ int bw_dppclk_khz; /*a copy of dppclk_khz*/
+ int bw_dispclk_khz;
};
struct dc_bw_validation_profile {
@@ -347,6 +365,7 @@ struct dc_debug_options {
bool disable_hubp_power_gate;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_dsc_power_gate;
+ int dsc_min_slice_height_override;
#endif
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
@@ -462,9 +481,7 @@ struct dc {
struct dc_config config;
struct dc_debug_options debug;
struct dc_bounding_box_overrides bb_overrides;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa work_arounds;
-#endif
struct dc_context *ctx;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config vm_pa_config;
@@ -553,10 +570,16 @@ struct dc_init_data {
};
struct dc_callback_init {
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#else
uint8_t reserved;
+#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
+void dc_hardware_init(struct dc *dc);
+
int dc_get_vmid_use_vector(struct dc *dc);
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
@@ -565,6 +588,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
#endif
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
+void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 4ef97f65e55d..4f8f576d5fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -49,7 +49,8 @@ enum aux_channel_operation_result {
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
- AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON,
+ AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 6e42209f0e20..0ed2962add5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -30,6 +30,7 @@
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+#include "dc_types.h"
struct dc_dsc_bw_range {
uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */
@@ -39,13 +40,21 @@ struct dc_dsc_bw_range {
uint32_t stream_kbps; /* Uncompressed stream bandwidth */
};
+struct display_stream_compressor {
+ const struct dsc_funcs *funcs;
+#ifndef AMD_EDID_UTILITY
+ struct dc_context *ctx;
+ int inst;
+#endif
+};
bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_kbps,
const uint32_t max_kbps,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 0b8700a8a94a..e0856bb8511f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -26,6 +26,8 @@
#ifndef DC_HW_TYPES_H
#define DC_HW_TYPES_H
+#ifndef AMD_EDID_UTILITY
+
#include "os_types.h"
#include "fixed31_32.h"
#include "signal_types.h"
@@ -124,20 +126,6 @@ struct plane_size {
int chroma_pitch;
struct rect surface_size;
struct rect chroma_size;
-
- union {
- struct {
- struct rect surface_size;
- int surface_pitch;
- } grph;
-
- struct {
- struct rect luma_size;
- int luma_pitch;
- struct rect chroma_size;
- int chroma_pitch;
- } video;
- };
};
struct dc_plane_dcc_param {
@@ -148,21 +136,6 @@ struct dc_plane_dcc_param {
int meta_pitch_c;
bool independent_64b_blks_c;
-
- union {
- struct {
- int meta_pitch;
- bool independent_64b_blks;
- } grph;
-
- struct {
- int meta_pitch_l;
- bool independent_64b_blks_l;
-
- int meta_pitch_c;
- bool independent_64b_blks_c;
- } video;
- };
};
/*Displayable pixel format in fb*/
@@ -605,6 +578,11 @@ enum dc_quantization_range {
QUANTIZATION_RANGE_LIMITED
};
+enum dc_dynamic_expansion {
+ DYN_EXPANSION_AUTO,
+ DYN_EXPANSION_DISABLE
+};
+
/* XFM */
/* used in struct dc_plane_state */
@@ -616,6 +594,8 @@ struct scaling_taps {
bool integer_scaling;
};
+#endif /* AMD_EDID_UTILITY */
+
enum dc_timing_standard {
DC_TIMING_STANDARD_UNDEFINED,
DC_TIMING_STANDARD_DMT,
@@ -737,30 +717,6 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
-enum trigger_delay {
- TRIGGER_DELAY_NEXT_PIXEL = 0,
- TRIGGER_DELAY_NEXT_LINE,
-};
-
-enum crtc_event {
- CRTC_EVENT_VSYNC_RISING = 0,
- CRTC_EVENT_VSYNC_FALLING
-};
-
-struct crtc_trigger_info {
- bool enabled;
- struct dc_stream_state *event_source;
- enum crtc_event event;
- enum trigger_delay delay;
-};
-
-struct dc_crtc_timing_adjust {
- uint32_t v_total_min;
- uint32_t v_total_max;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
-};
-
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
@@ -804,6 +760,33 @@ struct dc_crtc_timing {
#endif
};
+#ifndef AMD_EDID_UTILITY
+
+enum trigger_delay {
+ TRIGGER_DELAY_NEXT_PIXEL = 0,
+ TRIGGER_DELAY_NEXT_LINE,
+};
+
+enum crtc_event {
+ CRTC_EVENT_VSYNC_RISING = 0,
+ CRTC_EVENT_VSYNC_FALLING
+};
+
+struct crtc_trigger_info {
+ bool enabled;
+ struct dc_stream_state *event_source;
+ enum crtc_event event;
+ enum trigger_delay delay;
+};
+
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+ uint32_t v_total_mid;
+ uint32_t v_total_mid_frame_num;
+};
+
/* Passed on init */
enum vram_type {
VIDEO_MEMORY_TYPE_GDDR5 = 2,
@@ -874,5 +857,7 @@ struct tg_color {
uint16_t color_b_cb;
};
+#endif /* AMD_EDID_UTILITY */
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 9ea75db3484e..f24fd19ed93d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -126,7 +126,8 @@ struct dc_link {
unsigned short chip_caps;
unsigned int dpcd_sink_count;
enum edp_revision edp_revision;
- bool psr_enabled;
+ bool psr_feature_enabled;
+ bool psr_allow_active;
/* MST record stream using this link */
struct link_flags {
@@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
+static inline struct dc_link *get_edp_link(const struct dc *dc)
+{
+ int i;
+
+ // report any eDP links, even unconnected DDIs
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+ return dc->links[i];
+ }
+ return NULL;
+}
+
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
@@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
-bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
@@ -192,6 +205,7 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 0fa1c26bc20d..fdb6adc37857 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -113,6 +113,21 @@ struct periodic_interrupt_config {
int lines_offset;
};
+union stream_update_flags {
+ struct {
+ uint32_t scaling:1;
+ uint32_t out_tf:1;
+ uint32_t out_csc:1;
+ uint32_t abm_level:1;
+ uint32_t dpms_off:1;
+ uint32_t gamut_remap:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ uint32_t wb_update:1;
+#endif
+ } bits;
+
+ uint32_t raw;
+};
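The bits/raw union gives two views of one 32-bit word: named flags for setting individual bits, and raw for the bulk clear (0), the bulk set (0xFFFFFFFF on a full update) and the any-bit-set test used in check_update_surfaces_for_stream(). A minimal self-contained illustration of the pattern:

    #include <stdint.h>
    #include <stdio.h>

    union update_flags {
            struct {
                    uint32_t scaling:1;
                    uint32_t out_tf:1;
                    uint32_t dpms_off:1;
            } bits;
            uint32_t raw;
    };

    int main(void)
    {
            union update_flags f = { .raw = 0 }; /* bulk clear */

            f.bits.scaling = 1;
            f.bits.dpms_off = 1;

            if (f.raw != 0) /* any flag set? */
                    printf("full update needed (raw=0x%x)\n", (unsigned)f.raw);

            f.raw = 0xFFFFFFFF; /* mark everything dirty on a full update */
            return 0;
    }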
struct dc_stream_state {
// sink is deprecated, new code should not reference
@@ -214,9 +229,14 @@ struct dc_stream_state {
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
#endif
+ union stream_update_flags update_flags;
};
+#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+
struct dc_stream_update {
+ struct dc_stream_state *stream;
+
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
@@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc,
int num_streams,
const struct dc_static_screen_events *events);
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option);
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b273735b6a3e..d9be8fc3889f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,6 +25,11 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
+#ifndef AMD_EDID_UTILITY
+/* AMD EdidUtility only needs a portion
+ * of this file; including the rest only
+ * causes additional issues.
+ */
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
@@ -33,6 +38,10 @@
#include "dal_types.h"
#include "grph_object_defs.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
+
/* forward declarations */
struct dc_plane_state;
struct dc_stream_state;
@@ -100,6 +109,9 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#endif
};
@@ -159,6 +171,12 @@ enum dc_edid_status {
EDID_THE_SAME,
};
+enum act_return_status {
+ ACT_SUCCESS,
+ ACT_LINK_LOST,
+ ACT_FAILED
+};
+
/* audio capability from EDID*/
struct dc_cea_audio_mode {
uint8_t format_code; /* ucData[0] [6:3]*/
@@ -739,6 +757,9 @@ struct dc_clock_config {
uint32_t current_clock_khz;/*current clock in use*/
};
+#endif /* AMD_EDID_UTILITY */
+// AMD EDID UTILITY does not need any of the above structures
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC DPCD capabilities */
union dsc_slice_caps1 {
@@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
#endif
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 58bd131d5b48..b8a3fc505c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
return true;
}
@@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm)
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+ /* Disable fractional pwm if configured */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
+ abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
+
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
@@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
- if (abm_dce->base.dmcu_is_running == true)
- abm_dce->base.funcs->set_abm_immediate_disable(*abm);
-
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index c3f9f4185ce8..e472608faf33 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,6 +42,10 @@
#include "reg_helper.h"
+#undef FN
+#define FN(reg_name, field_name) \
+ aux110->shift->field_name, aux110->mask->field_name
+
#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)
@@ -55,6 +59,14 @@ enum {
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};
+
+#define TIME_OUT_INCREMENT 1016
+#define TIME_OUT_MULTIPLIER_8 8
+#define TIME_OUT_MULTIPLIER_16 16
+#define TIME_OUT_MULTIPLIER_32 32
+#define TIME_OUT_MULTIPLIER_64 64
+#define MAX_TIMEOUT_LENGTH 127
+
static void release_engine(
struct dce_aux *engine)
{
@@ -198,7 +210,7 @@ static void submit_channel_request(
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
/* set the delay and the number of bytes to write */
@@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status(
/* poll to make sure that SW_DONE is asserted */
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
@@ -414,20 +426,77 @@ void dce110_engine_destroy(struct dce_aux **engine)
*engine = NULL;
}
+
+static bool dce_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout_in_us)
+{
+ uint32_t multiplier = 0;
+ uint32_t length = 0;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+
+ /* 1. Update the polling timeout period */
+ aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
+
+ /* 2. Update the AUX timeout period length and multiplier */
+ if (timeout_in_us <= TIME_OUT_INCREMENT) {
+ multiplier = 0;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
+ length++;
+ } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
+ multiplier = 1;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_16;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0)
+ length++;
+ } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
+ multiplier = 2;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_32;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0)
+ length++;
+ } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) {
+ multiplier = 3;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_64;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0)
+ length++;
+ }
+
+ length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
+
+ REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
+
+ return true;
+}
+
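dce_aux_configure_timeout() encodes the requested microsecond timeout as length x (8 << multiplier) ticks, choosing the smallest multiplier bucket, rounding the length up, and clamping it to the 7-bit register field. A restatement of the bucketing with a worked value (same constants as the hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define INCREMENT 1016u
    #define MAX_LEN   127u

    static void encode_timeout(uint32_t us, uint32_t *mul, uint32_t *len)
    {
            static const uint32_t div[] = { 8, 16, 32, 64 };

            /* buckets: <= 1016 us, <= 2032 us, <= 4064 us, above */
            *mul = us <= INCREMENT ? 0 : us <= 2 * INCREMENT ? 1 :
                   us <= 4 * INCREMENT ? 2 : 3;
            *len = (us + div[*mul] - 1) / div[*mul]; /* round up */
            if (*len > MAX_LEN)
                    *len = MAX_LEN;
    }

    int main(void)
    {
            uint32_t mul, len;

            encode_timeout(3200, &mul, &len);
            /* mul=2 len=100 -> 100 * 32 = 3200 us */
            printf("mul=%u len=%u -> %u us\n", mul, len, len * (8u << mul));
            return 0;
    }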
+static struct dce_aux_funcs aux_functions = {
+ .configure_timeout = NULL,
+ .destroy = NULL,
+};
+
struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs)
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable)
{
aux_engine110->base.ddc = NULL;
aux_engine110->base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.inst = inst;
- aux_engine110->timeout_period = timeout_period;
+ aux_engine110->polling_timeout_period = timeout_period;
aux_engine110->regs = regs;
+ aux_engine110->mask = mask;
+ aux_engine110->shift = shift;
+ aux_engine110->base.funcs = &aux_functions;
+ if (is_ext_aux_timeout_configurable)
+ aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout;
+
return &aux_engine110->base;
}
@@ -464,8 +533,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- if (!acquire(aux_engine, ddc_pin))
+ if (!acquire(aux_engine, ddc_pin)) {
+ *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
return -1;
+ }
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -475,7 +546,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
aux_req.action = i2caux_action_from_payload(payload);
aux_req.address = payload->address;
- aux_req.delay = payload->defer_delay * 10;
+ aux_req.delay = 0;
aux_req.length = payload->length;
aux_req.data = payload->data;
@@ -544,8 +615,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
+ } else {
+ if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
+ (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+ if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ }
+ }
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
@@ -582,6 +660,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
default:
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index ed7fec8fe253..b4b2c79a8073 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -29,6 +29,7 @@
#include "i2caux_interface.h"
#include "inc/hw/aux_engine.h"
+
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@@ -36,6 +37,7 @@
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
#endif
@@ -55,6 +57,7 @@ struct dce110_aux_registers {
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL1;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
@@ -62,6 +65,156 @@ struct dce110_aux_registers {
uint32_t AUX_RESET_MASK;
};
+#define DCE_AUX_REG_FIELD_LIST(type)\
+ type AUX_EN;\
+ type AUX_RESET;\
+ type AUX_RESET_DONE;\
+ type AUX_REG_RW_CNTL_STATUS;\
+ type AUX_SW_USE_AUX_REG_REQ;\
+ type AUX_SW_DONE_USING_AUX_REG;\
+ type AUX_SW_AUTOINCREMENT_DISABLE;\
+ type AUX_SW_DATA_RW;\
+ type AUX_SW_INDEX;\
+ type AUX_SW_GO;\
+ type AUX_SW_DATA;\
+ type AUX_SW_REPLY_BYTE_COUNT;\
+ type AUX_SW_DONE;\
+ type AUX_SW_DONE_ACK;\
+ type AUXN_IMPCAL_ENABLE;\
+ type AUXP_IMPCAL_ENABLE;\
+ type AUXN_IMPCAL_OVERRIDE_ENABLE;\
+ type AUXP_IMPCAL_OVERRIDE_ENABLE;\
+ type AUX_RX_TIMEOUT_LEN;\
+ type AUX_RX_TIMEOUT_LEN_MUL;\
+ type AUXN_CALOUT_ERROR_AK;\
+ type AUXP_CALOUT_ERROR_AK;\
+ type AUX_SW_START_DELAY;\
+ type AUX_SW_WR_BYTES
+
+#define DCE10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE12_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* DCN10 MASK */
+#define DCN10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* for all other DCN */
+#define DCN_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
+
+#define AUX_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
@@ -97,20 +250,34 @@ struct dce_aux {
uint32_t max_defer_write_retry;
bool acquire_reset;
+ struct dce_aux_funcs *funcs;
+};
+
+struct dce110_aux_registers_mask {
+ DCE_AUX_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce110_aux_registers_shift {
+ DCE_AUX_REG_FIELD_LIST(uint8_t);
};
+
struct aux_engine_dce110 {
struct dce_aux base;
const struct dce110_aux_registers *regs;
+ const struct dce110_aux_registers_mask *mask;
+ const struct dce110_aux_registers_shift *shift;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
+ uint32_t aux_dphy_rx_control1;
+ uint32_t aux_dphy_rx_control0;
uint32_t aux_sw_status;
} addr;
- uint32_t timeout_period;
+ uint32_t polling_timeout_period;
};
struct aux_engine_dce110_init_data {
@@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data {
const struct dce110_aux_registers *regs;
};
-struct dce_aux *dce110_aux_engine_construct(
- struct aux_engine_dce110 *aux_engine110,
+struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs);
+ const struct dce110_aux_registers *regs,
+
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable);
void dce110_engine_destroy(struct dce_aux **engine);
@@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
+
+struct dce_aux_funcs {
+ bool (*configure_timeout)
+ (struct ddc_service *ddc,
+ uint32_t timeout);
+ void (*destroy)
+ (struct aux_engine **ptr);
+};
+
#endif
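
For readers new to DC's register macros: the field lists and *_MASK_SH_LIST tables above follow an X-macro convention. One list of field names is expanded twice, once with uint32_t for masks and once with uint8_t for shifts, and AUX_SF() token-pastes <register>__<field><suffix> to pick up the generated hardware constants. Below is a minimal, self-contained sketch of that mechanism; the register names, mask values, and shift values are invented purely for illustration.

/*
 * Self-contained sketch of the mask/shift X-macro pattern used in
 * dce_aux.h. All hardware constants here are hypothetical; they only
 * show how one FIELD_LIST macro instantiates both structs and how an
 * AUX_SF() entry resolves to <REG>__<FIELD><suffix> names.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical generated register-header constants. */
#define AUX_CONTROL__AUX_EN_MASK	0x00000001
#define AUX_CONTROL__AUX_EN__SHIFT	0x0
#define AUX_CONTROL__AUX_RESET_MASK	0x00000010
#define AUX_CONTROL__AUX_RESET__SHIFT	0x4

/* One field list stamps out both struct bodies, like DCE_AUX_REG_FIELD_LIST. */
#define AUX_REG_FIELD_LIST(type) \
	type AUX_EN; \
	type AUX_RESET

struct aux_registers_mask  { AUX_REG_FIELD_LIST(uint32_t); };
struct aux_registers_shift { AUX_REG_FIELD_LIST(uint8_t); };

/* Same shape as the AUX_SF() defined in the header above. */
#define AUX_SF(reg_name, field_name, post_fix) \
	.field_name = reg_name ## __ ## field_name ## post_fix

#define AUX_MASK_SH_LIST(mask_sh) \
	AUX_SF(AUX_CONTROL, AUX_EN, mask_sh), \
	AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh)

static const struct aux_registers_mask  aux_mask  = { AUX_MASK_SH_LIST(_MASK) };
static const struct aux_registers_shift aux_shift = { AUX_MASK_SH_LIST(__SHIFT) };

int main(void)
{
	uint32_t reg = 0x11; /* pretend readback of AUX_CONTROL */
	uint32_t en  = (reg & aux_mask.AUX_EN)    >> aux_shift.AUX_EN;
	uint32_t rst = (reg & aux_mask.AUX_RESET) >> aux_shift.AUX_RESET;

	printf("AUX_EN=%u AUX_RESET=%u\n", en, rst);
	return 0;
}

Instantiating the same list with _MASK and __SHIFT is what lets each ASIC generation (DCE10/DCE/DCE12/DCN10/DCN) supply its own mask/shift tables while sharing one programming sequence.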
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 0b86cee4876f..ba995d3f2318 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
- if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
- dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
-
kfree(dmcu_dce);
*dmcu = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index ac04d77058f0..32d145a0d6fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -679,6 +679,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 31b698bf9cfc..8aa937f496c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif(
}
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
@@ -636,11 +636,11 @@ static void dce_mi_free_dmif(
10, 3500);
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 76d54885374a..a5e122c721ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = {
.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
static struct mem_input *dce100_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
@@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -997,6 +1043,8 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
+ dc->caps.extended_aux_timeout_support = false;
+
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
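
The repeated map_transmitter_id_to_phy_instance() helpers exist because the transmitter enum is not guaranteed to be a dense 0..N-1 run of UNIPHY instances, so indexing link_enc_regs[] with the raw enum value, as the removed lines did, can select the wrong (or an out-of-bounds) register set. A compilable sketch of the failure mode follows; the enum layout and register bases are hypothetical stand-ins.

/*
 * Sketch of why the resource constructors map the transmitter enum to
 * a PHY instance instead of indexing link_enc_regs[] directly. Names
 * and values are invented for illustration.
 */
#include <assert.h>
#include <stdio.h>

enum transmitter {
	TRANSMITTER_UNKNOWN,		/* non-PHY entry ahead of the UNIPHYs */
	TRANSMITTER_UNIPHY_A,
	TRANSMITTER_UNIPHY_B,
	TRANSMITTER_UNIPHY_C,
};

struct link_enc_regs { unsigned int base; };

static const struct link_enc_regs link_enc_regs[] = {
	{ 0x1000 }, { 0x2000 }, { 0x3000 },	/* one entry per PHY instance */
};

static int map_transmitter_id_to_phy_instance(enum transmitter transmitter)
{
	switch (transmitter) {
	case TRANSMITTER_UNIPHY_A: return 0;
	case TRANSMITTER_UNIPHY_B: return 1;
	case TRANSMITTER_UNIPHY_C: return 2;
	default:
		assert(0);	/* stands in for the driver's ASSERT(0) */
		return 0;
	}
}

int main(void)
{
	enum transmitter t = TRANSMITTER_UNIPHY_B;	/* raw value 2 */
	/* Indexing with 't' directly would pick 0x3000, one slot too far. */
	int phy = map_transmitter_id_to_phy_instance(t);

	printf("UNIPHY_B -> phy %d, regs base 0x%x\n", phy, link_enc_regs[phy].base);
	return 0;
}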
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 01a924bf477a..f0e837d14000 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify audio driver for audio modes of monitor */
struct dc *core_dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
@@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
- if (core_dc->res_pool->pp_smu)
- pp_smu = core_dc->res_pool->pp_smu;
-
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
struct dc *dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
- if (dc->res_pool->pp_smu)
- pp_smu = dc->res_pool->pp_smu;
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1169,8 +1161,9 @@ static void build_audio_output(
}
}
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (state->clk_mgr &&
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
@@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_enabled = false;
+ pipe_ctx->stream->link->psr_feature_enabled = false;
return DC_OK;
}
@@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
int i;
- enum connector_id connector_id;
- enum signal_type signal = SIGNAL_TYPE_NONE;
/* do not know BIOS back-front mapping, simply blank all. It will not
* hurt for non-DP
@@ -1440,15 +1431,12 @@ static void power_down_encoders(struct dc *dc)
}
for (i = 0; i < dc->link_count; i++) {
- connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
- if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP)) {
+ enum signal_type signal = dc->links[i]->connector_signal;
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT))
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
- if (connector_id == CONNECTOR_ID_EDP)
- signal = SIGNAL_TYPE_EDP;
- }
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
@@ -1529,18 +1517,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context)
return NULL;
}
-static struct dc_link *get_edp_link(struct dc *dc)
-{
- int i;
-
- // report any eDP links, even unconnected DDI's
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
- return dc->links[i];
- }
- return NULL;
-}
-
static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
@@ -1834,7 +1810,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_enabled)
+ if (pipe_ctx->stream->link->psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2464,7 +2440,6 @@ static void dce110_program_front_end_for_pipe(
struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct pipe_ctx *old_pipe = NULL;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
@@ -2472,9 +2447,6 @@ static void dce110_program_front_end_for_pipe(
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
- if (dc->current_state)
- old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 89620adc81d8..83a4dbf6d76e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_110_REG_LIST(id),\
@@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1293,6 +1339,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 21a657e79306..97dcc5d0862b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_DCE110_REG_LIST_DCE_BASE(id)\
@@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1163,7 +1209,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 7c52f7f9196c..63543f6918ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE12_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE12_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_120_REG_LIST(id),\
@@ -356,6 +364,37 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, id)\
[index] = {\
CS_COMMON_REG_LIST_DCE_112(id),\
@@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
@@ -1006,7 +1052,7 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.psp_setup_panel_mode = true;
-
+ dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 643ccb0ade00..3e8d4b49f279 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
@@ -431,6 +439,37 @@ static const struct dce_abm_mask abm_mask = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -895,6 +941,7 @@ static bool dce80_construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d8b2da18db39..997e9582edc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
-static bool dpp_get_optimal_number_of_taps(
+bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e2c613611ac9..1d4a7d640334 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
+bool dpp1_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 001db49e4bb2..14d1be6c66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -841,6 +841,14 @@ void min_set_viewport(
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
void hubp1_read_state_common(struct hubp *hubp)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index cb20d10288c0..ae70d9c0aa1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -47,6 +47,8 @@
SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
@@ -57,8 +59,12 @@
SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
@@ -150,6 +156,8 @@
uint32_t DCSURF_SEC_VIEWPORT_START; \
uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_START_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
@@ -160,8 +168,12 @@
uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \
uint32_t DCSURF_SURFACE_INUSE; \
uint32_t DCSURF_SURFACE_INUSE_HIGH; \
uint32_t DCSURF_SURFACE_INUSE_C; \
@@ -279,6 +291,10 @@
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
@@ -289,8 +305,12 @@
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
@@ -469,6 +489,10 @@
type PRI_VIEWPORT_HEIGHT_C; \
type PRI_VIEWPORT_X_START_C; \
type PRI_VIEWPORT_Y_START_C; \
+ type SEC_VIEWPORT_WIDTH_C; \
+ type SEC_VIEWPORT_HEIGHT_C; \
+ type SEC_VIEWPORT_X_START_C; \
+ type SEC_VIEWPORT_Y_START_C; \
type PRIMARY_SURFACE_ADDRESS_HIGH;\
type PRIMARY_SURFACE_ADDRESS;\
type SECONDARY_SURFACE_ADDRESS_HIGH;\
@@ -479,8 +503,12 @@
type SECONDARY_META_SURFACE_ADDRESS;\
type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_SURFACE_ADDRESS_C;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_SURFACE_ADDRESS_C;\
type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_C;\
type SURFACE_INUSE_ADDRESS;\
type SURFACE_INUSE_ADDRESS_HIGH;\
type SURFACE_INUSE_ADDRESS_C;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 60123db7ba02..eb91432621ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc)
int i;
bool allow_self_fresh_force_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
+ return;
+#endif
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@@ -1300,6 +1304,10 @@ static void dcn10_init_hw(struct dc *dc)
}
dc->hwss.enable_power_gating_plane(dc->hwseq, true);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
}
static void dcn10_reset_hw_ctx_wrap(
@@ -1452,15 +1460,15 @@ static void log_tf(struct dc_context *ctx,
DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
for (i = 0; i < hw_points_num; i++) {
- DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
- DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
}
@@ -2304,8 +2312,7 @@ void update_dchubp_dpp(
dc->res_pool->dccg->funcs->update_dpp_dto(
dc->res_pool->dccg,
dpp->inst,
- pipe_ctx->plane_res.bw.dppclk_khz,
- false);
+ pipe_ctx->plane_res.bw.dppclk_khz);
else
dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->clk_mgr->clks.dispclk_khz / 2 :
@@ -2512,8 +2519,10 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state != NULL)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 8bf5f0f2301d..88fcc395adf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -113,6 +113,20 @@ struct dcn10_link_enc_registers {
uint32_t DIG_LANE_ENABLE;
/* UNIPHY */
uint32_t CHANNEL_XBAR_CNTL;
+ /* DPCS */
+ uint32_t RDPCSTX_PHY_CNTL3;
+ uint32_t RDPCSTX_PHY_CNTL4;
+ uint32_t RDPCSTX_PHY_CNTL5;
+ uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSTX_PHY_CNTL7;
+ uint32_t RDPCSTX_PHY_CNTL8;
+ uint32_t RDPCSTX_PHY_CNTL9;
+ uint32_t RDPCSTX_PHY_CNTL10;
+ uint32_t RDPCSTX_PHY_CNTL11;
+ uint32_t RDPCSTX_PHY_CNTL12;
+ uint32_t RDPCSTX_PHY_CNTL13;
+ uint32_t RDPCSTX_PHY_CNTL14;
+ uint32_t RDPCSTX_PHY_CNTL15;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -250,6 +264,10 @@ struct dcn10_link_enc_registers {
type RDPCS_EXT_REFCLK_EN;\
type RDPCS_TX_FIFO_EN;\
type UNIPHY_LINK_ENABLE;\
+ type UNIPHY_CHANNEL0_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL1_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL2_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL3_XBAR_SOURCE;\
type UNIPHY_CHANNEL0_INVERT;\
type UNIPHY_CHANNEL1_INVERT;\
type UNIPHY_CHANNEL2_INVERT;\
@@ -337,16 +355,46 @@ struct dcn10_link_enc_registers {
type RDPCS_TX_FIFO_ERROR_MASK;\
type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\
type RDPCS_DPALT_4LANE_TOGGLE_MASK;\
+ type RDPCS_PHY_DPALT_DP4;\
type RDPCS_PHY_DPALT_DISABLE;\
type RDPCS_PHY_DPALT_DISABLE_ACK;\
type RDPCS_PHY_DP_MPLLB_V2I;\
type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\
+ type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\
+ type RDPCS_PHY_RX_VREF_CTRL;\
type RDPCS_PHY_DP_MPLLB_CP_INT;\
type RDPCS_PHY_DP_MPLLB_CP_PROP;\
type RDPCS_PHY_RX_REF_LD_VAL;\
type RDPCS_PHY_RX_VCO_LD_VAL;\
type DPCSTX_DEBUG_CONFIG; \
- type RDPCSTX_DEBUG_CONFIG
+ type RDPCSTX_DEBUG_CONFIG; \
+ type RDPCS_PHY_DP_TX0_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX0_EQ_PRE;\
+ type RDPCS_PHY_DP_TX0_EQ_POST;\
+ type RDPCS_PHY_DP_TX1_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX1_EQ_PRE;\
+ type RDPCS_PHY_DP_TX1_EQ_POST;\
+ type RDPCS_PHY_DP_TX2_EQ_MAIN;\
+ type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\
+ type RDPCS_PHY_DP_TX2_EQ_PRE;\
+ type RDPCS_PHY_DP_TX2_EQ_POST;\
+ type RDPCS_PHY_DP_TX3_EQ_MAIN;\
+ type RDPCS_PHY_DCO_RANGE;\
+ type RDPCS_PHY_DCO_FINETUNE;\
+ type RDPCS_PHY_DP_TX3_EQ_PRE;\
+ type RDPCS_PHY_DP_TX3_EQ_POST;\
+ type RDPCS_PHY_SUP_PRE_HP;\
+ type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\
+ type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\
+ type UNIPHYA_SOFT_RESET;\
+ type UNIPHYB_SOFT_RESET;\
+ type UNIPHYC_SOFT_RESET;\
+ type UNIPHYD_SOFT_RESET;\
+ type UNIPHYE_SOFT_RESET;\
+ type UNIPHYF_SOFT_RESET
#define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_LANE0EN;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index e9ebbbe256b4..0a9ad692f541 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding(
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
@@ -237,6 +240,9 @@ void opp1_set_dyn_expansion(
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);
+ if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
+ return;
+
/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 0f10adea000c..2c0ecfa5a643 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -116,6 +116,8 @@
type FMT_RAND_G_SEED; \
type FMT_RAND_B_SEED; \
type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS; \
type FMT_CLAMP_DATA_EN; \
type FMT_CLAMP_COLOR_FORMAT; \
type FMT_DYNAMIC_EXP_EN; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e74a07d03fde..dabccbd49ad4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
return ret;
}
-bool optc1_is_matching_timing(struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing)
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing)
{
- struct dc_crtc_timing hw_crtc_timing = {0};
struct dcn_otg_state s = {0};
- if (tg == NULL || otg_timing == NULL)
+ if (tg == NULL || hw_crtc_timing == NULL)
return false;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
- hw_crtc_timing.h_total = s.h_total + 1;
- hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
- hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
- hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
+ hw_crtc_timing->h_total = s.h_total + 1;
+ hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
+ hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
+ hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
- hw_crtc_timing.v_total = s.v_total + 1;
- hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
- hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
- hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
-
- if (otg_timing->h_total != hw_crtc_timing.h_total)
- return false;
-
- if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
- return false;
-
- if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
- return false;
-
- if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
- return false;
-
- if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
- return false;
-
- if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
- return false;
-
- if (otg_timing->v_total != hw_crtc_timing.v_total)
- return false;
-
- if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
- return false;
-
- if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
- return false;
-
- if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
- return false;
-
- if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
- return false;
+ hw_crtc_timing->v_total = s.v_total + 1;
+ hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
+ hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
+ hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
return true;
}
@@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
- .is_matching_timing = optc1_is_matching_timing,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
- .setup_manual_trigger = optc1_setup_manual_trigger
+ .setup_manual_trigger = optc1_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn10_timing_generator_init(struct optc *optc1)
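
With optc1_is_matching_timing() folded into a plain getter, the field-by-field comparison now belongs to the caller. A rough, self-contained sketch of rebuilding the old check on top of optc1_get_hw_timing() is below; the dc_crtc_timing stand-in is abbreviated to two fields, whereas the real struct carries the full set of horizontal and vertical fields the deleted code compared.

/*
 * Sketch: reproduce the removed matching check using the new getter.
 * get_hw_timing() below fakes the register readback that
 * optc1_read_otg_state() performs in the driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct dc_crtc_timing {
	unsigned int h_total;
	unsigned int v_total;
	/* ... addressable size, porches, sync widths, borders ... */
};

static bool get_hw_timing(struct dc_crtc_timing *hw)
{
	hw->h_total = 2200;	/* pretend OTG state readback */
	hw->v_total = 1125;
	return true;
}

static bool timings_match(const struct dc_crtc_timing *requested)
{
	struct dc_crtc_timing hw = {0};

	if (!get_hw_timing(&hw))
		return false;

	/* Field-by-field compare, as the deleted helper did internally. */
	return hw.h_total == requested->h_total &&
	       hw.v_total == requested->v_total;
}

int main(void)
{
	struct dc_crtc_timing want = { .h_total = 2200, .v_total = 1125 };

	printf("match: %d\n", timings_match(&want));
	return 0;
}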
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 83575599672e..c8d795b335ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -547,9 +547,8 @@ struct dcn_otg_state {
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
-bool optc1_is_matching_timing(
- struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing);
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing);
bool optc1_validate_timing(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1599bb971111..15640aedd664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
@@ -471,6 +479,28 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
@@ -642,7 +672,10 @@ struct dce_aux *dcn10_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -751,14 +784,18 @@ struct link_encoder *dcn10_link_encoder_create(
{
struct dcn10_link_encoder *enc10 =
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc10)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn10_link_encoder_construct(enc10,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -1308,6 +1345,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ dc->caps.extended_aux_timeout_support = false;
+
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 9aa258f3550b..06e5bbb4545c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg(
return tg_inst;
}
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth)
+{
+ uint32_t hw_encoding = 0;
+ uint32_t hw_depth = 0;
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enc == NULL ||
+ encoding == NULL ||
+ depth == NULL)
+ return false;
+
+ REG_GET_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, &hw_encoding,
+ DP_COMPONENT_DEPTH, &hw_depth);
+
+ switch (hw_depth) {
+ case DP_COMPONENT_PIXEL_DEPTH_6BPC:
+ *depth = COLOR_DEPTH_666;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_8BPC:
+ *depth = COLOR_DEPTH_888;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_10BPC:
+ *depth = COLOR_DEPTH_101010;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_12BPC:
+ *depth = COLOR_DEPTH_121212;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_16BPC:
+ *depth = COLOR_DEPTH_161616;
+ break;
+ default:
+ *depth = COLOR_DEPTH_UNDEFINED;
+ break;
+ }
+
+ switch (hw_encoding) {
+ case DP_PIXEL_ENCODING_TYPE_RGB444:
+ *encoding = PIXEL_ENCODING_RGB;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR422:
+ *encoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR444:
+ case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
+ *encoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR420:
+ *encoding = PIXEL_ENCODING_YCBCR420;
+ break;
+ default:
+ *encoding = PIXEL_ENCODING_UNDEFINED;
+ break;
+ }
+ return true;
+}
+
static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
@@ -1589,6 +1649,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};
void dcn10_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a512cbea00d1..c9cbc21d121e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -621,4 +621,9 @@ void get_audio_clock_info(
void enc1_reset_hdmi_stream_attribute(
struct stream_encoder *enc);
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
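
dp_get_pixel_format lands in the funcs table as an optional hook, so a caller would normally guard on the pointer before dereferencing it. The pared-down sketch below shows that consumption pattern with stand-in types; these are not the real DC structs, and the returned values are faked.

/*
 * Sketch of consuming an optional stream_encoder hook such as
 * dp_get_pixel_format. Types and values are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum dc_pixel_encoding { PIXEL_ENCODING_UNDEFINED, PIXEL_ENCODING_RGB };
enum dc_color_depth { COLOR_DEPTH_UNDEFINED, COLOR_DEPTH_888 };

struct stream_encoder;

struct stream_encoder_funcs {
	bool (*dp_get_pixel_format)(struct stream_encoder *enc,
				    enum dc_pixel_encoding *encoding,
				    enum dc_color_depth *depth);
};

struct stream_encoder {
	const struct stream_encoder_funcs *funcs;
};

static bool fake_get_pixel_format(struct stream_encoder *enc,
				  enum dc_pixel_encoding *encoding,
				  enum dc_color_depth *depth)
{
	(void)enc;
	*encoding = PIXEL_ENCODING_RGB;	/* pretend DP_PIXEL_FORMAT readback */
	*depth = COLOR_DEPTH_888;
	return true;
}

int main(void)
{
	struct stream_encoder_funcs funcs = {
		.dp_get_pixel_format = fake_get_pixel_format,
	};
	struct stream_encoder enc = { .funcs = &funcs };
	enum dc_pixel_encoding encoding = PIXEL_ENCODING_UNDEFINED;
	enum dc_color_depth depth = COLOR_DEPTH_UNDEFINED;

	/* NULL-check first: encoders that predate the hook leave it unset. */
	if (enc.funcs->dp_get_pixel_format &&
	    enc.funcs->dp_get_pixel_format(&enc, &encoding, &depth))
		printf("encoding=%d depth=%d\n", encoding, depth);

	return 0;
}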
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 16476ed25536..1e1151356e60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -44,16 +44,12 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg2_update_dpp_dto(struct dccg *dccg,
- int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only)
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
- int current_phase, current_modulo;
ASSERT(req_dppclk <= ref_dppclk);
/* need to clamp to 8 bits */
@@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg,
if (req_dppclk > ref_dppclk)
req_dppclk = ref_dppclk;
}
-
- REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst],
- DPPCLK0_DTO_PHASE, &current_phase,
- DPPCLK0_DTO_MODULO, &current_modulo);
-
- if (reduce_divider_only) {
- // requested phase/modulo greater than current
- if (req_dppclk * current_modulo >= current_phase * ref_dppclk) {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, current_phase,
- DPPCLK0_DTO_MODULO, current_modulo);
- }
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- }
-
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, req_dppclk,
+ DPPCLK0_DTO_MODULO, ref_dppclk);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
@@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
void dccg2_init(struct dccg *dccg)
{
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- // Fallthrough intentional to program all available dpp_dto's
- switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
- case 6:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
- /* Fall through */
- case 5:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
- /* Fall through */
- case 4:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
- /* Fall through */
- case 3:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
- /* Fall through */
- case 2:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
- /* Fall through */
- case 1:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
- break;
- default:
- ASSERT(false);
- break;
- }
}
static const struct dccg_funcs dccg2_funcs = {
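
The simplified dccg2_update_dpp_dto() now always programs PHASE = req_dppclk and MODULO = ref_dppclk, so the DTO output is approximately ref * PHASE / MODULO, i.e. the requested clock, with no divider-only special case. A small sketch of that arithmetic follows; the clock values are hypothetical, and the shift-based 8-bit clamp is only illustrative since the patch's actual clamp logic is elided in the hunk above.

/*
 * Sketch of the phase/modulo DTO arithmetic behind dccg2_update_dpp_dto().
 */
#include <stdio.h>

static unsigned int dto_output_khz(unsigned int ref_khz,
				   unsigned int phase, unsigned int modulo)
{
	/* Effective output clock of a phase/modulo DTO. */
	return (unsigned int)((unsigned long long)ref_khz * phase / modulo);
}

int main(void)
{
	const unsigned int ref_khz = 600000;	/* hypothetical ref DPP clock */
	unsigned int modulo = ref_khz;
	unsigned int phase = 300000;		/* requested DPP clock, kHz */

	/*
	 * Both register fields are 8 bits wide, hence the "clamp to 8 bits"
	 * comment in the hunk; scaling phase and modulo together keeps the
	 * ratio, and with it the output frequency, intact.
	 */
	while (modulo > 0xff) {
		modulo >>= 1;
		phase >>= 1;
	}
	if (phase > modulo)
		phase = modulo;

	printf("PHASE=%u MODULO=%u -> ~%u kHz (requested 300000)\n",
	       phase, modulo, dto_output_khz(ref_khz, phase, modulo));
	return 0;
}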
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 74a074a873cd..2205cb0204e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -97,7 +97,7 @@ struct dcn_dccg {
const struct dccg_mask *dccg_mask;
};
-void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only);
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 2f5aade1e882..4d7e45892f08 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
/* Some ASICs does not support FP16 scaling, so we reject modes require this*/
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
@@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 290b2854bd2c..5b03b737b1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -30,16 +30,20 @@
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
-#define TF_REG_LIST_DCN20(id) \
- TF_REG_LIST_DCN(id), \
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
SRI(CM_BLNDGAM_CONTROL, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
@@ -66,9 +70,6 @@
SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
@@ -147,7 +148,12 @@
SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
SRI(CURSOR_CONTROL, CURSOR0_, id), \
SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
@@ -166,27 +172,41 @@
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
-#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
- TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -261,18 +281,9 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -341,9 +352,6 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
@@ -356,7 +364,6 @@
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
- TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
@@ -521,9 +528,14 @@
TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
@@ -560,6 +572,7 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
@@ -593,6 +606,7 @@
type OBUF_MEM_PWR_FORCE;\
type LUT_MEM_PWR_FORCE
+
struct dcn2_dpp_shift {
TF_REG_FIELD_LIST_DCN2_0(uint8_t);
};
@@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps);
-
bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,
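
Editor's note: the header reshuffle above splits TF_REG_LIST_DCN20 into a COMMON block and an UPDATED block, then rebuilds the full DCN2.0 list from the two, so a later ASIC can pick up COMMON while supplying its own UPDATED registers. A compilable toy of that macro composition follows; SRI is reduced to a print here, whereas the real macro expands to register-offset initializers.

```c
#include <stdio.h>

/* Toy SRI: the real macro expands to a register-offset initializer. */
#define SRI(reg) printf("init " #reg "\n")

#define TF_REG_LIST_COMMON() \
	SRI(CM_BLNDGAM_CONTROL); \
	SRI(CM_BLNDGAM_RAMB_START_CNTL_B)

#define TF_REG_LIST_UPDATED() \
	SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK); \
	SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B)

/* Full list rebuilt from the halves, as the hunk does for DCN2.0. */
#define TF_REG_LIST_FULL() \
	TF_REG_LIST_COMMON(); \
	TF_REG_LIST_UPDATED()

int main(void)
{
	TF_REG_LIST_FULL();
	return 0;
}
```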
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b419407af94..63eb377ed9c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
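
Editor's note: the one-line change above makes DCN2.0 advertise simple (not native) 4:2:2 DSC support. The caps are reported through a bitfield union; the sketch below shows that reporting pattern with a simplified layout assumed from the field names in the hunk, not the real dsc_enc_caps definition.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed/simplified layout of the color-format caps bitfield. */
struct dsc_color_formats {
	union {
		struct {
			uint32_t RGB : 1;
			uint32_t YCBCR_444 : 1;
			uint32_t YCBCR_SIMPLE_422 : 1;
			uint32_t YCBCR_NATIVE_422 : 1;
			uint32_t YCBCR_NATIVE_420 : 1;
		} bits;
		uint32_t raw;
	};
};

int main(void)
{
	struct dsc_color_formats fmts = { .bits = {
		.RGB = 1, .YCBCR_444 = 1,
		.YCBCR_SIMPLE_422 = 1,	/* newly advertised in this hunk */
		.YCBCR_NATIVE_420 = 1,
	} };

	printf("simple 4:2:2 supported: %u (raw 0x%x)\n",
	       (unsigned)fmts.bits.YCBCR_SIMPLE_422, (unsigned)fmts.raw);
	return 0;
}
```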
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index cd8bc92ce3ba..880954ac0b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
struct scaling_taps num_taps)
{
uint32_t h_ratio_luma = 1;
- uint32_t h_ratio_chroma = 1;
uint32_t h_taps_luma = num_taps.h_taps;
uint32_t h_taps_chroma = num_taps.h_taps_c;
int32_t h_init_phase_luma = 0;
@@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
h_ratio_luma = -1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
- h_ratio_chroma = h_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
@@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
enum dwb_subsample_position subsample_position)
{
uint32_t v_ratio_luma = 1;
- uint32_t v_ratio_chroma = 1;
uint32_t v_taps_luma = num_taps.v_taps;
uint32_t v_taps_chroma = num_taps.v_taps_c;
int32_t v_init_phase_luma = 0;
@@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
v_ratio_luma = -1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
- v_ratio_chroma = v_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index b83c022e2c6f..8b8438566101 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl
}
static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
@@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
&segment_order_horz, &segment_order_vert))
return false;
- hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
@@ -588,7 +588,7 @@ static void hubbub2_program_watermarks(
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
}
static const struct hubbub_funcs hubbub2_funcs = {
@@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
- .program_watermarks = hubbub2_program_watermarks
+ .program_watermarks = hubbub2_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
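
Editor's note: hubbub2_det_request_size now takes the detile buffer size as a parameter instead of hardcoding 164KB, and the constructor stores the per-ASIC value, so another hubbub (e.g. a later DCN) can be constructed with a different size. A reduced sketch of the shape of the change; the fit rule below is invented for illustration, not the real swath math.

```c
#include <stdbool.h>
#include <stdio.h>

struct hubbub_sketch {
	unsigned int detile_buf_size;	/* per-ASIC, set at construct time */
};

/* Buffer size is now an argument, not a local constant. */
static bool fits_in_detile_buf(unsigned int detile_buf_size,
			       unsigned int swath_bytes)
{
	/* toy rule: a swath may use at most half the detile buffer */
	return swath_bytes <= detile_buf_size / 2;
}

static void hubbub_construct(struct hubbub_sketch *h)
{
	h->detile_buf_size = 164 * 1024;	/* 164KB for DCN2.0 */
}

int main(void)
{
	struct hubbub_sketch h;

	hubbub_construct(&h);
	printf("fits: %d\n", fits_in_detile_buf(h.detile_buf_size, 64 * 1024));
	return 0;
}
```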
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 626117d3b4e9..501532dd523a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -81,6 +81,7 @@ struct dcn20_hubbub {
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 1212da12c414..921a36668ced 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static bool dcn20_set_blend_lut(
+bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut(
return result;
}
-static bool dcn20_set_shaper_3dlut(
+bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -999,72 +999,6 @@ void dcn20_enable_plane(
}
-static void dcn20_program_pipe(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- pipe_ctx->plane_state->update_flags.bits.full_update =
- context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn20_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
-}
-
-static void dcn20_program_all_pipe_in_tree(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) {
- bool blank = !is_pipe_tree_visible(pipe_ctx);
-
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
-
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
-
- if (dc->hwss.update_odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
- }
-
- if (pipe_ctx->plane_state != NULL)
- dcn20_program_pipe(dc, pipe_ctx, context);
-
- if (pipe_ctx->bottom_pipe != NULL) {
- ASSERT(pipe_ctx->bottom_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
- } else if (pipe_ctx->next_odm_pipe != NULL) {
- ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context);
- }
-}
-
void dcn20_pipe_control_lock_global(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1124,114 +1058,456 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_apply_ctx_for_surface(
- struct dc *dc,
- const struct dc_stream_state *stream,
- int num_planes,
- struct dc_state *context)
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
- int i;
- struct timing_generator *tg;
- bool removed_pipe[6] = { false };
- bool interdependent_update = false;
- struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
- struct pipe_ctx *prev_top_pipe_to_program =
- find_top_pipe_for_stream(dc, dc->current_state, stream);
- DC_LOGGER_INIT(dc->ctx->logger);
+ new_pipe->update_flags.raw = 0;
- if (!top_pipe_to_program)
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- pipe_ctx->stream == old_pipe_ctx->stream)
- pipe_ctx->stream_res.gsl_group =
- old_pipe_ctx->stream_res.gsl_group;
+ /* Detect top pipe only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
}
- tg = top_pipe_to_program->stream_res.tg;
+ /*
+ * Detect opp / tg change, only set on change, not on enable
+ * Assume mpcc inst == pipe index; if not, this code needs to be updated,
+ * since mpcc is what these changes affect. In fact, our whole sequence
+ * currently makes this assumption, given how the hubp reset is matched
+ * to the same-index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp
+ || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
+ && old_pipe->bottom_pipe->plane_res.mpcc_inst
+ != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+}
- interdependent_update = top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update;
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- if (interdependent_update)
- lock_all_pipes(dc, context, true);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+ if (pipe_ctx->update_flags.bits.dppclk)
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
- if (num_planes == 0) {
- /* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ /* TODO: Need an input parameter to tell which OTG the current DCHUB
+ * pipe is tied to. VTG is within DCHUBBUB, which is a common block
+ * shared by each pipe's HUBP. VTG maps 1:1 to OTG; each pipe's HUBP
+ * selects which VTG to use.
+ */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ //TODO :for CNVC set scale and bias registers if necessary
+ dcn10_build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
- /* Disconnect unused mpcc */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
+ if (pipe_ctx->update_flags.bits.mpcc
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ /* Need mpcc to be idle if changing opp */
+ if (pipe_ctx->update_flags.bits.opp_changed) {
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ int mpcc_inst;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
+ continue;
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ }
}
+ dc->hwss.update_mpcc(dc, pipe_ctx);
+ }
- if ((!pipe_ctx->plane_state ||
- pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
- old_pipe_ctx->plane_state &&
- old_pipe_ctx->stream_res.tg == tg) {
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /* Any updates are handled in the dc interface; just apply the existing state for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
- DC_LOG_DC("Reset mpcc for pipe %d\n",
- old_pipe_ctx->pipe_idx);
- }
+ /* Any updates are handled in the dc interface; just apply
+ * the existing state for plane enable / opp change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap*/
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /*call the dcn2 method which uses mpc csc*/
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
}
- if (num_planes > 0)
- dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
+ }
- /* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+ if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
- if (interdependent_update)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
+static void dcn20_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ /* Only need to unblank on top pipe */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
+ && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+
+ if (pipe_ctx->update_flags.bits.global_sync) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm)
+ dc->hwss.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ dcn20_enable_plane(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only
+ * do gamma programming when powering on; an internal memcmp avoids
+ * updating slave planes
+ */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+}
+
+static bool does_pipe_need_lock(struct pipe_ctx *pipe)
+{
+ if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->update_flags.raw)
+ return true;
+ if (pipe->bottom_pipe)
+ return does_pipe_need_lock(pipe->bottom_pipe);
+
+ return false;
+}
+
+static void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ int i;
+ bool pipe_locked[MAX_PIPES] = {false};
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
+ context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (!context->res_ctx.pipe_ctx[i].top_pipe &&
+ does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- /* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
- !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
- continue;
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
+ pipe_locked[i] = true;
+ }
- pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
- pipe_ctx->plane_res.hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
- if (interdependent_update)
- lock_all_pipes(dc, context, false);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ dcn20_program_pipe(dc, pipe, context);
+ pipe = pipe->bottom_pipe;
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
+ && dc->hwss.program_all_writeback_pipes_in_tree)
+ dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ }
+ }
+ /* Unlock all locked pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
- dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ if (pipe_locked[i]) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -1239,15 +1515,22 @@ static void dcn20_apply_ctx_for_surface(
* will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
* is unsupported on DCN.
*/
- i = 0;
- if (num_planes > 0 && top_pipe_to_program &&
- (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) {
- while (i < TIMEOUT_FOR_PIPE_ENABLE_MS &&
- top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) {
- i += 1;
- msleep(1);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ msleep(1);
}
}
+
+ /* WA to apply WM setting */
+ if (dc->hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}
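
Editor's note: the refactor above replaces the apply_ctx_for_surface tree walk with per-pipe update flags. dcn20_detect_pipe_changes fills a bitfield by diffing old and new pipe state, and dcn20_program_front_end_for_ctx then locks, disconnects, and programs only what the flags call for. A small compilable sketch of the flag pattern; the union layout is assumed and trimmed from the field names used in the hunk.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed/simplified layout of pipe_ctx update_flags. */
union pipe_update_flags {
	struct {
		uint32_t enable : 1;
		uint32_t disable : 1;
		uint32_t odm : 1;
		uint32_t global_sync : 1;
		uint32_t opp_changed : 1;
		uint32_t tg_changed : 1;
		uint32_t mpcc : 1;
		uint32_t dppclk : 1;
		uint32_t hubp_interdependent : 1;
		uint32_t hubp_rq_dlg_ttu : 1;
		uint32_t gamut_remap : 1;
		uint32_t scaler : 1;
		uint32_t viewport : 1;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union pipe_update_flags flags;

	flags.raw = 0;			/* detection starts from "no change" */
	flags.bits.scaler = 1;		/* diffing sets only the bits needed */
	flags.bits.viewport = 1;

	if (flags.raw)			/* one test covers every bit at once */
		printf("pipe needs programming (0x%x)\n", (unsigned)flags.raw);
	return 0;
}
```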
@@ -1319,8 +1602,12 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
if (pipe_ctx->prev_odm_pipe == NULL)
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1337,7 +1624,8 @@ bool dcn20_update_bandwidth(
static void dcn20_enable_writeback(
struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info)
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -1354,7 +1642,7 @@ static void dcn20_enable_writeback(
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
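
Editor's note: one-line but easy to miss — dcn20_enable_writeback now reads the MCIF arbitration parameters from the dc_state passed in rather than from dc->current_state, which mid-commit can still describe the outgoing configuration. A toy sketch of the fix's shape, with the state reduced to a single field:

```c
#include <stdio.h>

/* Reduced stand-in for dc_state: only the field this sketch needs. */
struct dc_state_sketch {
	int mcif_wb_arb;
};

static void config_mcif_arb(int arb)
{
	printf("MCIF_WB arbitration = %d\n", arb);
}

/* Takes the context being applied, as the hunk now does. */
static void enable_writeback(struct dc_state_sketch *context)
{
	config_mcif_arb(context->mcif_wb_arb);	/* was: dc->current_state */
}

int main(void)
{
	struct dc_state_sketch new_context = { .mcif_wb_arb = 42 };

	enable_writeback(&new_context);
	return 0;
}
```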
@@ -1702,6 +1990,28 @@ static void dcn20_reset_hw_ctx_wrap(
}
}
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const struct tg_color pipe_colors[6] = {
+ {MAX_TG_COLOR_VALUE, 0, 0}, // red
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // magenta
+ {0, MAX_TG_COLOR_VALUE, 0}, // green
+ {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
+ {0, 0, MAX_TG_COLOR_VALUE}, // blue
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
+ };
+
+ struct pipe_ctx *top_pipe = pipe_ctx;
+
+ while (top_pipe->top_pipe) {
+ top_pipe = top_pipe->top_pipe;
+ }
+
+ *color = pipe_colors[top_pipe->pipe_idx];
+}
+
static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
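
Editor's note: dcn20_get_mpctree_visual_confirm_color climbs top_pipe links to the root of the MPC tree and keys the debug color off the root's pipe_idx, so every plane blended in one tree shows the same border color. A reduced sketch of the walk, with pipe_ctx trimmed to just the linkage it needs:

```c
#include <stdio.h>

struct pipe_ctx_sketch {
	int pipe_idx;
	struct pipe_ctx_sketch *top_pipe;
};

static const char *pipe_colors[6] = {
	"red", "magenta", "green", "purple", "blue", "orange",
};

int main(void)
{
	struct pipe_ctx_sketch root = { .pipe_idx = 2, .top_pipe = NULL };
	struct pipe_ctx_sketch leaf = { .pipe_idx = 5, .top_pipe = &root };
	struct pipe_ctx_sketch *top = &leaf;

	/* walk to the root of the MPC tree */
	while (top->top_pipe)
		top = top->top_pipe;

	/* every pipe in the same tree reports the root's color */
	printf("tree color: %s\n", pipe_colors[top->pipe_idx]);
	return 0;
}
```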
@@ -1719,6 +2029,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+ dcn20_get_mpctree_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)
@@ -1919,8 +2232,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
- if (link->dc->hwss.program_dmdata_engine)
- link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (link->dc->hwss.program_dmdata_engine)
+ link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
link->dc->hwss.update_info_frame(pipe_ctx);
@@ -2095,7 +2410,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc)
dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
+ dc->hwss.apply_ctx_for_surface = NULL;
+ dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
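
Editor's note: the constructor change at the bottom of this file is the switchover point — apply_ctx_for_surface is NULLed and program_front_end_for_ctx installed, with core dc assumed to prefer the whole-context hook when it is non-NULL. A minimal sketch of that hook dispatch:

```c
#include <stdio.h>

/* Trimmed stand-in for hw_sequencer_funcs: just the two hooks swapped. */
struct hwss_sketch {
	void (*apply_ctx_for_surface)(void);
	void (*program_front_end_for_ctx)(void);
};

static void program_front_end_for_ctx(void)
{
	printf("programming full front end from the new context\n");
}

int main(void)
{
	struct hwss_sketch hwss = {
		.apply_ctx_for_surface = NULL,	/* old per-stream path off */
		.program_front_end_for_ctx = program_front_end_for_ctx,
	};

	if (hwss.program_front_end_for_ctx)	/* assumed caller order */
		hwss.program_front_end_for_ctx();
	else if (hwss.apply_ctx_for_surface)
		hwss.apply_ctx_for_surface();
	return 0;
}
```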
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 92ab3dd91814..3098f1049ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -96,4 +96,20 @@ void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
void dcn20_display_init(struct dc *dc);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 3736b5548a25..0c98a0bbbd14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -91,6 +91,13 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
};
struct dpcssys_phy_seq_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 2137e2be2140..3b613fb93ef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
}
void optc2_set_dwb_source(struct timing_generator *optc,
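
Editor's note: the optc2_get_optc_source change works around VBIOS leaving OPTC_NUM_OF_INPUT_SEGMENT stale — when the second source OPP id reads back as 0xf (treated here as the invalid/reset value), the count is forced to one. Sketch of that override, with the register reads replaced by sample values:

```c
#include <stdio.h>

int main(void)
{
	/* sample values standing in for the OPTC register reads */
	unsigned int src_opp_id_0 = 0, src_opp_id_1 = 0xf;
	int num_of_src_opp = 2;	/* what OPTC_NUM_OF_INPUT_SEGMENT implied */

	/* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
	if (src_opp_id_1 == 0xf)
		num_of_src_opp = 1;

	printf("opp0=%u opp1=%u count=%d\n", src_opp_id_0, src_opp_id_1,
	       num_of_src_opp);
	return 0;
}
```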
@@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
- .is_matching_timing = optc1_is_matching_timing
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn20_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 6b2f2f1a1c9c..bbd1c98564be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN10
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN10
};
#define dwbc_regs_dcn2(id)\
@@ -732,6 +734,42 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
@@ -825,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
@@ -922,7 +960,10 @@ struct dce_aux *dcn20_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1042,14 +1083,18 @@ struct link_encoder *dcn20_link_encoder_create(
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc20)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn20_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -1159,6 +1204,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@@ -1601,7 +1648,7 @@ static void swizzle_to_dml_params(
}
}
-static bool dcn20_split_stream_for_odm(
+bool dcn20_split_stream_for_odm(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
@@ -1622,7 +1669,6 @@ static bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.dsc = NULL;
#endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
- ASSERT(!next_odm_pipe->next_odm_pipe);
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
}
@@ -1679,7 +1725,7 @@ static bool dcn20_split_stream_for_odm(
return true;
}
-static void dcn20_split_stream_for_mpc(
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
@@ -1765,7 +1811,7 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
- if (!resource_are_streams_timing_synchronizable(
+ if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
synchronized_vblank = false;
@@ -1897,7 +1943,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
- pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
+ pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (true) /* todo */
@@ -1911,6 +1957,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+#endif
+
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
@@ -2132,7 +2183,7 @@ void dcn20_set_mcif_arb_params(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -2167,7 +2218,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
}
#endif
-static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -2207,7 +2258,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
*/
if (secondary_pipe == NULL) {
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
- if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
preferred_pipe_idx = j;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
@@ -2243,29 +2295,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-bool dcn20_fast_validate_bw(
+void dcn20_merge_pipes_for_validate(
struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *pipe_split_from,
- int *vlevel_out)
+ struct dc_state *context)
{
- bool out = false;
-
- int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
- bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
- bool force_split = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- bool failed_non_odm_dsc = false;
-#endif
- int split_threshold = dc->res_pool->pipe_count / 2;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
-
-
- ASSERT(pipes);
- if (!pipes)
- return false;
+ int i;
/* merge previously split odm pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2320,51 +2354,19 @@ bool dcn20_fast_validate_bw(
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
- if (dc->res_pool->funcs->populate_dml_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
- else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
-
- *pipe_cnt_out = pipe_cnt;
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- context->bw_ctx.dml.ip.odm_capable = 0;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.ip.odm_capable = odm_capable;
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* 1 dsc per stream dsc validation */
- if (vlevel <= context->bw_ctx.dml.soc.num_states)
- if (!dcn20_validate_dsc(dc, context)) {
- failed_non_odm_dsc = true;
- vlevel = context->bw_ctx.dml.soc.num_states + 1;
- }
-#endif
-
- if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
- || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
- context->commit_hints.full_update_needed = true;
-
- /*initialize pipe_just_split_from to invalid idx*/
- for (i = 0; i < MAX_PIPES; i++)
- pipe_split_from[i] = -1;
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split)
+{
+ int i, pipe_idx, vlevel_split;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
- /* Single display only conditionals get set here */
+ /* Single display loop, exits if there is more than one display */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2391,38 +2393,107 @@ bool dcn20_fast_validate_bw(
if (exit_loop)
break;
}
-
- if (context->stream_count > split_threshold)
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
- vlevel_unsplit = vlevel;
+ /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ pipe_idx++;
+ }
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ }
+
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
- break;
+
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ split[i] = true;
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = true;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+ }
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
pipe_idx++;
}
+ return vlevel;
+}
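
The avoid-split search in dcn20_validate_apply_pipe_split_flags() is easier to follow in isolation. Below is a minimal standalone sketch, assuming a two-dimensional no_of_dpp[vlevel][pipe] table as a stand-in for the DML vba NoOfDPP output; the table shape, sizes and names are illustrative, not the driver's real layout.

#define NUM_STATES 6	/* illustrative; the driver uses soc.num_states */
#define NUM_PIPES  4

/*
 * no_of_dpp[vlevel][pipe] == 1 means the pipe fits unsplit at that level.
 * The running level carries forward across pipes, exactly as vlevel does
 * in the loop above; vlevel_split plays the role of "fallback" here.
 */
static int lowest_unsplit_vlevel(const int no_of_dpp[NUM_STATES + 1][NUM_PIPES],
				 int num_pipes, int vlevel)
{
	int pipe, v;

	for (pipe = 0; pipe < num_pipes; pipe++) {
		int fallback = vlevel;

		for (v = vlevel; v <= NUM_STATES; v++)
			if (no_of_dpp[v][pipe] == 1)
				break;
		if (v > NUM_STATES)	/* impossible to leave unsplit */
			v = fallback;
		vlevel = v;
	}
	return vlevel;
}
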
+
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out)
+{
+ bool out = false;
+ bool split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ dcn20_merge_pipes_for_validate(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);
+
+ *pipe_cnt_out = pipe_cnt;
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+
+	/* initialize pipe_split_from to invalid idx */
+ for (i = 0; i < MAX_PIPES; i++)
+ pipe_split_from[i] = -1;
+
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
- bool need_split = true;
- bool need_split3d;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- force_split = true;
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
- }
- if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
@@ -2440,40 +2511,26 @@ bool dcn20_fast_validate_bw(
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
- need_split3d = ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE));
-
- if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
- need_split = false;
- vlevel = vlevel_unsplit;
- context->bw_ctx.dml.vba.maxMpcComb = 0;
- } else
- need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
-
/* We do not support mpo + odm at the moment */
if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (need_split3d || need_split || force_split) {
+ if (split[i]) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
- ASSERT(hsplit_pipe || force_split);
- if (!hsplit_pipe)
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe) {
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
continue;
-
+ }
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe))
goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
} else
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
@@ -2487,7 +2544,7 @@ bool dcn20_fast_validate_bw(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
- if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
+ if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
@@ -2506,7 +2563,7 @@ validate_out:
return out;
}
-void dcn20_calculate_wm(
+static void dcn20_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
@@ -2527,7 +2584,7 @@ void dcn20_calculate_wm(
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
@@ -2536,7 +2593,7 @@ void dcn20_calculate_wm(
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
@@ -2579,6 +2636,11 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2590,6 +2652,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2601,6 +2667,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2610,6 +2680,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
}
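
dcn20_calculate_wm() repeats the same fill for watermark sets a through d at successive voltage levels; each DML getter returns microseconds while the dc watermark fields store nanoseconds, hence the * 1000 on every line. A hedged sketch of one such fill, with a trimmed-down illustrative struct (the real dcn_watermarks type has more fields):

struct wm_set {
	unsigned int urgent_ns;
	unsigned int cstate_enter_plus_exit_ns;
	unsigned int cstate_exit_ns;
	unsigned int pstate_change_ns;
};

/* DML reports times in microseconds; dc stores watermarks in nanoseconds */
static void fill_wm_set(struct wm_set *wm,
			double urgent_us, double sr_enter_us,
			double sr_exit_us, double dram_change_us)
{
	wm->urgent_ns = urgent_us * 1000;
	wm->cstate_enter_plus_exit_ns = sr_enter_us * 1000;
	wm->cstate_exit_ns = sr_exit_us * 1000;
	wm->pstate_change_ns = dram_change_us * 1000;
}
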
void dcn20_calculate_dlg_params(
@@ -2629,7 +2703,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
@@ -2645,8 +2719,8 @@ void dcn20_calculate_dlg_params(
continue;
if (!visited[pipe_idx]) {
- display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src;
- display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest;
+ display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
@@ -2806,7 +2880,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
ASSERT(false);
restore_dml_state:
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
return voltage_supported;
@@ -2892,6 +2965,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
@@ -2900,8 +2974,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
- ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
@@ -2947,7 +3019,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
@@ -2962,7 +3034,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -2970,7 +3042,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
}
}
-static void cap_soc_clocks(
+void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks)
{
@@ -3037,10 +3109,10 @@ static void cap_soc_clocks(
}
}
-static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
+ struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -3048,12 +3120,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (num_states == 0)
return;
+ memset(calculated_states, 0, sizeof(calculated_states));
+
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
- else
- // Accounting for SOC/DCF relationship, we can go as high as
- // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506.
- min_dcfclk = 507;
+ else {
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ min_dcfclk = 310;
+ else
+			// Accounting for the SOC/DCF relationship, we can go as high as
+			// 506 MHz in Vmin.
+ min_dcfclk = 506;
+ }
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
@@ -3093,7 +3171,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
bb->clock_limits[num_calculated_states].state = bb->num_states;
}
-static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
@@ -3292,14 +3370,14 @@ static bool init_soc_bounding_box(struct dc *dc,
}
if (clock_limits_available && uclk_states_available && num_states)
- update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
+ dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
else if (clock_limits_available)
- cap_soc_clocks(loaded_bb, max_clocks);
+ dcn20_cap_soc_clocks(loaded_bb, max_clocks);
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
- patch_bounding_box(dc, loaded_bb);
+ dcn20_patch_bounding_box(dc, loaded_bb);
return true;
}
@@ -3345,6 +3423,7 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.hw_3d_lut = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 44f95aa0d61e..fef473d68a4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
-
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb);
+void dcn20_cap_soc_clocks(
+ struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table max_clocks);
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states);
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
@@ -116,6 +119,31 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context);
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split);
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
+#endif
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe);
+bool dcn20_split_stream_for_odm(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe);
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool dcn20_fast_validate_bw(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 5ab9d6240498..4b3401616434 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ff50ae71fe27..14113ccf498d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
+DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index d1266741763b..f546260c15b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
@@ -51,7 +52,7 @@
#ifdef NUM_VMID
#undef NUM_VMID
#endif
-#define NUM_VMID 1
+#define NUM_VMID 16
static uint32_t convert_and_clamp(
uint32_t wm_ns,
@@ -71,56 +72,76 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
//Poll until RIOMMU_ACTIVE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100);
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
- //Reflect the power status of DCHUBBUB
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
- //Start rIOMMU prefetching
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
- // Enable dynamic clock gating
- REG_UPDATE_4(DCHVM_CLK_CTRL,
- HVM_DISPCLK_R_GATE_DIS, 0,
- HVM_DISPCLK_G_GATE_DIS, 0,
- HVM_DCFCLK_R_GATE_DIS, 0,
- HVM_DCFCLK_G_GATE_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
- //Poll until HOSTVM_PREFETCH_DONE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ }
}
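
The open-coded loop above replaces REG_WAIT with an explicit bounded poll: re-read RIOMMU_ACTIVE up to 100 times with a 5 us delay between reads, and only touch the rIOMMU if it actually came up. The same pattern in standalone form, with hypothetical read_status()/delay_us() callbacks standing in for REG_GET and udelay:

#include <stdbool.h>
#include <stdint.h>

/*
 * Bounded poll: re-read a status bit up to max_retries times, sleeping
 * interval_us between reads; reports whether the bit ever came up.
 */
static bool poll_bit_set(uint32_t (*read_status)(void),
			 void (*delay_us)(unsigned int),
			 unsigned int interval_us, unsigned int max_retries)
{
	unsigned int i;

	for (i = 0; i < max_retries; i++) {
		if (read_status())
			return true;
		delay_us(interval_us);
	}
	return false;
}
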
-static int hubbub21_init_dchub(struct hubbub *hubbub,
+int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
dcn21_dchvm_init(hubbub);
return NUM_VMID;
}
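
hubbub21_init_dchub() now shifts every aperture address right by 24 and every page table address right by 12 before programming. Read off the shifts, the registers appear to take 16 MB granules for the FB/AGP apertures and 4 KB pages for the GART; that granularity is an inference from the code, not something documented here. The two conversions in isolation:

#include <stdint.h>

/* FB/AGP aperture registers: byte address -> 16 MB granules (>> 24) */
static inline uint32_t to_aperture_units(uint64_t byte_addr)
{
	return (uint32_t)(byte_addr >> 24);
}

/* GART page table registers: byte address -> 4 KB pages (>> 12) */
static inline uint64_t to_page_units(uint64_t byte_addr)
{
	return byte_addr >> 12;
}
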
-static void hubbub21_program_urgent_watermarks(
+void hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
+ hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ }
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
+ hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ }
+
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
@@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
+ hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ }
+
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
@@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
}
+
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
+ hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ }
}
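
Each new urgent-latency block above follows the same recipe: remember the raw ns value, run it through convert_and_clamp() against refclk with a 0x1fffff field limit, and program the per-state REFCYC_PER_TRIP_TO_MEMORY register. A hedged reconstruction of the conversion, assuming simple truncating ns-to-refclk-cycle scaling (the real helper may round differently):

#include <stdint.h>

static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
				       uint32_t clamp_value)
{
	/* 1 us = 1000 ns, so ns * MHz / 1000 yields refclk cycles */
	uint64_t cycles = (uint64_t)wm_ns * refclk_mhz / 1000;

	return cycles > clamp_value ? clamp_value : (uint32_t)cycles;
}
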
-static void hubbub21_program_stutter_watermarks(
+void hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks(
}
}
-static void hubbub21_program_pstate_watermarks(
+void hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
+void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+}
static const struct hubbub_funcs hubbub21_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub21_init_dchub,
- .init_vm_ctx = NULL,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
void hubbub21_construct(struct dcn20_hubbub *hubbub,
@@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index 6ff3cdb89178..c4840dfb1fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -36,6 +36,10 @@
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DCHVM_CTRL0), \
SR(DCHVM_MEM_CTRL), \
@@ -44,16 +48,9 @@
SR(DCHVM_RIOMMU_STAT0)
#define HUBBUB_REG_LIST_DCN21()\
- HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
- HUBBUB_HVM_REG_LIST(), \
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE)
+ HUBBUB_HVM_REG_LIST()
#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \
@@ -102,7 +99,7 @@
HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\
- HUBBUB_MASK_SH_LIST_HVM(mask_sh),\
+ HUBBUB_MASK_SH_LIST_HVM(mask_sh), \
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -114,11 +111,28 @@
HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
void dcn21_dchvm_init(struct hubbub *hubbub);
+int hubbub21_init_dchub(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
+void hubbub21_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub21_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index a00af513aa2b..2f5a5867e674 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -22,6 +22,8 @@
* Authors: AMD
*
*/
+
+#include "dcn10/dcn10_hubp.h"
#include "dcn21_hubp.h"
#include "dm_services.h"
@@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
- .hubp_program_surface_config = hubp2_program_surface_config,
+ .hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
new file mode 100644
index 000000000000..b25215cadf85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dce/dce_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "vmid.h"
+#include "reg_helper.h"
+#include "hw/clk_mgr.h"
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+/* Temporary: read settings from registers; in the future the values will come from the KMD directly */
+static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
+ struct dce_hwseq *hws)
+{
+ uint32_t page_table_base_hi;
+ uint32_t page_table_base_lo;
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);
+
+ config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;
+
+}
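
The page directory address is split across two 32-bit registers and re-assembled in mmhub_update_page_table_config(). The packing in isolation, as pure bit manipulation:

#include <stdint.h>

/* combine the HI32/LO32 register halves into one 64-bit address */
static inline uint64_t pack_hi_lo(uint32_t hi32, uint32_t lo32)
{
	return ((uint64_t)hi32 << 32) | lo32;
}
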
+
+static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+
+ mmhub_update_page_table_config(&config, hws);
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
+// Workaround for Renoir s0i3: if the register is already programmed, bypass golden init.
+
+static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t value = 0;
+
+ value = REG_READ(MICROSECOND_TIME_BASE_DIV);
+
+ return value != 0x00120464;
+}
+
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+}
+
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
+}
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+ dcn20_hw_sequencer_construct(dc);
+ dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
+ dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
+ dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
+ dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
+}
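
dcn21_hw_sequencer_construct() is the usual construct-then-override pattern: build the full DCN2.0 hook table, then repoint only the hooks DCN2.1 specializes. A generic sketch of the pattern, with hypothetical ops and hook names:

struct hw_ops {
	void (*init_sys_ctx)(void);
	void (*optimize_pwr_state)(void);
};

static void base_init_sys_ctx(void) { /* DCN2.0 behavior */ }
static void base_optimize_pwr_state(void) { /* DCN2.0 behavior */ }
static void derived_init_sys_ctx(void) { /* DCN2.1-specific behavior */ }

static void base_construct(struct hw_ops *ops)
{
	ops->init_sys_ctx = base_init_sys_ctx;
	ops->optimize_pwr_state = base_optimize_pwr_state;
}

static void derived_construct(struct hw_ops *ops)
{
	base_construct(ops);			  /* inherit everything */
	ops->init_sys_ctx = derived_init_sys_ctx; /* repoint one hook */
}
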
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
new file mode 100644
index 000000000000..be67b62e6fb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN21_H__
+#define __DC_HWSS_DCN21_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
new file mode 100644
index 000000000000..e8a504ca5890
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include <linux/delay.h>
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn21_link_encoder.h"
+#include "stream_encoder.h"
+
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
+ // RBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 238,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 44237,
+ .mpllb_ssc_stepsize = 59454,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 2,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 2,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ // HBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 1,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR2
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR3
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 304,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 49152,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 55296,
+ .mpllb_ssc_stepsize = 74318,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 1,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 16,
+ .hdmi_pixel_clk_div = 0,
+ },
+};
+
+
+static bool update_cfg_data(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings,
+ struct dpcssys_phy_seq_cfg *cfg)
+{
+ int i;
+
+ cfg->load_sram_fw = false;
+ cfg->use_calibration_setting = true;
+
+ //TODO: need to implement a proper lane mapping for Renoir.
+ for (i = 0; i < 4; i++)
+ cfg->lane_en[i] = true;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
+ break;
+ case LINK_RATE_HIGH:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
+ break;
+ case LINK_RATE_HIGH2:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
+ break;
+ case LINK_RATE_HIGH3:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
+ break;
+ default:
+ DC_LOG_ERROR("%s: No supported link rate found %X!\n",
+ __func__, link_settings->link_rate);
+ return false;
+ }
+
+ return true;
+}
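
update_cfg_data() dispatches one precomputed MPLL configuration per DP link rate (RBR/HBR/HBR2/HBR3 map to dcn21_mpll_cfg_ref[0..3]). Since the four table entries line up with the four supported rates, the switch amounts to a bounds-checked index, as this sketch with an illustrative enum shows (the real dc link-rate encoding differs):

enum dp_rate { RATE_RBR, RATE_HBR, RATE_HBR2, RATE_HBR3, RATE_COUNT };

/* one precomputed MPLL configuration per supported DP link rate */
static int mpll_cfg_index(enum dp_rate rate)
{
	int r = (int)rate;

	return (r >= 0 && r < RATE_COUNT) ? r : -1; /* -1: unsupported */
}
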
+
+void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
+
+ if (!value && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+}
+
+bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
+
+	// if value == 1, alt mode is disabled; otherwise it is enabled
+ return !value;
+}
+
+bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ int value;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+
+ if (value == 1) {
+ ASSERT(0);
+ return false;
+ }
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 0);
+
+ udelay(40);
+
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+ if (value == 1) {
+ ASSERT(0);
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ return false;
+ }
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);
+
+ return true;
+}
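
The USB-C branch of dcn21_link_encoder_acquire_phy() is a request/ack handshake: bail if the PHY is already disabled for DP-alt, request it by clearing the disable ack, wait 40 us, and if the disable bit is still set, hand the PHY back by re-asserting the ack. The same sequence generically, with hypothetical accessors replacing REG_GET/REG_UPDATE (the refclk enable that follows on success is left to the caller):

#include <stdbool.h>
#include <stdint.h>

static bool acquire_shared_phy(uint32_t (*read_dpalt_disable)(void),
			       void (*write_disable_ack)(uint32_t),
			       void (*delay_us)(unsigned int))
{
	if (read_dpalt_disable())
		return false;		/* PHY already taken away */

	write_disable_ack(0);		/* request the PHY */
	delay_us(40);			/* settle time used by the driver */

	if (read_dpalt_disable()) {
		write_disable_ack(1);	/* lost the race: hand it back */
		return false;
	}
	return true;			/* caller may now enable DP refclk */
}
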
+
+
+
+static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);
+
+}
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
+ struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;
+
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ return;
+ }
+
+ if (!update_cfg_data(enc10, link_settings, cfg))
+ return;
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
+
+}
+
+void dcn21_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+}
+
+void dcn21_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ dcn10_link_encoder_disable_output(enc, signal);
+
+ if (dc_is_dp_signal(signal))
+ dcn21_link_encoder_release_phy(enc);
+}
+
+
+static const struct link_encoder_funcs dcn21_link_enc_funcs = {
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .read_state = link_enc2_read_state,
+#endif
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn21_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn21_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+};
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc21->enc10;
+
+ enc10->base.funcs = &dcn21_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+	/* set the flag to indicate whether the driver polls the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
+	 * and LVDS. The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume the board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0, so adding
+	 * DIGG should not hurt DCE 8.0. This lets DCE 8.1 share as much of
+	 * DCE 8.0 as possible.
+	 */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
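
The tail of the constructor layers the HDMI_6GB_EN flag: default to enabled (mirroring Windows), let the VBIOS capability info override it, and let the hdmi20_disable debug knob force it off last. That layering in one small function, with illustrative names:

#include <stdbool.h>

static bool resolve_hdmi_6gb_en(bool vbios_ok, bool vbios_value,
				bool debug_hdmi20_disable)
{
	bool en = true;			/* default mirrors Windows */

	if (vbios_ok)
		en = vbios_value;	/* VBIOS capability overrides */
	if (debug_hdmi20_disable)
		en = false;		/* debug knob wins last */
	return en;
}
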
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
new file mode 100644
index 000000000000..1d7a1a51f13d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN21_H__
+#define __DC_LINK_ENCODER__DCN21_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+struct dcn21_link_encoder {
+ struct dcn10_link_encoder enc10;
+ struct dpcssys_phy_seq_cfg phy_seq_cfg;
+};
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index de182185fe1f..459bd9a5caed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dc.h"
@@ -42,11 +40,11 @@
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
-#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn20/dcn20_opp.h"
#include "dcn20/dcn20_dsc.h"
-#include "dcn20/dcn20_link_encoder.h"
+#include "dcn21/dcn21_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
@@ -84,8 +82,9 @@
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
@@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 4,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
@@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 5,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
},
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -350,6 +349,30 @@ static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_6)
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN20_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN20(_MASK)
+};
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
static const struct dcn21_dmcub_registers dmcub_regs = {
DMCUB_REG_LIST_DCN()
@@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(4),
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
@@ -636,6 +667,11 @@ static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -683,7 +719,10 @@ static struct dce_aux *dcn21_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -726,11 +765,12 @@ static const struct resource_caps res_cap_rn = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
- .num_audio = 6, // 6 audio endpoints. 4 audio streams
+ .num_audio = 4, // 4 audio endpoints. 4 audio streams
.num_stream_encoder = 5,
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
+ .num_vmid = 1,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
#endif
@@ -796,15 +836,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 5120,/*upto 5K*/
+ .max_downscale_src_width = 3840,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
- .disable_48mhz_pwrdwn = true,
+ .disable_48mhz_pwrdwn = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -939,7 +979,7 @@ static void destruct(struct dcn21_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.pp_smu != NULL)
- dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ dcn21_pp_smu_destroy(&pool->base.pp_smu);
}
@@ -969,11 +1009,35 @@ static void calculate_wm_set_for_vlevel(
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
#endif
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
+static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+{
+ kernel_fpu_begin();
+ if (dc->bb_overrides.sr_exit_time_ns) {
+ bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ bb->sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.urgent_latency_ns) {
+ bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns) {
+ bb->dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+}
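The helper above funnels driver-level latency overrides, given in integer nanoseconds, into the double-precision microsecond fields DML works with; the kernel_fpu_begin()/kernel_fpu_end() bracket is needed because the conversion uses floating point in kernel context. A minimal usage sketch (the 12000 ns value and the override source are illustrative, not from this diff):

	/* e.g. an override populated from module parameters or debugfs */
	dc->bb_overrides.sr_exit_time_ns = 12000;	/* 12 us, illustrative */

	/* applied just before watermark calculation, as in dcn21_calculate_wm() */
	patch_bounding_box(dc, &context->bw_ctx.dml.soc);
	/* context->bw_ctx.dml.soc.sr_exit_time_us now reads 12.0 */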
+
void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -988,6 +1052,8 @@ void dcn21_calculate_wm(
ASSERT(bw_params);
+ patch_bounding_box(dc, &context->bw_ctx.dml.soc);
+
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -1021,7 +1087,7 @@ void dcn21_calculate_wm(
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
&context->res_ctx, pipes);
else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
+ pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
&context->res_ctx, pipes);
}
@@ -1271,6 +1337,12 @@ struct display_stream_compressor *dcn21_dsc_create(
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ /*
+ TODO: Fix this function to calculate correct values.
+ There are known issues with this function that still need to be
+ investigated; use hardcoded known-good values for now.
+
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i;
@@ -1278,7 +1350,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
- dcn2_1_soc.num_states = 0;
for (i = 0; i < clk_table->num_entries; i++) {
@@ -1286,10 +1357,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- /* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
- dcn2_1_soc.num_states++;
}
+ dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
+ dcn2_1_soc.num_states = i;
+ */
}
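For reference, a hedged sketch of what the commented-out body appears to intend once the known issues are resolved: copy each SMU clock level into the DML table, then duplicate the final level as a terminating state (the memclk-to-MT/s factor of 16 is carried over from the diff and is itself flagged there as unverified):

	for (i = 0; i < clk_table->num_entries; i++) {
		dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
		dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
		dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
	}
	/* repeat the last real level so DML sees a terminating entry */
	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
	dcn2_1_soc.num_states = i;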
/* Temporary placeholder until we can get these from fuses */
@@ -1317,32 +1389,42 @@ static struct dpm_clocks dummy_clocks = {
};
-enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
+static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
struct dpm_clocks *clock_table)
{
*clock_table = dummy_clocks;
return PP_SMU_RESULT_OK;
}
-struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
- pp_smu->ctx.ver = PP_SMU_VER_RN;
+ if (!pp_smu)
+ return pp_smu;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) {
+ pp_smu->ctx.ver = PP_SMU_VER_RN;
+ pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
+ pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ } else {
- pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
- pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ dm_pp_get_funcs(ctx, pp_smu);
+
+ if (pp_smu->ctx.ver != PP_SMU_VER_RN)
+ pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
+ }
return pp_smu;
}
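Note the two failure shapes this constructor can hand back: a NULL pointer on allocation failure, and a fully zeroed struct when the platform SMU does not report the Renoir interface. A hedged caller-side sketch of the checks this implies (ranges is assumed to be an already-populated struct pp_smu_wm_range_sets):

	struct pp_smu_funcs *pp_smu = dcn21_pp_smu_create(ctx);

	/* guard both allocation failure and the zeroed non-RN fallback */
	if (pp_smu && pp_smu->ctx.ver == PP_SMU_VER_RN && pp_smu->rn_funcs.set_wm_ranges)
		pp_smu->rn_funcs.set_wm_ranges(&pp_smu->ctx, &ranges);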
-void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -1400,6 +1482,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN21 = true;
}
return hws;
}
@@ -1418,10 +1501,152 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn21_hwseq_create,
};
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
+static struct link_encoder *dcn21_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn21_link_encoder *enc21 =
+ kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
+ int link_regs_id;
+
+ if (!enc21)
+ return NULL;
+
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
+ dcn21_link_encoder_construct(enc21,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc21->enc10.base;
+}
+#define CTX ctx
+
+#define REG(reg_name) \
+ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+ /* RV1 supports a max of 4 pipes */
+ value = value & 0xf;
+ return value;
+}
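The fuse value is a disable mask: bit i set means HW pipe i is fused off. A hedged equivalent of the decrement loop in construct() below, using the kernel's popcount helper from <linux/bitops.h> (the 0x2 fuse value is illustrative):

	uint32_t pipe_fuses = read_pipe_fuses(ctx);		/* e.g. 0x2: pipe 1 fused off */
	uint32_t usable = 4 - hweight32(pipe_fuses & 0xf);	/* -> 3 usable pipes */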
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes);
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipes[i].pipe.src.hostvm = 1;
+ pipes[i].pipe.src.gpuvm = 1;
+ }
+
+ return pipe_cnt;
+}
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
- .link_enc_create = dcn20_link_encoder_create,
+ .link_enc_create = dcn21_link_encoder_create,
.validate_bandwidth = dcn21_validate_bandwidth,
+ .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
@@ -1437,9 +1662,11 @@ static bool construct(
struct dc *dc,
struct dcn21_resource_pool *pool)
{
- int i;
+ int i, j;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ uint32_t num_pipes;
ctx->dc_bios->regs = &bios_regs;
@@ -1457,7 +1684,9 @@ static bool construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = 4;
+ /* max pipe num for ASIC before check pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
@@ -1467,6 +1696,7 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1516,6 +1746,26 @@ static bool construct(
goto create_fail;
}
+ pool->base.dmcu = dcn20_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
pool->base.dmcub = dcn21_dmcub_create(ctx,
&dmcub_regs,
@@ -1530,6 +1780,14 @@ static bool construct(
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
+ num_pipes = dcn2_1_ip.max_num_dpp;
+
+ for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
+ if (pipe_fuses & (1 << i))
+ num_pipes--;
+ dcn2_1_ip.max_num_dpp = num_pipes;
+ dcn2_1_ip.max_num_otg = num_pipes;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
init_data.ctx = dc->ctx;
@@ -1537,8 +1795,15 @@ static bool construct(
if (!pool->base.irqs)
goto create_fail;
+ j = 0;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
+ /* if the pipe is disabled, skip this instance of the HW pipe,
+ * i.e., skip the ASIC register instance
+ */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
@@ -1562,6 +1827,23 @@ static bool construct(
"DC: failed to create dpps!\n");
goto create_fail;
}
+
+ pool->base.opps[i] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.timing_generators[i] = dcn21_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ j++;
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
@@ -1582,27 +1864,9 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- for (i = 0; i < pool->base.res_cap->num_opp; i++) {
- pool->base.opps[i] = dcn21_opp_create(ctx, i);
- if (pool->base.opps[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error(
- "DC: failed to create output pixel processor!\n");
- goto create_fail;
- }
- }
-
- for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
- pool->base.timing_generators[i] = dcn21_timing_generator_create(
- ctx, i);
- if (pool->base.timing_generators[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error("DC: failed to create tg!\n");
- goto create_fail;
- }
- }
-
- pool->base.timing_generator_count = i;
+ pool->base.timing_generator_count = j;
+ pool->base.pipe_count = j;
+ pool->base.mpcc_count = j;
pool->base.mpc = dcn21_mpc_create(ctx);
if (pool->base.mpc == NULL) {
@@ -1645,7 +1909,7 @@ static bool construct(
&res_create_funcs : &res_create_maximus_funcs)))
goto create_fail;
- dcn20_hw_sequencer_construct(dc);
+ dcn21_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
new file mode 100644
index 000000000000..626d22d437f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_CP_PSP_IF__H
+#define DM_CP_PSP_IF__H
+
+struct dc_link;
+
+struct cp_psp_stream_config {
+ uint8_t otg_inst;
+ uint8_t link_enc_inst;
+ uint8_t stream_enc_inst;
+ void *dm_stream_ctx;
+ bool dpms_off;
+};
+
+struct cp_psp_funcs {
+ void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config);
+};
+
+struct cp_psp {
+ void *handle;
+ struct cp_psp_funcs funcs;
+};
+
+#endif /* DM_CP_PSP_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index b6b4333737f2..94b75e942607 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
/*
* Polls for ACT (allocation change trigger) to be handled and returns the ACT status
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index c03a441ee638..ef7df9ef6d7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -249,10 +249,8 @@ struct pp_smu_funcs_nv {
};
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-
#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
-#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
@@ -288,7 +286,6 @@ struct pp_smu_funcs_rn {
enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp,
struct dpm_clocks *clock_table);
};
-#endif
struct pp_smu_funcs {
struct pp_smu ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 0fafd693ffb4..3c70dd577292 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -38,6 +38,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -2610,7 +2611,8 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
@@ -3901,6 +3903,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3925,7 +3931,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
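Illustrative effect of the new width clamp, which the DCN21 VBA mirrors further below with DCN21_MAX_DSC_IMAGE_WIDTH (both limits are 5184):

	/*
	 * Assuming DSC is enabled and DISPCLK headroom is sufficient:
	 *   HActive == 3840 -> 3840 <= 5184, stays on the no-combine path
	 *   HActive == 7680 -> 7680 >  5184, falls through to ODM combine
	 *   even though a single pipe could otherwise supply the DISPCLK
	 */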
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 878bf4782ce6..2c7455e22a65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index ed8bf5f723c9..1e6aeb1bd2bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 3b6ed60dcd35..ba77957aefe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -65,6 +65,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP(
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
else
return BPP_INVALID;
}
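Worked example of the new branch (illustrative values):

	/*
	 * DecimalBPP == 22.5 with DesiredBPP == 0: previously nothing in
	 * [18, 24) matched and BPP_INVALID was returned; the mode is now
	 * truncated to 18 bpp, i.e. 6 bpc RGB.
	 */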
@@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index f4c1ef9046bf..cfacd6027467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -269,7 +269,7 @@ struct writeback_st {
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
- int output_bpp;
+ double output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
@@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
+ unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 65cf4edddaff..7f9a5621922f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -432,8 +433,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
+ mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
+ dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 91decac50557..1540ffbe3979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -387,6 +387,7 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
+ bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index ad8571f5a142..4c3e9cc30167 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -243,7 +243,7 @@ void dml1_extract_rq_regs(
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- /* FIXME: take the max between luma, chroma chunk size?
+ /* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
@@ -602,7 +602,7 @@ static void get_surf_rq_param(
unsigned int log2_dpte_group_length;
unsigned int func_meta_row_height, func_dpte_row_height;
- /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ /* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params(
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
- prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
@@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params(
dcc_en = e2e_pipe_param.pipe.src.dcc;
dual_plane = is_dual_plane(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format);
- mode_422 = 0; /* FIXME */
+ mode_422 = 0; /* TODO */
access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format,
@@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params(
cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
* (double) cur0_req_width;
cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
- hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */
if (vratio_pre_l <= 1.0) {
refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 5995bcdfed54..e60f760585e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -23,8 +23,7 @@
*/
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dc.h"
-#include "core_types.h"
+#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
@@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = {
/* This module's internal functions */
+static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC) {
+ kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
+ kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ return kbps;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-Only reduces bandwidth to 1/3 compared to RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+}
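A worked example of the arithmetic above, assuming DSC bits_per_pixel is in 1/16-bpp fixed point, which the divide-by-160 (16 x 10) implies; the 594 MHz mode is illustrative:

	/*
	 * 3840x2160@60 has pix_clk_100hz == 5940000 (594 MHz):
	 *   RGB 8 bpc:  5940000 / 10 * 8 * 3 == 14256000 kbps (594 MHz x 24 bpp)
	 *   DSC 8 bpp (bits_per_pixel == 128, U6.4 fixed point):
	 *               5940000 * 128 / 160 ==  4752000 kbps (594 MHz x 8 bpp)
	 */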
+
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
}
static void get_dsc_enc_caps(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
// This is a static HW query, so we can use any DSC
- struct display_stream_compressor *dsc = dc->res_pool->dscs[0];
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
if (dsc)
@@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range(
struct dc_dsc_bw_range *range)
{
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing);
/* max dsc target bpp */
range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz);
@@ -512,6 +563,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ int min_slice_height_override,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -680,7 +732,10 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- slice_height = min(dsc_policy.min_sice_height, pic_height);
+ if (min_slice_height_override == 0)
+ slice_height = min(dsc_policy.min_sice_height, pic_height);
+ else
+ slice_height = min(min_slice_height_override, pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
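Worked example of the slice height search (illustrative values):

	/*
	 * pic_height == 2160 with min_slice_height_override == 108:
	 * 2160 % 108 == 0 and 108 is even, so the loop exits immediately
	 * with 2160 / 108 == 20 slices per column; the 4:2:0 even-height
	 * requirement is also satisfied.
	 */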
@@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
* If DSC is not possible, leave '*range' untouched.
*/
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_bpp,
const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
- is_dsc_possible = setup_dsc_config(dsc_sink_caps,
- &dsc_enc_caps,
- 0,
- timing, &config);
+ is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
+ dsc_min_slice_height_override, &config);
if (is_dsc_possible)
get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range);
@@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range(
}
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -841,11 +896,11 @@ bool dc_dsc_compute_config(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_cfg);
+ timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
}
#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index ca51e83f8764..76c4b12d6824 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
{
float bpp_group;
float initial_xmit_delay_factor;
- int source_bpp;
int padding_pixels;
int i;
@@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->initial_xmit_delay++;
}
- source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5);
-
rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_det_thresh = 2 << (bpc - 8);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f8f85490e77e..f67c18375bfd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -321,8 +321,6 @@ void dal_gpio_destroy(
return;
}
- dal_gpio_close(*gpio);
-
switch ((*gpio)->id) {
case GPIO_ID_DDC_DATA:
kfree((*gpio)->hw_container.ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index d03165e71dc6..92280cc05e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux(
return;
}
- dal_gpio_close(*mux);
dal_gpio_destroy(mux);
kfree(*mux);
@@ -460,7 +459,6 @@ void dal_gpio_destroy_irq(
return;
}
- dal_gpio_close(*irq);
dal_gpio_destroy(irq);
kfree(*irq);
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
new file mode 100644
index 000000000000..4170b6eb9ec0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
@@ -0,0 +1,28 @@
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'hdcp' sub-component of DAL.
+#
+
+HDCP_MSG = hdcp_msg.o
+
+AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG)
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
new file mode 100644
index 000000000000..6f730b5bfe42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "include/hdcp_types.h"
+#include "include/i2caux_interface.h"
+#include "include/signal_types.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define HDCP14_KSV_SIZE 5
+#define HDCP14_MAX_KSV_FIFO_SIZE (127 * HDCP14_KSV_SIZE)
+
+static const bool hdcp_cmd_is_read[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = true,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ [HDCP_MESSAGE_ID_READ_PJ] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = false,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = false,
+ [HDCP_MESSAGE_ID_WRITE_AN] = false,
+ [HDCP_MESSAGE_ID_READ_VH_X] = true,
+ [HDCP_MESSAGE_ID_READ_VH_0] = true,
+ [HDCP_MESSAGE_ID_READ_VH_1] = true,
+ [HDCP_MESSAGE_ID_READ_VH_2] = true,
+ [HDCP_MESSAGE_ID_READ_VH_3] = true,
+ [HDCP_MESSAGE_ID_READ_VH_4] = true,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = true,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = true,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true,
+ [HDCP_MESSAGE_ID_READ_BINFO] = true,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = true,
+ [HDCP_MESSAGE_ID_RX_CAPS] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = true,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
+};
+
+struct protection_properties {
+ bool supported;
+ bool (*process_transaction)(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+};
+
+static const struct protection_properties non_supported_protection = {
+ .supported = false
+};
+
+static bool hdmi_14_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ uint8_t *buff = NULL;
+ bool result;
+ const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
+ const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
+ struct i2c_command i2c_command;
+ uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
+ struct i2c_payload i2c_payloads[] = {
+ { true, 0, 1, &offset },
+ /* actual hdcp payload, will be filled later, zeroed for now*/
+ { 0 }
+ };
+
+ switch (message_info->link) {
+ case HDCP_LINK_SECONDARY:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_secondary;
+ break;
+ case HDCP_LINK_PRIMARY:
+ default:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_primary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_primary;
+ break;
+ }
+
+ if (hdcp_cmd_is_read[message_info->msg_id]) {
+ i2c_payloads[1].write = false;
+ i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads);
+ i2c_payloads[1].length = message_info->length;
+ i2c_payloads[1].data = message_info->data;
+ } else {
+ i2c_command.number_of_payloads = 1;
+ buff = kzalloc(message_info->length + 1, GFP_KERNEL);
+
+ if (!buff)
+ return false;
+
+ buff[0] = offset;
+ memmove(&buff[1], message_info->data, message_info->length);
+ i2c_payloads[0].length = message_info->length + 1;
+ i2c_payloads[0].data = buff;
+ }
+
+ i2c_command.payloads = i2c_payloads;
+ i2c_command.engine = I2C_COMMAND_ENGINE_HW;//only HW
+ i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ result = dm_helpers_submit_i2c(
+ link->ctx,
+ link,
+ &i2c_command);
+ kfree(buff);
+
+ return result;
+}
+
+static const struct protection_properties hdmi_14_protection = {
+ .supported = true,
+ .process_transaction = hdmi_14_process_transaction
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+ [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
+};
+
+static bool dpcd_access_helper(
+ struct dc_link *link,
+ uint32_t length,
+ uint8_t *data,
+ uint32_t dpcd_addr,
+ bool is_read)
+{
+ enum dc_status status;
+ uint32_t cur_length = 0;
+ uint32_t offset = 0;
+ uint32_t ksv_read_size = 0x6803b - 0x6802c;
+
+ /* KSV FIFO reads need to be handled in repeated chunks */
+ if (dpcd_addr == 0x6802c) {
+ if (length % HDCP14_KSV_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_KSV_SIZE);
+ }
+ if (length > HDCP14_MAX_KSV_FIFO_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_MAX_KSV_FIFO_SIZE);
+ }
+
+ DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n",
+ __func__,
+ length / HDCP14_KSV_SIZE);
+
+ while (length > 0) {
+ if (length > ksv_read_size) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ ksv_read_size);
+
+ data += ksv_read_size;
+ length -= ksv_read_size;
+ } else {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ length);
+
+ data += length;
+ length = 0;
+ }
+
+ if (status != DC_OK)
+ return false;
+ }
+ } else {
+ while (length > 0) {
+ if (length > DEFAULT_AUX_MAX_DATA_SIZE)
+ cur_length = DEFAULT_AUX_MAX_DATA_SIZE;
+ else
+ cur_length = length;
+
+ if (is_read) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ } else {
+ status = core_link_write_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ }
+
+ if (status != DC_OK)
+ return false;
+
+ length -= cur_length;
+ offset += cur_length;
+ }
+ }
+ return true;
+}
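Illustrative chunking behaviour of the helper above (DEFAULT_AUX_MAX_DATA_SIZE is assumed to be 16, its usual value in this driver):

	/*
	 * A 40-byte non-KSV transfer is split into 16 + 16 + 8 byte accesses
	 * at offsets 0, 16 and 32. The KSV FIFO path instead re-reads the
	 * same FIFO address in 15-byte chunks (0x6803b - 0x6802c), i.e.
	 * three 5-byte KSVs per read.
	 */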
+
+static bool dp_11_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ return dpcd_access_helper(
+ link,
+ message_info->length,
+ message_info->data,
+ hdcp_dpcd_addrs[message_info->msg_id],
+ hdcp_cmd_is_read[message_info->msg_id]);
+}
+
+static const struct protection_properties dp_11_protection = {
+ .supported = true,
+ .process_transaction = dp_11_process_transaction
+};
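A hedged sketch of how these property tables are presumably dispatched (the selection by signal type is not part of this hunk; the enum values are from signal_types.h):

	static bool process_hdcp_transaction_sketch(enum signal_type signal,
			struct dc_link *link,
			struct hdcp_protection_message *msg)
	{
		const struct protection_properties *p = &non_supported_protection;

		if (signal == SIGNAL_TYPE_DISPLAY_PORT)
			p = &dp_11_protection;
		else if (signal == SIGNAL_TYPE_HDMI_TYPE_A)
			p = &hdmi_14_protection;

		return p->supported && p->process_transaction(link, msg);
	}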
+
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f189307750ab..a831079607cd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
/************ link *****************/
struct link_init_data {
@@ -231,6 +233,7 @@ struct resource_pool {
struct dcn_fe_bandwidth {
int dppclk_khz;
};
struct stream_resource {
@@ -395,10 +398,6 @@ struct dc_state {
struct clk_mgr *clk_mgr;
- struct {
- bool full_update_needed : 1;
- } commit_hints;
-
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index b1fab251c09b..14716ba35662 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_channel_operation_result *operation_result);
@@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
+
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 08a4df2c61a8..045138dbdccb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,6 +28,8 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/
struct dc_link;
struct dc_stream_state;
@@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts);
+bool dp_verify_mst_link_cap(
+ struct dc_link *link);
+
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index e79cd4e92919..e77b3a76766d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -140,6 +140,9 @@ struct write_command_context {
struct aux_engine_funcs {
+ bool (*configure_timeout)(
+ struct ddc_service *ddc,
+ uint32_t timeout);
void (*destroy)(
struct aux_engine **ptr);
bool (*acquire_engine)(
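A hedged call-site sketch tying the new configure_timeout hook to the declarations added above in dc_link_ddc.h and dc_link_dp.h (the real call site is not part of this diff):

	if (link->dc->caps.extended_aux_timeout_support)
		dc_link_aux_configure_timeout(link->ddc,
				LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);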
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 76f9ad1b23df..4e18e77dcf42 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -47,7 +47,7 @@
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Will these bw structures be ASIC specific? */
-#define MAX_NUM_DPM_LVL 4
+#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
@@ -180,13 +180,19 @@ struct clk_mgr_funcs {
struct dc_state *context,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+
+ bool (*are_clock_states_equal) (struct dc_clocks *a,
+ struct dc_clocks *b);
+ void (*notify_wm_ranges)(struct clk_mgr *clk_mgr);
};
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
+ bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
+ int dentist_vco_freq_khz;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
struct clk_bw_params *bw_params;
#endif
@@ -199,4 +205,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7dd46eb96d67..a17a77192690 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -184,6 +184,21 @@ struct clk_mgr_registers {
uint32_t MP1_SMN_C2PMSG_91;
};
+enum clock_type {
+ clock_type_dispclk = 1,
+ clock_type_dcfclk,
+ clock_type_socclk,
+ clock_type_pixelclk,
+ clock_type_phyclk,
+ clock_type_dppclk,
+ clock_type_fclk,
+ clock_type_dcfdsclk,
+ clock_type_dscclk,
+ clock_type_uclk,
+ clock_type_dramclk,
+};
+
+
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
@@ -210,8 +225,6 @@ struct clk_mgr_internal {
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
/*TODO: figure out which of the below fields should be here vs in asic specific portion */
- int dentist_vco_freq_khz;
-
/* Cache the status of DFS-bypass feature*/
bool dfs_bypass_enabled;
/* True if the DFS-bypass feature is enabled and active. */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index d8e744f366e5..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -38,8 +38,7 @@ struct dccg {
struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only);
+ int req_dppclk);
void (*get_dccg_ref_freq)(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index a6297219d7fc..c81a17aeaa25 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -147,6 +147,7 @@ struct hubbub_funcs {
bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
+ void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 1ddb1c6fa149..c6ff3d78b435 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -28,7 +28,11 @@
#include "dc_dsc.h"
#include "dc_hw_types.h"
-#include "dc_dp_types.h"
+#include "dc_types.h"
+/* do not include any other headers
+ * or else it might break EDID utility functionality.
+ */
+
/* Input parameters for configuring DSC from the outside of DSC */
struct dsc_config {
@@ -81,12 +85,6 @@ struct dsc_enc_caps {
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
};
-struct display_stream_compressor {
- const struct dsc_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index abb4e4237fb6..b21909216fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -184,6 +184,10 @@ struct link_encoder_funcs {
bool (*fec_is_active)(struct link_encoder *enc);
#endif
bool (*is_in_alt_mode) (struct link_encoder *enc);
+
+ void (*get_max_link_cap)(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index e8668388581b..67b610d6d91f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -43,6 +43,7 @@ struct dcn_watermarks {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip;
+ int32_t urgent_latency_ns;
#endif
struct cstate_pstate_watermarks_st cstate_pstate;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 957e9047381a..18def2b6fafe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -208,6 +208,7 @@ struct output_pixel_processor {
struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
+ uint32_t dyn_expansion;
};
enum fmt_stereo_action {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe9b7a10a1c3..6305e388612a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -214,6 +214,11 @@ struct stream_encoder_funcs {
unsigned int (*dig_source_otg)(
struct stream_encoder *enc);
+ bool (*dp_get_pixel_format)(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 6196cc32356e..27c73caf74ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -261,6 +261,8 @@ struct timing_generator_funcs {
void (*program_manual_trigger)(struct timing_generator *optc);
void (*setup_manual_trigger)(struct timing_generator *optc);
+ bool (*get_hw_timing)(struct timing_generator *optc,
+ struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 3a938cd414ea..d39c1e11def5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -114,6 +114,9 @@ struct hw_sequencer_funcs {
int opp_id);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ void (*program_front_end_for_ctx)(
+ struct dc *dc,
+ struct dc_state *context);
void (*program_triplebuffer)(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -229,6 +232,13 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct dc_state *context);
+ void (*exit_optimized_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+ void (*optimize_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool (*update_bandwidth)(
struct dc *dc,
@@ -321,10 +331,12 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*update_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*enable_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
#endif
@@ -337,6 +349,9 @@ struct hw_sequencer_funcs {
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ bool (*s0i3_golden_init_wa)(struct dc *dc);
+#endif
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 18961707db23..9ad49da50a17 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -31,6 +31,8 @@
#define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9
#define DP_BRANCH_DEVICE_ID_00001A 0x00001A
#define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1
+#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
+#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
new file mode 100644
index 000000000000..f31e6befc8d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -0,0 +1,96 @@
+/*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*
+* Authors: AMD
+*
+*/
+
+#ifndef __DC_HDCP_TYPES_H__
+#define __DC_HDCP_TYPES_H__
+
+enum hdcp_message_id {
+ HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ HDCP_MESSAGE_ID_READ_BKSV = 0,
+ /* HDMI is called Ri', DP is called R0' */
+ HDCP_MESSAGE_ID_READ_RI_R0,
+ HDCP_MESSAGE_ID_READ_PJ,
+ HDCP_MESSAGE_ID_WRITE_AKSV,
+ HDCP_MESSAGE_ID_WRITE_AINFO,
+ HDCP_MESSAGE_ID_WRITE_AN,
+ HDCP_MESSAGE_ID_READ_VH_X,
+ HDCP_MESSAGE_ID_READ_VH_0,
+ HDCP_MESSAGE_ID_READ_VH_1,
+ HDCP_MESSAGE_ID_READ_VH_2,
+ HDCP_MESSAGE_ID_READ_VH_3,
+ HDCP_MESSAGE_ID_READ_VH_4,
+ HDCP_MESSAGE_ID_READ_BCAPS,
+ HDCP_MESSAGE_ID_READ_BSTATUS,
+ HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ HDCP_MESSAGE_ID_READ_BINFO,
+
+ /* HDCP 2.2 */
+
+ HDCP_MESSAGE_ID_HDCP2VERSION,
+ HDCP_MESSAGE_ID_RX_CAPS,
+ HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ HDCP_MESSAGE_ID_READ_RXSTATUS,
+ HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
+ HDCP_MESSAGE_ID_MAX
+};
+
+enum hdcp_version {
+ HDCP_Unknown = 0,
+ HDCP_VERSION_14,
+ HDCP_VERSION_22,
+};
+
+enum hdcp_link {
+ HDCP_LINK_PRIMARY,
+ HDCP_LINK_SECONDARY
+};
+
+struct hdcp_protection_message {
+ enum hdcp_version version;
+ /* relevant only for DVI */
+ enum hdcp_link link;
+ enum hdcp_message_id msg_id;
+ uint32_t length;
+ uint8_t max_retries;
+ uint8_t *data;
+};
+
+#endif
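A minimal usage sketch (illustrative only, not part of this patch): a caller
might fill struct hdcp_protection_message for an HDCP 1.4 BKSV read as below.
The buffer name and retry budget are hypothetical.

	uint8_t bksv_buf[5];	/* BKSV is 5 bytes (40 bits) */
	struct hdcp_protection_message msg = {
		.version = HDCP_VERSION_14,
		.link = HDCP_LINK_PRIMARY,
		.msg_id = HDCP_MESSAGE_ID_READ_BKSV,
		.length = sizeof(bksv_buf),
		.max_retries = 5,	/* hypothetical retry budget */
		.data = bksv_buf,
	};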
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2d8f14b69117..1de4805cb8c7 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -373,7 +373,42 @@ static struct fixed31_32 translate_from_linear_space(
return dc_fixpt_mul(args->arg, args->a1);
}
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+
+static struct fixed31_32 translate_from_linear_space_long(
+ struct translate_from_linear_space_args *args)
+{
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+ if (dc_fixpt_lt(one, args->arg))
+ return one;
+
+ if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
+ return dc_fixpt_sub(
+ args->a2,
+ dc_fixpt_mul(
+ dc_fixpt_add(
+ one,
+ args->a3),
+ dc_fixpt_pow(
+ dc_fixpt_neg(args->arg),
+ dc_fixpt_recip(args->gamma))));
+ else if (dc_fixpt_le(args->a0, args->arg))
+ return dc_fixpt_sub(
+ dc_fixpt_mul(
+ dc_fixpt_add(
+ one,
+ args->a3),
+ dc_fixpt_pow(
+ args->arg,
+ dc_fixpt_recip(args->gamma))),
+ args->a2);
+ else
+ return dc_fixpt_mul(
+ args->arg,
+ args->a1);
+}
+
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf)
{
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
@@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
scratch_gamma_args.a3 = dc_fixpt_zero;
scratch_gamma_args.gamma = gamma;
+ if (use_eetf)
+ return translate_from_linear_space_long(&scratch_gamma_args);
+
return translate_from_linear_space(&scratch_gamma_args);
}
+
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -920,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (fs_params->max_display < 100) // enforce a floor of 100 on the max display luminance
max_display = dc_fixpt_from_int(100);
- if (fs_params->min_content < fs_params->min_display)
- use_eetf = true;
- else
- min_content = min_display;
-
+ // only the max is used; we don't adjust min luminance
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
@@ -950,7 +985,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
- output = calculate_gamma22(scaledX);
+ output = calculate_gamma22(scaledX, use_eetf);
rgb->r = output;
rgb->g = output;
@@ -2173,5 +2208,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
rgb_degamma_alloc_fail:
return ret;
}
-
-
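For reference, a floating-point sketch of the piecewise transfer that
translate_from_linear_space_long() implements in fixed point above
(illustrative only, not part of this patch; same branch structure, plain
doubles instead of struct fixed31_32):

	#include <math.h>

	/* clamp at 1.0, mirror the x <= -a0 branch around zero, apply the
	 * power segment for x >= a0, and use the linear segment in between */
	static double from_linear_long(double arg, double a0, double a1,
				       double a2, double a3, double gamma)
	{
		if (arg > 1.0)
			return 1.0;
		if (arg <= -a0)
			return a2 - (1.0 + a3) * pow(-arg, 1.0 / gamma);
		else if (arg >= a0)
			return (1.0 + a3) * pow(arg, 1.0 / gamma) - a2;
		else
			return arg * a1;
	}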
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ec70c9b12e1a..16e69bbc69aa 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total)
+ v_total = stream->timing.v_total;
+
in_out_vrr->adjust.v_total_min = v_total;
in_out_vrr->adjust.v_total_max = v_total;
}
@@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
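With the margin-based thresholds above, BTR entry and exit are separated by a
full margin of hysteresis. Worked numbers (illustrative): with
max_duration_in_us = 25000 (a 40 Hz lower bound) and margin_in_us = 2500,
max_render_time_in_us = 22500, so BTR exits once the last render time drops
below 21250 us and re-enters only above 23750 us.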
@@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
+ /* Rounded to the nearest Hz */
+ nominal_field_rate_in_uhz = 1000000ULL *
+ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
+
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
@@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
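Worked numbers for the margin computed above (illustrative): for a 40-144 Hz
range, max_duration_in_us = 25000 and min_duration_in_us is roughly 6944, so
max_duration - 2 * min_duration is about 11112 us, which the cap then limits
to BTR_MAX_MARGIN (2500 us).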
@@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
@@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
- struct core_freesync *core_freesync = NULL;
-
if (mod_freesync == NULL)
return;
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
if (vrr->supported) {
*v_total_min = vrr->adjust.v_total_min;
*v_total_max = vrr->adjust.v_total_max;
@@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int total = stream->timing.h_total * stream->timing.v_total;
- /* Calculate nominal field rate for stream */
+ /* Calculate nominal field rate for stream, rounded up to nearest integer */
nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.h_total);
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.v_total);
+
+ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
return nominal_field_rate_in_uhz;
}
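Worked numbers for the calculation above (illustrative): for CEA 1080p60
timing, pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200 and v_total =
1125, so total = 2475000 and the result is 148500 * 10^9 / 2475000 =
60000000 uHz, i.e. exactly 60 Hz.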
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
new file mode 100644
index 000000000000..1c3c6d47973a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'hdcp' sub-module of DAL.
+#
+
+HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
+ hdcp1_execution.o hdcp1_transition.o
+
+AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
+#$(info ************ DAL-HDCP_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
new file mode 100644
index 000000000000..d7ac445dec6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+static void push_error_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_status status)
+{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
+ if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
+ trace->errors[trace->error_count].status = status;
+ trace->errors[trace->error_count].state_id = hdcp->state.id;
+ trace->error_count++;
+ HDCP_ERROR_TRACE(hdcp, status);
+ }
+
+ hdcp->connection.hdcp1_retry_count++;
+}
+
+static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
+{
+ int i, display_enabled = 0;
+
+ /* if all displays on the link are disabled, hdcp is not desired */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ display_enabled = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ display_enabled && !hdcp->connection.link.adjust.hdcp1.disable;
+}
+
+static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* initialize transition input */
+ memset(input, 0, sizeof(union mod_hdcp_transition_input));
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* update topology event if hdcp is not desired */
+ status = mod_hdcp_add_display_topology(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_execution(hdcp,
+ event_ctx, &input->hdcp1);
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->unexpected_event)
+ goto out;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (is_dp_hdcp(hdcp))
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ increment_stay_counter(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_hdcp1(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN)
+ mod_hdcp_hdcp1_destroy_session(hdcp);
+
+ if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ }
+
+out:
+ /* stop callback and watchdog requests from previous authentication */
+ output->watchdog_timer_stop = 1;
+ output->callback_stop = 1;
+ return status;
+}
+
+static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (current_state(hdcp) != HDCP_UNINITIALIZED) {
+ HDCP_TOP_RESET_CONN_TRACE(hdcp);
+ set_state_id(hdcp, output, HDCP_UNINITIALIZED);
+ }
+ memset(&hdcp->connection, 0, sizeof(hdcp->connection));
+out:
+ return status;
+}
+
+/*
+ * Implementation of functions in mod_hdcp.h
+ */
+size_t mod_hdcp_get_memory_size(void)
+{
+ return sizeof(struct mod_hdcp);
+}
+
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config)
+{
+ struct mod_hdcp_output output;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ memset(&output, 0, sizeof(output));
+ hdcp->config = *config;
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, &output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_output output;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ memset(&output, 0, sizeof(output));
+ status = reset_connection(hdcp, &output);
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ else
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display_container = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* skip inactive display */
+ if (display->state != MOD_HDCP_DISPLAY_ACTIVE) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* check existing display container */
+ if (get_active_display_at_index(hdcp, display->index)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* find an empty display container */
+ display_container = get_empty_display_container(hdcp);
+ if (!display_container) {
+ status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND;
+ goto out;
+ }
+
+ /* reset existing authentication status */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+
+ /* reset retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication */
+ if (current_state(hdcp) != HDCP_INITIALIZED)
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* stop current authentication */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* remove display */
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+
+ /* clear retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication for remaining displays */
+ if (get_active_display_count(hdcp) > 0)
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
+ output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ goto out;
+ }
+
+ /* populate query */
+ query->link = &hdcp->connection.link;
+ query->display = display;
+ query->trace = &hdcp->connection.trace;
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status);
+
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status exec_status, trans_status, reset_status, status;
+ struct mod_hdcp_event_context event_ctx;
+
+ HDCP_EVENT_TRACE(hdcp, event);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+ memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context));
+ event_ctx.event = event;
+
+ /* execute and transition */
+ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input);
+ trans_status = transition(
+ hdcp, &event_ctx, &hdcp->auth.trans_input, output);
+ if (trans_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE;
+ push_error_status(hdcp, status);
+ } else {
+ status = exec_status;
+ push_error_status(hdcp, status);
+ }
+
+ /* reset authentication if needed */
+ if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
+ HDCP_FULL_DDC_TRACE(hdcp);
+ reset_status = reset_authentication(hdcp, output);
+ if (reset_status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, reset_status);
+ }
+ return status;
+}
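A hypothetical caller sketch (not part of this patch): a driver feeds events
into the state machine and then honors the scheduling hints returned in the
output structure. schedule_callback() and arm_watchdog() are stand-ins for
driver-specific timer helpers.

	struct mod_hdcp_output out;

	mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK, &out);
	if (out.callback_needed)
		schedule_callback(out.callback_delay);	/* delay in ms */
	if (out.watchdog_timer_needed)
		arm_watchdog(out.watchdog_timer_delay);	/* delay in ms */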
+
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal)
+{
+ enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ mode = MOD_HDCP_MODE_DEFAULT;
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ mode = MOD_HDCP_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ mode = MOD_HDCP_MODE_DP_MST;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
new file mode 100644
index 000000000000..5664bc0b5bd0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef HDCP_H_
+#define HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp_log.h"
+
+#define BCAPS_READY_MASK 0x20
+#define BCAPS_REPEATER_MASK 0x40
+#define BSTATUS_DEVICE_COUNT_MASK 0x007F
+#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080
+#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800
+#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01
+#define BCAPS_REPEATER_MASK_DP 0x02
+#define BSTATUS_READY_MASK_DP 0x01
+#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02
+#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04
+#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08
+#define BINFO_DEVICE_COUNT_MASK_DP 0x007F
+#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080
+#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800
+
+#define RXSTATUS_MSG_SIZE_MASK 0x03FF
+#define RXSTATUS_READY_MASK 0x0400
+#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800
+#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0
+#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01
+#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02
+#define RXSTATUS_READY_MASK_DP 0x0001
+#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002
+#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004
+#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008
+#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010
+
+enum mod_hdcp_trans_input_result {
+ UNKNOWN = 0,
+ PASS,
+ FAIL
+};
+
+struct mod_hdcp_transition_input_hdcp1 {
+ uint8_t bksv_read;
+ uint8_t bksv_validation;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t an_write;
+ uint8_t aksv_write;
+ uint8_t ainfo_write;
+ uint8_t bcaps_read;
+ uint8_t r0p_read;
+ uint8_t rx_validation;
+ uint8_t encryption;
+ uint8_t link_maintenance;
+ uint8_t ready_check;
+ uint8_t bstatus_read;
+ uint8_t max_cascade_check;
+ uint8_t max_devs_check;
+ uint8_t device_count_check;
+ uint8_t ksvlist_read;
+ uint8_t vp_read;
+ uint8_t ksvlist_vp_validation;
+
+ uint8_t hdcp_capable_dp;
+ uint8_t binfo_read_dp;
+ uint8_t r0p_available_dp;
+ uint8_t link_integiry_check;
+ uint8_t reauth_request_check;
+ uint8_t stream_encryption_dp;
+};
+
+union mod_hdcp_transition_input {
+ struct mod_hdcp_transition_input_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_message_hdcp1 {
+ uint8_t an[8];
+ uint8_t aksv[5];
+ uint8_t ainfo;
+ uint8_t bksv[5];
+ uint16_t r0p;
+ uint8_t bcaps;
+ uint16_t bstatus;
+ uint8_t ksvlist[635];
+ uint16_t ksvlist_size;
+ uint8_t vp[20];
+
+ uint16_t binfo_dp;
+};
+
+union mod_hdcp_message {
+ struct mod_hdcp_message_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_auth_counters {
+ uint8_t stream_management_retry_count;
+};
+
+/* contains values per connection */
+struct mod_hdcp_connection {
+ struct mod_hdcp_link link;
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
+ uint8_t is_repeater;
+ uint8_t is_km_stored;
+ struct mod_hdcp_trace trace;
+ uint8_t hdcp1_retry_count;
+};
+
+/* contains values per authentication cycle */
+struct mod_hdcp_authentication {
+ uint32_t id;
+ union mod_hdcp_message msg;
+ union mod_hdcp_transition_input trans_input;
+ struct mod_hdcp_auth_counters count;
+};
+
+/* contains values per state change */
+struct mod_hdcp_state {
+ uint8_t id;
+ uint32_t stay_count;
+};
+
+/* per event in a state */
+struct mod_hdcp_event_context {
+ enum mod_hdcp_event event;
+ uint8_t rx_id_list_ready;
+ uint8_t unexpected_event;
+};
+
+struct mod_hdcp {
+ /* per link */
+ struct mod_hdcp_config config;
+ /* per connection */
+ struct mod_hdcp_connection connection;
+ /* per authentication attempt */
+ struct mod_hdcp_authentication auth;
+ /* per state in an authentication */
+ struct mod_hdcp_state state;
+ /* reserved memory buffer */
+ uint8_t buf[2025];
+};
+
+enum mod_hdcp_initial_state_id {
+ HDCP_UNINITIALIZED = 0x0,
+ HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED,
+ HDCP_INITIALIZED,
+ HDCP_CP_NOT_DESIRED,
+ HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED
+};
+
+enum mod_hdcp_hdcp1_state_id {
+ HDCP1_STATE_START = HDCP_INITIAL_STATE_END,
+ H1_A0_WAIT_FOR_ACTIVE_RX,
+ H1_A1_EXCHANGE_KSVS,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER,
+ H1_A45_AUTHENTICATED,
+ H1_A8_WAIT_FOR_READY,
+ H1_A9_READ_KSV_LIST,
+ HDCP1_STATE_END = H1_A9_READ_KSV_LIST
+};
+
+enum mod_hdcp_hdcp1_dp_state_id {
+ HDCP1_DP_STATE_START = HDCP1_STATE_END,
+ D1_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D1_A1_EXCHANGE_KSVS,
+ D1_A23_WAIT_FOR_R0_PRIME,
+ D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER,
+ D1_A4_AUTHENTICATED,
+ D1_A6_WAIT_FOR_READY,
+ D1_A7_READ_KSV_LIST,
+ HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
+};
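Note how the state ID ranges chain: HDCP1_STATE_START reuses
HDCP_INITIAL_STATE_END and HDCP1_DP_STATE_START reuses HDCP1_STATE_END, which
is what lets is_in_hdcp1_states() and is_in_hdcp1_dp_states() below test
membership with two comparisons.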
+
+/* hdcp1 executions and transitions */
+typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str);
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+
+/* log functions */
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size);
+/* TODO: add adjustment log */
+
+/* psp functions */
+enum mod_hdcp_status mod_hdcp_add_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_remove_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+/* ddc functions */
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+
+/* hdcp version helpers */
+static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
+ hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT);
+}
+
+/* hdcp state helpers */
+static inline uint8_t current_state(struct mod_hdcp *hdcp)
+{
+ return hdcp->state.id;
+}
+
+static inline void set_state_id(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output, uint8_t id)
+{
+ memset(&hdcp->state, 0, sizeof(hdcp->state));
+ hdcp->state.id = id;
+ /* callback timer should be reset per state */
+ output->callback_stop = 1;
+ output->watchdog_timer_stop = 1;
+ HDCP_NEXT_STATE_TRACE(hdcp, id, output);
+}
+
+static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_STATE_START &&
+ current_state(hdcp) <= HDCP1_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_DP_STATE_START &&
+ current_state(hdcp) <= HDCP1_DP_STATE_END);
+}
+
+static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
+}
+
+static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
+}
+
+static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_INITIALIZED;
+}
+
+/* transition operation helpers */
+static inline void increment_stay_counter(struct mod_hdcp *hdcp)
+{
+ hdcp->state.stay_count++;
+}
+
+static inline void fail_and_restart_in_ms(uint16_t time,
+ enum mod_hdcp_status *status,
+ struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+ output->watchdog_timer_needed = 0;
+ output->watchdog_timer_delay = 0;
+ *status = MOD_HDCP_STATUS_RESET_NEEDED;
+}
+
+static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+}
+
+static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
+ struct mod_hdcp_output *output)
+{
+ output->watchdog_timer_needed = 1;
+ output->watchdog_timer_delay = time;
+}
+
+/* connection topology helpers */
+static inline uint8_t is_display_active(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
+}
+
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
+static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+}
+
+static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_active(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline struct mod_hdcp_display *get_first_added_display(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_active_display_at_index(
+ struct mod_hdcp *hdcp, uint8_t index)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (hdcp->connection.displays[i].index == index &&
+ is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_empty_display_container(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (!is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline void reset_retry_counts(struct mod_hdcp *hdcp)
+{
+ hdcp->connection.hdcp1_retry_count = 0;
+}
+
+#endif /* HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
new file mode 100644
index 000000000000..3db4a7da414f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
+{
+ uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv;
+ uint8_t count = 0;
+
+ while (n) {
+ count++;
+ n &= (n - 1);
+ }
+ return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+}
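The loop above is Kernighan's popcount: each n &= (n - 1) clears the lowest
set bit, so the iteration count equals the number of ones. Per the HDCP 1.4
spec a valid BKSV has exactly 20 ones among its 40 bits; illustratively, a
bksv holding 0x00000FFFFF (20 ones) passes while 0xFFFFFFFFFF (40 ones) or 0
fails.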
+
+static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
+{
+ if (is_dp_hdcp(hdcp))
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
+}
+
+static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ if (is_dp_hdcp(hdcp)) {
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_R0_P_AVAILABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ }
+ return status;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_DEVS_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return is_dp_hdcp(hdcp) ?
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) :
+ (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK);
+}
+
+static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_an,
+ &input->an_write, &status,
+ hdcp, "an_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv,
+ &input->aksv_write, &status,
+ hdcp, "aksv_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(validate_bksv,
+ &input->bksv_validation, &status,
+ hdcp, "bksv_validation"))
+ goto out;
+ if (hdcp->auth.msg.hdcp1.ainfo) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo,
+ &input->ainfo_write, &status,
+ hdcp, "ainfo_write"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status computations_validate_rx_test_for_repeater(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p,
+ &input->r0p_read, &status,
+ hdcp, "r0p_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx,
+ &input->rx_validation, &status,
+ hdcp, "rx_validation"))
+ goto out;
+ if (hdcp->connection.is_repeater) {
+ if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption)
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+ &input->link_maintenance, &status,
+ hdcp, "link_maintenance"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integiry_check, &status,
+ hdcp, "link_integiry_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_ksv_ready,
+ &input->ready_check, &status,
+ hdcp, "ready_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ uint8_t device_count;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo,
+ &input->binfo_read_dp, &status,
+ hdcp, "binfo_read_dp"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_no_max_cascade,
+ &input->max_cascade_check, &status,
+ hdcp, "max_cascade_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_max_devs,
+ &input->max_devs_check, &status,
+ hdcp, "max_devs_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check, &status,
+ hdcp, "device_count_check"))
+ goto out;
+ device_count = get_device_count(hdcp);
+ hdcp->auth.msg.hdcp1.ksvlist_size = device_count*5;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist,
+ &input->ksvlist_read, &status,
+ hdcp, "ksvlist_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp,
+ &input->vp_read, &status,
+ hdcp, "vp_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp,
+ &input->ksvlist_vp_validation, &status,
+ hdcp, "ksvlist_vp_validation"))
+ goto out;
+ if (input->encryption != PASS)
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp,
+ &input->hdcp_capable_dp, &status,
+ hdcp, "hdcp_capable_dp"))
+ goto out;
+out:
+ return status;
+}
+
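+/*
+ * DP flags R0' availability in Bstatus instead of guaranteeing it within
+ * a fixed time, so wait for a CP_IRQ or the watchdog before reading it
+ */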
+static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_r0p_available_dp,
+ &input->r0p_available_dp, &status,
+ hdcp, "r0p_available_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integiry_check, &status,
+ hdcp, "link_integiry_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+out:
+ return status;
+}
+
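+/*
+ * Execute one authentication step and record the result in the
+ * transition input. The PASS/FAIL flag is only traced when it changes,
+ * so repeated polling does not flood the log. Returns nonzero when the
+ * step succeeded.
+ */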
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
+{
+ *status = func(hdcp);
+ if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, str);
+ *flag = PASS;
+ } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
+ HDCP_INPUT_FAIL_TRACE(hdcp, str);
+ *flag = FAIL;
+ }
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ status = wait_for_active_rx(hdcp, event_ctx, input);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case H1_A45_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ status = wait_for_r0_prime_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(
+ hdcp, event_ctx, input);
+ break;
+ case D1_A4_AUTHENTICATED:
+ status = authenticated_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
new file mode 100644
index 000000000000..136b8011ff3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
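+/*
+ * The transition functions below consume the step results gathered in
+ * execution, schedule the next callback or watchdog, and select the
+ * next state. Comments such as "1A-04" refer to test cases in the
+ * HDCP 1.x compliance test specification.
+ */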
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ if (input->bksv_read != PASS || input->bcaps_read != PASS) {
+ /* 1A-04: repeatedly attempts on port access failure */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(300, output);
+ set_state_id(hdcp, output,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ if (input->bcaps_read != PASS ||
+ input->r0p_read != PASS ||
+ input->rx_validation != PASS ||
+ (!conn->is_repeater && input->encryption != PASS)) {
+ /* 1A-06: consider invalid r0' a failure */
+ /* 1A-08: consider bksv listed in SRM a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case H1_A45_AUTHENTICATED:
+ if (input->link_maintenance != PASS) {
+ /* 1A-07: consider invalid ri' a failure */
+ /* 1A-07a: consider read ri' not returned a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-03: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+				/* continue ksv list READY polling */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A9_READ_KSV_LIST);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ if (input->bstatus_read != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS ||
+ input->device_count_check != PASS ||
+ input->ksvlist_read != PASS ||
+ input->vp_read != PASS ||
+ input->ksvlist_vp_validation != PASS ||
+ input->encryption != PASS) {
+ /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */
+ /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-04: consider invalid v' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->bcaps_read != PASS) {
+ /* 1A-04: no authentication on bcaps read failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->hdcp_capable_dp != PASS) {
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ if (input->bstatus_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->r0p_available_dp != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+		set_state_id(hdcp, output,
+			D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ if (input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1A-06: consider invalid r0' a failure
+ * after 3 attempts.
+ * 1A-08: consider bksv listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+		} else if ((!conn->is_repeater && input->encryption != PASS) ||
+				(!conn->is_repeater && is_dp_mst_hdcp(hdcp) &&
+				input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
+ } else {
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case D1_A4_AUTHENTICATED:
+ if (input->link_integiry_check != PASS ||
+ input->reauth_request_check != PASS) {
+ /* 1A-07: restart hdcp on a link integrity failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ if (input->link_integiry_check == FAIL ||
+ input->reauth_request_check == FAIL) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-04: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A7_READ_KSV_LIST);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ if (input->binfo_read_dp != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS) {
+ /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->device_count_check != PASS) {
+			/*
+			 * some slow dongles don't update the device count
+			 * as soon as a downstream device is connected.
+			 * give them more time to react.
+			 */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (input->ksvlist_read != PASS ||
+ input->vp_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ksvlist_vp_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1B-05: consider invalid v' a failure
+ * after 3 attempts.
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+		} else if (input->encryption != PASS ||
+				(is_dp_mst_hdcp(hdcp) &&
+				input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
new file mode 100644
index 000000000000..e7baae059b85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define HDCP_I2C_ADDR 0x3a	/* 0x74 >> 1 */
+#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
+#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
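+/*
+ * KSV_READ_SIZE: the DP KSV FIFO is the 15-byte DPCD range
+ * 0x6802c - 0x6803a, so the list is drained in chunks of at most
+ * 15 bytes from a fixed address
+ */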
+
+enum mod_hdcp_ddc_message_id {
+ MOD_HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ MOD_HDCP_MESSAGE_ID_READ_BKSV = 0,
+ MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ MOD_HDCP_MESSAGE_ID_READ_VH_X,
+ MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ MOD_HDCP_MESSAGE_ID_READ_BINFO,
+
+ MOD_HDCP_MESSAGE_ID_MAX
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+};
+
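+/*
+ * Read a message from the receiver. DPCD reads are split into chunks
+ * of at most one AUX transaction, advancing the DPCD address for each
+ * chunk; I2C reads transfer the whole message from a fixed offset.
+ */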
+static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ success = hdcp->config.ddc.funcs.read_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[msg_id],
+ buf,
+ (uint32_t)buf_len);
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
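+/*
+ * Repeatedly read from the same address (FIFO semantics): only the
+ * destination buffer advances between chunks, never the DPCD address.
+ */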
+static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len,
+ uint8_t read_size)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, read_size);
+ status = read(hdcp, msg_id, buf + data_offset, cur_size);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+
+ return status;
+}
+
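+/*
+ * Write a message to the receiver. DPCD writes are chunked like reads;
+ * I2C writes prepend the register offset to the payload in hdcp->buf
+ * so the transfer goes out as a single transaction.
+ */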
+static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.write_dpcd(
+ hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ memmove(&hdcp->buf[1], buf, buf_len);
+ success = hdcp->config.ddc.funcs.write_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp->buf,
+ (uint32_t)(buf_len+1));
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV,
+ hdcp->auth.msg.hdcp1.bksv,
+ sizeof(hdcp->auth.msg.hdcp1.bksv));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ &hdcp->auth.msg.hdcp1.bcaps,
+ sizeof(hdcp->auth.msg.hdcp1.bcaps));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
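+	/* DP Bstatus is a single byte; the HDMI BStatus register is two bytes */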
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ 1);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ sizeof(hdcp->auth.msg.hdcp1.bstatus));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+ sizeof(hdcp->auth.msg.hdcp1.r0p));
+}
+
+/* special case, reading repeatedly at the same address, don't use read() */
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size,
+ KSV_READ_SIZE);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+				hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+ return status;
+}
+
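+/* V' is stored as five 4-byte parts, read individually as VH0 - VH4 */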
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ &hdcp->auth.msg.hdcp1.vp[0], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ &hdcp->auth.msg.hdcp1.vp[4], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ &hdcp->auth.msg.hdcp1.vp[8], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ &hdcp->auth.msg.hdcp1.vp[12], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ &hdcp->auth.msg.hdcp1.vp[16], 4);
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ hdcp->auth.msg.hdcp1.aksv,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+}
+
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ &hdcp->auth.msg.hdcp1.ainfo,
+ sizeof(hdcp->auth.msg.hdcp1.ainfo));
+}
+
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ hdcp->auth.msg.hdcp1.an,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
new file mode 100644
index 000000000000..3982ced5f969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
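+/*
+ * Render msg as hex into buf: 16 bytes per line, 3 characters per byte
+ * ("XX "), each line preceded by a newline. Nothing is written unless
+ * buf can hold the fully rendered message plus terminator.
+ */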
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size)
+{
+ const uint8_t bytes_per_line = 16,
+ byte_size = 3,
+ newline_size = 1,
+ terminator_size = 1;
+ uint32_t line_count = msg_size / bytes_per_line,
+ trailing_bytes = msg_size % bytes_per_line;
+ uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count +
+ byte_size * trailing_bytes + newline_size + terminator_size;
+ uint32_t buf_pos = 0;
+ uint32_t i = 0;
+
+ if (buf_size >= target_size) {
+ for (i = 0; i < msg_size; i++) {
+ if (i % bytes_per_line == 0)
+ buf[buf_pos++] = '\n';
+ sprintf(&buf[buf_pos], "%02X ", msg[i]);
+ buf_pos += byte_size;
+ }
+ buf[buf_pos++] = '\0';
+ }
+}
+
+char *mod_hdcp_status_to_str(int32_t status)
+{
+ switch (status) {
+ case MOD_HDCP_STATUS_SUCCESS:
+ return "MOD_HDCP_STATUS_SUCCESS";
+ case MOD_HDCP_STATUS_FAILURE:
+ return "MOD_HDCP_STATUS_FAILURE";
+ case MOD_HDCP_STATUS_RESET_NEEDED:
+ return "MOD_HDCP_STATUS_RESET_NEEDED";
+ case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
+ case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
+ case MOD_HDCP_STATUS_INVALID_STATE:
+ return "MOD_HDCP_STATUS_INVALID_STATE";
+ case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
+ return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
+ case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
+ return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
+ case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
+ return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
+ case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
+ case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
+ return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
+ case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
+ return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
+ case MOD_HDCP_STATUS_DDC_FAILURE:
+ return "MOD_HDCP_STATUS_DDC_FAILURE";
+ case MOD_HDCP_STATUS_INVALID_OPERATION:
+ return "MOD_HDCP_STATUS_INVALID_OPERATION";
+ default:
+ return "MOD_HDCP_STATUS_UNKNOWN";
+ }
+}
+
+char *mod_hdcp_state_id_to_str(int32_t id)
+{
+ switch (id) {
+ case HDCP_UNINITIALIZED:
+ return "HDCP_UNINITIALIZED";
+ case HDCP_INITIALIZED:
+ return "HDCP_INITIALIZED";
+ case HDCP_CP_NOT_DESIRED:
+ return "HDCP_CP_NOT_DESIRED";
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ return "H1_A0_WAIT_FOR_ACTIVE_RX";
+ case H1_A1_EXCHANGE_KSVS:
+ return "H1_A1_EXCHANGE_KSVS";
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
+ case H1_A45_AUTHENTICATED:
+ return "H1_A45_AUTHENTICATED";
+ case H1_A8_WAIT_FOR_READY:
+ return "H1_A8_WAIT_FOR_READY";
+ case H1_A9_READ_KSV_LIST:
+ return "H1_A9_READ_KSV_LIST";
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D1_A1_EXCHANGE_KSVS:
+ return "D1_A1_EXCHANGE_KSVS";
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ return "D1_A23_WAIT_FOR_R0_PRIME";
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
+ case D1_A4_AUTHENTICATED:
+ return "D1_A4_AUTHENTICATED";
+ case D1_A6_WAIT_FOR_READY:
+ return "D1_A6_WAIT_FOR_READY";
+ case D1_A7_READ_KSV_LIST:
+ return "D1_A7_READ_KSV_LIST";
+ default:
+ return "UNKNOWN_STATE_ID";
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
new file mode 100644
index 000000000000..2fd0e0a893ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_LOG_H_
+#define MOD_HDCP_LOG_H_
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__)
+#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
+#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#endif
+
+/* default logs */
+#define HDCP_ERROR_TRACE(hdcp, status) \
+ HDCP_LOG_ERR(hdcp, \
+ "[Link %d] ERROR %s IN STATE %s", \
+ hdcp->config.index, \
+ mod_hdcp_status_to_str(status), \
+ mod_hdcp_state_id_to_str(hdcp->state.id))
+#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 enabled on display %d", \
+ hdcp->config.index, displayIndex)
+/* state machine logs */
+#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] HDCP_REMOVE_DISPLAY index %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_INPUT_PASS_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tPASS %s", \
+ hdcp->config.index, str)
+#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tFAIL %s", \
+ hdcp->config.index, str)
+#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \
+ if (output->watchdog_timer_needed) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s with %d ms watchdog", \
+ hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \
+ else \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s", hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id)); \
+} while (0)
+#define HDCP_TIMEOUT_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index)
+#define HDCP_CPIRQ_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
+#define HDCP_EVENT_TRACE(hdcp, event) do { \
+	if ((event) == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+		HDCP_TIMEOUT_TRACE(hdcp); \
+	else if ((event) == MOD_HDCP_EVENT_CPIRQ) \
+		HDCP_CPIRQ_TRACE(hdcp); \
+} while (0)
+/* TODO: find some way to tell if logging is off to save time */
+#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \
+ msg_name, hdcp->buf); \
+} while (0)
+#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \
+ hdcp->config.index, msg_name,\
+ hdcp->buf); \
+} while (0)
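+/* dump all hdcp 1.4 messages exchanged during a completed authentication */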
+#define HDCP_FULL_DDC_TRACE(hdcp) do { \
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+ sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+ sizeof(hdcp->auth.msg.hdcp1.an)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+ sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+ sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+ sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+ hdcp->auth.msg.hdcp1.ksvlist_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+ sizeof(hdcp->auth.msg.hdcp1.vp)); \
+} while (0)
+#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
+ hdcp->config.index)
+#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
+#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index)
+#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \
+} while (0)
+#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
+} while (0)
+
+#endif /* MOD_HDCP_LOG_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
new file mode 100644
index 000000000000..646d909bbc37
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#define MAX_NUM_DISPLAYS 24
+
+#include "hdcp.h"
+
+#include "amdgpu.h"
+#include "hdcp_psp.h"
+
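+/*
+ * Each PSP command below follows the same pattern: marshal the request
+ * into the TA shared buffer, invoke the TA through the PSP, then check
+ * the status word written back by the TA before consuming any output.
+ */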
+enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ uint8_t i;
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) {
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ display = &hdcp->connection.displays[i];
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ struct mod_hdcp_link *link = &hdcp->connection.link;
+ uint8_t i;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
+ display = &hdcp->connection.displays[i];
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv,
+ TA_HDCP__HDCP1_KSV_SIZE);
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p;
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+ if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
+ /* needs second part of authentication */
+ hdcp->connection.is_repeater = 1;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ hdcp->connection.is_repeater = 0;
+	} else {
+		return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+	}
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
+ }
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size;
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp,
+ sizeof(hdcp->auth.msg.hdcp1.vp));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo =
+ is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ int i = 0;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
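+/*
+ * Link maintenance queries the encryption status from the TA; any
+ * protection level other than 1 (encryption active) is treated as a
+ * maintenance failure, which makes the state machine re-authenticate.
+ */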
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+
+ return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
new file mode 100644
index 000000000000..986fc07ea9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_HDCP_HDCP_PSP_H_
+#define MODULES_HDCP_HDCP_PSP_H_
+
+/*
+ * NOTE: These parameters are a one-to-one copy of the
+ * parameters required by PSP
+ */
+enum bgd_security_hdcp_encryption_level {
+ HDCP_ENCRYPTION_LEVEL__INVALID = 0,
+ HDCP_ENCRYPTION_LEVEL__OFF,
+ HDCP_ENCRYPTION_LEVEL__ON
+};
+
+enum ta_dtm_command {
+ TA_DTM_COMMAND__UNUSED_1 = 1,
+ TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
+ TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE
+};
+
+/* DTM related enumerations */
+/**********************************************************/
+
+enum ta_dtm_status {
+ TA_DTM_STATUS__SUCCESS = 0x00,
+ TA_DTM_STATUS__GENERIC_FAILURE = 0x01,
+ TA_DTM_STATUS__INVALID_PARAMETER = 0x02,
+ TA_DTM_STATUS__NULL_POINTER = 0x3
+};
+
+/* input/output structures for DTM commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+enum ta_dtm_hdcp_version_max_supported {
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23
+};
+
+struct ta_dtm_topology_update_input_v2 {
+ /* display handle is unique across the driver and is used to identify a display */
+ /* for all security interfaces which reference displays such as HDCP */
+ uint32_t display_handle;
+ uint32_t is_active;
+ uint32_t is_miracast;
+ uint32_t controller;
+ uint32_t ddc_line;
+ uint32_t dig_be;
+ uint32_t dig_fe;
+ uint32_t dp_mst_vcid;
+ uint32_t is_assr;
+ uint32_t max_hdcp_supported_version;
+};
+
+struct ta_dtm_topology_assr_enable {
+ uint32_t display_topology_dig_be_index;
+};
+
+/**
+ * Output structures
+ */
+
+/* No output structures yet */
+
+union ta_dtm_cmd_input {
+ struct ta_dtm_topology_update_input_v2 topology_update_v2;
+ struct ta_dtm_topology_assr_enable topology_assr_enable;
+};
+
+union ta_dtm_cmd_output {
+ uint32_t reserved;
+};
+
+struct ta_dtm_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_dtm_status dtm_status;
+ uint32_t reserved;
+ union ta_dtm_cmd_input dtm_in_message;
+ union ta_dtm_cmd_output dtm_out_message;
+};
+
+int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
+ uint64_t fence_mc_addr);
+
+enum ta_hdcp_command {
+ TA_HDCP_COMMAND__INITIALIZE,
+ TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
+ TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION,
+ TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+};
+
+
+/* HDCP related enumerations */
+/**********************************************************/
+#define TA_HDCP__INVALID_SESSION 0xFFFF
+#define TA_HDCP__HDCP1_AN_SIZE 8
+#define TA_HDCP__HDCP1_KSV_SIZE 5
+#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
+#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+
+enum ta_hdcp_status {
+ TA_HDCP_STATUS__SUCCESS = 0x00,
+ TA_HDCP_STATUS__GENERIC_FAILURE = 0x01,
+ TA_HDCP_STATUS__NULL_POINTER = 0x02,
+ TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03,
+ TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04,
+ TA_HDCP_STATUS__INVALID_PARAMETER = 0x05,
+ TA_HDCP_STATUS__VHX_ERROR = 0x06,
+ TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07,
+ TA_HDCP_STATUS__SRM_FAILURE = 0x08,
+ TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09,
+ TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A,
+ TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B,
+ TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C,
+ TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D,
+ TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E,
+ TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F,
+ TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10,
+ TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11,
+ TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12,
+ TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13,
+ TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14,
+ TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15,
+ TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16,
+ TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17
+};
+
+enum ta_hdcp_authentication_status {
+ TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+};
+
+
+/* input/output structures for HDCP commands */
+/**********************************************************/
+struct ta_hdcp_cmd_hdcp1_create_session_input {
+ uint8_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_create_session_output {
+ uint32_t session_handle;
+ uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_primary;
+ uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_input {
+ uint32_t session_handle;
+ uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bcaps;
+ uint16_t r0_prime_primary;
+ uint16_t r0_prime_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_input {
+ uint32_t session_handle;
+ uint16_t bstatus_binfo;
+ uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE];
+ uint32_t ksv_list_size;
+ uint8_t pj_prime;
+ uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
+ uint32_t protection_level;
+};
+
+/**********************************************************/
+/* Common input structure for HDCP callbacks */
+union ta_hdcp_cmd_input {
+ struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
+ struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+};
+
+/* Common output structure for HDCP callbacks */
+union ta_hdcp_cmd_output {
+ struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+};
+/**********************************************************/
+
+struct ta_hdcp_shared_memory {
+ uint32_t cmd_id;
+ enum ta_hdcp_status hdcp_status;
+ uint32_t reserved;
+ union ta_hdcp_cmd_input in_msg;
+ union ta_hdcp_cmd_output out_msg;
+};
+
+enum psp_status {
+ PSP_STATUS__SUCCESS = 0,
+ PSP_STATUS__ERROR_INVALID_PARAMS,
+ PSP_STATUS__ERROR_GENERIC,
+ PSP_STATUS__ERROR_OUT_OF_MEMORY,
+ PSP_STATUS__ERROR_UNSUPPORTED_FEATURE
+};
+
+#endif /* MODULES_HDCP_HDCP_PSP_H_ */
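+
For orientation, a minimal sketch (not the driver's actual submission path) of
how a caller would fill the shared memory above to issue an HDCP1
create-session command; ta_invoke() is a hypothetical stand-in for writing the
buffer to PSP-visible memory and ringing the trusted app:

	static enum ta_hdcp_status hdcp1_create_session_sketch(
			struct ta_hdcp_shared_memory *shared, uint8_t display_handle,
			int (*ta_invoke)(struct ta_hdcp_shared_memory *shared))
	{
		memset(shared, 0, sizeof(*shared));	/* assumes <linux/string.h> */
		shared->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
		shared->in_msg.hdcp1_create_session.display_handle = display_handle;

		if (ta_invoke(shared))			/* submit to the trusted app */
			return TA_HDCP_STATUS__GENERIC_FAILURE;

		/* on success, An/Aksv come back in out_msg.hdcp1_create_session */
		return shared->hdcp_status;
	}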
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
new file mode 100644
index 000000000000..dea21702edff
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_H_
+#define MOD_HDCP_H_
+
+#include "os_types.h"
+#include "signal_types.h"
+
+/* Forward Declarations */
+struct mod_hdcp;
+
+#define MAX_NUM_OF_DISPLAYS 6
+#define MAX_NUM_OF_ATTEMPTS 4
+#define MAX_NUM_OF_ERROR_TRACE 10
+
+/* detailed return status */
+enum mod_hdcp_status {
+ MOD_HDCP_STATUS_SUCCESS = 0,
+ MOD_HDCP_STATUS_FAILURE,
+ MOD_HDCP_STATUS_RESET_NEEDED,
+ MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
+ MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
+ MOD_HDCP_STATUS_INVALID_STATE,
+ MOD_HDCP_STATUS_NOT_IMPLEMENTED,
+ MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
+ MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
+ MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
+ MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
+ MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
+ MOD_HDCP_STATUS_INVALID_OPERATION,
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
+};
+
+struct mod_hdcp_displayport {
+ uint8_t rev;
+ uint8_t assr_supported;
+};
+
+struct mod_hdcp_hdmi {
+ uint8_t reserved;
+};
+enum mod_hdcp_operation_mode {
+ MOD_HDCP_MODE_OFF,
+ MOD_HDCP_MODE_DEFAULT,
+ MOD_HDCP_MODE_DP,
+ MOD_HDCP_MODE_DP_MST
+};
+
+enum mod_hdcp_display_state {
+ MOD_HDCP_DISPLAY_INACTIVE = 0,
+ MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
+ MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
+};
+
+struct mod_hdcp_ddc {
+ void *handle;
+ struct {
+ bool (*read_i2c)(void *handle,
+ uint32_t address,
+ uint8_t offset,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_i2c)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ bool (*read_dpcd)(void *handle,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_dpcd)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ } funcs;
+};
+
+struct mod_hdcp_psp {
+ void *handle;
+ void *funcs;
+};
+
+struct mod_hdcp_display_adjustment {
+ uint8_t disable : 1;
+ uint8_t reserved : 7;
+};
+
+struct mod_hdcp_link_adjustment_hdcp1 {
+ uint8_t disable : 1;
+ uint8_t postpone_encryption : 1;
+ uint8_t reserved : 6;
+};
+
+struct mod_hdcp_link_adjustment_hdcp2 {
+ uint8_t disable : 1;
+ uint8_t disable_type1 : 1;
+ uint8_t force_no_stored_km : 1;
+ uint8_t increase_h_prime_timeout: 1;
+ uint8_t reserved : 4;
+};
+
+struct mod_hdcp_link_adjustment {
+ uint8_t auth_delay;
+ struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
+ struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_error {
+ enum mod_hdcp_status status;
+ uint8_t state_id;
+};
+
+struct mod_hdcp_trace {
+ struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
+ uint8_t error_count;
+};
+
+enum mod_hdcp_encryption_status {
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON
+};
+
+/* per-link events dm has to notify the hdcp module of */
+enum mod_hdcp_event {
+ MOD_HDCP_EVENT_CALLBACK = 0,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ MOD_HDCP_EVENT_CPIRQ
+};
+
+/* output flags from module requesting timer operations */
+struct mod_hdcp_output {
+ uint8_t callback_needed;
+ uint8_t callback_stop;
+ uint8_t watchdog_timer_needed;
+ uint8_t watchdog_timer_stop;
+ uint16_t callback_delay;
+ uint16_t watchdog_timer_delay;
+};
+
+/* used to represent per display info */
+struct mod_hdcp_display {
+ enum mod_hdcp_display_state state;
+ uint8_t index;
+ uint8_t controller;
+ uint8_t dig_fe;
+ union {
+ uint8_t vc_id;
+ };
+ struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per link info */
+/* in case a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+ enum mod_hdcp_operation_mode mode;
+ uint8_t dig_be;
+ uint8_t ddc_line;
+ union {
+ struct mod_hdcp_displayport dp;
+ struct mod_hdcp_hdmi hdmi;
+ };
+ struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+ const struct mod_hdcp_display *display;
+ const struct mod_hdcp_link *link;
+ const struct mod_hdcp_trace *trace;
+ enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* contains values set on each external display configuration change */
+struct mod_hdcp_config {
+ struct mod_hdcp_psp psp;
+ struct mod_hdcp_ddc ddc;
+ uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates mod_hdcp memory per dc_link on dm init, using this size */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output);
+
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output);
+
+/* called per link on events (i.e. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to c string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert state id to c string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
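+
A hedged sketch of the expected call order for the interface above, assuming a
dm-side caller that has allocated mod_hdcp_get_memory_size() bytes for hdcp;
error handling and the dm timer plumbing are omitted:

	static void mod_hdcp_lifecycle_sketch(struct mod_hdcp *hdcp,
			struct mod_hdcp_config *config, struct mod_hdcp_link *link,
			struct mod_hdcp_display *display)
	{
		struct mod_hdcp_output output = { 0 };

		mod_hdcp_setup(hdcp, config);			/* on link creation */
		mod_hdcp_add_display(hdcp, link, display, &output);

		/* dm arms a callback/watchdog per the output flags, then feeds
		 * events back into the state machine as they fire: */
		if (output.callback_needed)
			mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK, &output);

		mod_hdcp_remove_display(hdcp, display->index, &output);
		mod_hdcp_teardown(hdcp);			/* on link destroy */
	}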
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index d930bdecb117..ca8ce3c55337 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -35,4 +35,7 @@ struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index d885d642ed7f..db6b08f6d093 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -31,6 +31,7 @@
#include "dc.h"
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HF_VSIF_VERSION 1
// VTEM Byte Offset
#define VTEM_PB0 0
@@ -395,3 +396,100 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
+/**
+ *****************************************************************************
+ *  Function: mod_build_hf_vsif_infopacket
+ *
+ *  @brief
+ *		Prepare the HDMI Vendor Specific InfoFrame (VSIF), built up
+ *		according to the HDMI specification.
+ *
+ *  @param [in] stream: contains data needed to construct the VSIF (e.g. timing_3d_format)
+ *  @param [out] info_packet: output structure where the VSIF is stored
+ *  @param [in] ALLMEnabled: nonzero if HF-VSIF ALLM signaling should be included
+ *  @param [in] ALLMValue: ALLM state to signal when enabled
+ *****************************************************************************
+ */
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+{
+ unsigned int length = 5;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ bool bALLM = (bool)ALLMEnabled;
+ bool bALLMVal = (bool)ALLMValue;
+
+ info_packet->valid = false;
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160
+ && format == TIMING_3D_FORMAT_NONE)
+ hdmi_vic_mode = true;
+
+ if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+ return;
+
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ if (bALLM) {
+ info_packet->sb[1] = 0xD8;
+ info_packet->sb[2] = 0x5D;
+ info_packet->sb[3] = 0xC4;
+ info_packet->sb[4] = HF_VSIF_VERSION;
+ }
+
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = (uint8_t) (length);
+
+ if (bALLM)
+ info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
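The sb[0] assignment above relies on the standard HDMI infoframe invariant:
the three header bytes plus sb[0..length] must sum to zero modulo 256. A
receiver-side check (a sketch, not part of this patch) makes the property
explicit:

	static bool hf_vsif_checksum_ok(const struct dc_info_packet *pkt,
			unsigned int length)
	{
		uint8_t sum = pkt->hb0 + pkt->hb1 + pkt->hb2;
		unsigned int i;

		for (i = 0; i <= length; i++)	/* sb[0] (checksum) .. sb[length] */
			sum += pkt->sb[i];

		return sum == 0;		/* uint8_t arithmetic wraps mod 256 */
	}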
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 05e2be856037..4e2f615c3566 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -80,18 +80,18 @@ struct abm_parameters {
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0},
- {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0},
- {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70},
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters * const abm_settings[] = {
@@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = {
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
- uint16_t flags; /* 0x00 U16 */
+ uint16_t min_abm_backlight; /* 0x00 U16 */
/* parameters for ABM2.0 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
@@ -140,10 +140,10 @@ struct iram_table_v_2 {
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint16_t blRampReduction; /* 0xf7 */
@@ -164,42 +164,43 @@ struct iram_table_v_2_2 {
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
- uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
- uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
- uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
- uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
- uint8_t pad[21]; /* 0x6b U0.8 */
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
+ uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
+ uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
+ uint16_t min_abm_backlight; /* 0x6b U16 */
+ uint8_t pad[19]; /* 0x6d U0.8 */
/* parameters for crgb conversion */
- uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
- uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
- uint8_t dmcu_state; /* 0xf6 */
-
- uint8_t dummy1; /* 0xf7 */
- uint8_t dummy2; /* 0xf8 */
- uint8_t dummy3; /* 0xf9 */
- uint8_t dummy4; /* 0xfa */
- uint8_t dummy5; /* 0xfb */
- uint8_t dummy6; /* 0xfc */
- uint8_t dummy7; /* 0xfd */
- uint8_t dummy8; /* 0xfe */
- uint8_t dummy9; /* 0xff */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint8_t dummy1; /* 0xf7 */
+ uint8_t dummy2; /* 0xf8 */
+ uint8_t dummy3; /* 0xf9 */
+ uint8_t dummy4; /* 0xfa */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
@@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
{
unsigned int set = params.set;
- ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain = 0xb3;
ram_table->blRampReduction =
@@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
ram_table->deviation_gain[0] = 0xb3;
ram_table->deviation_gain[1] = 0xa8;
ram_table->deviation_gain[2] = 0x98;
@@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
unsigned int set = params.set;
ram_table->flags = 0x0;
+
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index da5df00fedce..e54157026330 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
unsigned int backlight_lut_array_size;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
+ unsigned int min_abm_backlight;
unsigned int set;
};
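For illustration, a hedged sketch of a dm caller populating the new field
before the iRAM tables are programmed (the 0x28F value is purely illustrative;
the fill_iram_* routines above store it via cpu_to_be16()):

	struct dmcu_iram_parameters params = { 0 };

	params.set = 0;
	params.min_abm_backlight = 0x28F;	/* minimum ABM backlight level */
	/* ... fill the backlight LUT fields, then program iRAM ... */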
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 8889aaceec60..dc7eb28f0296 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -143,6 +143,8 @@ enum PP_FEATURE_MASK {
enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
+ DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
+ DC_PSR_MASK = 0x8,
};
enum amd_dpm_forced_level;
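These feature bits are plain flags tested against the driver's dcfeaturemask
module parameter; a sketch, assuming amdgpu_dc_feature_mask holds that value:

	bool psr_allowed = !!(amdgpu_dc_feature_mask & DC_PSR_MASK);
	bool frac_pwm_disabled =
		!!(amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK);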
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
index a761ba07f937..fce965984e76 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmBUS_CNTL 0x1508
#define mmCONFIG_CNTL 0x1509
#define mmCONFIG_MEMSIZE 0x150a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
index 8fbfd0261d27..39cc4880beb4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0
#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 809759f7bb81..8d05d6ca1c8d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
index adc71b01f793..73435687d049 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
#define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index be4249adb356..eddf83ec1c39 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -9859,6 +9859,8 @@
#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
#define mmDP0_DP_MSA_MISC 0x210e
#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP0_DP_VID_TIMING 0x2110
#define mmDP0_DP_VID_TIMING_BASE_IDX 2
#define mmDP0_DP_VID_N 0x2111
@@ -10187,6 +10189,8 @@
#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
#define mmDP1_DP_MSA_MISC 0x220e
#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP1_DP_VID_TIMING 0x2210
#define mmDP1_DP_VID_TIMING_BASE_IDX 2
#define mmDP1_DP_VID_N 0x2211
@@ -10515,6 +10519,8 @@
#define mmDP2_DP_STEER_FIFO_BASE_IDX 2
#define mmDP2_DP_MSA_MISC 0x230e
#define mmDP2_DP_MSA_MISC_BASE_IDX 2
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP2_DP_VID_TIMING 0x2310
#define mmDP2_DP_VID_TIMING_BASE_IDX 2
#define mmDP2_DP_VID_N 0x2311
@@ -10843,6 +10849,8 @@
#define mmDP3_DP_STEER_FIFO_BASE_IDX 2
#define mmDP3_DP_MSA_MISC 0x240e
#define mmDP3_DP_MSA_MISC_BASE_IDX 2
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP3_DP_VID_TIMING 0x2410
#define mmDP3_DP_VID_TIMING_BASE_IDX 2
#define mmDP3_DP_VID_N 0x2411
@@ -11171,6 +11179,8 @@
#define mmDP4_DP_STEER_FIFO_BASE_IDX 2
#define mmDP4_DP_MSA_MISC 0x250e
#define mmDP4_DP_MSA_MISC_BASE_IDX 2
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP4_DP_VID_TIMING 0x2510
#define mmDP4_DP_VID_TIMING_BASE_IDX 2
#define mmDP4_DP_VID_N 0x2511
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index ca16d9125fbc..2bfaaa8157d0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -1146,7 +1146,14 @@
#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
#define mmATC_L2_CGTT_CLK_CTRL 0x080c
#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810
+#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811
+#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2pfdec
// base address: 0xa100
@@ -1206,7 +1213,14 @@
#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
#define mmVM_L2_CGTT_CLK_CTRL 0x085e
#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmVM_L2_MEM_ECC_INDEX 0x0860
+#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_MEM_ECC_CNT 0x0862
+#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863
+#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2vcdec
// base address: 0xa200
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 064c4bb1dc62..d4c613a85352 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -6661,7 +6661,6 @@
#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
// addressBlock: gc_utcl2_vml2pfdec
//VM_L2_CNTL
#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
@@ -6991,7 +6990,22 @@
#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
+//VM_L2_MEM_ECC_INDEX
+#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_WALKER_MEM_ECC_INDEX
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_MEM_ECC_CNT
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
+//VM_L2_WALKER_MEM_ECC_CNT
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
// addressBlock: gc_utcl2_vml2vcdec
//VM_CONTEXT0_CNTL
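The new *_MASK/__SHIFT pairs are consumed with the usual extract pattern. A
sketch of reading the L2 ECC single-/double-error counters, where RREG32()
stands in for the caller's register accessor and the BASE_IDX indirection is
glossed over:

	uint32_t cnt = RREG32(mmVM_L2_MEM_ECC_CNT);
	uint32_t sec = (cnt & VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK) >>
		       VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT;
	uint32_t ded = (cnt & VM_L2_MEM_ECC_CNT__DED_COUNT_MASK) >>
		       VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT;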
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
index 4bcacf529852..991128bb9476 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -22,6 +22,9 @@
#ifndef _nbio_7_4_0_SMN_HEADER
#define _nbio_7_4_0_SMN_HEADER
+// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk
+// base address: 0x10100000
+#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define smnCPM_CONTROL 0x11180460
@@ -53,4 +56,13 @@
#define smnPCIE_RX_NUM_NAK 0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+// addressBlock: nbio_iohub_nb_misc_misc_cfgdec
+// base address: 0x13a10000
+#define smnIOHC_INTERRUPT_EOI 0x13a10120
+
+// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec
+// base address: 0x13a20000
+#define smnRAS_GLOBAL_STATUS_LO 0x13a20020
+#define smnRAS_GLOBAL_STATUS_HI 0x13a20024
+
#endif // _nbio_7_4_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index 994e796a28d7..ce5830ebe095 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2793,8 +2793,8 @@
#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2
#define mmBIF_FB_EN 0x00ff
#define mmBIF_FB_EN_BASE_IDX 2
-#define mmBIF_BUSY_DELAY_CNTR 0x0100
-#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2
+#define mmBIF_INTR_CNTL 0x0100
+#define mmBIF_INTR_CNTL_BASE_IDX 2
#define mmBIF_MST_TRANS_PENDING_VF 0x0109
#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2
#define mmBIF_SLV_TRANS_PENDING_VF 0x010a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
index d467b939c971..07f04b2b5bdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -20420,9 +20420,9 @@
#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
-//BIF_BUSY_DELAY_CNTR
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL
+//BIF_INTR_CNTL
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
//BIF_MST_TRANS_PENDING_VF
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
@@ -48436,4 +48436,47 @@
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+//IOHC_INTERRUPT_EOI
+#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0
+#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1
+#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2
+#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L
+#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L
+#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L
+
+//RAS_GLOBAL_STATUS_LO
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L
+//RAS_GLOBAL_STATUS_HI
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
index dc9895a684fe..096d878eb1de 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
@@ -588,11 +588,15 @@
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index dbc2e723f659..71169daa701a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 6af9f0217b34..61a9a84e0c3a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index bd3685166779..351446754c72 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
index 627906674fe8..4bfd5f8ba66c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index f35aba72e640..21da61c398f5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -52,6 +52,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 481ee6560aa9..f64fe0fbcb32 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -220,6 +220,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
index d3876052562b..687d6843c258 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
@@ -121,6 +121,98 @@
#define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
#define mmCKSVII2C_IC_COMP_TYPE 0x006d
#define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CON 0x0080
+#define mmCKSVII2C1_IC_CON_BASE_IDX 0
+#define mmCKSVII2C1_IC_TAR 0x0081
+#define mmCKSVII2C1_IC_TAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SAR 0x0082
+#define mmCKSVII2C1_IC_SAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_MADDR 0x0083
+#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DATA_CMD 0x0084
+#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_STAT 0x008b
+#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_MASK 0x008c
+#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_RX_TL 0x008e
+#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_TL 0x008f
+#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_INTR 0x0090
+#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_START_DET 0x0099
+#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE 0x009b
+#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define mmCKSVII2C1_IC_STATUS 0x009c
+#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_TXFLR 0x009d
+#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_RXFLR 0x009e
+#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_HOLD 0x009f
+#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_CR 0x00a2
+#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab
+#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac
+#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_TYPE 0x00ad
+#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
#define mmSMUIO_MP_RESET_INTR 0x00c1
#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
#define mmSMUIO_SOC_HALT 0x00c2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
index f8afa3518bf2..6905a9618127 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
@@ -268,6 +268,182 @@
//CKSVII2C_IC_COMP_TYPE
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+//CKSVII2C1_IC_TX_TL
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+//CKSVII2C1_IC_RXFLR
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
index cf2149cc12ee..90350f46a0c4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
@@ -24,6 +24,18 @@
// addressBlock: uvd0_mmsch_dec
// base address: 0x1e000
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
// addressBlock: uvd0_jpegnpdec
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e88541d67aa0..dd7cbc00a0aa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1
/* Total 32bit cap indication */
enum atombios_firmware_capability
{
- ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
- ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
- ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
- ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
- ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
- ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
+ ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
+ ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
};
enum atom_cooling_solution_id{
@@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
+/* This is part of vram_usagebyfirmware_v2_1 */
+struct vram_reserve_block
+{
+ uint32_t start_address_in_kb;
+ uint16_t used_by_firmware_in_kb;
+ uint16_t used_by_driver_in_kb;
+};
+
+/* Definitions for constants */
+enum atomfirmware_internal_constants
+{
+ ONE_KiB = 0x400,
+ ONE_MiB = 0x100000,
+};
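Since vram_usagebyfirmware_v2_1 reports its figures in KiB, the ONE_KiB/ONE_MiB constants let callers convert to bytes without magic numbers. A minimal sketch, where fw_usage is a hypothetical pointer to the structure above:

/* Illustrative only: convert the firmware-reported usage to bytes. */
uint32_t fw_reserved_bytes = fw_usage->used_by_firmware_in_kb * ONE_KiB;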
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 5dcb776548d8..7ec4331e67f2 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -25,7 +25,6 @@
#define _DISCOVERY_H_
#define PSP_HEADER_SIZE 256
-#define BINARY_MAX_SIZE (64 << 10)
#define BINARY_SIGNATURE 0x28211407
#define DISCOVERY_TABLE_SIGNATURE 0x53445049
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
new file mode 100644
index 000000000000..79af4258f259
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_NBIF_7_4_H__
+#define __IRQSRCS_NBIF_7_4_H__
+
+#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated
+#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off
+#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller
+#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT
+#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT
+#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives unsupported atomic opcode from MC
+#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW			0xCF	// BIF receives an atomic request from MC while AtomicOp Requester is not enabled in PCIE config space
+
+#endif // __IRQSRCS_NBIF_7_4_H__
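These source IDs identify NBIF interrupt lines when hooking handlers in the driver. A hedged sketch of registering the RAS controller interrupt, assuming the usual amdgpu_irq_add_id() helper, the BIF IH client id, and an irq source embedded in adev->nbio (the field is an assumption, not part of this patch):

/* Illustrative only: hook the RAS controller interrupt. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
		      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
		      &adev->nbio.ras_controller_irq);
if (r)
	return r;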
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 98b9533e672b..2cd217e60125 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -291,15 +291,18 @@ struct kfd2kgd_calls {
uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_valid)(
+ bool (*get_atc_vmid_pasid_mapping_info)(
struct kgd_dev *kgd,
- uint8_t vmid);
- uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
- struct kgd_dev *kgd,
- uint8_t vmid);
+ uint8_t vmid,
+ uint16_t *p_pasid);
+	/* No longer needed from GFXv9 onward. The scratch base address is
+	 * passed to the shader by the CP. Managing it is now the user mode
+	 * driver's responsibility.
+	 */
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
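The interface change above folds the two VMID-to-PASID queries into one callback that returns whether the mapping is valid and reports the PASID through an out parameter. A hedged sketch of the new calling convention (local names are assumptions):

/* Illustrative only: query the ATC VMID->PASID mapping in one call. */
uint16_t pasid;
bool valid = kfd2kgd->get_atc_vmid_pasid_mapping_info(kgd, vmid, &pasid);

if (valid)
	pr_debug("vmid %u is mapped to pasid %u\n", vmid, pasid);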
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 27cf0afaa0b4..a7f92d0b3a90 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -179,6 +179,11 @@ enum pp_mp1_state {
PP_MP1_STATE_RESET,
};
+enum pp_df_cstate {
+ DF_CSTATE_DISALLOW = 0,
+ DF_CSTATE_ALLOW,
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -215,6 +220,9 @@ enum pp_mp1_state {
((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \
(support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT)
+#define XGMI_MODE_PSTATE_D3 0
+#define XGMI_MODE_PSTATE_D0 1
+
struct seq_file;
enum amd_pp_clock_type;
struct amd_pp_simple_clock_info;
@@ -312,6 +320,8 @@ struct amd_pm_funcs {
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
int (*asic_reset_mode_2)(void *handle);
+ int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
};
#endif
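Clients of amd_pm_funcs are expected to probe the new hooks before calling them, since older backends may leave them NULL. A minimal sketch, assuming the usual powerplay handle layout:

/* Illustrative only: allow DF C-states through the new hook. */
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

if (pp_funcs && pp_funcs->set_df_cstate)
	pp_funcs->set_df_cstate(adev->powerplay.pp_handle,
				DF_CSTATE_ALLOW);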
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 094648cac392..07633e22e99a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
@@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300
#define UVD0_BASE__INST6_SEG3 0
#define UVD0_BASE__INST6_SEG4 0
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define DCN_BASE__INST0_SEG2 0x000034C0
+#define DCN_BASE__INST0_SEG3 0
+#define DCN_BASE__INST0_SEG4 0
+
+#define DCN_BASE__INST1_SEG0 0
+#define DCN_BASE__INST1_SEG1 0
+#define DCN_BASE__INST1_SEG2 0
+#define DCN_BASE__INST1_SEG3 0
+#define DCN_BASE__INST1_SEG4 0
+
+#define DCN_BASE__INST2_SEG0 0
+#define DCN_BASE__INST2_SEG1 0
+#define DCN_BASE__INST2_SEG2 0
+#define DCN_BASE__INST2_SEG3 0
+#define DCN_BASE__INST2_SEG4 0
+
+#define DCN_BASE__INST3_SEG0 0
+#define DCN_BASE__INST3_SEG1 0
+#define DCN_BASE__INST3_SEG2 0
+#define DCN_BASE__INST3_SEG3 0
+#define DCN_BASE__INST3_SEG4 0
+
+#define DCN_BASE__INST4_SEG0 0
+#define DCN_BASE__INST4_SEG1 0
+#define DCN_BASE__INST4_SEG2 0
+#define DCN_BASE__INST4_SEG3 0
+#define DCN_BASE__INST4_SEG4 0
#endif
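Each IP_BASE table entry pairs with the per-instance segment defines, so a register's MMIO address resolves as base segment plus dword offset, mirroring the SOC15_REG_OFFSET convention. A hedged sketch (seg and reg_offset are assumptions for the example):

/* Illustrative only: resolve a DCN register address from the table. */
uint32_t reg = DCN_BASE.instance[0].segment[seg] + reg_offset;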
diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h
index c14ba65a2415..adf1b754666e 100644
--- a/drivers/gpu/drm/amd/include/vega10_enum.h
+++ b/drivers/gpu/drm/amd/include/vega10_enum.h
@@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001,
typedef enum MTYPE {
MTYPE_NC = 0x00000000,
MTYPE_WC = 0x00000001,
+MTYPE_RW = 0x00000001,
MTYPE_CC = 0x00000002,
MTYPE_UC = 0x00000003,
} MTYPE;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index fa8ad7db2b3a..7932eb163a00 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -969,6 +969,14 @@ static int pp_dpm_switch_power_profile(void *handle,
workload = hwmgr->workload_setting[index];
}
+ if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+ hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
+ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+ }
+
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
mutex_unlock(&hwmgr->smu_lock);
@@ -1421,6 +1429,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
+ *cap = false;
if (!hwmgr)
return -EINVAL;
@@ -1548,6 +1557,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return ret;
}
+static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1606,4 +1649,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_ppfeature_status = pp_set_ppfeature_status,
.asic_reset_mode_2 = pp_asic_reset_mode_2,
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
+ .set_df_cstate = pp_set_df_cstate,
+ .set_xgmi_pstate = pp_set_xgmi_pstate,
};
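The two new entries terminate in the pp_set_df_cstate()/pp_set_xgmi_pstate() wrappers added above, which return quietly when power management is disabled or the hwmgr lacks the hook. A hedged sketch of an amdgpu-side call (the call site is an assumption):

/* Illustrative only: request the high XGMI pstate via the dispatch
 * table; XGMI_MODE_PSTATE_D0 comes from kgd_pp_interface.h. */
ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						XGMI_MODE_PSTATE_D0);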
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4acf139ea014..40b546c75fc2 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -25,11 +25,16 @@
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "vega20_ppt.h"
+#include "arcturus_ppt.h"
+#include "navi10_ppt.h"
+#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
@@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
goto failed;
@@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
}
failed:
+ mutex_unlock(&smu->mutex);
+
return size;
}
+static int smu_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ feature_low = (feature_mask >> 0 ) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
@@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
- return ret;
+ goto out;
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
@@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
if (feature_2_enabled) {
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
if (ret)
- return ret;
+ goto out;
}
if (feature_2_disabled) {
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
if (ret)
- return ret;
+ goto out;
}
+out:
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max)
{
- int ret = 0, clk_id = 0;
- uint32_t param;
+ int ret = 0;
if (min <= 0 && max <= 0)
return -EINVAL;
@@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
- if (ret)
- return ret;
- }
-
-
+ ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
return ret;
}
@@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max)
+ uint32_t *min, uint32_t *max, bool lock_needed)
{
uint32_t clock_limit;
int ret = 0;
@@ -237,6 +276,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max)
return -EINVAL;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
@@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
-
- return 0;
+ } else {
+ /*
+	 * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed
+	 * to the core driver, with helpers for the functionality that is
+	 * common (SMU_v11_x | SMU_v12_x funcs).
+ */
+ ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
- /*
- * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
- * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
- */
- ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
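The new lock_needed flag lets external callers take smu->mutex inside the helper, while ppt-internal callers that already hold the mutex pass false to avoid recursive locking. A hedged sketch of an external call (the locals are assumptions):

/* Illustrative only: an external caller asks the helper to lock. */
uint32_t min_freq, max_freq;
ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, true);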
@@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
return true;
}
-
+/**
+ * smu_dpm_set_power_gate - power gate/ungate the specific IP block
+ *
+ * @smu: smu_context pointer
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ *
+ * This API performs no smu->mutex locking because:
+ * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
+ *    caller guarantees that access is free of race conditions.
+ * 2. Or it is called on a user request to set power_dpm_force_performance_level,
+ *    in which case smu->mutex is already held by the parent API
+ *    smu_force_performance_level in the call path.
+ */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -364,12 +422,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
-{
- /* not support power state */
- return POWER_STATE_TYPE_DEFAULT;
-}
-
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -439,7 +491,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count || table_id < 0)
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -463,7 +515,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
return ret;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
@@ -483,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
- if (amdgpu_dpm != 1)
+ if (!is_support_sw_smu(adev))
return false;
if (adev->asic_type == CHIP_VEGA20)
@@ -495,16 +547,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t powerplay_table_size;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- return smu_table->power_play_table_size;
+ powerplay_table_size = smu_table->power_play_table_size;
+
+ mutex_unlock(&smu->mutex);
+
+ return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
@@ -531,13 +590,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
- mutex_unlock(&smu->mutex);
+
+ /*
+	 * A special hw_fini action (for Navi1x, the DPM disablement will be
+	 * skipped) may be needed for custom pptable uploading.
+ */
+ smu->uploading_custom_pp_table = true;
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
- return ret;
+ smu->uploading_custom_pp_table = false;
failed:
mutex_unlock(&smu->mutex);
@@ -569,41 +633,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
- feature_low = (feature_mask >> 0 ) & 0xffffffff;
- feature_high = (feature_mask >> 32) & 0xffffffff;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- }
-
- return ret;
-}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
@@ -633,8 +663,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- uint64_t feature_mask = 0;
- int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -642,22 +670,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
WARN_ON(feature_id > feature->feature_num);
- feature_mask = 1ULL << feature_id;
-
- mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_mask, enable);
- if (ret)
- goto failed;
-
- if (enable)
- test_and_set_bit(feature_id, feature->enabled);
- else
- test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return smu_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
@@ -707,20 +722,27 @@ static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ vega20_set_ppt_funcs(smu);
+ break;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ navi10_set_ppt_funcs(smu);
+ break;
case CHIP_ARCTURUS:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v11_0_set_smu_funcs(smu);
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+		smu->od_enabled = false;
break;
case CHIP_RENOIR:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v12_0_set_smu_funcs(smu);
+ renoir_set_ppt_funcs(smu);
break;
default:
return -EINVAL;
@@ -736,6 +758,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
+ smu->is_apu = false;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -749,11 +772,10 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT);
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_COMPLETE_INIT,
+ false);
return 0;
}
@@ -919,14 +941,9 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
- int32_t ret = 0;
+ int ret, i;
- if (table_count <= 0)
- return -EINVAL;
-
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
ret = amdgpu_bo_create_kernel(adev,
@@ -942,7 +959,7 @@ static int smu_init_fb_allocations(struct smu_context *smu)
return 0;
failed:
- for (; i > 0; i--) {
+ while (--i >= 0) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -957,13 +974,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
uint32_t i = 0;
- if (table_count == 0 || tables == NULL)
+ if (!tables)
return 0;
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -974,50 +990,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
-static int smu_override_pcie_parameters(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
- int ret;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
- return ret;
-}
-
static int smu_smc_table_hw_init(struct smu_context *smu,
bool initialize)
{
@@ -1092,8 +1064,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- /* issue RunAfllBtc msg */
- ret = smu_run_afll_btc(smu);
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
if (ret)
return ret;
@@ -1106,10 +1078,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
if (adev->asic_type != CHIP_ARCTURUS) {
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
@@ -1138,6 +1106,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -1147,7 +1121,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}
@@ -1226,29 +1200,46 @@ static int smu_free_memory_pool(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_start_smc_engine(struct smu_context *smu)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type < CHIP_NAVI10) {
- ret = smu_load_microcode(smu);
- if (ret)
- return ret;
+ if (smu->ppt_funcs->load_microcode) {
+ ret = smu->ppt_funcs->load_microcode(smu);
+ if (ret)
+ return ret;
+ }
}
}
- ret = smu_check_fw_status(smu);
+ if (smu->ppt_funcs->check_fw_status) {
+ ret = smu->ppt_funcs->check_fw_status(smu);
+ if (ret)
+ pr_err("SMC is not ready\n");
+ }
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ ret = smu_start_smc_engine(smu);
if (ret) {
- pr_err("SMC firmware status is not correct\n");
+ pr_err("SMU is not ready yet!\n");
return ret;
}
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, true);
}
if (!smu->pm_enabled)
@@ -1291,6 +1282,11 @@ failed:
return ret;
}
+static int smu_stop_dpms(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1303,6 +1299,33 @@ static int smu_hw_fini(void *handle)
smu_powergate_vcn(&adev->smu, true);
}
+ ret = smu_stop_thermal_control(smu);
+ if (ret) {
+ pr_warn("Fail to stop thermal control!\n");
+ return ret;
+ }
+
+ /*
+ * For custom pptable uploading, skip the DPM features
+ * disable process on Navi1x ASICs.
+	 * - The gfx-related features are under the control of the
+	 *   RLC on those ASICs, so reenabling them would require an
+	 *   RLC reinitialization, which costs considerably more
+	 *   effort.
+ *
+ * - SMU firmware can handle the DPM reenablement
+ * properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+ pr_warn("Fail to stop Dpms!\n");
+ return ret;
+ }
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -1344,7 +1367,10 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+ bool baco_feature_is_enabled = false;
+
+	if (!(adev->flags & AMD_IS_APU))
+ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
@@ -1363,6 +1389,8 @@ static int smu_suspend(void *handle)
if (adev->asic_type >= CHIP_NAVI10 &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1375,7 +1403,11 @@ static int smu_resume(void *handle)
pr_info("SMU is resuming...\n");
- mutex_lock(&smu->mutex);
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ goto failed;
+ }
ret = smu_smc_table_hw_init(smu, false);
if (ret)
@@ -1385,13 +1417,16 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, true);
+
+ smu->disable_uclk_switch = 0;
pr_info("SMU is resumed successfully!\n");
return 0;
+
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1409,8 +1444,9 @@ int smu_display_configuration_change(struct smu_context *smu,
mutex_lock(&smu->mutex);
- smu_set_deep_sleep_dcefclk(smu,
- display_config->min_dcef_deep_sleep_set_clk / 100);
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -1529,7 +1565,8 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
+
+ if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1587,9 +1624,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1653,7 +1690,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
workload = smu->workload_setting[index];
if (smu->power_profile_mode != workload)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
}
return ret;
@@ -1661,18 +1698,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id)
+ enum amd_pp_task task_id,
+ bool lock_needed)
{
int ret = 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_set_cpu_power_state(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1683,6 +1724,10 @@ int smu_handle_task(struct smu_context *smu,
break;
}
+out:
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -1715,7 +1760,7 @@ int smu_switch_power_profile(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
mutex_unlock(&smu->mutex);
@@ -1727,7 +1772,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
@@ -1742,15 +1787,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int ret = 0;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
ret = smu_enable_umd_pstate(smu, &level);
- if (ret)
+ if (ret) {
+ mutex_unlock(&smu->mutex);
return ret;
+ }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+
+ mutex_unlock(&smu->mutex);
return ret;
}
@@ -1766,6 +1818,144 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_debug("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ /* some asics may not support those messages */
+ if (smu_msg_get_index(smu, msg) < 0) {
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ ret = smu_send_smc_msg(smu, msg);
+ if (ret)
+ pr_err("[PrepareMp1] Failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ int ret = 0;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ if (ret)
+ pr_err("[SetDfCstate] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+
+ table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+
+ if (!table->cpu_addr)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ true);
+
+ return ret;
+}
+
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+ void *table = watermarks->cpu_addr;
+
+ mutex_lock(&smu->mutex);
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
@@ -1802,3 +1992,559 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
.rev = 0,
.funcs = &smu_ip_funcs,
};
+
+int smu_load_microcode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->load_microcode)
+ ret = smu->ppt_funcs->load_microcode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_check_fw_status(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->check_fw_status)
+ ret = smu->ppt_funcs->check_fw_status(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_gfx_cgpg)
+ ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_rpm)
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_limit)
+ ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_limit)
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->print_clk_levels)
+ ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_od_percentage)
+ ret = smu->ppt_funcs->get_od_percentage(smu, type);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_od_percentage)
+ ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->od_edit_dpm_table)
+ ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->read_sensor)
+ ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_profile_mode)
+ ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_get_fan_control_mode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_control_mode)
+ ret = smu->ppt_funcs->get_fan_control_mode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_control_mode(struct smu_context *smu, int value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_control_mode)
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_percent)
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_rpm)
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_active_display_count)
+ ret = smu->ppt_funcs->set_active_display_count(smu, count);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type)
+ ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_high_clocks)
+ ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_latency)
+ ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_voltage)
+ ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_clock_voltage_request)
+ ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_disable_memory_clock_switch)
+ ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_notify_smu_enable_pwe(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->notify_smu_enable_pwe)
+ ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_xgmi_pstate)
+ ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_azalia_d3_pme(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_azalia_d3_pme)
+ ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+bool smu_baco_is_support(struct smu_context *smu)
+{
+ bool ret = false;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_is_support)
+ ret = smu->ppt_funcs->baco_is_support(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
+{
+	if (!smu->ppt_funcs->baco_get_state)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+ *state = smu->ppt_funcs->baco_get_state(smu);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_baco_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_reset)
+ ret = smu->ppt_funcs->baco_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->mode2_reset)
+ ret = smu->ppt_funcs->mode2_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+ ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_uclk_dpm_states)
+ ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_current_power_state)
+ pm_state = smu->ppt_funcs->get_current_power_state(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return pm_state;
+}
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_dpm_clock_table)
+ ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+ uint32_t ret = 0;
+
+ if (smu->ppt_funcs->get_pptable_power_limit)
+ ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+ return ret;
+}
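All of the wrappers added above share one locked-dispatch shape: take smu->mutex, call through ppt_funcs when the hook is populated, then unlock. A hypothetical generic form of the pattern (some_hook is a placeholder, not a real member):

/* Illustrative only: the common shape of the exported wrappers. */
static int smu_locked_dispatch_sketch(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->some_hook)	/* hypothetical hook */
		ret = smu->ppt_funcs->some_hook(smu);
	mutex_unlock(&smu->mutex);

	return ret;
}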
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index d493a3f8c07a..58c7c4a3053e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -36,6 +37,12 @@
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
#include "nbio/nbio_7_4_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -112,8 +119,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
- MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc),
- MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
@@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(SMU_METRICS),
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(I2C_COMMANDS),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -294,6 +301,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -528,9 +538,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int arcturus_run_btc_afll(struct smu_context *smu)
+static int arcturus_run_btc(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ if (ret) {
+ pr_err("RunAfllBtc failed!\n");
+ return ret;
+ }
+
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -610,12 +628,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
return ret;
}
+ /*
+	 * In the DPM-disabled case there is only one clock level, and
+	 * it is safe to assume that it is always the current clock.
+ */
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_MCLK:
@@ -635,9 +658,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_SOCCLK:
@@ -657,9 +681,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_FCLK:
@@ -679,9 +704,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < single_dpm_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
default:
@@ -756,8 +782,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
uint32_t soft_min_level, soft_max_level;
int ret = 0;
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -792,91 +816,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
- single_dpm_table = &(dpm_table->mem_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_SOCCLK:
- single_dpm_table = &(dpm_table->soc_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_FCLK:
- single_dpm_table = &(dpm_table->fclk_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
+ /*
+ * Should not arrive here since Arcturus does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
break;
default:
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1043,7 +995,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -1186,6 +1138,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_level;
int ret = 0;
@@ -1199,40 +1152,27 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_level].value;
- /* uclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
-
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->mem_table.dpm_levels[soft_level].value;
-
- /* socclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
-
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload boot level to %s!\n",
highest ? "highest" : "lowest");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload dpm max level to %s!\n!",
highest ? "highest" : "lowest");
return ret;
}
+ if (hive)
+ /*
+ * Force XGMI Pstate to highest or lowest
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0);
+
return ret;
}
@@ -1240,6 +1180,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_min_level, soft_max_level;
int ret = 0;
@@ -1251,34 +1192,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_max_level].value;
- /* uclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->gfx_table.dpm_levels[soft_min_level].value;
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->gfx_table.dpm_levels[soft_max_level].value;
-
- /* socclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_levels[soft_min_level].value;
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Bootup Levels!");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Max Levels!");
return ret;
}
+ if (hive)
+ /*
+ * Reset XGMI Pstate back to default
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, 0);
+
return ret;
}
@@ -1329,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1360,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
@@ -1891,6 +1816,260 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (ret) {
+ pr_err("[EnableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (ret) {
+ pr_err("[DisableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+ /* First 2 bytes are always writes, carrying the 2-byte EEPROM address */
+ if (i < 2)
+ cmd->Cmd = 1;
+ else
+ cmd->Cmd = write;
+
+ /* Add RESTART for read after address filled */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP in the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless of read or write to simplify the code */
+ cmd->RegisterAddr = data[i];
+ }
+}
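
The command flags set above follow a fixed pattern: two leading write commands carry the EEPROM address, a read transaction gets a RESTART on the first data command (index 2), and the last command carries STOP. A minimal standalone sketch of that layout for a 4-byte read; the SKETCH_* values and the struct are simplified stand-ins for the firmware's CMDCONFIG_* masks and SwI2cCmd_t, not the real definitions:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_RESTART 0x1 /* stand-in for CMDCONFIG_RESTART_MASK */
#define SKETCH_STOP    0x2 /* stand-in for CMDCONFIG_STOP_MASK */

struct sketch_cmd {
	uint8_t cmd;        /* 1 = write, 0 = read */
	uint8_t cmd_config; /* RESTART/STOP flags */
};

int main(void)
{
	struct sketch_cmd cmds[6] = { 0 };
	int i, numbytes = 6, write = 0; /* 2 address bytes + 4 data bytes */

	for (i = 0; i < numbytes; i++) {
		cmds[i].cmd = (i < 2) ? 1 : write;
		if (i == 2 && !write)          /* RESTART once the address is out */
			cmds[i].cmd_config |= SKETCH_RESTART;
		if (i == numbytes - 1)         /* STOP on the last command */
			cmds[i].cmd_config |= SKETCH_STOP;
		printf("cmd[%d]: %c%s%s\n", i, cmds[i].cmd ? 'W' : 'R',
		       (cmds[i].cmd_config & SKETCH_RESTART) ? " RESTART" : "",
		       (cmds[i].cmd_config & SKETCH_STOP) ? " STOP" : "");
	}
	return 0;
}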
+
+static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i;
+ int ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].Data;
+
+ pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else
+ pr_err("arcturus_i2c_eeprom_read_data - error occurred :%x", ret);
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ int ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+ * According to the EEPROM spec, the EEPROM can take up to 10 ms to
+ * flush its internal RX buffer after a STOP is issued at the end of
+ * a write transaction. During this time the EEPROM will not respond
+ * to any further commands, so wait the full interval.
+ */
+ msleep(10);
+
+ } else
+ pr_err("arcturus_i2c_write- error occurred :%x", ret);
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, data_size, data_chunk_size, next_eeprom_addr = 0;
+ int ret;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * The SMU interface accepts at most MAX_SW_I2C_COMMANDS bytes per
+ * request, so the payload has to be split into chunks, with each
+ * chunk sent as a separate request.
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+ /* Insert the EEPROM destination address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
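
The chunking arithmetic above is easy to check in isolation: data_size is the message length minus the 2 address bytes, and each request carries MAX_SW_I2C_COMMANDS - 2 payload bytes, with any remainder sent as a final short request. A standalone sketch, taking MAX_SW_I2C_COMMANDS to be 24 purely for illustration (the real value comes from the SMU firmware headers):

#include <stdint.h>
#include <stdio.h>

#define MAX_SW_I2C_COMMANDS 24 /* assumed value, for illustration only */

int main(void)
{
	uint32_t len = 130;                       /* hypothetical i2c_msg length */
	uint32_t data_size = len - 2;             /* payload after 2 addr bytes */
	uint32_t chunk = MAX_SW_I2C_COMMANDS - 2; /* payload bytes per request */

	printf("%u full chunks of %u bytes, then a tail of %u bytes\n",
	       data_size / chunk, chunk, data_size % chunk);
	return 0;
}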
+
+static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
+ .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
+ .functionality = arcturus_i2c_eeprom_i2c_func,
+};
+
+static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_eeprom_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
+{
+ i2c_del_adapter(control);
+}
+
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -1909,7 +2088,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
/* btc */
- .run_afll_btc = arcturus_run_btc_afll,
+ .run_btc = arcturus_run_btc,
/* dpm/clk tables */
.set_default_dpm_table = arcturus_set_default_dpm_table,
.populate_umd_state_clk = arcturus_populate_umd_state_clk,
@@ -1929,12 +2108,62 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
+ .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
+ .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
+ .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .get_pptable_power_limit = arcturus_get_pptable_power_limit,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &arcturus_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index cc63705920dc..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
- vega12_baco.o smu9_baco.o
+ vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \
+ ci_baco.o smu7_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
new file mode 100644
index 000000000000..3be40114e63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "ci_baco.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
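
Most rows in these tables come in INDEX/DATA pairs: a CMD_WRITE to mmGCK_SMC_IND_INDEX selects a register in the SMC's indirect space, and the following command operates on it through mmGCK_SMC_IND_DATA. A toy standalone model of that idiom; the helper names are invented and the single indirect address is the one from enable_fb_req_rej_tbl above:

#include <stdint.h>
#include <stdio.h>

static uint32_t index_reg;    /* models mmGCK_SMC_IND_INDEX */
static uint32_t indirect_reg; /* one register of the indirect space */

static void write_index(uint32_t addr) { index_reg = addr; }

static uint32_t read_data(void)
{
	return (index_reg == 0xC0300024) ? indirect_reg : 0;
}

static void write_data(uint32_t v)
{
	if (index_reg == 0xC0300024)
		indirect_reg = v;
}

int main(void)
{
	/* CMD_WRITE INDEX, 0xC0300024: select the indirect register */
	write_index(0xC0300024);
	/* CMD_READMODIFYWRITE DATA, mask 0x1, val 0x1: set bit 0 */
	write_data((read_data() & ~0x1u) | 0x1u);
	printf("indirect register now 0x%x\n", indirect_reg);
	return 0;
}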
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+};
+
+int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* ASIC already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
new file mode 100644
index 000000000000..17041f187020
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_BACO_H__
+#define __CI_BACO_H__
+#include "smu7_baco.h"
+
+extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
index 9c57c1f67749..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m
return ret;
}
+bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size)
+{
+ u32 i, reg = 0;
+
+ for (i = 0; i < array_size; i++) {
+ if ((entry[i].cmd == CMD_WRITE) ||
+ (entry[i].cmd == CMD_READMODIFYWRITE) ||
+ (entry[i].cmd == CMD_WAITFOR))
+ reg = entry[i].reg_offset;
+ if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
+ entry[i].shift, entry[i].val, entry[i].timeout))
+ return false;
+ }
+
+ return true;
+}
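
baco_cmd_handler() itself is outside this hunk, so its exact semantics are not shown here, but the conventional reading of a CMD_READMODIFYWRITE entry is new = (old & ~mask) | (val << shift). That reading also explains the mask == 0 rows in the tables above (such as the mmDC_GPIO_GENERIC_MASK entries): they simply OR bits into the register. A standalone sketch of this assumed behavior:

#include <stdint.h>
#include <assert.h>

/* Assumed CMD_READMODIFYWRITE semantics: clear mask, OR in val << shift. */
static uint32_t rmw(uint32_t old, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (old & ~mask) | (val << shift);
}

int main(void)
{
	/* Set a one-bit field at bit 8 without touching the rest. */
	assert(rmw(0xffff0000u, 0x100u, 8, 1) == 0xffff0100u);
	/* mask == 0 degenerates to a plain OR, as in the GENERIC_MASK rows. */
	assert(rmw(0x00000001u, 0, 0, 0x03333333u) == 0x03333333u);
	return 0;
}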
+
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 95296c916f4e..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -33,6 +33,15 @@ enum baco_cmd_type {
CMD_DELAY_US,
};
+struct baco_cmd_entry {
+ enum baco_cmd_type cmd;
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t timeout;
+ uint32_t val;
+};
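
The tables in the new *_baco.c files initialize these fields positionally. Written with designated initializers, one CMD_WAITFOR row from enter_baco_tbl reads as follows; the numeric reg_offset is a hypothetical stand-in, since the real offset of mmBACO_CNTL does not appear in this patch:

#include <stdint.h>

enum baco_cmd_type { CMD_WRITE, CMD_READMODIFYWRITE, CMD_WAITFOR }; /* subset */

struct baco_cmd_entry {
	enum baco_cmd_type cmd;
	uint32_t reg_offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t timeout;
	uint32_t val;
};

/* { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 } */
static const struct baco_cmd_entry example = {
	.cmd        = CMD_WAITFOR,
	.reg_offset = 0x1530, /* hypothetical stand-in for mmBACO_CNTL */
	.mask       = 0x02,   /* stand-in for BACO_CNTL__BACO_BCLK_OFF_MASK */
	.shift      = 0,
	.timeout    = 5,      /* presumably a poll budget before giving up */
	.val        = 0x02,   /* expected masked readback */
};

int main(void) { (void)example; return 0; }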
+
struct soc15_baco_cmd_entry {
enum baco_cmd_type cmd;
uint32_t hwip;
@@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry {
uint32_t timeout;
uint32_t val;
};
+
+extern bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size);
extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
new file mode 100644
index 000000000000..c0368f2dfb21
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "fiji_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 }
+};
+
+int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* ASIC already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
new file mode 100644
index 000000000000..47f402900bdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __FIJI_BACO_H__
+#define __FIJI_BACO_H__
+#include "smu7_baco.h"
+
+extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index a24beaa4fb01..d2909c91d65b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev;
+
if (!hwmgr)
return -EINVAL;
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_workload_prority(hwmgr);
hwmgr->gfxoff_state_changed_by_workload = false;
+ adev = hwmgr->adev;
+
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
vega12_hwmgr_init(hwmgr);
break;
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega20_smu_funcs;
vega20_hwmgr_init(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
new file mode 100644
index 000000000000..8f8e296f2fe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "polaris_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl_vg[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* ASIC already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg));
+ baco_program_registers(hwmgr, turn_off_plls_tbl_vg,
+ ARRAY_SIZE(turn_off_plls_tbl_vg));
+ } else {
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ }
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
new file mode 100644
index 000000000000..87a5fa0a157a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __POLARIS_BACO_H__
+#define __POLARIS_BACO_H__
+#include "smu7_baco.h"
+
+extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
new file mode 100644
index 000000000000..044cda005aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smu7_baco.h"
+#include "tonga_baco.h"
+#include "fiji_baco.h"
+#include "polaris_baco.h"
+#include "ci_baco.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
+
+ if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
+ *cap = true;
+
+ return 0;
+}
+
+int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32(mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
+
+int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ return tonga_baco_set_state(hwmgr, state);
+ case CHIP_FIJI:
+ return fiji_baco_set_state(hwmgr, state);
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ return polaris_baco_set_state(hwmgr, state);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return ci_baco_set_state(hwmgr, state);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
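
A hypothetical caller, for illustration: once the three hooks are wired into smu7_hwmgr_funcs (see the smu7_hwmgr.c hunk further down), the hwmgr core can drive a BACO cycle through the function table. The helper name here is invented for this sketch:

/* Hypothetical helper (not part of this patch) showing how the new
 * hooks are reached through the pp_hwmgr_func table. */
static int pp_enter_baco_example(struct pp_hwmgr *hwmgr)
{
	bool support = false;

	if (hwmgr->hwmgr_func->get_asic_baco_capability)
		hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, &support);
	if (!support)
		return -EOPNOTSUPP;

	return hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, BACO_STATE_IN);
}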
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
new file mode 100644
index 000000000000..be0d98abb536
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU7_BACO_H__
+#define __SMU7_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 34f95e0e3ea4..f73dff68e799 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
+#include "smu7_baco.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
table_size = lookup_table->count;
PP_ASSERT_WITH_CODE(0 != lookup_table->count,
@@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
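
The open-coded three-assignment exchange is replaced with the kernel's swap() helper, which at the time of this patch is roughly:

/* Simplified form of the swap() macro from include/linux/kernel.h:
 * exchanges two lvalues via a typeof() temporary. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)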
@@ -3478,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- *query = tmp;
+ /*
+ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+ * - Hawaii
+ * - Bonaire
+ * - Fiji
+ * - Tonga
+ */
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA)) {
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- if (tmp != 0)
- return 0;
+ if (tmp != 0)
+ return 0;
+ }
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -3970,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
@@ -5145,6 +5164,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_asic_baco_state = smu7_baco_get_state,
+ .set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
new file mode 100644
index 000000000000..ea743bea8e29
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "tonga_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry gpio_tbl_iceland[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
+};
+
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (hwmgr->chip_id == CHIP_TOPAZ)
+ baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland));
+ else
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (hwmgr->chip_id == CHIP_TOPAZ) {
+ if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
+ ARRAY_SIZE(exit_baco_tbl_iceland))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
+ ARRAY_SIZE(clean_baco_tbl_iceland)))
+ return 0;
+ }
+ } else {
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
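
The Topaz (Iceland) special-casing above could also be factored into a single table-selection helper; a pure refactoring sketch, with an invented helper name:

/* Hypothetical refactoring sketch: select the per-ASIC exit table once
 * instead of branching around duplicated call sites. */
static const struct baco_cmd_entry *tonga_exit_tbl(struct pp_hwmgr *hwmgr,
						   u32 *count)
{
	if (hwmgr->chip_id == CHIP_TOPAZ) {
		*count = ARRAY_SIZE(exit_baco_tbl_iceland);
		return exit_baco_tbl_iceland;
	}
	*count = ARRAY_SIZE(exit_baco_tbl);
	return exit_baco_tbl;
}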
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
new file mode 100644
index 000000000000..5dc16cc8a295
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TONGA_BACO_H__
+#define __TONGA_BACO_H__
+#include "smu7_baco.h"
+
+extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index beacfffbdc3e..d71a492c87a3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
@@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -3691,6 +3689,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
vega10_update_avfs(hwmgr);
/*
@@ -5265,6 +5270,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t feature_mask = 0;
+
+ if (disable) {
+ feature_mask |= data->smu_features[GNLD_ULV].enabled ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ } else {
+ feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ }
+
+ if (feature_mask)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ !disable, feature_mask),
+ "enable/disable power features for compute performance Failed!",
+ return -EINVAL);
+
+ if (disable) {
+ data->smu_features[GNLD_ULV].enabled = false;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = false;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = false;
+ data->smu_features[GNLD_DS_LCLK].enabled = false;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
+ } else {
+ data->smu_features[GNLD_ULV].enabled = true;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = true;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = true;
+ data->smu_features[GNLD_DS_LCLK].enabled = true;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
+ }
+
+ return 0;
+}
+
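The five repeated conditionals above could equally be written data-driven; a refactoring sketch, assuming the GNLD_* enumerators index data->smu_features as they do elsewhere in this file:

/* Refactoring sketch only: accumulate the deep-sleep feature bits in a
 * loop. 'enabled == disable' selects enabled features when disabling
 * and currently-disabled features when re-enabling. */
static const int compute_perf_features[] = {
	GNLD_ULV, GNLD_DS_GFXCLK, GNLD_DS_SOCCLK,
	GNLD_DS_LCLK, GNLD_DS_DCEFCLK,
};

static uint32_t vega10_compute_perf_feature_mask(struct vega10_hwmgr *data,
						 bool disable)
{
	uint32_t feature_mask = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(compute_perf_features); i++) {
		int f = compute_perf_features[i];

		if (data->smu_features[f].enabled == disable)
			feature_mask |= data->smu_features[f].smu_feature_bitmap;
	}

	return feature_mask;
}
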
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5332,6 +5390,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
.set_mp1_state = vega10_set_mp1_state,
+ .disable_power_features_for_compute_performance =
+ vega10_disable_power_features_for_compute_performance,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index df6ff9252401..9b5e72bdceca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -29,7 +29,7 @@
#include "vega20_baco.h"
#include "vega20_smumgr.h"
-
+#include "amdgpu_ras.h"
static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{
@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum BACO_STATE cur_state;
uint32_t data;
@@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
if (state == BACO_STATE_IN) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-
-
- if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -EINVAL;
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+ } else {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 1))
+ return -EINVAL;
+ }
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f5915308e643..5bcf0d684151 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_BACO);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
@@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- if (adev->in_baco_reset) {
- adev->in_baco_reset = 0;
+ if (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
@@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
return res;
}
+static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
+ enum pp_df_cstate state)
+{
+ int ret;
+
+ /* PPSMC_MSG_DFCstateControl is only supported by SMC firmware 40.50 and later */
+ if (hwmgr->smu_version < 0x283200) {
+ pr_err("DF cstate control is only supported by SMC firmware 40.50 and later!\n");
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ if (ret)
+ pr_err("SetDfCstate failed!\n");
+
+ return ret;
+}
+
+static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
+ uint32_t pstate)
+{
+ int ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetXgmiMode,
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ if (ret)
+ pr_err("SetXgmiPstate failed!\n");
+
+ return ret;
+}
+
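For illustration, a hypothetical caller using the two hooks registered below; DF_CSTATE_DISALLOW is assumed to be the pp_df_cstate enumerator that forbids DF C-states:

/* Hypothetical sketch: pin the data fabric out of C-states and force
 * the XGMI link to the D0 pstate before a latency-sensitive transfer. */
static int vega20_xgmi_prologue_example(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = hwmgr->hwmgr_func->set_df_cstate(hwmgr, DF_CSTATE_DISALLOW);
	if (ret)
		return ret;

	/* a nonzero pstate maps to XGMI_MODE_PSTATE_D0 above */
	return hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, 1);
}
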
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
+ .set_df_cstate = vega20_set_df_cstate,
+ .set_xgmi_pstate = vega20_set_xgmi_pstate,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 23171a4d9a31..031e0c22fcc7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -259,10 +259,8 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
- uint32_t table_count;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
- uint16_t TDPODLimit;
void *overdrive_table;
};
@@ -322,6 +320,13 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
+enum smu_reset_mode
+{
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+};
+
enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
@@ -341,7 +346,6 @@ struct smu_context
struct amdgpu_device *adev;
struct amdgpu_irq_src *irq_source;
- const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
struct mutex sensor_lock;
@@ -382,11 +386,15 @@ struct smu_context
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
bool pm_enabled;
+ bool is_apu;
uint32_t smc_if_version;
+ bool uploading_custom_pp_table;
};
+struct i2c_adapter;
+
struct pptable_funcs {
int (*alloc_dpm_context)(struct smu_context *smu);
int (*store_powerplay_table)(struct smu_context *smu);
@@ -398,7 +406,7 @@ struct pptable_funcs {
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
- int (*run_afll_btc)(struct smu_context *smu);
+ int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
int (*set_default_dpm_table)(struct smu_context *smu);
@@ -459,17 +467,19 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
void (*dump_pptable)(struct smu_context *smu);
int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
- int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max);
-};
-
-struct smu_funcs
-{
+ int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq);
+ int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+ int (*i2c_eeprom_init)(struct i2c_adapter *control);
+ void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
int (*init_microcode)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
int (*init_smc_tables)(struct smu_context *smu);
int (*fini_smc_tables)(struct smu_context *smu);
int (*init_power)(struct smu_context *smu);
int (*fini_power)(struct smu_context *smu);
- int (*load_microcode)(struct smu_context *smu);
int (*check_fw_status)(struct smu_context *smu);
int (*setup_pptable)(struct smu_context *smu);
int (*get_vbios_bootup_values)(struct smu_context *smu);
@@ -485,7 +495,6 @@ struct smu_funcs
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
- int (*write_watermarks_table)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
@@ -499,8 +508,7 @@ struct smu_funcs
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
- int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
+ int (*stop_thermal_control)(struct smu_context *smu);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
@@ -522,8 +530,6 @@ struct smu_funcs
int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
struct smu_clock_info *clocks);
int (*notify_smu_enable_pwe)(struct smu_context *smu);
- int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
@@ -538,234 +544,90 @@ struct smu_funcs
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);
+ int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
};
-#define smu_init_microcode(smu) \
- ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
-#define smu_init_smc_tables(smu) \
- ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
-#define smu_fini_smc_tables(smu) \
- ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
-#define smu_init_power(smu) \
- ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
-#define smu_fini_power(smu) \
- ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
-#define smu_load_microcode(smu) \
- ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
-#define smu_check_fw_status(smu) \
- ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
-#define smu_setup_pptable(smu) \
- ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
-#define smu_powergate_sdma(smu, gate) \
- ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
-#define smu_powergate_vcn(smu, gate) \
- ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
-#define smu_set_gfx_cgpg(smu, enabled) \
- ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
-#define smu_get_vbios_bootup_values(smu) \
- ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
-#define smu_get_clk_info_from_vbios(smu) \
- ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
-#define smu_check_pptable(smu) \
- ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
-#define smu_parse_pptable(smu) \
- ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
-#define smu_populate_smc_tables(smu) \
- ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0)
-#define smu_check_fw_version(smu) \
- ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
-#define smu_write_pptable(smu) \
- ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
-#define smu_set_min_dcef_deep_sleep(smu) \
- ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
-#define smu_set_tool_table_location(smu) \
- ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
-#define smu_notify_memory_pool_location(smu) \
- ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
-#define smu_gfx_off_control(smu, enable) \
- ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0)
-
-#define smu_write_watermarks_table(smu) \
- ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
-#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
- ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
-#define smu_system_features_control(smu, en) \
- ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
-#define smu_init_max_sustainable_clocks(smu) \
- ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
-#define smu_set_default_od_settings(smu, initialize) \
- ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_set_fan_speed_rpm(smu, speed) \
- ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
-#define smu_send_smc_msg(smu, msg) \
- ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
-#define smu_alloc_dpm_context(smu) \
- ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
-#define smu_init_display_count(smu, count) \
- ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0)
-#define smu_feature_set_allowed_mask(smu) \
- ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
-#define smu_feature_get_enabled_mask(smu, mask, num) \
- ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
-#define smu_is_dpm_running(smu) \
- ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
-#define smu_notify_display_change(smu) \
- ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
-#define smu_store_powerplay_table(smu) \
- ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
-#define smu_check_powerplay_table(smu) \
- ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
-#define smu_append_powerplay_table(smu) \
- ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
-#define smu_set_default_dpm_table(smu) \
- ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
-#define smu_populate_umd_state_clk(smu) \
- ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
-#define smu_set_default_od8_settings(smu) \
- ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
-#define smu_get_power_limit(smu, limit, def) \
- ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
-#define smu_set_power_limit(smu, limit) \
- ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
-#define smu_get_current_clk_freq(smu, clk_id, value) \
- ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
-#define smu_print_clk_levels(smu, clk_type, buf) \
- ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, clk_type, level) \
- ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
-#define smu_get_od_percentage(smu, type) \
- ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
-#define smu_set_od_percentage(smu, type, value) \
- ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
-#define smu_od_edit_dpm_table(smu, type, input, size) \
- ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
-#define smu_tables_init(smu, tab) \
- ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
-#define smu_set_thermal_fan_table(smu) \
- ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
-#define smu_start_thermal_control(smu) \
- ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
-#define smu_read_sensor(smu, sensor, data, size) \
- ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_smc_read_sensor(smu, sensor, data, size) \
- ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
-#define smu_get_power_profile_mode(smu, buf) \
- ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
-#define smu_set_power_profile_mode(smu, param, param_size) \
- ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
-#define smu_pre_display_config_changed(smu) \
- ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
-#define smu_display_config_changed(smu) \
- ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
-#define smu_apply_clocks_adjust_rules(smu) \
- ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
-#define smu_force_dpm_limit_value(smu, highest) \
- ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
-#define smu_unforce_dpm_levels(smu) \
- ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
-#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
- ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
-#define smu_set_cpu_power_state(smu) \
- ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
-#define smu_get_fan_control_mode(smu) \
- ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
-#define smu_set_fan_control_mode(smu, value) \
- ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
-#define smu_get_fan_speed_percent(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
-#define smu_set_fan_speed_percent(smu, speed) \
- ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
-#define smu_get_fan_speed_rpm(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
-
-#define smu_msg_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_clk_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_feature_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_table_get_index(smu, tab) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
-#define smu_power_get_index(smu, src) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
-#define smu_workload_get_type(smu, profile) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
-#define smu_run_afll_btc(smu) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
-#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
-#define smu_set_deep_sleep_dcefclk(smu, clk) \
- ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
-#define smu_set_active_display_count(smu, count) \
- ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
-#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
- ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
-#define smu_get_clock_by_type(smu, type, clocks) \
- ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
-#define smu_get_max_high_clocks(smu, clocks) \
- ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
-#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0)
-#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
-#define smu_display_clock_voltage_request(smu, clock_req) \
- ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
-#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \
- ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
-#define smu_get_dal_power_level(smu, clocks) \
- ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
-#define smu_get_perf_level(smu, designation, level) \
- ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
-#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
- ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
-#define smu_notify_smu_enable_pwe(smu) \
- ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
-#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
- ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
-#define smu_dpm_set_uvd_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
-#define smu_dpm_set_vce_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
-#define smu_set_xgmi_pstate(smu, pstate) \
- ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-#define smu_set_watermarks_table(smu, tab, clock_ranges) \
- ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
-#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
- ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
-#define smu_thermal_temperature_range_update(smu, range, rw) \
- ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
-#define smu_get_thermal_temperature_range(smu, range) \
- ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
-#define smu_register_irq_handler(smu) \
- ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
-#define smu_set_azalia_d3_pme(smu) \
- ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
- ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
-#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
- ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
-#define smu_baco_is_support(smu) \
- ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
-#define smu_baco_get_state(smu, state) \
- ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
-#define smu_baco_reset(smu) \
- ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
-#define smu_asic_set_performance_level(smu, level) \
- ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
-#define smu_dump_pptable(smu) \
- ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
-#define smu_get_dpm_uclk_limited(smu, clock, max) \
- ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL)
+int smu_load_microcode(struct smu_context *smu);
+
+int smu_check_fw_status(struct smu_context *smu);
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed);
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed);
+int smu_get_fan_control_mode(struct smu_context *smu);
+int smu_set_fan_control_mode(struct smu_context *smu, int value);
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks);
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req);
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
+int smu_notify_smu_enable_pwe(struct smu_context *smu);
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_set_azalia_d3_pme(struct smu_context *smu);
+
+bool smu_baco_is_support(struct smu_context *smu);
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
+
+int smu_baco_reset(struct smu_context *smu);
+int smu_mode2_reset(struct smu_context *smu);
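
These prototypes replace the macro dispatch removed above with exported wrappers in amdgpu_smu.c; a sketch of the shape such a wrapper presumably takes, using smu_set_fan_speed_rpm() as the example (the real body may differ):

/* Sketch of the wrapper shape implied by the prototypes above; the
 * actual implementation lives in amdgpu_smu.c. Dispatch now goes
 * through the merged ppt_funcs table, under the smu mutex. */
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	mutex_unlock(&smu->mutex);

	return ret;
}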
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -799,6 +661,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+int smu_write_watermarks_table(struct smu_context *smu);
+int smu_set_watermarks_for_clock_ranges(
+ struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -809,7 +675,8 @@ extern int smu_get_current_clocks(struct smu_context *smu,
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id);
+ enum amd_pp_task task_id,
+ bool lock_needed);
int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type,
bool en);
@@ -819,7 +686,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max);
+ uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -828,10 +695,29 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed);
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state);
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table);
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index 78e5927b7711..e3291259b249 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -95,8 +95,7 @@
//BTC
#define PPSMC_MSG_RunAfllBtc 0x30
-#define PPSMC_MSG_RunGfxDcBtc 0x31
-#define PPSMC_MSG_RunSocDcBtc 0x32
+#define PPSMC_MSG_RunDcBtc 0x31
//Debug
#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 7bf9a14bfa0b..af977675fd33 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -355,6 +355,10 @@ struct pp_hwmgr_func {
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
+ int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
+ int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
+ bool disable);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index e02950b505fa..a886f0644d24 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -137,29 +137,29 @@
#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
-#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
-#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
-#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
-#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT )
-#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK )
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
//FIXME need updating
// Debug Overrides Bitmask
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001
-#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002
+#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002
// I2C Config Bit Defines
#define I2C_CONTROLLER_ENABLED 1
@@ -423,18 +423,30 @@ typedef enum {
} PwrConfig_e;
typedef enum {
- XGMI_LINK_RATE_12 = 0, // 12Gbps
- XGMI_LINK_RATE_16, // 16Gbps
- XGMI_LINK_RATE_22, // 22Gbps
- XGMI_LINK_RATE_25, // 25Gbps
+ XGMI_LINK_RATE_2 = 2, // 2Gbps
+ XGMI_LINK_RATE_4 = 4, // 4Gbps
+ XGMI_LINK_RATE_8 = 8, // 8Gbps
+ XGMI_LINK_RATE_12 = 12, // 12Gbps
+ XGMI_LINK_RATE_16 = 16, // 16Gbps
+ XGMI_LINK_RATE_17 = 17, // 17Gbps
+ XGMI_LINK_RATE_18 = 18, // 18Gbps
+ XGMI_LINK_RATE_19 = 19, // 19Gbps
+ XGMI_LINK_RATE_20 = 20, // 20Gbps
+ XGMI_LINK_RATE_21 = 21, // 21Gbps
+ XGMI_LINK_RATE_22 = 22, // 22Gbps
+ XGMI_LINK_RATE_23 = 23, // 23Gbps
+ XGMI_LINK_RATE_24 = 24, // 24Gbps
+ XGMI_LINK_RATE_25 = 25, // 25Gbps
XGMI_LINK_RATE_COUNT
} XGMI_LINK_RATE_e;
typedef enum {
- XGMI_LINK_WIDTH_2 = 0, // x2
- XGMI_LINK_WIDTH_4, // x4
- XGMI_LINK_WIDTH_8, // x8
- XGMI_LINK_WIDTH_16, // x16
+ XGMI_LINK_WIDTH_1 = 1, // x1
+ XGMI_LINK_WIDTH_2 = 2, // x2
+ XGMI_LINK_WIDTH_4 = 4, // x4
+ XGMI_LINK_WIDTH_8 = 8, // x8
+ XGMI_LINK_WIDTH_9 = 9, // x9
+ XGMI_LINK_WIDTH_16 = 16, // x16
XGMI_LINK_WIDTH_COUNT
} XGMI_LINK_WIDTH_e;
@@ -696,7 +708,11 @@ typedef struct {
uint8_t GpioI2cSda; // Serial Data
uint16_t GpioPadding;
- uint32_t BoardReserved[9];
+ // Platform input telemetry voltage coefficient
+ uint32_t BoardVoltageCoeffA; // decode by /1000
+ uint32_t BoardVoltageCoeffB; // decode by /1000
+
+ uint32_t BoardReserved[7];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -802,7 +818,7 @@ typedef struct {
uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
- uint32_t EnabledAvfsModules[2];
+ uint32_t EnabledAvfsModules[3];
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
@@ -865,7 +881,8 @@ typedef struct {
//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
-#define TABLE_COUNT 9
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_COUNT 10
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index b0dd05d431dd..d8c9b7f91fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -114,6 +114,7 @@
__SMU_DUMMY_MAP(PowerDownJpeg), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunDcBtc), \
__SMU_DUMMY_MAP(RunGfxDcBtc), \
__SMU_DUMMY_MAP(RunSocDcBtc), \
__SMU_DUMMY_MAP(SetMemoryChannelEnable), \
@@ -168,6 +169,7 @@
__SMU_DUMMY_MAP(PowerGateAtHub), \
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -251,6 +253,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
__SMU_DUMMY_MAP(MMHUB_PG), \
__SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(APCC_DFLL), \
__SMU_DUMMY_MAP(WAFL_CG),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5bda8539447a..606149085683 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x09
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
@@ -48,6 +48,8 @@
#define SMU11_TOOL_SIZE 0x19000
+#define MAX_PCIE_CONF 2
+
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};
+struct smu_11_0_pcie_table {
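+	/* Supported PCIe gen speed and lane count, one entry per link config. */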
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+};
+
struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;
@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
+ struct smu_11_0_pcie_table pcie_table;
};
struct smu_11_0_dpm_context {
@@ -130,6 +138,128 @@ enum smu_v11_0_baco_seq {
BACO_SEQ_COUNT,
};
-void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+int smu_v11_0_init_microcode(struct smu_context *smu);
+
+int smu_v11_0_load_microcode(struct smu_context *smu);
+
+int smu_v11_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_init_power(struct smu_context *smu);
+
+int smu_v11_0_fini_power(struct smu_context *smu);
+
+int smu_v11_0_check_fw_status(struct smu_context *smu);
+
+int smu_v11_0_setup_pptable(struct smu_context *smu);
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
+
+int smu_v11_0_check_pptable(struct smu_context *smu);
+
+int smu_v11_0_parse_pptable(struct smu_context *smu);
+
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu);
+
+int smu_v11_0_check_fw_version(struct smu_context *smu);
+
+int smu_v11_0_write_pptable(struct smu_context *smu);
+
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+
+int smu_v11_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_v11_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v11_0_notify_display_change(struct smu_context *smu);
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
+
+int smu_v11_0_start_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_stop_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
+
+int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req);
+
+uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu);
+
+int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode);
+
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed);
+
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v11_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
+
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+bool smu_v11_0_baco_is_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
+
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v11_0_baco_reset(struct smu_context *smu);
+
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index 86cdc3393eac..b2f96a101124 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;
+#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+#endif
} __attribute__((packed));
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index acf3db12f59f..9b9f5df0911c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -37,6 +37,45 @@ struct smu_12_0_cmn2aisc_mapping {
int map_to;
};
-void smu_v12_0_set_smu_funcs(struct smu_context *smu);
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg);
+
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v12_0_wait_for_response(struct smu_context *smu);
+
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v12_0_check_fw_status(struct smu_context *smu);
+
+int smu_v12_0_check_fw_version(struct smu_context *smu);
+
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
+
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
+
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
+
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
+
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v12_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_populate_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v12_0_mode2_reset(struct smu_context *smu);
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index a0883038f3c3..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -120,7 +120,8 @@
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
-#define PPSMC_Message_Count 0x61
+#define PPSMC_MSG_DFCstateControl 0x63
+#define PPSMC_Message_Count 0x64
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3ec5a10a7c4d..aaec884d63ed 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -35,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -177,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
FEA_MAP(TEMP_DEPENDENT_VMIN),
FEA_MAP(MMHUB_PG),
FEA_MAP(ATHUB_PG),
+ FEA_MAP(APCC_DFLL),
};
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
@@ -327,40 +330,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
memset(feature_mask, 0, sizeof(uint32_t) * num);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_LINK_BIT)
- | FEATURE_MASK(FEATURE_GFX_ULV_BIT)
| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
+ | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
| FEATURE_MASK(FEATURE_VR0HOT_BIT)
| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
| FEATURE_MASK(FEATURE_THERMAL_BIT)
| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
+ | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
| FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT);
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT)
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+
+ if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
- if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
- /* TODO: remove it once fw fix the bug */
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- }
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
@@ -585,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
+ int i;
driver_ppt = table_context->driver_pptable;
@@ -615,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
+ for (i = 0; i < MAX_PCIE_CONF; i++) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
+ }
+
return 0;
}
@@ -677,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+ return od_table->cap[feature];
+}
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
+ uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
switch (clk_type) {
case SMU_GFXCLK:
@@ -734,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
+ case SMU_PCIE:
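+ /*
+ * Read back the current link speed and width from the PCIe
+ * registers so the active DPM level can be marked with '*'.
+ */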
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMHz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ break;
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ break;
+ size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
default:
break;
}
@@ -759,6 +859,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+ /* There are only two levels for fine-grained DPM */
+ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
@@ -783,13 +889,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
- ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
if (ret)
return ret;
smu->pstate_sclk = min_sclk_freq * 100;
- ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
if (ret)
return ret;
@@ -842,7 +948,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
if (ret)
return ret;
ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
@@ -892,7 +998,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -919,7 +1025,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -1254,7 +1360,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu_display_clock_voltage_request(smu, &clock_req)) {
+
+ ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
+ if (!ret) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -1408,7 +1516,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -1451,18 +1559,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
uint32_t sclk_freq = 0, uclk_freq = 0;
uint32_t uclk_level = 0;
- switch (adev->pdev->revision) {
- case 0xf0: /* XTX */
- case 0xc0:
- sclk_freq = NAVI10_PEAK_SCLK_XTX;
- break;
- case 0xf1: /* XT */
- case 0xc1:
- sclk_freq = NAVI10_PEAK_SCLK_XT;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
break;
- default: /* XL */
- sclk_freq = NAVI10_PEAK_SCLK_XL;
+ case CHIP_NAVI14:
+ switch (adev->pdev->revision) {
+ case 0xc7: /* XT */
+ case 0xf4:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+ break;
+ case 0xc1: /* XTM */
+ case 0xf2:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+ break;
+ case 0xc3: /* XLM */
+ case 0xf3:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ case 0xc5: /* XTX */
+ case 0xf6:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+ break;
+ }
break;
+ default:
+ return -EINVAL;
}
ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
@@ -1485,10 +1622,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->asic_type != CHIP_NAVI10)
- return -EINVAL;
switch (level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1541,17 +1674,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
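+ /* Lazily cache the default power limit (from the SMU or the pptable) on first query. */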
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1574,23 +1712,291 @@ static int navi10_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
return 0;
}
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
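+ /*
+ * Argument layout, as encoded below: bits [31:16] link level
+ * index, [15:8] PCIe gen speed, [7:0] lane count, each capped
+ * to the platform capability.
+ */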
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+
+ if (ret)
+ return ret;
+
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+ if (pptable->PcieLaneCount[i] > pcie_width_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table)
+{
+ pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+ pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+ pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+ pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+ pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu)
+{
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+ if (!smu->smu_table.power_play_table) {
+ pr_err("powerplay table uninitialized!\n");
+ return -ENOENT;
+ }
+ powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ overdrive_table = &powerplay_table->overdrive_table;
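+ /* Cache the board OD limits: allocate a copy on first use, refresh it otherwise. */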
+ if (!smu->od_settings) {
+ smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+ } else {
+ memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+ }
+ return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize)
+{
+ OverDriveTable_t *od_table;
+ int ret = 0;
+
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
+ }
+
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (od_table)
+ navi10_dump_od_table(od_table);
+
+ return ret;
+}
+
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
+{
+ int i;
+ int ret = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table;
+ struct smu_11_0_overdrive_table *od_settings;
+ enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+ uint16_t *freq_ptr, *voltage_ptr;
+ od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+ if (!smu->od_enabled) {
+ pr_warn("OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ pr_err("OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ od_settings = smu->od_settings;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ pr_warn("GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (!table_context->overdrive_table) {
+ pr_err("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i += 2) {
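+ /* input[] holds (index, value) pairs: index 0 = GfxclkFmin, 1 = GfxclkFmax */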
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+ switch (input[i]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ pr_info("Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[i + 1];
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ pr_warn("UCLK_MAX not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 2) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (input[0] != 1) {
+ pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+ pr_info("Supported indices: [1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+ if (ret)
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ // no lock needed because smu_od_edit_dpm_table has it
+ ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ if (ret)
+ return ret;
+ break;
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ pr_warn("GFXCLK_CURVE not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 3) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (!od_table) {
+ pr_info("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+
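+ /* input = { curve point index (0-2), freq (MHz), voltage (mV) } */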
+ switch (input[0]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+ freq_ptr = &od_table->GfxclkFreq1;
+ voltage_ptr = &od_table->GfxclkVolt1;
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+ freq_ptr = &od_table->GfxclkFreq2;
+ voltage_ptr = &od_table->GfxclkVolt2;
+ break;
+ case 2:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+ freq_ptr = &od_table->GfxclkFreq3;
+ voltage_ptr = &od_table->GfxclkVolt3;
+ break;
+ default:
+ pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+ pr_info("Supported indices: [0, 1, 2]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+ if (ret)
+ return ret;
+ // Allow setting zero to disable the OverDrive VDDC curve
+ if (input[2] != 0) {
+ ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[1];
+ *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+ pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+ } else {
+ // If setting 0, disable all voltage curve settings
+ od_table->GfxclkVolt1 = 0;
+ od_table->GfxclkVolt2 = 0;
+ od_table->GfxclkVolt3 = 0;
+ }
+ navi10_dump_od_table(od_table);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return ret;
+}
+
+static int navi10_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ if (ret)
+ pr_err("RunBtc failed!\n");
+
+ return ret;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1629,12 +2035,63 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
.get_power_limit = navi10_get_power_limit,
+ .update_pcie_parameters = navi10_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .set_default_od_settings = navi10_set_default_od_settings,
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &navi10_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..ec03c7992f6d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,17 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e62bfba51562..04daf7e9fe05 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
@@ -160,21 +161,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
* This interface is only for getting the uclk ultimate frequency and shouldn't
* introduce other similar functions, which would result in too many callbacks.
*/
-static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- DpmClocks_t *table = smu->smu_table.clocks_table;
-
- if (!clock || !table)
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
- if (max)
- *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
- else
- *clock = table->FClocks[0].Freq;
+ GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
return 0;
-
}
static int renoir_print_clk_levels(struct smu_context *smu,
@@ -183,11 +180,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics = {0};
+ SmuMetrics_t metrics;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
+ memset(&metrics, 0, sizeof(metrics));
+
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
@@ -198,7 +197,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_SCLK:
/* the unit of the parameters retrieved from the metrics table is MHz */
cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
- ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
if (!ret) {
/* the driver only knows min/max gfx_clk; add level 1 for all other gfx clks */
if (cur_value == max)
@@ -246,20 +245,474 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return size;
}
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+
+ return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+ {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+
+ uint32_t pplib_workload = 0;
+
+ switch (profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_COUNT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (sclk_mask)
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (sclk_mask)
+ /* sclk here means gfxclk; it has three levels: min/current/max */
+ *sclk_mask = 3 - 1;
+
+ if (mclk_mask)
+ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+ if (soc_mask)
+ *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This interface gets the dpm clock table for dc.
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+ clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+ clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+ clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+ clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+ }
+
+ return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+
+ int ret = 0;
+ uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (soft_min_level > 2 || soft_max_level > 2) {
+ pr_info("Currently sclk only support 3 levels on APU\n");
+ return -EINVAL;
+ }
+
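+ /*
+ * Level mapping used by the messages below:
+ * 0 = min freq, 1 = RENOIR_UMD_PSTATE_GFXCLK, 2 = max freq.
+ */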
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ soft_max_level == 0 ? min_freq :
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ soft_min_level == 2 ? max_freq :
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_workload_get_type(smu, profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on RENOIR\n", profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = renoir_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Save watermark settings into the pplib smu structure and
+ * pass the data on to the smu controller.
+ */
+static int renoir_set_watermarks_table(
+ struct smu_context *smu,
+ void *watermarks,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = watermarks;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr */
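+ /* DMIF sets fill the WM_DCFCLK rows; MCIF sets fill the WM_SOCCLK rows below. */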
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ /* pass data to smu controller */
+ ret = smu_write_watermarks_table(smu);
+
+ return ret;
+}
+
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+		 * Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
+		 * Not all profile modes are supported on Renoir.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
- .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+ .get_dpm_clk_limited = renoir_get_dpm_clk_limited,
.print_clk_levels = renoir_print_clk_levels,
+ .get_current_power_state = renoir_get_current_power_state,
+ .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .force_dpm_limit_value = renoir_force_dpm_limit_value,
+ .unforce_dpm_levels = renoir_unforce_dpm_levels,
+ .get_workload_type = renoir_get_workload_type,
+ .get_profiling_clk_mask = renoir_get_profiling_clk_mask,
+ .force_clk_levels = renoir_force_clk_levels,
+ .set_power_profile_mode = renoir_set_power_profile_mode,
+ .set_performance_level = renoir_set_performance_level,
+ .get_dpm_clock_table = renoir_get_dpm_clock_table,
+ .set_watermarks_table = renoir_set_watermarks_table,
+ .get_power_profile_mode = renoir_get_power_profile_mode,
+ .check_fw_status = smu_v12_0_check_fw_status,
+ .check_fw_version = smu_v12_0_check_fw_version,
+ .powergate_sdma = smu_v12_0_powergate_sdma,
+ .powergate_vcn = smu_v12_0_powergate_vcn,
+ .send_smc_msg = smu_v12_0_send_msg,
+ .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .read_smc_arg = smu_v12_0_read_arg,
+ .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
+ .gfx_off_control = smu_v12_0_gfx_off_control,
+ .init_smc_tables = smu_v12_0_init_smc_tables,
+ .fini_smc_tables = smu_v12_0_fini_smc_tables,
+ .populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &renoir_ppt_funcs;
smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
+ smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
new file mode 100644
index 000000000000..8bcda7871309
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_INTERNAL_H__
+#define __SMU_INTERNAL_H__
+
+#include "amdgpu_smu.h"
+
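+/*
+ * Each wrapper below guards an indirect call through smu->ppt_funcs and
+ * falls back to a default (0, or -EINVAL where noted) when the hook is
+ * not implemented.  For example, smu_init_power(smu) expands to:
+ *
+ *   ((smu)->ppt_funcs->init_power ?
+ *       (smu)->ppt_funcs->init_power((smu)) : 0)
+ *
+ * so common code can invoke optional per-ASIC hooks unconditionally.
+ */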
+#define smu_init_microcode(smu) \
+ ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0)
+
+#define smu_setup_pptable(smu) \
+ ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+ ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+ ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+ ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_gfx_off_control(smu, enable) \
+ ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0)
+
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_default_od_settings(smu, initialize) \
+ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display_count(smu, count) \
+ ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+
+#define smu_tables_init(smu, tab) \
+ ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+#define smu_set_thermal_fan_table(smu) \
+ ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+ ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0)
+
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+ ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_clk_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_feature_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_table_get_index(smu, tab) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
+#define smu_power_get_index(smu, src) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
+#define smu_workload_get_type(smu, profile) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
+#define smu_run_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
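+
+/*
+ * The index-lookup wrappers above return -EINVAL when either ppt_funcs
+ * or the specific hook is missing; callers such as
+ * renoir_set_power_profile_mode() rely on this and treat any negative
+ * return from smu_workload_get_type() as "profile not supported".
+ */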
+
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+
+#define smu_set_watermarks_table(smu, tab, clock_ranges) \
+ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
+#define smu_thermal_temperature_range_update(smu, range, rw) \
+ ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
+#define smu_get_thermal_temperature_range(smu, range) \
+ ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+#define smu_register_irq_handler(smu) \
+ ((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0)
+
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+
+#define smu_asic_set_performance_level(smu, level) \
+	((smu)->ppt_funcs->set_performance_level ? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+#define smu_dump_pptable(smu) \
+ ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+ ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+ ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
+
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c5257ae3188a..fc9679ea2368 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,17 +24,19 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define SMU_11_0_PARTIAL_PPTABLE
+
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
-#include "vega20_ppt.h"
-#include "arcturus_ppt.h"
-#include "navi10_ppt.h"
+#include "amd_pcie.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -61,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -88,7 +90,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -113,7 +115,7 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -144,7 +146,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v11_0_init_microcode(struct smu_context *smu)
+int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const char *chip_name;
@@ -206,7 +208,7 @@ out:
return err;
}
-static int smu_v11_0_load_microcode(struct smu_context *smu)
+int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
@@ -244,7 +246,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_check_fw_status(struct smu_context *smu)
+int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -259,7 +261,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v11_0_check_fw_version(struct smu_context *smu)
+int smu_v11_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -354,7 +356,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
return 0;
}
-static int smu_v11_0_setup_pptable(struct smu_context *smu)
+int smu_v11_0_setup_pptable(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
@@ -369,6 +371,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+ pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -385,6 +388,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
return ret;
} else {
+ pr_info("use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
@@ -433,13 +437,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
int ret = 0;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -460,18 +464,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
smu_table->tables = NULL;
- smu_table->table_count = 0;
smu_table->metrics_table = NULL;
smu_table->metrics_time = 0;
@@ -481,7 +484,7 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_power(struct smu_context *smu)
+int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -499,7 +502,7 @@ static int smu_v11_0_init_power(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_power(struct smu_context *smu)
+int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -576,7 +579,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
int ret, index;
struct amdgpu_device *adev = smu->adev;
@@ -673,7 +676,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
@@ -719,7 +722,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_check_pptable(struct smu_context *smu)
+int smu_v11_0_check_pptable(struct smu_context *smu)
{
int ret;
@@ -727,7 +730,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_parse_pptable(struct smu_context *smu)
+int smu_v11_0_parse_pptable(struct smu_context *smu)
{
int ret;
@@ -751,7 +754,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
int ret;
@@ -760,7 +763,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_pptable(struct smu_context *smu)
+int smu_v11_0_write_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
@@ -771,24 +774,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
- if (!table->cpu_addr)
- return -EINVAL;
-
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
- true);
-
- return ret;
-}
-
-static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
@@ -800,7 +786,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl
return ret;
}
-static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -809,11 +795,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
if (!table_context)
return -EINVAL;
- return smu_set_deep_sleep_dcefclk(smu,
- table_context->boot_values.dcefclk / 100);
+ return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
-static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
@@ -831,7 +816,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
@@ -843,7 +828,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
}
-static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
@@ -870,7 +855,7 @@ failed:
return ret;
}
-static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
@@ -899,7 +884,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_system_features_control(struct smu_context *smu,
+int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -925,7 +910,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_notify_display_change(struct smu_context *smu)
+int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
@@ -983,7 +968,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return ret;
}
-static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
@@ -1063,13 +1048,44 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
+{
+	uint32_t od_limit, max_power_limit;
+	struct smu_11_0_powerplay_table *powerplay_table = NULL;
+	struct smu_table_context *table_context = &smu->smu_table;
+
+	powerplay_table = table_context->power_play_table;
+
+ max_power_limit = smu_get_pptable_power_limit(smu);
+
+ if (!max_power_limit) {
+		/* If we couldn't get the table limit, fall back on the first-read value */
+ if (!smu->default_power_limit)
+ smu->default_power_limit = smu->power_limit;
+ max_power_limit = smu->default_power_limit;
+ }
+
+ if (smu->od_enabled) {
+ od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+ pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
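+		/*
+		 * Scale the limit by the overdrive percentage, e.g.
+		 * (illustrative numbers) a 250 W base limit with
+		 * od_limit = 20 gives 250 * (100 + 20) / 100 = 300 W.
+		 */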
+ max_power_limit *= (100 + od_limit);
+ max_power_limit /= 100;
+ }
+
+ return max_power_limit;
+}
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
+ uint32_t max_power_limit;
- if (n > smu->default_power_limit) {
- pr_err("New power limit is over the max allowed %d\n",
- smu->default_power_limit);
+ max_power_limit = smu_v11_0_get_max_power_limit(smu);
+
+ if (n > max_power_limit) {
+ pr_err("New power limit (%d) is over the max allowed %d\n",
+ n,
+ max_power_limit);
return -EINVAL;
}
@@ -1091,7 +1107,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return 0;
}
-static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_id,
uint32_t *value)
{
@@ -1170,7 +1186,7 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
struct smu_temperature_range range;
@@ -1212,6 +1228,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
+int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
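+	/* writing 0 to THM_THERMAL_INT_ENA masks all thermal interrupt sources */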
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ return 0;
+}
+
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
@@ -1236,7 +1261,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-static int smu_v11_0_read_sensor(struct smu_context *smu,
+int smu_v11_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
@@ -1273,7 +1298,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
return ret;
}
-static int
+int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
*clock_req)
@@ -1316,9 +1341,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
- mutex_unlock(&smu->mutex);
if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1328,27 +1351,7 @@ failed:
return ret;
}
-static int
-smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
-{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
-
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
- }
-
- return ret;
-}
-
-static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
struct amdgpu_device *adev = smu->adev;
@@ -1361,12 +1364,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
- mutex_lock(&smu->mutex);
if (enable)
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
else
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
- mutex_unlock(&smu->mutex);
break;
default:
break;
@@ -1375,7 +1376,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static uint32_t
+uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1415,7 +1416,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
return 0;
}
-static int
+int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1444,7 +1445,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
-static int
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1472,7 +1473,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1482,10 +1483,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- mutex_lock(&(smu->mutex));
ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret)
- goto set_fan_speed_rpm_failed;
+ return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
@@ -1496,23 +1496,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-set_fan_speed_rpm_failed:
- mutex_unlock(&(smu->mutex));
return ret;
}
-#define XGMI_STATE_D0 1
-#define XGMI_STATE_D3 0
-
-static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- mutex_lock(&(smu->mutex));
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
- mutex_unlock(&(smu->mutex));
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
return ret;
}
@@ -1559,7 +1552,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
.process = smu_v11_0_irq_process,
};
-static int smu_v11_0_register_irq_handler(struct smu_context *smu)
+int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = smu->irq_source;
@@ -1591,7 +1584,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -1621,13 +1614,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- mutex_lock(&smu->mutex);
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1637,7 +1628,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}
-static bool smu_v11_0_baco_is_support(struct smu_context *smu)
+bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1661,7 +1652,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
}
-static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
enum smu_baco_state baco_state;
@@ -1673,7 +1664,7 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
return baco_state;
}
-static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1697,7 +1688,7 @@ out:
return ret;
}
-static int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_reset(struct smu_context *smu)
{
int ret = 0;
@@ -1718,13 +1709,12 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
- mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) {
ret = -EINVAL;
@@ -1751,80 +1741,102 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v11_0_funcs = {
- .init_microcode = smu_v11_0_init_microcode,
- .load_microcode = smu_v11_0_load_microcode,
- .check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
- .send_smc_msg = smu_v11_0_send_msg,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
- .setup_pptable = smu_v11_0_setup_pptable,
- .init_smc_tables = smu_v11_0_init_smc_tables,
- .fini_smc_tables = smu_v11_0_fini_smc_tables,
- .init_power = smu_v11_0_init_power,
- .fini_power = smu_v11_0_fini_power,
- .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
- .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .check_pptable = smu_v11_0_check_pptable,
- .parse_pptable = smu_v11_0_parse_pptable,
- .populate_smc_tables = smu_v11_0_populate_smc_pptable,
- .write_pptable = smu_v11_0_write_pptable,
- .write_watermarks_table = smu_v11_0_write_watermarks_table,
- .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
- .set_tool_table_location = smu_v11_0_set_tool_table_location,
- .init_display_count = smu_v11_0_init_display_count,
- .set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
- .system_features_control = smu_v11_0_system_features_control,
- .notify_display_change = smu_v11_0_notify_display_change,
- .set_power_limit = smu_v11_0_set_power_limit,
- .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
- .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
- .start_thermal_control = smu_v11_0_start_thermal_control,
- .read_sensor = smu_v11_0_read_sensor,
- .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
- .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
- .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
- .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
- .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
- .gfx_off_control = smu_v11_0_gfx_off_control,
- .register_irq_handler = smu_v11_0_register_irq_handler,
- .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
- .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
- .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
-};
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
-void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
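+	/*
+	 * Message argument layout (per the packing below): clock id in the
+	 * high 16 bits, target frequency in MHz in the low 16 bits; e.g. a
+	 * hypothetical clk_id of 2 with max = 1800 MHz packs to 0x00020708.
+	 */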
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ int ret;
- smu->funcs = &smu_v11_0_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- vega20_set_ppt_funcs(smu);
- break;
- case CHIP_ARCTURUS:
- arcturus_set_ppt_funcs(smu);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- navi10_set_ppt_funcs(smu);
- break;
- default:
- pr_warn("Unknown asic for smu11\n");
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+	/*
+	 * Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+	 */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
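+	/*
+	 * With the layout above, e.g. GEN4 (pcie_gen = 3) at x16
+	 * (pcie_width = 6) on LCLK DPM level 0 packs to 0x00000306; the
+	 * actual packing is done by the ASIC-specific
+	 * update_pcie_parameters hook.
+	 */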
+	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+	if (ret)
+		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+	return ret;
+}
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (initialize) {
+		if (table_context->overdrive_table)
+			return -EINVAL;
+
+		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+		if (!table_context->overdrive_table)
+			return -ENOMEM;
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
+	}
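+	/* push the (possibly user-modified) overdrive table back to the SMC */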
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 9d2280ca1f4b..139dd737eaa5 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -24,12 +24,12 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
-#include "renoir_ppt.h"
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
@@ -41,7 +41,7 @@
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
-static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
@@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
return 0;
}
-static int smu_v12_0_wait_for_response(struct smu_context *smu)
+int smu_v12_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
@@ -77,7 +77,7 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -102,7 +102,7 @@ static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -132,7 +132,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v12_0_check_fw_status(struct smu_context *smu)
+int smu_v12_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -147,7 +147,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v12_0_check_fw_version(struct smu_context *smu)
+int smu_v12_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -181,7 +181,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu)
return ret;
}
-static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -192,7 +192,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}
-static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -203,7 +203,7 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}
-static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
@@ -224,7 +224,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
* Returns 2=Not in GFXOFF.
* Returns 3=Transition into GFXOFF.
*/
-static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
uint32_t reg;
uint32_t gfxOff_Status = 0;
@@ -237,22 +237,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
return gfxOff_Status;
}
-static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0, timeout = 500;
if (enable) {
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
- /* confirm gfx is back to "off" state, timeout is 5 seconds */
- while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) {
- msleep(10);
- timeout--;
- if (timeout == 0) {
- DRM_ERROR("enable gfxoff timeout and failed!\n");
- break;
- }
- }
} else {
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
@@ -270,12 +261,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static int smu_v12_0_init_smc_tables(struct smu_context *smu)
+int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -288,11 +279,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu)
return smu_tables_init(smu, tables);
}
-static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
+int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->clocks_table);
@@ -304,7 +295,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
+int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
@@ -319,14 +310,20 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
-static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0;
-
- mutex_lock(&smu->mutex);
+ uint32_t mclk_mask, soc_mask;
if (max) {
+ ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ NULL,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -340,14 +337,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, max, true);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
if (ret)
goto failed;
break;
default:
ret = -EINVAL;
goto failed;
-
}
}
@@ -365,7 +368,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, min, false);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
if (ret)
goto failed;
break;
@@ -373,40 +383,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
ret = -EINVAL;
goto failed;
}
-
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v12_0_funcs = {
- .check_fw_status = smu_v12_0_check_fw_status,
- .check_fw_version = smu_v12_0_check_fw_version,
- .powergate_sdma = smu_v12_0_powergate_sdma,
- .powergate_vcn = smu_v12_0_powergate_vcn,
- .send_smc_msg = smu_v12_0_send_msg,
- .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
- .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
- .gfx_off_control = smu_v12_0_gfx_off_control,
- .init_smc_tables = smu_v12_0_init_smc_tables,
- .fini_smc_tables = smu_v12_0_fini_smc_tables,
- .populate_smc_tables = smu_v12_0_populate_smc_tables,
- .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
-};
-
-void smu_v12_0_set_smu_funcs(struct smu_context *smu)
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+}
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
- smu->funcs = &smu_v12_0_funcs;
+ if (max < min)
+ return -EINVAL;
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- renoir_set_ppt_funcs(smu);
- break;
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ if (ret)
+ return ret;
+ break;
default:
- pr_warn("Unknown asic for smu12\n");
+ return -EINVAL;
}
+
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3f12cf341511..aa0ee2b46135 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 4728aa23a818..7dca04a89217 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int ret = 0;
struct cgs_firmware_info info = {0};
- struct smu8_smumgr *smu8_smu;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- smu8_smu = hwmgr->smu_backend;
ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0dbdde69f2d9..0f3836fd9666 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index f9589806bf83..90c782c132d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b9089c6bea85..f604612f411f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 92c393f613d3..0b4892833808 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForShutdown),
MSG_MAP(SetMGpuFanBoostLimitRpm),
MSG_MAP(GetAVFSVoltageByDpm),
+ MSG_MAP(DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
@@ -464,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
- table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
return 0;
}
@@ -634,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
!smu_dpm_ctx->dpm_current_power_state)
return -EINVAL;
- mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY;
@@ -652,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&(smu->mutex));
return pm_type;
}
@@ -1274,16 +1273,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
- if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
- return -EINVAL;
- }
-
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1436,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1451,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
dpm_table = smu_dpm->dpm_context;
- mutex_lock(&smu->mutex);
-
switch (clk_type) {
case SMU_GFXCLK:
single_dpm_table = &(dpm_table->gfx_table);
@@ -1474,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
ret = -EINVAL;
}
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2260,7 +2247,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -2547,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
int feature_enabled;
PPCLK_e clk_id;
- mutex_lock(&(smu->mutex));
-
dpm_table = smu_dpm->dpm_context;
golden_table = smu_dpm->golden_dpm_context;
@@ -2598,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
}
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
set_od_failed:
- mutex_unlock(&(smu->mutex));
-
return ret;
}
@@ -2827,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
- mutex_lock(&(smu->mutex));
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
- mutex_unlock(&(smu->mutex));
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
}
return ret;
@@ -3047,7 +3030,7 @@ static int vega20_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -3141,6 +3124,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
+static int vega20_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+	/* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
+	if (smu_version < 0x283200) {
+		pr_err("DF cstate control requires SMC firmware 40.50 or later!\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+}
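
For reference, the 0x283200 cutoff above packs the firmware version bytewise (0x28 -> 40, 0x32 -> 50, so 0x283200 is release 40.50.0). A minimal decode sketch, assuming the usual major/minor/debug packing; smu_decode_fw_version is an illustrative helper, not part of this patch:

	static inline void smu_decode_fw_version(uint32_t v, uint8_t *major,
						 uint8_t *minor, uint8_t *debug)
	{
		*major = (v >> 16) & 0xff;	/* 0x28 -> 40 */
		*minor = (v >> 8) & 0xff;	/* 0x32 -> 50 */
		*debug = v & 0xff;		/* 0x00 -> 0  */
	}
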
+
+static int vega20_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+	int ret = 0, i;
+ uint32_t smu_pcie_arg;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+		ret = smu_send_smc_msg_with_param(smu,
+						  SMU_MSG_OverridePcieParameters,
+						  smu_pcie_arg);
+		/* bail out on the first failure instead of overwriting ret */
+		if (ret)
+			break;
+	}
+
+ return ret;
+}
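
The smu_pcie_arg value built above is a bit-packed message argument. An illustrative helper (hypothetical, not part of the patch) that makes the assumed layout explicit:

	/* [23:16] link level, [15:8] capped gen speed, [7:0] capped lane count */
	static inline uint32_t build_pcie_arg(uint32_t level, uint32_t gen,
					      uint32_t lanes)
	{
		return (level << 16) | (gen << 8) | lanes;
	}
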
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
.alloc_dpm_context = vega20_allocate_dpm_context,
@@ -3153,7 +3179,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_smu_table_index = vega20_get_smu_table_index,
.get_smu_power_index = vega20_get_pwr_src_index,
.get_workload_type = vega20_get_workload_type,
- .run_afll_btc = vega20_run_btc_afll,
+ .run_btc = vega20_run_btc_afll,
.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
.get_current_power_state = vega20_get_current_power_state,
.set_default_dpm_table = vega20_set_default_dpm_table,
@@ -3183,13 +3209,61 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_fan_speed_percent = vega20_get_fan_speed_percent,
.get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
- .get_thermal_temperature_range = vega20_get_thermal_temperature_range
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .set_df_cstate = vega20_set_df_cstate,
+ .update_pcie_parameters = vega20_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+	.baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
};
void vega20_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &vega20_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 6b7f791685ec..d6a6692db0ac 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct arcpgu_drm_private *arcpgu;
- struct device_node *encoder_node;
+ struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct resource *res;
int ret;
@@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm)
if (arc_pgu_setup_crtc(drm) < 0)
return -ENODEV;
- /* find the encoder node and initialize it */
- encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
+ /*
+	 * There is only one output port inside each device, and it is linked
+	 * to the encoder endpoint.
+ */
+ endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (endpoint_node) {
+ encoder_node = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ }
+
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
+		dev_info(drm->dev, "no encoder found; assuming a virtual LCD on a simulation platform\n");
ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 98aac743cc26..8fd7094beece 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index cec0639e3aa1..e87ff8623076 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -12,3 +12,9 @@ config DRM_KOMEDA
Processor driver. It supports the D71 variants of the hardware.
If compiled as a module it will be called komeda.
+
+config DRM_KOMEDA_ERROR_PRINT
+ bool "Enable komeda error print"
+ depends on DRM_KOMEDA
+ help
+	  Choose this option to have komeda print detected HW error and
+	  warning events to the kernel log.
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 5c3900c2e764..f095a1c68ac7 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -22,4 +22,6 @@ komeda-y += \
d71/d71_dev.o \
d71/d71_component.o
+komeda-$(CONFIG_DRM_KOMEDA_ERROR_PRINT) += komeda_event.o
+
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 55a8cc94808a..f0ba26e282c3 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -106,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg)
i, hdr.output_ids[i]);
}
+/* On D71, the global line size is used. From D32 onwards, every component
+ * has a line size register that indicates its FIFO size.
+ */
+static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg,
+ u32 max_default)
+{
+ if (!d71->periph_addr)
+ max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+
+ return max_default;
+}
+
+static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg)
+{
+ return __get_blk_line_size(d71, reg, d71->max_line_size);
+}
+
static u32 to_rot_ctrl(u32 rot)
{
u32 lr_ctrl = 0;
@@ -332,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
+static int d71_layer_validate(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct komeda_layer *layer = to_layer(c);
+ struct drm_plane_state *plane_st;
+ struct drm_framebuffer *fb;
+ u32 fourcc, line_sz, max_line_sz;
+
+ plane_st = drm_atomic_get_new_plane_state(state->obj.state,
+ state->plane);
+ fb = plane_st->fb;
+ fourcc = fb->format->format;
+
+ if (drm_rotation_90_or_270(st->rot))
+ line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b;
+ else
+ line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r;
+
+ if (fb->modifier) {
+ if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ max_line_sz = layer->line_sz;
+ else
+ max_line_sz = layer->line_sz / 2;
+
+ if (line_sz > max_line_sz) {
+			DRM_DEBUG_ATOMIC("afbc requested line_sz: %d exceeds the max afbc line_sz: %d.\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+ }
+
+ if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) {
+		DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceeds the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) {
+		DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceeds the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct komeda_component_funcs d71_layer_funcs = {
+ .validate = d71_layer_validate,
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -365,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71,
else
layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
- set_range(&layer->hsize_in, 4, d71->max_line_size);
+ if (!d71->periph_addr) {
+ /* D32 or newer product */
+ layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+ layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info);
+ } else if (d71->max_line_size > 2048) {
+ /* D71 4K */
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ /* D71 2K */
+ if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) {
+ /* rich layer is 4K configuration */
+ layer->line_sz = d71->max_line_size * 2;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = 0;
+ }
+ }
+
+ set_range(&layer->hsize_in, 4, layer->line_sz);
+
set_range(&layer->vsize_in, 4, d71->max_vsize);
malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
@@ -456,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71,
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
+ wb_layer->line_sz = get_blk_line_size(d71, reg);
+ wb_layer->yuv_line_sz = wb_layer->line_sz;
- set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
+ set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
return 0;
}
@@ -595,8 +684,8 @@ static int d71_compiz_init(struct d71_dev *d71,
compiz = to_compiz(c);
- set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
+ set_range(&compiz->vsize, 64, d71->max_vsize);
return 0;
}
@@ -703,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c,
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
- u32 v[9];
+ u32 v[10];
dump_block_header(sf, c->reg);
@@ -723,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
+
+ get_values_from_reg(c->reg, 0x130, 10, v);
+ seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
+ seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
+ seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
@@ -753,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71,
}
scaler = to_scaler(c);
- set_range(&scaler->hsize, 4, 2048);
+ set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
@@ -862,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71,
splitter = to_splitter(c);
- set_range(&splitter->hsize, 4, d71->max_line_size);
+ set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
@@ -933,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71,
merger = to_merger(c);
- set_range(&merger->hsize_merged, 4, 4032);
+ set_range(&merger->hsize_merged, 4,
+ __get_blk_line_size(d71, reg, 4032));
set_range(&merger->vsize_merged, 4, 4096);
return 0;
@@ -944,13 +1046,26 @@ static void d71_improc_update(struct komeda_component *c,
{
struct komeda_improc_state *st = to_improc_st(state);
u32 __iomem *reg = c->reg;
- u32 index;
+ u32 index, mask = 0, ctrl = 0;
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+ malidp_write32(reg, IPS_DEPTH, st->color_depth);
+
+ mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+
+ /* config color format */
+ if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+ ctrl |= IPS_CTRL_YUV;
+
+ malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
@@ -1218,6 +1333,90 @@ int d71_probe_block(struct d71_dev *d71,
return err;
}
+static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_puts(sf, "\n------ GCU ------\n");
+
+ get_values_from_reg(d71->gcu_addr, 0, 3, v);
+ seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(d71->gcu_addr, 0x10, 1, v);
+ seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(d71->gcu_addr, 0xA0, 5, v);
+ seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(d71->gcu_addr, 0xD0, 3, v);
+ seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]);
+}
+
+static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[6];
+
+ seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->lpu_addr);
+
+ get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v);
+ seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v);
+ seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v);
+ seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]);
+}
+
+static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->dou_addr);
+
+ get_values_from_reg(pipe->dou_addr, 0xA0, 5, v);
+ seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]);
+}
+
+static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf)
+{
+ struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe);
+
+ d71_lpu_dump(d71_pipe, sf);
+ d71_dou_dump(d71_pipe, sf);
+}
+
const struct komeda_pipeline_funcs d71_pipeline_funcs = {
- .downscaling_clk_check = d71_downscaling_clk_check,
+ .downscaling_clk_check = d71_downscaling_clk_check,
+ .dump_register = d71_pipeline_dump,
};
+
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ d71_gcu_dump(d71, sf);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index d567ab7ed314..822b23a1ce75 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -195,7 +195,7 @@ d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
if (gcu_status & GLB_IRQ_STATUS_PIPE1)
evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
- return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(gcu_status);
}
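
IRQ_RETVAL(x) expands to ((x) ? IRQ_HANDLED : IRQ_NONE), so the change above is behavior-preserving shorthand for the removed ternary.
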
#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
@@ -395,6 +395,22 @@ static int d71_enum_resources(struct komeda_dev *mdev)
err = PTR_ERR(pipe);
goto err_cleanup;
}
+
+		/* The D71 HW doesn't update shadow registers while the display
+		 * output is turning off, so if all pipeline components are
+		 * disabled together with the display output in one flush or
+		 * one operation, the registers updated by the disable are
+		 * never flushed to or valid in HW, which may lead to problems.
+		 * To work around this, introduce a two-phase disable:
+		 * Phase 1: disable the components while the display is still
+		 * on, so the disable can be flushed to HW.
+		 * Phase 2: turn off the display output only.
+ */
+ value = KOMEDA_PIPELINE_IMPROCS |
+ BIT(KOMEDA_COMPONENT_TIMING_CTRLR);
+
+ pipe->standalone_disabled_comps = value;
+
d71->pipes[i] = to_d71_pipeline(pipe);
}
@@ -561,17 +577,18 @@ static int d71_disconnect_iommu(struct komeda_dev *mdev)
}
static const struct komeda_dev_funcs d71_chip_funcs = {
- .init_format_table = d71_init_fmt_tbl,
- .enum_resources = d71_enum_resources,
- .cleanup = d71_cleanup,
- .irq_handler = d71_irq_handler,
- .enable_irq = d71_enable_irq,
- .disable_irq = d71_disable_irq,
- .on_off_vblank = d71_on_off_vblank,
- .change_opmode = d71_change_opmode,
- .flush = d71_flush,
- .connect_iommu = d71_connect_iommu,
- .disconnect_iommu = d71_disconnect_iommu,
+ .init_format_table = d71_init_fmt_tbl,
+ .enum_resources = d71_enum_resources,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
+ .connect_iommu = d71_connect_iommu,
+ .disconnect_iommu = d71_disconnect_iommu,
+ .dump_register = d71_dump,
};
const struct komeda_dev_funcs *
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
index 84f1878b647d..c7357c2b9e62 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -49,4 +49,6 @@ int d71_probe_block(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg);
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf);
+
#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 2d5e6d00b42c..1727dc993909 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -10,6 +10,7 @@
/* Common block registers offset */
#define BLK_BLOCK_INFO 0x000
#define BLK_PIPELINE_INFO 0x004
+#define BLK_MAX_LINE_SIZE 0x008
#define BLK_VALID_INPUT_ID0 0x020
#define BLK_OUTPUT_ID0 0x060
#define BLK_INPUT_ID0 0x080
@@ -321,6 +322,7 @@
#define L_INFO_RF BIT(0)
#define L_INFO_CM BIT(1)
#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF)
/* Scaler registers */
#define SC_COEFFTAB 0x0DC
@@ -494,13 +496,6 @@ enum d71_blk_type {
#define D71_DEFAULT_PREPRETCH_LINE 5
#define D71_BUS_WIDTH_16_BYTES 16
-#define D71_MIN_LINE_SIZE 64
-#define D71_MIN_VERTICAL_SIZE 64
-#define D71_SC_MIN_LIN_SIZE 4
-#define D71_SC_MIN_VERTICAL_SIZE 4
-#define D71_SC_MAX_LIN_SIZE 2048
-#define D71_SC_MAX_VERTICAL_SIZE 4096
-
#define D71_SC_MAX_UPSCALING 64
#define D71_SC_MAX_DOWNSCALING 6
#define D71_SC_SPLIT_OVERLAP 8
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 624d257da20f..252015210fbc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -5,7 +5,6 @@
*
*/
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
@@ -18,6 +17,33 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_st;
+ u32 conn_color_formats = ~0u;
+ int i, min_bpc = 31, conn_bpc = 0;
+
+ for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) {
+ if (conn_st->crtc != crtc_st->crtc)
+ continue;
+
+ conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8;
+ conn_color_formats &= conn->display_info.color_formats;
+
+ if (conn_bpc < min_bpc)
+ min_bpc = conn_bpc;
+ }
+
+	/* if no connector configures a color_format, use RGB444 as the default */
+ if (!conn_color_formats)
+ conn_color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ *color_depths = GENMASK(min_bpc, 0);
+ *color_formats = conn_color_formats;
+}
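
A worked example of the helper above (illustrative numbers): two bound connectors reporting 10 bpc and 8 bpc give min_bpc = 8, so *color_depths = GENMASK(8, 0) = 0x1ff, i.e. every depth up to 8 bits per component is acceptable, while *color_formats is the intersection of the connectors' color_formats masks.
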
+
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
@@ -250,23 +276,57 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc,
{
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc));
komeda_crtc_do_flush(crtc, old);
}
static void
+komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
+ struct completion *input_flip_done)
+{
+ struct drm_device *drm = kcrtc->base.dev;
+ struct komeda_dev *mdev = kcrtc->master->mdev;
+ struct completion *flip_done;
+ struct completion temp;
+ int timeout;
+
+	/* if the caller doesn't pass a flip_done, use a private one */
+ if (input_flip_done) {
+ flip_done = input_flip_done;
+ } else {
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ flip_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, kcrtc->master->id, 0);
+
+	/* wait for the flip to take effect */
+ timeout = wait_for_completion_timeout(flip_done, HZ);
+ if (timeout == 0) {
+		DRM_ERROR("timed out waiting for pipe%d flip done\n", kcrtc->master->id);
+ if (!input_flip_done) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+ }
+}
+
+static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
- struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
- struct completion *disable_done = &crtc->state->commit->flip_done;
- struct completion temp;
- int timeout;
+ struct completion *disable_done;
+ bool needs_phase2 = false;
- DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n",
drm_crtc_index(crtc),
old_st->active_pipes, old_st->affected_pipes);
@@ -274,7 +334,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
komeda_pipeline_disable(slave, old->state);
if (has_bit(master->id, old_st->active_pipes))
- komeda_pipeline_disable(master, old->state);
+ needs_phase2 = komeda_pipeline_disable(master, old->state);
/* crtc_disable has two scenarios according to the state->active switch.
* 1. active -> inactive
@@ -293,32 +353,23 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
* That's also the reason why skip modeset commit in
* komeda_crtc_atomic_flush()
*/
- if (crtc->state->active) {
- struct komeda_pipeline_state *pipe_st;
- /* clear the old active_comps to zero */
- pipe_st = komeda_pipeline_get_old_state(master, old->state);
- pipe_st->active_comps = 0;
+ disable_done = (needs_phase2 || crtc->state->active) ?
+ NULL : &crtc->state->commit->flip_done;
- init_completion(&temp);
- kcrtc->disable_done = &temp;
- disable_done = &temp;
- }
+	/* wait for the phase 1 disable to complete */
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
- mdev->funcs->flush(mdev, master->id, 0);
+ /* phase 2 */
+ if (needs_phase2) {
+ komeda_pipeline_disable(kcrtc->master, old->state);
- /* wait the disable take affect.*/
- timeout = wait_for_completion_timeout(disable_done, HZ);
- if (timeout == 0) {
- DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
- if (crtc->state->active) {
- unsigned long flags;
+ disable_done = crtc->state->active ?
+ NULL : &crtc->state->commit->flip_done;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- kcrtc->disable_done = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
}
+ drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
komeda_crtc_unprepare(kcrtc);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca64a129c594..937a6d4c4865 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -25,6 +25,8 @@ static int komeda_register_show(struct seq_file *sf, void *x)
struct komeda_dev *mdev = sf->private;
int i;
+	seq_puts(sf, "\n====== Komeda register dump ======\n");
+
if (mdev->funcs->dump_register)
mdev->funcs->dump_register(mdev, sf);
@@ -91,9 +93,19 @@ config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(config_id);
+static ssize_t
+aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk));
+}
+static DEVICE_ATTR_RO(aclk_hz);
+
static struct attribute *komeda_sysfs_entries[] = {
&dev_attr_core_id.attr,
&dev_attr_config_id.attr,
+ &dev_attr_aclk_hz.attr,
NULL,
};
@@ -216,7 +228,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
product->product_id,
MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
err = -ENODEV;
- goto err_cleanup;
+ goto disable_clk;
}
DRM_INFO("Found ARM Mali-D%x version r%dp%d\n",
@@ -229,19 +241,19 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
err = mdev->funcs->enum_resources(mdev);
if (err) {
DRM_ERROR("enumerate display resource failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_parse_dt(dev, mdev);
if (err) {
DRM_ERROR("parse device tree failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_assemble_pipelines(mdev);
if (err) {
DRM_ERROR("assemble display pipelines failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
dev->dma_parms = &mdev->dma_parms;
@@ -254,11 +266,14 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (mdev->iommu && mdev->funcs->connect_iommu) {
err = mdev->funcs->connect_iommu(mdev);
if (err) {
+ DRM_ERROR("connect iommu failed.\n");
mdev->iommu = NULL;
- goto err_cleanup;
+ goto disable_clk;
}
}
+ clk_disable_unprepare(mdev->aclk);
+
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
@@ -271,6 +286,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
return mdev;
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
err_cleanup:
komeda_dev_destroy(mdev);
return ERR_PTR(err);
@@ -288,8 +305,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
debugfs_remove_recursive(mdev->debugfs_root);
#endif
+ if (mdev->aclk)
+ clk_prepare_enable(mdev->aclk);
+
if (mdev->iommu && mdev->funcs->disconnect_iommu)
- mdev->funcs->disconnect_iommu(mdev);
+ if (mdev->funcs->disconnect_iommu(mdev))
+ DRM_ERROR("disconnect iommu failed.\n");
mdev->iommu = NULL;
for (i = 0; i < mdev->n_pipelines; i++) {
@@ -317,3 +338,47 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
devm_kfree(dev, mdev);
}
+
+int komeda_dev_resume(struct komeda_dev *mdev)
+{
+ int ret = 0;
+
+ clk_prepare_enable(mdev->aclk);
+
+ if (mdev->iommu && mdev->funcs->connect_iommu) {
+ ret = mdev->funcs->connect_iommu(mdev);
+ if (ret < 0) {
+ DRM_ERROR("connect iommu failed.\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = mdev->funcs->enable_irq(mdev);
+
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
+
+ return ret;
+}
+
+int komeda_dev_suspend(struct komeda_dev *mdev)
+{
+ int ret = 0;
+
+ clk_prepare_enable(mdev->aclk);
+
+ if (mdev->iommu && mdev->funcs->disconnect_iommu) {
+ ret = mdev->funcs->disconnect_iommu(mdev);
+ if (ret < 0) {
+ DRM_ERROR("disconnect iommu failed.\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = mdev->funcs->disable_irq(mdev);
+
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index d1c86b6174c8..414200233b64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -40,6 +40,17 @@
#define KOMEDA_ERR_TTNG BIT_ULL(30)
#define KOMEDA_ERR_TTF BIT_ULL(31)
+#define KOMEDA_ERR_EVENTS \
+ (KOMEDA_EVENT_URUN | KOMEDA_EVENT_IBSY | KOMEDA_EVENT_OVR |\
+ KOMEDA_ERR_TETO | KOMEDA_ERR_TEMR | KOMEDA_ERR_TITR |\
+ KOMEDA_ERR_CPE | KOMEDA_ERR_CFGE | KOMEDA_ERR_AXIE |\
+ KOMEDA_ERR_ACE0 | KOMEDA_ERR_ACE1 | KOMEDA_ERR_ACE2 |\
+ KOMEDA_ERR_ACE3 | KOMEDA_ERR_DRIFTTO | KOMEDA_ERR_FRAMETO |\
+ KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\
+ KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF)
+
+#define KOMEDA_WARN_EVENTS KOMEDA_ERR_CSCE
+
/* malidp device id */
enum {
MALI_D71 = 0,
@@ -207,4 +218,13 @@ void komeda_dev_destroy(struct komeda_dev *mdev);
struct komeda_dev *dev_to_mdev(struct device *dev);
+#ifdef CONFIG_DRM_KOMEDA_ERROR_PRINT
+void komeda_print_events(struct komeda_events *evts);
+#else
+static inline void komeda_print_events(struct komeda_events *evts) {}
+#endif
+
+int komeda_dev_resume(struct komeda_dev *mdev);
+int komeda_dev_suspend(struct komeda_dev *mdev);
+
#endif /*_KOMEDA_DEV_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 69ace6f9055d..d6c2222c5d33 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
@@ -136,13 +137,40 @@ static const struct of_device_id komeda_of_match[] = {
MODULE_DEVICE_TABLE(of, komeda_of_match);
+static int __maybe_unused komeda_pm_suspend(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ struct drm_device *drm = &mdrv->kms->base;
+ int res;
+
+ res = drm_mode_config_helper_suspend(drm);
+
+ komeda_dev_suspend(mdrv->mdev);
+
+ return res;
+}
+
+static int __maybe_unused komeda_pm_resume(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ struct drm_device *drm = &mdrv->kms->base;
+
+ komeda_dev_resume(mdrv->mdev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+
+static const struct dev_pm_ops komeda_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
+};
+
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
.remove = komeda_platform_remove,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
- .pm = NULL,
+ .pm = &komeda_pm_ops,
},
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
new file mode 100644
index 000000000000..a36fb86cc054
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_print.h>
+
+#include "komeda_dev.h"
+
+struct komeda_str {
+ char *str;
+ u32 sz;
+ u32 len;
+};
+
+/* return 0 on success, < 0 when there is no space left.
+ */
+static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
+{
+ va_list args;
+ int num, free_sz;
+ int err;
+
+ free_sz = str->sz - str->len - 1;
+ if (free_sz <= 0)
+ return -ENOSPC;
+
+ va_start(args, fmt);
+
+ num = vsnprintf(str->str + str->len, free_sz, fmt, args);
+
+ va_end(args);
+
+ if (num < free_sz) {
+ str->len += num;
+ err = 0;
+ } else {
+ str->len = str->sz - 1;
+ err = -ENOSPC;
+ }
+
+ return err;
+}
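
A usage sketch for the bounded-append helper above (hypothetical caller, not part of the patch):

	char buf[64];
	struct komeda_str s = { .str = buf, .sz = sizeof(buf), .len = 0 };

	komeda_sprintf(&s, "pipe%d: ", 0);	/* appends, tracking s.len */
	komeda_sprintf(&s, "VSYNC");		/* returns -ENOSPC once full */
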
+
+static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg)
+{
+ if (evt)
+ komeda_sprintf(str, msg);
+}
+
+static void evt_str(struct komeda_str *str, u64 events)
+{
+ if (events == 0ULL) {
+ komeda_sprintf(str, "None");
+ return;
+ }
+
+ evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|");
+ evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|");
+ evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|");
+ evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|");
+
+ evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|");
+ evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|");
+
+ /* GLB error */
+ evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+
+ /* DOU error */
+ evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|");
+
+ /* LPU errors or events */
+ evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
+ evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|");
+
+	/* LPU TBU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|");
+ evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|");
+
+	/* CU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|");
+ evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|");
+ evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+
+ if (str->len > 0 && (str->str[str->len - 1] == '|')) {
+ str->str[str->len - 1] = 0;
+ str->len--;
+ }
+}
+
+static bool is_new_frame(struct komeda_events *a)
+{
+ return (a->pipes[0] | a->pipes[1]) &
+ (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
+}
+
+void komeda_print_events(struct komeda_events *evts)
+{
+ u64 print_evts = KOMEDA_ERR_EVENTS;
+ static bool en_print = true;
+
+	/* reduce duplicate messages: only print the first event of a frame */
+ if (evts->global || is_new_frame(evts))
+ en_print = true;
+ if (!en_print)
+ return;
+
+ if ((evts->global | evts->pipes[0] | evts->pipes[1]) & print_evts) {
+ char msg[256];
+ struct komeda_str str;
+
+ str.str = msg;
+ str.sz = sizeof(msg);
+ str.len = 0;
+
+ komeda_sprintf(&str, "gcu: ");
+ evt_str(&str, evts->global);
+ komeda_sprintf(&str, ", pipes[0]: ");
+ evt_str(&str, evts->pipes[0]);
+ komeda_sprintf(&str, ", pipes[1]: ");
+ evt_str(&str, evts->pipes[1]);
+
+ DRM_ERROR("err detect: %s\n", msg);
+
+ en_print = false;
+ }
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index ae274902ff92..52648b4008bc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -48,6 +48,8 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
memset(&evts, 0, sizeof(evts));
status = mdev->funcs->irq_handler(mdev, &evts);
+ komeda_print_events(&evts);
+
/* Notify the crtc to handle the events */
for (i = 0; i < kms->n_crtcs; i++)
komeda_crtc_handle_event(&kms->crtcs[i], &evts);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 45c498e15e7a..456f3c435719 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -166,6 +166,8 @@ static inline bool has_flip_h(u32 rot)
return !!(rotation & DRM_MODE_REFLECT_X);
}
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats);
unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index cf5bea578ad9..bd6ca7c87037 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -227,6 +227,8 @@ struct komeda_layer {
/* accepted h/v input range before rotation */
struct malidp_range hsize_in, vsize_in;
u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 line_sz;
+ u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */
u32 supported_rots;
/* komeda supports layer split which splits a whole image to two parts
* left and right and handle them by two individual layer processors
@@ -323,6 +325,7 @@ struct komeda_improc {
struct komeda_improc_state {
struct komeda_component_state base;
+ u8 color_format, color_depth;
u16 hsize, vsize;
};
@@ -389,6 +392,18 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /**
+ * @standalone_disabled_comps:
+ *
+	 * When disabling the pipeline, some components cannot be disabled
+	 * together with the others but need a separate, standalone disable.
+	 * The standalone_disabled_comps are the components which must be
+	 * disabled on their own, which introduces the concept of a
+	 * two-phase disable:
+	 * phase 1: disable the common components.
+	 * phase 2: disable the standalone_disabled_comps.
+ */
+ u32 standalone_disabled_comps;
/** @n_layers: the number of layer on @layers */
int n_layers;
/** @layers: the pipeline layers */
@@ -535,7 +550,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *
komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state);
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index b848270e0a1f..52750116aa19 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_data_flow_cfg *dflow)
{
u32 src_x, src_y, src_w, src_h;
+ u32 line_sz, max_line_sz;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
@@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
return -EINVAL;
}
+ if (drm_rotation_90_or_270(dflow->rot))
+ line_sz = dflow->in_h;
+ else
+ line_sz = dflow->in_w;
+
+ if (kfb->base.format->hsub > 1)
+ max_line_sz = layer->yuv_line_sz;
+ else
+ max_line_sz = layer->line_sz;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+
return 0;
}
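
A worked example of the check above (illustrative): NV12 has hsub == 2, so on a D71 4K rich layer (line_sz = 4096, yuv_line_sz = 2048, per the d71_layer_init changes earlier in this patch) a 4096-pixel-wide NV12 plane is rejected, while a 4096-wide XRGB8888 plane (hsub == 1) passes.
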
@@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct drm_crtc_state *crtc_st = &kcrtc_st->base;
struct komeda_component_state *c_st;
struct komeda_improc_state *st;
@@ -756,6 +774,34 @@ komeda_improc_validate(struct komeda_improc *improc,
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
+ if (drm_atomic_crtc_needs_modeset(crtc_st)) {
+ u32 output_depths, output_formats;
+ u32 avail_depths, avail_formats;
+
+ komeda_crtc_get_color_config(crtc_st, &output_depths,
+ &output_formats);
+
+ avail_depths = output_depths & improc->supported_color_depths;
+ if (avail_depths == 0) {
+ DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
+ output_depths,
+ improc->supported_color_depths);
+ return -EINVAL;
+ }
+
+ avail_formats = output_formats &
+ improc->supported_color_formats;
+ if (!avail_formats) {
+ DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
+ output_formats,
+ improc->supported_color_formats);
+ return -EINVAL;
+ }
+
+ st->color_depth = __fls(avail_depths);
+ st->color_format = BIT(__ffs(avail_formats));
+ }
+
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
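
A worked example of the depth/format selection above (illustrative): avail_depths = 0x1ff (depths 0-8 all common) gives st->color_depth = __fls(0x1ff) = 8, the deepest common depth, while avail_formats = 0x6 gives st->color_format = BIT(__ffs(0x6)) = 0x2, the lowest-numbered common format.
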
@@ -1218,7 +1264,17 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
return 0;
}
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+/* Since standalone-disabled components must be disabled separately and last,
+ * a complete disable operation may need to call pipeline_disable twice
+ * (two-phase disabling).
+ * Phase 1: disable the common components, then flush.
+ * Phase 2: disable the standalone-disabled components, then flush.
+ *
+ * RETURNS:
+ * true: disable is not complete, needs a phase 2 disable.
+ * false: disable is complete.
+ */
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state)
{
struct komeda_pipeline_state *old;
@@ -1228,9 +1284,14 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
old = komeda_pipeline_get_old_state(pipe, old_state);
- disabling_comps = old->active_comps;
- DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
- pipe->id, disabling_comps);
+ disabling_comps = old->active_comps &
+ (~pipe->standalone_disabled_comps);
+ if (!disabling_comps)
+ disabling_comps = old->active_comps &
+ pipe->standalone_disabled_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
+ pipe->id, old->active_comps, disabling_comps);
dp_for_each_set_bit(id, disabling_comps) {
c = komeda_pipeline_get_component(pipe, id);
@@ -1248,6 +1309,13 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
c->funcs->disable(c);
}
+
+	/* Update the pipeline state; if some components are still active,
+	 * return true so the caller performs the phase 2 disable.
+ */
+ old->active_comps &= ~disabling_comps;
+
+	return !!old->active_comps;
}
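
A minimal caller sketch of the two-phase contract (illustrative; in this patch komeda_crtc_atomic_disable drives it, with a HW flush between the phases):

	bool needs_phase2 = komeda_pipeline_disable(pipe, old_state);
	/* ... flush the phase 1 disable to HW and wait for it ... */
	if (needs_phase2)
		komeda_pipeline_disable(pipe, old_state);	/* phase 2 */
	/* ... flush again so the standalone components go down too ... */
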
void komeda_pipeline_update(struct komeda_pipeline *pipe,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index b72840c06ab7..e465cc4879c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -141,6 +141,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
+ struct drm_display_info *info;
u32 *formats, n_formats = 0;
int err;
@@ -172,6 +173,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
+ info = &kwb_conn->base.base.display_info;
+ info->bpc = __fls(kcrtc->master->improc->supported_color_depths);
+ info->color_formats = kcrtc->master->improc->supported_color_formats;
+
kcrtc->wb_conn = kwb_conn;
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 333b88a5efb0..37d92a06318e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -368,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
return false;
}
-struct drm_framebuffer *
+static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -491,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp,
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}
-void malidp_error_stats_dump(const char *prefix,
- struct malidp_error_stats error_stats,
- struct seq_file *m)
+static void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
{
seq_printf(m, "[%s] num_errors : %d\n", prefix,
error_stats.num_errors);
@@ -665,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}
-DEVICE_ATTR_RO(core_id);
+static DEVICE_ATTR_RO(core_id);
static int malidp_init_sysfs(struct device *dev)
{
@@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev)
malidp->core_id = version;
+ ret = of_property_read_u32(dev->of_node,
+ "arm,malidp-arqos-value",
+ &hwdev->arqos_value);
+ if (ret)
+ hwdev->arqos_value = 0x0;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index bd8265f02e0b..ca570b135478 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+
+ /*
+	 * Program the RQoS register to avoid a flicker issue at high
+	 * resolutions on the LS1028A.
+ */
+ if (hwdev->arqos_value) {
+ val = hwdev->arqos_value;
+ malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
+ }
}
int malidp_format_get_bpp(u32 fmt)
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 968a65eed371..e4c36bc90bda 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -251,6 +251,9 @@ struct malidp_hw_device {
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
+
+	/* priority level of the RQOS register, used for driving the ARQOS signal */
+ u32 arqos_value;
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 993031542fa1..514c50dcb74d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -210,6 +210,16 @@
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
+/*
+ * The quality of service (QoS) register on the DP500. RQOS register values
+ * drive the ARQOS signal, used for AXI transactions, depending on the
+ * FIFO input level.
+ * The RQOS register can also set QoS levels for:
+ *    - RED_ARQOS:   a 4-bit signal value for close-to-underflow conditions
+ *    - GREEN_ARQOS: a 4-bit signal value for normal conditions
+ */
+#define MALIDP500_RQOS_QUALITY 0x00500
+
/* register offsets and bits specific to DP550/DP650 */
#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 829620d5326c..fbcf2f45cef5 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6ed6ff49efc0..1f17794b0890 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -35,7 +35,6 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -201,10 +200,7 @@ static struct pci_driver ast_pci_driver = {
.driver.pm = &ast_pm_ops,
};
-static const struct file_operations ast_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 244cc7c382af..ff161bd622f3 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -82,6 +82,25 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
struct ast_private {
struct drm_device *dev;
@@ -97,8 +116,11 @@ struct ast_private {
int fb_mtrr;
- struct drm_gem_object *cursor_cache;
- int next_cursor;
+ struct {
+ struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
+ unsigned int next_index;
+ } cursor;
+
bool support_wide_screen;
enum {
ast_use_p2a,
@@ -199,23 +221,6 @@ static inline void ast_open_key(struct ast_private *ast)
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-#define AST_MAX_HWC_WIDTH 64
-#define AST_MAX_HWC_HEIGHT 64
-
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
-#define AST_HWC_SIGNATURE_SIZE 32
-
-#define AST_DEFAULT_HWC_NUM 2
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
-
-
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 50de8e47659c..21715d6a9b56 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -33,7 +33,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index d349c721501c..b13eaa2619ab 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -687,17 +687,6 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-
-static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-
static const struct drm_encoder_funcs ast_enc_funcs = {
.destroy = ast_encoder_destroy,
};
@@ -847,7 +836,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
- .best_encoder = ast_best_single_encoder,
};
static const struct drm_connector_funcs ast_connector_funcs = {
@@ -895,50 +883,53 @@ static int ast_connector_init(struct drm_device *dev)
static int ast_cursor_init(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- int size;
- int ret;
- struct drm_gem_object *obj;
+ size_t size, i;
struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
- void *base;
+ int ret;
- size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- ret = ast_gem_create(dev, size, true, &obj);
- if (ret)
- return ret;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- ret = (int)gpu_addr;
- goto fail;
- }
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
- /* kmap the object */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto fail;
+ ast->cursor.gbo[i] = gbo;
}
- ast->cursor_cache = obj;
return 0;
-fail:
+
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ ast->cursor.gbo[i] = NULL;
+ }
return ret;
}
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(ast->cursor_cache);
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
}
int ast_mode_init(struct drm_device *dev)
@@ -1076,23 +1067,6 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
kfree(i2c);
}
-static void ast_show_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- u8 jreg;
-
- jreg = 0x2;
- /* enable ARGB cursor */
- jreg |= 1;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-}
-
-static void ast_hide_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
-
static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
{
union {
@@ -1149,21 +1123,99 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
return csum;
}
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+ unsigned int height)
+{
+ u32 csum;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ return 0;
+}
+
+static void ast_cursor_set_base(struct ast_private *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
+}
+
+static int ast_show_cursor(struct drm_crtc *crtc, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+ u8 jreg;
+
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = ast_cursor_update(dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ ast_cursor_set_base(ast, off);
+
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ ++ast->cursor.next_index;
+ ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
+
+ drm_gem_vram_vunmap(gbo, dst);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
+
+static void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
static int ast_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height)
{
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo;
- s64 dst_gpu;
- u64 gpu_addr;
- u32 csum;
+ u8 *src;
int ret;
- u8 *src, *dst;
if (!handle) {
ast_hide_cursor(crtc);
@@ -1179,70 +1231,23 @@ static int ast_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
}
gbo = drm_gem_vram_of_gem(obj);
-
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- goto err_drm_gem_object_put_unlocked;
- src = drm_gem_vram_kmap(gbo, true, NULL);
+ src = drm_gem_vram_vmap(gbo);
if (IS_ERR(src)) {
ret = PTR_ERR(src);
- goto err_drm_gem_vram_unpin;
- }
-
- dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- goto err_drm_gem_vram_kunmap;
- }
- dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache));
- if (dst_gpu < 0) {
- ret = (int)dst_gpu;
- goto err_drm_gem_vram_kunmap;
- }
-
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
-
- /* do data transfer to cursor cache */
- csum = copy_cursor_image(src, dst, width, height);
-
- /* write checksum + signature */
- {
- struct drm_gem_vram_object *dst_gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL);
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-
- /* set pattern offset */
- gpu_addr = (u64)dst_gpu;
- gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
- gpu_addr >>= 3;
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ goto err_drm_gem_object_put_unlocked;
}
- ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
- ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
-
- ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
- ast_show_cursor(crtc);
+ ret = ast_show_cursor(crtc, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, src);
drm_gem_object_put_unlocked(obj);
return 0;
-err_drm_gem_vram_kunmap:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -1253,12 +1258,17 @@ static int ast_cursor_move(struct drm_crtc *crtc,
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_vram_object *gbo;
int x_offset, y_offset;
- u8 *sig;
+ u8 *dst, *sig;
+ u8 jreg;
- sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
@@ -1281,7 +1291,11 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_show_cursor(crtc);
+ jreg = 0x02 |
+ 0x01; /* enable ARGB4444 cursor */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ drm_gem_vram_vunmap(gbo, dst);
return 0;
}
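
The cursor path above ping-pongs between AST_DEFAULT_HWC_NUM (two) pinned VRAM buffers and reprograms the hardware base register on every update. The base is written in 8-byte units across the three index registers 0xc8-0xca. Below is a minimal userspace sketch of that split; split_cursor_base is a hypothetical name, and the bit layout mirrors ast_cursor_set_base() above (equivalent to the removed code's "gpu_addr >>= 3" followed by three byte-wide writes).

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the split done by ast_cursor_set_base(): the cursor base
 * is programmed in 8-byte units, so bit 3 of the VRAM offset lands in
 * bit 0 of register 0xc8.
 */
static void split_cursor_base(uint64_t address, uint8_t reg[3])
{
	reg[0] = (address >> 3) & 0xff;		/* offset bits [10:3]  -> 0xc8 */
	reg[1] = (address >> 11) & 0xff;	/* offset bits [18:11] -> 0xc9 */
	reg[2] = (address >> 19) & 0xff;	/* offset bits [26:19] -> 0xca */
}

int main(void)
{
	uint8_t reg[3];

	split_cursor_base(0x12345678, reg);
	printf("0xc8=%02x 0xc9=%02x 0xca=%02x\n", reg[0], reg[1], reg[2]);
	return 0;
}
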
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c52d92294171..fad34106083a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -30,7 +30,6 @@
#include <drm/drm_print.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -42,7 +41,7 @@ int ast_mm_init(struct ast_private *ast)
vmm = drm_vram_helper_alloc_mm(
dev, pci_resource_start(dev->pdev, 0),
- ast->vram_size, &drm_gem_vram_mm_funcs);
+ ast->vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 375fa84c548b..121b62682d80 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -107,7 +107,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->encoder.possible_crtcs = 0x1;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 89f5a756fa37..034f202dfe8f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -601,7 +601,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
struct drm_framebuffer *fb = state->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
- unsigned int tmp;
int ret;
int i;
@@ -694,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
* Swap width and size in case of 90 or 270 degrees rotation
*/
if (drm_rotation_90_or_270(state->base.rotation)) {
- tmp = state->src_w;
- state->src_w = state->src_h;
- state->src_h = tmp;
+ swap(state->src_w, state->src_h);
}
if (!desc->layout.size &&
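
For reference, the swap() substituted above is the kernel's type-generic helper; a sketch of its shape (the real definition lives in the kernel headers):

/* Type-generic two-value swap, as used in place of the open-coded
 * temporary above; __typeof__ keeps it working for any lvalue type. */
#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
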
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 32b043abb668..7bcdf294fed8 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -4,6 +4,8 @@ config DRM_BOCHS
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option for qemu.
If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 68483a2fc12c..917767173ee6 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -10,7 +10,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vram_mm_helper.h>
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 770e1625d05e..10460878414e 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -58,10 +58,7 @@ err:
return ret;
}
-static const struct file_operations bochs_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(bochs_fops);
static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -114,7 +111,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 02a9c1ed165b..3f0006c2470d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
}
}
-static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-}
-
-static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
- gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
- drm_gem_vram_unpin(gbo);
-}
-
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.update = bochs_pipe_update,
- .prepare_fb = bochs_pipe_prepare_fb,
- .cleanup_fb = bochs_pipe_cleanup_fb,
+ .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
+ .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)
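
The two removed callbacks are replaced by shared VRAM-helper implementations. A simplified sketch of what the prepare side does, inferred from the removed code (the shared helper additionally iterates all planes of the framebuffer; sketch_prepare_fb is an illustrative name):

static int sketch_prepare_fb(struct drm_simple_display_pipe *pipe,
			     struct drm_plane_state *new_state)
{
	struct drm_gem_vram_object *gbo;

	if (!new_state->fb)
		return 0;

	/* Pin the buffer into VRAM so it cannot move while scanned out;
	 * the matching cleanup_fb helper unpins it again. */
	gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
	return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
}
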
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 8f9bb886f7ad..1b74f530b07c 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -11,8 +11,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct drm_vram_mm *vmm;
vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base,
- bochs->fb_size,
- &drm_gem_vram_mm_funcs);
+ bochs->fb_size);
return PTR_ERR_OR_ZERO(vmm);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 1cc9f502c1f2..34362976cd6f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -87,8 +87,7 @@ config DRM_SIL_SII8620
depends on OF
select DRM_KMS_HELPER
imply EXTCON
- select INPUT
- select RC_CORE
+ depends on RC_CORE || !RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 3c7cc5af735c..274989f96a91 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@@ -38,12 +39,20 @@
#define AUX_CH_BUFFER_SIZE 16
#define AUX_WAIT_TIMEOUT_MS 15
-static const u8 anx78xx_i2c_addresses[] = {
- [I2C_IDX_TX_P0] = TX_P0,
- [I2C_IDX_TX_P1] = TX_P1,
- [I2C_IDX_TX_P2] = TX_P2,
- [I2C_IDX_RX_P0] = RX_P0,
- [I2C_IDX_RX_P1] = RX_P1,
+static const u8 anx7808_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x78,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
+};
+
+static const u8 anx781x_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x70,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
};
struct anx78xx_platform_data {
@@ -62,7 +71,6 @@ struct anx78xx {
struct i2c_client *client;
struct edid *edid;
struct drm_connector connector;
- struct drm_dp_link link;
struct anx78xx_platform_data pdata;
struct mutex lock;
@@ -715,7 +723,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
/* 1.0V digital core power regulator */
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
if (IS_ERR(pdata->dvdd10)) {
- DRM_ERROR("DVDD10 regulator not found\n");
+ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+ DRM_ERROR("DVDD10 regulator not found\n");
+
return PTR_ERR(pdata->dvdd10);
}
@@ -737,7 +747,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
{
- u8 dp_bw, value;
+ u8 dp_bw, dpcd[2];
int err;
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
@@ -790,18 +800,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /* Check link capabilities */
- err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to probe link capabilities: %d\n", err);
- return err;
- }
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
- /* Power up the sink */
- err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
- return err;
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600).
+	 * Table 5-52, "Sink Control Field" (register 0x600)).
+ usleep_range(1000, 2000);
}
/* Possibly enable downspread on the sink */
@@ -840,15 +866,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+ dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, value);
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
return err;
- err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+ dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
+
+ if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
if (err < 0) {
- DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+ DRM_ERROR("Failed to configure link: %d\n", err);
return err;
}
@@ -1301,6 +1334,7 @@ static const struct regmap_config anx78xx_regmap_config = {
};
static const u16 anx78xx_chipid_list[] = {
+ 0x7808,
0x7812,
0x7814,
0x7818,
@@ -1312,6 +1346,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
unsigned int i, idl, idh, version;
+ const u8 *i2c_addresses;
bool found = false;
int err;
@@ -1332,7 +1367,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
err = anx78xx_init_pdata(anx78xx);
if (err) {
- DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
return err;
}
@@ -1349,22 +1386,26 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
}
/* Map slave addresses of ANX7814 */
+ i2c_addresses = device_get_match_data(&client->dev);
for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
- anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter,
- anx78xx_i2c_addresses[i] >> 1);
- if (!anx78xx->i2c_dummy[i]) {
- err = -ENOMEM;
- DRM_ERROR("Failed to reserve I2C bus %02x\n",
- anx78xx_i2c_addresses[i]);
+ struct i2c_client *i2c_dummy;
+
+ i2c_dummy = i2c_new_dummy_device(client->adapter,
+ i2c_addresses[i] >> 1);
+ if (IS_ERR(i2c_dummy)) {
+ err = PTR_ERR(i2c_dummy);
+ DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
+ i2c_addresses[i], err);
goto err_unregister_i2c;
}
+ anx78xx->i2c_dummy[i] = i2c_dummy;
anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
&anx78xx_regmap_config);
if (IS_ERR(anx78xx->map[i])) {
err = PTR_ERR(anx78xx->map[i]);
DRM_ERROR("Failed regmap initialization %02x\n",
- anx78xx_i2c_addresses[i]);
+ i2c_addresses[i]);
goto err_unregister_i2c;
}
}
@@ -1463,7 +1504,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id anx78xx_match_table[] = {
- { .compatible = "analogix,anx7814", },
+ { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
+ { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, anx78xx_match_table);
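
The probe path now selects the address table through the OF match data. Note that the tables hold 8-bit (write) addresses while the I2C core takes 7-bit addresses, hence the ">> 1" at the call site. A condensed, hypothetical sketch of the lookup done in the probe loop above:

static struct i2c_client *anx78xx_new_dummy(struct i2c_client *client,
					    unsigned int idx)
{
	/* .data of the matched of_device_id entry, i.e. the per-variant
	 * address table registered in anx78xx_match_table above. */
	const u8 *addrs = device_get_match_data(&client->dev);

	/* Table entries are 8-bit write addresses; the core wants 7-bit. */
	return i2c_new_dummy_device(client->adapter, addrs[idx] >> 1);
}
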
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
index 25e063bcecbc..55d6c2109740 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h
@@ -6,15 +6,8 @@
#ifndef __ANX78xx_H
#define __ANX78xx_H
-#define TX_P0 0x70
-#define TX_P1 0x7a
-#define TX_P2 0x72
-
-#define RX_P0 0x7e
-#define RX_P1 0x80
-
/***************************************************************/
-/* Register definition of device address 0x7e */
+/* Register definitions for RX_P0 */
/***************************************************************/
/*
@@ -171,7 +164,7 @@
#define SP_VSI_RCVD BIT(1)
/***************************************************************/
-/* Register definition of device address 0x80 */
+/* Register definitions for RX_P1 */
/***************************************************************/
/* HDCP BCAPS Shadow Register */
@@ -217,7 +210,7 @@
#define SP_SET_AVMUTE BIT(0)
/***************************************************************/
-/* Register definition of device address 0x70 */
+/* Register definitions for TX_P0 */
/***************************************************************/
/* HDCP Status Register */
@@ -451,7 +444,7 @@
#define SP_DP_BUF_DATA0_REG 0xf0
/***************************************************************/
-/* Register definition of device address 0x72 */
+/* Register definitions for TX_P2 */
/***************************************************************/
/*
@@ -674,7 +667,7 @@
#define SP_INT_CTRL_REG 0xff
/***************************************************************/
-/* Register definition of device address 0x7a */
+/* Register definitions for TX_P1 */
/***************************************************************/
/* DP TX Link Training Control Register */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 22885dceaa17..bb411fe52ae8 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -21,6 +21,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 6166dca6be81..3a5bd4e7fd1e 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -956,7 +956,8 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
panel = of_drm_find_panel(np);
if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
if (!bridge)
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 7aa789c35882..cc33dc411b9e 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 2ab2c234f26c..e2132a8d5106 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -106,7 +106,8 @@ static int lvds_encoder_probe(struct platform_device *pdev)
}
lvds_encoder->panel_bridge =
- devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_LVDS);
+ devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(lvds_encoder->panel_bridge))
return PTR_ERR(lvds_encoder->panel_bridge);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 6e81e5db57f2..e8a49f6146c6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d4a1cc5052c3..57ff01339559 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index b12ae3a4c5f1..f4e293e7cf64 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -133,8 +134,6 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* just calls the appropriate functions from &drm_panel.
*
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* For drivers converting from directly using drm_panel: The expected
* usage pattern is that during either encoder module probe or DSI
@@ -148,11 +147,37 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* drm_mode_config_cleanup() if the bridge has already been attached), then
* drm_panel_bridge_remove() to free it.
*
+ * The connector type is set to @panel->connector_type, which must be set to a
+ * known type. Calling this function with a panel whose connector type is
+ * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ *
 * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
-struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
- u32 connector_type)
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return drm_panel_bridge_add_typed(panel, panel->connector_type);
+}
+EXPORT_SYMBOL(drm_panel_bridge_add);
+
+/**
+ * drm_panel_bridge_add_typed - Creates a &drm_bridge and &drm_connector with
+ * an explicit connector type.
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like drm_panel_bridge_add(), but forces the connector type to
+ * @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * drm_panel_bridge_add() instead, and fix panel drivers as necessary if they
+ * don't report a connector type.
+ */
+struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
+ u32 connector_type)
{
struct panel_bridge *panel_bridge;
@@ -176,7 +201,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
return &panel_bridge->bridge;
}
-EXPORT_SYMBOL(drm_panel_bridge_add);
+EXPORT_SYMBOL(drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_remove - Unregisters and frees a drm_bridge
@@ -213,15 +238,38 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
* that just calls the appropriate functions from &drm_panel.
* @dev: device to tie the bridge lifetime to
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* This is the managed version of drm_panel_bridge_add() which automatically
* calls drm_panel_bridge_remove() when @dev is unbound.
*/
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
- struct drm_panel *panel,
- u32 connector_type)
+ struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return devm_drm_panel_bridge_add_typed(dev, panel,
+ panel->connector_type);
+}
+EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+
+/**
+ * devm_drm_panel_bridge_add_typed - Creates a managed &drm_bridge and
+ * &drm_connector with an explicit connector type.
+ * @dev: device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like devm_drm_panel_bridge_add(), but forces the connector type
+ * to @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_panel_bridge_add() instead, and fix panel drivers as necessary if
+ * they don't report a connector type.
+ */
+struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type)
{
struct drm_bridge **ptr, *bridge;
@@ -230,7 +278,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- bridge = drm_panel_bridge_add(panel, connector_type);
+ bridge = drm_panel_bridge_add_typed(panel, connector_type);
if (!IS_ERR(bridge)) {
*ptr = bridge;
devres_add(dev, ptr);
@@ -240,4 +288,4 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
return bridge;
}
-EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
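
With the typed variants split out, a consumer can rely on the panel reporting its own connector type. A hypothetical caller showing the new convention (the fallback branch is only needed for legacy panels that still report DRM_MODE_CONNECTOR_Unknown; the DSI type in the fallback is an assumption for the example):

static struct drm_bridge *example_add_panel_bridge(struct device *dev,
						   struct drm_panel *panel)
{
	/* Deprecated path: panel does not declare its connector type. */
	if (panel->connector_type == DRM_MODE_CONNECTOR_Unknown)
		return devm_drm_panel_bridge_add_typed(dev, panel,
						       DRM_MODE_CONNECTOR_DSI);

	/* New convention: type comes from panel->connector_type. */
	return devm_drm_panel_bridge_add(dev, panel);
}
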
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 93c68e2e9484..b7a72dfdcac3 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 38f75ac580df..b70e8c5cf2e1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 25d4ad8c7ad6..f81f81b7051f 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -13,6 +13,7 @@
* Dharam Kumar <dharam.kr@samsung.com>
*/
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -841,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->client[I2C_MHL] = client;
- ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
- if (!ctx->client[I2C_TPI]) {
+ ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_TPI_ADDR);
+ if (IS_ERR(ctx->client[I2C_TPI])) {
dev_err(ctx->dev, "failed to create TPI client\n");
- return -ENODEV;
+ return PTR_ERR(ctx->client[I2C_TPI]);
}
- ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
- if (!ctx->client[I2C_HDMI]) {
+ ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_HDMI_ADDR);
+ if (IS_ERR(ctx->client[I2C_HDMI])) {
dev_err(ctx->dev, "failed to create HDMI RX client\n");
- goto fail_tpi;
+ return PTR_ERR(ctx->client[I2C_HDMI]);
}
- ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
- if (!ctx->client[I2C_CBUS]) {
+ ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_CBUS_ADDR);
+ if (IS_ERR(ctx->client[I2C_CBUS])) {
dev_err(ctx->dev, "failed to create CBUS client\n");
- goto fail_hdmi;
+ return PTR_ERR(ctx->client[I2C_CBUS]);
}
return 0;
-
-fail_hdmi:
- i2c_unregister_device(ctx->client[I2C_HDMI]);
-fail_tpi:
- i2c_unregister_device(ctx->client[I2C_TPI]);
-
- return -ENODEV;
-}
-
-static void sii9234_deinit_resources(struct sii9234 *ctx)
-{
- i2c_unregister_device(ctx->client[I2C_CBUS]);
- i2c_unregister_device(ctx->client[I2C_HDMI]);
- i2c_unregister_device(ctx->client[I2C_TPI]);
}
static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
@@ -950,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client)
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
- sii9234_deinit_resources(ctx);
return 0;
}
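
The conversion to devm_i2c_new_dummy_device() ties each dummy client's lifetime to the main client's device, which is what lets the fail_tpi/fail_hdmi unwind labels and sii9234_deinit_resources() disappear. A minimal sketch of the pattern (example_get_dummy is an illustrative name):

static int example_get_dummy(struct i2c_client *client,
			     struct i2c_client **out, u16 addr)
{
	struct i2c_client *dummy;

	dummy = devm_i2c_new_dummy_device(&client->dev, client->adapter,
					  addr);
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);	/* no manual unregister needed */

	*out = dummy;
	return 0;
}
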
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index bd3165ee5354..4c0eef406eb1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -9,6 +9,7 @@
#include <asm/unaligned.h>
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -1759,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
scancode &= MHL_RCP_KEY_ID_MASK;
- if (!ctx->rc_dev) {
- dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev)
return false;
- }
if (pressed)
rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
@@ -2099,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
struct rc_dev *rc_dev;
int ret;
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev) {
dev_err(ctx->dev, "Failed to allocate RC device\n");
@@ -2213,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_unregister_device(ctx->rc_dev);
}
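
Together with the Kconfig change above ("depends on RC_CORE || !RC_CORE", which forbids building the driver in while rc-core is modular), these IS_ENABLED() guards let the RC paths compile away when CONFIG_RC_CORE is off: the compiler constant-folds the early return, the rc_* calls become unreachable and are eliminated, and no stub functions are needed. A sketch of the pattern (example_rcp_report is an illustrative name):

static void example_rcp_report(struct rc_dev *rc_dev, u8 scancode)
{
	/* Constant-folds to an unconditional return when CONFIG_RC_CORE
	 * is unset; the calls below are then dead code and dropped. */
	if (!IS_ENABLED(CONFIG_RC_CORE) || !rc_dev)
		return;

	rc_keydown(rc_dev, RC_PROTO_CEC, scancode, 0);
	rc_keyup(rc_dev);
}
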
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index ac1e001d0882..70ab4fbdc23e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -285,7 +285,7 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
ret = cec_register_adapter(cec->adap, pdev->dev.parent);
if (ret < 0) {
- cec_notifier_cec_adap_unregister(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
return ret;
}
@@ -302,7 +302,7 @@ static int dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 1d15cf9b6821..d7e65c869415 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -102,6 +102,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
}
dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+ dw_hdmi_set_channel_status(hdmi, hparms->iec.status);
dw_hdmi_set_channel_count(hdmi, hparms->channels);
dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);
@@ -109,6 +110,14 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
hdmi_write(audio, conf0, HDMI_AUD_CONF0);
hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+ return 0;
+}
+
+static int dw_hdmi_i2s_audio_startup(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
dw_hdmi_audio_enable(hdmi);
return 0;
@@ -151,11 +160,23 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
return -EINVAL;
}
+static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
+ .audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
.get_eld = dw_hdmi_i2s_get_eld,
.get_dai_id = dw_hdmi_i2s_get_dai_id,
+ .hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 521d689413c8..67fca439bbfb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -25,7 +25,9 @@
#include <uapi/linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -36,6 +38,7 @@
#include "dw-hdmi-cec.h"
#include "dw-hdmi.h"
+#define DDC_CI_ADDR 0x37
#define DDC_SEGMENT_ADDR 0x30
#define HDMI_EDID_LEN 512
@@ -191,6 +194,10 @@ struct dw_hdmi {
struct mutex cec_notifier_mutex;
struct cec_notifier *cec_notifier;
+
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ enum drm_connector_status last_connector_result;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -215,6 +222,28 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset)
return val;
}
+static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged)
+{
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, plugged);
+}
+
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ bool plugged;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ plugged = hdmi->last_connector_result == connector_status_connected;
+ handle_plugged_change(hdmi, plugged);
+ mutex_unlock(&hdmi->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb);
+
static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data);
@@ -398,6 +427,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
u8 addr = msgs[0].addr;
int i, ret = 0;
+ if (addr == DDC_CI_ADDR)
+ /*
+ * The internal I2C controller does not support the multi-byte
+ * read and write operations needed for DDC/CI.
+ * TOFIX: Blacklist the DDC/CI address until we filter out
+ * unsupported I2C operations.
+ */
+ return -EOPNOTSUPP;
+
dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
for (i = 0; i < num; i++) {
@@ -580,6 +618,26 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
return n;
}
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information of all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, General Purpose Audio (GPA),
+ * or AHB audio DMA (AHBAUDDMA) interface is active
+ * (for S/PDIF interface this information comes from the stream).
+ */
+void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi,
+ u8 *channel_status)
+{
+ /*
+ * Set channel status register for frequency and word length.
+ * Use default values for other registers.
+ */
+ hdmi_writeb(hdmi, channel_status[3], HDMI_FC_AUDSCHNLS7);
+ hdmi_writeb(hdmi, channel_status[4], HDMI_FC_AUDSCHNLS8);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_status);
+
static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
unsigned long pixel_clk, unsigned int sample_rate)
{
@@ -1712,6 +1770,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
HDMI_FC_DATAUTO0_VSD_MASK);
}
+static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi)
+{
+ const struct drm_connector_state *conn_state = hdmi->connector.state;
+ struct hdmi_drm_infoframe frame;
+ u8 buffer[30];
+ ssize_t err;
+ int i;
+
+ if (!hdmi->plat_data->use_drm_infoframe)
+ return;
+
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+
+ err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
+ if (err < 0)
+ return;
+
+ err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err);
+ return;
+ }
+
+ hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0);
+ hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1);
+
+ for (i = 0; i < frame.length; i++)
+ hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i);
+
+ hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+}
+
static void hdmi_av_composer(struct dw_hdmi *hdmi,
const struct drm_display_mode *mode)
{
@@ -2023,7 +2116,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
- hdmi_enable_audio_clk(hdmi, true);
+ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
}
/* not for DVI mode */
@@ -2033,6 +2126,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step F - Configure AVI InfoFrame */
hdmi_config_AVI(hdmi, mode);
hdmi_config_vendor_specific_infoframe(hdmi, mode);
+ hdmi_config_drm_infoframe(hdmi);
} else {
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
}
@@ -2161,6 +2255,7 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
+ enum drm_connector_status result;
mutex_lock(&hdmi->mutex);
hdmi->force = DRM_FORCE_UNSPECIFIED;
@@ -2168,7 +2263,18 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
dw_hdmi_update_phy_mask(hdmi);
mutex_unlock(&hdmi->mutex);
- return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+
+ mutex_lock(&hdmi->mutex);
+ if (result != hdmi->last_connector_result) {
+ dev_dbg(hdmi->dev, "read_hpd result: %d", result);
+ handle_plugged_change(hdmi,
+ result == connector_status_connected);
+ hdmi->last_connector_result = result;
+ }
+ mutex_unlock(&hdmi->mutex);
+
+ return result;
}
static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -2199,6 +2305,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
+ const struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (!old_blob || !new_blob)
+ return old_blob == new_blob;
+
+ if (old_blob->length != new_blob->length)
+ return false;
+
+ return !memcmp(old_blob->data, new_blob->data, old_blob->length);
+}
+
+static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ if (!hdr_metadata_equal(old_state, new_state)) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -2223,6 +2368,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
+ .atomic_check = dw_hdmi_connector_atomic_check,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2243,6 +2389,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
+
drm_connector_attach_encoder(connector, encoder);
cec_fill_conn_info_from_drm(&conn_info, connector);
@@ -2619,6 +2769,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
hdmi->mc_clkdis = 0x7f;
+ hdmi->last_connector_result = connector_status_disconnected;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
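
The plugged-callback plumbing added above gives the audio side jack detection: dw_hdmi_set_plugged_cb() stores the callback and immediately replays the cached connector state, and detect() then reports only state transitions. A hypothetical codec-side callback, matching the hdmi_codec_plugged_cb type from <sound/hdmi-codec.h>:

static void example_plugged_cb(struct device *codec_dev, bool plugged)
{
	/* Invoked once at registration with the cached state, then on
	 * every connect/disconnect transition seen by detect(). */
	dev_dbg(codec_dev, "HDMI sink %s\n",
		plugged ? "plugged" : "unplugged");
}
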
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 6988f12d89d9..1999db05bc3b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -158,6 +158,8 @@
#define HDMI_FC_SPDDEVICEINF 0x1062
#define HDMI_FC_AUDSCONF 0x1063
#define HDMI_FC_AUDSSTAT 0x1064
+#define HDMI_FC_AUDSCHNLS7 0x106e
+#define HDMI_FC_AUDSCHNLS8 0x106f
#define HDMI_FC_DATACH0FILL 0x1070
#define HDMI_FC_DATACH1FILL 0x1071
#define HDMI_FC_DATACH2FILL 0x1072
@@ -252,6 +254,7 @@
#define HDMI_FC_POL2 0x10DB
#define HDMI_FC_PRCONF 0x10E0
#define HDMI_FC_SCRAMBLER_CTRL 0x10E1
+#define HDMI_FC_PACKET_TX_EN 0x10E3
#define HDMI_FC_GMD_STAT 0x1100
#define HDMI_FC_GMD_EN 0x1101
@@ -287,6 +290,37 @@
#define HDMI_FC_GMD_PB26 0x111F
#define HDMI_FC_GMD_PB27 0x1120
+#define HDMI_FC_DRM_UP 0x1167
+#define HDMI_FC_DRM_HB0 0x1168
+#define HDMI_FC_DRM_HB1 0x1169
+#define HDMI_FC_DRM_PB0 0x116A
+#define HDMI_FC_DRM_PB1 0x116B
+#define HDMI_FC_DRM_PB2 0x116C
+#define HDMI_FC_DRM_PB3 0x116D
+#define HDMI_FC_DRM_PB4 0x116E
+#define HDMI_FC_DRM_PB5 0x116F
+#define HDMI_FC_DRM_PB6 0x1170
+#define HDMI_FC_DRM_PB7 0x1171
+#define HDMI_FC_DRM_PB8 0x1172
+#define HDMI_FC_DRM_PB9 0x1173
+#define HDMI_FC_DRM_PB10 0x1174
+#define HDMI_FC_DRM_PB11 0x1175
+#define HDMI_FC_DRM_PB12 0x1176
+#define HDMI_FC_DRM_PB13 0x1177
+#define HDMI_FC_DRM_PB14 0x1178
+#define HDMI_FC_DRM_PB15 0x1179
+#define HDMI_FC_DRM_PB16 0x117A
+#define HDMI_FC_DRM_PB17 0x117B
+#define HDMI_FC_DRM_PB18 0x117C
+#define HDMI_FC_DRM_PB19 0x117D
+#define HDMI_FC_DRM_PB20 0x117E
+#define HDMI_FC_DRM_PB21 0x117F
+#define HDMI_FC_DRM_PB22 0x1180
+#define HDMI_FC_DRM_PB23 0x1181
+#define HDMI_FC_DRM_PB24 0x1182
+#define HDMI_FC_DRM_PB25 0x1183
+#define HDMI_FC_DRM_PB26 0x1184
+
#define HDMI_FC_DBGFORCE 0x1200
#define HDMI_FC_DBGAUD0CH0 0x1201
#define HDMI_FC_DBGAUD1CH0 0x1202
@@ -742,6 +776,11 @@ enum {
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+/* FC_PACKET_TX_EN field values */
+ HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00,
+
/* FC_AVICONF0-FC_AVICONF3 field values */
HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 675442bfc1bd..b6e793bb653c 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -316,7 +316,8 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
@@ -981,7 +982,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct reset_control *apb_rst;
struct dw_mipi_dsi *dsi;
- struct resource *res;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -997,11 +997,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
if (!plat_data->base) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return ERR_PTR(-ENODEV);
-
- dsi->base = devm_ioremap_resource(dev, res);
+ dsi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->base))
return ERR_PTR(-ENODEV);
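
devm_platform_ioremap_resource() collapses the removed two-step lookup into a single call; approximately what it replaces (sketch, index 0, memory resource):

static void __iomem *sketch_map_reg0(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() itself rejects a NULL resource with
	 * -EINVAL, so the removed explicit NULL check was redundant. */
	return devm_ioremap_resource(&pdev->dev, res);
}
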
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 170f162ffa55..db298f550a5a 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -16,6 +16,7 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8a8d605021f0..8029478ffebb 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -228,7 +229,9 @@ static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
struct tc_edp_link {
- struct drm_dp_link base;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned int rate;
+ u8 num_lanes;
u8 assr;
bool scrambler_dis;
bool spread;
@@ -437,9 +440,9 @@ static u32 tc_srcctrl(struct tc_data *tc)
reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
if (tc->link.spread)
reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
- if (tc->link.base.rate != 162000)
+ if (tc->link.rate != 162000)
reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
return reg;
}
@@ -662,23 +665,35 @@ err:
static int tc_get_display_props(struct tc_data *tc)
{
+ u8 revision, num_lanes;
+ unsigned int rate;
int ret;
u8 reg;
/* Read DP Rx Link Capability */
- ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd,
+ DP_RECEIVER_CAP_SIZE);
if (ret < 0)
goto err_dpcd_read;
- if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+
+ revision = tc->link.dpcd[DP_DPCD_REV];
+ rate = drm_dp_max_link_rate(tc->link.dpcd);
+ num_lanes = drm_dp_max_lane_count(tc->link.dpcd);
+
+ if (rate != 162000 && rate != 270000) {
dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
- tc->link.base.rate = 270000;
+ rate = 270000;
}
- if (tc->link.base.num_lanes > 2) {
+ tc->link.rate = rate;
+
+ if (num_lanes > 2) {
dev_dbg(tc->dev, "Falling to 2 lanes\n");
- tc->link.base.num_lanes = 2;
+ num_lanes = 2;
}
+ tc->link.num_lanes = num_lanes;
+
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
if (ret < 0)
goto err_dpcd_read;
@@ -696,11 +711,11 @@ static int tc_get_display_props(struct tc_data *tc)
tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
- tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
- (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
- tc->link.base.num_lanes,
- (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
- "enhanced" : "non-enhanced");
+ revision >> 4, revision & 0x0f,
+ (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.num_lanes,
+ drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ "enhanced" : "default");
dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
tc->link.spread ? "0.5%" : "0.0%",
tc->link.scrambler_dis ? "disabled" : "enabled");
@@ -739,7 +754,7 @@ static int tc_set_video_mode(struct tc_data *tc,
*/
in_bw = mode->clock * bits_per_pixel / 8;
- out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+ out_bw = tc->link.num_lanes * tc->link.rate;
max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
dev_dbg(tc->dev, "set mode %dx%d\n",
@@ -901,7 +916,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
if (ret)
return ret;
@@ -911,7 +926,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
@@ -974,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc)
}
/* Setup Link & DPRx Config for Training */
- ret = drm_dp_link_configure(aux, &tc->link.base);
+ tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate);
+ tmp[1] = tc->link.num_lanes;
+
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
+ tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1018,9 +1039,8 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Enable DP0 to start Link Training */
ret = regmap_write(tc->regmap, DP0CTL,
- ((tc->link.base.capabilities &
- DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
- DP_EN);
+ (drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ EF_EN : 0) | DP_EN);
if (ret)
return ret;
@@ -1099,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = -ENODEV;
}
- if (tc->link.base.num_lanes == 2) {
+ if (tc->link.num_lanes == 2) {
value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS;
if (value != DP_CHANNEL_EQ_BITS) {
@@ -1170,7 +1190,7 @@ static int tc_stream_enable(struct tc_data *tc)
return ret;
value = VID_MN_GEN | DP_EN;
- if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
value |= EF_EN;
ret = regmap_write(tc->regmap, DP0CTL, value);
if (ret)
@@ -1296,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
return MODE_CLOCK_HIGH;
req = mode->clock * bits_per_pixel / 8;
- avail = tc->link.base.num_lanes * tc->link.base.rate;
+ avail = tc->link.num_lanes * tc->link.rate;
if (req > avail)
return MODE_BAD;
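
The bandwidth checks above compare stream payload against link capacity in matching units: mode->clock is in kHz and tc->link.rate is the per-lane symbol rate in kHz, and since 8b/10b coding carries one payload byte per symbol, both req and avail come out in kB/s. A worked back-of-the-envelope sketch (example_mode_fits is an illustrative name):

static bool example_mode_fits(int clock_khz, int bpp, int lanes, int rate)
{
	int req = clock_khz * bpp / 8;	/* stream payload, kB/s */
	int avail = lanes * rate;	/* link capacity, kB/s  */

	/* e.g. 1080p60 at 24 bpp: 148500 * 24 / 8 = 445500 kB/s,
	 * which fits on 2 lanes at 2.7G: 2 * 270000 = 540000 kB/s. */
	return req <= avail;
}
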
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 0a580957c8cf..43abf01ebd4c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 61cc2354ef1b..aa3198dc9903 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 36a69aec8a4b..248c9f765c45 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
/* cirrus (simple) display pipe */
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
@@ -510,7 +510,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
-DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
deleted file mode 100644
index 1f73916e528e..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#ifndef __CIRRUS_DRV_H__
-#define __CIRRUS_DRV_H__
-
-#include <video/vga.h>
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
-#include <drm/drm_gem.h>
-
-#define DRIVER_AUTHOR "Matthew Garrett"
-
-#define DRIVER_NAME "cirrus"
-#define DRIVER_DESC "qemu Cirrus emulation"
-#define DRIVER_DATE "20110418"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-#define CIRRUSFB_CONN_LIMIT 1
-
-#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
-#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
-
-#define SEQ_INDEX 4
-#define SEQ_DATA 5
-
-#define WREG_SEQ(reg, v) \
- do { \
- WREG8(SEQ_INDEX, reg); \
- WREG8(SEQ_DATA, v); \
- } while (0) \
-
-#define CRT_INDEX 0x14
-#define CRT_DATA 0x15
-
-#define WREG_CRT(reg, v) \
- do { \
- WREG8(CRT_INDEX, reg); \
- WREG8(CRT_DATA, v); \
- } while (0) \
-
-#define GFX_INDEX 0xe
-#define GFX_DATA 0xf
-
-#define WREG_GFX(reg, v) \
- do { \
- WREG8(GFX_INDEX, reg); \
- WREG8(GFX_DATA, v); \
- } while (0) \
-
-/*
- * Cirrus has a "hidden" DAC register that can be accessed by writing to
- * the pixel mask register to reset the state, then reading from the register
- * four times. The next write will then pass to the DAC
- */
-#define VGA_DAC_MASK 0x6
-
-#define WREG_HDR(v) \
- do { \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- WREG8(VGA_DAC_MASK, v); \
- } while (0) \
-
-
-#define CIRRUS_MAX_FB_HEIGHT 4096
-#define CIRRUS_MAX_FB_WIDTH 4096
-
-#define CIRRUS_DPMS_CLEARED (-1)
-
-#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
-#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-
-struct cirrus_crtc {
- struct drm_crtc base;
- int last_dpms;
- bool enabled;
-};
-
-struct cirrus_fbdev;
-struct cirrus_mode_info {
- struct cirrus_crtc *crtc;
- /* pointer to fbdev info structure */
- struct cirrus_fbdev *gfbdev;
-};
-
-struct cirrus_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-struct cirrus_connector {
- struct drm_connector base;
-};
-
-struct cirrus_mc {
- resource_size_t vram_size;
- resource_size_t vram_base;
-};
-
-struct cirrus_device {
- struct drm_device *dev;
- unsigned long flags;
-
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
- void __iomem *rmmio;
-
- struct cirrus_mc mc;
- struct cirrus_mode_info mode_info;
-
- int num_crtc;
- int fb_mtrr;
-
- struct {
- struct ttm_bo_device bdev;
- } ttm;
- bool mm_inited;
-};
-
-
-struct cirrus_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct drm_framebuffer *gfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
-struct cirrus_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
-
-static inline struct cirrus_bo *
-cirrus_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct cirrus_bo, bo);
-}
-
-
-#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-
- /* cirrus_main.c */
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags);
-void cirrus_device_fini(struct cirrus_device *cdev);
-void cirrus_gem_free_object(struct drm_gem_object *obj);
-int cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch);
-
- /* cirrus_display.c */
-int cirrus_modeset_init(struct cirrus_device *cdev);
-void cirrus_modeset_fini(struct cirrus_device *cdev);
-
- /* cirrus_fbdev.c */
-int cirrus_fbdev_init(struct cirrus_device *cdev);
-void cirrus_fbdev_fini(struct cirrus_device *cdev);
-
-
-
- /* cirrus_irq.c */
-void cirrus_driver_irq_preinstall(struct drm_device *dev);
-int cirrus_driver_irq_postinstall(struct drm_device *dev);
-void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
-
- /* cirrus_kms.c */
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
-void cirrus_driver_unload(struct drm_device *dev);
-extern struct drm_ioctl_desc cirrus_ioctls[];
-extern int cirrus_max_ioctl;
-
-int cirrus_mm_init(struct cirrus_device *cirrus);
-void cirrus_mm_fini(struct cirrus_device *cirrus);
-void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
-int cirrus_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct cirrus_bo **pcirrusbo);
-int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
-
-static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-int cirrus_bo_push_sysram(struct cirrus_bo *bo);
-int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
-
-extern int cirrus_bpp;
-
-#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2dd2cd87cdbb..b191d39c071d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_plane_helper.h>
@@ -97,17 +98,6 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
-/*
- * For connectors that support multiple encoders, either the
- * .atomic_best_encoder() or .best_encoder() operation must be implemented.
- */
-static struct drm_encoder *
-pick_single_encoder_for_connector(struct drm_connector *connector)
-{
- WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
@@ -135,7 +125,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -359,7 +349,7 @@ update_connector_routing(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -482,7 +472,7 @@ mode_fixup(struct drm_atomic_state *state)
continue;
funcs = crtc->helper_private;
- if (!funcs->mode_fixup)
+ if (!funcs || !funcs->mode_fixup)
continue;
ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 7a26bfb5329c..0d466d3b0809 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1405,7 +1405,7 @@ retry:
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
- if (unlikely(drm_debug & DRM_UT_STATE))
+ if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_state(state);
ret = drm_atomic_commit(state);
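
drm_debug_enabled() hides the raw `drm_debug & category` test behind a helper. A standalone sketch of the same pattern (the names here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define UT_STATE 0x08                 /* illustrative category bit */

    static unsigned int debug_mask = UT_STATE;

    static bool debug_enabled(unsigned int category)
    {
        return debug_mask & category;
    }

    int main(void)
    {
        if (debug_enabled(UT_STATE))
            puts("would print atomic state");
        return 0;
    }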
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 37ac168fcb60..121481f6aa71 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -130,7 +130,12 @@
* Z position is set up with drm_plane_create_zpos_immutable_property() and
* drm_plane_create_zpos_property(). It controls the visibility of overlapping
* planes. Without this property the primary plane is always below the cursor
- * plane, and ordering between all other planes is undefined.
+ * plane, and ordering between all other planes is undefined. The positive
+ * Z axis points towards the user, i.e. planes with lower Z position values
+ * are underneath planes with higher Z position values. Two planes with the
+ * same Z position value have undefined ordering. Note that the Z position
+ * value can also be immutable, to inform userspace about the hard-coded
+ * stacking of planes, see drm_plane_create_zpos_immutable_property().
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
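
To illustrate the zpos documentation above, a hedged sketch of how a driver could expose the property during plane init (plane, plane_index and the 0..3 range are assumptions):

    /* Mutable zpos in [0, 3], initial value taken from the plane's index. */
    ret = drm_plane_create_zpos_property(plane, plane_index, 0, 3);
    if (ret)
        return ret;

    /* Hardware with fixed stacking would instead expose an immutable value:
     * drm_plane_create_zpos_immutable_property(plane, plane_index);
     */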
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d..03e01b000f7a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
{
unsigned long i;
- mb();
+ mb(); /* Full memory barrier used before so that CLFLUSH is ordered */
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
- mb();
+ mb(); /* Also used after CLFLUSH so that all cache lines are flushed */
}
#endif
@@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#elif defined(__powerpc__)
unsigned long i;
+
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
@@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
- mb();
+ mb(); /* CLFLUSH is ordered only by using memory barriers */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
- mb();
+ mb(); /* Make sure that all cache line entries are flushed */
return;
}
@@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
+
addr = (void *)(((unsigned long)addr) & -size);
- mb();
+ mb(); /* CLFLUSH is only ordered with a full memory barrier */
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
- mb();
+ mb(); /* Ensure that every data cache line entry is flushed */
return;
}
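
A userspace sketch of the barrier-bracketed flush pattern the comments describe, assuming x86 intrinsics and a 64-byte line size (the kernel derives the real size from cpuid):

    #include <immintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    static void flush_range(void *addr, size_t len)
    {
        const uintptr_t line = 64;      /* assumed cache line size */
        char *p = (char *)((uintptr_t)addr & ~(line - 1));
        char *end = (char *)addr + len;

        _mm_mfence();                   /* order prior stores before the flushes */
        for (; p < end; p += line)
            _mm_clflush(p);
        _mm_mfence();                   /* ensure the flushes have completed */
    }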
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index c8922b7cac09..895b73f23079 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -415,9 +415,8 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->possible_crtcs & drm_crtc_mask(crtc))
return true;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4c766624b20d..2166000ed057 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -365,8 +365,6 @@ EXPORT_SYMBOL(drm_connector_attach_edid_property);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- int i;
-
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
@@ -381,18 +379,15 @@ int drm_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
+ connector->possible_encoders |= drm_encoder_mask(encoder);
+
+ return 0;
}
EXPORT_SYMBOL(drm_connector_attach_encoder);
/**
- * drm_connector_has_possible_encoder - check if the connector and encoder are assosicated with each other
+ * drm_connector_has_possible_encoder - check if the connector and encoder are
+ * associated with each other
* @connector: the connector
* @encoder: the encoder
*
@@ -402,15 +397,7 @@ EXPORT_SYMBOL(drm_connector_attach_encoder);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- struct drm_encoder *enc;
- int i;
-
- drm_connector_for_each_possible_encoder(connector, enc, i) {
- if (enc == encoder)
- return true;
- }
-
- return false;
+ return connector->possible_encoders & drm_encoder_mask(encoder);
}
EXPORT_SYMBOL(drm_connector_has_possible_encoder);
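
A standalone sketch of the new bookkeeping: a 32-bit mask replaces the fixed encoder_ids[] array, so attaching is a bit-or, lookup a bit-test, and counting a popcount (encoder_mask() mimics drm_encoder_mask(); __builtin_popcount() stands in for hweight32()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t encoder_mask(unsigned int index)
    {
        return 1u << index;
    }

    int main(void)
    {
        uint32_t possible_encoders = 0;

        possible_encoders |= encoder_mask(0);   /* attach encoder 0 */
        possible_encoders |= encoder_mask(3);   /* attach encoder 3 */

        bool has = possible_encoders & encoder_mask(3);
        int count = __builtin_popcount(possible_encoders);

        printf("has=%d count=%d\n", has, count);    /* has=1 count=2 */
        return 0;
    }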
@@ -480,7 +467,10 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * which can be hotplugged after drm_dev_register() has been called already,
+ * e.g. DP MST connectors. All other connectors will be registered automatically
+ * when calling drm_dev_register().
*
* Returns:
* Zero on success, error code on failure.
@@ -526,7 +516,10 @@ EXPORT_SYMBOL(drm_connector_register);
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector
+ * Unregister userspace interfaces for a connector. Only call this for
+ * connectors which have been registered explicitly by calling
+ * drm_connector_register(), since connectors are unregistered automatically
+ * when drm_dev_unregister() is called.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
@@ -719,7 +712,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
__drm_connector_put_safe(iter->conn);
spin_unlock_irqrestore(&config->connector_list_lock, flags);
}
- lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
+ lock_release(&connector_list_iter_dep_map, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -882,6 +875,38 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
};
+/*
+ * As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry
+ * Format Table 2-120
+ */
+static const struct drm_prop_enum_list dp_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" },
+ /* Colorimetry based on scRGB (IEC 61966-2-2) */
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on SMPTE RP 431-2 */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+};
+
/**
* DOC: standard connector properties
*
@@ -1674,7 +1699,6 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * drm_mode_create_colorspace_property - create colorspace property
* This property helps select a suitable colorspace based on the sink
* capability. Modern sink devices support wider gamut like BT2020.
* This helps switch to BT2020 mode if the BT2020 encoded video stream
@@ -1694,32 +1718,68 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* - This property is just to inform sink what colorspace
* source is trying to drive.
*
+ * Because HDMI and DP have different colorspaces,
+ * drm_mode_create_hdmi_colorspace_property() is used for HDMI connectors and
+ * drm_mode_create_dp_colorspace_property() is used for DP connectors.
+ */
+
+/**
+ * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
* Called by a driver the first time it's needed, must be attached to desired
- * connectors.
+ * HDMI connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
-int drm_mode_create_colorspace_property(struct drm_connector *connector)
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Colorspace",
- hdmi_colorspaces,
- ARRAY_SIZE(hdmi_colorspaces));
- if (!prop)
- return -ENOMEM;
- } else {
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ if (connector->colorspace_property)
return 0;
- }
- connector->colorspace_property = prop;
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
+
+/**
+ * drm_mode_create_dp_colorspace_property - create dp colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * DP connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (connector->colorspace_property)
+ return 0;
+
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ dp_colorspaces,
+ ARRAY_SIZE(dp_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property);
/**
* drm_mode_create_content_type_property - create content type property
@@ -2121,7 +2181,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
int encoders_count = 0;
int ret = 0;
int copied = 0;
- int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *encoder_ptr;
@@ -2136,14 +2195,13 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- drm_connector_for_each_possible_encoder(connector, encoder, i)
- encoders_count++;
+ encoders_count = hweight32(connector->possible_encoders);
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (put_user(encoder->base.id, encoder_ptr + copied)) {
ret = -EFAULT;
goto out;
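
With the colorspace property creator now split per connector type, a hedged sketch of how a driver would pick the matching creator and attach the property (the surrounding init code and the zero initial value are assumptions):

    if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
        ret = drm_mode_create_dp_colorspace_property(connector);
    else
        ret = drm_mode_create_hdmi_colorspace_property(connector);
    if (ret)
        return ret;

    drm_object_attach_property(&connector->base,
                               connector->colorspace_property, 0);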
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 80ddf13ad996..499b05aaccfc 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -459,6 +460,22 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
__drm_helper_disable_unused_functions(dev);
}
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ WARN_ON(hweight32(connector->possible_encoders) > 1);
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
@@ -624,7 +641,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
- new_encoder = connector_funcs->best_encoder(connector);
+ if (connector_funcs->best_encoder)
+ new_encoder = connector_funcs->best_encoder(connector);
+ else
+ new_encoder = drm_connector_get_single_encoder(connector);
+
/* if we can't get an encoder for a connector
we are setting now - then fail */
if (new_encoder == NULL)
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index b5ac1581e623..f0a66ef47e5a 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -75,3 +75,6 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
+
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 8230dac01a89..3a4126dc2520 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -212,8 +212,14 @@ retry:
drm_for_each_plane(plane, fb->dev) {
struct drm_plane_state *plane_state;
- if (plane->state->fb != fb)
+ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+ if (ret)
+ goto out;
+
+ if (plane->state->fb != fb) {
+ drm_modeset_unlock(&plane->mutex);
continue;
+ }
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index be1b7ba92ffe..ca3c55c6b815 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -334,19 +334,17 @@ static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
return LINE_LEN(crc->values_cnt);
}
-static unsigned int crtc_crc_poll(struct file *file, poll_table *wait)
+static __poll_t crtc_crc_poll(struct file *file, poll_table *wait)
{
struct drm_crtc *crtc = file->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
- unsigned ret;
+ __poll_t ret = 0;
poll_wait(file, &crc->wq, wait);
spin_lock_irq(&crc->lock);
if (crc->source && crtc_crc_data_count(crc))
- ret = POLLIN | POLLRDNORM;
- else
- ret = 0;
+ ret |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irq(&crc->lock);
return ret;
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index b15cee85b702..3ab2609f9ec7 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -8,9 +8,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <drm/drm_dp_helper.h>
+
#include <media/cec.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_helper.h>
+
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
@@ -295,7 +299,10 @@ static void drm_dp_cec_unregister_work(struct work_struct *work)
*/
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
- u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ struct drm_connector *connector = aux->cec.connector;
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
+ CEC_CAP_CONNECTOR_INFO;
+ struct cec_connector_info conn_info;
unsigned int num_las = 1;
u8 cap;
@@ -344,13 +351,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
/* Create a new adapter */
aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
- aux, aux->cec.name, cec_caps,
+ aux, connector->name, cec_caps,
num_las);
if (IS_ERR(aux->cec.adap)) {
aux->cec.adap = NULL;
goto unlock;
}
- if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(aux->cec.adap, &conn_info);
+
+ if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
cec_delete_adapter(aux->cec.adap);
aux->cec.adap = NULL;
} else {
@@ -406,22 +417,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid);
/**
* drm_dp_cec_register_connector() - register a new connector
* @aux: DisplayPort AUX channel
- * @name: name of the CEC device
- * @parent: parent device
+ * @connector: drm connector
*
* A new connector was registered with associated CEC adapter name and
* CEC adapter parent device. After registering the name and parent
* drm_dp_cec_set_edid() is called to check if the connector supports
* CEC and to register a CEC adapter if that is the case.
*/
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
- struct device *parent)
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
{
WARN_ON(aux->cec.adap);
if (WARN_ON(!aux->transfer))
return;
- aux->cec.name = name;
- aux->cec.parent = parent;
+ aux->cec.connector = connector;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
}
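
The call-site change this implies, sketched for a hypothetical driver (dp and its embedded aux are assumptions; the parameters follow the old and new kernel-doc above):

    /* before: CEC name and parent device passed separately */
    drm_dp_cec_register_connector(&dp->aux, connector->name, dev->dev);

    /* after: the connector supplies both, plus the connector info */
    drm_dp_cec_register_connector(&dp->aux, connector);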
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffc68d305afe..2c7870aef469 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -120,33 +120,49 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane)
+{
+ unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2;
+ u8 value = dp_link_status(link_status, offset);
+
+ return (value >> (lane << 1)) & 0x3;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
- udelay(100);
+ rd_interval = 100;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0)
- udelay(400);
+ rd_interval = 400;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
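
A standalone sketch of the new delay math: the DPCD field counts in units of 4 ms, zero (or DPCD >= 1.4, for clock recovery) means 100 us, and the code now sleeps in a [t, 2t] range instead of busy-waiting:

    #include <stdio.h>

    #define USEC_PER_MSEC 1000UL

    static unsigned long cr_delay_us(unsigned char rd_interval, int dpcd_ge_14)
    {
        if (rd_interval == 0 || dpcd_ge_14)
            return 100;
        return rd_interval * 4 * USEC_PER_MSEC;
    }

    int main(void)
    {
        /* prints "100 8000": default interval and a 2 * 4 ms interval */
        printf("%lu %lu\n", cr_delay_us(0, 0), cr_delay_us(2, 0));
        return 0;
    }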
@@ -220,7 +236,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
ret = aux->transfer(aux, &msg);
-
if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
@@ -337,134 +352,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
- * drm_dp_link_probe() - probe a DisplayPort link for capabilities
- * @aux: DisplayPort AUX channel
- * @link: pointer to structure in which to return link capabilities
- *
- * The structure filled in by this function can usually be passed directly
- * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
- * configure the link based on the link's capabilities.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[3];
- int err;
-
- memset(link, 0, sizeof(*link));
-
- err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
- if (err < 0)
- return err;
-
- link->revision = values[0];
- link->rate = drm_dp_bw_code_to_link_rate(values[1]);
- link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
-
- if (values[2] & DP_ENHANCED_FRAME_CAP)
- link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_probe);
-
-/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_up);
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_down);
-
-/**
- * drm_dp_link_configure() - configure a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[2];
- int err;
-
- values[0] = drm_dp_link_rate_to_bw_code(link->rate);
- values[1] = link->num_lanes;
-
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_configure);
-
-/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -1109,6 +996,14 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* @aux: DisplayPort AUX channel
*
* Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ * This should only be called when the underlying &struct drm_connector is
+ * initialized already. Therefore the best place to call this is from
+ * &drm_connector_funcs.late_register. Note that drivers which don't follow this
+ * will Oops when CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ *
+ * Drivers which need to use the aux channel before that point (e.g. at driver
+ * load time, before drm_dev_register() has been called) need to call
+ * drm_dp_aux_init().
*
* Returns 0 on success or a negative error code on failure.
*/
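
A minimal sketch of the recommended pattern, assuming a hypothetical my_connector wrapper that embeds the AUX channel:

    static int my_connector_late_register(struct drm_connector *connector)
    {
        struct my_connector *conn = to_my_connector(connector); /* hypothetical */

        return drm_dp_aux_register(&conn->aux);
    }

    static const struct drm_connector_funcs my_connector_funcs = {
        .late_register = my_connector_late_register,
        /* remaining hooks elided */
    };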
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 82add736e17d..ae5809a1f19a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -28,15 +28,22 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stacktrace.h>
+#include <linux/sort.h>
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+#endif
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm_crtc_helper_internal.h"
+#include "drm_dp_mst_topology_internal.h"
/**
* DOC: dp mst helper
@@ -45,9 +52,14 @@
* protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and receiving of sideband msgs.
*/
+struct drm_dp_pending_up_req {
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct list_head next;
+};
+
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
-static int test_calc_pbn_mode(void);
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
@@ -62,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -74,6 +86,8 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+#define DBG_PREFIX "[dp_mst]"
+
#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
@@ -130,6 +144,43 @@ static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
}
#undef DP_STR
+#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
+
+static const char *drm_dp_mst_sideband_tx_state_str(int state)
+{
+ static const char * const sideband_reason_str[] = {
+ DP_STR(QUEUED),
+ DP_STR(START_SEND),
+ DP_STR(SENT),
+ DP_STR(RX),
+ DP_STR(TIMEOUT),
+ };
+
+ if (state >= ARRAY_SIZE(sideband_reason_str) ||
+ !sideband_reason_str[state])
+ return "unknown";
+
+ return sideband_reason_str[state];
+}
+
+static int
+drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
+{
+ int i;
+ u8 unpacked_rad[16];
+
+ for (i = 0; i < lct; i++) {
+ if (i % 2)
+ unpacked_rad[i] = rad[i / 2] >> 4;
+ else
+ unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
+ }
+
+ /* TODO: Eventually add something to printk so we can format the rad
+ * like this: 1.2.3
+ */
+ return snprintf(out, len, "%*phC", lct, unpacked_rad);
+}
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
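
A standalone version of the nibble unpacking done by drm_dp_mst_rad_to_str() above: each RAD byte packs two 4-bit entries, with even indices taking the low nibble and odd indices the high nibble, exactly as that loop does:

    #include <stdio.h>

    static void unpack_rad(const unsigned char *rad, int lct, unsigned char *out)
    {
        for (int i = 0; i < lct; i++) {
            if (i % 2)
                out[i] = rad[i / 2] >> 4;
            else
                out[i] = rad[i / 2] & 0xf;
        }
    }

    int main(void)
    {
        const unsigned char rad[8] = { 0x21 };
        unsigned char out[16] = { 0 };

        unpack_rad(rad, 2, out);
        printf("%x.%x\n", out[0], out[1]);  /* prints "1.2" */
        return 0;
    }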
@@ -262,8 +313,9 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
return true;
}
-static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
- struct drm_dp_sideband_msg_tx *raw)
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
int i;
@@ -272,6 +324,8 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
idx++;
break;
@@ -359,14 +413,253 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+ }
+ raw->cur_len = idx;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
+
+/* Decode a sideband request we've encoded, mainly used for debugging */
+int
+drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req)
+{
+ const u8 *buf = raw->msg;
+ int i, idx = 0;
+ req->req_type = buf[idx++] & 0x7f;
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
- buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
- idx++;
+ req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ {
+ struct drm_dp_allocate_payload *a =
+ &req->u.allocate_payload;
+
+ a->number_sdp_streams = buf[idx] & 0xf;
+ a->port_number = (buf[idx] >> 4) & 0xf;
+
+ WARN_ON(buf[++idx] & 0x80);
+ a->vcpi = buf[idx] & 0x7f;
+
+ a->pbn = buf[++idx] << 8;
+ a->pbn |= buf[++idx];
+
+ idx++;
+ for (i = 0; i < a->number_sdp_streams; i++) {
+ a->sdp_stream_sink[i] =
+ (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
+ }
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
+ WARN_ON(buf[++idx] & 0x80);
+ req->u.query_payload.vcpi = buf[idx] & 0x7f;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ {
+ struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
+
+ r->port_number = (buf[idx] >> 4) & 0xf;
+
+ r->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ r->dpcd_address |= buf[++idx] & 0xff;
+
+ r->num_bytes = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ {
+ struct drm_dp_remote_dpcd_write *w =
+ &req->u.dpcd_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+
+ w->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ w->dpcd_address |= buf[++idx] & 0xff;
+
+ w->num_bytes = buf[++idx];
+
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ case DP_REMOTE_I2C_READ:
+ {
+ struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
+ struct drm_dp_remote_i2c_read_tx *tx;
+ bool failed = false;
+
+ r->num_transactions = buf[idx] & 0x3;
+ r->port_number = (buf[idx] >> 4) & 0xf;
+ for (i = 0; i < r->num_transactions; i++) {
+ tx = &r->transactions[i];
+
+ tx->i2c_dev_id = buf[++idx] & 0x7f;
+ tx->num_bytes = buf[++idx];
+ tx->bytes = kmemdup(&buf[++idx],
+ tx->num_bytes,
+ GFP_KERNEL);
+ if (!tx->bytes) {
+ failed = true;
+ break;
+ }
+ idx += tx->num_bytes;
+ tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
+ tx->i2c_transaction_delay = buf[idx] & 0xf;
+ }
+
+ if (failed) {
+ for (i = 0; i < r->num_transactions; i++)
+ kfree(tx->bytes);
+ return -ENOMEM;
+ }
+
+ r->read_i2c_device_id = buf[++idx] & 0x7f;
+ r->num_bytes_read = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ {
+ struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+ w->write_i2c_device_id = buf[++idx] & 0x7f;
+ w->num_bytes = buf[++idx];
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
+
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer)
+{
+ int i;
+
+#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
+ if (req->req_type == DP_LINK_ADDRESS) {
+ /* No contents to print */
+ P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
+ return;
+ }
+
+ P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
+ indent++;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ P("port=%d\n", req->u.port_num.port_number);
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
+ req->u.allocate_payload.port_number,
+ req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.sdp_stream_sink);
+ break;
+ case DP_QUERY_PAYLOAD:
+ P("port=%d vcpi=%d\n",
+ req->u.query_payload.port_number,
+ req->u.query_payload.vcpi);
+ break;
+ case DP_REMOTE_DPCD_READ:
+ P("port=%d dpcd_addr=%05x len=%d\n",
+ req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
+ req->u.dpcd_read.num_bytes);
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ P("port=%d addr=%05x len=%d: %*ph\n",
+ req->u.dpcd_write.port_number,
+ req->u.dpcd_write.dpcd_address,
+ req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
+ req->u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ P("port=%d num_tx=%d id=%d size=%d:\n",
+ req->u.i2c_read.port_number,
+ req->u.i2c_read.num_transactions,
+ req->u.i2c_read.read_i2c_device_id,
+ req->u.i2c_read.num_bytes_read);
+
+ indent++;
+ for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
+ const struct drm_dp_remote_i2c_read_tx *rtx =
+ &req->u.i2c_read.transactions[i];
+
+ P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
+ i, rtx->i2c_dev_id, rtx->num_bytes,
+ rtx->no_stop_bit, rtx->i2c_transaction_delay,
+ rtx->num_bytes, rtx->bytes);
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ P("port=%d id=%d size=%d: %*ph\n",
+ req->u.i2c_write.port_number,
+ req->u.i2c_write.write_i2c_device_id,
+ req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
+ req->u.i2c_write.bytes);
+ break;
+ default:
+ P("???\n");
+ break;
+ }
+#undef P
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
+
+static inline void
+drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
+ const struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ char buf[64];
+ int ret;
+ int i;
+
+ drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
+ sizeof(buf));
+ drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
+ txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
+ drm_dp_mst_sideband_tx_state_str(txmsg->state),
+ txmsg->path_msg, buf);
+
+ ret = drm_dp_decode_sideband_req(txmsg, &req);
+ if (ret) {
+ drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
+ return;
+ }
+ drm_dp_dump_sideband_msg_req_body(&req, 1, p);
+
+ switch (req.req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(req.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < req.u.i2c_read.num_transactions; i++)
+ kfree(req.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(req.u.i2c_write.bytes);
break;
}
- raw->cur_len = idx;
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
@@ -842,11 +1135,11 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
clear_bit(vcpi - 1, &mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i])
- if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
- mgr->proposed_vcpis[i] = NULL;
- clear_bit(i + 1, &mgr->payload_mask);
- }
+ if (mgr->proposed_vcpis[i] &&
+ mgr->proposed_vcpis[i]->vcpi == vcpi) {
+ mgr->proposed_vcpis[i] = NULL;
+ clear_bit(i + 1, &mgr->payload_mask);
+ }
}
mutex_unlock(&mgr->payload_lock);
}
@@ -899,6 +1192,11 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
}
out:
+ if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
mutex_unlock(&mgr->qlock);
return ret;
@@ -1108,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
-static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+
+#define STACK_DEPTH 8
+
+static noinline void
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_ref_history *history,
+ enum drm_dp_mst_topology_ref_type type)
{
- struct drm_dp_mst_branch *mstb =
- container_of(kref, struct drm_dp_mst_branch, topology_kref);
- struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
- bool wake_tx = false;
+ struct drm_dp_mst_topology_ref_entry *entry = NULL;
+ depot_stack_handle_t backtrace;
+ ulong stack_entries[STACK_DEPTH];
+ uint n;
+ int i;
- mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
- list_del(&port->next);
- drm_dp_mst_topology_put_port(port);
+ n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
+ backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
+ if (!backtrace)
+ return;
+
+ /* Try to find an existing entry for this backtrace */
+ for (i = 0; i < history->len; i++) {
+ if (history->entries[i].backtrace == backtrace) {
+ entry = &history->entries[i];
+ break;
+ }
}
- mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
- mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
+ /* Otherwise add one */
+ if (!entry) {
+ struct drm_dp_mst_topology_ref_entry *new;
+ int new_len = history->len + 1;
+
+ new = krealloc(history->entries, sizeof(*new) * new_len,
+ GFP_KERNEL);
+ if (!new)
+ return;
+
+ entry = &new[history->len];
+ history->len = new_len;
+ history->entries = new;
+
+ entry->backtrace = backtrace;
+ entry->type = type;
+ entry->count = 0;
}
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
- wake_tx = true;
+ entry->count++;
+ entry->ts_nsec = ktime_get_ns();
+}
+
+static int
+topology_ref_history_cmp(const void *a, const void *b)
+{
+ const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
+
+ if (entry_a->ts_nsec > entry_b->ts_nsec)
+ return 1;
+ else if (entry_a->ts_nsec < entry_b->ts_nsec)
+ return -1;
+ else
+ return 0;
+}
+
+static inline const char *
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
+{
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
+ return "get";
+ else
+ return "put";
+}
+
+static void
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+ void *ptr, const char *type_str)
+{
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ int i;
+
+ if (!buf)
+ return;
+
+ if (!history->len)
+ goto out;
+
+ /* First, sort the list so that it goes from oldest to newest
+ * reference entry
+ */
+ sort(history->entries, history->len, sizeof(*history->entries),
+ topology_ref_history_cmp, NULL);
+
+ drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
+ type_str, ptr);
+
+ for (i = 0; i < history->len; i++) {
+ const struct drm_dp_mst_topology_ref_entry *entry =
+ &history->entries[i];
+ ulong *entries;
+ uint nr_entries;
+ u64 ts_nsec = entry->ts_nsec;
+ u32 rem_nsec = do_div(ts_nsec, 1000000000);
+
+ nr_entries = stack_depot_fetch(entry->backtrace, &entries);
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+
+ drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
+ entry->count,
+ topology_ref_type_to_str(entry->type),
+ ts_nsec, rem_nsec / 1000, buf);
}
- mutex_unlock(&mstb->mgr->qlock);
- if (wake_tx)
- wake_up_all(&mstb->mgr->tx_waitq);
+ /* Now free the history, since this is the only time we expose it */
+ kfree(history->entries);
+out:
+ kfree(buf);
+}
- drm_dp_mst_put_mstb_malloc(mstb);
+static __always_inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
+{
+ __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
+ "MSTB");
+}
+
+static __always_inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
+{
+ __dump_topology_ref_history(&port->topology_ref_history, port,
+ "Port");
+}
+
+static __always_inline void
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
+}
+
+static __always_inline void
+save_port_topology_ref(struct drm_dp_mst_port *port,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(port->mgr, &port->topology_ref_history, type);
+}
+
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->topology_ref_history_lock);
+}
+
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_unlock(&mgr->topology_ref_history_lock);
+}
+#else
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
+static inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+#define save_mstb_topology_ref(mstb, type)
+#define save_port_topology_ref(port, type)
+#endif
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+
+ drm_dp_mst_dump_mstb_topology_history(mstb);
+
+ INIT_LIST_HEAD(&mstb->destroy_next);
+
+ /*
+ * This can get called under mgr->mutex, so we need to perform the
+ * actual destruction of the mstb in another worker
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1168,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- int ret = kref_get_unless_zero(&mstb->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("mstb %p (%d)\n", mstb,
- kref_read(&mstb->topology_kref));
+ topology_ref_history_lock(mstb->mgr);
+ ret = kref_get_unless_zero(&mstb->topology_kref);
+ if (ret) {
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref));
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+
+ topology_ref_history_unlock(mstb->mgr);
return ret;
}
@@ -1193,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
*/
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+
+ topology_ref_history_unlock(mstb->mgr);
}
/**
@@ -1213,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref) - 1);
- kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
-}
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
-{
- struct drm_dp_mst_branch *mstb;
-
- switch (old_pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mstb = port->mstb;
- port->mstb = NULL;
- drm_dp_mst_topology_put_mstb(mstb);
- break;
- }
+ topology_ref_history_unlock(mstb->mgr);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
@@ -1242,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref)
container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
- if (!port->input) {
- kfree(port->cached_edid);
+ drm_dp_mst_dump_port_topology_history(port);
- /*
- * The only time we don't have a connector
- * on an output port is if the connector init
- * fails.
- */
- if (port->connector) {
- /* we can't destroy the connector here, as
- * we might be holding the mode_config.mutex
- * from an EDID retrieval */
-
- mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->next, &mgr->destroy_connector_list);
- mutex_unlock(&mgr->destroy_connector_lock);
- schedule_work(&mgr->destroy_connector_work);
- return;
- }
- /* no need to clean up vcpi
- * as if we have no connector we never setup a vcpi */
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ /* There's nothing that needs locking to destroy an input port yet */
+ if (port->input) {
+ drm_dp_mst_put_port_malloc(port);
+ return;
}
- drm_dp_mst_put_port_malloc(port);
+
+ kfree(port->cached_edid);
+
+ /*
+ * we can't destroy the connector here, as we might be holding the
+ * mode_config.mutex from an EDID retrieval
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&port->next, &mgr->destroy_port_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1294,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- int ret = kref_get_unless_zero(&port->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("port %p (%d)\n", port,
- kref_read(&port->topology_kref));
+ topology_ref_history_lock(port->mgr);
+ ret = kref_get_unless_zero(&port->topology_kref);
+ if (ret) {
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+ topology_ref_history_unlock(port->mgr);
return ret;
}
@@ -1318,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+
+ topology_ref_history_unlock(port->mgr);
}
/**
@@ -1336,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref) - 1);
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+ topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port);
}
@@ -1454,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-/*
- * return sends link address for new mstb
- */
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
{
- int ret;
- u8 rad[6], lct;
- bool send_link = false;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ struct drm_dp_mst_branch *mstb;
+ u8 rad[8], lct;
+ int ret = 0;
+
+ if (port->pdt == new_pdt)
+ return 0;
+
+ /* Teardown the old pdt, if there is one */
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /*
+ * If the new PDT would also have an i2c bus, don't bother
+ * with reregistering it
+ */
+ if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ new_pdt == DP_PEER_DEVICE_SST_SINK) {
+ port->pdt = new_pdt;
+ return 0;
+ }
+
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ break;
+ }
+
+ port->pdt = new_pdt;
switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
break;
+
case DP_PEER_DEVICE_MST_BRANCHING:
lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p", port);
+ goto out;
+ }
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (port->mstb) {
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- send_link = true;
- }
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
+
+ /* And make sure we send a link address for this */
+ ret = 1;
break;
}
- return send_link;
+
+out:
+ if (ret < 0)
+ port->pdt = DP_PEER_DEVICE_NONE;
+ return ret;
}
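
drm_dp_port_set_pdt() replaces the old teardown/setup pair with a single transition helper and a three-way return: 0 when nothing further is needed, 1 when a new branch device was created and a LINK_ADDRESS probe should follow, and a negative errno on failure, in which case the port is left as DP_PEER_DEVICE_NONE. A minimal caller sketch, mirroring the handling used later in this patch:

	ret = drm_dp_port_set_pdt(port, new_pdt);
	if (ret == 1) {
		/* A fresh MSTB hangs off this port now, so reprobe it */
		send_link_addr = true;
	} else if (ret < 0) {
		DRM_ERROR("Failed to change PDT on port %p: %d\n", port, ret);
		goto fail;
	}
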
/**
@@ -1617,44 +2117,131 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
-static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
- struct drm_device *dev,
- struct drm_dp_link_addr_reply_port *port_msg)
+static void
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ char proppath[255];
+ int ret;
+
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
+ if (!port->connector) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
+ drm_connector_set_tile_property(port->connector);
+ }
+
+ mgr->cbs->register_connector(port->connector);
+ return;
+
+error:
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
+}
+
+/*
+ * Drop a topology reference, and unlink the port from the in-memory topology
+ * layout
+ */
+static void
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
+{
+ mutex_lock(&mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mgr->lock);
+ drm_dp_mst_topology_put_port(port);
+}
+
+static struct drm_dp_mst_port *
+drm_dp_mst_add_port(struct drm_device *dev,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb, u8 port_number)
+{
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+
+ if (!port)
+ return NULL;
+
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
+ port->parent = mstb;
+ port->port_num = port_number;
+ port->mgr = mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev->dev;
+ port->aux.is_remote = true;
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
+ return port;
+}
+
+static int
+drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
+ struct drm_device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- bool ret;
- bool created = false;
- int old_pdt = 0;
- int old_ddps = 0;
+ int old_ddps = 0, ret;
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
if (!port)
- return;
- kref_init(&port->topology_kref);
- kref_init(&port->malloc_kref);
- port->parent = mstb;
- port->port_num = port_msg->port_number;
- port->mgr = mstb->mgr;
- port->aux.name = "DPMST";
- port->aux.dev = dev->dev;
- port->aux.is_remote = true;
-
- /*
- * Make sure the memory allocation for our parent branch stays
- * around until our own memory allocation is released
+ return -ENOMEM;
+ created = true;
+ changed = true;
+ } else if (!port->input && port_msg->input_port && port->connector) {
+ /* Since port->connector can't be changed here, we create a
+ * new port if input_port changes from 0 to 1
*/
- drm_dp_mst_get_mstb_malloc(mstb);
-
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ drm_dp_mst_topology_put_port(port);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
+ if (!port)
+ return -ENOMEM;
+ changed = true;
created = true;
- } else {
- old_pdt = port->pdt;
+ } else if (port->input && !port_msg->input_port) {
+ changed = true;
+ } else if (port->connector) {
+ /* We're updating a port that's exposed to userspace, so do it
+ * under lock
+ */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+
old_ddps = port->ddps;
+ changed = port->ddps != port_msg->ddps ||
+ (port->ddps &&
+ (port->ldps != port_msg->legacy_device_plug_status ||
+ port->dpcd_rev != port_msg->dpcd_revision ||
+ port->mcs != port_msg->mcs ||
+ port->pdt != port_msg->peer_device_type ||
+ port->num_sdp_stream_sinks !=
+ port_msg->num_sdp_stream_sinks));
}
- port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port;
+ if (!port->input)
+ new_pdt = port_msg->peer_device_type;
port->mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
@@ -1665,77 +2252,104 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
- mutex_lock(&mstb->mgr->lock);
+ mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
- mutex_unlock(&mstb->mgr->lock);
+ mutex_unlock(&mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
if (!port->input) {
- drm_dp_send_enum_path_resources(mstb->mgr,
- mstb, port);
+ drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
}
} else {
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
-
- ret = drm_dp_port_setup_pdt(port);
- if (ret == true)
- drm_dp_send_link_address(mstb->mgr, port->mstb);
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ send_link_addr = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
+ port, ret);
+ goto fail;
}
- if (created && !port->input) {
- char proppath[255];
-
- build_mst_prop_path(mstb, port->port_num, proppath,
- sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
- port,
- proppath);
- if (!port->connector) {
- /* remove it from the port list */
- mutex_lock(&mstb->mgr->lock);
- list_del(&port->next);
- mutex_unlock(&mstb->mgr->lock);
- /* drop port list reference */
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector,
- &port->aux.ddc);
- drm_connector_set_tile_property(port->connector);
- }
- (*mstb->mgr->cbs->register_connector)(port->connector);
+ /*
+ * If this port wasn't just created, then we're reprobing because
+ * we're coming out of suspend. In this case, always resend the link
+ * address if there's an MSTB on this port
+ */
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ send_link_addr = true;
+
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (!port->input)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+ if (send_link_addr && port->mstb) {
+ ret = drm_dp_send_link_address(mgr, port->mstb);
+ if (ret == 1) /* MSTB below us changed */
+ changed = true;
+ else if (ret < 0)
+ goto fail_put;
}
-out:
/* put reference to this port */
drm_dp_mst_topology_put_port(port);
+ return changed;
+
+fail:
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+fail_put:
+ drm_dp_mst_topology_put_port(port);
+ return ret;
}
-static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
- struct drm_dp_connection_status_notify *conn_stat)
+static void
+drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_pdt;
- int old_ddps;
- bool dowork = false;
+ int old_ddps, ret;
+ u8 new_pdt;
+ bool dowork = false, create_connector = false;
+
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return;
+ if (port->connector) {
+ if (!port->input && conn_stat->input_port) {
+ /*
+ * We can't remove a connector from an already exposed
+ * port, so just throw the port out and make sure we
+			 * reprobe the link address of its parent MSTB
+ */
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ mstb->link_address_sent = false;
+ dowork = true;
+ goto out;
+ }
+
+ /* Locking is only needed if the port's exposed to userspace */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ } else if (port->input && !conn_stat->input_port) {
+ create_connector = true;
+ /* Reprobe link address so we get num_sdp_streams */
+ mstb->link_address_sent = false;
+ dowork = true;
+ }
+
old_ddps = port->ddps;
- old_pdt = port->pdt;
- port->pdt = conn_stat->peer_device_type;
+ port->input = conn_stat->input_port;
port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -1747,17 +2361,27 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
- if (drm_dp_port_setup_pdt(port))
- dowork = true;
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
+
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ dowork = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
+ port, ret);
+ dowork = false;
}
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (create_connector)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+out:
drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
-
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -1800,7 +2424,7 @@ out:
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *mstb,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -1824,7 +2448,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -1843,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
return mstb;
}
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
- struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent)
- drm_dp_send_link_address(mgr, mstb);
+ int ret;
+ bool changed = false;
+
+ if (!mstb->link_address_sent) {
+ ret = drm_dp_send_link_address(mgr, mstb);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
+ }
list_for_each_entry(port, &mstb->ports, next) {
- if (port->input)
- continue;
+ struct drm_dp_mst_branch *mstb_child = NULL;
- if (!port->ddps)
+ if (port->input || !port->ddps)
continue;
- if (!port->available_pbn)
+ if (!port->available_pbn) {
+ drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port);
+ drm_modeset_unlock(&mgr->base.lock);
+ changed = true;
+ }
- if (port->mstb) {
+ if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
- if (mstb_child) {
- drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- }
+
+ if (mstb_child) {
+ ret = drm_dp_check_and_send_link_address(mgr,
+ mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
}
}
+
+ return changed;
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ mutex_lock(&mgr->probe_lock);
+
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
@@ -1886,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
- if (mstb) {
- drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_mst_topology_put_mstb(mstb);
+ if (!mstb) {
+ mutex_unlock(&mgr->probe_lock);
+ return;
}
+
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ mutex_unlock(&mgr->probe_lock);
+ if (ret)
+ drm_kms_helper_hotplug_event(dev);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -2035,8 +2687,11 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
idx += tosend + 1;
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
- if (ret) {
- DRM_DEBUG_KMS("sideband msg failed to send\n");
+ if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_printf(&p, "sideband msg failed to send\n");
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
return ret;
}
@@ -2098,21 +2753,52 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+
+ if (drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
+
if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static void
+drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
+{
+ struct drm_dp_link_addr_reply_port *port_reply;
+ int i;
+
+ for (i = 0; i < reply->nports; i++) {
+ port_reply = &reply->ports[i];
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
+ i,
+ port_reply->input_port,
+ port_reply->peer_device_type,
+ port_reply->port_number,
+ port_reply->dpcd_revision,
+ port_reply->mcs,
+ port_reply->ddps,
+ port_reply->legacy_device_plug_status,
+ port_reply->num_sdp_streams,
+ port_reply->num_sdp_stream_sinks);
+ }
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
- int len;
struct drm_dp_sideband_msg_tx *txmsg;
- int ret;
+ struct drm_dp_link_address_ack_reply *reply;
+ struct drm_dp_mst_port *port, *tmp;
+ int i, len, ret, port_mask = 0;
+ bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return;
+ return -ENOMEM;
txmsg->dst = mstb;
len = build_link_address(txmsg);
@@ -2120,48 +2806,67 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
+ /* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- int i;
+ if (ret <= 0) {
+ DRM_ERROR("Sending link address failed with %d\n", ret);
+ goto out;
+ }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+ DRM_ERROR("link address NAK received\n");
+ ret = -EIO;
+ goto out;
+ }
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_DEBUG_KMS("link address nak received\n");
- } else {
- DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
- txmsg->reply.u.link_addr.ports[i].input_port,
- txmsg->reply.u.link_addr.ports[i].peer_device_type,
- txmsg->reply.u.link_addr.ports[i].port_number,
- txmsg->reply.u.link_addr.ports[i].dpcd_revision,
- txmsg->reply.u.link_addr.ports[i].mcs,
- txmsg->reply.u.link_addr.ports[i].ddps,
- txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
- txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
- txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
- }
+ reply = &txmsg->reply.u.link_addr;
+ DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
+ drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+ drm_dp_check_mstb_guid(mstb, reply->guid);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
- }
- drm_kms_helper_hotplug_event(mgr->dev);
- }
- } else {
- mstb->link_address_sent = false;
- DRM_DEBUG_KMS("link address failed %d\n", ret);
+ for (i = 0; i < reply->nports; i++) {
+ port_mask |= BIT(reply->ports[i].port_number);
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
+ &reply->ports[i]);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ goto out;
}
+ /* Prune any ports that are currently a part of mstb in our in-memory
+ * topology, but were not seen in this link address. Usually this
+ * means that they were removed while the topology was out of sync,
+ * e.g. during suspend/resume
+ */
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ if (port_mask & BIT(port->port_num))
+ continue;
+
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
+ port->port_num);
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ changed = true;
+ }
+ mutex_unlock(&mgr->lock);
+
+out:
+ if (ret <= 0)
+ mstb->link_address_sent = false;
kfree(txmsg);
+ return ret < 0 ? ret : changed;
}
-static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static int
+drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
{
- int len;
+ struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
+ int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2175,14 +2880,20 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
+ path_res = &txmsg->reply.u.path_resources;
+
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("enum path resources nak received\n");
} else {
- if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ if (port->port_num != path_res->port_number)
DRM_ERROR("got incorrect port in response\n");
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
- txmsg->reply.u.path_resources.avail_payload_bw_number);
- port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
+ path_res->port_number,
+ path_res->full_payload_bw_number,
+ path_res->avail_payload_bw_number);
+ port->available_pbn =
+ path_res->avail_payload_bw_number;
}
}
@@ -2655,30 +3366,13 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
- int dp_link_count,
- int *out)
+static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
{
- switch (dp_link_bw) {
- default:
+ if (dp_link_bw == 0 || dp_link_count == 0)
DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
- return false;
- case DP_LINK_BW_1_62:
- *out = 3 * dp_link_count;
- break;
- case DP_LINK_BW_2_7:
- *out = 5 * dp_link_count;
- break;
- case DP_LINK_BW_5_4:
- *out = 10 * dp_link_count;
- break;
- case DP_LINK_BW_8_1:
- *out = 15 * dp_link_count;
- break;
- }
- return true;
+ return dp_link_bw * dp_link_count / 2;
}
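
The switch statement collapses into plain arithmetic because the raw DPCD link-bandwidth codes encode the link rate in 0.27 Gbps units, so dp_link_bw * dp_link_count / 2 reproduces every row of the old table. Worked through for a 4-lane link:

	DP_LINK_BW_1_62 (0x06): 0x06 * 4 / 2 = 12, matching  3 * dp_link_count
	DP_LINK_BW_2_7  (0x0a): 0x0a * 4 / 2 = 20, matching  5 * dp_link_count
	DP_LINK_BW_5_4  (0x14): 0x14 * 4 / 2 = 40, matching 10 * dp_link_count
	DP_LINK_BW_8_1  (0x1e): 0x1e * 4 / 2 = 60, matching 15 * dp_link_count
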
/**
@@ -2710,9 +3404,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
- mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
- &mgr->pbn_div)) {
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (mgr->pbn_div == 0) {
ret = -EINVAL;
goto out_unlock;
}
@@ -2767,6 +3461,23 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+static void
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ /* The link address will need to be re-sent on resume */
+ mstb->link_address_sent = false;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ /* The PBN for each port will also need to be re-probed */
+ port->available_pbn = 0;
+
+ if (port->mstb)
+ drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
+ }
+}
+
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
@@ -2780,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ flush_work(&mgr->delayed_destroy_work);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_state && mgr->mst_primary)
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
+ * @sync: whether or not to perform topology reprobing synchronously
*
* This will fetch DPCD and see if the device is still there,
* if it is, it will rewrite the MSTM control bits, and return.
*
- * if the device fails this returns -1, and the driver should do
+ * If the device fails this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked.
+ *
+ * During system resume (where it is assumed that the driver will be calling
+ * drm_atomic_helper_resume()) this function should be called beforehand with
+ * @sync set to true. In contexts like runtime resume where the driver is not
+ * expected to be calling drm_atomic_helper_resume(), this function should be
+ * called with @sync set to false in order to avoid deadlocking.
+ *
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
+ * otherwise.
*/
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync)
{
- int ret = 0;
+ int ret;
+ u8 guid[16];
mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
- if (mgr->mst_primary) {
- int sret;
- u8 guid[16];
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
+ DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (sret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
- /* Some hubs forget their guids after they resume */
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (sret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ /*
+ * For the final step of resuming the topology, we need to bring the
+ * state of our in-memory topology back into sync with reality. So,
+ * restart the probing process as if we're probing a new hub
+ */
+ queue_work(system_long_wq, &mgr->work);
+ mutex_unlock(&mgr->lock);
- ret = 0;
- } else
- ret = -1;
+ if (sync) {
+ DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+ flush_work(&mgr->work);
+ }
-out_unlock:
+ return 0;
+
+out_fail:
mutex_unlock(&mgr->lock);
- return ret;
+ return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
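
A sketch of how a driver consumes the new @sync flag; the my_device/runtime names here are illustrative, not from this patch. A system resume path that will later call drm_atomic_helper_resume() passes sync = true so the reprobe completes first, while a runtime-PM path passes false to avoid deadlocking on its own work items:

	static int my_driver_mst_resume(struct my_device *dev, bool runtime)
	{
		int ret;

		ret = drm_dp_mst_topology_mgr_resume(&dev->mst_mgr, !runtime);
		if (ret == -1) {
			/* Undocked during suspend: tear down, full reprobe */
			drm_dp_mst_topology_mgr_set_mst(&dev->mst_mgr, false);
		}
		return ret;
	}
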
@@ -2890,136 +3628,198 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
- int ret = 0;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
+ int slot = -1;
+
+ if (!drm_dp_get_one_sb_msg(mgr, false))
+ goto clear_down_rep_recv;
- if (!drm_dp_get_one_sb_msg(mgr, false)) {
- memset(&mgr->down_rep_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
+ if (!mgr->down_rep_recv.have_eomt)
return 0;
+
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ goto clear_down_rep_recv;
+ }
+
+ /* find the message */
+ slot = hdr->seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
+
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb, hdr->seqno, hdr->lct, hdr->rad[0],
+ mgr->down_rep_recv.msg[0]);
+ goto no_msg;
}
- if (mgr->down_rep_recv.have_eomt) {
- struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- int slot = -1;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad);
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+ txmsg->reply.req_type,
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
+ txmsg->reply.u.nak.reason,
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+ txmsg->reply.u.nak.nak_data);
- /* find the message */
- slot = mgr->down_rep_recv.initial_hdr.seqno;
- mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
- mutex_unlock(&mgr->qlock);
-
- if (!txmsg) {
- DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
- mstb,
- mgr->down_rep_recv.initial_hdr.seqno,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad[0],
- mgr->down_rep_recv.msg[0]);
- drm_dp_mst_topology_put_mstb(mstb);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ drm_dp_mst_topology_put_mstb(mstb);
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mutex_unlock(&mgr->qlock);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
- txmsg->reply.req_type,
- drm_dp_mst_req_type_str(txmsg->reply.req_type),
- txmsg->reply.u.nak.reason,
- drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
- txmsg->reply.u.nak.nak_data);
-
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
+ wake_up_all(&mgr->tx_waitq);
+
+ return 0;
- mutex_lock(&mgr->qlock);
- txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mutex_unlock(&mgr->qlock);
+no_msg:
+ drm_dp_mst_topology_put_mstb(mstb);
+clear_down_rep_recv:
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- wake_up_all(&mgr->tx_waitq);
- }
- return ret;
+ return 0;
}
-static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+static inline bool
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_pending_up_req *up_req)
{
- int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
+ struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
+ bool hotplug = false;
- if (!drm_dp_get_one_sb_msg(mgr, true)) {
- memset(&mgr->up_req_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+ if (hdr->broadcast) {
+ const u8 *guid = NULL;
+
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
+ guid = msg->u.conn_stat.guid;
+ else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+ guid = msg->u.resource_stat.guid;
+
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ } else {
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
- if (mgr->up_req_recv.have_eomt) {
- struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb = NULL;
- bool seqno;
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ return false;
+ }
- if (!mgr->up_req_recv.initial_hdr.broadcast) {
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
- }
+ /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
- seqno = mgr->up_req_recv.initial_hdr.seqno;
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ drm_dp_mst_topology_put_mstb(mstb);
+ return hotplug;
+}
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+static void drm_dp_mst_up_req_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ up_req_work);
+ struct drm_dp_pending_up_req *up_req;
+ bool send_hotplug = false;
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+ mutex_lock(&mgr->probe_lock);
+ while (true) {
+ mutex_lock(&mgr->up_req_lock);
+ up_req = list_first_entry_or_null(&mgr->up_req_list,
+ struct drm_dp_pending_up_req,
+ next);
+ if (up_req)
+ list_del(&up_req->next);
+ mutex_unlock(&mgr->up_req_lock);
+
+ if (!up_req)
+ break;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
+ kfree(up_req);
+ }
+ mutex_unlock(&mgr->probe_lock);
- drm_dp_update_port(mstb, &msg.u.conn_stat);
+ if (send_hotplug)
+ drm_kms_helper_hotplug_event(mgr->dev);
+}
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
- drm_kms_helper_hotplug_event(mgr->dev);
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
+ struct drm_dp_pending_up_req *up_req;
+ bool seqno;
- } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+ if (!drm_dp_get_one_sb_msg(mgr, true))
+ goto out;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (!mgr->up_req_recv.have_eomt)
+ return 0;
- DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
- }
+ up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
+ if (!up_req) {
+ DRM_ERROR("Not enough memory to process MST up req\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&up_req->next);
- if (mstb)
- drm_dp_mst_topology_put_mstb(mstb);
+ seqno = hdr->seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
+ up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
+ DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
+ up_req->msg.req_type);
+ kfree(up_req);
+ goto out;
}
- return ret;
+
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ seqno, false);
+
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
+
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+ conn_stat->legacy_device_plug_status,
+ conn_stat->displayport_device_plug_status,
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
+
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
+ res_stat->port_number,
+ res_stat->available_pbn);
+ }
+
+ up_req->hdr = *hdr;
+ mutex_lock(&mgr->up_req_lock);
+ list_add_tail(&up_req->next, &mgr->up_req_list);
+ mutex_unlock(&mgr->up_req_lock);
+ queue_work(system_long_wq, &mgr->up_req_work);
+
+out:
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
}
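
drm_dp_mst_handle_up_req() is now just the producer half of a deferred queue: it parses and ACKs the sideband message, then appends it to up_req_list for drm_dp_mst_up_req_work() above, which drains the list under probe_lock so connection-status handling serializes with link probing. The pending-request type is defined outside this hunk; a hedged reconstruction of its likely shape:

	struct drm_dp_pending_up_req {
		struct drm_dp_sideband_msg_hdr hdr;
		struct drm_dp_sideband_msg_req_body msg;
		struct list_head next;
	};
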
/**
@@ -3063,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
+ * @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port
- * @port: unverified pointer to a port
+ * @port: pointer to a port
*
- * This returns the current connection state for a port. It validates the
- * port pointer still exists so the caller doesn't require a reference
+ * This returns the current connection state for a port.
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
- struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ goto out;
+
+ ret = connector_status_disconnected;
+
if (!port->ddps)
goto out;
@@ -3088,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_SST_SINK:
- status = connector_status_connected;
+ ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
@@ -3096,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
- status = connector_status_connected;
+ ret = connector_status_connected;
break;
}
out:
drm_dp_mst_topology_put_port(port);
- return status;
+ return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
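
Since drm_dp_mst_detect_port() can now fail with -EDEADLK out of drm_modeset_lock(), it is meant to be called from a connector's ->detect_ctx() hook, where the probe helpers own the acquire context and handle backoff and retry. A minimal sketch, with my_mst_connector/to_my_mst_connector standing in for driver-specific glue:

	static int my_mst_detect_ctx(struct drm_connector *connector,
				     struct drm_modeset_acquire_ctx *ctx,
				     bool force)
	{
		struct my_mst_connector *c = to_my_mst_connector(connector);

		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
	}
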
@@ -3237,7 +4046,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots, ret;
+ int prev_slots, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -3284,8 +4093,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
}
vcpi->vcpi = req_slots;
- ret = req_slots;
- return ret;
+ return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
@@ -3539,13 +4347,6 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- u64 kbps;
- s64 peak_kbps;
- u32 numerator;
- u32 denominator;
-
- kbps = clock * bpp;
-
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
@@ -3556,41 +4357,11 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
*/
-
- numerator = 64 * 1006;
- denominator = 54 * 8 * 1000 * 1000;
-
- kbps *= numerator;
- peak_kbps = drm_fixp_from_fraction(kbps, denominator);
-
- return drm_fixp2int_ceil(peak_kbps);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
+ 8 * 54 * 1000 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
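
The removed self-test below still documents what this formula must produce; for a 154000 kHz, 30 bpp stream:

	PBN = ceil(154000 * 30 * 64 * 1006 / (8 * 54 * 1000 * 1000))
	    = ceil(688.55) = 689

and likewise 234000 kHz at 30 bpp gives 1047 and 297000 kHz at 24 bpp gives 1063, matching the expected values in the deleted test.
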
-static int test_calc_pbn_mode(void)
-{
- int ret;
- ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 154000, 30, 689, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 234000, 30, 1047, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(297000, 24);
- if (ret != 1063) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 297000, 24, 1063, ret);
- return -EINVAL;
- }
- return 0;
-}
-
/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
@@ -3729,36 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_destroy_connector_work(struct work_struct *work)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_dp_mst_port *port;
- bool send_hotplug = false;
+ if (port->connector)
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_mst_put_port_malloc(port);
+}
+
+static inline void
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ }
+ mutex_unlock(&mgr->lock);
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up_all(&mstb->mgr->tx_waitq);
+
+ drm_dp_mst_put_mstb_malloc(mstb);
+}
+
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ delayed_destroy_work);
+ bool send_hotplug = false, go_again;
+
/*
* Not a regular list traverse as we have to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
+ struct drm_dp_mst_branch,
+ destroy_next);
+ if (mstb)
+ list_del(&mstb->destroy_next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+
+ if (!mstb)
+ break;
+
+ drm_dp_delayed_destroy_mstb(mstb);
+ go_again = true;
}
- list_del(&port->next);
- mutex_unlock(&mgr->destroy_connector_lock);
- INIT_LIST_HEAD(&port->next);
+ for (;;) {
+ struct drm_dp_mst_port *port;
- mgr->cbs->destroy_connector(mgr, port->connector);
+ mutex_lock(&mgr->delayed_destroy_lock);
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
+ struct drm_dp_mst_port,
+ next);
+ if (port)
+ list_del(&port->next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ if (!port)
+ break;
+
+ drm_dp_delayed_destroy_port(port);
+ send_hotplug = true;
+ go_again = true;
+ }
+ } while (go_again);
- drm_dp_mst_put_port_malloc(port);
- send_hotplug = true;
- }
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
@@ -3920,9 +4758,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_device *dev = mgr->dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -3948,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
- mutex_init(&mgr->destroy_connector_lock);
+ mutex_init(&mgr->delayed_destroy_lock);
+ mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_init(&mgr->topology_ref_history_lock);
+#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
+ INIT_LIST_HEAD(&mgr->up_req_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
@@ -3970,8 +4813,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
- if (test_calc_pbn_mode() < 0)
- DRM_ERROR("MST PBN self-test failed\n");
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
@@ -3996,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ cancel_work_sync(&mgr->delayed_destroy_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
@@ -4007,6 +4848,16 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
+
+ mutex_destroy(&mgr->delayed_destroy_lock);
+ mutex_destroy(&mgr->payload_lock);
+ mutex_destroy(&mgr->qlock);
+ mutex_destroy(&mgr->lock);
+ mutex_destroy(&mgr->up_req_lock);
+ mutex_destroy(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_destroy(&mgr->topology_ref_history_lock);
+#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
new file mode 100644
index 000000000000..eeda9a61c657
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Declarations for DP MST related functions which are only used in selftests
+ *
+ * Copyright © 2018 Red Hat
+ * Authors:
+ * Lyude Paul <lyude@redhat.com>
+ */
+
+#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
+#define _DRM_DP_MST_HELPER_INTERNAL_H_
+
+#include <drm/drm_dp_mst_helper.h>
+
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw);
+int drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req);
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer);
+
+#endif /* !_DRM_DP_MST_HELPER_INTERNAL_H_ */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 769feefeeeef..1b9b40a1c7c9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,26 +46,9 @@
#include "drm_internal.h"
#include "drm_legacy.h"
-/*
- * drm_debug: Enable debug output.
- * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
- */
-unsigned int drm_debug = 0;
-EXPORT_SYMBOL(drm_debug);
-
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
-"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 77f4e5ae4197..4a475d9696ff 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -216,13 +216,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
pps_payload->rc_range_parameters[i] =
- ((dsc_cfg->rc_range_params[i].range_min_qp <<
- DSC_PPS_RC_RANGE_MINQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_max_qp <<
- DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_payload->rc_range_parameters[i] =
- cpu_to_be16(pps_payload->rc_range_parameters[i]);
+ cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
}
/* PPS 88 */
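
Folding the cpu_to_be16() into the packing expression changes nothing on the wire; each 16-bit RC range parameter still carries three fields. Assuming the shifts keep their usual DSC PPS values (DSC_PPS_RC_RANGE_MINQP_SHIFT = 11, DSC_PPS_RC_RANGE_MAXQP_SHIFT = 6), an equivalent spelled-out form is:

	u16 val = (dsc_cfg->rc_range_params[i].range_min_qp << 11) |
		  (dsc_cfg->rc_range_params[i].range_max_qp << 6) |
		  dsc_cfg->rc_range_params[i].range_bpg_offset;

	pps_payload->rc_range_parameters[i] = cpu_to_be16(val);
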
@@ -336,12 +334,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
else
vdsc_cfg->nfl_bpg_offset = 0;
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
/* Number of groups used to code the entire slice */
groups_total = groups_per_line * vdsc_cfg->slice_height;
@@ -371,11 +363,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->scale_increment_interval = 0;
}
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
/*
* DSC spec mentions that bits_per_pixel specifies the target
* bits/pixel (bpp) rate that is used by the encoder,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6b0177112e18..474ac04d5600 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1278,6 +1278,106 @@ static const struct drm_display_mode edid_cea_modes[] = {
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 108 - 1280x720@48Hz 16:9 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 109 - 1280x720@48Hz 64:27 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 110 - 1680x720@48Hz 64:27 */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490,
+ 2530, 2750, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 111 - 1920x1080@48Hz 16:9 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 112 - 1920x1080@48Hz 64:27 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 113 - 2560x1080@48Hz 64:27 */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 114 - 3840x2160@48Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 115 - 4096x2160@48Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 116 - 3840x2160@48Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 117 - 3840x2160@100Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 118 - 3840x2160@120Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 119 - 3840x2160@100Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 120 - 3840x2160@120Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 121 - 5120x2160@24Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116,
+ 7204, 7500, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 122 - 5120x2160@25Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816,
+ 6904, 7200, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 123 - 5120x2160@30Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784,
+ 5872, 6000, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 124 - 5120x2160@48Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866,
+ 5954, 6250, 0, 2160, 2168, 2178, 2475, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 125 - 5120x2160@50Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 126 - 5120x2160@60Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284,
+ 5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 127 - 5120x2160@100Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
/*
@@ -1554,7 +1654,7 @@ static void connector_bad_edid(struct drm_connector *connector,
{
int i;
- if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
dev_warn(connector->dev->dev,
@@ -2092,7 +2192,8 @@ static int standard_timing_level(struct edid *edid)
return LEVEL_CVT;
if (drm_gtf2_hbreak(edid))
return LEVEL_GTF2;
- return LEVEL_GTF;
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ return LEVEL_GTF;
}
return LEVEL_DMT;
}
@@ -3108,18 +3209,10 @@ static bool drm_valid_cea_vic(u8 vic)
return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
}
-/**
- * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
- * the input VIC from the CEA mode list
- * @video_code: ID given to each of the CEA modes
- *
- * Returns picture aspect ratio
- */
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
return edid_cea_modes[video_code].picture_aspect_ratio;
}
-EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
@@ -3722,7 +3815,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
if (*end < 4 || *end > 127)
return -ERANGE;
} else {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -4191,7 +4284,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4252,7 +4345,7 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -5071,6 +5164,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+static u8 drm_mode_hdmi_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
+
+ if (!has_hdmi_infoframe)
+ return 0;
+
+ /* No HDMI VIC when signalling 3D video format */
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ return 0;
+
+ return drm_match_hdmi_mode(mode);
+}
+
+static u8 drm_mode_cea_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ u8 vic;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+	 * VIC in AVI infoframes. Let's check if this mode is present in
+	 * HDMI 1.4b 4K modes.
+ */
+ if (drm_mode_hdmi_vic(connector, mode))
+ return 0;
+
+ vic = drm_match_cea_mode(mode);
+
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink(connector) && vic > 64)
+ return 0;
+
+ return vic;
+}
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@@ -5098,29 +5234,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
- frame->video_code = drm_match_cea_mode(mode);
-
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && frame->video_code > 64)
- frame->video_code = 0;
-
- /*
- * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
- * we should send its VIC in vendor infoframes, else send the
- * VIC in AVI infoframes. Lets check if this mode is present in
- * HDMI 1.4b 4K modes
- */
- if (frame->video_code) {
- u8 vendor_if_vic = drm_match_hdmi_mode(mode);
- bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
-
- if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
- frame->video_code = 0;
- }
+ frame->video_code = drm_mode_cea_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
@@ -5285,6 +5399,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+/**
+ * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
+ * bar information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ frame->right_bar = conn_state->tv.margins.right;
+ frame->left_bar = conn_state->tv.margins.left;
+ frame->top_bar = conn_state->tv.margins.top;
+ frame->bottom_bar = conn_state->tv.margins.bottom;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
+
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
@@ -5337,8 +5468,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
int err;
- u32 s3d_flags;
- u8 vic;
if (!frame || !mode)
return -EINVAL;
@@ -5346,8 +5475,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (!has_hdmi_infoframe)
return -EINVAL;
- vic = drm_match_hdmi_mode(mode);
- s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
/*
* Even if it's not absolutely necessary to send the infoframe
@@ -5358,15 +5488,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
* mode if the source simply stops sending the infoframe when
* it wants to switch from 3D to 2D.
*/
-
- if (vic && s3d_flags)
- return -EINVAL;
-
- err = hdmi_vendor_infoframe_init(frame);
- if (err < 0)
- return err;
-
- frame->vic = vic;
+ frame->vic = drm_mode_hdmi_vic(connector, mode);
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
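
For illustration, a minimal sketch of how an encoder driver might use these helpers after this refactor; the foo_* names are hypothetical and the hardware write-out is elided:

static void foo_encoder_write_infoframes(struct drm_encoder *encoder,
					 const struct drm_connector_state *conn_state,
					 const struct drm_display_mode *mode)
{
	struct drm_connector *connector = conn_state->connector;
	struct hdmi_avi_infoframe avi;
	struct hdmi_vendor_infoframe vendor;

	/* drm_mode_cea_vic() now picks the CEA VIC, or 0 if the mode is
	 * carried as an HDMI VIC in the vendor infoframe instead. */
	if (!drm_hdmi_avi_infoframe_from_display_mode(&avi, connector, mode)) {
		drm_hdmi_avi_infoframe_bars(&avi, conn_state);
		/* pack &avi and write it to the hardware here */
	}

	/* Fails with -EINVAL if the sink supports no HDMI vendor infoframe. */
	if (!drm_hdmi_vendor_infoframe_from_display_mode(&vendor, connector, mode)) {
		/* pack &vendor and write it to the hardware here */
	}
}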
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index d38b3b255926..37d8ba3ddb46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -175,7 +175,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
u8 *edid;
int fwsize, builtin;
int i, valid_extensions = 0;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS);
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 7fb47b7b8b44..80d88a55302e 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a7ba5b4902d6..8ebeccdeed23 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -46,6 +46,7 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include "drm_crtc_helper_internal.h"
#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
@@ -91,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ * It will automatically set up deferred I/O if the driver requires a shadow
+ * buffer.
*
- * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
- * down by calling drm_fb_helper_fbdev_teardown().
+ * For other drivers, setup fbdev emulation by calling
+ * drm_fb_helper_fbdev_setup() and tear it down by calling
+ * drm_fb_helper_fbdev_teardown().
*
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
@@ -126,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup
- * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
+ * mmap page writes.
+ *
+ * Deferred I/O is not compatible with SHMEM. Such drivers should request an
+ * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
@@ -679,49 +685,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
- * drm_fb_helper_defio_init - fbdev deferred I/O initialization
- * @fb_helper: driver-allocated fbdev helper
- *
- * This function allocates &fb_deferred_io, sets callback to
- * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init().
- * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
- * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
- *
- * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
- * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
- * affect other instances of that &fb_ops.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- struct fb_info *info = fb_helper->fbdev;
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- info->fbdefio = fbdefio;
- fbdefio->delay = msecs_to_jiffies(50);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
-
- *fbops = *info->fbops;
- info->fbops = fbops;
-
- fb_deferred_io_init(info);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_defio_init);
-
-/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -2355,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* Drivers that set the dirty callback on their framebuffer will get a shadow
* fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers.
+ * make deferred I/O work with all kinds of buffers. A shadow buffer can be
+ * requested explicitly by setting struct drm_mode_config.prefer_shadow or
+ * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
+ * required to use generic fbdev emulation with SHMEM helpers.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
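
As a sketch of the setup path described above (driver names hypothetical), a SHMEM-based driver would opt in to the shadow buffer before calling the generic emulation:

static int foo_kms_init(struct drm_device *dev)
{
	/* Request a shadow buffer so deferred I/O works with SHMEM BOs. */
	dev->mode_config.prefer_shadow_fbdev = true;

	/* ... register the device, then: */
	drm_fbdev_generic_setup(dev, 32);

	return 0;
}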
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6854f5867d51..000fa4a1899f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1099,23 +1099,12 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
+ int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (obj->funcs && obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
- else
- return -EINVAL;
-
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
@@ -1124,6 +1113,33 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_get(obj);
+ if (obj->funcs && obj->funcs->mmap) {
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret) {
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
+ WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+ } else {
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
+
+ vma->vm_private_data = obj;
+
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
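
A sketch of what an object-level mmap callback now has to do (foo_* names hypothetical): the core takes the object reference and strips the fake offset before calling it, and warns unless the callback sets VM_DONTEXPAND:

static int foo_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/* vma->vm_pgoff already has the fake offset removed by the core. */
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_ops = &foo_gem_vm_ops;

	return 0;
}

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.mmap = foo_gem_mmap,
	/* ... other callbacks ... */
};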
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f5918707672f..0810d3ef6961 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -32,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
@@ -505,39 +505,30 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
-const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
- * @filp: File object
+ * @obj: gem object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
* operation for shmem objects. Drivers which employ the shmem helpers should
- * use this function as their &file_operations.mmap handler in the DRM device file's
- * file_operations structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ * use this function as their &drm_gem_object_funcs.mmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct drm_gem_shmem_object *shmem;
int ret;
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+ shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
@@ -545,12 +536,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
- /* VM_PFNMAP was set by drm_gem_mmap() */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
}
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
new file mode 100644
index 000000000000..605a8a3da7f9
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+
+#include <drm/drm_gem_ttm_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helper functions for gem objects backed by
+ * ttm.
+ */
+
+/**
+ * drm_gem_ttm_print_info() - Print &ttm_buffer_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @gem: GEM object
+ *
+ * This function can be used as &drm_gem_object_funcs.print_info
+ * callback.
+ */
+void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *gem)
+{
+ static const char * const plname[] = {
+ [ TTM_PL_SYSTEM ] = "system",
+ [ TTM_PL_TT ] = "tt",
+ [ TTM_PL_VRAM ] = "vram",
+ [ TTM_PL_PRIV ] = "priv",
+
+ [ 16 ] = "cached",
+ [ 17 ] = "uncached",
+ [ 18 ] = "wc",
+ [ 19 ] = "contig",
+
+ [ 21 ] = "pinned", /* NO_EVICT */
+ [ 22 ] = "topdown",
+ };
+ const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+ drm_printf_indent(p, indent, "placement=");
+ drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+ drm_printf(p, "\n");
+
+ if (bo->mem.bus.is_iomem) {
+ drm_printf_indent(p, indent, "bus.base=%lx\n",
+ (unsigned long)bo->mem.bus.base);
+ drm_printf_indent(p, indent, "bus.offset=%lx\n",
+ (unsigned long)bo->mem.bus.offset);
+ }
+}
+EXPORT_SYMBOL(drm_gem_ttm_print_info);
+
+/**
+ * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @vma: vm area.
+ *
+ * This function can be used as &drm_gem_object_funcs.mmap
+ * callback.
+ */
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ ret = ttm_bo_mmap_obj(vma, bo);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * ttm has its own object refcounting, so drop gem reference
+ * to avoid double accounting.
+ */
+ drm_gem_object_put_unlocked(gem);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_ttm_mmap);
+
+MODULE_DESCRIPTION("DRM gem ttm helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index fd751078bae1..666cb4c22bb9 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
+#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -14,6 +19,11 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
*
* This library provides a GEM buffer object that is backed by video RAM
* (VRAM). It can be used for framebuffer devices with dedicated memory.
+ *
+ * The data structure &struct drm_vram_mm and its helpers implement a memory
+ * manager for simple framebuffer devices with dedicated video memory. Buffer
+ * objects are either placed in video RAM or evicted to system memory. The
+ * respective buffer object is provided by &struct drm_gem_vram_object.
*/
/*
@@ -26,6 +36,10 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
+
+ WARN_ON(gbo->kmap_use_count);
+ WARN_ON(gbo->kmap.virtual);
+
drm_gem_object_release(&gbo->bo.base);
}
@@ -47,6 +61,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
{
unsigned int i;
unsigned int c = 0;
+ u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
@@ -54,15 +69,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
if (pl_flag & TTM_PL_FLAG_VRAM)
gbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_VRAM |
+ invariant_flags;
if (pl_flag & TTM_PL_FLAG_SYSTEM)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
if (!c)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
gbo->placement.num_placement = c;
gbo->placement.num_busy_placement = c;
@@ -82,8 +100,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
int ret;
size_t acc_size;
- if (!gbo->bo.base.funcs)
- gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
+ gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
if (ret)
@@ -192,30 +209,12 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_offset);
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
+ unsigned long pl_flag)
{
int i, ret;
struct ttm_operation_ctx ctx = { false, false };
- ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
- return ret;
-
if (gbo->pin_count)
goto out;
@@ -227,62 +226,123 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
+ return ret;
out:
++gbo->pin_count;
- ttm_bo_unreserve(&gbo->bo);
return 0;
-
-err_ttm_bo_unreserve:
- ttm_bo_unreserve(&gbo->bo);
- return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
* @gbo: the GEM VRAM object
+ * @pl_flag: a bitmask of possible memory regions
+ *
+ * Pinning a buffer object ensures that it is not evicted from
+ * a memory region. A pinned buffer object has to be unpinned before
+ * it can be pinned to another region. If the pl_flag argument is 0,
+ * the buffer is pinned at its current location (video RAM or system
+ * memory).
+ *
+ * Small buffer objects, such as cursor images, can lead to memory
+ * fragmentation if they are pinned in the middle of video RAM. This
+ * is especially a problem on devices with only a small amount of
+ * video RAM. Fragmentation can prevent the primary framebuffer from
+ * fitting in, even though there's enough memory overall. The modifier
+ * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
+ * at the high end of the memory region to avoid fragmentation.
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
- int i, ret;
- struct ttm_operation_ctx ctx = { false, false };
+ int ret;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
+ if (ret)
return ret;
+ ret = drm_gem_vram_pin_locked(gbo, pl_flag);
+ ttm_bo_unreserve(&gbo->bo);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_pin);
+
+static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+{
+ int i, ret;
+ struct ttm_operation_ctx ctx = { false, false };
if (WARN_ON_ONCE(!gbo->pin_count))
- goto out;
+ return 0;
--gbo->pin_count;
if (gbo->pin_count)
- goto out;
+ return 0;
for (i = 0; i < gbo->placement.num_placement ; ++i)
gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
-
-out:
- ttm_bo_unreserve(&gbo->bo);
+ return ret;
return 0;
+}
-err_ttm_bo_unreserve:
+/**
+ * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * @gbo: the GEM VRAM object
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
+static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
+ bool map, bool *is_iomem)
+{
+ int ret;
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+
+ if (gbo->kmap_use_count > 0)
+ goto out;
+
+ if (kmap->virtual || !map)
+ goto out;
+
+ ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ if (ret)
+ return ERR_PTR(ret);
+
+out:
+ if (!kmap->virtual) {
+ if (is_iomem)
+ *is_iomem = false;
+ return NULL; /* not mapped; don't increment ref */
+ }
+ ++gbo->kmap_use_count;
+ if (is_iomem)
+ return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return kmap->virtual;
+}
+
/**
* drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
* @gbo: the GEM VRAM object
@@ -304,43 +364,121 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem)
{
int ret;
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
-
- if (kmap->virtual || !map)
- goto out;
+ void *virtual;
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ERR_PTR(ret);
+ virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
+ ttm_bo_unreserve(&gbo->bo);
-out:
- if (!is_iomem)
- return kmap->virtual;
- if (!kmap->virtual) {
- *is_iomem = false;
- return NULL;
- }
- return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);
+static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
+{
+ if (WARN_ON_ONCE(!gbo->kmap_use_count))
+ return;
+ if (--gbo->kmap_use_count > 0)
+ return;
+
+ /*
+ * Permanently mapping and unmapping buffers adds overhead from
+ * updating the page tables and creates debugging output. Therefore,
+ * we delay the actual unmap operation until the BO gets evicted
+ * from memory. See drm_gem_vram_bo_driver_move_notify().
+ */
+}
+
/**
* drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
* @gbo: the GEM VRAM object
*/
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+ int ret;
- if (!kmap->virtual)
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
return;
-
- ttm_bo_kunmap(kmap);
- kmap->virtual = NULL;
+ drm_gem_vram_kunmap_locked(gbo);
+ ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
/**
+ * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
+ * space
+ * @gbo: The GEM VRAM object to map
+ *
+ * The vmap function pins a GEM VRAM object to its current location, either
+ * system or video memory, and maps its buffer into kernel address space.
+ * As pinned object cannot be relocated, you should avoid pinning objects
+ * permanently. Call drm_gem_vram_vunmap() with the returned address to
+ * unmap and unpin the GEM VRAM object.
+ *
+ * If you have special requirements for the pinning or mapping operations,
+ * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
+ *
+ * Returns:
+ * The buffer's virtual address on success, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+ void *base;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_gem_vram_pin_locked(gbo, 0);
+ if (ret)
+ goto err_ttm_bo_unreserve;
+ base = drm_gem_vram_kmap_locked(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_drm_gem_vram_unpin_locked;
+ }
+
+ ttm_bo_unreserve(&gbo->bo);
+
+ return base;
+
+err_drm_gem_vram_unpin_locked:
+ drm_gem_vram_unpin_locked(gbo);
+err_ttm_bo_unreserve:
+ ttm_bo_unreserve(&gbo->bo);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_vram_vmap);
+
+/**
+ * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
+ * @gbo: The GEM VRAM object to unmap
+ * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
+ *
+ * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
+ * the documentation for drm_gem_vram_vmap() for more information.
+ */
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
+ return;
+
+ drm_gem_vram_kunmap_locked(gbo);
+ drm_gem_vram_unpin_locked(gbo);
+
+ ttm_bo_unreserve(&gbo->bo);
+}
+EXPORT_SYMBOL(drm_gem_vram_vunmap);
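
A sketch of the combined pin-and-map interface (foo_draw is hypothetical):

static int foo_draw(struct drm_gem_vram_object *gbo)
{
	void *vaddr;

	vaddr = drm_gem_vram_vmap(gbo);	/* pins and maps */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* access the buffer through vaddr */

	drm_gem_vram_vunmap(gbo, vaddr);	/* unmaps and unpins */

	return 0;
}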
+
+/**
* drm_gem_vram_fill_create_dumb() - \
Helper for implementing &struct drm_driver.dumb_create
* @file: the DRM file
@@ -410,59 +548,27 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
return (bo->destroy == ttm_buffer_object_destroy);
}
-/**
- * drm_gem_vram_bo_driver_evict_flags() - \
- Implements &struct ttm_bo_driver.evict_flags
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @pl: TTM placement information.
- */
-void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *pl)
+static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
+ struct ttm_placement *pl)
{
- struct drm_gem_vram_object *gbo;
-
- /* TTM may pass BOs that are not GEM VRAM BOs. */
- if (!drm_is_gem_vram(bo))
- return;
-
- gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
*pl = gbo->placement;
}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);
-/**
- * drm_gem_vram_bo_driver_verify_access() - \
- Implements &struct ttm_bo_driver.verify_access
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @filp: File pointer.
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
+static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
- return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
- filp->private_data);
-}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
+ if (WARN_ON_ONCE(gbo->kmap_use_count))
+ return;
-/*
- * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
- *
- * Most users of @struct drm_gem_vram_object will also use
- * @struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
- * can be used to connect both.
- */
-const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
- .evict_flags = drm_gem_vram_bo_driver_evict_flags,
- .verify_access = drm_gem_vram_bo_driver_verify_access
-};
-EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
+ if (!kmap->virtual)
+ return;
+ ttm_bo_kunmap(kmap);
+ kmap->virtual = NULL;
+}
/*
* Helpers for struct drm_gem_object_funcs
@@ -544,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+
+/**
+ * drm_gem_vram_plane_helper_prepare_fb() - \
+ * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * @plane: a DRM plane
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
+ if (!new_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+ }
+
+ return 0;
+
+err_drm_gem_vram_unpin:
+ while (i) {
+ --i;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
+
+/**
+ * drm_gem_vram_plane_helper_cleanup_fb() - \
+ * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * @plane: a DRM plane
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_plane_helper_prepare_fb().
+ */
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ if (!old_state->fb)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
+ if (!old_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+/**
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * @pipe: a simple display pipe
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
+{
+ return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
+
+/**
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * @pipe: a simple display pipe
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_simple_display_pipe_prepare_fb().
+ */
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
+
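
A sketch of how a simple-pipe driver wires these in (the foo_pipe_* callbacks are hypothetical):

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.enable     = foo_pipe_enable,
	.disable    = foo_pipe_disable,
	.update     = foo_pipe_update,
	.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
	.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};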
+/*
* PRIME helpers
*/
@@ -595,17 +824,11 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- int ret;
void *base;
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- return NULL;
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- drm_gem_vram_unpin(gbo);
+ base = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(base))
return NULL;
- }
return base;
}
@@ -620,8 +843,7 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, vaddr);
}
/*
@@ -633,5 +855,278 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.pin = drm_gem_vram_object_pin,
.unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
- .vunmap = drm_gem_vram_object_vunmap
+ .vunmap = drm_gem_vram_object_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
+/*
+ * VRAM memory manager
+ */
+
+/*
+ * TTM TT
+ */
+
+static void backend_func_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func backend_func = {
+ .destroy = backend_func_destroy
+};
+
+/*
+ * TTM BO device
+ */
+
+static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &backend_func;
+
+ ret = ttm_tt_init(tt, bo, page_flags);
+ if (ret < 0)
+ goto err_ttm_tt_init;
+
+ return tt;
+
+err_ttm_tt_init:
+ kfree(tt);
+ return NULL;
+}
+
+static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_evict_flags(gbo, placement);
+}
+
+static void bo_driver_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+}
+
+static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+
+ mem->bus.addr = NULL;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM: /* nothing to do */
+ mem->bus.offset = 0;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = vmm->vram_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{ }
+
+static struct ttm_bo_driver bo_driver = {
+ .ttm_tt_create = bo_driver_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bo_driver_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bo_driver_evict_flags,
+ .move_notify = bo_driver_move_notify,
+ .io_mem_reserve = bo_driver_io_mem_reserve,
+ .io_mem_free = bo_driver_io_mem_free,
+};
+
+/*
+ * struct drm_vram_mm
+ */
+
+#if defined(CONFIG_DEBUG_FS)
+static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ drm_mm_print(mm, &p);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ return 0;
+}
+
+static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+ { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
+#endif
+
+/**
+ * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
+ *
+ * @minor: drm minor device.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+{
+ int ret = 0;
+
+#if defined(CONFIG_DEBUG_FS)
+ ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
+
+static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
+ uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ vmm->vram_base = vram_base;
+ vmm->vram_size = vram_size;
+
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
+ true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
+{
+ ttm_bo_device_release(&vmm->bdev);
+}
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+/**
+ * drm_vram_helper_alloc_mm - Allocates a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ * @vram_base: the base address of the video memory
+ * @vram_size: the size of the video memory in bytes
+ *
+ * Returns:
+ * The new instance of &struct drm_vram_mm on success, or
+ * an ERR_PTR()-encoded errno code otherwise.
+ */
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ if (WARN_ON(dev->vram_mm))
+ return dev->vram_mm;
+
+ dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
+ if (!dev->vram_mm)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
+ if (ret)
+ goto err_kfree;
+
+ return dev->vram_mm;
+
+err_kfree:
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
+
+/**
+ * drm_vram_helper_release_mm - Releases a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ */
+void drm_vram_helper_release_mm(struct drm_device *dev)
+{
+ if (!dev->vram_mm)
+ return;
+
+ drm_vram_mm_cleanup(dev->vram_mm);
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+}
+EXPORT_SYMBOL(drm_vram_helper_release_mm);
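
A sketch of the intended use from a driver's load/unload paths, assuming a PCI device whose BAR 0 holds the video memory (foo_* names hypothetical):

static int foo_load(struct drm_device *dev, struct pci_dev *pdev)
{
	struct drm_vram_mm *vmm;

	vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (IS_ERR(vmm))
		return PTR_ERR(vmm);

	return 0;
}

static void foo_unload(struct drm_device *dev)
{
	drm_vram_helper_release_mm(dev);
}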
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0bec6dbb0142..fbea69d6f909 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -40,6 +40,7 @@
#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
+#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 1961f713aaab..e34058c721be 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -783,7 +783,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc,
int i, ret;
u8 *dst;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -907,7 +907,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
max_chunk = dbi->tx_buf9_len;
dst16 = dbi->tx_buf9;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -955,7 +955,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
int ret;
if (mipi_dbi_command_is_read(dbi, *cmd))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
@@ -1021,7 +1021,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
unsigned int i;
for (i = 0; i < len; i++)
- data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ data[i] = (buf[i] << 1) | (buf[i + 1] >> 7);
}
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
@@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
struct mipi_dbi_dev *dbidev = m->private;
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
- unsigned int i;
- int ret, idx;
+ int i, ret, idx;
if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 4581c5387372..2a6e34663146 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
node->__subtree_last = LAST(node);
- if (hole_node->allocated) {
+ if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
node->hole_size = 0;
rm_hole(hole);
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
rm_hole(hole);
if (adj_start > hole_start)
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+ return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+}
+
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
@@ -584,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = false;
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
+
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
@@ -605,10 +611,11 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
struct drm_mm *mm = old->mm;
- DRM_MM_BUG_ON(!old->allocated);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
*new = *old;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
list_replace(&old->node_list, &new->node_list);
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
@@ -622,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
&mm->holes_addr);
}
- old->allocated = false;
- new->allocated = true;
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -731,9 +737,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = true;
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+ __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
@@ -818,8 +824,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
- DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = false;
+ DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
+ __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
@@ -917,7 +923,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = false;
+ mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 7bc03c3c154f..3b570a404933 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -428,8 +428,6 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 43d89dd59c6b..0ca58803ba46 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -250,11 +250,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
- if (!of_device_is_available(remote)) {
- of_node_put(remote);
- return -ENODEV;
- }
-
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 6b0bf42039cf..ed7985c0535a 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -44,13 +44,21 @@ static LIST_HEAD(panel_list);
/**
* drm_panel_init - initialize a panel
* @panel: DRM panel
+ * @dev: parent device of the panel
+ * @funcs: panel operations
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
*
- * Sets up internal fields of the panel so that it can subsequently be added
- * to the registry.
+ * Initialize the panel structure for subsequent registration with
+ * drm_panel_add().
*/
-void drm_panel_init(struct drm_panel *panel)
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
+ panel->dev = dev;
+ panel->funcs = funcs;
+ panel->connector_type = connector_type;
}
EXPORT_SYMBOL(drm_panel_init);
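
A sketch of the extended initializer in a panel driver's probe (foo_* names hypothetical); the fields previously assigned by hand are now passed in:

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	struct foo_panel *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	drm_panel_init(&ctx->panel, &dsi->dev, &foo_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	return drm_panel_add(&ctx->panel);
}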
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0a2316e0e812..0814211b0f3f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -713,6 +713,15 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct file *fil;
int ret;
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret)
+ return ret;
+ vma->vm_private_data = obj;
+ drm_gem_object_get(obj);
+ return 0;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
fil = kzalloc(sizeof(*fil), GFP_KERNEL);
if (!priv || !fil) {
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index a17c8a14dba4..9a25d73c155c 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -28,6 +28,7 @@
#include <stdarg.h>
#include <linux/io.h>
+#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -35,6 +36,24 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+/*
+ * drm_debug: Enable debug output.
+ * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
+ */
+unsigned int drm_debug;
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
+module_param_named(debug, drm_debug, int, 0600);
+
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
struct drm_print_iterator *iterator = p->arg;
@@ -147,6 +166,12 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_debug);
+void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ pr_err("*ERROR* %s %pV", p->prefix, vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_err);
+
/**
* drm_puts - print a const string to a &drm_printer stream
* @p: the &drm printer
@@ -179,6 +204,37 @@ void drm_printf(struct drm_printer *p, const char *f, ...)
}
EXPORT_SYMBOL(drm_printf);
+/**
+ * drm_print_bits - print bits to a &drm_printer stream
+ *
+ * Print bits (in flag fields for example) in human readable form.
+ *
+ * @p: the &drm_printer
+ * @value: field value.
+ * @bits: Array with bit names.
+ * @nbits: Size of bit names array.
+ */
+void drm_print_bits(struct drm_printer *p, unsigned long value,
+ const char * const bits[], unsigned int nbits)
+{
+ bool first = true;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(nbits > BITS_PER_TYPE(value)))
+ nbits = BITS_PER_TYPE(value);
+
+ for_each_set_bit(i, &value, nbits) {
+ if (WARN_ON_ONCE(!bits[i]))
+ continue;
+ drm_printf(p, "%s%s", first ? "" : ",",
+ bits[i]);
+ first = false;
+ }
+ if (first)
+ drm_printf(p, "(none)");
+}
+EXPORT_SYMBOL(drm_print_bits);
+
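
A sketch of a caller (names hypothetical): the bits array maps bit positions to labels, and unnamed set bits are skipped with a one-time warning:

static void foo_print_flags(struct drm_printer *p, unsigned long flags)
{
	static const char * const names[] = {
		[0] = "pinned",
		[1] = "mapped",
		[2] = "imported",
	};

	drm_print_bits(p, flags, names, ARRAY_SIZE(names));
	drm_printf(p, "\n");	/* prints e.g. "pinned,imported" or "(none)" */
}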
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...)
{
@@ -206,7 +262,7 @@ void drm_dev_dbg(const struct device *dev, unsigned int category,
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
@@ -229,7 +285,7 @@ void drm_dbg(unsigned int category, const char *format, ...)
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ef2c468205a2..a7c87abe88d0 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -92,7 +93,6 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_device *dev = connector->dev;
enum drm_mode_status ret = MODE_OK;
struct drm_encoder *encoder;
- int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -100,7 +100,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
struct drm_crtc *crtc;
ret = drm_encoder_mode_valid(encoder, mode);
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index b11910f14c46..15fb516ae2d8 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -42,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
/* Anything goes */
return MODE_OK;
- return pipe->funcs->mode_valid(crtc, mode);
+ return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4b5c7b0ed714..669c93fe2500 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -135,6 +135,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
#include "drm_internal.h"
@@ -1279,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags != 0)
return -EINVAL;
if (args->count_handles == 0)
@@ -1350,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
return -EINVAL;
if (args->count_handles == 0)
@@ -1371,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
fence = drm_syncobj_fence_get(syncobjs[i]);
chain = to_dma_fence_chain(fence);
if (chain) {
- struct dma_fence *iter, *last_signaled = NULL;
-
- dma_fence_chain_for_each(iter, fence) {
- if (iter->context != fence->context) {
- dma_fence_put(iter);
- /* It is most likely that timeline has
- * unorder points. */
- break;
+ struct dma_fence *iter, *last_signaled =
+ dma_fence_get(fence);
+
+ if (args->flags &
+ DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
+ point = fence->seqno;
+ } else {
+ dma_fence_chain_for_each(iter, fence) {
+ if (iter->context != fence->context) {
+ dma_fence_put(iter);
+ /* It is most likely that the timeline has
+ * unordered points. */
+ break;
+ }
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
}
- dma_fence_put(last_signaled);
- last_signaled = dma_fence_get(iter);
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
}
- point = dma_fence_is_signaled(last_signaled) ?
- last_signaled->seqno :
- to_dma_fence_chain(last_signaled)->prev_seqno;
dma_fence_put(last_signaled);
} else {
point = 0;
}
+ dma_fence_put(fence);
ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
ret = ret ? -EFAULT : 0;
if (ret)
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 471eb927474b..11c6dd577e8e 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -13,17 +13,23 @@ struct drm_file;
#define TRACE_INCLUDE_FILE drm_trace
TRACE_EVENT(drm_vblank_event,
- TP_PROTO(int crtc, unsigned int seq),
- TP_ARGS(crtc, seq),
+ TP_PROTO(int crtc, unsigned int seq, ktime_t time, bool high_prec),
+ TP_ARGS(crtc, seq, time, high_prec),
TP_STRUCT__entry(
__field(int, crtc)
__field(unsigned int, seq)
+ __field(ktime_t, time)
+ __field(bool, high_prec)
),
TP_fast_assign(
__entry->crtc = crtc;
__entry->seq = seq;
- ),
- TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
+ __entry->time = time;
+ __entry->high_prec = high_prec;
+ ),
+ TP_printk("crtc=%d, seq=%u, time=%lld, high-prec=%s",
+ __entry->crtc, __entry->seq, __entry->time,
+ __entry->high_prec ? "true" : "false")
);
TRACE_EVENT(drm_vblank_event_queued,
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index fd1fbc77871f..1659b13b178c 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -106,7 +106,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
write_seqlock(&vblank->seqlock);
vblank->time = t_vblank;
- vblank->count += vblank_count_inc;
+ atomic64_add(vblank_count_inc, &vblank->count);
write_sequnlock(&vblank->seqlock);
}
@@ -272,7 +272,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, vblank->count, diff, cur_vblank, vblank->last);
+ pipe, atomic64_read(&vblank->count), diff,
+ cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -294,11 +295,23 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ u64 count;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- return vblank->count;
+ count = atomic64_read(&vblank->count);
+
+ /*
+ * This read barrier corresponds to the implicit write barrier of the
+ * write seqlock in store_vblank(). Note that this is the only place
+ * where we need an explicit barrier, since all other access goes
+ * through drm_vblank_count_and_time(), which already has the required
+ * read barrier courtesy of the read seqlock.
+ */
+ smp_rmb();
+
+ return count;
}
/**
@@ -319,7 +332,7 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -693,7 +706,7 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
*/
*vblank_time = ktime_sub_ns(etime, delta_ns);
- if ((drm_debug & DRM_UT_VBL) == 0)
+ if (!drm_debug_enabled(DRM_UT_VBL))
return true;
ts_etime = ktime_to_timespec64(etime);
@@ -763,6 +776,14 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* vblank interrupt (since it only reports the software vblank counter), see
* drm_crtc_accurate_vblank_count() for such use-cases.
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* The software vblank counter.
*/
@@ -800,7 +821,7 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
do {
seq = read_seqbegin(&vblank->seqlock);
- vblank_count = vblank->count;
+ vblank_count = atomic64_read(&vblank->count);
*vblanktime = vblank->time;
} while (read_seqretry(&vblank->seqlock, seq));
@@ -817,6 +838,14 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
*/
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime)
@@ -1323,7 +1352,7 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
assert_spin_locked(&dev->vblank_time_lock);
vblank = &dev->vblank[pipe];
- WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
framedur_ns = vblank->framedur_ns;
@@ -1581,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@@ -1731,7 +1760,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq);
+ trace_drm_vblank_event(pipe, seq, now,
+ dev->driver->get_vblank_timestamp != NULL);
}
/**
@@ -1806,6 +1836,14 @@ EXPORT_SYMBOL(drm_handle_vblank);
*
* This is the native KMS version of drm_handle_vblank().
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* True if the event was successfully handled, false on failure.
*/
@@ -1838,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
if (!crtc)
@@ -1896,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
if (!crtc)
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
index e9c9f9a80ba3..2000d9b33fd5 100644
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ b/drivers/gpu/drm/drm_vram_helper_common.c
@@ -7,9 +7,8 @@
*
* This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
* buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM can be
- * managed with &struct drm_vram_mm (VRAM MM). Both data structures are
- * supposed to be used together, but can also be used individually.
+ * framebuffer devices with dedicated memory. The video RAM is managed
+ * by &struct drm_vram_mm (VRAM MM).
*
* With the GEM interface userspace applications create, manage and destroy
* graphics buffers, such as an on-screen framebuffer. GEM does not provide
@@ -50,8 +49,7 @@
* // setup device, vram base and size
* // ...
*
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
- * &drm_gem_vram_mm_funcs);
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
* if (ret)
* return ret;
* return 0;
diff --git a/drivers/gpu/drm/drm_vram_mm_helper.c b/drivers/gpu/drm/drm_vram_mm_helper.c
deleted file mode 100644
index c911781d6728..000000000000
--- a/drivers/gpu/drm/drm_vram_mm_helper.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_vram_mm_helper.h>
-
-#include <drm/ttm/ttm_page_alloc.h>
-
-/**
- * DOC: overview
- *
- * The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. These
- * helper functions work well with &struct drm_gem_vram_object.
- */
-
-/*
- * TTM TT
- */
-
-static void backend_func_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func backend_func = {
- .destroy = backend_func_destroy
-};
-
-/*
- * TTM BO device
- */
-
-static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
- int ret;
-
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
- return NULL;
-
- tt->func = &backend_func;
-
- ret = ttm_tt_init(tt, bo, page_flags);
- if (ret < 0)
- goto err_ttm_tt_init;
-
- return tt;
-
-err_ttm_tt_init:
- kfree(tt);
- return NULL;
-}
-
-static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (vmm->funcs && vmm->funcs->evict_flags)
- vmm->funcs->evict_flags(bo, placement);
-}
-
-static int bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (!vmm->funcs || !vmm->funcs->verify_access)
- return 0;
- return vmm->funcs->verify_access(bo, filp);
-}
-
-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
-
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- mem->bus.addr = NULL;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
-
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM: /* nothing to do */
- mem->bus.offset = 0;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- break;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = vmm->vram_base;
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{ }
-
-static struct ttm_bo_driver bo_driver = {
- .ttm_tt_create = bo_driver_ttm_tt_create,
- .ttm_tt_populate = ttm_pool_populate,
- .ttm_tt_unpopulate = ttm_pool_unpopulate,
- .init_mem_type = bo_driver_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = bo_driver_evict_flags,
- .verify_access = bo_driver_verify_access,
- .io_mem_reserve = bo_driver_io_mem_reserve,
- .io_mem_free = bo_driver_io_mem_free,
-};
-
-/*
- * struct drm_vram_mm
- */
-
-/**
- * drm_vram_mm_init() - Initialize an instance of VRAM MM.
- * @vmm: the VRAM MM instance to initialize
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
- uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- vmm->vram_base = vram_base;
- vmm->vram_size = vram_size;
- vmm->funcs = funcs;
-
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret)
- return ret;
-
- ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_vram_mm_init);
-
-/**
- * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
- * @vmm: the VRAM MM instance to clean up
- */
-void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
-{
- ttm_bo_device_release(&vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_cleanup);
-
-/**
- * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- * @vmm: the VRAM MM instance
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm)
-{
- return ttm_bo_mmap(filp, vma, &vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_mmap);
-
-/*
- * Helpers for integration with struct drm_device
- */
-
-/**
- * drm_vram_helper_alloc_mm - Allocates a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * The new instance of &struct drm_vram_mm on success, or
- * an ERR_PTR()-encoded errno code otherwise.
- */
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- if (WARN_ON(dev->vram_mm))
- return dev->vram_mm;
-
- dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
- if (!dev->vram_mm)
- return ERR_PTR(-ENOMEM);
-
- ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
- if (ret)
- goto err_kfree;
-
- return dev->vram_mm;
-
-err_kfree:
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
-
-/**
- * drm_vram_helper_release_mm - Releases a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- */
-void drm_vram_helper_release_mm(struct drm_device *dev)
-{
- if (!dev->vram_mm)
- return;
-
- drm_vram_mm_cleanup(dev->vram_mm);
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
-}
-EXPORT_SYMBOL(drm_vram_helper_release_mm);
-
-/*
- * Helpers for &struct file_operations
- */
-
-/**
- * drm_vram_mm_file_operations_mmap() - \
- Implements &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
-
- if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
- return -EINVAL;
-
- return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
-}
-EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 7e4e2959bf4f..32d9fac587f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -326,7 +326,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
lockdep_assert_held(&gpu->lock);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
link_target = etnaviv_cmdbuf_get_va(cmdbuf,
@@ -459,13 +459,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
return_target,
etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
cmdbuf->vaddr, cmdbuf->size, 0);
@@ -484,6 +484,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
link_target);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3a0f0ba8c63a..1e6aa24bf45e 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -19,6 +19,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6926cee91b36..72726f2c7a9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -24,6 +24,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b78e8c5ba553..f41d75923557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index bc1565f1822a..48159d5d2214 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <media/cec-notifier.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -852,6 +853,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
static void hdmi_connector_destroy(struct drm_connector *connector)
{
+ struct hdmi_context *hdata = connector_to_hdmi(connector);
+
+ cec_notifier_conn_unregister(hdata->notifier);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -935,6 +940,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
+ struct cec_connector_info conn_info;
int ret;
connector->interlace_allowed = true;
@@ -957,6 +963,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
+ &conn_info);
+ if (!hdata->notifier) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n");
+ }
+
return ret;
}
@@ -1528,8 +1543,8 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
mutex_unlock(&hdata->mutex);
cancel_delayed_work(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier,
- CEC_PHYS_ADDR_INVALID);
+ if (hdata->notifier)
+ cec_notifier_phys_addr_invalidate(hdata->notifier);
return;
}
@@ -2006,12 +2021,6 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
- hdata->notifier = cec_notifier_get(&pdev->dev);
- if (hdata->notifier == NULL) {
- ret = -ENOMEM;
- goto err_hdmiphy;
- }
-
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2023,7 +2032,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_register_audio_device(hdata);
if (ret)
- goto err_notifier_put;
+ goto err_rpm_disable;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
@@ -2034,8 +2043,7 @@ static int hdmi_probe(struct platform_device *pdev)
err_unregister_audio:
platform_device_unregister(hdata->audio.pdev);
-err_notifier_put:
- cec_notifier_put(hdata->notifier);
+err_rpm_disable:
pm_runtime_disable(dev);
err_hdmiphy:
@@ -2054,12 +2062,10 @@ static int hdmi_remove(struct platform_device *pdev)
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
platform_device_unregister(hdata->audio.pdev);
- cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
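Taken together, the exynos_hdmi hunks above move the CEC notifier to a connector-scoped lifetime. A rough sketch of that flow, using the real cec-notifier API but schematic driver plumbing (the sketch_* helpers and the hdmi_context layout are assumptions):

	#include <media/cec-notifier.h>

	/* Register once the connector exists ... */
	static int sketch_register_cec(struct hdmi_context *hdata,
				       struct drm_connector *connector)
	{
		struct cec_connector_info conn_info;

		cec_fill_conn_info_from_drm(&conn_info, connector);
		hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
							     &conn_info);
		return hdata->notifier ? 0 : -ENOMEM;
	}

	/* ... on hotplug-out, only invalidate the physical address ... */
	static void sketch_hpd_gone(struct hdmi_context *hdata)
	{
		if (hdata->notifier)
			cec_notifier_phys_addr_invalidate(hdata->notifier);
	}

	/* ... and unregister together with the connector. */
	static void sketch_connector_destroy(struct hdmi_context *hdata)
	{
		cec_notifier_conn_unregister(hdata->notifier);
	}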
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7b24338fad3c..6cfdb95fef2f 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
int width = mode->hdisplay, height = mode->vdisplay, i;
- struct {
+ static const struct {
int hdisplay, vdisplay, htotal, vtotal, scan_val;
- } static const modes[] = {
+ } modes[] = {
{ 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
{ 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
{ 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index a92fd6c70b09..82c972e9c024 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index f56852a503e8..8b784947ed3b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock));
+
switch (refclk) {
case 27000:
if (target < 200000) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 03023fa0fb6f..f350ac1ead18 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
return;
}
- /*create a new connetor*/
+ /*create a new connector*/
dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
if (!dsi_connector) {
DRM_ERROR("No memory");
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 167c10767dd4..900e5499249d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 35a3c5f0c38c..dfc5aef62f7b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -4,7 +4,8 @@ config DRM_HISI_HIBMC
depends on DRM && PCI && MMU && ARM64
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
-
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index cc4c41748cfb..6527a97f68a3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
u32 reg;
- int ret;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
@@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
hibmc_fb = to_hibmc_framebuffer(state->fb);
gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin bo: %d", ret);
- return;
- }
gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- return;
- }
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
@@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = {
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};
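For reference, roughly what a prepare_fb/cleanup_fb pair of this kind does: pin the BO into VRAM for the lifetime of the scanout so atomic_update can rely on a valid offset. A hedged sketch of what the generic drm_gem_vram_plane_helper_* callbacks wired up above are expected to encapsulate:

	#include <drm/drm_gem_vram_helper.h>

	static int sketch_prepare_fb(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
	{
		struct drm_gem_vram_object *gbo;

		if (!new_state->fb)
			return 0;
		/* Pin to VRAM; kept pinned until cleanup_fb. */
		gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
		return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
	}

	static void sketch_cleanup_fb(struct drm_plane *plane,
				      struct drm_plane_state *old_state)
	{
		struct drm_gem_vram_object *gbo;

		if (!old_state->fb)
			return;
		gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
		drm_gem_vram_unpin(gbo);
	}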
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index c103005b0a33..2fd4ca91a62d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -22,15 +22,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-static const struct file_operations hibmc_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 9f6e473e6295..21b684eab5c9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -17,7 +17,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_print.h>
-#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
@@ -29,7 +28,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
vmm = drm_vram_helper_alloc_mm(dev,
pci_resource_start(dev->pdev, 0),
- hibmc->fb_size, &drm_gem_vram_mm_funcs);
+ hibmc->fb_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5bf8138941de..bdcf9c6ae9e9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 8bcf0d199145..a839f78a4c8a 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -44,7 +44,7 @@ struct sil164_priv {
((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
#define sil164_dbg(client, format, ...) do { \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
dev_printk(KERN_DEBUG, &client->dev, \
"%s: " format, __func__, ## __VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 8039fc0d83db..5b03fdd1eaa4 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -420,7 +420,8 @@ static int tda9950_probe(struct i2c_client *client,
priv->hdmi = glue->parent;
priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
- CEC_CAP_DEFAULTS,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(priv->adap))
return PTR_ERR(priv->adap);
@@ -457,13 +458,14 @@ static int tda9950_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- priv->notify = cec_notifier_get(priv->hdmi);
+ priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL,
+ priv->adap);
if (!priv->notify)
return -ENOMEM;
ret = cec_register_adapter(priv->adap, priv->hdmi);
if (ret < 0) {
- cec_notifier_put(priv->notify);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
return ret;
}
@@ -473,8 +475,6 @@ static int tda9950_probe(struct i2c_client *client,
*/
devm_remove_action(dev, tda9950_cec_del, priv);
- cec_register_cec_notifier(priv->adap, priv->notify);
-
return 0;
}
@@ -482,8 +482,8 @@ static int tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
- cec_notifier_put(priv->notify);
return 0;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 84c6d4c91c65..a63790d32d75 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -14,6 +14,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -805,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
tda998x_edid_delay_start(priv);
} else {
schedule_work(&priv->detect_work);
- cec_notifier_set_phys_addr(priv->cec_notify,
- CEC_PHYS_ADDR_INVALID);
+ cec_notifier_phys_addr_invalidate(
+ priv->cec_notify);
}
handled = true;
@@ -1790,8 +1791,7 @@ static void tda998x_destroy(struct device *dev)
i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
+ cec_notifier_conn_unregister(priv->cec_notify);
}
static int tda998x_create(struct device *dev)
@@ -1916,7 +1916,7 @@ static int tda998x_create(struct device *dev)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(dev);
+ priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2a77823b8e9a..e66c38332df4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
@@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
sarea_priv->dirty = 0x7f;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 0d21402945ab..ba9595960bbe 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -76,7 +76,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang to
- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
for triaging.
If in doubt, say "Y".
@@ -105,11 +105,11 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
- bool "Enable Intel GVT-g graphics virtualization host support"
- depends on DRM_I915
- depends on 64BIT
- default n
- help
+ bool "Enable Intel GVT-g graphics virtualization host support"
+ depends on DRM_I915
+ depends on 64BIT
+ default n
+ help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
@@ -148,3 +148,9 @@ menu "drm/i915 Profile Guided Optimisation"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.profile"
endmenu
+
+menu "drm/i915 Unstable Evolution"
+ visible if EXPERT && STAGING && BROKEN
+ depends on DRM_I915
+ source "drivers/gpu/drm/i915/Kconfig.unstable"
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 00786a142ff0..0b1f786a7ce9 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -1,34 +1,33 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_I915_WERROR
- bool "Force GCC to throw an error instead of a warning when compiling"
- # As this may inadvertently break the build, only allow the user
- # to shoot oneself in the foot iff they aim really hard
- depends on EXPERT
- # We use the dependency on !COMPILE_TEST to not be enabled in
- # allmodconfig or allyesconfig configurations
- depends on !COMPILE_TEST
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
select HEADER_TEST
- default n
- help
- Add -Werror to the build flags for (and only for) i915.ko.
- Do not enable this unless you are writing code for the i915.ko module.
+ default n
+ help
+ Add -Werror to the build flags for (and only for) i915.ko.
+ Do not enable this unless you are writing code for the i915.ko module.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG
- bool "Enable additional driver debugging"
- depends on DRM_I915
- select DEBUG_FS
- select PREEMPT_COUNT
- select REFCOUNT_FULL
- select I2C_CHARDEV
- select STACKDEPOT
- select DRM_DP_AUX_CHARDEV
- select X86_MSR # used by igt/pm_rpm
- select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
- select DRM_DEBUG_MM if DRM=y
+ bool "Enable additional driver debugging"
+ depends on DRM_I915
+ select DEBUG_FS
+ select PREEMPT_COUNT
+ select I2C_CHARDEV
+ select STACKDEPOT
+ select DRM_DP_AUX_CHARDEV
+ select X86_MSR # used by igt/pm_rpm
+ select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
@@ -36,14 +35,14 @@ config DRM_I915_DEBUG
select DRM_I915_SELFTEST
select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_DEBUG_MMIO
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_MMIO
bool "Always insert extra checks around mmio access by default"
@@ -59,16 +58,16 @@ config DRM_I915_DEBUG_MMIO
If in doubt, say "N".
config DRM_I915_DEBUG_GEM
- bool "Insert extra checks into the GEM internals"
- default n
- depends on DRM_I915_WERROR
- help
- Enable extra sanity checks (including BUGs) along the GEM driver
- paths that may slow the system down and if hit hang the machine.
+ bool "Insert extra checks into the GEM internals"
+ default n
+ depends on DRM_I915_WERROR
+ help
+ Enable extra sanity checks (including BUGs) along the GEM driver
+ paths that may slow the system down and if hit hang the machine.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
@@ -111,41 +110,41 @@ config DRM_I915_TRACE_GTT
If in doubt, say "N".
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
- bool "Enable additional driver debugging for fence objects"
- depends on DRM_I915
- select DEBUG_OBJECTS
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for fence objects"
+ depends on DRM_I915
+ select DEBUG_OBJECTS
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SW_FENCE_CHECK_DAG
- bool "Enable additional driver debugging for detecting dependency cycles"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for detecting dependency cycles"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_GUC
- bool "Enable additional driver debugging for GuC"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will help resolve GuC related issues.
+ bool "Enable additional driver debugging for GuC"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will help resolve GuC related issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
@@ -178,15 +177,15 @@ config DRM_I915_SELFTEST_BROKEN
If in doubt, say "N".
config DRM_I915_LOW_LEVEL_TRACEPOINTS
- bool "Enable low level request tracing events"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on low level request tracing events.
- This provides the ability to precisely monitor engine utilisation
- and also analyze the request dependency resolving timeline.
-
- If in doubt, say "N".
+ bool "Enable low level request tracing events"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on low level request tracing events.
+ This provides the ability to precisely monitor engine utilisation
+ and also analyze the request dependency resolving timeline.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG_VBLANK_EVADE
bool "Enable extra debug warnings for vblank evasion"
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 48df8889a88a..1799537a3228 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -12,6 +12,29 @@ config DRM_I915_USERFAULT_AUTOSUSPEND
May be 0 to disable the extra delay and solely use the device level
runtime pm autosuspend delay tunable.
+config DRM_I915_HEARTBEAT_INTERVAL
+ int "Interval between heartbeat pulses (ms)"
+ default 2500 # milliseconds
+ help
+ The driver sends a periodic heartbeat down all active engines to
+ check the health of the GPU and undertake regular house-keeping of
+ internal driver state.
+
+ May be 0 to disable heartbeats and therefore disable automatic GPU
+ hang detection.
+
+config DRM_I915_PREEMPT_TIMEOUT
+ int "Preempt timeout (ms, jiffy granularity)"
+ default 100 # milliseconds
+ help
+ How long to wait (in milliseconds) for a preemption event to occur
+ when submitting a new context via execlists. If the current context
+ does not hit an arbitration point and yield to HW before the timer
+ expires, the HW will be reset to allow the more important context
+ to execute.
+
+ May be 0 to disable the timeout.
+
config DRM_I915_SPIN_REQUEST
int "Busywait for request completion (us)"
default 5 # microseconds
@@ -25,3 +48,29 @@ config DRM_I915_SPIN_REQUEST
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
+
+config DRM_I915_STOP_TIMEOUT
+ int "How long to wait for an engine to quiesce gracefully before reset (ms)"
+ default 100 # milliseconds
+ help
+ By stopping submission and sleeping for a short time before resetting
+ the GPU, we allow the other, innocent contexts on the system to quiesce.
+ It is then less likely for a hanging context to cause collateral
+ damage as the system is reset in order to recover. The corollary is
+ that the reset itself may take longer and so be more disruptive to
+ interactive or low latency workloads.
+
+config DRM_I915_TIMESLICE_DURATION
+ int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
+ default 1 # milliseconds
+ help
+ When two user batches of equal priority are executing, we will
+ alternate execution of each batch to ensure forward progress of
+ all users. This is necessary in some cases where there may be
+ an implicit dependency between those batches that requires
+ concurrent execution in order for them to proceed, e.g. they
+ interact with each other via userspace semaphores. Each context
+ is scheduled for execution for the timeslice duration, before
+ switching to the next context.
+
+ May be 0 to disable timeslicing.
diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable
new file mode 100644
index 000000000000..0c2276155c2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.unstable
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_I915_UNSTABLE
+ bool "Enable unstable API for early prototype development"
+ depends on EXPERT
+ depends on STAGING
+ depends on BROKEN # should never be enabled by distros!
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Enable prototype uAPI under general discussion before they are
+ finalized. Such prototypes may be withdrawn or substantially
+ changed before release. They are only enabled here so that a wide
+ number of interested parties (userspace driver developers) can
+ verify that the uAPI meet their expectations. These uAPI should
+ never be used in production.
+
+ Recommended for driver developers _only_.
+
+ If in the slightest bit of doubt, say "N".
+
+config DRM_I915_UNSTABLE_FAKE_LMEM
+ bool "Enable the experimental fake lmem"
+ depends on DRM_I915_UNSTABLE
+ default n
+ help
+ Convert some system memory into a fake local memory region for
+ testing.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2587ea834f06..90dcf09f52cc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,10 +46,12 @@ i915-y += i915_drv.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
+ i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_csr.o \
intel_device_info.o \
+ intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
@@ -76,19 +78,24 @@ gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
- gt/intel_engine_pool.o \
+ gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
+ gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
- gt/intel_hangcheck.o \
+ gt/intel_gt_requests.o \
+ gt/intel_llc.o \
gt/intel_lrc.o \
+ gt/intel_mocs.o \
+ gt/intel_rc6.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
- gt/intel_ringbuffer.o \
- gt/intel_mocs.o \
+ gt/intel_ring.o \
+ gt/intel_ring_submission.o \
+ gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
gt/intel_workarounds.o
@@ -114,10 +121,12 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+ gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+ gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
@@ -141,6 +150,7 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
+ intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -172,6 +182,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_dpio_phy.o \
display/intel_dpll_mgr.o \
+ display/intel_dsb.o \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
@@ -182,7 +193,8 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
- display/intel_tc.o
+ display/intel_tc.o \
+ display/intel_vga.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -235,7 +247,8 @@ i915-y += \
oa/i915_oa_cflgt2.o \
oa/i915_oa_cflgt3.o \
oa/i915_oa_cnl.o \
- oa/i915_oa_icl.o
+ oa/i915_oa_icl.o \
+ oa/i915_oa_tgl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 6e398c33a524..325df29b0447 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1584,7 +1584,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->type = INTEL_OUTPUT_DSI;
encoder->cloneable = 0;
- encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index d3fb75bb9eb1..c2875b10adf9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -199,8 +199,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
- crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
+ crtc_state->preload_luts = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
@@ -264,10 +264,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
*/
mode = PS_SCALER_MODE_NORMAL;
} else {
+ struct intel_plane *linked =
+ plane_state->planar_linked_plane;
+
mode = PS_SCALER_MODE_PLANAR;
- if (plane_state->linked_plane)
- mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ if (linked)
+ mode |= PS_PLANE_Y_SEL(linked->id);
}
} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
mode = PS_SCALER_MODE_NORMAL;
@@ -371,6 +374,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
if (!plane) {
struct drm_plane_state *state;
+
+ /*
+ * GLK+ scalers don't have an HQ mode, so it
+ * isn't necessary to switch between HQ and dynamic mode
+ * on those platforms.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ continue;
+
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
@@ -378,13 +390,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane->base.id);
return PTR_ERR(state);
}
-
- /*
- * the plane is added after plane checks are run,
- * but since this plane is unchanged just do the
- * minimum required validation.
- */
- crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);
@@ -425,6 +430,13 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = state->modeset = false;
+ state->global_state_changed = false;
+ state->active_pipes = 0;
+ memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
+ memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
+ memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
+ memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
+ state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -438,3 +450,40 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+
+int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ return 0;
+}
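A hedged note on the pair above: intel_atomic_lock_global_state() takes every CRTC lock so a commit touching device-global state (for example cdclk.force_min_cdclk) cannot race with another commit, while intel_atomic_serialize_global_state() additionally pulls every CRTC's state into this commit. Typical caller shape, mirroring the audio hunk later in this section:

	/* Illustrative only: guard a global-state update during a commit. */
	ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
	if (!ret)
		ret = drm_atomic_commit(state);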
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 58065d3161a3..49d5cb1b9e0a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -16,6 +16,7 @@ struct drm_crtc_state;
struct drm_device;
struct drm_i915_private;
struct drm_property;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -46,4 +47,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
+int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d1fcdf206da4..98f557a9f8ee 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -138,18 +138,58 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
return cpp * crtc_state->pixel_rate;
}
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc_state *crtc_state;
+
+ if (!plane_state->base.visible || !plane->min_cdclk)
+ return false;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(crtc_state, plane_state);
+
+ /*
+ * Does the cdclk need to be bumped up?
+ *
+ * Note: we obviously need to be called before the new
+ * cdclk frequency is calculated so state->cdclk.logical
+ * hasn't been populated yet. Hence we look at the old
+ * cdclk state under dev_priv->cdclk.logical. This is
+ * safe as long as we hold at least one crtc mutex (which
+ * must be true since we have crtc_state).
+ */
+ if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id],
+ dev_priv->cdclk.logical.cdclk);
+ return true;
+ }
+
+ return false;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
+ const struct drm_framebuffer *fb = new_plane_state->base.fb;
int ret;
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_crtc_state->data_rate[plane->id] = 0;
+ new_crtc_state->min_cdclk[plane->id] = 0;
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -164,11 +204,11 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- is_planar_yuv_format(new_plane_state->base.fb->format->format))
+ drm_format_info_is_yuv_semiplanar(fb->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
if (new_plane_state->base.visible || old_plane_state->base.visible)
@@ -194,14 +234,11 @@ get_crtc_from_states(const struct intel_plane_state *old_plane_state,
return NULL;
}
-static int intel_plane_atomic_check(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_atomic_state *state =
- to_intel_atomic_state(_new_plane_state->state);
struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
+ intel_atomic_get_new_plane_state(state, plane);
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct intel_crtc *crtc =
@@ -320,9 +357,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
if (new_plane_state->base.visible) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else if (new_plane_state->slave) {
+ } else if (new_plane_state->planar_slave) {
struct intel_plane *master =
- new_plane_state->linked_plane;
+ new_plane_state->planar_linked_plane;
/*
* We update the slave plane from this function because
@@ -368,5 +405,4 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
- .atomic_check = intel_plane_atomic_check,
};
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index cb7ef4f9eafd..e61e9a82aadf 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -41,9 +41,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state);
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ddcccf4408c3..85e6b2bbb34f 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -28,6 +28,7 @@
#include <drm/i915_component.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -560,8 +561,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
if (WARN_ON(port == PORT_A))
return;
@@ -609,8 +611,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
- port_name(port), pipe_name(pipe), drm_eld_size(eld));
+ DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
if (WARN_ON(port == PORT_A))
return;
@@ -816,13 +819,8 @@ retry:
to_intel_atomic_state(state)->cdclk.force_min_cdclk =
enable ? 2 * 96000 : 0;
- /*
- * Protects dev_priv->cdclk.force_min_cdclk
- * Need to lock this here in case we have no active pipes
- * and thus wouldn't lock it during the commit otherwise.
- */
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- &ctx);
+ /* Protects dev_priv->cdclk.force_min_cdclk */
+ ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
if (!ret)
ret = drm_atomic_commit(state);
@@ -850,11 +848,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
- if (dev_priv->audio_power_refcount++ == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (dev_priv->audio_power_refcount++ == 0) {
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
+ DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
+ /* Force CDCLK to 2*BCLK as long as we need audio powered. */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(AUD_PIN_BUF_CTL,
+ (I915_READ(AUD_PIN_BUF_CTL) |
+ AUD_PIN_BUF_ENABLE));
+ }
+
return ret;
}
@@ -865,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
@@ -1114,6 +1124,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
+ DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
dev_priv->audio_component_registered = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3250c1b8dcca..63c1bd4c2954 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1399,6 +1399,7 @@ static enum port dvo_port_to_port(u8 dvo_port)
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
};
enum port port;
int i;
@@ -1625,7 +1626,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
expected_size = 37;
} else if (bdb->version <= 215) {
expected_size = 38;
- } else if (bdb->version <= 216) {
+ } else if (bdb->version <= 229) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -1843,7 +1844,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (!HAS_DISPLAY(dev_priv)) {
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -2258,6 +2259,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
+ case DP_AUX_G:
+ aux_ch = AUX_CH_G;
+ break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_ch = AUX_CH_A;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4969189e620f..98f064828a57 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2016 Intel Corporation
+ * Copyright © 2016-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,7 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
+enum port;
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 688858ebe4d0..22e83f857de8 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -35,28 +35,54 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else if (IS_GEN(dev_priv, 11)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
}
qi->num_channels = (val & 0xf0) >> 4;
qi->num_points = (val & 0xf00) >> 8;
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
return 0;
}
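The pcode reply decoded above packs the DRAM type into bits 3:0 (gen11 and gen12 use different encodings for that nibble), the channel count into bits 7:4 and the number of QGV points into bits 11:8. A standalone decoder of that layout, mirroring the two switches:

#include <stdio.h>

static void decode_mem_global_info(unsigned int val, int gen)
{
	unsigned int type = val & 0xf;
	unsigned int num_channels = (val & 0xf0) >> 4;
	unsigned int num_points = (val & 0xf00) >> 8;
	const char *name = "unknown";

	if (gen == 12) {
		switch (type) {
		case 0: name = "DDR4"; break;
		case 3: name = "LPDDR4"; break;
		case 4: name = "DDR3"; break;
		case 5: name = "LPDDR3"; break;
		}
	} else if (gen == 11) {
		switch (type) {
		case 0: name = "DDR4"; break;
		case 1: name = "DDR3"; break;
		case 2: name = "LPDDR3"; break;
		case 3: name = "LPDDR4"; break;
		}
	}

	printf("type=%s channels=%u qgv_points=%u\n",
	       name, num_channels, num_points);
}

int main(void)
{
	decode_mem_global_info(0x213, 12); /* LPDDR4, 1 channel, 2 points */
	return 0;
}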
@@ -132,20 +158,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
}
struct intel_sa_info {
- u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+ u16 displayrtids;
+ u8 deburst, deprogbwlimit;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
- .mpagesize = 16,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+static const struct intel_sa_info tgl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 34, /* GB/s */
+ .displayrtids = 256,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- const struct intel_sa_info *sa = &icl_sa_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels;
int deinterleave;
@@ -233,14 +264,16 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
- icl_get_bw_info(dev_priv);
+ if (IS_GEN(dev_priv, 12))
+ icl_get_bw_info(dev_priv, &tgl_sa_info);
+ else if (IS_GEN(dev_priv, 11))
+ icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11)
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index d0bc42e5039c..0caef2592a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
@@ -1161,28 +1162,88 @@ static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static int bxt_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 576000)
- return 624000;
- else if (min_cdclk > 384000)
- return 576000;
- else if (min_cdclk > 288000)
- return 384000;
- else if (min_cdclk > 144000)
- return 288000;
- else
- return 144000;
+static const struct intel_cdclk_vals bxt_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ {}
+};
+
+static const struct intel_cdclk_vals glk_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ {}
+};
+
+static const struct intel_cdclk_vals cnl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 },
+
+ { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 },
+ {}
+};
+
+static const struct intel_cdclk_vals icl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk >= min_cdclk)
+ return table[i].cdclk;
+
+ WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
-static int glk_calc_cdclk(int min_cdclk)
+static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- if (min_cdclk > 158400)
- return 316800;
- else if (min_cdclk > 79200)
- return 158400;
- else
- return 79200;
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return dev_priv->cdclk.hw.ref * table[i].ratio;
+
+ WARN(1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
static u8 bxt_calc_voltage_level(int cdclk)
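With the per-platform if/else ladders gone, both helpers above just scan a platform table. Because the tables are sorted by ascending cdclk, the first entry that matches the reference clock and covers the minimum is the smallest legal frequency. A self-contained sketch of the lookup against a subset of the GLK table (the driver WARNs when it returns 0):

#include <stdio.h>

struct cdclk_vals { unsigned int refclk, cdclk, divider, ratio; };

/* Subset of the GLK table above; terminated by a zero refclk. */
static const struct cdclk_vals glk_table[] = {
	{ 19200,  79200, 8, 33 },
	{ 19200, 158400, 4, 33 },
	{ 19200, 316800, 2, 33 },
	{}
};

/* Smallest legal cdclk >= min_cdclk for the given refclk, 0 if none. */
static unsigned int calc_cdclk(const struct cdclk_vals *table,
			       unsigned int refclk, unsigned int min_cdclk)
{
	int i;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == refclk && table[i].cdclk >= min_cdclk)
			return table[i].cdclk;

	return 0;
}

int main(void)
{
	printf("%u\n", calc_cdclk(glk_table, 19200, 100000)); /* 158400 */
	printf("%u\n", calc_cdclk(glk_table, 19200, 400000)); /* 0: unsatisfiable */
	return 0;
}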
@@ -1190,69 +1251,99 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static u8 cnl_calc_voltage_level(int cdclk)
{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk > 336000)
+ return 2;
+ else if (cdclk > 168000)
+ return 1;
+ else
return 0;
+}
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 144000:
- case 288000:
- case 384000:
- case 576000:
- ratio = 60;
- break;
- case 624000:
- ratio = 65;
- break;
- }
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
- return dev_priv->cdclk.hw.ref * ratio;
+static u8 ehl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
}
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- int ratio;
+ if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_state->ref = 24000;
+ else
+ cdclk_state->ref = 19200;
+}
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+static void icl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
- switch (cdclk) {
+ switch (dssm) {
default:
- MISSING_CASE(cdclk);
+ MISSING_CASE(dssm);
/* fall through */
- case 79200:
- case 158400:
- case 316800:
- ratio = 33;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_state->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_state->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_state->ref = 38400;
break;
}
-
- return dev_priv->cdclk.hw.ref * ratio;
}
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- u32 val;
+ u32 val, ratio;
- cdclk_state->ref = 19200;
- cdclk_state->vco = 0;
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_readout_refclk(dev_priv, cdclk_state);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_readout_refclk(dev_priv, cdclk_state);
+ else
+ cdclk_state->ref = 19200;
val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+ * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_state->vco = 0;
return;
+ }
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
+ /*
+ * CNL+ has the ratio directly in the PLL enable register, whereas
+ * gen9lp kept it in a separate PLL control register.
+ */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
+ else
+ ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- val = I915_READ(BXT_DE_PLL_CTL);
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+ cdclk_state->vco = ratio * cdclk_state->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1261,12 +1352,19 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
u32 divider;
int div;
- bxt_de_pll_update(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ if (INTEL_GEN(dev_priv) >= 12)
+ cdclk_state->bypass = cdclk_state->ref / 2;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ cdclk_state->bypass = 50000;
+ else
+ cdclk_state->bypass = cdclk_state->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_state->vco == 0) {
+ cdclk_state->cdclk = cdclk_state->bypass;
goto out;
+ }
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1275,13 +1373,15 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
div = 8;
break;
default:
@@ -1297,7 +1397,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
- bxt_calc_voltage_level(cdclk_state->cdclk);
+ dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
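Readout is the inverse of the table: the ratio comes from the PLL register, vco = ratio * refclk, and cdclk = vco / div, where the CD2X select field encodes twice the actual divider (CD2X of 1, 1.5, 2, 4 gives div of 2, 3, 4, 8). A worked example reproducing the ICL table's 326400 kHz entry:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int ref = 19200;	/* kHz, from DSSM readout */
	unsigned int ratio = 68;	/* PLL feedback ratio from hw */
	unsigned int div = 4;		/* CD2X divider 2 -> vco/cdclk of 4 */
	unsigned int vco = ratio * ref;

	/* Prints vco=1305600 kHz cdclk=326400 kHz. */
	printf("vco=%u kHz cdclk=%u kHz\n", vco, DIV_ROUND_CLOSEST(vco, div));
	return 0;
}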
@@ -1332,259 +1432,6 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
- u32 val, divider;
- int ret;
-
- /* cdclk = vco / 2 / div{1,1.5,2,4} */
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
- /* fall through */
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- case 3:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- }
-
- /*
- * Inform power controller of upcoming frequency change. BSpec
- * requires us to wait up to 150usec, but that leads to timeouts;
- * the 2ms used here is based on experiment.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_enable(dev_priv, vco);
-
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
-
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
-
- /*
- * The timeout isn't specified, the 2ms used here is based on
- * experiment.
- * FIXME: Waiting for the request completion could be delayed until
- * the next PCODE request based on BSpec.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- intel_update_cdclk(dev_priv);
-}
-
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
- u32 cdctl, expected;
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
- * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
- * so sanitize this register.
- */
- cdctl = I915_READ(CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (dev_priv->cdclk.hw.cdclk >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
-
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
-
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state;
-
- bxt_sanitize_cdclk(dev_priv);
-
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
- return;
-
- cdclk_state = dev_priv->cdclk.hw;
-
- /*
- * FIXME:
- * - The initial CDCLK needs to be read from VBT.
- * Need to make this change after VBT has changes for BXT.
- */
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk_state.cdclk = glk_calc_cdclk(0);
- cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
- } else {
- cdclk_state.cdclk = bxt_calc_cdclk(0);
- cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
- }
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static int cnl_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 336000)
- return 528000;
- else if (min_cdclk > 168000)
- return 336000;
- else
- return 168000;
-}
-
-static u8 cnl_calc_voltage_level(int cdclk)
-{
- if (cdclk > 336000)
- return 2;
- else if (cdclk > 168000)
- return 1;
- else
- return 0;
-}
-
-static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
- else
- cdclk_state->ref = 19200;
-
- cdclk_state->vco = 0;
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
- return;
-
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
-
- cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
-}
-
-static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 divider;
- int div;
-
- cnl_cdclk_pll_update(dev_priv, cdclk_state);
-
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
- if (cdclk_state->vco == 0)
- goto out;
-
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
- switch (divider) {
- case BXT_CDCLK_CD2X_DIV_SEL_1:
- div = 2;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_2:
- div = 4;
- break;
- default:
- MISSING_CASE(divider);
- return;
- }
-
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
-
- out:
- /*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
- */
- cdclk_state->voltage_level =
- cnl_calc_voltage_level(cdclk_state->cdclk);
-}
-
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -1618,7 +1465,27 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
+static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (pipe == INVALID_PIPE)
+ return TGL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return TGL_CDCLK_CD2X_PIPE(pipe);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ if (pipe == INVALID_PIPE)
+ return ICL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return ICL_CDCLK_CD2X_PIPE(pipe);
+ } else {
+ if (pipe == INVALID_PIPE)
+ return BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ return BXT_CDCLK_CD2X_PIPE(pipe);
+ }
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
@@ -1627,17 +1494,28 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider;
int ret;
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ /* Inform power controller of upcoming frequency change. */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
+
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
- /* cdclk = vco / 2 / div{1,2} */
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
@@ -1646,67 +1524,87 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
+ case 3:
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
+ case 8:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+ break;
}
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
+ if (INTEL_GEN(dev_priv) >= 10) {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_disable(dev_priv);
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- I915_WRITE(CDCLK_CTL, val);
+ if (dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_enable(dev_priv, vco);
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
+ } else {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /* inform PCU of the change */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ if (dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
+ }
- intel_update_cdclk(dev_priv);
+ val = divider | skl_cdclk_decimal(cdclk) |
+ bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
+ if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
-static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+ if (INTEL_GEN(dev_priv) >= 10) {
+ ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
+ } else {
+ /*
+ * The timeout isn't specified; the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed
+ * until the next PCODE request based on BSpec.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level,
+ 150, 2);
+ }
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 168000:
- case 336000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
- break;
- case 528000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
- break;
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
}
- return dev_priv->cdclk.hw.ref * ratio;
+ intel_update_cdclk(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
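The divider selection in the unified bxt_set_cdclk() follows from cdclk = vco / 2 / cd2x: a rounded vco/cdclk quotient of 2, 3, 4 or 8 picks a CD2X divider of 1, 1.5, 2 or 4. A sketch of just that mapping, with the enum standing in for the real register bit values:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Illustrative stand-ins for the BXT_CDCLK_CD2X_DIV_SEL_* values. */
enum div_sel { DIV_SEL_1, DIV_SEL_1_5, DIV_SEL_2, DIV_SEL_4, DIV_SEL_BAD };

static enum div_sel pick_cd2x_divider(unsigned int vco, unsigned int cdclk)
{
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 2: return DIV_SEL_1;
	case 3: return DIV_SEL_1_5;	/* rejected on GLK and gen10+ */
	case 4: return DIV_SEL_2;
	case 8: return DIV_SEL_4;	/* rejected on gen10+ */
	default: return DIV_SEL_BAD;
	}
}

int main(void)
{
	static const char * const names[] = { "1", "1.5", "2", "4", "invalid" };

	/* BXT: vco = 19200 * 60 = 1152000, cdclk = 288000 -> CD2X divider 2. */
	printf("CD2X divider: %s\n", names[pick_cd2x_divider(1152000, 288000)]);
	return 0;
}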
-static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
+ int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -1727,262 +1625,65 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ /* Make sure this is a legal cdclk value for the platform */
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
+ if (cdclk != dev_priv->cdclk.hw.cdclk)
+ goto sanitize;
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
+ /* Make sure the VCO is correct for the cdclk */
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ if (vco != dev_priv->cdclk.hw.vco)
+ goto sanitize;
-static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
-{
- static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
- static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
- const int *ranges;
- int len, i;
+ expected = skl_cdclk_decimal(cdclk);
- switch (ref) {
- default:
- MISSING_CASE(ref);
- /* fall through */
- case 24000:
- ranges = ranges_24;
- len = ARRAY_SIZE(ranges_24);
- break;
- case 19200:
- case 38400:
- ranges = ranges_19_38;
- len = ARRAY_SIZE(ranges_19_38);
+ /* Figure out what CD2X divider we should be using for this cdclk */
+ switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco,
+ dev_priv->cdclk.hw.cdclk)) {
+ case 2:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1;
break;
- }
-
- for (i = 0; i < len; i++) {
- if (min_cdclk <= ranges[i])
- return ranges[i];
- }
-
- WARN_ON(min_cdclk > ranges[len - 1]);
- return ranges[len - 1];
-}
-
-static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
-
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 172800:
- case 307200:
- case 556800:
- case 652800:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400);
+ case 3:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
- case 180000:
- case 312000:
- case 552000:
- case 648000:
- WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ case 4:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_2;
break;
- case 192000:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400 &&
- dev_priv->cdclk.hw.ref != 24000);
+ case 8:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_4;
break;
- }
-
- ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
-
- return dev_priv->cdclk.hw.ref * ratio;
-}
-
-static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- unsigned int cdclk = cdclk_state->cdclk;
- unsigned int vco = cdclk_state->vco;
- int ret;
-
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
-
- /*
- * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
- * divider here synchronized to a pipe while CDCLK is on, nor will we
- * need the corresponding vblank wait.
- */
- I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
- skl_cdclk_decimal(cdclk));
-
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
-
- intel_update_cdclk(dev_priv);
-
- /*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
- */
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
-
-static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
-{
- if (IS_ELKHARTLAKE(dev_priv)) {
- if (cdclk > 312000)
- return 2;
- else if (cdclk > 180000)
- return 1;
- else
- return 0;
- } else {
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
- }
-}
-
-static void icl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- cdclk_state->bypass = 50000;
-
- val = I915_READ(SKL_DSSM);
- switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
default:
- MISSING_CASE(val);
- /* fall through */
- case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
- break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
- break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
- break;
- }
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
- (val & BXT_DE_PLL_LOCK) == 0) {
- /*
- * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
- * setting it to zero is a way to signal that.
- */
- cdclk_state->vco = 0;
- cdclk_state->cdclk = cdclk_state->bypass;
- goto out;
+ goto sanitize;
}
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
-
- val = I915_READ(CDCLK_CTL);
- WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
-
- cdclk_state->cdclk = cdclk_state->vco / 2;
-
-out:
/*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- cdclk_state->voltage_level =
- icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
-}
-
-static void icl_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state sanitized_state;
- u32 val;
-
- /* This sets dev_priv->cdclk.hw. */
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- /* This means CDCLK disabled. */
- if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- val = I915_READ(CDCLK_CTL);
-
- if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
- goto sanitize;
-
- if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
- goto sanitize;
+ if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return;
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
sanitize:
DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- sanitized_state.ref = dev_priv->cdclk.hw.ref;
- sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
- sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
- sanitized_state.cdclk);
- sanitized_state.voltage_level =
- icl_calc_voltage_level(dev_priv,
- sanitized_state.cdclk);
-
- icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
-}
-
-static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
- cdclk_state.cdclk);
+ /* force cdclk programming */
+ dev_priv->cdclk.hw.cdclk = 0;
- icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ /* force full PLL disable + enable */
+ dev_priv->cdclk.hw.vco = -1;
}
-static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
- cnl_sanitize_cdclk(dev_priv);
+ bxt_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk.hw.cdclk != 0 &&
dev_priv->cdclk.hw.vco != 0)
@@ -1990,22 +1691,29 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cnl_calc_cdclk(0);
- cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from VBT.
+ * Need to make this change once the VBT changes for BXT are in place.
+ */
+ cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
@@ -2019,14 +1727,10 @@ static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_cdclk_init(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_init_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_init_cdclk(i915);
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ bxt_init_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_init_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_init_cdclk(i915);
}
/**
@@ -2038,14 +1742,10 @@ void intel_cdclk_init(struct drm_i915_private *i915)
*/
void intel_cdclk_uninit(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_uninit_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_uninit_cdclk(i915);
+ if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_uninit_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
}
/**
@@ -2073,9 +1773,9 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
* Returns:
* True if the CDCLK states require just a cd2x divider update, false if not.
*/
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -2094,8 +1794,8 @@ bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
* Returns:
* True if the CDCLK states don't match, false if they do.
*/
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
@@ -2200,9 +1900,11 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
intel_set_cdclk(dev_priv, new_state, pipe);
}
-static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
- int pixel_rate)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int pixel_rate = crtc_state->pixel_rate;
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEN(dev_priv, 9) ||
@@ -2210,10 +1912,25 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else if (crtc_state->double_wide)
+ return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
else
return DIV_ROUND_UP(pixel_rate * 100, 90);
}
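intel_pixel_rate_to_cdclk() applies a per-platform guardband: gen10+/GLK can push two pixels per cdclk cycle, CHV needs cdclk of at least pixel_rate * 100/95, and everything older 100/90 (doubled again for double-wide pipes). The resulting cdclk floors for a 148.5 MHz pixel clock:

#include <stdio.h>

#define DIV_ROUND_UP(x, d) (((x) + (d) - 1) / (d))

int main(void)
{
	int pixel_rate = 148500; /* kHz, e.g. a 1080p60 mode */

	printf("gen10+/glk: %d kHz\n", DIV_ROUND_UP(pixel_rate, 2));        /* 74250 */
	printf("chv:        %d kHz\n", DIV_ROUND_UP(pixel_rate * 100, 95)); /* 156316 */
	printf("older:      %d kHz\n", DIV_ROUND_UP(pixel_rate * 100, 90)); /* 165000 */
	return 0;
}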
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+
+ return min_cdclk;
+}
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
@@ -2223,7 +1940,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (!crtc_state->base.enable)
return 0;
- min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
+ min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
@@ -2282,6 +1999,9 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
IS_GEMINILAKE(dev_priv))
min_cdclk = max(158400, min_cdclk);
+ /* Account for additional needs from the planes */
+ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -2303,11 +2023,20 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
sizeof(state->min_cdclk));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
+ if (state->min_cdclk[i] == min_cdclk)
+ continue;
+
state->min_cdclk[i] = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_cdclk = state->cdclk.force_min_cdclk;
@@ -2318,6 +2047,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
}
/*
+ * Account for port clock min voltage level requirements.
+ * This only really does something on CNL+ but can be
+ * called on earlier platforms as well.
+ *
* Note that this function assumes that 0 is
* the lowest voltage value, and higher values
* correspond to increasingly higher voltages.
@@ -2326,7 +2059,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -2339,11 +2072,21 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
sizeof(state->min_voltage_level));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
if (crtc_state->base.enable)
- state->min_voltage_level[i] =
- crtc_state->min_voltage_level;
+ min_voltage_level = crtc_state->min_voltage_level;
else
- state->min_voltage_level[i] = 0;
+ min_voltage_level = 0;
+
+ if (state->min_voltage_level[i] == min_voltage_level)
+ continue;
+
+ state->min_voltage_level[i] = min_voltage_level;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_voltage_level = 0;
@@ -2369,7 +2112,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2400,7 +2143,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2470,7 +2213,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
state->cdclk.actual.vco = vco;
@@ -2487,38 +2230,33 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ int min_cdclk, min_voltage_level, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ min_voltage_level = bxt_compute_min_voltage_level(state);
+ if (min_voltage_level < 0)
+ return min_voltage_level;
+
+ cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
- bxt_calc_voltage_level(cdclk);
-
- if (!state->active_crtcs) {
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ max_t(int, min_voltage_level,
+ dev_priv->display.calc_voltage_level(cdclk));
+
+ if (!state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
- bxt_calc_voltage_level(cdclk);
+ dev_priv->display.calc_voltage_level(cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2526,70 +2264,138 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
-static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ struct intel_crtc *crtc;
- min_cdclk = intel_compute_min_cdclk(state);
- if (min_cdclk < 0)
- return min_cdclk;
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
- cdclk = cnl_calc_cdclk(min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(cnl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(state));
+ if (!crtc_state->base.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ continue;
- if (!state->active_crtcs) {
- cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state->base.mode_changed = true;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- cnl_calc_voltage_level(cdclk);
- } else {
- state->cdclk.actual = state->cdclk.logical;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ crtc_state->update_planes |= crtc_state->active_planes;
}
return 0;
}
-static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- unsigned int ref = state->cdclk.logical.ref;
- int min_cdclk, cdclk, vco;
+ int min_cdclk;
+ /*
+ * We can't change the cdclk frequency, but we still want to
+ * check that the required minimum frequency doesn't exceed
+ * the actual cdclk frequency.
+ */
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- cdclk = icl_calc_cdclk(min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ return 0;
+}
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(icl_calc_voltage_level(dev_priv, cdclk),
- cnl_compute_min_voltage_level(state));
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum pipe pipe;
+ int ret;
- if (!state->active_crtcs) {
- cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- icl_calc_voltage_level(dev_priv, cdclk);
+ /*
+ * Writes to dev_priv->cdclk.{actual,logical} must be protected
+ * by holding all the crtc mutexes even if we don't end up
+ * touching the hardware.
+ */
+ if (intel_cdclk_changed(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /*
+ * Also serialize commits across all crtcs
+ * if the actual hw needs to be poked.
+ */
+ ret = intel_atomic_serialize_global_state(state);
+ if (ret)
+ return ret;
+ } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &state->cdclk.logical)) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
} else {
- state->cdclk.actual = state->cdclk.logical;
+ return 0;
+ }
+
+ if (is_power_of_2(state->active_pipes) &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+
+ pipe = ilog2(state->active_pipes);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
+ if (pipe != INVALID_PIPE) {
+ state->cdclk.pipe = pipe;
+
+ DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /* All pipes must be switched off while we change the cdclk. */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+
+ state->cdclk.pipe = INVALID_PIPE;
+
+ DRM_DEBUG_KMS("Modeset required for cdclk change\n");
}
+ DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ state->cdclk.logical.cdclk,
+ state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ state->cdclk.logical.voltage_level,
+ state->cdclk.actual.voltage_level);
+
return 0;
}
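The cd2x fast path above is only legal when exactly one pipe is active: is_power_of_2() on the active_pipes bitmask checks for a single set bit, and ilog2() recovers the pipe index from it. Local reimplementations of both, for illustration only:

#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int ilog2_ul(unsigned long n)
{
	int i = -1;

	while (n) {
		n >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long active_pipes = 1ul << 2;	/* only pipe C active */

	if (is_power_of_2(active_pipes))
		printf("single active pipe: %d\n", ilog2_ul(active_pipes)); /* 2 */
	else
		puts("0 or 2+ pipes active: no cd2x-only update");
	return 0;
}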
@@ -2809,15 +2615,29 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
+ dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
+ if (IS_GEMINILAKE(dev_priv))
+ dev_priv->cdclk.table = glk_cdclk_table;
+ else
+ dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
@@ -2830,13 +2650,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+ } else {
+ dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
- if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.get_cdclk = icl_get_cdclk;
- else if (IS_CANNONLAKE(dev_priv))
- dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 4d6f7f5f8930..cf71394cc79c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -15,6 +15,13 @@ struct intel_atomic_state;
struct intel_cdclk_state;
struct intel_crtc_state;
+struct intel_cdclk_vals {
+ u16 refclk;
+ u32 cdclk;
+ u8 divider; /* CD2X divider * 2 */
+ u8 ratio;
+};
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init(struct drm_i915_private *i915);
void intel_cdclk_uninit(struct drm_i915_private *i915);
@@ -22,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
void intel_cdclk_swap_state(struct intel_atomic_state *state);
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
@@ -42,5 +44,6 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_CDCLK_H__ */
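Every intel_cdclk_vals entry obeys cdclk = refclk * ratio / divider; since the divider field stores CD2X * 2, the /2 from "cdclk = vco / 2 / cd2x" is already folded in. A quick consistency check against the BXT table:

#include <stdio.h>

struct cdclk_vals {
	unsigned short refclk;
	unsigned int cdclk;
	unsigned char divider;	/* CD2X divider * 2 */
	unsigned char ratio;
};

static const struct cdclk_vals bxt_table[] = {
	{ 19200, 144000, 8, 60 },
	{ 19200, 288000, 4, 60 },
	{ 19200, 384000, 3, 60 },
	{ 19200, 576000, 2, 60 },
	{ 19200, 624000, 2, 65 },
	{}
};

int main(void)
{
	int i;

	/* vco = refclk * ratio; cdclk = vco / divider. */
	for (i = 0; bxt_table[i].refclk; i++) {
		unsigned int vco = bxt_table[i].refclk * bxt_table[i].ratio;

		printf("cdclk=%u check=%u\n", bxt_table[i].cdclk,
		       vco / bxt_table[i].divider);
	}
	return 0;
}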
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 71a0201437a9..aa3a063549c3 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -43,6 +43,21 @@
#define LEGACY_LUT_LENGTH 256
/*
+ * ILK+ csc matrix:
+ *
+ * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
+ * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
+ * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
+ *
+ * ILK/SNB don't have explicit post offsets, and instead
+ * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
+ */
+
+/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
* number of fractional bits.
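The pipeline documented above is out = coeff * (in + preoff) + postoff. A float sketch of that application for clarity; the hardware uses the fixed-point coefficients built by the macros that follow:

#include <stdio.h>

static void apply_csc(const float coeff[9], const float preoff[3],
		      const float postoff[3], const float in[3], float out[3])
{
	int row;

	/* out[row] = sum over col of coeff * (in + preoff), plus postoff. */
	for (row = 0; row < 3; row++) {
		out[row] = postoff[row];
		for (int col = 0; col < 3; col++)
			out[row] += coeff[row * 3 + col] * (in[col] + preoff[col]);
	}
}

int main(void)
{
	/* Identity matrix, zero offsets: output equals input. */
	const float identity[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
	const float zero[3] = { 0, 0, 0 };
	const float rgb[3] = { 0.5f, 0.25f, 1.0f };
	float out[3];

	apply_csc(identity, zero, zero, rgb, out);
	printf("%.2f %.2f %.2f\n", out[0], out[1], out[2]);
	return 0;
}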
@@ -59,37 +74,38 @@
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+/* Nop pre/post offsets */
static const u16 ilk_csc_off_zero[3] = {};
+/* Identity matrix */
static const u16 ilk_csc_coeff_identity[9] = {
ILK_CSC_COEFF_1_0, 0, 0,
0, ILK_CSC_COEFF_1_0, 0,
0, 0, ILK_CSC_COEFF_1_0,
};
+/* Limited range RGB post offsets */
static const u16 ilk_csc_postoff_limited_range[3] = {
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
};
+/* Full range RGB -> limited range RGB matrix */
static const u16 ilk_csc_coeff_limited_range[9] = {
ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
};
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
+/* BT.709 full range RGB -> limited range YCbCr matrix */
static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
0x1e08, 0x9cc0, 0xb528,
0x2ba8, 0x09d8, 0x37e8,
0xbce8, 0x9ad8, 0x1e08,
};
-/* Post offset values for RGB->YCBCR conversion */
+/* Limited range YCbCr post offsets */
static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
0x0800, 0x0100, 0x0800,
};
@@ -611,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -624,10 +641,15 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ 1 << 16);
}
+
+ intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@@ -787,78 +809,83 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* FIXME: LUT entries are 16 bit only, so we can program 0xFFFF max */
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- * Every entry in the multi-segment LUT is corresponding to a superfine
- * segment step which is 1/(8 * 128 * 256).
+ * Program Super Fine segment (let's call it seg1)...
*
- * Superfine segment has 9 entries, corresponding to values
- * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+ * Super Fine segment's step is 1/(8 * 128 * 256) and it has
+ * 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
+ * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- *
* Program Fine segment (let's call it seg2)...
*
- * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256)
- * ... 256/(128*256). So in order to program fine segment of LUT we
- * need to pick every 8'th entry in LUT, and program 256 indexes.
+ * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
+ * ... 256/(128 * 256). So in order to program fine segment of LUT we
+ * need to pick every 8th entry in the LUT, and program 256 indexes.
*
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
- * with seg2[0] being unused by the hardware.
+ * seg2[0] being unused by the hardware.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
* Program Coarse segment (let's call it seg3)...
*
- * Coarse segment's starts from index 0 and it's step is 1/256 ie 0,
- * 1/256, 2/256 ...256/256. As per the description of each entry in LUT
+ * Coarse segment starts from index 0 and its step is 1/256, i.e. 0,
+ * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
* above, we need to pick every (8 * 128)th entry in LUT, and
* program 256 of those.
*
@@ -868,20 +895,24 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
ivb_load_lut_ext_max(crtc);
+ intel_dsb_put(dsb);
}
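Taken together, the superfine/fine/coarse comments above describe a sparse sampling of one large software LUT. A standalone sketch (assuming the ICL multi-segment gamma blob holds 8 * 128 * 256 + 1 = 262145 entries, the last of which goes to GCMAX) prints exactly which blob indices each segment programs:

    #include <stdio.h>

    int main(void)
    {
            int i;

            for (i = 0; i < 9; i++)     /* seg1: superfine, step 1/(8*128*256) */
                    printf("seg1[%d] = lut[%d]\n", i, i);

            for (i = 1; i < 257; i++)   /* seg2: fine, every 8th entry */
                    printf("seg2[%d] = lut[%d]\n", i, i * 8);

            for (i = 0; i < 256; i++)   /* seg3: coarse, every (8*128)th entry */
                    printf("seg3[%d] = lut[%d]\n", i, i * 8 * 128);

            printf("gcmax = lut[%d]\n", 256 * 8 * 128);   /* 262144 */
            return 0;
    }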
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->base.degamma_lut)
glk_load_degamma_lut(crtc_state);
@@ -890,16 +921,17 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
break;
-
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
break;
-
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
}
+
+ intel_dsb_commit(dsb);
+ intel_dsb_put(dsb);
}
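icl_load_luts() completes the DSB picture begun in the hunks above: each helper takes its own refcounted intel_dsb_get()/intel_dsb_put(), the register writes are queued rather than executed, and nothing reaches the hardware until the single intel_dsb_commit() here. A hedged sketch of that discipline, with stub prototypes standing in for the driver's real types and registers:

    struct intel_crtc;
    struct intel_dsb;

    /* stub prototypes for illustration; the real helpers take i915 types */
    struct intel_dsb *intel_dsb_get(struct intel_crtc *crtc);
    void intel_dsb_reg_write(struct intel_dsb *dsb, unsigned int reg, unsigned int val);
    void intel_dsb_commit(struct intel_dsb *dsb);
    void intel_dsb_put(struct intel_dsb *dsb);

    static void dsb_lifecycle(struct intel_crtc *crtc, unsigned int reg, unsigned int val)
    {
            struct intel_dsb *dsb = intel_dsb_get(crtc); /* refcounted acquire */

            intel_dsb_reg_write(dsb, reg, val);          /* queued, not yet executed */

            intel_dsb_commit(dsb); /* hand the batch to the hardware (or MMIO fallback) */
            intel_dsb_put(dsb);    /* drop the reference */
    }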
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -990,6 +1022,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
dev_priv->display.color_commit(crtc_state);
}
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->base.gamma_lut &&
+ !old_crtc_state->base.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * CGM_PIPE_MODE is itself single buffered. We'd have to
+ * somehow split it out from chv_load_luts() if we wanted
+ * the ability to preload the CGM LUTs/CSC without tearing.
+ */
+ if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+ return false;
+
+ return !old_crtc_state->base.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * The hardware degamma is active whenever the pipe
+ * CSC is active. Thus even if the old state has no
+ * software degamma we need to avoid clobbering the
+ * linear hardware degamma mid scanout.
+ */
+ return !old_crtc_state->csc_enable &&
+ !old_crtc_state->base.gamma_lut;
+}
+
int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
@@ -1133,6 +1214,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1185,6 +1268,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1197,6 +1282,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT;
}
+static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * CSC comes after the LUT in RGB->YCbCr mode.
+ * RGB->YCbCr needs the limited range offsets added to
+ * the output. RGB limited range output is handled by
+ * the hw automagically elsewhere.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return CSC_BLACK_SCREEN_OFFSET;
+
+ return CSC_MODE_YUV_TO_RGB |
+ CSC_POSITION_BEFORE_GAMMA;
+}
+
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1210,20 +1310,22 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
!crtc_state->c8_planes;
/*
- * We don't expose the ctm on ilk/snb currently,
- * nor do we enable YCbCr output. Also RGB limited
- * range output is handled by the hw automagically.
+ * We don't expose the ctm on ilk/snb currently; RGB limited
+ * range output is also handled by the hw automagically.
*/
- crtc_state->csc_enable = false;
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
- crtc_state->csc_mode = 0;
+ crtc_state->csc_mode = ilk_csc_mode(crtc_state);
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1281,6 +1383,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1319,6 +1423,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1368,9 +1474,408 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
crtc_state->csc_mode = icl_csc_mode(crtc_state);
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
+static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
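+ /* i965 10.6 interpolated format: effectively 16 bits per channel */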
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+ else
+ return i9xx_gamma_precision(crtc_state);
+}
+
+static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return chv_gamma_precision(crtc_state);
+ else
+ return i9xx_gamma_precision(crtc_state);
+ } else {
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ return glk_gamma_precision(crtc_state);
+ else if (IS_IRONLAKE(dev_priv))
+ return ilk_gamma_precision(crtc_state);
+ }
+
+ return 0;
+}
+
+static bool err_check(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2, u32 err)
+{
+ return ((abs((long)lut2->red - lut1->red)) <= err) &&
+ ((abs((long)lut2->blue - lut1->blue)) <= err) &&
+ ((abs((long)lut2->green - lut1->green)) <= err);
+}
+
+static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
+{
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (!err_check(&lut1[i], &lut2[i], err))
+ return false;
+ }
+
+ return true;
+}
+
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision)
+{
+ struct drm_color_lut *lut1, *lut2;
+ int lut_size1, lut_size2;
+ u32 err;
+
+ if (!blob1 != !blob2)
+ return false;
+
+ if (!blob1)
+ return true;
+
+ lut_size1 = drm_color_lut_size(blob1);
+ lut_size2 = drm_color_lut_size(blob2);
+
+ /* check that the sw and hw lut sizes match */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (lut_size1 != lut_size2)
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ lut1 = blob1->data;
+ lut2 = blob2->data;
+
+ err = 0xffff >> bit_precision;
+
+ /* check that sw and hw lut entries match within the tolerance */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (!intel_color_lut_entry_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ return true;
+}
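The tolerance used by err_check() follows directly from the hardware precision: err = 0xffff >> bit_precision. A standalone sketch of the per-channel tolerances this produces:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bits[] = { 8, 10, 16 };
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    unsigned int err = 0xffff >> bits[i];
                    /* 8 -> 0xff, 10 -> 0x3f, 16 -> 0x0 (exact match) */
                    printf("%2u-bit hw lut: |sw - hw| <= 0x%02x\n", bits[i], err);
            }
            return 0;
    }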
+
+/* convert a hw value at the given bit_precision to a 16-bit lut property value */
+static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
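A standalone sketch mirroring intel_color_lut_pack() shows why the tolerance above is needed at all: scaling by left-shift means a full-scale 8-bit hardware value reads back as 0xff00, not the 0xffff the software LUT likely held, a difference of exactly the 8-bit err value:

    #include <stdio.h>

    static unsigned int lut_pack(unsigned int val, unsigned int bits)
    {
            unsigned int max = 0xffff >> (16 - bits);

            if (val > max)          /* clamp_val() equivalent */
                    val = max;
            if (bits < 16)
                    val <<= 16 - bits;
            return val;
    }

    int main(void)
    {
            /* 8-bit 0xff -> 0xff00, 10-bit 0x3ff -> 0xffc0 */
            printf("0x%04x 0x%04x\n", lut_pack(0xff, 8), lut_pack(0x3ff, 10));
            return 0;
    }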
+
+static struct drm_property_blob *
+i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ if (HAS_GMCH(dev_priv))
+ val = I915_READ(PALETTE(pipe, i));
+ else
+ val = I915_READ(LGC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_RED_MASK, val), 8);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_GREEN_MASK, val), 8);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_BLUE_MASK, val), 8);
+ }
+
+ return blob;
+}
+
+static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+}
+
+static struct drm_property_blob *
+i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val1, val2;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
+ val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
+
+ blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, val1);
+ blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
+ blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ }
+
+ blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 0)));
+ blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 1)));
+ blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 2)));
+
+ return blob;
+}
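The 10.6 readout above splits each 16-bit channel across two consecutive palette registers: the even-indexed register (val1) carries the low byte of the field and the odd-indexed one (val2) the high byte. A trivial standalone sketch of the recombination:

    #include <stdio.h>

    int main(void)
    {
            unsigned int lo = 0x34;                 /* 8-bit field from val1 */
            unsigned int hi = 0x12;                 /* 8-bit field from val2 */
            unsigned int channel = hi << 8 | lo;

            printf("channel = 0x%04x\n", channel);  /* 0x1234 */
            return 0;
    }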
+
+static void i965_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
+}
+
+static struct drm_property_blob *
+chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
+
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void chv_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
+ else
+ i965_read_luts(crtc_state);
+}
+
+static struct drm_property_blob *
+ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(PREC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_BLUE_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void ilk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
+}
+
+static struct drm_property_blob *
+glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * hw_lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ val = I915_READ(PREC_PAL_DATA(pipe));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_BLUE_MASK, val), 10);
+ }
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+
+ return blob;
+}
+
+static void glk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1383,14 +1888,17 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = chv_load_luts;
+ dev_priv->display.read_luts = chv_read_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
+ dev_priv->display.read_luts = i965_read_luts;
} else {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i9xx_load_luts;
+ dev_priv->display.read_luts = i9xx_read_luts;
}
} else {
if (INTEL_GEN(dev_priv) >= 11)
@@ -1409,16 +1917,19 @@ void intel_color_init(struct intel_crtc *crtc)
else
dev_priv->display.color_commit = ilk_color_commit;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
- else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.read_luts = glk_read_luts;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.load_luts = bdw_load_luts;
- else if (INTEL_GEN(dev_priv) >= 7)
+ } else if (INTEL_GEN(dev_priv) >= 7) {
dev_priv->display.load_luts = ivb_load_luts;
- else
+ } else {
dev_priv->display.load_luts = ilk_load_luts;
+ dev_priv->display.read_luts = ilk_read_luts;
+ }
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 057e8ac63555..173727aaa24d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -6,13 +6,20 @@
#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__
+#include <linux/types.h>
+
struct intel_crtc_state;
struct intel_crtc;
+struct drm_property_blob;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision);
#endif /* __INTEL_COLOR_H__ */
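The two new exports above are presumably paired by the atomic state checker: derive the gamma bit precision from the software state, then compare the software and readout LUT blobs at that precision. A hedged sketch of that pairing (gamma_luts_match() is a hypothetical helper; it reuses the driver types already referenced in this header, so it is not standalone-compilable):

    static bool gamma_luts_match(const struct intel_crtc_state *sw,
                                 const struct intel_crtc_state *hw)
    {
            /* hypothetical helper, not part of this patch */
            int precision = intel_color_get_gamma_bit_precision(sw);

            return intel_color_lut_equal(sw->base.gamma_lut, hw->base.gamma_lut,
                                         sw->gamma_mode, precision);
    }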
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 308ec63207ee..1133c4e97bb4 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -277,7 +277,22 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
void
intel_attach_colorspace_property(struct drm_connector *connector)
{
- if (!drm_mode_create_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (drm_mode_create_hdmi_colorspace_property(connector))
+ return;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (drm_mode_create_dp_colorspace_property(connector))
+ return;
+ break;
+ default:
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return;
+ }
+
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 0a08354a6183..39cc6d79dc85 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -844,7 +844,7 @@ load_detect:
}
/* for pre-945g platforms use load detect */
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret > 0) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
@@ -1001,9 +1001,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev_priv))
- crt->base.crtc_mask = (1 << 0);
+ crt->base.pipe_mask = BIT(PIPE_A);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ crt->base.pipe_mask = ~0;
if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 8eb2b3ec01ed..0d6e494b4508 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -45,6 +45,7 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
@@ -586,6 +587,26 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
{ 0x0, 0x00, 0x00 }, /* 3 0 */
};
+struct tgl_dkl_phy_ddi_buf_trans {
+ u32 dkl_vswing_control;
+ u32 dkl_preshoot_control;
+ u32 dkl_de_emphasis_control;
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -872,7 +893,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ 0, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ default_entry = n_entries - 1;
+ } else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1049,6 +1077,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
+ case DPLL_ID_TGL_MGPLL5:
+ case DPLL_ID_TGL_MGPLL6:
return DDI_CLK_SEL_MG;
}
}
@@ -1413,11 +1443,30 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
ref_clock = dev_priv->cdclk.hw.ref;
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
switch (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
@@ -1692,7 +1741,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
}
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1704,44 +1754,50 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
- temp = TRANS_MSA_SYNC_CLK;
-
- if (crtc_state->limited_color_range)
- temp |= TRANS_MSA_CEA_RANGE;
+ temp = DP_MSA_MISC_SYNC_CLOCK;
switch (crtc_state->pipe_bpp) {
case 18:
- temp |= TRANS_MSA_6_BPC;
+ temp |= DP_MSA_MISC_6_BPC;
break;
case 24:
- temp |= TRANS_MSA_8_BPC;
+ temp |= DP_MSA_MISC_8_BPC;
break;
case 30:
- temp |= TRANS_MSA_10_BPC;
+ temp |= DP_MSA_MISC_10_BPC;
break;
case 36:
- temp |= TRANS_MSA_12_BPC;
+ temp |= DP_MSA_MISC_12_BPC;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
break;
}
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->limited_color_range)
+ temp |= DP_MSA_MISC_COLOR_CEA_RGB;
+
/*
* As per DP 1.2 spec section 2.3.4.3 while sending
* YCBCR 444 signals we should program MSA MISC1/0 fields with
- * colorspace information. The output colorspace encoding is BT601.
+ * colorspace information.
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
+ temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+
/*
* As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
* of Color Encoding Format and Content Color Gamut] while sending
- * YCBCR 420 signals we should program MSA MISC1 fields which
- * indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
+ * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields
+ * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
*/
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- temp |= TRANS_MSA_USE_VSC_SDP;
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ temp |= DP_MSA_MISC_COLOR_VSC_SDP;
+
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@@ -1761,7 +1817,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+/*
+ * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
+ *
+ * Only intended to be used by intel_ddi_enable_transcoder_func() and
+ * intel_ddi_config_transcoder_func().
+ */
+static u32
+intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -1840,11 +1903,42 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
} else {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ return temp;
+}
+
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+/*
+ * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
+ * bit.
+ */
+static void
+intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ temp &= ~TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
@@ -2045,18 +2139,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
- port_name(port));
+ DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
- port_name(port), *pipe_mask);
+ DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
- port_name(port), *pipe_mask, mst_pipe_mask);
+ DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2066,8 +2162,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("Port %c enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", port_name(port), tmp);
+ DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
+ "(PHY_CTL %08x)\n", encoder->base.base.id,
+ encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2138,7 +2235,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
/*
* VDSC power is needed when DSC is enabled
*/
- if (crtc_state->dsc_params.compression_enable)
+ if (crtc_state->dsc.compression_enable)
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
}
@@ -2269,7 +2366,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ } else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2583,7 +2686,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
int ln;
@@ -2599,33 +2702,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2633,9 +2736,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2643,7 +2746,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2654,17 +2757,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, port));
+ val = I915_READ(MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, port), val);
+ I915_WRITE(MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, port));
+ val = I915_READ(MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2672,9 +2775,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, port), val);
+ I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, port));
+ val = I915_READ(MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2682,18 +2785,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, port), val);
+ I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
}
}
@@ -2711,6 +2814,64 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
+static void
+tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
+ u32 level)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
+ u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ ddi_translations = tgl_dkl_phy_ddi_translations;
+
+ if (level >= n_entries)
+ level = n_entries - 1;
+
+ dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
+
+ for (ln = 0; ln < 2; ln++) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+
+ I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val &= ~DKL_TX_DP20BITMODE;
+ I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ }
+}
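The HIP_INDEX_REG write at the top of the lane loop above is bank selection: DKL PHY per-lane registers share one address, and the index register chooses which lane's bank subsequent accesses hit. A hedged sketch of that select-then-RMW pattern, with stub accessors standing in for I915_READ/I915_WRITE:

    unsigned int read_reg(unsigned int reg);             /* stub */
    void write_reg(unsigned int reg, unsigned int val);  /* stub */

    static void rmw_per_lane(unsigned int hip_index_reg,
                             const unsigned int bank_sel[2],
                             unsigned int reg, unsigned int mask,
                             unsigned int bits)
    {
            int ln;

            for (ln = 0; ln < 2; ln++) {
                    /* select the lane's register bank first ... */
                    write_reg(hip_index_reg, bank_sel[ln]);

                    /* ... then every access below targets that lane */
                    write_reg(reg, (read_reg(reg) & ~mask) | bits);
            }
    }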
+
+static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int link_clock,
+ u32 level,
+ enum intel_output_type type)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
+ else
+ tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+}
+
static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2742,7 +2903,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
+ else if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2989,130 +3153,141 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
-static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+static void
+icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ u32 val, bits;
int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(MG_DP_MODE(ln, port), val);
- }
+ bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
- int ln;
+ for (ln = 0; ln < 2; ln++) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ val = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ val = I915_READ(MG_DP_MODE(ln, tc_port));
+ }
- if (tc_port == PORT_TC_NONE)
- return;
+ if (enable)
+ val |= bits;
+ else
+ val &= ~bits;
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(MG_DP_MODE(ln, port), val);
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(DKL_DP_MODE(tc_port), val);
+ else
+ I915_WRITE(MG_DP_MODE(ln, tc_port), val);
}
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
+ if (INTEL_GEN(dev_priv) == 11) {
+ bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ if (enable)
+ val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
+ else
+ val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+ }
}
-static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+static void
+icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->base.port;
- u32 ln0, ln1, lane_mask;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+ u32 ln0, ln1, pin_assignment;
+ u8 width;
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
- ln0 = I915_READ(MG_DP_MODE(0, port));
- ln1 = I915_READ(MG_DP_MODE(1, port));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = I915_READ(DKL_DP_MODE(tc_port));
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ ln0 = I915_READ(MG_DP_MODE(0, tc_port));
+ ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ }
- switch (intel_dig_port->tc_mode) {
- case TC_PORT_DP_ALT:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
+ /* DPPATC */
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+ width = crtc_state->lane_count;
- switch (lane_mask) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
+ switch (pin_assignment) {
+ case 0x0:
+ WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ if (width == 1) {
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x1:
+ if (width == 4) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x2:
+ if (width == 2) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x3:
+ case 0x5:
+ if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_mask);
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ case 0x4:
+ case 0x6:
+ if (width == 1) {
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
break;
-
default:
- MISSING_CASE(intel_dig_port->tc_mode);
- return;
+ MISSING_CASE(pin_assignment);
}
- I915_WRITE(MG_DP_MODE(0, port), ln0);
- I915_WRITE(MG_DP_MODE(1, port), ln1);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln0);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ } else {
+ I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
+ I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ }
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3129,17 +3304,18 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@@ -3148,21 +3324,205 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ u32 val;
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void
+tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ u32 val, exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ exit_scanlines = cstate->dc3co_exitline;
+ exit_scanlines <<= EXITLINE_SHIFT;
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ val |= exit_scanlines;
+ val |= EXITLINE_ENABLE;
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *cstate)
+{
+ u32 exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay;
+
+ cstate->dc3co_exitline = 0;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ /* Bspec 49196: DC3CO only works with pipe A and DDI A. */
+ if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A ||
+ encoder->port != PORT_A)
+ return;
+
+ if (!cstate->has_psr2 || !cstate->base.active)
+ return;
+
+ /*
+ * DC3CO exit time is 200us (Bspec 49196).
+ * PSR2 transcoder early-exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1;
+
+ if (WARN_ON(exit_scanlines > crtc_vdisplay))
+ return;
+
+ cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+ DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
+}
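The exit-line computation above works backwards from the DC3CO exit latency: convert 200 us to scanlines (rounding up, plus one), then subtract from vdisplay. A standalone sketch with a hypothetical 1080p60-like mode (pixel clock 148500 kHz, htotal 2200; values are illustrative), assuming intel_usecs_to_scanlines() is the usual DIV_ROUND_UP(usecs * crtc_clock, 1000 * crtc_htotal) conversion:

    #include <stdio.h>

    int main(void)
    {
            unsigned int crtc_clock = 148500;   /* kHz, hypothetical mode */
            unsigned int crtc_htotal = 2200;
            unsigned int crtc_vdisplay = 1080;
            unsigned int usecs = 200;           /* DC3CO exit time */

            /* usecs -> scanlines, rounding up */
            unsigned int scanlines =
                    (usecs * crtc_clock + 1000 * crtc_htotal - 1) /
                    (1000 * crtc_htotal);
            unsigned int exitline = crtc_vdisplay - (scanlines + 1);

            /* prints: exit scanlines = 15, dc3co_exitline = 1065 */
            printf("exit scanlines = %u, dc3co_exitline = %u\n",
                   scanlines + 1, exitline);
            return 0;
    }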
+
+static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
+{
+ u32 val;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return;
+
+ val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
+
+ if (val & EXITLINE_ENABLE)
+ crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+}
+
+static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int level = intel_ddi_dp_level(intel_dp);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+
+ tgl_set_psr2_transcoder_exitline(crtc_state);
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
+
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
+
+ /* 1.a is handled in intel_atomic_commit_tail() */
+
+ /* 2. */
+ intel_edp_panel_on(intel_dp);
+
+ /*
+ * 1.b, 3. and 4.a are done before tgl_ddi_pre_enable_dp() by:
+ * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
+ * haswell_crtc_enable()->intel_enable_shared_dpll()
+ */
+
+ /* 4.b */
+ intel_ddi_clk_select(encoder, crtc_state);
+
+ /* 5. */
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+
+ /* 6. */
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ /*
+ * 7.a - The steps in this function must only be executed on the
+ * MST master; that is taken care of by the MST hook
+ * intel_mst_pre_enable_dp()
+ */
+ intel_ddi_enable_pipe_clock(crtc_state);
+
+ /* 7.b */
+ intel_ddi_config_transcoder_func(crtc_state);
+
+ /* 7.d */
+ icl_phy_set_clock_gating(dig_port, false);
+
+ /* 7.e */
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
+ encoder->type);
+
+ /* 7.f */
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+
+ /* 7.g */
+ intel_ddi_init_dp_buf_reg(encoder);
+
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+ * DDI FEC: a source that anticipates enabling FEC encoding sets the
+ * FEC_READY bit in the FEC_CONFIGURATION register to 1 before
+ * initiating link training
+ */
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ /* 7.c, 7.h, 7.i, 7.j */
+ intel_dp_start_link_train(intel_dp);
+
+ /* 7.k */
+ if (!is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp);
+
+ /*
+ * TODO: enable clock gating
+ *
+ * This is not part of the DP enabling sequence, but the "PHY
+ * Clockgating programming" section states that clock gating should
+ * be enabled after link training. Doing so causes all subsequent
+ * trainings to fail, so leave it disabled for now.
+ */
+
+ /* 7.l */
+ intel_ddi_enable_fec(encoder, crtc_state);
+ intel_dsc_enable(encoder, crtc_state);
+}
+
+static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -3177,6 +3537,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3186,8 +3549,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+ icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3215,12 +3578,13 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+ if ((port != PORT_A || INTEL_GEN(dev_priv) >= 9) &&
+ !is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
intel_ddi_enable_fec(encoder, crtc_state);
- icl_enable_phy_clock_gating(dig_port);
+ icl_phy_set_clock_gating(dig_port, true);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
@@ -3228,6 +3592,24 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ else
+ hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+
+ /*
+ * For MST, the MSA is set from the MST encoder's pre_enable callback,
+ * after the Virtual Channel has been allocated.
+ */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
+}
+
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3244,10 +3626,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+ icl_phy_set_clock_gating(dig_port, false);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+ level, INTEL_OUTPUT_HDMI);
+ else if (INTEL_GEN(dev_priv) == 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3257,7 +3642,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- icl_enable_phy_clock_gating(dig_port);
+ icl_phy_set_clock_gating(dig_port, true);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@@ -3330,10 +3715,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ }
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -3373,6 +3762,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+ tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3475,7 +3865,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state);
- intel_dp_ycbcr_420_enable(intel_dp, crtc_state);
+ intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
+ intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -3486,12 +3877,12 @@ static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
{
- static const i915_reg_t regs[] = {
- [PORT_A] = CHICKEN_TRANS_EDP,
- [PORT_B] = CHICKEN_TRANS_A,
- [PORT_C] = CHICKEN_TRANS_B,
- [PORT_D] = CHICKEN_TRANS_C,
- [PORT_E] = CHICKEN_TRANS_A,
+ static const enum transcoder trans[] = {
+ [PORT_A] = TRANSCODER_EDP,
+ [PORT_B] = TRANSCODER_A,
+ [PORT_C] = TRANSCODER_B,
+ [PORT_D] = TRANSCODER_C,
+ [PORT_E] = TRANSCODER_A,
};
WARN_ON(INTEL_GEN(dev_priv) < 9);
@@ -3499,7 +3890,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
if (WARN_ON(port < PORT_A || port > PORT_E))
port = PORT_A;
- return regs[port];
+ return CHICKEN_TRANS(trans[port]);
}
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3633,7 +4024,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_ddi_set_pipe_settings(crtc_state);
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
@@ -3761,7 +4152,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 val;
bool wait = false;
- if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
@@ -3769,11 +4160,11 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -3788,8 +4179,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
@@ -3891,6 +4282,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_reg_t dp_tp_ctl;
+
+ if (IS_GEN(dev_priv, 11))
+ dp_tp_ctl = DP_TP_CTL(encoder->port);
+ else
+ dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
+
+ pipe_config->fec_enable =
+ I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
+ }
+
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -3902,6 +4310,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ tgl_dc3co_exitline_get_config(pipe_config);
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
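
For reference, a condensed sketch (not an additional patch hunk) of the
DP_TP_* caching introduced here: gen12 moves DP_TP_CTL/DP_TP_STATUS from
per-port to per-transcoder offsets, so the pre-enable paths cache the
chosen i915_reg_t in intel_dp->regs and later accesses go through the
cached value.

	/* sketch: the selection made by the pre_enable/readout hunks above */
	if (INTEL_GEN(dev_priv) >= 12) {
		/* gen12+: DP_TP_* live per transcoder */
		intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
		intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
	} else {
		/* gen11 and earlier: DP_TP_* live per port */
		intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
		intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
	}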
@@ -3979,10 +4390,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
- else
+ } else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ tgl_dc3co_exitline_compute_config(encoder, pipe_config);
+ }
+
if (ret)
return ret;
@@ -4276,7 +4690,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
- enum pipe pipe;
enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
@@ -4328,8 +4741,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->cloneable = 0;
- for_each_pipe(dev_priv, pipe)
- intel_encoder->crtc_mask |= BIT(pipe);
+ intel_encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4351,46 +4763,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->update_complete = intel_ddi_update_complete;
}
- switch (port) {
- case PORT_A:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_A_IO;
- break;
- case PORT_B:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_B_IO;
- break;
- case PORT_C:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_C_IO;
- break;
- case PORT_D:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_D_IO;
- break;
- case PORT_E:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_E_IO;
- break;
- case PORT_F:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_F_IO;
- break;
- case PORT_G:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_G_IO;
- break;
- case PORT_H:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_H_IO;
- break;
- case PORT_I:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_I_IO;
- break;
- default:
- MISSING_CASE(port);
- }
+ WARN_ON(port > PORT_I);
+ intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+ port - PORT_A;
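
The arithmetic above assumes the PORT_* and POWER_DOMAIN_PORT_DDI_*_IO
enums are contiguous and ordered alike; a minimal sketch of compile-time
checks that would pin that assumption down (these asserts are not part
of the patch):

	/* keep "domain = DDI_A_IO + port - PORT_A" honest */
	BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_B_IO - POWER_DOMAIN_PORT_DDI_A_IO !=
		     PORT_B - PORT_A);
	BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_I_IO - POWER_DOMAIN_PORT_DDI_A_IO !=
		     PORT_I - PORT_A);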
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index a08365da2643..19aeab1246ee 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -30,7 +30,8 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dfff6f4357b8..6f5e3bd13ad1 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -56,6 +55,8 @@
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
@@ -65,6 +66,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
+#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
@@ -79,6 +81,7 @@
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
+#include "intel_vga.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -88,7 +91,17 @@ static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_XRGB8888,
};
-/* Primary plane formats for gen >= 4 */
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -96,6 +109,7 @@ static const u32 i965_primary_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
};
static const u64 i9xx_format_modifiers[] = {
@@ -135,8 +149,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@ -490,7 +502,7 @@ static const struct intel_limit intel_limits_bxt = {
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -521,6 +533,20 @@ needs_modeset(const struct intel_crtc_state *state)
return drm_atomic_crtc_needs_modeset(&state->base);
}
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
+ crtc_state->sync_mode_slaves_mask);
+}
+
+static bool
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
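
For reference, the state space encoded by these two helpers:

	/*
	 *                    master_transcoder     sync_mode_slaves_mask
	 *  sync master       INVALID_TRANSCODER    non-zero
	 *  sync slave        a valid transcoder    0
	 *  not in sync mode  INVALID_TRANSCODER    0
	 */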
+
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -1612,8 +1638,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->base.port),
+ WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
@@ -2079,7 +2105,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
@@ -2161,8 +2188,6 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
@@ -2504,6 +2529,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
* the highest stride limits of them all.
*/
crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ if (!crtc)
+ return 0;
+
plane = to_intel_plane(crtc->base.primary);
return plane->max_stride(plane, pixel_format, modifier,
@@ -2736,10 +2764,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
size++;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
rot_info->plane[i].width * tile_width,
rot_info->plane[i].height * tile_height,
@@ -2861,10 +2886,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
struct drm_rect r;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
info->plane[i].width * tile_width,
info->plane[i].height * tile_height,
@@ -2966,6 +2988,8 @@ static int i9xx_format_to_fourcc(int format)
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
}
}
@@ -3063,13 +3087,11 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
}
- mutex_lock(&dev->struct_mutex);
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
- mutex_unlock(&dev->struct_mutex);
- if (!obj)
+ if (IS_ERR(obj))
return false;
switch (plane_config->tiling) {
@@ -3151,6 +3173,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
@@ -3230,13 +3253,11 @@ valid_fb:
intel_state->color_plane[0].stride =
intel_fb_pitch(fb, 0, intel_state->base.rotation);
- mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
&intel_state->view,
intel_plane_uses_fence(intel_state),
&intel_state->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
intel_crtc->pipe, PTR_ERR(intel_state->vma));
@@ -3344,6 +3365,16 @@ static int icl_max_plane_width(const struct drm_framebuffer *fb,
return 5120;
}
+static int skl_max_plane_height(void)
+{
+ return 4096;
+}
+
+static int icl_max_plane_height(void)
+{
+ return 4320;
+}
+
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
int main_x, int main_y, u32 main_offset)
{
@@ -3392,7 +3423,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
int max_width;
- int max_height = 4096;
+ int max_height;
u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
if (INTEL_GEN(dev_priv) >= 11)
@@ -3402,6 +3433,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
else
max_width = skl_max_plane_width(fb, 0, rotation);
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_height = icl_max_plane_height();
+ else
+ max_height = skl_max_plane_height();
+
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
w, h, max_width, max_height);
@@ -3468,9 +3504,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (x << 16) - plane_state->base.src.x1,
- (y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->base.src,
+ x << 16, y << 16);
return 0;
}
@@ -3541,7 +3576,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (is_planar_yuv_format(fb->format->format)) {
+ if (drm_format_info_is_yuv_semiplanar(fb->format)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3562,6 +3597,53 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
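
A worked example with assumed numbers: a 64bpp fb (cpp == 8) yields
num/den = 10/8, so a 300000 kHz pixel rate needs
DIV_ROUND_UP(300000 * 10, 8) = 375000 kHz of cdclk, i.e. the pixel rate
is capped at 80% of cdclk. A double wide pipe doubles den, halving the
requirement to 187500 kHz.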
+
unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -3644,6 +3726,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
default:
MISSING_CASE(fb->format->format);
return 0;
@@ -3666,7 +3751,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- int src_x, src_y;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_x, src_y, src_w;
u32 offset;
int ret;
@@ -3677,9 +3763,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (!plane_state->base.visible)
return 0;
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
src_x = plane_state->base.src.x1 >> 16;
src_y = plane_state->base.src.y1 >> 16;
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
@@ -3692,9 +3783,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (src_x << 16) - plane_state->base.src.x1,
- (src_y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->base.src,
+ src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
@@ -4224,7 +4314,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev, ctx);
- i915_redisable_vga(to_i915(dev));
+ intel_vga_redisable(to_i915(dev));
if (!state)
return 0;
@@ -4256,7 +4346,7 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(dev_priv));
+ intel_has_gpu_reset(&dev_priv->gt));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
@@ -4343,7 +4433,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -4391,50 +4481,60 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
-static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
- crtc->base.mode = new_crtc_state->base.mode;
+ u32 trans_ddi_func_ctl2_val;
+ u8 master_select;
/*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
+ * Configure the master select and enable Transcoder Port Sync for
+ * the slave CRTC's transcoder.
*/
+ if (crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
- I915_WRITE(PIPESRC(crtc->pipe),
- ((new_crtc_state->pipe_src_w - 1) << 16) |
- (new_crtc_state->pipe_src_h - 1));
+ if (crtc_state->master_transcoder == TRANSCODER_EDP)
+ master_select = 0;
+ else
+ master_select = crtc_state->master_transcoder + 1;
- /* on skylake this is done by detaching scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(new_crtc_state);
+ /* Set the master select bits for Transcoder Port Sync */
+ trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
+ PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
+ /* Enable Transcoder Port Sync */
+ trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(new_crtc_state);
- else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(old_crtc_state);
- }
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
+}
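
The master select field uses a shifted encoding; a minimal sketch of the
mapping (helper name hypothetical, not in the patch):

	/* EDP encodes as 0, TRANSCODER_A.. encode as 1.. ;
	 * transcoder_master_readout() further down applies the inverse. */
	static u8 master_select_encode(enum transcoder master)
	{
		return master == TRANSCODER_EDP ? 0 : master + 1;
	}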
- if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(crtc);
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 trans_ddi_func_ctl2_val;
+
+ if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
+
+ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+ transcoder_name(old_crtc_state->cpu_transcoder));
+
+ reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
+ /* Clear the enable and master select bits, keeping the rest intact */
+ trans_ddi_func_ctl2_val = I915_READ(reg);
+ trans_ddi_func_ctl2_val &= ~(PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT_MASK);
+ I915_WRITE(reg, trans_ddi_func_ctl2_val);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4477,7 +4577,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
@@ -4578,7 +4678,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -4711,7 +4811,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -4829,7 +4929,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4866,7 +4966,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4897,7 +4997,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -5212,7 +5312,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
@@ -5307,7 +5407,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
@@ -5459,7 +5559,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && is_planar_yuv_format(format->format) &&
+ if (format && drm_format_info_is_yuv_semiplanar(format) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
@@ -5536,7 +5636,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && is_planar_yuv_format(fb->format->format))
+ fb && drm_format_info_is_yuv_semiplanar(fb->format))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
@@ -5568,10 +5668,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -5587,6 +5683,13 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, intel_plane->base.name,
@@ -5646,7 +5749,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
@@ -5728,13 +5831,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
- if (intel_crtc->overlay) {
- struct drm_device *dev = intel_crtc->base.dev;
-
- mutex_lock(&dev->struct_mutex);
+ if (intel_crtc->overlay)
(void) intel_overlay_switch_off(intel_crtc->overlay);
- mutex_unlock(&dev->struct_mutex);
- }
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
@@ -5759,7 +5857,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -5783,7 +5881,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -6306,7 +6404,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6439,7 +6537,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
bool psl_clkgate_wa;
@@ -6459,6 +6557,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(pipe_config);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_enable_trans_port_sync(pipe_config);
+
intel_set_pipe_src_size(pipe_config);
if (cpu_transcoder != TRANSCODER_EDP &&
@@ -6504,7 +6605,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
- intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(pipe_config);
@@ -6565,7 +6665,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Sometimes spurious CPU pipe underruns happen when the
@@ -6637,6 +6737,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
+
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -6734,6 +6837,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_E_LANES;
case PORT_F:
return POWER_DOMAIN_PORT_DDI_F_LANES;
+ case PORT_G:
+ return POWER_DOMAIN_PORT_DDI_G_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
@@ -6750,16 +6855,18 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
dig_port->tc_mode == TC_PORT_TBT_ALT) {
switch (dig_port->aux_ch) {
case AUX_CH_C:
- return POWER_DOMAIN_AUX_TBT1;
+ return POWER_DOMAIN_AUX_C_TBT;
case AUX_CH_D:
- return POWER_DOMAIN_AUX_TBT2;
+ return POWER_DOMAIN_AUX_D_TBT;
case AUX_CH_E:
- return POWER_DOMAIN_AUX_TBT3;
+ return POWER_DOMAIN_AUX_E_TBT;
case AUX_CH_F:
- return POWER_DOMAIN_AUX_TBT4;
+ return POWER_DOMAIN_AUX_F_TBT;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G_TBT;
default:
MISSING_CASE(dig_port->aux_ch);
- return POWER_DOMAIN_AUX_TBT1;
+ return POWER_DOMAIN_AUX_C_TBT;
}
}
@@ -6776,6 +6883,8 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G;
default:
MISSING_CASE(dig_port->aux_ch);
return POWER_DOMAIN_AUX_A;
@@ -6852,7 +6961,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6984,7 +7093,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
@@ -7093,7 +7202,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_display_power_put_unchecked(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
- dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+ dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
@@ -7201,7 +7310,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev_priv)->num_pipes == 2)
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7539,6 +7648,27 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
constant_n);
}
+static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+{
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can keep using
+ * it to avoid unnecessary flicker. Conversely, if the BIOS isn't
+ * using it, don't assume it will work even if the VBT indicates as
+ * much.
+ */
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE;
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+}
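
In short: on IBX/CPT PCHs the BIOS-programmed DREF_SSC1_ENABLE bit is
treated as ground truth and vbt.lvds_use_ssc is overwritten to match, so
a VBT that disagrees with what the BIOS actually enabled never wins.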
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
@@ -8190,6 +8320,21 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
(crtc_state->pipe_src_h - 1));
}
+static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (IS_GEN(dev_priv, 2))
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ else
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+}
+
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8228,7 +8373,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
- if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+ if (intel_pipe_is_interlaced(pipe_config)) {
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->base.adjusted_mode.crtc_vtotal += 1;
pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
@@ -8560,7 +8705,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8670,7 +8815,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
@@ -8699,47 +8844,24 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
-static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static enum intel_output_format
+bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
-
- pipe_config->lspcon_downsampling = false;
+ u32 tmp;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
- if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
- bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
- bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+ if (tmp & PIPEMISC_YUV420_ENABLE) {
+ /* We support 4:2:0 in full blend mode only */
+ WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
- if (ycbcr420_enabled) {
- /* We support 4:2:0 in full blend mode only */
- if (!blend)
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else if (!(IS_GEMINILAKE(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10))
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else
- output = INTEL_OUTPUT_FORMAT_YCBCR420;
- } else {
- /*
- * Currently there is no interface defined to
- * check user preference between RGB/YCBCR444
- * or YCBCR420. So the only possible case for
- * YCBCR444 usage is driving YCBCR420 output
- * with LSPCON, when pipe is configured for
- * YCBCR444 output and LSPCON takes care of
- * downsampling it.
- */
- pipe_config->lspcon_downsampling = true;
- output = INTEL_OUTPUT_FORMAT_YCBCR444;
- }
- }
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ return INTEL_OUTPUT_FORMAT_YCBCR444;
+ } else {
+ return INTEL_OUTPUT_FORMAT_RGB;
}
-
- pipe_config->output_format = output;
}
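
The readout reduces to a three-way decision:

	/*
	 * PIPEMISC_YUV420_ENABLE set     -> YCBCR 4:2:0 (full blend only,
	 *                                   enforced by the WARN_ON)
	 * else OUTPUT_COLORSPACE_YUV set -> YCBCR 4:4:4
	 * else                           -> RGB
	 */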
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
@@ -8777,6 +8899,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -9416,9 +9539,19 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
else
val |= PIPECONF_PROGRESSIVE;
+ /*
+ * This would end up with an odd purple hue over
+ * the entire display. Make sure we don't do it.
+ */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(pipe), val);
@@ -9440,6 +9573,10 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
else
val |= PIPECONF_PROGRESSIVE;
+ if (IS_HASWELL(dev_priv) &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
}
@@ -9590,7 +9727,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -9889,8 +10026,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
val = I915_READ(PLANE_SIZE(pipe, plane_id));
- fb->height = ((val >> 16) & 0xfff) + 1;
- fb->width = ((val >> 0) & 0x1fff) + 1;
+ fb->height = ((val >> 16) & 0xffff) + 1;
+ fb->width = ((val >> 0) & 0xffff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
@@ -9951,9 +10088,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9980,6 +10117,16 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
+ case PIPECONF_OUTPUT_COLORSPACE_YUV601:
+ case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ break;
+ default:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ break;
+ }
+
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
@@ -10394,6 +10541,59 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
}
+static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 trans_port_sync, master_select;
+
+ trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = trans_port_sync &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK;
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
+static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 transcoders;
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
+ crtc_state->cpu_transcoder);
+
+ transcoders = BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) |
+ BIT(TRANSCODER_D);
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -10405,6 +10605,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10435,7 +10637,30 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
}
intel_get_pipe_src_size(crtc, pipe_config);
- intel_get_crtc_ycbcr_config(crtc, pipe_config);
+
+ if (IS_HASWELL(dev_priv)) {
+ u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+ if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ else
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ } else {
+ pipe_config->output_format =
+ bdw_get_pipemisc_output_format(crtc);
+
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling =
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
@@ -10490,6 +10715,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ icelake_get_trans_port_sync_config(pipe_config);
+
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -10511,21 +10740,13 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
else
base = intel_plane_ggtt_offset(plane_state);
- base += plane_state->color_plane[0].offset;
-
- /* ILK+ do this automagically */
- if (HAS_GMCH(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_180)
- base += (plane_state->base.crtc_h *
- plane_state->base.crtc_w - 1) * fb->format->cpp[0];
-
- return base;
+ return base + plane_state->color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
- int x = plane_state->base.crtc_x;
- int y = plane_state->base.crtc_y;
+ int x = plane_state->base.dst.x1;
+ int y = plane_state->base.dst.y1;
u32 pos = 0;
if (x < 0) {
@@ -10547,8 +10768,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
const struct drm_mode_config *config =
&plane_state->base.plane->dev->mode_config;
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ int width = drm_rect_width(&plane_state->base.dst);
+ int height = drm_rect_height(&plane_state->base.dst);
return width > 0 && width <= config->cursor_width &&
height > 0 && height <= config->cursor_height;
@@ -10556,6 +10777,9 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ unsigned int rotation = plane_state->base.rotation;
int src_x, src_y;
u32 offset;
int ret;
@@ -10567,8 +10791,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
if (!plane_state->base.visible)
return 0;
- src_x = plane_state->base.src_x >> 16;
- src_y = plane_state->base.src_y >> 16;
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10579,7 +10803,25 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->base.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
return 0;
}
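
A worked example of the 180 degree fixup above, with assumed geometry: a
64x64 ARGB cursor (cpp == 4) on a GMCH platform advances the offset by
(64 * 64 - 1) * 4 = 16380 bytes, i.e. to the last pixel, which the
hardware then scans out backwards; ILK+ do the same adjustment in
hardware.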
@@ -10603,6 +10845,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->base.src = drm_plane_state_src(&plane_state->base);
+ plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
+
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
@@ -10645,7 +10891,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- int width = plane_state->base.crtc_w;
+ int width = drm_rect_width(&plane_state->base.dst);
/*
* 845g/865g are only limited by the width of their cursors,
@@ -10671,8 +10917,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
return -EINVAL;
}
@@ -10705,8 +10951,8 @@ static void i845_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
- unsigned int width = plane_state->base.crtc_w;
- unsigned int height = plane_state->base.crtc_h;
+ unsigned int width = drm_rect_width(&plane_state->base.dst);
+ unsigned int height = drm_rect_height(&plane_state->base.dst);
cntl = plane_state->ctl |
i845_cursor_ctl_crtc(crtc_state);
@@ -10808,7 +11054,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- switch (plane_state->base.crtc_w) {
+ switch (drm_rect_width(&plane_state->base.dst)) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
@@ -10819,7 +11065,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(plane_state->base.crtc_w);
+ MISSING_CASE(drm_rect_width(&plane_state->base.dst));
return 0;
}
@@ -10833,8 +11079,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ int width = drm_rect_width(&plane_state->base.dst);
+ int height = drm_rect_height(&plane_state->base.dst);
if (!intel_cursor_size_ok(plane_state))
return false;
@@ -10887,17 +11133,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
return -EINVAL;
}
WARN_ON(plane_state->base.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
- if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0], plane_state->base.crtc_w);
+ fb->pitches[0],
+ drm_rect_width(&plane_state->base.dst));
return -EINVAL;
}
@@ -10912,7 +11160,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->base.visible && plane_state->base.crtc_x < 0) {
+ plane_state->base.visible && plane_state->base.dst.x1 < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -10932,11 +11180,14 @@ static void i9xx_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
+ unsigned int width = drm_rect_width(&plane_state->base.dst);
+ unsigned int height = drm_rect_height(&plane_state->base.dst);
+
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (plane_state->base.crtc_h != plane_state->base.crtc_w)
- fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
base = intel_cursor_base(plane_state);
pos = intel_cursor_position(plane_state);
@@ -11081,7 +11332,6 @@ static int intel_modeset_disable_planes(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -11188,10 +11438,8 @@ found:
crtc_state->base.active = crtc_state->base.enable = true;
- if (!mode)
- mode = &load_detect_mode;
-
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
+ &load_detect_mode);
if (ret)
goto fail;
@@ -11283,7 +11531,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -11507,7 +11755,6 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
bool was_crtc_enabled = old_crtc_state->base.active;
bool is_crtc_enabled = crtc_state->base.active;
bool turn_off, turn_on, visible, was_visible;
- struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
@@ -11536,24 +11783,18 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
plane_state->base.visible = visible = false;
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
}
if (!was_visible && !visible)
return 0;
- if (fb != old_plane_state->base.fb)
- crtc_state->fb_changed = true;
-
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
- fb ? fb->base.id : -1);
-
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.base.id, plane->base.name,
was_visible, visible,
turn_off, turn_on, mode_changed);
@@ -11662,7 +11903,7 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- linked = plane_state->linked_plane;
+ linked = plane_state->planar_linked_plane;
if (!linked)
continue;
@@ -11671,8 +11912,8 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
- WARN_ON(linked_plane_state->linked_plane != plane);
- WARN_ON(linked_plane_state->slave == plane_state->slave);
+ WARN_ON(linked_plane_state->planar_linked_plane != plane);
+ WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -11695,16 +11936,16 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
* in the crtc_state->active_planes mask.
*/
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
continue;
- plane_state->linked_plane = NULL;
- if (plane_state->slave && !plane_state->base.visible) {
+ plane_state->planar_linked_plane = NULL;
+ if (plane_state->planar_slave && !plane_state->base.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
- plane_state->slave = false;
+ plane_state->planar_slave = false;
}
if (!crtc_state->nv12_planes)
@@ -11738,10 +11979,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
return -EINVAL;
}
- plane_state->linked_plane = linked;
+ plane_state->planar_linked_plane = linked;
- linked_state->slave = true;
- linked_state->linked_plane = plane;
+ linked_state->planar_slave = true;
+ linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
@@ -11761,25 +12002,108 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static int intel_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc_state);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_connector *master_connector, *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_crtc *master_crtc = NULL;
+ struct drm_crtc_state *master_crtc_state;
+ struct intel_crtc_state *master_pipe_config;
+ int i, tile_group_id;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+	 * In case of tiled displays there can be one or more slaves but only
+	 * one master. Let's make the CRTC used by the connector corresponding
+	 * to the last horizontal and last vertical tile the master/genlock
+	 * CRTC. All the other CRTCs corresponding to other tiles of the same
+	 * tile group are the slave CRTCs and hold a pointer to their genlock
+	 * CRTC.
+ */
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+ if (!connector->has_tile)
+ continue;
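+		/* Sync mode only applies when the mode exactly spans this tile */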
+ if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->base.mode.vdisplay != connector->tile_v_size)
+ return 0;
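+		/* The last horizontal/vertical tile drives the master CRTC, skip it */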
+ if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ continue;
+ crtc_state->sync_mode_slaves_mask = 0;
+ tile_group_id = connector->tile_group->id;
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(master_connector, &conn_iter) {
+ struct drm_connector_state *master_conn_state = NULL;
+
+ if (!master_connector->has_tile)
+ continue;
+ if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+ master_connector->tile_v_loc != master_connector->num_v_tile - 1)
+ continue;
+ if (master_connector->tile_group->id != tile_group_id)
+ continue;
+
+ master_conn_state = drm_atomic_get_connector_state(&state->base,
+ master_connector);
+ if (IS_ERR(master_conn_state)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return PTR_ERR(master_conn_state);
+ }
+ if (master_conn_state->crtc) {
+ master_crtc = master_conn_state->crtc;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!master_crtc) {
+ DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+ connector_state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+ master_crtc);
+ if (IS_ERR(master_crtc_state))
+ return PTR_ERR(master_crtc_state);
+
+ master_pipe_config = to_intel_crtc_state(master_crtc_state);
+ crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+ master_pipe_config->sync_mode_slaves_mask |=
+ BIT(crtc_state->cpu_transcoder);
+ DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+ transcoder_name(crtc_state->master_transcoder),
+ crtc_state->base.crtc->base.id,
+ master_pipe_config->sync_mode_slaves_mask);
+ }
+
+ return 0;
+}
+
+static int intel_crtc_atomic_check(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool mode_changed = needs_modeset(crtc_state);
int ret;
- bool mode_changed = needs_modeset(pipe_config);
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->active)
- pipe_config->update_wm_post = true;
+ mode_changed && !crtc_state->base.active)
+ crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->enable &&
+ if (mode_changed && crtc_state->base.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(pipe_config->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- pipe_config);
+ !WARN_ON(crtc_state->shared_dpll)) {
+ ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
}
@@ -11788,19 +12112,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* May need to update pipe gamma enable bits
* when C8 planes are getting enabled/disabled.
*/
- if (c8_planes_changed(pipe_config))
- crtc_state->color_mgmt_changed = true;
+ if (c8_planes_changed(crtc_state))
+ crtc_state->base.color_mgmt_changed = true;
- if (mode_changed || pipe_config->update_pipe ||
- crtc_state->color_mgmt_changed) {
- ret = intel_color_check(pipe_config);
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->base.color_mgmt_changed) {
+ ret = intel_color_check(crtc_state);
if (ret)
return ret;
}
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(pipe_config);
+ ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
return ret;
@@ -11816,7 +12140,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(pipe_config);
+ ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
@@ -11824,29 +12148,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || pipe_config->update_pipe)
- ret = skl_update_scaler_crtc(pipe_config);
-
- if (!ret)
- ret = icl_check_nv12_planes(pipe_config);
+ if (mode_changed || crtc_state->update_pipe)
+ ret = skl_update_scaler_crtc(crtc_state);
if (!ret)
- ret = skl_check_pipe_max_pixel_rate(intel_crtc,
- pipe_config);
- if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
- pipe_config);
+ ret = intel_atomic_setup_scalers(dev_priv, crtc,
+ crtc_state);
}
if (HAS_IPS(dev_priv))
- pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+ crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_check = intel_crtc_atomic_check,
-};
-
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -12156,6 +12470,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
+ if (IS_CHERRYVIEW(dev_priv))
+ DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
dump_planes:
if (!state)
return;
@@ -12176,6 +12499,12 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
bool ret = true;
/*
+ * We're going to peek into connector->state,
+ * hence connection_mutex must be held.
+ */
+ drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+
+ /*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
@@ -12257,6 +12586,13 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
+ /*
+	 * Save the slave bitmask, which gets filled in for the master crtc
+	 * state during the slave's atomic check call.
+ */
+ if (is_trans_port_sync_master(crtc_state))
+ saved_state->sync_mode_slaves_mask =
+ crtc_state->sync_mode_slaves_mask;
/* Keep base drm_crtc_state intact, only clear our extended struct */
BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
@@ -12350,6 +12686,15 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
CRTC_STEREO_DOUBLE);
+ /* Set the crtc_state defaults for trans_port_sync */
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
+ ret = icl_add_sync_mode_crtcs(pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+ ret);
+ return ret;
+ }
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -12482,22 +12827,23 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
- drm_dbg(DRM_UT_KMS, "expected:");
+ DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
+ DRM_DEBUG_KMS("expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg(DRM_UT_KMS, "found");
+ DRM_DEBUG_KMS("found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- drm_err("mismatch in %s infoframe", name);
- drm_err("expected:");
+ DRM_ERROR("mismatch in %s infoframe\n", name);
+ DRM_ERROR("expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err("found");
+ DRM_ERROR("found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
-static void __printf(3, 4)
-pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
+static void __printf(4, 5)
+pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -12507,9 +12853,11 @@ pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
vaf.va = &args;
if (fastset)
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- drm_err("mismatch in %s %pV", name, &vaf);
+ DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
@@ -12537,7 +12885,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool fastset)
{
struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
bool ret = true;
+ u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
@@ -12549,8 +12899,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected 0x%08x, found 0x%08x)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12559,8 +12909,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12569,8 +12919,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12586,8 +12936,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12596,8 +12946,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %p, found %p)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12608,9 +12958,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
!fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12635,10 +12985,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
&pipe_config->name, !fastset) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, !fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12660,8 +13010,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(%x) (expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -12671,8 +13021,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12689,6 +13039,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
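+/* Check the gamma mode first; compare LUT contents only when the modes match */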
+#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
+ if (current_config->name1 != pipe_config->name1) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name1), \
+ "(expected %i, found %i, won't compare lut values)", \
+ current_config->name1, \
+ pipe_config->name1); \
+ ret = false;\
+ } else { \
+ if (!intel_color_lut_equal(current_config->name2, \
+ pipe_config->name2, pipe_config->name1, \
+ bit_precision)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name2), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
+ } \
+ } \
+} while (0)
+
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -12727,6 +13095,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12735,6 +13104,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -12784,6 +13154,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(csc_mode);
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+
+ bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
+ if (bp_gamma)
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
+
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12839,6 +13214,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_I(master_transcoder);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12846,6 +13224,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK
return ret;
@@ -13157,7 +13536,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->slave ||
+ assert_plane(plane, plane_state->planar_slave ||
plane_state->base.visible);
}
@@ -13273,10 +13652,15 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
verify_disabled_dpll_state(dev_priv);
}
-static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
+static void
+intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13306,7 +13690,6 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
* answer that's slightly in the future.
*/
if (IS_GEN(dev_priv, 2)) {
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -13317,8 +13700,9 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
} else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
- } else
+ } else {
crtc->scanline_offset = 1;
+ }
}
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
@@ -13400,158 +13784,43 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_lock_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /* Add all pipes to the state */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int intel_modeset_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /*
- * Add all pipes to the state, and force
- * a modeset on all the active ones.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
- int ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (!crtc_state->base.active || needs_modeset(crtc_state))
- continue;
-
- crtc_state->base.mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(&state->base,
- &crtc->base);
- if (ret)
- return ret;
-
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int intel_modeset_checks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
- int ret = 0, i;
-
- if (!check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return -EINVAL;
- }
+ int ret, i;
/* keep the current setting */
if (!state->cdclk.force_min_cdclk_changed)
state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
state->modeset = true;
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
state->cdclk.logical = dev_priv->cdclk.logical;
state->cdclk.actual = dev_priv->cdclk.actual;
- state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (new_crtc_state->base.active)
- state->active_crtcs |= 1 << i;
+ state->active_pipes |= BIT(crtc->pipe);
else
- state->active_crtcs &= ~(1 << i);
+ state->active_pipes &= ~BIT(crtc->pipe);
if (old_crtc_state->base.active != new_crtc_state->base.active)
- state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
+ state->active_pipe_changes |= BIT(crtc->pipe);
}
- /*
- * See if the config requires any additional preparation, e.g.
- * to adjust global state with pipes off. We need to do this
- * here so we can get the modeset_pipe updated config for the new
- * mode set on this crtc. For other crtcs we need to use the
- * adjusted_mode bits in the crtc directly.
- */
- if (dev_priv->display.modeset_calc_cdclk) {
- enum pipe pipe;
-
- ret = dev_priv->display.modeset_calc_cdclk(state);
- if (ret < 0)
+ if (state->active_pipe_changes) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
return ret;
-
- /*
- * Writes to dev_priv->cdclk.logical must protected by
- * holding all the crtc locks, even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_lock_all_pipes(state);
- if (ret < 0)
- return ret;
- }
-
- if (is_power_of_2(state->active_crtcs)) {
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
-
- pipe = ilog2(state->active_crtcs);
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (crtc_state && needs_modeset(crtc_state))
- pipe = INVALID_PIPE;
- } else {
- pipe = INVALID_PIPE;
- }
-
- /* All pipes must be switched off while we change the cdclk. */
- if (pipe != INVALID_PIPE &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_lock_all_pipes(state);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = pipe;
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(state);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = INVALID_PIPE;
- }
-
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
}
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
@@ -13600,6 +13869,114 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
+static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ u8 plane_ids_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if ((plane_ids_mask & BIT(plane->id)) == 0)
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
+{
+ /* See {hsw,vlv,ivb}_plane_ratio() */
+ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv);
+}
+
+static int intel_atomic_check_planes(struct intel_atomic_state *state,
+ bool *need_modeset)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = icl_add_linked_planes(state);
+ if (ret)
+ return ret;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_atomic_check(state, plane);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
+ return ret;
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ u8 old_active_planes, new_active_planes;
+
+ ret = icl_check_nv12_planes(new_crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * On some platforms the number of active planes affects
+ * the planes' minimum cdclk calculation. Add such planes
+ * to the state before we compute the minimum cdclk.
+ */
+ if (!active_planes_affects_min_cdclk(dev_priv))
+ continue;
+
+ old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+
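+		/* Only the number of active planes matters here, not which planes */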
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ continue;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * active_planes bitmask has been updated, and potentially
+ * affected planes are part of the state. We can now
+ * compute the minimum cdclk for each plane.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+
+ return 0;
+}
+
+static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret = intel_crtc_atomic_check(state, crtc);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13613,7 +13990,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = state->cdclk.force_min_cdclk_changed;
+ bool any_ms = false;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -13647,10 +14024,22 @@ static int intel_atomic_check(struct drm_device *dev,
any_ms = true;
}
+ if (any_ms && !check_digital_port_conflicts(state)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		ret = -EINVAL;
+ goto fail;
+ }
+
ret = drm_dp_mst_atomic_check(&state->base);
if (ret)
goto fail;
+ any_ms |= state->cdclk.force_min_cdclk_changed;
+
+ ret = intel_atomic_check_planes(state, &any_ms);
+ if (ret)
+ goto fail;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
@@ -13659,11 +14048,7 @@ static int intel_atomic_check(struct drm_device *dev,
state->cdclk.logical = dev_priv->cdclk.logical;
}
- ret = icl_add_linked_planes(state);
- if (ret)
- goto fail;
-
- ret = drm_atomic_helper_check_planes(dev, &state->base);
+ ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
@@ -13721,25 +14106,110 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!IS_GEN(dev_priv, 2))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ if (crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+}
+
+static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ */
+ intel_set_pipe_src_size(new_crtc_state);
+
+ /* on skylake this is done by detaching scalers */
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_detach_scalers(new_crtc_state);
+
+ if (new_crtc_state->pch_pfit.enabled)
+ skylake_pfit_enable(new_crtc_state);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (new_crtc_state->pch_pfit.enabled)
+ ironlake_pfit_enable(new_crtc_state);
+ else if (old_crtc_state->pch_pfit.enabled)
+ ironlake_pfit_disable(old_crtc_state);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_set_pipe_chicken(crtc);
+}
+
+static void commit_pipe_config(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ /*
+ * During modesets pipe configuration was programmed as the
+ * CRTC was enabled.
+ */
+ if (!modeset) {
+ if (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_detach_scalers(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
+ if (new_crtc_state->update_pipe)
+ intel_pipe_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(state,
+ new_crtc_state);
+}
+
static void intel_update_crtc(struct intel_crtc *crtc,
struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state,
to_intel_plane(crtc->base.primary));
if (modeset) {
- update_scanline_offset(new_crtc_state);
+ intel_crtc_update_active_timings(new_crtc_state);
+
dev_priv->display.crtc_enable(new_crtc_state, state);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
} else {
+ if (new_crtc_state->preload_luts &&
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
+
intel_pre_plane_update(old_crtc_state, new_crtc_state);
if (new_crtc_state->update_pipe)
@@ -13751,17 +14221,151 @@ static void intel_update_crtc(struct intel_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
- intel_begin_crtc_commit(state, crtc);
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
else
i9xx_update_planes_on_crtc(state, crtc);
- intel_finish_crtc_commit(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
+ enum transcoder slave_transcoder;
+
+ WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+
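+	/* Assumes the slave transcoder and its pipe share the same index */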
+ slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
+ return intel_get_crtc_for_pipe(dev_priv,
+ (enum pipe)slave_transcoder);
+}
+
+static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_disable_planes(state, crtc);
+
+ /*
+ * We need to disable pipe CRC before disabling the pipe,
+ * or we race against vblank off.
+ */
+ intel_crtc_disable_pipe_crc(crtc);
+
+ dev_priv->display.crtc_disable(old_crtc_state, state);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
+
+ /*
+ * Underruns don't always raise interrupts,
+ * so check manually.
+ */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
+
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->base.active &&
+ !HAS_GMCH(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state,
+ new_crtc_state);
+}
+
+static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ /* Disable Slave first */
+ intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
+ if (old_slave_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_slave_crtc_state,
+ new_slave_crtc_state,
+ slave_crtc);
+
+ /* Disable Master */
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ if (old_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_crtc_state,
+ new_crtc_state,
+ crtc);
+}
+
+static void intel_commit_modeset_disables(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ /*
+	 * Disable CRTCs/pipes in reverse order because some features (MST in
+	 * TGL+) require a master/slave relationship between pipes. We always
+	 * pick the lowest pipe as the master, so it is enabled first; by
+	 * disabling in reverse order the master is the last pipe to be
+	 * disabled.
+ */
+ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+		/*
+		 * With Transcoder Port Sync, master and slave CRTCs can be
+		 * assigned in any order, but the slave CRTCs must be
+		 * disabled before the master CRTC, since slave vblanks are
+		 * masked until the master's vblank.
+		 */
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ if (is_trans_port_sync_master(new_crtc_state))
+ intel_trans_port_sync_modeset_disables(state,
+ crtc,
+ old_crtc_state,
+ new_crtc_state);
+ else
+ continue;
+ } else {
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
+
+ if (old_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_crtc_state,
+ new_crtc_state,
+ crtc);
+ }
+ }
}
-static void intel_update_crtcs(struct intel_atomic_state *state)
+static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -13776,14 +14380,120 @@ static void intel_update_crtcs(struct intel_atomic_state *state)
}
}
-static void skl_update_crtcs(struct intel_atomic_state *state)
+static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_update_active_timings(new_crtc_state);
+ dev_priv->display.crtc_enable(new_crtc_state, state);
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct drm_connector *uninitialized_var(conn);
+ struct drm_connector_state *conn_state;
+ struct intel_dp *intel_dp;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ if (conn_state->crtc == &crtc->base)
+ break;
+ }
+ intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
+ intel_dp_stop_link_train(intel_dp);
+}
+
+static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
+ else if (new_plane_state)
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ skl_update_planes_on_crtc(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
+ slave_crtc->base.name);
+
+	/*
+	 * Enable sequence for the slave, with DP_TP_CTL left Idle until
+	 * the master is ready.
+	 */
+ intel_crtc_enable_trans_port_sync(slave_crtc,
+ state,
+ new_slave_crtc_state);
+
+	/* Enable sequence for the master, with DP_TP_CTL left Idle */
+ intel_crtc_enable_trans_port_sync(crtc,
+ state,
+ new_crtc_state);
+
+ /* Set Slave's DP_TP_CTL to Normal */
+ intel_set_dp_tp_ctl_normal(slave_crtc,
+ state);
+
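+	/* Give the slave a moment in Normal before switching the master over */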
+ /* Set Master's DP_TP_CTL To Normal */
+ usleep_range(200, 400);
+ intel_set_dp_tp_ctl_normal(crtc,
+ state);
+
+	/* Now do the post-crtc-enable updates for master and slave */
+ intel_post_crtc_enable_updates(slave_crtc,
+ state);
+ intel_post_crtc_enable_updates(crtc,
+ state);
+}
+
+static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
unsigned int updated = 0;
bool progress;
- enum pipe pipe;
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = state->wm_results.ddb.enabled_slices;
@@ -13808,20 +14518,19 @@ static void skl_update_crtcs(struct intel_atomic_state *state)
progress = false;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
bool vbl_wait = false;
- unsigned int cmask = drm_crtc_mask(&crtc->base);
-
- pipe = crtc->pipe;
+ bool modeset = needs_modeset(new_crtc_state);
- if (updated & cmask || !new_crtc_state->base.active)
+ if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries,
- INTEL_INFO(dev_priv)->num_pipes, i))
+ INTEL_NUM_PIPES(dev_priv), i))
continue;
- updated |= cmask;
+ updated |= BIT(pipe);
entries[i] = new_crtc_state->wm.skl.ddb;
/*
@@ -13832,12 +14541,22 @@ static void skl_update_crtcs(struct intel_atomic_state *state)
*/
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
- !new_crtc_state->base.active_changed &&
+ !modeset &&
state->wm_results.dirty_pipes != updated)
vbl_wait = true;
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
+ if (is_trans_port_sync_master(new_crtc_state))
+ intel_update_trans_port_sync_crtcs(crtc,
+ state,
+ old_crtc_state,
+ new_crtc_state);
+ else
+ continue;
+ } else {
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
+ }
if (vbl_wait)
intel_wait_for_vblank(dev_priv, pipe);
@@ -13926,49 +14645,18 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
if (needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
modeset_get_crtc_power_domains(new_crtc_state);
}
-
- if (!needs_modeset(new_crtc_state))
- continue;
-
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
-
- if (old_crtc_state->base.active) {
- intel_crtc_disable_planes(state, crtc);
-
- /*
- * We need to disable pipe CRC before disabling the pipe,
- * or we race against vblank off.
- */
- intel_crtc_disable_pipe_crc(crtc);
-
- dev_priv->display.crtc_disable(old_crtc_state, state);
- crtc->active = false;
- intel_fbc_disable(crtc);
- intel_disable_shared_dpll(old_crtc_state);
-
- /*
- * Underruns don't always raise
- * interrupts, so check manually.
- */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-
- /* FIXME unify this for all platforms */
- if (!new_crtc_state->base.active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state,
- new_crtc_state);
- }
}
+ intel_commit_modeset_disables(state);
+
/* FIXME: Eventually get rid of our crtc->config pointer */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
crtc->config = new_crtc_state;
@@ -14009,7 +14697,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.update_crtcs(state);
+ dev_priv->display.commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
@@ -14034,6 +14722,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->base.active &&
!needs_modeset(new_crtc_state) &&
+ !new_crtc_state->preload_luts &&
(new_crtc_state->base.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
@@ -14139,6 +14828,14 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
+static void assert_global_state_locked(struct drm_i915_private *dev_priv)
+{
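+	/* Holding every CRTC's modeset lock is what locks the global state */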
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *_state,
bool nonblock)
@@ -14204,12 +14901,14 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (state->modeset) {
+ if (state->global_state_changed) {
+ assert_global_state_locked(dev_priv);
+
memcpy(dev_priv->min_cdclk, state->min_cdclk,
sizeof(state->min_cdclk));
memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
sizeof(state->min_voltage_level));
- dev_priv->active_crtcs = state->active_crtcs;
+ dev_priv->active_pipes = state->active_pipes;
dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
intel_cdclk_swap_state(state);
@@ -14222,7 +14921,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (nonblock && state->modeset) {
queue_work(dev_priv->modeset_wq, &state->base.commit_work);
} else if (nonblock) {
- queue_work(system_unbound_wq, &state->base.commit_work);
+ queue_work(dev_priv->flip_wq, &state->base.commit_work);
} else {
if (state->modeset)
flush_workqueue(dev_priv->modeset_wq);
@@ -14251,7 +14950,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -14332,7 +15031,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_DISPLAY,
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
};
i915_gem_object_wait_priority(obj, 0, &attr);
@@ -14341,25 +15040,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
- * @new_state: the plane state being prepared
+ * @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
* involves pinning the underlying object and updating the frontbuffer tracking
* bits. Some older platforms need special physical address handling for
* cursor planes.
*
- * Must be called with struct_mutex held.
- *
* Returns 0 on success, negative error code on failure.
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_state->state);
+ to_intel_atomic_state(new_plane_state->base.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *fb = new_plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
@@ -14390,9 +15089,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
- if (new_state->fence) { /* explicit fencing */
+ if (new_plane_state->base.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
- new_state->fence,
+ new_plane_state->base.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
@@ -14406,15 +15105,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
return ret;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- return ret;
- }
+ ret = intel_plane_pin_fb(new_plane_state);
- ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
@@ -14422,7 +15114,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fb_obj_bump_render_priority(obj);
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
- if (!new_state->fence) { /* implicit fencing */
+ if (!new_plane_state->base.fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -14434,11 +15126,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
- add_rps_boost_after_vblank(new_state->crtc, fence);
+ add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ fence);
dma_fence_put(fence);
}
} else {
- add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
+ add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ new_plane_state->base.fence);
}
/*
@@ -14450,7 +15144,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, true);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
intel_state->rps_interactive = true;
}
@@ -14460,130 +15154,27 @@ intel_prepare_plane_fb(struct drm_plane *plane,
/**
* intel_cleanup_plane_fb - Cleans up an fb after plane use
* @plane: drm plane to clean up for
- * @old_state: the state from the previous modeset
+ * @_old_plane_state: the state from the previous modeset
*
* Cleans up a framebuffer that has just been removed from a plane.
- *
- * Must be called with struct_mutex held.
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *_old_plane_state)
{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(old_state->state);
+ to_intel_atomic_state(old_plane_state->base.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, false);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
intel_state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_plane_unpin_fb(to_intel_plane_state(old_state));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int
-skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int max_scale, mult;
- int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
-
- if (!crtc_state->base.enable)
- return DRM_PLANE_HELPER_NO_SCALING;
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- max_dotclk *= 2;
-
- if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
- return DRM_PLANE_HELPER_NO_SCALING;
-
- /*
- * skl max scale is lower of:
- * close to 3 but not 3, -1 is for that purpose
- * or
- * cdclk/crtc_clock
- */
- mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
- tmpclk1 = (1 << 16) * mult - 1;
- tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
- max_scale = min(tmpclk1, tmpclk2);
-
- return max_scale;
-}
-
-static void intel_begin_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
-
- if (modeset)
- goto out;
-
- if (new_crtc_state->base.color_mgmt_changed ||
- new_crtc_state->update_pipe)
- intel_color_commit(new_crtc_state);
-
- if (new_crtc_state->update_pipe)
- intel_update_pipe_config(old_crtc_state, new_crtc_state);
- else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(new_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
-
-out:
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state,
- new_crtc_state);
-}
-
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-
- if (crtc_state->has_pch_encoder) {
- enum pipe pch_transcoder =
- intel_crtc_pch_transcoder(crtc);
-
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
- }
-}
-
-static void intel_finish_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- intel_pipe_update_end(new_crtc_state);
-
- if (new_crtc_state->update_pipe &&
- !needs_modeset(new_crtc_state) &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+ intel_plane_unpin_fb(old_plane_state);
}
/**
@@ -14640,6 +15231,7 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED;
default:
@@ -14673,8 +15265,8 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
};
static int
-intel_legacy_cursor_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -14682,11 +15274,13 @@ intel_legacy_cursor_update(struct drm_plane *plane,
u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->state);
+ to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
int ret;
@@ -14698,14 +15292,13 @@ intel_legacy_cursor_update(struct drm_plane *plane,
crtc_state->update_pipe)
goto slow;
- old_plane_state = plane->state;
/*
* Don't do an async update if there is an outstanding commit modifying
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- if (old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ if (old_plane_state->base.commit &&
+ !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
goto slow;
/*
@@ -14713,56 +15306,51 @@ intel_legacy_cursor_update(struct drm_plane *plane,
* take the slowpath. Only changing fb or position should be
* in the fastpath.
*/
- if (old_plane_state->crtc != crtc ||
- old_plane_state->src_w != src_w ||
- old_plane_state->src_h != src_h ||
- old_plane_state->crtc_w != crtc_w ||
- old_plane_state->crtc_h != crtc_h ||
- !old_plane_state->fb != !fb)
+ if (old_plane_state->base.crtc != &crtc->base ||
+ old_plane_state->base.src_w != src_w ||
+ old_plane_state->base.src_h != src_h ||
+ old_plane_state->base.crtc_w != crtc_w ||
+ old_plane_state->base.crtc_h != crtc_h ||
+ !old_plane_state->base.fb != !fb)
goto slow;
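/*
 * [Editorial note] The "!old_plane_state->base.fb != !fb" test above is
 * the usual C idiom for logical XOR on pointers: each "!" collapses a
 * pointer to a boolean, so the condition is true exactly when an fb is
 * being attached where none was, or removed where one was -- both cases
 * must take the slow path:
 *
 *	!NULL != !fb    -> true  (fb appearing or disappearing: slow path)
 *	!fb1  != !fb2   -> false (one fb replaced by another: fastpath ok)
 */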
- new_plane_state = intel_plane_duplicate_state(plane);
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
if (!new_plane_state)
return -ENOMEM;
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
if (!new_crtc_state) {
ret = -ENOMEM;
goto out_free;
}
- drm_atomic_set_fb_for_plane(new_plane_state, fb);
+ drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
- new_plane_state->src_x = src_x;
- new_plane_state->src_y = src_y;
- new_plane_state->src_w = src_w;
- new_plane_state->src_h = src_h;
- new_plane_state->crtc_x = crtc_x;
- new_plane_state->crtc_y = crtc_y;
- new_plane_state->crtc_w = crtc_w;
- new_plane_state->crtc_h = crtc_h;
+ new_plane_state->base.src_x = src_x;
+ new_plane_state->base.src_y = src_y;
+ new_plane_state->base.src_w = src_w;
+ new_plane_state->base.src_h = src_h;
+ new_plane_state->base.crtc_x = crtc_x;
+ new_plane_state->base.crtc_y = crtc_y;
+ new_plane_state->base.crtc_w = crtc_w;
+ new_plane_state->base.crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ old_plane_state, new_plane_state);
if (ret)
goto out_free;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ ret = intel_plane_pin_fb(new_plane_state);
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
- if (ret)
- goto out_unlock;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
- to_intel_frontbuffer(fb),
- intel_plane->frontbuffer_bit);
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+ to_intel_frontbuffer(new_plane_state->base.fb),
+ plane->frontbuffer_bit);
/* Swap plane state */
- plane->state = new_plane_state;
+ plane->base.state = &new_plane_state->base;
/*
* We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -14776,27 +15364,24 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible)
- intel_update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
+ if (new_plane_state->base.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
else
- intel_disable_plane(intel_plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
- intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
+ intel_plane_unpin_fb(old_plane_state);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
if (new_crtc_state)
- intel_crtc_destroy_state(crtc, &new_crtc_state->base);
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
if (ret)
- intel_plane_destroy_state(plane, new_plane_state);
+ intel_plane_destroy_state(&plane->base, &new_plane_state->base);
else
- intel_plane_destroy_state(plane, old_plane_state);
+ intel_plane_destroy_state(&plane->base, &old_plane_state->base);
return ret;
slow:
- return drm_atomic_helper_update_plane(plane, crtc, fb,
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
@@ -14837,7 +15422,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -14867,8 +15452,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (INTEL_GEN(dev_priv) >= 4) {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
modifiers = i9xx_format_modifiers;
plane->max_stride = i9xx_plane_max_stride;
@@ -14877,6 +15480,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
plane_funcs = &i965_plane_funcs;
} else {
formats = i8xx_primary_formats;
@@ -14888,6 +15500,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->disable_plane = i9xx_disable_plane;
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ plane->min_cdclk = i9xx_plane_min_cdclk;
plane_funcs = &i8xx_plane_funcs;
}
@@ -14926,6 +15539,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_MODE_ROTATE_0,
supported_rotations);
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -14942,7 +15558,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
{
unsigned int possible_crtcs;
struct intel_plane *cursor;
- int ret;
+ int ret, zpos;
cursor = intel_plane_alloc();
if (IS_ERR(cursor))
@@ -14991,6 +15607,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
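/*
 * [Editorial illustration] The immutable zpos properties added in this
 * patch expose the fixed hardware z-order to userspace. Assuming
 * num_sprites[pipe] == 2, and assuming the sprite planes are assigned
 * zpos 1..num_sprites elsewhere in the series, the stack reads:
 *
 *	primary  zpos 0
 *	sprites  zpos 1..2
 *	cursor   zpos 3 (num_sprites + 1, always on top)
 */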
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
@@ -15066,12 +15685,12 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.disable_vblank = i965_disable_vblank,
};
-static const struct drm_crtc_funcs i945gm_crtc_funcs = {
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
INTEL_CRTC_FUNCS,
.get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i945gm_enable_vblank,
- .disable_vblank = i945gm_disable_vblank,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -15142,8 +15761,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
funcs = &g4x_crtc_funcs;
else if (IS_GEN(dev_priv, 4))
funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv))
- funcs = &i945gm_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
else if (IS_GEN(dev_priv, 3))
funcs = &i915_crtc_funcs;
else
@@ -15178,8 +15797,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
}
- drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
intel_color_init(intel_crtc);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -15214,21 +15831,32 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct intel_encoder *encoder)
+static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_encoder *source_encoder;
- int index_mask = 0;
- int entry = 0;
+ u32 possible_clones = 0;
for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
- index_mask |= (1 << entry);
+ possible_clones |= drm_encoder_mask(&source_encoder->base);
+ }
- entry++;
+ return possible_clones;
+}
+
+static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc;
+ u32 possible_crtcs = 0;
+
+ for_each_intel_crtc(dev, crtc) {
+ if (encoder->pipe_mask & BIT(crtc->pipe))
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
}
- return index_mask;
+ return possible_crtcs;
}
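/*
 * [Editorial sketch] The new helper turns the encoder's pipe_mask into
 * a mask of drm_crtc indices, as drm core expects in
 * encoder->possible_crtcs. Assuming three CRTCs whose indices match
 * their pipes and pipe_mask = BIT(PIPE_A) | BIT(PIPE_B):
 *
 *	possible_crtcs = drm_crtc_mask(crtc A) | drm_crtc_mask(crtc B)
 *	               = BIT(0) | BIT(1) = 0x3
 */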
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
@@ -15310,13 +15938,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
if (INTEL_GEN(dev_priv) >= 12) {
- /* TODO: initialize TC ports as well */
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ intel_ddi_init(dev_priv, PORT_G);
+ intel_ddi_init(dev_priv, PORT_H);
+ intel_ddi_init(dev_priv, PORT_I);
icl_dsi_init(dev_priv);
} else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
@@ -15526,9 +16159,10 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_psr_init(dev_priv);
for_each_intel_encoder(&dev_priv->drm, encoder) {
- encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_crtcs =
+ intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
- intel_encoder_clones(encoder);
+ intel_encoder_possible_clones(encoder);
}
intel_init_pch_refclk(dev_priv);
@@ -15783,8 +16417,14 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ /* Transcoder timing limits */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ hdisplay_max = 16384;
+ vdisplay_max = 8192;
+ htotal_max = 16384;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
@@ -15813,6 +16453,56 @@ intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 5)
+ return MODE_V_ILLEGAL;
+ } else {
+ if (mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 3)
+ return MODE_V_ILLEGAL;
+ }
+
+ return MODE_OK;
+}
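/*
 * [Editorial example, assumed timings] A 1920x1080 CVT-RB mode has
 * roughly htotal = 2080 and vtotal = 1111, so htotal - hdisplay = 160
 * (>= 32) and vtotal - vdisplay = 31 (>= 5): accepted on gen5+. A
 * hypothetical mode advertising only a 16-pixel horizontal blanking
 * period would now be rejected with MODE_H_ILLEGAL before it could
 * ever be programmed.
 */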
+
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode)
+{
+ int plane_width_max, plane_height_max;
+
+ /*
+ * intel_mode_valid() should be
+ * sufficient on older platforms.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ return MODE_OK;
+
+ /*
+ * Most people will probably want a fullscreen
+ * plane so let's not advertise modes that are
+ * too big for that.
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ plane_width_max = 5120;
+ plane_height_max = 4320;
+ } else {
+ plane_width_max = 5120;
+ plane_height_max = 4096;
+ }
+
+ if (mode->hdisplay > plane_width_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > plane_height_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
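/*
 * [Editorial illustration] With these limits a 5120x2880 ("5K") mode
 * passes on both branches, while 7680x4320 (8K) is refused even on
 * gen11+: the transcoder itself could scan it out (16384x8192 per the
 * gen11+ limits in intel_mode_valid() above), but no single universal
 * plane could cover the full screen, which is what this helper guards
 * against advertising.
 */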
@@ -15916,47 +16606,17 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.update_crtcs = skl_update_crtcs;
- else
- dev_priv->display.update_crtcs = intel_update_crtcs;
-}
-
-static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return VLV_VGACNTRL;
- else if (INTEL_GEN(dev_priv) >= 5)
- return CPU_VGACNTRL;
+ dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
else
- return VGACNTRL;
-}
-
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
- /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(SR01, VGA_SR_INDEX);
- sr1 = inb(VGA_SR_DATA);
- outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
+ dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
}
-void intel_modeset_init_hw(struct drm_device *dev)
+void intel_modeset_init_hw(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
- dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
+ intel_update_cdclk(i915);
+ intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
+ i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
/*
@@ -16116,114 +16776,111 @@ out:
return ret;
}
-int intel_modeset_init(struct drm_device *dev)
+static void intel_mode_config_init(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe;
- struct intel_crtc *crtc;
- int ret;
-
- dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
-
- drm_mode_config_init(dev);
-
- ret = intel_bw_init(dev_priv);
- if (ret)
- return ret;
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
+ struct drm_mode_config *mode_config = &i915->drm.mode_config;
- dev->mode_config.allow_fb_modifiers = true;
+ drm_mode_config_init(&i915->drm);
- dev->mode_config.funcs = &intel_mode_funcs;
+ mode_config->min_width = 0;
+ mode_config->min_height = 0;
- init_llist_head(&dev_priv->atomic_helper.free_list);
- INIT_WORK(&dev_priv->atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
-
- intel_init_quirks(dev_priv);
+ mode_config->preferred_depth = 24;
+ mode_config->prefer_shadow = 1;
- intel_fbc_init(dev_priv);
+ mode_config->allow_fb_modifiers = true;
- intel_init_pm(dev_priv);
-
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
-
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
- bios_lvds_use_ssc ? "en" : "dis",
- dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
- }
- }
+ mode_config->funcs = &intel_mode_funcs;
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (INTEL_GEN(dev_priv) >= 7) {
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
+ if (INTEL_GEN(i915) >= 7) {
+ mode_config->max_width = 16384;
+ mode_config->max_height = 16384;
+ } else if (INTEL_GEN(i915) >= 4) {
+ mode_config->max_width = 8192;
+ mode_config->max_height = 8192;
+ } else if (IS_GEN(i915, 3)) {
+ mode_config->max_width = 4096;
+ mode_config->max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ mode_config->max_width = 2048;
+ mode_config->max_height = 2048;
}
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
- dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN(dev_priv, 2)) {
- dev->mode_config.cursor_width = 64;
- dev->mode_config.cursor_height = 64;
+ if (IS_I845G(i915) || IS_I865G(i915)) {
+ mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ mode_config->cursor_height = 1023;
+ } else if (IS_GEN(i915, 2)) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
} else {
- dev->mode_config.cursor_width = 256;
- dev->mode_config.cursor_height = 256;
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
}
+}
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev_priv)->num_pipes,
- INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
- for_each_pipe(dev_priv, pipe) {
- ret = intel_crtc_init(dev_priv, pipe);
- if (ret) {
- drm_mode_config_cleanup(dev);
- return ret;
+ i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+
+ intel_mode_config_init(i915);
+
+ ret = intel_bw_init(i915);
+ if (ret)
+ return ret;
+
+ init_llist_head(&i915->atomic_helper.free_list);
+ INIT_WORK(&i915->atomic_helper.free_work,
+ intel_atomic_helper_free_state_worker);
+
+ intel_init_quirks(i915);
+
+ intel_fbc_init(i915);
+
+ intel_init_pm(i915);
+
+ intel_panel_sanitize_ssc(i915);
+
+ intel_gmbus_setup(i915);
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
}
}
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(dev_priv);
+ intel_update_fdi_pll_freq(i915);
- intel_update_czclk(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_update_czclk(i915);
+ intel_modeset_init_hw(i915);
- intel_hdcp_component_init(dev_priv);
+ intel_hdcp_component_init(i915);
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev_priv);
+ if (i915->max_cdclk_freq == 0)
+ intel_update_max_cdclk(i915);
/* Just disable it once at startup */
- i915_disable_vga(dev_priv);
- intel_setup_outputs(dev_priv);
+ intel_vga_disable(i915);
+ intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
@@ -16242,8 +16899,7 @@ int intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display.get_initial_plane_config(crtc,
- &plane_config);
+ i915->display.get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -16257,7 +16913,7 @@ int intel_modeset_init(struct drm_device *dev)
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(i915))
sanitize_watermarks(dev);
/*
@@ -16582,39 +17238,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
icl_sanitize_encoder_pll_mapping(encoder);
}
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
-{
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev_priv);
- }
-}
-
-void i915_redisable_vga(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- i915_redisable_vga_power_on(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
@@ -16658,7 +17281,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_connector_list_iter conn_iter;
int i;
- dev_priv->active_crtcs = 0;
+ dev_priv->active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -16675,7 +17298,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = crtc_state->base.active;
if (crtc_state->base.active)
- dev_priv->active_crtcs |= 1 << crtc->pipe;
+ dev_priv->active_pipes |= BIT(crtc->pipe);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
@@ -16735,24 +17358,28 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
connector->base.dpms = DRM_MODE_DPMS_ON;
encoder = connector->encoder;
connector->base.encoder = &encoder->base;
- if (encoder->base.crtc &&
- encoder->base.crtc->state->active) {
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->base.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
- encoder->base.crtc->state->connector_mask |=
+ crtc_state->base.connector_mask |=
drm_connector_mask(&connector->base);
- encoder->base.crtc->state->encoder_mask |=
+ crtc_state->base.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
-
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
@@ -16771,13 +17398,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
- intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
- crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
- crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
- intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+ struct drm_display_mode mode;
+
+ intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
+ crtc_state);
+
+ mode = crtc_state->base.adjusted_mode;
+ mode.hdisplay = crtc_state->pipe_src_w;
+ mode.vdisplay = crtc_state->pipe_src_h;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
/*
* The initial mode needs to be set in order to keep
@@ -16792,21 +17422,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_crtc_compute_pixel_rate(crtc_state);
- if (dev_priv->display.modeset_calc_cdclk) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
- min_cdclk = 0;
- }
-
- drm_calc_timestamping_constants(&crtc->base,
- &crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc_state);
+ intel_crtc_update_active_timings(crtc_state);
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -16818,8 +17436,34 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (plane_state->base.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
+ if (plane_state->base.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide ||
+ INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
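/*
 * [Editorial note] Since the fb is unknown at readout time, the code
 * above assumes the worst case a visible plane could require: the full
 * pipe pixel rate, or half of it where the hardware is understood to
 * process two pixels per clock (double-wide pipes, GLK and gen10+).
 * The precise per-plane value is recomputed via plane->min_cdclk() on
 * the first real atomic commit.
 */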
+
+ if (crtc_state->base.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (WARN_ON(min_cdclk < 0))
+ min_cdclk = 0;
}
+ dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
intel_bw_crtc_update(bw_state, crtc_state);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
@@ -17060,13 +17704,13 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-static void intel_hpd_poll_fini(struct drm_device *dev)
+static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
@@ -17078,78 +17722,49 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_modeset_driver_remove(struct drm_device *dev)
+void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_workqueue(i915->flip_wq);
+ flush_workqueue(i915->modeset_wq);
- flush_workqueue(dev_priv->modeset_wq);
-
- flush_work(&dev_priv->atomic_helper.free_work);
- WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+ flush_work(&i915->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
/*
* Disable interrupts and polling as the very first thing, to avoid
* creating havoc. Too much stuff here (turning off connectors, ...)
* would experience fancy races otherwise.
*/
- intel_irq_uninstall(dev_priv);
+ intel_irq_uninstall(i915);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(dev);
+ intel_hpd_poll_fini(i915);
/* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(dev_priv);
+ intel_fbdev_fini(i915);
intel_unregister_dsm_handler();
- intel_fbc_global_disable(dev_priv);
+ intel_fbc_global_disable(i915);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- intel_hdcp_component_fini(dev_priv);
-
- drm_mode_config_cleanup(dev);
+ intel_hdcp_component_fini(i915);
- intel_overlay_cleanup(dev_priv);
+ drm_mode_config_cleanup(&i915->drm);
- intel_gmbus_teardown(dev_priv);
+ intel_overlay_cleanup(i915);
- destroy_workqueue(dev_priv->modeset_wq);
+ intel_gmbus_teardown(i915);
- intel_fbc_cleanup_cfb(dev_priv);
-}
+ destroy_workqueue(i915->flip_wq);
+ destroy_workqueue(i915->modeset_wq);
-/*
- * set vga decode state - true == enable VGA decode
- */
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
-{
- unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
- return -EIO;
- }
-
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
- return 0;
-
- if (state)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
- return -EIO;
- }
-
- return 0;
+ intel_fbc_cleanup_cfb(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -17212,7 +17827,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -17291,7 +17906,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 01fa87ad3270..f417e0948001 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2017 Intel Corporation
+ * Copyright © 2006-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,8 +32,10 @@ enum link_m_n_set;
struct dpll;
struct drm_connector;
struct drm_device;
+struct drm_display_mode;
struct drm_encoder;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
@@ -52,6 +54,7 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
+struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -91,6 +94,7 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
+ INVALID_TRANSCODER = -1,
/*
* The following transcoders have a 1:1 transcoder -> pipe mapping,
* keep their values fixed: the code assumes that TRANSCODER_A=0, the
@@ -182,6 +186,24 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
@@ -251,6 +273,7 @@ enum aux_ch {
AUX_CH_D,
AUX_CH_E, /* ICL+ */
AUX_CH_F,
+ AUX_CH_G,
};
#define aux_ch_name(a) ((a) + 'A')
@@ -289,10 +312,10 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
for_each_if((__mask) & BIT(__p))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -330,7 +353,7 @@ enum phy_fia {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- drm_plane_mask(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -411,6 +434,23 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
+#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
+#define intel_atomic_crtc_state_for_each_plane_state( \
+ plane, plane_state, \
+ crtc_state) \
+ for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \
+ ((crtc_state)->base.plane_mask)) \
+ for_each_if ((plane_state = \
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base))))
+
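/*
 * [Editorial usage sketch, hypothetical caller] The new iterator walks
 * the plane states attached to a crtc_state's atomic state:
 *
 *	struct intel_plane *plane;
 *	struct intel_plane_state *plane_state;
 *
 *	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
 *						     crtc_state) {
 *		if (plane_state->base.visible)
 *			account_plane(plane, plane_state); -- hypothetical helper
 *	}
 */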
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@@ -420,7 +460,11 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -464,7 +508,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -499,8 +542,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
@@ -520,8 +561,6 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -544,13 +583,10 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
/* modesetting */
-void intel_modeset_init_hw(struct drm_device *dev);
-int intel_modeset_init(struct drm_device *dev);
-void intel_modeset_driver_remove(struct drm_device *dev);
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
+void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init(struct drm_i915_private *i915);
+void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
-void i915_redisable_vga(struct drm_i915_private *dev_priv);
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
/* modesetting asserts */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 12099760d99e..ce1b64f4dd44 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,8 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/vgaarb.h>
-
#include "display/intel_crt.h"
#include "display/intel_dp.h"
@@ -19,16 +17,14 @@
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
+#include "intel_vga.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
const char *
-intel_display_power_domain_str(struct drm_i915_private *i915,
- enum intel_display_power_domain domain)
+intel_display_power_domain_str(enum intel_display_power_domain domain)
{
- bool ddi_tc_ports = IS_GEN(i915, 12);
-
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
@@ -71,23 +67,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
- POWER_DOMAIN_PORT_DDI_TC1_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
+ return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
- POWER_DOMAIN_PORT_DDI_TC2_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
+ return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
- POWER_DOMAIN_PORT_DDI_TC3_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_TC4_LANES:
- return "PORT_DDI_TC4_LANES";
- case POWER_DOMAIN_PORT_DDI_TC5_LANES:
- return "PORT_DDI_TC5_LANES";
- case POWER_DOMAIN_PORT_DDI_TC6_LANES:
- return "PORT_DDI_TC6_LANES";
+ return "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_G_LANES:
+ return "PORT_DDI_G_LANES";
+ case POWER_DOMAIN_PORT_DDI_H_LANES:
+ return "PORT_DDI_H_LANES";
+ case POWER_DOMAIN_PORT_DDI_I_LANES:
+ return "PORT_DDI_I_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -95,23 +85,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
- POWER_DOMAIN_PORT_DDI_TC1_IO);
- return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
+ return "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
- POWER_DOMAIN_PORT_DDI_TC2_IO);
- return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
+ return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
- POWER_DOMAIN_PORT_DDI_TC3_IO);
- return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DDI_TC4_IO:
- return "PORT_DDI_TC4_IO";
- case POWER_DOMAIN_PORT_DDI_TC5_IO:
- return "PORT_DDI_TC5_IO";
- case POWER_DOMAIN_PORT_DDI_TC6_IO:
- return "PORT_DDI_TC6_IO";
+ return "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DDI_G_IO:
+ return "PORT_DDI_G_IO";
+ case POWER_DOMAIN_PORT_DDI_H_IO:
+ return "PORT_DDI_H_IO";
+ case POWER_DOMAIN_PORT_DDI_I_IO:
+ return "PORT_DDI_I_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -129,34 +113,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
- return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
+ return "AUX_D";
case POWER_DOMAIN_AUX_E:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
- return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
+ return "AUX_E";
case POWER_DOMAIN_AUX_F:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
- return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
- case POWER_DOMAIN_AUX_TC4:
- return "AUX_TC4";
- case POWER_DOMAIN_AUX_TC5:
- return "AUX_TC5";
- case POWER_DOMAIN_AUX_TC6:
- return "AUX_TC6";
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_G:
+ return "AUX_G";
+ case POWER_DOMAIN_AUX_H:
+ return "AUX_H";
+ case POWER_DOMAIN_AUX_I:
+ return "AUX_I";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
- case POWER_DOMAIN_AUX_TBT1:
- return "AUX_TBT1";
- case POWER_DOMAIN_AUX_TBT2:
- return "AUX_TBT2";
- case POWER_DOMAIN_AUX_TBT3:
- return "AUX_TBT3";
- case POWER_DOMAIN_AUX_TBT4:
- return "AUX_TBT4";
- case POWER_DOMAIN_AUX_TBT5:
- return "AUX_TBT5";
- case POWER_DOMAIN_AUX_TBT6:
- return "AUX_TBT6";
+ case POWER_DOMAIN_AUX_C_TBT:
+ return "AUX_C_TBT";
+ case POWER_DOMAIN_AUX_D_TBT:
+ return "AUX_D_TBT";
+ case POWER_DOMAIN_AUX_E_TBT:
+ return "AUX_E_TBT";
+ case POWER_DOMAIN_AUX_F_TBT:
+ return "AUX_F_TBT";
+ case POWER_DOMAIN_AUX_G_TBT:
+ return "AUX_G_TBT";
+ case POWER_DOMAIN_AUX_H_TBT:
+ return "AUX_H_TBT";
+ case POWER_DOMAIN_AUX_I_TBT:
+ return "AUX_I_TBT";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -283,23 +266,8 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 irq_pipe_mask, bool has_vga)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- if (has_vga) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- }
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
if (irq_pipe_mask)
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
@@ -578,6 +546,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#endif
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -594,6 +564,17 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
+
+ if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ DRM_WARN("Timeout waiting TC uC health\n");
+ }
}
static void
@@ -714,7 +695,11 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 11)
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (IS_GEN(dev_priv, 11))
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
@@ -784,6 +769,52 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
dev_priv->csr.dc_state = val & mask;
}
+static u32
+sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 target_dc_state)
+{
+ u32 states[] = {
+ DC_STATE_EN_UPTO_DC6,
+ DC_STATE_EN_UPTO_DC5,
+ DC_STATE_EN_DC3CO,
+ DC_STATE_DISABLE,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ if (target_dc_state != states[i])
+ continue;
+
+ if (dev_priv->csr.allowed_dc_mask & target_dc_state)
+ break;
+
+ target_dc_state = states[i + 1];
+ }
+
+ return target_dc_state;
+}
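/*
 * [Editorial behavior sketch] states[] is ordered from deepest to
 * shallowest, so the loop demotes an unsupported request to the next
 * shallower state. E.g. a request for DC_STATE_EN_UPTO_DC6 when
 * allowed_dc_mask only contains DC_STATE_EN_UPTO_DC5 yields DC5; if
 * nothing is allowed at all, the result falls through to
 * DC_STATE_DISABLE, which is always a safe target.
 */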
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ DRM_DEBUG_KMS("Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling DC3CO\n");
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ I915_WRITE(DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /*
+ * Allow 200us for DC3CO exit, per Bspec 49196.
+ */
+ usleep_range(200, 210);
+}
+
static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
@@ -839,6 +870,51 @@ lookup_power_well(struct drm_i915_private *dev_priv,
return &dev_priv->power_domains.power_wells[0];
}
+/**
+ * intel_display_power_set_target_dc_state - Set target dc state.
+ * @dev_priv: i915 device
+ * @state: state which needs to be set as target_dc_state.
+ *
+ * This function sets the "DC off" power well's target_dc_state;
+ * based on this target_dc_state, the "DC off" power well will
+ * enable the desired DC state.
+ */
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ struct i915_power_well *power_well;
+ bool dc_off_enabled;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+
+ if (WARN_ON(!power_well))
+ goto unlock;
+
+ state = sanitize_target_dc_state(dev_priv, state);
+
+ if (state == dev_priv->csr.target_dc_state)
+ goto unlock;
+
+ dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
+ power_well);
+ /*
+ * If the DC off power well is currently disabled, toggle it
+ * (enable then disable) so that the new target DC state takes
+ * effect.
+ */
+ if (!dc_off_enabled)
+ power_well->desc->ops->enable(dev_priv, power_well);
+
+ dev_priv->csr.target_dc_state = state;
+
+ if (!dc_off_enabled)
+ power_well->desc->ops->disable(dev_priv, power_well);
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+}
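/*
 * [Editorial usage sketch, hypothetical caller] A PSR2-style path could
 * switch the target before idling the display and restore it after:
 *
 *	intel_display_power_set_target_dc_state(dev_priv,
 *						DC_STATE_EN_DC3CO);
 *	(display idles; the next "DC off" well disable enters DC3CO)
 *	intel_display_power_set_target_dc_state(dev_priv,
 *						DC_STATE_EN_UPTO_DC6);
 *
 * The enable/disable dance inside the function exists because a new
 * target only takes effect on the next disable of the "DC off" well.
 */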
+
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
@@ -951,7 +1027,8 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+ return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
@@ -967,6 +1044,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = {};
+ if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
@@ -999,10 +1081,17 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
if (!dev_priv->csr.dmc_payload)
return;
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+ switch (dev_priv->csr.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+ break;
+ case DC_STATE_EN_UPTO_DC5:
gen9_enable_dc5(dev_priv);
+ break;
+ }
}
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
@@ -1208,7 +1297,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(dev_priv);
+ intel_vga_redisable_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1718,15 +1807,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
- struct drm_i915_private *i915 =
- container_of(power_domains, struct drm_i915_private,
- power_domains);
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(i915, domain),
+ intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
@@ -1896,7 +1982,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
- const char *name = intel_display_power_domain_str(dev_priv, domain);
+ const char *name = intel_display_power_domain_str(domain);
power_domains = &dev_priv->power_domains;
@@ -2487,10 +2573,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2530,22 +2616,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
+#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_PW_5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_D) | \
@@ -2565,24 +2651,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2596,37 +2682,54 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
TGL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
-#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
-#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
-#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
-#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
-#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
-#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
-
-#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC1))
-#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC2))
-#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC3))
-#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC4))
-#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC5))
-#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC6))
-#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5))
-#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6))
+#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
+#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
+#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
+
+#define TGL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define TGL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define TGL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G))
+#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H))
+#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I))
+#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
+#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
+#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
+#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -2938,7 +3041,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3020,7 +3123,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3080,7 +3183,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
.name = "DC off",
.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3249,7 +3352,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
.name = "DC off",
.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3377,7 +3480,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3484,8 +3587,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .name = "AUX C TC1",
+ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3495,8 +3598,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX D",
- .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .name = "AUX D TC2",
+ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3506,8 +3609,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX E",
- .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .name = "AUX E TC3",
+ .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3517,8 +3620,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX F",
- .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .name = "AUX F TC4",
+ .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3528,8 +3631,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX C TBT1",
+ .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3539,8 +3642,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX D TBT2",
+ .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3550,8 +3653,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX E TBT3",
+ .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3561,8 +3664,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX F TBT4",
+ .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3610,7 +3713,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "DC off",
.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3667,8 +3770,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
}
},
{
- .name = "DDI TC1 IO",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+ .name = "DDI D TC1 IO",
+ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3677,8 +3780,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC2 IO",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+ .name = "DDI E TC2 IO",
+ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3687,8 +3790,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC3 IO",
- .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
+ .name = "DDI F TC3 IO",
+ .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3697,8 +3800,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC4 IO",
- .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
+ .name = "DDI G TC4 IO",
+ .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3707,8 +3810,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC5 IO",
- .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
+ .name = "DDI H TC5 IO",
+ .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3717,8 +3820,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC6 IO",
- .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
+ .name = "DDI I TC6 IO",
+ .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3728,7 +3831,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3738,7 +3841,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3748,7 +3851,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_C_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3757,8 +3860,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC1",
- .domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
+ .name = "AUX D TC1",
+ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3768,8 +3871,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC2",
- .domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
+ .name = "AUX E TC2",
+ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3779,8 +3882,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC3",
- .domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
+ .name = "AUX F TC3",
+ .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3790,8 +3893,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC4",
- .domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
+ .name = "AUX G TC4",
+ .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3801,8 +3904,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC5",
- .domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
+ .name = "AUX H TC5",
+ .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3812,8 +3915,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC6",
- .domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
+ .name = "AUX I TC6",
+ .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3823,8 +3926,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX D TBT1",
+ .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3834,8 +3937,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX E TBT2",
+ .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3845,8 +3948,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX F TBT3",
+ .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3856,8 +3959,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX G TBT4",
+ .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3867,8 +3970,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT5",
- .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
+ .name = "AUX H TBT5",
+ .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3878,8 +3981,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT6",
- .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
+ .name = "AUX I TBT6",
+ .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3931,14 +4034,17 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (INTEL_GEN(dev_priv) >= 11) {
- max_dc = 2;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ max_dc = 4;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN(dev_priv, 11)) {
+ max_dc = 2;
+ mask = DC_STATE_EN_DC9;
} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
@@ -3957,7 +4063,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = enable_dc;
} else if (enable_dc == -1) {
requested_dc = max_dc;
- } else if (enable_dc > max_dc && enable_dc <= 2) {
+ } else if (enable_dc > max_dc && enable_dc <= 4) {
DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
@@ -3966,10 +4072,20 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = max_dc;
}
- if (requested_dc > 1)
+ switch (requested_dc) {
+ case 4:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
+ break;
+ case 3:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
+ break;
+ case 2:
mask |= DC_STATE_EN_UPTO_DC6;
- if (requested_dc > 0)
+ break;
+ case 1:
mask |= DC_STATE_EN_UPTO_DC5;
+ break;
+ }
DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
@@ -4030,6 +4146,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->csr.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+ dev_priv->csr.target_dc_state =
+ sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
mutex_init(&power_domains->lock);
@@ -4896,6 +5015,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
+ /* Must happen before power domain init on VLV/CHV */
+ intel_update_rawclk(i915);
+
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
@@ -5104,8 +5226,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(i915,
- domain),
+ intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index a50605b8b1ad..1da04f3e0fb3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -36,29 +36,20 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_TC4_LANES,
- POWER_DOMAIN_PORT_DDI_TC5_LANES,
- POWER_DOMAIN_PORT_DDI_TC6_LANES,
+ POWER_DOMAIN_PORT_DDI_G_LANES,
+ POWER_DOMAIN_PORT_DDI_H_LANES,
+ POWER_DOMAIN_PORT_DDI_I_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_G_IO,
- POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
- POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
- POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -68,21 +59,19 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
- POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_TC4,
- POWER_DOMAIN_AUX_TC5,
- POWER_DOMAIN_AUX_TC6,
+ POWER_DOMAIN_AUX_G,
+ POWER_DOMAIN_AUX_H,
+ POWER_DOMAIN_AUX_I,
POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_TBT1,
- POWER_DOMAIN_AUX_TBT2,
- POWER_DOMAIN_AUX_TBT3,
- POWER_DOMAIN_AUX_TBT4,
- POWER_DOMAIN_AUX_TBT5,
- POWER_DOMAIN_AUX_TBT6,
+ POWER_DOMAIN_AUX_C_TBT,
+ POWER_DOMAIN_AUX_D_TBT,
+ POWER_DOMAIN_AUX_E_TBT,
+ POWER_DOMAIN_AUX_F_TBT,
+ POWER_DOMAIN_AUX_G_TBT,
+ POWER_DOMAIN_AUX_H_TBT,
+ POWER_DOMAIN_AUX_I_TBT,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@@ -111,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ SKL_DISP_DC_OFF,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
@@ -267,10 +257,11 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915);
void intel_display_power_resume_early(struct drm_i915_private *i915);
void intel_display_power_suspend(struct drm_i915_private *i915);
void intel_display_power_resume(struct drm_i915_private *i915);
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state);
const char *
-intel_display_power_domain_str(struct drm_i915_private *i915,
- enum intel_display_power_domain domain);
+intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 449abaea619f..1a7334dbe802 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -128,7 +128,8 @@ struct intel_encoder {
enum intel_output_type type;
enum port port;
- unsigned int cloneable;
+ u16 cloneable;
+ u8 pipe_mask;
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received);
@@ -187,7 +188,6 @@ struct intel_encoder {
* device interrupts are disabled.
*/
void (*suspend)(struct intel_encoder *);
- int crtc_mask;
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -388,6 +388,13 @@ struct intel_hdcp {
wait_queue_head_t cp_irq_queue;
atomic_t cp_irq_count;
int cp_irq_count_cached;
+
+ /*
+ * HDCP register access for gen12+ needs the associated transcoder.
+ * The transcoder attached to the connector could change at modeset,
+ * hence cache the transcoder here.
+ */
+ enum transcoder cpu_transcoder;
};
struct intel_connector {
@@ -481,9 +488,9 @@ struct intel_atomic_state {
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTC's
*/
- unsigned int active_pipe_changes;
+ u8 active_pipe_changes;
- unsigned int active_crtcs;
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -499,6 +506,14 @@ struct intel_atomic_state {
bool rps_interactive;
+ /*
+ * Set when any of the following global state changes:
+ * active_pipes
+ * min_cdclk[]
+ * min_voltage_level[]
+ * cdclk.*
+ */
+ bool global_state_changed;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -552,24 +567,24 @@ struct intel_plane_state {
int scaler_id;
/*
- * linked_plane:
+ * planar_linked_plane:
*
* ICL planar formats require 2 planes that are updated as pairs.
* This member is used to make sure the other plane is also updated
* when required, and for update_slave() to find the correct
* plane_state to pass as argument.
*/
- struct intel_plane *linked_plane;
+ struct intel_plane *planar_linked_plane;
/*
- * slave:
+ * planar_slave:
* If set, don't update; use the linked plane's state for updating
* this plane during atomic commit with the update_slave() callback.
*
* It's also used by the watermark code to ignore wm calculations on
* this plane. They're calculated by the linked plane's wm code.
*/
- u32 slave;
+ u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
};
@@ -759,8 +774,8 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
- bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
+ bool preload_luts;
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -864,6 +879,7 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
+ u32 dc3co_exitline;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -925,6 +941,8 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
+ int min_cdclk[I915_MAX_PLANES];
+
u32 data_rate[I915_MAX_PLANES];
/* Gamma mode programmed on the pipe */
@@ -979,11 +997,17 @@ struct intel_crtc_state {
bool dsc_split;
u16 compressed_bpp;
u8 slice_count;
- } dsc_params;
- struct drm_dsc_config dp_dsc_cfg;
+ struct drm_dsc_config config;
+ } dsc;
/* Forward Error correction State */
bool fec_enable;
+
+ /* Pointer to master transcoder in case of tiled displays */
+ enum transcoder master_transcoder;
+
+ /* Bitmask to indicate slaves attached */
+ u8 sync_mode_slaves_mask;
};
struct intel_crtc {
@@ -1026,6 +1050,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
+
+ /* per pipe DSB related info */
+ struct intel_dsb dsb;
};
struct intel_plane {
@@ -1061,6 +1088,8 @@ struct intel_plane {
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
};
struct intel_watermark_params {
@@ -1176,6 +1205,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
+ u32 aux_busy_last_status;
u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1211,6 +1241,15 @@ struct intel_dp {
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
+
+ /*
+ * DP_TP_* registers may live in either the port or the transcoder register space.
+ */
+ struct {
+ i915_reg_t dp_tp_ctl;
+ i915_reg_t dp_tp_status;
+ } regs;
+
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
@@ -1269,6 +1308,7 @@ struct intel_digital_port {
char tc_port_name[8];
enum tc_port_mode tc_mode;
enum phy_fia tc_phy_fia;
+ u8 tc_phy_fia_idx;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1509,7 +1549,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 9b15ac4f2fb6..c61ac0c3acb5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -68,11 +68,6 @@
#define DP_DPRX_ESI_LEN 14
-/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
-#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
-#define DP_DSC_MIN_SUPPORTED_BPC 8
-#define DP_DSC_MAX_SUPPORTED_BPC 10
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -500,7 +495,17 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
DP_DSC_FEC_OVERHEAD_FACTOR);
}
-static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+static int
+small_joiner_ram_size_bits(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ return 7680 * 8;
+ else
+ return 6144 * 8;
+}
+
+static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay)
{
u32 bits_per_pixel, max_bpp_small_joiner_ram;
@@ -517,7 +522,8 @@ static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
+ mode_hdisplay;
DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
/*
@@ -585,6 +591,25 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
+static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ /*
+ * Older platforms don't like hdisplay==4096 with DP.
+ *
+ * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
+ * and frame counter increment), but we don't get vblank interrupts,
+ * and the pipe underruns immediately. The link also doesn't seem
+ * to get trained properly.
+ *
+ * On CHV the vblank interrupts don't seem to disappear but
+ * otherwise the symptoms are similar.
+ *
+ * TODO: confirm the behaviour on HSW+
+ */
+ return hdisplay == 4096 && !HAS_DDI(dev_priv);
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -620,6 +645,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
+ if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
/*
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
@@ -634,7 +662,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
true);
} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(max_link_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
max_lanes,
target_clock,
mode->hdisplay) >> 4;
@@ -655,7 +684,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
@@ -732,12 +761,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port)))
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -855,9 +886,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
+ DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->base.port));
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -965,13 +997,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
- port_name(port), pipe_name(intel_dp->pps_pipe));
+ DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1144,18 +1179,20 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
u32 status;
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
+ msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout!\n");
+ DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
@@ -1338,13 +1375,12 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- static u32 last_status = -1;
const u32 status = intel_uncore_read(uncore, ch_ctl);
- if (status != last_status) {
+ if (status != intel_dp->aux_busy_last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
status);
- last_status = status;
+ intel_dp->aux_busy_last_status = status;
}
ret = -EBUSY;
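
The fix above is subtle: the old function-static last_status was shared by every AUX channel in the system, so a stuck-busy status seen on one port could suppress the warning for a different port reporting the same value. Storing the status in struct intel_dp makes the deduplication per-port. A minimal sketch of the pattern, with hypothetical names:

    #include <stdio.h>

    struct dev_state {
        unsigned int busy_last_status;  /* per-device, like intel_dp */
    };

    /* Warn only when the status changes for *this* device, rather
     * than deduplicating globally via a function-static. */
    static void warn_if_new_status(struct dev_state *dev, unsigned int status)
    {
        if (status != dev->busy_last_status) {
            fprintf(stderr, "ch not started, status 0x%08x\n", status);
            dev->busy_last_status = status;
        }
    }

    int main(void)
    {
        struct dev_state a = {0}, b = {0};

        warn_if_new_status(&a, 0xdeadbeef);  /* warns */
        warn_if_new_status(&b, 0xdeadbeef);  /* still warns: separate state */
        warn_if_new_status(&a, 0xdeadbeef);  /* suppressed for device a */
        return 0;
    }
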
@@ -1636,6 +1672,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_CTL(aux_ch);
default:
MISSING_CASE(aux_ch);
@@ -1656,6 +1693,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_DATA(aux_ch, index);
default:
MISSING_CASE(aux_ch);
@@ -1834,8 +1872,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 11 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ /* On TGL, FEC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
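
The rewritten predicate is now generation-dependent. A compact standalone restatement of the rule, with gen and the transcoder enum as simplified stand-ins for the driver types:

    #include <stdbool.h>
    #include <stdio.h>

    enum transcoder { TRANSCODER_A, TRANSCODER_B };

    /* TGL+ (gen12) supports FEC on all pipes; ICL (gen11) on every
     * transcoder except A; earlier gens not at all. */
    static bool source_supports_fec(int gen, enum transcoder cpu_transcoder)
    {
        if (gen >= 12)
            return true;
        return gen == 11 && cpu_transcoder != TRANSCODER_A;
    }

    int main(void)
    {
        printf("gen12/A: %d\n", source_supports_fec(12, TRANSCODER_A)); /* 1 */
        printf("gen11/A: %d\n", source_supports_fec(11, TRANSCODER_A)); /* 0 */
        printf("gen11/B: %d\n", source_supports_fec(11, TRANSCODER_B)); /* 1 */
        return 0;
    }
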
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
@@ -1850,8 +1894,18 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 10 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ if (!INTEL_INFO(dev_priv)->display.has_dsc)
+ return false;
+
+ /* On TGL, DSC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
@@ -2010,11 +2064,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
- conn_state->max_requested_bpc);
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (INTEL_GEN(dev_priv) >= 12)
+ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10,
+ conn_state->max_requested_bpc);
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
- if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+
+ /* Min Input BPC for ICL+ is 8 */
+ if (pipe_bpp < 8 * 3) {
DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
return -EINVAL;
}
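
A quick numeric check of the new clamp: pipe_bpp is 3 × bpc for these formats, which is why the floor reads 8 * 3. A standalone sketch:

    #include <stdio.h>

    /* Max DSC input bpc: 12 on TGL+ (gen12), 10 on ICL (gen11) */
    static int clamp_dsc_bpc(int gen, int requested_bpc)
    {
        int max_bpc = gen >= 12 ? 12 : 10;

        return requested_bpc < max_bpc ? requested_bpc : max_bpc;
    }

    int main(void)
    {
        /* ICL with a sink requesting 12 bpc gets clamped to 10 */
        int bpc = clamp_dsc_bpc(11, 12);
        int pipe_bpp = 3 * bpc;

        /* Min DSC input for ICL+ is 8 bpc, i.e. pipe_bpp >= 24 */
        printf("bpc %d pipe_bpp %d ok %d\n", bpc, pipe_bpp, pipe_bpp >= 8 * 3);
        return 0;
    }
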
@@ -2029,10 +2089,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
if (intel_dp_is_edp(intel_dp)) {
- pipe_config->dsc_params.compressed_bpp =
+ pipe_config->dsc.compressed_bpp =
min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count =
+ pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
} else {
@@ -2040,7 +2100,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
u8 dsc_dp_slice_count;
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
@@ -2052,10 +2113,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
- pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ pipe_config->dsc.compressed_bpp = min_t(u16,
dsc_max_output_bpp >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
@@ -2063,8 +2124,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* then we need to use 2 VDSC instances.
*/
if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
- if (pipe_config->dsc_params.slice_count > 1) {
- pipe_config->dsc_params.dsc_split = true;
+ if (pipe_config->dsc.slice_count > 1) {
+ pipe_config->dsc.dsc_split = true;
} else {
DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
@@ -2076,16 +2137,16 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
return ret;
}
- pipe_config->dsc_params.compression_enable = true;
+ pipe_config->dsc.compression_enable = true;
DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
"Compressed Bpp = %d Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
@@ -2159,15 +2220,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- if (pipe_config->dsc_params.compression_enable) {
+ if (pipe_config->dsc.compression_enable) {
DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
DRM_DEBUG_KMS("DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc_params.compressed_bpp),
+ pipe_config->dsc.compressed_bpp),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2222,6 +2283,16 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
@@ -2259,6 +2330,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
@@ -2304,6 +2376,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
+ if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ return -EINVAL;
+
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
@@ -2311,8 +2386,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (pipe_config->dsc_params.compression_enable)
- output_bpp = pipe_config->dsc_params.compressed_bpp;
+ if (pipe_config->dsc.compression_enable)
+ output_bpp = pipe_config->dsc.compressed_bpp;
else
output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
@@ -2339,6 +2414,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_hdcp_transcoder_config(intel_connector,
+ pipe_config->cpu_transcoder);
+
return 0;
}
@@ -2366,6 +2444,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
/*
* There are four kinds of DP registers:
*
@@ -2567,8 +2648,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2587,8 +2669,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2613,8 +2696,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_pps_lock(intel_dp, wakeref)
vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -2632,8 +2716,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2695,8 +2780,9 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
intel_dp->want_panel_vdd = false;
@@ -2717,12 +2803,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
if (WARN(edp_have_panel_power(intel_dp),
- "eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port)))
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2777,11 +2865,11 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dig_port->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dig_port->base.port));
+ WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2926,8 +3014,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
- "DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->base.port),
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -3023,7 +3111,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
{
int ret;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
@@ -3315,7 +3403,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(DP_TP_CTL(port));
+ u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3341,7 +3429,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(DP_TP_CTL(port), temp);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3505,8 +3593,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
@@ -3522,17 +3611,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active (e)DP port %c\n",
- pipe_name(pipe), port_name(port));
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(port));
+ DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3574,8 +3664,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
+ DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4039,22 +4130,22 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
/*
- * On PORT_A we can have only eDP in SST mode. There the only reason
- * we need to set idle transmission mode is to work around a HW issue
- * where we enable the pipe while not in idle link-training mode.
+ * Until TGL, on PORT_A we can have only eDP in SST mode. There the only
+ * reason we need to set idle transmission mode is to work around a HW
+ * issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@@ -4396,9 +4487,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
- port_name(encoder->port), yesno(intel_dp->can_mst),
- yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4418,9 +4510,36 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
+bool
+intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ /*
+ * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], in order to
+ * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return true;
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static void
-intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct dp_sdp vsc_sdp = {};
@@ -4441,13 +4560,55 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
*/
vsc_sdp.sdp_header.HB3 = 0x13;
- /*
- * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
- * DB16[3:0] DP 1.4a spec, Table 2-120
- */
- vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
- /* RGB->YCBCR color conversion uses the BT.709 color space. */
- vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ /* RGB: DB16[7:4] = 0h */
+ break;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc_sdp.db[16] |= 0x1;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc_sdp.db[16] |= 0x2;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc_sdp.db[16] |= 0x3;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc_sdp.db[16] |= 0x4;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc_sdp.db[16] |= 0x5;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc_sdp.db[16] |= 0x6;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc_sdp.db[16] |= 0x7;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
+ break;
+ default:
+ /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */
+
+ /* RGB->YCBCR color conversion uses the BT.709 color space. */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ break;
+ }
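
Putting the two switches together: DB16 packs the pixel-encoding nibble into bits 7:4 and the colorimetry nibble into bits 3:0 (DP 1.4a, Table 2-120). A standalone sketch spot-checking two combinations covered above:

    #include <stdio.h>

    /* Mirrors the DB16 packing above for two of the cases: YCbCr 420
     * pixel encoding, BT.2020 YCC vs. default BT.709 colorimetry. */
    static unsigned char vsc_db16(int ycbcr420, int bt2020_ycc)
    {
        unsigned char db16 = 0;

        if (ycbcr420)
            db16 = 0x3 << 4;        /* YCbCr 420: DB16[7:4] = 3h */
        if (bt2020_ycc)
            db16 |= 0x7;            /* ITU-R BT.2020 YCC */
        else if (ycbcr420)
            db16 |= 0x1;            /* RGB->YCbCr conversion is BT.709 */

        return db16;
    }

    int main(void)
    {
        printf("420 + BT.2020 YCC: 0x%02x\n", vsc_db16(1, 1)); /* 0x37 */
        printf("420 default:       0x%02x\n", vsc_db16(1, 0)); /* 0x31 */
        return 0;
    }
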
/*
* For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
@@ -4499,13 +4660,106 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void
+intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct dp_sdp infoframe_sdp = {};
+ struct hdmi_drm_infoframe drm_infoframe = {};
+ const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
+ unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
+ ssize_t len;
+ int ret;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
+ if (ret) {
+ DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
return;
+ }
- intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
+ len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+ if (len < 0) {
+ DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ return;
+ }
+
+ if (len != infoframe_size) {
+ DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ return;
+ }
+
+ /*
+ * Set up the infoframe sdp packet for HDR static metadata.
+ * Prepare VSC Header for SU as per DP 1.4a spec,
+ * Table 2-100 and Table 2-101
+ */
+
+ /* Packet ID, 00h for non-Audio INFOFRAME */
+ infoframe_sdp.sdp_header.HB0 = 0;
+ /*
+ * Packet Type 80h + Non-audio INFOFRAME Type value
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+ */
+ infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+ /*
+ * Least Significant Eight Bits of (Data Byte Count - 1),
+ * i.e. infoframe_size - 1
+ */
+ infoframe_sdp.sdp_header.HB2 = 0x1D;
+ /* INFOFRAME SDP Version Number */
+ infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ infoframe_sdp.db[0] = drm_infoframe.version;
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ infoframe_sdp.db[1] = drm_infoframe.length;
+ /*
+ * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
+ * HDMI_INFOFRAME_HEADER_SIZE
+ */
+ BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ /*
+ * The size of the DP infoframe SDP packet for HDR static metadata consists of:
+ * - DP SDP Header (struct dp_sdp_header): 4 bytes
+ * - Two Data Blocks: 2 bytes
+ *   CTA Header Byte2 (INFOFRAME Version Number)
+ *   CTA Header Byte3 (Length of INFOFRAME)
+ * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
+ *
+ * Prior to GEN11, the GMP register size is identical to the DP HDR static
+ * metadata infoframe size. GEN11+ has a larger size, and write_infoframe
+ * will pad the rest of the size.
+ */
+ intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ &infoframe_sdp,
+ sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+}
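
The hard-coded 0x1D in HB2 follows directly from the sizes involved: HDMI_INFOFRAME_HEADER_SIZE is 4 bytes and HDMI_DRM_INFOFRAME_SIZE is 26, so the packed infoframe is 30 bytes and the Data Byte Count field carries 30 - 1 = 29 = 0x1D. A sketch of the arithmetic:

    #include <stdio.h>

    #define HDMI_INFOFRAME_HEADER_SIZE 4   /* type, version, length, checksum */
    #define HDMI_DRM_INFOFRAME_SIZE    26  /* HDR static metadata payload */

    int main(void)
    {
        int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE +
                             HDMI_DRM_INFOFRAME_SIZE;

        /* HB2 = Data Byte Count - 1 = 30 - 1 = 0x1D */
        printf("infoframe_size %d, HB2 0x%02x\n",
               infoframe_size, infoframe_size - 1);

        /* Bytes handed to write_infoframe: 4-byte SDP header +
         * 2 CTA header bytes + 26-byte payload = 32 */
        printf("sdp write size %d\n", 4 + 2 + HDMI_DRM_INFOFRAME_SIZE);
        return 0;
    }
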
+
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+}
+
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
+ crtc_state,
+ conn_state);
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -5227,6 +5481,9 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
{
enum port port = intel_dig_port->base.port;
+ if (HAS_PCH_MCC(dev_priv) && port == PORT_C)
+ return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
@@ -5506,7 +5763,6 @@ static int
intel_dp_connector_register(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = connector->dev;
int ret;
ret = intel_connector_register(connector);
@@ -5521,8 +5777,7 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&intel_dp->aux, connector);
return ret;
}
@@ -6280,13 +6535,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->base.port),
+ DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -6353,6 +6610,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
+ intel_attach_colorspace_property(connector);
+
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -7150,8 +7414,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_modeset_retry_work_fn);
if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7192,9 +7457,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on port %c\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- port_name(port));
+ DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7218,11 +7483,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
- (port == PORT_B || port == PORT_C ||
- port == PORT_D || port == PORT_F))
- intel_dp_mst_encoder_init(intel_dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(intel_dig_port,
+ intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
@@ -7313,11 +7575,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
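
The crtc_mask to pipe_mask rename also replaces hand-written shifts with BIT(pipe), leaving the values unchanged. A standalone sketch of the CHV cases above:

    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };
    #define BIT(n) (1u << (n))

    int main(void)
    {
        /* CHV port D drives pipe C only; ports B/C drive pipes A and B */
        unsigned int port_d  = BIT(PIPE_C);               /* 0x4, was 1 << 2 */
        unsigned int port_bc = BIT(PIPE_A) | BIT(PIPE_B); /* 0x3 */

        printf("port D 0x%x, ports B/C 0x%x\n", port_d, port_bc);
        return 0;
    }
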
intel_encoder->cloneable = 0;
intel_encoder->port = port;
@@ -7378,7 +7640,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
+ true);
if (ret) {
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 00981fb9414b..3da166054788 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -13,6 +13,7 @@
#include "i915_reg.h"
enum pipe;
+enum port;
struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
@@ -107,6 +108,14 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_digital_port_connected(struct intel_encoder *encoder);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 600873c796d0..03d1cba0b696 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -215,7 +215,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_ERROR("failed to update payload %d\n", ret);
+ DRM_DEBUG_KMS("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
@@ -295,7 +295,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@@ -326,12 +325,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(DP_TP_STATUS(port));
- I915_WRITE(DP_TP_STATUS(port), temp);
+ temp = I915_READ(intel_dp->regs.dp_tp_status);
+ I915_WRITE(intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
intel_ddi_enable_pipe_clock(pipe_config);
+
+ intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -342,11 +343,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
@@ -393,20 +393,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
- intel_connector->port);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -426,6 +413,7 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -453,7 +441,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -466,11 +454,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
+static int
+intel_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
+ intel_connector->port);
+}
+
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
+ .detect_ctx = intel_dp_mst_detect,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
@@ -615,8 +618,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
+ /*
+ * This is wrong, but broken userspace uses the intersection
+ * of possible_crtcs of all the encoders of a given connector
+ * to figure out which crtcs can drive said connector. What
+ * should be used instead is the union of possible_crtcs.
+ * To keep such userspace functioning we must misconfigure
+ * this to make sure the intersection is not empty :(
+ */
+ intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
@@ -653,21 +664,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ enum port port = intel_dig_port->base.port;
int ret;
- intel_dp->can_mst = true;
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (INTEL_GEN(i915) < 12 && port == PORT_A)
+ return 0;
+
+ if (INTEL_GEN(i915) < 11 && port == PORT_E)
+ return 0;
+
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
- if (ret) {
- intel_dp->can_mst = false;
+ if (ret)
return ret;
- }
+
+ intel_dp->can_mst = true;
+
return 0;
}
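With the hunk above, the platform and port checks that previously gated the call in intel_dp_init_connector() live inside intel_dp_mst_encoder_init() itself, so the caller can run it unconditionally. A kernel-context sketch of the consolidated gating as a standalone predicate (the helper name is hypothetical; the individual checks are exactly the ones added above):

	static bool can_init_mst(struct drm_i915_private *i915,
				 struct intel_dp *intel_dp, enum port port)
	{
		if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
			return false;		/* no MST support, or eDP */
		if (INTEL_GEN(i915) < 12 && port == PORT_A)
			return false;		/* pre-gen12: no MST on port A */
		if (INTEL_GEN(i915) < 11 && port == PORT_E)
			return false;		/* pre-gen11: no MST on port E */
		return true;
	}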
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index d5a298c3c83b..3ce0a023eee0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -247,8 +247,7 @@ static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_dpll_hw_state *pll_state,
- enum intel_dpll_id range_min,
- enum intel_dpll_id range_max)
+ unsigned long dpll_mask)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
@@ -257,7 +256,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- for (i = range_min; i <= range_max; i++) {
+ WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+
+ for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
@@ -464,8 +465,8 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
} else {
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B);
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
@@ -829,7 +830,8 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
if (!pll)
return NULL;
@@ -892,7 +894,7 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SPLL, DPLL_ID_SPLL);
+ BIT(DPLL_ID_SPLL));
} else {
return false;
}
@@ -1462,13 +1464,13 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL0);
+ BIT(DPLL_ID_SKL_DPLL0));
else
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL1,
- DPLL_ID_SKL_DPLL3);
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
return false;
@@ -2416,8 +2418,9 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL2);
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1) |
+ BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return false;
@@ -2535,6 +2538,18 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
+static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x54, .dco_fraction = 0x3000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x43, .dco_fraction = 0x4000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
@@ -2562,8 +2577,34 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = tgl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = tgl_tbt_pll_24MHz_values;
+ break;
+ }
+ } else {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = icl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = icl_tbt_pll_24MHz_values;
+ break;
+ }
+ }
+
return true;
}
@@ -2622,7 +2663,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state)
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
{
u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
@@ -2644,8 +2686,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
continue;
if (div2 >= 2) {
+ /*
+ * Note: a_divratio does not match the TGL BSpec
+ * algorithm, but it matches the hardcoded values and
+ * is known to work on HW, at least for DP alt-mode.
+ */
a_divratio = is_dp ? 10 : 5;
- tlinedrv = 2;
+ tlinedrv = is_dkl ? 1 : 2;
} else {
a_divratio = 5;
tlinedrv = 0;
@@ -2708,11 +2755,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ bool is_dkl = INTEL_GEN(dev_priv) >= 12;
memset(pll_state, 0, sizeof(*pll_state));
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state)) {
+ pll_state, is_dkl)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2720,8 +2768,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
if (m2div_int > 255) {
- m1div = 4;
- m2div_int = dco_khz / (refclk_khz * m1div);
+ if (!is_dkl) {
+ m1div = 4;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ }
+
if (m2div_int > 255) {
DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
clock);
@@ -2801,60 +2852,94 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
ssc_steplog = 4;
- pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
- MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
- MG_PLL_DIV0_FBDIV_INT(m2div_int);
-
- pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
- MG_PLL_DIV1_DITHER_DIV_2 |
- MG_PLL_DIV1_NDIVRATIO(1) |
- MG_PLL_DIV1_FBPREDIV(m1div);
-
- pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
- MG_PLL_LF_AFCCNTSEL_512 |
- MG_PLL_LF_GAINCTRL(1) |
- MG_PLL_LF_INT_COEFF(int_coeff) |
- MG_PLL_LF_PROP_COEFF(prop_coeff);
-
- pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
- MG_PLL_FRAC_LOCK_DCODITHEREN |
- MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
- if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
-
- pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
- MG_PLL_SSC_TYPE(2) |
- MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
- MG_PLL_SSC_STEPNUM(ssc_steplog) |
- MG_PLL_SSC_FLLEN |
- MG_PLL_SSC_STEPSIZE(ssc_stepsize);
-
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
-
- if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ /* write pll_state calculations */
+ if (is_dkl) {
+ pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
+ DKL_PLL_DIV0_FBPREDIV(m1div) |
+ DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
+
+ pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
+ DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
+ (use_ssc ? DKL_PLL_SSC_EN : 0);
+
+ pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
+ DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
+
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
- }
+ pll_state->mg_pll_div0 =
+ (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+ MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+ MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 =
+ MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+ MG_PLL_DIV1_DITHER_DIV_2 |
+ MG_PLL_DIV1_NDIVRATIO(1) |
+ MG_PLL_DIV1_FBPREDIV(m1div);
+
+ pll_state->mg_pll_lf =
+ MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+ MG_PLL_LF_AFCCNTSEL_512 |
+ MG_PLL_LF_GAINCTRL(1) |
+ MG_PLL_LF_INT_COEFF(int_coeff) |
+ MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+ pll_state->mg_pll_frac_lock =
+ MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+ MG_PLL_FRAC_LOCK_DCODITHEREN |
+ MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+ if (use_ssc || m2div_rem > 0)
+ pll_state->mg_pll_frac_lock |=
+ MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+ pll_state->mg_pll_ssc =
+ (use_ssc ? MG_PLL_SSC_EN : 0) |
+ MG_PLL_SSC_TYPE(2) |
+ MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+ MG_PLL_SSC_STEPNUM(ssc_steplog) |
+ MG_PLL_SSC_FLLEN |
+ MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias =
+ MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask =
+ MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
+ }
- pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ pll_state->mg_pll_tdc_coldst_bias &=
+ pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ }
return true;
}
@@ -2908,7 +2993,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
- bool has_dpll4 = false;
+ unsigned long dpll_mask;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
@@ -2917,16 +3002,19 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
}
if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
- has_dpll4 = true;
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ else
+ dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- DPLL_ID_ICL_DPLL0,
- has_dpll4 ? DPLL_ID_EHL_DPLL4
- : DPLL_ID_ICL_DPLL1);
+ dpll_mask);
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
- port_name(encoder->port));
+ DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
return false;
}
@@ -2956,8 +3044,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- DPLL_ID_ICL_TBTPLL,
- DPLL_ID_ICL_TBTPLL);
+ BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
return false;
@@ -2976,8 +3063,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
encoder->port));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- dpll_id,
- dpll_id);
+ BIT(dpll_id));
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
@@ -3101,6 +3187,78 @@ out:
return ret;
}
+static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ /*
+ * All registers read here have the same HIP_INDEX_REG even though
+ * they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+
+ hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+
+ hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+
+ hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state,
@@ -3235,6 +3393,75 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
+static void dkl_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+ u32 val;
+
+ /*
+ * All registers programmed here have the same HIP_INDEX_REG even
+ * though they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+ val |= hw_state->mg_pll_div0;
+ I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+ val |= hw_state->mg_pll_div1;
+ I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+
+ val = I915_READ(DKL_PLL_SSC(tc_port));
+ val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+ val |= hw_state->mg_pll_ssc;
+ I915_WRITE(DKL_PLL_SSC(tc_port), val);
+
+ val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+
+ val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+
+ POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+}
+
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
@@ -3327,7 +3554,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
icl_pll_power_enable(dev_priv, pll, enable_reg);
- icl_mg_pll_write(dev_priv, pll);
+ if (INTEL_GEN(dev_priv) >= 12)
+ dkl_pll_write(dev_priv, pll);
+ else
+ icl_mg_pll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3482,11 +3712,22 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = dkl_pll_get_hw_state,
+};
+
static const struct dpll_info tgl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- /* TODO: Add typeC plls */
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
+ { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
{ },
};
@@ -3494,6 +3735,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.dpll_info = tgl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
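The intel_find_shared_dpll() conversion above changes the candidate set from a contiguous [range_min, range_max] pair to a bitmask walked with for_each_set_bit(), so non-consecutive PLL IDs (e.g. ICL DPLL0/DPLL1 plus EHL's DPLL4) can be offered together. A standalone sketch of the same iteration pattern (userspace C, with a hand-rolled loop standing in for for_each_set_bit()):

	#include <stdio.h>

	#define BIT(n) (1UL << (n))
	#define NUM_PLLS 8

	int main(void)
	{
		/* EHL-style candidate set: DPLL0, DPLL1 and DPLL4, skipping 2/3 */
		unsigned long dpll_mask = BIT(0) | BIT(1) | BIT(4);

		for (unsigned int i = 0; i < NUM_PLLS; i++) {
			if (!(dpll_mask & BIT(i)))
				continue;	/* not a candidate */
			printf("considering DPLL %u\n", i);
		}
		return 0;
	}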
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 104cf6d42333..2a104c64291d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -337,6 +337,11 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
+
+ /**
+ * @wakeref: On some platforms a device-level runtime pm reference may
+ * need to be grabbed to disable DC states while this DPLL is enabled.
+ */
intel_wakeref_t wakeref;
};
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
new file mode 100644
index 000000000000..bb5a0e91b370
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+#define DSB_BUF_SIZE (2 * PAGE_SIZE)
+
+/**
+ * DOC: DSB
+ *
+ * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
+ * which can be offloaded to the DSB HW in the Display Controller. The DSB
+ * HW is a DMA engine that can be programmed to download the DSB from
+ * memory. It allows the driver to batch-submit display HW programming,
+ * which helps to reduce loading time and CPU activity, thereby making
+ * the context switch faster. DSB support has been added starting from
+ * the Gen12 Intel graphics platforms.
+ *
+ * DSBs can access only the pipe, plane, and transcoder Data Island Packet
+ * registers.
+ *
+ * The DSB HW supports only register writes (both indexed and direct MMIO
+ * writes). No register reads are possible with the DSB HW engine.
+ */
+
+/* DSB opcodes. */
+#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_BYTE_EN 0xF
+#define DSB_BYTE_EN_SHIFT 20
+#define DSB_REG_VALUE_MASK 0xfffff
+
+static inline bool is_dsb_busy(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+}
+
+static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl |= DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl &= ~DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+/**
+ * intel_dsb_get() - Allocate DSB context and return a DSB instance.
+ * @crtc: intel_crtc structure to get pipe info.
+ *
+ * This function returns a handle to a DSB instance, to be used for
+ * further DSB operations.
+ *
+ * Returns: address of the intel_dsb instance requested.
+ * Failure: returns the same DSB instance, but without a command buffer.
+ */
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_dsb *dsb = &crtc->dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return dsb;
+
+ if (atomic_add_return(1, &dsb->refcount) != 1)
+ return dsb;
+
+ dsb->id = DSB1;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Gem object creation failed\n");
+ goto err;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ DRM_ERROR("Vma creation failed\n");
+ i915_gem_object_put(obj);
+ atomic_dec(&dsb->refcount);
+ goto err;
+ }
+
+ dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(dsb->cmd_buf)) {
+ DRM_ERROR("Command buffer creation failed\n");
+ i915_vma_unpin_and_release(&vma, 0);
+ dsb->cmd_buf = NULL;
+ atomic_dec(&dsb->refcount);
+ goto err;
+ }
+ dsb->vma = vma;
+
+err:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ return dsb;
+}
+
+/**
+ * intel_dsb_put() - Destroy the DSB context.
+ * @dsb: intel_dsb structure.
+ *
+ * This function destroys the DSB context allocated by intel_dsb_get(), by
+ * unpinning and releasing the VMA object associated with it.
+ */
+
+void intel_dsb_put(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!HAS_DSB(i915))
+ return;
+
+ if (WARN_ON(atomic_read(&dsb->refcount) == 0))
+ return;
+
+ if (atomic_dec_and_test(&dsb->refcount)) {
+ i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ dsb->cmd_buf = NULL;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ }
+}
+
+/**
+ * intel_dsb_indexed_reg_write() - Write to the DSB context for an
+ * auto-increment register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used to write a register-value pair into the DSB
+ * command buffer for an auto-increment register. On command buffer
+ * overflow a warning is emitted and any further register programming
+ * falls back to MMIO writes.
+ */
+
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+ u32 reg_val;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ /*
+ * For example the buffer will look like below for 3 dwords for auto
+ * increment register:
+ * +--------------------------------------------------------+
+ * | size = 3 | offset &| value1 | value2 | value3 | zero |
+ * | | opcode | | | | |
+ * +--------------------------------------------------------+
+ * + + + + + + +
+ * 0 4 8 12 16 20 24
+ * Byte
+ *
+ * As every instruction is 8-byte aligned, the index of a DSB instruction
+ * in the u32 array is always even. If an odd number of dwords is
+ * written, a zero dword is appended at the end for padding.
+ */
+ reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ if (reg_val != i915_mmio_reg_offset(reg)) {
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+
+ dsb->ins_start_offset = dsb->free_pos;
+
+ /* Update the size. */
+ buf[dsb->free_pos++] = 1;
+
+ /* Update the opcode and reg. */
+ buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
+ DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+
+ /* Update the value. */
+ buf[dsb->free_pos++] = val;
+ } else {
+ /* Update the new value. */
+ buf[dsb->free_pos++] = val;
+
+ /* Update the size. */
+ buf[dsb->ins_start_offset]++;
+ }
+
+ /* If the number of data words is odd, the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+}
+
+/**
+ * intel_dsb_reg_write() - Write to the DSB context for a normal
+ * register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used to write a register-value pair into the DSB
+ * command buffer. On command buffer overflow a warning is emitted and
+ * any further register programming falls back to MMIO writes.
+ */
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ dsb->ins_start_offset = dsb->free_pos;
+ buf[dsb->free_pos++] = val;
+ buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg);
+}
+
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: intel_dsb structure.
+ *
+ * This function performs the actual write to the hardware using the DSB.
+ * On error it falls back to MMIO. It also resets the context afterwards.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (!dsb->free_pos)
+ return;
+
+ if (!intel_dsb_enable_engine(dsb))
+ goto reset;
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+
+ tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
+ if (tail > dsb->free_pos * 4)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ (tail - dsb->free_pos * 4));
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ if (wait_for(!is_dsb_busy(dsb), 1)) {
+ DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ goto reset;
+ }
+
+reset:
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ intel_dsb_disable_engine(dsb);
+}
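The dword layout that intel_dsb_indexed_reg_write() documents can be reproduced in a few lines. A standalone sketch (userspace C; the opcode and mask constants are copied from the file above, while the register offset and values are placeholders) that emits the size / opcode+offset / values / padding stream for three writes to one auto-increment register:

	#include <stdint.h>
	#include <stdio.h>

	#define DSB_OPCODE_SHIFT		24
	#define DSB_OPCODE_INDEXED_WRITE	0x9
	#define DSB_REG_VALUE_MASK		0xfffff

	int main(void)
	{
		uint32_t buf[8] = { 0 };
		uint32_t reg = 0x70180;	/* placeholder MMIO offset */
		int pos = 0;

		buf[pos++] = 3;		/* dword count for this instruction */
		buf[pos++] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
			     (reg & DSB_REG_VALUE_MASK);	/* opcode | offset */
		buf[pos++] = 0x11111111;	/* value1 */
		buf[pos++] = 0x22222222;	/* value2 */
		buf[pos++] = 0x33333333;	/* value3 */
		if (pos & 1)	/* instructions are 8-byte aligned: pad odd length */
			buf[pos++] = 0;

		for (int i = 0; i < pos; i++)
			printf("dword %d: 0x%08x\n", i, buf[i]);
		return 0;
	}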
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
new file mode 100644
index 000000000000..6f95c8e909e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_H
+#define _INTEL_DSB_H
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct intel_crtc;
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ atomic_t refcount;
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+ * free_pos points to the first free entry position and helps in
+ * calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+ * ins_start_offset stores the start address of the current DSB
+ * instruction and helps in identifying the batch for an
+ * auto-increment register.
+ */
+ u32 ins_start_offset;
+};
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc);
+void intel_dsb_put(struct intel_dsb *dsb);
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val);
+void intel_dsb_commit(struct intel_dsb *dsb);
+
+#endif
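Taken together with intel_dsb.c above, the intended call sequence is get, write, commit, put. A minimal kernel-context sketch of a caller (SOME_PIPE_REG and the value are placeholders, and error handling is elided):

	static void example_flush_regs(struct intel_crtc *crtc)
	{
		struct intel_dsb *dsb = intel_dsb_get(crtc);

		/* Queued into the DSB command buffer; silently falls back to
		 * an MMIO write when no command buffer could be allocated. */
		intel_dsb_reg_write(dsb, SOME_PIPE_REG, 0xdeadbeef);

		intel_dsb_commit(dsb);	/* kick the HW and wait for completion */
		intel_dsb_put(dsb);	/* drop the reference, freed on last put */
	}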
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5fec02aceaed..a2a937109a5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector)
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
}
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 93baf366692e..bcfbcb743e7d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 dvo_val;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
@@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 16ed44bfd734..3111ecaeabd0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -343,8 +343,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
- if (IS_GEN(dev_priv, 11))
- /* Wa_1409120013:icl,ehl */
+ if (INTEL_GEN(dev_priv) >= 11)
+ /* Wa_1409120013:icl,ehl,tgl */
I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -1320,6 +1320,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->enabled = false;
fbc->active = false;
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index d59eee5c5d9c..48c960ca12fb 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- obj = NULL;
+ obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
@@ -204,7 +204,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
sizes->fb_height = intel_fb->base.height;
}
- mutex_lock(&dev->struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
@@ -235,6 +234,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ info->fix.smem_len = vma->node.size;
+
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -244,10 +248,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start = (unsigned long)info->screen_base;
- info->fix.smem_len = info->screen_size;
-
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
@@ -266,7 +266,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -274,7 +273,6 @@ out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -291,11 +289,8 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
- if (ifbdev->vma) {
- mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ if (ifbdev->vma)
intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
- mutex_unlock(&ifbdev->helper.dev->struct_mutex);
- }
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -444,7 +439,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv)))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
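The smem_start change above deserves a note: fbdev's fix.smem_start is meant to carry the framebuffer's bus address, but the removed lines stored the kernel virtual address from i915_vma_pin_iomap(). In sketch form, the corrected computation is:

	/* Aperture base plus the vma's GGTT offset: a bus address, unlike
	 * the CPU mapping kept in info->screen_base. */
	info->fix.smem_start =
		(unsigned long)(ggtt->gmadr.start + vma->node.start);
	info->fix.smem_len = vma->node.size;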
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 719379774fa5..84b164f31895 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -206,6 +206,7 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
+__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
@@ -220,11 +221,18 @@ static void frontbuffer_release(struct kref *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct i915_vma *vma;
- front->obj->frontbuffer = NULL;
- spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ spin_unlock(&obj->vma.lock);
- i915_gem_object_put(front->obj);
+ obj->frontbuffer = NULL;
+ spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+
+ i915_gem_object_put(obj);
kfree(front);
}
@@ -249,8 +257,9 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
front->obj = obj;
kref_init(&front->ref);
atomic_set(&front->bits, 0);
- i915_active_init(i915, &front->write,
- frontbuffer_active, frontbuffer_retire);
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ i915_active_may_sleep(frontbuffer_retire));
spin_lock(&i915->fb_tracking.lock);
if (obj->frontbuffer) {
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d6775a005726..3d4d19ac1d14 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -836,7 +836,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6ec5ceeab601..f1f41ca8402b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1,9 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2017 Google, Inc.
+ * Copyright © 2017-2019, Intel Corporation.
*
* Authors:
* Sean Paul <seanpaul@chromium.org>
+ * Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/component.h>
@@ -18,6 +20,7 @@
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
+#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -105,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
-static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(PORT_HDCP_STATUS(port));
- return reg & HDCP_STATUS_ENC;
+ return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ HDCP_STATUS_ENC;
}
-static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(HDCP2_STATUS_DDI(port));
- return reg & LINK_ENCRYPTION_STATUS;
+ return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS;
}
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
@@ -253,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
}
static
-u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- enum port port = intel_dig_port->base.port;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return HDCP_TRANSA_REP_PRESENT |
+ HDCP_TRANSA_SHA1_M0;
+ case TRANSCODER_B:
+ return HDCP_TRANSB_REP_PRESENT |
+ HDCP_TRANSB_SHA1_M0;
+ case TRANSCODER_C:
+ return HDCP_TRANSC_REP_PRESENT |
+ HDCP_TRANSC_SHA1_M0;
+ case TRANSCODER_D:
+ return HDCP_TRANSD_REP_PRESENT |
+ HDCP_TRANSD_SHA1_M0;
+ default:
+ DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ return -EINVAL;
+ }
+ }
+
switch (port) {
case PORT_A:
return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
@@ -268,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- break;
+ DRM_ERROR("Unknown port %d\n", port);
+ return -EINVAL;
}
- DRM_ERROR("Unknown port %d\n", port);
- return -EINVAL;
}
static
-int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
@@ -306,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
sha_idx = 0;
sha_text = 0;
sha_leftovers = 0;
- rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+ rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
@@ -548,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* V prime at least twice.
*/
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+ ret = intel_hdcp_validate_v_prime(connector, shim,
ksv_fifo, num_downstream,
bstatus);
if (!ret)
@@ -576,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
@@ -615,18 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+ I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
- an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+ an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -644,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return -EPERM;
}
- I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
- I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+ I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
+ I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(intel_dig_port));
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
@@ -689,22 +716,25 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
- if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@@ -729,15 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ if (intel_de_wait_for_clear(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@@ -808,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* Check_link valid only when HDCP1.4 is enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -819,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -887,7 +922,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
/* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
}
static int
@@ -1493,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
-
+ WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
@@ -1506,14 +1542,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
}
- if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) |
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
+ port)) |
CTL_LINK_ENCRYPTION_REQ);
}
- ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
+ ret = intel_de_wait_for_set(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@@ -1526,14 +1566,19 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+ WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
+ ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
+ ret = intel_de_wait_for_clear(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
@@ -1632,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -1643,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS_DDI(port)));
+ I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1749,13 +1797,71 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
+static inline
+enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (enum mei_fw_ddi)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
+static inline
+enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A ... TRANSCODER_D:
+ return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ default: /* eDP and DSI transcoders are not HDCP-capable */
+ return MEI_INVALID_TRANSCODER;
+ }
+}
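/*
 * Illustrative sketch, not part of the patch: the OR with 0x10 above
 * implies the ME FW enum places its transcoder A at 0x10, so the
 * display transcoder index maps straight onto the FW index. Assuming
 * those values, the mapping can be checked in isolation:
 */
#include <assert.h>

int main(void)
{
	enum { TRANS_A, TRANS_B, TRANS_C, TRANS_D };	/* display indices */
	enum { FW_TRANS_A = 0x10, FW_TRANS_D = 0x13 };	/* assumed FW values */

	assert((TRANS_A | 0x10) == FW_TRANS_A);
	assert((TRANS_D | 0x10) == FW_TRANS_D);
	return 0;
}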
+
+void intel_hdcp_transcoder_config(struct intel_connector *connector,
+ enum transcoder cpu_transcoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->cpu_transcoder = cpu_transcoder;
+ hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ mutex_unlock(&hdcp->mutex);
+ }
+}
+
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
- data->port = connector->encoder->port;
+ if (INTEL_GEN(dev_priv) < 12)
+ data->fw_ddi =
+ intel_get_mei_fw_ddi_index(connector->encoder->port);
+ else
+ /*
+ * As per the ME FW API expectation, for GEN 12+ fw_ddi is filled
+ * with zero (the invalid port index).
+ */
+ data->fw_ddi = MEI_DDI_INVALID_PORT;
+
+ /*
+ * The associated transcoder is set and modified at modeset time, so
+ * fw_tc is initialized here to zero (the invalid transcoder index).
+ * On <Gen12 it retains this value forever.
+ */
+ data->fw_tc = MEI_INVALID_TRANSCODER;
+
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 13555b054930..41c1053d9e38 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -15,10 +15,14 @@ struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_hdcp_shim;
+enum port;
+enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
+void intel_hdcp_transcoder_config(struct intel_connector *connector,
+ enum transcoder cpu_transcoder);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b030f7ae3302..f6f5312205c4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -189,13 +189,19 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static int hsw_dip_data_size(unsigned int type)
+static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+ unsigned int type)
{
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ if (INTEL_GEN(dev_priv) >= 11)
+ return VIDEO_DIP_GMP_DATA_SIZE;
+ else
+ return VIDEO_DIP_DATA_SIZE;
default:
return VIDEO_DIP_DATA_SIZE;
}
@@ -514,7 +520,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
int i;
u32 val = I915_READ(ctl_reg);
- data_size = hsw_dip_data_size(type);
+ data_size = hsw_dip_data_size(dev_priv, type);
+
+ WARN_ON(len > data_size);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -724,11 +732,20 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
drm_hdmi_avi_infoframe_content_type(frame, conn_state);
@@ -1491,7 +1508,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv =
intel_dig_port->base.base.dev->dev_private;
+ struct intel_connector *connector =
+ intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
u32 reg;
@@ -1502,39 +1522,30 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return false;
}
return true;
}
-struct hdcp2_hdmi_msg_data {
+struct hdcp2_hdmi_msg_timeout {
u8 msg_id;
- u32 timeout;
- u32 timeout2;
+ u16 timeout;
};
-static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
- { HDCP_2_2_AKE_INIT, 0, 0 },
- { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
- { HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
- { HDCP_2_2_AKE_STORED_KM, 0, 0 },
- { HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
- { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
- { HDCP_2_2_LC_INIT, 0, 0 },
- { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
- { HDCP_2_2_SKE_SEND_EKS, 0, 0 },
- { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
- { HDCP_2_2_REP_SEND_ACK, 0, 0 },
- { HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
- { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, },
};
static
@@ -1551,12 +1562,17 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id &&
- (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
- return hdcp2_msg_data[i].timeout;
- else if (hdcp2_msg_data[i].msg_id == msg_id)
- return hdcp2_msg_data[i].timeout2;
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) {
+ if (is_paired)
+ return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS;
+ else
+ return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) {
+ if (hdcp2_msg_timeout[i].msg_id == msg_id)
+ return hdcp2_msg_timeout[i].timeout;
+ }
return -EINVAL;
}
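/*
 * Usage sketch, not part of the patch: pairing state only matters for
 * AKE_Send_H_prime, so it is special-cased; every received message with
 * a real timeout sits in the table, and anything else (transmitted
 * messages that previously returned a 0 timeout) now falls through to
 * -EINVAL:
 *
 *   get_hdcp2_msg_timeout(HDCP_2_2_AKE_SEND_HPRIME, false)
 *       -> HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS
 *   get_hdcp2_msg_timeout(HDCP_2_2_AKE_SEND_CERT, false)
 *       -> HDCP_2_2_CERT_TIMEOUT_MS
 *   get_hdcp2_msg_timeout(HDCP_2_2_AKE_INIT, false)
 *       -> -EINVAL
 */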
@@ -2184,8 +2200,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
true, force_dvi);
}
+ if (status != MODE_OK)
+ return status;
- return status;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
@@ -2261,9 +2279,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_10bpc,
- int *clock_8bpc)
+ struct intel_crtc_state *config)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@@ -2272,11 +2288,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return false;
}
- /* YCBCR420 TMDS rate requirement is half the pixel clock */
- config->port_clock /= 2;
- *clock_12bpc /= 2;
- *clock_10bpc /= 2;
- *clock_8bpc /= 2;
config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
@@ -2291,6 +2302,104 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return true;
}
+static int intel_hdmi_port_clock(int clock, int bpc)
+{
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
+ return clock * bpc / 8;
+}
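/*
 * Worked example, not part of the patch: clock * bpc / 8 yields the 1x,
 * 1.25x and 1.5x link adjustments described above. A self-contained
 * check for a 297000 kHz mode:
 */
#include <assert.h>

static int port_clock(int clock, int bpc)	/* mirrors intel_hdmi_port_clock() */
{
	return clock * bpc / 8;
}

int main(void)
{
	assert(port_clock(297000, 8) == 297000);	/* 1x */
	assert(port_clock(297000, 10) == 371250);	/* 1.25x */
	assert(port_clock(297000, 12) == 445500);	/* 1.5x */
	return 0;
}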
+
+static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int clock, bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int bpc;
+
+ for (bpc = 12; bpc >= 10; bpc -= 2) {
+ if (hdmi_deep_color_possible(crtc_state, bpc) &&
+ hdmi_port_clock_valid(intel_hdmi,
+ intel_hdmi_port_clock(clock, bpc),
+ true, force_dvi) == MODE_OK)
+ return bpc;
+ }
+
+ return 8;
+}
+
+static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ int bpc, clock = adjusted_mode->crtc_clock;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ clock *= 2;
+
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ clock /= 2;
+
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
+ clock, force_dvi);
+
+ crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
+
+ /*
+ * pipe_bpp could already be below 8bpc due to
+ * FDI bandwidth constraints. We shouldn't bump it
+ * back up to 8bpc in that case.
+ */
+ if (crtc_state->pipe_bpp > bpc * 3)
+ crtc_state->pipe_bpp = bpc * 3;
+
+ DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
+
+ if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
+ false, force_dvi) != MODE_OK) {
+ DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
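/*
 * Worked example, not part of the patch: the clock derivation above
 * first doubles for DBLCLK modes and then halves for YCbCr 4:2:0, so a
 * 594000 kHz 4k60 mode forced to 4:2:0 output at 8 bpc ends up with a
 * port clock of 594000 / 2 * 8 / 8 = 297000 kHz.
 */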
+
+static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ return crtc_state->has_hdmi_sink &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
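/*
 * Hedged note, not part of the patch: per the CEA-861 default encoding
 * parameters, drm_default_rgb_quant_range() reports limited range for
 * CEA modes (e.g. 1920x1080@60) and full range for IT modes such as
 * 640x480. So under BROADCAST_RGB_AUTO an HDMI sink driving a CEA mode
 * ends up limited range, while DVI sinks and non-CEA modes stay full
 * range, and YCbCr output always returns false here since its (always
 * limited) range is signalled via the infoframe, not PIPECONF.
 */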
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2302,11 +2411,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
- int clock_10bpc = clock_8bpc * 5 / 4;
- int clock_12bpc = clock_8bpc * 3 / 2;
- int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -2317,33 +2423,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /* See CEA-861-E - 5.1 Default Encoding Parameters */
- pipe_config->limited_color_range =
- pipe_config->has_hdmi_sink &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- clock_8bpc *= 2;
- clock_10bpc *= 2;
- clock_12bpc *= 2;
- }
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
- if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_10bpc,
- &clock_8bpc)) {
+ if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return -EINVAL;
}
}
+ pipe_config->limited_color_range =
+ intel_hdmi_limited_color_range(pipe_config, conn_state);
+
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
@@ -2355,43 +2447,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- /*
- * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
- * to check that the higher clock still fits within limits.
- */
- if (hdmi_deep_color_possible(pipe_config, 12) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
- desired_bpp = 12*3;
-
- /* Need to adjust the port link by 1.5x for 12bpc. */
- pipe_config->port_clock = clock_12bpc;
- } else if (hdmi_deep_color_possible(pipe_config, 10) &&
- hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
- desired_bpp = 10 * 3;
-
- /* Need to adjust the port link by 1.25x for 10bpc. */
- pipe_config->port_clock = clock_10bpc;
- } else {
- DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
- desired_bpp = 8*3;
-
- pipe_config->port_clock = clock_8bpc;
- }
-
- if (!pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
- pipe_config->pipe_bpp = desired_bpp;
- }
-
- if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
- false, force_dvi) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
- return -EINVAL;
- }
+ ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ if (ret)
+ return ret;
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
@@ -2431,6 +2489,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
+ intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
+ pipe_config->cpu_transcoder);
+
return 0;
}
@@ -2757,8 +2818,9 @@ intel_hdmi_connector_register(struct drm_connector *connector)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- if (intel_attached_hdmi(connector)->cec_notifier)
- cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+
+ cec_notifier_conn_unregister(n);
intel_connector_destroy(connector);
}
@@ -3007,7 +3069,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3073,13 +3135,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
+ struct cec_connector_info conn_info;
- DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
@@ -3125,8 +3189,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
- intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
- port_identifier(port));
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ intel_hdmi->cec_notifier =
+ cec_notifier_conn_register(dev->dev, port_identifier(port),
+ &conn_info);
if (!intel_hdmi->cec_notifier)
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
@@ -3216,11 +3283,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 106c2e0bc3c9..cf1ea5427639 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -23,6 +23,7 @@ struct intel_crtc_state;
struct intel_hdmi;
struct drm_connector_state;
union hdmi_infoframe;
+enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 56be20f6f47e..fc29046d48ea 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -481,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
queue_dig = true;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index b0cd447b7fbc..087b5f57b321 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -13,6 +13,7 @@
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
+enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index b19800b58442..0b67f7887cd0 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
+ pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b7c459a8931c..b1bc78623647 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
if (HAS_PCH_SPLIT(dev_priv)) {
@@ -899,12 +899,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev_priv))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN(dev_priv, 4))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ if (INTEL_GEN(dev_priv) < 4)
+ intel_encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->crtc_mask = (1 << 1);
+ intel_encoder->pipe_mask = ~0;
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 29edfc343716..848ce07a8ec2 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -30,6 +30,7 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -230,7 +231,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
if (IS_ERR(rq))
return rq;
- err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
+ err = i915_active_add_request(&overlay->last_flip, rq);
if (err) {
i915_request_add(rq);
return ERR_PTR(err);
@@ -439,8 +440,6 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
/*
* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@@ -751,7 +750,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
@@ -852,7 +850,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
@@ -1068,11 +1065,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
-
ret = intel_overlay_switch_off(overlay);
-
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1088,7 +1081,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
if (i915_gem_object_is_tiled(new_bo)) {
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
@@ -1152,14 +1144,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
return 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
@@ -1233,7 +1223,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
@@ -1290,7 +1279,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1303,15 +1291,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
struct i915_vma *vma;
int err;
- mutex_lock(&i915->drm.struct_mutex);
-
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1332,13 +1316,10 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1367,8 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- i915_active_init(dev_priv,
- &overlay->last_flip,
+ i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 3bfb720560c2..6a9f322d3fca 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -76,7 +76,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc_params.compression_enable &&
+ WARN_ON(crtc_state->dsc.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -88,48 +88,35 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
-static int edp_psr_shift(enum transcoder cpu_transcoder)
+static void psr_irq_control(struct drm_i915_private *dev_priv)
{
- switch (cpu_transcoder) {
- case TRANSCODER_A:
- return EDP_PSR_TRANSCODER_A_SHIFT;
- case TRANSCODER_B:
- return EDP_PSR_TRANSCODER_B_SHIFT;
- case TRANSCODER_C:
- return EDP_PSR_TRANSCODER_C_SHIFT;
- default:
- MISSING_CASE(cpu_transcoder);
- /* fallthrough */
- case TRANSCODER_EDP:
- return EDP_PSR_TRANSCODER_EDP_SHIFT;
- }
-}
-
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
-{
- u32 debug_mask, mask;
- enum transcoder cpu_transcoder;
- u32 transcoders = BIT(TRANSCODER_EDP);
+ enum transcoder trans_shift;
+ u32 mask, val;
+ i915_reg_t imr_reg;
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
-
- debug_mask = 0;
- mask = 0;
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
-
- mask |= EDP_PSR_ERROR(shift);
- debug_mask |= EDP_PSR_POST_EXIT(shift) |
- EDP_PSR_PRE_ENTRY(shift);
+ /*
+ * gen12+ has one instance of these registers per transcoder, all
+ * using the same bit definitions: handle it as TRANSCODER_EDP to
+ * force a 0 shift in the bit definitions
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
}
- if (debug & I915_PSR_DEBUG_IRQ)
- mask |= debug_mask;
+ mask = EDP_PSR_ERROR(trans_shift);
+ if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ mask |= EDP_PSR_POST_EXIT(trans_shift) |
+ EDP_PSR_PRE_ENTRY(trans_shift);
- I915_WRITE(EDP_PSR_IMR, ~mask);
+ /* Warning: this also masks/sets reserved bits */
+ val = I915_READ(imr_reg);
+ val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val |= ~mask;
+ I915_WRITE(imr_reg, val);
}
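/*
 * Minimal sketch, not part of the patch, of the IMR update above: in an
 * interrupt mask register a set bit means "masked", so after clearing
 * this transcoder's field the code sets every bit outside `mask`, which
 * is why the warning notes that reserved bits get written too. With a
 * hypothetical 4-bit field at bits 0-3:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t field_mask = 0xf;	/* stand-in for EDP_PSR_TRANS_MASK() */
	uint32_t unmasked = 0x4;	/* bits to leave unmasked, e.g. ERROR */
	uint32_t val = 0xabcd0005;	/* made-up current IMR contents */

	val &= ~field_mask;	/* clear this transcoder's field */
	val |= ~unmasked;	/* mask everything else, reserved bits too */
	assert((val & unmasked) == 0);	/* wanted interrupts stay unmasked */
	return 0;
}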
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -171,60 +158,58 @@ static void psr_event_print(u32 val, bool psr2_enabled)
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
- u32 transcoders = BIT(TRANSCODER_EDP);
- enum transcoder cpu_transcoder;
+ enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder trans_shift;
+ i915_reg_t imr_reg;
ktime_t time_ns = ktime_get();
- u32 mask = 0;
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
+ }
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
+ if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ dev_priv->psr.last_entry_attempt = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
- if (psr_iir & EDP_PSR_ERROR(shift)) {
- DRM_WARN("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ dev_priv->psr.last_exit = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = dev_priv->psr.psr2_enabled;
- /*
- * If this interruption is not masked it will keep
- * interrupting so fast that it prevents the scheduled
- * work to run.
- * Also after a PSR error, we don't want to arm PSR
- * again so we don't care about unmask the interruption
- * or unset irq_aux_error.
- */
- mask |= EDP_PSR_ERROR(shift);
- }
-
- if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ psr_event_print(val, psr2_enabled);
}
+ }
- if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
- dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ u32 val;
- if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
- psr_event_print(val, psr2_enabled);
- }
- }
- }
+ dev_priv->psr.irq_aux_error = true;
- if (mask) {
- mask |= I915_READ(EDP_PSR_IMR);
- I915_WRITE(EDP_PSR_IMR, mask);
+ /*
+ * If this interrupt is not masked it will keep firing so
+ * fast that it prevents the scheduled work from running.
+ * Also, after a PSR error we don't want to arm PSR again,
+ * so we don't care about unmasking the interrupt or
+ * clearing irq_aux_error.
+ */
+ val = I915_READ(imr_reg);
+ val |= EDP_PSR_ERROR(trans_shift);
+ I915_WRITE(imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@@ -283,6 +268,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ if (dev_priv->psr.dp) {
+ DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ return;
+ }
+
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@@ -305,7 +295,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- WARN_ON(dev_priv->psr.dp);
dev_priv->psr.dp = intel_dp;
if (INTEL_GEN(dev_priv) >= 9 &&
@@ -390,7 +379,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
+ I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -401,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -491,8 +480,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
- I915_WRITE(EDP_PSR_CTL, val);
+ val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -528,9 +518,87 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL, 0);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
- I915_WRITE(EDP_PSR2_CTL, val);
+static bool
+transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
+{
+ if (INTEL_GEN(dev_priv) < 9)
+ return false;
+ else if (INTEL_GEN(dev_priv) >= 12)
+ return trans == TRANSCODER_A;
+ else
+ return trans == TRANSCODER_EDP;
+}
+
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+{
+ if (!cstate || !cstate->base.active)
+ return 0;
+
+ return DIV_ROUND_UP(1000 * 1000,
+ drm_mode_vrefresh(&cstate->base.adjusted_mode));
+}
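/*
 * Worked example, not part of the patch: at a 60 Hz refresh rate this
 * is DIV_ROUND_UP(1000 * 1000, 60) = 16667 us per frame, so the six
 * idle frames used for the DC3CO exit delay below come to ~100 ms.
 */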
+
+static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+ u32 idle_frames)
+{
+ u32 val;
+
+ idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val &= ~EDP_PSR2_IDLE_FRAME_MASK;
+ val |= idle_frames;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
+
+static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ psr2_program_idle_frames(dev_priv, 0);
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ int idle_frames;
+
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ /*
+ * When restoring the PSR2 idle frame count, use 6 as the minimum to
+ * cover all known cases, including the off-by-one issue that the HW
+ * has in some cases.
+ */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ psr2_program_idle_frames(dev_priv, idle_frames);
+}
+
+static void tgl_dc5_idle_thread(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.idle_work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+ /* If delayed work is pending, it is not idle */
+ if (delayed_work_pending(&dev_priv->psr.idle_work))
+ goto unlock;
+
+ DRM_DEBUG_KMS("DC5/6 idle thread\n");
+ tgl_psr2_disable_dc3co(dev_priv);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->psr.dc3co_enabled)
+ return;
+
+ cancel_delayed_work(&dev_priv->psr.idle_work);
+ /* Disallow DC3CO before PSR2 exit */
+ tgl_psr2_disable_dc3co(dev_priv);
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
@@ -544,17 +612,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
+ DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
+ return false;
+ }
+
/*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
* over PSR2.
*/
- if (crtc_state->dsc_params.compression_enable) {
+ if (crtc_state->dsc.compression_enable) {
DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ psr_max_h = 5120;
+ psr_max_v = 3200;
+ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
} else if (IS_GEN(dev_priv, 9)) {
@@ -606,10 +683,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
/*
* HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms with DDI implementation of PSR have different
- * PSR registers per transcoder and we only implement transcoder EDP
- * ones. Since by Display design transcoder EDP is tied to port A
- * we can safely escape based on the port A.
+ * BDW+ platforms have an instance of the PSR registers per transcoder,
+ * but for now the driver only supports one instance of PSR, so let's
+ * keep it hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
@@ -648,9 +724,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -663,25 +740,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
-static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- static const i915_reg_t regs[] = {
- [TRANSCODER_A] = CHICKEN_TRANS_A,
- [TRANSCODER_B] = CHICKEN_TRANS_B,
- [TRANSCODER_C] = CHICKEN_TRANS_C,
- [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
- };
-
- WARN_ON(INTEL_GEN(dev_priv) < 9);
-
- if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
- !regs[cpu_transcoder].reg))
- cpu_transcoder = TRANSCODER_A;
-
- return regs[cpu_transcoder];
-}
-
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -697,8 +755,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
- i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
- cpu_transcoder);
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = I915_READ(reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
@@ -720,19 +777,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG, mask);
+ I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+
+ psr_irq_control(dev_priv);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
+ u32 val;
WARN_ON(dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
+ dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set, even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling PSR in this situation causes the screen to freeze the
+ * first time the PSR HW tries to activate, so let's keep PSR disabled
+ * to avoid any rendering problems.
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val &= EDP_PSR_ERROR(0);
+ } else {
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ }
+ if (val) {
+ dev_priv->psr.sink_not_reliable = true;
+ DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+ return;
+ }
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -782,20 +866,28 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
u32 val;
if (!dev_priv->psr.active) {
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR2_ENABLE);
+ }
+
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR_ENABLE);
+
return;
}
if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+ val &= ~EDP_PSR2_ENABLE;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ val &= ~EDP_PSR_ENABLE;
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -817,10 +909,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(dev_priv);
if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
+ psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS;
+ psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -859,6 +951,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
+ cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -963,7 +1056,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -979,10 +1073,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return false;
if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
+ reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS;
+ reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -1067,7 +1161,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+ /*
+ * Do it right away if it's already enabled, otherwise it will be done
+ * when enabling the source.
+ */
+ if (dev_priv->psr.enabled)
+ psr_irq_control(dev_priv);
mutex_unlock(&dev_priv->psr.lock);
@@ -1159,6 +1259,44 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->psr.lock);
}
+/*
+ * Once we completely rely on PSR2 S/W tracking in the future,
+ * intel_psr_flush() will also invalidate and flush PSR for ORIGIN_FLIP
+ * events, so tgl_dc3co_flush() will need to be changed accordingly.
+ */
+static void
+tgl_dc3co_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin)
+{
+ u32 delay;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.dc3co_enabled)
+ goto unlock;
+
+ if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ goto unlock;
+
+ /*
+ * Every frontbuffer flush flip event modifies the delay of the
+ * delayed work; when the delayed work finally runs, it means the
+ * display has been idle.
+ */
+ if (!(frontbuffer_bits &
+ INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ goto unlock;
+
+ tgl_psr2_enable_dc3co(dev_priv);
+ /* DC5/DC6 required idle frames = 6 */
+ delay = 6 * dev_priv->psr.dc3co_exit_delay;
+ mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
+ usecs_to_jiffies(delay));
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
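/*
 * Note, not part of the patch: mod_delayed_work() re-arms the same work
 * item, so every ORIGIN_FLIP flush pushes the DC5/6 deadline out by six
 * frame times. tgl_dc5_idle_thread() therefore only disables DC3CO once
 * the display has stayed flip-idle for that long, and its
 * delayed_work_pending() check catches a flip that raced in after the
 * work was already queued.
 */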
+
/**
* intel_psr_flush - Flush PSR
* @dev_priv: i915 device
@@ -1178,8 +1316,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
return;
+ }
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -1208,45 +1348,34 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
- u32 val;
-
if (!HAS_PSR(dev_priv))
return;
- dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
- HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
-
if (!dev_priv->psr.sink_support)
return;
+ if (IS_HASWELL(dev_priv))
+ /*
+ * HSW doesn't have its PSR registers in the same space as the
+ * transcoders, so set this to a value that, when subtracted from
+ * a register in transcoder space, results in the right offset
+ * for HSW
+ */
+ dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
+
if (i915_modparams.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
- /*
- * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
- * will still keep the error set even after the reset done in the
- * irq_preinstall and irq_uninstall hooks.
- * And enabling in this situation cause the screen to freeze in the
- * first time that PSR HW tries to activate so lets keep PSR disabled
- * to avoid any rendering problems.
- */
- val = I915_READ(EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
- if (val) {
- DRM_DEBUG_KMS("PSR interruption error set\n");
- dev_priv->psr.sink_not_reliable = true;
- }
-
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else
- /* For new platforms let's respect VBT back again */
+ else if (INTEL_GEN(dev_priv) < 12)
+ /* For new platforms up to TGL let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
mutex_init(&dev_priv->psr.lock);
}
@@ -1288,7 +1417,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
if (val & DP_PSR_LINK_CRC_ERROR)
- DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+ DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index dc818826f36d..46e4de8b8cd5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index adeb1c840976..5b7f4baf7348 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2921,7 +2921,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_sdvo->base.pipe_mask = ~0;
return true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index c9e05bcdd141..a66f224aa17d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
enum pipe;
+enum port;
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index cae25e493128..72fda0430062 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,19 +48,6 @@
#include "intel_psr.h"
#include "intel_sprite.h"
-bool is_planar_yuv_format(u32 pixelformat)
-{
- switch (pixelformat) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -300,10 +287,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- src->x1 = src_x << 16;
- src->x2 = (src_x + src_w) << 16;
- src->y1 = src_y << 16;
- src->y2 = (src_y + src_h) << 16;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
if (!fb->format->is_yuv)
return 0;
@@ -337,6 +322,55 @@ bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
icl_hdr_plane_mask() & BIT(plane_id);
}
+static void
+skl_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 9;
+ *den = 8;
+ }
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int num, den;
+
+ skl_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock on glk+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ den *= 2;
+
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
+ mul_u32_u32(den, dst_w * dst_h));
+}
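/*
 * Worked example, not part of the patch: on glk+ a 64bpp (cpp == 8)
 * framebuffer gets the 10/8 ratio and the two-pixels-per-clock credit
 * doubles the denominator, so without downscaling min cdclk is
 * pixel_rate * 10 / 16. Downscaling scales it by src area / dst area
 * (the divisions below happen to be exact):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pixel_rate = 160000;	/* kHz, made-up figure */
	uint64_t num = 10, den = 8 * 2;	/* glk+ 64bpp ratio, 2 px/clk */

	/* no scaling: src == dst */
	assert(pixel_rate * num / den == 100000);
	/* 3840x2160 -> 1920x1080 downscale: src area / dst area == 4 */
	assert(pixel_rate * num * 4 / den == 400000);
	return 0;
}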
+
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -361,6 +395,7 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
@@ -381,7 +416,7 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ if (drm_format_info_is_yuv_semiplanar(fb->format) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -554,7 +589,7 @@ skl_program_plane(struct intel_plane *plane,
u32 y = plane_state->color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
- struct intel_plane *linked = plane_state->linked_plane;
+ struct intel_plane *linked = plane_state->planar_linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
u32 plane_color_ctl = 0;
@@ -653,7 +688,7 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
- if (plane_state->linked_plane) {
+ if (plane_state->planar_linked_plane) {
/* Program the UV plane */
color_plane = 1;
}
@@ -825,6 +860,85 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
+static void
+vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * VLV bspec only considers cases where all three planes are
+ * enabled, and cases where the primary and one sprite are enabled.
+ * Let's assume the case with just two sprites enabled also
+ * maps to the latter case.
+ */
+ if (hweight8(active_planes) == 3) {
+ switch (cpp) {
+ case 8:
+ *num = 11;
+ *den = 8;
+ break;
+ case 4:
+ *num = 18;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ vlv_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
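/*
 * Worked example, not part of the patch: with all three planes active
 * and a 32bpp (cpp == 4) framebuffer the table above gives 18/16, so a
 * 200000 kHz pixel rate needs cdclk >= DIV_ROUND_UP(200000 * 18, 16) =
 * 225000 kHz.
 */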
+
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1031,6 +1145,164 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ switch (cpp) {
+ case 8:
+ *num = 12;
+ *den = 8;
+ break;
+ case 4:
+ *num = 19;
+ *den = 16;
+ break;
+ case 2:
+ *num = 33;
+ *den = 32;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+}
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, dst_w, pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+
+ if (src_w != dst_w)
+ ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
+ else
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w),
+ den * dst_w);
+}
+
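/*
 * Worked example (values assumed for illustration): downscaling a 64bpp
 * sprite from src_w = 3840 to dst_w = 1920 picks the 12/8 scaling ratio,
 * so the result is DIV_ROUND_UP_ULL(pixel_rate * 12 * 3840, 8 * 1920) =
 * 3 * pixel_rate, i.e. a 2x horizontal downscale triples the cdclk need.
 */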
+static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int num, den;
+
+ hsw_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1044,6 +1316,16 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
+static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+
+ return fb->format->cpp[0] == 8 &&
+ (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -1066,6 +1348,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
break;
@@ -1083,7 +1371,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- sprctl |= SPRITE_INT_GAMMA_DISABLE;
+ if (!ivb_need_sprite_gamma(plane_state))
+ sprctl |= SPRITE_INT_GAMMA_DISABLE;
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -1105,12 +1394,26 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
-static void ivb_sprite_linear_gamma(u16 gamma[18])
+static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
+ u16 gamma[18])
{
- int i;
+ int scale, i;
- for (i = 0; i < 17; i++)
- gamma[i] = (i << 10) / 16;
+ /*
+ * WaFP16GammaEnabling:ivb,hsw
+ * "Workaround : When using the 64-bit format, the sprite output
+ * on each color channel has one quarter amplitude. It can be
+ * brought up to full amplitude by using sprite internal gamma
+ * correction, pipe gamma correction, or pipe color space
+ * conversion to multiply the sprite output by four."
+ */
+ scale = 4;
+
+ for (i = 0; i < 16; i++)
+ gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1);
+
+ gamma[i] = min((scale * i << 10) / 16, 1 << 10);
+ i++;
gamma[i] = 3 << 10;
i++;
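/*
 * Worked example: with scale == 4 the ramp comes out as gamma[0..4] =
 * 0, 256, 512, 768, 1023, i.e. (near) full amplitude is reached at one
 * quarter of the input range, which is the 4x gain the workaround text
 * above asks for; gamma[16] = 1024 and gamma[17] = 3 << 10 fill the
 * extended table entries.
 */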
@@ -1124,7 +1427,10 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
u16 gamma[18];
int i;
- ivb_sprite_linear_gamma(gamma);
+ if (!ivb_need_sprite_gamma(plane_state))
+ return;
+
+ ivb_sprite_linear_gamma(plane_state, gamma);
/* FIXME these register are single buffered :( */
for (i = 0; i < 16; i++)
@@ -1257,6 +1563,53 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int hscale, pixel_rate;
+ unsigned int limit, decimate;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ hscale = drm_rect_calc_hscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+ if (hscale < 0x10000)
+ return pixel_rate;
+
+ /* Decimation steps at 2x,4x,8x,16x */
+ decimate = ilog2(hscale >> 16);
+ hscale >>= decimate;
+
+ /* Starting limit is 90% of cdclk */
+ limit = 9;
+
+ /* -10% per decimation step */
+ limit -= decimate;
+
+ /* -10% for RGB */
+ if (fb->format->cpp[0] >= 4)
+ limit--;
+
+ /*
+ * We should also do -10% if sprite scaling is enabled
+ * on the other pipe, but we can't really check for that,
+ * so we ignore it.
+ */
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale),
+ limit << 16);
+}
+
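/*
 * Worked example (values assumed for illustration): hscale = 0x30000
 * (3x downscale) gives decimate = ilog2(3) = 1 and hscale = 0x18000;
 * limit starts at 9, drops to 8 for the decimation step and to 7 for
 * an RGB fb, so the result is DIV_ROUND_UP_ULL(pixel_rate * 10 *
 * 0x18000, 7 << 16), roughly 2.14x the pixel rate.
 */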
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -1300,6 +1653,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
break;
@@ -1513,6 +1872,11 @@ static bool intel_fb_scalable(const struct drm_framebuffer *fb)
switch (fb->format->format) {
case DRM_FORMAT_C8:
return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return INTEL_GEN(to_i915(fb->dev)) >= 11;
default:
return true;
}
@@ -1791,7 +2155,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
- if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
+ if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -1801,6 +2165,22 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
return 0;
}
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ !drm_format_info_is_yuv_semiplanar(fb->format))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
+
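/*
 * Note: these scale factors are .16 fixed point (0x10000 == 1:1), so
 * 0x30000 - 1 and 0x20000 - 1 permit downscaling up to just under 3x
 * and 2x respectively.
 */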
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
@@ -1818,7 +2198,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state, fb->format->format);
+ max_scale = skl_plane_max_scale(dev_priv, fb);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1993,8 +2373,10 @@ static const u64 i9xx_plane_format_modifiers[] = {
};
static const u32 snb_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2024,6 +2406,8 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2039,6 +2423,8 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2055,6 +2441,8 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2158,6 +2546,13 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
+static const u64 gen12_plane_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -2198,6 +2593,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2306,6 +2703,55 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -2342,6 +2788,15 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2430,6 +2885,7 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
+ const struct drm_plane_funcs *plane_funcs;
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
@@ -2459,6 +2915,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = skl_disable_plane;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
+ plane->min_cdclk = skl_plane_min_cdclk;
if (icl_is_nv12_y_plane(plane_id))
plane->update_slave = icl_update_slave;
@@ -2472,11 +2929,19 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* TODO: Implement support for gen-12 CCS modifiers */
+ plane->has_ccs = false;
+ modifiers = gen12_plane_format_modifiers_noccs;
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
+ plane_funcs = &skl_plane_funcs;
+ }
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
@@ -2486,7 +2951,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
possible_crtcs = BIT(pipe);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, &skl_plane_funcs,
+ possible_crtcs, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -2519,6 +2984,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -2540,7 +3007,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -2556,6 +3023,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->min_cdclk = vlv_plane_min_cdclk;
formats = vlv_plane_formats;
num_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -2569,6 +3037,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else
+ plane->min_cdclk = ivb_sprite_min_cdclk;
+
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
@@ -2580,6 +3053,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN(dev_priv, 6)) {
@@ -2630,6 +3104,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ zpos = sprite + 1;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 093a2d156f1e..5eeaa92420d1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,7 +17,6 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
-bool is_planar_yuv_format(u32 pixelformat);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -50,4 +49,11 @@ static inline u8 icl_hdr_plane_mask(void)
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 85743a43bee2..7773169b7331 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,32 +23,38 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static bool has_modular_fia(struct drm_i915_private *i915)
-{
- if (!INTEL_INFO(i915)->display.has_modular_fia)
- return false;
-
- return intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
-}
-
-static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
- enum tc_port tc_port)
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915,
+ struct intel_digital_port *dig_port)
{
- if (!has_modular_fia(i915))
- return FIA1;
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+ u32 modular_fia;
+
+ if (INTEL_INFO(i915)->display.has_modular_fia) {
+ modular_fia = intel_uncore_read(&i915->uncore,
+ PORT_TX_DFLEXDPSP(FIA1));
+ modular_fia &= MODULAR_FIA_MASK;
+ } else {
+ modular_fia = 0;
+ }
/*
* Each Modular FIA instance houses 2 TC ports. In SoCs that have more
* than two TC ports, there are multiple instances of Modular FIA.
*/
- return tc_port / 2;
+ if (modular_fia) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
}
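/*
 * Worked example (assumed port): with modular FIA present, TC port 3
 * maps to FIA instance 3 / 2 = FIA2 with tc_phy_fia_idx = 3 % 2 = 1;
 * without modular FIA every port stays on FIA1 and tc_phy_fia_idx is
 * simply the TC port number.
 */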
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 lane_mask;
@@ -57,8 +63,23 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
WARN_ON(lane_mask == 0xffffffff);
- return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
+ return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 pin_mask;
+
+ pin_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+
+ WARN_ON(pin_mask == 0xffffffff);
+
+ return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
+ DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
@@ -95,7 +116,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -104,19 +124,21 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
switch (required_lanes) {
case 1:
- val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
break;
case 2:
- val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
break;
case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
break;
default:
MISSING_CASE(required_lanes);
@@ -164,9 +186,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return mask;
}
- if (val & TC_LIVE_STATE_TBT(tc_port))
+ if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_TBT_ALT);
- if (val & TC_LIVE_STATE_TC(tc_port))
+ if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
@@ -182,7 +204,6 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -194,14 +215,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return false;
}
- return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
+ return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
bool enable)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -215,9 +235,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
return false;
}
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
if (!enable)
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
@@ -232,7 +252,6 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -244,7 +263,7 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
return true;
}
- return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
+ return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}
/*
@@ -540,5 +559,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
mutex_init(&dig_port->tc_lock);
dig_port->tc_legacy_port = is_legacy;
dig_port->tc_link_refcount = 0;
- dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
+ tc_port_load_fia_params(i915, dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 783d75531435..463f1b3c836f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -13,6 +13,7 @@ struct intel_digital_port;
bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index b70221f5112a..9983fadf6c28 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -961,11 +961,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
- < 1000)
- return MODE_OK;
+ if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
+ return MODE_CLOCK_RANGE;
- return MODE_CLOCK_RANGE;
+ return MODE_OK;
}
static int
@@ -1702,7 +1701,7 @@ intel_tv_detect(struct drm_connector *connector,
struct intel_load_detect_pipe tmp;
int ret;
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret < 0)
return ret;
@@ -1948,9 +1947,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
intel_encoder->cloneable = 0;
- intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index dfcd156b5094..69a7cb1fa121 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -114,6 +114,7 @@ enum bdb_block_id {
BDB_LVDS_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
+ BDB_COMPRESSION_PARAMETERS = 56,
BDB_SKIP = 254, /* VBIOS private block, ignore */
};
@@ -291,6 +292,8 @@ struct bdb_general_features {
#define DVO_PORT_HDMIE 12 /* 193 */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
+#define DVO_PORT_DPG 15
+#define DVO_PORT_HDMIG 16
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
@@ -325,6 +328,7 @@ enum vbt_gmbus_ddi {
#define DP_AUX_D 0x30
#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
+#define DP_AUX_G 0x70
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
@@ -808,4 +812,55 @@ struct bdb_mipi_sequence {
u8 data[0]; /* up to 6 variable length blocks */
} __packed;
+/*
+ * Block 56 - Compression Parameters
+ */
+
+#define VBT_RC_BUFFER_BLOCK_SIZE_1KB 0
+#define VBT_RC_BUFFER_BLOCK_SIZE_4KB 1
+#define VBT_RC_BUFFER_BLOCK_SIZE_16KB 2
+#define VBT_RC_BUFFER_BLOCK_SIZE_64KB 3
+
+#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value) ((vbt_value) + 8) /* bits */
+#define VBT_DSC_MAX_BPP(vbt_value) (6 + (vbt_value) * 2)
+
+struct dsc_compression_parameters_entry {
+ u8 version_major:4;
+ u8 version_minor:4;
+
+ u8 rc_buffer_block_size:2;
+ u8 reserved1:6;
+
+ /*
+ * Buffer size in bytes:
+ *
+ * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes
+ */
+ u8 rc_buffer_size;
+ u32 slices_per_line;
+
+ u8 line_buffer_depth:4;
+ u8 reserved2:4;
+
+ /* Flag Bits 1 */
+ u8 block_prediction_enable:1;
+ u8 reserved3:7;
+
+ u8 max_bpp; /* mapping */
+
+ /* Color depth capabilities */
+ u8 reserved4:1;
+ u8 support_8bpc:1;
+ u8 support_10bpc:1;
+ u8 support_12bpc:1;
+ u8 reserved5:4;
+
+ u16 slice_height;
+} __packed;
+
+struct bdb_compression_parameters {
+ u16 entry_size;
+ struct dsc_compression_parameters_entry data[16];
+} __packed;
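/*
 * Worked example (values assumed for illustration): with
 * rc_buffer_block_size = VBT_RC_BUFFER_BLOCK_SIZE_4KB (1) and
 * rc_buffer_size = 5 the buffer is 4^1 * 1024 * (5 + 1) = 24576 bytes,
 * and a max_bpp field of 3 decodes via VBT_DSC_MAX_BPP() to
 * 6 + 3 * 2 = 12 bpp.
 */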
+
#endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index d4fb7f16f9f6..896b0c334f5e 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -322,8 +322,8 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
- struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
- u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
+ u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
u8 i = 0;
int row_index = 0;
int column_index = 0;
@@ -332,7 +332,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.slice_count);
/*
* Slice Height of 8 works for all currently available panels. So start
* with that if pic_height is an integral multiple of 8.
@@ -485,13 +485,13 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
- u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
int i = 0;
/* Populate PICTURE_PARAMETER_SET_0 registers */
@@ -514,11 +514,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
pps_val);
}
@@ -533,11 +533,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
pps_val);
}
@@ -553,11 +553,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
pps_val);
}
@@ -573,11 +573,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
pps_val);
}
@@ -593,11 +593,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
pps_val);
}
@@ -613,11 +613,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
pps_val);
}
@@ -635,11 +635,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
pps_val);
}
@@ -655,11 +655,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
pps_val);
}
@@ -675,11 +675,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
pps_val);
}
@@ -695,11 +695,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
pps_val);
}
@@ -717,11 +717,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
pps_val);
}
@@ -740,11 +740,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
pps_val);
}
@@ -763,7 +763,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
@@ -782,7 +782,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_buf_thresh_dword[2]);
I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
@@ -824,7 +824,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
@@ -859,7 +859,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
@@ -885,7 +885,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
@@ -909,7 +909,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
u32 dss_ctl1_val = 0;
u32 dss_ctl2_val = 0;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
/* Enable Power wells for VDSC/joining */
@@ -928,7 +928,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
@@ -944,7 +944,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
- if (!old_crtc_state->dsc_params.compression_enable)
+ if (!old_crtc_state->dsc.compression_enable)
return;
if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
new file mode 100644
index 000000000000..2ff7293986d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_vga.h"
+
+static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return VLV_VGACNTRL;
+ else if (INTEL_GEN(i915) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ u8 sr1;
+
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(SR01, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1 << 5, VGA_SR_DATA);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
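/*
 * Note: SR01 is the VGA sequencer clocking mode register and bit 5 is
 * its "screen off" bit, so the sequence above blanks the legacy VGA
 * screen before VGACNTRL disables the plane itself.
 */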
+void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ intel_vga_disable(dev_priv);
+ }
+}
+
+void intel_vga_redisable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ /*
+ * This function can be called either from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
+ if (!wakeref)
+ return;
+
+ intel_vga_redisable_power_on(i915);
+
+ intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
+}
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
+
+static int
+intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
+ DRM_ERROR("failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
+ DRM_ERROR("failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static unsigned int
+intel_vga_set_decode(void *cookie, bool enable_decode)
+{
+ struct drm_i915_private *i915 = cookie;
+
+ intel_vga_set_state(i915, enable_decode);
+
+ if (enable_decode)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int intel_vga_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ int ret;
+
+ /*
+ * If we have more than one VGA card, then we need to arbitrate access to the
+ * common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+void intel_vga_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_client_register(pdev, NULL, NULL, NULL);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
new file mode 100644
index 000000000000..ba5b55b917f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_H__
+#define __INTEL_VGA_H__
+
+struct drm_i915_private;
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915);
+void intel_vga_disable(struct drm_i915_private *i915);
+void intel_vga_redisable(struct drm_i915_private *i915);
+void intel_vga_redisable_power_on(struct drm_i915_private *i915);
+int intel_vga_register(struct drm_i915_private *i915);
+void intel_vga_unregister(struct drm_i915_private *i915);
+
+#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index a71b22bdd95b..0ca49b1604c6 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
@@ -1870,11 +1870,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* port C. BXT isn't limited like this.
*/
if (IS_GEN9_LP(dev_priv))
- intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ intel_encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->crtc_mask = BIT(PIPE_A);
+ intel_encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->crtc_mask = BIT(PIPE_B);
+ intel_encoder->pipe_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index f99920652751..81366aa4812b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
@@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
- /* XXX: we need to kill this */
- mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
- goto out_unlock;
+ goto out_signal;
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
@@ -211,7 +208,7 @@ static void clear_pages_worker(struct work_struct *work)
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
- err = i915_active_ref(&vma->active, rq->timeline, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (err)
goto out_request;
@@ -229,8 +226,6 @@ out_batch:
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1cdfe05514c3..e553ca8d98eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,8 +69,10 @@
#include <drm/i915_drm.h>
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -167,97 +169,6 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
-static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
-{
- unsigned int max;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- if (INTEL_GEN(i915) >= 12)
- max = GEN12_MAX_CONTEXT_HW_ID;
- else if (INTEL_GEN(i915) >= 11)
- max = GEN11_MAX_CONTEXT_HW_ID;
- else if (USES_GUC_SUBMISSION(i915))
- /*
- * When using GuC in proxy submission, GuC consumes the
- * highest bit in the context id to indicate proxy submission.
- */
- max = MAX_GUC_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
-
- return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
-}
-
-static int steal_hw_id(struct drm_i915_private *i915)
-{
- struct i915_gem_context *ctx, *cn;
- LIST_HEAD(pinned);
- int id = -ENOSPC;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- list_for_each_entry_safe(ctx, cn,
- &i915->contexts.hw_id_list, hw_id_link) {
- if (atomic_read(&ctx->hw_id_pin_count)) {
- list_move_tail(&ctx->hw_id_link, &pinned);
- continue;
- }
-
- GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
- list_del_init(&ctx->hw_id_link);
- id = ctx->hw_id;
- break;
- }
-
- /*
- * Remember how far we got up on the last repossesion scan, so the
- * list is kept in a "least recently scanned" order.
- */
- list_splice_tail(&pinned, &i915->contexts.hw_id_list);
- return id;
-}
-
-static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
-{
- int ret;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- /*
- * We prefer to steal/stall ourselves and our users over that of the
- * entire system. That may be a little unfair to our users, and
- * even hurt high priority clients. The choice is whether to oomkill
- * something else, or steal a context id.
- */
- ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (unlikely(ret < 0)) {
- ret = steal_hw_id(i915);
- if (ret < 0) /* once again for the correct errno code */
- ret = new_hw_id(i915, GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- *out = ret;
- return 0;
-}
-
-static void release_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
-
- if (list_empty(&ctx->hw_id_link))
- return;
-
- mutex_lock(&i915->contexts.mutex);
- if (!list_empty(&ctx->hw_id_link)) {
- ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
- list_del_init(&ctx->hw_id_link);
- }
- mutex_unlock(&i915->contexts.mutex);
-}
-
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
@@ -294,101 +205,241 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
for_each_engine(engine, gt, id) {
struct intel_context *ce;
+ if (engine->legacy_idx == INVALID_ENGINE)
+ continue;
+
+ GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+ GEM_BUG_ON(e->engines[engine->legacy_idx]);
+
ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) {
- __free_engines(e, id);
+ __free_engines(e, e->num_engines + 1);
return ERR_CAST(ce);
}
- e->engines[id] = ce;
- e->num_engines = id + 1;
+ e->engines[engine->legacy_idx] = ce;
+ e->num_engines = max(e->num_engines, engine->legacy_idx);
}
+ e->num_engines++;
return e;
}
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- release_hw_id(ctx);
- if (ctx->vm)
- i915_vm_put(ctx->vm);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
+ kfree(ctx->jump_whitelist);
+
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
- list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
{
- struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ llist_for_each_entry_safe(ctx, cn, list, free_link)
i915_gem_context_free(ctx);
}
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
{
- struct i915_gem_context *ctx;
- struct llist_node *freed;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- freed = llist_del_first(&i915->contexts.free_list);
- if (!freed)
- return;
-
- ctx = container_of(freed, typeof(*ctx), free_link);
- i915_gem_context_free(ctx);
+ contexts_free_all(llist_del_all(&gc->free_list));
}
static void contexts_free_worker(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), contexts.free_work);
+ struct i915_gem_contexts *gc =
+ container_of(work, typeof(*gc), free_work);
- mutex_lock(&i915->drm.struct_mutex);
- contexts_free(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ contexts_flush_free(gc);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &i915->contexts.free_list))
- queue_work(i915->wq, &i915->contexts.free_work);
+ if (llist_add(&ctx->free_link, &gc->free_list))
+ schedule_work(&gc->free_work);
}
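/*
 * Note: llist_add() returns true only when the node was added to an
 * empty list, so the worker is scheduled exactly once per batch and
 * contexts_flush_free() then drains the whole list.
 */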
-static void context_close(struct i915_gem_context *ctx)
+static inline struct i915_gem_engines *
+__context_engines_static(const struct i915_gem_context *ctx)
{
- mutex_lock(&ctx->mutex);
+ return rcu_dereference_protected(ctx->engines, true);
+}
- i915_gem_context_set_closed(ctx);
- ctx->file_priv = ERR_PTR(-EBADF);
+static bool __reset_engine(struct intel_engine_cs *engine)
+{
+ struct intel_gt *gt = engine->gt;
+ bool success = false;
+
+ if (!intel_has_reset_engine(gt))
+ return false;
+
+ if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ success = intel_engine_reset(engine, NULL) == 0;
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
+
+ return success;
+}
+
+static void __reset_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "context closure in %s", ctx->name);
+}
+
+static bool __cancel_engine(struct intel_engine_cs *engine)
+{
+ /*
+ * Send a "high priority pulse" down the engine to cause the
+ * current request to be momentarily preempted. (If it fails to
+ * be preempted, it will be reset). As we have marked our context
+ * as banned, any incomplete request, including any running, will
+ * be skipped following the preemption.
+ *
+ * If there is no hangchecking (one of the reasons why we try to
+ * cancel the context) and no forced preemption, there may be no
+ * means by which we reset the GPU and evict the persistent hog.
+ * Ergo if we are unable to inject a preemptive pulse that can
+ * kill the banned context, we fall back to doing a local reset
+ * instead.
+ */
+ if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+ !intel_engine_pulse(engine))
+ return true;
+
+ /* If we are unable to send a pulse, try resetting this engine. */
+ return __reset_engine(engine);
+}
+
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Serialise with __i915_request_submit() so that it sees
+ * is-banned?, or we know the request is already inflight.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
+ }
+
+ engine = NULL;
+ if (i915_request_is_active(rq) && !rq->fence.error)
+ engine = rq->engine;
+
+ spin_unlock_irq(&locked->active.lock);
+
+ return engine;
+}
+
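/*
 * Note: the lock juggling above re-reads rq->engine after taking the
 * lock and retries if the request migrated (e.g. via a virtual engine)
 * in the meantime, so the final is-active check runs under the lock of
 * the engine that actually owns the request.
 */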
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct i915_request *rq;
+
+ if (!ce->timeline)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ /* Check with the backend if the request is inflight */
+ engine = __active_engine(rq);
+ if (engine)
+ break;
+ }
+ rcu_read_unlock();
+
+ return engine;
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ /*
+ * If we are already banned, it was due to a guilty request causing
+ * a reset and the entire context being evicted from the GPU.
+ */
+ if (i915_gem_context_is_banned(ctx))
+ return;
+
+ i915_gem_context_set_banned(ctx);
/*
- * This context will never again be assinged to HW, so we can
- * reuse its ID for the next context.
+ * Map the user's engine back to the actual engines; one virtual
+ * engine will be mapped to multiple engines, and using ctx->engine[]
+ * the same engine may have multiple instances in the user's map.
+ * However, we only care about pending requests, so only include
+ * engines on which there are incomplete requests.
*/
- release_hw_id(ctx);
+ for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ struct intel_engine_cs *engine;
+
+ /*
+ * Check the current active state of this context; if we
+ * are currently executing on the GPU we need to evict
+ * ourselves. On the other hand, if we haven't yet been
+ * submitted to the GPU or if everything is complete,
+ * we have nothing to do.
+ */
+ engine = active_engine(ce);
+
+ /* First attempt to gracefully cancel the context */
+ if (engine && !__cancel_engine(engine))
+ /*
+ * If we are unable to send a preemptive pulse to bump
+ * the context from the GPU, we have to resort to a full
+ * reset. We hope the collateral damage is worth it.
+ */
+ __reset_context(ctx, engine);
+ }
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ i915_gem_context_set_closed(ctx);
+
+ mutex_lock(&ctx->mutex);
+
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ i915_vm_close(vm);
+
+ ctx->file_priv = ERR_PTR(-EBADF);
/*
* The LUT uses the VMA as a backpointer to unref the object,
@@ -398,9 +449,47 @@ static void context_close(struct i915_gem_context *ctx)
lut_close(ctx);
mutex_unlock(&ctx->mutex);
+
+ /*
+ * If the user has disabled hangchecking, we cannot be sure that
+ * the batches will ever complete after the context is closed,
+ * keeping the context and all resources pinned forever. So in this
+ * case we opt to forcibly kill off all remaining requests on
+ * context close.
+ */
+ if (!i915_gem_context_is_persistent(ctx) ||
+ !i915_modparams.enable_hangcheck)
+ kill_context(ctx);
+
i915_gem_context_put(ctx);
}
+static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+{
+ if (i915_gem_context_is_persistent(ctx) == state)
+ return 0;
+
+ if (state) {
+ /*
+ * Only contexts that are short-lived [that will expire or be
+ * reset] are allowed to survive past termination. We require
+ * hangcheck to ensure that the persistent requests are healthy.
+ */
+ if (!i915_modparams.enable_hangcheck)
+ return -EINVAL;
+
+ i915_gem_context_set_persistence(ctx);
+ } else {
+ /* To cancel a context we use "preempt-to-idle" */
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ return -ENODEV;
+
+ i915_gem_context_clear_persistence(ctx);
+ }
+
+ return 0;
+}
+
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
@@ -414,7 +503,6 @@ __create_context(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &i915->contexts.list);
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@@ -428,7 +516,6 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -437,10 +524,18 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
+ __context_set_persistence(ctx, true /* cgroup hook? */);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+ ctx->jump_whitelist = NULL;
+ ctx->jump_whitelist_cmds = 0;
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ spin_unlock(&i915->gem.contexts.lock);
+
return ctx;
err_free:
@@ -470,11 +565,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = ctx->vm;
+ struct i915_address_space *old = i915_gem_context_vm(ctx);
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- ctx->vm = i915_vm_get(vm);
+ rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@@ -483,12 +578,12 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
static void __assign_ppgtt(struct i915_gem_context *ctx,
struct i915_address_space *vm)
{
- if (vm == ctx->vm)
+ if (vm == rcu_access_pointer(ctx->vm))
return;
vm = __set_ppgtt(ctx, vm);
if (vm)
- i915_vm_put(vm);
+ i915_vm_close(vm);
}
static void __set_timeline(struct intel_timeline **dst,
@@ -515,27 +610,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
- !HAS_EXECLISTS(dev_priv))
+ !HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the most stale context */
- contexts_free_first(dev_priv);
+ /* Reap the stale contexts */
+ contexts_flush_free(&i915->gem.contexts);
- ctx = __create_context(dev_priv);
+ ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
- if (HAS_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -543,14 +636,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct intel_timeline *timeline;
- timeline = intel_timeline_create(&dev_priv->gt, NULL);
+ timeline = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@@ -582,19 +678,13 @@ struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
- int err;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
- err = i915_gem_context_pin_hw_id(ctx);
- if (err) {
- destroy_kernel_context(&ctx);
- return ERR_PTR(err);
- }
-
i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_persistence(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -602,62 +692,42 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
}
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
{
- mutex_init(&i915->contexts.mutex);
- INIT_LIST_HEAD(&i915->contexts.list);
-
- /* Using the simple ida interface, the max is limited by sizeof(int) */
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&i915->contexts.hw_ida);
- INIT_LIST_HEAD(&i915->contexts.hw_id_list);
+ spin_lock_init(&gc->lock);
+ INIT_LIST_HEAD(&gc->list);
- INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
- init_llist_head(&i915->contexts.free_list);
+ INIT_WORK(&gc->free_work, contexts_free_worker);
+ init_llist_head(&gc->free_list);
}
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
- GEM_BUG_ON(dev_priv->kernel_context);
+ GEM_BUG_ON(i915->kernel_context);
- init_contexts(dev_priv);
+ init_contexts(&i915->gem.contexts);
/* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+ ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
return PTR_ERR(ctx);
}
- /*
- * For easy recognisablity, we want the kernel context to be 0 and then
- * all user contexts will have non-zero hw_id. Kernel contexts are
- * permanently pinned, so that we never suffer a stall and can
- * use them from any allocation context (e.g. for evicting other
- * contexts and from inside the shrinker).
- */
- GEM_BUG_ON(ctx->hw_id);
- GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
- dev_priv->kernel_context = ctx;
+ i915->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
return 0;
}
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->drm.struct_mutex);
-
destroy_kernel_context(&i915->kernel_context);
-
- /* Must free all deferred contexts (via flush_workqueue) first */
- GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
- ida_destroy(&i915->contexts.hw_ida);
+ flush_work(&i915->gem.contexts.free_work);
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -675,11 +745,16 @@ static int vm_idr_cleanup(int id, void *p, void *data)
static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv)
{
+ struct i915_address_space *vm;
int ret;
ctx->file_priv = fpriv;
- if (ctx->vm)
- ctx->vm->file = fpriv;
+
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->file, fpriv); /* XXX */
+ mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -716,9 +791,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
idr_init(&file_priv->context_idr);
idr_init_base(&file_priv->vm_idr, 1);
- mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
- mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@@ -746,6 +819,7 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = file_priv->dev_priv;
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
@@ -754,6 +828,8 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
mutex_destroy(&file_priv->vm_idr_lock);
+
+ contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -846,6 +922,7 @@ struct context_barrier_task {
void *data;
};
+__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -865,20 +942,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
void (*task)(void *data),
void *data)
{
- struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
struct intel_context *ce;
int err = 0;
- lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!task);
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
- i915_active_init(i915, &cb->base, NULL, cb_retire);
+ i915_active_init(&cb->base, NULL, cb_retire);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
@@ -910,7 +985,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
- err = i915_active_ref(&cb->base, rq->timeline, rq);
+ err = i915_active_add_request(&cb->base, rq);
i915_request_add(rq);
if (err)
@@ -933,16 +1008,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_address_space *vm;
int ret;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
- /* XXX rcu acquire? */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
+ rcu_read_lock();
vm = i915_vm_get(ctx->vm);
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
@@ -953,7 +1024,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret < 0)
goto err_unlock;
- i915_vm_get(vm);
+ i915_vm_open(vm);
args->size = 0;
args->value = ret;
@@ -973,7 +1044,7 @@ static void set_ppgtt_barrier(void *data)
if (INTEL_GEN(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_vm_put(old);
+ i915_vm_close(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
@@ -1003,12 +1074,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ int err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
@@ -1045,34 +1122,34 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (args->size)
return -EINVAL;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
if (upper_32_bits(args->value))
return -ENOENT;
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
+ rcu_read_lock();
vm = idr_find(&file_priv->vm_idr, args->value);
- if (vm)
- i915_vm_get(vm);
- mutex_unlock(&file_priv->vm_idr_lock);
+ if (vm && !kref_get_unless_zero(&vm->ref))
+ vm = NULL;
+ rcu_read_unlock();
if (!vm)
return -ENOENT;
- err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ err = mutex_lock_interruptible(&ctx->mutex);
if (err)
goto out;
- if (vm == ctx->vm)
+ if (i915_gem_context_is_closed(ctx)) {
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
- mutex_lock(&ctx->mutex);
lut_close(ctx);
- mutex_unlock(&ctx->mutex);
old = __set_ppgtt(ctx, vm);
@@ -1087,13 +1164,12 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- i915_vm_put(__set_ppgtt(ctx, old));
- i915_vm_put(old);
+ i915_vm_close(__set_ppgtt(ctx, old));
+ i915_vm_close(old);
}
unlock:
- mutex_unlock(&ctx->i915->drm.struct_mutex);
-
+ mutex_unlock(&ctx->mutex);
out:
i915_vm_put(vm);
return err;
@@ -1112,7 +1188,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) +
LRC_STATE_PN * PAGE_SIZE +
- (CTX_R_PWR_CLK_STATE + 1) * 4;
+ CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
@@ -1155,8 +1231,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}
static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
@@ -1180,23 +1255,6 @@ unlock:
}
static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- int ret;
-
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ret = __intel_context_reconfigure_sseu(ce, sseu);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
-}
-
-static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context)
@@ -1629,7 +1687,7 @@ replace:
i915_gem_context_set_user_engines(ctx);
else
i915_gem_context_clear_user_engines(ctx);
- rcu_swap_protected(ctx->engines, set.engines, 1);
+ set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
call_rcu(&set.engines->rcu, free_engines_rcu);
@@ -1738,6 +1796,16 @@ err_free:
return err;
}
+static int
+set_persistence(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ if (args->size)
+ return -EINVAL;
+
+ return __context_set_persistence(ctx, args->value);
+}
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1815,6 +1883,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ ret = set_persistence(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1967,10 +2039,11 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
struct i915_address_space *vm;
+ int err = 0;
rcu_read_lock();
do {
- vm = READ_ONCE(src->vm);
+ vm = rcu_dereference(src->vm);
if (!vm)
break;
@@ -1992,7 +2065,7 @@ static int clone_vm(struct i915_gem_context *dst,
* it cannot be reallocated elsewhere.
*/
- if (vm == READ_ONCE(src->vm))
+ if (vm == rcu_access_pointer(src->vm))
break;
i915_vm_put(vm);
@@ -2000,11 +2073,16 @@ static int clone_vm(struct i915_gem_context *dst,
rcu_read_unlock();
if (vm) {
- __assign_ppgtt(dst, vm);
+ if (!mutex_lock_interruptible(&dst->mutex)) {
+ __assign_ppgtt(dst, vm);
+ mutex_unlock(&dst->mutex);
+ } else {
+ err = -EINTR;
+ }
i915_vm_put(vm);
}
- return 0;
+ return err;
}
static int create_clone(struct i915_user_extension __user *ext, void *data)
@@ -2094,12 +2172,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ext_data.ctx = i915_gem_create_context(i915, args->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ext_data.ctx))
return PTR_ERR(ext_data.ctx);
@@ -2226,12 +2299,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- if (ctx->vm)
- args->value = ctx->vm->total;
- else if (to_i915(dev)->ggtt.alias)
- args->value = to_i915(dev)->ggtt.alias->vm.total;
+ rcu_read_lock();
+ if (rcu_access_pointer(ctx->vm))
+ args->value = rcu_dereference(ctx->vm)->total;
else
args->value = to_i915(dev)->ggtt.vm.total;
+ rcu_read_unlock();
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2266,6 +2339,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = get_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ args->size = 0;
+ args->value = i915_gem_context_is_persistent(ctx);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2297,7 +2375,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_gem_context *ctx;
int ret;
@@ -2319,7 +2397,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
*/
if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ args->reset_count = i915_reset_count(&i915->gpu_error);
else
args->reset_count = 0;
@@ -2332,33 +2410,6 @@ out:
return ret;
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
- int err = 0;
-
- mutex_lock(&i915->contexts.mutex);
-
- GEM_BUG_ON(i915_gem_context_is_closed(ctx));
-
- if (list_empty(&ctx->hw_id_link)) {
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
-
- err = assign_hw_id(i915, &ctx->hw_id);
- if (err)
- goto out_unlock;
-
- list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
- }
-
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
- atomic_inc(&ctx->hw_id_pin_count);
-
-out_unlock:
- mutex_unlock(&i915->contexts.mutex);
- return err;
-}
-
/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
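
For orientation, a minimal userspace sketch of the persistence control added
above (not part of this patch; it assumes the I915_CONTEXT_PARAM_PERSISTENCE
value from this series is present in the installed <drm/i915_drm.h>):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Opt a context out of persistence: once closed, its outstanding
 * requests are cancelled instead of running to completion.
 */
static int context_set_nonpersistent(int fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PERSISTENCE,
		.value = 0,
	};

	/* The kernel rejects this with ENODEV when the engines cannot
	 * preempt-to-idle, mirroring __context_set_persistence() above.
	 */
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}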
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 176978608b6f..18e50a769a6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -11,7 +11,9 @@
#include "gt/intel_context.h"
+#include "i915_drv.h"
#include "i915_gem.h"
+#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@@ -74,6 +76,21 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c
clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
+static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
+{
+ return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
+{
+ set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
+{
+ clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -112,19 +129,22 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
-static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+static inline bool
+i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
{
- if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
- return 0;
+ return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
+}
- return __i915_gem_context_pin_hw_id(ctx);
+static inline void
+i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}
-static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
+static inline void
+i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
{
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
- atomic_dec(&ctx->hw_id_pin_count);
+ clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
@@ -133,8 +153,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
}
/* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
@@ -173,6 +193,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline struct i915_address_space *
+i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+ return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
+}
+
+static inline struct i915_address_space *
+i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (!vm)
+ vm = &ctx->i915->ggtt.vm;
+ vm = i915_vm_get(vm);
+ rcu_read_unlock();
+
+ return vm;
+}
+
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
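
A caller-side sketch for the RCU-protected vm accessors above (the caller
here is hypothetical; i915_vm_put() is the existing reference release):

	struct i915_address_space *vm;

	vm = i915_gem_context_get_vm_rcu(ctx); /* never NULL: falls back to the GGTT */
	/* ... vm may be used outside any RCU critical section here ... */
	i915_vm_put(vm);

Holders of ctx->mutex can use i915_gem_context_vm() directly and skip the
extra reference.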
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 260d59cc3de8..3870dd5daaa0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -88,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_address_space *vm;
+ struct i915_address_space __rcu *vm;
/**
* @pid: process id of creator
@@ -137,6 +137,7 @@ struct i915_gem_context {
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
+#define UCONTEXT_PERSISTENCE 4
/**
* @flags: small set of booleans
@@ -146,24 +147,7 @@ struct i915_gem_context {
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
#define CONTEXT_USER_ENGINES 3
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unqiue
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
+#define CONTEXT_NOPREEMPT 4
struct mutex mutex;
@@ -192,6 +176,13 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
+
+ /** jump_whitelist: Bit array for tracking cmds during cmdparsing
+ * Guarded by struct_mutex
+ */
+ unsigned long *jump_whitelist;
+ /** jump_whitelist_cmds: Number of cmd slots available */
+ u32 jump_whitelist_cmds;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 96ce95c8ac5a..eaea49d08eb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -256,6 +256,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ static struct lock_class_key lock_class;
struct dma_buf_attachment *attach;
struct drm_i915_gem_object *obj;
int ret;
@@ -287,7 +288,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
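
A note on the new lock_class_key parameter threaded through
i915_gem_object_init(): each object class supplies its own static key so
that lockdep treats the per-class obj->mm.lock instances as distinct lock
classes. A reduced sketch of the pattern (class names here are illustrative,
not from the patch):

	static struct lock_class_key shmem_class, dmabuf_class;

	__mutex_init(&shmem_obj->mm.lock, "obj->mm.lock", &shmem_class);
	__mutex_init(&dmabuf_obj->mm.lock, "obj->mm.lock", &dmabuf_class);

Without distinct keys, nesting one class's mm.lock inside another's would be
reported as recursive locking.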
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9c58e8fac1d9..9937b4c341f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -27,7 +27,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
- if (!READ_ONCE(obj->pin_global))
+ if (!i915_gem_object_is_framebuffer(obj))
return;
i915_gem_object_lock(obj);
@@ -288,14 +288,21 @@ restart:
if (!drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+ /* Wait for an earlier async bind, need to rewrite it */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
if (ret)
return ret;
}
}
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (i915_vm_has_cache_coloring(vma->vm))
+ vma->node.color = cache_level;
+ }
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
@@ -389,16 +396,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- goto out;
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret == 0) {
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
out:
i915_gem_object_put(obj);
@@ -422,12 +424,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
assert_object_held(obj);
- /* Mark the global pin early so that we account for the
- * display coherency whilst setting up the cache domains.
- */
- obj->pin_global++;
-
- /* The display engine is not coherent with the LLC cache on gen6. As
+ /*
+ * The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
* chipsets.
@@ -439,12 +437,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(to_i915(obj->base.dev)) ?
I915_CACHE_WT : I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err_unpin_global;
- }
+ if (ret)
+ return ERR_PTR(ret);
- /* As the user may map the buffer once pinned in the display plane
+ /*
+ * As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. However,
* it may simply be too big to fit into mappable, in which case
@@ -461,22 +458,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
if (IS_ERR(vma))
- goto err_unpin_global;
+ return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
__i915_gem_object_flush_for_display(obj);
- /* It should now be out of any other write domains, and we can update
+ /*
+ * It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
-
-err_unpin_global:
- obj->pin_global--;
- return vma;
}
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
@@ -491,6 +485,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
if (!drm_mm_node_allocated(&vma->node))
continue;
+ GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
mutex_unlock(&i915->ggtt.vm.mutex);
@@ -500,7 +495,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (obj->mm.madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED &&
+ !atomic_read(&obj->mm.shrink_pin))
list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
@@ -514,12 +510,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
assert_object_held(obj);
- if (WARN_ON(obj->pin_global == 0))
- return;
-
- if (--obj->pin_global == 0)
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b5f6937369ea..f0998f1225af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -19,6 +19,7 @@
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -252,6 +253,7 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
+ struct intel_context *ce;
struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
@@ -296,7 +298,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+ return intel_engine_requires_cmd_parser(eb->engine) ||
+ (intel_engine_using_cmd_parser(eb->engine) &&
+ eb->args->batch_len);
}
static int eb_create(struct i915_execbuffer *eb)
@@ -697,7 +701,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
+ mutex_lock(&eb->context->vm->mutex);
err = i915_gem_evict_vm(eb->context->vm);
+ mutex_unlock(&eb->context->vm->mutex);
if (err)
return err;
break;
@@ -725,7 +731,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm)
+ if (rcu_access_pointer(ctx->vm))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
eb->context_flags = 0;
@@ -880,6 +886,9 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
+ if (eb->reloc_cache.ce)
+ intel_context_put(eb->reloc_cache.ce);
+
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -902,7 +911,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
- cache->node.allocated = false;
+ cache->node.flags = 0;
+ cache->ce = NULL;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -963,11 +973,13 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
@@ -1042,11 +1054,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@@ -1056,7 +1070,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
offset = cache->node.start;
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1145,7 +1159,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
- pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+ pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1168,7 +1182,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_create(eb->context);
+ rq = intel_context_create_request(cache->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1239,6 +1253,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
+ if (!cache->ce) {
+ struct intel_context *ce;
+
+ /*
+ * The CS pre-parser can pre-fetch commands across
+ * memory sync points and, starting with gen12, it is able
+ * to pre-fetch across BB_START and BB_END boundaries
+ * (within the same context). We therefore use a
+ * separate context on gen12+ to guarantee that the reloc
+ * writes land before the parser gets to the target
+ * memory location.
+ */
+ if (cache->gen >= 12)
+ ce = intel_context_create(eb->context->gem_context,
+ eb->engine);
+ else
+ ce = intel_context_get(eb->context);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ cache->ce = ce;
+ }
+
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1388,7 +1425,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
- PIN_GLOBAL);
+ PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
return err;
@@ -1955,40 +1992,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
return 0;
}
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = eb->i915;
+ struct i915_vma * const vma = *eb->vma;
+ struct i915_address_space *vm;
+ u64 flags;
+
+ /*
+ * PPGTT-backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering.
+ */
+ if (CMDPARSER_USES_GGTT(dev_priv)) {
+ flags = PIN_GLOBAL;
+ vm = &dev_priv->ggtt.vm;
+ } else if (vma->vm->has_read_only) {
+ flags = PIN_USER;
+ vm = vma->vm;
+ i915_gem_object_set_readonly(obj);
+ } else {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
{
struct intel_engine_pool_node *pool;
struct i915_vma *vma;
+ u64 batch_start;
+ u64 shadow_batch_start;
int err;
- pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+ pool = intel_engine_get_pool(eb->engine, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
- err = intel_engine_cmd_parser(eb->engine,
+ vma = shadow_batch_pin(eb, pool->obj);
+ if (IS_ERR(vma))
+ goto err;
+
+ batch_start = gen8_canonical_addr(eb->batch->node.start) +
+ eb->batch_start_offset;
+
+ shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+ err = intel_engine_cmd_parser(eb->gem_context,
+ eb->engine,
eb->batch->obj,
- pool->obj,
+ batch_start,
eb->batch_start_offset,
eb->batch_len,
- is_master);
+ pool->obj,
+ shadow_batch_start);
+
if (err) {
- if (err == -EACCES) /* unhandled chained batch */
+ i915_vma_unpin(vma);
+
+ /*
+ * Unsafe GGTT-backed buffers can still be submitted safely
+ * as non-secure.
+ * For PPGTT backing, however, we have no choice but to forcibly
+ * reject unsafe buffers.
+ */
+ if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+ /* Execute original buffer non-secure */
vma = NULL;
else
vma = ERR_PTR(err);
goto err;
}
- vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
- goto err;
-
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
+ eb->batch_start_offset = 0;
+ eb->batch = vma;
+
+ if (CMDPARSER_USES_GGTT(eb->i915))
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+
+ /* eb->batch_len unchanged */
+
vma->private = pool;
return vma;
@@ -2043,6 +2134,9 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
+ if (i915_gem_context_nopreempt(eb->gem_context))
+ eb->request->flags |= I915_REQUEST_NOPREEMPT;
+
return 0;
}
@@ -2112,35 +2206,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
return i915_request_get(rq);
}
-static int
-__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- int err;
-
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return 0;
-
- err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
- if (err)
- return err;
-
- err = __intel_context_do_pin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- return err;
-}
-
-static void
-__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
- return;
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- intel_context_unpin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-}
-
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
@@ -2160,7 +2225,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = __eb_pin_context(eb, ce);
+ err = intel_context_pin(ce);
if (err)
return err;
@@ -2204,7 +2269,7 @@ err_exit:
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
err_unpin:
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
return err;
}
@@ -2217,7 +2282,7 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
}
static unsigned int
@@ -2421,6 +2486,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
struct drm_syncobj **fences)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
@@ -2432,7 +2498,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
- eb.i915 = to_i915(dev);
+ eb.i915 = i915;
eb.file = file;
eb.args = args;
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2452,8 +2518,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
+ if (INTEL_GEN(i915) >= 11)
+ return -ENODEV;
+
+ /* Return -EPERM to trigger fallback code on old binaries. */
+ if (!HAS_SECURE_BATCHES(i915))
+ return -EPERM;
+
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return -EPERM;
eb.batch_flags |= I915_DISPATCH_SECURE;
}
@@ -2530,34 +2603,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
+ if (eb.batch_len == 0)
+ eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
- vma = eb_parse(&eb, drm_is_current_master(file));
+ vma = eb_parse(&eb);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_vma;
}
-
- if (vma) {
- /*
- * Batch parsed and accepted:
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in
- * the dispatch_execbuffer implementations. We
- * specifically don't want that set on batches the
- * command parser has accepted.
- */
- eb.batch_flags |= I915_DISPATCH_SECURE;
- eb.batch_start_offset = 0;
- eb.batch = vma;
- }
}
- if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..9cfb0e41ff06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ create_st:
goto err;
}
- /* Mark the pages as dontneed whilst they are still pinned. As soon
- * as they are unpinned they are allowed to be reaped by the shrinker,
- * and the caller is expected to repopulate - the contents of this
- * object are only valid whilst active and pinned.
- */
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
internal_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -172,6 +164,7 @@ struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
phys_addr_t size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -186,7 +179,16 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
+
+ /*
+ * Mark the object as volatile, such that the pages are marked as
+ * dontneed whilst they are still pinned. As soon as they are unpinned
+ * they are allowed to be reaped by the shrinker, and the caller is
+ * expected to repopulate - the contents of this object are only valid
+ * whilst active and pinned.
+ */
+ i915_gem_object_set_volatile(obj);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
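
A hypothetical caller of the helper above; the volatile flag just set means
the contents are only guaranteed while the pages remain pinned:

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_internal(i915, SZ_64K);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	/* Pages may be reaped by the shrinker once unpinned; the caller
	 * must repopulate on the next pin.
	 */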
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index 000000000000..0e2bf6b7e143
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+/* XXX: Time to vfunc your life up? */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ size, flags);
+}
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index 000000000000..7c176b8b7d2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags);
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
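
A usage sketch for the new LMEM helpers (hypothetical caller; assumes the
device exposes an INTEL_REGION_LMEM region and that the object's backing
store has been pinned before mapping):

	struct drm_i915_gem_object *obj;
	void __iomem *vaddr;

	obj = i915_gem_object_create_lmem(i915, SZ_2M, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Only contiguous allocations may be mapped as one linear range. */
	vaddr = i915_gem_object_lmem_io_map(obj, 0, SZ_2M);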
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 05289edbafe3..e3002849844b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -8,6 +8,7 @@
#include <linux/sizes.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
@@ -249,16 +250,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_rpm;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
- ret = -EFAULT;
- goto err_unlock;
- }
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -285,10 +276,19 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
}
+
+ /* The entire mappable GGTT is pinned? Unexpected! */
+ GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err_unlock;
+ goto err_reset;
+ }
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
@@ -312,7 +312,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
- if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -326,8 +326,6 @@ err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
@@ -335,23 +333,20 @@ err_rpm:
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
- case -EIO:
- /*
- * We eat errors when the gpu is terminally wedged to avoid
- * userspace unduly crashing (gl has no provisions for mmaps to
- * fail). But any other -EIO isn't ours (e.g. swap in failure)
- * and so needs to be reported.
- */
- if (!intel_gt_is_wedged(ggtt->vm.gt))
- return VM_FAULT_SIGBUS;
- /* else, fall through */
- case -EAGAIN:
- /*
- * EAGAIN means the gpu is hung and we'll wait for the error
- * handler to reset everything when re-faulting in
- * i915_mutex_lock_interruptible.
- */
+ default:
+ WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+ /* fallthrough */
+ case -EIO: /* shmemfs failure from swap device */
+ case -EFAULT: /* purged object */
+ case -ENODEV: /* bad object, how did you get here! */
+ return VM_FAULT_SIGBUS;
+
+ case -ENOSPC: /* shmemfs allocation failure */
+ case -ENOMEM: /* our allocation failure */
+ return VM_FAULT_OOM;
+
case 0:
+ case -EAGAIN:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@@ -360,15 +355,6 @@ err:
* already did the job.
*/
return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -ENOSPC:
- case -EFAULT:
- case -ENODEV: /* bad object, how did you get here! */
- return VM_FAULT_SIGBUS;
- default:
- WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
- return VM_FAULT_SIGBUS;
}
}
@@ -439,6 +425,7 @@ out:
static int create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = &i915->gt;
int err;
err = drm_gem_create_mmap_offset(&obj->base);
@@ -446,21 +433,12 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
/* Attempt to reap some mmap space from dead objects */
- do {
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- break;
-
- i915_gem_drain_freed_objects(i915);
- err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
- break;
-
- } while (flush_delayed_work(&i915->gem.retire_work));
+ err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
- return err;
+ i915_gem_drain_freed_objects(i915);
+ return drm_gem_create_mmap_offset(&obj->base);
}
int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d7855dc5a5c5..a50296cce0d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -47,9 +47,10 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops)
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key)
{
- mutex_init(&obj->mm.lock);
+ __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -155,21 +156,30 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
trace_i915_gem_object_destroy(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
+ if (!list_empty(&obj->vma.list)) {
+ struct i915_vma *vma;
+
+ /*
+ * Note that the vma keeps an object reference while
+ * it is active, so it *should* not sleep while we
+ * destroy it. Our debug code errs and insists it *might*.
+ * For the moment, play along.
+ */
+ spin_lock(&obj->vma.lock);
+ while ((vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
+ GEM_BUG_ON(vma->obj != obj);
+ spin_unlock(&obj->vma.lock);
+
+ i915_vma_destroy(vma);
+
+ spin_lock(&obj->vma.lock);
+ }
+ spin_unlock(&obj->vma.lock);
}
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
-
- mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index ddf3605bea8e..458cd51331f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -23,12 +23,14 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops);
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key);
struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
- const void *data, size_t size);
+ const void *data, resource_size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
@@ -106,6 +108,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
dma_resv_lock(obj->base.resv, NULL);
}
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
+}
+
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
@@ -135,33 +142,58 @@ i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+ obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
+static inline bool
+i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
+ unsigned long flags)
+{
+ return obj->ops->flags & flags;
+}
+
+static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}
static inline bool
@@ -412,7 +444,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
- return obj->pin_global; /* currently in use by HW, keep flushed */
+ /* Currently in use by HW (display engine)? Keep flushed. */
+ return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -429,6 +462,5 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 6415f9a17e2d..70809d8897cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -8,6 +8,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
@@ -16,7 +17,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
u32 value)
{
struct drm_i915_private *i915 = ce->vm->i915;
- const u32 block_size = S16_MAX * PAGE_SIZE;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 offset;
@@ -29,10 +30,10 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
- count = div_u64(vma->size, block_size);
+ count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_pool_get(&ce->engine->pool, size);
+ pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -200,7 +201,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *dst)
{
struct drm_i915_private *i915 = ce->vm->i915;
- const u32 block_size = S16_MAX * PAGE_SIZE;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
@@ -213,10 +214,10 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
- count = div_u64(dst->size, block_size);
+ count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_pool_get(&ce->engine->pool, size);
+ pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
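Both hunks above replace a truncating division with a round-up so that a buffer whose size is not a multiple of block_size still gets a command for its final partial block. A small, self-contained illustration of the arithmetic (the helper name is made up for the example):

#include <stdint.h>
#include <stdio.h>

static uint64_t blocks_needed(uint64_t size, uint64_t block_size)
{
	/* round_up(size, block_size) / block_size, i.e. ceiling division */
	return (size + block_size - 1) / block_size;
}

int main(void)
{
	const uint64_t SZ_8M = 8ull << 20;

	/* plain division would say 1 block for 12MiB and lose 4MiB */
	printf("12M -> %llu blocks\n",
	       (unsigned long long)blocks_needed(12ull << 20, SZ_8M)); /* 2 */
	printf("16M -> %llu blocks\n",
	       (unsigned long long)blocks_needed(16ull << 20, SZ_8M)); /* 2 */
	return 0;
}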
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 646859fea224..96008374a412 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -8,6 +8,7 @@
#define __I915_GEM_OBJECT_TYPES_H__
#include <drm/drm_gem.h>
+#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
#include "i915_selftest.h"
@@ -30,10 +31,11 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -118,6 +120,11 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
+ unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
@@ -153,17 +160,30 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- /** Count of how many global VMA are currently pinned for use by HW */
- unsigned int pin_global;
struct {
struct mutex lock; /* protects the pages and their use */
atomic_t pages_pin_count;
+ atomic_t shrink_pin;
+
+ /**
+ * Memory region for this object.
+ */
+ struct intel_memory_region *region;
+ /**
+ * List of memory region blocks allocated for this object.
+ */
+ struct list_head blocks;
+ /**
+ * Element within region->objects.list or region->objects.purgeable
+ * if the object is marked as DONTNEED. Access is protected by
+ * region->objects.lock.
+ */
+ struct list_head region_link;
struct sg_table *pages;
void *mapping;
- /* TODO: whack some of this into the error state */
struct i915_page_sizes {
/**
* The sg mask of the pages sg_table. i.e the mask of
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 18f0ce0135c1..29f4c2850745 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -18,6 +19,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
+
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
obj->write_domain = 0;
@@ -71,6 +75,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
list = &i915->mm.shrink_list;
list_add_tail(&obj->mm.link, list);
+ atomic_set(&obj->mm.shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
@@ -150,6 +155,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+ if (i915_gem_object_is_lmem(obj))
+ io_mapping_unmap((void __force __iomem *)ptr);
+ else if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -159,17 +174,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
if (IS_ERR_OR_NULL(pages))
return pages;
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_WILLNEED;
+
i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
+ unmap_object(obj, page_mask_bits(obj->mm.mapping));
obj->mm.mapping = NULL;
}
@@ -224,7 +235,7 @@ unlock:
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -237,6 +248,16 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
pgprot_t pgprot;
void *addr;
+ if (i915_gem_object_is_lmem(obj)) {
+ void __iomem *io;
+
+ if (type != I915_MAP_WC)
+ return NULL;
+
+ io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+ return (void __force *)io;
+ }
+
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
@@ -278,11 +299,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
+ unsigned int flags;
bool pinned;
void *ptr;
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+ if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
err = mutex_lock_interruptible(&obj->mm.lock);
@@ -314,10 +337,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
+ unmap_object(obj, ptr);
ptr = obj->mm.mapping = NULL;
}
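The new unmap_object() helper encodes the rule that a mapping must be torn down by the routine matching how it was created: io_mapping_unmap() for lmem, vunmap() for vmapped ranges, kunmap() for single pages. A hedged userspace analogue of the same dispatch, with invented names and only two mapping flavours:

#include <stdlib.h>
#include <sys/mman.h>

enum map_type { MT_HEAP, MT_MMAP };

struct mapping {
	enum map_type type;
	void *ptr;
	size_t len;
};

static void unmap_mapping(struct mapping *m)
{
	switch (m->type) {
	case MT_HEAP:
		free(m->ptr);		/* cf. the vunmap()/kunmap() paths */
		break;
	case MT_MMAP:
		munmap(m->ptr, m->len);	/* cf. the io_mapping_unmap() path */
		break;
	}
	m->ptr = NULL;
}

int main(void)
{
	struct mapping m = { MT_HEAP, malloc(64), 64 };

	unmap_mapping(&m);
	return 0;
}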
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_gem_region.h"
#include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages))
+ if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+ i915_gem_object_release_memory_region(obj);
+ }
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index ad2a63dbcac2..f88ee1317bb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -7,138 +7,9 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
-#include "i915_globals.h"
-
-static void call_idle_barriers(struct intel_engine_cs *engine)
-{
- struct llist_node *node, *next;
-
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- struct i915_active_request *active =
- container_of((struct list_head *)node,
- typeof(*active), link);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, NULL);
- }
-}
-
-static void i915_gem_park(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id)
- call_idle_barriers(engine); /* cleanup after wedging */
-
- i915_vma_parked(i915);
-
- i915_globals_park();
-}
-
-static void idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.idle_work);
- bool park;
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- mutex_lock(&i915->drm.struct_mutex);
-
- intel_wakeref_lock(&i915->gt.wakeref);
- park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
- !work_pending(work));
- intel_wakeref_unlock(&i915->gt.wakeref);
- if (park)
- i915_gem_park(i915);
- else
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-static void retire_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.retire_work.work);
-
- /* Come back later if the device is busy... */
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- i915_retire_requests(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
-
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static int pm_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct drm_i915_private *i915 =
- container_of(nb, typeof(*i915), gem.pm_notifier);
-
- switch (action) {
- case INTEL_GT_UNPARK:
- i915_globals_unpark();
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
- break;
-
- case INTEL_GT_PARK:
- queue_work(i915->wq, &i915->gem.idle_work);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static bool switch_to_kernel_context_sync(struct intel_gt *gt)
-{
- bool result = !intel_gt_is_wedged(gt);
-
- do {
- if (i915_gem_wait_for_idle(gt->i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- /* XXX hide warning from gem_eio */
- if (i915_modparams.reset) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /*
- * Forcibly cancel outstanding work and leave
- * the gpu quiet.
- */
- intel_gt_set_wedged(gt);
- result = false;
- }
- } while (i915_retire_requests(gt->i915) && result);
-
- if (intel_gt_pm_wait_for_idle(gt))
- result = false;
-
- return result;
-}
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915)
-{
- return switch_to_kernel_context_sync(&i915->gt);
-}
void i915_gem_suspend(struct drm_i915_private *i915)
{
@@ -147,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
-
/*
* We have to flush all the executing contexts to main memory so
* that they can be saved in the hibernation image. To ensure the last
@@ -158,15 +27,9 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- switch_to_kernel_context_sync(&i915->gt);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- cancel_delayed_work_sync(&i915->gt.hangcheck.work);
+ intel_gt_suspend_prepare(&i915->gt);
i915_gem_drain_freed_objects(i915);
-
- intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -206,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
+ intel_gt_suspend_late(&i915->gt);
+
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
LIST_HEAD(keep);
@@ -230,18 +95,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
- mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- if (i915_gem_init_hw(i915))
+ if (intel_gt_init_hw(&i915->gt))
goto err_wedged;
/*
@@ -252,15 +114,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
if (intel_gt_resume(&i915->gt))
goto err_wedged;
- intel_uc_resume(&i915->gt.uc);
-
- /* Always reload a context for powersaving. */
- if (!i915_gem_load_power_context(i915))
- goto err_wedged;
-
out_unlock:
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
@@ -271,13 +126,3 @@ err_wedged:
}
goto out_unlock;
}
-
-void i915_gem_init__pm(struct drm_i915_private *i915)
-{
- INIT_WORK(&i915->gem.idle_work, idle_work_handler);
- INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
-
- i915->gem.pm_notifier.notifier_call = pm_notifier;
- blocking_notifier_chain_register(&i915->gt.pm_notifications,
- &i915->gem.pm_notifier);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 6f7d5d11ac3b..26b78dbdc225 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -12,9 +12,6 @@
struct drm_i915_private;
struct work_struct;
-void i915_gem_init__pm(struct drm_i915_private *i915);
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915);
void i915_gem_resume(struct drm_i915_private *i915);
void i915_gem_idle_work_handler(struct work_struct *work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index 000000000000..2f7bcfb9c964
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+ obj->mm.dirty = false;
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+ struct list_head *blocks = &obj->mm.blocks;
+ resource_size_t size = obj->base.size;
+ resource_size_t prev_end;
+ struct i915_buddy_block *block;
+ unsigned int flags;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ int ret;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ flags |= I915_ALLOC_CONTIGUOUS;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+ if (ret)
+ goto err_free_sg;
+
+ GEM_BUG_ON(list_empty(blocks));
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ prev_end = (resource_size_t)-1;
+
+ list_for_each_entry(block, blocks, link) {
+ u64 block_size, offset;
+
+ block_size = min_t(u64, size,
+ i915_buddy_block_size(&mem->mm, block));
+ offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(overflows_type(block_size, sg->length));
+
+ if (offset != prev_end ||
+ add_overflows_t(typeof(sg->length), sg->length, block_size)) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = block_size;
+
+ sg->length = block_size;
+
+ st->nents++;
+ } else {
+ sg->length += block_size;
+ sg_dma_len(sg) += block_size;
+ }
+
+ prev_end = offset + block_size;
+ }
+
+ sg_page_sizes |= sg->length;
+ sg_mark_end(sg);
+ i915_sg_trim(st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_free_sg:
+ sg_free_table(st);
+ kfree(st);
+ return ret;
+}
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&obj->mm.blocks);
+ obj->mm.region = intel_memory_region_get(mem);
+ obj->flags |= flags;
+
+ mutex_lock(&mem->objects.lock);
+
+ if (obj->flags & I915_BO_ALLOC_VOLATILE)
+ list_add(&obj->mm.region_link, &mem->objects.purgeable);
+ else
+ list_add(&obj->mm.region_link, &mem->objects.list);
+
+ mutex_unlock(&mem->objects.lock);
+}
+
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+
+ mutex_lock(&mem->objects.lock);
+ list_del(&obj->mm.region_link);
+ mutex_unlock(&mem->objects.lock);
+
+ intel_memory_region_put(mem);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * NB: Our use of resource_size_t for the size stems from using struct
+ * resource for the mem->region. We might need to revisit this in the
+ * future.
+ */
+
+ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
+ if (!mem)
+ return ERR_PTR(-ENODEV);
+
+ size = round_up(size, mem->min_page_size);
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ */
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = mem->ops->create_object(mem, size, flags);
+ if (!IS_ERR(obj))
+ trace_i915_gem_object_create(obj);
+
+ return obj;
+}
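The interesting part of i915_gem_object_get_pages_buddy() is the coalescing loop: buddy blocks that happen to be physically contiguous are merged into a single scatterlist segment, keeping st->nents small. A standalone model of that loop, assuming nothing beyond plain arrays (segment and coalesce are invented names):

#include <stdint.h>
#include <stdio.h>

struct block { uint64_t offset, size; };
struct segment { uint64_t start, len; };

static size_t coalesce(const struct block *b, size_t n, struct segment *out)
{
	size_t nseg = 0;
	uint64_t prev_end = UINT64_MAX;

	for (size_t i = 0; i < n; i++) {
		if (b[i].offset != prev_end) {
			out[nseg].start = b[i].offset; /* start a new segment */
			out[nseg].len = b[i].size;
			nseg++;
		} else {
			out[nseg - 1].len += b[i].size; /* extend the previous one */
		}
		prev_end = b[i].offset + b[i].size;
	}
	return nseg;
}

int main(void)
{
	const struct block b[] = { {0, 4096}, {4096, 4096}, {16384, 4096} };
	struct segment s[3];
	size_t n = coalesce(b, 3, s);

	/* prints two segments: [0, 8192) and [16384, 20480) */
	for (size_t i = 0; i < n; i++)
		printf("segment %zu: start=%llu len=%llu\n", i,
		       (unsigned long long)s[i].start,
		       (unsigned long long)s[i].len);
	return 0;
}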
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
new file mode 100644
index 000000000000..f2ff6f8bff74
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_REGION_H__
+#define __I915_GEM_REGION_H__
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+struct sg_table;
+
+int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
+void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..4d69c3fc3439 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
#include <linux/pagevec.h>
#include <linux/swap.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (page_count > totalram_pages())
+ if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
static void shmem_release(struct drm_i915_gem_object *obj)
{
+ i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
}
@@ -434,9 +439,9 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
};
-static int create_shmem(struct drm_i915_private *i915,
- struct drm_gem_object *obj,
- size_t size)
+static int __create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ resource_size_t size)
{
unsigned long flags = VM_NORESERVE;
struct file *filp;
@@ -455,31 +460,24 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
}
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+static struct drm_i915_gem_object *
+create_shmem(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- /* There is a prevalence of the assumption that we fit the object's
- * page count inside a 32bit _signed_ variable. Let's document this and
- * catch if we ever need to fix it. In the meantime, if you do spot
- * such a local variable, please consider fixing!
- */
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
- ret = create_shmem(i915, &obj->base, size);
+ ret = __create_shmem(i915, &obj->base, size);
if (ret)
goto fail;
@@ -494,7 +492,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -518,7 +516,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
i915_gem_object_set_cache_coherency(obj, cache_level);
- trace_i915_gem_object_create(obj);
+ i915_gem_object_init_memory_region(obj, mem, 0);
return obj;
@@ -527,14 +525,22 @@ fail:
return ERR_PTR(ret);
}
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ size, 0);
+}
+
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size)
+ const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct file *file;
- size_t offset;
+ resource_size_t offset;
int err;
obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
@@ -577,3 +583,35 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
+
+static int init_shmem(struct intel_memory_region *mem)
+{
+ int err;
+
+ err = i915_gemfs_init(mem->i915);
+ if (err) {
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
+ err);
+ }
+
+ return 0; /* Don't error, we can simply fall back to the kernel mnt */
+}
+
+static void release_shmem(struct intel_memory_region *mem)
+{
+ i915_gemfs_fini(mem->i915);
+}
+
+static const struct intel_memory_region_ops shmem_region_ops = {
+ .init = init_shmem,
+ .release = release_shmem,
+ .create_object = create_shmem,
+};
+
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915, 0,
+ totalram_pages() << PAGE_SHIFT,
+ PAGE_SIZE, 0,
+ &shmem_region_ops);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index edd21d14e64f..f2418a1cfe68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -16,40 +16,6 @@
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915,
- unsigned int flags,
- bool *unlock)
-{
- struct mutex *m = &i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(m)) {
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
-
- case MUTEX_TRYLOCK_FAILED:
- *unlock = false;
- if (flags & I915_SHRINK_ACTIVE &&
- mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
- *unlock = true;
- return *unlock;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
- }
-
- BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
- if (!unlock)
- return;
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -61,7 +27,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (!i915_gem_object_is_shrinkable(obj))
return false;
- /* Only report true if by unbinding the object and putting its pages
+ /*
+ * Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
@@ -72,16 +39,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
- /* If any vma are "permanently" pinned, it will prevent us from
- * reclaiming the obj->mm.pages. We only allow scanout objects to claim
- * a permanent pin, along with a few others like the context objects.
- * To simplify the scan, and to avoid walking the list of vma under the
- * object, we just check the count of its permanently pinned.
- */
- if (READ_ONCE(obj->pin_global))
- return false;
-
- /* We can only return physical pages to the system if we can either
+ /*
+ * We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
@@ -162,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- bool unlock;
-
- if (!shrinker_lock(i915, shrink, &unlock))
- return 0;
/*
* When shrinking the active list, we should also consider active
@@ -275,8 +230,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- shrinker_unlock(i915, unlock);
-
if (nr_scanned)
*nr_scanned += scanned;
return count;
@@ -346,19 +299,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
- bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, 0, &unlock))
- return SHRINK_STOP;
-
freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_WRITEBACK);
+ I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@@ -373,8 +321,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
- shrinker_unlock(i915, unlock);
-
return sc->nr_scanned ? freed : SHRINK_STOP;
}
@@ -391,6 +337,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -426,10 +373,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
- bool unlock;
-
- if (!shrinker_lock(i915, 0, &unlock))
- return NOTIFY_DONE;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -446,15 +389,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!vma->iomap || i915_vma_is_active(vma))
continue;
- mutex_unlock(&i915->ggtt.vm.mutex);
- if (i915_vma_unbind(vma) == 0)
+ if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
- mutex_lock(&i915->ggtt.vm.mutex);
}
mutex_unlock(&i915->ggtt.vm.mutex);
- shrinker_unlock(i915, unlock);
-
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
@@ -497,72 +436,65 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_acquire(GFP_KERNEL);
- /*
- * As we invariably rely on the struct_mutex within the shrinker,
- * but have a complicated recursion dance, taint all the mutexes used
- * within the shrinker with the struct_mutex. For completeness, we
- * taint with all subclass of struct_mutex, even though we should
- * only need tainting by I915_MM_NORMAL to catch possible ABBA
- * deadlocks from using struct_mutex inside @mutex.
- */
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_SHRINKER, 0, _RET_IP_);
-
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
- mutex_release(&mutex->dep_map, 0, _RET_IP_);
-
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+ mutex_release(&mutex->dep_map, _RET_IP_);
fs_reclaim_release(GFP_KERNEL);
if (unlock)
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+ mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
}
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
/*
* We can only be called while the pages are pinned or when
* the pages are released. If pinned, we should only be called
* from a single caller under controlled conditions; and on release
* only one caller may release us; the two paths must never cross.
*/
- if (!list_empty(&obj->mm.link)) { /* pinned by caller */
- struct drm_i915_private *i915 = obj_to_i915(obj);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- GEM_BUG_ON(list_empty(&obj->mm.link));
+ if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+ return;
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+ !list_empty(&obj->mm.link)) {
list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
struct list_head *head)
{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- GEM_BUG_ON(!list_empty(&obj->mm.link));
+ if (!i915_gem_object_is_shrinkable(obj))
+ return;
- if (i915_gem_object_is_shrinkable(obj)) {
- struct drm_i915_private *i915 = obj_to_i915(obj);
- unsigned long flags;
+ if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+ return;
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
list_add_tail(&obj->mm.link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
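make_unshrinkable/make_shrinkable above replace the old list_empty() check with a shrink_pin counter: the lockless fast path only bumps the count when it is already non-zero, and obj_lock is taken solely for the 0 <-> 1 transitions that move the object on or off the shrink lists. A C11-atomics sketch of the fast path, under the assumption that the slow path (not shown) takes the lock and does the list manipulation:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int shrink_pin;

/*
 * Like atomic_add_unless(&obj->mm.shrink_pin, 1, 0): increment only if
 * the count is already non-zero. Returns false when the caller must
 * take the list lock and handle the 0 -> 1 transition itself.
 */
static bool make_unshrinkable_fast(void)
{
	int old = atomic_load(&shrink_pin);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&shrink_pin, &old, old + 1))
			return true; /* already off the shrink list */
	}
	return false;
}

int main(void)
{
	return make_unshrinkable_fast(); /* 0: the slow path would be needed */
}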
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index aa533b4ab5f5..a2d49c04e6a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -425,8 +426,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- case 11:
default:
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ /* fall-through */
+ case 11:
+ case 12:
icl_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -536,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+ if (obj->mm.region)
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -545,65 +552,116 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *stolen,
+ struct intel_memory_region *mem)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
+ int err = -ENOMEM;
obj = i915_gem_object_alloc();
- if (obj == NULL)
- return NULL;
+ if (!obj)
+ goto err;
drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- if (i915_gem_object_pin_pages(obj))
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
goto cleanup;
+ if (mem)
+ i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
cleanup:
i915_gem_object_free(obj);
- return NULL;
+err:
+ return ERR_PTR(err);
}
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
+ return ERR_PTR(-ENODEV);
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
- kfree(stolen);
- return NULL;
+ obj = ERR_PTR(ret);
+ goto err_free;
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj)
- return obj;
+ obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
+ if (IS_ERR(obj))
+ goto err_remove;
+ return obj;
+
+err_remove:
i915_gem_stolen_remove_node(dev_priv, stolen);
+err_free:
kfree(stolen);
- return NULL;
+ return obj;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+ size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+ /*
+ * Initialise stolen early so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+ i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+ .init = init_stolen,
+ .release = release_stolen,
+ .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ PAGE_SIZE, 0,
+ &i915_region_stolen_ops);
}
struct drm_i915_gem_object *
@@ -619,9 +677,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
&stolen_offset, &gtt_offset, &size);
@@ -630,11 +686,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
stolen->start = stolen_offset;
stolen->size = size;
@@ -644,15 +700,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
- return NULL;
+ return ERR_PTR(ret);
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj == NULL) {
+ obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
+ if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
- return NULL;
+ return obj;
}
/* Some objects just need physical mem from stolen space */
@@ -674,22 +730,26 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
+ mutex_unlock(&ggtt->vm.mutex);
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->pages);
vma->pages = obj->mm.pages;
- vma->flags |= I915_VMA_GLOBAL_BIND;
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+
+ set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
__i915_vma_set_map_and_fenceable(vma);
- mutex_lock(&ggtt->vm.mutex);
- list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+ list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
@@ -701,5 +761,5 @@ err_pages:
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
- return NULL;
+ return ERR_PTR(ret);
}
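A large part of this stolen-memory rework is mechanical: every NULL return becomes an ERR_PTR() so that callers can tell -ENODEV (no stolen region) from -ENOMEM or a reservation failure. For reference, a minimal freestanding rendition of the kernel's pointer-error encoding, in which values within the last page of the address space are treated as errnos:

#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-ENODEV);

	return IS_ERR(p) ? (int)-PTR_ERR(p) : 0; /* exits with ENODEV */
}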
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 2289644d8604..c1040627fbf3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -21,8 +21,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 1e372420771b..540ef0551789 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- if (target) {
+ if (target && xchg(&target->file_priv, NULL))
list_del(&target->client_link);
- target->file_priv = NULL;
- }
target = request;
}
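The xchg() above turns the unlink into a single atomic claim: only the thread that swaps file_priv from non-NULL to NULL performs the list_del(), so a racing release path cannot unlink the request twice. A C11 sketch of the same hand-off (the struct and variable names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct file_priv;

static _Atomic(struct file_priv *) request_owner;

/* Returns true for exactly one caller, however many threads race here. */
static bool claim_request(void)
{
	return atomic_exchange(&request_owner, NULL) != NULL;
}

int main(void)
{
	return claim_request(); /* 0 here: nothing was ever published */
}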
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ca0c2f451742..1fa592d82af5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -181,22 +181,25 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma;
- int ret;
+ int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
+ mutex_lock(&ggtt->vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&ggtt->vm.mutex);
- return 0;
+ return ret;
}
int
@@ -212,7 +215,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
- lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
@@ -233,16 +235,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
- err = i915_gem_object_fence_prepare(obj, tiling, stride);
- if (err)
- return err;
-
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ return err;
+ }
+
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@@ -313,10 +317,14 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -340,9 +348,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
else
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -364,12 +372,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
}
}
- err = mutex_lock_interruptible(&dev->struct_mutex);
- if (err)
- goto err;
-
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
- mutex_unlock(&dev->struct_mutex);
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
@@ -402,6 +405,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {
@@ -415,10 +421,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 6b3b50f0f6d9..4c72d74d6576 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
- struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
@@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
- if (!unlock) {
- unlock = &mn->mm->i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(unlock)) {
- default:
- case MUTEX_TRYLOCK_FAILED:
- if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
- i915_gem_object_put(obj);
- return -EINTR;
- }
- /* fall through */
- case MUTEX_TRYLOCK_SUCCESS:
- break;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- unlock = ERR_PTR(-EEXIST);
- break;
- }
- }
-
ret = i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
- goto unlock;
+ return ret;
spin_lock(&mn->lock);
@@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
-unlock:
- if (!IS_ERR_OR_NULL(unlock))
- mutex_unlock(unlock);
-
return ret;
}
@@ -671,8 +646,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
+ if (obj->mm.dirty && trylock_page(page)) {
+ /*
+ * As this may not be anonymous memory (e.g. shmem)
+ * but may instead exist on a real mapping, we have
+ * to lock the page in order to dirty it -- holding
+ * the page reference is not sufficient to
+ * prevent the inode from being truncated.
+ * Play safe and take the lock.
+ *
+ * However...!
+ *
+ * The mmu-notifier can be invalidated for a
+ * migrate_page that is already holding the lock
+ * on the page. Such a try_to_unmap() will result
+ * in us calling put_pages() and so recursively
+ * trying to lock the page. We avoid that deadlock
+ * with a trylock_page() and in exchange we risk
+ * missing some page dirtying.
+ */
set_page_dirty(page);
+ unlock_page(page);
+ }
mark_page_accessed(page);
put_page(page);
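The trylock_page() compromise documented in the comment trades a potential self-deadlock (the notifier can fire while our own call chain already holds the page lock) for occasionally missed dirtying. A userspace sketch of that trade-off with a pthread mutex:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dirty;

static void mark_dirty_if_possible(void)
{
	if (pthread_mutex_trylock(&page_lock) == 0) {
		dirty = true;		/* safe: we own the lock */
		pthread_mutex_unlock(&page_lock);
	}
	/* else: the lock is held, possibly by us higher in the stack;
	 * skip the dirtying rather than deadlock. */
}

int main(void)
{
	mark_dirty_if_possible();
	return dirty ? 0 : 1;
}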
@@ -750,6 +745,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
@@ -783,7 +779,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- vm = dev_priv->kernel_context->vm;
+ vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
+ true); /* static vm */
if (!vm || !vm->has_read_only)
return -ENODEV;
}
@@ -793,7 +790,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 3c5d17b2b670..892d12db6c49 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -96,6 +96,7 @@ huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
dma_addr_t dma_size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -111,7 +112,7 @@ huge_gem_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops);
+ i915_gem_object_init(obj, &huge_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 8de83c6d81f5..688c49a24f32 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -8,6 +8,8 @@
#include "i915_selftest.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
@@ -17,6 +19,7 @@
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
+#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
@@ -113,8 +116,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
- obj->mm.madv = I915_MADV_DONTNEED;
-
GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -135,7 +136,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
huge_pages_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -150,6 +150,7 @@ huge_pages_object(struct drm_i915_private *i915,
u64 size,
unsigned int page_mask)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -166,7 +167,9 @@ huge_pages_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops);
+ i915_gem_object_init(obj, &huge_page_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -227,8 +230,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -261,8 +262,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
sg_dma_address(sg) = page_size;
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
@@ -281,7 +280,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
{
fake_free_huge_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -299,6 +297,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = {
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -317,9 +316,11 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
if (single)
- i915_gem_object_init(obj, &fake_ops_single);
+ i915_gem_object_init(obj, &fake_ops_single, &lock_class);
else
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -333,7 +334,12 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
- int err = 0;
+ int err;
+
+ /* We have to wait for the async bind to complete before our asserts */
+ err = i915_vma_sync(vma);
+ if (err)
+ return err;
if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
@@ -447,6 +453,88 @@ out_device:
return err;
}
+static int igt_mock_memory_region_huge_pages(void *arg)
+{
+ const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
+ struct i915_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int bit;
+ int err = 0;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("%s failed to create memory region\n", __func__);
+ return PTR_ERR(mem);
+ }
+
+ for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ unsigned int page_size = BIT(bit);
+ resource_size_t phys;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+ obj = i915_gem_object_create_region(mem, page_size,
+ flags[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_region;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ phys = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(phys, page_size)) {
+ pr_err("%s addr misaligned(%pa) page_size=%u\n",
+ __func__, &phys, page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+ __func__, vma->page_sizes.gtt,
+ page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_region;
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_region:
+ intel_memory_region_put(mem);
+ return err;
+}
+
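The selftest walks every supported page size by iterating the set bits of the page-size mask, allocating one object per size and per flag combination. A plain-C version of that bit walk, standing in for for_each_set_bit():

#include <stdio.h>

int main(void)
{
	/* 4K, 64K and 2M, expressed as a mask of page-size exponents */
	unsigned long supported = (1ul << 12) | (1ul << 16) | (1ul << 21);

	for (unsigned int bit = 0; bit < 8 * sizeof(supported); bit++)
		if (supported & (1ul << bit))
			printf("page size: %lu\n", 1ul << bit);
	return 0;
}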
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
@@ -879,9 +967,8 @@ out_object_put:
return err;
}
-static int gpu_write(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_write(struct intel_context *ce,
+ struct i915_vma *vma,
u32 dw,
u32 val)
{
@@ -893,11 +980,12 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+ return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
vma->size >> PAGE_SHIFT, val);
}
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int
+__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
unsigned int needs_flush;
unsigned long n;
@@ -929,18 +1017,61 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static int __igt_write_huge(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ if (i915_gem_object_has_struct_page(obj))
+ return __cpu_check_shmem(obj, dword, val);
+ else if (i915_gem_object_is_lmem(obj))
+ return __cpu_check_lmem(obj, dword, val);
+
+ return -ENODEV;
+}
+
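
cpu_check() now dispatches on the object's backing store: shmem-backed objects have struct pages and can be kmap'd, lmem objects must be read through an io mapping, and anything else is refused. A sketch of that dispatch shape under stub predicates (nothing here is i915 API):

#include <errno.h>
#include <stdbool.h>

struct object { bool has_struct_page; bool is_lmem; };

static int check_shmem(struct object *o) { (void)o; return 0; } /* kmap + memcmp path */
static int check_lmem(struct object *o)  { (void)o; return 0; } /* io-map + ioread32 path */

static int cpu_check(struct object *o)
{
        if (o->has_struct_page)
                return check_shmem(o);
        else if (o->is_lmem)
                return check_lmem(o);

        return -ENODEV; /* no CPU-readable backing store we understand */
}

int main(void)
{
        struct object lmem_obj = { .has_struct_page = false, .is_lmem = true };
        return cpu_check(&lmem_obj);
}
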
+static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -954,7 +1085,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
* The ggtt may have some pages reserved so
* refrain from erroring out.
*/
- if (err == -ENOSPC && i915_is_ggtt(vm))
+ if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
goto out_vma_close;
@@ -964,7 +1095,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
if (err)
goto out_vma_unpin;
- err = gpu_write(vma, ctx, engine, dword, val);
+ err = gpu_write(ce, vma, dword, val);
if (err) {
pr_err("gpu-write failed at offset=%llx\n", offset);
goto out_vma_unpin;
@@ -987,14 +1118,13 @@ out_vma_close:
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
- static struct intel_engine_cs *engines[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
- unsigned int id;
+ unsigned int count;
u64 max;
u64 num;
u64 size;
@@ -1008,19 +1138,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
- max = div_u64((vm->total - size), max_page_size);
-
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
+ count = 0;
+ max = U64_MAX;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- }
- engines[n++] = engine;
- }
+ max = min(max, ce->vm->total);
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
if (!n)
return 0;
@@ -1029,23 +1158,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
* randomized order, let's also make feeding to the same engine a few
* times in succession a possibility by enlarging the permutation array.
*/
- order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+ order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64(max - size, max_page_size);
+
/*
* Try various offsets in an ascending/descending fashion until we
* timeout -- we want to avoid issues hidden by effectively always using
* offset = 0.
*/
i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
for_each_prime_number_from(num, 0, max) {
u64 offset_low = num * max_page_size;
u64 offset_high = (max - num) * max_page_size;
u32 dword = offset_in_page(num) / 4;
+ struct intel_context *ce;
- engine = engines[order[i] % n];
- i = (i + 1) % (n * I915_NUM_ENGINES);
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
/*
* In order to utilize 64K pages we need to both pad the vma
@@ -1057,22 +1193,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
offset_low = round_down(offset_low,
I915_GTT_PAGE_SIZE_2M);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ err = __igt_write_huge(ce, obj, size, offset_low,
dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ err = __igt_write_huge(ce, obj, size, offset_high,
dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
- "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high,
+ "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+ __func__, ce->engine->name, offset_low, offset_high,
max_page_size))
break;
}
+ i915_gem_context_unlock_engines(ctx);
kfree(order);
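
With per-context engine sets replacing the static engines[] array, the permutation grows to count * count entries, and max shrinks to the smallest vm->total across the context's engines before being converted into a page count. The offset walk itself is unchanged: each prime num probes from both ends of the address space. A small sketch of that pairing, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* made-up stand-ins for vm->total, object size and sg page size */
        uint64_t max_page_size = 1ULL << 21;            /* 2M */
        uint64_t vm_total = 1ULL << 32;                 /* 4G */
        uint64_t size = 1ULL << 22;                     /* 4M object */
        uint64_t max = (vm_total - size) / max_page_size;
        uint64_t num;

        /* probe from both ends of the address space, as igt_write_huge() does */
        for (num = 0; num < 5; num++) {
                uint64_t offset_low = num * max_page_size;
                uint64_t offset_high = (max - num) * max_page_size;

                printf("low=%#llx high=%#llx\n",
                       (unsigned long long)offset_low,
                       (unsigned long long)offset_high);
        }
        return 0;
}
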
@@ -1180,131 +1317,235 @@ out_device:
return err;
}
-static int igt_ppgtt_internal_huge(void *arg)
+typedef struct drm_i915_gem_object *
+(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static struct drm_i915_gem_object *
+igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("%s missing THP support, skipping\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return i915_gem_object_create_shmem(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_internal(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return huge_pages_object(i915, size, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_lmem(i915, size, flags);
+}
+
+static u32 igt_random_size(struct rnd_state *prng,
+ u32 min_page_size,
+ u32 max_page_size)
+{
+ u64 mask;
+ u32 size;
+
+ GEM_BUG_ON(!is_power_of_2(min_page_size));
+ GEM_BUG_ON(!is_power_of_2(max_page_size));
+ GEM_BUG_ON(min_page_size < PAGE_SIZE);
+ GEM_BUG_ON(min_page_size > max_page_size);
+
+ mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
+ size = prandom_u32_state(prng) & mask;
+ if (size < min_page_size)
+ size |= min_page_size;
+
+ return size;
+}
+
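
igt_random_size() is worth unpacking: the mask keeps every bit up to and including max_page_size's bit (so results may reach just under 2 * max_page_size), PAGE_MASK clears the sub-page bits, and OR-ing in min_page_size guarantees the floor. A standalone rendering of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static uint32_t random_size(uint32_t rnd, uint32_t min_sz, uint32_t max_sz)
{
        uint64_t mask = (((uint64_t)max_sz << 1) - 1) & PAGE_MASK;
        uint32_t size = rnd & mask;

        if (size < min_sz)
                size |= min_sz;

        return size;
}

int main(void)
{
        /* min=64K, max=2M: mask = 0x3ff000, so results are page-aligned
         * and land in [64K, 4M) */
        printf("%#x\n", random_size(0xdeadbeef, 1u << 16, 1u << 21));
        return 0;
}
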
+static int igt_ppgtt_smoke_huge(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_64K,
- SZ_128K,
- SZ_256K,
- SZ_512K,
- SZ_1M,
- SZ_2M,
+ I915_RND_STATE(prng);
+ struct {
+ igt_create_fn fn;
+ u32 min;
+ u32 max;
+ } backends[] = {
+ { igt_create_internal, SZ_64K, SZ_2M, },
+ { igt_create_shmem, SZ_64K, SZ_32M, },
+ { igt_create_local, SZ_64K, SZ_1G, },
};
- int i;
int err;
+ int i;
/*
- * Sanity check that the HW uses huge pages correctly through internal
- * -- ensure that our writes land in the right place.
+ * Sanity check that the HW uses huge pages correctly through our
+ * various backends -- ensure that our writes land in the right place.
*/
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ u32 min = backends[i].min;
+ u32 max = backends[i].max;
+ u32 size = max;
+try_again:
+ size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ obj = backends[i].fn(i915, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -E2BIG) {
+ size >>= 1;
+ goto try_again;
+ } else if (err == -ENODEV) {
+ err = 0;
+ continue;
+ }
+
+ return err;
+ }
err = i915_gem_object_pin_pages(obj);
- if (err)
+ if (err) {
+ if (err == -ENXIO) {
+ i915_gem_object_put(obj);
+ size >>= 1;
+ goto try_again;
+ }
goto out_put;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
- pr_info("internal unable to allocate huge-page(s) with size=%u\n",
- size);
+ if (obj->mm.page_sizes.phys < min) {
+ pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
+ __func__, size, i);
+ err = -ENOMEM;
goto out_unpin;
}
err = igt_write_huge(ctx, obj);
if (err) {
- pr_err("internal write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ pr_err("%s write-huge failed with size=%u, i=%d\n",
+ __func__, size, i);
}
-
+out_unpin:
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+out_put:
i915_gem_object_put(obj);
- }
- return 0;
+ if (err == -ENOMEM || err == -ENXIO)
+ err = 0;
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+ if (err)
+ break;
- return err;
-}
+ cond_resched();
+ }
-static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
-{
- return i915->mm.gemfs && has_transparent_hugepage();
+ return err;
}
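
The smoke test's retry shape is the interesting part: -E2BIG from object creation or -ENXIO from pinning means the backing store cannot satisfy this size, so it halves and tries again, while -ENODEV just skips a backend the platform lacks. A generic rendering of the back-off loop, with a toy allocator:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* toy allocator that refuses anything over 1M */
static int try_alloc(uint32_t size)
{
        return size > (1u << 20) ? -E2BIG : 0;
}

int main(void)
{
        uint32_t size = 1u << 24;       /* start at 16M */
        int err;

        while ((err = try_alloc(size)) == -E2BIG)
                size >>= 1;             /* halve until the backend accepts */

        printf("settled on size=%u, err=%d\n", size, err);
        return 0;
}
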
-static int igt_ppgtt_gemfs_huge(void *arg)
+static int igt_ppgtt_sanity_check(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_2M,
- SZ_4M,
- SZ_8M,
- SZ_16M,
- SZ_32M,
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct {
+ igt_create_fn fn;
+ unsigned int flags;
+ } backends[] = {
+ { igt_create_system, 0, },
+ { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
- int i;
+ struct {
+ u32 size;
+ u32 pages;
+ } combos[] = {
+ { SZ_64K, SZ_64K },
+ { SZ_2M, SZ_2M },
+ { SZ_2M, SZ_64K },
+ { SZ_2M - SZ_64K, SZ_64K },
+ { SZ_2M - SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_2M | SZ_4K },
+ { SZ_2M + SZ_64K, SZ_2M | SZ_64K },
+ };
+ int i, j;
int err;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
/*
- * Sanity check that the HW uses huge pages correctly through gemfs --
- * ensure that our writes land in the right place.
+ * Sanity check that the HW behaves with a limited set of combinations.
+ * We already have a bunch of randomised testing, which should give us
+ * a decent amount of variation between runs, however we should keep
+ * this to limit the chances of introducing a temporary regression, by
+ * testing the most obvious cases that might make something blow up.
*/
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- return 0;
- }
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ for (j = 0; j < ARRAY_SIZE(combos); ++j) {
+ struct drm_i915_gem_object *obj;
+ u32 size = combos[j].size;
+ u32 pages = combos[j].pages;
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ obj = backends[i].fn(i915, size, backends[i].flags);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -ENODEV) {
+ pr_info("Device lacks local memory, skipping\n");
+ err = 0;
+ break;
+ }
- obj = i915_gem_object_create_shmem(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return err;
+ }
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
- pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
- size);
- goto out_unpin;
- }
+ GEM_BUG_ON(pages > obj->base.size);
+ pages = pages & supported;
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("gemfs write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ if (pages)
+ obj->mm.page_sizes.sg = pages;
+
+ err = igt_write_huge(ctx, obj);
+
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+
+ if (err) {
+ pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
+ __func__, size, pages, i, j);
+ goto out;
+ }
}
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
+ cond_resched();
}
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+out:
+ if (err == -ENOMEM)
+ err = 0;
return err;
}
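
Note how the sanity check force-feeds page layouts: the combo's pages mask is clamped to the platform's supported sizes before being written into obj->mm.page_sizes.sg, so a combo never claims a layout the HW lacks. In isolation the clamp looks like this (SZ_* values written out by hand):

#include <stdio.h>

#define SZ_4K  0x1000u
#define SZ_64K 0x10000u
#define SZ_2M  0x200000u

int main(void)
{
        unsigned int supported = SZ_4K | SZ_64K;        /* hypothetical HW mask */
        unsigned int pages = SZ_2M | SZ_64K;            /* combo under test */

        pages &= supported;     /* 2M falls away on this platform */
        printf("effective sg mask=%#x\n", pages);       /* -> 0x10000 */
        return 0;
}
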
@@ -1314,15 +1555,15 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
unsigned int n;
int first, last;
- int err;
+ int err = 0;
/*
* Make sure there's no funny business when doing a PIN_UPDATE -- in the
@@ -1332,9 +1573,10 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!vm || !i915_vm_is_4lvl(vm)) {
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
- return 0;
+ goto out_vm;
}
first = ilog2(I915_GTT_PAGE_SIZE_64K);
@@ -1387,7 +1629,7 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
}
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
@@ -1419,14 +1661,18 @@ static int igt_ppgtt_pin_update(void *arg)
*/
n = 0;
- for_each_engine(engine, dev_priv, id) {
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
+ i915_gem_context_unlock_engines(ctx);
+ if (err)
+ goto out_unpin;
+
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
@@ -1439,6 +1685,8 @@ out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1448,7 +1696,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1498,6 +1746,7 @@ out_put:
out_restore:
i915->mm.gemfs = gemfs;
+ i915_vm_put(vm);
return err;
}
@@ -1505,14 +1754,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
unsigned int n;
- int err;
+ int err = 0;
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
@@ -1521,12 +1770,14 @@ static int igt_shrink_thp(void *arg)
if (!igt_can_allocate_thp(i915)) {
pr_info("missing THP support, skipping\n");
- return 0;
+ goto out_vm;
}
obj = i915_gem_object_create_shmem(i915, SZ_2M);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@@ -1548,16 +1799,19 @@ static int igt_shrink_thp(void *arg)
goto out_unpin;
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
-
+ i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
+ if (err)
+ goto out_close;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@@ -1583,16 +1837,17 @@ static int igt_shrink_thp(void *arg)
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
-
out_unpin:
i915_vma_unpin(vma);
out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1601,6 +1856,7 @@ int i915_gem_huge_page_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
@@ -1617,7 +1873,6 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
@@ -1643,9 +1898,7 @@ out_close:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
drm_dev_put(&dev_priv->drm);
-
return err;
}
@@ -1656,12 +1909,12 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_exhaust_huge),
- SUBTEST(igt_ppgtt_gemfs_huge),
- SUBTEST(igt_ppgtt_internal_huge),
+ SUBTEST(igt_ppgtt_smoke_huge),
+ SUBTEST(igt_ppgtt_sanity_check),
};
struct drm_file *file;
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
+ struct i915_address_space *vm;
int err;
if (!HAS_PPGTT(i915)) {
@@ -1676,25 +1929,21 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- if (ctx->vm)
- ctx->vm->scrub_64K = true;
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->scrub_64K, true);
+ mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
mock_file_free(i915, file);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index d8804a847945..da8edee4fe0a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
#include "i915_selftest.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
@@ -12,10 +13,9 @@
#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_client_fill(void *arg)
+static int __igt_client_fill(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -37,7 +37,7 @@ static int igt_client_fill(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- obj = huge_gem_object(i915, phys_sz, sz);
+ obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -103,6 +103,28 @@ err_flush:
return err;
}
+static int igt_client_fill(void *arg)
+{
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ int err;
+
+ engine = intel_engine_lookup_user(arg,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ err = __igt_client_fill(engine);
+ if (err == -ENOMEM)
+ err = 0;
+ if (err)
+ return err;
+ } while (1);
+}
+
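
The wrapper replaces the hard-coded BCS0 lookup with a loop over every user-visible copy-engine instance; a NULL return from the lookup terminates it. The enumeration pattern, sketched against a stand-in lookup (class 3 playing the copy class):

#include <stdio.h>

struct engine { int class_id, instance; };

/* stand-in for intel_engine_lookup_user(i915, class, instance) */
static struct engine *lookup(struct engine *e, int n, int class_id, int instance)
{
        for (int i = 0; i < n; i++)
                if (e[i].class_id == class_id && e[i].instance == instance)
                        return &e[i];
        return NULL;    /* one past the last instance ends the caller's loop */
}

int main(void)
{
        struct engine engines[] = { { 3, 0 }, { 3, 1 } };       /* two copy engines */
        struct engine *e;
        int inst = 0;

        while ((e = lookup(engines, 2, 3, inst++)))
                printf("running fill on copy engine %d\n", e->instance);
        return 0;
}
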
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 0ff7a89aadca..2b29f6b4e1dd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,13 +7,18 @@
#include <linux/prime_numbers.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
-static int cpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+struct context {
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+};
+
+static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
@@ -21,11 +26,11 @@ static int cpu_set(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_write(obj, &needs_clflush);
+ err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -38,14 +43,12 @@ static int cpu_set(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int cpu_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
@@ -53,11 +56,11 @@ static int cpu_get(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_read(obj, &needs_clflush);
+ err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -67,136 +70,137 @@ static int cpu_get(struct drm_i915_gem_object *obj,
*v = *cpu;
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int gtt_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int gtt_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
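
Both GGTT accessors now hold a GT power reference for the lifetime of the io mapping, and the failure path funnels through out_rpm so the wakeref can never leak. The acquire/use/release shape, reduced to stubs:

#include <errno.h>
#include <stdio.h>

static void power_get(void) { }         /* stand-ins for intel_gt_pm_get/put() */
static void power_put(void) { }
static void *map_io(void)   { return NULL; }    /* pretend the mapping fails */

static int gtt_poke(void)
{
        int err = 0;
        void *map;

        power_get();            /* keep the device awake across the access */

        map = map_io();
        if (!map) {
                err = -ENOMEM;
                goto out_rpm;   /* single exit: the wakeref is always dropped */
        }

        /* ... iowrite32()/ioread32() through map ... */
out_rpm:
        power_put();
        return err;
}

int main(void)
{
        printf("gtt_poke() = %d\n", gtt_poke());
        return 0;
}
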
-static int wc_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int wc_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
*v = map[offset / sizeof(*map)];
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int gpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
+ rq = i915_request_create(ctx->engine->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -209,12 +213,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return PTR_ERR(cs);
}
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(ctx->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(ctx->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
@@ -239,32 +243,34 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return err;
}
-static bool always_valid(struct drm_i915_private *i915)
+static bool always_valid(struct context *ctx)
{
return true;
}
-static bool needs_fence_registers(struct drm_i915_private *i915)
+static bool needs_fence_registers(struct context *ctx)
{
- return !intel_gt_is_wedged(&i915->gt);
-}
+ struct intel_gt *gt = ctx->engine->gt;
-static bool needs_mi_store_dword(struct drm_i915_private *i915)
-{
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(gt))
return false;
- if (!HAS_ENGINE(i915, RCS0))
+ return gt->ggtt->num_fences;
+}
+
+static bool needs_mi_store_dword(struct context *ctx)
+{
+ if (intel_gt_is_wedged(ctx->engine->gt))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS0]);
+ return intel_engine_can_store_dword(ctx->engine);
}
static const struct igt_coherency_mode {
const char *name;
- int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
- int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
- bool (*valid)(struct drm_i915_private *i915);
+ int (*set)(struct context *ctx, unsigned long offset, u32 v);
+ int (*get)(struct context *ctx, unsigned long offset, u32 *v);
+ bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
{ "gtt", gtt_set, gtt_get, needs_fence_registers },
@@ -273,19 +279,37 @@ static const struct igt_coherency_mode {
{ },
};
+static struct intel_engine_cs *
+random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ for_each_uabi_engine(engine, i915)
+ if (count-- == 0)
+ return engine;
+
+ return NULL;
+}
+
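
random_engine() makes a uniform pick from an iterator with no random access: one pass to count, one draw, one pass to walk to the chosen element. The same two-pass trick over a plain array (rand() standing in for i915_prandom_u32_max_state()):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *engines[] = { "rcs0", "bcs0", "vcs0" };
        unsigned int count = 0, pick, i;

        for (i = 0; i < 3; i++)         /* first pass: count the elements */
                count++;

        pick = rand() % count;          /* draw an index in [0, count) */

        for (i = 0; i < 3; i++) {       /* second pass: walk to it */
                if (pick-- == 0) {
                        printf("using %s\n", engines[i]);
                        break;
                }
        }
        return 0;
}
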
static int igt_gem_coherency(void *arg)
{
const unsigned int ncachelines = PAGE_SIZE/64;
- I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
+ I915_RND_STATE(prng);
+ struct context ctx;
int err = 0;
- /* We repeatedly write, overwrite and read from a sequence of
+ /*
+ * We repeatedly write, overwrite and read from a sequence of
* cachelines in order to try and detect incoherency (unflushed writes
* from either the CPU or GPU). Each setter/getter uses our cache
* domain API which should prevent incoherency.
@@ -299,34 +323,36 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ ctx.engine = random_engine(i915, &prng);
+ GEM_BUG_ON(!ctx.engine);
+ pr_info("%s: using %s\n", __func__, ctx.engine->name);
+
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
- if (!over->valid(i915))
+ if (!over->valid(&ctx))
continue;
for (write = igt_coherency_mode; write->name; write++) {
if (!write->set)
continue;
- if (!write->valid(i915))
+ if (!write->valid(&ctx))
continue;
for (read = igt_coherency_mode; read->name; read++) {
if (!read->get)
continue;
- if (!read->valid(i915))
+ if (!read->valid(&ctx))
continue;
for_each_prime_number_from(count, 1, ncachelines) {
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto unlock;
+ ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(ctx.obj)) {
+ err = PTR_ERR(ctx.obj);
+ goto free;
}
i915_random_reorder(offsets, ncachelines, &prng);
@@ -334,7 +360,7 @@ static int igt_gem_coherency(void *arg)
values[n] = prandom_u32_state(&prng);
for (n = 0; n < count; n++) {
- err = over->set(obj, offsets[n], ~values[n]);
+ err = over->set(&ctx, offsets[n], ~values[n]);
if (err) {
pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
n, count, over->name, err);
@@ -343,7 +369,7 @@ static int igt_gem_coherency(void *arg)
}
for (n = 0; n < count; n++) {
- err = write->set(obj, offsets[n], values[n]);
+ err = write->set(&ctx, offsets[n], values[n]);
if (err) {
pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
n, count, write->name, err);
@@ -354,7 +380,7 @@ static int igt_gem_coherency(void *arg)
for (n = 0; n < count; n++) {
u32 found;
- err = read->get(obj, offsets[n], &found);
+ err = read->get(&ctx, offsets[n], &found);
if (err) {
pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
n, count, read->name, err);
@@ -372,20 +398,18 @@ static int igt_gem_coherency(void *arg)
}
}
- i915_gem_object_put(obj);
+ i915_gem_object_put(ctx.obj);
}
}
}
}
-unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+free:
kfree(offsets);
return err;
put_object:
- i915_gem_object_put(obj);
- goto unlock;
+ i915_gem_object_put(ctx.obj);
+ goto free;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 3e6f4a65d356..62fabc023a83 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -31,7 +32,6 @@ static int live_nop_switch(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
- enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
unsigned long n;
@@ -52,23 +52,21 @@ static int live_nop_switch(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_file;
}
for (n = 0; n < nctx; n++) {
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
- goto out_unlock;
+ goto out_file;
}
}
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
unsigned long end_time, prime;
ktime_t times[2] = {};
@@ -78,7 +76,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unlock;
+ goto out_file;
}
i915_request_add(rq);
}
@@ -86,7 +84,7 @@ static int live_nop_switch(void *arg)
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
err = -EIO;
- goto out_unlock;
+ goto out_file;
}
times[1] = ktime_get_raw();
@@ -96,7 +94,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
@@ -106,7 +104,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unlock;
+ goto out_file;
}
/*
@@ -142,7 +140,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ goto out_file;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -150,8 +148,235 @@ static int live_nop_switch(void *arg)
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+struct parallel_switch {
+ struct task_struct *tsk;
+ struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+ struct parallel_switch *arg = data;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq = NULL;
+ int err, n;
+
+ err = 0;
+ for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ }
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+ struct parallel_switch *arg = data;
+ struct i915_request *rq = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int n;
+
+ count = 0;
+ do {
+ for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+ int err = 0;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return err;
+ }
+ }
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(rq);
+
+ pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
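
The two flavours differ only in when they wait: switch1 blocks on each chain before starting the next, so a single switch is in flight per engine, while switchN only makes each request await its predecessor and never waits itself, keeping the chain saturated. The await-the-previous linkage, modelled with plain pointers:

#include <stdio.h>

struct request { struct request *prev; int id; };

int main(void)
{
        struct request rq[4];
        struct request *prev = NULL;
        int i;

        /* build the chain as __live_parallel_switchN does: each request
         * awaits only its predecessor, so all of them stay in flight */
        for (i = 0; i < 4; i++) {
                rq[i].id = i;
                rq[i].prev = prev;      /* i915_request_await_dma_fence() */
                prev = &rq[i];
        }

        for (struct request *r = prev; r; r = r->prev)
                printf("request %d awaits %d\n", r->id, r->prev ? r->prev->id : -1);
        return 0;
}
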
+static int live_parallel_switch(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_switch1,
+ __live_parallel_switchN,
+ NULL,
+ };
+ struct parallel_switch *data = NULL;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ int (* const *fn)(void *arg);
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ int n, m, count;
+ int err = 0;
+
+ /*
+ * Check we can process switches on all engines simultaneously.
+ */
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ engines = i915_gem_context_lock_engines(ctx);
+ count = engines->num_engines;
+
+ data = kcalloc(count, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ i915_gem_context_unlock_engines(ctx);
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ m = 0; /* Use the first context as our template for the engines */
+ for_each_gem_engine(ce, engines, it) {
+ err = intel_context_pin(ce);
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out;
+ }
+ data[m++].ce[0] = intel_context_get(ce);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ /* Clone the same set of engines into the other contexts */
+ for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ for (m = 0; m < count; m++) {
+ if (!data[m].ce[0])
+ continue;
+
+ ce = intel_context_create(ctx, data[m].ce[0]->engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ data[m].ce[n] = ce;
+ }
+ }
+
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ int n;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
+
+ for (n = 0; n < count; n++) {
+ if (!data[n].ce[0])
+ continue;
+
+ data[n].tsk = kthread_run(*fn, &data[n],
+ "igt/parallel:%s",
+ data[n].ce[0]->engine->name);
+ if (IS_ERR(data[n].tsk)) {
+ err = PTR_ERR(data[n].tsk);
+ break;
+ }
+ get_task_struct(data[n].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (n = 0; n < count; n++) {
+ int status;
+
+ if (IS_ERR_OR_NULL(data[n].tsk))
+ continue;
+
+ status = kthread_stop(data[n].tsk);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(data[n].tsk);
+ data[n].tsk = NULL;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+out:
+ for (n = 0; n < count; n++) {
+ for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
+ if (!data[n].ce[m])
+ continue;
+
+ intel_context_unpin(data[n].ce[m]);
+ intel_context_put(data[n].ce[m]);
+ }
+ }
+ kfree(data);
+out_file:
mock_file_free(i915, file);
return err;
}
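
live_parallel_switch() is a standard kthread fan-out: spawn one worker per engine, yield so they all start, then kthread_stop() each and keep the first failure. The same shape in userspace with pthreads (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
        (void)arg;      /* ... run the switch loop for one engine ... */
        return NULL;    /* non-NULL would encode an errno */
}

int main(void)
{
        pthread_t tsk[3];
        int err = 0, n;

        for (n = 0; n < 3; n++)
                pthread_create(&tsk[n], NULL, worker, NULL);

        for (n = 0; n < 3; n++) {
                void *status;

                pthread_join(tsk[n], &status);  /* kthread_stop() analogue */
                if (status && !err)
                        err = (int)(long)status;        /* keep first failure */
        }
        return err;
}
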
@@ -166,28 +391,20 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
-static int gpu_fill(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_fill(struct intel_context *ce,
+ struct drm_i915_gem_object *obj,
unsigned int dw)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_vma *vma;
int err;
- GEM_BUG_ON(obj->base.size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(obj->base.size > ce->vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
if (err)
return err;
@@ -200,9 +417,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
* whilst checking that each context provides a unique view
* into the object.
*/
- err = igt_gpu_fill_dw(vma,
- ctx,
- engine,
+ err = igt_gpu_fill_dw(ce, vma,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
@@ -305,22 +520,21 @@ static int file_add_object(struct drm_file *file,
}
static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
+create_test_object(struct i915_address_space *vm,
struct drm_file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
/* Keep in GEM's good graces */
- i915_retire_requests(ctx->i915);
+ intel_gt_retire_requests(vm->gt);
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
- obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+ obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
if (IS_ERR(obj))
return obj;
@@ -348,11 +562,49 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
return npages / DW_PER_PAGE;
}
+static void throttle_release(struct i915_request **q, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (IS_ERR_OR_NULL(q[i]))
+ continue;
+
+ i915_request_put(fetch_and_zero(&q[i]));
+ }
+}
+
+static int throttle(struct intel_context *ce,
+ struct i915_request **q, int count)
+{
+ int i;
+
+ if (!IS_ERR_OR_NULL(q[0])) {
+ if (i915_request_wait(q[0],
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ return -EINTR;
+
+ i915_request_put(q[0]);
+ }
+
+ for (i = 0; i < count - 1; i++)
+ q[i] = q[i + 1];
+
+ q[i] = intel_context_create_request(ce);
+ if (IS_ERR(q[i]))
+ return PTR_ERR(q[i]);
+
+ i915_request_get(q[i]);
+ i915_request_add(q[i]);
+
+ return 0;
+}
+
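
throttle() bounds how much work the tests queue: before submitting a new request it waits on (and retires) the oldest of the last count, then slides the window. Without it the newly unlocked loops could submit unboundedly. A toy model of the sliding window:

#include <stdio.h>

#define DEPTH 5

int main(void)
{
        int q[DEPTH] = { 0 };   /* 0 = empty slot, else an in-flight id */
        int next = 1, step, i;

        for (step = 0; step < 8; step++) {
                if (q[0])       /* i915_request_wait() on the oldest */
                        printf("wait + retire %d\n", q[0]);

                for (i = 0; i < DEPTH - 1; i++)
                        q[i] = q[i + 1];        /* slide the window */

                q[DEPTH - 1] = next++;  /* submit a fresh request at the tail */
        }
        return 0;
}
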
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -364,9 +616,10 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
+ struct i915_request *tq[5] = {};
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
@@ -382,39 +635,53 @@ static int igt_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
+ struct intel_context *ce;
- ctx = live_context(i915, file);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -424,6 +691,9 @@ static int igt_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+
+ intel_context_put(ce);
+ kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -441,10 +711,10 @@ static int igt_ctx_exec(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
if (err)
@@ -459,9 +729,9 @@ out_unlock:
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct i915_request *tq[5] = {};
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
int err = 0;
@@ -478,24 +748,22 @@ static int igt_shared_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
- goto out_unlock;
+ goto out_file;
}
if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
- goto out_unlock;
+ goto out_file;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
@@ -509,6 +777,7 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
+ struct intel_context *ce;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -516,23 +785,38 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, parent->vm);
+ mutex_unlock(&ctx->mutex);
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(parent, file, &objects);
+ obj = create_test_object(parent->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_test;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
@@ -545,6 +829,7 @@ static int igt_shared_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+ intel_context_put(ce);
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -562,16 +847,13 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
}
out_test:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
mock_file_free(i915, file);
return err;
}
@@ -604,6 +886,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -681,10 +965,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
*rq_out = i915_request_get(rq);
@@ -698,8 +979,7 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
err_vma:
i915_vma_unpin(vma);
@@ -860,8 +1140,8 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(ce->engine->i915,
- 0, MAX_SCHEDULE_TIMEOUT);
+ ret = intel_gt_wait_for_idle(ce->engine->gt,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -887,7 +1167,7 @@ __sseu_test(const char *name,
if (ret)
return ret;
- ret = __intel_context_reconfigure_sseu(ce, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
@@ -908,106 +1188,97 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
const char *name,
unsigned int flags)
{
- struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
- struct intel_sseu pg_sseu;
- struct drm_file *file;
- int ret;
-
- if (INTEL_GEN(i915) < 9 || !engine)
- return 0;
-
- if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
- return 0;
+ int inst = 0;
+ int ret = 0;
- if (hweight32(engine->sseu.slice_mask) < 2)
+ if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
return 0;
- /*
- * Gen11 VME friendly power-gated configuration with half enabled
- * sub-slices.
- */
- pg_sseu = engine->sseu;
- pg_sseu.slice_mask = 1;
- pg_sseu.subslice_mask =
- ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
-
- pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(engine->sseu.slice_mask),
- hweight32(pg_sseu.slice_mask));
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
if (flags & TEST_RESET)
igt_global_reset_lock(&i915->gt);
- mutex_lock(&i915->drm.struct_mutex);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
- i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
-
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
}
- ce = i915_gem_context_get_engine(ctx, RCS0);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto out_put;
- }
+ do {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ struct intel_sseu pg_sseu;
- ret = intel_context_pin(ce);
- if (ret)
- goto out_context;
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_RENDER,
+ inst++);
+ if (!engine)
+ break;
- /* First set the default mask. */
- ret = __sseu_test(name, flags, ce, obj, engine->sseu);
- if (ret)
- goto out_fail;
+ if (hweight32(engine->sseu.slice_mask) < 2)
+ continue;
- /* Then set a power-gated configuration. */
- ret = __sseu_test(name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /*
+ * Gen11 VME friendly power-gated configuration with
+ * half enabled sub-slices.
+ */
+ pg_sseu = engine->sseu;
+ pg_sseu.slice_mask = 1;
+ pg_sseu.subslice_mask =
+ ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
+
+ pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
+ engine->name, name, flags,
+ hweight32(engine->sseu.slice_mask),
+ hweight32(pg_sseu.slice_mask));
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_put;
+ }
- /* Back to defaults. */
- ret = __sseu_test(name, flags, ce, obj, engine->sseu);
- if (ret)
- goto out_fail;
+ ret = intel_context_pin(ce);
+ if (ret)
+ goto out_ce;
- /* One last power-gated configuration for the road. */
- ret = __sseu_test(name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /* First set the default mask. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Then set a power-gated configuration. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Back to defaults. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
-out_fail:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ /* One last power-gated configuration for the road. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+out_unpin:
+ intel_context_unpin(ce);
+out_ce:
+ intel_context_put(ce);
+ } while (!ret);
+
+ if (igt_flush_test(i915))
ret = -EIO;
- intel_context_unpin(ce);
-out_context:
- intel_context_put(ce);
out_put:
i915_gem_object_put(obj);
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
if (flags & TEST_RESET)
igt_global_reset_unlock(&i915->gt);
- mock_file_free(i915, file);
-
if (ret)
pr_err("%s: Failed with %d!\n", name, ret);
@@ -1041,6 +1312,7 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_request *tq[5] = {};
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx, ndwords, dw;
@@ -1061,52 +1333,63 @@ static int igt_ctx_readonly(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- vm = ctx->vm ?: &i915->ggtt.alias->vm;
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
+ rcu_read_unlock();
err = 0;
- goto out_unlock;
+ goto out_file;
}
+ rcu_read_unlock();
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- unsigned int id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (prandom_u32_state(&prng) & 1)
i915_gem_object_set_readonly(obj);
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ ce->engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -1115,6 +1398,7 @@ static int igt_ctx_readonly(void *arg)
}
ndwords++;
}
+ i915_gem_context_unlock_engines(ctx);
}
pr_info("Submitted %lu dwords (across %u engines)\n",
ndwords, RUNTIME_INFO(i915)->num_engines);
@@ -1137,19 +1421,19 @@ static int igt_ctx_readonly(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+static int check_scratch(struct i915_address_space *vm, u64 offset)
{
struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->vm->mm,
+ __drm_mm_interval_first(&vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@@ -1167,6 +1451,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cmd;
@@ -1197,17 +1482,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto err_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1229,12 +1517,11 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
- i915_vma_unpin(vma);
- i915_vma_close(vma);
- i915_vma_put(vma);
+ i915_vma_unpin_and_release(&vma, 0);
i915_request_add(rq);
+ i915_vm_put(vm);
return 0;
skip_request:
@@ -1243,6 +1530,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
+err_vm:
+ i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@@ -1254,6 +1543,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
@@ -1296,17 +1586,20 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto err_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1337,12 +1630,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
- goto err;
+ goto err_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto err_vm;
}
*value = cmd[result / sizeof(*cmd)];
@@ -1357,6 +1650,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
+err_vm:
+ i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@@ -1371,7 +1666,6 @@ static int igt_vm_isolation(void *arg)
struct drm_file *file;
I915_RND_STATE(prng);
unsigned long count;
- unsigned int id;
u64 vm_total;
int err;
@@ -1387,34 +1681,32 @@ static int igt_vm_isolation(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
- goto out_unlock;
+ goto out_file;
}
ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
- goto out_unlock;
+ goto out_file;
}
/* We can only test vm isolation, if the vm are distinct */
if (ctx_a->vm == ctx_b->vm)
- goto out_unlock;
+ goto out_file;
vm_total = ctx_a->vm->total;
GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
unsigned long this = 0;
@@ -1436,7 +1728,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
- goto out_unlock;
+ goto out_file;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1445,7 +1737,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_unlock;
+ goto out_file;
}
this++;
@@ -1455,30 +1747,13 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %d engines\n",
count, RUNTIME_INFO(i915)->num_engines);
-out_unlock:
+out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
mock_file_free(i915, file);
return err;
}
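
Condensed, the isolation probe driven by the loop above is a cross-context write/read against the same scratch offset, using the two functions defined earlier in this section (variables as in igt_vm_isolation()):

/* ctx_a writes through its vm; ctx_b must read back zero through its own. */
err = write_to_scratch(ctx_a, engine, offset, 0xdeadbeef);
if (err == 0)
	err = read_from_scratch(ctx_b, engine, offset, &value);
if (err == 0 && value != 0)
	err = -EINVAL;	/* the write leaked across address spaces */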
-static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (engines == ALL_ENGINES)
- return "all";
-
- for_each_engine_masked(engine, i915, engines, tmp)
- return engine->name;
-
- return "none";
-}
-
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
return !ce->state;
@@ -1506,13 +1781,9 @@ static int mock_context_barrier(void *arg)
* a request; useful for retiring old state after loading new.
*/
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = mock_context(i915, "mock");
- if (!ctx) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!ctx)
+ return -ENOMEM;
counter = 0;
err = context_barrier_task(ctx, 0,
@@ -1585,8 +1856,6 @@ static int mock_context_barrier(void *arg)
out:
mock_context_close(ctx);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
#undef pr_fmt
#define pr_fmt(x) x
@@ -1614,6 +1883,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
+ SUBTEST(live_parallel_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 1d27babff0ce..29b2077b73d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -10,6 +10,7 @@
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
+#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
struct tile {
@@ -76,18 +77,103 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
static int check_partial_mapping(struct drm_i915_gem_object *obj,
const struct tile *tile,
- unsigned long end_time)
+ struct rnd_state *prng)
{
- const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
+ u32 __iomem *io;
+ struct page *p;
+ unsigned int n;
+ u64 offset;
+ u32 *cpu;
int err;
- if (igt_timeout(end_time,
- "%s: timed out before tiling=%d stride=%d\n",
- __func__, tile->tiling, tile->stride))
- return -EINTR;
+ err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
+ if (err) {
+ pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
+ tile->tiling, tile->stride, err);
+ return err;
+ }
+
+ GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
+ GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n", err);
+ return err;
+ }
+
+ page = i915_prandom_u32_max_state(npages, prng);
+ view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
+ }
+
+ n = page - view.partial.offset;
+ GEM_BUG_ON(n >= view.partial.size);
+
+ io = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(io)) {
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
+ err = PTR_ERR(io);
+ goto out;
+ }
+
+ iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
+ i915_vma_unpin_iomap(vma);
+
+ offset = tiled_offset(tile, page << PAGE_SHIFT);
+ if (offset >= obj->base.size)
+ goto out;
+
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+
+ p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ cpu = kmap(p) + offset_in_page(offset);
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ if (*cpu != (u32)page) {
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ page, n,
+ view.partial.offset,
+ view.partial.size,
+ vma->size >> PAGE_SHIFT,
+ tile->tiling ? tile_row_pages(obj) : 0,
+ vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
+ offset >> PAGE_SHIFT,
+ (unsigned int)offset_in_page(offset),
+ offset,
+ (u32)page, *cpu);
+ err = -EINVAL;
+ }
+ *cpu = 0;
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ kunmap(p);
+
+out:
+ i915_vma_destroy(vma);
+ return err;
+}
+
+static int check_partial_mappings(struct drm_i915_gem_object *obj,
+ const struct tile *tile,
+ unsigned long end_time)
+{
+ const unsigned int nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_vma *vma;
+ unsigned long page;
+ int err;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
if (err) {
@@ -170,11 +256,42 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return err;
i915_vma_destroy(vma);
+
+ if (igt_timeout(end_time,
+ "%s: timed out after tiling=%d stride=%d\n",
+ __func__, tile->tiling, tile->stride))
+ return -EINTR;
}
return 0;
}
+static unsigned int
+setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) <= 2) {
+ tile->height = 16;
+ tile->width = 128;
+ tile->size = 11;
+ } else if (tile->tiling == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(i915)) {
+ tile->height = 32;
+ tile->width = 128;
+ tile->size = 12;
+ } else {
+ tile->height = 8;
+ tile->width = 512;
+ tile->size = 12;
+ }
+
+ if (INTEL_GEN(i915) < 4)
+ return 8192 / tile->width;
+ else if (INTEL_GEN(i915) < 7)
+ return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
+ else
+ return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
+}
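+
setup_tile_size() encodes the classic fence geometries; each height x width pair multiplies out to the 1 << size tile footprint, which is easy to sanity-check. A compile-time check of that arithmetic (illustrative only):

static void tile_geometry_sanity(void)
{
	BUILD_BUG_ON(16 * 128 != (1 << 11));	/* gen2: 2 KiB tiles */
	BUILD_BUG_ON(32 * 128 != (1 << 12));	/* 128B Y-tiling: 4 KiB */
	BUILD_BUG_ON(8 * 512 != (1 << 12));	/* X / default: 4 KiB */
}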
+
static int igt_partial_tiling(void *arg)
{
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
@@ -184,6 +301,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
/* We want to check the page mapping and fencing of a large object
* mmapped through the GTT. The object we create is larger than can
* possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -205,7 +325,6 @@ static int igt_partial_tiling(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
@@ -219,7 +338,7 @@ static int igt_partial_tiling(void *arg)
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
tile.tiling = I915_TILING_NONE;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err && err != -EINTR)
goto out_unlock;
}
@@ -241,10 +360,10 @@ static int igt_partial_tiling(void *arg)
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
- tile.swizzle = i915->mm.bit_6_swizzle_x;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->mm.bit_6_swizzle_y;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
break;
}
@@ -253,31 +372,11 @@ static int igt_partial_tiling(void *arg)
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
- if (INTEL_GEN(i915) <= 2) {
- tile.height = 16;
- tile.width = 128;
- tile.size = 11;
- } else if (tile.tiling == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(i915)) {
- tile.height = 32;
- tile.width = 128;
- tile.size = 12;
- } else {
- tile.height = 8;
- tile.width = 512;
- tile.size = 12;
- }
-
- if (INTEL_GEN(i915) < 4)
- max_pitch = 8192 / tile.width;
- else if (INTEL_GEN(i915) < 7)
- max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
- else
- max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
+ max_pitch = setup_tile_size(&tile, i915);
for (pitch = max_pitch; pitch; pitch >>= 1) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -285,7 +384,7 @@ static int igt_partial_tiling(void *arg)
if (pitch > 2 && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -294,7 +393,7 @@ static int igt_partial_tiling(void *arg)
if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -305,7 +404,7 @@ static int igt_partial_tiling(void *arg)
if (INTEL_GEN(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -318,7 +417,100 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_smoke_tiling(void *arg)
+{
+ const unsigned int nreal = 1 << 12; /* largest tile row x2 */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ IGT_TIMEOUT(end);
+ int err;
+
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
+ /*
+	 * igt_partial_tiling() does an exhaustive check of partial tiling
+	 * chunking, but will undoubtedly run out of time. Here, we do a
+	 * randomised search and hope that over many 1s runs with different
+	 * seeds we will do a thorough check.
+ *
+ * Remember to look at the st_seed if we see a flip-flop in BAT!
+ */
+
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ return 0;
+
+ obj = huge_gem_object(i915,
+ nreal << PAGE_SHIFT,
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ count = 0;
+ do {
+ struct tile tile;
+
+ tile.tiling =
+ i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
+ switch (tile.tiling) {
+ case I915_TILING_NONE:
+ tile.height = 1;
+ tile.width = 1;
+ tile.size = 0;
+ tile.stride = 0;
+ tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
+ break;
+
+ case I915_TILING_X:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ break;
+ }
+
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
+ tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+ continue;
+
+ if (tile.tiling != I915_TILING_NONE) {
+ unsigned int max_pitch = setup_tile_size(&tile, i915);
+
+ tile.stride =
+ i915_prandom_u32_max_state(max_pitch, &prng);
+ tile.stride = (1 + tile.stride) * tile.width;
+ if (INTEL_GEN(i915) < 4)
+ tile.stride = rounddown_pow_of_two(tile.stride);
+ }
+
+ err = check_partial_mapping(obj, &tile, &prng);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end, NULL));
+
+ pr_info("%s: Completed %lu trials\n", __func__, count);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
@@ -329,20 +521,19 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- int err;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
@@ -358,12 +549,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
i915_vma_unlock(vma);
i915_request_add(rq);
+ i915_vma_unpin(vma);
+ if (err)
+ return err;
}
- i915_vma_unpin(vma);
i915_gem_object_put(obj); /* leave it only alive via its active ref */
-
- return err;
+ return 0;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -386,21 +578,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
-
intel_gt_pm_get(&i915->gt);
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
+ cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
+ igt_flush_test(i915);
intel_gt_pm_put(&i915->gt);
-
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_driver_register__shrinker(i915);
}
@@ -490,9 +675,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = make_obj_busy(obj);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
@@ -515,6 +698,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
+ SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index c21d747e7d05..e8132aca0bb6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -3,40 +3,241 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "gt/intel_gt.h"
+#include "gt/intel_engine_user.h"
#include "i915_selftest.h"
+#include "gem/i915_gem_context.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_fill_blt(void *arg)
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static int __perf_fill_blt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+ int err;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_fill_blt(obj, ce, 0);
+ if (err)
+ return err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
+ engine->name,
+ obj->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * obj->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
+static int perf_fill_blt(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
- struct drm_i915_gem_object *obj;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __perf_fill_blt(obj);
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
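+
The reported rate drops the fastest and slowest of the five sorted samples and double-weights the median, i.e. four effective passes. A hedged restatement of the arithmetic (NSEC_PER_SEC substituted for the literal; assumes sizes under 1 GiB so 4 * bytes fits in u32):

static u64 blt_rate_mib_s(u32 bytes, const ktime_t t[5])
{
	/* e.g. 64 MiB at ~100 ms per pass -> 640 MiB/s */
	return div64_u64(mul_u32_u32(4 * bytes, NSEC_PER_SEC),
			 t[1] + 2 * t[2] + t[3]) >> 20;
}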
+
+static int __perf_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst)
+{
+ struct drm_i915_private *i915 = to_i915(src->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+ int err;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ if (err)
+ return err;
+
+ err = i915_gem_object_wait(dst,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
+ engine->name,
+ src->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * src->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
+static int perf_copy_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *src, *dst;
+ int err;
+
+ src = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+ dst = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_src;
+ }
+
+ err = __perf_copy_blt(src, dst);
+
+ i915_gem_object_put(dst);
+err_src:
+ i915_gem_object_put(src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct igt_thread_arg {
+ struct drm_i915_private *i915;
struct rnd_state prng;
+ unsigned int n_cpus;
+};
+
+static int igt_fill_blt_thread(void *arg)
+{
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
- /*
- * XXX: needs some threads to scale all these tests, also maybe throw
- * in submission from higher priority context to see if we are
- * preempted for very large objects...
- */
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
- u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
- u32 phys_sz = sz % (max_block_size + 1);
- u32 val = prandom_u32_state(&prng);
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ /*
+		 * If we have a tiny shared address space, like the GGTT,
+		 * then we can't be too greedy.
+ */
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -65,9 +266,7 @@ static int igt_fill_blt(void *arg)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -100,28 +299,56 @@ err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+out_file:
+ mock_file_free(i915, file);
return err;
}
-static int igt_copy_blt(void *arg)
+static int igt_copy_blt_thread(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *src, *dst;
- struct rnd_state prng;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
+
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
- u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
- u32 phys_sz = sz % (max_block_size + 1);
- u32 val = prandom_u32_state(&prng);
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -166,9 +393,7 @@ static int igt_copy_blt(void *arg)
if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -205,12 +430,85 @@ err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int igt_threaded_blt(struct drm_i915_private *i915,
+ int (*blt_fn)(void *arg))
+{
+ struct igt_thread_arg *thread;
+ struct task_struct **tsk;
+ I915_RND_STATE(prng);
+ unsigned int n_cpus;
+ unsigned int i;
+ int err = 0;
+
+ n_cpus = num_online_cpus() + 1;
+
+ tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL);
+ if (!tsk)
+ return 0;
+
+ thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
+ if (!thread) {
+ kfree(tsk);
+ return 0;
+ }
+
+ for (i = 0; i < n_cpus; ++i) {
+ thread[i].i915 = i915;
+ thread[i].n_cpus = n_cpus;
+ thread[i].prng =
+ I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
+
+ tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ break;
+ }
+
+ get_task_struct(tsk[i]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (i = 0; i < n_cpus; ++i) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[i]))
+ continue;
+
+ status = kthread_stop(tsk[i]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[i]);
+ }
+
+ kfree(tsk);
+ kfree(thread);
+
return err;
}
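
One subtlety above: the workers run to completion on their own rather than waiting for kthread_should_stop(), so each task_struct is pinned with get_task_struct() and kthread_stop() is used purely to reap the exit code. The pattern in isolation (names illustrative):

static int run_one_worker(int (*worker_fn)(void *), void *arg)
{
	struct task_struct *tsk;
	int ret;

	tsk = kthread_run(worker_fn, arg, "igt/worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	get_task_struct(tsk);		/* stays valid even after self-exit */
	ret = kthread_stop(tsk);	/* returns worker_fn()'s result */
	put_task_struct(tsk);

	return ret;
}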
+static int igt_fill_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread);
+}
+
+static int igt_copy_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread);
+}
+
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(perf_fill_blt),
+ SUBTEST(perf_copy_blt),
SUBTEST(igt_fill_blt),
SUBTEST(igt_copy_blt),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 94a15e3f6db8..34932871b3a5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 57ece53c1075..6718da20f35d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
@@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma,
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -101,40 +104,35 @@ err:
return ERR_PTR(err);
}
-int igt_gpu_fill_dw(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset,
- unsigned long count,
- u32 val)
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *batch;
unsigned int flags;
int err;
- GEM_BUG_ON(vma->size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
GEM_BUG_ON(!i915_vma_is_pinned(vma));
batch = igt_emit_store_dw(vma, offset, count, val);
if (IS_ERR(batch))
return PTR_ERR(batch);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(ce->vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
if (err)
goto err_request;
@@ -156,9 +154,7 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
i915_request_add(rq);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return 0;
@@ -167,7 +163,6 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return err;
}
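
Both intel_gt_chipset_flush() additions in this file follow the same rule: once the CPU has finished writing a batch and the map is unpinned, flush before the GPU may fetch it. The sequence, condensed (helpers and MI_BATCH_BUFFER_END are the driver's own; error handling elided):

cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
/* ... emit the store-dword payload ... */
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);

intel_gt_chipset_flush(vma->vm->gt);	/* CPU writes -> GPU-visible */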
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 361a7ef866b0..4221cf84d175 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -11,9 +11,11 @@
struct i915_request;
struct i915_gem_context;
-struct intel_engine_cs;
struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
@@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma,
unsigned long count,
u32 val);
-int igt_gpu_fill_dw(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset,
- unsigned long count,
- u32 val);
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val);
#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index be8974ccff24..29b8984f0e47 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915,
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -23,6 +22,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ i915_gem_context_set_persistence(ctx);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e))
@@ -30,13 +31,8 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
mutex_init(&ctx->mutex);
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret < 0)
- goto err_engines;
-
if (name) {
struct i915_ppgtt *ppgtt;
@@ -48,14 +44,15 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_put;
+ mutex_lock(&ctx->mutex);
__set_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
return ctx;
-err_engines:
- free_engines(rcu_access_pointer(ctx->engines));
err_free:
kfree(ctx);
return NULL;
@@ -73,7 +70,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
- init_contexts(i915);
+ init_contexts(&i915->gem.contexts);
}
struct i915_gem_context *
@@ -82,8 +79,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
struct i915_gem_context *ctx;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 09c68dda2098..55317081d48b 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -120,7 +120,6 @@ __dma_fence_signal__notify(struct dma_fence *fence,
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
- lockdep_assert_irqs_disabled();
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
@@ -134,9 +133,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
+ unsigned long flags;
LIST_HEAD(signal);
- spin_lock(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
@@ -182,30 +182,23 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
}
}
- spin_unlock(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
struct list_head cb_list;
- spin_lock(&rq->lock);
+ spin_lock_irqsave(&rq->lock, flags);
list_replace(&rq->fence.cb_list, &cb_list);
__dma_fence_signal__timestamp(&rq->fence, timestamp);
__dma_fence_signal__notify(&rq->fence, &cb_list);
- spin_unlock(&rq->lock);
+ spin_unlock_irqrestore(&rq->lock, flags);
i915_request_put(rq);
}
}
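
With the irq-disabled entry point removed (see the deleted intel_engine_signal_breadcrumbs() below), this function may now be called from process context, so each plain spin_lock becomes the irqsave form. A minimal sketch of the rule being applied, assuming b->irq_lock is also taken from hard-irq context:

unsigned long flags;

/*
 * In process context with IRQs left enabled we could be interrupted
 * by the irq_work path, which takes the same lock: instant deadlock.
 * Saving and disabling IRQs around the critical section prevents it.
 */
spin_lock_irqsave(&b->irq_lock, flags);
/* ... move completed signalers to a local list ... */
spin_unlock_irqrestore(&b->irq_lock, flags);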
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
-{
- local_irq_disable();
- intel_engine_breadcrumbs_irq(engine);
- local_irq_enable();
-}
-
static void signal_irq_work(struct irq_work *work)
{
struct intel_engine_cs *engine =
@@ -275,7 +268,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
@@ -325,7 +317,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
/*
* We must wait for b->irq_lock so that we know the interrupt handler
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index f55691d151ae..ee9d2bcd2c13 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -13,6 +13,7 @@
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_ring.h"
static struct i915_global_context {
struct i915_global base;
@@ -62,7 +63,7 @@ int __intel_context_do_pin(struct intel_context *ce)
}
err = 0;
- with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
@@ -134,10 +135,11 @@ static int __context_pin_state(struct i915_vma *vma)
static void __context_unpin_state(struct i915_vma *vma)
{
- __i915_vma_unpin(vma);
i915_vma_make_shrinkable(vma);
+ __i915_vma_unpin(vma);
}
+__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -150,6 +152,7 @@ static void __intel_context_retire(struct i915_active *active)
intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
+
intel_context_put(ce);
}
@@ -219,12 +222,20 @@ intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct i915_address_space *vm;
+
GEM_BUG_ON(!engine->cops);
kref_init(&ce->ref);
ce->gem_context = ctx;
- ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (vm)
+ ce->vm = i915_vm_get(vm);
+ else
+ ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
+ rcu_read_unlock();
if (ctx->timeline)
ce->timeline = intel_timeline_get(ctx->timeline);
@@ -238,7 +249,7 @@ intel_context_init(struct intel_context *ce,
mutex_init(&ce->pin_mutex);
- i915_active_init(ctx->i915, &ce->active,
+ i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire);
}
@@ -298,14 +309,14 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
/* Only suitable for use in remotely modifying this context */
GEM_BUG_ON(rq->hw_context == ce);
- if (rq->timeline != tl) { /* beware timeline sharing */
+ if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
err = mutex_lock_interruptible_nested(&tl->mutex,
SINGLE_DEPTH_NESTING);
if (err)
return err;
/* Queue this switch after current activity by this context. */
- err = i915_active_request_set(&tl->last_request, rq);
+ err = i915_active_fence_set(&tl->last_request, rq);
mutex_unlock(&tl->mutex);
if (err)
return err;
@@ -319,7 +330,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
- return i915_active_ref(&ce->active, rq->timeline, rq);
+ return i915_active_add_request(&ce->active, rq);
}
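
The rcu_read_lock() block added to intel_context_init() above is the general pattern now that ctx->vm is RCU-managed: resolve the pointer and take a real reference before leaving the read-side section. As a standalone sketch (get_ctx_vm is an illustrative name; the series' own helper is i915_gem_context_get_vm_rcu(), seen in the selftests):

static struct i915_address_space *
get_ctx_vm(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);		/* may change under us */
	if (!vm)
		vm = &engine->gt->ggtt->vm;	/* global GTT fallback */
	vm = i915_vm_get(vm);			/* pin before unlock */
	rcu_read_unlock();

	return vm;
}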
struct i915_request *intel_context_create_request(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index dd742ac2fbdb..68b3d317d959 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_ring_types.h"
#include "intel_timeline_types.h"
void intel_context_init(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index bf9cedfccbf0..6959b05ae5f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -58,6 +58,7 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
+ u32 tag; /* cookie passed to HW to track this context on submission */
unsigned int active_count; /* protected by timeline->mutex */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 22aab8593abf..bc3b72bfa9e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -19,6 +19,7 @@
#include "intel_workarounds.h"
struct drm_printer;
+struct intel_gt;
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -89,38 +90,6 @@ struct drm_printer;
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-enum intel_engine_hangcheck_action {
- ENGINE_IDLE = 0,
- ENGINE_WAIT,
- ENGINE_ACTIVE_SEQNO,
- ENGINE_ACTIVE_HEAD,
- ENGINE_ACTIVE_SUBUNITS,
- ENGINE_WAIT_KICK,
- ENGINE_DEAD,
-};
-
-static inline const char *
-hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
-{
- switch (a) {
- case ENGINE_IDLE:
- return "idle";
- case ENGINE_WAIT:
- return "wait";
- case ENGINE_ACTIVE_SEQNO:
- return "active seqno";
- case ENGINE_ACTIVE_HEAD:
- return "active head";
- case ENGINE_ACTIVE_SUBUNITS:
- return "active subunits";
- case ENGINE_WAIT_KICK:
- return "wait kick";
- case ENGINE_DEAD:
- return "dead";
- }
-
- return "unknown";
-}
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
@@ -206,126 +175,13 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_ring_pin(struct intel_ring *ring);
-void intel_ring_reset(struct intel_ring *ring, u32 tail);
-unsigned int intel_ring_update_space(struct intel_ring *ring);
-void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct kref *ref);
-
-static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
-{
- kref_get(&ring->ref);
- return ring;
-}
-
-static inline void intel_ring_put(struct intel_ring *ring)
-{
- kref_put(&ring->ref, intel_ring_free);
-}
-
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-
-u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-
-static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
-{
- /* Dummy function.
- *
- * This serves as a placeholder in the code so that the reader
- * can compare against the preceding intel_ring_begin() and
- * check that the number of dwords emitted matches the space
- * reserved for the command packet (i.e. the value passed to
- * intel_ring_begin()).
- */
- GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
-}
-
-static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
-{
- return pos & (ring->size - 1);
-}
-
-static inline bool
-intel_ring_offset_valid(const struct intel_ring *ring,
- unsigned int pos)
-{
- if (pos & -ring->size) /* must be strictly within the ring */
- return false;
-
- if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
- return false;
-
- return true;
-}
-
-static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
-{
- /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - rq->ring->vaddr;
- GEM_BUG_ON(offset > rq->ring->size);
- return intel_ring_wrap(rq->ring, offset);
-}
-
-static inline void
-assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
-{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
- /*
- * "Ring Buffer Use"
- * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
- * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
- * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- *
- * We use ring->head as the last known location of the actual RING_HEAD,
- * it may have advanced but in the worst case it is equally the same
- * as ring->head and so we should never program RING_TAIL to advance
- * into the same cacheline as ring->head.
- */
-#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
-#undef cacheline
-}
-
-static inline unsigned int
-intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
-{
- /* Whilst writes to the tail are strictly order, there is no
- * serialisation between readers and the writers. The tail may be
- * read by i915_request_retire() just as it is being updated
- * by execlists, as although the breadcrumb is complete, the context
- * switch hasn't been seen.
- */
- assert_ring_tail_valid(ring, tail);
- ring->tail = tail;
- return tail;
-}
-
-static inline unsigned int
-__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
-{
- /*
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- */
- GEM_BUG_ON(!is_power_of_2(size));
- return (head - tail - CACHELINE_BYTES) & (size - 1);
-}
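
These inline ring helpers are only deleted here; the new intel_ring.h included elsewhere in this series presumably hosts them now. The free-space rule they encode is easy to check by hand:

/*
 * size = 4096, head = 128, tail = 192:
 * (128 - 192 - 64) & 4095 == 3968 free bytes. One cacheline is always
 * held back so RING_TAIL never advances into RING_HEAD's cacheline.
 */
unsigned int space = (128u - 192u - 64u) & (4096u - 1);	/* 3968 */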
-
-int intel_engines_init_mmio(struct drm_i915_private *i915);
-int intel_engines_setup(struct drm_i915_private *i915);
-int intel_engines_init(struct drm_i915_private *i915);
-void intel_engines_cleanup(struct drm_i915_private *i915);
+int intel_engines_init_mmio(struct intel_gt *gt);
+int intel_engines_setup(struct intel_gt *gt);
+int intel_engines_init(struct intel_gt *gt);
+void intel_engines_cleanup(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -349,7 +205,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
@@ -422,8 +277,9 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
engine->serial++; /* contexts lost */
}
-bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engines_reset_default_submission(struct intel_gt *gt);
@@ -434,61 +290,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-static inline void intel_engine_context_in(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
- GEM_BUG_ON(engine->stats.active == 0);
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static inline void intel_engine_context_out(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- ktime_t last;
-
- if (engine->stats.active && --engine->stats.active == 0) {
- /*
- * Decrement the active context count and in case GPU
- * is now idle add up to the running total.
- */
- last = ktime_sub(ktime_get(), engine->stats.start);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- } else if (engine->stats.active == 0) {
- /*
- * After turning on engine stats, context out might be
- * the first event in which case we account from the
- * time stats gathering was turned on.
- */
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- }
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);
@@ -525,4 +326,22 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+static inline bool
+intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return false;
+
+ return intel_engine_has_preemption(engine);
+}
+
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return false;
+
+ return intel_engine_has_semaphores(engine);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
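
The two new predicates gate optional behaviour on both a Kconfig-derived default and a hardware capability. A hedged usage sketch (set_timer_ms() exists in the driver, but this exact call site is illustrative):

if (intel_engine_has_timeslices(engine))
	set_timer_ms(&engine->execlists.timer,
		     READ_ONCE(engine->props.timeslice_duration_ms));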
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4ce8626b140e..5ca3ec911e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -37,6 +37,7 @@
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
+#include "intel_ring.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -277,6 +278,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+ if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+ return -EINVAL;
+
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
@@ -293,6 +297,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
+ engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
engine->i915 = gt->i915;
engine->gt = gt;
@@ -304,6 +309,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->instance = info->instance;
__sprint_engine_name(engine);
+ engine->props.heartbeat_interval_ms =
+ CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.preempt_timeout_ms =
+ CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+ engine->props.stop_timeout_ms =
+ CONFIG_DRM_I915_STOP_TIMEOUT;
+ engine->props.timeslice_duration_ms =
+ CONFIG_DRM_I915_TIMESLICE_DURATION;
+
/*
* To be overridden by the backend on setup. However to facilitate
* cleanup on error during setup, we always provide the destroy vfunc.
@@ -328,6 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
intel_engine_sanitize_mmio(engine);
gt->engine_class[info->class][info->instance] = engine;
+ gt->engine[id] = engine;
intel_engine_add_user(engine);
gt->i915->engine[id] = engine;
@@ -365,38 +380,40 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
}
}
-static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
+static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
__setup_engine_capabilities(engine);
}
/**
* intel_engines_cleanup() - free the resources allocated for Command Streamers
- * @i915: the i915 devic
+ * @gt: pointer to struct intel_gt
*/
-void intel_engines_cleanup(struct drm_i915_private *i915)
+void intel_engines_cleanup(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
engine->destroy(engine);
- i915->engine[id] = NULL;
+ gt->engine[id] = NULL;
+ gt->i915->engine[id] = NULL;
}
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
- * @i915: the i915 device
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_mmio(struct drm_i915_private *i915)
+int intel_engines_init_mmio(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_device_info *device_info = mkwrite_device_info(i915);
const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
@@ -414,7 +431,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(&i915->gt, i);
+ err = intel_engine_setup(gt, i);
if (err)
goto cleanup;
@@ -431,36 +448,36 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- intel_gt_check_and_clear_faults(&i915->gt);
+ intel_gt_check_and_clear_faults(gt);
- intel_setup_engine_capabilities(i915);
+ intel_setup_engine_capabilities(gt);
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
- * @i915: i915 device private
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *i915)
+int intel_engines_init(struct intel_gt *gt)
{
int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(i915))
+ if (HAS_EXECLISTS(gt->i915))
init = intel_execlists_submission_init;
else
init = intel_ring_submission_init;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = init(engine);
if (err)
goto cleanup;
@@ -469,7 +486,7 @@ int intel_engines_init(struct drm_i915_private *i915)
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
@@ -513,7 +530,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
unsigned int flags;
flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
+ if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
@@ -597,7 +614,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
- intel_engine_init_hangcheck(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
@@ -616,26 +632,26 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
/**
* intel_engines_setup- setup engine state not requiring hw access
- * @i915: Device to setup.
+ * @gt: pointer to struct intel_gt
*
* Initializes engine structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
-int intel_engines_setup(struct drm_i915_private *i915)
+int intel_engines_setup(struct intel_gt *gt)
{
int (*setup)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(i915))
+ if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = intel_engine_setup_common(engine);
if (err)
goto cleanup;
@@ -653,7 +669,7 @@ int intel_engines_setup(struct drm_i915_private *i915)
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
@@ -680,6 +696,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
engine->status_page.vma))
goto out_frame;
+ mutex_lock(&frame->timeline.mutex);
+
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@@ -688,18 +706,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- frame->rq.timeline = &frame->timeline;
+ rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
+ spin_lock_irq(&engine->active.lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+ spin_unlock_irq(&engine->active.lock);
+
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
intel_timeline_unpin(&frame->timeline);
out_timeline:
+ mutex_unlock(&frame->timeline.mutex);
intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
@@ -730,6 +752,7 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
+ static struct lock_class_key kernel;
struct intel_context *ce;
int err;
@@ -745,6 +768,14 @@ create_kernel_context(struct intel_engine_cs *engine)
return ERR_PTR(err);
}
+ /*
+ * Give our perma-pinned kernel timelines a separate lockdep class,
+ * so that we can use them from within the normal user timelines
+ * should we need to inject GPU operations during their request
+ * construction.
+ */
+ lockdep_set_class(&ce->timeline->mutex, &kernel);
+
return ce;
}
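
The private lock class makes the nesting described in the comment legal in lockdep's eyes. A sketch of the shape it permits (function and variable names illustrative):

static void inject_kernel_request(struct intel_timeline *user_tl,
				  struct intel_engine_cs *engine)
{
	struct intel_timeline *ktl = engine->kernel_context->timeline;

	mutex_lock(&user_tl->mutex);
	mutex_lock(&ktl->mutex);	/* distinct class: no false report */
	/* ... build and add the kernel request here ... */
	mutex_unlock(&ktl->mutex);
	mutex_unlock(&user_tl->mutex);
}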
@@ -814,8 +845,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
+ if (engine->kernel_context) {
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+ }
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@@ -851,6 +884,21 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+static unsigned long stop_timeout(const struct intel_engine_cs *engine)
+{
+ if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
+ return 0;
+
+ /*
+ * If we are doing a normal GPU reset, we can take our time and allow
+ * the engine to quiesce. We've stopped submission to the engine, and
+ * if we wait long enough, an innocent context should complete and
+ * leave the engine idle; it should not then be caught unaware by
+ * the forthcoming GPU reset (which usually follows the stop_cs)!
+ */
+ return READ_ONCE(engine->props.stop_timeout_ms);
+}
+
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
@@ -868,7 +916,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
err = 0;
if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
- 1000, 0,
+ 1000, stop_timeout(engine),
NULL)) {
GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
err = -ETIMEDOUT;
@@ -948,6 +996,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -965,7 +1014,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(i915, slice, subslice) {
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
@@ -1031,6 +1080,25 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (__tasklet_is_scheduled(t)) {
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
+ }
+
+ /* Otherwise flush the tasklet if it was running on another cpu */
+ tasklet_unlock_wait(t);
+}
+
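+/*
+ * Users in this series: intel_engine_is_idle() below, and the
+ * flush_submission() helper in intel_gt_requests.c, both of which want the
+ * submission tasklet to have run before they inspect engine state.
+ */
+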
/**
* intel_engine_is_idle() - Report if the engine has finished processing all work
* @engine: the intel_engine_cs
@@ -1049,21 +1117,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Waiting to drain ELSP? */
if (execlists_active(&engine->execlists)) {
- struct tasklet_struct *t = &engine->execlists.tasklet;
-
synchronize_hardirq(engine->i915->drm.pdev->irq);
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->func(t->data);
- tasklet_unlock(t);
- }
- local_bh_enable();
-
- /* Otherwise flush the tasklet if it was on another cpu */
- tasklet_unlock_wait(t);
+ intel_engine_flush_submission(engine);
if (execlists_active(&engine->execlists))
return false;
@@ -1093,7 +1149,7 @@ bool intel_engines_are_idle(struct intel_gt *gt)
if (!READ_ONCE(gt->awake))
return true;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1106,7 +1162,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->set_default_submission(engine);
}
@@ -1118,6 +1174,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 4:
+ return !IS_I965G(engine->i915); /* who knows! */
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
@@ -1193,6 +1251,38 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static const char *repr_timer(const struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return "inactive";
+
+ if (timer_pending(t))
+ return "active";
+
+ return "expired";
+}
+
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
@@ -1254,19 +1344,21 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
- ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
- ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
- num_entries);
+ drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
+ yesno(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)),
+ enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+ repr_timer(&engine->execlists.preempt),
+ repr_timer(&engine->execlists.timer));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
- read, write,
- yesno(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)),
- enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ read, write, num_entries);
+
if (read >= num_entries)
read = 0;
if (write >= num_entries)
@@ -1280,33 +1372,45 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
execlists_active_lock_bh(execlists);
+ rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
char hdr[80];
int len;
len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d: ",
+ "\t\tActive[%d]: ",
(int)(port - execlists->active));
- if (!i915_request_signaled(rq))
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
len += snprintf(hdr + len, sizeof(hdr) - len,
"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
+ tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
+ struct intel_timeline *tl = get_timeline(rq);
char hdr[80];
snprintf(hdr, sizeof(hdr),
"\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
(int)(port - execlists->pending),
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
+ tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
print_request(m, rq, hdr);
+
+ if (tl)
+ intel_timeline_put(tl);
}
+ rcu_read_unlock();
execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
@@ -1372,8 +1476,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
- drm_printf(m, "\tHangcheck: %d ms ago\n",
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ drm_printf(m, "\tHeartbeat: %d ms ago\n",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ rcu_read_unlock();
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
@@ -1383,6 +1492,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
+ struct intel_timeline *tl = get_timeline(rq);
+
print_request(m, rq, "\t\tactive ");
drm_printf(m, "\t\tring->start: 0x%08x\n",
@@ -1395,18 +1506,27 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
- drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
- rq->timeline->hwsp_offset);
+
+ if (tl) {
+ drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
+ tl->hwsp_offset);
+ intel_timeline_put(tl);
+ }
print_request_ring(m, rq);
+
+ if (rq->hw_context->lrc_reg_state) {
+ drm_printf(m, "Logical Ring Context:\n");
+ hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+ }
}
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
- wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
new file mode 100644
index 000000000000..06aa14c7aa8c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -0,0 +1,234 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_request.h"
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
+
+/*
+ * While the engine is active, we send a periodic pulse along the engine
+ * to check on its health and to flush any idle-barriers. If that request
+ * is stuck, and we fail to preempt it, we declare the engine hung and
+ * issue a reset -- in the hope that doing so restores progress.
+ */
+
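+/*
+ * A sketch of the escalation ladder that heartbeat() below walks each time
+ * a pulse fails to complete before the next interval (the priority names
+ * are the real ones used in the code; the summary itself is illustrative):
+ *
+ *	pulse still queued at MIN       -> bump to I915_PRIORITY_MASK
+ *	still stuck at MASK             -> add I915_PRIORITY_HEARTBEAT
+ *	still stuck at HEARTBEAT        -> bump to I915_PRIORITY_BARRIER
+ *	still stuck at BARRIER          -> intel_gt_handle_error(), reset
+ */
+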
+static bool next_heartbeat(struct intel_engine_cs *engine)
+{
+ long delay;
+
+ delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+ if (!delay)
+ return false;
+
+ delay = msecs_to_jiffies_timeout(delay);
+ if (delay >= HZ)
+ delay = round_jiffies_up_relative(delay);
+ schedule_delayed_work(&engine->heartbeat.work, delay);
+
+ return true;
+}
+
+static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
+ i915_request_add_active_barriers(rq);
+}
+
+static void show_heartbeat(const struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct drm_printer p = drm_debug_printer("heartbeat");
+
+ intel_engine_dump(engine, &p,
+ "%s heartbeat {prio:%d} not ticking\n",
+ engine->name,
+ rq->sched.attr.priority);
+}
+
+static void heartbeat(struct work_struct *wrk)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_engine_cs *engine =
+ container_of(wrk, typeof(*engine), heartbeat.work.work);
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
+ rq = engine->heartbeat.systole;
+ if (rq && i915_request_completed(rq)) {
+ i915_request_put(rq);
+ engine->heartbeat.systole = NULL;
+ }
+
+ if (intel_gt_is_wedged(engine->gt))
+ goto out;
+
+ if (engine->heartbeat.systole) {
+ if (engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ /*
+ * Gradually raise the priority of the heartbeat to
+ * give high priority work [which presumably desires
+ * low latency and no jitter] the chance to naturally
+ * complete before being preempted.
+ */
+ attr.priority = I915_PRIORITY_MASK;
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority = I915_PRIORITY_BARRIER;
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable();
+ } else {
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ show_heartbeat(rq, engine);
+
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "stopped heartbeat on %s",
+ engine->name);
+ }
+ goto out;
+ }
+
+ if (engine->wakeref_serial == engine->serial)
+ goto out;
+
+ mutex_lock(&ce->timeline->mutex);
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq))
+ goto unlock;
+
+ idle_pulse(engine, rq);
+ if (i915_modparams.enable_hangcheck)
+ engine->heartbeat.systole = i915_request_get(rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out:
+ if (!next_heartbeat(engine))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ intel_engine_pm_put(engine);
+}
+
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ return;
+
+ next_heartbeat(engine);
+}
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+{
+ if (cancel_delayed_work(&engine->heartbeat.work))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+}
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+{
+ INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+}
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ int err;
+
+ /* Send one last pulse before disabling, to clean up persistent hogs */
+ if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
+ err = intel_engine_pulse(engine);
+ if (err)
+ return err;
+ }
+
+ WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+
+ if (intel_engine_pm_get_if_awake(engine)) {
+ if (delay)
+ intel_engine_unpark_heartbeat(engine);
+ else
+ intel_engine_park_heartbeat(engine);
+ intel_engine_pm_put(engine);
+ }
+
+ return 0;
+}
+
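+/*
+ * Usage sketch (hypothetical caller, e.g. an engine-tuning interface; not
+ * part of this patch): a delay of 0 parks the heartbeat after one final
+ * pulse, while a non-zero delay re-arms it on an awake engine.
+ *
+ *	err = intel_engine_set_heartbeat(engine, 0);
+ *	if (err)
+ *		return err;
+ */
+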
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (!intel_engine_has_preemption(engine))
+ return -ENODEV;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ if (mutex_lock_interruptible(&ce->timeline->mutex))
+ goto out_rpm;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+
+ rq->flags |= I915_REQUEST_SENTINEL;
+ idle_pulse(engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
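+/*
+ * A hypothetical caller sketch (not from this patch): forcing a preemption
+ * point on a hogging engine, falling back to the error handler when the
+ * engine lacks preemption support.
+ *
+ *	err = intel_engine_pulse(engine);
+ *	if (err == -ENODEV)
+ *		intel_gt_handle_error(engine->gt, engine->mask, 0, "hog");
+ */
+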
+int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ if (llist_empty(&engine->barrier_tasks))
+ return 0;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ idle_pulse(engine, rq);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_heartbeat.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
new file mode 100644
index 000000000000..a7b8c0f9e005
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_HEARTBEAT_H
+#define INTEL_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay);
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 65b5ca74b394..874d82677179 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,10 +7,13 @@
#include "i915_drv.h"
#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_ring.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
@@ -33,7 +36,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (engine->unpark)
engine->unpark(engine);
- intel_engine_init_hangcheck(engine);
+ intel_engine_unpark_heartbeat(engine);
return 0;
}
@@ -52,7 +55,7 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
static inline void __timeline_mark_unlock(struct intel_context *ce,
unsigned long flags)
{
- mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
local_irq_restore(flags);
}
@@ -103,14 +106,14 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Context switch failed, hope for the best! Maybe reset? */
goto out_unlock;
- intel_timeline_enter(rq->timeline);
+ intel_timeline_enter(i915_request_timeline(rq));
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
i915_request_add_active_barriers(rq);
/* Install ourselves as a preemption barrier */
- rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_commit(rq);
/* Release our exclusive hold on the engine */
@@ -123,6 +126,19 @@ out_unlock:
return result;
}
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct dma_fence_cb *cb =
+ container_of((struct list_head *)node,
+ typeof(*cb), node);
+
+ cb->func(NULL, cb);
+ }
+}
+
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
@@ -142,6 +158,9 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
+ call_idle_barriers(engine); /* cleanup after wedging */
+
+ intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
intel_engine_pool_park(&engine->pool);
@@ -169,9 +188,10 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+ struct intel_runtime_pm *rpm = engine->uncore->rpm;
intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+ intel_engine_init_heartbeat(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 4cd54c569911..397186818305 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -61,6 +61,7 @@ static int pool_active(struct i915_active *ref)
return 0;
}
+__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
@@ -94,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_PTR(-ENOMEM);
node->pool = pool;
- i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+ i915_active_init(&node->active, pool_active, pool_retire);
obj = i915_gem_object_create_internal(engine->i915, sz);
if (IS_ERR(obj)) {
@@ -103,13 +104,25 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_CAST(obj);
}
+ i915_gem_object_set_readonly(obj);
+
node->obj = obj;
return node;
}
+static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+{
+ if (intel_engine_is_virtual(engine))
+ engine = intel_virtual_engine_get_sibling(engine, 0);
+
+ GEM_BUG_ON(!engine);
+ return &engine->pool;
+}
+
struct intel_engine_pool_node *
-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
{
+ struct intel_engine_pool *pool = lookup_pool(engine);
struct intel_engine_pool_node *node;
struct list_head *list;
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
index 8d069efd9457..1bd89cadc3b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -12,13 +12,13 @@
#include "i915_request.h"
struct intel_engine_pool_node *
-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
static inline int
intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
struct i915_request *rq)
{
- return i915_active_ref(&node->active, rq->timeline, rq);
+ return i915_active_add_request(&node->active, rq);
}
static inline void
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index a82cea95c2f2..758f0e8ec672 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "i915_gem.h"
#include "i915_pmu.h"
@@ -58,6 +59,7 @@ struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_gt;
+struct intel_ring;
struct intel_uncore;
typedef u8 intel_engine_mask_t;
@@ -76,40 +78,6 @@ struct intel_instdone {
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 last_ring;
- u32 last_head;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct kref ref;
- struct i915_vma *vma;
- void *vaddr;
-
- /*
- * As we have two types of rings, one global to the engine used
- * by ringbuffer submission and those that are exclusive to a
- * context used by execlists, we have to play safe and allow
- * atomic updates to the pin_count. However, the actual pinning
- * of the context is either done during initialisation for
- * ringbuffer submission or serialised as part of the context
- * pinning for execlists, and so we do not need a mutex ourselves
- * to serialise intel_ring_pin/intel_ring_unpin.
- */
- atomic_t pin_count;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
/*
* we use a single page to load ctx workarounds so all of these
* values are referred to in terms of dwords
@@ -148,6 +116,7 @@ enum intel_engine_id {
VECS1,
#define _VECS(n) (VECS0 + (n))
I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
struct st_preempt_hang {
@@ -174,6 +143,11 @@ struct intel_engine_execlists {
struct timer_list timer;
/**
+ * @preempt: reset the current context if it fails to give way
+ */
+ struct timer_list preempt;
+
+ /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
@@ -303,10 +277,12 @@ struct intel_engine_cs {
u8 uabi_class;
u8 uabi_instance;
+ u32 uabi_capabilities;
u32 context_size;
u32 mmio_base;
- u32 uabi_capabilities;
+ unsigned int context_tag;
+#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
struct rb_node uabi_node;
@@ -323,6 +299,11 @@ struct intel_engine_cs {
intel_engine_mask_t saturated; /* submitting semaphores too late? */
+ struct {
+ struct delayed_work work;
+ struct i915_request *systole;
+ } heartbeat;
+
unsigned long serial;
unsigned long wakeref_serial;
@@ -473,14 +454,14 @@ struct intel_engine_cs {
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
- struct intel_engine_hangcheck hangcheck;
-
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
/*
@@ -538,12 +519,25 @@ struct intel_engine_cs {
*/
ktime_t total;
} stats;
+
+ struct {
+ unsigned long heartbeat_interval_ms;
+ unsigned long preempt_timeout_ms;
+ unsigned long stop_timeout_ms;
+ unsigned long timeslice_duration_ms;
+ } props;
};
static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
static inline bool
@@ -576,20 +570,24 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+static inline bool
+intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
+{
+ return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
+}
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+#define instdone_has_slice(dev_priv___, sseu___, slice___) \
+ ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
+ (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
+ intel_sseu_has_subslice(sseu__, 0, subslice__))
+#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
+ for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
+ (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
+ (slice_) += ((subslice_) == 0)) \
+ for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
+ (instdone_has_subslice(dev_priv_, sseu_, slice_, \
+ subslice_)))
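+
+/*
+ * A minimal usage sketch (assuming sseu comes from RUNTIME_INFO(i915)->sseu,
+ * as in intel_engine_get_instdone()):
+ *
+ *	for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
+ *		instdone->sampler[slice][subslice] =
+ *			read_subslice_reg(engine, slice, subslice,
+ *					  GEN7_SAMPLER_INSTDONE);
+ */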
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 77cd5de83930..7f7150a733f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -160,10 +160,10 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
};
if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
- return -1;
+ return INVALID_ENGINE;
if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
- return -1;
+ return INVALID_ENGINE;
return map[ring->class].base + ring->instance;
}
@@ -171,23 +171,15 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
static void add_legacy_ring(struct legacy_ring *ring,
struct intel_engine_cs *engine)
{
- int idx;
-
if (engine->gt != ring->gt || engine->class != ring->class) {
ring->gt = engine->gt;
ring->class = engine->class;
ring->instance = 0;
}
- idx = legacy_ring_idx(ring);
- if (unlikely(idx == -1))
- return;
-
- GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
- ring->gt->engine[idx] = engine;
- ring->instance++;
-
- engine->legacy_idx = idx;
+ engine->legacy_idx = legacy_ring_idx(ring);
+ if (engine->legacy_idx != INVALID_ENGINE)
+ ring->instance++;
}
void intel_engines_driver_register(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 86e00a2db8a4..4294f146f13c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -112,6 +112,7 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@@ -119,6 +120,8 @@
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
+#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
+#define MI_SEMAPHORE_TOKEN_SHIFT 5
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
@@ -132,7 +135,10 @@
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
+#define MI_LRI_CS_MMIO (1<<19)
#define MI_LRI_FORCE_POSTED (1<<12)
+#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -147,6 +153,7 @@
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -156,7 +163,8 @@
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_BATCH_RESOURCE_STREAMER (1<<10)
+#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
+#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only */
/*
* 3D instructions used by the kernel
@@ -217,6 +225,7 @@
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
+#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -224,7 +233,9 @@
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
@@ -235,6 +246,29 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
+#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
+/* Opcodes for MI_MATH_INSTR */
+#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
+#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
+#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
+#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1)
+#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1)
+#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
+#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
+#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
+#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
+#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
+#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
+#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
+/* Registers used as operands in MI_MATH_INSTR */
+#define MI_MATH_REG(x) (x)
+#define MI_MATH_REG_SRCA 0x20
+#define MI_MATH_REG_SRCB 0x21
+#define MI_MATH_REG_ACCU 0x31
+#define MI_MATH_REG_ZF 0x32
+#define MI_MATH_REG_CF 0x33
+
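+/*
+ * An illustrative MI_MATH sequence (a hypothetical emitter; `cs' is assumed
+ * to point into a ring/batch buffer): compute ACCU = REG0 - REG1 and store
+ * the result in REG2.
+ *
+ *	*cs++ = MI_MATH(4);
+ *	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
+ *	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
+ *	*cs++ = MI_MATH_SUB;
+ *	*cs++ = MI_MATH_STORE(MI_MATH_REG(2), MI_MATH_REG_ACCU);
+ */
+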
/*
* Commands used only by the command parser
*/
@@ -251,7 +285,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d48ec9a76ed1..4c26daf7ee46 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -6,7 +6,12 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_mocs.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_uncore.h"
+#include "intel_pm.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -18,15 +23,108 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
- intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt);
+ intel_gt_init_requests(gt);
intel_gt_pm_init_early(gt);
+
+ intel_rps_init_early(&gt->rps);
intel_uc_init_early(&gt->uc);
}
-void intel_gt_init_hw(struct drm_i915_private *i915)
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+{
+ gt->ggtt = ggtt;
+
+ intel_gt_sanitize(gt, false);
+}
+
+static void init_unused_ring(struct intel_gt *gt, u32 base)
{
- i915->gt.ggtt = &i915->ggtt;
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
+}
+
+static void init_unused_rings(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
+ }
+}
+
+int intel_gt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ BUG_ON(!i915->kernel_context);
+ ret = intel_gt_terminally_wedged(gt);
+ if (ret)
+ return ret;
+
+ gt->last_init_time = ktime_get();
+
+ /* Double layer security blanket, see i915_gem_init() */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
+
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(gt);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(gt, "init");
+
+ intel_gt_init_swizzling(gt);
+
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (ie. head != tail) after resume which
+ * will prevent c3 entry. Makes sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(gt);
+
+ ret = i915_ppgtt_init_hw(gt);
+ if (ret) {
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
+ goto out;
+ }
+
+ /* We can't enable contexts until all firmware is loaded */
+ ret = intel_uc_init_hw(&gt->uc);
+ if (ret) {
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ goto out;
+ }
+
+ intel_mocs_init(gt);
+
+out:
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ return ret;
}
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
@@ -89,7 +187,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine_masked(engine, i915, engine_mask, id)
+ for_each_engine_masked(engine, gt, engine_mask, id)
gen8_clear_engine_error_register(engine);
}
}
@@ -100,7 +198,7 @@ static void gen6_check_faults(struct intel_gt *gt)
enum intel_engine_id id;
u32 fault;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
@@ -176,7 +274,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
/*
@@ -200,18 +298,18 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
wmb();
- if (INTEL_INFO(i915)->has_coherent_ggtt)
+ if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
return;
intel_gt_chipset_flush(gt);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- struct intel_uncore *uncore = gt->uncore;
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ unsigned long flags;
- spin_lock_irq(&uncore->lock);
+ spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
+ spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@@ -222,7 +320,12 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
intel_gtt_chipset_flush();
}
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+void intel_gt_driver_register(struct intel_gt *gt)
+{
+ intel_rps_driver_register(&gt->rps);
+}
+
+static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@@ -230,7 +333,7 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
int ret;
obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
@@ -256,11 +359,40 @@ err_unref:
return ret;
}
-void intel_gt_fini_scratch(struct intel_gt *gt)
+static void intel_gt_fini_scratch(struct intel_gt *gt)
{
i915_vma_unpin_and_release(&gt->scratch, 0);
}
+int intel_gt_init(struct intel_gt *gt)
+{
+ int err;
+
+ err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ if (err)
+ return err;
+
+ intel_gt_pm_init(gt);
+
+ return 0;
+}
+
+void intel_gt_driver_remove(struct intel_gt *gt)
+{
+ GEM_BUG_ON(gt->awake);
+}
+
+void intel_gt_driver_unregister(struct intel_gt *gt)
+{
+ intel_rps_driver_unregister(&gt->rps);
+}
+
+void intel_gt_driver_release(struct intel_gt *gt)
+{
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+}
+
void intel_gt_driver_late_release(struct intel_gt *gt)
{
intel_uc_driver_late_release(&gt->uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 4920cb351f10..5436f8c30708 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -28,7 +28,14 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw(struct drm_i915_private *i915);
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int __must_check intel_gt_init_hw(struct intel_gt *gt);
+int intel_gt_init(struct intel_gt *gt);
+void intel_gt_driver_register(struct intel_gt *gt);
+
+void intel_gt_driver_unregister(struct intel_gt *gt);
+void intel_gt_driver_remove(struct intel_gt *gt);
+void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release(struct intel_gt *gt);
@@ -39,11 +46,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);
-void intel_gt_init_hangcheck(struct intel_gt *gt);
-
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
-void intel_gt_fini_scratch(struct intel_gt *gt);
-
static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
enum intel_gt_scratch_field field)
{
@@ -55,6 +57,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt)
return __intel_reset_failed(&gt->reset);
}
-void intel_gt_queue_hangcheck(struct intel_gt *gt);
-
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 34a4fb624bf7..973ee7eded64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
+#include "intel_rps.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
@@ -77,7 +78,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
return guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(gt, iir);
+ return gen11_rps_irq_handler(&gt->rps, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -336,7 +337,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(gt->i915, gt_iir[2]);
+ gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 1363e069ec83..6187cdd06646 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -4,17 +4,38 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/suspend.h>
+
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_params.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_llc.h"
#include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_wakeref.h"
-static void pm_notify(struct drm_i915_private *i915, int state)
+static void user_forcewake(struct intel_gt *gt, bool suspend)
{
- blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
+ int count = atomic_read(&gt->user_wakeref);
+
+ /* Inside suspend/resume so single threaded, no races to worry about. */
+ if (likely(!count))
+ return;
+
+ intel_gt_pm_get(gt);
+ if (suspend) {
+ GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+ atomic_sub(count, &gt->wakeref.count);
+ } else {
+ atomic_add(count, &gt->wakeref.count);
+ }
+ intel_gt_pm_put(gt);
}
static int __gt_unpark(struct intel_wakeref *wf)
@@ -24,6 +45,8 @@ static int __gt_unpark(struct intel_wakeref *wf)
GEM_TRACE("\n");
+ i915_globals_unpark();
+
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -38,41 +61,44 @@ static int __gt_unpark(struct intel_wakeref *wf)
gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
- intel_enable_gt_powersave(i915);
-
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+ intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
- intel_gt_queue_hangcheck(gt);
-
- pm_notify(i915, INTEL_GT_UNPARK);
+ intel_gt_unpark_requests(gt);
return 0;
}
static int __gt_park(struct intel_wakeref *wf)
{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
- intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
+ struct drm_i915_private *i915 = gt->i915;
GEM_TRACE("\n");
- pm_notify(i915, INTEL_GT_PARK);
+ intel_gt_park_requests(gt);
+ i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
+ intel_rps_park(&gt->rps);
/* Everything switched off, flush any residual interrupt just in case */
intel_synchronize_irq(i915);
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
+ intel_rc6_ctx_wa_check(&i915->gt.rc6);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ }
+
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ i915_globals_park();
+
return 0;
}
@@ -84,9 +110,18 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+}
- BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
+void intel_gt_pm_init(struct intel_gt *gt)
+{
+ /*
+ * Enabling power-management should be "self-healing". If we cannot
+ * enable a feature, simply leave it disabled with a notice to the
+ * user.
+ */
+ intel_rc6_init(&gt->rc6);
+ intel_rps_init(&gt->rps);
}
static bool reset_engines(struct intel_gt *gt)
@@ -111,16 +146,47 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
- GEM_TRACE("\n");
+ GEM_TRACE("force:%s\n", yesno(force));
+
+ /* Use a raw wakeref to avoid calling intel_display_power_get early */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (intel_gt_is_wedged(gt))
+ intel_gt_unset_wedged(gt);
intel_uc_sanitize(&gt->uc);
- if (!reset_engines(gt) && !force)
- return;
+ for_each_engine(engine, gt, id)
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
+
+ intel_uc_reset_prepare(&gt->uc);
+
+ if (reset_engines(gt) || force) {
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, false);
+ }
+
+ for_each_engine(engine, gt, id)
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
- for_each_engine(engine, gt->i915, id)
- __intel_engine_reset(engine, false);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_gt_pm_fini(struct intel_gt *gt)
+{
+ intel_rc6_fini(&gt->rc6);
}
int intel_gt_resume(struct intel_gt *gt)
@@ -129,6 +195,8 @@ int intel_gt_resume(struct intel_gt *gt)
enum intel_engine_id id;
int err = 0;
+ GEM_TRACE("\n");
+
/*
* After resume, we may need to poke into the pinned kernel
* contexts to paper over any damage caused by the sudden suspend.
@@ -136,14 +204,23 @@ int intel_gt_resume(struct intel_gt *gt)
* allowing us to fixup the user contexts on their first pin.
*/
intel_gt_pm_get(gt);
- for_each_engine(engine, gt->i915, id) {
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_rc6_sanitize(&gt->rc6);
+
+ intel_rps_enable(&gt->rps);
+ intel_llc_enable(&gt->llc);
+
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
ce = engine->kernel_context;
- if (ce)
+ if (ce) {
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
ce->ops->reset(ce);
+ }
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@@ -156,19 +233,99 @@ int intel_gt_resume(struct intel_gt *gt)
break;
}
}
+
+ intel_rc6_enable(&gt->rc6);
+
+ intel_uc_resume(&gt->uc);
+
+ user_forcewake(gt, false);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
return err;
}
+static void wait_for_suspend(struct intel_gt *gt)
+{
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+}
+
+void intel_gt_suspend_prepare(struct intel_gt *gt)
+{
+ user_forcewake(gt, true);
+ wait_for_suspend(gt);
+
+ intel_uc_suspend(&gt->uc);
+}
+
+static suspend_state_t pm_suspend_target(void)
+{
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+ return pm_suspend_target_state;
+#else
+ return PM_SUSPEND_TO_IDLE;
+#endif
+}
+
+void intel_gt_suspend_late(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+ /* We expect to be idle already, but also want to be independent */
+ wait_for_suspend(gt);
+
+ /*
+ * On disabling the device, we want to turn off HW access to memory
+ * that we no longer own.
+ *
+ * However, not all suspend-states disable the device. S0 (s2idle)
+ * is effectively runtime-suspend, the device is left powered on
+ * but needs to be put into a low power state. We need to keep
+ * power management enabled, but we also retain system state and so
+ * it remains safe to keep on using our allocated memory.
+ */
+ if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+ return;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ intel_rps_disable(&gt->rps);
+ intel_rc6_disable(&gt->rc6);
+ intel_llc_disable(&gt->llc);
+ }
+
+ intel_gt_sanitize(gt, false);
+
+ GEM_TRACE("\n");
+}
+
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
+
+ GEM_TRACE("\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
+ GEM_TRACE("\n");
+
intel_gt_init_swizzling(gt);
return intel_uc_runtime_resume(&gt->uc);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_gt_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index fb39d99cd6ee..b3e17399be9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -12,11 +12,6 @@
#include "intel_gt_types.h"
#include "intel_wakeref.h"
-enum {
- INTEL_GT_UNPARK,
- INTEL_GT_PARK,
-};
-
static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
{
return intel_wakeref_is_active(&gt->wakeref);
@@ -43,10 +38,21 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
}
void intel_gt_pm_init_early(struct intel_gt *gt);
+void intel_gt_pm_init(struct intel_gt *gt);
+void intel_gt_pm_fini(struct intel_gt *gt);
void intel_gt_sanitize(struct intel_gt *gt, bool force);
+
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
+
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+static inline bool is_mock_gt(const struct intel_gt *gt)
+{
+ return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+}
+
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
new file mode 100644
index 000000000000..353809ac2754
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -0,0 +1,137 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h" /* for_each_engine() */
+#include "i915_request.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_timeline.h"
+
+static void retire_requests(struct intel_timeline *tl)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (!i915_request_retire(rq))
+ break;
+}
+
+static void flush_submission(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_submission(engine);
+}
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ unsigned long active_count = 0;
+ unsigned long flags;
+ bool interruptible;
+ LIST_HEAD(free);
+
+ interruptible = true;
+ if (unlikely(timeout < 0))
+ timeout = -timeout, interruptible = false;
+
+ flush_submission(gt); /* kick the ksoftirqd tasklets */
+
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++; /* report busy to caller, try again? */
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!tl->active_count);
+ tl->active_count++; /* pin the list element */
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ if (timeout > 0) {
+ struct dma_fence *fence;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ timeout = dma_fence_wait_timeout(fence,
+ interruptible,
+ timeout);
+ dma_fence_put(fence);
+ }
+ }
+
+ retire_requests(tl);
+
+ spin_lock_irqsave(&timelines->lock, flags);
+
+ /* Resume iteration after dropping lock */
+ list_safe_reset_next(tl, tn, link);
+ if (!--tl->active_count)
+ list_del(&tl->link);
+ else
+ active_count += !!rcu_access_pointer(tl->last_request.fence);
+
+ mutex_unlock(&tl->mutex);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(tl->active_count);
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+
+ return active_count ? timeout : 0;
+}
+
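+/*
+ * A sketch of the contract, inferred from the implementation above: a
+ * negative timeout requests an uninterruptible wait, a timeout of zero only
+ * reaps already-completed requests, and the return value is the remaining
+ * timeout while any timeline is still active, or 0 once all requests have
+ * been retired.
+ */
+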
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+ /* If the device is asleep, we have no requests outstanding */
+ if (!intel_gt_pm_is_awake(gt))
+ return 0;
+
+ while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return timeout;
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), requests.retire_work.work);
+
+ intel_gt_retire_requests(gt);
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_init_requests(struct intel_gt *gt)
+{
+ INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
+}
+
+void intel_gt_park_requests(struct intel_gt *gt)
+{
+ cancel_delayed_work(&gt->requests.retire_work);
+}
+
+void intel_gt_unpark_requests(struct intel_gt *gt)
+{
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
new file mode 100644
index 000000000000..bd31cbce47e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_REQUESTS_H
+#define INTEL_GT_REQUESTS_H
+
+struct intel_gt;
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+static inline void intel_gt_retire_requests(struct intel_gt *gt)
+{
+ intel_gt_retire_requests_timeout(gt, 0);
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
+void intel_gt_init_requests(struct intel_gt *gt);
+void intel_gt_park_requests(struct intel_gt *gt);
+void intel_gt_unpark_requests(struct intel_gt *gt);
+
+#endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index dc295c196d11..d4e14dbd172e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -17,7 +17,10 @@
#include "i915_vma.h"
#include "intel_engine_types.h"
+#include "intel_llc_types.h"
#include "intel_reset_types.h"
+#include "intel_rc6_types.h"
+#include "intel_rps_types.h"
#include "intel_wakeref.h"
struct drm_i915_private;
@@ -25,14 +28,6 @@ struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;
-struct intel_hangcheck {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work work;
-};
-
struct intel_gt {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
@@ -49,12 +44,23 @@ struct intel_gt {
struct list_head hwsp_free_list;
} timelines;
+ struct intel_gt_requests {
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+ } requests;
+
struct intel_wakeref wakeref;
+ atomic_t user_wakeref;
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
- struct intel_hangcheck hangcheck;
struct intel_reset reset;
/**
@@ -66,7 +72,9 @@ struct intel_gt {
*/
intel_wakeref_t awake;
- struct blocking_notifier_head pm_notifications;
+ struct intel_llc llc;
+ struct intel_rc6 rc6;
+ struct intel_rps rps;
ktime_t last_init_time;
@@ -89,14 +97,16 @@ enum intel_gt_scratch_field {
INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
/* 8 bytes */
- INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
-
- /* 8 bytes */
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+ /* 6 * 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
+
+ /* 4 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
#endif /* __INTEL_GT_TYPES_H__ */
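
The reshuffled scratch-field enum above packs several slots into the GT scratch area; the byte comments give each field's size. A quick compile-time check (plain C with the offsets copied from the enum; the single-4KiB-page bound is an assumption) confirms that the 6 * 8 byte CS_GPR area ends exactly where PERF_PREDICATE_RESULT_1 begins:

/* Layout sanity check for the scratch offsets above (illustrative). */
#include <assert.h>

enum {
	FIELD_DEFAULT                 = 0,	/* 8 bytes */
	FIELD_RENDER_FLUSH            = 128,	/* 8 bytes */
	FIELD_COHERENTL3_WA           = 256,	/* 8 bytes */
	FIELD_PERF_CS_GPR             = 2048,	/* 6 * 8 bytes */
	FIELD_PERF_PREDICATE_RESULT_1 = 2096,	/* 4 bytes */
};

int main(void)
{
	static_assert(FIELD_PERF_CS_GPR + 6 * 8 == FIELD_PERF_PREDICATE_RESULT_1,
		      "CS_GPR area must end where the predicate result begins");
	static_assert(FIELD_PERF_PREDICATE_RESULT_1 + 4 <= 4096,
		      "fields assumed to fit in a single 4KiB scratch page");
	return 0;
}
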
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
deleted file mode 100644
index 05d042cdefe2..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "intel_engine.h"
-#include "intel_gt.h"
-#include "intel_reset.h"
-
-struct hangcheck {
- u64 acthd;
- u32 ring;
- u32 head;
- enum intel_engine_hangcheck_action action;
- unsigned long action_timestamp;
- int deadlock;
- struct intel_instdone instdone;
- bool wedged:1;
- bool stalled:1;
-};
-
-static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
-{
- u32 tmp = current_instdone | *old_instdone;
- bool unchanged;
-
- unchanged = tmp == *old_instdone;
- *old_instdone |= tmp;
-
- return unchanged;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_instdone instdone;
- struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
- bool stuck;
- int slice;
- int subslice;
-
- intel_engine_get_instdone(engine, &instdone);
-
- /* There might be unstable subunit states even when
- * the actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = instdone_unchanged(instdone.instdone,
- &accu_instdone->instdone);
- stuck &= instdone_unchanged(instdone.slice_common,
- &accu_instdone->slice_common);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
- stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
- &accu_instdone->sampler[slice][subslice]);
- stuck &= instdone_unchanged(instdone.row[slice][subslice],
- &accu_instdone->row[slice][subslice]);
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return ENGINE_ACTIVE_HEAD;
- }
-
- if (!subunits_stuck(engine))
- return ENGINE_ACTIVE_SUBUNITS;
-
- return ENGINE_DEAD;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != ENGINE_DEAD)
- return ha;
-
- if (IS_GEN(engine->i915, 2))
- return ENGINE_DEAD;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = ENGINE_READ(engine, RING_CTL);
- if (tmp & RING_WAIT) {
- intel_gt_handle_error(engine->gt, engine->mask, 0,
- "stuck wait on %s", engine->name);
- ENGINE_WRITE(engine, RING_CTL, tmp);
- return ENGINE_WAIT_KICK;
- }
-
- return ENGINE_DEAD;
-}
-
-static void hangcheck_load_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- hc->acthd = intel_engine_get_active_head(engine);
- hc->ring = ENGINE_READ(engine, RING_START);
- hc->head = ENGINE_READ(engine, RING_HEAD);
-}
-
-static void hangcheck_store_sample(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.last_ring = hc->ring;
- engine->hangcheck.last_head = hc->head;
-}
-
-static enum intel_engine_hangcheck_action
-hangcheck_get_action(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- if (intel_engine_is_idle(engine))
- return ENGINE_IDLE;
-
- if (engine->hangcheck.last_ring != hc->ring)
- return ENGINE_ACTIVE_SEQNO;
-
- if (engine->hangcheck.last_head != hc->head)
- return ENGINE_ACTIVE_SEQNO;
-
- return engine_stuck(engine, hc->acthd);
-}
-
-static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
-
- hc->action = hangcheck_get_action(engine, hc);
-
- /* We always increment the progress
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring is when the
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, count it as progress, since the seqno
- * advancement should resolve the wait; if it
- * does not, hangcheck will catch the hanging engine.
- */
-
- switch (hc->action) {
- case ENGINE_IDLE:
- case ENGINE_ACTIVE_SEQNO:
- /* Clear head and subunit states on seqno movement */
- hc->acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- /* Intentional fall through */
- case ENGINE_WAIT_KICK:
- case ENGINE_WAIT:
- engine->hangcheck.action_timestamp = jiffies;
- break;
-
- case ENGINE_ACTIVE_HEAD:
- case ENGINE_ACTIVE_SUBUNITS:
- /*
- * Seqno stuck with still active engine gets leeway,
- * in hopes that it is just a long shader.
- */
- timeout = I915_SEQNO_DEAD_TIMEOUT;
- break;
-
- case ENGINE_DEAD:
- break;
-
- default:
- MISSING_CASE(hc->action);
- }
-
- hc->stalled = time_after(jiffies,
- engine->hangcheck.action_timestamp + timeout);
- hc->wedged = time_after(jiffies,
- engine->hangcheck.action_timestamp +
- I915_ENGINE_WEDGED_TIMEOUT);
-}
-
-static void hangcheck_declare_hang(struct intel_gt *gt,
- intel_engine_mask_t hung,
- intel_engine_mask_t stuck)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
- char msg[80];
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "no progress" : "hang");
- for_each_engine_masked(engine, gt->i915, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track of per-ring seqno progress
- * and if there is no progress, the hangcheck score for that ring is
- * increased. Further, acthd is inspected to see if the ring is stuck; in
- * the stuck case we kick the ring. If we see no progress on three
- * subsequent calls we assume the chip is wedged and try to fix it by
- * resetting the chip.
- */
-static void hangcheck_elapsed(struct work_struct *work)
-{
- struct intel_gt *gt =
- container_of(work, typeof(*gt), hangcheck.work.work);
- intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
-
- if (!i915_modparams.enable_hangcheck)
- return;
-
- if (!READ_ONCE(gt->awake))
- return;
-
- if (intel_gt_is_wedged(gt))
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
- if (!wakeref)
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
-
- for_each_engine(engine, gt->i915, id) {
- struct hangcheck hc;
-
- intel_engine_signal_breadcrumbs(engine);
-
- hangcheck_load_sample(engine, &hc);
- hangcheck_accumulate_sample(engine, &hc);
- hangcheck_store_sample(engine, &hc);
-
- if (hc.stalled) {
- hung |= engine->mask;
- if (hc.action != ENGINE_DEAD)
- stuck |= engine->mask;
- }
-
- if (hc.wedged)
- wedged |= engine->mask;
- }
-
- if (GEM_SHOW_DEBUG() && (hung | stuck)) {
- struct drm_printer p = drm_debug_printer("hangcheck");
-
- for_each_engine(engine, gt->i915, id) {
- if (intel_engine_is_idle(engine))
- continue;
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
- }
-
- if (wedged) {
- dev_err(gt->i915->drm.dev,
- "GPU recovery timed out,"
- " cancelling all in-flight rendering.\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- }
-
- if (hung)
- hangcheck_declare_hang(gt, hung, stuck);
-
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
-
- /* Reset timer in case GPU hangs without another request being added */
- intel_gt_queue_hangcheck(gt);
-}
-
-void intel_gt_queue_hangcheck(struct intel_gt *gt)
-{
- unsigned long delay;
-
- if (unlikely(!i915_modparams.enable_hangcheck))
- return;
-
- /*
- * Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
- queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
-}
-
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
- engine->hangcheck.action_timestamp = jiffies;
-}
-
-void intel_gt_init_hangcheck(struct intel_gt *gt)
-{
- INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_hangcheck.c"
-#endif
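
One detail of the deleted hangcheck is worth spelling out: instdone bits flicker while subunits retire work, so a single unchanged sample is not proof of a hang. instdone_unchanged() therefore OR-accumulated every bit ever seen and reported "stuck" only when a fresh sample contributed nothing new. A stand-alone model of that accumulator (illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when @sample adds no bits beyond what @seen has accumulated,
 * i.e. no visible progress since the last call. */
static bool instdone_unchanged(uint32_t sample, uint32_t *seen)
{
	uint32_t combined = sample | *seen;
	bool unchanged = combined == *seen;

	*seen = combined;
	return unchanged;
}

int main(void)
{
	uint32_t seen = 0;

	printf("%d\n", instdone_unchanged(0x3, &seen)); /* 0: new bits seen */
	printf("%d\n", instdone_unchanged(0x1, &seen)); /* 1: no progress */
	printf("%d\n", instdone_unchanged(0x4, &seen)); /* 0: progress again */
	return 0;
}
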
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
new file mode 100644
index 000000000000..ceb785b75c25
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -0,0 +1,161 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/cpufreq.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_sideband.h"
+
+struct ia_constants {
+ unsigned int min_gpu_freq;
+ unsigned int max_gpu_freq;
+
+ unsigned int min_ring_freq;
+ unsigned int max_ia_freq;
+};
+
+static struct intel_gt *llc_to_gt(struct intel_llc *llc)
+{
+ return container_of(llc, struct intel_gt, llc);
+}
+
+static unsigned int cpu_max_MHz(void)
+{
+ struct cpufreq_policy *policy;
+ unsigned int max_khz;
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_khz = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to the measured freq if none is found; the PCU
+ * will ensure we don't go over
+ */
+ max_khz = tsc_khz;
+ }
+
+ return max_khz / 1000;
+}
+
+static bool get_ia_constants(struct intel_llc *llc,
+ struct ia_constants *consts)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ if (rps->max_freq <= rps->min_freq)
+ return false;
+
+ consts->max_ia_freq = cpu_max_MHz();
+
+ consts->min_ring_freq =
+ intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
+
+ consts->min_gpu_freq = rps->min_freq;
+ consts->max_gpu_freq = rps->max_freq;
+ if (INTEL_GEN(i915) >= 9) {
+ /* Convert GT frequency to 50 HZ units */
+ consts->min_gpu_freq /= GEN9_FREQ_SCALER;
+ consts->max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ return true;
+}
+
+static void calc_ia_freq(struct intel_llc *llc,
+ unsigned int gpu_freq,
+ const struct ia_constants *consts,
+ unsigned int *out_ia_freq,
+ unsigned int *out_ring_freq)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ const int diff = consts->max_gpu_freq - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (INTEL_GEN(i915) >= 9) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (INTEL_GEN(i915) >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(consts->min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(i915)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
+ ring_freq = max(consts->min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ const int min_freq = 15;
+ const int scale = 180;
+
+ /*
+ * On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = consts->max_ia_freq - diff * scale / 2;
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
+
+ *out_ia_freq = ia_freq;
+ *out_ring_freq = ring_freq;
+}
+
+static void gen6_update_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ unsigned int gpu_freq;
+
+ if (!get_ia_constants(llc, &consts))
+ return;
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = consts.max_gpu_freq;
+ gpu_freq >= consts.min_gpu_freq;
+ gpu_freq--) {
+ unsigned int ia_freq, ring_freq;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+ sandybridge_pcode_write(i915,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+void intel_llc_enable(struct intel_llc *llc)
+{
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ gen6_update_ring_freq(llc);
+}
+
+void intel_llc_disable(struct intel_llc *llc)
+{
+ /* Currently there is no HW configuration to be done to disable. */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_llc.c"
+#endif
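
gen6_update_ring_freq() walks the GPU frequencies from max down to min and asks the PCU to pair each with an IA and ring frequency. Only the legacy (pre-Haswell) branch does real arithmetic: ia_freq = max_ia_freq - (max_gpu_freq - gpu_freq) * scale / 2, then rounded into 100MHz units. A worked example with the constants from calc_ia_freq() and invented frequency limits (nothing here is read from hardware):

#include <stdio.h>

int main(void)
{
	const unsigned int max_ia_freq = 3400;	/* sample CPU max, MHz */
	const unsigned int min_gpu_freq = 10;	/* sample, 50MHz units */
	const unsigned int max_gpu_freq = 22;	/* sample, 50MHz units */
	const int min_freq = 15, scale = 180;	/* as in calc_ia_freq() */
	unsigned int gpu_freq;

	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		const int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq;

		if (gpu_freq < min_freq)
			ia_freq = 800;	/* fixed value below the 750MHz floor */
		else
			ia_freq = max_ia_freq - diff * scale / 2;
		ia_freq = (ia_freq + 50) / 100;	/* DIV_ROUND_CLOSEST(x, 100) */

		printf("gpu %2u (x50MHz) -> ia %2u (x100MHz)\n",
		       gpu_freq, ia_freq);
	}
	return 0;
}
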
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
new file mode 100644
index 000000000000..ef09a890d2b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_H
+#define INTEL_LLC_H
+
+struct intel_llc;
+
+void intel_llc_enable(struct intel_llc *llc);
+void intel_llc_disable(struct intel_llc *llc);
+
+#endif /* INTEL_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
new file mode 100644
index 000000000000..ecad4687b930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_TYPES_H
+#define INTEL_LLC_TYPES_H
+
+struct intel_llc {
+};
+
+#endif /* INTEL_LLC_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 06a506c29463..0ac3b26674ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -145,6 +145,7 @@
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -230,17 +231,42 @@ static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring);
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
static void mark_eio(struct i915_request *rq)
{
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+ if (i915_request_completed(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ struct i915_request *active = rq;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ active = rq;
+ }
+ rcu_read_unlock();
+
+ return active;
+}
+
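/*
 * Aside: active_request() walks the timeline backwards from the given
 * request; every older request that has not completed is still active,
 * and the oldest of those is where execution must resume after a reset.
 * A stand-alone model over an array (illustrative only, not i915 API):
 */
#include <stdbool.h>
#include <stdio.h>

static const bool completed[] = { true, true, false, false, false };

static int active_request(int idx)
{
	int active = idx;

	for (int i = idx - 1; i >= 0; i--) {
		if (completed[i])
			break;		/* everything older has retired */
		active = i;
	}
	return active;
}

int main(void)
{
	/* Requests 0-1 completed, so replay must start from request 2. */
	printf("active request: %d\n", active_request(4));
	return 0;
}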
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
@@ -337,10 +363,15 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* However, the priority hint is a mere hint that we may need to
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
*/
- last_prio = effective_prio(rq);
- if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
- last_prio))
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
return false;
/*
@@ -429,12 +460,8 @@ assert_priority_queue(const struct i915_request *prev,
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
-
desc = INTEL_LEGACY_32B_CONTEXT;
if (i915_vm_is_4lvl(ce->vm))
desc = INTEL_LEGACY_64B_CONTEXT;
@@ -444,33 +471,379 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_GEN(engine->i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
- /* bits 12-31 */
+ desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
/*
* The following 32bits are copied into the OA reports (dword 2).
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
if (INTEL_GEN(engine->i915) >= 11) {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
- /* bits 37-47 */
-
desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
/* bits 48-53 */
- /* TODO: decide what to do with SW counter (bits 55-60) */
-
desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
/* bits 61-63 */
- } else {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
return desc;
}
+static u32 *set_offsets(u32 *regs,
+ const u8 *data,
+ const struct intel_engine_cs *engine)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+ (((x) >> 2) & 0x7f)
+#define END() 0
+{
+ const u32 base = engine->mmio_base;
+
+ while (*data) {
+ u8 count, flags;
+
+ if (*data & BIT(7)) { /* skip */
+ regs += *data++ & ~BIT(7);
+ continue;
+ }
+
+ count = *data & 0x3f;
+ flags = *data >> 6;
+ data++;
+
+ *regs = MI_LOAD_REGISTER_IMM(count);
+ if (flags & POSTED)
+ *regs |= MI_LRI_FORCE_POSTED;
+ if (INTEL_GEN(engine->i915) >= 11)
+ *regs |= MI_LRI_CS_MMIO;
+ regs++;
+
+ GEM_BUG_ON(!count);
+ do {
+ u32 offset = 0;
+ u8 v;
+
+ do {
+ v = *data++;
+ offset <<= 7;
+ offset |= v & ~BIT(7);
+ } while (v & BIT(7));
+
+ *regs = base + (offset << 2);
+ regs += 2;
+ } while (--count);
+ }
+
+ return regs;
+}
+
+static const u8 gen8_xcs_offsets[] = {
+ NOP(1),
+ LRI(11, 0),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+
+ NOP(9),
+ LRI(9, 0),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(2, 0),
+ REG16(0x200),
+ REG(0x028),
+
+ END(),
+};
+
+static const u8 gen9_xcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, POSTED),
+ REG16(0x200),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+
+ END(),
+};
+
+static const u8 gen12_xcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END(),
+};
+
+static const u8 gen8_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+static const u8 gen11_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(1, POSTED),
+ REG(0x1b0),
+
+ NOP(10),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+static const u8 gen12_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
+
+static const u8 *reg_offsets(const struct intel_engine_cs *engine)
+{
+ /*
+ * The gen12+ lists only have the registers we program in the basic
+ * default state. We rely on the context image using relative
+ * addressing to automatically fix up the register state between the
+ * physical engines for a virtual engine.
+ */
+ GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+ !intel_engine_has_relative_mmio(engine));
+
+ if (engine->class == RENDER_CLASS) {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return gen11_rcs_offsets;
+ else
+ return gen8_rcs_offsets;
+ } else {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_xcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_xcs_offsets;
+ else
+ return gen8_xcs_offsets;
+ }
+}
+
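/*
 * Aside: set_offsets() consumes a compact byte stream. A lead byte with
 * bit 7 set means "skip N dwords" (NOP); otherwise its low 6 bits give
 * the LRI count and bit 6 the POSTED flag. Each register offset then
 * follows as a big-endian base-128 varint in dword units, so REG() emits
 * one byte and REG16() two. A stand-alone decoder for the varint part
 * (illustrative model, not the kernel code):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t decode_offset(const uint8_t **data)
{
	uint32_t offset = 0;
	uint8_t v;

	do {				/* bit 7 marks a continuation byte */
		v = *(*data)++;
		offset <<= 7;
		offset |= v & 0x7f;
	} while (v & 0x80);

	return offset << 2;		/* offsets are stored in dword units */
}

int main(void)
{
	/* REG(0x034) encodes as one byte, REG16(0x244) as two. */
	const uint8_t stream[] = { 0x034 >> 2,
				   (0x244 >> 9) | 0x80, (0x244 >> 2) & 0x7f };
	const uint8_t *p = stream;

	printf("0x%03x\n", decode_offset(&p));	/* 0x034 */
	printf("0x%03x\n", decode_offset(&p));	/* 0x244 */
	return 0;
}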
static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -489,7 +862,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->active.requests,
sched.link) {
- struct intel_engine_cs *owner;
if (i915_request_completed(rq))
continue; /* XXX */
@@ -504,8 +876,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* engine so that it can be moved across onto another physical
* engine as load dictates.
*/
- owner = rq->hw_context->engine;
- if (likely(owner == engine)) {
+ if (likely(rq->execution_mask == engine->mask)) {
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
@@ -516,6 +887,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
+ struct intel_engine_cs *owner = rq->hw_context->engine;
+
/*
* Decouple the virtual breadcrumb before moving it
* back to the virtual engine -- we don't want the
@@ -525,7 +898,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
*/
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags)) {
- spin_lock(&rq->lock);
+ spin_lock_nested(&rq->lock,
+ SINGLE_DEPTH_NESTING);
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
@@ -561,6 +935,114 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
+static void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ if (engine->stats.active++ == 0)
+ engine->stats.start = ktime_get();
+ GEM_BUG_ON(engine->stats.active == 0);
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ ktime_t last;
+
+ if (engine->stats.active && --engine->stats.active == 0) {
+ /*
+ * The active context count has dropped to zero, so the
+ * GPU is now idle: add the elapsed time to the running total.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.start);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ } else if (engine->stats.active == 0) {
+ /*
+ * After turning on engine stats, context out might be
+ * the first event, in which case we account from the
+ * time stats gathering was turned on.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ }
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
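/*
 * Aside: intel_engine_context_in/out implement a nested busy counter:
 * the clock starts when the active count rises 0->1 and the elapsed
 * span is folded into stats.total when it falls back to 0; the seqlock
 * only lets readers sample a consistent snapshot. Stripped of locking,
 * the accounting reduces to this stand-alone model (illustrative only):
 */
#include <stdio.h>
#include <time.h>

static int active;		/* contexts currently on the engine */
static double start, total;	/* busyness accounting, in seconds */

static double now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

static void context_in(void)
{
	if (active++ == 0)
		start = now();	/* engine just became busy */
}

static void context_out(void)
{
	if (--active == 0)
		total += now() - start;	/* engine just went idle */
}

int main(void)
{
	context_in();		/* first context: clock starts */
	context_in();		/* second context: nested, no restart */
	context_out();
	context_out();		/* last context out: span accumulated */
	printf("busy for %.6f s\n", total);
	return 0;
}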
+static void restore_default_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state)
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+
+ execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->hw_context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+ __func__, engine->name, rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (i915_request_completed(rq))
+ head = rq->tail;
+ else
+ head = active_request(ce->timeline, rq)->head;
+ ce->ring->head = intel_ring_wrap(ce->ring, head);
+ intel_ring_update_space(ce->ring);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ restore_default_state(ce, engine);
+ __execlists_update_reg_state(ce, engine);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+}
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -569,6 +1051,21 @@ __execlists_schedule_in(struct i915_request *rq)
intel_context_get(ce);
+ if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ reset_active(rq, engine);
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ ce->lrc_desc |= (u64)ce->tag << 32;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ ce->lrc_desc &= ~GENMASK_ULL(47, 37);
+ ce->lrc_desc |=
+ (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+ GEN11_SW_CTX_ID_SHIFT;
+ BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -612,6 +1109,12 @@ __execlists_schedule_out(struct i915_request *rq,
{
struct intel_context * const ce = rq->hw_context;
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put(engine->gt);
@@ -654,7 +1157,7 @@ static u64 execlists_update_context(const struct i915_request *rq)
struct intel_context *ce = rq->hw_context;
u64 desc;
- ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+ ce->lrc_reg_state[CTX_RING_TAIL] =
intel_ring_set_tail(rq->ring, rq->tail);
/*
@@ -677,6 +1180,10 @@ static u64 execlists_update_context(const struct i915_request *rq)
desc = ce->lrc_desc;
ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+ /* Wa_1607138340:tgl */
+ if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+
return desc;
}
@@ -699,6 +1206,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ if (!ports[0])
+ return;
+
GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
engine->name, msg,
ports[0]->fence.context,
@@ -719,25 +1229,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
trace_ports(execlists, msg, execlists->pending);
- if (!execlists->pending[0])
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("Nothing pending for promotion!\n");
return false;
+ }
- if (execlists->pending[execlists_num_ports(execlists)])
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
+ execlists_num_ports(execlists));
return false;
+ }
for (port = execlists->pending; (rq = *port); port++) {
- if (ce == rq->hw_context)
+ if (ce == rq->hw_context) {
+ GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
ce = rq->hw_context;
if (i915_request_completed(rq))
continue;
- if (i915_active_is_idle(&ce->active))
+ if (i915_active_is_idle(&ce->active)) {
+ GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+ port - execlists->pending);
+ return false;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
- if (!i915_vma_is_pinned(ce->state))
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
}
return ce;
@@ -814,6 +1344,10 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
+ if (unlikely((prev->flags ^ next->flags) &
+ (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+ return false;
+
if (!can_merge_ctx(prev->hw_context, next->hw_context))
return false;
@@ -823,47 +1357,7 @@ static bool can_merge_rq(const struct i915_request *prev,
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
- u32 base = engine->mmio_base;
-
- /* Must match execlists_init_reg_state()! */
-
- regs[CTX_CONTEXT_CONTROL] =
- i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
- regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
- regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
- regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
- regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));
-
- regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
- regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
- regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
- regs[CTX_SECOND_BB_HEAD_U] =
- i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
- regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
- regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));
-
- regs[CTX_CTX_TIMESTAMP] =
- i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
- regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
- regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
- regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
- regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
- regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
- regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
- regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
- regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
-
- if (engine->class == RENDER_CLASS) {
- regs[CTX_RCS_INDIRECT_CTX] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
- regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
- regs[CTX_BB_PER_CTX_PTR] =
- i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));
-
- regs[CTX_R_PWR_CLK_STATE] =
- i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
- }
+ set_offsets(regs, reg_offsets(engine), engine);
}
static bool virtual_matches(const struct virtual_engine *ve,
@@ -978,7 +1472,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
{
int hint;
- if (!intel_engine_has_semaphores(engine))
+ if (!intel_engine_has_timeslices(engine))
return false;
if (list_is_last(&rq->sched.link, &engine->active.requests))
@@ -999,15 +1493,32 @@ switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
return rq_prio(list_next_entry(rq, sched.link));
}
-static bool
-enable_timeslice(const struct intel_engine_execlists *execlists)
+static inline unsigned long
+timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static unsigned long
+active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *execlists->active;
+ const struct i915_request *rq = *engine->execlists.active;
if (i915_request_completed(rq))
- return false;
+ return 0;
+
+ if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ return 0;
+
+ return timeslice(engine);
+}
+
+static void set_timeslice(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return;
- return execlists->switch_priority_hint >= effective_prio(rq);
+ set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1015,6 +1526,30 @@ static void record_preemption(struct intel_engine_execlists *execlists)
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = last_active(&engine->execlists);
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine));
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1111,7 +1646,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
- !timer_pending(&engine->execlists.timer)) {
+ timer_expired(&engine->execlists.timer)) {
GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
engine->name,
last->fence.context,
@@ -1147,8 +1682,18 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* submission.
*/
if (!list_is_last(&last->sched.link,
- &engine->active.requests))
+ &engine->active.requests)) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ if (!execlists->timer.expires &&
+ need_timeslice(engine, last))
+ set_timer_ms(&execlists->timer,
+ timeslice(engine));
+
return;
+ }
/*
* WaIdleLiteRestore:bdw,skl
@@ -1216,7 +1761,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
unsigned int n;
GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- virtual_update_register_offsets(regs, engine);
+
+ if (!intel_engine_has_relative_mmio(engine))
+ virtual_update_register_offsets(regs,
+ engine);
if (!list_empty(&ve->context.signals))
virtual_xfer_breadcrumbs(ve, engine);
@@ -1299,6 +1847,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last->hw_context == rq->hw_context)
goto done;
+ if (i915_request_has_sentinel(last))
+ goto done;
+
/*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
@@ -1357,11 +1908,28 @@ done:
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
- memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists->switch_priority_hint =
switch_prio(engine, *execlists->pending);
+
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(execlists->active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
+
+ goto skip_submit;
+ }
+
+ memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists_submit_ports(engine);
+
+ set_preempt_timeout(engine);
} else {
+skip_submit:
ring_set_paused(engine, 0);
}
}
@@ -1394,13 +1962,6 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
-enum csb_step {
- CSB_NOP,
- CSB_PROMOTE,
- CSB_PREEMPT,
- CSB_COMPLETE,
-};
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -1427,7 +1988,7 @@ enum csb_step {
* bits 47-57: sw context id of the lrc the GT switched away from
* bits 58-63: sw counter of the lrc the GT switched away from
*/
-static inline enum csb_step
+static inline bool
gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
u32 lower_dw = csb[0];
@@ -1436,9 +1997,6 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
- if (!ctx_away_valid && ctx_to_valid)
- return CSB_PROMOTE;
-
/*
* The context switch detail is not guaranteed to be 5 when a preemption
* occurs, so we can't just check for that. The check below works for
@@ -1446,8 +2004,10 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* instructions and lite-restore. Preempt-to-idle via the CTRL register
* would require some extra handling, but we don't support that.
*/
- if (new_queue && ctx_away_valid)
- return CSB_PREEMPT;
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!ctx_to_valid);
+ return true;
+ }
/*
* switch detail = 5 is covered by the case above and we do not expect a
@@ -1455,30 +2015,13 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* use polling mode.
*/
GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
-
- if (*execlists->active) {
- GEM_BUG_ON(!ctx_away_valid);
- return CSB_COMPLETE;
- }
-
- return CSB_NOP;
+ return false;
}
-static inline enum csb_step
+static inline bool
gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
- unsigned int status = *csb;
-
- if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
- return CSB_PROMOTE;
-
- if (status & GEN8_CTX_STATUS_PREEMPTED)
- return CSB_PREEMPT;
-
- if (*execlists->active)
- return CSB_COMPLETE;
-
- return CSB_NOP;
+ return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
}
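/*
 * Aside: after this refactor a CSB event yields a single boolean for the
 * tracker: "promote" (the pending set replaces the active set, folding
 * the old CSB_PROMOTE and CSB_PREEMPT cases together) or "complete"
 * (port0 finished, advance to port1). A stand-alone model of how that
 * verdict is consumed; the status bit values are assumptions based on
 * the gen8 CSB layout, not taken from this patch:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTX_STATUS_IDLE_ACTIVE	(1u << 0)	/* assumed bit value */
#define CTX_STATUS_PREEMPTED	(1u << 1)	/* assumed bit value */
#define CTX_STATUS_COMPLETE	(1u << 4)	/* assumed bit value */

static bool csb_parse(uint32_t csb)
{
	return csb & (CTX_STATUS_IDLE_ACTIVE | CTX_STATUS_PREEMPTED);
}

int main(void)
{
	const uint32_t events[] = {
		CTX_STATUS_IDLE_ACTIVE,	/* idle -> active: promote */
		CTX_STATUS_COMPLETE,	/* port0 done: complete */
		CTX_STATUS_PREEMPTED,	/* preemption: also promote */
	};

	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		printf("event %u: %s\n", i,
		       csb_parse(events[i]) ? "promote pending -> active"
					    : "port0 completed, advance");
	return 0;
}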
static void process_csb(struct intel_engine_cs *engine)
@@ -1488,7 +2031,14 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1517,7 +2067,7 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
- enum csb_step csb_step;
+ bool promote;
if (++head == num_entries)
head = 0;
@@ -1545,20 +2095,19 @@ static void process_csb(struct intel_engine_cs *engine)
buf[2 * head + 0], buf[2 * head + 1]);
if (INTEL_GEN(engine->i915) >= 12)
- csb_step = gen12_csb_parse(execlists, buf + 2 * head);
+ promote = gen12_csb_parse(execlists, buf + 2 * head);
else
- csb_step = gen8_csb_parse(execlists, buf + 2 * head);
+ promote = gen8_csb_parse(execlists, buf + 2 * head);
+ if (promote) {
+ if (!inject_preempt_hang(execlists))
+ ring_set_paused(engine, 0);
- switch (csb_step) {
- case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
+ /* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", execlists->active);
-
while (*execlists->active)
execlists_schedule_out(*execlists->active++);
- /* fallthrough */
- case CSB_PROMOTE: /* switch pending to inflight */
- GEM_BUG_ON(*execlists->active);
+ /* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
execlists->active =
memcpy(execlists->inflight,
@@ -1566,16 +2115,13 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_num_ports(execlists) *
sizeof(*execlists->pending));
- if (enable_timeslice(execlists))
- mod_timer(&execlists->timer, jiffies + 1);
-
- if (!inject_preempt_hang(execlists))
- ring_set_paused(engine, 0);
+ set_timeslice(engine);
WRITE_ONCE(execlists->pending[0], NULL);
- break;
+ } else {
+ GEM_BUG_ON(!*execlists->active);
- case CSB_COMPLETE: /* port0 completed, advanced to port1 */
+ /* port0 completed, advanced to port1 */
trace_ports(execlists, "completed", execlists->active);
/*
@@ -1590,10 +2136,6 @@ static void process_csb(struct intel_engine_cs *engine)
GEM_BUG_ON(execlists->active - execlists->inflight >
execlists_num_ports(execlists));
- break;
-
- case CSB_NOP:
- break;
}
} while (head != tail);
@@ -1623,6 +2165,43 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
}
}
+static noinline void preempt_reset(struct intel_engine_cs *engine)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (i915_modparams.reset < 3)
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ GEM_TRACE("%s: preempt timeout %lu+%ums\n",
+ engine->name,
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+ intel_engine_reset(engine, "preemption time out");
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return READ_ONCE(engine->execlists.pending[0]);
+}
+
/*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
@@ -1630,23 +2209,39 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- unsigned long flags;
+ bool timeout = preempt_timeout(engine);
process_csb(engine);
- if (!READ_ONCE(engine->execlists.pending[0])) {
+ if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
+ unsigned long flags;
+
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
spin_unlock_irqrestore(&engine->active.lock, flags);
+
+ /* Recheck after serialising with direct-submission */
+ if (timeout && preempt_timeout(engine))
+ preempt_reset(engine);
}
}
-static void execlists_submission_timer(struct timer_list *timer)
+static void __execlists_kick(struct intel_engine_execlists *execlists)
{
- struct intel_engine_cs *engine =
- from_timer(engine, timer, execlists.timer);
-
/* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
}
static void queue_request(struct intel_engine_cs *engine,
@@ -1726,7 +2321,6 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr += engine->context_size;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -1738,7 +2332,6 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr += engine->context_size;
if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
@@ -1752,14 +2345,13 @@ static void execlists_context_unpin(struct intel_context *ce)
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
ce->engine);
- i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
-__execlists_update_reg_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -1767,16 +2359,16 @@ __execlists_update_reg_state(struct intel_context *ce,
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
- regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
- regs[CTX_RING_HEAD + 1] = ring->head;
- regs[CTX_RING_TAIL + 1] = ring->tail;
+ regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_HEAD] = ring->head;
+ regs[CTX_RING_TAIL] = ring->tail;
/* RPCS */
if (engine->class == RENDER_CLASS) {
- regs[CTX_R_PWR_CLK_STATE + 1] =
+ regs[CTX_R_PWR_CLK_STATE] =
intel_sseu_make_rpcs(engine->i915, &ce->sseu);
- i915_oa_init_reg_state(engine, ce, regs);
+ i915_oa_init_reg_state(ce, engine);
}
}
@@ -1802,18 +2394,12 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
- ret = i915_gem_context_pin_hw_id(ce->gem_context);
- if (ret)
- goto unpin_map;
-
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
return 0;
-unpin_map:
- i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
intel_context_active_release(ce);
err:
@@ -1869,7 +2455,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1885,7 +2471,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = MI_NOOP;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_timeline(rq)->hwsp_offset;
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
@@ -1897,60 +2483,6 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
return 0;
}
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- /* Be doubly sure the LRI have landed before proceeding */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Re-invalidate the TLB for luck */
- return engine->emit_flush(rq, EMIT_INVALIDATE);
-}
-
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
@@ -1973,10 +2505,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->hw_context->vm))
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- else
- ret = emit_pdps(request);
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
@@ -2028,12 +2557,6 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 slm_offset(struct intel_engine_cs *engine)
-{
- return intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
-}
-
/*
* Typically we only have one indirect_ctx and per_ctx batch buffer which are
* initialized at the beginning and shared across all contexts but this field
@@ -2062,10 +2585,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* Actual scratch location is at 128 bytes offset */
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- slm_offset(engine));
+ LRC_PPHWSP_SCRATCH_ADDR);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2423,27 +2946,29 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
&execlists->csb_status[reset_value]);
}
-static struct i915_request *active_request(struct i915_request *rq)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- const struct intel_context * const ce = rq->hw_context;
- struct i915_request *active = NULL;
- struct list_head *list;
-
- if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
- return rq;
-
- list = &rq->timeline->requests;
- list_for_each_entry_from_reverse(rq, list, link) {
- if (i915_request_completed(rq))
- break;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
- if (rq->hw_context != ce)
- break;
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+ int x;
- active = rq;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- return active;
}
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2451,7 +2976,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
struct i915_request *rq;
- u32 *regs;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
process_csb(engine); /* drain preemption events */
@@ -2467,16 +2995,23 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
ce = rq->hw_context;
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- rq = active_request(rq);
- if (!rq) {
- ce->ring->head = ce->ring->tail;
+
+ if (i915_request_completed(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
}
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ rq = active_request(ce->timeline, rq);
ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(ce->ring->head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -2516,19 +3051,16 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = ce->lrc_reg_state;
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ restore_default_state(ce, engine);
out_replay:
- GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
+ GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
engine->name, ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
+ __execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
/* Push back any incomplete requests for replay after the reset. */
@@ -2749,7 +3281,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
}
*cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
intel_ring_advance(request, cs);
@@ -2760,10 +3292,6 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- u32 scratch_addr =
- intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2785,7 +3313,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
/*
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
@@ -2818,7 +3346,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (dc_flush_wa)
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
@@ -2831,11 +3359,6 @@ static int gen8_emit_flush_render(struct i915_request *request,
static int gen11_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- const u32 scratch_addr =
- intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
-
if (mode & EMIT_FLUSH) {
u32 *cs;
u32 flags = 0;
@@ -2848,13 +3371,13 @@ static int gen11_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
cs = intel_ring_begin(request, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(request, cs);
}
@@ -2872,14 +3395,106 @@ static int gen11_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
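+	/* bit 0 carries the disable request; bit 8 is assumed to be its mask bit */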
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static int gen12_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
cs = intel_ring_begin(request, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(request, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ *cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
+
+ /*
+ * Wa_1604544889:tgl
+ */
+ if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ flags = 0;
+ flags |= PIPE_CONTROL_CS_STALL;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags,
+ LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
}
return 0;
@@ -2933,7 +3548,7 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
0);
return gen8_emit_fini_breadcrumb_footer(request, cs);
@@ -2941,28 +3556,28 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- request->timeline->hwsp_offset,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
return gen8_emit_fini_breadcrumb_footer(request, cs);
}
-static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
- u32 *cs)
+static u32 *
+gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -2973,9 +3588,88 @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
return gen8_emit_fini_breadcrumb_footer(request, cs);
}
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the preamble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
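
A hedged sketch of the MI_ARB_CHECK-based mitigation described above, bracketing the self-modifying writes with the pre-parser toggle in the same way gen12_emit_flush_render() above brackets its TLB invalidation (emit_smc_writes is a hypothetical helper):

	/* keep the gen12 pre-parser from racing ahead of the SMC writes */
	static u32 *emit_smc_writes(struct i915_request *rq, u32 *cs)
	{
		*cs++ = preparser_disable(true);	/* stop pre-fetching */

		/* ... MI_STORE_DWORD_IMM writes patching later instructions ... */

		*cs++ = preparser_disable(false);	/* resume pre-fetching */
		return cs;
	}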
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
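
For readability, a commented restatement of the dwords emitted above (per the MI_SEMAPHORE_WAIT layout as understood here; the busywait completes once the preempt HWS dword reads back as the inline data, i.e. zero):

	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |	/* opcode, token form */
		MI_SEMAPHORE_GLOBAL_GTT |	/* address is in the GGTT */
		MI_SEMAPHORE_POLL |		/* keep polling the address */
		MI_SEMAPHORE_SAD_EQ_SDD;	/* done when *addr == data */
	*cs++ = 0;				/* semaphore data to compare */
	*cs++ = intel_hws_preempt_address(request->engine); /* address lo */
	*cs++ = 0;				/* address hi */
	*cs++ = 0;				/* token */
	*cs++ = MI_NOOP;			/* pad to even length */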
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = gen12_emit_preempt_busywait(request, cs);
+
+ request->tail = intel_ring_offset(request, cs);
+ assert_ring_tail_valid(request->ring, request->tail);
+
+ return gen8_emit_wa_tail(request, cs);
+}
+
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ 0);
+
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
+
+static u32 *
+gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_HDC_PIPELINE_FLUSH);
+
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
+
static void execlists_park(struct intel_engine_cs *engine)
{
- del_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2998,6 +3692,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
}
static void execlists_destroy(struct intel_engine_cs *engine)
@@ -3025,6 +3722,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
engine->set_default_submission = intel_execlists_set_default_submission;
@@ -3070,6 +3769,9 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
case 12:
+ engine->emit_flush = gen12_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
case 11:
engine->emit_flush = gen11_emit_flush_render;
engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
@@ -3085,7 +3787,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -3142,7 +3845,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
return 0;
}
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
+static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
@@ -3175,86 +3878,50 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
return indirect_ctx_offset;
}
-static void execlists_init_reg_state(u32 *regs,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
- bool rcs = engine->class == RENDER_CLASS;
- u32 base = engine->mmio_base;
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
- MI_LRI_FORCE_POSTED;
-
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
+static void init_common_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring)
+{
+ regs[CTX_CONTEXT_CONTROL] =
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(engine->i915) < 11) {
- regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ if (INTEL_GEN(engine->i915) < 11)
+ regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
- }
- CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
- CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
- RING_CTL_SIZE(ring->size) | RING_VALID);
- CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
- CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
- if (rcs) {
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
- RING_INDIRECT_CTX_OFFSET(base), 0);
- if (wa_ctx->indirect_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- regs[CTX_RCS_INDIRECT_CTX + 1] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset) |
- (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
- regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
- intel_lr_indirect_ctx_offset(engine) << 6;
- }
- CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
- if (wa_ctx->per_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+ regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_BB_STATE] = RING_BB_PPGTT;
+}
- regs[CTX_BB_PER_CTX_PTR + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
+static void init_wa_bb_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ u32 pos_bb_per_ctx)
+{
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
+
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ regs[pos_bb_per_ctx] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+ if (wa_ctx->indirect_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ regs[pos_bb_per_ctx + 2] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
- CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
- /* PDP values well be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+ regs[pos_bb_per_ctx + 4] =
+ intel_lr_indirect_ctx_offset(engine) << 6;
+ }
+}
+static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
+{
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
@@ -3267,15 +3934,47 @@ static void execlists_init_reg_state(u32 *regs,
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
+}
- if (rcs) {
- regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
+}
+
+static void execlists_init_reg_state(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close)
+{
+ /*
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
+ */
+ u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
+
+ if (close) { /* Close the batch; used mainly by live_lrc_layout() */
+ *bbe = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(engine->i915) >= 10)
+ *bbe |= BIT(0);
}
- regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
- regs[CTX_END] |= BIT(0);
+ init_common_reg_state(regs, engine, ring);
+ init_ppgtt_reg_state(regs, vm_alias(ce->vm));
+
+ init_wa_bb_reg_state(regs, engine,
+ INTEL_GEN(engine->i915) >= 12 ?
+ GEN12_CTX_BB_PER_CTX_PTR :
+ CTX_BB_PER_CTX_PTR);
}
static int
@@ -3284,6 +3983,7 @@ populate_lr_context(struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
+ bool inhibit = true;
void *vaddr;
u32 *regs;
int ret;
@@ -3298,12 +3998,6 @@ populate_lr_context(struct intel_context *ce,
set_redzone(vaddr, engine);
if (engine->default_state) {
- /*
- * We only want to copy over the template context state;
- * skipping over the headers reserved for GuC communication,
- * leaving those as zero.
- */
- const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
void *defaults;
defaults = i915_gem_object_pin_map(engine->default_state,
@@ -3313,23 +4007,22 @@ populate_lr_context(struct intel_context *ce,
goto err_unpin_ctx;
}
- memcpy(vaddr + start, defaults + start, engine->context_size);
+ memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
+ inhibit = false;
}
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ce, engine, ring);
- if (!engine->default_state)
- regs[CTX_CONTEXT_CONTROL + 1] |=
+ execlists_init_reg_state(regs, ce, engine, ring, inhibit);
+ if (inhibit)
+ regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
ret = 0;
err_unpin_ctx:
- __i915_gem_object_flush_map(ctx_obj,
- LRC_HEADER_PAGES * PAGE_SIZE,
- engine->context_size);
+ __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
@@ -3346,11 +4039,6 @@ static int __execlists_context_alloc(struct intel_context *ce,
GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
- /*
- * Before the actual start of the context image, we insert a few pages
- * for our own use and for sharing with the GuC.
- */
- context_size += LRC_HEADER_PAGES * PAGE_SIZE;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
@@ -3462,8 +4150,9 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
return;
swap(ve->siblings[swp], ve->siblings[0]);
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- ve->siblings[0]);
+ if (!intel_engine_has_relative_mmio(ve->siblings[0]))
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ ve->siblings[0]);
}
static int virtual_context_pin(struct intel_context *ce)
@@ -3714,6 +4403,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.i915 = ctx->i915;
ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
@@ -3737,6 +4427,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
@@ -3899,6 +4590,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
return 0;
}
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+
+ if (sibling >= ve->num_siblings)
+ return NULL;
+
+ return ve->siblings[sibling];
+}
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -3987,6 +4690,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
u32 head,
bool scrub)
{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
@@ -3995,16 +4700,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- if (scrub) {
- u32 *regs = ce->lrc_reg_state;
-
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
- }
+ if (scrub)
+ restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head = head;
@@ -4013,6 +4710,13 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
__execlists_update_reg_state(ce, engine);
}
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ intel_execlists_set_default_submission;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2bba82bcc16..04511d8ebdc1 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -43,6 +43,7 @@ struct intel_engine_cs;
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
@@ -66,6 +67,12 @@ struct intel_engine_cs;
#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
@@ -79,30 +86,15 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
-
-/*
- * We allocate a header at the start of the context image for our own
- * use, therefore the actual location of the logical state is offset
- * from the start of the VMA. The layout is
- *
- * | [guc] | [hwsp] [logical state] |
- * |<- our header ->|<- context image ->|
- *
- */
-/* The first page is used for sharing data with the GuC */
-#define LRC_GUCSHR_PN (0)
-#define LRC_GUCSHR_SZ (1)
/* At the start of the context image is its per-process HWS page */
-#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
-/* Finally we have the logical state for the context */
+/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
-/*
- * Currently we include the PPHWSP in __intel_engine_context_size() so
- * the size of the header is synonymous with the start of the PPHWSP.
- */
-#define LRC_HEADER_PAGES LRC_PPHWSP_PN
+/* Space within PPHWSP reserved to be used as scratch */
+#define LRC_PPHWSP_SCRATCH 0x34
+#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
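
With PIPE_CONTROL_STORE_DATA_INDEX set, the post-sync write address is interpreted as an offset into the context's own PPHWSP rather than as a global GTT address, which is what lets this scratch slot (dword 0x34, i.e. byte offset 0xd0) replace the shared scratch page throughout this patch. A hedged usage sketch:

	/* qword write landing in the context's own PPHWSP scratch slot */
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_QW_WRITE |
				    PIPE_CONTROL_STORE_DATA_INDEX,
				    LRC_PPHWSP_SCRATCH_ADDR);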
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
@@ -131,4 +123,11 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index b8f20ad71169..06ab0276e10e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,55 +9,41 @@
#include <linux/types.h>
-/* GEN8+ Reg State Context */
-#define CTX_LRI_HEADER_0 0x01
-#define CTX_CONTEXT_CONTROL 0x02
-#define CTX_RING_HEAD 0x04
-#define CTX_RING_TAIL 0x06
-#define CTX_RING_BUFFER_START 0x08
-#define CTX_RING_BUFFER_CONTROL 0x0a
-#define CTX_BB_HEAD_U 0x0c
-#define CTX_BB_HEAD_L 0x0e
-#define CTX_BB_STATE 0x10
-#define CTX_SECOND_BB_HEAD_U 0x12
-#define CTX_SECOND_BB_HEAD_L 0x14
-#define CTX_SECOND_BB_STATE 0x16
-#define CTX_BB_PER_CTX_PTR 0x18
-#define CTX_RCS_INDIRECT_CTX 0x1a
-#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
-#define CTX_LRI_HEADER_1 0x21
-#define CTX_CTX_TIMESTAMP 0x22
-#define CTX_PDP3_UDW 0x24
-#define CTX_PDP3_LDW 0x26
-#define CTX_PDP2_UDW 0x28
-#define CTX_PDP2_LDW 0x2a
-#define CTX_PDP1_UDW 0x2c
-#define CTX_PDP1_LDW 0x2e
-#define CTX_PDP0_UDW 0x30
-#define CTX_PDP0_LDW 0x32
-#define CTX_LRI_HEADER_2 0x41
-#define CTX_R_PWR_CLK_STATE 0x42
-#define CTX_END 0x44
-
-#define CTX_REG(reg_state, pos, reg, val) do { \
- u32 *reg_state__ = (reg_state); \
- const u32 pos__ = (pos); \
- (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \
- (reg_state__)[(pos__) + 1] = (val); \
-} while (0)
+/* GEN8 to GEN11 Reg State Context */
+#define CTX_CONTEXT_CONTROL (0x02 + 1)
+#define CTX_RING_HEAD (0x04 + 1)
+#define CTX_RING_TAIL (0x06 + 1)
+#define CTX_RING_BUFFER_START (0x08 + 1)
+#define CTX_RING_BUFFER_CONTROL (0x0a + 1)
+#define CTX_BB_STATE (0x10 + 1)
+#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_PDP3_UDW (0x24 + 1)
+#define CTX_PDP3_LDW (0x26 + 1)
+#define CTX_PDP2_UDW (0x28 + 1)
+#define CTX_PDP2_LDW (0x2a + 1)
+#define CTX_PDP1_UDW (0x2c + 1)
+#define CTX_PDP1_LDW (0x2e + 1)
+#define CTX_PDP0_UDW (0x30 + 1)
+#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_R_PWR_CLK_STATE (0x42 + 1)
+
+#define GEN9_CTX_RING_MI_MODE 0x54
+
+/* GEN12+ Reg State Context */
+#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
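
The "(pos + 1)" pattern reflects the new convention: the MI_LOAD_REGISTER_IMM headers and register offsets are now written separately by set_offsets(), so each CTX_* define names the dword holding the register's value directly. A before/after sketch (tail is a hypothetical value):

	/* before: the define named the offset dword, value lived at +1 */
	regs[CTX_RING_TAIL + 1] = tail;

	/* after: the define already points at the value dword */
	regs[CTX_RING_TAIL] = tail;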
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
- (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \
} while (0)
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = px_dma(ppgtt->pd); \
- (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 728704bbbe18..2b977991b785 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -26,6 +26,7 @@
#include "intel_gt.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
+#include "intel_ring.h"
/* structures required */
struct drm_i915_mocs_entry {
@@ -199,14 +200,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
- /* Bypass LLC - Uncached (EHL+) */ \
- MOCS_ENTRY(16, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_1_UC), \
- /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
- MOCS_ENTRY(17, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -270,7 +263,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
L3_1_UC),
/* HW Special Case (Displayable) */
MOCS_ENTRY(61,
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+ LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
};
@@ -287,10 +280,9 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
GEN11_MOCS_ENTRIES
};
-static bool get_mocs_settings(struct intel_gt *gt,
+static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
- struct drm_i915_private *i915 = gt->i915;
bool result = false;
if (INTEL_GEN(i915) >= 12) {
@@ -331,9 +323,9 @@ static bool get_mocs_settings(struct intel_gt *gt,
return result;
}
-static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
+static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
{
- switch (engine_id) {
+ switch (engine->id) {
case RCS0:
return GEN9_GFX_MOCS(index);
case VCS0:
@@ -347,7 +339,7 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
case VCS2:
return GEN11_MFX2_MOCS(index);
default:
- MISSING_CASE(engine_id);
+ MISSING_CASE(engine->id);
return INVALID_MMIO_REG;
}
}
@@ -365,118 +357,25 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].control_value;
}
-/**
- * intel_mocs_init_engine() - emit the mocs control table
- * @engine: The engine for whom to emit the registers.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- */
-void intel_mocs_init_engine(struct intel_engine_cs *engine)
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- struct intel_gt *gt = engine->gt;
- struct intel_uncore *uncore = gt->uncore;
- struct drm_i915_mocs_table table;
- unsigned int index;
- u32 unused_value;
-
- /* Platforms with global MOCS do not need per-engine initialization. */
- if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- return;
-
- /* Called under a blanket forcewake */
- assert_forcewakes_active(uncore, FORCEWAKE_ALL);
-
- if (!get_mocs_settings(gt, &table))
- return;
-
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].control_value;
-
- for (index = 0; index < table.size; index++) {
- u32 value = get_entry_control(&table, index);
+ struct intel_uncore *uncore = engine->uncore;
+ u32 unused_value = table->table[I915_MOCS_PTE].control_value;
+ unsigned int i;
+ for (i = 0; i < table->size; i++)
intel_uncore_write_fw(uncore,
- mocs_register(engine->id, index),
- value);
- }
+ mocs_register(engine, i),
+ get_entry_control(table, i));
- /* All remaining entries are also unused */
- for (; index < table.n_entries; index++)
+ /* All remaining entries are unused */
+ for (; i < table->n_entries; i++)
intel_uncore_write_fw(uncore,
- mocs_register(engine->id, index),
+ mocs_register(engine, i),
unused_value);
}
-static void intel_mocs_init_global(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
- struct drm_i915_mocs_table table;
- unsigned int index;
-
- GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
-
- if (!get_mocs_settings(gt, &table))
- return;
-
- if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
- return;
-
- for (index = 0; index < table.size; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[index].control_value);
-
- /*
- * Ok, now set the unused entries to the invalid entry (index 0). These
- * entries are officially undefined and no contract for the contents and
- * settings is given for these entries.
- */
- for (; index < table.n_entries; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[0].control_value);
-}
-
-static int emit_mocs_control_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
-{
- enum intel_engine_id engine = rq->engine->id;
- unsigned int index;
- u32 unused_value;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].control_value;
-
- cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries);
-
- for (index = 0; index < table->size; index++) {
- u32 value = get_entry_control(table, index);
-
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = value;
- }
-
- /* All remaining entries are also unused */
- for (; index < table->n_entries; index++) {
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = unused_value;
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
/*
* Get l3cc_value from MOCS entry taking into account when it's not used:
* I915_MOCS_PTE's value is returned in this case.
@@ -494,141 +393,99 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
u16 low,
u16 high)
{
- return low | high << 16;
+ return low | (u32)high << 16;
}
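
The newly added cast is not cosmetic: a u16 operand promotes to signed int before the shift, so a high value of 0xffff shifted left by 16 would overflow int, which is undefined behaviour; casting to u32 first keeps the shift in unsigned arithmetic. A standalone sketch:

	#include <stdint.h>

	static uint32_t l3cc_pack(uint16_t low, uint16_t high)
	{
		/* cast before shifting: (uint32_t)0xffff << 16 is well-defined */
		return low | (uint32_t)high << 16;
	}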
-static int emit_mocs_l3cc_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
+static void init_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- u16 unused_value;
+ struct intel_uncore *uncore = engine->uncore;
+ u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
unsigned int i;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].l3cc_value;
-
- cs = intel_ring_begin(rq, 2 + table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2);
for (i = 0; i < table->size / 2; i++) {
u16 low = get_entry_l3cc(table, 2 * i);
u16 high = get_entry_l3cc(table, 2 * i + 1);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, high);
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, low, high));
}
/* Odd table size - 1 left over */
- if (table->size & 0x01) {
+ if (table->size & 1) {
u16 low = get_entry_l3cc(table, 2 * i);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, unused_value);
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, low, unused_value));
i++;
}
/* All remaining entries are also unused */
- for (; i < table->n_entries / 2; i++) {
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, unused_value, unused_value);
- }
+ for (; i < table->n_entries / 2; i++)
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, unused_value,
+ unused_value));
+}
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_mocs_table table;
+
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ if (!get_mocs_settings(engine->i915, &table))
+ return;
+
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ init_mocs_table(engine, &table);
- return 0;
+ if (engine->class == RENDER_CLASS)
+ init_l3cc_table(engine, &table);
}
-static void intel_mocs_init_l3cc_table(struct intel_gt *gt)
+static void intel_mocs_init_global(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
- unsigned int i;
- u16 unused_value;
+ unsigned int index;
- if (!get_mocs_settings(gt, &table))
+ /*
+ * LLC and eDRAM control values are not applicable to dgfx
+ */
+ if (IS_DGFX(gt->i915))
return;
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].l3cc_value;
-
- for (i = 0; i < table.size / 2; i++) {
- u16 low = get_entry_l3cc(&table, 2 * i);
- u16 high = get_entry_l3cc(&table, 2 * i + 1);
+ GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, high));
- }
+ if (!get_mocs_settings(gt->i915, &table))
+ return;
- /* Odd table size - 1 left over */
- if (table.size & 0x01) {
- u16 low = get_entry_l3cc(&table, 2 * i);
+ if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
+ return;
+ for (index = 0; index < table.size; index++)
intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, unused_value));
- i++;
- }
+ GEN12_GLOBAL_MOCS(index),
+ table.table[index].control_value);
- /* All remaining entries are also unused */
- for (; i < table.n_entries / 2; i++)
+ /*
+ * Ok, now set the unused entries to the invalid entry (index 0). These
+ * entries are officially undefined and no contract for their contents
+ * or settings is given.
+ */
+ for (; index < table.n_entries; index++)
intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, unused_value,
- unused_value));
-}
-
-/**
- * intel_mocs_emit() - program the MOCS register.
- * @rq: Request to use to set up the MOCS tables.
- *
- * This function will emit a batch buffer with the values required for
- * programming the MOCS register values for all the currently supported
- * rings.
- *
- * These registers are partially stored in the RCS context, so they are
- * emitted at the same time so that when a context is created these registers
- * are set up. These registers have to be emitted into the start of the
- * context as setting the ELSP will re-init some of these registers back
- * to the hw values.
- *
- * Return: 0 on success, otherwise the error status.
- */
-int intel_mocs_emit(struct i915_request *rq)
-{
- struct drm_i915_mocs_table t;
- int ret;
-
- if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ||
- rq->engine->class != RENDER_CLASS)
- return 0;
-
- if (get_mocs_settings(rq->engine->gt, &t)) {
- /* Program the RCS control registers */
- ret = emit_mocs_control_table(rq, &t);
- if (ret)
- return ret;
-
- /* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(rq, &t);
- if (ret)
- return ret;
- }
-
- return 0;
+ GEN12_GLOBAL_MOCS(index),
+ table.table[0].control_value);
}
void intel_mocs_init(struct intel_gt *gt)
{
- intel_mocs_init_l3cc_table(gt);
-
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
intel_mocs_init_global(gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 2ae816b7ca19..83371f3e6ba1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,13 +49,10 @@
* context handling keep the MOCS in step.
*/
-struct i915_request;
struct intel_engine_cs;
struct intel_gt;
void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
-int intel_mocs_emit(struct i915_request *rq);
-
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
new file mode 100644
index 000000000000..700104b90163
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -0,0 +1,787 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: RC6
+ *
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing as little as 0V while in this stage.
+ * This stage is entered automatically when the GPU is idle and RC6 support
+ * is enabled; the GPU wakes up automatically as soon as a new workload
+ * arises.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+
+static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
+{
+ return container_of(rc6, struct intel_gt, rc6);
+}
+
+static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->uncore;
+}
+
+static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->i915;
+}
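
These accessors rely on the kernel's container_of() idiom: struct intel_rc6 is embedded in struct intel_gt, so the enclosing structure is recovered by subtracting the member's offset. A minimal userspace rendition of the idiom (simplified, without the kernel's type checking):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct gt {
		int id;
		struct { int dummy; } rc6;	/* embedded member */
	};

	/* given a pointer to gt->rc6, recover the enclosing struct gt */
	static struct gt *example_rc6_to_gt(void *rc6)
	{
		return container_of(rc6, struct gt, rc6);
	}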
+
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static void gen11_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1));
+
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+}
+
+static void gen9_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6_mode;
+
+ /* 2b: Program RC6 thresholds.*/
+ if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ } else {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* WaRsUseTimeoutMode:cnl (pre-prod) */
+ if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+ else
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode);
+
+ /*
+ * WaRsDisableCoarsePowerGating:skl,cnl
+ * - Render/Media PG need to be disabled with RC6.
+ */
+ if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+}
+
+static void gen8_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+
+ /* 3: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE);
+}
+
+static void gen6_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6vids, rc6_mask;
+ int ret;
+
+ set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC1e_THRESHOLD, 1000);
+ if (IS_IVYBRIDGE(i915))
+ set(uncore, GEN6_RC6_THRESHOLD, 125000);
+ else
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6p_THRESHOLD, 150000);
+ set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* We don't use those on Haswell */
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ set(uncore, GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+ if (IS_GEN(i915, 6) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN(i915, 6) &&
+ (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+}
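
The rc6vids handling above leans on the driver's VID encoding which, per the GEN6_ENCODE/DECODE_RC6_VID macros assumed here, maps VID codes to millivolts in 5 mV steps above a 245 mV base, so a decoded minimum below 450 mV is bumped back up:

	/* assumed to mirror i915's GEN6_{EN,DE}CODE_RC6_VID macros */
	static unsigned int decode_rc6_vid(unsigned int vid)
	{
		return vid * 5 + 245;			/* code -> mV */
	}

	static unsigned int encode_rc6_vid(unsigned int mv)
	{
		return mv < 245 ? 0 : (mv - 245) / 5;	/* mV -> code */
	}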
+
+/* Check that the pcbr address is not empty. */
+static int chv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
+
+ pctx_paddr = (paddr & ~4095);
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+ }
+
+ return 0;
+}
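
The fix-up above carves the power context from the top of stolen memory; PCBR takes a 4KiB-aligned physical address, hence the mask. A sketch of the arithmetic under those assumptions (dsm_end is a hypothetical stand-in for the last byte of stolen memory):

	/* place a 32KiB pctx at the top of stolen, 4KiB-aligned for PCBR */
	u64 paddr = dsm_end + 1 - (32 << 10);
	u32 pcbr  = paddr & ~4095;		/* PCBR wants 4KiB alignment */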
+
+static int vlv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_gem_object *pctx;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ resource_size_t pcbr_offset;
+
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pctx = i915_gem_object_create_stolen_for_preallocated(i915,
+ pcbr_offset,
+ I915_GTT_OFFSET_NONE,
+ pctx_size);
+ if (IS_ERR(pctx))
+ return PTR_ERR(pctx);
+
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+ * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(i915, pctx_size);
+ if (IS_ERR(pctx)) {
+ DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ return PTR_ERR(pctx);
+ }
+
+ GEM_BUG_ON(range_overflows_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+
+out:
+ rc6->pctx = pctx;
+ return 0;
+}
+
+static void chv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2a: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /* TO threshold set to 500 us (0x186 * 1.28 us) */
+ set(uncore, GEN6_RC6_THRESHOLD, 0x186);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* 3: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+}
+
+static void vlv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 0x557);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ set(uncore, GEN6_RC_CONTROL,
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+}
+
+static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ u32 rc6_ctx_base, rc_ctl, rc_sw_target;
+ bool enable_rc6 = true;
+
+ rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
+ rc_sw_target &= RC_SW_TARGET_STATE_MASK;
+ rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
+ DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
+
+ if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+ DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ /*
+ * The exact context size is not known for BXT, so assume a page size
+ * for this check.
+ */
+ rc6_ctx_base =
+ intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+ if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ enable_rc6 = false;
+ }
+
+ if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
+ DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
+ DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
+ DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
+ DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ return enable_rc6;
+}
+
+static bool rc6_supported(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!HAS_RC6(i915))
+ return false;
+
+ if (intel_vgpu_active(i915))
+ return false;
+
+ if (is_mock_gt(rc6_to_gt(rc6)))
+ return false;
+
+ if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
+ dev_notice(i915->drm.dev,
+ "RC6 and powersaving disabled by BIOS\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void rpm_get(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(rc6->wakeref);
+ pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = true;
+}
+
+static void rpm_put(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(!rc6->wakeref);
+ pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = false;
+}
+
+static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6)
+{
+ return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO);
+}
+
+static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (intel_rc6_ctx_corrupted(rc6)) {
+ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+ rc6->ctx_corrupted = true;
+ }
+}
+
+/**
+ * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @rc6: rc6 state
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6)
+{
+ if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) {
+ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+ rc6->ctx_corrupted = false;
+ }
+}
+
+/**
+ * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @rc6: rc6 state
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ */
+void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (rc6->ctx_corrupted)
+ return;
+
+ if (!intel_rc6_ctx_corrupted(rc6))
+ return;
+
+ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+ intel_rc6_disable(rc6);
+ rc6->ctx_corrupted = true;
+
+ return;
+}
+
+static void __intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (INTEL_GEN(i915) >= 9)
+ set(uncore, GEN9_PG_ENABLE, 0);
+ set(uncore, GEN6_RC_CONTROL, 0);
+ set(uncore, GEN6_RC_STATE, 0);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+void intel_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ int err;
+
+ /* Disable runtime-pm until we can save the GPU state with rc6 pctx */
+ rpm_get(rc6);
+
+ if (!rc6_supported(rc6))
+ return;
+
+ intel_rc6_ctx_wa_init(rc6);
+
+ if (IS_CHERRYVIEW(i915))
+ err = chv_rc6_init(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ err = vlv_rc6_init(rc6);
+ else
+ err = 0;
+
+ /* Sanitize rc6, ensure it is disabled before we are ready. */
+ __intel_rc6_disable(rc6);
+
+ rc6->supported = err == 0;
+}
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6)
+{
+ if (rc6->enabled) { /* unbalanced suspend/resume */
+ rpm_get(rc6);
+ rc6->enabled = false;
+ }
+
+ if (rc6->supported)
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->supported)
+ return;
+
+ GEM_BUG_ON(rc6->enabled);
+
+ if (rc6->ctx_corrupted)
+ return;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rc6_enable(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 11)
+ gen11_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 9)
+ gen9_rc6_enable(rc6);
+ else if (IS_BROADWELL(i915))
+ gen8_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rc6_enable(rc6);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ /* rc6 is ready, runtime-pm is go! */
+ rpm_put(rc6);
+ rc6->enabled = true;
+}
+
+void intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ if (!rc6->enabled)
+ return;
+
+ rpm_get(rc6);
+ rc6->enabled = false;
+
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_fini(struct intel_rc6 *rc6)
+{
+ struct drm_i915_gem_object *pctx;
+
+ intel_rc6_disable(rc6);
+
+ pctx = fetch_and_zero(&rc6->pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
+
+ if (rc6->wakeref)
+ rpm_put(rc6);
+}
+
+static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
+{
+ u32 lower, upper, tmp;
+ int loop = 2;
+
+ /*
+ * The registers accessed do not need forcewake. We borrow the
+ * uncore lock to prevent concurrent access to the range register.
+ */
+ lockdep_assert_held(&uncore->lock);
+
+ /*
+ * vlv and chv residency counters are 40 bits in width.
+ * With a control bit, we can choose between the upper and lower
+ * 32-bit windows into this counter.
+ *
+ * Although we always use the counter in high-range mode elsewhere,
+ * userspace may attempt to read the value before rc6 is initialised,
+ * before we have set the default VLV_COUNTER_CONTROL value. So always
+ * set the high bit to be safe.
+ */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ do {
+ tmp = upper;
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ lower = intel_uncore_read_fw(uncore, reg);
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ } while (upper != tmp && --loop);
+
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
+ * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+ * now.
+ */
+
+ return lower | (u64)upper << 8;
+}
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
+ u32 mul, div;
+
+ if (!rc6->supported)
+ return 0;
+
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other, so we can use the offset relative to the smallest one as the
+ * index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ mul = 1000000;
+ div = i915->czclk_freq;
+ overflow_hw = BIT_ULL(40);
+ time_hw = vlv_residency_raw(uncore, reg);
+ } else {
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(i915)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
+
+ overflow_hw = BIT_ULL(32);
+ time_hw = intel_uncore_read_fw(uncore, reg);
+ }
+
+ /*
+ * Counter wrap handling.
+ *
+ * Note that this relies on a sufficient frequency of queries;
+ * otherwise the counters can still wrap between two samples.
+ */
+ prev_hw = rc6->prev_hw_residency[i];
+ rc6->prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += rc6->cur_residency[i];
+ rc6->cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+
+ return mul_u64_u32_div(time_hw, mul, div);
+}
+
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
+}
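Taken together, intel_rc6_residency_ns() above does three things: it reassembles the 40-bit vlv/chv counter from two 32-bit windows (the upper window exposes bits 39:8, hence the shift by 8; the retry loop guarantees both reads come from a consistent snapshot, so the overlapping bits agree), computes a wrap-safe delta against the previous sample, and scales raw ticks into nanoseconds. A minimal standalone sketch of that arithmetic, using plain stdint types instead of the driver's helpers:

#include <stdint.h>

/* Bits 31:0 come from the low window, bits 39:8 from the high window;
 * the overlapping bits 31:8 agree because both reads come from a
 * consistent snapshot (see the retry loop in vlv_residency_raw()). */
static uint64_t vlv_reassemble(uint32_t lower, uint32_t upper)
{
	return lower | (uint64_t)upper << 8;
}

/* Wrap-safe delta for a counter 'width' bits wide: 40 on vlv/chv,
 * 32 elsewhere. Relies on sampling often enough that at most one
 * wrap occurs between two samples. */
static uint64_t wrap_delta(uint64_t prev, uint64_t cur, unsigned int width)
{
	return cur >= prev ? cur - prev : cur + (1ull << width) - prev;
}

/* Scale ticks to ns: mul/div is 1000000/czclk on vlv/chv, 10000/12 on
 * gen9lp (833.33ns/tick) and 1280/1 elsewhere (1.28us/tick). The
 * driver uses mul_u64_u32_div() to avoid the 64x32 overflow risk here. */
static uint64_t ticks_to_ns(uint64_t ticks, uint32_t mul, uint32_t div)
{
	return ticks * mul / div;
}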
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
new file mode 100644
index 000000000000..1370f6834a4c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_H
+#define INTEL_RC6_H
+
+#include "i915_reg.h"
+
+struct intel_engine_cs;
+struct intel_rc6;
+
+void intel_rc6_init(struct intel_rc6 *rc6);
+void intel_rc6_fini(struct intel_rc6 *rc6);
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6);
+void intel_rc6_enable(struct intel_rc6 *rc6);
+void intel_rc6_disable(struct intel_rc6 *rc6);
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
+
+void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6);
+void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6);
+
+#endif /* INTEL_RC6_H */
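The header implies a strict lifecycle for the wakeref juggling done in intel_rc6.c: init() holds a runtime-pm wakeref so the device cannot suspend before rc6 can save the GPU state, enable() releases it once rc6 is ready, and disable()/fini() retake and drop it symmetrically. A standalone model of those invariants (hypothetical stand-in types, not the driver's API):

#include <assert.h>
#include <stdbool.h>

struct rc6_model {
	bool supported, enabled, wakeref;
};

static void rc6_init(struct rc6_model *rc6)
{
	rc6->wakeref = true;		/* block runtime-pm */
	rc6->supported = true;		/* assume the platform checks passed */
}

static void rc6_enable(struct rc6_model *rc6)
{
	if (!rc6->supported)
		return;
	rc6->enabled = true;
	rc6->wakeref = false;		/* rc6 is ready, runtime-pm is go */
}

static void rc6_disable(struct rc6_model *rc6)
{
	if (!rc6->enabled)
		return;
	rc6->wakeref = true;		/* block runtime-pm again */
	rc6->enabled = false;
}

static void rc6_fini(struct rc6_model *rc6)
{
	rc6_disable(rc6);
	rc6->wakeref = false;		/* drop the final reference */
}

int main(void)
{
	struct rc6_model rc6 = {0};

	rc6_init(&rc6);		assert(rc6.wakeref);
	rc6_enable(&rc6);	assert(!rc6.wakeref && rc6.enabled);
	rc6_disable(&rc6);	assert(rc6.wakeref && !rc6.enabled);
	rc6_fini(&rc6);		assert(!rc6.wakeref);
	return 0;
}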
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
new file mode 100644
index 000000000000..89ad5697a8d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_TYPES_H
+#define INTEL_RC6_TYPES_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_rc6 {
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
+
+ struct drm_i915_gem_object *pctx;
+
+ bool supported : 1;
+ bool enabled : 1;
+ bool wakeref : 1;
+ bool ctx_corrupted : 1;
+};
+
+#endif /* INTEL_RC6_TYPES_H */
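The two four-element arrays in struct intel_rc6 are indexed by register offset: the residency registers are contiguous dwords, so their distance from the lowest one divides down to indices 0..3 (see intel_rc6_residency_ns()). A quick standalone check of that mapping; the offsets below follow GEN6_GT_GFX_RC6_LOCKED and friends but are reproduced here only for illustration:

#include <assert.h>
#include <stdint.h>

enum {
	RC6_LOCKED = 0x138104,	/* stand-in for GEN6_GT_GFX_RC6_LOCKED */
	RC6        = 0x138108,
	RC6p       = 0x13810c,
	RC6pp      = 0x138110,
};

static unsigned int residency_index(uint32_t reg)
{
	return (reg - RC6_LOCKED) / sizeof(uint32_t);
}

int main(void)
{
	assert(residency_index(RC6_LOCKED) == 0);
	assert(residency_index(RC6) == 1);
	assert(residency_index(RC6p) == 2);
	assert(residency_index(RC6pp) == 3);
	return 0;
}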
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 6d05f9c64178..c4edc35e7d89 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
+#include "intel_ring.h"
struct intel_renderstate {
const struct intel_renderstate_rodata *rodata;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8cea42379dd7..f03e000051c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -282,14 +282,14 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN6_GRDOM_RENDER,
[BCS0] = GEN6_GRDOM_BLT,
[VCS0] = GEN6_GRDOM_MEDIA,
[VCS1] = GEN8_GRDOM_MEDIA2,
[VECS0] = GEN6_GRDOM_VECS,
};
+ struct intel_engine_cs *engine;
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
@@ -298,7 +298,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
@@ -307,7 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
-static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
+static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
@@ -316,6 +316,7 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
i915_reg_t sfc_usage;
u32 sfc_usage_bit;
u32 sfc_reset_bit;
+ int ret;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
@@ -350,27 +351,33 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
}
/*
- * Tell the engine that a software reset is going to happen. The engine
- * will then try to force lock the SFC (if currently locked, it will
- * remain so until we tell the engine it is safe to unlock; if currently
- * unlocked, it will ignore this and all new lock requests). If SFC
- * ends up being locked to the engine we want to reset, we have to reset
- * it as well (we will unlock it once the reset sequence is completed).
+ * If the engine is using a SFC, tell the engine that a software reset
+ * is going to happen. The engine will then try to force lock the SFC.
+ * If SFC ends up being locked to the engine we want to reset, we have
+ * to reset it as well (we will unlock it once the reset sequence is
+ * completed).
*/
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+ return 0;
+
rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(uncore,
- sfc_forced_lock_ack,
- sfc_forced_lock_ack_bit,
- sfc_forced_lock_ack_bit,
- 1000, 0, NULL)) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ ret = __intel_wait_for_register_fw(uncore,
+ sfc_forced_lock_ack,
+ sfc_forced_lock_ack_bit,
+ sfc_forced_lock_ack_bit,
+ 1000, 0, NULL);
+
+ /* Was the SFC released while we were trying to lock it? */
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
return 0;
- }
- if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
- return sfc_reset_bit;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ return ret;
+ }
+ *hw_mask |= sfc_reset_bit;
return 0;
}
@@ -406,7 +413,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
[VCS0] = GEN11_GRDOM_MEDIA,
@@ -425,17 +432,26 @@ static int gen11_reset_engines(struct intel_gt *gt,
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(engine);
+ ret = gen11_lock_sfc(engine, &hw_mask);
+ if (ret)
+ goto sfc_unlock;
}
}
ret = gen6_hw_domain_reset(gt, hw_mask);
+sfc_unlock:
+ /*
+ * We unlock the SFC based on the lock status and not the result of
+ * gen11_lock_sfc to make sure that we clean up properly if something
+ * went wrong while taking the lock (e.g. the lock was acquired after
+ * the timeout expired).
+ */
if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
@@ -494,7 +510,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t tmp;
int ret;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
@@ -520,19 +536,30 @@ static int gen8_reset_engines(struct intel_gt *gt,
ret = gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
+static int mock_reset(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ unsigned int retry)
+{
+ return 0;
+}
+
typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
-static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
- if (INTEL_GEN(i915) >= 8)
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (is_mock_gt(gt))
+ return mock_reset;
+ else if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
return gen6_reset_engines;
@@ -555,7 +582,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
int ret = -ETIMEDOUT;
int retry;
- reset = intel_get_gpu_reset(gt->i915);
+ reset = intel_get_gpu_reset(gt);
if (!reset)
return -ENODEV;
@@ -575,17 +602,20 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return ret;
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915)
+bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!i915_modparams.reset)
return NULL;
- return intel_get_gpu_reset(i915);
+ return intel_get_gpu_reset(gt);
}
-bool intel_has_reset_engine(struct drm_i915_private *i915)
+bool intel_has_reset_engine(const struct intel_gt *gt)
{
- return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+ if (i915_modparams.reset < 2)
+ return false;
+
+ return INTEL_INFO(gt->i915)->has_reset_engine;
}
int intel_reset_guc(struct intel_gt *gt)
@@ -652,7 +682,7 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
@@ -682,10 +712,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(gt->i915);
+ i915_gem_restore_fences(gt->ggtt);
return err;
}
@@ -695,7 +725,7 @@ static void reset_finish_engine(struct intel_engine_cs *engine)
engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
- intel_engine_signal_breadcrumbs(engine);
+ intel_engine_breadcrumbs_irq(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
@@ -703,7 +733,7 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
@@ -739,7 +769,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
struct drm_printer p = drm_debug_printer(__func__);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
@@ -756,7 +786,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
/*
@@ -768,7 +798,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->cancel_requests(engine);
reset_finish(gt, awake);
@@ -781,7 +811,7 @@ void intel_gt_set_wedged(struct intel_gt *gt)
intel_wakeref_t wakeref;
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
__intel_gt_set_wedged(gt);
mutex_unlock(&gt->reset.mutex);
}
@@ -791,11 +821,13 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
unsigned long flags;
+ bool ok;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
- if (!gt->scratch) /* Never full initialised, recovery impossible */
+ /* Never fully initialised, recovery impossible */
+ if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
return false;
GEM_TRACE("start\n");
@@ -812,10 +844,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry(tl, &timelines->active_list, link) {
- struct i915_request *rq;
+ struct dma_fence *fence;
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
continue;
spin_unlock_irqrestore(&timelines->lock, flags);
@@ -827,8 +859,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* (I915_FENCE_TIMEOUT) so this wait should not be unbounded
* in the worst case.
*/
- dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
/* Restart iteration after dropping the lock */
spin_lock_irqsave(&timelines->lock, flags);
@@ -836,7 +868,18 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
}
spin_unlock_irqrestore(&timelines->lock, flags);
- intel_gt_sanitize(gt, false);
+ /* We must reset pending GPU events before restoring our submission */
+ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ if (!ok) {
+ /*
+ * Warn CI about the unrecoverable wedged condition.
+ * Time for a reboot.
+ */
+ add_taint_for_CI(TAINT_WARN);
+ return false;
+ }
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -891,7 +934,7 @@ static int resume(struct intel_gt *gt)
enum intel_engine_id id;
int ret;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
ret = engine->resume(engine);
if (ret)
return ret;
@@ -941,7 +984,7 @@ void intel_gt_reset(struct intel_gt *gt,
awake = reset_prepare(gt);
- if (!intel_has_gpu_reset(gt->i915)) {
+ if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
@@ -970,7 +1013,7 @@ void intel_gt_reset(struct intel_gt *gt,
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(gt->i915);
+ ret = intel_gt_init_hw(gt);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
@@ -981,8 +1024,6 @@ void intel_gt_reset(struct intel_gt *gt,
if (ret)
goto taint;
- intel_gt_queue_hangcheck(gt);
-
finish:
reset_finish(gt, awake);
unlock:
@@ -1149,7 +1190,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
@@ -1162,8 +1203,8 @@ void intel_gt_handle_error(struct intel_gt *gt,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
@@ -1191,7 +1232,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
- for_each_engine(engine, gt->i915, tmp) {
+ for_each_engine(engine, gt, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags,
@@ -1201,7 +1242,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, gt->i915, tmp)
+ for_each_engine(engine, gt, tmp)
clear_bit_unlock(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
@@ -1209,7 +1250,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
@@ -1251,10 +1292,6 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
return -EIO;
- /* XXX intel_reset_finish() still takes struct_mutex!!! */
- if (mutex_is_locked(&gt->i915->drm.struct_mutex))
- return -EAGAIN;
-
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
@@ -1263,6 +1300,14 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
return intel_gt_is_wedged(gt) ? -EIO : 0;
}
+void intel_gt_set_wedged_on_init(struct intel_gt *gt)
+{
+ BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
+ I915_WEDGED_ON_INIT);
+ intel_gt_set_wedged(gt);
+ set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
void intel_gt_init_reset(struct intel_gt *gt)
{
init_waitqueue_head(&gt->reset.queue);
@@ -1306,4 +1351,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
+#include "selftest_hangcheck.c"
#endif
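The reworked gen11_lock_sfc() above turns the lock into a four-step protocol: skip engines that are not using an SFC, request the forced lock, re-check usage (the SFC may have been released while we waited for the ack), and only then fold the SFC reset bit into the caller's mask. A standalone model of that decision flow, with a plain struct standing in for the MMIO registers:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct sfc_model {
	bool in_use;	/* sfc_usage & sfc_usage_bit */
	bool lock_ack;	/* outcome of waiting for the forced-lock ack */
};

static int lock_sfc(const struct sfc_model *sfc, uint32_t sfc_reset_bit,
		    uint32_t *hw_mask)
{
	if (!sfc->in_use)	/* engine not using an SFC: nothing to do */
		return 0;

	/* ...request the forced lock and wait for the ack here... */

	if (!sfc->in_use)	/* released while we were locking it? */
		return 0;	/* (re-read from hardware in the driver) */

	if (!sfc->lock_ack)	/* ack never came: propagate the error */
		return -ETIMEDOUT;

	*hw_mask |= sfc_reset_bit;	/* SFC is ours: reset it too */
	return 0;
}

Note that, as the comment before the unlock loop says, the caller unlocks based on the lock status and not on this return value, so a lock acquired after the ack timed out is still cleaned up.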
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 52c00199e069..8e8d5f761166 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -14,7 +14,6 @@
#include "intel_engine_types.h"
#include "intel_reset_types.h"
-struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct intel_gt;
@@ -45,6 +44,12 @@ void intel_gt_set_wedged(struct intel_gt *gt);
bool intel_gt_unset_wedged(struct intel_gt *gt);
int intel_gt_terminally_wedged(struct intel_gt *gt);
+/*
+ * There's no unset_wedged_on_init paired with this one.
+ * Once we're wedged on init, there's no going back.
+ */
+void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
int intel_reset_guc(struct intel_gt *gt);
@@ -68,10 +73,13 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
static inline bool __intel_reset_failed(const struct intel_reset *reset)
{
+ GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
+ !test_bit(I915_WEDGED, &reset->flags) : false);
+
return unlikely(test_bit(I915_WEDGED, &reset->flags));
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915);
-bool intel_has_reset_engine(struct drm_i915_private *i915);
+bool intel_has_gpu_reset(const struct intel_gt *gt);
+bool intel_has_reset_engine(const struct intel_gt *gt);
#endif /* I915_RESET_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 31968356e0c0..f43bc3a0fe4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -29,11 +29,17 @@ struct intel_reset {
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
+ *
+ * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
+ * longer use the GPU - similar to the #I915_WEDGED bit. The difference
+ * is in the way we handle a "forced" unwedge (e.g. through debugfs),
+ * which is not allowed if we failed to initialize.
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_MODESET 1
#define I915_RESET_ENGINE 2
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
struct mutex mutex; /* serialises wedging/unwedging */
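The flags word above packs several namespaces into one unsigned long: bits 0 and 1 for backoff/modeset, per-engine reset bits growing up from bit 2, and the two wedged bits pinned to the top. The BUILD_BUG_ON in intel_gt_set_wedged_on_init() enforces that they never collide; a standalone restatement of that invariant (I915_NUM_ENGINES is a stand-in value):

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG		(sizeof(long) * CHAR_BIT)

#define I915_RESET_BACKOFF	0
#define I915_RESET_MODESET	1
#define I915_RESET_ENGINE	2
#define I915_NUM_ENGINES	8	/* illustrative engine count */
#define I915_WEDGED_ON_INIT	(BITS_PER_LONG - 2)
#define I915_WEDGED		(BITS_PER_LONG - 1)

int main(void)
{
	/* per-engine bits must stop below the wedged bits */
	assert(I915_RESET_ENGINE + I915_NUM_ENGINES <= I915_WEDGED_ON_INIT);
	assert(I915_WEDGED_ON_INIT < I915_WEDGED);
	return 0;
}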
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
new file mode 100644
index 000000000000..ece20504d240
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -0,0 +1,323 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_engine.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
+
+unsigned int intel_ring_update_space(struct intel_ring *ring)
+{
+ unsigned int space;
+
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
+}
+
+int intel_ring_pin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+ unsigned int flags;
+ void *addr;
+ int ret;
+
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
+
+ flags = PIN_GLOBAL;
+
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ if (vma->obj->stolen)
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+
+ ret = i915_vma_pin(vma, 0, 0, flags);
+ if (unlikely(ret))
+ goto err_unpin;
+
+ if (i915_vma_is_map_and_fenceable(vma))
+ addr = (void __force *)i915_vma_pin_iomap(vma);
+ else
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_ring;
+ }
+
+ i915_vma_make_unshrinkable(vma);
+
+ GEM_BUG_ON(ring->vaddr);
+ ring->vaddr = addr;
+
+ return 0;
+
+err_ring:
+ i915_vma_unpin(vma);
+err_unpin:
+ atomic_dec(&ring->pin_count);
+ return ret;
+}
+
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ tail = intel_ring_wrap(ring, tail);
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
+void intel_ring_unpin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
+
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->emit);
+
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma))
+ i915_vma_unpin_iomap(vma);
+ else
+ i915_gem_object_unpin_map(vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
+ ring->vaddr = NULL;
+
+ i915_vma_unpin(vma);
+ i915_vma_make_purgeable(vma);
+}
+
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = ERR_PTR(-ENODEV);
+ if (i915_ggtt_has_aperture(ggtt))
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Mark ring buffers as read-only from the GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ring->ref);
+ ring->size = size;
+
+ /*
+ * Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = size;
+ if (IS_I830(i915) || IS_I845G(i915))
+ ring->effective_size -= 2 * CACHELINE_BYTES;
+
+ intel_ring_update_space(ring);
+
+ vma = create_ring_vma(engine->gt->ggtt, size);
+ if (IS_ERR(vma)) {
+ kfree(ring);
+ return ERR_CAST(vma);
+ }
+ ring->vma = vma;
+
+ return ring;
+}
+
+void intel_ring_free(struct kref *ref)
+{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
+
+ i915_vma_put(ring->vma);
+ kfree(ring);
+}
+
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
+{
+ struct i915_request *target;
+ long timeout;
+
+ if (intel_ring_update_space(ring) >= bytes)
+ return 0;
+
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
+ break;
+ }
+
+ if (GEM_WARN_ON(&target->link == &tl->requests))
+ return -ENOSPC;
+
+ timeout = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
+
+ i915_request_retire_upto(target);
+
+ intel_ring_update_space(ring);
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
+{
+ struct intel_ring *ring = rq->ring;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
+ u32 *cs;
+
+ /* Packets must be qword aligned. */
+ GEM_BUG_ON(num_dwords & 1);
+
+ total_bytes = bytes + rq->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
+
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
+ need_wrap = remain_actual | 1;
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = rq->reserved_space + remain_actual;
+ }
+ }
+
+ if (unlikely(total_bytes > ring->space)) {
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_request_alloc() and i915_request_add().
+ */
+ GEM_BUG_ON(!rq->reserved_space);
+
+ ret = wait_for_space(ring,
+ i915_request_timeline(rq),
+ total_bytes);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(need_wrap)) {
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+ GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
+
+ /* Fill the tail with MI_NOOP */
+ memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
+ ring->space -= need_wrap;
+ ring->emit = 0;
+ }
+
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
+ GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ ring->emit += bytes;
+ ring->space -= bytes;
+
+ return cs;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct i915_request *rq)
+{
+ int num_dwords;
+ void *cs;
+
+ num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
+ if (num_dwords == 0)
+ return 0;
+
+ num_dwords = CACHELINE_DWORDS - num_dwords;
+ GEM_BUG_ON(num_dwords & 1);
+
+ cs = intel_ring_begin(rq, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
+ intel_ring_advance(rq, cs + num_dwords);
+
+ GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
+ return 0;
+}
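The wrap bookkeeping in intel_ring_begin() is easiest to see with numbers. Below, a request of 64 bytes (plus a 32-byte reservation) arrives when only 32 usable bytes remain before the end of a 4096-byte ring, so the tail must be padded with MI_NOOPs and the allocation wraps to offset 0. A standalone worked example with illustrative values:

#include <assert.h>

int main(void)
{
	const unsigned int size = 4096, effective_size = 4096;
	const unsigned int emit = 4064;			/* near the end */
	const unsigned int bytes = 64, reserved_space = 32;

	const unsigned int remain_usable = effective_size - emit; /* 32 */
	const unsigned int remain_actual = size - emit;		   /* 32 */
	unsigned int total_bytes = bytes + reserved_space;	   /* 96 */
	unsigned int need_wrap = 0;

	if (total_bytes > remain_usable) {
		if (bytes > remain_usable) {
			/* request itself does not fit: wait for the tail
			 * padding too, and flag the wrap in bit 0 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/* request fits; only the reservation wraps, so
			 * wait for it from the start of the ring */
			total_bytes = reserved_space + remain_actual;
		}
	}

	assert(need_wrap == (32 | 1));	/* pad 32 bytes, then wrap */
	assert(total_bytes == 128);	/* 96 requested + 32 padding */
	return 0;
}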
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
new file mode 100644
index 000000000000..ea2839d9e044
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -0,0 +1,131 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_H
+#define INTEL_RING_H
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "i915_request.h"
+#include "intel_ring_types.h"
+
+struct intel_engine_cs;
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
+int intel_ring_cacheline_align(struct i915_request *rq);
+
+unsigned int intel_ring_update_space(struct intel_ring *ring);
+
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
+
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
+{
+ /* Dummy function.
+ *
+ * This serves as a placeholder in the code so that the reader
+ * can compare against the preceding intel_ring_begin() and
+ * check that the number of dwords emitted matches the space
+ * reserved for the command packet (i.e. the value passed to
+ * intel_ring_begin()).
+ */
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
+}
+
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
+
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
+{
+ /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
+ /*
+ * "Ring Buffer Use"
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
+ * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
+ * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ *
+ * We use ring->head as the last known location of the actual RING_HEAD,
+ * it may have advanced, but in the worst case it is exactly the same
+ * as ring->head and so we should never program RING_TAIL to advance
+ * into the same cacheline as ring->head.
+ */
+#define cacheline(a) round_down(a, CACHELINE_BYTES)
+ GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
+ tail < ring->head);
+#undef cacheline
+}
+
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
+
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
+#endif /* INTEL_RING_H */
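__intel_ring_space() encodes the BSpec rule quoted above by permanently sacrificing one cacheline of the ring: with head == tail an N-byte ring reports N - 64 free bytes, and the unsigned subtraction wraps correctly when the tail is numerically ahead of the head. A standalone check of both cases:

#include <assert.h>

#define CACHELINE_BYTES 64

static unsigned int ring_space(unsigned int head, unsigned int tail,
			       unsigned int size)	/* size: power of two */
{
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

int main(void)
{
	/* empty ring: everything free minus the slack cacheline */
	assert(ring_space(0, 0, 4096) == 4096 - 64);
	/* tail has wrapped close behind the head */
	assert(ring_space(64, 4032, 4096) == 64);
	return 0;
}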
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bacaa7bb8c9a..a47d5a7c32c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -40,6 +40,7 @@
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -47,16 +48,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-unsigned int intel_ring_update_space(struct intel_ring *ring)
-{
- unsigned int space;
-
- space = __intel_ring_space(ring->head, ring->emit, ring->size);
-
- ring->space = space;
- return space;
-}
-
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
@@ -322,7 +313,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -425,7 +417,7 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -439,8 +431,8 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -459,8 +451,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -930,6 +922,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
i915_request_submit(request);
+ wmb(); /* paranoid flush writes out of the WCB before mmio */
ENGINE_WRITE(request->engine, RING_TAIL,
intel_ring_set_tail(request->ring, request->tail));
@@ -937,8 +930,8 @@ static void i9xx_submit_request(struct i915_request *request)
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -960,8 +953,8 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -1184,167 +1177,9 @@ i915_emit_bb_start(struct i915_request *rq,
return 0;
}
-int intel_ring_pin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
- unsigned int flags;
- void *addr;
- int ret;
-
- if (atomic_fetch_inc(&ring->pin_count))
- return 0;
-
- flags = PIN_GLOBAL;
-
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- if (vma->obj->stolen)
- flags |= PIN_MAPPABLE;
- else
- flags |= PIN_HIGH;
-
- ret = i915_vma_pin(vma, 0, 0, flags);
- if (unlikely(ret))
- goto err_unpin;
-
- if (i915_vma_is_map_and_fenceable(vma))
- addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto err_ring;
- }
-
- i915_vma_make_unshrinkable(vma);
-
- GEM_BUG_ON(ring->vaddr);
- ring->vaddr = addr;
-
- return 0;
-
-err_ring:
- i915_vma_unpin(vma);
-err_unpin:
- atomic_dec(&ring->pin_count);
- return ret;
-}
-
-void intel_ring_reset(struct intel_ring *ring, u32 tail)
-{
- tail = intel_ring_wrap(ring, tail);
- ring->tail = tail;
- ring->head = tail;
- ring->emit = tail;
- intel_ring_update_space(ring);
-}
-
-void intel_ring_unpin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
-
- if (!atomic_dec_and_test(&ring->pin_count))
- return;
-
- /* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->emit);
-
- i915_vma_unset_ggtt_write(vma);
- if (i915_vma_is_map_and_fenceable(vma))
- i915_vma_unpin_iomap(vma);
- else
- i915_gem_object_unpin_map(vma->obj);
-
- GEM_BUG_ON(!ring->vaddr);
- ring->vaddr = NULL;
-
- i915_vma_unpin(vma);
- i915_vma_make_purgeable(vma);
-}
-
-static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
-{
- struct i915_address_space *vm = &ggtt->vm;
- struct drm_i915_private *i915 = vm->i915;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
-
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- /*
- * Mark ring buffers as read-only from GPU side (so no stray overwrites)
- * if supported by the platform's GGTT.
- */
- if (vm->has_read_only)
- i915_gem_object_set_readonly(obj);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
-}
-
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
-{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_ring *ring;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!is_power_of_2(size));
- GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&ring->ref);
-
- ring->size = size;
- /* Workaround an erratum on the i830 which causes a hang if
- * the TAIL pointer points to within the last 2 cachelines
- * of the buffer.
- */
- ring->effective_size = size;
- if (IS_I830(i915) || IS_I845G(i915))
- ring->effective_size -= 2 * CACHELINE_BYTES;
-
- intel_ring_update_space(ring);
-
- vma = create_ring_vma(engine->gt->ggtt, size);
- if (IS_ERR(vma)) {
- kfree(ring);
- return ERR_CAST(vma);
- }
- ring->vma = vma;
-
- return ring;
-}
-
-void intel_ring_free(struct kref *ref)
-{
- struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
-
- i915_vma_close(ring->vma);
- i915_vma_put(ring->vma);
-
- kfree(ring);
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void ring_context_destroy(struct kref *ref)
@@ -1609,7 +1444,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *signaller;
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1663,7 +1498,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
i915_reg_t last_reg = {}; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1676,7 +1511,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ *cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
@@ -1741,46 +1576,22 @@ static int remap_l3(struct i915_request *rq)
static int switch_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- struct i915_address_space *vm = vm_alias(rq->hw_context);
- unsigned int unwind_mm = 0;
- u32 hw_flags = 0;
+ struct intel_context *ce = rq->hw_context;
+ struct i915_address_space *vm = vm_alias(ce);
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (vm) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- int loops;
-
- /*
- * Baytail takes a little more convincing that it really needs
- * to reload the PD between contexts. It is not just a little
- * longer, as adding more stalls after the load_pd_dir (i.e.
- * adding a long loop around flush_pd_dir) is not as effective
- * as reloading the PD umpteen times. 32 is derived from
- * experimentation (gem_exec_parallel/fds) and has no good
- * explanation.
- */
- loops = 1;
- if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
- loops = 32;
-
- do {
- ret = load_pd_dir(rq, ppgtt);
- if (ret)
- goto err;
- } while (--loops);
-
- if (ppgtt->pd_dirty_engines & engine->mask) {
- unwind_mm = engine->mask;
- ppgtt->pd_dirty_engines &= ~unwind_mm;
- hw_flags = MI_FORCE_RESTORE;
- }
+ ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
+ if (ret)
+ return ret;
}
- if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS0);
+ if (ce->state) {
+ u32 hw_flags;
+
+ GEM_BUG_ON(rq->engine->id != RCS0);
/*
* The kernel context(s) is treated as pure scratch and is not
@@ -1789,22 +1600,25 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
+ hw_flags = 0;
if (i915_gem_context_is_kernel(rq->gem_context))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
if (ret)
- goto err_mm;
+ return ret;
}
if (vm) {
+ struct intel_engine_cs *engine = rq->engine;
+
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
- goto err_mm;
+ return ret;
ret = flush_pd_dir(rq);
if (ret)
- goto err_mm;
+ return ret;
/*
* Not only do we need a full barrier (post-sync write) after
@@ -1816,24 +1630,18 @@ static int switch_context(struct i915_request *rq)
*/
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
- goto err_mm;
+ return ret;
ret = engine->emit_flush(rq, EMIT_FLUSH);
if (ret)
- goto err_mm;
+ return ret;
}
ret = remap_l3(rq);
if (ret)
- goto err_mm;
+ return ret;
return 0;
-
-err_mm:
- if (unwind_mm)
- i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
-err:
- return ret;
}
static int ring_request_alloc(struct i915_request *request)
@@ -1841,7 +1649,7 @@ static int ring_request_alloc(struct i915_request *request)
int ret;
GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
- GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1863,146 +1671,6 @@ static int ring_request_alloc(struct i915_request *request)
return 0;
}
-static noinline int
-wait_for_space(struct intel_ring *ring,
- struct intel_timeline *tl,
- unsigned int bytes)
-{
- struct i915_request *target;
- long timeout;
-
- if (intel_ring_update_space(ring) >= bytes)
- return 0;
-
- GEM_BUG_ON(list_empty(&tl->requests));
- list_for_each_entry(target, &tl->requests, link) {
- if (target->ring != ring)
- continue;
-
- /* Would completion of this request free enough space? */
- if (bytes <= __intel_ring_space(target->postfix,
- ring->emit, ring->size))
- break;
- }
-
- if (GEM_WARN_ON(&target->link == &tl->requests))
- return -ENOSPC;
-
- timeout = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (timeout < 0)
- return timeout;
-
- i915_request_retire_upto(target);
-
- intel_ring_update_space(ring);
- GEM_BUG_ON(ring->space < bytes);
- return 0;
-}
-
-u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
-{
- struct intel_ring *ring = rq->ring;
- const unsigned int remain_usable = ring->effective_size - ring->emit;
- const unsigned int bytes = num_dwords * sizeof(u32);
- unsigned int need_wrap = 0;
- unsigned int total_bytes;
- u32 *cs;
-
- /* Packets must be qword aligned. */
- GEM_BUG_ON(num_dwords & 1);
-
- total_bytes = bytes + rq->reserved_space;
- GEM_BUG_ON(total_bytes > ring->effective_size);
-
- if (unlikely(total_bytes > remain_usable)) {
- const int remain_actual = ring->size - ring->emit;
-
- if (bytes > remain_usable) {
- /*
- * Not enough space for the basic request. So need to
- * flush out the remainder and then wait for
- * base + reserved.
- */
- total_bytes += remain_actual;
- need_wrap = remain_actual | 1;
- } else {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So we don't need an immediate
- * wrap and only need to effectively wait for the
- * reserved size from the start of ringbuffer.
- */
- total_bytes = rq->reserved_space + remain_actual;
- }
- }
-
- if (unlikely(total_bytes > ring->space)) {
- int ret;
-
- /*
- * Space is reserved in the ringbuffer for finalising the
- * request, as that cannot be allowed to fail. During request
- * finalisation, reserved_space is set to 0 to stop the
- * overallocation and the assumption is that then we never need
- * to wait (which has the risk of failing with EINTR).
- *
- * See also i915_request_alloc() and i915_request_add().
- */
- GEM_BUG_ON(!rq->reserved_space);
-
- ret = wait_for_space(ring, rq->timeline, total_bytes);
- if (unlikely(ret))
- return ERR_PTR(ret);
- }
-
- if (unlikely(need_wrap)) {
- need_wrap &= ~1;
- GEM_BUG_ON(need_wrap > ring->space);
- GEM_BUG_ON(ring->emit + need_wrap > ring->size);
- GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
-
- /* Fill the tail with MI_NOOP */
- memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
- ring->space -= need_wrap;
- ring->emit = 0;
- }
-
- GEM_BUG_ON(ring->emit > ring->size - bytes);
- GEM_BUG_ON(ring->space < bytes);
- cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
- ring->emit += bytes;
- ring->space -= bytes;
-
- return cs;
-}
-
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
static void gen6_bsd_submit_request(struct i915_request *request)
{
struct intel_uncore *uncore = request->engine->uncore;
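One functional change buried in the rename above is the wmb() added to i9xx_submit_request(): ring contents are written through a write-combining mapping, so the write-combining buffer must be flushed before the uncached RING_TAIL write tells the GPU to start reading. A standalone sketch of that ordering; the mmio-style pointers and __sync_synchronize() are stand-ins for the driver's mapping and the kernel's wmb():

#include <stdint.h>

static void submit(volatile uint32_t *ring_wc, const uint32_t *cmds,
		   unsigned int n, volatile uint32_t *ring_tail_reg,
		   uint32_t new_tail)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		ring_wc[i] = cmds[i];	/* may linger in the WC buffer */

	__sync_synchronize();		/* flush WC writes before mmio */

	*ring_tail_reg = new_tail;	/* GPU may now read the commands */
}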
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
new file mode 100644
index 000000000000..d9f17f38e0cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_TYPES_H
+#define INTEL_RING_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+/*
+ * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some inclination as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+
+struct i915_vma;
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play it safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+#endif /* INTEL_RING_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
new file mode 100644
index 000000000000..20d6ee148afc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -0,0 +1,1872 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_rps.h"
+#include "intel_sideband.h"
+#include "../../../platform/x86/intel_ips.h"
+
+/*
+ * Lock protecting IPS-related data structures.
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+static struct intel_gt *rps_to_gt(struct intel_rps *rps)
+{
+ return container_of(rps, struct intel_gt, rps);
+}
+
+static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->i915;
+}
+
+static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->uncore;
+}
+
+static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
+{
+ return mask & ~rps->pm_intrmsk_mbz;
+}
+
+static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
+{
+ u32 mask = 0;
+
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+ if (val > rps->min_freq_softlimit)
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ if (val < rps->max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+ mask &= rps->pm_events;
+
+ return rps_pm_sanitize_mask(rps, ~mask);
+}
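+
+/*
+ * Example: with val strictly between the softlimits, both the up and the
+ * down event bits are gathered, filtered by rps->pm_events and inverted;
+ * GEN6_PMINTRMSK masks *disabled* interrupts, so the events we want to
+ * receive end up as zero bits in the register.
+ */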
+
+static void rps_reset_ei(struct intel_rps *rps)
+{
+ memset(&rps->ei, 0, sizeof(rps->ei));
+}
+
+static void rps_enable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps_reset_ei(rps);
+
+ if (IS_VALLEYVIEW(gt->i915))
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_enable_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+ rps_pm_mask(rps, rps->cur_freq));
+}
+
+static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+{
+ gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
+}
+
+static void gen11_rps_reset_interrupts(struct intel_rps *rps)
+{
+ while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
+ ;
+}
+
+static void rps_reset_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (INTEL_GEN(gt->i915) >= 11)
+ gen11_rps_reset_interrupts(rps);
+ else
+ gen6_rps_reset_interrupts(rps);
+
+ rps->pm_iir = 0;
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void rps_disable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps->pm_events = 0;
+
+ intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+ rps_pm_sanitize_mask(rps, ~0u));
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_synchronize_irq(gt->i915);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+
+ rps_reset_interrupts(rps);
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static void gen5_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fmax, fmin, fstart;
+ u32 rgvmodectl;
+ int c_m, i;
+
+ if (i915->fsb_freq <= 3200)
+ c_m = 0;
+ else if (i915->fsb_freq <= 4800)
+ c_m = 1;
+ else
+ c_m = 2;
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
+ rps->ips.m = cparams[i].m;
+ rps->ips.c = cparams[i].c;
+ break;
+ }
+ }
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
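+ /* The bins are "delays" (cf. gen5_rps_set()): larger value, lower freq */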
+ rps->min_freq = fmax;
+ rps->max_freq = fmin;
+
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+}
+
+static unsigned long
+__ips_chipset_val(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ unsigned long now = jiffies_to_msecs(jiffies), dt;
+ unsigned long result;
+ u64 total, delta;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ /*
+ * Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ dt = now - ips->last_time1;
+ if (dt <= 10)
+ return ips->chipset_power;
+
+ /* FIXME: handle per-counter overflow */
+ total = intel_uncore_read(uncore, DMIEC);
+ total += intel_uncore_read(uncore, DDREC);
+ total += intel_uncore_read(uncore, CSIEC);
+
+ delta = total - ips->last_count1;
+
+ result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
+
+ ips->last_count1 = total;
+ ips->last_time1 = now;
+
+ ips->chipset_power = result;
+
+ return result;
+}
+
+static unsigned long ips_mch_val(struct intel_uncore *uncore)
+{
+ unsigned int m, x, b;
+ u32 tsfs;
+
+ tsfs = intel_uncore_read(uncore, TSFS);
+ x = intel_uncore_read8(uncore, TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+ m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
+
+ return m * x / 127 - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
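+
+/*
+ * e.g. pxvid 0x10 falls in the clamped 8..30 range, so it is treated as 31:
+ * (31 + 2) * 125 = 4125. pxvid 0x40 maps directly to (0x40 + 2) * 125 = 8250.
+ */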
+
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
+{
+ const int vd = _pxvid_to_vd(pxvid);
+
+ if (INTEL_INFO(i915)->is_mobile)
+ return max(vd - 1125, 0);
+
+ return vd;
+}
+
+static void __gen5_ips_update(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ u64 now, delta, dt;
+ u32 count;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ now = ktime_get_raw_ns();
+ dt = now - ips->last_time2;
+ do_div(dt, NSEC_PER_MSEC);
+
+ /* Don't divide by 0 */
+ if (dt <= 10)
+ return;
+
+ count = intel_uncore_read(uncore, GFXEC);
+ delta = count - ips->last_count2;
+
+ ips->last_count2 = count;
+ ips->last_time2 = now;
+
+ /* More magic constants... */
+ ips->gfx_power = div_u64(delta * 1181, dt * 10);
+}
+
+static void gen5_rps_update(struct intel_rps *rps)
+{
+ spin_lock_irq(&mchdev_lock);
+ __gen5_ips_update(&rps->ips);
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ /* Invert the frequency bin into an ips delay */
+ val = rps->max_freq - val;
+ val = rps->min_freq + val;
+
+ rgvswctl =
+ (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) |
+ MEMCTL_SFCAVM;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ return div * 133333 / (pre << post);
+}
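+
+/*
+ * Worked example (field values assumed): div = 12, post = 1, pre = 2 gives
+ * 12 * 133333 / (2 << 1) = 399999, i.e. roughly a 400 MHz bin derived from
+ * the 133 MHz reference.
+ */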
+
+static unsigned int init_emon(struct intel_uncore *uncore)
+{
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ intel_uncore_write(uncore, ECR, 0);
+ intel_uncore_posting_read(uncore, ECR);
+
+ /* Program energy weights for various events */
+ intel_uncore_write(uncore, SDEW, 0x15040d00);
+ intel_uncore_write(uncore, CSIEW0, 0x007f0000);
+ intel_uncore_write(uncore, CSIEW1, 0x1e220004);
+ intel_uncore_write(uncore, CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ intel_uncore_write(uncore, PEW(i), 0);
+ for (i = 0; i < 3; i++)
+ intel_uncore_write(uncore, DEW(i), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
+ unsigned int freq = intel_pxfreq(pxvidfreq);
+ unsigned int vid =
+ (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+ unsigned int val;
+
+ val = vid * vid * freq / 1000 * 255;
+ val /= 127 * 127 * 900;
+
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ intel_uncore_write(uncore, PXW(i),
+ pxw[i * 4 + 0] << 24 |
+ pxw[i * 4 + 1] << 16 |
+ pxw[i * 4 + 2] << 8 |
+ pxw[i * 4 + 3] << 0);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ intel_uncore_write(uncore, OGW0, 0);
+ intel_uncore_write(uncore, OGW1, 0);
+ intel_uncore_write(uncore, EG0, 0x00007f00);
+ intel_uncore_write(uncore, EG1, 0x0000000e);
+ intel_uncore_write(uncore, EG2, 0x000e0000);
+ intel_uncore_write(uncore, EG3, 0x68000300);
+ intel_uncore_write(uncore, EG4, 0x42000000);
+ intel_uncore_write(uncore, EG5, 0x00140031);
+ intel_uncore_write(uncore, EG6, 0);
+ intel_uncore_write(uncore, EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ intel_uncore_write(uncore, PXWL(i), 0);
+
+ /* Enable PMON + select events */
+ intel_uncore_write(uncore, ECR, 0x80000019);
+
+ return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
+}
+
+static bool gen5_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fstart, vstart;
+ u32 rgvmodectl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Enable temp reporting */
+ intel_uncore_write16(uncore, PMMISC,
+ intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1,
+ intel_uncore_read16(uncore, TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
+
+ intel_uncore_write(uncore, MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ mdelay(1);
+
+ gen5_rps_set(rps, rps->cur_freq);
+
+ rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
+ rps->ips.last_time1 = jiffies_to_msecs(jiffies);
+
+ rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+ rps->ips.last_time2 = ktime_get_raw_ns();
+
+ spin_unlock_irq(&mchdev_lock);
+
+ rps->ips.corr = init_emon(uncore);
+
+ return true;
+}
+
+static void gen5_rps_disable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ intel_uncore_write(uncore, MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ intel_uncore_write(uncore, DEIER,
+ intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIMR,
+ intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ gen5_rps_set(rps, rps->idle_freq);
+ mdelay(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
+ mdelay(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static u32 rps_limits(struct intel_rps *rps, u8 val)
+{
+ u32 limits;
+
+ /*
+ * Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+ limits = rps->max_freq_softlimit << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 14;
+ } else {
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
+ }
+
+ return limits;
+}
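+
+/*
+ * e.g. on gen9, limits always carries the max softlimit (<< 23); the min
+ * softlimit (<< 14) is OR'ed in only once val has bottomed out at
+ * min_freq_softlimit, per the comment above.
+ */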
+
+static void rps_set_power(struct intel_rps *rps, int new_power)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
+
+ lockdep_assert_held(&rps->power.mutex);
+
+ if (new_power == rps->power.mode)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ ei_up = 16000;
+ threshold_up = 95;
+
+ /* Downclock if less than 85% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 85;
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ ei_up = 13000;
+ threshold_up = 90;
+
+ /* Downclock if less than 75% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 75;
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ ei_up = 10000;
+ threshold_up = 85;
+
+ /* Downclock if less than 60% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 60;
+ break;
+ }
+
+ /* Once byt can survive dynamic sw freq adjustments without a system
+ * hang, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(i915))
+ goto skip_hw_write;
+
+ intel_uncore_write(uncore, GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(i915, ei_up));
+ intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915,
+ ei_up * threshold_up / 100));
+
+ intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(i915, ei_down));
+ intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915,
+ ei_down * threshold_down / 100));
+
+ intel_uncore_write(uncore, GEN6_RP_CONTROL,
+ (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
+{
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(rps, new_power);
+ mutex_unlock(&rps->power.mutex);
+}
+
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
+{
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && rps->active)
+ rps_set_power(rps, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
+
+static int gen6_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 swreq;
+
+ if (INTEL_GEN(i915) >= 9)
+ swreq = GEN9_FREQUENCY(val);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ swreq = HSW_FREQUENCY(val);
+ else
+ swreq = (GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+
+ return 0;
+}
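+
+/*
+ * e.g. on gen9 a request for val = 66 (1100 MHz in 16.66 MHz units) is
+ * packed into the GEN6_RPNSWREQ frequency field by GEN9_FREQUENCY(); the
+ * field position moved between gen6, HSW/BDW and gen9+, hence the three
+ * encodings.
+ */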
+
+static int vlv_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ vlv_punit_get(i915);
+ err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(i915);
+
+ return err;
+}
+
+static int rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ if (INTEL_GEN(i915) < 6)
+ return 0;
+
+ if (val == rps->last_freq)
+ return 0;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_rps_set(rps, val);
+ else
+ err = gen6_rps_set(rps, val);
+ if (err)
+ return err;
+
+ gen6_rps_set_thresholds(rps, val);
+ rps->last_freq = val;
+
+ return 0;
+}
+
+void intel_rps_unpark(struct intel_rps *rps)
+{
+ u8 freq;
+
+ if (!rps->enabled)
+ return;
+
+ /*
+ * Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ mutex_lock(&rps->lock);
+ rps->active = true;
+ freq = max(rps->cur_freq, rps->efficient_freq);
+ freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
+ intel_rps_set(rps, freq);
+ rps->last_adj = 0;
+ mutex_unlock(&rps->lock);
+
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_enable_interrupts(rps);
+
+ if (IS_GEN(rps_to_i915(rps), 5))
+ gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (!rps->enabled)
+ return;
+
+ if (INTEL_GEN(i915) >= 6)
+ rps_disable_interrupts(rps);
+
+ rps->active = false;
+ if (rps->last_freq <= rps->idle_freq)
+ return;
+
+ /*
+ * The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver are now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+ struct intel_rps *rps = &rq->engine->gt->rps;
+ unsigned long flags;
+
+ if (i915_request_signaled(rq) || !rps->active)
+ return;
+
+ /* Serializes with i915_request_retire() */
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
+ rq->flags |= I915_REQUEST_WAITBOOST;
+
+ if (!atomic_fetch_inc(&rps->num_waiters) &&
+ READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
+
+ atomic_inc(&rps->boosts);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err = 0;
+
+ lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
+
+ if (rps->active) {
+ err = rps_set(rps, val);
+
+ /*
+ * Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
+ rps_limits(rps, val));
+
+ intel_uncore_write(uncore, GEN6_PMINTRMSK,
+ rps_pm_mask(rps, val));
+ }
+ }
+
+ if (err == 0)
+ rps->cur_freq = val;
+
+ return err;
+}
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* All of these values are in units of 50MHz */
+
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
+ if (IS_GEN9_LP(i915)) {
+ u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 16) & 0xff;
+ }
+
+ /* hw_max = RP0 until we check for overclocking */
+ rps->max_freq = rps->rp0_freq;
+
+ rps->efficient_freq = rps->rp1_freq;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ u32 ddcc_status = 0;
+
+ if (sandybridge_pcode_read(i915,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status, NULL) == 0)
+ rps->efficient_freq =
+ clamp_t(u8,
+ (ddcc_status >> 8) & 0xff,
+ rps->min_freq,
+ rps->max_freq);
+ }
+
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ * the natural hardware unit for SKL
+ */
+ rps->rp0_freq *= GEN9_FREQ_SCALER;
+ rps->rp1_freq *= GEN9_FREQ_SCALER;
+ rps->min_freq *= GEN9_FREQ_SCALER;
+ rps->max_freq *= GEN9_FREQ_SCALER;
+ rps->efficient_freq *= GEN9_FREQ_SCALER;
+ }
+}
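+
+/*
+ * e.g. with GEN9_FREQ_SCALER = 3, a raw RP0 of 22 (22 * 50 = 1100 MHz)
+ * becomes 66 units of 16.66 MHz - the same 1100 MHz, just in the finer
+ * granularity the SKL+ hardware actually uses.
+ */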
+
+static bool rps_reset(struct intel_rps *rps)
+{
+ /* force a reset */
+ rps->power.mode = -1;
+ rps->last_freq = -1;
+
+ if (rps_set(rps, rps->min_freq)) {
+ DRM_ERROR("Failed to reset RPS to initial values\n");
+ return false;
+ }
+
+ rps->cur_freq = rps->min_freq;
+ return true;
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static bool gen9_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Program defaults and thresholds for RPS */
+ if (IS_GEN(i915, 9))
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(rps->rp1_freq));
+
+ /* 1 second timeout */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(i915, 1000000));
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+ return rps_reset(rps);
+}
+
+static bool gen8_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(rps->rp1_freq));
+
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ 100000000 / 128); /* 1 second timeout */
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static bool gen6_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Power down if completely idle for over 50ms */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static int chv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
+ break;
+ case 12:
+ /* (2 * 6) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+ /* Setting (2 * 8) Min RP0 for any other combination */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
+ break;
+ }
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static int chv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
+
+ return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+}
+
+static int chv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static u32 chv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static bool chv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ /* 1: Program defaults and thresholds for RPS*/
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 2: Enable RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ /* Setting Fixed Bias */
+ vlv_punit_get(i915);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static int vlv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp1;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
+ rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int vlv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp0;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min_t(u32, rp0, 0xea);
+
+ return rp0;
+}
+
+static int vlv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rpe;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+ return rpe;
+}
+
+static int vlv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ /*
+ * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
+ * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
+ * a BYT-M B0 the above register contains 0xbf. Moreover when setting
+ * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
+ * to make sure it matches what Punit accepts.
+ */
+ return max_t(u32, val, 0xc0);
+}
+
+static bool vlv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ vlv_punit_get(i915);
+
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static unsigned long __ips_gfx_val(struct intel_ips *ips)
+{
+ struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
+
+ state1 = ext_v;
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ t = ips_mch_val(uncore);
+ if (t > 80)
+ corr = t * 2349 + 135940;
+ else if (t >= 50)
+ corr = t * 964 + 29317;
+ else /* < 50 */
+ corr = t * 301 + 1004;
+
+ corr = corr * 150142 * state1 / 10000 - 78642;
+ corr /= 100000;
+ corr2 = corr * ips->corr;
+
+ state2 = corr2 * state1 / 10000;
+ state2 /= 100; /* convert to mW */
+
+ __gen5_ips_update(ips);
+
+ return ips->gfx_power + state2;
+}
+
+void intel_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (IS_CHERRYVIEW(i915))
+ rps->enabled = chv_rps_enable(rps);
+ else if (IS_VALLEYVIEW(i915))
+ rps->enabled = vlv_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 9)
+ rps->enabled = gen9_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 8)
+ rps->enabled = gen8_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ rps->enabled = gen6_rps_enable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ rps->enabled = gen5_rps_enable(rps);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (!rps->enabled)
+ return;
+
+ WARN_ON(rps->max_freq < rps->min_freq);
+ WARN_ON(rps->idle_freq > rps->max_freq);
+
+ WARN_ON(rps->efficient_freq < rps->min_freq);
+ WARN_ON(rps->efficient_freq > rps->max_freq);
+}
+
+static void gen6_rps_disable(struct intel_rps *rps)
+{
+ intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+}
+
+void intel_rps_disable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->enabled = false;
+
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_disable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_disable(rps);
+}
+
+static int byt_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
+}
+
+static int byt_freq_opcode(struct intel_rps *rps, int val)
+{
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
+}
+
+static int chv_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
+}
+
+static int chv_freq_opcode(struct intel_rps *rps, int val)
+{
+ /* CHV needs even values */
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
+}
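+
+/*
+ * Round-trip example (gpll_ref_freq assumed to be 19200 kHz): a request of
+ * 624 MHz encodes as DIV_ROUND_CLOSEST(2 * 1000 * 624, 19200) * 2 = 130,
+ * and chv_gpu_freq(130) = 19200 * 130 / (2 * 2 * 1000) = 624 MHz again.
+ * The trailing "* 2" keeps the opcode even, as CHV requires.
+ */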
+
+int intel_gpu_freq(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+ GEN9_FREQ_SCALER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_gpu_freq(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_gpu_freq(rps, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
+}
+
+int intel_freq_opcode(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+ GT_FREQUENCY_MULTIPLIER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_freq_opcode(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_freq_opcode(rps, val);
+ else
+ return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+}
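+
+/*
+ * e.g. on gen9, intel_freq_opcode(rps, 1100) = DIV_ROUND_CLOSEST(1100 * 3,
+ * 50) = 66, and intel_gpu_freq(rps, 66) = DIV_ROUND_CLOSEST(66 * 50, 3) =
+ * 1100 MHz - the two helpers are inverses, up to rounding.
+ */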
+
+static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->gpll_ref_freq =
+ vlv_get_cck_clock(i915, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ i915->czclk_freq);
+
+ DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+}
+
+static void vlv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = vlv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = vlv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = vlv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = vlv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+}
+
+static void chv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = chv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = chv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = chv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = chv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+ rps->min_freq) & 1,
+ "Odd GPU freq values\n");
+}
+
+static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
+{
+ ei->ktime = ktime_get_raw();
+ ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
+ ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
+}
+
+static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ const struct intel_rps_ei *prev = &rps->ei;
+ struct intel_rps_ei now;
+ u32 events = 0;
+
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+ return 0;
+
+ vlv_c0_read(uncore, &now);
+
+ if (prev->ktime) {
+ u64 time, c0;
+ u32 render, media;
+
+ time = ktime_us_delta(now.ktime, prev->ktime);
+
+ time *= rps_to_i915(rps)->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ render = now.render_c0 - prev->render_c0;
+ media = now.media_c0 - prev->media_c0;
+ c0 = max(render, media);
+ c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+ if (c0 > time * rps->power.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * rps->power.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
+ }
+
+ rps->ei = now;
+ return events;
+}
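+
+/*
+ * The scale factors keep the comparison integer-only: time (us) * czclk
+ * (kHz) is clock ticks * 1000; c0 picks up a matching 1000, a 100 to work
+ * in percent, and << 8 (presumably the C0 counter granularity), so
+ * "c0 > time * up_threshold" is effectively busy% > threshold%.
+ */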
+
+static void rps_work(struct work_struct *work)
+{
+ struct intel_rps *rps = container_of(work, typeof(*rps), work);
+ struct intel_gt *gt = rps_to_gt(rps);
+ bool client_boost = false;
+ int new_freq, adj, min, max;
+ u32 pm_iir = 0;
+
+ spin_lock_irq(&gt->irq_lock);
+ pm_iir = fetch_and_zero(&rps->pm_iir);
+ client_boost = atomic_read(&rps->num_waiters);
+ spin_unlock_irq(&gt->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ goto out;
+
+ mutex_lock(&rps->lock);
+
+ pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
+
+ adj = rps->last_adj;
+ new_freq = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
+ if (client_boost)
+ max = rps->max_freq;
+ if (client_boost && new_freq < rps->boost_freq) {
+ new_freq = rps->boost_freq;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
+
+ if (new_freq >= rps->max_freq_softlimit)
+ adj = 0;
+ } else if (client_boost) {
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (rps->cur_freq > rps->efficient_freq)
+ new_freq = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_freq = rps->min_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
+
+ if (new_freq <= rps->min_freq_softlimit)
+ adj = 0;
+ } else { /* unknown event */
+ adj = 0;
+ }
+
+ rps->last_adj = adj;
+
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ new_freq += adj;
+ new_freq = clamp_t(int, new_freq, min, max);
+
+ if (intel_rps_set(rps, new_freq)) {
+ DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+ rps->last_adj = 0;
+ }
+
+ mutex_unlock(&rps->lock);
+
+out:
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
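+
+/*
+ * Illustration: consecutive UP_THRESHOLD events double the step (adj = 1,
+ * 2, 4, ...), so a sustained load ramps the frequency geometrically, while
+ * a boost, a softlimit, or an unknown event resets adj to 0.
+ */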
+
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ const u32 events = rps->pm_events & pm_iir;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_gt_pm_mask_irq(gt, events);
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ if (pm_iir & rps->pm_events) {
+ spin_lock(&gt->irq_lock);
+ gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
+ rps->pm_iir |= pm_iir & rps->pm_events;
+ schedule_work(&rps->work);
+ spin_unlock(&gt->irq_lock);
+ }
+
+ if (INTEL_GEN(gt->i915) >= 8)
+ return;
+
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine[VECS0]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+}
+
+void gen5_rps_irq_handler(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_freq;
+
+ spin_lock(&mchdev_lock);
+
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
+
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ new_freq = rps->cur_freq;
+ if (busy_up > max_avg)
+ new_freq++;
+ else if (busy_down < min_avg)
+ new_freq--;
+ new_freq = clamp(new_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ rps->cur_freq = new_freq;
+
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_rps_init_early(struct intel_rps *rps)
+{
+ mutex_init(&rps->lock);
+ mutex_init(&rps->power.mutex);
+
+ INIT_WORK(&rps->work, rps_work);
+
+ atomic_set(&rps->num_waiters, 0);
+}
+
+void intel_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rps_init(rps);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rps_init(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rps_init(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_init(rps);
+
+ /* Derive initial user preferences/limits from the hardware limits */
+ rps->max_freq_softlimit = rps->max_freq;
+ rps->min_freq_softlimit = rps->min_freq;
+
+ /* After setting max-softlimit, find the overclock max freq */
+ if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+ u32 params = 0;
+
+ sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
+ &params, NULL);
+ if (params & BIT(31)) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
+ rps->max_freq = params & 0xff;
+ }
+ }
+
+ /* Finally allow us to boost to max by default */
+ rps->boost_freq = rps->max_freq;
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+
+ rps->pm_intrmsk_mbz = 0;
+
+ /*
+ * SNB, IVB and HSW can hang, while VLV and CHV may hard hang, on a
+ * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_GEN(i915) <= 7)
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_GEN(i915) >= 8)
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
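+
+/*
+ * e.g. if GEN6_READ_OC_PARAMS returns params with BIT(31) set and
+ * params & 0xff = 0x20, the hardware advertises a 0x20 * 50 = 1600 MHz
+ * overclock ceiling and max_freq is raised from RP0 to 0x20.
+ */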
+
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 cagf;
+
+ if (INTEL_GEN(i915) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+ return cagf;
+}
+
+/* External interface for intel_ips.ko */
+
+static struct drm_i915_private __rcu *ips_mchdev;
+
+/**
+ * ips_ping_for_i915_load - tell the intel_ips driver that i915 is now
+ * loaded, if IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_rps_driver_register(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ /*
+ * We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values.
+ */
+ if (IS_GEN(gt->i915, 5)) {
+ rcu_assign_pointer(ips_mchdev, gt->i915);
+ ips_ping_for_i915_load();
+ }
+}
+
+void intel_rps_driver_unregister(struct intel_rps *rps)
+{
+ rcu_assign_pointer(ips_mchdev, NULL);
+}
+
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = rcu_dereference(ips_mchdev);
+ if (!kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_ips *ips = &i915->gt.rps.ips;
+
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __ips_chipset_val(ips);
+ graphics_val = __ips_gfx_val(ips);
+ spin_unlock_irq(&mchdev_lock);
+ }
+
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit < rps->max_freq)
+ rps->max_freq_softlimit++;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit > rps->min_freq)
+ rps->max_freq_softlimit--;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *i915;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ rps->max_freq_softlimit = rps->min_freq;
+ ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
new file mode 100644
index 000000000000..9518c66c9792
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+
+struct i915_request;
+
+void intel_rps_init_early(struct intel_rps *rps);
+void intel_rps_init(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+#endif /* INTEL_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
new file mode 100644
index 000000000000..c2e279154bd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ u64 last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c, m;
+};
+
+struct intel_rps_ei {
+ ktime_t ktime;
+ u32 render_c0;
+ u32 media_c0;
+};
+
+struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
+ /*
+ * work, interrupts_enabled and pm_iir are protected by
+ * dev_priv->irq_lock
+ */
+ struct work_struct work;
+ bool enabled;
+ bool active;
+ u32 pm_iir;
+
+ /* PM interrupt bits that should never be masked */
+ u32 pm_intrmsk_mbz;
+ u32 pm_events;
+
+ /* Frequencies are stored in potentially platform dependent multiples.
+ * In other words, *_freq needs to be multiplied by X to be interesting.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 last_freq; /* Last SWREQ frequency */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 boost_freq; /* Frequency to request when wait boosting */
+ u8 idle_freq; /* Frequency to request when we are idle */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+ u8 rp1_freq; /* "less than" RP0 power/frequency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
+
+ int last_adj;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
+
+ atomic_t num_waiters;
+ atomic_t boosts;
+
+ /* manual wa residency calculations */
+ struct intel_rps_ei ei;
+ struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6bf2d87da109..74f793423231 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -8,6 +8,19 @@
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice)
+{
+ sseu->max_slices = max_slices;
+ sseu->max_subslices = max_subslices;
+ sseu->max_eus_per_subslice = max_eus_per_subslice;
+
+ sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE);
+ sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE);
+}
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
@@ -19,10 +32,32 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
return total;
}
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+{
+ int i, offset = slice * sseu->ss_stride;
+ u32 mask = 0;
+
+ GEM_BUG_ON(slice >= sseu->max_slices);
+
+ for (i = 0; i < sseu->ss_stride; i++)
+ mask |= (u32)sseu->subslice_mask[offset + i] <<
+ i * BITS_PER_BYTE;
+
+ return mask;
+}
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride);
+}
+
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
{
- return hweight8(sseu->subslice_mask[slice]);
+ return hweight32(intel_sseu_get_subslices(sseu, slice));
}
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index b50d0401a4e2..d1d225204f09 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -10,15 +10,21 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include "i915_gem.h"
+
struct drm_i915_private;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
+#define GEN_MAX_EUS (16) /* TGL upper bound */
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
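+
+/*
+ * e.g. GEN_SSEU_STRIDE(8) = DIV_ROUND_UP(8, 8) = 1 byte per subslice mask,
+ * and GEN_SSEU_STRIDE(16) = 2 bytes per EU mask, so the flattened
+ * subslice_mask[] below is indexed as slice * ss_stride + byte.
+ */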
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES];
+ u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -33,11 +39,8 @@ struct sseu_dev_info {
u8 max_subslices;
u8 max_eus_per_subslice;
- /* We don't have more than 8 eus per subslice at the moment and as we
- * store eus enabled using bits, no need to multiply by eus per
- * subslice.
- */
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+ u8 ss_stride;
+ u8 eu_stride;
};
/*
@@ -63,12 +66,34 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
return value;
}
+static inline bool
+intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ u8 mask;
+ int ss_idx = subslice / BITS_PER_BYTE;
+
+ GEM_BUG_ON(ss_idx >= sseu->ss_stride);
+
+ mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
+
+ return mask & BIT(subslice % BITS_PER_BYTE);
+}
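+
+/*
+ * e.g. subslice 10 lands in byte ss_idx = 10 / 8 = 1 of the slice's mask
+ * and is tested against bit 10 % 8 = 2 of that byte.
+ */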
+
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice);
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask);
+
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 9cb01d9828f1..14ad10acd548 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,13 +4,13 @@
* Copyright © 2016-2018 Intel Corporation
*/
-#include "gt/intel_gt_types.h"
-
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "gt/intel_timeline.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
@@ -136,6 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
kfree(cl);
}
+__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
struct intel_timeline_cacheline *cl =
@@ -177,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
- i915_active_init(hwsp->gt->i915, &cl->active,
- __cacheline_active, __cacheline_retire);
+ i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
return cl;
}
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -442,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
+ err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
if (err)
goto err_cacheline;
@@ -493,24 +493,39 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
- return i915_active_ref(&cl->active, rq->timeline, rq);
+ return i915_active_add_request(&cl->active, rq);
}
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
- struct intel_timeline *tl = from->timeline;
+ struct intel_timeline *tl;
int err;
- GEM_BUG_ON(to->timeline == tl);
+ rcu_read_lock();
+ tl = rcu_dereference(from->timeline);
+ if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+ if (!tl) /* already completed */
+ return 1;
+
+ GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl);
+
+ err = -EBUSY;
+ if (mutex_trylock(&tl->mutex)) {
+ struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+
+ if (i915_request_completed(from)) {
+ err = 1;
+ goto unlock;
+ }
- mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
- err = i915_request_completed(from);
- if (!err)
err = cacheline_ref(cl, to);
- if (!err) {
+ if (err)
+ goto unlock;
+
if (likely(cl == tl->hwsp_cacheline)) {
*hwsp = tl->hwsp_offset;
} else { /* across a seqno wrap, recover the original offset */
@@ -518,8 +533,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
CACHELINE_BYTES;
}
+
+unlock:
+ mutex_unlock(&tl->mutex);
}
- mutex_unlock(&tl->mutex);
+ intel_timeline_put(tl);
return err;
}
@@ -541,7 +559,7 @@ void __intel_timeline_free(struct kref *kref)
container_of(kref, typeof(*timeline), kref);
intel_timeline_fini(timeline);
- kfree(timeline);
+ kfree_rcu(timeline, rcu);
}
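
The lookup above pairs rcu_dereference() with kref_get_unless_zero(), and the release side switches to kfree_rcu() (hence the new rcu_head in the types header below), so a reader that loses the race against the final put sees a zero refcount rather than freed memory. A generic sketch of the idiom, all names hypothetical:

#include <linux/kref.h>
#include <linux/rcupdate.h>

struct obj {
        struct kref kref;
        struct rcu_head rcu;
};

static void obj_release(struct kref *kref)
{
        /* Defer the free until all current RCU readers are done. */
        kfree_rcu(container_of(kref, struct obj, kref), rcu);
}

static struct obj *obj_lookup(struct obj __rcu **slot)
{
        struct obj *o;

        rcu_read_lock();
        o = rcu_dereference(*slot);
        /* The last put may race with us; only succeed if a ref remains. */
        if (o && !kref_get_unless_zero(&o->kref))
                o = NULL;
        rcu_read_unlock();

        return o;
}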
static void timelines_fini(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 2b1baf2fcc8e..98d9ee166379 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -58,12 +58,13 @@ struct intel_timeline {
*/
struct list_head requests;
- /* Contains an RCU guarded pointer to the last request. No reference is
+ /*
+ * Contains an RCU guarded pointer to the last request. No reference is
 * held to the request; users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
+ * the request using i915_active_fence_get(), or manage the RCU
+ * protection themselves (cf the i915_active_fence API).
*/
- struct i915_active_request last_request;
+ struct i915_active_fence last_request;
/**
* We track the most recent seqno that we wait on in every context so
@@ -80,6 +81,7 @@ struct intel_timeline {
struct intel_gt *gt;
struct kref kref;
+ struct rcu_head rcu;
};
#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5f6ec2fd29a0..e4bccc14602f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/**
@@ -567,6 +568,9 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ /* Wa_1409142259:tgl */
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
}
static void
@@ -796,11 +800,10 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
slice = fls(sseu->slice_mask) - 1;
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = fls(l3_en & sseu->subslice_mask[slice]);
+ subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
if (!subslice) {
DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
- sseu->subslice_mask[slice], l3_en);
+ intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
WARN_ON(!subslice);
}
@@ -890,11 +893,27 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+
+ /* Wa_1607087056:icl */
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
+ /* Wa_1409420604:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
+
+ /* Wa_1409180338:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
@@ -1197,6 +1216,26 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+ break;
+ default:
+ break;
+ }
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1258,6 +1297,26 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1607138336:tgl */
+ wa_write_or(wal,
+ GEN9_CTX_PREEMPT_REG,
+ GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
+
+ /* Wa_1607030317:tgl */
+ /* Wa_1607186500:tgl */
+ /* Wa_1607297627:tgl */
+ wa_masked_en(wal,
+ GEN6_RC_SLEEP_PSMI_CONTROL,
+ GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+ }
+
if (IS_GEN(i915, 11)) {
 /* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -1452,7 +1511,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff))
+ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
return true;
return false;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5d43cbc3f345..83f549d203a0 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -23,6 +23,7 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "intel_context.h"
@@ -240,6 +241,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
+ GEM_BUG_ON(!i915->gt.uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -248,9 +250,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
engine->base.gt = &i915->gt;
+ engine->base.uncore = i915->gt.uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
+ engine->base.legacy_idx = INVALID_ENGINE;
engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
@@ -265,6 +269,9 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.reset.finish = mock_reset_finish;
engine->base.cancel_requests = mock_cancel_requests;
+ i915->gt.engine[id] = &engine->base;
+ i915->gt.engine_class[0][id] = &engine->base;
+
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 9d1ea26c7a2d..bc720defc6b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -14,22 +14,28 @@
static int request_sync(struct i915_request *rq)
{
+ struct intel_timeline *tl = i915_request_timeline(rq);
long timeout;
int err = 0;
+ intel_timeline_get(tl);
i915_request_get(rq);
- i915_request_add(rq);
+ /* Opencode i915_request_add() so we can keep the timeline locked. */
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, NULL);
+
timeout = i915_request_wait(rq, 0, HZ / 10);
- if (timeout < 0) {
+ if (timeout < 0)
err = timeout;
- } else {
- mutex_lock(&rq->timeline->mutex);
+ else
i915_request_retire_upto(rq);
- mutex_unlock(&rq->timeline->mutex);
- }
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+ mutex_unlock(&tl->mutex);
i915_request_put(rq);
+ intel_timeline_put(tl);
return err;
}
@@ -41,24 +47,20 @@ static int context_sync(struct intel_context *ce)
mutex_lock(&tl->mutex);
do {
- struct i915_request *rq;
+ struct dma_fence *fence;
long timeout;
- rcu_read_lock();
- rq = rcu_dereference(tl->last_request.request);
- if (rq)
- rq = i915_request_get_rcu(rq);
- rcu_read_unlock();
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
break;
- timeout = i915_request_wait(rq, 0, HZ / 10);
+ timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
if (timeout < 0)
err = timeout;
else
- i915_request_retire_upto(rq);
+ i915_request_retire_upto(to_request(fence));
- i915_request_put(rq);
+ dma_fence_put(fence);
} while (!err);
mutex_unlock(&tl->mutex);
@@ -101,9 +103,6 @@ static int __live_context_size(struct intel_engine_cs *engine,
*
* TLDR; this overlaps with the execlists redzone.
*/
- if (HAS_EXECLISTS(engine->i915))
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
-
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -153,15 +152,11 @@ static int live_context_size(void *arg)
* HW tries to write past the end of one.
*/
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = kernel_context(gt->i915);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto unlock;
- }
+ if (IS_ERR(fixme))
+ return PTR_ERR(fixme);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct {
struct drm_i915_gem_object *state;
void *pinned;
@@ -199,8 +194,6 @@ static int live_context_size(void *arg)
}
kernel_context_close(fixme);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -303,26 +296,23 @@ static int live_active_context(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = live_context(gt->i915, file);
if (IS_ERR(fixme)) {
err = PTR_ERR(fixme);
- goto unlock;
+ goto out_file;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = __live_active_context(engine, fixme);
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
mock_file_free(gt->i915, file);
return err;
}
@@ -416,26 +406,23 @@ static int live_remote_context(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = live_context(gt->i915, file);
if (IS_ERR(fixme)) {
err = PTR_ERR(fixme);
- goto unlock;
+ goto out_file;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = __live_remote_context(engine, fixme);
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
mock_file_free(gt->i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
new file mode 100644
index 000000000000..e864406bd2d9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -0,0 +1,350 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "i915_drv.h"
+
+#include "intel_gt_requests.h"
+#include "i915_selftest.h"
+
+struct pulse {
+ struct i915_active active;
+ struct kref kref;
+};
+
+static int pulse_active(struct i915_active *active)
+{
+ kref_get(&container_of(active, struct pulse, active)->kref);
+ return 0;
+}
+
+static void pulse_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct pulse, kref));
+}
+
+static void pulse_put(struct pulse *p)
+{
+ kref_put(&p->kref, pulse_free);
+}
+
+static void pulse_retire(struct i915_active *active)
+{
+ pulse_put(container_of(active, struct pulse, active));
+}
+
+static struct pulse *pulse_create(void)
+{
+ struct pulse *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ kref_init(&p->kref);
+ i915_active_init(&p->active, pulse_active, pulse_retire);
+
+ return p;
+}
+
+static void pulse_unlock_wait(struct pulse *p)
+{
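+ /* Cycle the mutex to wait out a retire callback still holding it, then drain any retirement deferred to the worker. */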
+ mutex_lock(&p->active.mutex);
+ mutex_unlock(&p->active.mutex);
+ flush_work(&p->active.work);
+}
+
+static int __live_idle_pulse(struct intel_engine_cs *engine,
+ int (*fn)(struct intel_engine_cs *cs))
+{
+ struct pulse *p;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ p = pulse_create();
+ if (!p)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&p->active);
+ if (err)
+ goto out;
+
+ err = i915_active_acquire_preallocate_barrier(&p->active, engine);
+ if (err) {
+ i915_active_release(&p->active);
+ goto out;
+ }
+
+ i915_active_acquire_barrier(&p->active);
+ i915_active_release(&p->active);
+
+ GEM_BUG_ON(i915_active_is_idle(&p->active));
+ GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
+
+ err = fn(engine);
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+
+ if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) {
+ err = -ETIME;
+ goto out;
+ }
+
+ GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
+
+ pulse_unlock_wait(p); /* synchronize with the retirement callback */
+
+ if (!i915_active_is_idle(&p->active)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
+ i915_active_print(&p->active, &m);
+
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ pulse_put(p);
+ return err;
+}
+
+static int live_idle_flush(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_flush_barriers);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int live_idle_pulse(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that heartbeat pulses flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_pulse);
+ intel_engine_pm_put(engine);
+ if (err && err != -ENODEV)
+ break;
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ const u32 *a = _a, *b = _b;
+
+ /* Compare explicitly to avoid u32 wraparound in the subtraction */
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+
+ return 0;
+}
+
+static int __live_heartbeat_fast(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u32 times[5];
+ int err;
+ int i;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ intel_engine_pm_get(engine);
+
+ err = intel_engine_set_heartbeat(engine, 1);
+ if (err)
+ goto err_pm;
+
+ for (i = 0; i < ARRAY_SIZE(times); i++) {
+ /* Manufacture a tick */
+ do {
+ while (READ_ONCE(engine->heartbeat.systole))
+ flush_delayed_work(&engine->heartbeat.work);
+
+ engine->serial++; /* quick, pretend we are not idle! */
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat did not start\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ } while (!rq);
+
+ t0 = ktime_get();
+ while (rq == READ_ONCE(engine->heartbeat.systole))
+ yield(); /* work is on the local cpu! */
+ t1 = ktime_get();
+
+ i915_request_put(rq);
+ times[i] = ktime_us_delta(t1, t0);
+ }
+
+ sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
+
+ pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ times[0],
+ times[ARRAY_SIZE(times) - 1]);
+
+ /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
+ if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ jiffies_to_usecs(6));
+ err = -EINVAL;
+ }
+
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ return err;
+}
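
The measurement loop above takes an odd number of samples, sorts them, and reports the median so that a single scheduling hiccup cannot skew the verdict. A minimal sketch of that idiom, reusing cmp_u32() from above (helper name hypothetical, not part of the patch):

static u32 median_us(u32 *samples, size_t count)
{
        /* Sort ascending, then pick the middle sample as the median. */
        sort(samples, count, sizeof(*samples), cmp_u32, NULL);
        return samples[count / 2];
}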
+
+static int live_heartbeat_fast(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the heartbeat ticks at the desired rate. */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_heartbeat_fast(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_heartbeat_off(struct intel_engine_cs *engine)
+{
+ int err;
+
+ intel_engine_pm_get(engine);
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat not running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ err = intel_engine_set_heartbeat(engine, 0);
+ if (err)
+ goto err_pm;
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat still running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+ if (READ_ONCE(engine->heartbeat.systole)) {
+ pr_err("%s: heartbeat still allocated\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+err_beat:
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+static int live_heartbeat_off(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can turn off heartbeat and not interrupt VIP */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = __live_heartbeat_off(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_idle_flush),
+ SUBTEST(live_idle_pulse),
+ SUBTEST(live_heartbeat_fast),
+ SUBTEST(live_heartbeat_off),
+ };
+ int saved_hangcheck;
+ int err;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ saved_hangcheck = i915_modparams.enable_hangcheck;
+ i915_modparams.enable_hangcheck = INT_MAX;
+
+ err = intel_gt_live_subtests(tests, &i915->gt);
+
+ i915_modparams.enable_hangcheck = saved_hangcheck;
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 3a1419376912..20b9c83f43ad 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -25,7 +25,7 @@ static int live_engine_pm(void *arg)
}
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
const typeof(*igt_atomic_phases) *p;
for (p = igt_atomic_phases; p->name; p++) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
new file mode 100644
index 000000000000..d1752f15702a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -0,0 +1,60 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftest_llc.h"
+
+static int live_gt_resume(void *arg)
+{
+ struct intel_gt *gt = arg;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ /* Do several suspend/resume cycles to check we don't explode! */
+ do {
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ if (gt->rc6.enabled) {
+ pr_err("rc6 still enabled after suspend!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = intel_gt_resume(gt);
+ if (err)
+ break;
+
+ if (gt->rc6.supported && !gt->rc6.enabled) {
+ pr_err("rc6 not enabled upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = st_llc_verify(&gt->llc);
+ if (err) {
+ pr_err("llc state not restored upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_gt_resume),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index a0098fc35921..85e9ccf5c304 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -131,7 +131,7 @@ static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct intel_gt *gt = h->gt;
- struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
@@ -141,12 +141,15 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
int err;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ i915_vm_put(vm);
return ERR_CAST(obj);
+ }
vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
+ i915_vm_put(vm);
return ERR_CAST(vaddr);
}
@@ -157,16 +160,22 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_vm_put(vm);
return ERR_CAST(vma);
+ }
hws = i915_vma_instance(h->hws, vm, NULL);
- if (IS_ERR(hws))
+ if (IS_ERR(hws)) {
+ i915_vm_put(vm);
return ERR_CAST(hws);
+ }
err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return ERR_PTR(err);
+ }
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
@@ -264,6 +273,7 @@ unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
+ i915_vm_put(vm);
return err ? ERR_PTR(err) : rq;
}
@@ -285,7 +295,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -309,12 +319,11 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_wedge_me w;
long timeout;
@@ -355,8 +364,6 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -383,9 +390,7 @@ static int igt_reset_nop(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
@@ -395,9 +400,7 @@ static int igt_reset_nop(void *arg)
reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&gt->i915->drm.struct_mutex);
-
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
int i;
for (i = 0; i < 16; i++) {
@@ -417,7 +420,6 @@ static int igt_reset_nop(void *arg)
intel_gt_reset(gt, ALL_ENGINES, NULL);
igt_global_reset_unlock(gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
@@ -429,16 +431,13 @@ static int igt_reset_nop(void *arg)
break;
}
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@@ -458,23 +457,21 @@ static int igt_reset_nop_engine(void *arg)
/* Check that we can engine-reset during non-user portions */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
IGT_TIMEOUT(end_time);
@@ -494,7 +491,6 @@ static int igt_reset_nop_engine(void *arg)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
@@ -507,7 +503,6 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
err = intel_engine_reset(engine, NULL);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
@@ -533,15 +528,12 @@ static int igt_reset_nop_engine(void *arg)
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@@ -559,18 +551,16 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
/* Check that we can issue an engine reset on an idle engine (no-op) */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
IGT_TIMEOUT(end_time);
@@ -593,17 +583,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (active) {
struct i915_request *rq;
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -647,7 +634,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -655,11 +642,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (active)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
@@ -725,9 +709,7 @@ static int active_engine(void *data)
return PTR_ERR(file);
for (count = 0; count < ARRAY_SIZE(ctx); count++) {
- mutex_lock(&engine->i915->drm.struct_mutex);
ctx[count] = live_context(engine->i915, file);
- mutex_unlock(&engine->i915->drm.struct_mutex);
if (IS_ERR(ctx[count])) {
err = PTR_ERR(ctx[count]);
while (--count)
@@ -741,10 +723,8 @@ static int active_engine(void *data)
struct i915_request *old = rq[idx];
struct i915_request *new;
- mutex_lock(&engine->i915->drm.struct_mutex);
new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
@@ -755,7 +735,6 @@ static int active_engine(void *data)
rq[idx] = i915_request_get(new);
i915_request_add(new);
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = active_request_put(old);
if (err)
@@ -791,13 +770,11 @@ static int __igt_reset_engines(struct intel_gt *gt,
* with any other engine.
*/
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
@@ -805,7 +782,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
h.ctx->sched.priority = 1024;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
@@ -823,7 +800,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
memset(threads, 0, sizeof(threads));
- for_each_engine(other, gt->i915, tmp) {
+ for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
threads[tmp].resets =
@@ -849,23 +826,22 @@ static int __igt_reset_engines(struct intel_gt *gt,
get_task_struct(tsk);
}
+ yield(); /* start all threads before we begin */
+
intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -940,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
unwind:
- for_each_engine(other, gt->i915, tmp) {
+ for_each_engine(other, gt, tmp) {
int ret;
if (!threads[tmp].task)
@@ -977,9 +953,7 @@ unwind:
if (err)
break;
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -987,11 +961,8 @@ unwind:
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (flags & TEST_ACTIVE)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
@@ -1047,7 +1018,7 @@ static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
@@ -1061,7 +1032,6 @@ static int igt_reset_wait(void *arg)
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
@@ -1109,7 +1079,6 @@ out_rq:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -1127,15 +1096,14 @@ static int evict_vma(void *data)
{
struct evict_vma *arg = data;
struct i915_address_space *vm = arg->vma->vm;
- struct drm_i915_private *i915 = vm->i915;
struct drm_mm_node evict = arg->vma->node;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&vm->mutex);
err = i915_gem_evict_for_node(vm, &evict, 0);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&vm->mutex);
return err;
}
@@ -1143,39 +1111,33 @@ static int evict_vma(void *data)
static int evict_fence(void *data)
{
struct evict_vma *arg = data;
- struct drm_i915_private *i915 = arg->vma->vm->i915;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
-
/* Mark the fence register as dirty to force the mmio update. */
err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
if (err) {
pr_err("Invalid Y-tiling settings; err:%d\n", err);
- goto out_unlock;
+ return err;
}
err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
if (err) {
pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
err = i915_vma_pin_fence(arg->vma);
i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
i915_vma_unpin_fence(arg->vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return 0;
}
static int __igt_reset_evict_vma(struct intel_gt *gt,
@@ -1183,23 +1145,26 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
int (*fn)(void *),
unsigned int flags)
{
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+ unsigned int pin_flags;
int err;
+ if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+ return 0;
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
@@ -1227,10 +1192,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
goto out_obj;
}
- err = i915_vma_pin(arg.vma, 0, 0,
- i915_vma_is_ggtt(arg.vma) ?
- PIN_GLOBAL | PIN_MAPPABLE :
- PIN_USER);
+ pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ pin_flags |= PIN_MAPPABLE;
+
+ err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
@@ -1262,8 +1229,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (err)
goto out_rq;
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -1312,16 +1277,12 @@ out_reset:
put_task_struct(tsk);
}
- mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (intel_gt_is_wedged(gt))
return -EIO;
@@ -1340,6 +1301,7 @@ static int igt_reset_evict_ppgtt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
+ struct i915_address_space *vm;
struct drm_file *file;
int err;
@@ -1347,18 +1309,20 @@ static int igt_reset_evict_ppgtt(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
err = 0;
- if (ctx->vm) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(gt, ctx->vm,
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_is_ggtt(vm)) {
+ /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(gt, vm,
evict_vma, EXEC_OBJECT_WRITE);
+ }
+ i915_vm_put(vm);
out:
mock_file_free(gt->i915, file);
@@ -1379,7 +1343,7 @@ static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine == exclude)
continue;
@@ -1403,12 +1367,11 @@ static int igt_reset_queue(void *arg)
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -1518,7 +1481,7 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -1526,7 +1489,6 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -1539,7 +1501,7 @@ static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1547,17 +1509,15 @@ static int igt_handle_error(void *arg)
/* Check that we can issue a global GPU and engine reset */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&gt->i915->drm.struct_mutex);
-
err = hang_init(&h, gt);
if (err)
- goto err_unlock;
+ return err;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
@@ -1581,8 +1541,6 @@ static int igt_handle_error(void *arg)
goto err_request;
}
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
/* Temporarily disable error capture */
error = xchg(&global->first_error, (void *)-1);
@@ -1590,8 +1548,6 @@ static int igt_handle_error(void *arg)
xchg(&global->first_error, error);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
err = -EINVAL;
@@ -1602,8 +1558,6 @@ err_request:
i915_request_put(rq);
err_fini:
hang_fini(&h);
-err_unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -1617,7 +1571,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable_nosync(t);
+ tasklet_disable(t);
p->critical_section_begin();
err = intel_engine_reset(engine, NULL);
@@ -1689,14 +1643,13 @@ static int igt_reset_engines_atomic(void *arg)
 /* Check that the engine resets are usable from atomic context */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (USES_GUC_SUBMISSION(gt->i915))
return 0;
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
@@ -1706,7 +1659,7 @@ static int igt_reset_engines_atomic(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
@@ -1716,9 +1669,7 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
-
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
return err;
@@ -1743,27 +1694,19 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
};
struct intel_gt *gt = &i915->gt;
intel_wakeref_t wakeref;
- bool saved_hangcheck;
int err;
- if (!intel_has_gpu_reset(gt->i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
- drain_delayed_work(&gt->hangcheck.work); /* flush param */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = intel_gt_live_subtests(tests, gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
- igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
- i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
new file mode 100644
index 000000000000..fd3770e48ac7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_pm.h" /* intel_gpu_freq() */
+#include "selftest_llc.h"
+#include "intel_rps.h"
+
+static int gen6_verify_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ intel_wakeref_t wakeref;
+ unsigned int gpu_freq;
+ int err = 0;
+
+ wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
+
+ if (!get_ia_constants(llc, &consts)) {
+ err = -ENODEV;
+ goto out_rpm;
+ }
+
+ for (gpu_freq = consts.min_gpu_freq;
+ gpu_freq <= consts.max_gpu_freq;
+ gpu_freq++) {
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ unsigned int ia_freq, ring_freq, found;
+ u32 val;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+
+ val = gpu_freq;
+ if (sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &val, NULL)) {
+ pr_err("Failed to read freq table[%d], range [%d, %d]\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
+ err = -ENXIO;
+ break;
+ }
+
+ found = (val >> 0) & 0xff;
+ if (found != ia_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ia_freq);
+ err = -EINVAL;
+ break;
+ }
+
+ found = (val >> 8) & 0xff;
+ if (found != ring_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ring_freq);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref);
+ return err;
+}
+
+int st_llc_verify(struct intel_llc *llc)
+{
+ int err = 0;
+
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ err = gen6_verify_ring_freq(llc);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
new file mode 100644
index 000000000000..873f896e72f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_LLC_H
+#define SELFTEST_LLC_H
+
+struct intel_llc;
+
+int st_llc_verify(struct intel_llc *llc);
+
+#endif /* SELFTEST_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index d791158988d6..eb71ac2f992c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -19,26 +20,52 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
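+ /* One cached page pinned in the GGTT for the CS to write results (e.g. GPR dumps) into. */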
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
static int live_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
struct intel_context *ce;
struct igt_spinner spin;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
goto err_spin;
@@ -55,13 +82,13 @@ static int live_sanitycheck(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx;
}
igt_spinner_end(&spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx;
}
@@ -73,12 +100,175 @@ err_ctx:
kernel_context_close(ctx);
err_spin:
igt_spinner_fini(&spin);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ ctx = kernel_context(gt->i915);
+ if (!ctx)
+ goto err_spin;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(ctx, engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Set up the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ __execlists_update_reg_state(ce[1], engine);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will make rq[1] depend on rq[0], which
+ * prevents the pair from being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ tasklet_kill(&engine->execlists.tasklet); /* flush submission */
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ kernel_context_close(ctx);
+err_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -131,7 +321,13 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
if (IS_ERR(rq))
goto out_ctx;
- err = emit_semaphore_chain(rq, vma, idx);
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
i915_request_add(rq);
if (err)
rq = ERR_PTR(err);
@@ -144,10 +340,10 @@ out_ctx:
static int
release_queue(struct intel_engine_cs *engine,
struct i915_vma *vma,
- int idx)
+ int idx, int prio)
{
struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ .priority = prio,
};
struct i915_request *rq;
u32 *cs;
@@ -168,9 +364,15 @@ release_queue(struct intel_engine_cs *engine,
*cs++ = 1;
intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
i915_request_add(rq);
+ local_bh_disable();
engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
return 0;
}
@@ -189,8 +391,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
if (IS_ERR(head))
return PTR_ERR(head);
- i915_request_get(head);
- for_each_engine(engine, outer->i915, id) {
+ for_each_engine(engine, outer->gt, id) {
for (i = 0; i < count; i++) {
struct i915_request *rq;
@@ -199,15 +400,16 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
err = PTR_ERR(rq);
goto out;
}
+
+ i915_request_put(rq);
}
}
- err = release_queue(outer, vma, n);
+ err = release_queue(outer, vma, n, INT_MAX);
if (err)
goto out;
- if (i915_request_wait(head,
- I915_WAIT_LOCKED,
+ if (i915_request_wait(head, 0,
2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
@@ -223,9 +425,8 @@ out:
static int live_timeslice_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
struct i915_vma *vma;
void *vaddr;
int err = 0;
@@ -239,17 +440,14 @@ static int live_timeslice_preempt(void *arg)
* need to preempt the current task and replace it with another
* ready task.
*/
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -269,7 +467,7 @@ static int live_timeslice_preempt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
@@ -279,7 +477,7 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_pin;
}
@@ -292,22 +490,168 @@ err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+}
+
+static void wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
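+ /* Busy-wait, yielding, until the backend reports rq submitted to hardware. */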
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (!i915_request_is_active(rq));
+}
+
+static long timeslice_threshold(const struct intel_engine_cs *engine)
+{
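+ /* Allow up to two full timeslices to elapse, plus a jiffy of slack. */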
+ return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that even if ELSP[0] and ELSP[1] are filled with requests,
+ * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_pin;
+ }
+ engine->schedule(rq, &attr);
+ wait_for_submit(engine, rq);
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_request_put(rq);
+ goto err_pin;
+ }
+ wait_for_submit(engine, nop);
+ i915_request_put(nop);
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, at the same priority as the semaphore wait */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err) {
+ i915_request_put(rq);
+ goto err_pin;
+ }
+
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.timer.expires) &&
+ !i915_request_completed(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EINVAL;
+ }
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
return err;
}
static int live_busywait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *map;
@@ -316,22 +660,19 @@ static int live_busywait_preempt(void *arg)
* preempt the busywaits used to synchronise between rings.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
- goto err_unlock;
+ return -ENOMEM;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ctx_lo;
@@ -343,7 +684,7 @@ static int live_busywait_preempt(void *arg)
goto err_obj;
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_map;
@@ -353,7 +694,7 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
u32 *cs;
@@ -364,7 +705,7 @@ static int live_busywait_preempt(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_vma;
}
@@ -444,7 +785,7 @@ static int live_busywait_preempt(void *arg)
i915_request_add(hi);
if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
engine->name);
@@ -452,7 +793,7 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
@@ -475,9 +816,6 @@ err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -501,49 +839,45 @@ spinner_create_request(struct igt_spinner *spin,
static int live_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
pr_err("Logical preemption supported, but not exposed\n");
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -559,7 +893,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -576,7 +910,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -599,54 +933,47 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_late_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
ctx_lo->sched.priority = I915_USER_PRIORITY(1);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -705,15 +1032,12 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -723,14 +1047,13 @@ struct preempt_client {
struct i915_gem_context *ctx;
};
-static int preempt_client_init(struct drm_i915_private *i915,
- struct preempt_client *c)
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
{
- c->ctx = kernel_context(i915);
+ c->ctx = kernel_context(gt->i915);
if (!c->ctx)
return -ENOMEM;
- if (igt_spinner_init(&c->spin, &i915->gt))
+ if (igt_spinner_init(&c->spin, gt))
goto err_ctx;
return 0;
@@ -748,11 +1071,10 @@ static void preempt_client_fini(struct preempt_client *c)
static int live_nopreempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -760,19 +1082,16 @@ static int live_nopreempt(void *arg)
* that may be being observed and not want to be interrupted.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
if (!intel_engine_has_preemption(engine))
@@ -832,7 +1151,7 @@ static int live_nopreempt(void *arg)
goto err_wedged;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -841,29 +1160,344 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
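+ /*
+ * An MI_NOOP spinner never enables arbitration, so it cannot be
+ * preempted mid-batch and should hold ELSP[0] throughout.
+ */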
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ i915_gem_context_set_banned(arg->b.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
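+ /*
+ * Ban the context and send a pulse: as the spinner never yields,
+ * the preempt-timeout should fire and reset the engine.
+ */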
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
};
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -873,30 +1507,31 @@ static int live_suppress_self_preempt(void *arg)
* completion event.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0; /* presume black box */
- if (intel_vgpu_active(i915))
+ if (intel_vgpu_active(gt->i915))
return 0; /* GVT forces single port & request submission */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
int depth;
if (!intel_engine_has_preemption(engine))
continue;
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ intel_engine_pm_get(engine);
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
@@ -904,12 +1539,14 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -921,6 +1558,7 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_b);
@@ -931,6 +1569,7 @@ static int live_suppress_self_preempt(void *arg)
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -944,11 +1583,13 @@ static int live_suppress_self_preempt(void *arg)
engine->name,
engine->execlists.preempt_hang.count,
depth);
+ intel_engine_pm_put(engine);
err = -EINVAL;
goto err_client_b;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -957,15 +1598,12 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
@@ -984,9 +1622,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
if (!rq)
return NULL;
- INIT_LIST_HEAD(&rq->active_list);
rq->engine = engine;
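+ /*
+ * Initialise just enough of the embedded dma_fence that callbacks
+ * can be attached to the dummy and it can be signalled like a
+ * real request.
+ */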
+ spin_lock_init(&rq->lock);
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+ rq->fence.lock = &rq->lock;
+ rq->fence.ops = &i915_fence_ops;
+
i915_sched_node_init(&rq->sched);
/* mark this request as permanently incomplete */
@@ -1021,11 +1663,10 @@ static void dummy_request_free(struct i915_request *dummy)
static int live_suppress_wait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct preempt_client client[4];
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
int i;
@@ -1035,22 +1676,19 @@ static int live_suppress_wait_preempt(void *arg)
* not needlessly generate preempt-to-idle cycles.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
- goto err_unlock;
- if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
+ return -ENOMEM;
+ if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
goto err_client_0;
- if (preempt_client_init(i915, &client[2])) /* head of queue */
+ if (preempt_client_init(gt, &client[2])) /* head of queue */
goto err_client_1;
- if (preempt_client_init(i915, &client[3])) /* bystander */
+ if (preempt_client_init(gt, &client[3])) /* bystander */
goto err_client_2;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
int depth;
if (!intel_engine_has_preemption(engine))
@@ -1079,8 +1717,8 @@ static int live_suppress_wait_preempt(void *arg)
}
/* Disable NEWCLIENT promotion */
- __i915_active_request_set(&rq[i]->timeline->last_request,
- dummy);
+ __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+ &dummy->fence);
i915_request_add(rq[i]);
}
@@ -1105,7 +1743,7 @@ static int live_suppress_wait_preempt(void *arg)
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
@@ -1128,26 +1766,22 @@ err_client_1:
preempt_client_fini(&client[1]);
err_client_0:
preempt_client_fini(&client[0]);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client hi, lo;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -1156,19 +1790,16 @@ static int live_chain_preempt(void *arg)
* the previously submitted spinner in B.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &hi))
- goto err_unlock;
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
- if (preempt_client_init(i915, &lo))
+ if (preempt_client_init(gt, &lo))
goto err_client_hi;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
@@ -1199,7 +1830,7 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
@@ -1237,7 +1868,7 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&hi.spin);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to preempt over chain of %d\n",
count);
@@ -1253,7 +1884,7 @@ static int live_chain_preempt(void *arg)
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
@@ -1274,57 +1905,50 @@ err_client_lo:
preempt_client_fini(&lo);
err_client_hi:
preempt_client_fini(&hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_lo;
}
static int live_preempt_hang(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
@@ -1341,7 +1965,7 @@ static int live_preempt_hang(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -1363,28 +1987,28 @@ static int live_preempt_hang(void *arg)
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
- set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_reset(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -1399,9 +2023,105 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
return err;
}
@@ -1416,7 +2136,7 @@ static int random_priority(struct rnd_state *rnd)
}
struct preempt_smoke {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
@@ -1440,7 +2160,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
int err = 0;
if (batch) {
- vma = i915_vma_instance(batch, ctx->vm, NULL);
+ struct i915_address_space *vm;
+
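+ /*
+ * Take our own reference to the vm for the lookup; the context's
+ * vm pointer is RCU protected in this scheme.
+ */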
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1489,11 +2213,9 @@ static int smoke_crescendo_thread(void *arg)
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
- mutex_lock(&smoke->i915->drm.struct_mutex);
err = smoke_submit(smoke,
ctx, count % I915_PRIORITY_MAX,
smoke->batch);
- mutex_unlock(&smoke->i915->drm.struct_mutex);
if (err)
return err;
@@ -1514,9 +2236,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
unsigned long count;
int err = 0;
- mutex_unlock(&smoke->i915->drm.struct_mutex);
-
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
if (!(flags & BATCH))
@@ -1532,8 +2252,10 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
get_task_struct(tsk[id]);
}
+ yield(); /* start all threads before we kthread_stop() */
+
count = 0;
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
int status;
if (IS_ERR_OR_NULL(tsk[id]))
@@ -1548,11 +2270,9 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
put_task_struct(tsk[id]);
}
- mutex_lock(&smoke->i915->drm.struct_mutex);
-
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -1564,7 +2284,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count = 0;
do {
- for_each_engine(smoke->engine, smoke->i915, id) {
+ for_each_engine(smoke->engine, smoke->gt, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
@@ -1580,25 +2300,24 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
static int live_preempt_smoke(void *arg)
{
struct preempt_smoke smoke = {
- .i915 = arg,
+ .gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
- intel_wakeref_t wakeref;
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
return 0;
smoke.contexts = kmalloc_array(smoke.ncontext,
@@ -1607,13 +2326,11 @@ static int live_preempt_smoke(void *arg)
if (!smoke.contexts)
return -ENOMEM;
- mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
-
- smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err = PTR_ERR(smoke.batch);
- goto err_unlock;
+ goto err_free;
}
cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
@@ -1627,13 +2344,13 @@ static int live_preempt_smoke(void *arg)
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.i915);
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
if (!smoke.contexts[n])
goto err_ctx;
}
@@ -1660,15 +2377,13 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
-err_unlock:
- intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
- mutex_unlock(&smoke.i915->drm.struct_mutex);
+err_free:
kfree(smoke.contexts);
return err;
}
-static int nop_virtual_engine(struct drm_i915_private *i915,
+static int nop_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling,
unsigned int nctx,
@@ -1687,7 +2402,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(i915);
+ ctx[n] = kernel_context(gt->i915);
if (!ctx[n]) {
err = -ENOMEM;
nctx = n;
@@ -1712,7 +2427,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
}
}
- err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
if (err)
goto out;
@@ -1759,7 +2474,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
break;
}
}
@@ -1781,7 +2496,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
@@ -1794,25 +2509,22 @@ out:
static int live_virtual_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
- struct intel_gt *gt = &i915->gt;
enum intel_engine_id id;
unsigned int class, inst;
- int err = -ENODEV;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id) {
- err = nop_virtual_engine(i915, &engine, 1, 1, 0);
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
if (err) {
pr_err("Failed to wrap engine %s: err=%d\n",
engine->name, err);
- goto out_unlock;
+ return err;
}
}
@@ -1830,23 +2542,21 @@ static int live_virtual_engine(void *arg)
continue;
for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(i915, siblings, nsibling,
+ err = nop_virtual_engine(gt, siblings, nsibling,
n, 0);
if (err)
- goto out_unlock;
+ return err;
}
- err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
if (err)
- goto out_unlock;
+ return err;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
-static int mask_virtual_engine(struct drm_i915_private *i915,
+static int mask_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
@@ -1862,7 +2572,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
return -ENOMEM;
@@ -1876,7 +2586,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
if (err)
goto out_put;
- err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
if (err)
goto out_unpin;
@@ -1907,7 +2617,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out;
}
@@ -1922,11 +2632,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
}
err = igt_live_test_end(&t);
- if (err)
- goto out;
-
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
@@ -1943,17 +2650,14 @@ out_close:
static int live_virtual_mask(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
@@ -1967,17 +2671,166 @@ static int live_virtual_mask(void *arg)
if (nsibling < 2)
continue;
- err = mask_virtual_engine(i915, siblings, nsibling);
+ err = mask_virtual_engine(gt, siblings, nsibling);
if (err)
- goto out_unlock;
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct i915_gem_context *ctx;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ ctx = kernel_context(gt->i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ scratch = create_scratch(siblings[0]->gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
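+ /*
+ * Chain requests across the siblings: each stores the GPR written
+ * by its predecessor on a different engine, then loads the next
+ * value, so the readback below proves the GPRs survive migration
+ * between engines.
+ */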
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(ctx);
return err;
}
-static int bond_virtual_engine(struct drm_i915_private *i915,
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * registers are preserved.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ /* As we use CS_GPR, we cannot run before gen9 when they first existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings,
unsigned int nsibling,
@@ -1993,13 +2846,13 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
return -ENOMEM;
err = 0;
rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, i915, id) {
+ for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
if (master->class == class)
@@ -2104,7 +2957,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
out:
for (n = 0; !IS_ERR(rq[n]); n++)
i915_request_put(rq[n]);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
kernel_context_close(ctx);
@@ -2121,17 +2974,14 @@ static int live_virtual_bond(void *arg)
{ "schedule", BOND_SCHEDULE },
{ },
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
const struct phase *p;
int nsibling;
@@ -2148,38 +2998,42 @@ static int live_virtual_bond(void *arg)
continue;
for (p = phases; p->name; p++) {
- err = bond_virtual_engine(i915,
+ err = bond_virtual_engine(gt,
class, siblings, nsibling,
p->flags);
if (err) {
pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
__func__, p->name, class, nsibling, err);
- goto out_unlock;
+ return err;
}
}
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
};
@@ -2189,5 +3043,512 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_live_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
+
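+ /*
+ * Dump a buffer as hex dwords, collapsing runs of identical rows
+ * into a single '*' line, in the style of hexdump(1).
+ */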
+static void hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+static int live_lrc_layout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 *mem;
+ int err;
+
+ /*
+ * Check the register offsets we use to create the initial reg state
+ * match the layout saved by HW.
+ */
+
+ mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ u32 *hw, *lrc;
+ int dw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ lrc = memset(mem, 0, PAGE_SIZE);
+ execlists_init_reg_state(lrc,
+ engine->kernel_context,
+ engine,
+ engine->kernel_context->ring,
+ true);
+
+ dw = 0;
+ do {
+ u32 lri = hw[dw];
+
+ if (lri == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ pr_err("%s: Expected LRI command at dword %d, found %08x\n",
+ engine->name, dw, lri);
+ err = -EINVAL;
+ break;
+ }
+
+ if (lrc[dw] != lri) {
+ pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
+ engine->name, dw, lri, lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
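+ /*
+ * The masked LRI length field encodes the payload size minus one,
+ * so after the increment lri counts the (offset, value) dwords
+ * to walk.
+ */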
+ lri &= 0x7f;
+ lri++;
+ dw++;
+
+ while (lri) {
+ if (hw[dw] != lrc[dw]) {
+ pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
+ engine->name, dw, hw[dw], lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Skip over the actual register value as we
+ * expect that to differ.
+ */
+ dw += 2;
+ lri -= 2;
+ }
+ } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ if (err) {
+ pr_info("%s: HW register image:\n", engine->name);
+ hexdump(hw, PAGE_SIZE);
+
+ pr_info("%s: SW register image:\n", engine->name);
+ hexdump(lrc, PAGE_SIZE);
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ if (err)
+ break;
+ }
+
+ kfree(mem);
+ return err;
+}
+
+static int find_offset(const u32 *lri, u32 offset)
+{
+ int i;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ if (lri[i] == offset)
+ return i;
+
+ return -1;
+}
+
+static int live_lrc_fixed(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the assumed register offsets match the actual locations in
+ * the context image.
+ */
+
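+ /*
+ * The CTX_* indices point at the value dword of each LRI pair in
+ * the register state, hence the "- 1" entries below to land on
+ * the preceding register-offset dword.
+ */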
+ for_each_engine(engine, gt, id) {
+ const struct {
+ u32 reg;
+ u32 offset;
+ const char *name;
+ } tbl[] = {
+ {
+ i915_mmio_reg_offset(RING_START(engine->mmio_base)),
+ CTX_RING_BUFFER_START - 1,
+ "RING_START"
+ },
+ {
+ i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
+ CTX_RING_BUFFER_CONTROL - 1,
+ "RING_CTL"
+ },
+ {
+ i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
+ CTX_RING_HEAD - 1,
+ "RING_HEAD"
+ },
+ {
+ i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
+ CTX_RING_TAIL - 1,
+ "RING_TAIL"
+ },
+ {
+ i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
+ lrc_ring_mi_mode(engine),
+ "RING_MI_MODE"
+ },
+ {
+ engine->mmio_base + 0x110,
+ CTX_BB_STATE - 1,
+ "BB_STATE"
+ },
+ { },
+ }, *t;
+ u32 *hw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ for (t = tbl; t->name; t++) {
+ int dw = find_offset(hw, t->reg);
+
+ if (dw != t->offset) {
+ pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
+ engine->name,
+ t->name,
+ t->reg,
+ dw,
+ t->offset);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+
+ return err;
+}
+
+static int __live_lrc_state(struct i915_gem_context *fixme,
+ struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ enum {
+ RING_START_IDX = 0,
+ RING_TAIL_IDX,
+ MAX_IDX
+ };
+ u32 expected[MAX_IDX];
+ u32 *cs;
+ int err;
+ int n;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ cs = intel_ring_begin(rq, 4 * MAX_IDX);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_unpin;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
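+ /*
+ * Flush submission so the request reaches the hardware before we
+ * sample ring->tail as the expected RING_TAIL value.
+ */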
+ intel_engine_flush_submission(engine);
+ expected[RING_TAIL_IDX] = ce->ring->tail;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < MAX_IDX; n++) {
+ if (cs[n] != expected[n]) {
+ pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
+ engine->name, n, cs[n], expected[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_state(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the live register state matches what we expect for this
+ * intel_context.
+ */
+
+ fixme = kernel_context(gt->i915);
+ if (!fixme)
+ return -ENOMEM;
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = __live_lrc_state(fixme, engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(fixme);
+ return err;
+}
+
+static int gpr_make_dirty(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int n;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int __live_gpr_clear(struct i915_gem_context *fixme,
+ struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
+ return 0; /* GPR only on rcs0 for gen8 */
+
+ err = gpr_make_dirty(engine);
+ if (err)
+ return err;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_put;
+ }
+
+ cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_put;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n]) {
+ pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
+ engine->name,
+ n / 2, n & 1 ? "udw" : "ldw",
+ cs[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_gpr_clear(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that GPR registers are cleared in new contexts as we need
+ * to avoid leaking any information from previous contexts.
+ */
+
+ fixme = kernel_context(gt->i915);
+ if (!fixme)
+ return -ENOMEM;
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = __live_gpr_clear(fixme, engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(fixme);
+ return err;
+}
+
+int intel_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_lrc_layout),
+ SUBTEST(live_lrc_fixed),
+ SUBTEST(live_lrc_state),
+ SUBTEST(live_gpr_clear),
+ };
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 00a4f60cdfd5..6ad6aca315f6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -17,7 +17,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reset_count = i915_reset_count(&gt->i915->gpu_error);
@@ -28,7 +28,7 @@ static int igt_global_reset(void *arg)
err = -EINVAL;
}
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -45,14 +45,14 @@ static int igt_wedged_reset(void *arg)
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_gt_set_wedged(gt);
GEM_BUG_ON(!intel_gt_is_wedged(gt));
intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
return intel_gt_is_wedged(gt) ? -EIO : 0;
@@ -112,7 +112,7 @@ static int igt_atomic_engine_reset(void *arg)
/* Check that the resets are usable from atomic context */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (USES_GUC_SUBMISSION(gt->i915))
@@ -125,8 +125,8 @@ static int igt_atomic_engine_reset(void *arg)
if (!igt_force_reset(gt))
goto out_unlock;
- for_each_engine(engine, gt->i915, id) {
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ for_each_engine(engine, gt, id) {
+ tasklet_disable(&engine->execlists.tasklet);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
@@ -170,7 +170,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
};
struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(gt->i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 321481403165..f04a59fe5d2c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,8 +6,10 @@
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
+#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
@@ -34,7 +36,7 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl)
#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
struct mock_hwsp_freelist {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct radix_tree_root cachelines;
struct intel_timeline **history;
unsigned long count, max;
@@ -67,7 +69,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = intel_timeline_create(&state->i915->gt, NULL);
+ tl = intel_timeline_create(state->gt, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -105,6 +107,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
static int mock_hwsp_freelist(void *arg)
{
struct mock_hwsp_freelist state;
+ struct drm_i915_private *i915;
const struct {
const char *name;
unsigned int flags;
@@ -116,12 +119,14 @@ static int mock_hwsp_freelist(void *arg)
unsigned int na;
int err = 0;
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
- state.i915 = mock_gem_device();
- if (!state.i915)
- return -ENOMEM;
+ state.gt = &i915->gt;
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
@@ -136,7 +141,6 @@ static int mock_hwsp_freelist(void *arg)
goto err_put;
}
- mutex_lock(&state.i915->drm.struct_mutex);
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
@@ -149,10 +153,9 @@ static int mock_hwsp_freelist(void *arg)
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
kfree(state.history);
err_put:
- drm_dev_put(&state.i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -449,8 +452,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
-
err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
@@ -461,10 +462,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
if (IS_ERR(rq))
goto out_unpin;
+ i915_request_get(rq);
+
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
- if (err)
+ if (err) {
+ i915_request_put(rq);
rq = ERR_PTR(err);
+ }
out_unpin:
intel_timeline_unpin(tl);
@@ -475,11 +480,11 @@ out:
}
static struct intel_timeline *
-checked_intel_timeline_create(struct drm_i915_private *i915)
+checked_intel_timeline_create(struct intel_gt *gt)
{
struct intel_timeline *tl;
- tl = intel_timeline_create(&i915->gt, NULL);
+ tl = intel_timeline_create(gt, NULL);
if (IS_ERR(tl))
return tl;
@@ -496,11 +501,10 @@ checked_intel_timeline_create(struct drm_i915_private *i915)
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -515,37 +519,40 @@ static int live_hwsp_engine(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
for (n = 0; n < NUM_TIMELINES; n++) {
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
}
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
@@ -559,11 +566,7 @@ out:
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
@@ -571,11 +574,10 @@ out:
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -591,25 +593,25 @@ static int live_hwsp_alternate(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
+ intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
+ intel_engine_pm_get(engine);
rq = tl_write(tl, engine, count);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
@@ -617,11 +619,12 @@ static int live_hwsp_alternate(void *arg)
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
@@ -635,22 +638,17 @@ out:
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
static int live_hwsp_wrap(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = 0;
/*
@@ -658,14 +656,10 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ tl = intel_timeline_create(gt, NULL);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
- tl = intel_timeline_create(&i915->gt, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
@@ -673,7 +667,7 @@ static int live_hwsp_wrap(void *arg)
if (err)
goto out_free;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
const u32 *hwsp_seqno[2];
struct i915_request *rq;
u32 seqno[2];
@@ -681,7 +675,9 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
@@ -743,29 +739,24 @@ static int live_hwsp_wrap(void *arg)
goto out;
}
- i915_retire_requests(i915); /* recycle HWSP */
+ intel_gt_retire_requests(gt); /* recycle HWSP */
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
intel_timeline_unpin(tl);
out_free:
intel_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
static int live_hwsp_recycle(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count;
int err = 0;
@@ -775,38 +766,38 @@ static int live_hwsp_recycle(void *arg)
* want to confuse ourselves or the GPU.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
do {
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
+ i915_request_put(rq);
intel_timeline_put(tl);
err = -EIO;
- goto out;
+ break;
}
if (*tl->hwsp_seqno != count) {
@@ -815,17 +806,18 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
+ i915_request_put(rq);
intel_timeline_put(tl);
count++;
if (err)
- goto out;
+ break;
} while (!__igt_timeout(end_time, NULL));
- }
-out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
return err;
}
@@ -842,5 +834,5 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_live_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index d06d68ac2a3b..abce6e4ec9c0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -33,8 +33,32 @@ struct wa_lists {
} engine[I915_NUM_ENGINES];
};
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+ i915_request_put(rq);
+
+ return err;
+}
+
static void
-reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -42,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
- gt_init_workarounds(i915, &lists->gt_wa_list);
+ gt_init_workarounds(gt->i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
wa_init_start(wal, "REF", engine->name);
@@ -59,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
}
static void
-reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
intel_wa_list_free(&lists->engine[id].wa_list);
intel_wa_list_free(&lists->gt_wa_list);
@@ -191,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
i915_gem_object_lock(results);
- intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
+ intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
- if (intel_gt_is_wedged(&ctx->i915->gt))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
if (err)
goto out_put;
@@ -243,7 +267,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
struct i915_gem_context *ctx;
struct intel_context *ce;
struct i915_request *rq;
- intel_wakeref_t wakeref;
int err = 0;
ctx = kernel_context(engine->i915);
@@ -255,12 +278,9 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
- rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(spin, ce, MI_NOOP);
intel_context_put(ce);
- kernel_context_close(ctx);
if (IS_ERR(rq)) {
spin = NULL;
@@ -268,17 +288,12 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
goto err;
}
- i915_request_add(rq);
-
- if (spin && !igt_wait_for_spinner(spin, rq)) {
- pr_err("Spinner failed to start\n");
- err = -ETIMEDOUT;
- }
-
+ err = request_add_spin(rq, spin);
err:
if (err && spin)
igt_spinner_end(spin);
+ kernel_context_close(ctx);
return err;
}
@@ -313,7 +328,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out_spin;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
@@ -355,6 +370,7 @@ out_ctx:
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
@@ -362,7 +378,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -463,12 +481,15 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
0xffff00ff,
0xffffffff,
};
+ struct i915_address_space *vm;
struct i915_vma *scratch;
struct i915_vma *batch;
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
+ i915_vm_put(vm);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -492,6 +513,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
ro_reg = ro_register(reg);
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
@@ -565,6 +589,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
@@ -572,15 +604,11 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
err_request:
- i915_request_add(rq);
- if (err)
- goto out_batch;
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = request_add_sync(rq, err);
+ if (err) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
- intel_gt_set_wedged(&ctx->i915->gt);
- err = -EIO;
+ intel_gt_set_wedged(engine->gt);
goto out_batch;
}
@@ -668,7 +696,7 @@ out_unpin:
break;
}
- if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(ctx->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -679,36 +707,29 @@ out_scratch:
static int live_dirty_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
int err = 0;
/* Can the user write to the whitelisted registers? */
- if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- mutex_unlock(&i915->drm.struct_mutex);
- file = mock_file(i915);
- mutex_lock(&i915->drm.struct_mutex);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- goto out_rpm;
- }
-
- ctx = live_context(i915, file);
+ ctx = live_context(gt->i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_file;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine->whitelist.count == 0)
continue;
@@ -718,45 +739,43 @@ static int live_dirty_whitelist(void *arg)
}
out_file:
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
- mutex_lock(&i915->drm.struct_mutex);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mock_file_free(gt->i915, file);
return err;
}
static int live_reset_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
+ igt_global_reset_lock(gt);
- if (!engine || engine->whitelist.count == 0)
- return 0;
-
- igt_global_reset_lock(&i915->gt);
+ for_each_engine(engine, gt, id) {
+ if (engine->whitelist.count == 0)
+ continue;
- if (intel_has_reset_engine(i915)) {
- err = check_whitelist_across_reset(engine,
- do_engine_reset,
- "engine");
- if (err)
- goto out;
- }
+ if (intel_has_reset_engine(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_engine_reset,
+ "engine");
+ if (err)
+ goto out;
+ }
- if (intel_has_gpu_reset(i915)) {
- err = check_whitelist_across_reset(engine,
- do_device_reset,
- "device");
- if (err)
- goto out;
+ if (intel_has_gpu_reset(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_device_reset,
+ "device");
+ if (err)
+ goto out;
+ }
}
out:
- igt_global_reset_unlock(&i915->gt);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -772,6 +791,14 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ i915_vma_lock(results);
+ err = i915_request_await_object(rq, results->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(results);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -786,8 +813,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
u64 offset = results->node.start + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
- /* Clear access permission field */
- reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
*cs++ = srm;
*cs++ = reg;
@@ -797,12 +824,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
intel_ring_advance(rq, cs);
err_req:
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
-
- return err;
+ return request_add_sync(rq, err);
}
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
@@ -830,6 +852,9 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
if (ro_register(reg))
continue;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
*cs++ = reg;
*cs++ = 0xffffffff;
}
@@ -850,13 +875,19 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
/* Perform the writes from an unprivileged "user" batch */
err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
err_request:
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
+ err = request_add_sync(rq, err);
err_unpin:
i915_gem_object_unpin_map(batch->obj);
@@ -973,7 +1004,7 @@ err_a:
static int live_isolated_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct {
struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
@@ -987,40 +1018,46 @@ static int live_isolated_whitelist(void *arg)
* invisible to a second context.
*/
- if (!intel_engines_has_context_isolation(i915))
- return 0;
-
- if (!i915->kernel_context->vm)
+ if (!intel_engines_has_context_isolation(gt->i915))
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_address_space *vm;
struct i915_gem_context *c;
- c = kernel_context(i915);
+ c = kernel_context(gt->i915);
if (IS_ERR(c)) {
err = PTR_ERR(c);
goto err;
}
- client[i].scratch[0] = create_scratch(c->vm, 1024);
+ vm = i915_gem_context_get_vm_rcu(c);
+
+ client[i].scratch[0] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(c->vm, 1024);
+ client[i].scratch[1] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
client[i].ctx = c;
+ i915_vm_put(vm);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
+ if (!engine->kernel_context->vm)
+ continue;
+
if (!whitelist_writable_count(engine))
continue;
@@ -1074,7 +1111,7 @@ err:
kernel_context_close(client[i].ctx);
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
@@ -1109,16 +1146,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
static int
live_gpu_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1126,25 +1163,25 @@ live_gpu_reset_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
- igt_global_reset_lock(&i915->gt);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok)
goto out;
- intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
+ intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset");
out:
i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(&i915->gt);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
return ok ? 0 : -ESRCH;
}
@@ -1152,7 +1189,7 @@ out:
static int
live_engine_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
struct intel_context *ce;
@@ -1162,17 +1199,17 @@ live_engine_reset_workarounds(void *arg)
struct wa_lists lists;
int ret = 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- igt_global_reset_lock(&i915->gt);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct intel_engine_cs *engine = ce->engine;
@@ -1205,12 +1242,10 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
pr_err("Spinner failed to start\n");
igt_spinner_fini(&spin);
- ret = -ETIMEDOUT;
goto err;
}
@@ -1227,12 +1262,12 @@ live_engine_reset_workarounds(void *arg)
}
err:
i915_gem_context_unlock_engines(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(&i915->gt);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
kernel_context_close(ctx);
- igt_flush_test(i915, I915_WAIT_LOCKED);
+ igt_flush_test(gt->i915);
return ret;
}
@@ -1246,14 +1281,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_gpu_reset_workarounds),
SUBTEST(live_engine_reset_workarounds),
};
- int err;
if (intel_gt_is_wedged(&i915->gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 598170efcaf6..2a77c051f36a 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 249c747e9756..3ee4a4e7689d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -4,11 +4,34 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
+/**
+ * DOC: GuC
+ *
+ * The GuC is a microcontroller inside the GT HW, introduced in gen9. It is
+ * designed to offload some of the functionality usually performed by the host
+ * driver; currently, the main operations it can take care of are:
+ *
+ * - Authentication of the HuC, which is required to fully enable HuC usage.
+ * - Low latency graphics context scheduling (a.k.a. GuC submission).
+ * - GT Power management.
+ *
+ * The enable_guc module parameter can be used to select which of those
+ * operations to enable within GuC. Note that not all the operations are
+ * supported on all gen9+ platforms.
+ *
+ * Enabling the GuC is not mandatory and therefore the firmware is only loaded
+ * if at least one of the operations is selected. However, not loading the GuC
+ * might result in the loss of some features that do require the GuC (currently
+ * just the HuC, but more are expected to land in the future).
+ */
+
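
A quick sketch of how an enable_guc-style bitmask maps onto the "load firmware
only if at least one operation is selected" rule described above. The bit
assignments and the enable_guc=2 example are assumptions for illustration, not
a statement of the module parameter's ABI:

#include <stdbool.h>
#include <stdio.h>

#define ENABLE_GUC_SUBMISSION (1 << 0) /* assumed: GuC context scheduling */
#define ENABLE_GUC_LOAD_HUC   (1 << 1) /* assumed: load/authenticate HuC */

/* Firmware is only fetched and loaded if some operation is selected. */
static bool wants_guc_firmware(int enable_guc)
{
        return enable_guc & (ENABLE_GUC_SUBMISSION | ENABLE_GUC_LOAD_HUC);
}

int main(void)
{
        int enable_guc = ENABLE_GUC_LOAD_HUC; /* e.g. enable_guc=2 */

        printf("load GuC firmware: %s\n",
               wants_guc_firmware(enable_guc) ? "yes" : "no");
        return 0;
}
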
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -56,6 +79,93 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
guc->send_regs.fw_domains = fw_domains;
}
+static void gen9_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
+ guc->interrupts.enabled = true;
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
+ }
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
+
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen9_reset_guc_interrupts(guc);
+}
+
+static void gen11_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_MASK, ~events);
+ guc->interrupts.enabled = true;
+ }
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
+
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen11_reset_guc_interrupts(guc);
+}
+
void intel_guc_init_early(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
@@ -82,32 +192,6 @@ void intel_guc_init_early(struct intel_guc *guc)
}
}
-static int guc_shared_data_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- guc->shared_data = vma;
- guc->shared_data_vaddr = vaddr;
-
- return 0;
-}
-
-static void guc_shared_data_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
-}
-
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
@@ -254,14 +338,9 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_fetch;
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_fw;
- GEM_BUG_ON(!guc->shared_data);
-
ret = intel_guc_log_create(&guc->log);
if (ret)
- goto err_shared;
+ goto err_fw;
ret = intel_guc_ads_create(guc);
if (ret)
@@ -296,8 +375,6 @@ err_ads:
intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
-err_shared:
- guc_shared_data_destroy(guc);
err_fw:
intel_uc_fw_fini(&guc->fw);
err_fetch:
@@ -322,7 +399,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
- guc_shared_data_destroy(guc);
intel_uc_fw_fini(&guc->fw);
intel_uc_fw_cleanup_fetch(&guc->fw);
}
@@ -478,6 +554,13 @@ int intel_guc_suspend(struct intel_guc *guc)
};
/*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to suspend the GuC.
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
+ /*
* The ENTER_S_STATE action queues the save/restore operation in GuC FW
* and then returns, so waiting on the H2G is not enough to guarantee
* GuC is done. When all the processing is done, GuC writes
@@ -518,19 +601,9 @@ int intel_guc_suspend(struct intel_guc *guc)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine)
{
- u32 data[7];
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
- data[1] = engine->guc_id;
- data[2] = 0;
- data[3] = 0;
- data[4] = 0;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
+ /* XXX: to be implemented with submission interface rework */
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return -ENODEV;
}
/**
@@ -544,13 +617,27 @@ int intel_guc_resume(struct intel_guc *guc)
GUC_POWER_D0,
};
+ /*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to resume the GuC but we do need to enable the
+ * GuC communication on resume (above).
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
- * DOC: GuC Address Space
+ * DOC: GuC Memory Management
*
- * The layout of GuC address space is shown below:
+ * GuC can't allocate any memory for its own usage, so all the allocations must
+ * be handled by the host driver. GuC accesses the memory via the GGTT, with the
+ * exception of the top and bottom parts of the 4GB address space, which are
+ * instead re-mapped by the GuC HW to the memory location of the FW itself
+ * (WOPCM) or other parts of the HW. The driver must take care not to place
+ * objects that the GuC is going to access in these reserved ranges. The
+ * layout of the GuC address space is shown below:
*
* ::
*
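
To make the reserved-range rule concrete, a small self-contained check that an
object's GGTT placement stays clear of the remapped top and bottom of the 4GB
space. The two boundary constants are placeholders, not the real WOPCM layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESERVED_BOTTOM 0x00200000u /* assumed: remapped to WOPCM/FW */
#define RESERVED_TOP    0xFEE00000u /* assumed: remapped to other HW */

/* An object the GuC will access must sit between the reserved ranges. */
static bool guc_placement_ok(uint32_t start, uint32_t size)
{
        uint64_t end = (uint64_t)start + size;

        return start >= RESERVED_BOTTOM && end <= RESERVED_TOP;
}

int main(void)
{
        printf("%d\n", guc_placement_ok(0x00100000u, 0x1000)); /* 0: bottom */
        printf("%d\n", guc_placement_ok(0x10000000u, 0x1000)); /* 1: fine */
        return 0;
}
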
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b2f046d3cc3..e6400204a2bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,8 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
- struct i915_vma *shared_data;
- void *shared_data_vaddr;
struct intel_guc_client *execbuf_client;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 1d3cdd67ca2f..a26a85d50209 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -548,6 +548,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
@@ -556,7 +557,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
- INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 36332064de9c..caed0d57e704 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -226,7 +226,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (WARN_ON(!intel_guc_log_relay_enabled(log)))
+ if (WARN_ON(!intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
@@ -361,6 +361,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
INIT_WORK(&log->relay.flush_work, capture_logs_work);
+ log->relay.started = false;
}
static int guc_log_relay_create(struct intel_guc_log *log)
@@ -546,7 +547,7 @@ out_unlock:
return ret;
}
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
+bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
return log->relay.buf_addr;
}
@@ -560,7 +561,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (intel_guc_log_relay_enabled(log)) {
+ if (intel_guc_log_relay_created(log)) {
ret = -EEXIST;
goto out_unlock;
}
@@ -585,6 +586,21 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_unlock(&log->relay.lock);
+ return 0;
+
+out_relay:
+ guc_log_relay_destroy(log);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+
+ return ret;
+}
+
+int intel_guc_log_relay_start(struct intel_guc_log *log)
+{
+ if (log->relay.started)
+ return -EEXIST;
+
guc_log_enable_flush_events(log);
/*
@@ -594,47 +610,59 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
*/
queue_work(system_highpri_wq, &log->relay.flush_work);
- return 0;
+ log->relay.started = true;
-out_relay:
- guc_log_relay_destroy(log);
-out_unlock:
- mutex_unlock(&log->relay.lock);
-
- return ret;
+ return 0;
}
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
+ if (!log->relay.started)
+ return;
+
/*
* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
}
-void intel_guc_log_relay_close(struct intel_guc_log *log)
+/*
+ * Stops the relay log. Called from intel_guc_log_relay_close(), so there is
+ * no possibility of a race with start/flush, since relay_write cannot race
+ * with relay_close.
+ */
+static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ if (!log->relay.started)
+ return;
+
guc_log_disable_flush_events(log);
intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
+ log->relay.started = false;
+}
+
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+ guc_log_relay_stop(log);
+
mutex_lock(&log->relay.lock);
- GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
+ GEM_BUG_ON(!intel_guc_log_relay_created(log));
guc_log_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
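
The relay.started flag added in this patch makes start and stop an idempotent
pair: a second start reports -EEXIST and a stop without a start is a no-op. A
minimal sketch of just that guard, with the relay plumbing abstracted away as
a hypothetical struct:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical relay handle; only the started-flag protocol is modelled. */
struct relay {
        bool started;
};

static int relay_start(struct relay *r)
{
        if (r->started)
                return -EEXIST; /* mirrors intel_guc_log_relay_start() */
        r->started = true;
        return 0;
}

static void relay_stop(struct relay *r)
{
        if (!r->started)
                return; /* stopping an unstarted relay is a no-op */
        r->started = false;
}

int main(void)
{
        struct relay r = { .started = false };

        printf("%d\n", relay_start(&r)); /* 0 */
        printf("%d\n", relay_start(&r)); /* -EEXIST */
        relay_stop(&r);
        return 0;
}
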
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 6f764879acb1..c252c022c5fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -47,6 +47,7 @@ struct intel_guc_log {
struct i915_vma *vma;
struct {
void *buf_addr;
+ bool started;
struct work_struct flush_work;
struct rchan *channel;
struct mutex lock;
@@ -65,8 +66,9 @@ int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
+bool intel_guc_log_relay_created(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
+int intel_guc_log_relay_start(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
void intel_guc_log_relay_close(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index edf194d23c6b..1949346e714e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -83,6 +83,9 @@
#define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0)
+#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8)
+#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0)
+
#define GUC_ARAT_C6DIS _MMIO(0xA178)
#define GUC_SHIM_CONTROL _MMIO(0xc064)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f325d3dd564f..2498c55e0ea5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,12 +6,13 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
-
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
+
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -29,6 +30,12 @@ enum {
/**
* DOC: GuC-based command submission
*
+ * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
+ * firmware is moving to an updated submission interface and we plan to
+ * turn submission back on when that lands. The documentation below (and related
+ * code) matches the old submission model and will be updated as part of the
+ * upgrade to the new flow.
+ *
* GuC client:
  * An intel_guc_client refers to a submission path through GuC. Currently, there
* is only one client, which is charged with all submissions to the GuC. This
@@ -1004,7 +1011,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1014,7 +1021,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
* to GuC
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -1050,7 +1057,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1062,7 +1069,7 @@ static void guc_interrupts_release(struct intel_gt *gt)
*/
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
@@ -1119,7 +1126,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
enum intel_engine_id id;
int err;
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -1145,7 +1152,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(gt);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d4625c97b4f9..32a069841c14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -9,6 +9,34 @@
#include "intel_huc.h"
#include "i915_drv.h"
+/**
+ * DOC: HuC
+ *
+ * The HuC is a dedicated microcontroller for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can directly use the firmware
+ * capabilities by adding HuC-specific commands to batch buffers.
+ *
+ * The kernel driver is only responsible for loading the HuC firmware and
+ * triggering its security authentication, which is performed by the GuC. For
+ * the GuC to correctly perform the authentication, the HuC binary must be
+ * loaded before the GuC one. Loading the HuC is optional; however, not using
+ * the HuC might negatively impact power usage and/or performance of media
+ * workloads, depending on the use case.
+ *
+ * See https://github.com/intel/media-driver for the latest details on HuC
+ * functionality.
+ */
+
+/**
+ * DOC: HuC Memory Management
+ *
+ * Similarly to the GuC, the HuC can't do any memory allocations on its own,
+ * with the difference being that the allocations for HuC usage are handled by
+ * the userspace driver instead of the kernel one. The HuC accesses the memory
+ * via the PPGTT belonging to the context loaded on the VCS executing the
+ * HuC-specific commands.
+ */
+
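
The ordering constraint in the DOC comment above (HuC binary loaded before the
GuC, authentication performed afterwards by the GuC) can be pictured as a tiny
state machine. The states and helpers below are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

enum fw_state { FW_NONE, FW_HUC_LOADED, FW_GUC_LOADED, FW_HUC_AUTHED };

/* Each step is only legal from the state the previous step produced. */
static bool load_huc(enum fw_state *s)
{
        if (*s != FW_NONE)
                return false;
        *s = FW_HUC_LOADED;
        return true;
}

static bool load_guc(enum fw_state *s)
{
        if (*s != FW_HUC_LOADED) /* HuC binary must be loaded first */
                return false;
        *s = FW_GUC_LOADED;
        return true;
}

static bool auth_huc(enum fw_state *s)
{
        if (*s != FW_GUC_LOADED) /* authentication is done by the GuC */
                return false;
        *s = FW_HUC_AUTHED;
        return true;
}

int main(void)
{
        enum fw_state s = FW_NONE;
        bool a = load_huc(&s);
        bool b = load_guc(&s);
        bool c = auth_huc(&s);

        printf("%d%d%d\n", a, b, c); /* 111: the legal order succeeds */
        return 0;
}
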
void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
@@ -35,7 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
void *vaddr;
int err;
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -118,10 +146,9 @@ void intel_huc_fini(struct intel_huc *huc)
*
* Called after HuC and GuC firmware loading during intel_uc_init_hw().
*
- * This function pins HuC firmware image object into GGTT.
- * Then it invokes GuC action to authenticate passing the offset to RSA
- * signature through intel_guc_auth_huc(). It then waits for 50ms for
- * firmware verification ACK and unpins the object.
+ * This function invokes the GuC action to authenticate the HuC firmware,
+ * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
+ * waits for up to 50ms for firmware verification ACK.
*/
int intel_huc_auth(struct intel_huc *huc)
{
@@ -134,7 +161,7 @@ int intel_huc_auth(struct intel_huc *huc)
if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
- ret = i915_inject_load_error(gt->i915, -ENXIO);
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
goto fail;
@@ -185,7 +212,7 @@ int intel_huc_check_status(struct intel_huc *huc)
if (!intel_huc_is_supported(huc))
return -ENODEV;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 74602487ed67..d654340d4d03 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,21 +8,6 @@
#include "i915_drv.h"
/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-/**
* intel_huc_fw_init_early() - initializes HuC firmware struct
* @huc: intel_huc struct
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 71ee7ab035cc..629b19377a29 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -20,7 +20,7 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
int ret;
u32 guc_status;
- ret = i915_inject_load_error(gt->i915, -ENXIO);
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
return ret;
@@ -197,7 +197,7 @@ static int guc_enable_communication(struct intel_guc *guc)
GEM_BUG_ON(guc_communication_enabled(guc));
- ret = i915_inject_load_error(i915, -ENXIO);
+ ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
return ret;
@@ -224,17 +224,7 @@ static int guc_enable_communication(struct intel_guc *guc)
return 0;
}
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-
- guc_clear_mmio_msg(guc);
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
+static void __guc_stop_communication(struct intel_guc *guc)
{
/*
* Events generated during or after CT disable are logged by guc in
@@ -247,6 +237,20 @@ static void guc_disable_communication(struct intel_guc *guc)
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
+}
+
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ intel_guc_ct_stop(&guc->ct);
+
+ __guc_stop_communication(guc);
+
+ DRM_INFO("GuC communication stopped\n");
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ __guc_stop_communication(guc);
intel_guc_ct_disable(&guc->ct);
@@ -368,7 +372,7 @@ static int uc_init_wopcm(struct intel_uc *uc)
GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -537,7 +541,9 @@ void intel_uc_fini_hw(struct intel_uc *uc)
if (intel_uc_supports_guc_submission(uc))
intel_guc_submission_disable(guc);
- guc_disable_communication(guc);
+ if (guc_communication_enabled(guc))
+ guc_disable_communication(guc);
+
__uc_sanitize(uc);
}
@@ -581,7 +587,7 @@ void intel_uc_suspend(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return;
- with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
intel_uc_runtime_suspend(uc);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bd22bf11adad..66a30ab7044a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -37,27 +37,34 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
/*
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
+ *
+ * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
+ * between 33.0 and 35.2 are limited to additions that support new Gen12
+ * features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
- fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398))
-
-#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
+ fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0))
+
+#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) name_ \
- __stringify(major_) separator_ \
- __stringify(minor_) separator_ \
+ __stringify(major_) "." \
+ __stringify(minor_) "." \
__stringify(patch_) ".bin"
#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
- __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_)
+ __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
@@ -218,29 +225,31 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
{
bool user = e == -EINVAL;
- if (i915_inject_load_error(i915, e)) {
+ if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
uc_fw->path = "<invalid>";
uc_fw->user_overridden = user;
- } else if (i915_inject_load_error(i915, e)) {
+ } else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
uc_fw->major_ver_wanted += 1;
uc_fw->minor_ver_wanted = 0;
uc_fw->user_overridden = user;
- } else if (i915_inject_load_error(i915, e)) {
+ } else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
uc_fw->minor_ver_wanted += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) {
+ } else if (uc_fw->major_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
/* require prev major version */
uc_fw->major_ver_wanted -= 1;
uc_fw->minor_ver_wanted = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) {
+ } else if (uc_fw->minor_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
uc_fw->minor_ver_wanted -= 1;
uc_fw->user_overridden = user;
- } else if (user && i915_inject_load_error(i915, e)) {
+ } else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
uc_fw->major_ver_wanted = 0;
uc_fw->minor_ver_wanted = 0;
@@ -269,7 +278,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
GEM_BUG_ON(!i915->wopcm.size);
GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
- err = i915_inject_load_error(i915, -ENXIO);
+ err = i915_inject_probe_error(i915, -ENXIO);
if (err)
return err;
@@ -337,25 +346,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
}
/* Get version numbers from the CSS header */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
- css->sw_version);
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
- css->sw_version);
- break;
-
- default:
- MISSING_CASE(uc_fw->type);
- break;
- }
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
@@ -400,7 +394,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
{
struct drm_mm_node *node = &ggtt->uc_fw;
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
@@ -445,7 +439,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
u64 offset;
int ret;
- ret = i915_inject_load_error(gt->i915, -ETIMEDOUT);
+ ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
if (ret)
return ret;
@@ -506,7 +500,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
/* make sure the status was cleared the last time we reset the uc */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
- err = i915_inject_load_error(gt->i915, -ENOEXEC);
+ err = i915_inject_probe_error(gt->i915, -ENOEXEC);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index ae58e8a8c53b..029214cdedd5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -39,9 +39,6 @@
  * 3. Length info for each component can be found in the header, in dwords.
  * 4. Modulus and exponent key are not required by the driver. They may not
  *    appear in the fw, so the driver will load a truncated firmware in this case.
- *
- * The only difference between GuC and HuC firmwares is how the version
- * information is saved.
*/
struct uc_css_header {
@@ -69,11 +66,9 @@ struct uc_css_header {
char username[8];
char buildnumber[12];
u32 sw_version;
-#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
-#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
-#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
u32 reserved[14];
u32 header_info;
} __packed;
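
With the GuC/HuC split removed, sw_version packs major/minor/patch as three
8-bit fields, per the CSS_SW_VERSION_UC_* masks above. A standalone sketch of
the extraction the fetch code now performs, with plain shifts standing in for
FIELD_GET():

#include <stdint.h>
#include <stdio.h>

/* 8-bit fields, mirroring CSS_SW_VERSION_UC_{MAJOR,MINOR,PATCH}. */
#define UC_MAJOR(v) (((v) >> 16) & 0xffu)
#define UC_MINOR(v) (((v) >> 8) & 0xffu)
#define UC_PATCH(v) ((v) & 0xffu)

int main(void)
{
        uint32_t sw_version = (33u << 16) | (0u << 8) | 4u; /* e.g. 33.0.4 */

        printf("found %u.%u.%u\n", UC_MAJOR(sw_version),
               UC_MINOR(sw_version), UC_PATCH(sw_version));
        return 0;
}
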
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bba0eafe1cdb..d8a80388bd31 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -108,23 +108,15 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
  * validating that the doorbell status expected by the driver matches what the
* GuC/HW have.
*/
-static int igt_guc_clients(void *args)
+static int igt_guc_clients(void *arg)
{
- struct drm_i915_private *dev_priv = args;
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
intel_wakeref_t wakeref;
- struct intel_guc *guc;
int err = 0;
- GEM_BUG_ON(!HAS_GT_UC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->gt.uc.guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = check_all_doorbells(guc);
if (err)
@@ -189,8 +181,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
@@ -201,22 +192,14 @@ unlock:
*/
static int igt_guc_doorbells(void *arg)
{
- struct drm_i915_private *dev_priv = arg;
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
intel_wakeref_t wakeref;
- struct intel_guc *guc;
int i, err = 0;
u16 db_id;
- GEM_BUG_ON(!HAS_GT_UC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->gt.uc.guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = check_all_doorbells(guc);
if (err)
@@ -298,20 +281,19 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
-int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
+int intel_guc_live_selftest(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_guc_clients),
SUBTEST(igt_guc_doorbells),
};
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!USES_GUC_SUBMISSION(i915))
return 0;
- return i915_subtests(tests, dev_priv);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 5ff2437b2998..771420453f82 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
return ret;
}
@@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
}
/**
@@ -198,7 +198,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(dev_priv);
+ reg = i915_reserve_fence(&dev_priv->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index e753b1e706e2..6a3ac8cde95d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -35,7 +35,9 @@
*/
#include <linux/slab.h>
+
#include "i915_drv.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 13044c027f27..e451298d11c3 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -152,6 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct intel_vgpu_fb_info *info)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
@@ -161,7 +162,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
- i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
@@ -498,8 +499,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
goto out_free_gem;
}
- i915_gem_object_put(obj);
-
ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
if (ret < 0) {
gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +523,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
file_count(dmabuf->file),
kref_read(&obj->base.refcount));
+ i915_gem_object_put(obj);
+
return dmabuf_fd;
out_free_dmabuf:
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f21b8fb5b37e..d6e7a1189bad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 25f78196b964..bd12af349123 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -819,13 +819,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum intel_gvt_event_type event;
- if (reg == _DPA_AUX_CH_CTL)
+ if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
event = AUX_CHANNEL_A;
- else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ else if (reg == _PCH_DPB_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
event = AUX_CHANNEL_B;
- else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ else if (reg == _PCH_DPC_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
event = AUX_CHANNEL_C;
- else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ else if (reg == _PCH_DPD_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
WARN_ON(true);
@@ -2796,7 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+ MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
@@ -2872,11 +2875,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
@@ -3417,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
}
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ /* pvinfo data doesn't come from hw mmio */
+ if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+ continue;
+
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
i915_mmio_reg_offset(block->offset) + j,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 343d79c1cb7e..04a5a0d90823 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1564,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "\n");
}
-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%u\n",
- vgpu->submission.shadow[0]->gem_context->hw_id);
- }
- return sprintf(buf, "\n");
-}
-
static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
&dev_attr_vgpu_id.attr,
- &dev_attr_hw_id.attr,
NULL
};
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 4208e40445b1..aaf15916d29a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c79d16b381e..5b2a7d072ec9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -38,6 +38,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "gvt.h"
@@ -194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
I915_GTT_PAGE_SIZE);
@@ -365,7 +366,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
+ struct i915_ppgtt *ppgtt =
+ i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -378,6 +380,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
+
+ i915_vm_put(&ppgtt->vm);
}
static int
@@ -385,11 +389,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (workload->req)
return 0;
@@ -415,10 +416,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&vgpu->vgpu_lock);
if (workload->shadow)
return 0;
@@ -580,8 +580,6 @@ static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_shadow_bb *bb, *pos;
if (list_empty(&workload->shadow_bb))
@@ -590,8 +588,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb = list_first_entry(&workload->shadow_bb,
struct intel_vgpu_shadow_bb, list);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
@@ -609,8 +605,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_del(&bb->list);
kfree(bb);
}
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -685,7 +679,6 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -694,7 +687,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ring_id, workload);
mutex_lock(&vgpu->vgpu_lock);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
@@ -729,7 +721,6 @@ out:
err_req:
if (ret)
workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -844,7 +835,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
I915_GTT_PAGE_SIZE);
@@ -887,7 +878,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t tmp;
/* free the unsubmited workloads in the queues. */
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
@@ -1233,20 +1224,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ struct i915_ppgtt *ppgtt;
enum intel_engine_id i;
int ret;
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
i915_gem_context_set_force_single_submission(ctx);
- i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+ ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+ i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) {
struct intel_context *ce;
@@ -1291,12 +1280,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
out_shadow_ctx:
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
+ i915_context_ppgtt_root_restore(s, ppgtt);
for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1304,9 +1293,8 @@ out_shadow_ctx:
intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]);
}
+ i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1597,9 +1585,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 48e16ad93bbd..3c424cb90702 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,13 +7,12 @@
#include <linux/debugobjects.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
-#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
-
/*
* Active refs memory management
*
@@ -27,35 +26,35 @@ static struct i915_global_active {
} global;
struct active_node {
- struct i915_active_request base;
+ struct i915_active_fence base;
struct i915_active *ref;
struct rb_node node;
u64 timeline;
};
static inline struct active_node *
-node_from_active(struct i915_active_request *active)
+node_from_active(struct i915_active_fence *active)
{
return container_of(active, struct active_node, base);
}
#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
-static inline bool is_barrier(const struct i915_active_request *active)
+static inline bool is_barrier(const struct i915_active_fence *active)
{
- return IS_ERR(rcu_access_pointer(active->request));
+ return IS_ERR(rcu_access_pointer(active->fence));
}
static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
GEM_BUG_ON(!is_barrier(&node->base));
- return (struct llist_node *)&node->base.link;
+ return (struct llist_node *)&node->base.cb.node;
}
static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
- return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+ return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}
static inline struct intel_engine_cs *
@@ -68,7 +67,7 @@ barrier_to_engine(struct active_node *node)
static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
return container_of((struct list_head *)x,
- struct active_node, base.link);
+ struct active_node, base.cb.node);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
@@ -92,12 +91,17 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
- debug_object_activate(ref, &active_debug_desc);
+ spin_lock_irq(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* before the first inc */
+ debug_object_activate(ref, &active_debug_desc);
+ spin_unlock_irq(&ref->tree_lock);
}
static void debug_active_deactivate(struct i915_active *ref)
{
- debug_object_deactivate(ref, &active_debug_desc);
+ lockdep_assert_held(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* after the last dec */
+ debug_object_deactivate(ref, &active_debug_desc);
}
static void debug_active_fini(struct i915_active *ref)
@@ -125,31 +129,46 @@ __active_retire(struct i915_active *ref)
{
struct active_node *it, *n;
struct rb_root root;
- bool retire = false;
+ unsigned long flags;
- lockdep_assert_held(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
/* return the unused nodes to our slabcache -- flushing the allocator */
- if (atomic_dec_and_test(&ref->count)) {
- debug_active_deactivate(ref);
- root = ref->tree;
- ref->tree = RB_ROOT;
- ref->cache = NULL;
- retire = true;
- }
-
- mutex_unlock(&ref->mutex);
- if (!retire)
+ if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
return;
- rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
- GEM_BUG_ON(i915_active_request_isset(&it->base));
- kmem_cache_free(global.slab_cache, it);
- }
+ GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
+ debug_active_deactivate(ref);
+
+ root = ref->tree;
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
/* After the final retire, the entire struct may be freed */
if (ref->retire)
ref->retire(ref);
+
+ /* ... except if you wait on it, you must manage your own references! */
+ wake_up_var(ref);
+
+ rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+ GEM_BUG_ON(i915_active_fence_isset(&it->base));
+ kmem_cache_free(global.slab_cache, it);
+ }
+}
+
+static void
+active_work(struct work_struct *wrk)
+{
+ struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ __active_retire(ref);
}
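__active_retire() and active_work() above share one idiom: only the thread that drops the final reference may tear down the tree, and that final decrement must happen with tree_lock held so a concurrent active_instance() lookup can never observe a half-destroyed tree. A stand-alone sketch of the pattern, with hypothetical names:

#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct tracker {
	atomic_t count;
	spinlock_t lock;
	struct rb_root tree;
};

static void tracker_put(struct tracker *t)
{
	unsigned long flags;

	/* Fast path: not the last reference; the lock is never taken. */
	if (!atomic_dec_and_lock_irqsave(&t->count, &t->lock, flags))
		return;

	/* Count hit zero with t->lock held: safe to tear down. */
	t->tree = RB_ROOT;
	spin_unlock_irqrestore(&t->lock, flags);
}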
static void
@@ -159,18 +178,29 @@ active_retire(struct i915_active *ref)
if (atomic_add_unless(&ref->count, -1, 1))
return;
- /* One active may be flushed from inside the acquire of another */
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
+ queue_work(system_unbound_wq, &ref->work);
+ return;
+ }
+
__active_retire(ref);
}
static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- active_retire(node_from_active(base)->ref);
+ i915_active_fence_cb(fence, cb);
+ active_retire(container_of(cb, struct active_node, base.cb)->ref);
}
-static struct i915_active_request *
+static void
+excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ i915_active_fence_cb(fence, cb);
+ active_retire(container_of(cb, struct i915_active, excl.cb));
+}
+
+static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
struct active_node *node, *prealloc;
@@ -193,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
if (!prealloc)
return NULL;
- mutex_lock(&ref->mutex);
+ spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
parent = NULL;
@@ -214,7 +244,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
}
node = prealloc;
- i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
+ __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -223,29 +253,36 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
out:
ref->cache = node;
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
BUILD_BUG_ON(offsetof(typeof(*node), base));
return &node->base;
}
-void __i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
struct lock_class_key *key)
{
+ unsigned long bits;
+
debug_active_init(ref);
- ref->i915 = i915;
ref->flags = 0;
ref->active = active;
- ref->retire = retire;
+ ref->retire = ptr_unpack_bits(retire, &bits, 2);
+ if (bits & I915_ACTIVE_MAY_SLEEP)
+ ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+
+ spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
ref->cache = NULL;
+
init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
__mutex_init(&ref->mutex, "i915_active", key);
+ __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
+ INIT_WORK(&ref->work, active_work);
}
static bool ____active_del_barrier(struct i915_active *ref,
@@ -298,9 +335,9 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
int i915_active_ref(struct i915_active *ref,
struct intel_timeline *tl,
- struct i915_request *rq)
+ struct dma_fence *fence)
{
- struct i915_active_request *active;
+ struct i915_active_fence *active;
int err;
lockdep_assert_held(&tl->mutex);
@@ -323,26 +360,44 @@ int i915_active_ref(struct i915_active *ref,
* request that we want to emit on the kernel_context.
*/
__active_del_barrier(ref, node_from_active(active));
- RCU_INIT_POINTER(active->request, NULL);
- INIT_LIST_HEAD(&active->link);
- } else {
- if (!i915_active_request_isset(active))
- atomic_inc(&ref->count);
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
}
- GEM_BUG_ON(!atomic_read(&ref->count));
- __i915_active_request_set(active, rq);
+ if (!__i915_active_fence_set(active, fence))
+ atomic_inc(&ref->count);
out:
i915_active_release(ref);
return err;
}
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+ /* We expect the caller to manage the exclusive timeline ordering */
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /*
+ * As we don't know which mutex the caller is using, we told a small
+ * lie to the debug code that it is using the i915_active.mutex;
+ * and now we must stick to that lie.
+ */
+ mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
+ if (!__i915_active_fence_set(&ref->excl, f))
+ atomic_inc(&ref->count);
+ mutex_release(&ref->mutex.dep_map, _THIS_IP_);
+}
+
+bool i915_active_acquire_if_busy(struct i915_active *ref)
+{
+ debug_active_assert(ref);
+ return atomic_add_unless(&ref->count, 1, 0);
+}
+
int i915_active_acquire(struct i915_active *ref)
{
int err;
- debug_active_assert(ref);
- if (atomic_add_unless(&ref->count, 1, 0))
+ if (i915_active_acquire_if_busy(ref))
return 0;
err = mutex_lock_interruptible(&ref->mutex);
@@ -367,109 +422,66 @@ void i915_active_release(struct i915_active *ref)
active_retire(ref);
}
-static void __active_ungrab(struct i915_active *ref)
-{
- clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
-}
-
-bool i915_active_trygrab(struct i915_active *ref)
+static void enable_signaling(struct i915_active_fence *active)
{
- debug_active_assert(ref);
-
- if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
- return false;
+ struct dma_fence *fence;
- if (!atomic_add_unless(&ref->count, 1, 0)) {
- __active_ungrab(ref);
- return false;
- }
+ fence = i915_active_fence_get(active);
+ if (!fence)
+ return;
- return true;
-}
-
-void i915_active_ungrab(struct i915_active *ref)
-{
- GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
-
- active_retire(ref);
- __active_ungrab(ref);
+ dma_fence_enable_sw_signaling(fence);
+ dma_fence_put(fence);
}
int i915_active_wait(struct i915_active *ref)
{
struct active_node *it, *n;
- int err;
+ int err = 0;
might_sleep();
- might_lock(&ref->mutex);
- if (i915_active_is_idle(ref))
+ if (!i915_active_acquire_if_busy(ref))
return 0;
- err = mutex_lock_interruptible(&ref->mutex);
- if (err)
- return err;
-
- if (!atomic_add_unless(&ref->count, 1, 0)) {
- mutex_unlock(&ref->mutex);
- return 0;
- }
-
+ /* Flush lazy signals */
+ enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) { /* unconnected idle-barrier */
- err = -EBUSY;
- break;
- }
+ if (is_barrier(&it->base)) /* unconnected idle barrier */
+ continue;
- err = i915_active_request_retire(&it->base, BKL(ref));
- if (err)
- break;
+ enable_signaling(&it->base);
}
+ /* Any fence added after the wait begins will not be auto-signaled */
- __active_retire(ref);
+ i915_active_release(ref);
if (err)
return err;
- if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
+ if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
return -EINTR;
- if (!i915_active_is_idle(ref))
- return -EBUSY;
-
return 0;
}
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active)
-{
- struct i915_request *barrier =
- i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
-
- return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
-}
-
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
- struct active_node *it, *n;
- int err;
-
- if (RB_EMPTY_ROOT(&ref->tree))
- return 0;
+ int err = 0;
- /* await allocates and so we need to avoid hitting the shrinker */
- err = i915_active_acquire(ref);
- if (err)
- return err;
+ if (rcu_access_pointer(ref->excl.fence)) {
+ struct dma_fence *fence;
- mutex_lock(&ref->mutex);
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- err = i915_request_await_active_request(rq, &it->base);
- if (err)
- break;
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&ref->excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
}
- mutex_unlock(&ref->mutex);
- i915_active_release(ref);
+ /* In the future we may choose to await on all fences */
+
return err;
}
@@ -477,15 +489,16 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
void i915_active_fini(struct i915_active *ref)
{
debug_active_fini(ref);
- GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
GEM_BUG_ON(atomic_read(&ref->count));
+ GEM_BUG_ON(work_pending(&ref->work));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
mutex_destroy(&ref->mutex);
}
#endif
static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
- return node->timeline == idx && !i915_active_request_isset(&node->base);
+ return node->timeline == idx && !i915_active_fence_isset(&node->base);
}
static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
@@ -495,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
if (RB_EMPTY_ROOT(&ref->tree))
return NULL;
- mutex_lock(&ref->mutex);
+ spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
/*
@@ -560,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
goto match;
}
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
return NULL;
@@ -568,7 +581,7 @@ match:
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
if (p == &ref->cache->node)
ref->cache = NULL;
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
return rb_entry(p, struct active_node, node);
}
@@ -576,11 +589,12 @@ match:
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
intel_engine_mask_t tmp, mask = engine->mask;
+ struct intel_gt *gt = engine->gt;
struct llist_node *pos, *next;
int err;
+ GEM_BUG_ON(i915_active_is_idle(ref));
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
/*
@@ -589,7 +603,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* We can then use the preallocated nodes in
* i915_active_acquire_barrier()
*/
- for_each_engine_masked(engine, i915, mask, tmp) {
+ for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
struct active_node *node;
@@ -605,13 +619,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
node->base.lock =
&engine->kernel_context->timeline->mutex;
#endif
- RCU_INIT_POINTER(node->base.request, NULL);
- node->base.retire = node_retire;
+ RCU_INIT_POINTER(node->base.fence, NULL);
+ node->base.cb.func = node_retire;
node->timeline = idx;
node->ref = ref;
}
- if (!i915_active_request_isset(&node->base)) {
+ if (!i915_active_fence_isset(&node->base)) {
/*
* Mark this as being *our* unconnected proto-node.
*
@@ -621,8 +635,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* and then we can use the rb_node and list pointers
* for our tracking of the pending barrier.
*/
- RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
- node->base.link.prev = (void *)engine;
+ RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
+ node->base.cb.node.prev = (void *)engine;
atomic_inc(&ref->count);
}
@@ -648,6 +662,7 @@ unwind:
void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
+ unsigned long flags;
GEM_BUG_ON(i915_active_is_idle(ref));
@@ -657,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
* populated by i915_request_add_active_barriers() to point to the
* request that will eventually release them.
*/
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
struct active_node *node = barrier_from_ll(pos);
struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -679,54 +694,124 @@ void i915_active_acquire_barrier(struct i915_active *ref)
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- mutex_unlock(&ref->mutex);
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
}
void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
+ unsigned long flags;
GEM_BUG_ON(intel_engine_is_virtual(engine));
- GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
+ GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
+ node = llist_del_all(&engine->barrier_tasks);
+ if (!node)
+ return;
/*
* Attach the list of proto-fences to the in-flight request such
* that the parent i915_active will be released when this request
* is retired.
*/
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+ spin_lock_irqsave(&rq->lock, flags);
+ llist_for_each_safe(node, next, node) {
+ RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
smp_wmb(); /* serialise with reuse_idle_barrier */
- list_add_tail((struct list_head *)node, &rq->active_list);
+ list_add_tail((struct list_head *)node, &rq->fence.cb_list);
}
+ spin_unlock_irqrestore(&rq->lock, flags);
}
-int i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#define active_is_held(active) lockdep_is_held((active)->lock)
+#else
+#define active_is_held(active) true
+#endif
+
+/*
+ * __i915_active_fence_set: Update the last active fence along its timeline
+ * @active: the active tracker
+ * @fence: the new fence (under construction)
+ *
+ * Records the new @fence as the last active fence along its timeline in
+ * this active tracker, moving the tracking callbacks from the previous
+ * fence onto this one. Returns the previous fence (if not already completed),
+ * which the caller must ensure is executed before the new fence. To ensure
+ * that the order of fences within the timeline of the i915_active_fence is
+ * maintained, it must be locked by the caller.
+ */
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence)
{
- int err;
+ struct dma_fence *prev;
+ unsigned long flags;
+
+ /* NB: must be serialised by an outer timeline mutex (active->lock) */
+ spin_lock_irqsave(fence->lock, flags);
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+
+ prev = rcu_dereference_protected(active->fence, active_is_held(active));
+ if (prev) {
+ GEM_BUG_ON(prev == fence);
+ spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ __list_del_entry(&active->cb.node);
+ spin_unlock(prev->lock); /* serialise with prev->cb_list */
+
+ /*
+ * active->fence is reset by the callback from inside
+ * interrupt context. We need to serialise our list
+ * manipulation with the fence->lock to prevent the prev
+ * being lost inside an interrupt (it can't be replaced as
+ * no other caller is allowed to enter __i915_active_fence_set
+ * as we hold the timeline lock). After serialising with
+ * the callback, we need to double check which ran first,
+ * our list_del() [decoupling prev from the callback] or
+ * the callback...
+ */
+ prev = rcu_access_pointer(active->fence);
+ }
+
+ rcu_assign_pointer(active->fence, fence);
+ list_add_tail(&active->cb.node, &fence->cb_list);
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return prev;
+}
+
+int i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq)
+{
+ struct dma_fence *fence;
+ int err = 0;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
lockdep_assert_held(active->lock);
#endif
- /* Must maintain ordering wrt previous active requests */
- err = i915_request_await_active_request(rq, active);
- if (err)
- return err;
+ /* Must maintain timeline ordering wrt previous active requests */
+ rcu_read_lock();
+ fence = __i915_active_fence_set(active, &rq->fence);
+ if (fence) /* but the previous fence may not belong to that timeline! */
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
- __i915_active_request_set(active, rq);
- return 0;
+ return err;
}
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request)
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- /* Space left intentionally blank */
+ i915_active_fence_cb(fence, cb);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index f95058f99057..44859356ce97 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -12,6 +12,10 @@
#include "i915_active_types.h"
#include "i915_request.h"
+struct i915_request;
+struct intel_engine_cs;
+struct intel_timeline;
+
/*
* We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
@@ -28,308 +32,108 @@
* write access so that we can perform concurrent read operations between
* the CPU and GPU engines, as well as waiting for all rendering to
* complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_active_request to track the most recent (in
+ * object then embeds a #i915_active_fence to track the most recent (in
* retirement order) request relevant for the desired mode of access.
- * The #i915_active_request is updated with i915_active_request_set() to
+ * The #i915_active_fence is updated with i915_active_fence_set() to
* track the most recent fence request, typically this is done as part of
* i915_vma_move_to_active().
*
- * When the #i915_active_request completes (is retired), it will
+ * When the #i915_active_fence completes (is retired), it will
* signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_active_request.request == NULL). The owner
+ * itself as idle (i915_active_fence.fence == NULL). The owner
+ * itself as idle (i915_active_fence.request == NULL). The owner
* can then perform any action, such as delayed freeing of an active
* resource including itself.
*/
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request);
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
/**
- * i915_active_request_init - prepares the activity tracker for use
+ * __i915_active_fence_init - prepares the activity tracker for use
* @active - the active tracker
- * @rq - initial request to track, can be NULL
+ * @fence - initial fence to track, can be NULL
* @fn - a callback when the tracker is retired (becomes idle),
* can be NULL
*
- * i915_active_request_init() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
+ * __i915_active_fence_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active fence
+ * associated with it. When the last fence becomes idle, when it is retired
* after completion, the optional callback @func is invoked.
*/
static inline void
-i915_active_request_init(struct i915_active_request *active,
+__i915_active_fence_init(struct i915_active_fence *active,
struct mutex *lock,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+ void *fence,
+ dma_fence_func_t fn)
{
- RCU_INIT_POINTER(active->request, rq);
- INIT_LIST_HEAD(&active->link);
- active->retire = retire ?: i915_active_retire_noop;
+ RCU_INIT_POINTER(active->fence, fence);
+ active->cb.func = fn ?: i915_active_noop;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
active->lock = lock;
#endif
}
-#define INIT_ACTIVE_REQUEST(name, lock) \
- i915_active_request_init((name), (lock), NULL, NULL)
-
-/**
- * i915_active_request_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * __i915_active_request_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-__i915_active_request_set(struct i915_active_request *active,
- struct i915_request *request)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- lockdep_assert_held(active->lock);
-#endif
- list_move(&active->link, &request->active_list);
- rcu_assign_pointer(active->request, request);
-}
+#define INIT_ACTIVE_FENCE(A, LOCK) \
+ __i915_active_fence_init((A), (LOCK), NULL, NULL)
-int __must_check
-i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq);
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence);
/**
- * i915_active_request_raw - return the active request
+ * i915_active_fence_set - updates the tracker to watch the current fence
* @active - the active tracker
+ * @rq - the request to watch
*
- * i915_active_request_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
+ * i915_active_fence_set() watches the given @rq for completion. While
+ * that @rq is busy, the @active reports busy. When that @rq is signaled
+ * (or else retired) the @active tracker is updated to report idle.
*/
-static inline struct i915_request *
-i915_active_request_raw(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return rcu_dereference_protected(active->request,
- lockdep_is_held(mutex));
-}
-
-/**
- * i915_active_request_peek - report the active request being monitored
- * @active - the active tracker
- *
- * i915_active_request_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_peek(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- struct i915_request *request;
-
- request = i915_active_request_raw(active, mutex);
- if (!request || i915_request_completed(request))
- return NULL;
-
- return request;
-}
-
-/**
- * i915_active_request_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_active_request_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_get(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return i915_request_get(i915_active_request_peek(active, mutex));
-}
-
-/**
- * __i915_active_request_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_active_request_get() returns a reference to the active request,
- * or NULL if the active tracker is idle. The caller must hold the RCU read
- * lock, but the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_active_request_get_rcu(const struct i915_active_request *active)
-{
- /*
- * Performing a lockless retrieval of the active request is super
- * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
- * slab of request objects will not be freed whilst we hold the
- * RCU read lock. It does not guarantee that the request itself
- * will not be freed and then *reused*. Viz,
- *
- * Thread A Thread B
- *
- * rq = active.request
- * retire(rq) -> free(rq);
- * (rq is now first on the slab freelist)
- * active.request = NULL
- *
- * rq = new submission on a new object
- * ref(rq)
- *
- * To prevent the request from being reused whilst the caller
- * uses it, we take a reference like normal. Whilst acquiring
- * the reference we check that it is not in a destroyed state
- * (refcnt == 0). That prevents the request being reallocated
- * whilst the caller holds on to it. To check that the request
- * was not reallocated as we acquired the reference we have to
- * check that our request remains the active request across
- * the lookup, in the same manner as a seqlock. The visibility
- * of the pointer versus the reference counting is controlled
- * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
- *
- * In the middle of all that, we inspect whether the request is
- * complete. Retiring is lazy so the request may be completed long
- * before the active tracker is updated. Querying whether the
- * request is complete is far cheaper (as it involves no locked
- * instructions setting cachelines to exclusive) than acquiring
- * the reference, so we do it first. The RCU read lock ensures the
- * pointer dereference is valid, but does not ensure that the
- * seqno nor HWS is the right one! However, if the request was
- * reallocated, that means the active tracker's request was complete.
- * If the new request is also complete, then both are and we can
- * just report the active tracker is idle. If the new request is
- * incomplete, then we acquire a reference on it and check that
- * it remained the active request.
- *
- * It is then imperative that we do not zero the request on
- * reallocation, so that we can chase the dangling pointers!
- * See i915_request_alloc().
- */
- do {
- struct i915_request *request;
-
- request = rcu_dereference(active->request);
- if (!request || i915_request_completed(request))
- return NULL;
-
- /*
- * An especially silly compiler could decide to recompute the
- * result of i915_request_completed, more specifically
- * re-emit the load for request->fence.seqno. A race would catch
- * a later seqno value, which could flip the result from true to
- * false. Which means part of the instructions below might not
- * be executed, while later on instructions are executed. Due to
- * barriers within the refcounting the inconsistency can't reach
- * past the call to i915_request_get_rcu, but not executing
- * that while still executing i915_request_put() creates
- * havoc enough. Prevent this with a compiler barrier.
- */
- barrier();
-
- request = i915_request_get_rcu(request);
-
- /*
- * What stops the following rcu_access_pointer() from occurring
- * before the above i915_request_get_rcu()? If we were
- * to read the value before pausing to get the reference to
- * the request, we may not notice a change in the active
- * tracker.
- *
- * The rcu_access_pointer() is a mere compiler barrier, which
- * means both the CPU and compiler are free to perform the
- * memory read without constraint. The compiler only has to
- * ensure that any operations after the rcu_access_pointer()
- * occur afterwards in program order. This means the read may
- * be performed earlier by an out-of-order CPU, or adventurous
- * compiler.
- *
- * The atomic operation at the heart of
- * i915_request_get_rcu(), see dma_fence_get_rcu(), is
- * atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_request_get_rcu()
- * returns the request (and so with the reference counted
- * incremented) then the following read for rcu_access_pointer()
- * must occur after the atomic operation and so confirm
- * that this request is the one currently being tracked.
- *
- * The corresponding write barrier is part of
- * rcu_assign_pointer().
- */
- if (!request || request == rcu_access_pointer(active->request))
- return rcu_pointer_handoff(request);
-
- i915_request_put(request);
- } while (1);
-}
-
+int __must_check
+i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq);
/**
- * i915_active_request_get_unlocked - return a reference to the active request
+ * i915_active_fence_get - return a reference to the active fence
* @active - the active tracker
*
- * i915_active_request_get_unlocked() returns a reference to the active request,
+ * i915_active_fence_get() returns a reference to the active fence,
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_request_put().
+ * The reference should be freed with dma_fence_put().
*/
-static inline struct i915_request *
-i915_active_request_get_unlocked(const struct i915_active_request *active)
+static inline struct dma_fence *
+i915_active_fence_get(struct i915_active_fence *active)
{
- struct i915_request *request;
+ struct dma_fence *fence;
rcu_read_lock();
- request = __i915_active_request_get_rcu(active);
+ fence = dma_fence_get_rcu_safe(&active->fence);
rcu_read_unlock();
- return request;
+ return fence;
}
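To make the lifetime rule concrete, here is a hypothetical consumer of i915_active_fence_get(): the reference is obtained under RCU with no lock held and must be dropped with dma_fence_put().

#include <linux/dma-fence.h>

/* Illustrative only: wait on whatever fence the tracker currently holds. */
static long wait_on_active_fence(struct i915_active_fence *active)
{
	struct dma_fence *fence;
	long err;

	fence = i915_active_fence_get(active);
	if (!fence)
		return 0;	/* tracker already idle */

	err = dma_fence_wait(fence, true);	/* interruptible wait */
	dma_fence_put(fence);
	return err;
}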
/**
- * i915_active_request_isset - report whether the active tracker is assigned
+ * i915_active_fence_isset - report whether the active tracker is assigned
* @active - the active tracker
*
- * i915_active_request_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
+ * i915_active_fence_isset() returns true if the active tracker is currently
+ * assigned to a fence. Due to the lazy retiring, that fence may be idle
* and this may report stale information.
*/
static inline bool
-i915_active_request_isset(const struct i915_active_request *active)
+i915_active_fence_isset(const struct i915_active_fence *active)
{
- return rcu_access_pointer(active->request);
+ return rcu_access_pointer(active->fence);
}
-/**
- * i915_active_request_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_active_request_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_active_request_retire(struct i915_active_request *active,
- struct mutex *mutex)
+static inline void
+i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct i915_request *request;
- long ret;
-
- request = i915_active_request_raw(active, mutex);
- if (!request)
- return 0;
-
- ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0)
- return ret;
+ struct i915_active_fence *active =
+ container_of(cb, typeof(*active), cb);
- list_del_init(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, request);
-
- return 0;
+ RCU_INIT_POINTER(active->fence, NULL);
}
/*
@@ -358,34 +162,40 @@ i915_active_request_retire(struct i915_active_request *active,
* synchronisation.
*/
-void __i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
struct lock_class_key *key);
-#define i915_active_init(i915, ref, active, retire) do { \
+#define i915_active_init(ref, active, retire) do { \
static struct lock_class_key __key; \
\
- __i915_active_init(i915, ref, active, retire, &__key); \
+ __i915_active_init(ref, active, retire, &__key); \
} while (0)
int i915_active_ref(struct i915_active *ref,
struct intel_timeline *tl,
- struct i915_request *rq);
+ struct dma_fence *fence);
+
+static inline int
+i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+{
+ return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
+}
+
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+
+static inline bool i915_active_has_exclusive(struct i915_active *ref)
+{
+ return rcu_access_pointer(ref->excl.fence);
+}
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq,
- struct i915_active *ref);
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active);
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
int i915_active_acquire(struct i915_active *ref);
+bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
-void __i915_active_release_nested(struct i915_active *ref, int subclass);
-
-bool i915_active_trygrab(struct i915_active *ref);
-void i915_active_ungrab(struct i915_active *ref);
static inline bool
i915_active_is_idle(const struct i915_active *ref)
@@ -404,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);
+void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 1854e7d168c1..96aed0ee700a 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -8,22 +8,18 @@
#define _I915_ACTIVE_TYPES_H_
#include <linux/atomic.h>
+#include <linux/dma-fence.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
+#include "i915_utils.h"
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
- struct i915_request *);
-
-struct i915_active_request {
- struct i915_request __rcu *request;
- struct list_head link;
- i915_active_retire_fn retire;
+struct i915_active_fence {
+ struct dma_fence __rcu *fence;
+ struct dma_fence_cb cb;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/*
* Incorporeal!
@@ -43,20 +39,30 @@ struct i915_active_request {
struct active_node;
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
struct i915_active {
- struct drm_i915_private *i915;
+ atomic_t count;
+ struct mutex mutex;
+ spinlock_t tree_lock;
struct active_node *cache;
struct rb_root tree;
- struct mutex mutex;
- atomic_t count;
+
+ /* Preallocated "exclusive" node */
+ struct i915_active_fence excl;
unsigned long flags;
-#define I915_ACTIVE_GRAB_BIT 0
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
+ struct work_struct work;
+
struct llist_head preallocated_barriers;
};
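The I915_ACTIVE_MAY_SLEEP flag travels in the low bits of the retire callback pointer: __i915_active_call forces at least 4-byte alignment on the function, guaranteeing bits 0-1 of its address are clear, and ptr_pack_bits()/ptr_unpack_bits() from i915_utils.h stash and recover the flag. A self-contained illustration of the trick, with hypothetical names:

typedef void (*retire_fn)(void *ref);

/* Pack a small flag into the low bits of a 4-byte-aligned function pointer. */
static inline void *pack_retire(retire_fn fn, unsigned long flags)
{
	return (void *)((unsigned long)fn | (flags & 3));
}

/* Recover both halves; the pointer is usable again once the bits are masked. */
static inline retire_fn unpack_retire(void *packed, unsigned long *flags)
{
	*flags = (unsigned long)packed & 3;
	return (retire_fn)((unsigned long)packed & ~3ul);
}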
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index fe1871d7c126..e9d4200ce3bc 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -38,6 +38,7 @@ int __init i915_global_buddy_init(void)
if (!global.slab_blocks)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 24555102e198..f24096e27bef 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -53,13 +53,11 @@
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands.
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access.
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
@@ -84,9 +82,9 @@
* in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
*/
/*
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
* CMD_DESC_REJECT: The command is never allowed
* CMD_DESC_REGISTER: The command should be checked against the
* register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
*/
u32 flags;
#define CMD_DESC_FIXED (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
/*
* The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
- .cmd = { (op), ~0u << (opm) }, \
+ .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
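The functional change in this hunk is masking the stored opcode itself: since << binds tighter than &, "(op & ~0u << (opm))" is op & (~0u << opm). Command definitions that embed a default Length value previously left stray length bits in the match key; now both the key and the incoming header are compared under the same mask. A worked example with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical command: opcode 0x26 at bit 23, defined with an
	 * embedded default length of 2 dwords. */
	uint32_t op   = (0x26u << 23) | 2;
	uint32_t opm  = 23;		/* opcode field starts at bit 23 */
	uint32_t mask = ~0u << opm;	/* 0xff800000 */
	uint32_t key  = op & mask;	/* stray length bits stripped */

	/* A header with the same opcode but length 5 now matches; with the
	 * old ".cmd = { (op), ... }" the stray 2 would have broken it. */
	uint32_t header = (0x26u << 23) | 5;
	printf("%s\n", (header & mask) == key ? "match" : "no match");
	return 0;
}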
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
- CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
@@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
}}, ),
};
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
@@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ * 1) Those that touch registers
+ * 2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions use the standard 0xFF length mask,
+ * and none allow access to non-general registers, so in fact no BLT
+ * cmds are included in the table at all.
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
+ CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_BB_START_OPERAND_MASK,
+ .expected = MI_BB_START_OPERAND_EXPECT,
+ }}, ),
+};
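
As an aside, the B (CMD_DESC_BITMASK) entries, like the BB_START descriptor just above, reduce to a masked compare of one command dword against an expected value. A sketch of that core check with stand-in names, not the driver's full check_cmd() logic:

    #include <stdint.h>

    /* Core of a CMD_DESC_BITMASK check, mirroring the
     * .bits = { .offset, .mask, .expected } tuples above: the dword at
     * the given offset must match the expected bits under the mask. */
    static int bitmask_check(const uint32_t *cmd, uint32_t offset,
                             uint32_t mask, uint32_t expected)
    {
        return (cmd[offset] & mask) == expected;
    }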
+
static const struct drm_i915_cmd_descriptor noop_desc =
CMD(MI_NOOP, SMI, F, 1, S);
@@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
#undef R
#undef W
#undef B
-#undef M
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+ { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
/*
* Register whitelists, sorted by increasing register offset.
*/
@@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+ REG32(BCS_SWCTRL),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG64_IDX(BCS_GPR, 0),
+ REG64_IDX(BCS_GPR, 1),
+ REG64_IDX(BCS_GPR, 2),
+ REG64_IDX(BCS_GPR, 3),
+ REG64_IDX(BCS_GPR, 4),
+ REG64_IDX(BCS_GPR, 5),
+ REG64_IDX(BCS_GPR, 6),
+ REG64_IDX(BCS_GPR, 7),
+ REG64_IDX(BCS_GPR, 8),
+ REG64_IDX(BCS_GPR, 9),
+ REG64_IDX(BCS_GPR, 10),
+ REG64_IDX(BCS_GPR, 11),
+ REG64_IDX(BCS_GPR, 12),
+ REG64_IDX(BCS_GPR, 13),
+ REG64_IDX(BCS_GPR, 14),
+ REG64_IDX(BCS_GPR, 15),
};
#undef REG64
@@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
struct drm_i915_reg_table {
const struct drm_i915_reg_descriptor *regs;
int num_regs;
- bool master;
};
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+ { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
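
To make the mask concrete: for variable-length commands the parser derives the dword count from the header's Length field plus LENGTH_BIAS (defined further down as 2). A standalone sketch under that assumption, with an illustrative header value:

    #include <assert.h>
    #include <stdint.h>

    #define LENGTH_BIAS 2

    /* Deriving a command's dword length from its header using the 0xFF
     * mask returned above plus LENGTH_BIAS, as done for commands whose
     * length is not fixed. */
    static uint32_t cmd_dword_length(uint32_t header, uint32_t length_mask)
    {
        return (header & length_mask) + LENGTH_BIAS;
    }

    int main(void)
    {
        /* hypothetical MI command whose Length field is 1: 3 dwords total */
        assert(cmd_dword_length(0x18000001, 0xFF) == 3);
        return 0;
    }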
+
static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
@@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN(engine->i915, 7))
+ if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+ engine->class == COPY_ENGINE_CLASS))
return;
switch (engine->class) {
case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_render_ring_cmds;
+ cmd_tables = hsw_render_ring_cmd_table;
cmd_table_count =
- ARRAY_SIZE(hsw_render_ring_cmds);
+ ARRAY_SIZE(hsw_render_ring_cmd_table);
} else {
- cmd_tables = gen7_render_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ cmd_tables = gen7_render_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
}
if (IS_HASWELL(engine->i915)) {
@@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->reg_tables = ivb_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
-
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VIDEO_DECODE_CLASS:
- cmd_tables = gen7_video_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ cmd_tables = gen7_video_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case COPY_ENGINE_CLASS:
- if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_blt_ring_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ if (IS_GEN(engine->i915, 9)) {
+ cmd_tables = gen9_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+ engine->get_cmd_length_mask =
+ gen9_blt_get_cmd_length_mask;
+
+ /* BCS Engine unsafe without parser */
+ engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+ } else if (IS_HASWELL(engine->i915)) {
+ cmd_tables = hsw_blt_ring_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
} else {
- cmd_tables = gen7_blt_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ cmd_tables = gen7_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_HASWELL(engine->i915)) {
+ if (IS_GEN(engine->i915, 9)) {
+ engine->reg_tables = gen9_blt_reg_tables;
+ engine->reg_table_count =
+ ARRAY_SIZE(gen9_blt_reg_tables);
+ } else if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
-
- engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VIDEO_ENHANCEMENT_CLASS:
- cmd_tables = hsw_vebox_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ cmd_tables = hsw_vebox_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+ engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}
/**
@@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!intel_engine_needs_cmd_parser(engine))
+ if (!intel_engine_using_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
}
static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
{
const struct drm_i915_reg_table *table = engine->reg_tables;
+ const struct drm_i915_reg_descriptor *reg = NULL;
int count = engine->reg_table_count;
- for (; count > 0; ++table, --count) {
- if (!table->master || is_master) {
- const struct drm_i915_reg_descriptor *reg;
+ for (; !reg && (count > 0); ++table, --count)
+ reg = __find_reg(table->regs, table->num_regs, addr);
- reg = __find_reg(table->regs, table->num_regs, addr);
- if (reg != NULL)
- return reg;
- }
- }
-
- return NULL;
+ return reg;
}
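
Since the whitelists are declared sorted by increasing offset (see the comment above them), the per-table lookup lends itself to binary search. A self-contained sketch with stand-in types, not the driver's actual __find_reg():

    #include <stddef.h>
    #include <stdint.h>

    struct reg_desc { uint32_t addr; };
    struct reg_table { const struct reg_desc *regs; int num; };

    /* Walk each whitelist table in turn and binary-search it, stopping
     * at the first hit, the same early-exit shape as find_reg() above. */
    static const struct reg_desc *
    find_reg_sketch(const struct reg_table *t, int count, uint32_t addr)
    {
        for (; count > 0; t++, count--) {
            int lo = 0, hi = t->num - 1;
            while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;
                if (t->regs[mid].addr == addr)
                    return &t->regs[mid];
                if (t->regs[mid].addr < addr)
                    lo = mid + 1;
                else
                    hi = mid - 1;
            }
        }
        return NULL;
    }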
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd, u32 length,
- const bool is_master)
+ const u32 *cmd, u32 length)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
@@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
- *cmd);
- return false;
- }
-
if (desc->flags & CMD_DESC_REGISTER) {
/*
* Get the distance between individual register offset
@@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(engine, is_master, reg_addr);
+ find_reg(engine, reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
@@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
+static int check_bbstart(const struct i915_gem_context *ctx,
+ u32 *cmd, u32 offset, u32 length,
+ u32 batch_len,
+ u64 batch_start,
+ u64 shadow_batch_start)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64*)(cmd+1);
+ jump_offset = jump_target - batch_start;
+
+ /*
+	 * Any underflow of jump_offset wraps to a value guaranteed to be
+	 * outside the range of a u32, so the single >= test catches targets
+	 * both below the batch start and past its end
+ */
+ if (jump_offset >= batch_len) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_len is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
+ *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+ return -EINVAL;
+ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
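
The underflow comment deserves a worked example: the subtraction is done in u64, and batch_len fits in a u32, so a target below batch_start wraps to an enormous offset and fails the same >= test as one past the end. A standalone demonstration:

    #include <assert.h>
    #include <stdint.h>

    /* Demonstrates the single-comparison bounds check in check_bbstart():
     * offsets computed in u64 wrap on underflow, so offset >= batch_len
     * rejects targets on both sides of the batch. */
    int main(void)
    {
        uint64_t batch_start = 0x100000, batch_len = 0x1000;

        assert((UINT64_C(0x0ff000) - batch_start) >= batch_len); /* below start: wraps huge */
        assert((UINT64_C(0x101800) - batch_start) >= batch_len); /* past end */
        assert((UINT64_C(0x100800) - batch_start) <  batch_len); /* inside: accepted */
        return 0;
    }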
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+ unsigned long *next_whitelist;
+
+ if (CMDPARSER_USES_GGTT(ctx->i915))
+ return;
+
+ if (batch_cmds <= ctx->jump_whitelist_cmds) {
+ bitmap_zero(ctx->jump_whitelist, batch_cmds);
+ return;
+ }
+
+again:
+ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+ if (next_whitelist) {
+ kfree(ctx->jump_whitelist);
+ ctx->jump_whitelist = next_whitelist;
+ ctx->jump_whitelist_cmds =
+ next_size * BITS_PER_BYTE * sizeof(long);
+ return;
+ }
+
+ if (next_size > exact_size) {
+ next_size = exact_size;
+ goto again;
+ }
+
+ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+ bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+ return;
+}
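
A rough userspace analogue of the reuse-or-grow logic above may help; the kernel version additionally tries a power-of-two size first (so later batches can reuse the allocation) before falling back to the exact size, and keeps the old bitmap if both allocations fail. Stand-in names throughout:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the kernel's BITS_TO_LONGS(): longs needed for n bits */
    #define TO_LONGS(n) (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

    struct whitelist {
        unsigned long *bits;
        uint32_t nbits;    /* capacity in bits, like jump_whitelist_cmds */
    };

    static void whitelist_reserve(struct whitelist *wl, uint32_t batch_cmds)
    {
        unsigned long *p;

        if (batch_cmds <= wl->nbits) {    /* big enough: clear and reuse */
            memset(wl->bits, 0, TO_LONGS(wl->nbits) * sizeof(long));
            return;
        }

        p = calloc(TO_LONGS(batch_cmds), sizeof(long)); /* zeroed bitmap */
        if (!p)
            return;                       /* keep the old, smaller bitmap */

        free(wl->bits);
        wl->bits = p;
        wl->nbits = TO_LONGS(batch_cmds) * 8 * sizeof(long);
    }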
+
#define LENGTH_BIAS 2
/**
 * intel_engine_cmd_parser() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
* @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master)
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start)
{
- u32 *cmd, *batch_end;
+ u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
bool needs_clflush_after = false;
@@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
return PTR_ERR(cmd);
}
+ init_whitelist(ctx, batch_len);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
do {
u32 length;
- if (*cmd == MI_BATCH_BUFFER_END) {
- if (needs_clflush_after) {
- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
- drm_clflush_virt_range(ptr,
- (void *)(cmd + 1) - ptr);
- }
+ if (*cmd == MI_BATCH_BUFFER_END)
break;
- }
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
ret = -EINVAL;
- break;
- }
-
- /*
- * If the batch buffer contains a chained batch, return an
- * error that tells the caller to abort and dispatch the
- * workload as a non-secure batch.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- break;
+ goto err;
}
if (desc->flags & CMD_DESC_FIXED)
@@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
length,
batch_end - cmd);
ret = -EINVAL;
- break;
+ goto err;
}
- if (!check_cmd(engine, desc, cmd, length, is_master)) {
+ if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES;
+ goto err;
+ }
+
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, batch_start,
+ shadow_batch_start);
+
+ if (ret)
+ goto err;
break;
}
+ if (ctx->jump_whitelist_cmds > offset)
+ set_bit(offset, ctx->jump_whitelist);
+
cmd += length;
+ offset += length;
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
- break;
+ goto err;
}
} while (1);
+ if (needs_clflush_after) {
+ void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+
+ drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+ }
+
+err:
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
}
@@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_uabi_engine(engine, dev_priv) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ if (intel_engine_using_cmd_parser(engine)) {
active = true;
break;
}
@@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* the parser enabled.
* 9. Don't whitelist or handle oacontrol specially, as ownership
* for oacontrol state is moving to i915-perf.
+ * 10. Support for Gen9 BCS Parsing
*/
- return 9;
+ return 10;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b0f51591f2e4..8016484ebcd3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,7 +41,10 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
@@ -61,11 +64,18 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
struct drm_printer p = drm_seq_file_printer(m);
+ const char *msg;
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+ msg = "n/a";
+#ifdef CONFIG_INTEL_IOMMU
+ msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+ seq_printf(m, "iommu: %s\n", msg);
+
intel_device_info_dump_flags(info, &p);
intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
intel_driver_caps_print(&dev_priv->caps, &p);
@@ -77,11 +87,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_pin_flag(struct drm_i915_gem_object *obj)
-{
- return obj->pin_global ? 'p' : ' ';
-}
-
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (i915_gem_object_get_tiling(obj)) {
@@ -140,9 +145,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
@@ -221,8 +225,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+ if (i915_gem_object_is_framebuffer(obj))
+ seq_printf(m, " (fb)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
@@ -243,6 +247,9 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ return 0;
+
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
@@ -290,6 +297,7 @@ static int per_file_stats(int id, void *ptr, void *data)
}
spin_unlock(&obj->vma.lock);
+ i915_gem_object_put(obj);
return 0;
}
@@ -309,34 +317,44 @@ static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
struct file_stats kstats = {};
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &i915->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
intel_context_lock_pinned(ce);
if (intel_context_is_pinned(ce)) {
+ rcu_read_lock();
if (ce->state)
per_file_stats(0,
ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ rcu_read_unlock();
}
intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = { .vm = ctx->vm, };
+ struct file_stats stats = {
+ .vm = rcu_access_pointer(ctx->vm),
+ };
struct drm_file *file = ctx->file_priv->file;
struct task_struct *task;
char name[80];
- spin_lock(&file->table_lock);
+ rcu_read_lock();
idr_for_each(&file->object_idr, per_file_stats, &stats);
- spin_unlock(&file->table_lock);
+ rcu_read_unlock();
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
@@ -346,7 +364,12 @@ static void print_context_stats(struct seq_file *m,
print_file_stats(m, name, stats);
}
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
print_file_stats(m, "[k]contexts", kstats);
}
@@ -354,7 +377,6 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- int ret;
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
@@ -363,12 +385,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
print_context_stats(m, i915);
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -376,7 +393,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- int pipe;
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
@@ -527,6 +544,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
gen8_display_interrupt_info(m);
} else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_wakeref_t pref;
+
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
@@ -537,7 +556,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
- intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
pref = intel_display_power_get_if_enabled(dev_priv,
@@ -571,12 +589,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "PM IMR:\t\t%08x\n",
I915_READ(GEN6_PMIMR));
+ pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -772,7 +792,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uncore *uncore = &dev_priv->uncore;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
int ret = 0;
@@ -808,23 +828,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "actual GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -858,7 +878,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
else
reqf >>= 25;
}
- reqf = intel_gpu_freq(dev_priv, reqf);
+ reqf = intel_gpu_freq(rps, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -871,8 +891,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_gpu_freq(dev_priv,
- intel_get_cagf(dev_priv, rpstat));
+ cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -949,37 +968,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else {
seq_puts(m, "no P-state info available\n");
}
@@ -992,91 +1011,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
- struct seq_file *m,
- struct intel_instdone *instdone)
-{
- int slice;
- int subslice;
-
- seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
- instdone->instdone);
-
- if (INTEL_GEN(dev_priv) <= 3)
- return;
-
- seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
- instdone->slice_common);
-
- if (INTEL_GEN(dev_priv) <= 6)
- return;
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->sampler[slice][subslice]);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->row[slice][subslice]);
-}
-
-static int i915_hangcheck_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- enum intel_engine_id id;
-
- seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
- if (test_bit(I915_WEDGED, &gt->reset.flags))
- seq_puts(m, "\tWedged\n");
- if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
- seq_puts(m, "\tDevice (global) reset in progress\n");
-
- if (!i915_modparams.enable_hangcheck) {
- seq_puts(m, "Hangcheck disabled\n");
- return 0;
- }
-
- if (timer_pending(&gt->hangcheck.work.timer))
- seq_printf(m, "Hangcheck active, timer fires in %dms\n",
- jiffies_to_msecs(gt->hangcheck.work.timer.expires -
- jiffies));
- else if (delayed_work_pending(&gt->hangcheck.work))
- seq_puts(m, "Hangcheck active, work pending\n");
- else
- seq_puts(m, "Hangcheck inactive\n");
-
- seq_printf(m, "GT active? %s\n", yesno(gt->awake));
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- for_each_engine(engine, i915, id) {
- struct intel_instdone instdone;
-
- seq_printf(m, "%s: %d ms ago\n",
- engine->name,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
-
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)engine->hangcheck.acthd,
- intel_engine_get_active_head(engine));
-
- intel_engine_get_instdone(engine, &instdone);
-
- seq_puts(m, "\tinstdone read =\n");
- i915_instdone_info(i915, m, &instdone);
-
- seq_puts(m, "\tinstdone accu =\n");
- i915_instdone_info(i915, m,
- &engine->hangcheck.instdone);
- }
- }
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1157,11 +1091,13 @@ static void print_rc6_res(struct seq_file *m,
const char *title,
const i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- seq_printf(m, "%s %u (%llu us)\n",
- title, I915_READ(reg),
- intel_rc6_residency_us(dev_priv, reg));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(&i915->uncore, reg),
+ intel_rc6_residency_us(&i915->gt.rc6, reg));
}
static int vlv_drpc_info(struct seq_file *m)
@@ -1439,7 +1375,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
@@ -1464,10 +1400,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1478,21 +1415,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_opregion *opregion = &dev_priv->opregion;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
- mutex_unlock(&dev->struct_mutex);
-
-out:
return 0;
}
@@ -1512,11 +1439,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
@@ -1551,7 +1473,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1564,23 +1485,20 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
seq_puts(m, "HW context ");
- if (!list_empty(&ctx->hw_id_link))
- seq_printf(m, "%x [pin %u]", ctx->hw_id,
- atomic_read(&ctx->hw_id_pin_count));
if (ctx->pid) {
struct task_struct *task;
@@ -1614,9 +1532,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n');
- }
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock(&i915->gem.contexts.lock);
return 0;
}
@@ -1654,9 +1575,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
@@ -1711,7 +1632,7 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
@@ -1723,7 +1644,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
vlv_punit_put(dev_priv);
act_freq = (act_freq >> 8) & 0xff;
} else {
- act_freq = intel_get_cagf(dev_priv,
+ act_freq = intel_get_cagf(rps,
I915_READ(GEN6_RPSTAT1));
}
}
@@ -1734,17 +1655,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq),
- intel_gpu_freq(dev_priv, act_freq));
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_gpu_freq(rps, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
- intel_gpu_freq(dev_priv, rps->idle_freq),
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
@@ -1860,8 +1781,8 @@ static void i915_guc_log_info(struct seq_file *m,
struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
- if (!intel_guc_log_relay_enabled(log)) {
- seq_puts(m, "GuC log relay disabled\n");
+ if (!intel_guc_log_relay_created(log)) {
+ seq_puts(m, "GuC log relay not created\n");
return;
}
@@ -2048,9 +1969,23 @@ i915_guc_log_relay_write(struct file *filp,
loff_t *ppos)
{
struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
- intel_guc_log_relay_flush(log);
- return cnt;
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * Enable and start the GuC log relay on a value of 1.
+	 * Any other value flushes the log relay.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
}
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
@@ -2133,7 +2068,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = I915_READ(EDP_PSR2_STATUS);
+ val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2149,7 +2084,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = I915_READ(EDP_PSR_STATUS);
+ val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2188,14 +2123,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
- if (!psr->enabled)
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
goto unlock;
+ }
if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2208,7 +2147,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 * SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+ val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@@ -2226,8 +2166,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 * Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
- su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
+ frame));
+ su_frames_val[frame / 3] = val;
+ }
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
@@ -2360,8 +2303,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(dev_priv,
- power_domain),
+ intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
@@ -2396,6 +2338,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
if (INTEL_GEN(dev_priv) >= 12) {
dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+		 * NOTE: DMC_DEBUG3 is a general-purpose register.
+		 * According to Bspec 49196, DMC f/w reuses the DC5/6 counter
+		 * register for DC3CO debugging and validation, but TGL DMC
+		 * f/w uses the DMC_DEBUG3 register as the DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
} else {
dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
SKL_CSR_DC3_DC5_COUNT;
@@ -3110,8 +3059,9 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (!intel_dig_port->dp.can_mst)
continue;
- seq_printf(m, "MST Source Port %c\n",
- port_name(intel_dig_port->base.port));
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -3573,6 +3523,37 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set,
"%llu\n");
+static int
+i915_perf_noa_delay_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+
+ /*
+	 * This would lead to infinite waits, since we compute timestamp
+	 * differences on the CS with only 32 bits.
+ */
+ if (val > mul_u32_u32(U32_MAX, clk))
+ return -EINVAL;
+
+ atomic64_set(&i915->perf.noa_programming_delay, val);
+ return 0;
+}
+
+static int
+i915_perf_noa_delay_get(void *data, u64 *val)
+{
+ struct drm_i915_private *i915 = data;
+
+ *val = atomic64_read(&i915->perf.noa_programming_delay);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
+ i915_perf_noa_delay_get,
+ i915_perf_noa_delay_set,
+ "%llu\n");
+
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
@@ -3582,6 +3563,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
+#define DROP_RCU BIT(9)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
@@ -3590,7 +3572,8 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
DROP_SHRINK_ALL |\
DROP_IDLE | \
DROP_RESET_ACTIVE | \
- DROP_RESET_SEQNO)
+ DROP_RESET_SEQNO | \
+ DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -3598,58 +3581,48 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
-
static int
-i915_drop_caches_set(void *data, u64 val)
+gt_drop_caches(struct intel_gt *gt, u64 val)
{
- struct drm_i915_private *i915 = data;
-
- DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
- val, val & DROP_ALL);
+ int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(&i915->gt),
- I915_IDLE_ENGINES_TIMEOUT))
- intel_gt_set_wedged(&i915->gt);
+ wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ intel_gt_set_wedged(gt);
- /* No need to check and wait for gpu resets, only libdrm auto-restarts
- * on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
- int ret;
+ if (val & DROP_RETIRE)
+ intel_gt_retire_requests(gt);
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (val & (DROP_IDLE | DROP_ACTIVE)) {
+ ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
+ }
- /*
- * To finish the flush of the idle_worker, we must complete
- * the switch-to-kernel-context, which requires a double
- * pass through wait_for_idle: first queues the switch,
- * second waits for the switch.
- */
- if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_IDLE) {
+ ret = intel_gt_pm_wait_for_idle(gt);
+ if (ret)
+ return ret;
+ }
- if (ret == 0 && val & DROP_IDLE)
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
+ intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
- if (val & DROP_RETIRE)
- i915_retire_requests(i915);
+ return 0;
+}
- mutex_unlock(&i915->drm.struct_mutex);
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ int ret;
- if (ret == 0 && val & DROP_IDLE)
- ret = intel_gt_pm_wait_for_idle(&i915->gt);
- }
+ DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+ val, val & DROP_ALL);
- if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
- intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
+ ret = gt_drop_caches(&i915->gt, val);
+ if (ret)
+ return ret;
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -3662,10 +3635,8 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(i915);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE) {
- flush_delayed_work(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
- }
+ if (val & DROP_RCU)
+ rcu_barrier();
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
@@ -3721,6 +3692,15 @@ i915_cache_sharing_set(void *data, u64 val)
return 0;
}
+static void
+intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
+ u8 *to_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
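
The new helper treats subslice_mask as a flat byte array strided per slice: slice s owns ss_stride bytes starting at byte s * ss_stride. A standalone illustration with made-up values:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustration of the strided layout used by
     * intel_sseu_copy_subslices(): copying slice 1's bytes moves
     * exactly src[2..3] when ss_stride is 2. */
    int main(void)
    {
        uint8_t src[6] = { 0x0f, 0x00, 0x03, 0x00, 0x01, 0x00 };
        uint8_t dst[6] = { 0 };
        int ss_stride = 2, slice = 1;

        memcpy(&dst[slice * ss_stride], &src[slice * ss_stride], ss_stride);
        assert(dst[2] == 0x03 && dst[0] == 0 && dst[4] == 0);
        return 0;
    }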
+
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
@@ -3794,12 +3774,13 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
continue;
sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
@@ -3845,18 +3826,21 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv))
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
+ u8 ss_idx = s * info->sseu.ss_stride +
+ ss / BITS_PER_BYTE;
if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[s] |= BIT(ss);
+ sseu->subslice_mask[ss_idx] |=
+ BIT(ss % BITS_PER_BYTE);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
@@ -3873,25 +3857,23 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
int s;
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->eu_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
- }
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu =
- RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
@@ -3938,6 +3920,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
@@ -3945,14 +3928,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
+ i915_print_sseu_info(m, true, &info->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
- sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
- sseu.max_eus_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+ intel_sseu_set_info(&sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
@@ -3973,13 +3955,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- file->private_data =
- (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_user_get(&i915->uncore);
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
return 0;
}
@@ -3987,13 +3968,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(&i915->runtime_pm,
- (intel_wakeref_t)(uintptr_t)file->private_data);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_put(&i915->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
return 0;
}
@@ -4302,7 +4282,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
@@ -4339,6 +4318,7 @@ static const struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {
+ {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -4528,7 +4508,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc_params.compression_enable));
+ yesno(crtc_state->dsc.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "Force_DSC_Enable: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bb6f86c7067a..3c512c571e60 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,7 +36,6 @@
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>
@@ -54,16 +53,17 @@
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
-#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
+#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -72,10 +72,12 @@
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
+#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
+#include "intel_memory_region.h"
#include "intel_pm.h"
static struct drm_driver driver;
@@ -269,182 +271,97 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-/* true = enable decode, false = disable decoder */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = cookie;
-
- intel_modeset_vga_set_state(dev_priv, state);
- if (state)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static int i915_resume_switcheroo(struct drm_i915_private *i915);
-static int i915_suspend_switcheroo(struct drm_i915_private *i915,
- pm_message_t state);
-
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
- struct drm_i915_private *i915 = pdev_to_i915(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
- if (!i915) {
- dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
- return;
- }
-
- if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
- i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- /* i915 resume handler doesn't set to D0 */
- pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(i915);
- i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
- } else {
- pr_info("switched off\n");
- i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(i915, pmm);
- i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
- }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
- struct drm_i915_private *i915 = pdev_to_i915(pdev);
-
- /*
- * FIXME: open_count is protected by drm_global_mutex but that would lead to
- * locking inversion with the driver load path. And the access here is
- * completely racy anyway. So don't bother with locking for now.
- */
- return i915 && i915->drm.open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
- .set_gpu_state = i915_switcheroo_set_state,
- .reprobe = NULL,
- .can_switch = i915_switcheroo_can_switch,
-};
-
-static int i915_driver_modeset_probe(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_probe_failure(dev_priv))
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(dev_priv)) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
if (ret)
goto out;
}
- intel_bios_init(dev_priv);
+ intel_bios_init(i915);
- /* If we have > 1 VGA cards, then we need to arbitrate access
- * to the common VGA resources.
- *
- * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
- * then we do not take part in VGA arbitration and the
- * vga_client_register() fails with -ENODEV.
- */
- ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
+ ret = intel_vga_register(i915);
+ if (ret)
goto out;
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+ ret = i915_switcheroo_register(i915);
if (ret)
goto cleanup_vga_client;
- /* must happen before intel_power_domains_init_hw() on VLV/CHV */
- intel_update_rawclk(dev_priv);
+ intel_power_domains_init_hw(i915, false);
- intel_power_domains_init_hw(dev_priv, false);
+ intel_csr_ucode_init(i915);
- intel_csr_ucode_init(dev_priv);
-
- ret = intel_irq_install(dev_priv);
+ ret = intel_irq_install(i915);
if (ret)
goto cleanup_csr;
- intel_gmbus_setup(dev_priv);
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- ret = intel_modeset_init(dev);
+ ret = intel_modeset_init(i915);
if (ret)
goto cleanup_irq;
- ret = i915_gem_init(dev_priv);
+ ret = i915_gem_init(i915);
if (ret)
goto cleanup_modeset;
- intel_overlay_setup(dev_priv);
+ intel_overlay_setup(i915);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
return 0;
- ret = intel_fbdev_init(dev);
+ ret = intel_fbdev_init(&i915->drm);
if (ret)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev_priv);
+ intel_hpd_init(i915);
- intel_init_ipc(dev_priv);
+ intel_init_ipc(i915);
return 0;
cleanup_gem:
- i915_gem_suspend(dev_priv);
- i915_gem_driver_remove(dev_priv);
- i915_gem_driver_release(dev_priv);
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
cleanup_modeset:
- intel_modeset_driver_remove(dev);
+ intel_modeset_driver_remove(i915);
cleanup_irq:
- intel_irq_uninstall(dev_priv);
- intel_gmbus_teardown(dev_priv);
+ intel_irq_uninstall(i915);
cleanup_csr:
- intel_csr_ucode_fini(dev_priv);
- intel_power_domains_driver_remove(dev_priv);
- vga_switcheroo_unregister_client(pdev);
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
+ i915_switcheroo_unregister(i915);
cleanup_vga_client:
- vga_client_register(pdev, NULL, NULL, NULL);
+ intel_vga_unregister(i915);
out:
return ret;
}
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
- struct apertures_struct *ap;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool primary;
- int ret;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
+ intel_modeset_driver_remove(i915);
- ap->ranges[0].base = ggtt->gmadr.start;
- ap->ranges[0].size = ggtt->mappable_end;
+ intel_irq_uninstall(i915);
- primary =
- pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ intel_bios_driver_remove(i915);
- ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ i915_switcheroo_unregister(i915);
- kfree(ap);
+ intel_vga_unregister(i915);
- return ret;
+ intel_csr_ucode_fini(i915);
}
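The switcheroo plumbing deleted above is not dropped but moved behind the i915_switcheroo_register()/i915_switcheroo_unregister() calls from the new i915_switcheroo.h included at the top of the file. A plausible sketch of the extracted helper, reassembled purely from the lines removed in this hunk (the real i915_switcheroo.c may differ):

/* Hypothetical reconstruction, reusing the ops struct deleted above;
 * assumes <linux/vga_switcheroo.h> and "i915_drv.h". */
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};

int i915_switcheroo_register(struct drm_i915_private *i915)
{
        /* same registration call as the one removed from probe above */
        return vga_switcheroo_register_client(i915->drm.pdev,
                                              &i915_switcheroo_ops, false);
}

void i915_switcheroo_unregister(struct drm_i915_private *i915)
{
        vga_switcheroo_unregister_client(i915->drm.pdev);
}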
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -601,9 +518,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_gt_init_early(&dev_priv->gt, dev_priv);
- ret = i915_gem_init_early(dev_priv);
- if (ret < 0)
- goto err_gt;
+ i915_gem_init_early(dev_priv);
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
@@ -625,7 +540,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
-err_gt:
intel_gt_driver_late_release(&dev_priv->gt);
vlv_free_s0ix_state(dev_priv);
err_workqueues:
@@ -683,12 +597,10 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
intel_uc_init_mmio(&dev_priv->gt.uc);
- ret = intel_engines_init_mmio(dev_priv);
+ ret = intel_engines_init_mmio(&dev_priv->gt);
if (ret)
goto err_uncore;
- i915_gem_init_mmio(dev_priv);
-
return 0;
err_uncore:
@@ -706,7 +618,7 @@ err_bridge:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(dev_priv);
+ intel_engines_cleanup(&dev_priv->gt);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1160,8 +1072,8 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
@@ -1249,32 +1161,24 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ if (ret)
goto err_ggtt;
- }
- ret = vga_remove_vgacon(pdev);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
goto err_ggtt;
- }
- ret = i915_ggtt_init_hw(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
goto err_ggtt;
- intel_gt_init_hw(dev_priv);
+ intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
pci_set_master(pdev);
@@ -1291,7 +1195,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
@@ -1309,16 +1213,13 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_sanitize_gt_powersave(dev_priv);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1364,6 +1265,8 @@ err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
+err_mem_regions:
+ intel_memory_regions_driver_release(dev_priv);
err_ggtt:
i915_ggtt_driver_release(dev_priv);
err_perf:
@@ -1418,14 +1321,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (HAS_DISPLAY(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
}
- if (IS_GEN(dev_priv, 5))
- intel_gpu_ips_init(dev_priv);
+ intel_gt_driver_register(&dev_priv->gt);
intel_audio_init(dev_priv);
@@ -1442,7 +1344,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1468,7 +1370,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
- intel_gpu_ips_teardown();
+ intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
@@ -1577,6 +1479,23 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ /*
+ * Check if we support fake LMEM -- for now we only unleash this for
+ * the live selftests (test-and-exit).
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
+ if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ i915_modparams.fake_lmem_start) {
+ mkwrite_device_info(dev_priv)->memory_regions =
+ REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+ mkwrite_device_info(dev_priv)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(dev_priv));
+ GEM_BUG_ON(!IS_DGFX(dev_priv));
+ }
+ }
+#endif
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
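The fake-LMEM block above leans on IS_ENABLED() folding to a compile-time constant, so the whole branch disappears when the Kconfig option is off while still being parsed and type-checked. A minimal sketch of that pattern (example_init and the message text are illustrative, not from the driver):

#include <linux/kconfig.h>
#include <linux/printk.h>

static void example_init(void)
{
        /*
         * IS_ENABLED() evaluates to 0 or 1 at compile time, so the
         * dead branch is discarded entirely -- unlike #ifdef, the
         * code inside still gets full compiler type-checking.
         */
        if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST))
                pr_info("selftests built in\n");
}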
@@ -1597,7 +1516,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(&dev_priv->drm);
+ ret = i915_driver_modeset_probe(dev_priv);
if (ret < 0)
goto out_cleanup_hw;
@@ -1611,10 +1530,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_cleanup_hw:
i915_driver_hw_remove(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
-
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_sanitize_gt_powersave(dev_priv);
out_cleanup_mmio:
i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
@@ -1630,8 +1547,6 @@ out_fini:
void i915_driver_remove(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
-
disable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_unregister(i915);
@@ -1652,19 +1567,9 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_modeset_driver_remove(&i915->drm);
-
- intel_bios_driver_remove(i915);
-
- vga_switcheroo_unregister_client(pdev);
- vga_client_register(pdev, NULL, NULL, NULL);
-
- intel_csr_ucode_fini(i915);
+ i915_driver_modeset_remove(i915);
- /* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_reset_error_state(i915);
-
i915_gem_driver_remove(i915);
intel_power_domains_driver_remove(i915);
@@ -1683,11 +1588,9 @@ static void i915_driver_release(struct drm_device *dev)
i915_gem_driver_release(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_sanitize_gt_powersave(dev_priv);
-
i915_driver_mmio_release(dev_priv);
enable_rpm_wakeref_asserts(rpm);
@@ -1731,12 +1634,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
i915_gem_context_close(file);
i915_gem_release(dev, file);
- mutex_unlock(&dev->struct_mutex);
- kfree(file_priv);
+ kfree_rcu(file_priv, rcu);
/* Catch up with all the deferred frees from "this" client */
i915_gem_flush_free_objects(to_i915(dev));
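Freeing file_priv with kfree_rcu() pairs with the union added to struct drm_i915_file_private further down in i915_drv.h: once the drm_file pointer is no longer needed, its storage is reused as the rcu_head, so lookups still dereferencing file_priv under rcu_read_lock() stay safe. A minimal, self-contained sketch of the pattern (the struct and function names here are illustrative, not the driver's):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_priv {
        union {
                void *file;          /* valid while the object is live */
                struct rcu_head rcu; /* reused once the object is dying */
        };
};

static void example_close(struct example_priv *priv)
{
        /*
         * kfree_rcu() defers the kfree() until after an RCU grace
         * period, so concurrent RCU readers can finish with priv
         * before its memory is returned to the allocator.
         */
        kfree_rcu(priv, rcu);
}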
@@ -1891,8 +1792,7 @@ out:
return ret;
}
-static int
-i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
@@ -1916,18 +1816,17 @@ static int i915_drm_resume(struct drm_device *dev)
int ret;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_sanitize_gt_powersave(dev_priv);
- i915_gem_sanitize(dev_priv);
+ intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6);
+
+ intel_gt_sanitize(&dev_priv->gt, true);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1952,7 +1851,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -2049,18 +1948,14 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_display_power_resume_early(dev_priv);
- intel_sanitize_gt_powersave(dev_priv);
-
intel_power_domains_resume(dev_priv);
- intel_gt_sanitize(&dev_priv->gt, true);
-
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
-static int i915_resume_switcheroo(struct drm_i915_private *i915)
+int i915_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
@@ -2593,9 +2488,6 @@ static int intel_runtime_suspend(struct device *kdev)
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
- return -ENODEV;
-
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -2628,7 +2520,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
enable_rpm_wakeref_asserts(rpm);
@@ -2708,7 +2600,7 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 953e1d12c23c..e29bc137e7ba 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
@@ -84,6 +85,7 @@
#include "intel_device_info.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
+#include "intel_memory_region.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
@@ -92,12 +94,15 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
+#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"
+#include "intel_region_lmem.h"
+
#include "intel_gvt.h"
/* General customization:
@@ -105,8 +110,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190822"
-#define DRIVER_TIMESTAMP 1566477988
+#define DRIVER_DATE "20191101"
+#define DRIVER_TIMESTAMP 1572604873
struct drm_i915_gem_object;
@@ -185,7 +190,11 @@ struct i915_mmu_object;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
- struct drm_file *file;
+
+ union {
+ struct drm_file *file;
+ struct rcu_head rcu;
+ };
struct {
spinlock_t lock;
@@ -272,6 +281,7 @@ struct drm_i915_display_funcs {
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -284,7 +294,8 @@ struct drm_i915_display_funcs {
struct intel_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct intel_atomic_state *old_state);
- void (*update_crtcs)(struct intel_atomic_state *state);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+ void (*commit_modeset_disables)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -331,6 +342,7 @@ struct intel_csr {
i915_reg_t mmioaddr[20];
u32 mmiodata[20];
u32 dc_state;
+ u32 target_dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
};
@@ -479,6 +491,7 @@ struct i915_psr {
bool enabled;
struct intel_dp *dp;
enum pipe pipe;
+ enum transcoder transcoder;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -492,6 +505,9 @@ struct i915_psr {
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
+ bool dc3co_enabled;
+ u32 dc3co_exit_delay;
+ struct delayed_work idle_work;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -529,106 +545,6 @@ struct i915_suspend_saved_registers {
struct vlv_s0ix_state;
-struct intel_rps_ei {
- ktime_t ktime;
- u32 render_c0;
- u32 media_c0;
-};
-
-struct intel_rps {
- struct mutex lock; /* protects enabling and the worker */
-
- /*
- * work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
- */
- struct work_struct work;
- bool interrupts_enabled;
- u32 pm_iir;
-
- /* PM interrupt bits that should never be masked */
- u32 pm_intrmsk_mbz;
-
- /* Frequencies are stored in potentially platform dependent multiples.
- * In other words, *_freq needs to be multiplied by X to be interesting.
- * Soft limits are those which are used for the dynamic reclocking done
- * by the driver (raise frequencies under heavy loads, and lower for
- * lighter loads). Hard limits are those imposed by the hardware.
- *
- * A distinction is made for overclocking, which is never enabled by
- * default, and is considered to be above the hard limit if it's
- * possible at all.
- */
- u8 cur_freq; /* Current frequency (cached, may not == HW) */
- u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
- u8 max_freq_softlimit; /* Max frequency permitted by the driver */
- u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
- u8 min_freq; /* AKA RPn. Minimum frequency */
- u8 boost_freq; /* Frequency to request when wait boosting */
- u8 idle_freq; /* Frequency to request when we are idle */
- u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
- u8 rp1_freq; /* "less than" RP0 power/freqency */
- u8 rp0_freq; /* Non-overclocked max frequency. */
- u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
-
- int last_adj;
-
- struct {
- struct mutex mutex;
-
- enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
- unsigned int interactive;
-
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
- } power;
-
- bool enabled;
- atomic_t num_waiters;
- atomic_t boosts;
-
- /* manual wa residency calculations */
- struct intel_rps_ei ei;
-};
-
-struct intel_rc6 {
- bool enabled;
- u64 prev_hw_residency[4];
- u64 cur_residency[4];
-};
-
-struct intel_llc_pstate {
- bool enabled;
-};
-
-struct intel_gen6_power_mgmt {
- struct intel_rps rps;
- struct intel_rc6 rc6;
- struct intel_llc_pstate llc_pstate;
-};
-
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- u64 last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -677,6 +593,8 @@ struct i915_gem_mm {
*/
struct vfsmount *gemfs;
+ struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -688,11 +606,6 @@ struct i915_gem_mm {
*/
struct workqueue_struct *userptr_wq;
- /** Bit 6 swizzling required for X tiling */
- u32 bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- u32 bit_6_swizzle_y;
-
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
u32 shrink_count;
@@ -973,305 +886,6 @@ struct intel_wm_config {
bool sprites_scaled;
};
-struct i915_oa_format {
- u32 format;
- int size;
-};
-
-struct i915_oa_reg {
- i915_reg_t addr;
- u32 value;
-};
-
-struct i915_oa_config {
- char uuid[UUID_STRING_LEN + 1];
- int id;
-
- const struct i915_oa_reg *mux_regs;
- u32 mux_regs_len;
- const struct i915_oa_reg *b_counter_regs;
- u32 b_counter_regs_len;
- const struct i915_oa_reg *flex_regs;
- u32 flex_regs_len;
-
- struct attribute_group sysfs_metric;
- struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
-
- atomic_t ref_count;
-};
-
-struct i915_perf_stream;
-
-/**
- * struct i915_perf_stream_ops - the OPs to support a specific stream type
- */
-struct i915_perf_stream_ops {
- /**
- * @enable: Enables the collection of HW samples, either in response to
- * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
- * without `I915_PERF_FLAG_DISABLED`.
- */
- void (*enable)(struct i915_perf_stream *stream);
-
- /**
- * @disable: Disables the collection of HW samples, either in response
- * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
- * the stream.
- */
- void (*disable)(struct i915_perf_stream *stream);
-
- /**
- * @poll_wait: Call poll_wait, passing a wait queue that will be woken
- * once there is something ready to read() for the stream
- */
- void (*poll_wait)(struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait);
-
- /**
- * @wait_unlocked: For handling a blocking read, wait until there is
- * something to ready to read() for the stream. E.g. wait on the same
- * wait queue that would be passed to poll_wait().
- */
- int (*wait_unlocked)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy buffered metrics as records to userspace
- * **buf**: the userspace, destination buffer
- * **count**: the number of bytes to copy, requested by userspace
- * **offset**: zero at the start of the read, updated as the read
- * proceeds, it represents how many bytes have been copied so far and
- * the buffer offset for copying the next record.
- *
- * Copy as many buffered i915 perf samples and records for this stream
- * to userspace as will fit in the given buffer.
- *
- * Only write complete records; returning -%ENOSPC if there isn't room
- * for a complete record.
- *
- * Return any error condition that results in a short read such as
- * -%ENOSPC or -%EFAULT, even though these may be squashed before
- * returning to userspace.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @destroy: Cleanup any stream specific resources.
- *
- * The stream will always be disabled before this is called.
- */
- void (*destroy)(struct i915_perf_stream *stream);
-};
-
-/**
- * struct i915_perf_stream - state for a single open stream FD
- */
-struct i915_perf_stream {
- /**
- * @dev_priv: i915 drm device
- */
- struct drm_i915_private *dev_priv;
-
- /**
- * @link: Links the stream into ``&drm_i915_private->streams``
- */
- struct list_head link;
-
- /**
- * @wakeref: As we keep the device awake while the perf stream is
- * active, we track our runtime pm reference for later release.
- */
- intel_wakeref_t wakeref;
-
- /**
- * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
- * properties given when opening a stream, representing the contents
- * of a single sample as read() by userspace.
- */
- u32 sample_flags;
-
- /**
- * @sample_size: Considering the configured contents of a sample
- * combined with the required header size, this is the total size
- * of a single sample record.
- */
- int sample_size;
-
- /**
- * @ctx: %NULL if measuring system-wide across all contexts or a
- * specific context that is being monitored.
- */
- struct i915_gem_context *ctx;
-
- /**
- * @enabled: Whether the stream is currently enabled, considering
- * whether the stream was opened in a disabled state and based
- * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
- */
- bool enabled;
-
- /**
- * @ops: The callbacks providing the implementation of this specific
- * type of configured stream.
- */
- const struct i915_perf_stream_ops *ops;
-
- /**
- * @oa_config: The OA configuration used by the stream.
- */
- struct i915_oa_config *oa_config;
-
- /**
- * The OA context specific information.
- */
- struct intel_context *pinned_ctx;
- u32 specific_ctx_id;
- u32 specific_ctx_id_mask;
-
- struct hrtimer poll_check_timer;
- wait_queue_head_t poll_wq;
- bool pollin;
-
- bool periodic;
- int period_exponent;
-
- /**
- * State of the OA buffer.
- */
- struct {
- struct i915_vma *vma;
- u8 *vaddr;
- u32 last_ctx_id;
- int format;
- int format_size;
- int size_exponent;
-
- /**
- * Locks reads and writes to all head/tail state
- *
- * Consider: the head and tail pointer state needs to be read
- * consistently from a hrtimer callback (atomic context) and
- * read() fop (user context) with tail pointer updates happening
- * in atomic context and head updates in user context and the
- * (unlikely) possibility of read() errors needing to reset all
- * head/tail state.
- *
- * Note: Contention/performance aren't currently a significant
- * concern here considering the relatively low frequency of
- * hrtimer callbacks (5ms period) and that reads typically only
- * happen in response to a hrtimer event and likely complete
- * before the next callback.
- *
- * Note: This lock is not held *while* reading and copying data
- * to userspace so the value of head observed in htrimer
- * callbacks won't represent any partial consumption of data.
- */
- spinlock_t ptr_lock;
-
- /**
- * One 'aging' tail pointer and one 'aged' tail pointer ready to
- * used for reading.
- *
- * Initial values of 0xffffffff are invalid and imply that an
- * update is required (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * Index for the aged tail ready to read() data up to.
- */
- unsigned int aged_tail_idx;
-
- /**
- * A monotonic timestamp for when the current aging tail pointer
- * was read; used to determine when it is old enough to trust.
- */
- u64 aging_timestamp;
-
- /**
- * Although we can always read back the head pointer register,
- * we prefer to avoid trusting the HW state, just to avoid any
- * risk that some hardware condition could * somehow bump the
- * head pointer unpredictably and cause us to forward the wrong
- * OA buffer data to userspace.
- */
- u32 head;
- } oa_buffer;
-};
-
-/**
- * struct i915_oa_ops - Gen specific implementation of an OA unit stream
- */
-struct i915_oa_ops {
- /**
- * @is_valid_b_counter_reg: Validates register's address for
- * programming boolean counters for a particular platform.
- */
- bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
- u32 addr);
-
- /**
- * @is_valid_mux_reg: Validates register's address for programming mux
- * for a particular platform.
- */
- bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @is_valid_flex_reg: Validates register's address for programming
- * flex EU filtering for a particular platform.
- */
- bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @enable_metric_set: Selects and applies any MUX configuration to set
- * up the Boolean and Custom (B/C) counters that are part of the
- * counter reports being sampled. May apply system constraints such as
- * disabling EU clock gating as required.
- */
- int (*enable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @disable_metric_set: Remove system constraints associated with using
- * the OA unit.
- */
- void (*disable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @oa_enable: Enable periodic sampling
- */
- void (*oa_enable)(struct i915_perf_stream *stream);
-
- /**
- * @oa_disable: Disable periodic sampling
- */
- void (*oa_disable)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy data from the circular OA buffer into a given userspace
- * buffer.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @oa_hw_tail_read: read the OA tail pointer register
- *
- * In particular this enables us to share all the fiddly code for
- * handling the OA unit tail pointer race that affects multiple
- * generations.
- */
- u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
-};
-
struct intel_cdclk_state {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
@@ -1331,11 +945,11 @@ struct drm_i915_private {
*/
u32 gpio_mmio_base;
+ u32 hsw_psr_mmio_adjust;
+
/* MMIO base address for MIPI regs */
u32 mipi_mmio_base;
- u32 psr_mmio_base;
-
u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
@@ -1367,7 +981,6 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
- u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1397,13 +1010,14 @@ struct drm_i915_private {
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
+ /*
+ * For reading, holding any crtc lock is sufficient;
+ * for writing, all of them must be held.
+ */
struct {
/*
* The current logical cdclk state.
* See intel_atomic_state.cdclk.logical
- *
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
*/
struct intel_cdclk_state logical;
/*
@@ -1414,6 +1028,9 @@ struct drm_i915_private {
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
int force_min_cdclk;
} cdclk;
@@ -1428,6 +1045,8 @@ struct drm_i915_private {
/* ordered wq for modesets */
struct workqueue_struct *modeset_wq;
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip_wq;
/* Display functions */
struct drm_i915_display_funcs display;
@@ -1468,7 +1087,11 @@ struct drm_i915_private {
*/
struct mutex dpll_lock;
- unsigned int active_crtcs;
+ /*
+ * For reading active_pipes, min_cdclk and min_voltage_level,
+ * holding any crtc lock is sufficient; for writing, all of them
+ * must be held.
+ */
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -1497,13 +1120,6 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- /* gen6+ GT PM state */
- struct intel_gen6_power_mgmt gt_pm;
-
- /* ilk-only ips/rps state. Everything in here is protected by the global
- * mchdev_lock in intel_pm.c */
- struct intel_ilk_power_mgmt ips;
-
struct i915_power_domains power_domains;
struct i915_psr psr;
@@ -1528,25 +1144,7 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
int audio_power_refcount;
-
- struct {
- struct mutex mutex;
- struct list_head list;
- struct llist_head free_list;
- struct work_struct free_work;
-
- /* The hw wants to have a stable context identifier for the
- * lifetime of the context (for OA, PASID, faults, etc).
- * This is limited in execlists to 21 bits.
- */
- struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
- struct list_head hw_id_list;
- } contexts;
+ u32 audio_freq_cntrl;
u32 fdi_rx_config;
@@ -1572,6 +1170,8 @@ struct drm_i915_private {
I915_SAGV_NOT_CONTROLLED
} sagv_status;
+ u32 sagv_block_time_us;
+
struct {
/*
* Raw watermark latency values:
@@ -1642,61 +1242,7 @@ struct drm_i915_private {
struct intel_runtime_pm runtime_pm;
- struct {
- bool initialized;
-
- struct kobject *metrics_kobj;
- struct ctl_table_header *sysctl_header;
-
- /*
- * Lock associated with adding/modifying/removing OA configs
- * in dev_priv->perf.metrics_idr.
- */
- struct mutex metrics_lock;
-
- /*
- * List of dynamic configurations, you need to hold
- * dev_priv->perf.metrics_lock to access it.
- */
- struct idr metrics_idr;
-
- /*
- * Lock associated with anything below within this structure
- * except exclusive_stream.
- */
- struct mutex lock;
- struct list_head streams;
-
- /*
- * The stream currently using the OA unit. If accessed
- * outside a syscall associated to its file
- * descriptor, you need to hold
- * dev_priv->drm.struct_mutex.
- */
- struct i915_perf_stream *exclusive_stream;
-
- /**
- * For rate limiting any notifications of spurious
- * invalid OA reports
- */
- struct ratelimit_state spurious_report_rs;
-
- struct i915_oa_config test_config;
-
- u32 gen7_latched_oastatus1;
- u32 ctx_oactxctrl_offset;
- u32 ctx_flexeu0_offset;
-
- /**
- * The RPT_ID/reason field for Gen8+ includes a bit
- * to determine if the CTX ID in the report is valid
- * but the specific bit differs between Gen 8 and 9
- */
- u32 gen8_valid_ctx_bit;
-
- struct i915_oa_ops ops;
- const struct i915_oa_format *oa_formats;
- } perf;
+ struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct intel_gt gt;
@@ -1704,34 +1250,19 @@ struct drm_i915_private {
struct {
struct notifier_block pm_notifier;
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * When we detect an idle GPU, we want to turn on
- * powersaving features. So once we see that there
- * are no more requests outstanding and no more
- * arrive within a small period of time, we fire
- * off the idle_work.
- */
- struct work_struct idle_work;
+ struct i915_gem_contexts {
+ spinlock_t lock; /* locks list */
+ struct list_head list;
+
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } contexts;
} gem;
u8 pch_ssc_use;
- /* For i945gm vblank irq vs. C3 workaround */
- struct {
- struct work_struct work;
- struct pm_qos_request pm_qos;
- u8 c3_disable_latency;
- u8 enabled;
- } i945gm_vblank;
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -1794,10 +1325,10 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
(tmp__) ? \
- ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
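The reworked for_each_engine_masked() still peels one engine index per iteration off a temporary copy of the mask; __mask_next_bit() yields the lowest set bit and clears it from tmp__. A standalone illustration of that bit-peeling loop (plain C, with a made-up mask value):

#include <stdio.h>

int main(void)
{
        unsigned int mask = 0x2d;       /* engines 0, 2, 3, 5 */

        while (mask) {
                /* index of the lowest set bit */
                int bit = __builtin_ctz(mask);

                /* clear that bit, mirroring __mask_next_bit() */
                mask &= mask - 1;

                printf("engine %d\n", bit);
        }
        return 0;
}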
#define rb_to_uabi_engine(rb) \
@@ -1853,6 +1384,8 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
INTEL_INFO(dev_priv)->gen == (n))
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -1924,6 +1457,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
}
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2058,6 +1592,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
+#define TGL_REVID_A0 0x0
+
+#define IS_TGL_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
@@ -2075,9 +1614,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define VEBOX_MASK(dev_priv) \
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
+ * All later gens can run the final buffer from the ppgtt.
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2110,10 +1656,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
+ (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || \
- IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+ (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -2155,6 +1703,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
/* Having GuC is not the same as using GuC */
@@ -2178,7 +1729,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
+
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+
+/* Only valid when HAS_DISPLAY() is true */
+#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
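With num_pipes replaced by a pipe_mask bitmask, the pipe count becomes a population count (hweight8 in the kernel) and the display check a simple non-zero test. A tiny standalone illustration (the mask value is made up):

#include <stdio.h>

int main(void)
{
        unsigned char pipe_mask = 0x07; /* pipes A, B and C present */

        /* INTEL_NUM_PIPES(): population count of the mask */
        printf("pipes: %d\n", __builtin_popcount(pipe_mask));

        /* HAS_DISPLAY(): reduces to a non-zero test on the mask */
        printf("has display: %d\n", pipe_mask != 0);
        return 0;
}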
static inline bool intel_vtd_active(void)
{
@@ -2211,7 +1767,9 @@ extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+int i915_resume_switcheroo(struct drm_i915_private *i915);
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
@@ -2230,12 +1788,13 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
+
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
/*
@@ -2284,6 +1843,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags);
+
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
static inline int __must_check
@@ -2312,15 +1879,11 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
-void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
@@ -2360,7 +1923,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
@@ -2376,9 +1939,9 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
@@ -2393,12 +1956,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *cxt,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master);
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start);
/* intel_device_info.c */
static inline struct intel_device_info *
@@ -2480,4 +2045,10 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
+static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
+{
+ return intel_guc_is_submission_supported(guc) &&
+ intel_guc_is_running(guc);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0f94f239919..b9eb6b3149b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -45,13 +45,14 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
-#include "gem/i915_gemfs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_renderstate.h"
+#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
@@ -62,20 +63,31 @@
#include "intel_pm.h"
static int
-insert_mappable_node(struct i915_ggtt *ggtt,
- struct drm_mm_node *node, u32 size)
+insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
+ int err;
+
+ err = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (err)
+ return err;
+
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
- size, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
+ size, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ return err;
}
static void
-remove_mappable_node(struct drm_mm_node *node)
+remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(node);
+ mutex_unlock(&ggtt->vm.mutex);
}
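Both helpers now take the GGTT explicitly so they can serialize the drm_mm under ggtt->vm.mutex instead of the old struct_mutex, and the allocation path takes the lock interruptibly so a blocked caller can still take a signal. A minimal sketch of that interruptible-lock pattern (generic names, not the driver's):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_alloc(void)
{
        int err;

        /* returns -EINTR if a signal arrives while waiting */
        err = mutex_lock_interruptible(&example_lock);
        if (err)
                return err;

        /* ... allocation work under the lock ... */

        mutex_unlock(&example_lock);
        return 0;
}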
int
@@ -87,7 +99,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- mutex_lock(&ggtt->vm.mutex);
+ if (mutex_lock_interruptible(&ggtt->vm.mutex))
+ return -EINTR;
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
@@ -109,20 +122,24 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
LIST_HEAD(still_in_list);
int ret = 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
+ struct i915_address_space *vm = vma->vm;
+
+ ret = -EBUSY;
+ if (!i915_vm_tryopen(vm))
+ break;
+
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
- ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
+ i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
list_splice(&still_in_list, &obj->vma.list);
@@ -338,10 +355,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
u64 remain, offset;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = ERR_PTR(-ENODEV);
if (!i915_gem_object_is_tiled(obj))
@@ -351,16 +364,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out_unlock;
- GEM_BUG_ON(!node.allocated);
+ goto out_rpm;
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -393,7 +404,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -414,17 +425,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
-out_unlock:
+out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return ret;
}
@@ -531,10 +539,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
void __user *user_data;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
@@ -544,10 +548,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* using the cache bypass of indirect GGTT access.
*/
wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (!wakeref) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ if (!wakeref)
+ return -EFAULT;
} else {
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(rpm);
@@ -561,16 +563,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
- GEM_BUG_ON(!node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
@@ -634,18 +634,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
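Throughout these hunks the open-coded node.allocated flag gives way to drm_mm_node_allocated(), which tests the allocation bit drm_mm itself maintains; callers just zero node.flags up front. A short sketch of the idiom, assuming only the documented drm_mm API (the size and names are illustrative):

#include <linux/string.h>
#include <drm/drm_mm.h>

static int example_use_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        int err;

        memset(node, 0, sizeof(*node)); /* flags = 0: not allocated */

        err = drm_mm_insert_node(mm, node, 4096);
        if (err)
                return err;

        WARN_ON(!drm_mm_node_allocated(node)); /* set by the insert */
        drm_mm_remove_node(node);              /* clears the bit again */
        return 0;
}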
@@ -887,74 +884,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static long
-wait_for_timelines(struct drm_i915_private *i915,
- unsigned int wait, long timeout)
-{
- struct intel_gt_timelines *timelines = &i915->gt.timelines;
- struct intel_timeline *tl;
- unsigned long flags;
-
- spin_lock_irqsave(&timelines->lock, flags);
- list_for_each_entry(tl, &timelines->active_list, link) {
- struct i915_request *rq;
-
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
- continue;
-
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- /*
- * "Race-to-idle".
- *
- * Switching to the kernel context is often used a synchronous
- * step prior to idling, e.g. in suspend for flushing all
- * current operations to memory before sleeping. These we
- * want to complete as quickly as possible to avoid prolonged
- * stalls, so allow the gpu to boost to maximum clocks.
- */
- if (wait & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq);
-
- timeout = i915_request_wait(rq, wait, timeout);
- i915_request_put(rq);
- if (timeout < 0)
- return timeout;
-
- /* restart after reacquiring the lock */
- spin_lock_irqsave(&timelines->lock, flags);
- tl = list_entry(&timelines->active_list, typeof(*tl), link);
- }
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
-{
- /* If the device is asleep, we have no requests outstanding */
- if (!intel_gt_pm_is_awake(&i915->gt))
- return 0;
-
- GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
-
- timeout = wait_for_timelines(i915, flags, timeout);
- if (timeout < 0)
- return timeout;
-
- if (flags & I915_WAIT_LOCKED) {
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- i915_retire_requests(i915);
- }
-
- return 0;
-}
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -964,11 +893,23 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.vm;
+
+ return i915_gem_object_pin(obj, vm, view, size, alignment,
+ flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
if (i915_gem_object_never_bind_ggtt(obj))
return ERR_PTR(-ENODEV);
@@ -1018,13 +959,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
- WARN(i915_vma_is_pinned(vma),
- "bo is already pinned in ggtt with incorrect alignment:"
- " offset=%08x, req.alignment=%llx,"
- " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
- i915_ggtt_offset(vma), alignment,
- !!(flags & PIN_MAPPABLE),
- i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
@@ -1038,7 +972,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin(vma, size, alignment, flags);
if (ret)
return ERR_PTR(ret);
@@ -1119,128 +1053,7 @@ out:
return err;
}
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- /*
- * As we have just resumed the machine and woken the device up from
- * deep PCI sleep (presumably D3_cold), assume the HW has been reset
- * back to defaults, recovering from whatever wedged state we left it
- * in and so worth trying to use the device once more.
- */
- if (intel_gt_is_wedged(&i915->gt))
- intel_gt_unset_wedged(&i915->gt);
-
- /*
- * If we inherit context state from the BIOS or earlier occupants
- * of the GPU, the GPU may be in an inconsistent state when we
- * try to take over. The only way to remove the earlier state
- * is by resetting. However, resetting on earlier gen is tricky as
- * it may impact the display and we are uncertain about the stability
- * of the reset, so this could be applied to even earlier gen.
- */
- intel_gt_sanitize(&i915->gt, false);
-
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
-static void init_unused_ring(struct intel_gt *gt, u32 base)
-{
- struct intel_uncore *uncore = gt->uncore;
-
- intel_uncore_write(uncore, RING_CTL(base), 0);
- intel_uncore_write(uncore, RING_HEAD(base), 0);
- intel_uncore_write(uncore, RING_TAIL(base), 0);
- intel_uncore_write(uncore, RING_START(base), 0);
-}
-
-static void init_unused_rings(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- if (IS_I830(i915)) {
- init_unused_ring(gt, PRB1_BASE);
- init_unused_ring(gt, SRB0_BASE);
- init_unused_ring(gt, SRB1_BASE);
- init_unused_ring(gt, SRB2_BASE);
- init_unused_ring(gt, SRB3_BASE);
- } else if (IS_GEN(i915, 2)) {
- init_unused_ring(gt, SRB0_BASE);
- init_unused_ring(gt, SRB1_BASE);
- } else if (IS_GEN(i915, 3)) {
- init_unused_ring(gt, PRB1_BASE);
- init_unused_ring(gt, PRB2_BASE);
- }
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_gt *gt = &i915->gt;
- int ret;
-
- BUG_ON(!i915->kernel_context);
- ret = intel_gt_terminally_wedged(gt);
- if (ret)
- return ret;
-
- gt->last_init_time = ktime_get();
-
- /* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
- intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
-
- if (IS_HASWELL(i915))
- intel_uncore_write(uncore,
- MI_PREDICATE_RESULT_2,
- IS_HSW_GT3(i915) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
-
- /* Apply the GT workarounds... */
- intel_gt_apply_workarounds(gt);
- /* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(gt, "init");
-
- intel_gt_init_swizzling(gt);
-
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Make sure all unused rings
- * are totally idle.
- */
- init_unused_rings(gt);
-
- ret = i915_ppgtt_init_hw(gt);
- if (ret) {
- DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
- goto out;
- }
-
- /* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(&gt->uc);
- if (ret) {
- i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
- goto out;
- }
-
- intel_mocs_init(gt);
-
-out:
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- return ret;
-}
-
-static int __intel_engines_record_defaults(struct drm_i915_private *i915)
+static int __intel_engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
@@ -1256,7 +1069,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values.
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
@@ -1264,7 +1077,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
GEM_BUG_ON(!engine->kernel_context);
engine->serial++; /* force the kernel context switch */
- ce = intel_context_create(i915->kernel_context, engine);
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -1281,15 +1095,6 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_rq;
- /*
- * Failing to program the MOCS is non-fatal. The system will not
- * run at peak performance. So warn the user and carry on.
- */
- err = intel_mocs_emit(rq);
- if (err)
- dev_notice(i915->drm.dev,
- "Failed to program MOCS registers; expect performance issues.\n");
-
err = intel_renderstate_emit(rq);
if (err)
goto err_rq;
@@ -1302,7 +1107,7 @@ err_rq:
}
/* Flush the default context image to memory, and enable powersaving. */
- if (!i915_gem_load_power_context(i915)) {
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
err = -EIO;
goto out;
}
@@ -1361,7 +1166,7 @@ out:
* this is by declaring ourselves wedged.
*/
if (err)
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct intel_context *ce;
@@ -1378,18 +1183,7 @@ out:
return err;
}
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
- return intel_gt_init_scratch(&i915->gt, size);
-}
-
-static void i915_gem_fini_scratch(struct drm_i915_private *i915)
-{
- intel_gt_fini_scratch(&i915->gt);
-}
-
-static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
+static int intel_engines_verify_workarounds(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1398,7 +1192,7 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_verify_workarounds(engine, "load"))
err = -EIO;
}
@@ -1430,7 +1224,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_init_ggtt(dev_priv);
@@ -1439,36 +1232,29 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_init_scratch(dev_priv,
- IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_ggtt;
- }
+ intel_gt_init(&dev_priv->gt);
- ret = intel_engines_setup(dev_priv);
+ ret = intel_engines_setup(&dev_priv->gt);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_contexts(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_scratch;
}
- ret = intel_engines_init(dev_priv);
+ ret = intel_engines_init(&dev_priv->gt);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_context;
}
- intel_init_gt_powersave(dev_priv);
-
intel_uc_init(&dev_priv->gt.uc);
- ret = i915_gem_init_hw(dev_priv);
+ ret = intel_gt_init_hw(&dev_priv->gt);
if (ret)
goto err_uc_init;
@@ -1488,24 +1274,23 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_engines_verify_workarounds(dev_priv);
+ ret = intel_engines_verify_workarounds(&dev_priv->gt);
if (ret)
goto err_gt;
- ret = __intel_engines_record_defaults(dev_priv);
+ ret = __intel_engines_record_defaults(&dev_priv->gt);
if (ret)
goto err_gt;
- ret = i915_inject_load_error(dev_priv, -ENODEV);
+ ret = i915_inject_probe_error(dev_priv, -ENODEV);
if (ret)
goto err_gt;
- ret = i915_inject_load_error(dev_priv, -EIO);
+ ret = i915_inject_probe_error(dev_priv, -EIO);
if (ret)
goto err_gt;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@@ -1516,32 +1301,25 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_gt:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged_on_init(&dev_priv->gt);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
if (ret != -EIO) {
intel_uc_fini(&dev_priv->gt.uc);
- intel_cleanup_gt_powersave(dev_priv);
- intel_engines_cleanup(dev_priv);
+ intel_engines_cleanup(&dev_priv->gt);
}
err_context:
if (ret != -EIO)
- i915_gem_contexts_fini(dev_priv);
+ i915_gem_driver_release__contexts(dev_priv);
err_scratch:
- i915_gem_fini_scratch(dev_priv);
-err_ggtt:
+ intel_gt_driver_release(&dev_priv->gt);
err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret != -EIO) {
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
@@ -1550,8 +1328,6 @@ err_unlock:
}
if (ret == -EIO) {
- mutex_lock(&dev_priv->drm.struct_mutex);
-
/*
* Allow engines or uC initialisation to fail by marking the GPU
* as wedged. But we only want to do this when the GPU is angry,
@@ -1566,10 +1342,8 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
i915_gem_drain_freed_objects(dev_priv);
@@ -1590,48 +1364,35 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- GEM_BUG_ON(dev_priv->gt.awake);
-
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_disable_gt_powersave(dev_priv);
+ intel_gt_driver_remove(&dev_priv->gt);
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(&dev_priv->gt.uc);
intel_uc_fini(&dev_priv->gt.uc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_engines_cleanup(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- i915_gem_fini_scratch(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_engines_cleanup(&dev_priv->gt);
+ i915_gem_driver_release__contexts(dev_priv);
+ intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
- intel_cleanup_gt_powersave(dev_priv);
-
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
-void i915_gem_init_mmio(struct drm_i915_private *i915)
-{
- i915_gem_sanitize(i915);
+ WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1646,20 +1407,11 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
i915_gem_init__objects(i915);
}
-int i915_gem_init_early(struct drm_i915_private *dev_priv)
+void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err;
-
i915_gem_init__mm(dev_priv);
- i915_gem_init__pm(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
-
- err = i915_gemfs_init(dev_priv);
- if (err)
- DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
-
- return 0;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -1668,8 +1420,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
-
- i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 6795f1daa3d5..f6f9675848b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -37,10 +37,8 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
- pr_err("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
- GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
BUG(); \
} \
} while(0)
@@ -66,11 +64,16 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#define GEM_TRACE_ERR(...) do { \
+ pr_err(__VA_ARGS__); \
+ trace_printk(__VA_ARGS__); \
+} while (0)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
#define GEM_TRACE_DUMP_ON(expr) \
do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_ERR(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
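For orientation, a minimal usage sketch of the new macro (the call site is invented for illustration, not part of this patch): with CONFIG_DRM_I915_TRACE_GEM enabled, GEM_TRACE_ERR mirrors the message to both the kernel log via pr_err() and the ftrace ring buffer via trace_printk(), so a GEM_BUG_ON report remains visible whichever sink is being watched.

/* Hypothetical caller, for illustration only. */
if (unlikely(err)) {
	GEM_TRACE_ERR("%s:%d init failed with %d\n",
		      __func__, __LINE__, err);
	GEM_TRACE_DUMP(); /* dump the ftrace buffer for post-mortem */
}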
@@ -83,6 +86,11 @@ static inline void tasklet_lock(struct tasklet_struct *t)
cpu_relax();
}
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 52c86c6e0673..7e62c310290f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -29,6 +29,7 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
-static int ggtt_flush(struct drm_i915_private *i915)
+static int ggtt_flush(struct intel_gt *gt)
{
/*
* Not everything in the GGTT is tracked via vma (otherwise we
@@ -46,10 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- return i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
static bool
@@ -70,7 +68,7 @@ mark_free(struct drm_mm_scan *scan,
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
@@ -91,11 +89,10 @@ mark_free(struct drm_mm_scan *scan,
int
i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan;
struct list_head eviction_list;
struct i915_vma *vma, *next;
@@ -104,7 +101,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *active;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -124,17 +121,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
drm_mm_scan_init_with_range(&scan, &vm->mm,
- min_size, alignment, cache_level,
+ min_size, alignment, color,
start, end, mode);
- /*
- * Retire before we search the active list. Although we have
- * reasonable accuracy in our retirement lists, we may have
- * a stray pin (preventing eviction) that can only be resolved by
- * retiring.
- */
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(dev_priv);
+ intel_gt_retire_requests(vm->gt);
search_again:
active = NULL;
@@ -207,7 +197,7 @@ search_again:
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
- ret = ggtt_flush(dev_priv);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
@@ -235,12 +225,12 @@ found:
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -266,25 +256,23 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
u64 start = target->start;
u64 end = start + target->size;
struct i915_vma *vma, *next;
- bool check_color;
int ret = 0;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
trace_i915_gem_evict_node(vm, target, flags);
- /* Retire before we search the active list. Although we have
+ /*
+ * Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
*/
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(vm->i915);
+ intel_gt_retire_requests(vm->gt);
- check_color = vm->mm.color_adjust;
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start)
start -= I915_GTT_PAGE_SIZE;
@@ -301,7 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
@@ -310,7 +298,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* abut and conflict. If they are in conflict, then we evict
* those as well to make room for our guard pages.
*/
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
if (node->start + node->size == target->start) {
if (node->color == target->color)
continue;
@@ -351,7 +339,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -375,7 +363,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
struct i915_vma *vma, *next;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict_vm(vm);
/* Switch back to the default context in order to unpin
@@ -384,13 +372,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
- ret = ggtt_flush(vm->i915);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
}
INIT_LIST_HEAD(&eviction_list);
- mutex_lock(&vm->mutex);
list_for_each_entry(vma, &vm->bound_list, vm_link) {
if (i915_vma_is_pinned(vma))
continue;
@@ -398,13 +385,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
- mutex_unlock(&vm->mutex);
ret = 0;
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 615a9f4ef30c..71efccfde122 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -59,6 +59,16 @@
#define pipelined 0
+static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.i915;
+}
+
+static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.gt->uncore;
+}
+
static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
@@ -66,7 +76,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_GEN(fence->i915) >= 6) {
+ if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -95,7 +105,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
/*
* To w/a incoherency with non-atomic 64-bit register updates,
@@ -132,7 +142,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
- if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
else
stride /= 512;
@@ -148,7 +158,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -180,7 +190,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -191,15 +201,17 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = fence_to_i915(fence);
+
/*
* Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
- if (IS_GEN(fence->i915, 2))
+ if (IS_GEN(i915, 2))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN(fence->i915, 3))
+ else if (IS_GEN(i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -215,6 +227,8 @@ static void fence_write(struct i915_fence_reg *fence,
static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = fence->ggtt;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
@@ -230,14 +244,15 @@ static int fence_update(struct i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_active_wait(&vma->active);
+ ret = i915_vma_sync(vma);
if (ret)
return ret;
}
old = xchg(&fence->vma, NULL);
if (old) {
- ret = i915_active_wait(&old->active);
+ /* XXX Ideally we would move the waiting to outside the mutex */
+ ret = i915_vma_sync(old);
if (ret) {
fence->vma = old;
return ret;
@@ -255,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence,
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move(&fence->link, &ggtt->fence_list);
}
/*
@@ -268,7 +283,7 @@ static int fence_update(struct i915_fence_reg *fence,
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
@@ -279,10 +294,10 @@ static int fence_update(struct i915_fence_reg *fence,
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
}
- intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
}
@@ -311,11 +326,11 @@ int i915_vma_revoke_fence(struct i915_vma *vma)
return fence_update(fence, NULL);
}
-static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
+static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *fence;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
+ list_for_each_entry(fence, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (atomic_read(&fence->pin_count))
@@ -325,19 +340,21 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(i915))
+ if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
}
-static int __i915_vma_pin_fence(struct i915_vma *vma)
+int __i915_vma_pin_fence(struct i915_vma *vma)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
+ lockdep_assert_held(&vma->vm->mutex);
+
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
@@ -348,7 +365,7 @@ static int __i915_vma_pin_fence(struct i915_vma *vma)
return 0;
}
} else if (set) {
- fence = fence_find(vma->vm->i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -399,7 +416,7 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -415,14 +432,13 @@ int i915_vma_pin_fence(struct i915_vma *vma)
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* This function walks the fence regs looking for a free one and remove
* it from the fence_list. It is used to reserve fence for vGPU to use.
*/
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_fence_reg *fence;
int count;
int ret;
@@ -436,7 +452,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return fence;
@@ -460,7 +476,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- struct i915_ggtt *ggtt = &fence->i915->ggtt;
+ struct i915_ggtt *ggtt = fence->ggtt;
lockdep_assert_held(&ggtt->vm.mutex);
@@ -469,19 +485,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
/**
* i915_gem_restore_fences - restore fence state
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *i915)
+void i915_gem_restore_fences(struct i915_ggtt *ggtt)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &ggtt->fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -547,15 +563,16 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
*/
/**
- * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @i915: i915 device private
+ * detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @ggtt: Global GTT
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-static void detect_bit_6_swizzle(struct drm_i915_private *i915)
+static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -717,8 +734,8 @@ static void detect_bit_6_swizzle(struct drm_i915_private *i915)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- i915->mm.bit_6_swizzle_x = swizzle_x;
- i915->mm.bit_6_swizzle_y = swizzle_y;
+ i915->ggtt.bit_6_swizzle_x = swizzle_x;
+ i915->ggtt.bit_6_swizzle_y = swizzle_y;
}
/*
@@ -819,17 +836,20 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
int num_fences;
int i;
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
- detect_bit_6_swizzle(i915);
+ detect_bit_6_swizzle(ggtt);
- if (INTEL_GEN(i915) >= 7 &&
- !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ if (!i915_ggtt_has_aperture(ggtt))
+ num_fences = 0;
+ else if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
IS_I945G(i915) || IS_I945GM(i915) ||
@@ -839,20 +859,20 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
num_fences = 8;
if (intel_vgpu_active(i915))
- num_fences = intel_uncore_read(&i915->uncore,
+ num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
- fence->i915 = i915;
+ fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(ggtt);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
@@ -861,7 +881,7 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
if (INTEL_GEN(i915) < 5 ||
- i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 99866fb9d94f..7bd521cd7cd7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
struct drm_i915_gem_object;
-struct drm_i915_private;
struct i915_ggtt;
struct i915_vma;
struct intel_gt;
@@ -39,7 +38,7 @@ struct sg_table;
struct i915_fence_reg {
struct list_head link;
- struct drm_i915_private *i915;
+ struct i915_ggtt *ggtt;
struct i915_vma *vma;
atomic_t pin_count;
int id;
@@ -55,10 +54,10 @@ struct i915_fence_reg {
};
/* i915_gem_fence_reg.c */
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct drm_i915_private *i915);
+void i915_gem_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b1a7a8b9b46a..6239a9adbf14 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -38,6 +38,7 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -132,9 +133,15 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
gen6_ggtt_invalidate(ggtt);
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+ if (INTEL_GEN(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -144,16 +151,18 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
u32 pte_flags;
int err;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ if (flags & I915_VMA_ALLOC) {
err = vma->vm->allocate_va_range(vma->vm,
vma->node.start, vma->size);
if (err)
return err;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
/* Applicable to VLV, and gen8+ */
@@ -161,14 +170,17 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ wmb();
return 0;
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
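The bind/unbind pair above relies on a one-shot flag-bit idiom; a condensed sketch of the shape, using the same names as the code above:

/* Bind: allocate the va range only when asked, and record that fact. */
if (flags & I915_VMA_ALLOC) {
	err = vma->vm->allocate_va_range(vma->vm,
					 vma->node.start, vma->size);
	if (err)
		return err;
	set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}

/* Unbind: test_and_clear_bit() guarantees the range is cleared at most
 * once, and only if the allocating bind actually ran.
 */
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);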
static int ppgtt_set_pages(struct i915_vma *vma)
@@ -496,22 +508,26 @@ static void i915_address_space_fini(struct i915_address_space *vm)
mutex_destroy(&vm->mutex);
}
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
+void __i915_vm_close(struct i915_address_space *vm)
{
- struct list_head *phases[] = {
- &vm->bound_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
+ struct i915_vma *vma, *vn;
+
+ mutex_lock(&vm->mutex);
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- mutex_lock(&vm->i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
+ /* Keep the obj (and hence the vma) alive as _we_ destroy it */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ i915_vma_destroy(vma);
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- i915_vma_destroy(vma);
+ i915_gem_object_put(obj);
}
- mutex_unlock(&vm->i915->drm.struct_mutex);
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ mutex_unlock(&vm->mutex);
}
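__i915_vm_close() above leans on a standard kernel idiom: kref_get_unless_zero() succeeds only while the refcount is still non-zero, so an object already heading for destruction is skipped rather than resurrected. A generic sketch, with made-up names (foo, foo_list, use_foo, foo_release):

struct foo {
	struct kref ref;
	struct list_head link;
};

static void walk_safely(struct list_head *foo_list)
{
	struct foo *foo;

	list_for_each_entry(foo, foo_list, link) {
		if (!kref_get_unless_zero(&foo->ref))
			continue; /* dying; do not resurrect */

		use_foo(foo); /* safe: we hold our own reference */
		kref_put(&foo->ref, foo_release);
	}
}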
static void __i915_vm_release(struct work_struct *work)
@@ -519,11 +535,6 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, rcu.work);
- ppgtt_destroy_vma(vm);
-
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -538,7 +549,6 @@ void i915_vm_release(struct kref *kref)
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
- vm->closed = true;
queue_rcu_work(vm->i915->wq, &vm->rcu);
}
@@ -546,6 +556,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ atomic_set(&vm->open, 1);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -562,7 +573,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
stash_init(&vm->free_pages);
- INIT_LIST_HEAD(&vm->unbound_list);
INIT_LIST_HEAD(&vm->bound_list);
}
@@ -816,17 +826,6 @@ release_pd_entry(struct i915_page_directory * const pd,
return free;
}
-/*
- * PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_engines = ALL_ENGINES;
-}
-
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *dev_priv = ppgtt->vm.i915;
@@ -1367,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (vm->has_read_only &&
vm->i915->kernel_context &&
vm->i915->kernel_context->vm) {
- struct i915_address_space *clone = vm->i915->kernel_context->vm;
+ struct i915_address_space *clone =
+ rcu_dereference_protected(vm->i915->kernel_context->vm,
+ true); /* static */
GEM_BUG_ON(!clone->has_read_only);
@@ -1422,6 +1423,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
set_pd_entry(pd, idx, pde);
atomic_inc(px_used(pde)); /* keep pinned */
}
+ wmb();
return 0;
}
@@ -1489,8 +1491,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
*
* Gen11 has HSDES#:1807136187 unresolved. Disable ro support
* for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
*/
- ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
+ ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12);
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
@@ -1509,13 +1513,12 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
}
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- if (intel_vgpu_active(i915)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_free_pd;
}
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
@@ -1566,7 +1569,7 @@ static void gen7_ppgtt_enable(struct intel_gt *gt)
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
/* GFX_MODE is per-ring on gen7+ */
ENGINE_WRITE(engine,
RING_MODE_GEN7,
@@ -1729,10 +1732,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
}
spin_unlock(&pd->lock);
- if (flush) {
- mark_tlbs_dirty(&ppgtt->base);
+ if (flush)
gen6_ggtt_invalidate(vm->gt->ggtt);
- }
goto out;
@@ -1786,15 +1787,13 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct drm_i915_private *i915 = vm->i915;
- /* FIXME remove the struct_mutex to bring the locking under control */
- mutex_lock(&i915->drm.struct_mutex);
i915_vma_destroy(ppgtt->vma);
- mutex_unlock(&i915->drm.struct_mutex);
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
+
+ mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt->base.pd);
}
@@ -1827,7 +1826,6 @@ static int pd_vma_bind(struct i915_vma *vma,
gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
- mark_tlbs_dirty(&ppgtt->base);
gen6_ggtt_invalidate(ggtt);
return 0;
@@ -1866,7 +1864,6 @@ static const struct i915_vma_ops pd_vma_ops = {
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
- struct drm_i915_private *i915 = ppgtt->base.vm.i915;
struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_vma *vma;
@@ -1877,33 +1874,30 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &vma->active, NULL, NULL);
+ i915_active_init(&vma->active, NULL, NULL);
- vma->vm = &ggtt->vm;
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(&ggtt->vm);
vma->ops = &pd_vma_ops;
vma->private = ppgtt;
vma->size = size;
vma->fence_size = size;
- vma->flags = I915_VMA_GGTT;
+ atomic_set(&vma->flags, I915_VMA_GGTT);
vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->closed_link);
- mutex_lock(&vma->vm->mutex);
- list_add(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
-
return vma;
}
int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- int err;
+ int err = 0;
- GEM_BUG_ON(ppgtt->base.vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -1911,24 +1905,26 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base)
* (When vma->pin_count becomes atomic, I expect we will naturally
* need a larger, unpacked, type and kill this redundancy.)
*/
- if (ppgtt->pin_count++)
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
return 0;
+ if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+ return -EINTR;
+
/*
* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- err = i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
- if (err)
- goto unpin;
-
- return 0;
+ if (!atomic_read(&ppgtt->pin_count)) {
+ err = i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
+ }
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+ mutex_unlock(&ppgtt->pin_mutex);
-unpin:
- ppgtt->pin_count = 0;
return err;
}
@@ -1936,22 +1932,20 @@ void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- GEM_BUG_ON(!ppgtt->pin_count);
- if (--ppgtt->pin_count)
- return;
-
- i915_vma_unpin(ppgtt->vma);
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
}
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (!ppgtt->pin_count)
+ if (!atomic_read(&ppgtt->pin_count))
return;
- ppgtt->pin_count = 0;
i915_vma_unpin(ppgtt->vma);
+ atomic_set(&ppgtt->pin_count, 0);
}
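The pin/unpin conversion above is an instance of the common lockless fast-path pattern: atomic_add_unless(&count, 1, 0) takes an extra pin only if one already exists, the first pin is serialized by a dedicated mutex, and the count is raised only after the expensive step succeeded. A condensed sketch, where do_first_pin() is a hypothetical stand-in for the i915_vma_pin() call above:

if (atomic_add_unless(&obj->pin_count, 1, 0))
	return 0; /* already pinned: lockless fast path */

if (mutex_lock_interruptible(&obj->pin_mutex))
	return -EINTR;

err = 0;
if (!atomic_read(&obj->pin_count)) /* recheck under the mutex */
	err = do_first_pin(obj);
if (!err)
	atomic_inc(&obj->pin_count); /* publish only on success */
mutex_unlock(&obj->pin_mutex);

return err;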
static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
@@ -1964,9 +1958,12 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ppgtt->pin_mutex);
+
ppgtt_init(&ppgtt->base, &i915->gt);
ppgtt->base.vm.top = 1;
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
@@ -2023,7 +2020,7 @@ static void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9)
+ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
@@ -2202,7 +2199,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_dma(addr, sgt_iter, vma->pages)
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
/*
@@ -2243,7 +2240,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
struct sgt_iter iter;
dma_addr_t addr;
- for_each_sgt_dma(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
/*
@@ -2448,7 +2445,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -2478,14 +2475,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ if (flags & I915_VMA_ALLOC) {
ret = alias->vm.allocate_va_range(&alias->vm,
vma->node.start,
vma->size);
if (ret)
return ret;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+ __i915_vma_flags(vma)));
alias->vm.insert_entries(&alias->vm, vma,
cache_level, pte_flags);
}
@@ -2506,7 +2507,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
@@ -2514,7 +2515,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
vm->clear_range(vm, vma->node.start, vma->size);
}
- if (vma->flags & I915_VMA_LOCAL_BIND) {
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
struct i915_address_space *vm =
&i915_vm_to_ggtt(vma->vm)->alias->vm;
@@ -2530,7 +2531,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+ /* XXX This does not prevent more requests being submitted! */
+ if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
+ -MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2555,12 +2558,12 @@ static int ggtt_set_pages(struct i915_vma *vma)
return 0;
}
-static void i915_gtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
{
- if (node->allocated && node->color != color)
+ if (i915_node_color_differs(node, color))
*start += I915_GTT_PAGE_SIZE;
/* Also leave a space between the unallocated reserved node after the
@@ -2598,6 +2601,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
@@ -2614,22 +2618,16 @@ err_ppgtt:
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_ppgtt *ppgtt;
- mutex_lock(&i915->drm.struct_mutex);
-
ppgtt = fetch_and_zero(&ggtt->alias);
if (!ppgtt)
- goto out;
+ return;
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
-out:
- mutex_unlock(&i915->drm.struct_mutex);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -2661,7 +2659,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
ggtt_release_guc_top(ggtt);
- drm_mm_remove_node(&ggtt->error_capture);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
}
static int init_ggtt(struct i915_ggtt *ggtt)
@@ -2692,13 +2691,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ if (ggtt->mappable_end) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -2746,35 +2747,33 @@ int i915_init_ggtt(struct drm_i915_private *i915)
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_vma *vma, *vn;
- ggtt->vm.closed = true;
+ atomic_set(&ggtt->vm.open, 0);
rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
- flush_workqueue(i915->wq);
+ flush_workqueue(ggtt->vm.i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
+ WARN_ON(__i915_vma_unbind(vma));
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
ggtt_release_guc_top(ggtt);
-
- if (drm_mm_initialized(&ggtt->vm.mm)) {
- intel_vgt_deballoon(ggtt);
- i915_address_space_fini(&ggtt->vm);
- }
+ intel_vgt_deballoon(ggtt);
ggtt->vm.cleanup(&ggtt->vm);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->iomap);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
}
/**
@@ -2794,8 +2793,6 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
- i915_gem_cleanup_stolen(i915);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2873,35 +2870,51 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
- I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
- I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
- I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
-}
-
-static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
@@ -2914,11 +2927,11 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
@@ -2950,8 +2963,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
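For reference, the bdw/chv helpers above assemble the 64-bit PAT image with the GEN8_PPAT() macro from i915_reg.h, which packs one 8-bit attribute byte per PAT index; roughly:

/* Shape of the packing (see GEN8_PPAT in i915_reg.h). */
#define GEN8_PPAT(i, x)		((u64)(x) << ((i) * 8))

pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* index 0: WB, LLC */
      GEN8_PPAT(3, GEN8_PPAT_UC);                  /* index 3: uncached */

intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));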
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2962,18 +2975,26 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
-static void setup_private_pat(struct drm_i915_private *dev_priv)
+static void setup_private_pat(struct intel_uncore *uncore)
{
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
+ struct drm_i915_private *i915 = uncore->i915;
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_setup_private_ppat(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ if (INTEL_GEN(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (INTEL_GEN(i915) >= 10)
+ cnl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
else
- bdw_setup_private_ppat(dev_priv);
+ bdw_setup_private_ppat(uncore);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -2985,10 +3006,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ if (!IS_DGFX(dev_priv)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3029,7 +3050,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.pte_encode = gen8_pte_encode;
- setup_private_pat(dev_priv);
+ setup_private_pat(ggtt->vm.gt->uncore);
return ggtt_probe_common(ggtt, size);
}
@@ -3200,9 +3221,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- int ret = 0;
-
- mutex_lock(&i915->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
@@ -3212,24 +3230,23 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
- ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-
- if (!io_mapping_init_wc(&ggtt->iomap,
- ggtt->gmadr.start,
- ggtt->mappable_end)) {
- ggtt->vm.cleanup(&ggtt->vm);
- ret = -EIO;
- goto out;
- }
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
- ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
i915_ggtt_init_fences(ggtt);
-out:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -3251,19 +3268,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- /*
- * Initialise stolen early so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev_priv);
- if (ret)
- goto out_gtt_cleanup;
-
return 0;
-
-out_gtt_cleanup:
- dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
- return ret;
}
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
@@ -3301,6 +3306,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
bool flush = false;
+ int open;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
@@ -3308,33 +3314,31 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
- mutex_unlock(&ggtt->vm.mutex);
-
- if (!i915_vma_unbind(vma))
- goto lock;
+ if (!__i915_vma_unbind(vma))
+ continue;
+ clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
- PIN_UPDATE));
+ PIN_GLOBAL, NULL));
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
-
-lock:
- mutex_lock(&ggtt->vm.mutex);
}
- ggtt->vm.closed = false;
+ atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
mutex_unlock(&ggtt->vm.mutex);
@@ -3345,10 +3349,12 @@ lock:
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
{
- ggtt_restore_mappings(&i915->ggtt);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ ggtt_restore_mappings(ggtt);
if (INTEL_GEN(i915) >= 8)
- setup_private_pat(i915);
+ setup_private_pat(ggtt->vm.gt->uncore);
}
static struct scatterlist *
@@ -3726,7 +3732,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 offset;
int err;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
+
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b97a47fc7a68..402283ce2864 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -148,8 +148,8 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
- __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
struct intel_remapped_plane_info {
/* in gtt pages */
@@ -305,7 +305,16 @@ struct i915_address_space {
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
- bool closed;
+ unsigned int bind_async_flags;
+
+ /*
+ * Each active user context has its own address space (in full-ppgtt).
+ * Since the vm may be shared between multiple contexts, we count how
+ * many contexts keep us "open". Once open hits zero, we are closed
+	 * and do not allow any new attachments, and proceed to shut down our
+ * vma and page directories.
+ */
+ atomic_t open;
struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
@@ -320,11 +329,6 @@ struct i915_address_space {
*/
struct list_head bound_list;
- /**
- * List of vma that are not unbound.
- */
- struct list_head unbound_list;
-
struct pagestash free_pages;
/* Global GTT */
@@ -376,6 +380,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -401,6 +411,11 @@ struct i915_ggtt {
int mtrr;
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
u32 pin_bias;
unsigned int num_fences;
@@ -422,7 +437,6 @@ struct i915_ggtt {
struct i915_ppgtt {
struct i915_address_space vm;
- intel_engine_mask_t pd_dirty_engines;
struct i915_page_directory *pd;
};
@@ -432,7 +446,9 @@ struct gen6_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
- unsigned int pin_count;
+ atomic_t pin_count;
+ struct mutex pin_mutex;
+
bool scan_for_unused_pt;
};
@@ -559,6 +575,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
@@ -577,6 +598,35 @@ static inline void i915_vm_put(struct i915_address_space *vm)
kref_put(&vm->ref, i915_vm_release);
}
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ atomic_inc(&vm->open);
+ return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+ if (atomic_add_unless(&vm->open, 1, 0))
+ return i915_vm_get(vm);
+
+ return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ if (atomic_dec_and_test(&vm->open))
+ __i915_vm_close(vm);
+
+ i915_vm_put(vm);
+}
+
int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
@@ -609,10 +659,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT_ULL(11)
+#define PIN_UPDATE BIT_ULL(9)
+#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
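The i915_vm_open/i915_vm_tryopen/i915_vm_close trio added above layers an increment-unless-zero open count on top of the existing kref, so a closing address space can never gain new users. A compact sketch of the same pattern with C11 atomics (names illustrative; the driver additionally takes and drops the kref on each open/close):

#include <stdatomic.h>
#include <stdbool.h>

struct vm {
	atomic_int open;
};

/* increment-unless-zero, the atomic_add_unless() analogue */
static bool vm_tryopen(struct vm *vm)
{
	int old = atomic_load(&vm->open);

	while (old != 0)
		if (atomic_compare_exchange_weak(&vm->open, &old, old + 1))
			return true;	/* on CAS failure, old is reloaded */
	return false;
}

static void vm_close(struct vm *vm)
{
	if (atomic_fetch_sub(&vm->open, 1) == 1) {
		/* last opener: tear down vma and page directories */
	}
}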
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 5d9101376a3d..cf8a8c3ef047 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -5,6 +5,7 @@
#include "gt/intel_engine_user.h"
#include "i915_drv.h"
+#include "i915_perf.h"
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -62,7 +63,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
+ value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(i915);
@@ -79,8 +80,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915_modparams.enable_hangcheck &&
- intel_has_gpu_reset(i915);
- if (value && intel_has_reset_engine(i915))
+ intel_has_gpu_reset(&i915->gt);
+ if (value && intel_has_reset_engine(&i915->gt))
value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
@@ -156,6 +157,9 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
break;
+ case I915_PARAM_PERF_REVISION:
+ value = i915_perf_ioctl_version();
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
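Two behavioural changes sit in this file: I915_PARAM_HAS_SECURE_BATCHES is now reported only when the hardware actually supports it, not merely when the caller has CAP_SYS_ADMIN, and the new I915_PARAM_PERF_REVISION exposes the perf ioctl version. A condensed sketch of that switch shape (names and values illustrative, not the uapi's):

#include <errno.h>
#include <stdbool.h>

enum { PARAM_HAS_SECURE_BATCHES = 1, PARAM_PERF_REVISION = 2 };

static int getparam(int param, bool hw_secure, bool admin, int *value)
{
	switch (param) {
	case PARAM_HAS_SECURE_BATCHES:
		*value = hw_secure && admin;	/* both must hold now */
		return 0;
	case PARAM_PERF_REVISION:
		*value = 1;	/* stand-in for i915_perf_ioctl_version() */
		return 0;
	default:
		return -EINVAL;
	}
}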
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e284bd76fa86..3c85cb0ee99f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,6 +40,7 @@
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
+ bool wc;
};
static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
struct z_stream_s *zstream = &c->zstream;
zstream->next_in = src;
- if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
@@ -367,6 +369,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
struct compress {
struct pagevec pool;
+ bool wc;
};
static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
- if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
@@ -421,6 +424,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
int subslice;
@@ -436,12 +440,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
@@ -470,9 +474,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->hw_id,
- ctx->sched_attr.priority, ctx->guilty, ctx->active);
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
+ ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -533,10 +537,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
}
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
- jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
- ee->hangcheck_timestamp,
- ee->hangcheck_timestamp == epoch ? "; epoch" : "");
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
@@ -574,6 +574,9 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
lower_32_bits(obj->gtt_offset));
}
+ if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+ err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes);
+
err_compression_marker(m);
for (page = 0; page < obj->page_count; page++) {
int i, len;
@@ -675,11 +678,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
ts = ktime_to_timespec64(error->uptime);
err_printf(m, "Uptime: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
- err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
- err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
- error->capture,
- jiffies_to_msecs(jiffies - error->capture),
- jiffies_to_msecs(error->capture - error->epoch));
+ err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
+ error->capture, jiffies_to_msecs(jiffies - error->capture));
for (ee = error->engine; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
@@ -734,8 +734,24 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+ if (IS_GEN_RANGE(m->i915, 8, 11))
+ err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
+
+ if (IS_GEN(m->i915, 12))
+ err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err);
+
+ if (INTEL_GEN(m->i915) >= 12) {
+ int i;
+
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
+ error->sfc_done[i]);
+
+ err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done);
+ }
+
for (ee = error->engine; ee; ee = ee->next)
- error_print_engine(m, ee, error->epoch);
+ error_print_engine(m, ee, error->capture);
for (ee = error->engine; ee; ee = ee->next) {
const struct drm_i915_error_object *obj;
@@ -763,7 +779,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
&ee->requests[j],
- error->epoch);
+ error->capture);
}
print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
@@ -963,7 +979,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct drm_i915_error_object *dst;
unsigned long num_pages;
struct sgt_iter iter;
- dma_addr_t dma;
int ret;
might_sleep();
@@ -984,21 +999,59 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->gtt_page_sizes = vma->page_sizes.gtt;
dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
+ compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+ drm_mm_node_allocated(&ggtt->error_capture);
+
ret = -EINVAL;
- for_each_sgt_dma(dma, iter, vma->pages) {
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+ dma_addr_t dma;
- ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
- s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
- io_mapping_unmap(s);
- if (ret)
- break;
+ s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else if (i915_gem_object_is_lmem(vma->obj)) {
+ struct intel_memory_region *mem = vma->obj->mm.region;
+ dma_addr_t dma;
+
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ void __iomem *s;
+
+ s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else {
+ struct page *page;
+
+ for_each_sgt_page(page, iter, vma->pages) {
+ void *s;
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap(page);
+ ret = compress_page(compress, s, dst);
+ kunmap(s);
+
+ drm_clflush_pages(&page, 1);
+
+ if (ret)
+ break;
+ }
}
if (ret || compress_flush(compress, dst)) {
@@ -1136,8 +1189,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
ee->idle = intel_engine_is_idle(engine);
- if (!ee->idle)
- ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
@@ -1263,7 +1314,6 @@ static bool record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
- e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
@@ -1291,7 +1341,7 @@ capture_vma(struct capture_vma *next,
if (!c)
return next;
- if (!i915_active_trygrab(&vma->active)) {
+ if (!i915_active_acquire_if_busy(&vma->active)) {
kfree(c);
return next;
}
@@ -1431,7 +1481,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
*this->slot =
i915_error_object_create(i915, vma, compress);
- i915_active_ungrab(&vma->active);
+ i915_active_release(&vma->active);
i915_vma_put(vma);
capture = this->next;
@@ -1553,6 +1603,21 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
+ if (IS_GEN_RANGE(i915, 8, 11))
+ error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+
+ if (IS_GEN(i915, 12))
+ error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+
+ if (INTEL_GEN(i915) >= 12) {
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ error->sfc_done[i] =
+ intel_uncore_read(uncore, GEN12_SFC_DONE(i));
+ }
+
+ error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
+ }
+
/* 4: Everything else */
if (INTEL_GEN(i915) >= 11) {
error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
@@ -1647,26 +1712,15 @@ static void capture_params(struct i915_gpu_state *error)
i915_params_copy(&error->params, &i915_modparams);
}
-static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
-{
- const struct drm_i915_error_engine *ee;
- unsigned long epoch = error->capture;
-
- for (ee = error->engine; ee; ee = ee->next) {
- if (ee->hangcheck_timestamp &&
- time_before(ee->hangcheck_timestamp, epoch))
- epoch = ee->hangcheck_timestamp;
- }
-
- return epoch;
-}
-
static void capture_finish(struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &error->i915->ggtt;
- const u64 slot = ggtt->error_capture.start;
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
+ const u64 slot = ggtt->error_capture.start;
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ }
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
@@ -1712,8 +1766,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
error->overlay = intel_overlay_capture_error_state(i915);
error->display = intel_display_capture_error_state(i915);
- error->epoch = capture_find_epoch(error);
-
capture_finish(error);
compress_fini(&compress);
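The capture rework above replaces the single aperture-slot loop with three page sources, and compress->wc records whether the source mapping was write-combining so compress_page() knows when i915_memcpy_from_wc() is worth attempting. A condensed sketch of the dispatch (names illustrative):

#include <stdbool.h>

enum copy_path { VIA_APERTURE, VIA_LMEM, VIA_KMAP };

static enum copy_path pick_copy_path(bool have_aperture_slot, bool is_lmem)
{
	if (have_aperture_slot)
		return VIA_APERTURE;	/* insert_page + io_mapping_map_wc */
	if (is_lmem)
		return VIA_LMEM;	/* map the dma address via the region iomap */
	return VIA_KMAP;		/* kmap + clflush around the copy */
}

Only the first two paths hand back write-combining mappings, which is exactly the condition the patch stores in compress->wc.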
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index df9f57766626..5d2c3372ff99 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -34,7 +34,6 @@ struct i915_gpu_state {
ktime_t boottime;
ktime_t uptime;
unsigned long capture;
- unsigned long epoch;
struct drm_i915_private *i915;
@@ -74,6 +73,10 @@ struct i915_gpu_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
+ u32 gtt_cache;
+ u32 aux_err; /* gen12 */
+ u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */
+ u32 gam_done; /* gen12 */
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
@@ -85,7 +88,6 @@ struct i915_gpu_state {
/* Software tracked state */
bool idle;
- unsigned long hangcheck_timestamp;
int num_requests;
u32 reset_count;
@@ -118,7 +120,6 @@ struct i915_gpu_state {
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
- u32 hw_id;
int active;
int guilty;
struct i915_sched_attr sched_attr;
@@ -127,6 +128,7 @@ struct i915_gpu_state {
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
+ u32 gtt_page_sizes;
int num_pages;
int page_count;
int unused;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37e3dd3c1a9d..dae00f7dd7df 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/circ_buf.h>
-#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
@@ -46,6 +45,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -149,30 +149,24 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
-};
-
-static const u32 hpd_mcc[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};
static const u32 hpd_tgp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
- [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
- [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
- [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
+ [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
+ [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
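The per-port SDE_DDIA_HOTPLUG_ICP/SDE_TC1_HOTPLUG_ICP defines give way to parameterized macros above, so the ICP and TGP tables differ only in which ports they list. A sketch of the shift-by-port idea behind such macros (bit positions below are made up for illustration, not the hardware's):

#define SDE_DDI_HOTPLUG_SKETCH(port)	(1u << ((port) + 16))
#define SDE_TC_HOTPLUG_SKETCH(tc)	(1u << ((tc) + 24))

/* e.g. hpd_tgp[HPD_PORT_D] becomes SDE_TC_HOTPLUG_SKETCH(0): adding a
 * port is one table entry rather than a new #define. */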
@@ -327,180 +321,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
-{
- WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
-
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
-}
-
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
-
- spin_lock_irq(&gt->irq_lock);
-
- while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
- ;
-
- dev_priv->gt_pm.rps.pm_iir = 0;
-
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
-
- spin_lock_irq(&gt->irq_lock);
- gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
- dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&gt->irq_lock);
- WARN_ON_ONCE(rps->pm_iir);
-
- if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
- else
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
- rps->interrupts_enabled = true;
- gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);
-
- spin_unlock_irq(&gt->irq_lock);
-}
-
-u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_gt *gt = &dev_priv->gt;
-
- if (!READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&gt->irq_lock);
- rps->interrupts_enabled = false;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
- gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(dev_priv);
-
- /* Now that we will not be generating any more work, flush any
- * outstanding tasks. As we are called on the RPS idle path,
- * we will reset the GPU to minimum frequencies, so the current
- * state of the worker can be discarded.
- */
- cancel_work_sync(&rps->work);
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-void gen9_reset_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen9_enable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- WARN_ON_ONCE(intel_uncore_read(gt->uncore,
- gen6_pm_iir(gt->i915)) &
- gt->pm_guc_events);
- guc->interrupts.enabled = true;
- gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- }
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen9_disable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
-
- gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(gt->i915);
-
- gen9_reset_guc_interrupts(guc);
-}
-
-void gen11_reset_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen11_enable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
- guc->interrupts.enabled = true;
- }
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen11_disable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
-
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(gt->i915);
-
- gen11_reset_guc_interrupts(guc);
-}
-
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -942,14 +762,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
@@ -992,7 +812,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- position = __intel_get_crtc_scanline(intel_crtc);
+ position = __intel_get_crtc_scanline(crtc);
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -1072,199 +892,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
return position;
}
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay;
-
- spin_lock(&mchdev_lock);
-
- intel_uncore_write16(uncore,
- MEMINTRSTS,
- intel_uncore_read(uncore, MEMINTRSTS));
-
- new_delay = dev_priv->ips.cur_delay;
-
- intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
- busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
- max_avg = intel_uncore_read(uncore, RCBMAXAVG);
- min_avg = intel_uncore_read(uncore, RCBMINAVG);
-
- /* Handle RCS change request from hw */
- if (busy_up > max_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.cur_delay - 1;
- if (new_delay < dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.max_delay;
- } else if (busy_down < min_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.cur_delay + 1;
- if (new_delay > dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.min_delay;
- }
-
- if (ironlake_set_drps(dev_priv, new_delay))
- dev_priv->ips.cur_delay = new_delay;
-
- spin_unlock(&mchdev_lock);
-
- return;
-}
-
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *ei)
-{
- ei->ktime = ktime_get_raw();
- ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
- ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
-}
-
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
-{
- memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
-}
-
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const struct intel_rps_ei *prev = &rps->ei;
- struct intel_rps_ei now;
- u32 events = 0;
-
- if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
- return 0;
-
- vlv_c0_read(dev_priv, &now);
-
- if (prev->ktime) {
- u64 time, c0;
- u32 render, media;
-
- time = ktime_us_delta(now.ktime, prev->ktime);
-
- time *= dev_priv->czclk_freq;
-
- /* Workload can be split between render + media,
- * e.g. SwapBuffers being blitted in X after being rendered in
- * mesa. To account for this we need to combine both engines
- * into our activity counter.
- */
- render = now.render_c0 - prev->render_c0;
- media = now.media_c0 - prev->media_c0;
- c0 = max(render, media);
- c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
-
- if (c0 > time * rps->power.up_threshold)
- events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->power.down_threshold)
- events = GEN6_PM_RP_DOWN_THRESHOLD;
- }
-
- rps->ei = now;
- return events;
-}
-
-static void gen6_pm_rps_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, gt_pm.rps.work);
- struct intel_gt *gt = &dev_priv->gt;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- bool client_boost = false;
- int new_delay, adj, min, max;
- u32 pm_iir = 0;
-
- spin_lock_irq(&gt->irq_lock);
- if (rps->interrupts_enabled) {
- pm_iir = fetch_and_zero(&rps->pm_iir);
- client_boost = atomic_read(&rps->num_waiters);
- }
- spin_unlock_irq(&gt->irq_lock);
-
- /* Make sure we didn't queue anything we're not going to process. */
- WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
- if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- goto out;
-
- mutex_lock(&rps->lock);
-
- pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
-
- adj = rps->last_adj;
- new_delay = rps->cur_freq;
- min = rps->min_freq_softlimit;
- max = rps->max_freq_softlimit;
- if (client_boost)
- max = rps->max_freq;
- if (client_boost && new_delay < rps->boost_freq) {
- new_delay = rps->boost_freq;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
-
- if (new_delay >= rps->max_freq_softlimit)
- adj = 0;
- } else if (client_boost) {
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (rps->cur_freq > rps->efficient_freq)
- new_delay = rps->efficient_freq;
- else if (rps->cur_freq > rps->min_freq_softlimit)
- new_delay = rps->min_freq_softlimit;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
- if (adj < 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
-
- if (new_delay <= rps->min_freq_softlimit)
- adj = 0;
- } else { /* unknown event */
- adj = 0;
- }
-
- rps->last_adj = adj;
-
- /*
- * Limit deboosting and boosting to keep ourselves at the extremes
- * when in the respective power modes (i.e. slowly decrease frequencies
- * while in the HIGH_POWER zone and slowly increase frequencies while
- * in the LOW_POWER zone). On idle, we will hit the timeout and drop
- * to the next level quickly, and conversely if busy we expect to
- * hit a waitboost and rapidly switch into max power.
- */
- if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
- (adj > 0 && rps->power.mode == LOW_POWER))
- rps->last_adj = 0;
-
- /* sysfs frequency interfaces may have snuck in while servicing the
- * interrupt
- */
- new_delay += adj;
- new_delay = clamp_t(int, new_delay, min, max);
-
- if (intel_set_rps(dev_priv, new_delay)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- rps->last_adj = 0;
- }
-
- mutex_unlock(&rps->lock);
-
-out:
- /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&gt->irq_lock);
- if (rps->interrupts_enabled)
- gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-
/**
* ivybridge_parity_work - Workqueue called when a parity error interrupt
* occurred.
@@ -1401,11 +1028,11 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
case HPD_PORT_C:
- return val & TGP_DDIC_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
default:
return false;
}
@@ -1427,20 +1054,6 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
-static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
- switch (pin) {
- case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
- case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
- case HPD_PORT_C:
- return val & TGP_DDIC_HPD_LONG_DETECT;
- default:
- return false;
- }
-}
-
static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1652,54 +1265,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
res1, res2);
}
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_rps *rps = &i915->gt_pm.rps;
- const u32 events = i915->pm_rps_events & pm_iir;
-
- lockdep_assert_held(&gt->irq_lock);
-
- if (unlikely(!events))
- return;
-
- gen6_gt_pm_mask_irq(gt, events);
-
- if (!rps->interrupts_enabled)
- return;
-
- rps->pm_iir |= events;
- schedule_work(&rps->work);
-}
-
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_gt *gt = &dev_priv->gt;
-
- if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
- if (rps->interrupts_enabled) {
- rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&rps->work);
- }
- spin_unlock(&gt->irq_lock);
- }
-
- if (INTEL_GEN(dev_priv) >= 8)
- return;
-
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
-
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-}
-
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -1716,7 +1281,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- int pipe;
+ enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1741,6 +1306,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
status_mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
+ default:
case PIPE_A:
iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
@@ -2009,7 +1575,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2136,7 +1702,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
@@ -2222,7 +1788,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
@@ -2256,19 +1822,35 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
- const u32 *pins)
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- u32 ddi_hotplug_trigger;
- u32 tc_hotplug_trigger;
+ u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
+ bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
+ const u32 *pins;
- if (HAS_PCH_MCC(dev_priv)) {
+ if (HAS_PCH_TGP(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
+ tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_JSP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = 0;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_MCC(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
} else {
+ WARN(!HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
}
if (ddi_hotplug_trigger) {
@@ -2292,44 +1874,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger,
dig_hotplug_reg, pins,
- icp_tc_port_hotplug_long_detect);
- }
-
- if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
-
- if (pch_iir & SDE_GMBUS_ICP)
- gmbus_irq_handler(dev_priv);
-}
-
-static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
-{
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
- u32 pin_mask = 0, long_mask = 0;
-
- if (ddi_hotplug_trigger) {
- u32 dig_hotplug_reg;
-
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
-
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- ddi_hotplug_trigger,
- dig_hotplug_reg, hpd_tgp,
- tgp_ddi_port_hotplug_long_detect);
- }
-
- if (tc_hotplug_trigger) {
- u32 dig_hotplug_reg;
-
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
-
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- tc_hotplug_trigger,
- dig_hotplug_reg, hpd_tgp,
- tgp_tc_port_hotplug_long_detect);
+ tc_port_hotplug_long_detect);
}
if (pin_mask)
@@ -2434,7 +1979,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev_priv);
+ gen5_rps_irq_handler(&dev_priv->gt.rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2539,7 +2084,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
}
}
@@ -2616,10 +2161,16 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
u32 mask;
if (INTEL_GEN(dev_priv) >= 12)
- /* TODO: Add AUX entries for USBC */
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
- TGL_DE_PORT_AUX_DDIC;
+ TGL_DE_PORT_AUX_DDIC |
+ TGL_DE_PORT_AUX_USBC1 |
+ TGL_DE_PORT_AUX_USBC2 |
+ TGL_DE_PORT_AUX_USBC3 |
+ TGL_DE_PORT_AUX_USBC4 |
+ TGL_DE_PORT_AUX_USBC5 |
+ TGL_DE_PORT_AUX_USBC6;
+
mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
@@ -2638,7 +2189,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 11)
+ return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
+ else if (INTEL_GEN(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2655,11 +2208,21 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & GEN8_DE_EDP_PSR) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir;
+ i915_reg_t iir_reg;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
+ else
+ iir_reg = EDP_PSR_IIR;
+
+ psr_iir = I915_READ(iir_reg);
+ I915_WRITE(iir_reg, psr_iir);
+
+ if (psr_iir)
+ found = true;
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
- found = true;
}
if (!found)
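On gen12 the PSR interrupt bits live in a per-transcoder TRANS_PSR_IIR rather than the single EDP_PSR_IIR, so the handler above first selects the register, then reads and acks it before calling intel_psr_irq_handler(). A minimal sketch of the selection (offsets are placeholders, not the hardware's):

#include <stdint.h>

#define EDP_PSR_IIR_SKETCH		0x64838u
#define TRANS_PSR_IIR_SKETCH(trans)	(0x60818u + 0x1000u * (trans))

static uint32_t psr_iir_reg(int gen, int transcoder)
{
	return gen >= 12 ? TRANS_PSR_IIR_SKETCH(transcoder)
			 : EDP_PSR_IIR_SKETCH;
}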
@@ -2780,12 +2343,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- tgp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
- icp_irq_handler(dev_priv, iir, hpd_mcc);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir, hpd_icp);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_handler(dev_priv, iir);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
@@ -2894,9 +2453,11 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
-static irqreturn_t gen11_irq_handler(int irq, void *arg)
+static __always_inline irqreturn_t
+__gen11_irq_handler(struct drm_i915_private * const i915,
+ u32 (*intr_disable)(void __iomem * const regs),
+ void (*intr_enable)(void __iomem * const regs))
{
- struct drm_i915_private * const i915 = arg;
void __iomem * const regs = i915->uncore.regs;
struct intel_gt *gt = &i915->gt;
u32 master_ctl;
@@ -2905,9 +2466,9 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = gen11_master_intr_disable(regs);
+ master_ctl = intr_disable(regs);
if (!master_ctl) {
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
return IRQ_NONE;
}
@@ -2929,13 +2490,20 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
gen11_gu_misc_irq_handler(gt, gu_misc_iir);
return IRQ_HANDLED;
}
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+ return __gen11_irq_handler(arg,
+ gen11_master_intr_disable,
+ gen11_master_intr_enable);
+}
+
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
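Factoring the body into __gen11_irq_handler() with __always_inline means each wrapper, here gen11_irq_handler(), compiles to a specialized handler with the master-control callbacks constant-folded instead of called indirectly in the hot path. A minimal sketch of the technique:

static inline __attribute__((always_inline))
int irq_body(unsigned int (*intr_disable)(void), void (*intr_enable)(void))
{
	unsigned int master_ctl = intr_disable();

	if (!master_ctl) {
		intr_enable();
		return 0;		/* IRQ_NONE */
	}

	/* ... dispatch the master_ctl bits ... */

	intr_enable();
	return 1;			/* IRQ_HANDLED */
}

static unsigned int my_disable(void) { return 1; }
static void my_enable(void) { }

static int my_handler(void)
{
	return irq_body(my_disable, my_enable);	/* calls fold away */
}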
@@ -2952,12 +2520,18 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-int i945gm_enable_vblank(struct drm_crtc *crtc)
+int i915gm_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- if (dev_priv->i945gm_vblank.enabled++ == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ /*
+ * Vblank interrupts fail to wake the device up from C2+.
+ * Disabling render clock gating during C-states avoids
+	 * the problem. There is a small power cost, so we do this
+ * only when vblank interrupts are actually enabled.
+ */
+ if (dev_priv->vblank_enabled++ == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -3030,14 +2604,14 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void i945gm_disable_vblank(struct drm_crtc *crtc)
+void i915gm_disable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
i8xx_disable_vblank(crtc);
- if (--dev_priv->i945gm_vblank.enabled == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ if (--dev_priv->vblank_enabled == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
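The replacement workaround writes SCPD0 through i915's masked-register convention: the upper 16 bits of the value select which of the lower 16 bits the write may change, so no read-modify-write is needed. The two helpers behave as below (mirroring the driver's _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE):

#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))	/* set bit b */
#define MASKED_BIT_DISABLE(b)	((b) << 16)		/* clear bit b */

Bits whose mask half is zero are left untouched by the hardware, which lets the vblank path flip one clock-gating bit without reading the register first.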
@@ -3076,60 +2650,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_vblank_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, i945gm_vblank.work);
-
- /*
- * Vblank interrupts fail to wake up the device from C3,
- * hence we want to prevent C3 usage while vblank interrupts
- * are enabled.
- */
- pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
- READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
- dev_priv->i945gm_vblank.c3_disable_latency :
- PM_QOS_DEFAULT_VALUE);
-}
-
-static int cstate_disable_latency(const char *name)
-{
- const struct cpuidle_driver *drv;
- int i;
-
- drv = cpuidle_get_driver();
- if (!drv)
- return 0;
-
- for (i = 0; i < drv->state_count; i++) {
- const struct cpuidle_state *state = &drv->states[i];
-
- if (!strcmp(state->name, name))
- return state->exit_latency ?
- state->exit_latency - 1 : 0;
- }
-
- return 0;
-}
-
-static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
-{
- INIT_WORK(&dev_priv->i945gm_vblank.work,
- i945gm_vblank_work_func);
-
- dev_priv->i945gm_vblank.c3_disable_latency =
- cstate_disable_latency("C3");
- pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-}
-
-static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
-{
- cancel_work_sync(&dev_priv->i945gm_vblank.work);
- pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3246,7 +2766,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen8_master_intr_disable(dev_priv->uncore.regs);
@@ -3271,7 +2791,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen11_master_intr_disable(dev_priv->uncore.regs);
@@ -3279,8 +2799,23 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
+ intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
+ }
+ } else {
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ }
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3431,42 +2966,44 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
}
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
+ u32 sde_ddi_mask, u32 sde_tc_mask,
+ u32 ddi_enable_mask, u32 tc_enable_mask,
+ const u32 *pins)
{
u32 hotplug_irqs, enabled_irqs;
- hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+ hotplug_irqs = sde_ddi_mask | sde_tc_mask;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE_MASK);
+ icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}
+/*
+ * EHL doesn't need most of gen11_hpd_irq_setup; it handles only the
+ * equivalent of SDE.
+ */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, enabled_irqs;
-
- hotplug_irqs = SDE_DDI_MASK_TGP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
-
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
+ hpd_icp);
}
-static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+/*
+ * JSP behaves exactly the same as MCC above except that port C is mapped to
+ * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's
+ * masks & tables rather than ICP's masks & tables.
+ */
+static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, enabled_irqs;
-
- hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
-
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
- TGP_TC_HPD_ENABLE_MASK);
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_TGP, 0,
+ TGP_DDI_HPD_ENABLE_MASK, 0,
+ hpd_tgp);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3506,9 +3043,13 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- tgp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
+ TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
+ ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3684,7 +3225,6 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv)) {
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3794,8 +3334,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
+ }
+ } else {
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ }
for_each_pipe(dev_priv, pipe) {
dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
@@ -3853,8 +3406,11 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_TGP(dev_priv))
icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
TGP_TC_HPD_ENABLE_MASK);
- else if (HAS_PCH_MCC(dev_priv))
+ else if (HAS_PCH_JSP(dev_priv))
icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ else if (HAS_PCH_MCC(dev_priv))
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE(PORT_TC1));
else
icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
ICP_TC_HPD_ENABLE_MASK);
@@ -4317,16 +3873,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
- if (IS_I945GM(dev_priv))
- i945gm_vblank_work_init(dev_priv);
-
intel_hpd_init_work(dev_priv);
- INIT_WORK(&rps->work, gen6_pm_rps_work);
-
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
@@ -4335,33 +3885,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
- /* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev_priv))
- /* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
-
- /* We share the register with other engine */
- if (INTEL_GEN(dev_priv) > 9)
- GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
-
- rps->pm_intrmsk_mbz = 0;
-
- /*
- * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_GEN(dev_priv) <= 7)
- rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_GEN(dev_priv) >= 8)
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4387,8 +3910,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
- if (HAS_PCH_MCC(dev_priv))
- /* EHL doesn't need most of gen11_hpd_irq_setup */
+ if (HAS_PCH_JSP(dev_priv))
+ dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
+ else if (HAS_PCH_MCC(dev_priv))
dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
else if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
@@ -4411,9 +3935,6 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
- if (IS_I945GM(i915))
- i945gm_vblank_work_fini(i915);
-
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
@@ -4538,10 +4059,10 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
int irq = dev_priv->drm.pdev->irq;
/*
- * FIXME we can get called twice during driver load
- * error handling due to intel_modeset_cleanup()
- * calling us out of sequence. Would be nice if
- * it didn't do that...
+ * FIXME we can get called twice during driver probe
+ * error handling as well as during driver remove due to
+ * intel_modeset_driver_remove() calling us out of sequence.
+ * Would be nice if it didn't do that...
*/
if (!dev_priv->drm.irq_enabled)
return;
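With icp_hpd_irq_setup() now taking the SDE masks, detection enables and pin table as parameters, the MCC, JSP and TGP variants above collapse into thin wrappers around one implementation. A condensed sketch of that table-driven shape (fields illustrative):

#include <stdint.h>

struct hpd_cfg {
	uint32_t ddi_mask, tc_mask;	/* which SDE bits to unmask */
	uint32_t ddi_enable, tc_enable;	/* detection enables */
	const uint32_t *pins;		/* hpd pin table */
};

static void hpd_setup(const struct hpd_cfg *cfg)
{
	uint32_t hotplug_irqs = cfg->ddi_mask | cfg->tc_mask;

	/* unmask hotplug_irqs, then program the detection enables and
	 * route long/short pulse lookups through cfg->pins */
	(void)hotplug_irqs;
}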
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 8e7e6071777e..812c47a9c2d6 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -17,14 +17,8 @@ struct drm_device;
struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
-struct intel_crtc;
-struct intel_gt;
-struct intel_guc;
struct intel_uncore;
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-
void intel_irq_init(struct drm_i915_private *dev_priv);
void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
@@ -106,12 +100,6 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct intel_guc *guc);
-void gen9_enable_guc_interrupts(struct intel_guc *guc);
-void gen9_disable_guc_interrupts(struct intel_guc *guc);
-void gen11_reset_guc_interrupts(struct intel_guc *guc);
-void gen11_enable_guc_interrupts(struct intel_guc *guc);
-void gen11_disable_guc_interrupts(struct intel_guc *guc);
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -122,12 +110,12 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
int i8xx_enable_vblank(struct drm_crtc *crtc);
-int i945gm_enable_vblank(struct drm_crtc *crtc);
+int i915gm_enable_vblank(struct drm_crtc *crtc);
int i965_enable_vblank(struct drm_crtc *crtc);
int ilk_enable_vblank(struct drm_crtc *crtc);
int bdw_enable_vblank(struct drm_crtc *crtc);
void i8xx_disable_vblank(struct drm_crtc *crtc);
-void i945gm_disable_vblank(struct drm_crtc *crtc);
+void i915gm_disable_vblank(struct drm_crtc *crtc);
void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 296452f9efe4..1dd1f3652795 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ i915_param_named(modeset, int, 0400,
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
@@ -165,7 +166,7 @@ i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-i915_param_named_unsafe(inject_load_failure, uint, 0400,
+i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
@@ -178,6 +179,11 @@ i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
#endif
+#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
+i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+ "Fake LMEM start offset (default: 0)");
+#endif
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
@@ -189,6 +195,8 @@ static __always_inline void _print_param(struct drm_printer *p,
drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x);
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
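_print_param() keys off the stringified parameter type with __builtin_strcmp (a GCC/Clang builtin); both arguments are string constants, so the compiler folds the chain and each instantiation keeps a single drm_printf. The hunk above simply adds the "unsigned long" arm for the new fake_lmem_start parameter. A standalone sketch of the pattern:

#include <stdio.h>

static inline void print_param(const char *name, const char *type,
			       const void *x)
{
	if (!__builtin_strcmp(type, "int"))
		printf("p.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned long"))
		printf("p.%s=%lu\n", name, *(const unsigned long *)x);
	else
		printf("p.%s=<unknown>\n", name);
}

int main(void)
{
	unsigned long fake_lmem_start = 0;

	print_param("fake_lmem_start", "unsigned long", &fake_lmem_start);
	return 0;
}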
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d29ade3b7de6..31b88f297fbc 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -61,11 +61,12 @@ struct drm_printer;
param(char *, dmc_firmware_path, NULL) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
param(int, edp_vswing, 0) \
- param(int, reset, 2) \
- param(unsigned int, inject_load_failure, 0) \
+ param(int, reset, 3) \
+ param(unsigned int, inject_probe_failure, 0) \
param(int, fastboot, -1) \
param(int, enable_dpcd_backlight, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
+ param(unsigned long, fake_lmem_start, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
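
The param() list above is an X-macro: i915_params.h defines the table once and i915_params.c instantiates it several times (struct members, module-parameter registrations, the _print_param() calls), which is why adding fake_lmem_start needs only the single table entry plus the new "unsigned long" branch in _print_param(). A minimal userspace sketch of the pattern, with illustrative entries not taken from the driver:

#include <stdio.h>
#include <string.h>

/* Illustrative X-macro table in the style of i915_params.h; the
 * entries below are stand-ins, not the driver's real list. */
#define DEMO_PARAMS(param) \
	param(int, reset, 3) \
	param(unsigned int, inject_probe_failure, 0) \
	param(unsigned long, fake_lmem_start, 0)

struct demo_params {
#define MEMBER(T, name, value) T name;
	DEMO_PARAMS(MEMBER)
#undef MEMBER
};

/* Same dispatch idea as _print_param(): compare the stringified
 * type name, one printf per supported type. */
static void print_param(const char *name, const char *type, const void *x)
{
	if (!strcmp(type, "int"))
		printf("demo.%s=%d\n", name, *(const int *)x);
	else if (!strcmp(type, "unsigned int"))
		printf("demo.%s=%u\n", name, *(const unsigned int *)x);
	else if (!strcmp(type, "unsigned long"))
		printf("demo.%s=%lu\n", name, *(const unsigned long *)x);
}

int main(void)
{
	struct demo_params p = {
#define INIT(T, name, value) .name = (value),
		DEMO_PARAMS(INIT)
#undef INIT
	};

#define PRINT(T, name, value) print_param(#name, #T, &p.name);
	DEMO_PARAMS(PRINT)
#undef PRINT
	return 0;
}

In the driver the type string is a literal, so the __builtin_strcmp() calls constant-fold and each expansion reduces to a single drm_printf().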
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1974e4c78a43..1bb701d32a5d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/console.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
@@ -118,6 +117,14 @@
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
+#define TGL_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ [PIPE_D] = TGL_CURSOR_D_OFFSET, \
+ }
+
#define I9XX_COLORS \
.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
@@ -144,10 +151,13 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
+#define GEN_DEFAULT_REGIONS \
+ .memory_regions = REGION_SMEM | REGION_STOLEN
+
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -161,11 +171,12 @@
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define I845_FEATURES \
GEN(2), \
- .num_pipes = 1, \
+ .pipe_mask = BIT(PIPE_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -178,7 +189,8 @@
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i830_info = {
I830_FEATURES,
@@ -203,7 +215,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
@@ -212,7 +224,8 @@ static const struct intel_device_info intel_i865g_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@@ -287,7 +300,7 @@ static const struct intel_device_info intel_pineview_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
@@ -297,7 +310,8 @@ static const struct intel_device_info intel_pineview_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@@ -337,7 +351,7 @@ static const struct intel_device_info intel_gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
@@ -347,7 +361,8 @@ static const struct intel_device_info intel_gm45_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@@ -363,7 +378,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
#define GEN6_FEATURES \
GEN(6), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -377,7 +392,8 @@ static const struct intel_device_info intel_ironlake_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -411,7 +427,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -420,12 +436,13 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -462,7 +479,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
- .num_pipes = 0, /* legal, last one wins */
+ .pipe_mask = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
@@ -470,13 +487,13 @@ static const struct intel_device_info intel_valleyview_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .num_pipes = 2,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -486,6 +503,7 @@ static const struct intel_device_info intel_valleyview_info = {
I9XX_CURSOR_OFFSETS,
I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define G75_FEATURES \
@@ -560,7 +578,7 @@ static const struct intel_device_info intel_broadwell_gt3_info = {
static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .num_pipes = 3,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -570,7 +588,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
@@ -580,6 +598,7 @@ static const struct intel_device_info intel_cherryview_info = {
CHV_CURSOR_OFFSETS,
CHV_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -593,6 +612,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
+ .display.has_hdcp = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
@@ -631,11 +651,12 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.is_lp = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_fbc = 1, \
+ .display.has_hdcp = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
.display.has_csr = 1, \
@@ -654,7 +675,8 @@ static const struct intel_device_info intel_skylake_gt4_info = {
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN9_DEFAULT_PAGE_SIZES
+ GEN9_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@@ -715,6 +737,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
GEN9_FEATURES, \
GEN(10), \
.ddb_size = 1024, \
+ .display.has_dsc = 1, \
.has_coherent_ggtt = false, \
GLK_COLORS
@@ -787,18 +810,25 @@ static const struct intel_device_info intel_elkhartlake_info = {
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
- .has_global_mocs = 1
+ TGL_CURSOR_OFFSETS, \
+ .has_global_mocs = 1, \
+ .display.has_dsb = 1
static const struct intel_device_info intel_tigerlake_12_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .num_pipes = 4,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .has_rps = false, /* XXX disabled for debugging */
};
+#define GEN12_DGFX_FEATURES \
+ GEN12_FEATURES, \
+ .is_dgfx = 1
+
#undef GEN
#undef PLATFORM
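
Replacing num_pipes with pipe_mask lets a device description express non-contiguous pipe populations (for instance a fused-off pipe) instead of assuming the first N pipes always exist; display code can then test or iterate individual pipes. A small standalone sketch of the idiom, with illustrative values:

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };
#define BIT(n) (1u << (n))

int main(void)
{
	/* Tiger Lake-style population: pipes A-D all present. A mask
	 * could equally describe a part with, say, only A and C. */
	unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) |
				 BIT(PIPE_C) | BIT(PIPE_D);
	int pipe;

	for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
		if (pipe_mask & BIT(pipe))
			printf("pipe %c present\n", 'A' + pipe);
	return 0;
}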
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e42b86827d6b..65d7c2e599de 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,8 +196,11 @@
#include <linux/uuid.h>
#include "gem/i915_gem_context.h"
-#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_perf.h"
@@ -215,6 +218,7 @@
#include "oa/i915_oa_cflgt3.h"
#include "oa/i915_oa_cnl.h"
#include "oa/i915_oa_icl.h"
+#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -291,6 +295,7 @@ static u32 i915_perf_stream_paranoid = true;
/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK 0x3f
+#define OAREPORT_REASON_MASK_EXTENDED 0x7f
#define OAREPORT_REASON_SHIFT 19
#define OAREPORT_REASON_TIMER (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
@@ -336,17 +341,24 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
+static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
+ [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+};
+
#define SAMPLE_OA_REPORT (1<<0)
/**
* struct perf_open_properties - for validated properties given to open a stream
* @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
* @single_context: Whether a single or all gpu contexts should be monitored
+ * @hold_preemption: Whether preemption is disabled for the filtered
+ * context
* @ctx_handle: A gem ctx handle for use with @single_context
* @metrics_set: An ID for an OA unit metric set advertised via sysfs
* @oa_format: An OA unit HW report format
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
+ * @engine: The engine (typically rcs0) being monitored by the OA unit
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -356,6 +368,7 @@ struct perf_open_properties {
u32 sample_flags;
u64 single_context:1;
+ u64 hold_preemption:1;
u64 ctx_handle;
/* OA sampling state */
@@ -363,69 +376,74 @@ struct perf_open_properties {
int oa_format;
bool oa_periodic;
int oa_period_exponent;
+
+ struct intel_engine_cs *engine;
+};
+
+struct i915_oa_config_bo {
+ struct llist_node node;
+
+ struct i915_oa_config *oa_config;
+ struct i915_vma *vma;
};
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
-static void free_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
+void i915_oa_config_release(struct kref *ref)
{
- if (!PTR_ERR(oa_config->flex_regs))
- kfree(oa_config->flex_regs);
- if (!PTR_ERR(oa_config->b_counter_regs))
- kfree(oa_config->b_counter_regs);
- if (!PTR_ERR(oa_config->mux_regs))
- kfree(oa_config->mux_regs);
- kfree(oa_config);
-}
+ struct i915_oa_config *oa_config =
+ container_of(ref, typeof(*oa_config), ref);
-static void put_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
-{
- if (!atomic_dec_and_test(&oa_config->ref_count))
- return;
+ kfree(oa_config->flex_regs);
+ kfree(oa_config->b_counter_regs);
+ kfree(oa_config->mux_regs);
- free_oa_config(dev_priv, oa_config);
+ kfree_rcu(oa_config, rcu);
}
-static int get_oa_config(struct drm_i915_private *dev_priv,
- int metrics_set,
- struct i915_oa_config **out_config)
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
- int ret;
+ struct i915_oa_config *oa_config;
- if (metrics_set == 1) {
- *out_config = &dev_priv->perf.test_config;
- atomic_inc(&dev_priv->perf.test_config.ref_count);
- return 0;
- }
+ rcu_read_lock();
+ if (metrics_set == 1)
+ oa_config = &perf->test_config;
+ else
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ if (oa_config)
+ oa_config = i915_oa_config_get(oa_config);
+ rcu_read_unlock();
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
- if (ret)
- return ret;
+ return oa_config;
+}
- *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
- if (!*out_config)
- ret = -EINVAL;
- else
- atomic_inc(&(*out_config)->ref_count);
+static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
+{
+ i915_oa_config_put(oa_bo->oa_config);
+ i915_vma_put(oa_bo->vma);
+ kfree(oa_bo);
+}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- return ret;
+ return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
+ GEN12_OAG_OATAILPTR_MASK;
}
static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
+ return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}
static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
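
The OA config lifetime moves from a bare atomic ref_count guarded by metrics_lock to a kref with RCU-deferred freeing (kfree_rcu()), so i915_perf_get_oa_config() can look up and pin a config under rcu_read_lock() without taking a mutex. A simplified userspace model of the get/put discipline (the RCU grace period is elided; names are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified model of i915_oa_config_get()/i915_oa_config_put():
 * lookups take a reference, the last put frees. The kernel defers
 * the free with kfree_rcu(); that grace period is elided here. */
struct oa_config {
	atomic_int ref;
	int metrics_set;
};

static struct oa_config *oa_config_get(struct oa_config *c)
{
	atomic_fetch_add(&c->ref, 1);
	return c;
}

static void oa_config_put(struct oa_config *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("freeing config %d\n", c->metrics_set);
		free(c);
	}
}

int main(void)
{
	struct oa_config *c = malloc(sizeof(*c));

	atomic_init(&c->ref, 1);	/* creation reference */
	c->metrics_set = 2;

	oa_config_get(c);		/* a stream starts using it */
	oa_config_put(c);		/* ...and stops */
	oa_config_put(c);		/* creator's put frees it */
	return 0;
}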
@@ -456,7 +474,6 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
*/
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
int report_size = stream->oa_buffer.format_size;
unsigned long flags;
unsigned int aged_idx;
@@ -479,7 +496,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aged_tail = stream->oa_buffer.tails[aged_idx].offset;
aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream);
+ hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
* not in report_size steps...
@@ -536,7 +553,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
+ DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
hw_tail);
}
}
@@ -655,7 +672,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
int report_size = stream->oa_buffer.format_size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
@@ -738,9 +755,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* it to userspace...
*/
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- OAREPORT_REASON_MASK);
+ (IS_GEN(stream->perf->i915, 12) ?
+ OAREPORT_REASON_MASK_EXTENDED :
+ OAREPORT_REASON_MASK));
if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -755,7 +774,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
	 * Note that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
*/
- if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit))
+ if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
+ INTEL_GEN(stream->perf->i915) <= 11)
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -789,7 +809,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.exclusive_stream->ctx ||
+ if (!stream->perf->exclusive_stream->ctx ||
stream->specific_ctx_id == ctx_id ||
stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
@@ -798,7 +818,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.exclusive_stream->ctx &&
+ if (stream->perf->exclusive_stream->ctx &&
stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -822,6 +842,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
+ i915_reg_t oaheadptr;
+
+ oaheadptr = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
+
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
@@ -829,8 +854,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* relative to oa_buf_base so put back here...
*/
head += gtt_offset;
-
- I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
+ intel_uncore_write(uncore, oaheadptr,
+ head & GEN12_OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -864,14 +889,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus;
+ i915_reg_t oastatus_reg;
int ret;
if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OASTATUS : GEN8_OASTATUS;
+
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
/*
* We treat OABUFFER_OVERFLOW as a significant error:
@@ -896,14 +925,14 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
stream->period_exponent);
- dev_priv->perf.ops.oa_disable(stream);
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
* reset GEN8_OASTATUS for us
*/
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
}
if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
@@ -911,8 +940,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- I915_WRITE(GEN8_OASTATUS,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+ intel_uncore_write(uncore, oastatus_reg,
+ oastatus & ~GEN8_OASTATUS_REPORT_LOST);
}
return gen8_append_oa_reports(stream, buf, count, offset);
@@ -943,7 +972,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
int report_size = stream->oa_buffer.format_size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
@@ -1017,7 +1046,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -1043,9 +1072,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
*/
head += gtt_offset;
- I915_WRITE(GEN7_OASTATUS2,
- ((head & GEN7_OASTATUS2_HEAD_MASK) |
- GEN7_OASTATUS2_MEM_SELECT_GGTT));
+ intel_uncore_write(uncore, GEN7_OASTATUS2,
+ (head & GEN7_OASTATUS2_HEAD_MASK) |
+ GEN7_OASTATUS2_MEM_SELECT_GGTT);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -1075,21 +1104,21 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus1;
int ret;
if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
/* XXX: On Haswell we don't have a safe way to clear oastatus1
* bits while the OA unit is enabled (while the tail pointer
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1;
+ oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1120,10 +1149,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
stream->period_exponent);
- dev_priv->perf.ops.oa_disable(stream);
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
}
if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
@@ -1131,7 +1160,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.gen7_latched_oastatus1 |=
+ stream->perf->gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
@@ -1196,25 +1225,18 @@ static int i915_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- return dev_priv->perf.ops.read(stream, buf, count, offset);
+ return stream->perf->ops.read(stream, buf, count, offset);
}
static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
struct i915_gem_engines_iter it;
- struct drm_i915_private *i915 = stream->dev_priv;
struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
int err;
- err = i915_mutex_lock_interruptible(&i915->drm);
- if (err)
- return ERR_PTR(err);
-
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- if (ce->engine->class != RENDER_CLASS)
+ if (ce->engine != stream->engine) /* first match! */
continue;
/*
@@ -1229,10 +1251,6 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
}
i915_gem_context_unlock_engines(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err)
- return ERR_PTR(err);
-
return stream->pinned_ctx;
}
@@ -1248,14 +1266,13 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(i915)) {
+ switch (INTEL_GEN(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
@@ -1269,7 +1286,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 8:
case 9:
case 10:
- if (USES_GUC_SUBMISSION(i915)) {
+ if (intel_engine_in_execlists_submission_mode(ce->engine)) {
+ stream->specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ } else {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1289,31 +1310,23 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
- } else {
- stream->specific_ctx_id_mask =
- (1U << GEN8_CTX_ID_WIDTH) - 1;
- stream->specific_ctx_id =
- upper_32_bits(ce->lrc_desc);
- stream->specific_ctx_id &=
- stream->specific_ctx_id_mask;
}
break;
- case 11: {
+ case 11:
+ case 12: {
stream->specific_ctx_id_mask =
- ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
- ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
- ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- stream->specific_ctx_id = upper_32_bits(ce->lrc_desc);
- stream->specific_ctx_id &=
- stream->specific_ctx_id_mask;
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
break;
}
default:
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
+ ce->tag = stream->specific_ctx_id_mask;
+
DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
stream->specific_ctx_id,
stream->specific_ctx_id_mask);
@@ -1330,69 +1343,76 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- stream->specific_ctx_id = INVALID_CTX_ID;
- stream->specific_ctx_id_mask = 0;
-
ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
- mutex_lock(&dev_priv->drm.struct_mutex);
+ ce->tag = 0; /* recomputed on next submission after parking */
intel_context_unpin(ce);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
+
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
}
static void
free_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
-
- mutex_lock(&i915->drm.struct_mutex);
-
i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
- mutex_unlock(&i915->drm.struct_mutex);
-
stream->oa_buffer.vaddr = NULL;
}
+static void
+free_oa_configs(struct i915_perf_stream *stream)
+{
+ struct i915_oa_config_bo *oa_bo, *tmp;
+
+ i915_oa_config_put(stream->oa_config);
+ llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+ free_oa_config_bo(oa_bo);
+}
+
+static void
+free_noa_wait(struct i915_perf_stream *stream)
+{
+ i915_vma_unpin_and_release(&stream->noa_wait, 0);
+}
+
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- BUG_ON(stream != dev_priv->perf.exclusive_stream);
+ BUG_ON(stream != perf->exclusive_stream);
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.exclusive_stream = NULL;
- dev_priv->perf.ops.disable_metric_set(stream);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
if (stream->ctx)
oa_put_render_ctx_id(stream);
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
+ free_noa_wait(stream);
- if (dev_priv->perf.spurious_report_rs.missed) {
+ if (perf->spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.spurious_report_rs.missed);
+ perf->spurious_report_rs.missed);
}
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
@@ -1401,13 +1421,14 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
- I915_WRITE(GEN7_OASTATUS2,
- gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
+ intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
+ gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
- I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+ intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
+ gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
@@ -1419,7 +1440,7 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.gen7_latched_oastatus1 = 0;
+ stream->perf->gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1434,25 +1455,22 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
stream->pollin = false;
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- I915_WRITE(GEN8_OASTATUS, 0);
- I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
+ intel_uncore_write(uncore, GEN8_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN8_OABUFFER_UDW, 0);
+ intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
/*
* PRM says:
@@ -1462,9 +1480,9 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
* to enable proper functionality of the overflow
* bit."
*/
- I915_WRITE(GEN8_OABUFFER, gtt_offset |
+ intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
- I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
+ intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
@@ -1493,35 +1511,82 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ stream->pollin = false;
+}
+
+static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+
+ intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
+ gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = gtt_offset;
+
+ /*
+ * PRM says:
+ *
+ * "This MMIO must be set before the OATAILPTR
+ * register and after the OAHEADPTR register. This is
+ * to enable proper functionality of the overflow
+ * bit."
+ */
+ intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
+ OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
+ intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
+ gtt_offset & GEN12_OAG_OATAILPTR_MASK);
+
+ /* Mark that we need updated tail pointers to read from... */
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+
+ /*
+ * Reset state used to recognise context switches, affecting which
+ * reports we will forward to userspace while filtering for a single
+ * context.
+ */
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
+
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+
/*
- * Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
+ * NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen8_append_oa_reports() that looks at the
+ * reason field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
*/
+ memset(stream->oa_buffer.vaddr, 0,
+ stream->oa_buffer.vma->size);
+
stream->pollin = false;
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_gem_object *bo;
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_vma *vma;
int ret;
if (WARN_ON(stream->oa_buffer.vma))
return -ENODEV;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
DRM_ERROR("Failed to allocate OA buffer\n");
- ret = PTR_ERR(bo);
- goto unlock;
+ return PTR_ERR(bo);
}
i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
@@ -1541,11 +1606,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
goto err_unpin;
}
- DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(stream->oa_buffer.vma),
- stream->oa_buffer.vaddr);
-
- goto unlock;
+ return 0;
err_unpin:
__i915_vma_unpin(vma);
@@ -1556,55 +1617,389 @@ err_unref:
stream->oa_buffer.vaddr = NULL;
stream->oa_buffer.vma = NULL;
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
-static void config_oa_regs(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg *regs,
- u32 n_regs)
+static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
+ bool save, i915_reg_t reg, u32 offset,
+ u32 dword_count)
+{
+ u32 cmd;
+ u32 d;
+
+ cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(stream->perf->i915) >= 8)
+ cmd++;
+
+ for (d = 0; d < dword_count; d++) {
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
+ *cs++ = intel_gt_scratch_offset(stream->engine->gt,
+ offset) + 4 * d;
+ *cs++ = 0;
+ }
+
+ return cs;
+}
+
+static int alloc_noa_wait(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ const u64 delay_ticks = 0xffffffffffffffff -
+ DIV64_U64_ROUND_UP(
+ atomic64_read(&stream->perf->noa_programming_delay) *
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
+ 1000000ull);
+ const u32 base = stream->engine->mmio_base;
+#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
+ u32 *batch, *ts0, *cs, *jump;
+ int ret, i;
+ enum {
+ START_TS,
+ NOW_TS,
+ DELTA_TS,
+ JUMP_PREDICATE,
+ DELTA_TARGET,
+ N_CS_GPR
+ };
+
+ bo = i915_gem_object_create_internal(i915, 4096);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ return PTR_ERR(bo);
+ }
+
+ /*
+	 * We pin this in the GGTT because multiple OA config BOs jump to
+	 * this address, so it needs to stay fixed for the lifetime of the
+	 * i915/perf stream.
+ */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ ret = PTR_ERR(batch);
+ goto err_unpin;
+ }
+
+ /* Save registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, true /* save */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* First timestamp snapshot location. */
+ ts0 = cs;
+
+ /*
+ * Initial snapshot of the timestamp register to implement the wait.
+	 * We work with 32-bit values, so clear out the top 32 bits of the
+	 * register because the ALU operates on 64-bit values.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
+
+ /*
+ * This is the location we're going to jump back into until the
+ * required amount of time has passed.
+ */
+ jump = cs;
+
+ /*
+	 * Take another snapshot of the timestamp register. Take care to
+	 * clear the top 32 bits of CS_GPR(NOW_TS), as it is used for other
+	 * operations below.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
+
+ /*
+	 * Take the difference between the two timestamps and store it in
+	 * CS_GPR(DELTA_TS), with the carry flag in CS_GPR(JUMP_PREDICATE).
+ */
+ *cs++ = MI_MATH(5);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
+ *cs++ = MI_MATH_SUB;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
+	 * timestamp has rolled over its 32 bits) into the predicate register
+ * to be used for the predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Restart from the beginning if we had timestamps roll over. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
+ *cs++ = 0;
+
+ /*
+	 * Now take the diff between the two previous timestamps and add it
+	 * to:
+	 *   ((1 << 64) - 1) - delay in ticks
+	 *
+	 * When the Carry Flag contains 1, the elapsed time is longer than
+	 * the expected delay, and we can exit the wait loop.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
+ *cs++ = lower_32_bits(delay_ticks);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
+ *cs++ = upper_32_bits(delay_ticks);
+
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+ * Transfer the result into the predicate register to be used for the
+ * predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Predicate the jump. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
+ *cs++ = 0;
+
+ /* Restore registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, false /* restore */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* And return to the ring. */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
+
+ i915_gem_object_flush_map(bo);
+ i915_gem_object_unpin_map(bo);
+
+ stream->noa_wait = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin_and_release(&vma, 0);
+err_unref:
+ i915_gem_object_put(bo);
+ return ret;
+}
+
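
The batch above busy-waits without CPU involvement: it loads delay_ticks = (2^64 - 1) - wait_ticks, adds the elapsed timestamp delta with MI_MATH, and uses the ALU carry flag as the loop-exit predicate, since the addition wraps exactly when the delta exceeds the programmed delay. The same trick in plain C:

#include <stdint.h>
#include <stdio.h>

/* The carry-flag trick from the noa_wait batch, in plain C: with
 * target = ~(uint64_t)0 - wait_ticks, the addition target + elapsed
 * wraps around (i.e. sets the carry flag) exactly when elapsed
 * exceeds wait_ticks. */
static int waited_long_enough(uint64_t elapsed, uint64_t wait_ticks)
{
	uint64_t target = ~(uint64_t)0 - wait_ticks;

	return target + elapsed < target;	/* wrapped == carry set */
}

int main(void)
{
	printf("%d\n", waited_long_enough(10, 100));	/* 0: keep looping */
	printf("%d\n", waited_long_enough(101, 100));	/* 1: exit the loop */
	return 0;
}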
+static u32 *write_cs_mi_lri(u32 *cs,
+ const struct i915_oa_reg *reg_data,
+ u32 n_regs)
{
u32 i;
for (i = 0; i < n_regs; i++) {
- const struct i915_oa_reg *reg = regs + i;
+ if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
+ u32 n_lri = min_t(u32,
+ n_regs - i,
+ MI_LOAD_REGISTER_IMM_MAX_REGS);
- I915_WRITE(reg->addr, reg->value);
+ *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
+ }
+ *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
+ *cs++ = reg_data[i].value;
}
+
+ return cs;
}
-static void delay_after_mux(void)
+static int num_lri_dwords(int num_regs)
{
+ int count = 0;
+
+ if (num_regs > 0) {
+ count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
+ count += num_regs * 2;
+ }
+
+ return count;
+}
+
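
num_lri_dwords() sizes the buffer that write_cs_mi_lri() fills: each MI_LOAD_REGISTER_IMM packet costs one header dword plus an (offset, value) dword pair per register, and a new header is emitted every MI_LOAD_REGISTER_IMM_MAX_REGS registers. A standalone check of the arithmetic, assuming the cap of 126 registers per packet:

#include <stdio.h>

/* Assumed hardware cap: registers per MI_LOAD_REGISTER_IMM packet. */
#define MI_LOAD_REGISTER_IMM_MAX_REGS 126

/* Mirror of num_lri_dwords(): one header dword per packet plus an
 * (offset, value) dword pair per register. */
static int num_lri_dwords(int num_regs)
{
	int count = 0;

	if (num_regs > 0) {
		count += (num_regs + MI_LOAD_REGISTER_IMM_MAX_REGS - 1) /
			 MI_LOAD_REGISTER_IMM_MAX_REGS;
		count += num_regs * 2;
	}
	return count;
}

int main(void)
{
	printf("%d\n", num_lri_dwords(1));	/* 3: header + one pair */
	printf("%d\n", num_lri_dwords(126));	/* 253: one full packet */
	printf("%d\n", num_lri_dwords(127));	/* 256: spills into a 2nd */
	return 0;
}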
+static struct i915_oa_config_bo *
+alloc_oa_config_buffer(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_oa_config_bo *oa_bo;
+ size_t config_length = 0;
+ u32 *cs;
+ int err;
+
+ oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
+ if (!oa_bo)
+ return ERR_PTR(-ENOMEM);
+
+ config_length += num_lri_dwords(oa_config->mux_regs_len);
+ config_length += num_lri_dwords(oa_config->b_counter_regs_len);
+ config_length += num_lri_dwords(oa_config->flex_regs_len);
+ config_length += 3; /* MI_BATCH_BUFFER_START */
+ config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_oa_bo;
+ }
+
+ cs = write_cs_mi_lri(cs,
+ oa_config->mux_regs,
+ oa_config->mux_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->flex_regs,
+ oa_config->flex_regs_len);
+
+ /* Jump into the active wait. */
+ *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8);
+ *cs++ = i915_ggtt_offset(stream->noa_wait);
+ *cs++ = 0;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ oa_bo->vma = i915_vma_instance(obj,
+ &stream->engine->gt->ggtt->vm,
+ NULL);
+ if (IS_ERR(oa_bo->vma)) {
+ err = PTR_ERR(oa_bo->vma);
+ goto err_oa_bo;
+ }
+
+ oa_bo->oa_config = i915_oa_config_get(oa_config);
+ llist_add(&oa_bo->node, &stream->oa_config_bos);
+
+ return oa_bo;
+
+err_oa_bo:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(oa_bo);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *
+get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
+{
+ struct i915_oa_config_bo *oa_bo;
+
/*
- * It apparently takes a fairly long time for a new MUX
-	 * configuration to be applied after these register writes.
- * This delay duration was derived empirically based on the
- * render_basic config but hopefully it covers the maximum
- * configuration latency.
- *
- * As a fallback, the checks in _append_oa_reports() to skip
- * invalid OA reports do also seem to work to discard reports
- * generated before this config has completed - albeit not
- * silently.
- *
- * Unfortunately this is essentially a magic number, since we
- * don't currently know of a reliable mechanism for predicting
- * how long the MUX config will take to apply and besides
- * seeing invalid reports we don't know of a reliable way to
- * explicitly check that the MUX config has landed.
- *
-	 * It's even possible we've mischaracterized the underlying
- * problem - it just seems like the simplest explanation why
- * a delay at this location would mitigate any invalid reports.
+ * Look for the buffer in the already allocated BOs attached
+ * to the stream.
*/
- usleep_range(15000, 20000);
+ llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
+ if (oa_bo->oa_config == oa_config &&
+ memcmp(oa_bo->oa_config->uuid,
+ oa_config->uuid,
+ sizeof(oa_config->uuid)) == 0)
+ goto out;
+ }
+
+ oa_bo = alloc_oa_config_buffer(stream, oa_config);
+ if (IS_ERR(oa_bo))
+ return ERR_CAST(oa_bo);
+
+out:
+ return i915_vma_get(oa_bo->vma);
+}
+
+static int emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = get_oa_vma(stream, oa_config);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_vma_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (err)
+ goto err_add_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start, 0,
+ I915_DISPATCH_SECURE);
+err_add_request:
+ i915_request_add(rq);
+err_vma_unpin:
+ i915_vma_unpin(vma);
+err_vma_put:
+ i915_vma_put(vma);
+ return err;
+}
+
+static struct intel_context *oa_context(struct i915_perf_stream *stream)
+{
+ return stream->pinned_ctx ?: stream->engine->kernel_context;
}
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
/*
* PRM:
@@ -1616,31 +2011,24 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* count the events from non-render domain. Unit level clock
* gating for RCS should also be disabled.
*/
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
-
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return 0;
+ return emit_oa_config(stream, stream->oa_config, oa_context(stream));
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
- ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
- GEN7_DOP_CLOCK_GATE_ENABLE));
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
@@ -1672,14 +2060,11 @@ static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
* in the case that the OA unit has been disabled.
*/
static void
-gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
- struct intel_context *ce,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
+gen8_update_reg_state_unlocked(const struct intel_context *ce,
+ const struct i915_perf_stream *stream)
+{
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1690,21 +2075,28 @@ gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
EU_PERF_CNTL5,
EU_PERF_CNTL6,
};
+ u32 *reg_state = ce->lrc_reg_state;
int i;
- CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME);
+ if (IS_GEN(stream->perf->i915, 12)) {
+ u32 format = stream->oa_buffer.format;
- for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i],
- oa_config_flex_reg(oa_config, flex_regs[i]));
+ reg_state[ctx_oactxctrl + 1] =
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
+ } else {
+ reg_state[ctx_oactxctrl + 1] =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
}
- CTX_REG(reg_state,
- CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- intel_sseu_make_rpcs(i915, &ce->sseu));
+ for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++)
+ reg_state[ctx_flexeu0 + i * 2 + 1] =
+ oa_config_flex_reg(stream->oa_config, flex_regs[i]);
+
+ reg_state[CTX_R_PWR_CLK_STATE] =
+ intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
}
struct flex {
@@ -1728,7 +2120,7 @@ gen8_store_flex(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
do {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset + (flex->offset + 1) * sizeof(u32);
+ *cs++ = offset + flex->offset * sizeof(u32);
*cs++ = 0;
*cs++ = flex->value;
} while (flex++, --count);
@@ -1832,6 +2224,36 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
+static int gen12_emit_oar_config(struct intel_context *ce, bool enable)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int err = 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base));
+ *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+out:
+ i915_request_add(rq);
+
+ return err;
+}
+
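
_MASKED_FIELD() follows the usual i915 masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits take effect, so gen12_emit_oar_config() can flip GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE without disturbing the rest of RING_CONTEXT_CONTROL. A userspace model of the semantics (the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the i915 masked-register convention: the high
 * 16 bits of the written value select which low bits take effect. */
#define MASKED_FIELD(mask, value) (((mask) << 16) | (value))

static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t ctx_ctrl = 0x00a5;	/* arbitrary pre-existing bits */
	uint32_t oar_enable = 1u << 8;	/* illustrative bit position */

	ctx_ctrl = masked_write(ctx_ctrl, MASKED_FIELD(oar_enable, oar_enable));
	printf("enabled:  %#x\n", ctx_ctrl);	/* other bits untouched */
	ctx_ctrl = masked_write(ctx_ctrl, MASKED_FIELD(oar_enable, 0));
	printf("disabled: %#x\n", ctx_ctrl);
	return 0;
}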
/*
* Manages updating the per-context aspects of the OA stream
* configuration across all contexts.
@@ -1856,24 +2278,22 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
*
* Note: it's only the RCS/Render context that has any OA state.
*/
-static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *i915 = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->perf->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
-#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
+ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
struct flex regs[] = {
{
GEN8_R_PWR_CLK_STATE,
CTX_R_PWR_CLK_STATE,
},
{
- GEN8_OACTXCONTROL,
- i915->perf.ctx_oactxctrl_offset,
- ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME)
+ IS_GEN(i915, 12) ?
+ GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
},
{ EU_PERF_CNTL0, ctx_flexeuN(0) },
{ EU_PERF_CNTL1, ctx_flexeuN(1) },
@@ -1885,13 +2305,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
};
#undef ctx_flexeuN
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- int i;
+ struct i915_gem_context *ctx, *cn;
+ size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs);
+ int i, err;
- for (i = 2; i < ARRAY_SIZE(regs); i++)
+ if (IS_GEN(i915, 12)) {
+ u32 format = stream->oa_buffer.format;
+
+ regs[1].value =
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
+ } else {
+ regs[1].value =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
+ }
+
+ for (i = 2; !!ctx_flexeu0 && i < array_size; i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&stream->perf->lock);
/*
* The OA register config is setup through the context image. This image
@@ -1909,16 +2343,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
* context. Contexts idle at the time of reconfiguration are not
* trapped behind the barrier.
*/
- list_for_each_entry(ctx, &i915->contexts.list, link) {
- int err;
-
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
if (ctx == i915->kernel_context)
continue;
- err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
- if (err)
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
+ err = gen8_configure_context(ctx, regs, array_size);
+ if (err) {
+ i915_gem_context_put(ctx);
return err;
+ }
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
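
This walk replaces the struct_mutex-protected list iteration: the contexts lock is held only long enough to pick an entry and pin it with kref_get_unless_zero(), is dropped for the potentially sleeping reconfiguration, and list_safe_reset_next() revalidates the cursor once the lock is retaken. A simplified userspace sketch of the locking shape (reference counting reduced to a plain counter):

#include <pthread.h>
#include <stdio.h>

/* Shape of the walk in lrc_configure_all_contexts(): hold the list
 * lock only to pick an entry, pin it, drop the lock for work that
 * may sleep, then re-take the lock and resync the cursor. */
struct ctx {
	struct ctx *next;
	int refs;
	int id;
};

static pthread_mutex_t contexts_lock = PTHREAD_MUTEX_INITIALIZER;

static void configure(struct ctx *c)
{
	printf("configure ctx %d\n", c->id);	/* may sleep in the kernel */
}

static void walk(struct ctx *head)
{
	struct ctx *c, *next;

	pthread_mutex_lock(&contexts_lock);
	for (c = head; c; c = next) {
		c->refs++;			/* kref_get_unless_zero() */
		pthread_mutex_unlock(&contexts_lock);

		configure(c);

		pthread_mutex_lock(&contexts_lock);
		next = c->next;			/* list_safe_reset_next() */
		c->refs--;			/* i915_gem_context_put() */
	}
	pthread_mutex_unlock(&contexts_lock);
}

int main(void)
{
	struct ctx c2 = { NULL, 1, 2 };
	struct ctx c1 = { &c2, 1, 1 };

	walk(&c1);
	return 0;
}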
/*
* After updating all other contexts, we need to modify ourselves.
@@ -1927,14 +2372,13 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
*/
for_each_uabi_engine(engine, i915) {
struct intel_context *ce = engine->kernel_context;
- int err;
if (engine->class != RENDER_CLASS)
continue;
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
+ err = gen8_modify_self(ce, regs, array_size);
if (err)
return err;
}
@@ -1944,8 +2388,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1971,10 +2415,10 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 11)) {
- I915_WRITE(GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
+ intel_uncore_write(uncore, GEN8_OA_DEBUG,
+ _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -1982,45 +2426,102 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(stream, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
+ return emit_oa_config(stream, oa_config, oa_context(stream));
+}
+
+static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ int ret;
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
+ /* Disable clk ratio reports, like previous Gens. */
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ /*
+ * If the user didn't require OA reports, instruct the
+ * hardware not to emit ctx switch reports.
+ */
+			   (!(stream->sample_flags & SAMPLE_OA_REPORT) ?
+			    _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) :
+			    _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS)));
+
+ intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
+ (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
+ (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
+ : 0);
- return 0;
+ /*
+	 * Update all contexts prior to writing the mux configurations, as we
+ * to make sure all slices/subslices are ON before writing to NOA
+ * registers.
+ */
+ ret = lrc_configure_all_contexts(stream, oa_config);
+ if (ret)
+ return ret;
+
+ /*
+	 * On Gen12, performance counters are context saved/restored.
+	 * Only enable them for the context that requested this.
+ */
+ if (stream->ctx) {
+ ret = gen12_emit_oar_config(stream->pinned_ctx,
+ oa_config != NULL);
+ if (ret)
+ return ret;
+ }
+
+ return emit_oa_config(stream, oa_config, oa_context(stream));
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
+
+ /* Reset all contexts' slices/subslices configurations. */
+ lrc_configure_all_contexts(stream, NULL);
+
+ /* Make sure we disable noa to save power. */
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+}
+
+static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL);
+
+ /* disable the context save/restore or OAR counters */
+ if (stream->ctx)
+ gen12_emit_oar_config(stream->pinned_ctx, false);
/* Make sure we disable noa to save power. */
- I915_WRITE(RPM_CONFIG1,
- I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = stream->specific_ctx_id;
bool periodic = stream->periodic;
@@ -2038,19 +2539,19 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
*/
gen7_init_oa_buffer(stream);
- I915_WRITE(GEN7_OACONTROL,
- (ctx_id & GEN7_OACONTROL_CTX_MASK) |
- (period_exponent <<
- GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
- (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
- (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
- (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
- GEN7_OACONTROL_ENABLE);
+ intel_uncore_write(uncore, GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 report_format = stream->oa_buffer.format;
/*
@@ -2069,9 +2570,28 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* filtering and instead filter on the cpu based on the context-id
* field of reports
*/
- I915_WRITE(GEN8_OACONTROL, (report_format <<
- GEN8_OA_REPORT_FORMAT_SHIFT) |
- GEN8_OA_COUNTER_ENABLE);
+ intel_uncore_write(uncore, GEN8_OACONTROL,
+ (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
+ GEN8_OA_COUNTER_ENABLE);
+}
+
+static void gen12_oa_enable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 report_format = stream->oa_buffer.format;
+
+ /*
+ * If we don't want OA reports from the OA buffer, then we don't even
+ * need to program the OAG unit.
+ */
+ if (!(stream->sample_flags & SAMPLE_OA_REPORT))
+ return;
+
+ gen12_init_oa_buffer(stream);
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
+ (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}
/**
@@ -2085,9 +2605,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_enable(stream);
if (stream->periodic)
hrtimer_start(&stream->poll_check_timer,
@@ -2097,7 +2615,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN7_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -2108,7 +2626,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN8_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -2117,6 +2635,18 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
+static void gen12_oa_disable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
+ GEN12_OAG_OACONTROL,
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
+ 50))
+ DRM_ERROR("wait for OA to be disabled timed out\n");
+}
+
/**
* i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
* @stream: An i915 perf stream opened for OA metrics
@@ -2127,9 +2657,7 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.ops.oa_disable(stream);
+ stream->perf->ops.oa_disable(stream);
if (stream->periodic)
hrtimer_cancel(&stream->poll_check_timer);
@@ -2166,15 +2694,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
int format_size;
int ret;
- /* If the sysfs metrics/ directory wasn't registered for some
+ if (!props->engine) {
+ DRM_DEBUG("OA engine not specified\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the sysfs metrics/ directory wasn't registered for some
* reason then don't let userspace try their luck with config
* IDs
*/
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -2184,16 +2718,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.ops.enable_metric_set) {
+ if (!perf->ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
- /* To avoid the complexity of having to accurately filter
+ /*
+ * To avoid the complexity of having to accurately filter
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.exclusive_stream) {
+ if (perf->exclusive_stream) {
DRM_DEBUG("OA unit already in use\n");
return -EBUSY;
}
@@ -2203,9 +2738,12 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
+ stream->engine = props->engine;
+ stream->uncore = stream->engine->gt->uncore;
+
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa_formats[props->oa_format].size;
+ format_size = perf->oa_formats[props->oa_format].size;
stream->sample_flags |= SAMPLE_OA_REPORT;
stream->sample_size += format_size;
@@ -2214,8 +2752,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (WARN_ON(stream->oa_buffer.format_size == 0))
return -EINVAL;
+ stream->hold_preemption = props->hold_preemption;
+
stream->oa_buffer.format =
- dev_priv->perf.oa_formats[props->oa_format].format;
+ perf->oa_formats[props->oa_format].format;
stream->periodic = props->oa_periodic;
if (stream->periodic)
@@ -2229,9 +2769,16 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
}
- ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+ ret = alloc_noa_wait(stream);
if (ret) {
+ DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
+ goto err_noa_wait_alloc;
+ }
+
+ stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
+ if (!stream->oa_config) {
DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ ret = -EINVAL;
goto err_config;
}
@@ -2247,27 +2794,24 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_get(stream->engine);
+ intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
stream->ops = &i915_oa_stream_ops;
- dev_priv->perf.exclusive_stream = stream;
+ perf->exclusive_stream = stream;
- ret = dev_priv->perf.ops.enable_metric_set(stream);
+ ret = perf->ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ DRM_DEBUG("opening stream oa config uuid=%s\n",
+ stream->oa_config->uuid);
hrtimer_init(&stream->poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -2278,38 +2822,40 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- dev_priv->perf.exclusive_stream = NULL;
- dev_priv->perf.ops.disable_metric_set(stream);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
-err_lock:
free_oa_buffer(stream);
err_oa_buf_alloc:
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
err_config:
+ free_noa_wait(stream);
+
+err_noa_wait_alloc:
if (stream->ctx)
oa_put_render_ctx_id(stream);
return ret;
}
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *regs)
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct i915_perf_stream *stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+
if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, stream);
}
/**
@@ -2379,7 +2925,7 @@ static ssize_t i915_perf_read(struct file *file,
loff_t *ppos)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
ssize_t ret;
/* To ensure it's handled consistently we simply treat all reads of a
@@ -2402,15 +2948,15 @@ static ssize_t i915_perf_read(struct file *file,
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file,
buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
} while (ret == -EAGAIN);
} else {
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file, buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -2448,7 +2994,6 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
/**
* i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
- * @dev_priv: i915 device instance
* @stream: An i915 perf stream
* @file: An i915 perf stream file
* @wait: poll() state table
@@ -2457,15 +3002,14 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
* &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
* will be woken for new stream data.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: any poll events that are ready without sleeping
*/
-static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
- struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait)
+static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
{
__poll_t events = 0;
@@ -2499,12 +3043,12 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
__poll_t ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_poll_locked(stream, file, wait);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2529,6 +3073,9 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
if (stream->ops->enable)
stream->ops->enable(stream);
+
+ if (stream->hold_preemption)
+ i915_gem_context_set_nopreempt(stream->ctx);
}
/**
@@ -2553,17 +3100,54 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
/* Allow stream->ops->disable() to refer to this */
stream->enabled = false;
+ if (stream->hold_preemption)
+ i915_gem_context_clear_nopreempt(stream->ctx);
+
if (stream->ops->disable)
stream->ops->disable(stream);
}
+static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ unsigned long metrics_set)
+{
+ struct i915_oa_config *config;
+ long ret = stream->oa_config->id;
+
+ config = i915_perf_get_oa_config(stream->perf, metrics_set);
+ if (!config)
+ return -EINVAL;
+
+ if (config != stream->oa_config) {
+ int err;
+
+ /*
+ * If OA is bound to a specific context, emit the
+ * reconfiguration inline from that context. The update
+ * will then be ordered with respect to submission on that
+ * context.
+ *
+ * When set globally, we use a low priority kernel context,
+ * so it will effectively take effect when idle.
+ */
+ err = emit_oa_config(stream, config, oa_context(stream));
+ if (err == 0)
+ config = xchg(&stream->oa_config, config);
+ else
+ ret = err;
+ }
+
+ i915_oa_config_put(config);
+
+ return ret;
+}
+
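For context, this reconfiguration path is what the new I915_PERF_IOCTL_CONFIG case (below) funnels into. A minimal userspace sketch, with a hypothetical helper name, assuming stream_fd came from DRM_IOCTL_I915_PERF_OPEN and config_id from the add-config ioctl:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Sketch: swap the OA metric set on a live stream. On success the
	 * ioctl returns the id of the previously installed config. */
	static long swap_oa_config(int stream_fd, unsigned long config_id)
	{
		/* The config id is passed by value as the ioctl argument. */
		return ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);
	}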
/**
* i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
* @stream: An i915 perf stream
* @cmd: the ioctl request
* @arg: the ioctl data
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: zero on success or a negative error code. Returns -EINVAL for
@@ -2580,6 +3164,8 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
case I915_PERF_IOCTL_DISABLE:
i915_perf_disable_locked(stream);
return 0;
+ case I915_PERF_IOCTL_CONFIG:
+ return i915_perf_config_locked(stream, arg);
}
return -EINVAL;
@@ -2601,12 +3187,12 @@ static long i915_perf_ioctl(struct file *file,
unsigned long arg)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
long ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_ioctl_locked(stream, cmd, arg);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2618,7 +3204,7 @@ static long i915_perf_ioctl(struct file *file,
* Frees all resources associated with the given i915 perf @stream, disabling
* any associated data capture in the process.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*/
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
@@ -2629,8 +3215,6 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
if (stream->ops->destroy)
stream->ops->destroy(stream);
- list_del(&stream->link);
-
if (stream->ctx)
i915_gem_context_put(stream->ctx);
@@ -2651,14 +3235,14 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
i915_perf_destroy_locked(stream);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
/* Release the reference the perf stream kept on the driver. */
- drm_dev_put(&dev_priv->drm);
+ drm_dev_put(&perf->i915->drm);
return 0;
}
@@ -2680,7 +3264,7 @@ static const struct file_operations fops = {
/**
* i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
* @props: individually validated u64 property value pairs
* @file: drm file
@@ -2688,7 +3272,7 @@ static const struct file_operations fops = {
* See i915_perf_ioctl_open() for interface details.
*
* Implements further stream config validation and stream initialization on
- * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
* taken to serialize with any non-file-operation driver hooks.
*
* Note: at this point the @props have only been validated in isolation and
@@ -2703,7 +3287,7 @@ static const struct file_operations fops = {
* Returns: zero on success or a negative error code.
*/
static int
-i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+i915_perf_open_ioctl_locked(struct i915_perf *perf,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props,
struct drm_file *file)
@@ -2734,17 +3318,34 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
* rest of the system, which we consider acceptable for a
* non-privileged client.
*
- * For Gen8+ the OA unit no longer supports clock gating off for a
+ * For Gen8->11 the OA unit no longer supports clock gating off for a
* specific context and the kernel can't securely stop the counters
* from updating as system-wide / global values. Even though we can
* filter reports based on the included context ID we can't block
* clients from seeing the raw / global counter values via
* MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
* enable the OA unit by default.
+ *
+ * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
+ * per context basis. So we can relax requirements there if the user
+	 * doesn't request global stream access (i.e. query-based sampling
+	 * using MI_REPORT_PERF_COUNT).
*/
- if (IS_HASWELL(dev_priv) && specific_ctx)
+ if (IS_HASWELL(perf->i915) && specific_ctx)
+ privileged_op = false;
+ else if (IS_GEN(perf->i915, 12) && specific_ctx &&
+ (props->sample_flags & SAMPLE_OA_REPORT) == 0)
privileged_op = false;
+ if (props->hold_preemption) {
+ if (!props->single_context) {
+ DRM_DEBUG("preemption disable with no context\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ privileged_op = true;
+ }
+
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
@@ -2752,7 +3353,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
*/
if (privileged_op &&
i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
}
@@ -2763,7 +3364,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_ctx;
}
- stream->dev_priv = dev_priv;
+ stream->perf = perf;
stream->ctx = specific_ctx;
ret = i915_oa_stream_init(stream, param, props);
@@ -2779,8 +3380,6 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_flags;
}
- list_add(&stream->link, &dev_priv->perf.streams);
-
if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
@@ -2789,7 +3388,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
if (stream_fd < 0) {
ret = stream_fd;
- goto err_open;
+ goto err_flags;
}
if (!(param->flags & I915_PERF_FLAG_DISABLED))
@@ -2798,12 +3397,10 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
/* Take a reference on the driver that will be kept with stream_fd
* until its release.
*/
- drm_dev_get(&dev_priv->drm);
+ drm_dev_get(&perf->i915->drm);
return stream_fd;
-err_open:
- list_del(&stream->link);
err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
@@ -2816,15 +3413,15 @@ err:
return ret;
}
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
}
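As a rough worked example, assuming a 12 MHz command-streamer timestamp (cs_timestamp_frequency_khz == 12000), the period works out to (2 << exponent) / 12e6 seconds: exponent 0 gives roughly 167 ns between samples and exponent 16 roughly 10.9 ms, with each step of the exponent doubling the OA sampling period.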
/**
* read_properties_unlocked - validate + copy userspace stream open properties
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @uprops: The array of u64 key value pairs given by userspace
* @n_props: The number of key value pairs expected in @uprops
* @props: The stream configuration built up while validating properties
@@ -2837,7 +3434,7 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
* we shouldn't validate or assume anything about ordering here. This doesn't
* rule out defining new properties with ordering requirements in the future.
*/
-static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+static int read_properties_unlocked(struct i915_perf *perf,
u64 __user *uprops,
u32 n_props,
struct perf_open_properties *props)
@@ -2852,6 +3449,15 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
return -EINVAL;
}
+ /* At the moment we only support using i915-perf on the RCS. */
+ props->engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0);
+ if (!props->engine) {
+ DRM_DEBUG("No RENDER-capable engines\n");
+ return -EINVAL;
+ }
+
/* Considering that ID = 0 is reserved and assuming that we don't
* (currently) expect any configurations to ever specify duplicate
* values for a particular property ID then the last _PROP_MAX value is
@@ -2903,7 +3509,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa_formats[value].size) {
+ if (!perf->oa_formats[value].size) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -2924,7 +3530,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
*/
BUILD_BUG_ON(sizeof(oa_period) != 8);
- oa_period = oa_exponent_to_ns(dev_priv, value);
+ oa_period = oa_exponent_to_ns(perf, value);
/* This check is primarily to ensure that oa_period <=
* UINT32_MAX (before passing to do_div which only
@@ -2949,6 +3555,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
+ case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
+ props->hold_preemption = !!value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
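From the userspace side these properties arrive as flattened u64 key/value pairs. A sketch of an open call, where metrics_set_id and drm_fd are assumed to have been obtained elsewhere and everything else is uapi names; this is a fragment meant to live inside a function, not a definitive recipe:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	uint64_t props[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		/* num_properties counts key/value pairs, not u64s */
		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)props,
	};
	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);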
@@ -2978,7 +3587,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
* mutex to avoid an awkward lockdep with mmap_sem.
*
* Most of the implementation details are handled by
- * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * i915_perf_open_ioctl_locked() after taking the &perf->lock
* mutex for serializing with any non-file-operation driver hooks.
*
* Return: A newly opened i915 Perf stream file descriptor or negative
@@ -2987,13 +3596,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_open_param *param = data;
struct perf_open_properties props;
u32 known_open_flags;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3006,124 +3615,130 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = read_properties_unlocked(dev_priv,
+ ret = read_properties_unlocked(perf,
u64_to_user_ptr(param->properties_ptr),
param->num_properties,
&props);
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
+ mutex_unlock(&perf->lock);
return ret;
}
/**
* i915_perf_register - exposes i915-perf to userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* In particular OA metric sets are advertised under a sysfs metrics/
* directory allowing userspace to enumerate valid IDs that can be
* used to open an i915-perf stream.
*/
-void i915_perf_register(struct drm_i915_private *dev_priv)
+void i915_perf_register(struct drm_i915_private *i915)
{
+ struct i915_perf *perf = &i915->perf;
int ret;
- if (!dev_priv->perf.initialized)
+ if (!perf->i915)
return;
/* To be sure we're synchronized with an attempted
* i915_perf_open_ioctl(); considering that we register after
* being exposed to userspace.
*/
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
- dev_priv->perf.metrics_kobj =
+ perf->metrics_kobj =
kobject_create_and_add("metrics",
- &dev_priv->drm.primary->kdev->kobj);
- if (!dev_priv->perf.metrics_kobj)
+ &i915->drm.primary->kdev->kobj);
+ if (!perf->metrics_kobj)
goto exit;
- sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- i915_perf_load_test_config_icl(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv))
- i915_perf_load_test_config_sklgt2(dev_priv);
- else if (IS_SKL_GT3(dev_priv))
- i915_perf_load_test_config_sklgt3(dev_priv);
- else if (IS_SKL_GT4(dev_priv))
- i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
-}
-
- if (dev_priv->perf.test_config.id == 0)
+ sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
+
+ if (IS_TIGERLAKE(i915)) {
+ i915_perf_load_test_config_tgl(i915);
+ } else if (INTEL_GEN(i915) >= 11) {
+ i915_perf_load_test_config_icl(i915);
+ } else if (IS_CANNONLAKE(i915)) {
+ i915_perf_load_test_config_cnl(i915);
+ } else if (IS_COFFEELAKE(i915)) {
+ if (IS_CFL_GT2(i915))
+ i915_perf_load_test_config_cflgt2(i915);
+ if (IS_CFL_GT3(i915))
+ i915_perf_load_test_config_cflgt3(i915);
+ } else if (IS_GEMINILAKE(i915)) {
+ i915_perf_load_test_config_glk(i915);
+ } else if (IS_KABYLAKE(i915)) {
+ if (IS_KBL_GT2(i915))
+ i915_perf_load_test_config_kblgt2(i915);
+ else if (IS_KBL_GT3(i915))
+ i915_perf_load_test_config_kblgt3(i915);
+ } else if (IS_BROXTON(i915)) {
+ i915_perf_load_test_config_bxt(i915);
+ } else if (IS_SKYLAKE(i915)) {
+ if (IS_SKL_GT2(i915))
+ i915_perf_load_test_config_sklgt2(i915);
+ else if (IS_SKL_GT3(i915))
+ i915_perf_load_test_config_sklgt3(i915);
+ else if (IS_SKL_GT4(i915))
+ i915_perf_load_test_config_sklgt4(i915);
+ } else if (IS_CHERRYVIEW(i915)) {
+ i915_perf_load_test_config_chv(i915);
+ } else if (IS_BROADWELL(i915)) {
+ i915_perf_load_test_config_bdw(i915);
+ } else if (IS_HASWELL(i915)) {
+ i915_perf_load_test_config_hsw(i915);
+ }
+
+ if (perf->test_config.id == 0)
goto sysfs_error;
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.test_config.sysfs_metric);
+ ret = sysfs_create_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
if (ret)
goto sysfs_error;
- atomic_set(&dev_priv->perf.test_config.ref_count, 1);
+ perf->test_config.perf = perf;
+ kref_init(&perf->test_config.ref);
goto exit;
sysfs_error:
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
exit:
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/**
* i915_perf_unregister - hide i915-perf from userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* i915-perf state cleanup is split up into an 'unregister' and
* 'deinit' phase where the interface is first hidden from
* userspace by i915_perf_unregister() before cleaning up
* remaining state in i915_perf_fini().
*/
-void i915_perf_unregister(struct drm_i915_private *dev_priv)
+void i915_perf_unregister(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.metrics_kobj)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj)
return;
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.test_config.sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
}
-static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
static const i915_reg_t flex_eu_regs[] = {
EU_PERF_CNTL0,
@@ -3143,56 +3758,80 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
return false;
}
-static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+#define ADDR_IN_RANGE(addr, start, end) \
+ ((addr) >= (start) && \
+ (addr) <= (end))
+
+#define REG_IN_RANGE(addr, start, end) \
+ ((addr) >= i915_mmio_reg_offset(start) && \
+ (addr) <= i915_mmio_reg_offset(end))
+
+#define REG_EQUAL(addr, mmio) \
+ ((addr) == i915_mmio_reg_offset(mmio))
+
+static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
+}
+
+static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
+ REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
+ REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
+ REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
+}
+
+static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
- addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
- addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OACEC0_0) &&
- addr <= i915_mmio_reg_offset(OACEC7_1));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
}
-static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
- (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
- addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
+ return gen8_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
}
-static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
- (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
- addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
+ REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
+ REG_EQUAL(addr, HSW_MBVID2_MISR0);
}
-static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen8_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
}
-static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x25100 && addr <= 0x2FF90) ||
- (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
- addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
- addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
+ return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
+ REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
+ REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
+ REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
+ REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
}
-static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x182300 && addr <= 0x1823A4);
+ return REG_EQUAL(addr, NOA_WRITE) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_EQUAL(addr, RPM_CONFIG0) ||
+ REG_EQUAL(addr, RPM_CONFIG1) ||
+ REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
}
static u32 mask_reg_value(u32 reg, u32 val)
@@ -3201,21 +3840,21 @@ static u32 mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
+ if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
+ if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
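The _MASKED_BIT_* values handled here follow i915's masked-register convention: the top 16 bits of the written value are a write mask selecting which of the low 16 bits take effect. Roughly, as a sketch of the semantics rather than the driver's exact macros:

	/* Illustrative only; the driver's real helpers live in i915_reg.h. */
	#define EXAMPLE_MASKED_ENABLE(bit)  (((bit) << 16) | (bit)) /* set bit */
	#define EXAMPLE_MASKED_DISABLE(bit) ((bit) << 16)           /* clear bit */

This is why mask_reg_value() can drop a single bit without a read-modify-write: clearing both the mask and value bits makes the hardware leave that position untouched.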
-static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
- bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
+ bool (*is_valid)(struct i915_perf *perf, u32 addr),
u32 __user *regs,
u32 n_regs)
{
@@ -3245,7 +3884,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (err)
goto addr_err;
- if (!is_valid(dev_priv, addr)) {
+ if (!is_valid(perf, addr)) {
DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
err = -EINVAL;
goto addr_err;
@@ -3278,7 +3917,7 @@ static ssize_t show_dynamic_id(struct device *dev,
return sprintf(buf, "%d\n", oa_config->id);
}
-static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
struct i915_oa_config *oa_config)
{
sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
@@ -3293,7 +3932,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
oa_config->sysfs_metric.name = oa_config->uuid;
oa_config->sysfs_metric.attrs = oa_config->attrs;
- return sysfs_create_group(dev_priv->perf.metrics_kobj,
+ return sysfs_create_group(perf->metrics_kobj,
&oa_config->sysfs_metric);
}
@@ -3313,17 +3952,18 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
+	struct i915_oa_reg *regs;
int err, id;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -3346,7 +3986,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- atomic_set(&oa_config->ref_count, 1);
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
if (!uuid_is_valid(args->uuid)) {
DRM_DEBUG("Invalid uuid format for OA config\n");
@@ -3360,59 +4001,59 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
oa_config->mux_regs_len = args->n_mux_regs;
- oa_config->mux_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_mux_reg,
- u64_to_user_ptr(args->mux_regs_ptr),
- args->n_mux_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_mux_reg,
+ u64_to_user_ptr(args->mux_regs_ptr),
+ args->n_mux_regs);
- if (IS_ERR(oa_config->mux_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for mux_regs\n");
- err = PTR_ERR(oa_config->mux_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->mux_regs = regs;
oa_config->b_counter_regs_len = args->n_boolean_regs;
- oa_config->b_counter_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_b_counter_reg,
- u64_to_user_ptr(args->boolean_regs_ptr),
- args->n_boolean_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_b_counter_reg,
+ u64_to_user_ptr(args->boolean_regs_ptr),
+ args->n_boolean_regs);
- if (IS_ERR(oa_config->b_counter_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
- err = PTR_ERR(oa_config->b_counter_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->b_counter_regs = regs;
- if (INTEL_GEN(dev_priv) < 8) {
+ if (INTEL_GEN(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
}
} else {
oa_config->flex_regs_len = args->n_flex_regs;
- oa_config->flex_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_flex_reg,
- u64_to_user_ptr(args->flex_regs_ptr),
- args->n_flex_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_flex_reg,
+ u64_to_user_ptr(args->flex_regs_ptr),
+ args->n_flex_regs);
- if (IS_ERR(oa_config->flex_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for flex_regs\n");
- err = PTR_ERR(oa_config->flex_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->flex_regs = regs;
}
- err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ err = mutex_lock_interruptible(&perf->metrics_lock);
if (err)
goto reg_err;
/* We shouldn't have too many configs, so this iteration shouldn't be
* too costly.
*/
- idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
if (!strcmp(tmp->uuid, oa_config->uuid)) {
DRM_DEBUG("OA config already exists with this uuid\n");
err = -EADDRINUSE;
@@ -3420,14 +4061,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
}
}
- err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+ err = create_dynamic_oa_sysfs_entry(perf, oa_config);
if (err) {
DRM_DEBUG("Failed to create sysfs entry for OA config\n");
goto sysfs_err;
}
/* Config id 0 is invalid, id 1 for kernel stored test config. */
- oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+ oa_config->id = idr_alloc(&perf->metrics_idr,
oa_config, 2,
0, GFP_KERNEL);
if (oa_config->id < 0) {
@@ -3436,16 +4077,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
goto sysfs_err;
}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
return oa_config->id;
sysfs_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
reg_err:
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
DRM_DEBUG("Failed to add new OA config\n");
return err;
}
@@ -3464,12 +4105,12 @@ reg_err:
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
u64 *arg = data;
struct i915_oa_config *oa_config;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3479,31 +4120,33 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -EACCES;
}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ ret = mutex_lock_interruptible(&perf->metrics_lock);
if (ret)
- goto lock_err;
+ return ret;
- oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+ oa_config = idr_find(&perf->metrics_idr, *arg);
if (!oa_config) {
DRM_DEBUG("Failed to remove unknown OA config\n");
ret = -ENOENT;
- goto config_err;
+ goto err_unlock;
}
GEM_BUG_ON(*arg != oa_config->id);
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &oa_config->sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
+
+ idr_remove(&perf->metrics_idr, *arg);
- idr_remove(&dev_priv->perf.metrics_idr, *arg);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
-config_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
-lock_err:
+ return 0;
+
+err_unlock:
+ mutex_unlock(&perf->metrics_lock);
return ret;
}
@@ -3551,103 +4194,126 @@ static struct ctl_table dev_root[] = {
/**
* i915_perf_init - initialize i915-perf state on module load
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
*
* Note: i915-perf initialization is split into an 'init' and 'register'
* phase with the i915_perf_register() exposing state to userspace.
*/
-void i915_perf_init(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
- hsw_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.ops.read = gen7_oa_read;
- dev_priv->perf.ops.oa_hw_tail_read =
- gen7_oa_hw_tail_read;
-
- dev_priv->perf.oa_formats = hsw_oa_formats;
- } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
+void i915_perf_init(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ /* XXX const struct i915_perf_ops! */
+
+ if (IS_HASWELL(i915)) {
+ perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg = NULL;
+ perf->ops.enable_metric_set = hsw_enable_metric_set;
+ perf->ops.disable_metric_set = hsw_disable_metric_set;
+ perf->ops.oa_enable = gen7_oa_enable;
+ perf->ops.oa_disable = gen7_oa_disable;
+ perf->ops.read = gen7_oa_read;
+ perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
+
+ perf->oa_formats = hsw_oa_formats;
+ } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
/* Note that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa_formats = gen8_plus_oa_formats;
+ perf->ops.read = gen8_oa_read;
- dev_priv->perf.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.ops.read = gen8_oa_read;
- dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ if (IS_GEN_RANGE(i915, 8, 9)) {
+ perf->oa_formats = gen8_plus_oa_formats;
- if (IS_GEN_RANGE(dev_priv, 8, 9)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.ops.is_valid_mux_reg =
+ if (IS_CHERRYVIEW(i915)) {
+ perf->ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 8)) {
- dev_priv->perf.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.ctx_flexeu0_offset = 0x2ce;
+ if (IS_GEN(i915, 8)) {
+ perf->ctx_oactxctrl_offset = 0x120;
+ perf->ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.gen8_valid_ctx_bit = BIT(25);
+ perf->gen8_valid_ctx_bit = BIT(25);
} else {
- dev_priv->perf.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.ctx_flexeu0_offset = 0x3de;
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
+ perf->gen8_valid_ctx_bit = BIT(16);
}
- } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
+ } else if (IS_GEN_RANGE(i915, 10, 11)) {
+ perf->oa_formats = gen8_plus_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 10)) {
- dev_priv->perf.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.ctx_flexeu0_offset = 0x3de;
+ if (IS_GEN(i915, 10)) {
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
} else {
- dev_priv->perf.ctx_oactxctrl_offset = 0x124;
- dev_priv->perf.ctx_flexeu0_offset = 0x78e;
+ perf->ctx_oactxctrl_offset = 0x124;
+ perf->ctx_flexeu0_offset = 0x78e;
}
- dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
+ perf->gen8_valid_ctx_bit = BIT(16);
+ } else if (IS_GEN(i915, 12)) {
+ perf->oa_formats = gen12_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
+ gen12_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg =
+ gen12_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ perf->ops.oa_enable = gen12_oa_enable;
+ perf->ops.oa_disable = gen12_oa_disable;
+ perf->ops.enable_metric_set = gen12_enable_metric_set;
+ perf->ops.disable_metric_set = gen12_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
+
+ perf->ctx_flexeu0_offset = 0;
+ perf->ctx_oactxctrl_offset = 0x144;
}
}
- if (dev_priv->perf.ops.enable_metric_set) {
- INIT_LIST_HEAD(&dev_priv->perf.streams);
- mutex_init(&dev_priv->perf.lock);
+ if (perf->ops.enable_metric_set) {
+ mutex_init(&perf->lock);
oa_sample_rate_hard_limit = 1000 *
- (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
- dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+ (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+ perf->sysctl_header = register_sysctl_table(dev_root);
- mutex_init(&dev_priv->perf.metrics_lock);
- idr_init(&dev_priv->perf.metrics_idr);
+ mutex_init(&perf->metrics_lock);
+ idr_init(&perf->metrics_idr);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
@@ -3659,44 +4325,70 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*
* Using the same limiting factors as printk_ratelimit()
*/
- ratelimit_state_init(&dev_priv->perf.spurious_report_rs,
- 5 * HZ, 10);
+ ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
/* Since we use a DRM_NOTE for spurious reports it would be
* inconsistent to let __ratelimit() automatically print a
* warning for throttling.
*/
- ratelimit_set_flags(&dev_priv->perf.spurious_report_rs,
+ ratelimit_set_flags(&perf->spurious_report_rs,
RATELIMIT_MSG_ON_RELEASE);
- dev_priv->perf.initialized = true;
+ atomic64_set(&perf->noa_programming_delay,
+ 500 * 1000 /* 500us */);
+
+ perf->i915 = i915;
}
}
static int destroy_config(int id, void *p, void *data)
{
- struct drm_i915_private *dev_priv = data;
- struct i915_oa_config *oa_config = p;
-
- put_oa_config(dev_priv, oa_config);
-
+ i915_oa_config_put(p);
return 0;
}
/**
* i915_perf_fini - Counter part to i915_perf_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*/
-void i915_perf_fini(struct drm_i915_private *dev_priv)
+void i915_perf_fini(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.initialized)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->i915)
return;
- idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
- idr_destroy(&dev_priv->perf.metrics_idr);
+ idr_for_each(&perf->metrics_idr, destroy_config, perf);
+ idr_destroy(&perf->metrics_idr);
- unregister_sysctl_table(dev_priv->perf.sysctl_header);
+ unregister_sysctl_table(perf->sysctl_header);
- memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops));
+ memset(&perf->ops, 0, sizeof(perf->ops));
+ perf->i915 = NULL;
+}
- dev_priv->perf.initialized = false;
+/**
+ * i915_perf_ioctl_version - Version of the i915-perf subsystem
+ *
+ * This version number is used by userspace to detect available features.
+ */
+int i915_perf_ioctl_version(void)
+{
+ /*
+ * 1: Initial version
+ * I915_PERF_IOCTL_ENABLE
+ * I915_PERF_IOCTL_DISABLE
+ *
+ * 2: Added runtime modification of OA config.
+ * I915_PERF_IOCTL_CONFIG
+ *
+ * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
+ * preemption on a particular context so that performance data is
+ * accessible from a delta of MI_RPC reports without looking at the
+ * OA buffer.
+ */
+ return 3;
}
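Userspace is expected to gate feature use on this number. A probe sketch, assuming the revision is surfaced through a getparam such as I915_PARAM_PERF_REVISION (that plumbing is not part of this diff):

	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION, /* assumed param name */
		.value = &value,
	};
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value >= 2)
		; /* runtime reconfiguration (I915_PERF_IOCTL_CONFIG) available */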
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_perf.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
index a412b16d9ffc..4ceebce72060 100644
--- a/drivers/gpu/drm/i915/i915_perf.h
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -6,11 +6,15 @@
#ifndef __I915_PERF_H__
#define __I915_PERF_H__
+#include <linux/kref.h>
#include <linux/types.h>
+#include "i915_perf_types.h"
+
struct drm_device;
struct drm_file;
struct drm_i915_private;
+struct i915_oa_config;
struct intel_context;
struct intel_engine_cs;
@@ -18,6 +22,7 @@ void i915_perf_init(struct drm_i915_private *i915);
void i915_perf_fini(struct drm_i915_private *i915);
void i915_perf_register(struct drm_i915_private *i915);
void i915_perf_unregister(struct drm_i915_private *i915);
+int i915_perf_ioctl_version(void);
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -25,8 +30,29 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *reg_state);
+
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set);
+
+static inline struct i915_oa_config *
+i915_oa_config_get(struct i915_oa_config *oa_config)
+{
+ if (kref_get_unless_zero(&oa_config->ref))
+ return oa_config;
+ else
+ return NULL;
+}
+
+void i915_oa_config_release(struct kref *ref);
+static inline void i915_oa_config_put(struct i915_oa_config *oa_config)
+{
+ if (!oa_config)
+ return;
+
+ kref_put(&oa_config->ref, i915_oa_config_release);
+}
#endif /* __I915_PERF_H__ */
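These kref helpers give an OA config the usual get/put lifetime pattern; i915_oa_config_get() returns NULL once the refcount has already dropped to zero, so a typical caller sketch looks like:

	struct i915_oa_config *config;

	config = i915_oa_config_get(stream->oa_config);
	if (config) {
		/* safe to dereference while we hold the reference;
		 * do_something() is a stand-in for any consumer */
		do_something(config->uuid);
		i915_oa_config_put(config);
	}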
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
new file mode 100644
index 000000000000..74ddc20a0d37
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -0,0 +1,435 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_PERF_TYPES_H_
+#define _I915_PERF_TYPES_H_
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/llist.h>
+#include <linux/poll.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/wait.h>
+
+#include "i915_reg.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct file;
+struct i915_gem_context;
+struct i915_perf;
+struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
+struct i915_oa_format {
+ u32 format;
+ int size;
+};
+
+struct i915_oa_reg {
+ i915_reg_t addr;
+ u32 value;
+};
+
+struct i915_oa_config {
+ struct i915_perf *perf;
+
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ struct kref ref;
+ struct rcu_head rcu;
+};
+
+struct i915_perf_stream;
+
+/**
+ * struct i915_perf_stream_ops - the OPs to support a specific stream type
+ */
+struct i915_perf_stream_ops {
+ /**
+ * @enable: Enables the collection of HW samples, either in response to
+ * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
+ * without `I915_PERF_FLAG_DISABLED`.
+ */
+ void (*enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable: Disables the collection of HW samples, either in response
+ * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
+ * the stream.
+ */
+ void (*disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @poll_wait: Call poll_wait, passing a wait queue that will be woken
+ * once there is something ready to read() for the stream
+ */
+ void (*poll_wait)(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait);
+
+ /**
+ * @wait_unlocked: For handling a blocking read, wait until there is
+	 * something ready to read() for the stream. E.g. wait on the same
+ * wait queue that would be passed to poll_wait().
+ */
+ int (*wait_unlocked)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy buffered metrics as records to userspace
+	 * **buf**: the userspace destination buffer
+	 * **count**: the number of bytes to copy, requested by userspace
+	 * **offset**: zero at the start of the read, updated as the read
+	 * proceeds; it represents how many bytes have been copied so far and
+ * the buffer offset for copying the next record.
+ *
+ * Copy as many buffered i915 perf samples and records for this stream
+ * to userspace as will fit in the given buffer.
+ *
+ * Only write complete records; returning -%ENOSPC if there isn't room
+ * for a complete record.
+ *
+ * Return any error condition that results in a short read such as
+ * -%ENOSPC or -%EFAULT, even though these may be squashed before
+ * returning to userspace.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @destroy: Cleanup any stream specific resources.
+ *
+ * The stream will always be disabled before this is called.
+ */
+ void (*destroy)(struct i915_perf_stream *stream);
+};
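A hedged sketch of how a stream's read() fop is expected to drive @read: the offset accumulates across the call, and a short read caused by -%ENOSPC is squashed once at least one record has been copied. The function name and return policy here are illustrative only.

    static ssize_t example_stream_read(struct i915_perf_stream *stream,
                                       char __user *buf, size_t count)
    {
        size_t offset = 0;
        int ret;

        ret = stream->ops->read(stream, buf, count, &offset);

        /* A short read is still a read: report copied bytes if any. */
        if (offset)
            return offset;

        return ret ?: -EAGAIN; /* illustrative policy for an empty read */
    }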
+
+/**
+ * struct i915_perf_stream - state for a single open stream FD
+ */
+struct i915_perf_stream {
+ /**
+ * @perf: i915_perf backpointer
+ */
+ struct i915_perf *perf;
+
+ /**
+ * @uncore: mmio access path
+ */
+ struct intel_uncore *uncore;
+
+ /**
+ * @engine: Engine associated with this performance stream.
+ */
+ struct intel_engine_cs *engine;
+
+ /**
+ * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties given when opening a stream, representing the contents
+ * of a single sample as read() by userspace.
+ */
+ u32 sample_flags;
+
+ /**
+ * @sample_size: Considering the configured contents of a sample
+ * combined with the required header size, this is the total size
+ * of a single sample record.
+ */
+ int sample_size;
+
+ /**
+ * @ctx: The specific context being monitored, or %NULL if measuring
+ * system-wide across all contexts.
+ */
+ struct i915_gem_context *ctx;
+
+ /**
+ * @enabled: Whether the stream is currently enabled, considering
+ * whether the stream was opened in a disabled state and based
+ * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
+ */
+ bool enabled;
+
+ /**
+ * @hold_preemption: Whether preemption is put on hold for command
+ * submissions done on the @ctx. This is useful for some drivers that
+ * cannot easily post-process the OA buffer context to subtract the
+ * delta of performance counters not associated with @ctx.
+ */
+ bool hold_preemption;
+
+ /**
+ * @ops: The callbacks providing the implementation of this specific
+ * type of configured stream.
+ */
+ const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
+
+ /**
+ * @oa_config_bos: A list of struct i915_oa_config_bo allocated lazily
+ * each time @oa_config changes.
+ */
+ struct llist_head oa_config_bos;
+
+ /**
+ * @pinned_ctx: The OA context-specific information.
+ */
+ struct intel_context *pinned_ctx;
+
+ /**
+ * @specific_ctx_id: The id of the specific context.
+ */
+ u32 specific_ctx_id;
+
+ /**
+ * @specific_ctx_id_mask: The mask used to mask the specific_ctx_id bits.
+ */
+ u32 specific_ctx_id_mask;
+
+ /**
+ * @poll_check_timer: High resolution timer that will periodically
+ * check for data in the circular OA buffer so userspace can be notified
+ * (e.g. during a read() or poll()).
+ */
+ struct hrtimer poll_check_timer;
+
+ /**
+ * @poll_wq: The wait queue that the hrtimer callback wakes when it
+ * sees data ready to read in the circular OA buffer.
+ */
+ wait_queue_head_t poll_wq;
+
+ /**
+ * @pollin: Whether there is data available to read.
+ */
+ bool pollin;
+
+ /**
+ * @periodic: Whether periodic sampling is currently enabled.
+ */
+ bool periodic;
+
+ /**
+ * @period_exponent: The OA unit sampling frequency is derived from this.
+ */
+ int period_exponent;
+
+ /**
+ * @oa_buffer: State of the OA buffer.
+ */
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ u32 last_ctx_id;
+ int format;
+ int format_size;
+ int size_exponent;
+
+ /**
+ * @ptr_lock: Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state needs to be read
+ * consistently from a hrtimer callback (atomic context) and
+ * read() fop (user context) with tail pointer updates happening
+ * in atomic context and head updates in user context and the
+ * (unlikely) possibility of read() errors needing to reset all
+ * head/tail state.
+ *
+ * Note: Contention/performance aren't currently a significant
+ * concern here considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that reads typically only
+ * happen in response to a hrtimer event and likely complete
+ * before the next callback.
+ *
+ * Note: This lock is not held *while* reading and copying data
+ * to userspace so the value of head observed in hrtimer
+ * callbacks won't represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready
+ * to be used for reading.
+ *
+ * Initial values of 0xffffffff are invalid and imply that an
+ * update is required (and should be ignored by an attempted
+ * read).
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
+ * was read; used to determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * @head: Although we can always read back the head pointer register,
+ * we prefer to avoid trusting the HW state, just to avoid any
+ * risk that some hardware condition could somehow bump the
+ * head pointer unpredictably and cause us to forward the wrong
+ * OA buffer data to userspace.
+ */
+ u32 head;
+ } oa_buffer;
+
+ /**
+ * @noa_wait: A batch buffer doing a wait on the GPU for the NOA logic to be
+ * reprogrammed.
+ */
+ struct i915_vma *noa_wait;
+};
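To make the aging/aged tail scheme above concrete, here is a hedged sketch of the consumer side: the read path only trusts the 'aged' tail, snapshotted under @ptr_lock, and treats the 0xffffffff sentinel as "nothing aged yet". The function name is illustrative; data is then copied out without holding the lock, as the @ptr_lock note describes.

    /* Hedged sketch: snapshot consistent head/aged-tail state for a read. */
    static bool example_oa_buffer_snapshot(struct i915_perf_stream *stream,
                                           u32 *head, u32 *tail)
    {
        unsigned long flags;
        bool valid;

        spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
        *head = stream->oa_buffer.head;
        *tail = stream->oa_buffer.tails[stream->oa_buffer.aged_tail_idx].offset;
        valid = *tail != 0xffffffff; /* sentinel: no aged tail yet */
        spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

        return valid;
    }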
+
+/**
+ * struct i915_oa_ops - Gen-specific implementation of an OA unit stream
+ */
+struct i915_oa_ops {
+ /**
+ * @is_valid_b_counter_reg: Validates register's address for
+ * programming boolean counters for a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates register's address for programming mux
+ * for a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates register's address for programming
+ * flex EU filtering for a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @enable_metric_set: Selects and applies any MUX configuration to set
+ * up the Boolean and Custom (B/C) counters that are part of the
+ * counter reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+ * the OA unit.
+ */
+ void (*disable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_enable: Enable periodic sampling
+ */
+ void (*oa_enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_disable: Disable periodic sampling
+ */
+ void (*oa_disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy data from the circular OA buffer into a given userspace
+ * buffer.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @oa_hw_tail_read: read the OA tail pointer register
+ *
+ * In particular this enables us to share all the fiddly code for
+ * handling the OA unit tail pointer race that affects multiple
+ * generations.
+ */
+ u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
+};
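The three is_valid_*_reg hooks are how a user-supplied config is vetted against a platform's register allowlist before being accepted. A hedged sketch of the per-entry check, with the surrounding error handling trimmed and the function name illustrative:

    /* Hedged sketch: reject a config containing unknown mux registers. */
    static int example_check_mux_regs(struct i915_perf *perf,
                                      const struct i915_oa_reg *regs,
                                      u32 n_regs)
    {
        u32 i;

        for (i = 0; i < n_regs; i++) {
            u32 addr = i915_mmio_reg_offset(regs[i].addr);

            if (!perf->ops.is_valid_mux_reg(perf, addr))
                return -EINVAL; /* not on this platform's allowlist */
        }

        return 0;
    }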
+
+struct i915_perf {
+ struct drm_i915_private *i915;
+
+ struct kobject *metrics_kobj;
+ struct ctl_table_header *sysctl_header;
+
+ /*
+ * Lock associated with adding/modifying/removing OA configs
+ * in perf->metrics_idr.
+ */
+ struct mutex metrics_lock;
+
+ /*
+ * List of dynamic configurations (struct i915_oa_config); you
+ * need to hold perf->metrics_lock to access it.
+ */
+ struct idr metrics_idr;
+
+ /*
+ * Lock associated with anything below within this structure
+ * except exclusive_stream.
+ */
+ struct mutex lock;
+
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated with its file
+ * descriptor, additional locking is required.
+ */
+ struct i915_perf_stream *exclusive_stream;
+
+ /**
+ * For rate limiting any notifications of spurious
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
+
+ struct i915_oa_config test_config;
+
+ u32 gen7_latched_oastatus1;
+ u32 ctx_oactxctrl_offset;
+ u32 ctx_flexeu0_offset;
+
+ /**
+ * The RPT_ID/reason field for Gen8+ includes a bit
+ * to determine if the CTX ID in the report is valid,
+ * but the specific bit differs between Gen 8 and 9.
+ */
+ u32 gen8_valid_ctx_bit;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+
+ atomic64_t noa_programming_delay;
+};
+
+#endif /* _I915_PERF_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8e251e719390..0d40dccd1409 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -11,6 +11,8 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_pmu.h"
@@ -116,22 +118,124 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
return enable;
}
-void i915_pmu_gt_parked(struct drm_i915_private *i915)
+static u64 __get_rc6(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = gt->i915;
+ u64 val;
- if (!pmu->base.event_init)
- return;
+ val = intel_rc6_residency_ns(&gt->rc6,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+#if IS_ENABLED(CONFIG_PM)
+
+static inline s64 ktime_since(const ktime_t kt)
+{
+ return ktime_to_ns(ktime_sub(ktime_get(), kt));
+}
+
+static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
+{
+ u64 val;
- spin_lock_irq(&pmu->lock);
/*
- * Signal sampling timer to stop if only engine events are enabled and
- * GPU went idle.
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
*/
- pmu->timer_enabled = pmu_needs_timer(pmu, false);
- spin_unlock_irq(&pmu->lock);
+ val = ktime_since(pmu->sleep_last);
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
+
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ return val;
}
+static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
+{
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a larger value than returned
+ * previously.
+ */
+ if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ return val;
+}
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct i915_pmu *pmu = &i915->pmu;
+ unsigned long flags;
+ u64 val;
+
+ val = 0;
+ if (intel_gt_pm_get_if_awake(gt)) {
+ val = __get_rc6(gt);
+ intel_gt_pm_put(gt);
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (val)
+ val = __pmu_update_rc6(pmu, val);
+ else
+ val = __pmu_estimate_rc6(pmu);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return val;
+}
+
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+
+ pmu->sleep_last = ktime_get();
+}
+
+static void unpark_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ /* Estimate how long we slept and accumulate that into rc6 counters */
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ __pmu_estimate_rc6(pmu);
+}
+
+#else
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ return __get_rc6(gt);
+}
+
+static void park_rc6(struct drm_i915_private *i915) {}
+static void unpark_rc6(struct drm_i915_private *i915) {}
+
+#endif
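The park/unpark split above guarantees the counter never goes backwards: while parked, readers get the last real value plus the time asleep, and the next real readout is clamped against any estimate already handed out. A small userspace model of that clamp, for illustration only (the struct and function are not part of the driver):

    /* Hedged model of __pmu_update_rc6()'s monotonicity clamp. */
    #include <stdint.h>

    struct rc6_model { uint64_t real, estimated; };

    static uint64_t rc6_update(struct rc6_model *m, uint64_t val)
    {
        if (val >= m->estimated) {
            m->estimated = 0; /* the real counter caught up */
            m->real = val;
            return val;
        }
        return m->estimated;  /* keep reporting the larger estimate */
    }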
+
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
@@ -143,6 +247,26 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
}
}
+void i915_pmu_gt_parked(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ park_rc6(i915);
+
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
+
+ spin_unlock_irq(&pmu->lock);
+}
+
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
@@ -151,10 +275,14 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
return;
spin_lock_irq(&pmu->lock);
+
/*
* Re-enable sampling timer when GPU goes active.
*/
__i915_pmu_maybe_start_timer(pmu);
+
+ unpark_rc6(i915);
+
spin_unlock_irq(&pmu->lock);
}
@@ -174,7 +302,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
unsigned long flags;
bool busy;
@@ -194,6 +322,10 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if (val & RING_WAIT_SEMAPHORE)
add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ goto skip;
+
/*
* While waiting on a semaphore or event, MI_MODE reports the
* ring as idle. However, previously using the seqno, and with
@@ -227,25 +359,26 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct i915_pmu *pmu = &i915->pmu;
+ struct intel_rps *rps = &gt->rps;
if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
- val = i915->gt_pm.rps.cur_freq;
+ val = rps->cur_freq;
if (intel_gt_pm_get_if_awake(gt)) {
val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
- val = intel_get_cagf(i915, val);
+ val = intel_get_cagf(rps, val);
intel_gt_pm_put(gt);
}
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(i915, val),
+ intel_gpu_freq(rps, val),
period_ns / 1000);
}
if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
- intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
+ intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
}
}
@@ -426,104 +559,6 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __get_rc6(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- u64 val;
-
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
-
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
-
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
-
- return val;
-}
-
-static u64 get_rc6(struct intel_gt *gt)
-{
-#if IS_ENABLED(CONFIG_PM)
- struct drm_i915_private *i915 = gt->i915;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
- unsigned long flags;
- u64 val;
-
- wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (wakeref) {
- val = __get_rc6(gt);
- intel_runtime_pm_put(rpm, wakeref);
-
- /*
- * If we are coming back from being runtime suspended we must
- * be careful not to report a larger value than returned
- * previously.
- */
-
- spin_lock_irqsave(&pmu->lock, flags);
-
- if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- }
-
- spin_unlock_irqrestore(&pmu->lock, flags);
- } else {
- struct device *kdev = rpm->kdev;
-
- /*
- * We are runtime suspended.
- *
- * Report the delta from when the device was suspended to now,
- * on top of the last known real value, as the approximated RC6
- * counter value.
- */
- spin_lock_irqsave(&pmu->lock, flags);
-
- /*
- * After the above branch intel_runtime_pm_get_if_in_use failed
- * to get the runtime PM reference we cannot assume we are in
- * runtime suspend since we can either: a) race with coming out
- * of it before we took the power.lock, or b) there are other
- * states than suspended which can bring us here.
- *
- * We need to double-check that we are indeed currently runtime
- * suspended and if not we cannot do better than report the last
- * known RC6 value.
- */
- if (pm_runtime_status_suspended(kdev)) {
- val = pm_runtime_suspended_time(kdev);
-
- if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- pmu->suspended_time_last = val;
-
- val -= pmu->suspended_time_last;
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6].cur;
- }
-
- spin_unlock_irqrestore(&pmu->lock, flags);
- }
-
- return val;
-#else
- return __get_rc6(gt);
-#endif
-}
-
static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -843,8 +878,8 @@ create_event_attributes(struct i915_pmu *pmu)
const char *name;
const char *unit;
} events[] = {
- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
};
@@ -1047,21 +1082,43 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
cpuhp_remove_multi_state(cpuhp_slot);
}
+static bool is_igp(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /* IGP is 0000:00:02.0 */
+ return pci_domain_nr(pdev->bus) == 0 &&
+ pdev->bus->number == 0 &&
+ PCI_SLOT(pdev->devfn) == 2 &&
+ PCI_FUNC(pdev->devfn) == 0;
+}
+
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- int ret;
+ int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
dev_info(i915->drm.dev, "PMU not supported for this GPU.");
return;
}
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs) {
- ret = -ENOMEM;
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+
+ if (!is_igp(i915))
+ pmu->name = kasprintf(GFP_KERNEL,
+ "i915-%s",
+ dev_name(i915->drm.dev));
+ else
+ pmu->name = "i915";
+ if (!pmu->name)
goto err;
- }
+
+ i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
+ if (!i915_pmu_events_attr_group.attrs)
+ goto err_name;
pmu->base.attr_groups = i915_pmu_attr_groups;
pmu->base.task_ctx_nr = perf_invalid_context;
@@ -1073,13 +1130,9 @@ void i915_pmu_register(struct drm_i915_private *i915)
pmu->base.read = i915_pmu_event_read;
pmu->base.event_idx = i915_pmu_event_event_idx;
- spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
-
- ret = perf_pmu_register(&pmu->base, "i915", -1);
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err;
+ goto err_attr;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1089,10 +1142,14 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
-err:
+err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
- DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+err_name:
+ if (!is_igp(i915))
+ kfree(pmu->name);
+err:
+ dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
@@ -1110,5 +1167,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ if (!is_igp(i915))
+ kfree(pmu->name);
free_event_attributes(pmu);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 4fc4f2478301..bf52e3983631 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -47,6 +47,10 @@ struct i915_pmu {
*/
struct pmu base;
/**
+ * @name: Name as registered with perf core.
+ */
+ const char *name;
+ /**
* @lock: Lock protecting enable mask and ref count handling.
*/
spinlock_t lock;
@@ -97,9 +101,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_time_last: Cached suspend time from PM core.
+ * @sleep_last: Last time GT parked for RC6 estimation.
*/
- u64 suspended_time_last;
+ ktime_t sleep_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 21037a2e2038..732aad148881 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -16,6 +16,12 @@ enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ /* A preemptive pulse used to monitor the health of each engine */
+ I915_PRIORITY_HEARTBEAT,
+
+ /* Interactive workload, scheduled for immediate pageflipping */
+ I915_PRIORITY_DISPLAY,
};
#define I915_USER_PRIORITY_SHIFT 2
@@ -39,6 +45,7 @@ enum {
* active request.
*/
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+#define I915_PRIORITY_BARRIER INT_MAX
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index ad9240a0817a..c27cfef9281c 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -7,6 +7,7 @@
#include <linux/nospec.h>
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -37,8 +38,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
- u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
@@ -50,8 +49,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * subslice_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
+ subslice_length = sseu->max_slices * sseu->ss_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
@@ -69,9 +68,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = subslice_stride;
+ topo.subslice_stride = sseu->ss_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = eu_stride;
+ topo.eu_stride = sseu->eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -142,10 +141,305 @@ query_engine_info(struct drm_i915_private *i915,
return len;
}
+static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
+ u64 user_regs_ptr,
+ u32 kernel_n_regs)
+{
+ /*
+ * We'll just report the number of registers, and won't copy the
+ * registers themselves.
+ */
+ if (user_n_regs == 0)
+ return 0;
+
+ if (user_n_regs < kernel_n_regs)
+ return -EINVAL;
+
+ if (!access_ok(u64_to_user_ptr(user_regs_ptr),
+ 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
+ u32 kernel_n_regs,
+ u64 user_regs_ptr,
+ u32 *user_n_regs)
+{
+ u32 r;
+
+ if (*user_n_regs == 0) {
+ *user_n_regs = kernel_n_regs;
+ return 0;
+ }
+
+ *user_n_regs = kernel_n_regs;
+
+ for (r = 0; r < kernel_n_regs; r++) {
+ u32 __user *user_reg_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
+ u32 __user *user_val_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
+ sizeof(u32));
+ int ret;
+
+ ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ user_reg_ptr);
+ if (ret)
+ return -EFAULT;
+
+ ret = __put_user(kernel_regs[r].value, user_val_ptr);
+ if (ret)
+ return -EFAULT;
+ }
+
+ return 0;
+}
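These two helpers implement a two-pass convention on each register list: a first call with n_*_regs == 0 only reports the counts, and a second call with large-enough buffers receives (address, value) u32 pairs. A hedged userspace sketch for one list; query_config() is a hypothetical wrapper around the query ioctl, not a real API:

    /* Hedged sketch of the two-pass register fetch (error paths trimmed). */
    struct drm_i915_perf_oa_config cfg = {0};

    query_config(fd, &cfg);            /* pass 1: n_mux_regs etc. filled in */

    uint32_t *mux = calloc(cfg.n_mux_regs, 2 * sizeof(uint32_t));
    cfg.mux_regs_ptr = (uintptr_t)mux; /* pairs: addr at 2*r, value at 2*r+1 */

    query_config(fd, &cfg);            /* pass 2: registers copied out */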
+
+static int query_perf_config_data(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item,
+ bool use_uuid)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_perf_oa_config __user *user_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr +
+ sizeof(struct drm_i915_query_perf_config));
+ struct drm_i915_perf_oa_config user_config;
+ struct i915_perf *perf = &i915->perf;
+ struct i915_oa_config *oa_config;
+ char uuid[UUID_STRING_LEN + 1];
+ u64 config_id;
+ u32 flags, total_size;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ total_size =
+ sizeof(struct drm_i915_query_perf_config) +
+ sizeof(struct drm_i915_perf_oa_config);
+
+ if (query_item->length == 0)
+ return total_size;
+
+ if (query_item->length < total_size) {
+ DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
+ query_item->length, total_size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_query_config_ptr, total_size))
+ return -EFAULT;
+
+ if (__get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ if (use_uuid) {
+ struct i915_oa_config *tmp;
+ int id;
+
+ BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
+
+ memset(&uuid, 0, sizeof(uuid));
+ if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ sizeof(user_query_config_ptr->uuid)))
+ return -EFAULT;
+
+ oa_config = NULL;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, uuid)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ } else {
+ if (__get_user(config_id, &user_query_config_ptr->config))
+ return -EFAULT;
+
+ oa_config = i915_perf_get_oa_config(perf, config_id);
+ }
+ if (!oa_config)
+ return -ENOENT;
+
+ if (__copy_from_user(&user_config, user_config_ptr,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
+ user_config.boolean_regs_ptr,
+ oa_config->b_counter_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
+ user_config.flex_regs_ptr,
+ oa_config->flex_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
+ user_config.mux_regs_ptr,
+ oa_config->mux_regs_len);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len,
+ user_config.boolean_regs_ptr,
+ &user_config.n_boolean_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
+ oa_config->flex_regs_len,
+ user_config.flex_regs_ptr,
+ &user_config.n_flex_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
+ oa_config->mux_regs_len,
+ user_config.mux_regs_ptr,
+ &user_config.n_mux_regs);
+ if (ret)
+ goto out;
+
+ memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
+
+ if (__copy_to_user(user_config_ptr, &user_config,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = total_size;
+
+out:
+ i915_oa_config_put(oa_config);
+ return ret;
+}
+
+static size_t sizeof_perf_config_list(size_t count)
+{
+ return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
+}
+
+static size_t sizeof_perf_metrics(struct i915_perf *perf)
+{
+ struct i915_oa_config *tmp;
+ size_t i;
+ int id;
+
+ i = 1;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id)
+ i++;
+ rcu_read_unlock();
+
+ return sizeof_perf_config_list(i);
+}
+
+static int query_perf_config_list(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct i915_perf *perf = &i915->perf;
+ u64 *oa_config_ids = NULL;
+ int alloc, n_configs;
+ u32 flags;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ if (query_item->length == 0)
+ return sizeof_perf_metrics(perf);
+
+ if (get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ n_configs = 1;
+ do {
+ struct i915_oa_config *tmp;
+ u64 *ids;
+ int id;
+
+ ids = krealloc(oa_config_ids,
+ n_configs * sizeof(*oa_config_ids),
+ GFP_KERNEL);
+ if (!ids)
+ return -ENOMEM;
+
+ alloc = fetch_and_zero(&n_configs);
+
+ ids[n_configs++] = 1ull; /* reserved for test_config */
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (n_configs < alloc)
+ ids[n_configs] = id;
+ n_configs++;
+ }
+ rcu_read_unlock();
+
+ oa_config_ids = ids;
+ } while (n_configs > alloc);
+
+ if (query_item->length < sizeof_perf_config_list(n_configs)) {
+ DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
+ query_item->length,
+ sizeof_perf_config_list(n_configs));
+ kfree(oa_config_ids);
+ return -EINVAL;
+ }
+
+ if (put_user(n_configs, &user_query_config_ptr->config)) {
+ kfree(oa_config_ids);
+ return -EFAULT;
+ }
+
+ ret = copy_to_user(user_query_config_ptr + 1,
+ oa_config_ids,
+ n_configs * sizeof(*oa_config_ids));
+ kfree(oa_config_ids);
+ if (ret)
+ return -EFAULT;
+
+ return sizeof_perf_config_list(n_configs);
+}
+
+static int query_perf_config(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ switch (query_item->flags) {
+ case DRM_I915_QUERY_PERF_CONFIG_LIST:
+ return query_perf_config_list(i915, query_item);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
+ return query_perf_config_data(i915, query_item, true);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
+ return query_perf_config_data(i915, query_item, false);
+ default:
+ return -EINVAL;
+ }
+}
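query_perf_config() plugs into the generic i915 query ioctl, which itself uses a two-call sizing convention: a query item with length == 0 makes the kernel return the required size. A hedged userspace sketch enumerating the available OA config ids; the uapi names are from the headers added with this series, and error handling plus the usual <stdint.h>, <stdlib.h>, <sys/ioctl.h>, <drm/i915_drm.h> includes are omitted:

    /* Hedged sketch: enumerate OA config ids via DRM_IOCTL_I915_QUERY. */
    struct drm_i915_query_item item = {
        .query_id = DRM_I915_QUERY_PERF_CONFIG,
        .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
    };
    struct drm_i915_query q = {
        .num_items = 1,
        .items_ptr = (uintptr_t)&item,
    };

    ioctl(fd, DRM_IOCTL_I915_QUERY, &q); /* pass 1: item.length = size */
    item.data_ptr = (uintptr_t)malloc(item.length);
    ioctl(fd, DRM_IOCTL_I915_QUERY, &q); /* pass 2: header + u64 ids */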
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
+ query_perf_config,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abd199093c5..73079b503724 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -413,6 +413,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE_MAX 4
+
#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
@@ -471,6 +474,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1 << 13)
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
@@ -545,7 +550,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
#define MI_PREDICATE_SRC1 _MMIO(0x2408)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
-
+#define MI_PREDICATE_DATA _MMIO(0x2410)
+#define MI_PREDICATE_RESULT _MMIO(0x2418)
+#define MI_PREDICATE_RESULT_1 _MMIO(0x241c)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -555,6 +562,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define BCS_SWCTRL _MMIO(0x22200)
+/* There are 16 GPR registers */
+#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
#define HS_INVOCATION_COUNT _MMIO(0x2300)
@@ -682,6 +693,45 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+/* Gen12 OAR unit */
+#define GEN12_OAR_OACONTROL _MMIO(0x2960)
+#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
+#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OACTXCONTROL _MMIO(0x2360)
+#define GEN12_OAR_OASTATUS _MMIO(0x2968)
+
+/* Gen12 OAG unit */
+#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00)
+#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0
+#define GEN12_OAG_OATAILPTR _MMIO(0xdb04)
+#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0
+
+#define GEN12_OAG_OABUFFER _MMIO(0xdb08)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3)
+#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */
+
+#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28)
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1)
+#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0)
+
+#define GEN12_OAG_OACONTROL _MMIO(0xdaf4)
+#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2
+#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8)
+#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
+
+#define GEN12_OAG_OASTATUS _MMIO(0xdafc)
+#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1)
+#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0)
+
/*
* Flexible, Aggregate EU Counter Registers.
* Note: these aren't contiguous
@@ -918,6 +968,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+/* Same layout as OASTARTTRIGX */
+#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900)
+#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904)
+#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908)
+#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c)
+#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910)
+#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914)
+#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918)
+#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c)
+
+/* Same layout as OAREPORTTRIGX */
+#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920)
+#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924)
+#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928)
+#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c)
+#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930)
+#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934)
+#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938)
+#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c)
+
/* CECX_0 */
#define OACEC_COMPARE_LESS_OR_EQUAL 6
#define OACEC_COMPARE_NOT_EQUAL 5
@@ -934,6 +1004,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_SELECT_PREV (1 << 19)
#define OACEC_SELECT_BOOLEAN (2 << 19)
+/* 11-bit array 0: pass-through, 1: negated */
+#define GEN12_OASCEC_NEGATE_MASK 0x7ff
+#define GEN12_OASCEC_NEGATE_SHIFT 21
+
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
#define OACEC_CONSIDERATIONS_MASK 0xffff
@@ -956,6 +1030,42 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC7_0 _MMIO(0x27a8)
#define OACEC7_1 _MMIO(0x27ac)
+/* Same layout as CECX_Y */
+#define GEN12_OAG_CEC0_0 _MMIO(0xd940)
+#define GEN12_OAG_CEC0_1 _MMIO(0xd944)
+#define GEN12_OAG_CEC1_0 _MMIO(0xd948)
+#define GEN12_OAG_CEC1_1 _MMIO(0xd94c)
+#define GEN12_OAG_CEC2_0 _MMIO(0xd950)
+#define GEN12_OAG_CEC2_1 _MMIO(0xd954)
+#define GEN12_OAG_CEC3_0 _MMIO(0xd958)
+#define GEN12_OAG_CEC3_1 _MMIO(0xd95c)
+#define GEN12_OAG_CEC4_0 _MMIO(0xd960)
+#define GEN12_OAG_CEC4_1 _MMIO(0xd964)
+#define GEN12_OAG_CEC5_0 _MMIO(0xd968)
+#define GEN12_OAG_CEC5_1 _MMIO(0xd96c)
+#define GEN12_OAG_CEC6_0 _MMIO(0xd970)
+#define GEN12_OAG_CEC6_1 _MMIO(0xd974)
+#define GEN12_OAG_CEC7_0 _MMIO(0xd978)
+#define GEN12_OAG_CEC7_1 _MMIO(0xd97c)
+
+/* Same layout as CECX_Y + negate 11-bit array */
+#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00)
+#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04)
+#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08)
+#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c)
+#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10)
+#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14)
+#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18)
+#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c)
+#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20)
+#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24)
+#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28)
+#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c)
+#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30)
+#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34)
+#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38)
+#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c)
+
/* OA perf counters */
#define OA_PERFCNT1_LO _MMIO(0x91B8)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
@@ -1036,6 +1146,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
+#define GEN12_OAA_DBG_REG _MMIO(0xdc44)
+#define GEN12_OAG_OA_PESS _MMIO(0x2b2c)
+#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40)
+
#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080
@@ -1956,8 +2070,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
-#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
- _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \
+ _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
@@ -1967,10 +2081,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+#define MG_TX1_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX1LN1_PORT1)
#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
@@ -1980,10 +2094,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define MG_TX2_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
@@ -1994,10 +2108,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+#define MG_TX1_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX1LN1_PORT1)
#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
@@ -2007,10 +2121,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define MG_TX2_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
@@ -2021,10 +2135,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
- MG_TX_SWINGCTRL_TX1LN0_PORT2, \
- MG_TX_SWINGCTRL_TX1LN1_PORT1)
+#define MG_TX1_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX1LN1_PORT1)
#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
@@ -2034,10 +2148,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
- MG_TX_SWINGCTRL_TX2LN0_PORT2, \
- MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define MG_TX2_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
@@ -2049,10 +2163,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
- MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
- MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+#define MG_TX1_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+ MG_TX_DRVCTRL_TX1LN1_TXPORT1)
#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
@@ -2062,10 +2176,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
- MG_TX_DRVCTRL_TX2LN0_PORT2, \
- MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define MG_TX2_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
@@ -2082,10 +2196,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
- MG_CLKHUB_LN0_PORT2, \
- MG_CLKHUB_LN1_PORT1)
+#define MG_CLKHUB(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \
+ MG_CLKHUB_LN0_PORT2, \
+ MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
#define MG_TX_DCC_TX1LN0_PORT1 0x168110
@@ -2096,10 +2210,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
- MG_TX_DCC_TX1LN0_PORT2, \
- MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX1_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \
+ MG_TX_DCC_TX1LN0_PORT2, \
+ MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
#define MG_TX_DCC_TX2LN1_PORT1 0x168490
#define MG_TX_DCC_TX2LN0_PORT2 0x169090
@@ -2108,10 +2222,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
- MG_TX_DCC_TX2LN0_PORT2, \
- MG_TX_DCC_TX2LN1_PORT1)
+#define MG_TX2_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \
+ MG_TX_DCC_TX2LN0_PORT2, \
+ MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24)
@@ -2124,10 +2238,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
- MG_DP_MODE_LN0_ACU_PORT2, \
- MG_DP_MODE_LN1_ACU_PORT1)
+#define MG_DP_MODE(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \
+ MG_DP_MODE_LN0_ACU_PORT2, \
+ MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
@@ -2166,13 +2280,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -2453,6 +2567,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
+#define GEN12_GAM_DONE _MMIO(0xcf68)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
@@ -2483,7 +2598,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
+#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
+#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
@@ -2596,6 +2716,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1 << 31)
@@ -2705,6 +2827,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
@@ -2878,6 +3001,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
@@ -2956,6 +3080,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
+#define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3558,6 +3684,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _PALETTE_A 0xa000
#define _PALETTE_B 0xa800
#define _CHV_PALETTE_C 0xc000
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
@@ -4038,10 +4167,15 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
#define GWUNIT_CLKGATE_DIS (1 << 16)
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS (1 << 20)
@@ -4135,6 +4269,7 @@ enum {
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
+#define _EXITLINE_A 0x60018
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
@@ -4186,10 +4321,22 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* HSW+ eDP PSR registers */
-#define HSW_EDP_PSR_BASE 0x64800
-#define BDW_EDP_PSR_BASE 0x6f800
-#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
+#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define EXITLINE_ENABLE REG_BIT(31)
+#define EXITLINE_MASK REG_GENMASK(12, 0)
+#define EXITLINE_SHIFT 0
+
+/*
+ * HSW+ eDP PSR registers
+ *
+ * HSW PSR registers are relative to DDIA (_DDI_BUF_CTL_A + 0x800), with just
+ * one instance of them.
+ */
+#define _HSW_EDP_PSR_BASE 0x64800
+#define _SRD_CTL_A 0x60800
+#define _SRD_CTL_EDP 0x6f800
+#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust)
+#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A))
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -4215,27 +4362,40 @@ enum {
#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
-/* Bspec claims those aren't shifted but stay at 0x64800 */
+/*
+ * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are relative
+ * to the transcoder, with bits defined for each one as if using no shift (i.e.
+ * as if it was for TRANSCODER_EDP).
+ */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
-#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
-#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
-#define EDP_PSR_TRANSCODER_C_SHIFT 24
-#define EDP_PSR_TRANSCODER_B_SHIFT 16
-#define EDP_PSR_TRANSCODER_A_SHIFT 8
-#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
-
-#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
+#define _PSR_IMR_A 0x60814
+#define _PSR_IIR_A 0x60818
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
+ 0 : ((trans) - TRANSCODER_A + 1) * 8)
+#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+
+#define _SRD_AUX_CTL_A 0x60810
+#define _SRD_AUX_CTL_EDP 0x6f810
+#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A))
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
-#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
+#define _SRD_AUX_DATA_A 0x60814
+#define _SRD_AUX_DATA_EDP 0x6f814
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
+#define _SRD_STATUS_A 0x60840
+#define _SRD_STATUS_EDP 0x6f840
+#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A))
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -4260,10 +4420,15 @@ enum {
#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
-#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
+#define _SRD_PERF_CNT_A 0x60844
+#define _SRD_PERF_CNT_EDP 0x6f844
+#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A))
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
+/* PSR_MASK on SKL+ */
+#define _SRD_DEBUG_A 0x60860
+#define _SRD_DEBUG_EDP 0x6f860
+#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A))
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -4271,7 +4436,9 @@ enum {
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
-#define EDP_PSR2_CTL _MMIO(0x6f900)
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE (1 << 31)
#define EDP_SU_TRACK_ENABLE (1 << 30)
#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
@@ -4293,8 +4460,8 @@ enum {
#define _PSR_EVENT_TRANS_B 0x61848
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
-#define _PSR_EVENT_TRANS_EDP 0x6F848
-#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define _PSR_EVENT_TRANS_EDP 0x6f848
+#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
#define PSR_EVENT_PSR2_DISABLED (1 << 16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
@@ -4312,15 +4479,16 @@ enum {
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
-#define EDP_PSR2_STATUS _MMIO(0x6f940)
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
-#define _PSR2_SU_STATUS_0 0x6F914
-#define _PSR2_SU_STATUS_1 0x6F918
-#define _PSR2_SU_STATUS_2 0x6F91C
-#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
-#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3))
+#define _PSR2_SU_STATUS_A 0x60914
+#define _PSR2_SU_STATUS_EDP 0x6f914
+#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
+#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
#define PSR2_SU_STATUS_FRAMES 8
@@ -4646,6 +4814,7 @@ enum {
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
@@ -5482,45 +5651,9 @@ enum {
*/
#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
-#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
-#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
-#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
-#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
-#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
-
-#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
-#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
-#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
-#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
-#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
-
-#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
-#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
-#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
-#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
-#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
-
-#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
-#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
-#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
-#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
-#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
-
-#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
-#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
-#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
-#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
-#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5652,6 +5785,11 @@ enum {
#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0 << 5)
#define PIPECONF_10BPC (1 << 5)
@@ -5739,12 +5877,13 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1 << 27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
@@ -6201,6 +6340,7 @@ enum {
#define CHV_CURSOR_C_OFFSET 0x700e0
#define IVB_CURSOR_B_OFFSET 0x71080
#define IVB_CURSOR_C_OFFSET 0x72080
+#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
#define _DSPACNTR 0x70180
@@ -7171,11 +7311,17 @@ enum {
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define LGC_PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define LGC_PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
/* ilk/snb precision palette */
#define _PREC_PALETTE_A 0x4b000
#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PALETTE_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PALETTE_BLUE_MASK REG_GENMASK(9, 0)
#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
#define _PREC_PIPEAGCMAX 0x4d000
@@ -7211,6 +7357,12 @@ enum {
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+#define DMC_DEBUG3 _MMIO(0x101090)
+
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
#define DE_SPRITEB_FLIP_DONE (1 << 29)
@@ -7322,6 +7474,9 @@ enum {
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN11_PIPE_PLANE7_FAULT (1 << 22)
+#define GEN11_PIPE_PLANE6_FAULT (1 << 21)
+#define GEN11_PIPE_PLANE5_FAULT (1 << 20)
#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
@@ -7341,6 +7496,11 @@ enum {
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
+#define GEN11_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
+ GEN11_PIPE_PLANE7_FAULT | \
+ GEN11_PIPE_PLANE6_FAULT | \
+ GEN11_PIPE_PLANE5_FAULT)
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
@@ -7360,6 +7520,12 @@ enum {
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 (1 << 13)
+#define TGL_DE_PORT_AUX_USBC5 (1 << 12)
+#define TGL_DE_PORT_AUX_USBC4 (1 << 11)
+#define TGL_DE_PORT_AUX_USBC3 (1 << 10)
+#define TGL_DE_PORT_AUX_USBC2 (1 << 9)
+#define TGL_DE_PORT_AUX_USBC1 (1 << 8)
#define TGL_DE_PORT_AUX_DDIC (1 << 2)
#define TGL_DE_PORT_AUX_DDIB (1 << 1)
#define TGL_DE_PORT_AUX_DDIA (1 << 0)
@@ -7548,10 +7714,17 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A _MMIO(0x420c0)
-#define CHICKEN_TRANS_B _MMIO(0x420c4)
-#define CHICKEN_TRANS_C _MMIO(0x420c8)
-#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7584,15 +7757,19 @@ enum {
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
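
SKL_DFSM reports fused-off display features, which the driver reads once at probe to trim its advertised capabilities. A hedged sketch of one such check (the helper is illustrative):

/* Illustrative: test whether pipe C is fused off on SKL+. */
static bool skl_pipe_c_fused_off(struct drm_i915_private *dev_priv)
{
	return I915_READ(SKL_DFSM) & SKL_DFSM_PIPE_C_DISABLE;
}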
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
@@ -7609,7 +7786,10 @@ enum {
#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
@@ -7634,6 +7814,7 @@ enum {
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
+ #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
@@ -7818,29 +7999,24 @@ enum {
SDE_FDI_RXA_CPT)
/* south display engine interrupt: ICP/TGP */
-#define SDE_TC6_HOTPLUG_TGP (1 << 29)
-#define SDE_TC5_HOTPLUG_TGP (1 << 28)
-#define SDE_TC4_HOTPLUG_ICP (1 << 27)
-#define SDE_TC3_HOTPLUG_ICP (1 << 26)
-#define SDE_TC2_HOTPLUG_ICP (1 << 25)
-#define SDE_TC1_HOTPLUG_ICP (1 << 24)
#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_DDIC_HOTPLUG_TGP (1 << 18)
-#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
-#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16))
-#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
- SDE_DDIA_HOTPLUG_ICP)
-#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
- SDE_TC3_HOTPLUG_ICP | \
- SDE_TC2_HOTPLUG_ICP | \
- SDE_TC1_HOTPLUG_ICP)
-#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \
- SDE_DDI_MASK_ICP)
-#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \
- SDE_TC5_HOTPLUG_TGP | \
- SDE_TC_MASK_ICP)
+#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
+#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
@@ -7907,26 +8083,13 @@ enum {
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define TGP_DDIC_HPD_ENABLE (1 << 11)
-#define TGP_DDIC_HPD_STATUS_MASK (3 << 8)
-#define TGP_DDIC_HPD_NO_DETECT (0 << 8)
-#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8)
-#define TGP_DDIC_HPD_LONG_DETECT (2 << 8)
-#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8)
-#define ICP_DDIB_HPD_ENABLE (1 << 7)
-#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
-#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
-#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
-#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
-#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
-#define ICP_DDIA_HPD_ENABLE (1 << 3)
-#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
-#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
-#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
-#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
-#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
-#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port)))
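
With the per-port parameterization, hotplug decode no longer needs one macro per DDI. A sketch of how a long-pulse check might look; function name and signature are assumptions, not the driver's exact helper:

static bool icp_ddi_hpd_long_detect(enum port port, u32 val)
{
	/* Each port owns a 4-bit field; the low two bits carry the pulse type. */
	return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port);
}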
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
@@ -8037,14 +8200,15 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
-#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \
- ICP_DDIA_HPD_ENABLE)
+#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
ICP_TC_HPD_ENABLE(PORT_TC3) | \
ICP_TC_HPD_ENABLE(PORT_TC2) | \
ICP_TC_HPD_ENABLE(PORT_TC1))
-#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \
- ICP_DDI_HPD_ENABLE_MASK)
+#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
ICP_TC_HPD_ENABLE(PORT_TC5) | \
ICP_TC_HPD_ENABLE_MASK)
@@ -8594,6 +8758,10 @@ enum {
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+#define POWERGATE_ENABLE _MMIO(0xa210)
+#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3)
+#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4)
+
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
@@ -8831,6 +8999,7 @@ enum {
#define GEN9_SAGV_DISABLE 0x0
#define GEN9_SAGV_IS_DISABLED 0x1
#define GEN9_SAGV_ENABLE 0x3
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -9094,6 +9263,10 @@ enum {
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
/*
* HSW - ICL power wells
*
@@ -9256,12 +9429,20 @@ enum skl_power_gate {
/* HDCP Repeater Registers */
#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT BIT(31)
+#define HDCP_TRANSB_REP_PRESENT BIT(30)
+#define HDCP_TRANSC_REP_PRESENT BIT(29)
+#define HDCP_TRANSD_REP_PRESENT BIT(28)
#define HDCP_DDIB_REP_PRESENT BIT(30)
#define HDCP_DDIA_REP_PRESENT BIT(29)
#define HDCP_DDIC_REP_PRESENT BIT(28)
#define HDCP_DDID_REP_PRESENT BIT(27)
#define HDCP_DDIF_REP_PRESENT BIT(26)
#define HDCP_DDIE_REP_PRESENT BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
#define HDCP_DDIB_SHA1_M0 (1 << 20)
#define HDCP_DDIA_SHA1_M0 (2 << 20)
#define HDCP_DDIC_SHA1_M0 (3 << 20)
@@ -9301,15 +9482,92 @@ enum skl_power_gate {
_PORTE_HDCP_AUTHENC, \
_PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
#define HDCP_STATUS_STREAM_A_ENC BIT(31)
#define HDCP_STATUS_STREAM_B_ENC BIT(30)
#define HDCP_STATUS_STREAM_C_ENC BIT(29)
@@ -9336,23 +9594,44 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
-
-#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
#define AUTH_LINK_AUTHENTICATED BIT(31)
#define AUTH_LINK_TYPE BIT(30)
#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
#define AUTH_CLR_KEYS BIT(18)
-
-#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-
-#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define STREAM_ENCRYPTION_STATUS_A BIT(31)
-#define STREAM_ENCRYPTION_STATUS_B BIT(30)
-#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
#define LINK_TYPE_STATUS BIT(22)
#define LINK_AUTH_STATUS BIT(21)
#define LINK_ENCRYPTION_STATUS BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
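
Gen12 moves the HDCP blocks from per-DDI to per-transcoder register instances; the HDCP*(dev_priv, trans, port) selectors pick the right instance so callers stay generation-agnostic. A hedged usage sketch (the wrapper name is illustrative):

static bool hdcp2_link_encrypted(struct drm_i915_private *dev_priv,
				 enum transcoder trans, enum port port)
{
	/* Resolves to TRANS_HDCP2_STATUS on gen12+, PORT_HDCP2_STATUS before. */
	return I915_READ(HDCP2_STATUS(dev_priv, trans, port)) &
		LINK_ENCRYPTION_STATUS;
}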
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -9392,6 +9671,9 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
@@ -9419,7 +9701,9 @@ enum skl_power_gate {
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -9439,7 +9723,9 @@ enum skl_power_gate {
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -9594,17 +9880,7 @@ enum skl_power_gate {
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-
-#define TRANS_MSA_SYNC_CLK (1 << 0)
-#define TRANS_MSA_SAMPLING_444 (2 << 1)
-#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
-#define TRANS_MSA_6_BPC (0 << 5)
-#define TRANS_MSA_8_BPC (1 << 5)
-#define TRANS_MSA_10_BPC (2 << 5)
-#define TRANS_MSA_12_BPC (3 << 5)
-#define TRANS_MSA_16_BPC (4 << 5)
-#define TRANS_MSA_CEA_RANGE (1 << 3)
-#define TRANS_MSA_USE_VSC_SDP (1 << 14)
+/* See DP_MSA_MISC_* for the bit definitions */
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
@@ -9645,7 +9921,10 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
@@ -9966,6 +10245,166 @@ enum skl_power_gate {
_TGL_DPLL1_CFGCR1, \
_TGL_TBTPLL_CFGCR1)
+#define _DKL_PHY1_BASE 0x168000
+#define _DKL_PHY2_BASE 0x169000
+#define _DKL_PHY3_BASE 0x16A000
+#define _DKL_PHY4_BASE 0x16B000
+#define _DKL_PHY5_BASE 0x16C000
+#define _DKL_PHY6_BASE 0x16D000
+
+/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
+#define _DKL_PLL_DIV0 0x200
+#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
+#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
+#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
+#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12)
+#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8)
+#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV0)
+
+#define _DKL_PLL_DIV1 0x204
+#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16)
+#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0)
+#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV1)
+
+#define _DKL_PLL_SSC 0x210
+#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29)
+#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29)
+#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16)
+#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16)
+#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11)
+#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11)
+#define DKL_PLL_SSC_EN (1 << 9)
+#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_SSC)
+
+#define _DKL_PLL_BIAS 0x214
+#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30)
+#define DKL_PLL_BIAS_FBDIV_SHIFT (8)
+#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_BIAS)
+
+#define _DKL_PLL_TDC_COLDST_BIAS 0x218
+#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8)
+#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8)
+#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0)
+#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0)
+#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_TDC_COLDST_BIAS)
+
+#define _DKL_REFCLKIN_CTL 0x12C
+/* Bits are the same as MG_REFCLKIN_CTL */
+#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_REFCLKIN_CTL)
+
+#define _DKL_CLKTOP2_HSCLKCTL 0xD4
+/* Bits are the same as MG_CLKTOP2_HSCLKCTL */
+#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_HSCLKCTL)
+
+#define _DKL_CLKTOP2_CORECLKCTL1 0xD8
+/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */
+#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_CORECLKCTL1)
+
+#define _DKL_TX_DPCNTL0 0x2C0
+#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13)
+#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13)
+#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8)
+#define DKL_TX_DE_EMPHASIS_COEFF_MASK (0x1f << 8)
+#define DKL_TX_VSWING_CONTROL(x) ((x) << 0)
+#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0)
+#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL0)
+
+#define _DKL_TX_DPCNTL1 0x2C4
+/* Bits are the same as DKL_TX_DPCNTRL0 */
+#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL1)
+
+#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_DP20BITMODE (1 << 2)
+#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL2)
+
+#define _DKL_TX_FW_CALIB 0x2F8
+#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7)
+#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_FW_CALIB)
+
+#define _DKL_TX_PMD_LANE_SUS 0xD00
+#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_PMD_LANE_SUS)
+
+#define _DKL_TX_DW17 0xDC4
+#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW17)
+
+#define _DKL_TX_DW18 0xDC8
+#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW18)
+
+#define _DKL_DP_MODE 0xA0
+#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_DP_MODE)
+
+#define _DKL_CMN_UC_DW27 0x36C
+#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15)
+#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CMN_UC_DW27)
+
+/*
+ * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than
+ * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0
+ * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address
+ * bits that point the 4KB window into the full PHY register space.
+ */
+#define _HIP_INDEX_REG0 0x1010A0
+#define _HIP_INDEX_REG1 0x1010A4
+#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \
+ : _HIP_INDEX_REG1)
+#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4))
+#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port))
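
Putting the pieces together, a Dekel access programs the window index before touching the target register. A simplified sketch, ignoring locking and assuming the caller already knows the index that maps the wanted offset:

static u32 dkl_phy_read(struct drm_i915_private *dev_priv,
			enum tc_port tc_port, i915_reg_t reg, u32 idx)
{
	/* Point the 4KB aperture at the right quarter of the PHY space. */
	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, idx));
	return I915_READ(reg);
}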
+
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -9980,6 +10419,8 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@@ -10108,11 +10549,11 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
-#define ICL_CSC_ENABLE (1 << 31)
-#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31) /* icl+ */
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30) /* icl+ */
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2) /* ilk/snb */
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1) /* pre-glk */
+#define CSC_MODE_YUV_TO_RGB (1 << 0) /* ilk/snb */
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
@@ -10228,6 +10669,9 @@ enum skl_power_gate {
#define _PAL_PREC_GC_MAX_A 0x4A410
#define _PAL_PREC_GC_MAX_B 0x4AC10
#define _PAL_PREC_GC_MAX_C 0x4B410
+#define PREC_PAL_DATA_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PAL_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PAL_DATA_BLUE_MASK REG_GENMASK(9, 0)
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
@@ -10280,6 +10724,9 @@ enum skl_power_gate {
#define CGM_PIPE_MODE_GAMMA (1 << 2)
#define CGM_PIPE_MODE_CSC (1 << 1)
#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
+#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0)
+#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
+#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0)
#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
@@ -11527,16 +11974,31 @@ enum skl_power_gate {
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
#define MODULAR_FIA_MASK (1 << 4)
-#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
-#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
-#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+/* These registers control the Display State Buffer (DSB) engines. */
+#define _DSBSL_INSTANCE_BASE 0x70B00
+#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
+ (pipe) * 0x1000 + (id) * 0x100)
+#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
+#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
+#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
+#define DSB_ENABLE (1 << 31)
+#define DSB_STATUS (1 << 0)
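
The DSB presents a simple head/tail ring: software fills a command buffer, points DSB_HEAD and DSB_TAIL at it, and sets DSB_ENABLE. A rough sketch of kicking a prepared batch; completion handling via DSB_STATUS is omitted, and the helper name is an assumption:

static void dsb_kick(struct drm_i915_private *dev_priv,
		     enum pipe pipe, int id, u32 head, u32 tail)
{
	I915_WRITE(DSB_HEAD(pipe, id), head);
	I915_WRITE(DSB_TAIL(pipe, id), tail);
	I915_WRITE(DSB_CTRL(pipe, id), DSB_ENABLE);
}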
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1c5506822dc7..bbd71af00a91 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,8 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/intel_rps.h"
#include "i915_active.h"
#include "i915_drv.h"
@@ -169,16 +171,17 @@ remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
- file_priv = READ_ONCE(request->file_priv);
- if (!file_priv)
+ if (!READ_ONCE(request->file_priv))
return;
- spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
+ rcu_read_lock();
+ file_priv = xchg(&request->file_priv, NULL);
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
list_del(&request->client_link);
- request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
}
- spin_unlock(&file_priv->mm.lock);
+ rcu_read_unlock();
}
static void free_capture_list(struct i915_request *request)
@@ -205,21 +208,18 @@ static void remove_from_engine(struct i915_request *rq)
* check that the rq still belongs to the newly locked engine.
*/
locked = READ_ONCE(rq->engine);
- spin_lock(&locked->active.lock);
+ spin_lock_irq(&locked->active.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->active.lock);
spin_lock(&engine->active.lock);
locked = engine;
}
list_del(&rq->sched.link);
- spin_unlock(&locked->active.lock);
+ spin_unlock_irq(&locked->active.lock);
}
-static bool i915_request_retire(struct i915_request *rq)
+bool i915_request_retire(struct i915_request *rq)
{
- struct i915_active_request *active, *next;
-
- lockdep_assert_held(&rq->timeline->mutex);
if (!i915_request_completed(rq))
return false;
@@ -240,41 +240,11 @@ static bool i915_request_retire(struct i915_request *rq)
* Note this requires that we are always called in request
* completion order.
*/
- GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+ GEM_BUG_ON(!list_is_first(&rq->link,
+ &i915_request_timeline(rq)->requests));
rq->ring->head = rq->postfix;
/*
- * Walk through the active list, calling retire on each. This allows
- * objects to track their GPU activity and mark themselves as idle
- * when their *last* active request is completed (updating state
- * tracking lists for eviction, active references for GEM, etc).
- *
- * As the ->retire() may free the node, we decouple it first and
- * pass along the auxiliary information (to avoid dereferencing
- * the node after the callback).
- */
- list_for_each_entry_safe(active, next, &rq->active_list, link) {
- /*
- * In microbenchmarks or focusing upon time inside the kernel,
- * we may spend an inordinate amount of time simply handling
- * the retirement of requests and processing their callbacks.
- * Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of
- * i915_active_request. So we try to keep this loop as
- * streamlined as possible and also prefetch the next
- * i915_active_request to try and hide the likely cache miss.
- */
- prefetchw(next);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, rq);
- }
-
- local_irq_disable();
-
- /*
* We only loosely track inflight requests across preemption,
* and so we may find ourselves attempting to retire a _completed_
* request that we have removed from the HW and put back on a run
@@ -282,24 +252,22 @@ static bool i915_request_retire(struct i915_request *rq)
*/
remove_from_engine(rq);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
i915_request_mark_complete(rq);
if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ atomic_dec(&rq->engine->gt->rps.num_waiters);
}
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
__notify_execute_cb(rq);
}
GEM_BUG_ON(!list_empty(&rq->execute_cb));
- spin_unlock(&rq->lock);
-
- local_irq_enable();
+ spin_unlock_irq(&rq->lock);
remove_from_client(rq);
list_del(&rq->link);
@@ -316,7 +284,7 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_timeline * const tl = rq->timeline;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -324,7 +292,6 @@ void i915_request_retire_upto(struct i915_request *rq)
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq));
- lockdep_assert_held(&tl->mutex);
GEM_BUG_ON(!i915_request_completed(rq));
do {
@@ -680,9 +647,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->gem_context = ce->gem_context;
rq->engine = ce->engine;
rq->ring = ce->ring;
- rq->timeline = tl;
+ rq->execution_mask = ce->engine->mask;
+
+ rcu_assign_pointer(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
+
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
@@ -700,9 +670,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->batch = NULL;
rq->capture_list = NULL;
rq->flags = 0;
- rq->execution_mask = ALL_ENGINES;
- INIT_LIST_HEAD(&rq->active_list);
INIT_LIST_HEAD(&rq->execute_cb);
/*
@@ -741,7 +709,6 @@ err_unwind:
ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&rq->active_list));
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
@@ -786,16 +753,43 @@ err_unlock:
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->link, &signal->timeline->requests))
- return 0;
+ struct intel_timeline *tl;
+ struct dma_fence *fence;
+ int err;
- signal = list_prev_entry(signal, link);
- if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
+ GEM_BUG_ON(i915_request_timeline(rq) ==
+ rcu_access_pointer(signal->timeline));
+
+ rcu_read_lock();
+ tl = rcu_dereference(signal->timeline);
+ if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+ if (!tl) /* already started or maybe even completed */
return 0;
- return i915_sw_fence_await_dma_fence(&rq->submit,
- &signal->fence, 0,
- I915_FENCE_GFP);
+ fence = ERR_PTR(-EBUSY);
+ if (mutex_trylock(&tl->mutex)) {
+ fence = NULL;
+ if (!i915_request_started(signal) &&
+ !list_is_first(&signal->link, &tl->requests)) {
+ signal = list_prev_entry(signal, link);
+ fence = dma_fence_get(&signal->fence);
+ }
+ mutex_unlock(&tl->mutex);
+ }
+ intel_timeline_put(tl);
+ if (IS_ERR_OR_NULL(fence))
+ return PTR_ERR_OR_ZERO(fence);
+
+ err = 0;
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ err = i915_sw_fence_await_dma_fence(&rq->submit,
+ fence, 0,
+ I915_FENCE_GFP);
+ dma_fence_put(fence);
+
+ return err;
}
static intel_engine_mask_t
@@ -821,34 +815,33 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const int has_token = INTEL_GEN(to->i915) >= 12;
u32 hwsp_offset;
+ int len;
u32 *cs;
- int err;
- GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* Just emit the first semaphore we see as request space is limited. */
if (already_busywaiting(to) & from->engine->mask)
- return i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
+ goto await_fence;
- err = i915_request_await_start(to, from);
- if (err < 0)
- return err;
+ if (i915_request_await_start(to, from) < 0)
+ goto await_fence;
/* Only submit our spinner after the signaler is running! */
- err = __i915_request_await_execution(to, from, NULL, gfp);
- if (err)
- return err;
+ if (__i915_request_await_execution(to, from, NULL, gfp))
+ goto await_fence;
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
- if (err)
- return err;
+ if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
+ goto await_fence;
+
+ len = 4;
+ if (has_token)
+ len += 2;
- cs = intel_ring_begin(to, 4);
+ cs = intel_ring_begin(to, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -860,18 +853,28 @@ emit_semaphore_wait(struct i915_request *to,
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD) +
+ has_token;
*cs++ = from->fence.seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
+ if (has_token) {
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
intel_ring_advance(to, cs);
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
+
+await_fence:
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
static int
@@ -955,21 +958,23 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Squash repeated waits to the same timelines */
if (fence->context &&
- intel_timeline_sync_is_later(rq->timeline, fence))
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
continue;
if (dma_fence_is_i915(fence))
ret = i915_request_await_request(rq, to_request(fence));
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
+ fence->context ? I915_FENCE_TIMEOUT : 0,
I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
if (fence->context)
- intel_timeline_sync_set(rq->timeline, fence);
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
@@ -1111,7 +1116,7 @@ void i915_request_skip(struct i915_request *rq, int error)
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct intel_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
/*
@@ -1134,8 +1139,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = rcu_dereference_protected(timeline->last_request.request,
- lockdep_is_held(&timeline->mutex));
+ prev = to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1160,7 +1165,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
- __i915_active_request_set(&timeline->last_request, rq);
return prev;
}
@@ -1224,7 +1228,7 @@ void __i915_request_queue(struct i915_request *rq,
void i915_request_add(struct i915_request *rq)
{
struct i915_sched_attr attr = rq->gem_context->sched;
- struct intel_timeline * const tl = rq->timeline;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *prev;
lockdep_assert_held(&tl->mutex);
@@ -1279,7 +1283,9 @@ void i915_request_add(struct i915_request *rq)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_request_completed(prev) && prev->timeline == tl)
+ if (prev &&
+ i915_request_completed(prev) &&
+ rcu_access_pointer(prev->timeline) == tl)
i915_request_retire_upto(prev);
mutex_unlock(&tl->mutex);
@@ -1442,7 +1448,7 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (CONFIG_DRM_I915_SPIN_REQUEST &&
+ if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
__i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
dma_fence_signal(&rq->fence);
goto out;
@@ -1462,7 +1468,7 @@ long i915_request_wait(struct i915_request *rq,
*/
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
@@ -1488,6 +1494,7 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1495,53 +1502,11 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
- mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
-bool i915_retire_requests(struct drm_i915_private *i915)
-{
- struct intel_gt_timelines *timelines = &i915->gt.timelines;
- struct intel_timeline *tl, *tn;
- unsigned long flags;
- LIST_HEAD(free);
-
- spin_lock_irqsave(&timelines->lock, flags);
- list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
- if (!mutex_trylock(&tl->mutex))
- continue;
-
- intel_timeline_get(tl);
- GEM_BUG_ON(!tl->active_count);
- tl->active_count++; /* pin the list element */
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- retire_requests(tl);
-
- spin_lock_irqsave(&timelines->lock, flags);
-
- /* Resume iteration after dropping lock */
- list_safe_reset_next(tl, tn, link);
- if (!--tl->active_count)
- list_del(&tl->link);
-
- mutex_unlock(&tl->mutex);
-
- /* Defer the final release to after the spinlock */
- if (refcount_dec_and_test(&tl->kref.refcount)) {
- GEM_BUG_ON(tl->active_count);
- list_add(&tl->link, &free);
- }
- }
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- list_for_each_entry_safe(tl, tn, &free, link)
- __intel_timeline_free(&tl->kref);
-
- return !list_empty(&timelines->active_list);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e4dd013761e8..96991d64759c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -113,7 +113,7 @@ struct i915_request {
struct intel_engine_cs *engine;
struct intel_context *hw_context;
struct intel_ring *ring;
- struct intel_timeline *timeline;
+ struct intel_timeline __rcu *timeline;
struct list_head signal_link;
/*
@@ -211,14 +211,14 @@ struct i915_request {
* on the active_list (of their final request).
*/
struct i915_capture_list *capture_list;
- struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
unsigned long flags;
-#define I915_REQUEST_WAITBOOST BIT(0)
-#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_SENTINEL BIT(2)
/** timeline->request entry for this request */
struct list_head link;
@@ -251,6 +251,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request *
@@ -309,10 +310,8 @@ long i915_request_wait(struct i915_request *rq,
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
-#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
-#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline bool i915_request_signaled(const struct i915_request *rq)
{
@@ -442,6 +441,29 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
}
-bool i915_retire_requests(struct drm_i915_private *i915);
+static inline bool i915_request_has_sentinel(const struct i915_request *rq)
+{
+ return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+}
+
+static inline struct intel_timeline *
+i915_request_timeline(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+}
+
+static inline struct intel_timeline *
+i915_request_active_timeline(struct i915_request *rq)
+{
+ /*
+ * When in use during submission, we are protected by a guarantee that
+ * the context/timeline is pinned and must remain pinned until after
+ * this submission.
+ */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rq->engine->active.lock));
+}
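
With the timeline pointer now RCU-protected, callers pick the accessor matching the lock they hold: i915_request_timeline() under the timeline mutex during construction or retirement, i915_request_active_timeline() under engine->active.lock at submission. A hedged example of construction-time use (the field choice is illustrative):

static u32 request_timeline_seqno(struct i915_request *rq)
{
	/* Valid under the timeline mutex, e.g. while emitting the request. */
	return i915_request_timeline(rq)->seqno;
}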
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index 6617963df9ed..b7b59328cb76 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -67,15 +67,15 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
}
/**
- * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
- * @__dmap: DMA address (output)
+ * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
+ * @__dp: Device address (output)
* @__iter: 'struct sgt_iter' (iterator state, internal)
* @__sgt: sg_table to iterate over (input)
* @__step: step size
*/
-#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \
+#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
- ((__dmap) = (__iter).dma + (__iter).curr); \
+ ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 7b84ebca2901..010d67f48ad9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -177,21 +177,51 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority | __NO_PREEMPTION;
}
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
{
- const struct i915_request *inflight = *engine->execlists.active;
+ /*
+ * Allow preemption of low -> normal -> high, but we do
+ * not allow low priority tasks to preempt other low priority
+ * tasks under the impression that latency for low priority
+ * tasks does not matter (as much as background throughput),
+ * so kiss.
+ */
+ return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+ const struct i915_request *rq,
+ int prio)
+{
+ const struct i915_request *inflight;
/*
- * If we are already the currently executing context, don't
- * bother evaluating if we should preempt ourselves, or if
- * we expect nothing to change as a result of running the
- * tasklet, i.e. we have not change the priority queue
- * sufficiently to oust the running context.
+ * We only need to kick the tasklet once for the high priority
+ * new context we add into the queue.
*/
- if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+ if (prio <= engine->execlists.queue_priority_hint)
return;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ rcu_read_lock();
+
+ /* Nothing currently active? We're overdue for a submission! */
+ inflight = execlists_active(&engine->execlists);
+ if (!inflight)
+ goto unlock;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (inflight->hw_context == rq->hw_context)
+ goto unlock;
+
+ engine->execlists.queue_priority_hint = prio;
+ if (need_preempt(prio, rq_prio(inflight)))
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+ rcu_read_unlock();
}
static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +347,8 @@ static void __i915_schedule(struct i915_sched_node *node,
list_move_tail(&node->link, cache.priolist);
}
- if (prio <= engine->execlists.queue_priority_hint)
- continue;
-
- engine->execlists.queue_priority_hint = prio;
-
/* Defer (tasklet) submission until after all of our updates. */
- kick_submission(engine, prio);
+ kick_submission(engine, node_to_request(node), prio);
}
spin_unlock(&engine->active.lock);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 7eefccff39bf..07d243acf553 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -52,22 +52,4 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
-static inline bool i915_scheduler_need_preempt(int prio, int active)
-{
- /*
- * Allow preemption of low -> normal -> high, but we do
- * not allow low priority tasks to preempt other low priority
- * tasks under the impression that latency for low priority
- * tasks does not matter (as much as background throughput),
- * so kiss.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- return prio > max(I915_PRIORITY_NORMAL - 1, active);
-}
-
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index aad81acba9dc..d18e70550054 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -49,6 +49,15 @@ struct i915_sched_attr {
* DAG of each request, we are able to insert it into a sorted queue when it
* is ready, and are able to reorder its portion of the graph to accommodate
* dynamic priority changes.
+ *
+ * Ok, there is now one active element to the "scheduler" in the backends.
+ * We let a new context run for a small amount of time before re-evaluating
+ * the run order. As we re-evaluate, we maintain the strict ordering of
+ * dependencies, but attempt to rotate the active contexts (the current context
+ * is put to the back of its priority queue, then reshuffling its dependents).
+ * This provides minimal timeslicing and prevents a userspace hog (e.g.
+ * something waiting on a user semaphore [VkEvent]) from denying service to
+ * others.
*/
struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8508a01ad8b9..8812cdd9007f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -28,6 +28,7 @@
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
+#include "display/intel_vga.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -57,7 +58,7 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev_priv);
+ intel_vga_redisable(dev_priv);
}
int i915_save_state(struct drm_i915_private *dev_priv)
@@ -65,8 +66,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
i915_save_display(dev_priv);
if (IS_GEN(dev_priv, 4))
@@ -100,8 +99,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return 0;
}
@@ -110,8 +107,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
@@ -145,8 +140,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
intel_gmbus_reset(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
new file mode 100644
index 000000000000..39c79e1c5b52
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+#include "i915_switcheroo.h"
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (!i915) {
+ dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ return;
+ }
+
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(pdev, PCI_D0);
+ i915_resume_switcheroo(i915);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_info("switched off\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(i915, pmm);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return i915 && i915->drm.open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+
+int i915_switcheroo_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+}
+
+void i915_switcheroo_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_switcheroo_unregister_client(pdev);
+}
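
For context, a sketch of how the new helpers are meant to be consumed: the client is registered once the device is functional and unregistered before teardown. The probe/remove wrappers below are hypothetical, not the actual i915 call sites.

#include "i915_drv.h"
#include "i915_switcheroo.h"

/* Hypothetical probe-path wiring around the helpers added above. */
static int example_probe(struct drm_i915_private *i915)
{
	int err;

	err = i915_switcheroo_register(i915);
	if (err)
		return err;

	/* ... remainder of device probe ... */

	return 0;
}

static void example_remove(struct drm_i915_private *i915)
{
	/* Unregister before the device is torn down. */
	i915_switcheroo_unregister(i915);
}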
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.h b/drivers/gpu/drm/i915/i915_switcheroo.h
new file mode 100644
index 000000000000..59b6c1e07d75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SWITCHEROO__
+#define __I915_SWITCHEROO__
+
+struct drm_i915_private;
+
+int i915_switcheroo_register(struct drm_i915_private *i915);
+void i915_switcheroo_unregister(struct drm_i915_private *i915);
+
+#endif /* __I915_SWITCHEROO__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d8a3b180c084..65476909d1bf 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,9 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
@@ -49,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
u64 res = 0;
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(dev_priv, reg);
+ res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -142,12 +145,12 @@ static const struct attribute_group media_rc6_attr_group = {
};
#endif
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
- if (!HAS_L3_DPF(dev_priv))
+ if (!HAS_L3_DPF(i915))
return -EPERM;
- if (offset % 4 != 0)
+ if (!IS_ALIGNED(offset, sizeof(u32)))
return -EINVAL;
if (offset >= GEN7_L3LOG_SIZE)
@@ -162,31 +165,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
int ret;
- count = round_down(count, 4);
-
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
+ count = round_down(count, sizeof(u32));
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+ memset(buf, 0, count);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- if (dev_priv->l3_parity.remap_info[slice])
+ spin_lock(&i915->gem.contexts.lock);
+ if (i915->l3_parity.remap_info[slice])
memcpy(buf,
- dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
count);
- else
- memset(buf, 0, count);
-
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock(&i915->gem.contexts.lock);
return count;
}
@@ -197,46 +193,49 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
- u32 **remap_info;
+ u32 *remap_info, *freeme = NULL;
+ struct i915_gem_context *ctx;
int ret;
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ if (count < sizeof(u32))
+ return -EINVAL;
- remap_info = &dev_priv->l3_parity.remap_info[slice];
- if (!*remap_info) {
- *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
- if (!*remap_info) {
- ret = -ENOMEM;
- goto out;
- }
+ remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!remap_info)
+ return -ENOMEM;
+
+ spin_lock(&i915->gem.contexts.lock);
+
+ if (i915->l3_parity.remap_info[slice]) {
+ freeme = remap_info;
+ remap_info = i915->l3_parity.remap_info[slice];
+ } else {
+ i915->l3_parity.remap_info[slice] = remap_info;
}
- /* TODO: Ideally we really want a GPU reset here to make sure errors
- * aren't propagated. Since I cannot find a stable way to reset the GPU
- * at this point it is left as a TODO.
- */
- memcpy(*remap_info + (offset/4), buf, count);
+ count = round_down(count, sizeof(u32));
+ memcpy(remap_info + offset / sizeof(u32), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- ctx->remap_slice |= (1<<slice);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+ ctx->remap_slice |= BIT(slice);
- ret = count;
+ spin_unlock(&i915->gem.contexts.lock);
+ kfree(freeme);
-out:
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
- return ret;
+ return count;
}
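
The rewritten i915_l3_write() illustrates a common pattern now that struct_mutex is gone: the remap buffer is allocated with GFP_KERNEL before the spinlock is taken, then either published under the lock or discarded if a concurrent writer won the race. A generic sketch of that allocate-then-publish idiom, where slot and lock stand in for l3_parity.remap_info[slice] and gem.contexts.lock:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative only: never allocate under a spinlock; allocate first,
 * then install the buffer or free it if someone else beat us to it. */
static u32 *get_or_install(u32 **slot, spinlock_t *lock, size_t size)
{
	u32 *fresh, *freeme = NULL, *info;

	fresh = kzalloc(size, GFP_KERNEL); /* may sleep; lock not held */
	if (!fresh)
		return NULL;

	spin_lock(lock);
	if (*slot) {
		freeme = fresh; /* lost the race; discard ours */
		info = *slot;
	} else {
		*slot = fresh; /* publish under the lock */
		info = fresh;
	}
	spin_unlock(lock);

	kfree(freeme);
	return info;
}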
static const struct bin_attribute dpf_attrs = {
@@ -261,6 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
u32 freq;
@@ -273,31 +273,31 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = (freq >> 8) & 0xff;
} else {
- freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
+ freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
}
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -305,7 +305,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
bool boost = false;
ssize_t ret;
u32 val;
@@ -315,7 +315,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
return ret;
/* Validate against (static) hardware limits */
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
@@ -335,19 +335,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.max_freq_softlimit));
+ intel_gpu_freq(rps, rps->max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -355,19 +355,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
@@ -377,7 +375,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- intel_gpu_freq(dev_priv, val));
+ intel_gpu_freq(rps, val));
rps->max_freq_softlimit = val;
@@ -385,14 +383,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new max_delay and
+ /*
+ * We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -400,10 +399,10 @@ unlock:
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.min_freq_softlimit));
+ intel_gpu_freq(rps, rps->min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -411,19 +410,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
@@ -437,14 +434,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new min_delay and
+ /*
+ * We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -466,15 +464,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+ val = intel_gpu_freq(rps, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+ val = intel_gpu_freq(rps, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->min_freq);
+ val = intel_gpu_freq(rps, rps->min_freq);
else
BUG();
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 24f2944da09d..7ef7a1e1664c 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -675,7 +674,6 @@ TRACE_EVENT(i915_request_queue,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -683,10 +681,9 @@ TRACE_EVENT(i915_request_queue,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->flags)
+ __entry->ctx, __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno)
+ __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -749,7 +743,6 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -758,9 +751,9 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
);
@@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -780,7 +772,6 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -788,10 +779,9 @@ TRACE_EVENT(i915_request_out,
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->completed)
+ __entry->ctx, __entry->seqno, __entry->completed)
);
#else
@@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -845,7 +834,6 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -853,9 +841,9 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->flags)
);
@@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context,
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_gem_context *, ctx)
- __field(u32, hw_id)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
- __entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->vm;
+ __entry->vm = rcu_access_pointer(ctx->vm);
),
- TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
- __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
)
DEFINE_EVENT(i915_context, i915_context_create,
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 16acdf7bdbe6..0348c6d0ef5f 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -54,25 +54,54 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_probe_fail_count;
-int __i915_inject_load_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
{
- if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
+ if (i915_probe_fail_count >= i915_modparams.inject_probe_failure)
return 0;
- if (++i915_probe_fail_count < i915_modparams.inject_load_failure)
+ if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
return 0;
__i915_printk(i915, KERN_INFO,
"Injecting failure %d at checkpoint %u [%s:%d]\n",
- err, i915_modparams.inject_load_failure, func, line);
- i915_modparams.inject_load_failure = 0;
+ err, i915_modparams.inject_probe_failure, func, line);
+ i915_modparams.inject_probe_failure = 0;
return err;
}
bool i915_error_injected(void)
{
- return i915_probe_fail_count && !i915_modparams.inject_load_failure;
+ return i915_probe_fail_count && !i915_modparams.inject_probe_failure;
}
#endif
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return;
+
+ del_timer(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies_timeout(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffy.
+ */
+ barrier();
+
+ mod_timer(t, jiffies + timeout);
+}
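
A sketch of how these helpers combine with the timer_expired() test added to i915_utils.h in this series: set_timer_ms() arms (or, for a zero timeout, cancels) the timer, and timer_expired() reports true once an armed timer has fired, since cancel_timer() clears t->expires. The watchdog function below is hypothetical usage, not an i915 call site.

#include <linux/printk.h>
#include <linux/timer.h>

#include "i915_utils.h"

static void example_watchdog(struct timer_list *t)
{
	set_timer_ms(t, 10); /* a timeout of 0 would cancel instead */

	/* ... do the guarded work ... */

	if (timer_expired(t))
		pr_warn("watchdog expired before work completed\n");

	cancel_timer(t); /* idempotent: clears t->expires */
}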
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 562f756da421..04139ba1191e 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -32,6 +32,7 @@
#include <linux/workqueue.h>
struct drm_i915_private;
+struct timer_list;
#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
@@ -60,20 +61,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-int __i915_inject_load_error(struct drm_i915_private *i915, int err,
- const char *func, int line);
-#define i915_inject_load_error(_i915, _err) \
- __i915_inject_load_error((_i915), (_err), __func__, __LINE__)
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line);
+#define i915_inject_probe_error(_i915, _err) \
+ __i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);
#else
-#define i915_inject_load_error(_i915, _err) 0
+#define i915_inject_probe_error(_i915, _err) 0
#define i915_error_injected() false
#endif
-#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)
+#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
#define i915_probe_error(i915, fmt, ...) \
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
@@ -421,4 +422,25 @@ static inline void add_taint_for_CI(unsigned int taint)
add_taint(taint, LOCKDEP_STILL_OK);
}
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires) && !timer_pending(t);
+}
+
+/*
+ * This is a lookalike for IS_ENABLED() that takes a kconfig value,
+ * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero,
+ * i.e. whether the configuration is active. Wrapping up the config inside
+ * a boolean context prevents clang and smatch from complaining about potential
+ * issues in confusing logical-&& with bitwise-& for constants.
+ *
+ * Sadly IS_ENABLED() itself does not work with kconfig values.
+ *
+ * Returns 0 if @config is 0, 1 if it is set to any non-zero value.
+ */
+#define IS_ACTIVE(config) ((config) != 0)
+
#endif /* !__I915_UTILS_H */
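
A short usage sketch for IS_ACTIVE(): guarding code on an integer kconfig value without tripping clang/smatch warnings about mixing '&&' with a constant. The config symbol and should_spin() helper here are illustrative stand-ins, not the real i915 users.

#include "i915_utils.h"

/* Illustrative stand-in for an integer kconfig value such as
 * CONFIG_DRM_I915_SPIN_REQUEST. */
#define CONFIG_EXAMPLE_SPIN_TIMEOUT_US 5

static bool should_spin(bool interruptible)
{
	/* Compiles away entirely when the config value is 0. */
	return IS_ACTIVE(CONFIG_EXAMPLE_SPIN_TIMEOUT_US) && !interruptible;
}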
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e677b2a3a9..e5512f26e20a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
@@ -90,6 +91,7 @@ static int __i915_vma_active(struct i915_active *ref)
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
+__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
@@ -104,21 +106,21 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- vma->vm = vm;
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(vm->i915, &vma->active,
- __i915_vma_active, __i915_vma_retire);
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
/* Declare ourselves safe for use inside shrinkers */
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -171,7 +173,7 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
- vma->flags |= I915_VMA_GGTT;
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
spin_lock(&obj->vma.lock);
@@ -218,10 +220,6 @@ vma_create(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
- mutex_lock(&vm->mutex);
- list_add(&vma->vm_link, &vm->unbound_list);
- mutex_unlock(&vm->mutex);
-
return vma;
err_vma:
@@ -265,8 +263,6 @@ vma_lookup(struct drm_i915_gem_object *obj,
* Once created, the VMA is kept until either the object is freed, or the
* address space is closed.
*
- * Must be called with struct_mutex held.
- *
* Returns the vma, or an error pointer.
*/
struct i915_vma *
@@ -277,7 +273,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(vm->closed);
+ GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
vma = vma_lookup(obj, vm, view);
@@ -291,18 +287,63 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
return vma;
}
+struct i915_vma_work {
+ struct dma_fence_work base;
+ struct i915_vma *vma;
+ enum i915_cache_level cache_level;
+ unsigned int flags;
+};
+
+static int __vma_bind(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+ struct i915_vma *vma = vw->vma;
+ int err;
+
+ err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ if (err)
+ atomic_or(I915_VMA_ERROR, &vma->flags);
+
+ if (vma->obj)
+ __i915_gem_object_unpin_pages(vma->obj);
+
+ return err;
+}
+
+static const struct dma_fence_work_ops bind_ops = {
+ .name = "bind",
+ .work = __vma_bind,
+};
+
+struct i915_vma_work *i915_vma_work(void)
+{
+ struct i915_vma_work *vw;
+
+ vw = kzalloc(sizeof(*vw), GFP_KERNEL);
+ if (!vw)
+ return NULL;
+
+ dma_fence_work_init(&vw->base, &bind_ops);
+ vw->base.dma.error = -EAGAIN; /* disable the worker by default */
+
+ return vw;
+}
+
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
+ * @work: preallocated worker for allocating and binding the PTE
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work)
{
u32 bind_flags;
u32 vma_flags;
@@ -319,13 +360,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
+ bind_flags = flags;
+ bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ vma_flags = atomic_read(&vma->flags);
+ vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
if (flags & PIN_UPDATE)
bind_flags |= vma_flags;
else
@@ -336,11 +375,34 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
+ if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ work->vma = vma;
+ work->cache_level = cache_level;
+ work->flags = bind_flags | I915_VMA_ALLOC;
- vma->flags |= bind_flags;
+ /*
+ * Note we only want to chain up to the migration fence on
+ * the pages (not the object itself). As we don't track that,
+ * yet, we have to use the exclusive fence instead.
+ *
+ * Also note that we do not want to track the async vma as
+ * part of the obj->resv->excl_fence as it only affects
+ * execution and not content or object's backing store lifetime.
+ */
+ GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
+ i915_active_set_exclusive(&vma->active, &work->base.dma);
+ work->base.dma.error = 0; /* enable the queue_work() */
+
+ if (vma->obj)
+ __i915_gem_object_pin_pages(vma->obj);
+ } else {
+ GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+ }
+
+ atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -350,18 +412,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
-
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+ GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
- ptr = vma->iomap;
+ ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
@@ -371,7 +431,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err;
}
- vma->iomap = ptr;
+ if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
+ io_mapping_unmap(ptr);
+ ptr = vma->iomap;
+ }
}
__i915_vma_pin(vma);
@@ -391,18 +454,12 @@ err:
void i915_vma_flush_writes(struct i915_vma *vma)
{
- if (!i915_vma_has_ggtt_write(vma))
- return;
-
- intel_gt_flush_ggtt_writes(vma->vm->gt);
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_flush_writes(vma);
@@ -438,6 +495,9 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
if (!drm_mm_node_allocated(&vma->node))
return false;
+ if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
+ return true;
+
if (vma->node.size < size)
return true;
@@ -472,17 +532,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
- vma->flags |= I915_VMA_CAN_FENCE;
+ set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-static bool color_differs(struct drm_mm_node *node, unsigned long color)
-{
- return node->allocated && node->color != color;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
@@ -494,7 +549,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
- if (vma->vm->mm.color_adjust == NULL)
+ if (!i915_vm_has_cache_coloring(vma->vm))
return true;
/* Only valid to be called on an already inserted vma */
@@ -502,11 +557,13 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(node))
return false;
return true;
@@ -541,13 +598,12 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = vma->vm->i915;
- unsigned int cache_level;
+ unsigned long color;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
@@ -567,7 +623,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
@@ -583,35 +639,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return -ENOSPC;
}
- if (vma->obj) {
- ret = i915_gem_object_pin_pages(vma->obj);
- if (ret)
- return ret;
-
- cache_level = vma->obj->cache_level;
- } else {
- cache_level = 0;
- }
-
- GEM_BUG_ON(vma->pages);
-
- ret = vma->ops->set_pages(vma);
- if (ret)
- goto err_unpin;
+ color = 0;
+ if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+ color = vma->obj->cache_level;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
- range_overflows(offset, size, end)) {
- ret = -EINVAL;
- goto err_clear;
- }
+ range_overflows(offset, size, end))
+ return -EINVAL;
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, cache_level,
+ size, offset, color,
flags);
if (ret)
- goto err_clear;
+ return ret;
} else {
/*
* We only support huge gtt pages through the 48b PPGTT,
@@ -647,116 +689,259 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, cache_level,
+ size, alignment, color,
start, end, flags);
if (ret)
- goto err_clear;
+ return ret;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
-
- mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
if (vma->obj) {
- atomic_inc(&vma->obj->bind_count);
- assert_bind_count(vma->obj);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ atomic_inc(&obj->bind_count);
+ assert_bind_count(obj);
}
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
-
-err_clear:
- vma->ops->clear_pages(vma);
-err_unpin:
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
- return ret;
}
static void
-i915_vma_remove(struct i915_vma *vma)
+i915_vma_detach(struct i915_vma *vma)
{
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
-
- vma->ops->clear_pages(vma);
-
- mutex_lock(&vma->vm->mutex);
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
/*
- * Since the unbound list is global, only move to that list if
- * no more VMAs exist.
+ * And finally, now that the object is completely decoupled from this
+ * vma, we can drop its hold on the backing storage and allow
+ * it to be reaped by the shrinker.
*/
+ list_del(&vma->vm_link);
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
+ assert_bind_count(obj);
atomic_dec(&obj->bind_count);
+ }
+}
- /*
- * And finally now the object is completely decoupled from this
- * vma, we can drop its hold on the backing storage and allow
- * it to be reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- assert_bind_count(obj);
+static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
+{
+ unsigned int bound;
+ bool pinned = true;
+
+ bound = atomic_read(&vma->flags);
+ do {
+ if (unlikely(flags & ~bound))
+ return false;
+
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
+ return false;
+
+ if (!(bound & I915_VMA_PIN_MASK))
+ goto unpinned;
+
+ GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+
+ return true;
+
+unpinned:
+ /*
+ * If pin_count==0, but we are bound, check under the lock to avoid
+ * racing with a concurrent i915_vma_unbind().
+ */
+ mutex_lock(&vma->vm->mutex);
+ do {
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
+ pinned = false;
+ break;
+ }
+
+ if (unlikely(flags & ~bound)) {
+ pinned = false;
+ break;
+ }
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+ mutex_unlock(&vma->vm->mutex);
+
+ return pinned;
+}
+
+static int vma_get_pages(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
+ return 0;
+
+ /* Allocations ahoy! */
+ if (mutex_lock_interruptible(&vma->pages_mutex))
+ return -EINTR;
+
+ if (!atomic_read(&vma->pages_count)) {
+ if (vma->obj) {
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ goto unlock;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err) {
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ goto unlock;
+ }
}
+ atomic_inc(&vma->pages_count);
+
+unlock:
+ mutex_unlock(&vma->pages_mutex);
+
+ return err;
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
+static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
- const unsigned int bound = vma->flags;
- int ret;
+ /* We allocate under vma_get_pages, so beware the shrinker */
+ mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
+ GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
+ if (atomic_sub_return(count, &vma->pages_count) == 0) {
+ vma->ops->clear_pages(vma);
+ GEM_BUG_ON(vma->pages);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ }
+ mutex_unlock(&vma->pages_mutex);
+}
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+static void vma_put_pages(struct i915_vma *vma)
+{
+ if (atomic_add_unless(&vma->pages_count, -1, 1))
+ return;
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err_unpin;
+ __vma_put_pages(vma, 1);
+}
+
+static void vma_unbind_pages(struct i915_vma *vma)
+{
+ unsigned int count;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ /* The upper portion of pages_count is the number of bindings */
+ count = atomic_read(&vma->pages_count);
+ count >>= I915_VMA_PAGES_BIAS;
+ GEM_BUG_ON(!count);
+
+ __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
+}
+
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_vma_work *work = NULL;
+ unsigned int bound;
+ int err;
+
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ GEM_BUG_ON(flags & PIN_UPDATE);
+ GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
+
+ /* First try and grab the pin without rebinding the vma */
+ if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ return 0;
+
+ err = vma_get_pages(vma);
+ if (err)
+ return err;
+
+ if (flags & vma->vm->bind_async_flags) {
+ work = i915_vma_work();
+ if (!work) {
+ err = -ENOMEM;
+ goto err_pages;
+ }
}
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err_unpin;
+ /* No more allocations allowed once we hold vm->mutex */
+ err = mutex_lock_interruptible(&vma->vm->mutex);
+ if (err)
+ goto err_fence;
+
+ bound = atomic_read(&vma->flags);
+ if (unlikely(bound & I915_VMA_ERROR)) {
+ err = -ENOMEM;
+ goto err_unlock;
}
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
- if (ret)
- goto err_remove;
+ if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
+ err = -EAGAIN; /* pins are meant to be fairly temporary */
+ goto err_unlock;
+ }
- GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+ if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
+ __i915_vma_pin(vma);
+ goto err_unlock;
+ }
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unlock;
+
+ if (!(bound & I915_VMA_BIND_MASK)) {
+ err = i915_vma_insert(vma, size, alignment, flags);
+ if (err)
+ goto err_active;
+
+ if (i915_is_ggtt(vma->vm))
+ __i915_vma_set_map_and_fenceable(vma);
+ }
+
+ GEM_BUG_ON(!vma->pages);
+ err = i915_vma_bind(vma,
+ vma->obj ? vma->obj->cache_level : 0,
+ flags, work);
+ if (err)
+ goto err_remove;
+
+ /* There should be at most 2 active bindings (user, global) */
+ GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
+ atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
err_remove:
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- i915_vma_remove(vma);
- GEM_BUG_ON(vma->pages);
- GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
+ i915_vma_detach(vma);
+ drm_mm_remove_node(&vma->node);
}
-err_unpin:
- __i915_vma_unpin(vma);
- return ret;
+err_active:
+ i915_active_release(&vma->active);
+err_unlock:
+ mutex_unlock(&vma->vm->mutex);
+err_fence:
+ if (work)
+ dma_fence_work_commit(&work->base);
+err_pages:
+ vma_put_pages(vma);
+ return err;
}
void i915_vma_close(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
+ struct intel_gt *gt = vma->vm->gt;
unsigned long flags;
GEM_BUG_ON(i915_vma_is_closed(vma));
@@ -773,79 +958,87 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- spin_lock_irqsave(&i915->gt.closed_lock, flags);
- list_add(&vma->closed_link, &i915->gt.closed_vma);
- spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
+ spin_lock_irqsave(&gt->closed_lock, flags);
+ list_add(&vma->closed_link, &gt->closed_vma);
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (!i915_vma_is_closed(vma))
- return;
+ struct intel_gt *gt = vma->vm->gt;
- spin_lock_irq(&i915->gt.closed_lock);
+ spin_lock_irq(&gt->closed_lock);
list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
- __i915_vma_remove_closed(vma);
+ if (i915_vma_is_closed(vma))
+ __i915_vma_remove_closed(vma);
}
-static void __i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(vma->fence);
-
- mutex_lock(&vma->vm->mutex);
- list_del(&vma->vm_link);
- mutex_unlock(&vma->vm->mutex);
+ if (drm_mm_node_allocated(&vma->node)) {
+ mutex_lock(&vma->vm->mutex);
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+ }
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
}
- i915_active_fini(&vma->active);
+ __i915_vma_remove_closed(vma);
+ i915_vm_put(vma->vm);
+ i915_active_fini(&vma->active);
i915_vma_free(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_parked(struct intel_gt *gt)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct i915_vma *vma, *next;
- GEM_BUG_ON(i915_vma_is_pinned(vma));
+ spin_lock_irq(&gt->closed_lock);
+ list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- __i915_vma_remove_closed(vma);
+ /* XXX All to avoid keeping a reference on i915_vma itself */
- WARN_ON(i915_vma_unbind(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
- __i915_vma_destroy(vma);
-}
+ if (!i915_vm_tryopen(vm)) {
+ i915_gem_object_put(obj);
+ obj = NULL;
+ }
-void i915_vma_parked(struct drm_i915_private *i915)
-{
- struct i915_vma *vma, *next;
+ spin_unlock_irq(&gt->closed_lock);
- spin_lock_irq(&i915->gt.closed_lock);
- list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
- list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ if (obj) {
+ i915_vma_destroy(vma);
+ i915_gem_object_put(obj);
+ }
- i915_vma_destroy(vma);
+ i915_vm_close(vm);
- spin_lock_irq(&i915->gt.closed_lock);
+ /* Restart after dropping lock */
+ spin_lock_irq(&gt->closed_lock);
+ next = list_first_entry(&gt->closed_vma,
+ typeof(*next), closed_link);
}
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -883,6 +1076,20 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
+int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
+{
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ /* Wait for the vma to be bound before we start! */
+ err = i915_request_await_active(rq, &vma->active);
+ if (err)
+ return err;
+
+ return i915_active_add_request(&vma->active, rq);
+}
+
int i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags)
@@ -890,27 +1097,15 @@ int i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
int err;
- assert_vma_held(vma);
assert_object_held(obj);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- err = i915_active_ref(&vma->active, rq->timeline, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;
if (flags & EXEC_OBJECT_WRITE) {
if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
- i915_active_ref(&obj->frontbuffer->write,
- rq->timeline,
- rq);
+ i915_active_add_request(&obj->frontbuffer->write, rq);
dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -930,44 +1125,31 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-int i915_vma_unbind(struct i915_vma *vma)
+int __i915_vma_unbind(struct i915_vma *vma)
{
int ret;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vma->vm->mutex);
/*
* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
+ *
+ * XXX Actually waiting under the vm->mutex is a hindrance and
+ * should be pipelined wherever possible. In cases where that is
+ * unavoidable, we should lift the wait to before the mutex.
*/
- might_sleep();
- if (i915_vma_is_active(vma)) {
- /*
- * When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- *
- * Even more scary is that the retire callback may free
- * the object (last active vma). To prevent the explosion
- * we defer the actual object free to a worker that can
- * only proceed once it acquires the struct_mutex (which
- * we currently hold, therefore it cannot free this object
- * before we are finished).
- */
- __i915_vma_pin(vma);
- ret = i915_active_wait(&vma->active);
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
- }
- GEM_BUG_ON(i915_vma_is_active(vma));
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EBUSY;
}
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (!drm_mm_node_allocated(&vma->node))
return 0;
@@ -982,34 +1164,47 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
- mutex_lock(&vma->vm->mutex);
ret = i915_vma_revoke_fence(vma);
- mutex_unlock(&vma->vm->mutex);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */
- mutex_lock(&vma->vm->mutex);
i915_vma_revoke_mmap(vma);
- mutex_unlock(&vma->vm->mutex);
__i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(!vma->vm->closed)) {
+ if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
- i915_vma_remove(vma);
+ i915_vma_detach(vma);
+ vma_unbind_pages(vma);
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
return 0;
}
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err)
+ return err;
+
+ err = __i915_vma_unbind(vma);
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
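
The heart of the i915_vma.c rework above is try_qad_pin(): the fast path takes an extra pin with a lock-free compare-and-swap on the flags word (pin count in the low bits), and only falls back to vm->mutex when the vma is unpinned, unbound, or in error. A standalone illustration of that idea using C11 atomics; the masks and names are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

#define PIN_MASK   0x3ffu  /* low bits: pin count */
#define ERROR_FLAG 0x2000u /* a previous bind failed */

/* Atomically take a pin only while the vma is already bound as required
 * and already pinned; any other state defers to the locked slow path. */
static bool try_quick_pin(atomic_uint *flags, unsigned int need_bound)
{
	unsigned int old = atomic_load(flags);

	do {
		if (need_bound & ~old) /* required binding missing */
			return false;
		if (old & ERROR_FLAG)  /* must rebind under the lock */
			return false;
		if (!(old & PIN_MASK)) /* unpinned: racy, take the lock */
			return false;
	} while (!atomic_compare_exchange_weak(flags, &old, old + 1));

	return true;
}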
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 889fc7cb910a..465932813bc5 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -72,7 +72,7 @@ struct i915_vma {
* that exist in the ctx->handle_vmas LUT for this vma.
*/
atomic_t open_count;
- unsigned long flags;
+ atomic_t flags;
/**
* How many users have pinned this object in GTT space.
*
@@ -96,22 +96,41 @@ struct i915_vma {
* exclusive cachelines of a single page, so a maximum of 64 possible
* users.
*/
-#define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW BIT(8)
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(9)
-#define I915_VMA_LOCAL_BIND BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT 11
-#define I915_VMA_GGTT BIT(11)
-#define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_USERFAULT_BIT 13
-#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(14)
+#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT 12
+#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT 14
+#define I915_VMA_CAN_FENCE_BIT 15
+#define I915_VMA_USERFAULT_BIT 16
+#define I915_VMA_GGTT_WRITE_BIT 17
+
+#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
struct i915_active active;
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+ atomic_t pages_count; /* number of active binds to the pages */
+ struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
@@ -158,52 +177,57 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
return !i915_active_is_idle(&vma->active);
}
+int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq);
int __must_check i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags);
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT;
+ return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT_WRITE;
+ return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- vma->flags |= I915_VMA_GGTT_WRITE;
+ set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
- vma->flags &= ~I915_VMA_GGTT_WRITE;
+ return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+ __i915_vma_flags(vma));
}
void i915_vma_flush_writes(struct i915_vma *vma);
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_CAN_FENCE;
+ return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
- return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -214,7 +238,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(upper_32_bits(vma->node.start));
GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
return lower_32_bits(vma->node.start);
@@ -293,13 +317,18 @@ i915_vma_compare(struct i915_vma *vma,
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work);
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
@@ -318,30 +347,12 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
dma_resv_unlock(vma->resv);
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
- }
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_PIN_MASK;
+ return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -351,18 +362,18 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma)
{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+ atomic_inc(&vma->flags);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- vma->flags--;
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ atomic_dec(&vma->flags);
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
@@ -370,7 +381,13 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
static inline bool i915_vma_is_bound(const struct i915_vma *vma,
unsigned int where)
{
- return vma->flags & where;
+ return atomic_read(&vma->flags) & where;
+}
+
+static inline bool i915_node_color_differs(const struct drm_mm_node *node,
+ unsigned long color)
+{
+ return drm_mm_node_allocated(node) && node->color != color;
}
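
With this change vma->flags becomes an atomic_t whose low bits count pins and whose upper bits carry the bind flags, so __i915_vma_pin/__i915_vma_unpin reduce to atomic_inc/atomic_dec and i915_vma_is_bound() is a plain atomic_read() test. A minimal standalone sketch of that packing scheme follows; the bit layout, mask values, and names here are illustrative only (the driver's real masks live in i915_vma.h):

#include <assert.h>
#include <stdatomic.h>

/* Illustrative layout: low 10 bits count pins, upper bits are bind flags. */
#define PIN_MASK	0x3ffu
#define GLOBAL_BIND	(1u << 10)
#define LOCAL_BIND	(1u << 11)

struct obj { atomic_uint flags; };

static unsigned int pin_count(struct obj *o)
{
	return atomic_load(&o->flags) & PIN_MASK;
}

static void obj_pin(struct obj *o)
{
	atomic_fetch_add(&o->flags, 1);		/* pin count lives in the low bits */
}

static void obj_unpin(struct obj *o)
{
	assert(pin_count(o));			/* same ordering as the GEM_BUG_ON above */
	atomic_fetch_sub(&o->flags, 1);
}

static _Bool obj_is_bound(struct obj *o, unsigned int where)
{
	return atomic_load(&o->flags) & where;	/* mirrors i915_vma_is_bound() */
}
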
/**
@@ -382,8 +399,6 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
- * Callers must hold the struct_mutex.
- *
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
@@ -395,8 +410,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
*/
void i915_vma_unpin_iomap(struct i915_vma *vma);
@@ -424,6 +439,8 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+int __i915_vma_pin_fence(struct i915_vma *vma);
+
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
@@ -441,12 +458,11 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
-void i915_vma_parked(struct drm_i915_private *i915);
+void i915_vma_parked(struct intel_gt *gt);
#define for_each_until(cond) if (cond) break; else
@@ -470,4 +486,10 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
+static inline int i915_vma_sync(struct i915_vma *vma)
+{
+ /* Wait for the asynchronous bindings and pending GPU reads */
+ return i915_active_wait(&vma->active);
+}
+
#endif
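
Since binding now runs asynchronously through i915_vma_work, completion is tracked on vma->active rather than serialized under struct_mutex, and i915_vma_sync() is the explicit wait. A hedged call-site sketch (the wrapper function here is hypothetical, not part of the patch):

/* Hypothetical caller: flush any asynchronous bind before CPU access. */
static int wait_for_bind(struct i915_vma *vma)
{
	int err;

	err = i915_vma_sync(vma);	/* waits on vma->active */
	if (err)
		return err;

	/* vma is now safe to access through its mapping */
	return 0;
}
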
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 546577e39b4e..09870a31b4f0 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,8 +44,8 @@
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
-#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9)
#define ICL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);
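
The bump changes both the firmware path and the required version in lockstep. CSR_VERSION() packs major and minor into a single comparable integer; a sketch, assuming the 16/16 split used by intel_csr.h (worth verifying against that header):

/* Assumed packing (check intel_csr.h): major in the high half. */
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

/* CSR_VERSION(1, 9) == 0x00010009, so a plain >= test orders releases. */
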
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d0ed44d33484..a5b571364cf6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -93,9 +93,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
hweight8(sseu->slice_mask), sseu->slice_mask);
drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
@@ -118,10 +118,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- int slice_stride = sseu->max_subslices * subslice_stride;
+ int slice_stride = sseu->max_subslices * sseu->eu_stride;
- return slice * slice_stride + subslice * subslice_stride;
+ return slice * slice_stride + subslice * sseu->eu_stride;
}
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
@@ -130,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
int i, offset = sseu_eu_idx(sseu, slice, subslice);
u16 eu_mask = 0;
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
(i * BITS_PER_BYTE);
}
@@ -143,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
{
int i, offset = sseu_eu_idx(sseu, slice, subslice);
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
sseu->eu_mask[offset + i] =
(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
}
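
sseu_get_eus()/sseu_set_eus() address eu_mask as a flat byte array with eu_stride bytes per subslice; the refactor simply caches the stride instead of recomputing GEN_SSEU_STRIDE() on every call. A standalone sketch of the same packing for a single slice (names illustrative; the real sseu_eu_idx() additionally multiplies by a per-slice stride):

#include <stdint.h>

/* 'stride' bytes of mask per subslice, little-endian within the mask. */
static void set_eus(uint8_t *mask, int stride, int subslice, uint16_t eus)
{
	int i, offset = subslice * stride;

	for (i = 0; i < stride; i++)
		mask[offset + i] = (eus >> (8 * i)) & 0xff;
}

static uint16_t get_eus(const uint8_t *mask, int stride, int subslice)
{
	int i, offset = subslice * stride;
	uint16_t eus = 0;

	for (i = 0; i < stride; i++)
		eus |= (uint16_t)mask[offset + i] << (8 * i);

	return eus;
}
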
@@ -160,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
}
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
@@ -183,44 +182,80 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u8 s_en, u32 ss_en, u16 eu_en)
+{
+ int s, ss;
+
+ /* ss_en represents entire subslice mask across all slices */
+ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+ sizeof(ss_en) * BITS_PER_BYTE);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if ((s_en & BIT(s)) == 0)
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ intel_sseu_set_subslices(sseu, s, ss_en);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ u8 s_en;
+ u32 dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
+
+ dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
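
On gen12 the GEN11_EU_DISABLE fuse stores one bit per pair of EUs, so the loop above widens each fuse bit into two adjacent mask bits. A standalone sketch of that expansion with a worked value:

#include <stdint.h>

/* One fuse bit enables a pair of EUs: fuse 0b0101 -> mask 0b00110011. */
static uint16_t expand_eu_pairs(uint8_t fuse, int max_eus)
{
	uint16_t mask = 0;
	int eu;

	for (eu = 0; eu < max_eus / 2; eu++)
		if (fuse & (1u << eu))
			mask |= 3u << (eu * 2);	/* BIT(2n) | BIT(2n + 1) */

	return mask;
}
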
+
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
- u32 ss_en, ss_en_mask;
+ u32 ss_en;
u8 eu_en;
- int s;
- if (IS_ELKHARTLAKE(dev_priv)) {
- sseu->max_slices = 1;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
- } else {
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
- }
+ if (IS_ELKHARTLAKE(dev_priv))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
- ss_en_mask = BIT(sseu->max_subslices) - 1;
eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
- for (s = 0; s < sseu->max_slices; s++) {
- if (s_en & BIT(s)) {
- int ss_idx = sseu->max_subslices * s;
- int ss;
-
- sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu->subslice_mask[s] & BIT(ss))
- sseu_set_eus(sseu, s, ss, eu_en);
- }
- }
- }
- sseu->eu_per_subslice = hweight8(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
+ gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -236,23 +271,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
const int eu_mask = 0xff;
u32 subslice_mask, eu_en;
+ intel_sseu_set_info(sseu, 6, 4, 8);
+
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
- sseu->max_slices = 6;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
-
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- sseu->subslice_mask[0] = subslice_mask;
- for (s = 1; s < sseu->max_slices; s++)
- sseu->subslice_mask[s] = subslice_mask & 0x3;
/* Slice0 */
eu_en = ~I915_READ(GEN8_EU_DISABLE0);
@@ -277,14 +299,25 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
eu_en = ~I915_READ(GEN10_EU_DISABLE3);
sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
- /* Do a second pass where we mark the subslices disabled if all their
- * eus are off.
- */
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
for (s = 0; s < sseu->max_slices; s++) {
+ u32 subslice_mask_with_eus = subslice_mask;
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
if (sseu_get_eus(sseu, s, ss) == 0)
- sseu->subslice_mask[s] &= ~BIT(ss);
+ subslice_mask_with_eus &= ~BIT(ss);
}
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ intel_sseu_set_subslices(sseu, s, s == 0 ?
+ subslice_mask_with_eus :
+ subslice_mask_with_eus & 0x3);
}
sseu->eu_total = compute_eu_total(sseu);
@@ -310,13 +343,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
+ u8 subslice_mask = 0;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
- sseu->max_slices = 1;
- sseu->max_subslices = 2;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 1, 2, 8);
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
@@ -325,7 +357,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(0);
+ subslice_mask |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
@@ -336,10 +368,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(1);
+ subslice_mask |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
+ intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
sseu->eu_total = compute_eu_total(sseu);
/*
@@ -372,9 +406,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
/* BXT has a single slice and at most 3 subslices. */
- sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
- sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
+ IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -393,14 +426,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
u8 eu_disabled_mask;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -473,9 +506,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- sseu->max_slices = 3;
- sseu->max_subslices = 3;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 3, 3, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -502,13 +533,13 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -552,6 +583,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
+ u8 subslice_mask = 0;
int s, ss;
/*
@@ -564,22 +596,18 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0);
+ subslice_mask = BIT(0);
break;
case 2:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
case 3:
sseu->slice_mask = BIT(0) | BIT(1);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
- sseu->subslice_mask[1] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
}
- sseu->max_slices = hweight8(sseu->slice_mask);
- sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
-
fuse1 = I915_READ(HSW_PAVP_FUSE1);
switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
default:
@@ -596,9 +624,14 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->eu_per_subslice = 6;
break;
}
- sseu->max_eus_per_subslice = sseu->eu_per_subslice;
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
(1UL << sseu->eu_per_subslice) - 1);
@@ -900,12 +933,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
- if (i915_modparams.disable_display) {
- DRM_INFO("Display disabled (module parameter)\n");
- info->num_pipes = 0;
- } else if (HAS_DISPLAY(dev_priv) &&
- (IS_GEN_RANGE(dev_priv, 7, 8)) &&
- HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
+ HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -923,14 +952,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
- info->num_pipes = 0;
+ info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
DRM_INFO("PipeC fused off\n");
- info->num_pipes -= 1;
+ info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 enabled_mask = BIT(info->num_pipes) - 1;
+ u8 enabled_mask = info->pipe_mask;
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
enabled_mask &= ~BIT(PIPE_A);
@@ -951,7 +980,20 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
enabled_mask);
else
- info->num_pipes = hweight8(enabled_mask);
+ info->pipe_mask = enabled_mask;
+
+ if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
+ info->display.has_hdcp = 0;
+
+ if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+ info->display.has_fbc = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ info->display.has_csr = 0;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
+ info->display.has_dsc = 0;
}
/* Initialize slice/subslice/EU info */
@@ -965,8 +1007,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
gen9_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (IS_GEN(dev_priv, 11))
gen11_sseu_info_init(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 12)
+ gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
@@ -1010,8 +1054,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEN11_GT_VEBOX_DISABLE_SHIFT;
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(dev_priv, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
@@ -1032,8 +1078,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(dev_priv, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 92e0c2e0954c..4bdf8a6cfb47 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -107,6 +107,7 @@ enum intel_ppgtt_type {
func(is_mobile); \
func(is_lp); \
func(require_force_probe); \
+ func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
@@ -135,8 +136,11 @@ enum intel_ppgtt_type {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_dsb); \
+ func(has_dsc); \
func(has_fbc); \
func(has_gmch); \
+ func(has_hdcp); \
func(has_hotplug); \
func(has_ipc); \
func(has_modular_fia); \
@@ -159,9 +163,11 @@ struct intel_device_info {
unsigned int page_sizes; /* page sizes supported by the HW */
+ u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
- u8 num_pipes;
+ u8 pipe_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
new file mode 100644
index 000000000000..baaeaecc64af
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_drv.h"
+
+/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
+#define REGION_MAP(type, inst) \
+ BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
+
+const u32 intel_region_map[] = {
+ [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
+ [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
+ [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
+};
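
As the XXX comment notes, each map entry stores BIT(type + INTEL_MEMORY_TYPE_SHIFT) | BIT(instance), and the MEMORY_*_FROM_REGION macros in intel_memory_region.h invert the encoding with ilog2 (the log of a single set bit is its position). A worked example using the enum values declared in the new header, where INTEL_MEMORY_LOCAL == 1:

/*
 * REGION_MAP(INTEL_MEMORY_LOCAL, 0) == BIT(17) | BIT(0) == 0x20001
 * MEMORY_TYPE_FROM_REGION(0x20001)     == ilog2(0x20001 >> 16) == ilog2(2) == 1
 * MEMORY_INSTANCE_FROM_REGION(0x20001) == ilog2(0x20001 & 0xffff) == ilog2(1) == 0
 */
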
+
+static u64
+intel_memory_region_free_pages(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ struct i915_buddy_block *block, *on;
+ u64 size = 0;
+
+ list_for_each_entry_safe(block, on, blocks, link) {
+ size += i915_buddy_block_size(&mem->mm, block);
+ i915_buddy_free(&mem->mm, block);
+ }
+ INIT_LIST_HEAD(blocks);
+
+ return size;
+}
+
+void
+__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ mutex_lock(&mem->mm_lock);
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+}
+
+void
+__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+{
+ struct list_head blocks;
+
+ INIT_LIST_HEAD(&blocks);
+ list_add(&block->link, &blocks);
+ __intel_memory_region_put_pages_buddy(block->private, &blocks);
+}
+
+int
+__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks)
+{
+ unsigned int min_order = 0;
+ unsigned long n_pages;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
+ GEM_BUG_ON(!list_empty(blocks));
+
+ if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
+ min_order = ilog2(mem->min_page_size) -
+ ilog2(mem->mm.chunk_size);
+ }
+
+ if (flags & I915_ALLOC_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+ }
+
+ n_pages = size >> ilog2(mem->mm.chunk_size);
+
+ mutex_lock(&mem->mm_lock);
+
+ do {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ order = fls(n_pages) - 1;
+ GEM_BUG_ON(order > mem->mm.max_order);
+ GEM_BUG_ON(order < min_order);
+
+ do {
+ block = i915_buddy_alloc(&mem->mm, order);
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order)
+ goto err_free_blocks;
+ } while (1);
+
+ n_pages -= BIT(order);
+
+ block->private = mem;
+ list_add(&block->link, blocks);
+
+ if (!n_pages)
+ break;
+ } while (1);
+
+ mutex_unlock(&mem->mm_lock);
+ return 0;
+
+err_free_blocks:
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+ return -ENXIO;
+}
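
The allocation loop above peels off the largest power-of-two order that still fits (fls(n_pages) - 1) and, when the buddy allocator cannot supply that order, steps down one order at a time until min_order, failing with -ENXIO below that. A runnable toy showing just the greedy split (the retry-on-failure path is elided):

#include <stdio.h>

static int fls_ul(unsigned long v)	/* 1-based index of the highest set bit */
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned long n_pages = 10;	/* e.g. a 10-chunk request */

	while (n_pages) {
		int order = fls_ul(n_pages) - 1;

		printf("order %d -> %lu chunks\n", order, 1ul << order);
		n_pages -= 1ul << order;
	}
	return 0;			/* prints order 3, then order 1 */
}
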
+
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct i915_buddy_block *block;
+ LIST_HEAD(blocks);
+ int ret;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
+ if (ret)
+ return ERR_PTR(ret);
+
+ block = list_first_entry(&blocks, typeof(*block), link);
+ list_del_init(&block->link);
+ return block;
+}
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem)
+{
+ return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+ PAGE_SIZE);
+}
+
+void intel_memory_region_release_buddy(struct intel_memory_region *mem)
+{
+ i915_buddy_fini(&mem->mm);
+}
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops)
+{
+ struct intel_memory_region *mem;
+ int err;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ mem->i915 = i915;
+ mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->io_start = io_start;
+ mem->min_page_size = min_page_size;
+ mem->ops = ops;
+
+ mutex_init(&mem->objects.lock);
+ INIT_LIST_HEAD(&mem->objects.list);
+ INIT_LIST_HEAD(&mem->objects.purgeable);
+
+ mutex_init(&mem->mm_lock);
+
+ if (ops->init) {
+ err = ops->init(mem);
+ if (err)
+ goto err_free;
+ }
+
+ kref_init(&mem->kref);
+ return mem;
+
+err_free:
+ kfree(mem);
+ return ERR_PTR(err);
+}
+
+static void __intel_memory_region_destroy(struct kref *kref)
+{
+ struct intel_memory_region *mem =
+ container_of(kref, typeof(*mem), kref);
+
+ if (mem->ops->release)
+ mem->ops->release(mem);
+
+ mutex_destroy(&mem->mm_lock);
+ mutex_destroy(&mem->objects.lock);
+ kfree(mem);
+}
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem)
+{
+ kref_get(&mem->kref);
+ return mem;
+}
+
+void intel_memory_region_put(struct intel_memory_region *mem)
+{
+ kref_put(&mem->kref, __intel_memory_region_destroy);
+}
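
Region lifetime is kref-based: the final put runs __intel_memory_region_destroy(), which invokes the ops->release() hook before freeing. A hedged call-site sketch (this wrapper is hypothetical, not part of the patch):

/* Hypothetical caller keeping a region alive across one allocation. */
static int use_region_once(struct intel_memory_region *region,
			   resource_size_t size)
{
	struct intel_memory_region *mem = intel_memory_region_get(region);
	struct i915_buddy_block *block;

	block = __intel_memory_region_get_block_buddy(mem, size, 0);
	if (!IS_ERR(block))
		__intel_memory_region_put_block_buddy(block);

	intel_memory_region_put(mem);	/* final put runs ops->release() */
	return IS_ERR(block) ? PTR_ERR(block) : 0;
}
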
+
+/* Global memory region registration -- only slight layer inversions! */
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+ u32 type;
+
+ if (!HAS_REGION(i915, BIT(i)))
+ continue;
+
+ type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ mem = i915_gem_shmem_setup(i915);
+ break;
+ case INTEL_MEMORY_STOLEN:
+ mem = i915_gem_stolen_setup(i915);
+ break;
+ case INTEL_MEMORY_LOCAL:
+ mem = intel_setup_fake_lmem(i915);
+ break;
+ }
+
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ goto out_cleanup;
+ }
+
+ mem->id = intel_region_map[i];
+ mem->type = type;
+ mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+ i915->mm.regions[i] = mem;
+ }
+
+ return 0;
+
+out_cleanup:
+ intel_memory_regions_driver_release(i915);
+ return err;
+}
+
+void intel_memory_regions_driver_release(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *region =
+ fetch_and_zero(&i915->mm.regions[i]);
+
+ if (region)
+ intel_memory_region_put(region);
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_memory_region.c"
+#include "selftests/mock_region.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
new file mode 100644
index 000000000000..238722009677
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_MEMORY_REGION_H__
+#define __INTEL_MEMORY_REGION_H__
+
+#include <linux/kref.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/io-mapping.h>
+#include <drm/drm_mm.h>
+
+#include "i915_buddy.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * Base memory type
+ */
+enum intel_memory_type {
+ INTEL_MEMORY_SYSTEM = 0,
+ INTEL_MEMORY_LOCAL,
+ INTEL_MEMORY_STOLEN,
+};
+
+enum intel_region_id {
+ INTEL_REGION_SMEM = 0,
+ INTEL_REGION_LMEM,
+ INTEL_REGION_STOLEN,
+ INTEL_REGION_UNKNOWN, /* Should be last */
+};
+
+#define REGION_SMEM BIT(INTEL_REGION_SMEM)
+#define REGION_LMEM BIT(INTEL_REGION_LMEM)
+#define REGION_STOLEN BIT(INTEL_REGION_STOLEN)
+
+#define INTEL_MEMORY_TYPE_SHIFT 16
+
+#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
+#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
+
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS BIT(1)
+
+/**
+ * Memory regions encoded as type | instance
+ */
+extern const u32 intel_region_map[];
+
+struct intel_memory_region_ops {
+ unsigned int flags;
+
+ int (*init)(struct intel_memory_region *mem);
+ void (*release)(struct intel_memory_region *mem);
+
+ struct drm_i915_gem_object *
+ (*create_object)(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+};
+
+struct intel_memory_region {
+ struct drm_i915_private *i915;
+
+ const struct intel_memory_region_ops *ops;
+
+ struct io_mapping iomap;
+ struct resource region;
+
+ /* For fake LMEM */
+ struct drm_mm_node fake_mappable;
+
+ struct i915_buddy_mm mm;
+ struct mutex mm_lock;
+
+ struct kref kref;
+
+ resource_size_t io_start;
+ resource_size_t min_page_size;
+
+ unsigned int type;
+ unsigned int instance;
+ unsigned int id;
+
+ dma_addr_t remap_addr;
+
+ struct {
+ struct mutex lock; /* Protects access to objects */
+ struct list_head list;
+ struct list_head purgeable;
+ } objects;
+};
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem);
+void intel_memory_region_release_buddy(struct intel_memory_region *mem);
+
+int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks);
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks);
+void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops);
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem);
+void intel_memory_region_put(struct intel_memory_region *mem);
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
+void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 15f8bff141f9..8fd92b9130a7 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -52,7 +52,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
@@ -74,12 +75,16 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+ return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
@@ -87,6 +92,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
WARN_ON(!IS_TIGERLAKE(dev_priv));
return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Jasper Lake PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_JSP;
default:
return PCH_NONE;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index c29c81ec7971..d26c25dd8d54 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -23,6 +23,7 @@ enum intel_pch {
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
+ PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
};
@@ -42,16 +43,19 @@ enum intel_pch {
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 75ee027abb80..809bff955b5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,7 +25,6 @@
*
*/
-#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -38,6 +37,8 @@
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
+#include "gt/intel_llc.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
@@ -45,26 +46,6 @@
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
-/**
- * DOC: RC6
- *
- * RC6 is a special power stage which allows the GPU to enter a very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle and RC6 support is
- * enabled, and as soon as a new workload arises the GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -126,6 +107,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+ /*
+ * Lower the display internal timeout.
+ * This is needed to avoid any hard hangs when DSI port PLL
+ * is off and an MMIO access is attempted by any privileged
+ * application, using batch buffers or any other means.
+ */
+ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -216,8 +205,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
break;
}
- dev_priv->ips.r_t = dev_priv->mem_freq;
-
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
@@ -246,14 +233,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq = 0;
break;
}
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->ips.c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->ips.c_m = 1;
- } else {
- dev_priv->ips.c_m = 2;
- }
}
static const struct cxsr_latency cxsr_latency_table[] = {
@@ -1137,10 +1116,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- if (plane->id == PLANE_CURSOR)
- width = plane_state->base.crtc_w;
- else
- width = drm_rect_width(&plane_state->base.dst);
+ width = drm_rect_width(&plane_state->base.dst);
if (plane->id == PLANE_CURSOR) {
wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1327,8 +1303,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1490,7 +1466,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->cxsr = true;
wm->hpll_en = true;
@@ -1509,10 +1485,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->fbc_en)
wm->fbc_en = false;
- num_active_crtcs++;
+ num_active_pipes++;
}
- if (num_active_crtcs != 1) {
+ if (num_active_pipes != 1) {
wm->cxsr = false;
wm->hpll_en = false;
wm->fbc_en = false;
@@ -1659,7 +1635,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight32(active_planes);
+ int num_active_planes = hweight8(active_planes);
const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
int sprite0_fifo_extra = 0;
@@ -1848,8 +1824,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1909,7 +1885,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -2098,7 +2074,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->level = dev_priv->wm.max_level;
wm->cxsr = true;
@@ -2112,14 +2088,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->cxsr)
wm->cxsr = false;
- num_active_crtcs++;
+ num_active_pipes++;
wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
}
- if (num_active_crtcs != 1)
+ if (num_active_pipes != 1)
wm->cxsr = false;
- if (num_active_crtcs > 1)
+ if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2569,7 +2545,8 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
return ilk_wm_method2(crtc_state->pixel_rate,
crtc_state->base.adjusted_mode.crtc_htotal,
- plane_state->base.crtc_w, cpp, mem_value);
+ drm_rect_width(&plane_state->base.dst),
+ cpp, mem_value);
}
/* Only for WM_LP. */
@@ -2648,7 +2625,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
/*
* For some reason the non self refresh
@@ -3109,8 +3086,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane *plane;
- const struct drm_plane_state *plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
const struct intel_plane_state *pristate = NULL;
const struct intel_plane_state *sprstate = NULL;
const struct intel_plane_state *curstate = NULL;
@@ -3119,15 +3096,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
pipe_wm = &crtc_state->wm.ilk.optimal;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) {
- const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- pristate = ps;
- else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = ps;
- else if (plane->type == DRM_PLANE_TYPE_CURSOR)
- curstate = ps;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
}
pipe_wm->pipe_enabled = crtc_state->base.active;
@@ -3654,10 +3629,47 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
+ /* HACK! */
+ if (IS_GEN(dev_priv, 12))
+ return false;
+
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
+static void
+skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (!ret) {
+ dev_priv->sagv_block_time_us = val;
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n");
+ } else if (IS_GEN(dev_priv, 11)) {
+ dev_priv->sagv_block_time_us = 10;
+ return;
+ } else if (IS_GEN(dev_priv, 10)) {
+ dev_priv->sagv_block_time_us = 20;
+ return;
+ } else if (IS_GEN(dev_priv, 9)) {
+ dev_priv->sagv_block_time_us = 30;
+ return;
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ }
+
+ /* Default to an unusable block time */
+ dev_priv->sagv_block_time_us = -1;
+}
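
This centralizes the per-gen constants so that intel_can_enable_sagv() (below) can compare latencies against a cached dev_priv->sagv_block_time_us instead of recomputing them per call. The fixed fallbacks reduce to a small table; an equivalent data-driven sketch of just those defaults (gen12 instead queries pcode, and -1 marks the block time unusable):

static int default_sagv_block_time_us(int gen)
{
	switch (gen) {
	case 11: return 10;
	case 10: return 20;
	case 9:  return 30;
	default: return -1;
	}
}
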
+
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
@@ -3746,33 +3758,25 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
struct intel_crtc_state *crtc_state;
enum pipe pipe;
int level, latency;
- int sagv_block_time_us;
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN(dev_priv, 9))
- sagv_block_time_us = 30;
- else if (IS_GEN(dev_priv, 10))
- sagv_block_time_us = 20;
- else
- sagv_block_time_us = 10;
-
/*
* If there are no active CRTCs, no additional checks need be performed
*/
- if (hweight32(state->active_crtcs) == 0)
+ if (hweight8(state->active_pipes) == 0)
return true;
/*
* SKL+ workaround: bspec recommends we disable SAGV when we have
* more then one pipe enabled
*/
- if (hweight32(state->active_crtcs) > 1)
+ if (hweight8(state->active_pipes) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(state->active_crtcs) - 1;
+ pipe = ffs(state->active_pipes) - 1;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
@@ -3804,7 +3808,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
* incur memory latencies higher than sagv_block_time_us we
* can't enable SAGV.
*/
- if (latency < sagv_block_time_us)
+ if (latency < dev_priv->sagv_block_time_us)
return false;
}
@@ -3867,14 +3871,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
if (WARN_ON(!state) || !crtc_state->base.active) {
alloc->start = 0;
alloc->end = 0;
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
return;
}
if (intel_state->active_pipe_changes)
- *num_active = hweight32(intel_state->active_crtcs);
+ *num_active = hweight8(intel_state->active_pipes);
else
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
*num_active, ddb);
@@ -4005,7 +4009,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (is_planar_yuv_format(fourcc))
+ if (fourcc &&
+ drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4063,7 +4068,6 @@ static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4071,27 +4075,17 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
- /* n.b., src is 16.16 fixed point, dst is whole integer */
- if (plane->id == PLANE_CURSOR) {
- /*
- * Cursors only support 0/180 degree rotation,
- * hence no need to account for rotation here.
- */
- src_w = plane_state->base.src_w >> 16;
- src_h = plane_state->base.src_h >> 16;
- dst_w = plane_state->base.crtc_w;
- dst_h = plane_state->base.crtc_h;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ *
+ * n.b., src is 16.16 fixed point, dst is whole integer.
+ */
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4101,117 +4095,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
return mul_fixed16(downscale_w, downscale_h);
}
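
The src rectangle is 16.16 fixed point while dst is a whole integer, and the downscale amount is max(src/dst, 1) per axis, multiplied across axes. A self-contained sketch of that arithmetic with a worked value, 3840x2160 scaled into 1920x1080 gives 2.0 * 2.0 = 4.0 (0x40000); the helper names mirror the driver's but the implementations here are illustrative:

#include <stdint.h>

typedef uint32_t fixed16;		/* 16.16 fixed point */

static fixed16 div_fixed16(uint32_t a, uint32_t b)
{
	return (fixed16)(((uint64_t)a << 16) / b);
}

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	return (fixed16)(((uint64_t)a * b) >> 16);
}

static fixed16 max_fixed16(fixed16 a, fixed16 b)
{
	return a > b ? a : b;
}

static fixed16 downscale_amount(uint32_t src_w, uint32_t src_h,
				uint32_t dst_w, uint32_t dst_h)
{
	fixed16 one = 1u << 16;
	fixed16 w = max_fixed16(div_fixed16(src_w, dst_w), one);
	fixed16 h = max_fixed16(div_fixed16(src_h, dst_h), one);

	return mul_fixed16(w, h);	/* 3840x2160 -> 1920x1080 yields 0x40000 */
}
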
-static uint_fixed_16_16_t
-skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
-{
- uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return pipe_downscale;
-
- if (crtc_state->pch_pfit.enabled) {
- u32 src_w, src_h, dst_w, dst_h;
- u32 pfit_size = crtc_state->pch_pfit.size;
- uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
- uint_fixed_16_16_t downscale_h, downscale_w;
-
- src_w = crtc_state->pipe_src_w;
- src_h = crtc_state->pipe_src_h;
- dst_w = pfit_size >> 16;
- dst_h = pfit_size & 0xffff;
-
- if (!dst_w || !dst_h)
- return pipe_downscale;
-
- fp_w_ratio = div_fixed16(src_w, dst_w);
- fp_h_ratio = div_fixed16(src_h, dst_h);
- downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
- downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
- pipe_downscale = mul_fixed16(downscale_w, downscale_h);
- }
-
- return pipe_downscale;
-}
-
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
- int crtc_clock, dotclk;
- u32 pipe_max_pixel_rate;
- uint_fixed_16_16_t pipe_downscale;
- uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return 0;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- uint_fixed_16_16_t plane_downscale;
- uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
- int bpp;
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- continue;
-
- if (WARN_ON(!plane_state->base.fb))
- return -EINVAL;
-
- plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
- bpp = plane_state->base.fb->format->cpp[0] * 8;
- if (bpp == 64)
- plane_downscale = mul_fixed16(plane_downscale,
- fp_9_div_8);
-
- max_downscale = max_fixed16(plane_downscale, max_downscale);
- }
- pipe_downscale = skl_pipe_downscale_amount(crtc_state);
-
- pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- dotclk *= 2;
-
- pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
-
- if (pipe_max_pixel_rate < crtc_clock) {
- DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
- const int plane)
+ int color_plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
u32 data_rate;
u32 width = 0, height = 0;
- struct drm_framebuffer *fb;
- u32 format;
uint_fixed_16_16_t down_scale_amount;
u64 rate;
if (!plane_state->base.visible)
return 0;
- fb = plane_state->base.fb;
- format = fb->format->format;
-
- if (intel_plane->id == PLANE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && !is_planar_yuv_format(format))
+
+ if (color_plane == 1 &&
+ !drm_format_info_is_yuv_semiplanar(fb->format))
return 0;
/*
@@ -4223,7 +4126,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height = drm_rect_height(&plane_state->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && is_planar_yuv_format(format)) {
+ if (color_plane == 1) {
width /= 2;
height /= 2;
}
@@ -4234,7 +4137,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
- rate *= fb->format->cpp[plane];
+ rate *= fb->format->cpp[color_plane];
return rate;
}
@@ -4244,18 +4147,16 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *uv_plane_data_rate)
{
struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
/* packed/y */
@@ -4276,21 +4177,19 @@ static u64
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate)
{
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!crtc_state->base.state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
- if (!plane_state->linked_plane) {
+ if (!plane_state->planar_linked_plane) {
rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
@@ -4299,17 +4198,17 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
/*
* The slave plane might not iterate in
- * drm_atomic_crtc_state_for_each_plane_state(),
+ * intel_atomic_crtc_state_for_each_plane_state(),
* and needs the master plane state which may be
* NULL if we try get_new_plane_state(), so we
* always calculate from the master.
*/
- if (plane_state->slave)
+ if (plane_state->planar_slave)
continue;
/* Y plane rate is calculated on the slave */
rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
- y_plane_id = plane_state->linked_plane->id;
+ y_plane_id = plane_state->planar_linked_plane->id;
plane_data_rate[y_plane_id] = rate;
total_data_rate += rate;
@@ -4639,7 +4538,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 interm_pbpl;
/* only planar format has two planes */
- if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
+ if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) {
DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
@@ -4651,7 +4550,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = is_planar_yuv_format(format->format);
+ wp->is_planar = drm_format_info_is_yuv_semiplanar(format);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
@@ -4723,20 +4622,15 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
const struct drm_framebuffer *fb = plane_state->base.fb;
int width;
- if (plane->id == PLANE_CURSOR) {
- width = plane_state->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->base.src) >> 16;
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->base.src) >> 16;
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
@@ -5048,12 +4942,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
int ret;
/* Watermarks calculated in master */
- if (plane_state->slave)
+ if (plane_state->planar_slave)
return 0;
- if (plane_state->linked_plane) {
+ if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id y_plane_id = plane_state->linked_plane->id;
+ enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
WARN_ON(!fb->format->is_yuv ||
@@ -5082,8 +4976,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
int ret;
/*
@@ -5092,10 +4986,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state,
- &crtc_state->base) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
+ crtc_state) {
if (INTEL_GEN(dev_priv) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
@@ -5255,19 +5147,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static u32
-pipes_modified(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 i, ret = 0;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- ret |= drm_crtc_mask(&crtc->base);
-
- return ret;
-}
-
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
@@ -5443,36 +5322,27 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
+static int intel_add_all_pipes(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 realloc_pipes = pipes_modified(state);
- int ret, i;
- /*
- * When we distrust bios wm we always need to recompute to set the
- * expected DDB allocations for each CRTC.
- */
- if (dev_priv->wm.distrust_bios_wm)
- (*changed) = true;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
- /*
- * If this transaction isn't actually touching any CRTC's, don't
- * bother with watermark calculation. Note that if we pass this
- * test, we're guaranteed to hold at least one CRTC state mutex,
- * which means we can safely use values like dev_priv->active_crtcs
- * since any racing commits that want to update them would need to
- * hold _all_ CRTC state mutexes.
- */
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- (*changed) = true;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- if (!*changed)
- return 0;
+ return 0;
+}
+
+static int
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
/*
* If this is our first atomic update following hardware readout,
@@ -5481,7 +5351,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* ensure a full DDB recompute.
*/
if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
state->base.acquire_ctx);
if (ret)
return ret;
@@ -5489,13 +5359,13 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
state->active_pipe_changes = ~0;
/*
- * We usually only initialize state->active_crtcs if we
+ * We usually only initialize state->active_pipes if
* we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
if (!state->modeset)
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
}
/*
@@ -5512,18 +5382,11 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- realloc_pipes = ~0;
state->wm_results.dirty_pipes = ~0;
- }
- /*
- * We're not recomputing for the pipes not included in the commit, so
- * make sure we start with the current state.
- */
- for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ ret = intel_add_all_pipes(state);
+ if (ret)
+ return ret;
}
return 0;
@@ -5596,14 +5459,13 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- bool changed = false;
int ret, i;
/* Clear all dirty flags */
results->dirty_pipes = 0;
- ret = skl_ddb_add_affected_pipes(state, &changed);
- if (ret || !changed)
+ ret = skl_ddb_add_affected_pipes(state);
+ if (ret)
return ret;
/*
@@ -5625,7 +5487,7 @@ skl_compute_wm(struct intel_atomic_state *state)
if (!skl_pipe_wm_equals(crtc,
&old_crtc_state->wm.skl.optimal,
&new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
@@ -5645,7 +5507,7 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
- if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
+ if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
@@ -5654,12 +5516,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
static void skl_initial_wm(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_values *results = &state->wm_results;
- if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
@@ -5808,10 +5669,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
if (crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ hw->dirty_pipes |= BIT(crtc->pipe);
}
- if (dev_priv->active_crtcs) {
+ if (dev_priv->active_pipes) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
}
@@ -6389,2378 +6250,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
intel_enable_ipc(dev_priv);
}
-/*
- * Lock protecting IPS related data structures
- */
-DEFINE_SPINLOCK(mchdev_lock);
-
-bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- lockdep_assert_held(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- intel_uncore_posting_read16(uncore, MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 rgvmodectl;
- u8 fmax, fmin, fstart, vstart;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
-
- /* Enable temp reporting */
- intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- intel_uncore_write(uncore, RCUPEI, 100000);
- intel_uncore_write(uncore, RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- intel_uncore_write(uncore, RCBMAXAVG, 90000);
- intel_uncore_write(uncore, RCBMINAVG, 80000);
-
- intel_uncore_write(uncore, MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
- PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
-
- dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
- dev_priv->ips.fstart = fstart;
-
- dev_priv->ips.max_delay = fstart;
- dev_priv->ips.min_delay = fmin;
- dev_priv->ips.cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- intel_uncore_write(uncore,
- MEMINTREN,
- MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- intel_uncore_write(uncore, VIDSTART, vstart);
- intel_uncore_posting_read(uncore, VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
-
- if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
- MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- mdelay(1);
-
- ironlake_set_drps(dev_priv, fstart);
-
- dev_priv->ips.last_count1 =
- intel_uncore_read(uncore, DMIEC) +
- intel_uncore_read(uncore, DDREC) +
- intel_uncore_read(uncore, CSIEC);
- dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
- dev_priv->ips.last_time2 = ktime_get_raw_ns();
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-static void ironlake_disable_drps(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- intel_uncore_write(uncore,
- MEMINTREN,
- intel_uncore_read(uncore, MEMINTREN) &
- ~MEMINT_EVAL_CHG_EN);
- intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- intel_uncore_write(uncore,
- DEIER,
- intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
- intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
- intel_uncore_write(uncore,
- DEIMR,
- intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(i915, i915->ips.fstart);
- mdelay(1);
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
- mdelay(1);
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-/* There's a funny hw issue where the hw returns all 0 when reading from
- * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
- * ourselves, instead of doing an rmw cycle (which might result in us clearing
- * all limits and the gpu getting stuck at whatever frequency it is currently at).
- */
-static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 limits;
-
- /* Only set the down limit when we've reached the lowest level to avoid
- * getting more interrupts, otherwise leave this clear. This prevents a
- * race in the hw when coming out of rc6: There's a tiny window where
- * the hw runs at the minimal clock before selecting the desired
- * frequency; if the down threshold expires in that window we will not
- * receive a down interrupt. */
- if (INTEL_GEN(dev_priv) >= 9) {
- limits = (rps->max_freq_softlimit) << 23;
- if (val <= rps->min_freq_softlimit)
- limits |= (rps->min_freq_softlimit) << 14;
- } else {
- limits = rps->max_freq_softlimit << 24;
- if (val <= rps->min_freq_softlimit)
- limits |= rps->min_freq_softlimit << 16;
- }
-
- return limits;
-}
-
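For reference, the two field layouts encoded by the function above, with illustrative softlimit values. Note the real code only fills the min field once val has dropped to the bottom of the range, precisely to avoid the rc6-exit race described in the comment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_soft = 0x16;	/* illustrative RP0 softlimit */
	uint32_t min_soft = 0x07;	/* illustrative RPn softlimit */

	/* Gen9+ field offsets: max at bit 23, min at bit 14. */
	uint32_t gen9_limits = (max_soft << 23) | (min_soft << 14);
	/* Pre-gen9 offsets: max at bit 24, min at bit 16. */
	uint32_t gen6_limits = (max_soft << 24) | (min_soft << 16);

	printf("gen9 limits = 0x%08x\n", gen9_limits);
	printf("gen6 limits = 0x%08x\n", gen6_limits);
	return 0;
}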
-static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 threshold_up = 0, threshold_down = 0; /* in % */
- u32 ei_up = 0, ei_down = 0;
-
- lockdep_assert_held(&rps->power.mutex);
-
- if (new_power == rps->power.mode)
- return;
-
- /* Note the units here are not exactly 1us, but 1280ns. */
- switch (new_power) {
- case LOW_POWER:
- /* Upclock if more than 95% busy over 16ms */
- ei_up = 16000;
- threshold_up = 95;
-
- /* Downclock if less than 85% busy over 32ms */
- ei_down = 32000;
- threshold_down = 85;
- break;
-
- case BETWEEN:
- /* Upclock if more than 90% busy over 13ms */
- ei_up = 13000;
- threshold_up = 90;
-
- /* Downclock if less than 75% busy over 32ms */
- ei_down = 32000;
- threshold_down = 75;
- break;
-
- case HIGH_POWER:
- /* Upclock if more than 85% busy over 10ms */
- ei_up = 10000;
- threshold_up = 85;
-
- /* Downclock if less than 60% busy over 32ms */
- ei_down = 32000;
- threshold_down = 60;
- break;
- }
-
- /* Once byt can survive dynamic sw freq adjustments without a system
- * hang, this restriction can be lifted.
- */
- if (IS_VALLEYVIEW(dev_priv))
- goto skip_hw_write;
-
- I915_WRITE(GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_up));
- I915_WRITE(GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_up * threshold_up / 100));
-
- I915_WRITE(GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_down));
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_down * threshold_down / 100));
-
- I915_WRITE(GEN6_RP_CONTROL,
- (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
-skip_hw_write:
- rps->power.mode = new_power;
- rps->power.up_threshold = threshold_up;
- rps->power.down_threshold = threshold_down;
-}
-
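The intervals above are programmed in ~1280ns hardware ticks and the thresholds as a percentage of the interval, with GT_INTERVAL_FROM_US() doing the us-to-ticks conversion. A rough standalone sketch of the arithmetic, assuming a fixed 1280ns tick (the real macro scales by the command-streamer timestamp frequency, so this is only an approximation):

#include <stdio.h>

/* Assumed fixed 1280ns tick; illustrative only. */
static unsigned int interval_from_us(unsigned int us)
{
	return us * 1000 / 1280;
}

int main(void)
{
	unsigned int ei_up = 16000;	/* LOW_POWER up interval, us */
	unsigned int threshold_up = 95;	/* percent busy */

	printf("RP_UP_EI        = %u\n", interval_from_us(ei_up));
	printf("RP_UP_THRESHOLD = %u\n",
	       interval_from_us(ei_up * threshold_up / 100));
	return 0;
}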
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
-
- new_power = rps->power.mode;
- switch (rps->power.mode) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
-
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
-
- mutex_lock(&rps->power.mutex);
- if (rps->power.interactive)
- new_power = HIGH_POWER;
- rps_set_power(dev_priv, new_power);
- mutex_unlock(&rps->power.mutex);
-}
-
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
-{
- struct intel_rps *rps = &i915->gt_pm.rps;
-
- if (INTEL_GEN(i915) < 6)
- return;
-
- mutex_lock(&rps->power.mutex);
- if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
- rps_set_power(i915, HIGH_POWER);
- } else {
- GEM_BUG_ON(!rps->power.interactive);
- rps->power.interactive--;
- }
- mutex_unlock(&rps->power.mutex);
-}
-
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 mask = 0;
-
- /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
- if (val > rps->min_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < rps->max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
-
- mask &= dev_priv->pm_rps_events;
-
- return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
-}
-
-/* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
- * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* min/max delay may still have been modified, so be sure to
- * write the limits value.
- */
- if (val != rps->cur_freq) {
- gen6_set_rps_thresholds(dev_priv, val);
-
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN6_RPNSWREQ,
- GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(val));
- else
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(val) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- }
-
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- rps->cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- int err;
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- if (val != dev_priv->gt_pm.rps.cur_freq) {
- vlv_punit_get(dev_priv);
- err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(dev_priv);
- if (err)
- return err;
-
- gen6_set_rps_thresholds(dev_priv, val);
- }
-
- dev_priv->gt_pm.rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
- */
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val = rps->idle_freq;
- int err;
-
- if (rps->cur_freq <= val)
- return;
-
- /* The punit delays the write of the frequency and voltage until it
- * determines the GPU is awake. During normal usage we don't want to
- * waste power changing the frequency if the GPU is sleeping (rc6).
- * However, the GPU and driver are now idle and we do not want to delay
- * switching to minimum voltage (reducing power whilst idle) as we do
- * not expect to be woken in the near future and so must flush the
- * change by waking the device.
- *
- * We choose to take the media powerwell (either would do to trick the
- * punit into committing the voltage change) as that takes a lot less
- * power than the render powerwell.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
- err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
-
- if (err)
- DRM_ERROR("Failed to set RPS for idle\n");
-}
-
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- u8 freq;
-
- if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
- gen6_rps_reset_ei(dev_priv);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, rps->cur_freq));
-
- gen6_enable_rps_interrupts(dev_priv);
-
- /* Use the user's desired frequency as a guide, but for better
- * performance, jump directly to RPe as our starting frequency.
- */
- freq = max(rps->cur_freq,
- rps->efficient_freq);
-
- if (intel_set_rps(dev_priv,
- clamp(freq,
- rps->min_freq_softlimit,
- rps->max_freq_softlimit)))
- DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* Flush our bottom-half so that it does not race with us
- * setting the idle frequency and so that it is bounded by
- * our rpm wakeref. And then disable the interrupts to stop any
- * further RPS reclocking whilst we are asleep.
- */
- gen6_disable_rps_interrupts(dev_priv);
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_set_rps_idle(dev_priv);
- else
- gen6_set_rps(dev_priv, rps->idle_freq);
- rps->last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_sanitize_rps_pm_mask(dev_priv, ~0));
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_boost(struct i915_request *rq)
-{
- struct intel_rps *rps = &rq->i915->gt_pm.rps;
- unsigned long flags;
- bool boost;
-
- /* This is intentionally racy! We peek at the state here, then
- * validate inside the RPS worker.
- */
- if (!rps->enabled)
- return;
-
- if (i915_request_signaled(rq))
- return;
-
- /* Serializes with i915_request_retire() */
- boost = false;
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->flags |= I915_REQUEST_WAITBOOST;
- }
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!boost)
- return;
-
- if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
- schedule_work(&rps->work);
-
- atomic_inc(&rps->boosts);
-}
-
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int err;
-
- lockdep_assert_held(&rps->lock);
- GEM_BUG_ON(val > rps->max_freq);
- GEM_BUG_ON(val < rps->min_freq);
-
- if (!rps->enabled) {
- rps->cur_freq = val;
- return 0;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = valleyview_set_rps(dev_priv, val);
- else
- err = gen6_set_rps(dev_priv, val);
-
- return err;
-}
-
-static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN9_PG_ENABLE, 0);
-}
-
-static void gen9_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- /* We're doing forcewake before disabling RC6,
- * this is what the BIOS expects when going into suspend */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
-{
- bool enable_rc6 = true;
- unsigned long rc6_ctx_base;
- u32 rc_ctl;
- int rc_sw_target;
-
- rc_ctl = I915_READ(GEN6_RC_CONTROL);
- rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
- RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
- "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
- onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
- onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
- rc_sw_target);
-
- if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
- enable_rc6 = false;
- }
-
- /*
- * The exact context size is not known for BXT, so assume a page size
- * for this check.
- */
- rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
- (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
- enable_rc6 = false;
- }
-
- if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
- !I915_READ(GEN8_PUSHBUS_ENABLE) ||
- !I915_READ(GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
- enable_rc6 = false;
- }
-
- return enable_rc6;
-}
-
-static bool sanitize_rc6(struct drm_i915_private *i915)
-{
- struct intel_device_info *info = mkwrite_device_info(i915);
-
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(i915)) {
- info->has_rc6 = 0;
- info->has_rps = false;
- }
-
- if (info->has_rc6 &&
- IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
- DRM_INFO("RC6 disabled by BIOS\n");
- info->has_rc6 = 0;
- }
-
- /*
- * We assume that we do not have any deep rc6 levels if we don't
- * have the previous rc6 level supported, i.e. we use HAS_RC6()
- * as the initial coarse check for rc6 in general, moving on to
- * progressively finer/deeper levels.
- */
- if (!info->has_rc6 && info->has_rc6p)
- info->has_rc6p = 0;
-
- return info->has_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* All of these values are in units of 50MHz */
-
- /* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_GEN9_LP(dev_priv)) {
- u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 0) & 0xff;
- } else {
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 16) & 0xff;
- }
- /* hw_max = RP0 until we check for overclocking */
- rps->max_freq = rps->rp0_freq;
-
- rps->efficient_freq = rps->rp1_freq;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- u32 ddcc_status = 0;
-
- if (sandybridge_pcode_read(dev_priv,
- HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status, NULL) == 0)
- rps->efficient_freq =
- clamp_t(u8,
- ((ddcc_status >> 8) & 0xff),
- rps->min_freq,
- rps->max_freq);
- }
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Store the frequency values in 16.66 MHz units, which is
- * the natural hardware unit for SKL
- */
- rps->rp0_freq *= GEN9_FREQ_SCALER;
- rps->rp1_freq *= GEN9_FREQ_SCALER;
- rps->min_freq *= GEN9_FREQ_SCALER;
- rps->max_freq *= GEN9_FREQ_SCALER;
- rps->efficient_freq *= GEN9_FREQ_SCALER;
- }
-}
-
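A worked example of the unit conversion above: pre-gen9 parts report RP frequencies in 50 MHz units, while SKL-class parts use 16.66 MHz units, hence the multiply by GEN9_FREQ_SCALER (3 in the driver). Illustrative numbers:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* ratio of 50 MHz to 16.66 MHz units */

int main(void)
{
	unsigned int rp0 = 0x16;	/* 22 units of 50 MHz */

	printf("pre-gen9: %u MHz\n", rp0 * 50);	/* 1100 MHz */
	/* SKL stores the same frequency as 66 units of 16.66 MHz. */
	printf("gen9: %u units of 16.66 MHz\n", rp0 * GEN9_FREQ_SCALER);
	return 0;
}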
-static void reset_rps(struct drm_i915_private *dev_priv,
- int (*set)(struct drm_i915_private *, u8))
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u8 freq = rps->cur_freq;
-
- /* force a reset */
- rps->power.mode = -1;
- rps->cur_freq = -1;
-
- if (set(dev_priv, freq))
- DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
-/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_i915_private *dev_priv)
-{
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Program defaults and thresholds for RPS */
- if (IS_GEN(dev_priv, 9))
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
-
- /* 1 second timeout */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
- GT_INTERVAL_FROM_US(dev_priv, 1000000));
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
-
- /* Leaning on the below call to gen6_set_rps to program/setup the
- * Up/Down EI & threshold registers, as well as the RP_CONTROL,
- * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /*
- * 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds. */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GT_UC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from plane_state is that we
- * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_EI_MODE(1));
-
- /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6_mode;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have. */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds. */
- if (INTEL_GEN(dev_priv) >= 10) {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
- } else if (IS_SKYLAKE(dev_priv)) {
- /*
- * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
- * when CPG is enabled
- */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
- } else {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
- }
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GT_UC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from plane_state is that we
- * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
- /* WaRsUseTimeoutMode:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- else
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode);
-
- /*
- * 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
- */
- if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- I915_WRITE(GEN9_PG_ENABLE, 0);
- else
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have. */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds. */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-
- /* 3: Enable RC6 */
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_RC6_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1 Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
-
- /* Docs recommend 900MHz, and 300 MHz respectively */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- rps->max_freq_softlimit << 24 |
- rps->min_freq_softlimit << 16);
-
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
- I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
- I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6vids, rc6_mask;
- u32 gtfifodbg;
- int ret;
-
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
- else
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- /* We don't use those on Haswell */
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- if (HAS_RC6p(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (HAS_RC6pp(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
- if (IS_GEN(dev_priv, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
- rc6vids &= 0xffff00;
- rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
- if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Power down if completely idle for over 50ms */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const int min_freq = 15;
- const int scaling_factor = 180;
- unsigned int gpu_freq;
- unsigned int max_ia_freq, min_ring_freq;
- unsigned int max_gpu_freq, min_gpu_freq;
- struct cpufreq_policy *policy;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->max_freq <= rps->min_freq)
- return;
-
- policy = cpufreq_cpu_get(0);
- if (policy) {
- max_ia_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- } else {
- /*
- * Default to measured freq if none found; the PCU will ensure
- * we don't go over
- */
- max_ia_freq = tsc_khz;
- }
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- min_ring_freq = I915_READ(DCLK) & 0xf;
- /* convert DDR frequency from units of 266.6MHz to bandwidth */
- min_ring_freq = mult_frac(min_ring_freq, 8, 3);
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 MHz units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
- const int diff = max_gpu_freq - gpu_freq;
- unsigned int ia_freq = 0, ring_freq = 0;
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /*
- * ring_freq = 2 * GT. ring_freq is in 100MHz units
- * No floor required for ring frequency on SKL.
- */
- ring_freq = gpu_freq;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- /* max(2 * GT, DDR). NB: GT is 50MHz units */
- ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev_priv)) {
- ring_freq = mult_frac(gpu_freq, 5, 4);
- ring_freq = max(min_ring_freq, ring_freq);
- /* leave ia_freq as the default, chosen by cpufreq */
- } else {
- /* On older processors, there is no separate ring
- * clock domain, so in order to boost the bandwidth
- * of the ring, we need to upclock the CPU (ia_freq).
- *
- * For GPU frequencies less than 750MHz,
- * just use the lowest ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
- }
-
- sandybridge_pcode_write(dev_priv,
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
- ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
- ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
- gpu_freq);
- }
-}
-
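A worked example of the legacy (pre-Haswell) branch of the table above, with illustrative frequencies: the IA frequency is derated by scaling_factor/2 MHz per 50 MHz step below the maximum GPU frequency, then rounded to the 100 MHz units the pcode mailbox expects:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	const int scaling_factor = 180;
	unsigned int max_ia_freq = 3400;	/* MHz, from cpufreq */
	int diff = 10;				/* steps below max GPU freq */
	unsigned int ia_freq;

	ia_freq = max_ia_freq - (diff * scaling_factor) / 2;
	/* 3400 - 900 = 2500 MHz, sent to pcode as 25 (100 MHz units). */
	printf("ia_freq = %u -> %u\n", ia_freq,
	       DIV_ROUND_CLOSEST(ia_freq, 100));
	return 0;
}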
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-
- switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
- case 8:
- /* (2 * 4) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
- break;
- case 12:
- /* (2 * 6) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
- break;
- case 16:
- /* (2 * 8) config */
- default:
- /* Setting (2 * 8) Min RP0 for any other combination */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
- break;
- }
-
- rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
-
- return rp0;
-}
-
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
- rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
-
- return rpe;
-}
-
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
-
- return rp1;
-}
-
-static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpn;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
- rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
- FB_GFX_FREQ_FUSE_MASK);
-
- return rpn;
-}
-
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
-
- return rp1;
-}
-
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
- /* Clamp to max */
- rp0 = min_t(u32, rp0, 0xea);
-
- return rp0;
-}
-
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
- rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
- rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
-
- return rpe;
-}
-
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
- /*
- * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
- * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
- * a BYT-M B0 the above register contains 0xbf. Moreover when setting
- * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
- * to make sure it matches what Punit accepts.
- */
- return max_t(u32, val, 0xc0);
-}
-
-/* Check that the pctx buffer wasn't moved under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON(pctx_addr != dev_priv->dsm.start +
- dev_priv->vlv_pctx->stolen->start);
-}
-
-/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
-}
-
-static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- resource_size_t pctx_paddr, paddr;
- resource_size_t pctx_size = 32*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = dev_priv->dsm.end + 1 - pctx_size;
- GEM_BUG_ON(paddr > U32_MAX);
-
- pctx_paddr = (paddr & (~4095));
- I915_WRITE(VLV_PCBR, pctx_paddr);
- }
-
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-}
-
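The fixup above carves the power context out of the top of stolen memory and masks the address down to 4 KiB alignment before writing PCBR. A tiny sketch of that placement arithmetic, with an illustrative stolen-memory range:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dsm_end = 0x8fffffff;	/* illustrative end of stolen memory */
	uint64_t pctx_size = 32 * 1024;
	uint64_t paddr, pctx_paddr;

	paddr = dsm_end + 1 - pctx_size;
	pctx_paddr = paddr & ~4095ull;	/* PCBR wants 4 KiB alignment */

	printf("PCBR = 0x%08llx\n", (unsigned long long)pctx_paddr);
	return 0;
}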
-static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
- resource_size_t pctx_paddr;
- resource_size_t pctx_size = 24*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if (pcbr) {
- /* BIOS set it up already, grab the pre-alloc'd space */
- resource_size_t pcbr_offset;
-
- pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
- pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- pcbr_offset,
- I915_GTT_OFFSET_NONE,
- pctx_size);
- goto out;
- }
-
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-
- /*
- * From the Gunit register HAS:
- * The Gfx driver is expected to program this register and ensure
- * proper allocation within Gfx stolen memory. For example, this
- * register should be programmed such that the PCBR range does not
- * overlap with other ranges, such as the frame buffer, protected
- * memory, or any other relevant ranges.
- */
- pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
- if (!pctx) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
- goto out;
- }
-
- GEM_BUG_ON(range_overflows_t(u64,
- dev_priv->dsm.start,
- pctx->stolen->start,
- U32_MAX));
- pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
- I915_WRITE(VLV_PCBR, pctx_paddr);
-
-out:
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
- dev_priv->vlv_pctx = pctx;
-}
-
-static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
-
- pctx = fetch_and_zero(&dev_priv->vlv_pctx);
- if (pctx)
- i915_gem_object_put(pctx);
-}
-
-static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.gpll_ref_freq =
- vlv_get_cck_clock(dev_priv, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- dev_priv->czclk_freq);
-
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
- dev_priv->gt_pm.rps.gpll_ref_freq);
-}
-
-static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- valleyview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- dev_priv->mem_freq = 800;
- break;
- case 2:
- dev_priv->mem_freq = 1066;
- break;
- case 3:
- dev_priv->mem_freq = 1333;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = valleyview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = valleyview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-}
-
-static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- cherryview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- dev_priv->mem_freq = 2000;
- break;
- default:
- dev_priv->mem_freq = 1600;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = cherryview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = cherryview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
-}
-
-static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- valleyview_cleanup_pctx(dev_priv);
-}
-
-static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode, pcbr;
-
- gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
- GT_FIFO_FREE_ENTRIES_CHV);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- cherryview_check_pctx(dev_priv);
-
- /* 1a & 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have. */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2a: Program RC6 thresholds. */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /* TO threshold set to 500 us (0x186 * 1.28 us) */
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- /* For now we assume BIOS is allocating and populating the PCBR */
- pcbr = I915_READ(VLV_PCBR);
-
- /* 3: Enable RC6 */
- rc6_mode = 0;
- if (pcbr >> VLV_PCBR_ADDR_SHIFT)
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1: Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- /* Setting Fixed Bias */
- vlv_punit_get(dev_priv);
-
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg;
-
- valleyview_check_pctx(dev_priv);
-
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- vlv_punit_get(dev_priv);
-
- /* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
-
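A standalone copy of the decode above with one worked value. The 133333 constant is presumably the 133.333 MHz reference clock expressed in kHz, so under that assumption the result is in kHz:

#include <stdio.h>

static unsigned long pxfreq(unsigned int vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;	/* bits 21:16 */
	int post = (vidfreq & 0x3000) >> 12;	/* bits 13:12 */
	int pre = vidfreq & 0x7;		/* bits 2:0 */

	return pre ? (div * 133333ul) / ((1 << post) * pre) : 0;
}

int main(void)
{
	/* div = 30, post = 1, pre = 3: 30 * 133333 / (2 * 3) = 666665 */
	printf("%lu\n", pxfreq((30 << 16) | (1 << 12) | 3));
	return 0;
}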
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- lockdep_assert_held(&mchdev_lock);
-
- diff1 = now - dev_priv->ips.last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->ips.chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->ips.last_count1) {
- diff = ~0UL - dev_priv->ips.last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->ips.last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->ips.c_m &&
- cparams[i].t == dev_priv->ips.r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->ips.last_count1 = total_count;
- dev_priv->ips.last_time1 = now;
-
- dev_priv->ips.chipset_power = ret;
-
- return ret;
-}
-
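The estimate above is a linear fit over the energy-counter rate: the summed counter delta over diff1 milliseconds gives a rate, and power is (m * rate + c) / 10, with m and c looked up from cparams by memory configuration. A worked example using the first table row (the units follow the driver's internal convention):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t count_diff = 123000;	/* DMIEC + DDREC + CSIEC delta */
	uint64_t diff1_ms = 100;	/* elapsed time since last sample */
	uint32_t m = 301, c = 28664;	/* cparams row for i = 1, t = 1333 */
	uint64_t rate, power;

	rate = count_diff / diff1_ms;	/* counts per ms */
	power = (m * rate + c) / 10;	/* 370230 + 28664 -> 39889 */

	printf("chipset power ~ %llu\n", (unsigned long long)power);
	return 0;
}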
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_chipset_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *i915)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = intel_uncore_read(&i915->uncore, TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = intel_uncore_read8(&i915->uncore, TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static int _pxvid_to_vd(u8 pxvid)
-{
- if (pxvid == 0)
- return 0;
-
- if (pxvid >= 8 && pxvid < 31)
- pxvid = 31;
-
- return (pxvid + 2) * 125;
-}
-
-static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- const int vd = _pxvid_to_vd(pxvid);
- const int vm = vd - 1125;
-
- if (INTEL_INFO(dev_priv)->is_mobile)
- return vm > 0 ? vm : 0;
-
- return vd;
-}
-
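A worked example of the PXVID decode above: codes 8..30 sit in an encoding gap and are clamped up to 31, then vd = (pxvid + 2) * 125, and mobile parts additionally subtract the 1125 floor:

#include <stdio.h>

static int pxvid_to_vd(unsigned int pxvid)
{
	if (pxvid == 0)
		return 0;
	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;	/* encoding gap: clamp up to 31 */
	return (pxvid + 2) * 125;
}

int main(void)
{
	int vd = pxvid_to_vd(0x2f);	/* (47 + 2) * 125 = 6125 */

	printf("vd = %d, mobile vm = %d\n", vd, vd - 1125);	/* 6125, 5000 */
	return 0;
}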
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- u64 now, diff, diffms;
- u32 count;
-
- lockdep_assert_held(&mchdev_lock);
-
- now = ktime_get_raw_ns();
- diffms = now - dev_priv->ips.last_time2;
- do_div(diffms, NSEC_PER_MSEC);
-
- /* Don't divide by 0 */
- if (!diffms)
- return;
-
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->ips.last_count2) {
- diff = ~0UL - dev_priv->ips.last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->ips.last_count2;
- }
-
- dev_priv->ips.last_count2 = count;
- dev_priv->ips.last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->ips.gfx_power = diff;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(dev_priv, 5))
- return;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- __i915_update_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-}
-
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- lockdep_assert_held(&mchdev_lock);
-
- pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->ips.corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- __i915_update_gfx_val(dev_priv);
-
- return dev_priv->ips.gfx_power + state2;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-static struct drm_i915_private __rcu *i915_mch_dev;
-
-static struct drm_i915_private *mchdev_get(void)
-{
- struct drm_i915_private *i915;
-
- rcu_read_lock();
- i915 = rcu_dereference(i915_mch_dev);
-	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
- i915 = NULL;
- rcu_read_unlock();
-
- return i915;
-}
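mchdev_get() is an instance of the RCU + kref_get_unless_zero() lookup pattern: dereference a published pointer and take a reference only if the refcount is still non-zero, so a reader can never resurrect an object that is already being torn down (note the NULL test guarding the dereference). A userspace model, with C11 atomics standing in for kref and RCU; this is a sketch of the pattern, not kernel code:

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	atomic_int ref;	/* stands in for the embedded kref */
};

/* Stands in for the RCU-published i915_mch_dev pointer. */
static _Atomic(struct obj *) published;

/* kref_get_unless_zero(): take a reference only if still live. */
static int ref_get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->ref);

	while (v != 0)
		if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
			return 1;
	return 0;
}

static struct obj *obj_get(void)
{
	struct obj *o = atomic_load(&published);

	if (o && !ref_get_unless_zero(o))
		o = NULL;	/* lost the race against teardown */
	return o;
}

int main(void)
{
	static struct obj the_obj = { .ref = 1 };

	atomic_store(&published, &the_obj);
	return obj_get() ? 0 : 1;
}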
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *i915;
- unsigned long chipset_val = 0;
- unsigned long graphics_val = 0;
- intel_wakeref_t wakeref;
-
- i915 = mchdev_get();
- if (!i915)
- return 0;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- chipset_val = __i915_chipset_val(i915);
- graphics_val = __i915_gfx_val(i915);
- spin_unlock_irq(&mchdev_lock);
- }
-
- drm_dev_put(&i915->drm);
- return chipset_val + graphics_val;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay > i915->ips.fmax)
- i915->ips.max_delay--;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay < i915->ips.min_delay)
- i915->ips.max_delay++;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU busyness to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- ret = i915->gt.awake;
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- i915->ips.max_delay = i915->ips.fstart;
- ret = ironlake_set_drps(i915, i915->ips.fstart);
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
- }
-}
-
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
-{
- /* We only register the i915 ips part with intel-ips once everything is
- * set up, to avoid intel-ips sneaking in and reading bogus values. */
- rcu_assign_pointer(i915_mch_dev, dev_priv);
-
- ips_ping_for_i915_load();
-}
-
-void intel_gpu_ips_teardown(void)
-{
- rcu_assign_pointer(i915_mch_dev, NULL);
-}
-
-static void intel_init_emon(struct drm_i915_private *dev_priv)
-{
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
-	/* Disable PMON while we program the event weights */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW(i), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW(i), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ(i));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW(i), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL(i), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
-	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!sanitize_rc6(dev_priv)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- pm_runtime_get(&dev_priv->drm.pdev->dev);
- }
-
- /* Initialize RPS limits (for userspace) */
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_init_gt_powersave(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_init_gt_powersave(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_init_rps_frequencies(dev_priv);
-
- /* Derive initial user preferences/limits from the hardware limits */
- rps->max_freq_softlimit = rps->max_freq;
- rps->min_freq_softlimit = rps->min_freq;
-
- /* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN(dev_priv, 6) ||
- IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- u32 params = 0;
-
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
- &params, NULL);
- if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
- rps->max_freq = params & 0xff;
- }
- }
-
- /* Finally allow us to boost to max by default */
- rps->boost_freq = rps->max_freq;
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
-}
-
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv))
- valleyview_cleanup_gt_powersave(dev_priv);
-
- if (!HAS_RC6(dev_priv))
- pm_runtime_put(&dev_priv->drm.pdev->dev);
-}
-
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
- dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
- intel_disable_gt_powersave(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (!i915->gt_pm.llc_pstate.enabled)
- return;
-
- /* Currently there is no HW configuration to be done to disable. */
-
- i915->gt_pm.llc_pstate.enabled = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rc6(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = false;
-}
-
-static void intel_disable_rps(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rps.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rps(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rps(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rps(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rps(dev_priv);
- else if (IS_IRONLAKE_M(dev_priv))
- ironlake_disable_drps(dev_priv);
-
- dev_priv->gt_pm.rps.enabled = false;
-}
-
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- intel_disable_rc6(dev_priv);
- intel_disable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_disable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
-static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (i915->gt_pm.llc_pstate.enabled)
- return;
-
- gen6_update_ring_freq(i915);
-
- i915->gt_pm.llc_pstate.enabled = true;
-}
-
-static void intel_enable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_enable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
- gen11_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 9)
- gen9_enable_rc6(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- gen8_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_enable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = true;
-}
-
-static void intel_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_enable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- valleyview_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 9) {
- gen9_enable_rps(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- gen8_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- gen6_enable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_enable_drps(dev_priv);
- intel_init_emon(dev_priv);
- }
-
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
-
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
-
- rps->enabled = true;
-}
-
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
- return;
-
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- if (HAS_RC6(dev_priv))
- intel_enable_rc6(dev_priv);
- if (HAS_RPS(dev_priv))
- intel_enable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_enable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -8858,7 +6347,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- int pipe;
+ enum pipe pipe;
u32 val;
/*
@@ -9078,6 +6567,22 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
+static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 vd_pg_enable = 0;
+ unsigned int i;
+
+ /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (HAS_ENGINE(dev_priv, _VCS(i)))
+ vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
+ VDN_MFX_POWERGATE_ENABLE(i);
+ }
+
+ I915_WRITE(POWERGATE_ENABLE,
+ I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -9598,7 +7103,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = tgl_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
@@ -9654,6 +7159,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
else if (IS_GEN(dev_priv, 5))
i915_ironlake_get_mem_freq(dev_priv);
+ if (intel_has_sagv(dev_priv))
+ skl_setup_sagv_block_time(dev_priv);
+
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
@@ -9712,7 +7220,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_GEN(dev_priv, 2)) {
- if (INTEL_INFO(dev_priv)->num_pipes == 1) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -9724,217 +7232,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
}
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val - 0xb7
- * Slow = Fast = GPLL ref * N
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
-}
-
-static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
-}
-
-static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val / 2
- * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
-}
-
-static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* CHV needs even values */
- return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
-}
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
- GEN9_FREQ_SCALER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_gpu_freq(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_gpu_freq(dev_priv, val);
- else
- return val * GT_FREQUENCY_MULTIPLIER;
-}
-
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
- GT_FREQUENCY_MULTIPLIER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_freq_opcode(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_freq_opcode(dev_priv, val);
- else
- return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
-}
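The conversion helpers removed above are pure integer arithmetic; the ValleyView pair, for instance, maps opcode N to a frequency of gpll_ref_freq * (N - 0xb7) / 1000 MHz and back with round-to-nearest division, so the two directions round-trip. A standalone example; the reference clock value below is invented, not read from hardware:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int byt_gpu_freq(int gpll_ref_freq, int val)
{
	/* N = val - 0xb7; freq = GPLL ref (kHz) * N / 1000, in MHz */
	return DIV_ROUND_CLOSEST(gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(int gpll_ref_freq, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, gpll_ref_freq) + 0xb7;
}

int main(void)
{
	int gpll_ref_freq = 5400;	/* kHz, example value only */
	int opcode = 0xc8;
	int mhz = byt_gpu_freq(gpll_ref_freq, opcode);

	/* Round-tripping recovers the original opcode (0xc8). */
	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n",
	       opcode, mhz, byt_freq_opcode(gpll_ref_freq, mhz));
	return 0;
}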
-
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->gt_pm.rps.lock);
- mutex_init(&dev_priv->gt_pm.rps.power.mutex);
-
- atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
-
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- u32 lower, upper, tmp;
- int loop = 2;
-
- /*
-	 * The registers accessed do not need forcewake. We borrow the
-	 * uncore lock to prevent concurrent access to the range registers.
- */
- lockdep_assert_held(&dev_priv->uncore.lock);
-
- /*
- * vlv and chv residency counters are 40 bits in width.
- * With a control bit, we can choose between upper or lower
- * 32bit window into this counter.
- *
- * Although we always use the counter in high-range mode elsewhere,
- * userspace may attempt to read the value before rc6 is initialised,
- * before we have set the default VLV_COUNTER_CONTROL value. So always
- * set the high bit to be safe.
- */
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- do {
- tmp = upper;
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
- lower = I915_READ_FW(reg);
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- } while (upper != tmp && --loop);
-
- /*
- * Everywhere else we always use VLV_COUNTER_CONTROL with the
- * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
- * now.
- */
-
- return lower | (u64)upper << 8;
-}
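vlv_residency_raw() is a standard consistent read of a wide counter through two narrower windows: read the high half, then the low half, then the high half again, and retry once if the high half moved in between. A self-contained model with a simulated 40-bit counter; the window layout (bits 39:8 high, bits 31:0 low) follows from the final OR in the code above:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter;	/* simulated 40-bit counter */

static uint32_t read_window(int high)
{
	/* High window exposes bits 39:8, low window bits 31:0. */
	return high ? (uint32_t)(hw_counter >> 8) : (uint32_t)hw_counter;
}

static uint64_t read_split_counter(void)
{
	uint32_t lower, upper, tmp;
	int loop = 2;

	upper = read_window(1);
	do {
		tmp = upper;
		lower = read_window(0);
		upper = read_window(1);
	} while (upper != tmp && --loop);

	/* The overlapping bits 31:8 agree, so the OR is consistent. */
	return lower | (uint64_t)upper << 8;
}

int main(void)
{
	hw_counter = 0xabcdef1234ull;
	printf("0x%llx\n", (unsigned long long)read_split_counter());
	return 0;
}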
-
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u64 time_hw, prev_hw, overflow_hw;
- unsigned int fw_domains;
- unsigned long flags;
- unsigned int i;
- u32 mul, div;
-
- if (!HAS_RC6(dev_priv))
- return 0;
-
- /*
- * Store previous hw counter values for counter wrap-around handling.
- *
- * There are only four interesting registers and they live next to each
-	 * other, so we can use the offset relative to the smallest one as
-	 * the index into driver storage.
- */
- i = (i915_mmio_reg_offset(reg) -
- i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
- return 0;
-
- fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
-
- spin_lock_irqsave(&uncore->lock, flags);
- intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
- /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- mul = 1000000;
- div = dev_priv->czclk_freq;
- overflow_hw = BIT_ULL(40);
- time_hw = vlv_residency_raw(dev_priv, reg);
- } else {
- /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
- if (IS_GEN9_LP(dev_priv)) {
- mul = 10000;
- div = 12;
- } else {
- mul = 1280;
- div = 1;
- }
-
- overflow_hw = BIT_ULL(32);
- time_hw = intel_uncore_read_fw(uncore, reg);
- }
-
- /*
- * Counter wrap handling.
- *
-	 * This relies on a sufficient frequency of queries, otherwise the
-	 * counters can still wrap.
- */
- prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
- dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
-
- /* RC6 delta from last sample. */
- if (time_hw >= prev_hw)
- time_hw -= prev_hw;
- else
- time_hw += overflow_hw - prev_hw;
-
- /* Add delta to RC6 extended raw driver copy. */
- time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
- dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
-
- intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irqrestore(&uncore->lock, flags);
-
- return mul_u64_u32_div(time_hw, mul, div);
-}
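After the raw read, intel_rc6_residency_ns() extends the hardware counter into a 64-bit software copy by accumulating per-sample deltas modulo the overflow value, and only then scales to nanoseconds. A compact sketch of that accumulation, assuming the non-Gen9LP tick of 1.28us (mul = 1280, div = 1):

#include <stdint.h>
#include <stdio.h>

struct residency {
	uint64_t prev_hw;	/* previous raw hardware sample */
	uint64_t cur;		/* extended 64-bit raw copy */
};

static uint64_t residency_ns(struct residency *r, uint64_t time_hw,
			     uint64_t overflow_hw)
{
	uint64_t delta;

	/* Delta since the last sample, allowing a single wrap. */
	if (time_hw >= r->prev_hw)
		delta = time_hw - r->prev_hw;
	else
		delta = overflow_hw - r->prev_hw + time_hw;
	r->prev_hw = time_hw;

	r->cur += delta;	/* extend into the 64-bit copy */
	return r->cur * 1280;	/* 1.28us units -> ns */
}

int main(void)
{
	struct residency r = { .prev_hw = (1ull << 32) - 16 };

	/* The 32-bit counter wrapped from 0xfffffff0 to 0x10. */
	printf("%llu ns\n",
	       (unsigned long long)residency_ns(&r, 0x10, 1ull << 32));
	return 0;
}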
-
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-{
- u32 cagf;
-
- if (INTEL_GEN(dev_priv) >= 9)
- cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-
- return cagf;
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index e3573e1e16e3..b579c724b915 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -29,16 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -65,26 +55,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
new file mode 100644
index 000000000000..583118095635
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "intel_region_lmem.h"
+
+static int init_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ unsigned long n;
+ int ret;
+
+ /* We want to 1:1 map the mappable aperture to our reserved region */
+
+ mem->fake_mappable.start = 0;
+ mem->fake_mappable.size = resource_size(&mem->region);
+ mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
+
+ ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
+ if (ret)
+ return ret;
+
+ mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->region.start,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ drm_mm_remove_node(&mem->fake_mappable);
+ return -EINVAL;
+ }
+
+ for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ mem->remap_addr + (n << PAGE_SHIFT),
+ n << PAGE_SHIFT,
+ I915_CACHE_NONE, 0);
+ }
+
+ mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
+ mem->fake_mappable.size);
+
+ return 0;
+}
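init_fake_lmem_bar() boils down to a page-granular 1:1 remap: GGTT offset N is pointed at reserved-region offset N for every page. Schematically, under invented names (insert_page() here just logs what the GGTT PTE write would do):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void insert_page(uint64_t dma_addr, uint64_t ggtt_offset)
{
	printf("GGTT +0x%llx -> DMA 0x%llx\n",
	       (unsigned long long)ggtt_offset,
	       (unsigned long long)dma_addr);
}

static void map_one_to_one(uint64_t remap_addr, uint64_t size)
{
	uint64_t n;

	/* Identical page offset on both sides of the mapping. */
	for (n = 0; n < size >> PAGE_SHIFT; n++)
		insert_page(remap_addr + (n << PAGE_SHIFT),
			    n << PAGE_SHIFT);
}

int main(void)
{
	/* Map the first 4 pages of a region starting at 0x80000000. */
	map_one_to_one(0x80000000ull, 4ull << PAGE_SHIFT);
	return 0;
}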
+
+static void release_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ if (drm_mm_node_allocated(&mem->fake_mappable))
+ drm_mm_remove_node(&mem->fake_mappable);
+
+ dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ mem->remap_addr,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+ release_fake_lmem_bar(mem);
+ io_mapping_fini(&mem->iomap);
+ intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+ int ret;
+
+ if (i915_modparams.fake_lmem_start) {
+ ret = init_fake_lmem_bar(mem);
+ GEM_BUG_ON(ret);
+ }
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ resource_size(&mem->region)))
+ return -EIO;
+
+ ret = intel_memory_region_init_buddy(mem);
+ if (ret)
+ io_mapping_fini(&mem->iomap);
+
+ return ret;
+}
+
+const struct intel_memory_region_ops intel_region_lmem_ops = {
+ .init = region_lmem_init,
+ .release = region_lmem_release,
+ .create_object = __i915_gem_lmem_object_create,
+};
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t mappable_end;
+ resource_size_t io_start;
+ resource_size_t start;
+
+ GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
+ GEM_BUG_ON(!i915_modparams.fake_lmem_start);
+
+ /* Your mappable aperture belongs to me now! */
+ mappable_end = pci_resource_len(pdev, 2);
+	io_start = pci_resource_start(pdev, 2);
+ start = i915_modparams.fake_lmem_start;
+
+ mem = intel_memory_region_create(i915,
+ start,
+ mappable_end,
+ PAGE_SIZE,
+ io_start,
+ &intel_region_lmem_ops);
+ if (!IS_ERR(mem)) {
+ DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
+ DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ DRM_INFO("Intel graphics fake LMEM size: %llx\n",
+ (u64)resource_size(&mem->region));
+ }
+
+ return mem;
+}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h
new file mode 100644
index 000000000000..213def7c7b8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_REGION_LMEM_H
+#define __INTEL_REGION_LMEM_H
+
+struct drm_i915_private;
+
+extern const struct intel_memory_region_ops intel_region_lmem_ops;
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915);
+
+#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 2fd3c097e1f5..ad719c9602af 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -27,7 +27,6 @@
*/
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9e583f13a9e4..94a97bf8c021 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -805,9 +805,6 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
-#define GEN11_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-
#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
@@ -903,12 +900,10 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
})
#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
+ find_fw_domain(uncore, offset)
+
+#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
+ find_fw_domain(uncore, offset)
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
@@ -935,6 +930,20 @@ static const i915_reg_t gen11_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
+static const i915_reg_t gen12_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+ RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
+ RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
+ RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
+ RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
+ RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
+ RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
+ /* TODO: Other registers are not yet used */
+};
+
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -957,6 +966,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
+__is_genX_shadowed(12)
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
@@ -1005,8 +1015,18 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(uncore, offset); \
+ const u32 __offset = (offset); \
+ if (!is_gen11_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
+ __fwd; \
+})
+
+#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ const u32 __offset = (offset); \
+ if (!is_gen12_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
__fwd; \
})
@@ -1065,9 +1085,51 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
+ GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
+ GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+};
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen12_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
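Both the gen11 and gen12 tables are consumed by find_fw_domain(), which, per the "*Must* be sorted" comments and mmio_reg_cmp(), is a binary search over non-overlapping [start, end] ranges keyed by register offset. A self-contained equivalent with an invented three-entry table:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_range {
	uint32_t start, end, domains;
};

static const struct fw_range table[] = {	/* must stay sorted */
	{ 0x0000, 0x0aff, 0x4 /* "blitter" */ },
	{ 0x0b00, 0x1fff, 0x0 /* no forcewake */ },
	{ 0x2000, 0x26ff, 0x1 /* "render" */ },
};

static int range_cmp(const void *key, const void *elt)
{
	uint32_t offset = *(const uint32_t *)key;
	const struct fw_range *r = elt;

	if (offset < r->start)
		return -1;
	if (offset > r->end)
		return 1;
	return 0;
}

static uint32_t find_fw_domains(uint32_t offset)
{
	const struct fw_range *r;

	r = bsearch(&offset, table, sizeof(table) / sizeof(table[0]),
		    sizeof(table[0]), range_cmp);
	return r ? r->domains : 0;
}

int main(void)
{
	printf("0x%x\n", find_fw_domains(0x2100));	/* -> 0x1 */
	return 0;
}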
@@ -1228,6 +1290,7 @@ __gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)
+__gen_reg_read_funcs(gen12_fwtable);
__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);
@@ -1319,6 +1382,7 @@ __gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)
+__gen_reg_write_funcs(gen12_fwtable);
__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);
@@ -1690,10 +1754,14 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- } else {
+ } else if (IS_GEN(i915, 11)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
}
uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 414fc2cb0459..dcfa243892c6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -378,23 +378,23 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
static inline void intel_uncore_rmw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write(uncore, reg, val);
+ old = intel_uncore_read(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write(uncore, reg, val);
}
static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read_fw(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write_fw(uncore, reg, val);
+ old = intel_uncore_read_fw(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write_fw(uncore, reg, val);
}
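The rewritten helpers now skip the MMIO write when the masked update is a no-op, saving a bus transaction on the common unchanged path. The pattern in isolation, with reg_read()/reg_write() as stand-ins for the uncore accessors:

#include <stdint.h>

static uint32_t shadow;	/* stands in for the hardware register */

static uint32_t reg_read(void)    { return shadow; }
static void reg_write(uint32_t v) { shadow = v; }

static void reg_rmw(uint32_t clear, uint32_t set)
{
	uint32_t old = reg_read();
	uint32_t val = (old & ~clear) | set;

	if (val != old)	/* skip the write if nothing changed */
		reg_write(val);
}

int main(void)
{
	reg_rmw(0x0f, 0x01);	/* writes: value changes 0x0 -> 0x1 */
	reg_rmw(0x0f, 0x01);	/* skipped: already 0x1 */
	return 0;
}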
static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
new file mode 100644
index 000000000000..a29d93707345
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_tgl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0xD920), 0x00000000 },
+ { _MMIO(0xD900), 0x00000000 },
+ { _MMIO(0xD904), 0xF0800000 },
+ { _MMIO(0xD910), 0x00000000 },
+ { _MMIO(0xD914), 0xF0800000 },
+ { _MMIO(0xDC40), 0x00FF0000 },
+ { _MMIO(0xD940), 0x00000004 },
+ { _MMIO(0xD944), 0x0000FFFF },
+ { _MMIO(0xDC00), 0x00000004 },
+ { _MMIO(0xDC04), 0x0000FFFF },
+ { _MMIO(0xD948), 0x00000003 },
+ { _MMIO(0xD94C), 0x0000FFFF },
+ { _MMIO(0xDC08), 0x00000003 },
+ { _MMIO(0xDC0C), 0x0000FFFF },
+ { _MMIO(0xD950), 0x00000007 },
+ { _MMIO(0xD954), 0x0000FFFF },
+ { _MMIO(0xDC10), 0x00000007 },
+ { _MMIO(0xDC14), 0x0000FFFF },
+ { _MMIO(0xD958), 0x00100002 },
+ { _MMIO(0xD95C), 0x0000FFF7 },
+ { _MMIO(0xDC18), 0x00100002 },
+ { _MMIO(0xDC1C), 0x0000FFF7 },
+ { _MMIO(0xD960), 0x00100002 },
+ { _MMIO(0xD964), 0x0000FFCF },
+ { _MMIO(0xDC20), 0x00100002 },
+ { _MMIO(0xDC24), 0x0000FFCF },
+ { _MMIO(0xD968), 0x00100082 },
+ { _MMIO(0xD96C), 0x0000FFEF },
+ { _MMIO(0xDC28), 0x00100082 },
+ { _MMIO(0xDC2C), 0x0000FFEF },
+ { _MMIO(0xD970), 0x001000C2 },
+ { _MMIO(0xD974), 0x0000FFE7 },
+ { _MMIO(0xDC30), 0x001000C2 },
+ { _MMIO(0xDC34), 0x0000FFE7 },
+ { _MMIO(0xD978), 0x00100001 },
+ { _MMIO(0xD97C), 0x0000FFE7 },
+ { _MMIO(0xDC38), 0x00100001 },
+ { _MMIO(0xDC3C), 0x0000FFE7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x0D04), 0x00000200 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x280E0000 },
+ { _MMIO(0x9888), 0x1E0E0147 },
+ { _MMIO(0x9888), 0x180E0000 },
+ { _MMIO(0x9888), 0x160E0000 },
+ { _MMIO(0x9888), 0x1E0F1000 },
+ { _MMIO(0x9888), 0x1E104000 },
+ { _MMIO(0x9888), 0x2E020100 },
+ { _MMIO(0x9888), 0x2C030004 },
+ { _MMIO(0x9888), 0x38003000 },
+ { _MMIO(0x9888), 0x1E0A8000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x49110000 },
+ { _MMIO(0x9888), 0x5D101400 },
+ { _MMIO(0x9888), 0x1D140020 },
+ { _MMIO(0x9888), 0x1D1103A3 },
+ { _MMIO(0x9888), 0x01110000 },
+ { _MMIO(0x9888), 0x61111000 },
+ { _MMIO(0x9888), 0x1F128000 },
+ { _MMIO(0x9888), 0x17100000 },
+ { _MMIO(0x9888), 0x55100630 },
+ { _MMIO(0x9888), 0x57100000 },
+ { _MMIO(0x9888), 0x31100000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x65100002 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x42000001 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.test_config.uuid,
+ "80a833f0-2504-4321-8894-e9277844ce7b",
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
+
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
+
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
new file mode 100644
index 000000000000..4c25f0be825c
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_TGL_H__
+#define __I915_OA_TGL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 77d844ac8b71..260b0ee5d1e3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
return NULL;
kref_init(&active->ref);
- i915_active_init(i915, &active->base, __live_active, __live_retire);
+ i915_active_init(&active->base, __live_active, __live_retire);
return active;
}
@@ -79,7 +79,6 @@ __live_active_setup(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
struct live_active *active;
- enum intel_engine_id id;
unsigned int count = 0;
int err = 0;
@@ -97,7 +96,7 @@ __live_active_setup(struct drm_i915_private *i915)
if (err)
goto out;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
rq = i915_request_create(engine->kernel_context);
@@ -110,7 +109,7 @@ __live_active_setup(struct drm_i915_private *i915)
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base, rq->timeline, rq);
+ err = i915_active_add_request(&active->base, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
@@ -121,7 +120,7 @@ __live_active_setup(struct drm_i915_private *i915)
}
i915_active_release(&active->base);
- if (active->retired && count) {
+ if (READ_ONCE(active->retired) && count) {
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
@@ -146,35 +145,25 @@ static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
struct live_active *active;
- intel_wakeref_t wakeref;
int err = 0;
/* Check that we get a callback when requests retire upon waiting */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
active = __live_active_setup(i915);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
- goto err;
- }
+ if (IS_ERR(active))
+ return PTR_ERR(active);
i915_active_wait(&active->base);
- if (!active->retired) {
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after waiting!\n");
err = -EINVAL;
}
__live_put(active);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
-err:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
@@ -182,35 +171,25 @@ static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
struct live_active *active;
- intel_wakeref_t wakeref;
int err = 0;
/* Check that we get a callback when requests are indirectly retired */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
active = __live_active_setup(i915);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
- goto err;
- }
+ if (IS_ERR(active))
+ return PTR_ERR(active);
/* waits for & retires all requests */
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- if (!active->retired) {
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing!\n");
err = -EINVAL;
}
__live_put(active);
-err:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
@@ -226,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+ struct intel_engine_cs *engine;
+
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+ drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+ drm_printf(m, "\tpreallocated barriers? %s\n",
+ yesno(!llist_empty(&ref->preallocated_barriers)));
+
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct intel_engine_cs *engine;
+
+ engine = node_to_barrier(it);
+ if (engine) {
+ drm_printf(m, "\tbarrier: %s\n", engine->name);
+ continue;
+ }
+
+ if (i915_active_fence_isset(&it->base)) {
+ drm_printf(m,
+ "\ttimeline: %llx\n", it->timeline);
+ continue;
+ }
+ }
+
+ i915_active_release(ref);
+ }
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 23f784eae1e7..1b856bae67b5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -375,6 +375,8 @@ retry:
if (err)
break;
+
+ cond_resched();
}
if (err == -ENOMEM)
@@ -687,6 +689,8 @@ static int igt_buddy_alloc_range(void *arg)
rem -= size;
if (!rem)
break;
+
+ cond_resched();
}
if (err == -ENOMEM)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 37593831b539..d83f6bf6d9d4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -15,23 +15,26 @@
#include "igt_flush_test.h"
#include "mock_drm.h"
-static int switch_to_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static int switch_to_context(struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_add(rq);
}
+ i915_gem_context_unlock_engines(ctx);
- return 0;
+ return err;
}
static void trash_stolen(struct drm_i915_private *i915)
@@ -42,6 +45,10 @@ static void trash_stolen(struct drm_i915_private *i915)
unsigned long page;
u32 prng = 0x12345678;
+ /* XXX: fsck. needs some more thought... */
+ if (!i915_ggtt_has_aperture(ggtt))
+ return;
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
@@ -117,12 +124,9 @@ static void pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(&i915->gt, false);
- i915_gem_sanitize(i915);
- mutex_lock(&i915->drm.struct_mutex);
i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_restore_fences(&i915->ggtt);
i915_gem_resume(i915);
}
@@ -140,11 +144,9 @@ static int igt_gem_suspend(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -159,9 +161,7 @@ static int igt_gem_suspend(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
mock_file_free(i915, file);
return err;
@@ -179,11 +179,9 @@ static int igt_gem_hibernate(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -198,9 +196,7 @@ static int igt_gem_hibernate(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
mock_file_free(i915, file);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index cb30c669b1b7..42e948144f1b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -43,8 +43,7 @@ static void quirk_add(struct drm_i915_gem_object *obj,
list_add(&obj->st_link, objects);
}
-static int populate_ggtt(struct drm_i915_private *i915,
- struct list_head *objects)
+static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
@@ -53,7 +52,8 @@ static int populate_ggtt(struct drm_i915_private *i915,
do {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -70,7 +70,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
count++;
} while (1);
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
- count, i915->ggtt.vm.total / PAGE_SIZE);
+ count, ggtt->vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
@@ -96,7 +96,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
return -EINVAL;
}
- if (list_empty(&i915->ggtt.vm.bound_list)) {
+ if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -104,20 +104,16 @@ static int populate_ggtt(struct drm_i915_private *i915,
return 0;
}
-static void unpin_ggtt(struct drm_i915_private *i915)
+static void unpin_ggtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
- mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (vma->obj->mm.quirked)
i915_vma_unpin(vma);
- mutex_unlock(&ggtt->vm.mutex);
}
-static void cleanup_objects(struct drm_i915_private *i915,
- struct list_head *list)
+static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
struct drm_i915_gem_object *obj, *on;
@@ -127,44 +123,44 @@ static void cleanup_objects(struct drm_i915_private *i915,
i915_gem_object_put(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
}
static int igt_evict_something(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict one. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict something */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
@@ -172,13 +168,14 @@ static int igt_evict_something(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_overcommit(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
LIST_HEAD(objects);
@@ -188,11 +185,11 @@ static int igt_overcommit(void *arg)
* We expect it to fail.
*/
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -208,14 +205,14 @@ static int igt_overcommit(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_for_vma(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_mm_node target = {
.start = 0,
.size = 4096,
@@ -225,22 +222,26 @@ static int igt_evict_for_vma(void *arg)
/* Fill the GGTT with pinned objects and try to evict a range. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict the node */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -248,7 +249,7 @@ static int igt_evict_for_vma(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
@@ -261,8 +262,8 @@ static void mock_color_adjust(const struct drm_mm_node *node,
static int igt_evict_for_cache_color(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
const unsigned long flags = PIN_OFFSET_FIXED;
struct drm_mm_node target = {
.start = I915_GTT_PAGE_SIZE * 2,
@@ -274,14 +275,16 @@ static int igt_evict_for_cache_color(void *arg)
LIST_HEAD(objects);
int err;
- /* Currently the use of color_adjust is limited to cache domains within
- * the ggtt, and so the presence of mm.color_adjust is assumed to be
- * i915_gtt_color_adjust throughout our driver, so using a mock color
- * adjust will work just fine for our purposes.
+ /*
+ * Currently the use of color_adjust for the GGTT is limited to cache
+ * coloring and guard pages, and so the presence of mm.color_adjust for
+ * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+ * color adjust will work just fine for our purposes.
*/
ggtt->vm.mm.color_adjust = mock_color_adjust;
+ GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -297,7 +300,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -317,7 +320,9 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -328,7 +333,9 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -338,36 +345,40 @@ static int igt_evict_for_cache_color(void *arg)
err = 0;
cleanup:
- unpin_ggtt(i915);
- cleanup_objects(i915, &objects);
+ unpin_ggtt(ggtt);
+ cleanup_objects(ggtt, &objects);
ggtt->vm.mm.color_adjust = NULL;
return err;
}
static int igt_evict_vm(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict everything. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -375,14 +386,16 @@ static int igt_evict_vm(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_contexts(void *arg)
{
const u64 PRETEND_GGTT_SIZE = 16ull << 20;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct reserved {
@@ -408,14 +421,14 @@ static int igt_evict_contexts(void *arg)
if (!HAS_FULL_PPGTT(i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_gtt_insert(&ggtt->vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -425,15 +438,17 @@ static int igt_evict_contexts(void *arg)
do {
struct reserved *r;
+ mutex_unlock(&ggtt->vm.mutex);
r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ mutex_lock(&ggtt->vm.mutex);
if (!r) {
err = -ENOMEM;
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
+ if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -445,11 +460,11 @@ static int igt_evict_contexts(void *arg)
count++;
} while (1);
drm_mm_remove_node(&hole);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
/* Overfill the GGTT with context objects and so try to evict one. */
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
struct drm_file *file;
@@ -460,7 +475,6 @@ static int igt_evict_contexts(void *arg)
}
count = 0;
- mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
struct i915_request *rq;
@@ -478,8 +492,8 @@ static int igt_evict_contexts(void *arg)
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
if (PTR_ERR(rq) != -EBUSY) {
- pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
- ctx->hw_id, engine->name,
+ pr_err("Unexpected error from request alloc (on %s): %d\n",
+ engine->name,
(int)PTR_ERR(rq));
err = PTR_ERR(rq);
}
@@ -497,8 +511,6 @@ static int igt_evict_contexts(void *arg)
count++;
err = 0;
} while(1);
- mutex_unlock(&i915->drm.struct_mutex);
-
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
@@ -508,9 +520,9 @@ static int igt_evict_contexts(void *arg)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
out_locked:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -522,8 +534,8 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
+ mutex_unlock(&ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -545,11 +557,8 @@ int i915_gem_evict_mock_selftests(void)
if (!i915)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, &i915->gt);
drm_dev_put(&i915->drm);
return err;
@@ -564,5 +573,5 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 31a51ca1ddcb..3f7e80fb3bbd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,26 +25,20 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
#include "i915_random.h"
#include "i915_selftest.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
+#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
- /*
- * As we may hold onto the struct_mutex for inordinate lengths of
- * time, the NMI khungtaskd detector may fire for the free objects
- * worker.
- */
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
}
static void fake_free_pages(struct drm_i915_gem_object *obj,
@@ -88,8 +82,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
}
GEM_BUG_ON(rem);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
return 0;
@@ -101,7 +93,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
{
fake_free_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -113,6 +104,7 @@ static const struct drm_i915_gem_object_ops fake_ops = {
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -126,7 +118,9 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -293,18 +287,20 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
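+ /*
+ * with_intel_runtime_pm() scopes the wakeref: it takes the
+ * runtime-pm reference before the body and releases it afterwards,
+ * replacing the manual get/put pair removed above.
+ */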
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vm->insert_entries(vm, &mock_vma,
+ I915_CACHE_NONE, 0);
}
count = n;
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
- vm->clear_range(vm, addr, BIT_ULL(size));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vm->clear_range(vm, addr, BIT_ULL(size));
}
i915_gem_object_unpin_pages(obj);
@@ -875,6 +871,15 @@ static int __shrink_hole(struct drm_i915_private *i915,
i915_vma_unpin(vma);
addr += size;
+ /*
+ * Since we are injecting allocation faults at random intervals,
+ * wait for this allocation to complete before we change the
+ * fault injection.
+ */
+ err = i915_vma_sync(vma);
+ if (err)
+ break;
+
if (igt_timeout(end_time,
"%s timed out at ofset %llx [%llx - %llx]\n",
__func__, addr, hole_start, hole_end)) {
@@ -1008,21 +1013,19 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
- goto out_unlock;
+ goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(ppgtt->vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+out_free:
mock_file_free(dev_priv, file);
return err;
}
@@ -1085,7 +1088,6 @@ static int exercise_ggtt(struct drm_i915_private *i915,
IGT_TIMEOUT(end_time);
int err = 0;
- mutex_lock(&i915->drm.struct_mutex);
restart:
list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
@@ -1106,7 +1108,6 @@ restart:
last = hole_end;
goto restart;
}
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1148,13 +1149,12 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
- mutex_lock(&i915->drm.struct_mutex);
+ if (!i915_ggtt_has_aperture(ggtt))
+ return 0;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
err = i915_gem_object_pin_pages(obj);
if (err)
@@ -1222,8 +1222,6 @@ out_unpin:
i915_gem_object_unpin_pages(obj);
out_free:
i915_gem_object_put(obj);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1234,10 +1232,13 @@ static void track_vma_bind(struct i915_vma *vma)
atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
+ GEM_BUG_ON(vma->pages);
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+ __i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
mutex_unlock(&vma->vm->mutex);
}
@@ -1248,6 +1249,7 @@ static int exercise_mock(struct drm_i915_private *i915,
unsigned long end_time))
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
IGT_TIMEOUT(end_time);
int err;
@@ -1256,7 +1258,9 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ err = func(i915, vm, 0, min(vm->total, limit), end_time);
+ i915_vm_put(vm);
mock_context_close(ctx);
return err;
@@ -1294,6 +1298,7 @@ static int igt_gtt_reserve(void *arg)
{
struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
+ I915_RND_STATE(prng);
LIST_HEAD(objects);
u64 total;
int err = -ENODEV;
@@ -1330,11 +1335,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1380,11 +1387,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1420,15 +1429,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, ggtt->vm.total,
- 2*I915_GTT_PAGE_SIZE,
- I915_GTT_MIN_ALIGNMENT);
+ offset = igt_random_offset(&prng,
+ 0, ggtt->vm.total,
+ 2 * I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1497,11 +1509,13 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
ii->size, ii->alignment, ii->start, ii->end,
@@ -1537,10 +1551,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
i915_gem_object_put(obj);
@@ -1595,10 +1611,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1642,10 +1660,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1689,13 +1709,10 @@ int i915_gem_gtt_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -1703,6 +1720,312 @@ out_put:
return err;
}
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ long timeout;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 5);
+ i915_request_put(rq);
+
+ return timeout < 0 ? -EIO : 0;
+}
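+
+/*
+ * context_sync() above emits an empty request on the context and waits
+ * up to HZ/5 for it; as requests on a context execute in order, this
+ * acts as a barrier between test phases.
+ */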
+
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return err ? ERR_PTR(err) : rq;
+}
+
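+/*
+ * Each 64-byte batch slot written below packs a store-dword (cs[0..3]),
+ * a spinner dword at cs[4] (MI_NOOP while spinning) and an
+ * MI_BATCH_BUFFER_START at cs[5..7] pointed back at the slot itself;
+ * end_spin() flips cs[4] to MI_BATCH_BUFFER_END so the batch terminates
+ * before the jump, releasing the self-referencing loop.
+ */
+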
+static u32 *spinner(u32 *batch, int i)
+{
+ return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+ *spinner(batch, i) = MI_BATCH_BUFFER_END;
+ wmb();
+}
+
+static int igt_cs_tlb(void *arg)
+{
+ const unsigned int count = PAGE_SIZE / 64;
+ const unsigned int chunk_size = count * PAGE_SIZE;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *bbe, *act, *out;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ struct i915_vma *vma;
+ I915_RND_STATE(prng);
+ unsigned int i;
+ u32 *result;
+ u32 *batch;
+ int err = 0;
+
+ /*
+ * Our mission here is to fool the hardware into executing something
+ * from scratch as it has not seen the batch move (due to missing
+ * the TLB invalidate).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (i915_is_ggtt(vm))
+ goto out_vm;
+
+ /* Create two pages: a dummy to prefill the TLB, and the intended target */
+ bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(bbe)) {
+ err = PTR_ERR(bbe);
+ goto out_vm;
+ }
+
+ batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_bbe;
+ }
+ memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+ i915_gem_object_flush_map(bbe);
+ i915_gem_object_unpin_map(bbe);
+
+ act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
+ goto out_put_bbe;
+ }
+
+ /* Track the execution of each request by writing into a different slot */
+ batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_act;
+ }
+ for (i = 0; i < count; i++) {
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 6);
+ cs[0] = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ cs[1] = lower_32_bits(addr);
+ cs[2] = upper_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START_GEN8;
+ } else {
+ cs[1] = 0;
+ cs[2] = lower_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START;
+ }
+ }
+
+ out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(out)) {
+ err = PTR_ERR(out);
+ goto out_put_batch;
+ }
+ i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(out, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put_batch;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ PIN_USER |
+ PIN_OFFSET_FIXED |
+ (vm->total - PAGE_SIZE));
+ if (err)
+ goto out_put_out;
+ GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+ result = i915_gem_object_pin_map(out, I915_MAP_WB);
+ if (IS_ERR(result)) {
+ err = PTR_ERR(result);
+ goto out_put_out;
+ }
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ IGT_TIMEOUT(end_time);
+ unsigned long pass = 0;
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ struct i915_request *rq;
+ u64 offset;
+
+ offset = igt_random_offset(&prng,
+ 0, vm->total - PAGE_SIZE,
+ chunk_size, PAGE_SIZE);
+
+ err = vm->allocate_va_range(vm, offset, chunk_size);
+ if (err)
+ goto end;
+
+ memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ vma = i915_vma_instance(bbe, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Prime the TLB with the dummy pages */
+ for (i = 0; i < count; i++) {
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ rq = submit_batch(ce, vma->node.start);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_put(rq);
+ }
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: dummy setup timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ vma = i915_vma_instance(act, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Replace the TLB with target batches */
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr;
+
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ addr = vma->node.start + i * 64;
+ cs[4] = MI_NOOP;
+ cs[6] = lower_32_bits(addr);
+ cs[7] = upper_32_bits(addr);
+ wmb();
+
+ rq = submit_batch(ce, addr);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+
+ /* Wait until the context chain has started */
+ if (i == 0) {
+ while (READ_ONCE(result[i]) &&
+ !i915_request_completed(rq))
+ cond_resched();
+ } else {
+ end_spin(batch, i - 1);
+ }
+
+ i915_request_put(rq);
+ }
+ end_spin(batch, count - 1);
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: writes timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (result[i] != i) {
+ pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+ ce->engine->name, pass,
+ offset, i, result[i], i);
+ err = -EINVAL;
+ goto end;
+ }
+ }
+
+ vm->clear_range(vm, offset, chunk_size);
+ pass++;
+ }
+ }
+end:
+ if (igt_flush_test(i915))
+ err = -EIO;
+ i915_gem_context_unlock_engines(ctx);
+ i915_gem_object_unpin_map(out);
+out_put_out:
+ i915_gem_object_put(out);
+out_put_batch:
+ i915_gem_object_unpin_map(act);
+out_put_act:
+ i915_gem_object_put(act);
+out_put_bbe:
+ i915_gem_object_put(bbe);
+out_vm:
+ i915_vm_put(vm);
+out_unlock:
+ mock_file_free(i915, file);
+ return err;
+}
+
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -1720,6 +2043,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_pot),
SUBTEST(igt_ggtt_fill),
SUBTEST(igt_ggtt_page),
+ SUBTEST(igt_cs_tlb),
};
GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 1ccf0f731ac0..4b3cac73e291 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,6 +15,9 @@ selftest(workarounds, intel_workarounds_live_selftests)
selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
+selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_pm, intel_gt_pm_live_selftests)
+selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -30,6 +33,8 @@ selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
+selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
selftest(guc, intel_guc_live_selftest)
+selftest(perf, i915_perf_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index b88084fe3269..aa5a0e7f5d9e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -26,3 +26,4 @@ selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
selftest(buddy, i915_buddy_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
new file mode 100644
index 000000000000..aabd07f67e49
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kref.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+static struct i915_perf_stream *
+test_stream(struct i915_perf *perf)
+{
+ struct drm_i915_perf_open_param param = {};
+ struct perf_open_properties props = {
+ .engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0),
+ .sample_flags = SAMPLE_OA_REPORT,
+ .oa_format = IS_GEN(perf->i915, 12) ?
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
+ .metrics_set = 1,
+ };
+ struct i915_perf_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return NULL;
+
+ stream->perf = perf;
+
+ mutex_lock(&perf->lock);
+ if (i915_oa_stream_init(stream, &param, &props)) {
+ kfree(stream);
+ stream = NULL;
+ }
+ mutex_unlock(&perf->lock);
+
+ return stream;
+}
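+
+/*
+ * test_stream() above opens a bare-bones OA stream on the render
+ * engine, taking perf->lock around i915_oa_stream_init() much as the
+ * perf open ioctl path does; oa_format and metrics_set here are just
+ * the simplest usable defaults for a smoke test, not a tuned
+ * configuration.
+ */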
+
+static void stream_destroy(struct i915_perf_stream *stream)
+{
+ struct i915_perf *perf = stream->perf;
+
+ mutex_lock(&perf->lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&perf->lock);
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+
+ /* Quick check we can create a perf stream */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -EINVAL;
+
+ stream_destroy(stream);
+ return 0;
+}
+
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 *cs;
+ int len;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ len = 5;
+ if (INTEL_GEN(rq->i915) >= 8)
+ len++;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(len);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_WRITE_TIMESTAMP;
+ *cs++ = slot * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
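+
+/*
+ * PIPE_CONTROL_STORE_DATA_INDEX makes the write address an index into
+ * the engine's status page, so "slot * sizeof(u32)" above selects HWSP
+ * dword @slot; poll_status() below reads it back with
+ * intel_read_status_page().
+ */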
+
+static ktime_t poll_status(struct i915_request *rq, int slot)
+{
+ while (!intel_read_status_page(rq->engine, slot) &&
+ !i915_request_completed(rq))
+ cpu_relax();
+
+ return ktime_get();
+}
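+
+/*
+ * Busy-wait until the GPU writes the status-page slot (or the request
+ * completes), returning the CPU timestamp at that moment; the caller
+ * disables preemption around the two polls to keep the CPU-side
+ * measurement tight.
+ */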
+
+static int live_noa_delay(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u64 expected;
+ u32 delay;
+ int err;
+ int i;
+
+ /* Check that the GPU delay matches our expectations */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ expected = atomic64_read(&stream->perf->noa_programming_delay);
+
+ if (stream->engine->class != RENDER_CLASS) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < 4; i++)
+ intel_write_status_page(stream->engine, 0x100 + i, 0);
+
+ rq = i915_request_create(stream->engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb &&
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ err = write_timestamp(rq, 0x100);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = write_timestamp(rq, 0x102);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ preempt_disable();
+ t0 = poll_status(rq, 0x100);
+ t1 = poll_status(rq, 0x102);
+ preempt_enable();
+
+ pr_info("CPU delay: %lluns, expected %lluns\n",
+ ktime_sub(t1, t0), expected);
+
+ delay = intel_read_status_page(stream->engine, 0x102);
+ delay -= intel_read_status_page(stream->engine, 0x100);
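+ /*
+ * Convert timestamp ticks to nanoseconds: ns = ticks * 10^6 / freq_khz.
+ * As a rough worked example, assuming a 12000kHz command-streamer
+ * timestamp clock, one tick is ~83ns.
+ */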
+ delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+ pr_info("GPU delay: %uns, expected %lluns\n",
+ delay, expected);
+
+ if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
+ pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
+ delay / 1000,
+ div_u64(3 * expected, 4000),
+ div_u64(3 * expected, 2000));
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
+int i915_perf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_noa_delay),
+ };
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 716a3f19f030..abdfadcf626b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "i915_random.h"
+#include "i915_utils.h"
u64 i915_prandom_u64_state(struct rnd_state *rnd)
{
@@ -87,3 +88,22 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
i915_random_reorder(order, count, state);
return order;
}
+
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align)
+{
+ u64 range, addr;
+
+ BUG_ON(range_overflows(start, len, end));
+ BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ addr = i915_prandom_u64_state(state);
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
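+
+/*
+ * A minimal usage sketch, mirroring the igt_gtt_reserve caller above
+ * (all names appear earlier in this patch): pick a random, aligned
+ * offset for a two-page node anywhere within the GGTT:
+ *
+ * I915_RND_STATE(prng);
+ * u64 offset = igt_random_offset(&prng, 0, ggtt->vm.total,
+ *                                2 * I915_GTT_PAGE_SIZE,
+ *                                I915_GTT_MIN_ALIGNMENT);
+ */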
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 8e1ff9c105b6..35cc69a3a1b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -57,4 +57,8 @@ void i915_random_reorder(unsigned int *order,
void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
struct rnd_state *state);
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align);
+
#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index b3688543ed7d..8618a4dc0701 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -37,25 +37,32 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ return count;
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *request;
- int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
- goto out_unlock;
+ return -ENOMEM;
i915_request_add(request);
- err = 0;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
static int igt_wait_request(void *arg)
@@ -67,12 +74,10 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ if (!request)
+ return -ENOMEM;
+
i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
@@ -125,9 +130,7 @@ static int igt_wait_request(void *arg)
err = 0;
out_request:
i915_request_put(request);
-out_unlock:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -140,52 +143,45 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_locked;
- }
+ if (!request)
+ return -ENOMEM;
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_locked;
+ goto out;
}
i915_request_add(request);
- mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
pr_err("fence signaled immediately!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
pr_err("fence wait success after submit (expected timeout)!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out (expected success)!\n");
- goto out_device;
+ goto out;
}
if (!dma_fence_is_signaled(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out when complete (expected success)!\n");
- goto out_device;
+ goto out;
}
err = 0;
-out_device:
- mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -197,8 +193,8 @@ static int igt_request_rewind(void *arg)
struct intel_context *ce;
int err = -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
+
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
@@ -212,6 +208,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
+
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
@@ -233,7 +230,6 @@ static int igt_request_rewind(void *arg)
request->engine->submit_request(request);
rcu_read_unlock();
- mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request\n");
@@ -248,14 +244,12 @@ static int igt_request_rewind(void *arg)
err = 0;
err:
i915_request_put(vip);
- mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -282,7 +276,6 @@ __live_request_alloc(struct intel_context *ce)
static int __igt_breadcrumbs_smoketest(void *arg)
{
struct smoketest *t = arg;
- struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@@ -300,7 +293,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
* that the fences were marked as signaled.
*/
- requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+ requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
if (!requests)
return -ENOMEM;
@@ -337,14 +330,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_request *rq;
struct intel_context *ce;
- mutex_lock(BKL);
-
ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
rq = t->request_alloc(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
- mutex_unlock(BKL);
err = PTR_ERR(rq);
count = n;
break;
@@ -357,8 +347,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
requests[n] = i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(BKL);
-
if (err >= 0)
err = i915_sw_fence_await_dma_fence(wait,
&rq->fence,
@@ -446,18 +434,16 @@ static int mock_breadcrumbs_smoketest(void *arg)
* See __igt_breadcrumbs_smoketest();
*/
- threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
- t.contexts =
- kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+ t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
if (!t.contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < t.ncontexts; n++) {
t.contexts[n] = mock_context(t.engine->i915, "mock");
if (!t.contexts[n]) {
@@ -465,7 +451,6 @@ static int mock_breadcrumbs_smoketest(void *arg)
goto out_contexts;
}
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@@ -479,6 +464,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
get_task_struct(threads[n]);
}
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
for (n = 0; n < ncpus; n++) {
@@ -495,18 +481,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
atomic_long_read(&t.num_fences),
ncpus);
- mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
for (n = 0; n < t.ncontexts; n++) {
if (!t.contexts[n])
break;
mock_context_close(t.contexts[n]);
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
kfree(t.contexts);
out_threads:
kfree(threads);
-
return ret;
}
@@ -539,40 +522,37 @@ static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
int err = -ENODEV;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- for_each_engine(engine, i915, id) {
- struct i915_request *request = NULL;
+ for_each_uabi_engine(engine, i915) {
unsigned long n, prime;
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ return err;
for_each_prime_number_from(prime, 1, 8192) {
+ struct i915_request *request = NULL;
+
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = i915_request_create(engine->kernel_context);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- goto out_unlock;
- }
+ if (IS_ERR(request))
+ return PTR_ERR(request);
- /* This space is left intentionally blank.
+ /*
+ * This space is left intentionally blank.
*
* We do not actually want to perform any
* action with this request, we just want
@@ -585,9 +565,11 @@ static int live_nop_request(void *arg)
* for latency.
*/
+ i915_request_get(request);
i915_request_add(request);
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -599,7 +581,7 @@ static int live_nop_request(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ return err;
pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -607,9 +589,6 @@ static int live_nop_request(void *arg)
prime, div64_u64(ktime_to_ns(times[1]), prime));
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -647,8 +626,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
+ /* Force the wait now to avoid including it in the benchmark */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
return vma;
+err_pin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -672,6 +658,7 @@ empty_request(struct intel_engine_cs *engine,
if (err)
goto out_request;
+ i915_request_get(request);
out_request:
i915_request_add(request);
return err ? ERR_PTR(err) : request;
@@ -681,27 +668,21 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
int err = 0;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
batch = empty_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_unlock;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
unsigned long n, prime;
@@ -723,6 +704,7 @@ static int live_empty_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
@@ -738,6 +720,7 @@ static int live_empty_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ i915_request_put(request);
err = igt_live_test_end(&t);
if (err)
@@ -752,18 +735,15 @@ static int live_empty_request(void *arg)
out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
+ struct i915_address_space *vm;
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -772,7 +752,9 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -832,67 +814,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
static int live_all_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
struct intel_engine_cs *engine;
- struct i915_request *request[I915_NUM_ENGINES];
- intel_wakeref_t wakeref;
+ struct i915_request **request;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines simultaneously. We
+ /*
+ * Check we can submit requests to all engines simultaneously. We
* send a recursive batch to each engine - checking that we don't
* block doing so, and that they don't complete too soon.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
batch = recursive_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_unlock;
+ goto out_free;
}
- for_each_engine(engine, i915, id) {
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ request[idx] = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
goto out_request;
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_request_await_object(request[id], batch->obj, 0);
+ err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
+ idx++;
}
- for_each_engine(engine, i915, id) {
- if (i915_request_completed(request[id])) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
+ idx++;
}
err = recursive_batch_resolve(batch);
@@ -901,10 +889,11 @@ static int live_all_engines(void *arg)
goto out_request;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -913,50 +902,56 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
- i915_request_put(request[id]);
- request[id] = NULL;
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ i915_request_put(request[idx]);
+ request[idx] = NULL;
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id)
- if (request[id])
- i915_request_put(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (request[idx])
+ i915_request_put(request[idx]);
+ idx++;
+ }
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
return err;
}
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_request *request[I915_NUM_ENGINES] = {};
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct i915_request **request;
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines sequentially, such
+ /*
+ * Check we can submit requests to all engines sequentially, such
* that each successive request waits for the earlier ones. This
* tests that we don't execute requests out of order, even though
* they are running on independent engines.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
batch = recursive_batch(i915);
@@ -964,66 +959,69 @@ static int live_sequential_engines(void *arg)
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
__func__, engine->name, err);
- goto out_unlock;
+ goto out_free;
}
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ request[idx] = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
if (prev) {
- err = i915_request_await_dma_fence(request[id],
+ err = i915_request_await_dma_fence(request[idx],
&prev->fence);
if (err) {
- i915_request_add(request[id]);
+ i915_request_add(request[idx]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_request_await_object(request[id], batch->obj, false);
+ err = i915_request_await_object(request[idx],
+ batch->obj, false);
if (err == 0)
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
- prev = request[id];
+ prev = request[idx];
+ idx++;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- if (i915_request_completed(request[id])) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
- err = recursive_batch_resolve(request[id]->batch);
+ err = recursive_batch_resolve(request[idx]->batch);
if (err) {
pr_err("%s: failed to resolve batch, err=%d\n",
__func__, err);
goto out_request;
}
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1032,33 +1030,156 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
u32 *cmd;
- if (!request[id])
+ if (!request[idx])
break;
- cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+ cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
- i915_gem_object_unpin_map(request[id]->batch->obj);
+ i915_gem_object_unpin_map(request[idx]->batch->obj);
}
- i915_vma_put(request[id]->batch);
- i915_request_put(request[id]);
+ i915_vma_put(request[idx]->batch);
+ i915_request_put(request[idx]);
+ idx++;
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
+ return err;
+}
+
+static int __live_parallel_engine1(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu request + sync\n", engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_engineN(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu requests\n", engine->name, count);
+ return 0;
+}
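+
+/*
+ * The two workers above probe different limits: engine1 submits and
+ * then synchronously waits on each request (round-trip latency), while
+ * engineN fires requests without waiting (pure submission throughput).
+ */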
+
+static int live_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_engine1,
+ __live_parallel_engineN,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct task_struct **tsk;
+ int err = 0;
+
+ /*
+ * Check we can submit requests to all engines concurrently. This
+ * tests that we load up the system maximally.
+ */
+
+ tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk)
+ return -ENOMEM;
+
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ unsigned int idx;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ tsk[idx] = kthread_run(*fn, engine,
+ "igt/parallel:%s",
+ engine->name);
+ if (IS_ERR(tsk[idx])) {
+ err = PTR_ERR(tsk[idx]);
+ break;
+ }
+ get_task_struct(tsk[idx++]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(tsk[idx]))
+ break;
+
+ status = kthread_stop(tsk[idx]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[idx++]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+ kfree(tsk);
return err;
}
@@ -1102,16 +1223,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
static int live_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct smoketest t[I915_NUM_ENGINES];
- unsigned int ncpus = num_online_cpus();
+ const unsigned int nengines = num_uabi_engines(i915);
+ const unsigned int ncpus = num_online_cpus();
unsigned long num_waits, num_fences;
struct intel_engine_cs *engine;
struct task_struct **threads;
struct igt_live_test live;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
struct drm_file *file;
- unsigned int n;
+ struct smoketest *smoke;
+ unsigned int n, idx;
int ret = 0;
/*
@@ -1130,29 +1251,31 @@ static int live_breadcrumbs_smoketest(void *arg)
goto out_rpm;
}
- threads = kcalloc(ncpus * I915_NUM_ENGINES,
- sizeof(*threads),
- GFP_KERNEL);
- if (!threads) {
+ smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+ if (!smoke) {
ret = -ENOMEM;
goto out_file;
}
- memset(&t[0], 0, sizeof(t[0]));
- t[0].request_alloc = __live_request_alloc;
- t[0].ncontexts = 64;
- t[0].contexts = kmalloc_array(t[0].ncontexts,
- sizeof(*t[0].contexts),
- GFP_KERNEL);
- if (!t[0].contexts) {
+ threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ ret = -ENOMEM;
+ goto out_smoke;
+ }
+
+ smoke[0].request_alloc = __live_request_alloc;
+ smoke[0].ncontexts = 64;
+ smoke[0].contexts = kcalloc(smoke[0].ncontexts,
+ sizeof(*smoke[0].contexts),
+ GFP_KERNEL);
+ if (!smoke[0].contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&i915->drm.struct_mutex);
- for (n = 0; n < t[0].ncontexts; n++) {
- t[0].contexts[n] = live_context(i915, file);
- if (!t[0].contexts[n]) {
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+ if (!smoke[0].contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
}
@@ -1162,45 +1285,48 @@ static int live_breadcrumbs_smoketest(void *arg)
if (ret)
goto out_contexts;
- for_each_engine(engine, i915, id) {
- t[id] = t[0];
- t[id].engine = engine;
- t[id].max_batch = max_batches(t[0].contexts[0], engine);
- if (t[id].max_batch < 0) {
- ret = t[id].max_batch;
- mutex_unlock(&i915->drm.struct_mutex);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ smoke[idx] = smoke[0];
+ smoke[idx].engine = engine;
+ smoke[idx].max_batch =
+ max_batches(smoke[0].contexts[0], engine);
+ if (smoke[idx].max_batch < 0) {
+ ret = smoke[idx].max_batch;
goto out_flush;
}
/* One ring interleaved between requests from all cpus */
- t[id].max_batch /= num_online_cpus() + 1;
+ smoke[idx].max_batch /= num_online_cpus() + 1;
pr_debug("Limiting batches to %d requests on %s\n",
- t[id].max_batch, engine->name);
+ smoke[idx].max_batch, engine->name);
for (n = 0; n < ncpus; n++) {
struct task_struct *tsk;
tsk = kthread_run(__igt_breadcrumbs_smoketest,
- &t[id], "igt/%d.%d", id, n);
+ &smoke[idx], "igt/%d.%d", idx, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
- mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
get_task_struct(tsk);
- threads[id * ncpus + n] = tsk;
+ threads[idx * ncpus + n] = tsk;
}
+
+ idx++;
}
- mutex_unlock(&i915->drm.struct_mutex);
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
out_flush:
+ idx = 0;
num_waits = 0;
num_fences = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (n = 0; n < ncpus; n++) {
- struct task_struct *tsk = threads[id * ncpus + n];
+ struct task_struct *tsk = threads[idx * ncpus + n];
int err;
if (!tsk)
@@ -1213,19 +1339,20 @@ out_flush:
put_task_struct(tsk);
}
- num_waits += atomic_long_read(&t[id].num_waits);
- num_fences += atomic_long_read(&t[id].num_fences);
+ num_waits += atomic_long_read(&smoke[idx].num_waits);
+ num_fences += atomic_long_read(&smoke[idx].num_fences);
+ idx++;
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
- mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
- mutex_unlock(&i915->drm.struct_mutex);
- kfree(t[0].contexts);
+ kfree(smoke[0].contexts);
out_threads:
kfree(threads);
+out_smoke:
+ kfree(smoke);
out_file:
mock_file_free(i915, file);
out_rpm:
@@ -1240,6 +1367,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_nop_request),
SUBTEST(live_all_engines),
SUBTEST(live_sequential_engines),
+ SUBTEST(live_parallel_engines),
SUBTEST(live_empty_request),
SUBTEST(live_breadcrumbs_smoketest),
};
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 438ea0eaa416..a6cca4ad96f6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,13 +23,14 @@
#include <linux/random.h>
-#include "../i915_drv.h"
-#include "../i915_selftest.h"
+#include "gt/intel_gt_pm.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
- .timeout_ms = 1000,
+ .timeout_ms = 500,
};
int i915_mock_sanitycheck(void)
@@ -256,6 +257,10 @@ int __i915_live_setup(void *data)
{
struct drm_i915_private *i915 = data;
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(&i915->gt))
+ return -EIO;
+
return intel_gt_terminally_wedged(&i915->gt);
}
@@ -263,10 +268,8 @@ int __i915_live_teardown(int err, void *data)
{
struct drm_i915_private *i915 = data;
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
@@ -277,6 +280,10 @@ int __intel_gt_live_setup(void *data)
{
struct intel_gt *gt = data;
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(gt))
+ return -EIO;
+
return intel_gt_terminally_wedged(gt);
}
@@ -284,10 +291,8 @@ int __intel_gt_live_teardown(int err, void *data)
{
struct intel_gt *gt = data;
- mutex_lock(&gt->i915->drm.struct_mutex);
- if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
- mutex_unlock(&gt->i915->drm.struct_mutex);
i915_gem_drain_freed_objects(gt->i915);
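Both live-setup hooks above now gate on GT idleness before even checking for a terminal wedge: background retire work or a leaked wakeref from a previous test would otherwise skew timing-sensitive subtests. A usage sketch mirroring __i915_live_setup() above (the function name here is hypothetical):

        static int example_live_setup(void *data)
        {
                struct drm_i915_private *i915 = data;

                /* refuse to start until every engine has parked */
                if (intel_gt_pm_wait_for_idle(&i915->gt))
                        return -EIO;

                /* a terminally wedged GT cannot run live tests at all */
                return intel_gt_terminally_wedged(&i915->gt);
        }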
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index a5bec0a4cdcc..58b5f40a07dd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,6 +24,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h"
@@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != ctx->vm) {
+ if (vma->vm != rcu_access_pointer(ctx->vm)) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = ctx->vm;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = checked_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -170,7 +173,7 @@ static int igt_vma_create(void *arg)
}
nc = 0;
- for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+ for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
for (; nc < num_ctx; nc++) {
ctx = mock_context(i915, "mock");
if (!ctx)
@@ -623,7 +626,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
struct sgt_iter sgt;
dma_addr_t dma;
- for_each_sgt_dma(dma, sgt, vma->pages) {
+ for_each_sgt_daddr(dma, sgt, vma->pages) {
dma_addr_t src;
if (!size) {
@@ -831,13 +834,10 @@ int i915_vma_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -879,8 +879,6 @@ static int igt_vma_remapped_gtt(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
@@ -976,7 +974,6 @@ static int igt_vma_remapped_gtt(void *arg)
out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
return err;
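The vma tests adapt to ctx->vm becoming RCU-managed: a pure pointer comparison may use rcu_access_pointer() without holding a reference, but any dereference must pin the vm first. A condensed sketch of the two access patterns, assuming the i915_gem_context_get_vm_rcu()/i915_vm_put() helpers used in the hunks above:

        /* comparison only: the pointer value is read, never dereferenced */
        if (vma->vm != rcu_access_pointer(ctx->vm))
                pr_err("VMA created with wrong VM\n");

        /* dereference: hold a full reference for the duration of the use */
        vm = i915_gem_context_get_vm_rcu(ctx);
        vma = checked_vma_instance(obj, vm, NULL);
        i915_vm_put(vm);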
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index d3b5eb402d33..7b0939e3f007 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,39 +4,32 @@
* Copyright © 2018 Intel Corporation
*/
-#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_selftest.h"
#include "igt_flush_test.h"
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+int igt_flush_test(struct drm_i915_private *i915)
{
- int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
- int repeat = !!(flags & I915_WAIT_LOCKED);
+ struct intel_gt *gt = &i915->gt;
+ int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched();
- do {
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- GEM_TRACE("%pS timed out.\n",
- __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
- repeat = 0;
- ret = -EIO;
- }
-
- /* Ensure we also flush after wedging. */
- if (flags & I915_WAIT_LOCKED)
- i915_retire_requests(i915);
- } while (repeat--);
+ intel_gt_set_wedged(gt);
+ ret = -EIO;
+ }
return ret;
}
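With the struct_mutex retry loop gone, igt_flush_test() reduces to a single bounded wait; a timeout is treated as fatal and wedges the GT so that every later subtest bails out early instead of hanging. A typical caller, mirroring the teardown hooks converted earlier in this patch (the function name is illustrative):

        static int example_teardown(int err, void *data)
        {
                struct drm_i915_private *i915 = data;

                if (igt_flush_test(i915)) /* wedges, returns -EIO on timeout */
                        err = -EIO;

                i915_gem_drain_freed_objects(i915);
                return err;
        }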
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
index 63e009927c43..7541fa74e641 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -9,6 +9,6 @@
struct drm_i915_private;
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+int igt_flush_test(struct drm_i915_private *i915);
#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 3e902761cd16..c130010a7033 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "i915_drv.h"
+#include "gt/intel_gt_requests.h"
#include "../i915_selftest.h"
#include "igt_flush_test.h"
@@ -15,20 +16,16 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
t->i915 = i915;
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -37,7 +34,7 @@ int igt_live_test_begin(struct igt_live_test *t,
t->reset_global = i915_reset_count(&i915->gpu_error);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
t->reset_engine[id] =
i915_reset_engine_count(&i915->gpu_error, engine);
@@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
return -EIO;
if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
@@ -62,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, &i915->gt, id) {
if (t->reset_engine[id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
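igt_live_test_begin()/igt_live_test_end() bracket a test with snapshots of the global and per-engine reset counters, so any reset the test did not provoke intentionally turns into -EIO. A minimal usage sketch of the API as converted above:

        struct igt_live_test t;
        int err;

        err = igt_live_test_begin(&t, i915, __func__, "example");
        if (err)
                return err;

        /* ... submit work and wait for it ... */

        /* fails with -EIO if a GPU reset occurred behind our back */
        err = igt_live_test_end(&t) ?: err;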
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 7ec8f8b049c6..9f8590b868a9 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -22,7 +22,7 @@ void igt_global_reset_lock(struct intel_gt *gt)
wait_event(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
@@ -35,7 +35,7 @@ void igt_global_reset_unlock(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 11f04ad48e68..ee8450b871da 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -147,7 +147,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
- rq->timeline->has_initial_breadcrumb) {
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
new file mode 100644
index 000000000000..19e1cca8f143
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_region.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void close_objects(struct intel_memory_region *mem,
+ struct list_head *objects)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+ /* No polluting the memory region between tests */
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+
+ cond_resched();
+
+ i915_gem_drain_freed_objects(i915);
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t total = resource_size(&mem->region);
+ resource_size_t page_size;
+ resource_size_t rem;
+ unsigned long max_pages;
+ unsigned long page_num;
+ LIST_HEAD(objects);
+ int err = 0;
+
+ page_size = mem->mm.chunk_size;
+ max_pages = div64_u64(total, page_size);
+ rem = total;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ resource_size_t size = page_num * page_size;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_region(mem, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+ rem -= size;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+ if (err == -ENXIO) {
+ if (page_num * page_size <= rem) {
+ pr_err("%s failed, space still left in region\n",
+ __func__);
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
+ }
+
+ close_objects(mem, &objects);
+
+ return err;
+}
+
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+ struct list_head *objects,
+ u64 size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mem, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto put;
+
+ list_add(&obj->st_link, objects);
+ return obj;
+
+put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ LIST_HEAD(holes);
+ I915_RND_STATE(prng);
+ resource_size_t total;
+ resource_size_t min;
+ u64 target;
+ int err = 0;
+
+ total = resource_size(&mem->region);
+
+ /* Min size */
+ obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s min object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Max size */
+ obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s max object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Internal fragmentation should not bleed into the object size */
+ target = i915_prandom_u64_state(&prng);
+ div64_u64_rem(target, total, &target);
+ target = round_up(target, PAGE_SIZE);
+ target = max_t(u64, PAGE_SIZE, target);
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != target) {
+ pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
+ obj->base.size, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Try to fragment the address space, such that half of it is free, but
+ * the max contiguous block size is SZ_64K.
+ */
+
+ target = SZ_64K;
+ n_objects = div64_u64(total, target);
+
+ while (n_objects--) {
+ struct list_head *list;
+
+ if (n_objects % 2)
+ list = &holes;
+ else
+ list = &objects;
+
+ obj = igt_object_create(mem, list, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+ }
+
+ close_objects(mem, &holes);
+
+ min = target;
+ target = total >> 1;
+
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Even though we have enough free space, we don't have a big enough
+ * contiguous block. Make sure that holds true.
+ */
+
+ do {
+ bool should_fail = target > min;
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (should_fail != IS_ERR(obj)) {
+ pr_err("%s target allocation(%llx) mismatch\n",
+ __func__, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ target >>= 1;
+ } while (target >= mem->mm.chunk_size);
+
+err_close_objects:
+ list_splice_tail(&holes, &objects);
+ close_objects(mem, &objects);
+ return err;
+}
+
+static int igt_gpu_write_dw(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dword,
+ u32 value)
+{
+ return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+ vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ struct i915_vma *vma;
+ int *order;
+ int i, n;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ n = 0;
+ count = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ vm = ce->vm;
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ if (!n)
+ return 0;
+
+ order = i915_random_order(count * count, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_free;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_free;
+
+ i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
+ do {
+ u32 rng = prandom_u32_state(&prng);
+ u32 dword = offset_in_page(rng) / 4;
+
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = igt_gpu_write_dw(ce, vma, dword, rng);
+ if (err)
+ break;
+
+ err = igt_cpu_check(obj, dword, rng);
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_gem_context_unlock_engines(ctx);
+
+out_free:
+ kfree(order);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_lmem_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_lmem_write_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ u32 sz;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+ obj = i915_gem_object_create_lmem(i915, sz, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_file;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ err = igt_gpu_write(ctx, obj);
+ if (err)
+ pr_err("igt_gpu_write failed(%d)\n", err);
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static struct intel_engine_cs *
+random_engine_class(struct drm_i915_private *i915,
+ unsigned int class,
+ struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for (engine = intel_engine_lookup_user(i915, class, 0);
+ engine && engine->uabi_class == class;
+ engine = rb_entry_safe(rb_next(&engine->uabi_node),
+ typeof(*engine), uabi_node))
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ return intel_engine_lookup_user(i915, class, count);
+}
+
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 bytes[] = {
+ 0, /* rng placeholder */
+ sizeof(u32),
+ sizeof(u64),
+ 64, /* cl */
+ PAGE_SIZE,
+ PAGE_SIZE - sizeof(u32),
+ PAGE_SIZE - sizeof(u64),
+ PAGE_SIZE - 64,
+ };
+ struct intel_engine_cs *engine;
+ u32 *vaddr;
+ u32 sz;
+ u32 i;
+ int *order;
+ int count;
+ int err;
+
+ engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
+ if (!engine)
+ return 0;
+
+ pr_info("%s: using %s\n", __func__, engine->name);
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+ sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ /* Put the pages into a known state -- from the gpu for added fun */
+ err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ count = ARRAY_SIZE(bytes);
+ order = i915_random_order(count * count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_unpin;
+ }
+
+ /* We want to throw in a random width/align */
+ bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+ sizeof(u32));
+
+ i = 0;
+ do {
+ u32 offset;
+ u32 align;
+ u32 dword;
+ u32 size;
+ u32 val;
+
+ size = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+ offset = igt_random_offset(&prng, 0, obj->base.size,
+ size, align);
+
+ val = prandom_u32_state(&prng);
+ memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+ size / sizeof(u32));
+
+ /*
+ * Sample random dw -- don't waste precious time reading every
+ * single dw.
+ */
+ dword = igt_random_offset(&prng, offset,
+ offset + size,
+ sizeof(u32), sizeof(u32));
+ dword /= sizeof(u32);
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+ __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+ size, align, offset);
+ err = -EINVAL;
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_mock_contiguous),
+ };
+ struct intel_memory_region *mem;
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_unref;
+ }
+
+ err = i915_subtests(tests, mem);
+
+ intel_memory_region_put(mem);
+out_unref:
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
+int intel_memory_region_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_write_cpu),
+ SUBTEST(igt_lmem_write_gpu),
+ };
+
+ if (!HAS_LMEM(i915)) {
+ pr_info("device lacks LMEM support, skipping\n");
+ return 0;
+ }
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
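The heart of igt_mock_contiguous() above is the fragmentation setup: it carves the whole region into SZ_64K contiguous chunks and immediately releases every other one, so roughly half the region is free yet no free span exceeds 64K, which is exactly the invariant the final should_fail loop checks. The alternating-list trick in isolation, using the helpers defined in the new file:

        LIST_HEAD(objects);
        LIST_HEAD(holes);
        u64 n_objects = div64_u64(total, SZ_64K);

        while (n_objects--) {
                /* odd chunks go to a side list that is freed right after,
                 * leaving 64K holes between the surviving allocations */
                struct list_head *list = (n_objects % 2) ? &holes : &objects;
                struct drm_i915_gem_object *obj;

                obj = igt_object_create(mem, list, SZ_64K,
                                        I915_BO_ALLOC_CONTIGUOUS);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
        }
        close_objects(mem, &holes); /* free space is now maximally fragmented */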
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 86815c6072a1..0e4e6be0101d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -67,6 +67,7 @@ static int intel_shadow_table_check(void)
} reg_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
};
const i915_reg_t *reg;
unsigned int i, j;
@@ -101,6 +102,7 @@ int intel_uncore_mock_selftests(void)
{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
+ { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
};
int err, i;
@@ -138,19 +140,19 @@ static int live_forcewake_ops(void *arg)
}
};
const struct reg *r;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_uncore_forcewake_domain *domain;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned int tmp;
int err = 0;
- GEM_BUG_ON(i915->gt.awake);
+ GEM_BUG_ON(gt->awake);
/* vlv/chv with their pcu behave differently wrt reads */
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
pr_debug("PCU fakes forcewake badly; skipping\n");
return 0;
}
@@ -168,15 +170,15 @@ static int live_forcewake_ops(void *arg)
/* We have to pick carefully to get the exact behaviour we need */
for (r = registers; r->name; r++)
- if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ if (r->platforms & INTEL_INFO(gt->i915)->gen_mask)
break;
if (!r->name) {
pr_debug("Forcewaked register not known for %s; skipping\n",
- intel_platform_name(INTEL_INFO(i915)->platform));
+ intel_platform_name(INTEL_INFO(gt->i915)->platform));
return 0;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -186,7 +188,7 @@ static int live_forcewake_ops(void *arg)
intel_uncore_fw_release_timer(&domain->timer);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
@@ -247,22 +249,22 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return err;
}
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
- struct drm_i915_private *dev_priv = arg;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = arg;
+ struct intel_uncore *uncore = gt->uncore;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
+ !IS_VALLEYVIEW(gt->i915) &&
+ !IS_CHERRYVIEW(gt->i915))
return 0;
/*
@@ -281,7 +283,7 @@ static int live_forcewake_domains(void *arg)
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
- (void)I915_READ_FW(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
@@ -298,7 +300,7 @@ static int live_forcewake_domains(void *arg)
check_for_unclaimed_mmio(uncore);
- (void)I915_READ(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
@@ -310,21 +312,23 @@ static int live_forcewake_domains(void *arg)
return err;
}
+static int live_fw_table(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ /* Confirm the table we load is still valid */
+ return intel_fw_table_check(gt->uncore->fw_domains_table,
+ gt->uncore->fw_domains_table_entries,
+ INTEL_GEN(gt->i915) >= 9);
+}
+
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_fw_table),
SUBTEST(live_forcewake_ops),
SUBTEST(live_forcewake_domains),
};
- int err;
-
- /* Confirm the table we load is still valid */
- err = intel_fw_table_check(i915->uncore.fw_domains_table,
- i915->uncore.fw_domains_table_entries,
- INTEL_GEN(i915) >= 9);
- if (err)
- return err;
-
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
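Folding the firmware-table check into live_fw_table means it now runs through intel_gt_live_subtests() like everything else, with each subtest receiving the struct intel_gt directly. The other conversion worth noting is the raw register access: reads that exist only for their side effect go through intel_uncore_posting_read_fw(), as in this fragment modelled on the hunk above:

        struct intel_uncore *uncore = gt->uncore;
        i915_reg_t reg = { offset };

        /* read purely to trigger unclaimed-mmio detection;
         * the returned value is deliberately discarded */
        intel_uncore_posting_read_fw(uncore, reg);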
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 01a89c071bf5..27ed3cee6a9b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -26,27 +26,29 @@
#include <linux/pm_runtime.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "intel_memory_region.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "mock_region.h"
#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"
void mock_device_flush(struct drm_i915_private *i915)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
do {
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
mock_engine_flush(engine);
- } while (i915_retire_requests(i915));
+ } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
}
static void mock_device_release(struct drm_device *dev)
@@ -55,31 +57,23 @@ static void mock_device_release(struct drm_device *dev)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, &i915->gt, id)
mock_engine_free(engine);
- i915_gem_contexts_fini(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_driver_release__contexts(i915);
intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
mock_fini_ggtt(&i915->ggtt);
- mutex_unlock(&i915->drm.struct_mutex);
-
destroy_workqueue(i915->wq);
- i915_gemfs_fini(i915);
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -103,14 +97,6 @@ static void release_dev(struct device *dev)
kfree(pdev);
}
-static void mock_retire_work_handler(struct work_struct *work)
-{
-}
-
-static void mock_idle_work_handler(struct work_struct *work)
-{
-}
-
static int pm_domain_resume(struct device *dev)
{
return pm_generic_runtime_resume(dev);
@@ -178,10 +164,15 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(&i915->uncore);
+ mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ intel_memory_regions_hw_probe(i915);
+
+ mock_uncore_init(&i915->uncore, i915);
+
i915_gem_init__mm(i915);
intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
+ i915->gt.awake = -ENODEV;
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
@@ -189,15 +180,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
- INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
-
- i915->gt.awake = true;
-
intel_timelines_init(i915);
- mutex_lock(&i915->drm.struct_mutex);
-
mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->engine_mask = BIT(0);
@@ -214,21 +198,18 @@ struct drm_i915_private *mock_gem_device(void)
goto err_context;
intel_engines_driver_register(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- WARN_ON(i915_gemfs_init(i915));
return i915;
err_context:
- i915_gem_contexts_fini(i915);
+ i915_gem_driver_release__contexts(i915);
err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
drm_dev_fini(&i915->drm);
put_device:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e62a67e0f79c..20ac3844edec 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->flags |= I915_VMA_LOCAL_BIND;
+ set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
return 0;
}
@@ -63,6 +63,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
if (!ppgtt)
return NULL;
+ ppgtt->vm.gt = &i915->gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
@@ -86,7 +87,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -117,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
- intel_gt_init_hw(i915);
+ i915->gt.ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
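The mock binders track the i915_vma flag rework: vma->flags is now an atomic bitmask, so a plain |= would lose updates against concurrent bind/unbind. Both idioms from the hunks above, side by side (the old form shown only as a comment):

        /* before: non-atomic read-modify-write on vma->flags
         *      vma->flags |= I915_VMA_LOCAL_BIND;
         */

        /* after: atomic bit operations on the same word */
        atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
        set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));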
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
new file mode 100644
index 000000000000..b2ad41c27e67
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_region.h"
+#include "intel_memory_region.h"
+
+#include "mock_region.h"
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+static struct drm_i915_gem_object *
+mock_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+ .init = intel_memory_region_init_buddy,
+ .release = intel_memory_region_release_buddy,
+ .create_object = mock_object_create,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start)
+{
+ return intel_memory_region_create(i915, start, size, min_page_size,
+ io_start, &mock_region_ops);
+}
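mock_region_ops wires the generic buddy-allocator init/release into intel_memory_region_create(), so objects created against the mock region exercise the same allocation paths as real LMEM. A usage sketch mirroring intel_memory_region_mock_selftests() earlier in this patch:

        struct intel_memory_region *mem;
        struct drm_i915_gem_object *obj;

        mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
        if (IS_ERR(mem))
                return PTR_ERR(mem);

        /* dispatches to mock_object_create() via mem->ops->create_object */
        obj = i915_gem_object_create_region(mem, SZ_64K, 0);
        /* ... exercise obj, then drop it ... */

        intel_memory_region_put(mem);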
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
new file mode 100644
index 000000000000..24608089d833
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start);
+
+#endif /* !__MOCK_REGION_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 49585f16d4a2..ca57e4008701 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,11 @@ __nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct intel_uncore *uncore)
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
+ intel_uncore_init_early(uncore, i915);
+
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index dacb36b5ffcd..8a2cc553f466 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,7 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct intel_uncore *uncore);
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 695f307f36b2..208069faf183 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index e7ce17503ae1..35518e5de356 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -13,6 +13,7 @@
#include <video/of_display_timing.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 2e2ed653e9c6..ec32e1c67335 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -676,8 +677,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
}
if (panel)
- bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index bb4ddc6bb0a6..571dc369a7e9 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -9,5 +9,6 @@ config DRM_LIMA
depends on COMMON_CLK
depends on OF
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index 38cc70281ba5..a85444b0a1d4 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -13,9 +13,7 @@ lima-y := \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
- lima_gem_prime.o \
lima_dlbu.o \
- lima_bcast.o \
- lima_object.o
+ lima_bcast.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index d86b8d81a483..19829b543024 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -105,7 +105,8 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
if (err != -EPROBE_DEFER)
@@ -313,7 +314,7 @@ int lima_device_init(struct lima_device *ldev)
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
- &ldev->dlbu_dma, GFP_KERNEL);
+ &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 75ec703d22e0..124efe4fa97b 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -12,7 +12,6 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
@@ -240,16 +239,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};
-static const struct file_operations lima_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .mmap = lima_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -258,10 +248,6 @@ static struct drm_driver lima_drm_driver = {
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
- .gem_free_object_unlocked = lima_gem_free_object,
- .gem_open_object = lima_gem_object_open,
- .gem_close_object = lima_gem_object_close,
- .gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
@@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = {
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = lima_gem_create_object,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
- .gem_prime_mmap = lima_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
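With the shmem conversion, lima no longer hand-rolls file operations, faulting, or PRIME import/export: DEFINE_DRM_GEM_FOPS() supplies the fops, and the per-object drm_gem_object_funcs installed in lima_gem_create_object() replace the old driver-wide callbacks. The resulting wiring, reduced to its essentials (a sketch of the pattern, not the full lima driver):

        #include <drm/drm_drv.h>
        #include <drm/drm_gem_shmem_helper.h>

        DEFINE_DRM_GEM_FOPS(example_fops); /* open/release/ioctl/mmap via GEM core */

        static struct drm_driver example_driver = {
                .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
                .fops = &example_fops,
                /* each object carries its own funcs, set in gem_create_object */
                .gem_create_object = lima_gem_create_object,
                .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
                .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
                .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
                .gem_prime_mmap = drm_gem_prime_mmap,
        };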
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4da21353c3a2..d0059d8c97d8 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
-#include <linux/pfn_t.h>
+#include <linux/pagemap.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -13,40 +13,55 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
-#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
- struct lima_bo *bo;
- struct lima_device *ldev = to_lima_dev(dev);
+ gfp_t mask;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ struct sg_table *sgt;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ obj = &shmem->base;
- bo = lima_bo_create(ldev, size, flags, NULL);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ /* Mali Utgard GPU can only support 32bit address space */
+ mask = mapping_gfp_mask(obj->filp->f_mapping);
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- err = drm_gem_handle_create(file, &bo->gem, handle);
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
+
+ err = drm_gem_handle_create(file, obj, handle);
+out:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(obj);
return err;
}
-void lima_gem_free_object(struct drm_gem_object *obj)
+static void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
- lima_bo_destroy(bo);
+ drm_gem_shmem_free_object(obj);
}
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
return lima_vm_bo_add(vm, bo, true);
}
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
lima_vm_bo_del(vm, bo);
}
+static const struct drm_gem_object_funcs lima_gem_funcs = {
+ .free = lima_gem_free_object,
+ .open = lima_gem_object_open,
+ .close = lima_gem_object_close,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct lima_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+
+ bo->base.base.funcs = &lima_gem_funcs;
+
+ return &bo->base.base;
+}
+
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
- int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
@@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
*va = lima_vm_get_va(vm, bo);
- err = drm_gem_create_mmap_offset(obj);
- if (!err)
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
- return err;
-}
-
-static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct lima_bo *bo = to_lima_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
-
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
-
- return vmf_insert_mixed(vma, vmf->address, pfn);
-}
-
-const struct vm_operations_struct lima_gem_vm_ops = {
- .fault = lima_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-void lima_set_vma_flags(struct vm_area_struct *vma)
-{
- pgprot_t prot = vm_get_page_prot(vma->vm_flags);
-
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_page_prot = pgprot_writecombine(prot);
-}
-
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
return 0;
}
@@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
int err = 0;
if (!write) {
- err = dma_resv_reserve_shared(bo->gem.resv, 1);
+ err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
if (err)
return err;
}
@@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
-}
-
-static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i, ret = 0, contended, slow_locked = -1;
-
- ww_acquire_init(ctx, &reservation_ww_class);
-
-retry:
- for (i = 0; i < nr_bos; i++) {
- if (i == slow_locked) {
- slow_locked = -1;
- continue;
- }
-
- ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
- if (ret < 0) {
- contended = i;
- goto err;
- }
- }
-
- ww_acquire_done(ctx);
- return 0;
-
-err:
- for (i--; i >= 0; i--)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
-
- if (slow_locked >= 0)
- ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
-
- if (ret == -EDEADLK) {
- /* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(
- &bos[contended]->gem.resv->lock, ctx);
- if (!ret) {
- slow_locked = contended;
- goto retry;
- }
- }
- ww_acquire_fini(ctx);
-
- return ret;
-}
-
-static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < nr_bos; i++)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
- ww_acquire_fini(ctx);
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
bos[i] = bo;
}
- err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
if (err)
goto err_out0;
@@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
else
- dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
}
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
@@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err_out2:
lima_sched_task_fini(submit->task);
err_out1:
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
err_out0:
for (i = 0; i < submit->nr_bos; i++) {
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
}
if (out_sync)
drm_syncobj_put(out_sync);
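Because Mali Utgard can only address 32 bits, the new create path above constrains the shmem mapping's allocation mask before any pages are faulted in; unlike the old lima_bo code, which set GFP_DMA32 wholesale, this preserves the mapping's other gfp bits. The three lines in isolation:

        gfp_t mask = mapping_gfp_mask(obj->filp->f_mapping);

        mask &= ~__GFP_HIGHMEM; /* pages must stay kernel-addressable */
        mask |= __GFP_DMA32;    /* and physically below 4 GiB for the GPU */
        mapping_set_gfp_mask(obj->filp->f_mapping, mask);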
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 556111a01135..1800feb3e47f 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -4,19 +4,37 @@
#ifndef __LIMA_GEM_H__
#define __LIMA_GEM_H__
-struct lima_bo;
+#include <drm/drm_gem_shmem_helper.h>
+
struct lima_submit;
-extern const struct vm_operations_struct lima_gem_vm_ops;
+struct lima_bo {
+ struct drm_gem_shmem_object base;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base);
+}
+
+static inline size_t lima_bo_size(struct lima_bo *bo)
+{
+ return bo->base.base.size;
+}
+
+static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
+{
+ return bo->base.base.resv;
+}
-struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-void lima_gem_free_object(struct drm_gem_object *obj);
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
deleted file mode 100644
index e3eb251e0a12..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <linux/dma-buf.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-
-#include "lima_device.h"
-#include "lima_object.h"
-#include "lima_gem.h"
-#include "lima_gem_prime.h"
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct lima_device *ldev = to_lima_dev(dev);
- struct lima_bo *bo;
-
- bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
-
- return &bo->gem;
-}
-
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct lima_bo *bo = to_lima_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
-
- return drm_prime_pages_to_sg(bo->pages, npages);
-}
-
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
- return 0;
-}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
deleted file mode 100644
index 34b4d35c21e3..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_GEM_PRIME_H__
-#define __LIMA_GEM_PRIME_H__
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 8e1651d6a61f..97ec09dee572 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -8,7 +8,6 @@
#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
-#include "lima_object.h"
#include "lima_regs.h"
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
deleted file mode 100644
index 87123b1d083c..000000000000
--- a/drivers/gpu/drm/lima/lima_object.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <drm/drm_prime.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-
-#include "lima_object.h"
-
-void lima_bo_destroy(struct lima_bo *bo)
-{
- if (bo->sgt) {
- kfree(bo->pages);
- drm_prime_gem_destroy(&bo->gem, bo->sgt);
- } else {
- if (bo->pages_dma_addr) {
- int i, npages = bo->gem.size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (bo->pages_dma_addr[i])
- dma_unmap_page(bo->gem.dev->dev,
- bo->pages_dma_addr[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
- }
-
- if (bo->pages)
- drm_gem_put_pages(&bo->gem, bo->pages, true, true);
- }
-
- kfree(bo->pages_dma_addr);
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags)
-{
- struct lima_bo *bo;
- int err;
-
- size = PAGE_ALIGN(size);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&bo->lock);
- INIT_LIST_HEAD(&bo->va);
-
- err = drm_gem_object_init(dev->ddev, &bo->gem, size);
- if (err) {
- kfree(bo);
- return ERR_PTR(err);
- }
-
- return bo;
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt)
-{
- int i, err;
- size_t npages;
- struct lima_bo *bo, *ret;
-
- bo = lima_bo_create_struct(dev, size, flags);
- if (IS_ERR(bo))
- return bo;
-
- npages = bo->gem.size >> PAGE_SHIFT;
-
- bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bo->pages_dma_addr) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- if (sgt) {
- bo->sgt = sgt;
-
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- err = drm_prime_sg_to_page_addr_arrays(
- sgt, bo->pages, bo->pages_dma_addr, npages);
- if (err) {
- ret = ERR_PTR(err);
- goto err_out;
- }
- } else {
- mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
- bo->pages = drm_gem_get_pages(&bo->gem);
- if (IS_ERR(bo->pages)) {
- ret = ERR_CAST(bo->pages);
- bo->pages = NULL;
- goto err_out;
- }
-
- for (i = 0; i < npages; i++) {
- dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = ERR_PTR(-EFAULT);
- goto err_out;
- }
- bo->pages_dma_addr[i] = addr;
- }
-
- }
-
- return bo;
-
-err_out:
- lima_bo_destroy(bo);
- return ret;
-}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
deleted file mode 100644
index 31ca2d8dc0a1..000000000000
--- a/drivers/gpu/drm/lima/lima_object.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_OBJECT_H__
-#define __LIMA_OBJECT_H__
-
-#include <drm/drm_gem.h>
-
-#include "lima_device.h"
-
-struct lima_bo {
- struct drm_gem_object gem;
-
- struct page **pages;
- dma_addr_t *pages_dma_addr;
- struct sg_table *sgt;
- void *vaddr;
-
- struct mutex lock;
- struct list_head va;
-};
-
-static inline struct lima_bo *
-to_lima_bo(struct drm_gem_object *obj)
-{
- return container_of(obj, struct lima_bo, gem);
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt);
-void lima_bo_destroy(struct lima_bo *bo);
-void *lima_bo_vmap(struct lima_bo *bo);
-void lima_bo_vunmap(struct lima_bo *bo);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 4127cacac454..f522c5f99729 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -10,7 +10,7 @@
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
-#include "lima_object.h"
+#include "lima_gem.h"
struct lima_fence {
struct dma_fence base;
@@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
return -ENOMEM;
for (i = 0; i < num_bos; i++)
- drm_gem_object_get(&bos[i]->gem);
+ drm_gem_object_get(&bos[i]->base.base);
err = drm_sched_job_init(&task->base, &context->base, vm);
if (err) {
@@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task)
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
- drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ drm_gem_object_put_unlocked(&task->bos[i]->base.base);
kfree(task->bos);
}
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 19e88ca16527..840e2350d872 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -6,7 +6,7 @@
#include "lima_device.h"
#include "lima_vm.h"
-#include "lima_object.h"
+#include "lima_gem.h"
#include "lima_regs.h"
struct lima_bo_va {
@@ -32,7 +32,7 @@ struct lima_bo_va {
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
-static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
u32 addr;
@@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
}
}
-static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
- u32 start, u32 end)
+static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
- u64 addr;
- int i = 0;
-
- for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
- u32 pbe = LIMA_PBE(addr);
- u32 bte = LIMA_BTE(addr);
-
- if (!vm->bts[pbe].cpu) {
- dma_addr_t pts;
- u32 *pd;
- int j;
-
- vm->bts[pbe].cpu = dma_alloc_wc(
- vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
- &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
- if (!vm->bts[pbe].cpu) {
- if (addr != start)
- lima_vm_unmap_page_table(vm, start, addr - 1);
- return -ENOMEM;
- }
-
- pts = vm->bts[pbe].dma;
- pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
- for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
- pd[j] = pts | LIMA_VM_FLAG_PRESENT;
- pts += LIMA_PAGE_SIZE;
- }
+ u32 pbe = LIMA_PBE(va);
+ u32 bte = LIMA_BTE(va);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu)
+ return -ENOMEM;
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
}
-
- vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
}
+ vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
+
return 0;
}
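The helper rewritten above now maps exactly one page per call: the virtual address is split into a page-block index (LIMA_PBE) selecting a block table and a block-table index (LIMA_BTE) selecting the entry within it, with the block table allocated lazily on first touch. A standalone sketch of that two-level lookup, using invented shift values since the real LIMA_VM_* constants are outside this hunk:

#include <stdlib.h>

#define PAGE_SHIFT_SK 12                /* assumed 4 KiB pages */
#define BT_SHIFT_SK   22                /* assumed: one block table per 4 MiB */

struct pt_sketch {
        unsigned int *bt[1u << (32 - BT_SHIFT_SK)];     /* lazily allocated */
};

/* Map a single page, allocating the covering block table on demand. */
static int map_page_sketch(struct pt_sketch *pt, unsigned int pa, unsigned int va)
{
        unsigned int pbe = va >> BT_SHIFT_SK;
        unsigned int bte = (va & ((1u << BT_SHIFT_SK) - 1)) >> PAGE_SHIFT_SK;

        if (!pt->bt[pbe]) {
                pt->bt[pbe] = calloc(1u << (BT_SHIFT_SK - PAGE_SHIFT_SK),
                                     sizeof(unsigned int));
                if (!pt->bt[pbe])
                        return -1;      /* caller unwinds already-mapped range */
        }
        pt->bt[pbe][bte] = pa | 1u;     /* PRESENT-style flag, as in the patch */
        return 0;
}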
@@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
struct lima_bo_va *bo_va;
- int err;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
mutex_lock(&bo->lock);
@@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
mutex_lock(&vm->lock);
- err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
if (err)
goto err_out1;
- err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
- if (err)
- goto err_out2;
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ bo_va->node.start + offset);
+ if (err)
+ goto err_out2;
+
+ offset += PAGE_SIZE;
+ }
mutex_unlock(&vm->lock);
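The loop added above replaces the flat pages_dma_addr array with the scatterlist page iterator, so the VM now walks whatever the shmem helper mapped. A condensed sketch of the pattern, assuming kernel context and eliding locking:

#include <linux/scatterlist.h>

/* Walk every DMA-mapped page of an sg_table and hand it to a per-page
 * map callback, advancing the target VA one page per iteration. */
static int map_sgt_sketch(struct sg_table *sgt, u32 va_start,
                          int (*map_one)(dma_addr_t pa, u32 va))
{
        struct sg_dma_page_iter iter;
        u32 offset = 0;
        int err;

        for_each_sg_dma_page(sgt->sgl, &iter, sgt->nents, 0) {
                err = map_one(sg_page_iter_dma_address(&iter),
                              va_start + offset);
                if (err)
                        return err;     /* caller unmaps [va_start, va_start + offset) */
                offset += PAGE_SIZE;
        }
        return 0;
}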
@@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
return 0;
err_out2:
+ if (offset)
+ lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
drm_mm_remove_node(&bo_va->node);
err_out1:
mutex_unlock(&vm->lock);
@@ -168,8 +166,8 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
- lima_vm_unmap_page_table(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ lima_vm_unmap_range(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev)
kref_init(&vm->refcount);
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!vm->pd.cpu)
goto err_out0;
if (dev->dlbu_cpu) {
- int err = lima_vm_map_page_table(
- vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
- LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ int err = lima_vm_map_page(
+ vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
if (err)
goto err_out1;
}
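Both dma_alloc_wc() call sites in this file gain __GFP_NOWARN: a failed page-table allocation is now an expected, recoverable event (the callers unwind through lima_vm_unmap_range() or err_out1), so the allocator's OOM backtrace would only be noise. The pattern, assuming kernel context:

#include <linux/dma-mapping.h>

/* Write-combined page-table chunk; suppress the OOM splat because the
 * caller has a clean failure path of its own. */
static void *alloc_pt_sketch(struct device *dev, size_t size, dma_addr_t *dma)
{
        return dma_alloc_wc(dev, size, dma,
                            GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
}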
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9a09eba53182..5649887d2b90 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev)
}
if (!match) {
dev_err(dev, "no matching components\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto clk_disable;
}
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index f9c9e32b299c..d6214d3c8b33 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -946,8 +946,8 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
}
}
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge)) {
dev_err(dev, "error adding panel bridge\n");
return PTR_ERR(bridge);
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 82ae49c64221..8067a4be8311 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,6 +12,8 @@ mediatek-drm-y := mtk_disp_color.o \
mtk_drm_plane.o \
mtk_dsi.o \
mtk_mipi_tx.o \
+ mtk_mt8173_mipi_tx.o \
+ mtk_mt8183_mipi_tx.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 21851756c579..4a55bb6e2213 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <drm/drm_fourcc.h>
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
@@ -19,6 +21,8 @@
#define DISP_REG_OVL_EN 0x000c
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
+#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
@@ -31,7 +35,9 @@
#define DISP_REG_OVL_ADDR_MT8173 0x0f40
#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
-#define OVL_RDMA_MEM_GMC 0x40402020
+#define GMC_THRESHOLD_BITS 16
+#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
+#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
#define OVL_CON_BYTE_SWAP BIT(24)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -46,9 +52,13 @@
OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+#define OVL_CON_VIRT_FLIP BIT(9)
+#define OVL_CON_HORZ_FLIP BIT(10)
struct mtk_disp_ovl_data {
unsigned int addr;
+ unsigned int gmc_bits;
+ unsigned int layer_nr;
bool fmt_rgb565_is_0;
};
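The parametrized thresholds reproduce the old magic number exactly: with GMC_THRESHOLD_BITS = 16, GMC_THRESHOLD_HIGH is 0x4000 and GMC_THRESHOLD_LOW is 0x2000, so for the 8-bit GMC fields used by the existing SoCs the packed word in mtk_ovl_layer_on() comes out to the 0x40402020 that OVL_RDMA_MEM_GMC used to hard-code. A quick check:

#include <assert.h>

#define GMC_THRESHOLD_BITS 16
#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)     /* 0x4000 */
#define GMC_THRESHOLD_LOW  ((1 << GMC_THRESHOLD_BITS) / 8)     /* 0x2000 */

int main(void)
{
        unsigned int gmc_bits = 8;      /* mt2701/mt8173 per the match data below */
        unsigned int h = GMC_THRESHOLD_HIGH >> (GMC_THRESHOLD_BITS - gmc_bits);
        unsigned int l = GMC_THRESHOLD_LOW  >> (GMC_THRESHOLD_BITS - gmc_bits);

        assert((l | l << 8 | h << 16 | h << 24) == 0x40402020);
        return 0;
}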
@@ -126,15 +136,65 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
{
- return 4;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+
+ return ovl->data->layer_nr;
+}
+
+static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
+{
+ return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+}
+
+static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *mtk_state)
+{
+ struct drm_plane_state *state = &mtk_state->base;
+ unsigned int rotation = 0;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ rotation &= ~DRM_MODE_ROTATE_0;
+
+ /* We can only do reflection, not rotation */
+ if ((rotation & DRM_MODE_ROTATE_MASK) != 0)
+ return -EINVAL;
+
+ /*
+ * TODO: Rotating/reflecting YUV buffers is not supported at this time.
+ * Only RGB[AX] variants are supported.
+ */
+ if (state->fb->format->is_yuv && rotation != 0)
+ return -EINVAL;
+
+ state->rotation = rotation;
+
+ return 0;
}
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
+ unsigned int gmc_thrshd_l;
+ unsigned int gmc_thrshd_h;
+ unsigned int gmc_value;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
- writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
+ gmc_thrshd_l = GMC_THRESHOLD_LOW >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ if (ovl->data->gmc_bits == 10)
+ gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
+ else
+ gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
+ gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
+ writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
reg = reg | BIT(idx);
@@ -207,6 +267,16 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
if (idx != 0)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
+ if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ con |= OVL_CON_VIRT_FLIP;
+ addr += (pending->height - 1) * pending->pitch;
+ }
+
+ if (pending->rotation & DRM_MODE_REFLECT_X) {
+ con |= OVL_CON_HORZ_FLIP;
+ addr += pending->pitch - 1;
+ }
+
writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
@@ -217,16 +287,38 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
mtk_ovl_layer_on(comp, idx);
}
+static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
+static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.config = mtk_ovl_config,
.start = mtk_ovl_start,
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
+ .layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
};
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
@@ -276,7 +368,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ priv->data = of_device_get_match_data(dev);
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node,
+ priv->data->layer_nr == 4 ?
+ MTK_DISP_OVL :
+ MTK_DISP_OVL_2L);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
return comp_id;
@@ -289,8 +386,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
- priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -316,11 +411,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT2701,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = false,
};
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = true,
};
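One note on the reflection support added earlier in this file: when a flip bit is set the OVL engine scans the buffer backwards, so mtk_ovl_layer_config() moves the DMA start address to the last line (REFLECT_Y) or to the end of the first line (REFLECT_X). A rough check of that address math for an assumed 1920x1080 XRGB8888 layer at an assumed base address:

#include <stdio.h>

int main(void)
{
        unsigned int width = 1920, height = 1080, cpp = 4;  /* assumed layer */
        unsigned long addr = 0x10000000;                    /* assumed base */
        unsigned int pitch = width * cpp;                   /* 7680 bytes */

        /* DRM_MODE_REFLECT_Y: start scanout on the last line. */
        unsigned long addr_y = addr + (unsigned long)(height - 1) * pitch;

        /* DRM_MODE_REFLECT_X: start scanout at the end of a line. */
        unsigned long addr_x = addr + pitch - 1;

        printf("%#lx %#lx\n", addr_y, addr_x);  /* 0x107e7200 0x10001dff */
        return 0;
}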
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index be6d95c5ff25..01fa8b8d763d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -17,6 +17,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 34a731755791..f80a8ba75977 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -207,6 +207,28 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
+static
+struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ unsigned int *local_layer)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp;
+ int i, count = 0;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ comp = mtk_crtc->ddp_comp[i];
+ if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
+ *local_layer = plane->index - count;
+ return comp;
+ }
+ count += mtk_ddp_comp_layer_nr(comp);
+ }
+
+ WARN(1, "Failed to find component for plane %d\n", plane->index);
+ return NULL;
+}
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -272,6 +294,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_on(comp);
+
mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
mtk_ddp_comp_start(comp);
}
@@ -280,10 +305,12 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
+ struct mtk_ddp_comp *comp;
+ unsigned int local_layer;
plane_state = to_mtk_plane_state(plane->state);
- mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
- plane_state);
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
}
return 0;
@@ -301,8 +328,12 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
+ }
+
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
@@ -327,6 +358,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
+ unsigned int local_layer;
/*
* TODO: instead of updating the registers here, we should prepare
@@ -348,15 +380,30 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(comp, i, plane_state);
- plane_state->pending.config = false;
- }
+ if (!plane_state->pending.config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state);
+ plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
}
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state)
+{
+ unsigned int local_layer;
+ struct mtk_ddp_comp *comp;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ return mtk_ddp_comp_layer_check(comp, local_layer, state);
+}
+
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -518,14 +565,65 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
mtk_drm_finish_page_flip(mtk_crtc);
}
+static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx)
+{
+ struct mtk_ddp_comp *comp;
+
+ if (comp_idx > 1)
+ return 0;
+
+ comp = mtk_crtc->ddp_comp[comp_idx];
+ if (!comp->funcs)
+ return 0;
+
+ if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
+ return 0;
+
+ return mtk_ddp_comp_layer_nr(comp);
+}
+
+static inline
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+{
+ if (plane_idx == 0)
+ return DRM_PLANE_TYPE_PRIMARY;
+ else if (plane_idx == 1)
+ return DRM_PLANE_TYPE_CURSOR;
+ else
+ return DRM_PLANE_TYPE_OVERLAY;
+
+}
+
+static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
+ struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx, int pipe)
+{
+ int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
+ int i, ret;
+
+ for (i = 0; i < num_planes; i++) {
+ ret = mtk_plane_init(drm_dev,
+ &mtk_crtc->planes[mtk_crtc->layer_nr],
+ BIT(pipe),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_ddp_comp_supported_rotations(comp));
+ if (ret)
+ return ret;
+
+ mtk_crtc->layer_nr++;
+ }
+ return 0;
+}
+
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
struct mtk_drm_crtc *mtk_crtc;
- enum drm_plane_type type;
- unsigned int zpos;
+ unsigned int num_comp_planes = 0;
int pipe = priv->num_pipes;
int ret;
int i;
@@ -581,17 +679,15 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
- sizeof(struct drm_plane),
- GFP_KERNEL);
-
- for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
- type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
- (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
- DRM_PLANE_TYPE_OVERLAY;
- ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
- BIT(pipe), type);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+
+ mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+ sizeof(struct drm_plane), GFP_KERNEL);
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+ pipe);
if (ret)
return ret;
}
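With two overlay engines feeding one CRTC the global plane index no longer equals a hardware layer index; mtk_drm_ddp_comp_for_plane() walks the component list and subtracts the layer counts seen so far. For an assumed path of OVL0 (4 layers) followed by OVL_2L0 (2 layers), plane 5 resolves to the second component's local layer 1:

#include <stdio.h>

int main(void)
{
        unsigned int layer_nr[] = { 4, 2 };     /* assumed: OVL0 then OVL_2L0 */
        unsigned int plane = 5, count = 0;

        for (unsigned int i = 0; i < 2; i++) {
                if (plane < count + layer_nr[i]) {
                        printf("comp %u, local layer %u\n", i, plane - count);
                        return 0;
                }
                count += layer_nr[i];
        }
        printf("no component for plane %u\n", plane);
        return 0;
}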
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index fcc134eb00c9..6afe1c19557a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -19,5 +19,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state);
#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8106a71a7404..13035c906035 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -33,12 +33,15 @@
#define DISP_REG_CONFIG_DSI_SEL 0x050
#define DISP_REG_CONFIG_DPI_SEL 0x064
-#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
-#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
-#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
-#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
+#define MT2701_DISP_MUTEX0_MOD0 0x2c
+#define MT2701_DISP_MUTEX0_SOF0 0x30
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
@@ -139,12 +142,30 @@ struct mtk_disp_mutex {
bool claimed;
};
+enum mtk_ddp_mutex_sof_id {
+ DDP_MUTEX_SOF_SINGLE_MODE,
+ DDP_MUTEX_SOF_DSI0,
+ DDP_MUTEX_SOF_DSI1,
+ DDP_MUTEX_SOF_DPI0,
+ DDP_MUTEX_SOF_DPI1,
+ DDP_MUTEX_SOF_DSI2,
+ DDP_MUTEX_SOF_DSI3,
+};
+
+struct mtk_ddp_data {
+ const unsigned int *mutex_mod;
+ const unsigned int *mutex_sof;
+ const unsigned int mutex_mod_reg;
+ const unsigned int mutex_sof_reg;
+ const bool no_clk;
+};
+
struct mtk_ddp {
struct device *dev;
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
- const unsigned int *mutex_mod;
+ const struct mtk_ddp_data *data;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+ [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+ .mutex_mod = mt2701_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+ .mutex_mod = mt2712_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next,
unsigned int *addr)
@@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int sof_id;
unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI1:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI2:
- reg = MUTEX_SOF_DSI2;
+ sof_id = DDP_MUTEX_SOF_DSI2;
break;
case DDP_COMPONENT_DSI3:
- reg = MUTEX_SOF_DSI3;
+ sof_id = DDP_MUTEX_SOF_DSI3;
break;
case DDP_COMPONENT_DPI0:
- reg = MUTEX_SOF_DPI0;
+ sof_id = DDP_MUTEX_SOF_DPI0;
break;
case DDP_COMPONENT_DPI1:
- reg = MUTEX_SOF_DPI1;
+ sof_id = DDP_MUTEX_SOF_DPI1;
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << ddp->mutex_mod[id];
+ reg |= 1 << ddp->data->mutex_mod[id];
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << (ddp->mutex_mod[id] - 32);
+ reg |= 1 << (ddp->data->mutex_mod[id] - 32);
writel_relaxed(reg, ddp->regs + offset);
}
return;
}
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ writel_relaxed(ddp->data->mutex_sof[sof_id],
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
}
void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
@@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
- ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+ mutex->id));
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << ddp->mutex_mod[id]);
+ reg &= ~(1 << ddp->data->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
writel_relaxed(reg, ddp->regs + offset);
}
break;
@@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev)
for (i = 0; i < 10; i++)
ddp->mutex[i].id = i;
- ddp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddp->clk);
+ ddp->data = of_device_get_match_data(dev);
+
+ if (!ddp->data->no_clk) {
+ ddp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddp->clk);
+ }
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
- ddp->mutex_mod = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, ddp);
return 0;
@@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
- { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
- { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = &mt2701_ddp_driver_data},
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = &mt2712_ddp_driver_data},
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = &mt8173_ddp_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
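The point of threading mutex_mod_reg and mutex_sof_reg through mtk_ddp_data is that newer SoCs place the mutex register file at a different base while the per-mutex stride stays 0x20. With the MT2701-style base used by all three entries above, mutex 3's MOD register lands at 0x2c + 3 * 0x20 = 0x8c:

#define DISP_REG_MUTEX_MOD(base, n)     ((base) + 0x20 * (n))
/* DISP_REG_MUTEX_MOD(0x2c, 3) == 0x8c on MT2701/MT2712/MT8173 */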
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index efa85973e46b..7f21307cda75 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -33,6 +33,18 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define DISP_CCORR_SIZE 0x0030
+
+#define DISP_DITHER_EN 0x0000
+#define DITHER_EN BIT(0)
+#define DISP_DITHER_CFG 0x0020
+#define DITHER_RELAY_MODE BIT(0)
+#define DISP_DITHER_SIZE 0x0030
+
#define DISP_GAMMA_EN 0x0000
#define DISP_GAMMA_CFG 0x0020
#define DISP_GAMMA_SIZE 0x0030
@@ -123,6 +135,42 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
}
+static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
+ writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+}
+
+static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
+{
+ writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
+ writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+}
+
+static void mtk_dither_start(struct mtk_ddp_comp *comp)
+{
+ writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+}
+
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
@@ -171,6 +219,18 @@ static const struct mtk_ddp_comp_funcs ddp_aal = {
.stop = mtk_aal_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .config = mtk_ccorr_config,
+ .start = mtk_ccorr_start,
+ .stop = mtk_ccorr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .config = mtk_dither_config,
+ .start = mtk_dither_start,
+ .stop = mtk_dither_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
@@ -189,11 +249,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_OVL] = "ovl",
+ [MTK_DISP_OVL_2L] = "ovl_2l",
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_WDMA] = "wdma",
[MTK_DISP_COLOR] = "color",
+ [MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_AAL] = "aal",
[MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DSI] = "dsi",
[MTK_DPI] = "dpi",
@@ -213,8 +276,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
@@ -226,6 +291,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
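The CCORR and DITHER hooks added above only program the blocks for pass-through: the size register packs height into the top halfword and width into the bottom, and the RELAY bit bypasses actual processing. For an assumed 1920x1080 mode the size write is therefore 0x04380780:

#include <assert.h>

int main(void)
{
        unsigned int w = 1920, h = 1080;        /* assumed mode */

        assert((h << 16 | w) == 0x04380780);    /* DISP_CCORR/DITHER_SIZE value */
        return 0;
}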
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0ad287f427cc..2f1e9e75b8da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -17,9 +17,12 @@ struct drm_crtc_state;
enum mtk_ddp_comp_type {
MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
MTK_DISP_RDMA,
MTK_DISP_WDMA,
MTK_DISP_COLOR,
+ MTK_DISP_CCORR,
+ MTK_DISP_DITHER,
MTK_DISP_AAL,
MTK_DISP_GAMMA,
MTK_DISP_UFOE,
@@ -36,8 +39,10 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
@@ -48,6 +53,8 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
@@ -70,13 +77,19 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
+ int (*layer_check)(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
+ void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
+ void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
};
struct mtk_ddp_comp {
@@ -121,6 +134,15 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline
+unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->supported_rotations)
+ return comp->funcs->supported_rotations(comp);
+
+ return 0;
+}
+
static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->layer_nr)
@@ -143,6 +165,15 @@ static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
comp->funcs->layer_off(comp, idx);
}
+static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ if (comp->funcs && comp->funcs->layer_check)
+ return comp->funcs->layer_check(comp, idx, state);
+ return 0;
+}
+
static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state)
@@ -158,6 +189,18 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
comp->funcs->gamma_set(comp, state);
}
+static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_on)
+ comp->funcs->bgclr_in_on(comp);
+}
+
+static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_off)
+ comp->funcs->bgclr_in_off(comp);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 352b81a7a670..84d14213d992 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -547,6 +547,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
*/
if (comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_OVL ||
+ comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -669,8 +670,8 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
- &mtk_dsi_driver,
&mtk_mipi_tx_driver,
+ &mtk_dsi_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index ca672f1d140d..b04a3c2b111e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
pgprot_writecombine(PAGE_KERNEL));
out:
- kfree((void *)sgt);
+ kfree(sgt);
return mtk_gem->kvaddr;
}
@@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vunmap(vaddr);
mtk_gem->kvaddr = 0;
- kfree((void *)mtk_gem->pages);
+ kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 584a9ecadce6..3b0cc91c7023 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -20,6 +20,12 @@
static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
@@ -84,6 +90,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
+ int ret;
if (!fb)
return 0;
@@ -91,6 +98,11 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!state->crtc)
return 0;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -132,6 +144,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.y = plane->state->dst.y1;
state->pending.width = drm_rect_width(&plane->state->dst);
state->pending.height = drm_rect_height(&plane->state->dst);
+ state->pending.rotation = plane->state->rotation;
wmb(); /* Make sure the above parameters are set before update */
state->pending.dirty = true;
}
@@ -154,7 +167,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type)
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations)
{
int err;
@@ -166,6 +180,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ if (supported_rotations & ~DRM_MODE_ROTATE_0) {
+ err = drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+ if (err)
+ DRM_INFO("Create rotation property failed\n");
+ }
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
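A useful property behind the rotation plumbing: rotating by 180 degrees equals reflecting both axes, so when mtk_ovl_layer_check() simplifies a request against ROTATE_0 | REFLECT_X | REFLECT_Y, a ROTATE_180 state is rewritten into pure reflections that the flip-only hardware can honor. In the DRM bit encoding (ROTATE_0 is BIT(0), ROTATE_180 BIT(2), REFLECT_X BIT(4), REFLECT_Y BIT(5)):

#include <assert.h>

#define ROTATE_0        (1u << 0)       /* DRM_MODE_ROTATE_0 */
#define ROTATE_180      (1u << 2)       /* DRM_MODE_ROTATE_180 */
#define REFLECT_X       (1u << 4)       /* DRM_MODE_REFLECT_X */
#define REFLECT_Y       (1u << 5)       /* DRM_MODE_REFLECT_Y */

int main(void)
{
        /* mtk_ovl_supported_rotations() advertises these four bits. */
        assert((ROTATE_0 | ROTATE_180 | REFLECT_X | REFLECT_Y) == 0x35);

        /* drm_rotation_simplify() turns ROTATE_180 into ROTATE_0 plus both
         * reflections; masking off ROTATE_0 leaves the two flip bits. */
        unsigned int simplified = ROTATE_0 | REFLECT_X | REFLECT_Y;

        assert((simplified & ~ROTATE_0) == (REFLECT_X | REFLECT_Y));
        return 0;
}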
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 6f842df722c7..760885e35b27 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -20,6 +20,7 @@ struct mtk_plane_pending_state {
unsigned int y;
unsigned int width;
unsigned int height;
+ unsigned int rotation;
bool dirty;
};
@@ -35,6 +36,7 @@ to_mtk_plane_state(struct drm_plane_state *state)
}
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type);
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 224afb666881..e9931bbbe846 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -16,6 +16,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -39,6 +40,7 @@
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
+#define DPHY_RESET BIT(2)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
@@ -72,6 +74,7 @@
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
+#define DSI_SIZE_CON 0x38
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@@ -125,7 +128,10 @@
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_CMDQ0 0x180
+#define DSI_SHADOW_DEBUG 0x190U
+#define FORCE_COMMIT BIT(0)
+#define BYPASS_SHADOW BIT(1)
+
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
@@ -134,12 +140,6 @@
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
-#define T_LPX 5
-#define T_HS_PREP 6
-#define T_HS_TRAIL 8
-#define T_HS_EXIT 7
-#define T_HS_ZERO 10
-
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
#define MTK_DSI_HOST_IS_READ(type) \
@@ -148,8 +148,33 @@
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
+struct mtk_phy_timing {
+ u32 lpx;
+ u32 da_hs_prepare;
+ u32 da_hs_zero;
+ u32 da_hs_trail;
+
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+ u32 da_hs_exit;
+
+ u32 clk_hs_zero;
+ u32 clk_hs_trail;
+
+ u32 clk_hs_prepare;
+ u32 clk_hs_post;
+ u32 clk_hs_exit;
+};
+
struct phy;
+struct mtk_dsi_driver_data {
+ const u32 reg_cmdq_off;
+ bool has_shadow_ctl;
+ bool has_size_ctl;
+};
+
struct mtk_dsi {
struct mtk_ddp_comp ddp_comp;
struct device *dev;
@@ -172,10 +197,12 @@ struct mtk_dsi {
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct videomode vm;
+ struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
+ const struct mtk_dsi_driver_data *driver_data;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -204,17 +231,36 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
+
+ ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
+ cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+
+ timing->lpx = NS_TO_CYCLE(60, cycle_time);
+ timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
+ timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
+ timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+
+ timing->ta_go = 4 * timing->lpx;
+ timing->ta_sure = 3 * timing->lpx / 2;
+ timing->ta_get = 5 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx;
- ui = 1000 / dsi->data_rate + 0x01;
- cycle_time = 8000 / dsi->data_rate + 0x01;
+ timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
+ timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
- timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
- timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
- T_HS_EXIT << 24;
- timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
- (NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
- NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
+ timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
+ timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
+ timing->clk_hs_exit = 2 * timing->lpx;
+
+ timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
+ timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
+ timcon1 = timing->ta_go | timing->ta_sure << 8 |
+ timing->ta_get << 16 | timing->da_hs_exit << 24;
+ timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
+ timing->clk_hs_trail << 24;
+ timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
+ timing->clk_hs_exit << 16;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
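To make the new timing math concrete: NS_TO_CYCLE() is a ceiling division, ui is the bit period in nanoseconds and cycle_time the byte-clock period. For an assumed 500 Mbit/s per-lane rate, ui = 2 ns and cycle_time = 16 ns, giving lpx = 4 and da_hs_prepare = 4 byte clocks:

#include <assert.h>
#include <stdint.h>

#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))

int main(void)
{
        uint64_t data_rate = 500000000;         /* assumed 500 Mbit/s per lane */
        uint32_t ui = (1000000000 + data_rate - 1) / data_rate;  /* 2 ns */
        uint32_t cycle_time = 8000000000ULL / data_rate;         /* 16 ns */

        assert(NS_TO_CYCLE(60, cycle_time) == 4);               /* lpx */
        assert(NS_TO_CYCLE(50 + 5 * ui, cycle_time) == 4);      /* da_hs_prepare */
        assert(NS_TO_CYCLE(110 + 6 * ui, cycle_time) == 8);     /* da_hs_zero */
        assert(NS_TO_CYCLE(77 + 4 * ui, cycle_time) == 6);      /* da_hs_trail */
        return 0;
}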
@@ -238,6 +284,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
+static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
+}
+
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
@@ -401,7 +453,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
- u32 dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -415,6 +468,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+ if (dsi->driver_data->has_size_ctl)
+ writel(vm->vactive << 16 | vm->hactive,
+ dsi->regs + DSI_SIZE_CON);
+
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -424,7 +481,34 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
dsi_tmp_buf_bpp - 10);
- horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+ data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+ timing->da_hs_zero + timing->da_hs_exit + 2;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 18) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 18;
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ } else {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 12) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 12;
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ }
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
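Continuing the 500 Mbit/s example, data_phy_cycles = 4 + 4 + 8 + 8 + 2 = 26, so on an assumed 4-lane RGB888 non-burst panel with a 44-pixel front porch the HFP word shrinks from 132 bytes to 132 - 26 * 4 - 12 = 16; a tighter porch would instead take the DRM_WARN branch and keep the full porch at the cost of refresh rate:

#include <stdio.h>

int main(void)
{
        unsigned int data_phy_cycles = 26;      /* from the timing example above */
        unsigned int lanes = 4, bpp = 3;        /* assumed panel config */
        unsigned int hfp_bytes = 44 * bpp;      /* assumed 44-pixel porch: 132 */
        unsigned int overhead = data_phy_cycles * lanes + 12;   /* non-burst */

        if (hfp_bytes > overhead)
                printf("HFP_WC = %u\n", hfp_bytes - overhead);  /* 16 */
        else
                printf("HFP_WC = %u (too small, expect < 60 fps)\n", hfp_bytes);
        return 0;
}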
@@ -522,10 +606,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
- struct device *dev = dsi->dev;
+ struct device *dev = dsi->host.dev;
int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+ u32 bit_per_pixel;
if (++dsi->refcount != 1)
return 0;
@@ -544,24 +627,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
break;
}
- /**
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
+ dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
+ dsi->lanes);
ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
@@ -584,10 +651,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
}
mtk_dsi_enable(dsi);
+
+ if (dsi->driver_data->has_shadow_ctl)
+ writel(FORCE_COMMIT | BYPASS_SHADOW,
+ dsi->regs + DSI_SHADOW_DEBUG);
+
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
@@ -938,6 +1012,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
const char *tx_buf = msg->tx_buf;
u8 config, cmdq_size, cmdq_off, type = msg->type;
u32 reg_val, cmdq_mask, i;
+ u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
if (MTK_DSI_HOST_IS_READ(type))
config = BTA;
@@ -957,9 +1032,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
}
for (i = 0; i < msg->tx_len; i++)
- writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+ mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
+ (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
+ tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
- mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
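The rewritten TX loop replaces per-byte writeb() with a read-modify-write of the containing 32-bit register, which is what lets reg_cmdq_off float (0x180 on MT2701, 0x200 on MT8173/MT8183 per the match data below). For payload byte i at queue offset cmdq_off, the word address is (reg_cmdq_off + cmdq_off + i) & ~3 and the byte lane is ((i + cmdq_off) & 3) * 8:

#include <assert.h>

int main(void)
{
        unsigned int reg_cmdq_off = 0x200;      /* MT8183-style queue base */
        unsigned int cmdq_off = 1, i = 4;       /* assumed long-packet byte */

        unsigned int word = (reg_cmdq_off + cmdq_off + i) & ~0x3u;
        unsigned int lane = ((i + cmdq_off) & 3u) * 8;

        assert(word == 0x204 && lane == 8);
        return 0;
}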
@@ -1049,12 +1126,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = mipi_dsi_host_register(&dsi->host);
- if (ret < 0) {
- dev_err(dev, "failed to register DSI host: %d\n", ret);
- goto err_ddp_comp_unregister;
- }
-
ret = mtk_dsi_create_conn_enc(drm, dsi);
if (ret) {
DRM_ERROR("Encoder create failed with %d\n", ret);
@@ -1064,8 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_unregister:
- mipi_dsi_host_unregister(&dsi->host);
-err_ddp_comp_unregister:
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
@@ -1077,7 +1146,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_destroy_conn_enc(dsi);
- mipi_dsi_host_unregister(&dsi->host);
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
@@ -1101,31 +1169,38 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ return ret;
+ }
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&dsi->panel, &dsi->bridge);
if (ret)
- return ret;
+ goto err_unregister_host;
+
+ dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
dev_err(dev, "Failed to get engine clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
dev_err(dev, "Failed to get digital clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1133,33 +1208,35 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ ret = comp_id;
+ goto err_unregister_host;
}
ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
&mtk_dsi_funcs);
if (ret) {
dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
- return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
+ ret = irq_num;
+ goto err_unregister_host;
}
irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -1167,14 +1244,24 @@ static int mtk_dsi_probe(struct platform_device *pdev)
IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
- return -EPROBE_DEFER;
+ goto err_unregister_host;
}
init_waitqueue_head(&dsi->irq_wait_queue);
platform_set_drvdata(pdev, dsi);
- return component_add(&pdev->dev, &mtk_dsi_component_ops);
+ ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add component: %d\n", ret);
+ goto err_unregister_host;
+ }
+
+ return 0;
+
+err_unregister_host:
+ mipi_dsi_host_unregister(&dsi->host);
+ return ret;
}
static int mtk_dsi_remove(struct platform_device *pdev)
@@ -1183,13 +1270,32 @@ static int mtk_dsi_remove(struct platform_device *pdev)
mtk_output_dsi_disable(dsi);
component_del(&pdev->dev, &mtk_dsi_component_ops);
+ mipi_dsi_host_unregister(&dsi->host);
return 0;
}
+static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+};
+
+static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+ .reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+ .has_shadow_ctl = true,
+ .has_size_ctl = true,
+};
+
static const struct of_device_id mtk_dsi_of_match[] = {
- { .compatible = "mediatek,mt2701-dsi" },
- { .compatible = "mediatek,mt8173-dsi" },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = &mt2701_dsi_driver_data },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = &mt8173_dsi_driver_data },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = &mt8183_dsi_driver_data },
{ },
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ce91b61364eb..c79b1f855d89 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -23,6 +23,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1842dc2caae9..e4d34484ecc8 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -3,292 +3,39 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-
-#define MIPITX_DSI_CON 0x00
-#define RG_DSI_LDOCORE_EN BIT(0)
-#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
-#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
-#define RG_DSI_LPTX_CLMP_EN BIT(11)
-
-#define MIPITX_DSI_CLOCK_LANE 0x04
-#define MIPITX_DSI_DATA_LANE0 0x08
-#define MIPITX_DSI_DATA_LANE1 0x0c
-#define MIPITX_DSI_DATA_LANE2 0x10
-#define MIPITX_DSI_DATA_LANE3 0x14
-#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
-#define RG_DSI_LNTx_CKLANE_EN BIT(1)
-#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
-#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
-#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
-#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
-#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
-
-#define MIPITX_DSI_TOP_CON 0x40
-#define RG_DSI_LNT_INTR_EN BIT(0)
-#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
-#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
-#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
-#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
-#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
-
-#define MIPITX_DSI_BG_CON 0x44
-#define RG_DSI_BG_CORE_EN BIT(0)
-#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
-#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
-
-#define MIPITX_DSI_PLL_CON0 0x50
-#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
-#define RG_DSI_MPPLL_MONVC_EN BIT(10)
-#define RG_DSI_MPPLL_MONREF_EN BIT(11)
-#define RG_DSI_MPPLL_VOD_EN BIT(12)
-
-#define MIPITX_DSI_PLL_CON1 0x54
-#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
-#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
-#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
-
-#define MIPITX_DSI_PLL_CON2 0x58
-
-#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
-
-#define MIPITX_DSI_PLL_PWR 0x68
-#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
-#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
-#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
-
-#define MIPITX_DSI_SW_CTRL 0x80
-#define SW_CTRL_EN BIT(0)
-
-#define MIPITX_DSI_SW_CTRL_CON0 0x84
-#define SW_LNTC_LPTX_PRE_OE BIT(0)
-#define SW_LNTC_LPTX_OE BIT(1)
-#define SW_LNTC_LPTX_P BIT(2)
-#define SW_LNTC_LPTX_N BIT(3)
-#define SW_LNTC_HSTX_PRE_OE BIT(4)
-#define SW_LNTC_HSTX_OE BIT(5)
-#define SW_LNTC_HSTX_ZEROCLK BIT(6)
-#define SW_LNT0_LPTX_PRE_OE BIT(7)
-#define SW_LNT0_LPTX_OE BIT(8)
-#define SW_LNT0_LPTX_P BIT(9)
-#define SW_LNT0_LPTX_N BIT(10)
-#define SW_LNT0_HSTX_PRE_OE BIT(11)
-#define SW_LNT0_HSTX_OE BIT(12)
-#define SW_LNT0_LPRX_EN BIT(13)
-#define SW_LNT1_LPTX_PRE_OE BIT(14)
-#define SW_LNT1_LPTX_OE BIT(15)
-#define SW_LNT1_LPTX_P BIT(16)
-#define SW_LNT1_LPTX_N BIT(17)
-#define SW_LNT1_HSTX_PRE_OE BIT(18)
-#define SW_LNT1_HSTX_OE BIT(19)
-#define SW_LNT2_LPTX_PRE_OE BIT(20)
-#define SW_LNT2_LPTX_OE BIT(21)
-#define SW_LNT2_LPTX_P BIT(22)
-#define SW_LNT2_LPTX_N BIT(23)
-#define SW_LNT2_HSTX_PRE_OE BIT(24)
-#define SW_LNT2_HSTX_OE BIT(25)
-
-struct mtk_mipitx_data {
- const u32 mppll_preserve;
-};
-
-struct mtk_mipi_tx {
- struct device *dev;
- void __iomem *regs;
- u32 data_rate;
- const struct mtk_mipitx_data *driver_data;
- struct clk_hw pll_hw;
- struct clk *pll;
-};
+#include "mtk_mipi_tx.h"
-static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
{
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp & ~bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp | bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 mask, u32 data)
{
u32 temp = readl(mipi_tx->regs + offset);
writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
}
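
A quick worked example of the read-modify-write semantics these helpers share (made-up values, illustration only): with an old register value of 0xABCD, update_bits with mask 0x0700 and data 0x0200 writes back

	u32 old = 0xABCD, mask = 0x0700, data = 0x0200;
	u32 val = (old & ~mask) | (data & mask);	/* = 0xAACD */

so only the three bits covered by the mask change; set_bits and clear_bits are the degenerate cases.
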
-static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- u8 txdiv, txdiv0, txdiv1;
- u64 pcw;
-
- dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
-
- if (mipi_tx->data_rate >= 500000000) {
- txdiv = 1;
- txdiv0 = 0;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 250000000) {
- txdiv = 2;
- txdiv0 = 1;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 125000000) {
- txdiv = 4;
- txdiv0 = 2;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate > 62000000) {
- txdiv = 8;
- txdiv0 = 2;
- txdiv1 = 1;
- } else if (mipi_tx->data_rate >= 50000000) {
- txdiv = 16;
- txdiv0 = 2;
- txdiv1 = 2;
- } else {
- return -EINVAL;
- }
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- usleep_range(30, 100);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
-
- /*
- * PLL PCW config
- * PCW bit 24~30 = integer part of pcw
- * PCW bit 0~23 = fractional part of pcw
- * pcw = data_Rate*4*txdiv/(Ref_clk*2);
- * Post DIV =4, so need data_Rate*4
- * Ref_clk is 26MHz
- */
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
-
- usleep_range(20, 100);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
-
- return 0;
-}
-
-static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-
- dev_dbg(mipi_tx->dev, "unprepare\n");
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
-}
-
-static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- return clamp_val(rate, 50000000, 1250000000);
-}
-
-static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
@@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
return mipi_tx->data_rate;
}
-static const struct clk_ops mtk_mipi_tx_pll_ops = {
- .prepare = mtk_mipi_tx_pll_prepare,
- .unprepare = mtk_mipi_tx_pll_unprepare,
- .round_rate = mtk_mipi_tx_pll_round_rate,
- .set_rate = mtk_mipi_tx_pll_set_rate,
- .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
-};
-
-static int mtk_mipi_tx_power_on_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- return 0;
-}
-
static int mtk_mipi_tx_power_on(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
return ret;
/* Enable DSI Lane LDO outputs, disable pad tie low */
- mtk_mipi_tx_power_on_signal(phy);
-
+ mipi_tx->driver_data->mipi_tx_enable_signal(phy);
return 0;
}
-static void mtk_mipi_tx_power_off_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-}
-
static int mtk_mipi_tx_power_off(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
/* Enable pad tie low, disable DSI Lane LDO outputs */
- mtk_mipi_tx_power_off_signal(phy);
+ mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
clk_disable_unprepare(mipi_tx->pll);
@@ -383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_mipi_tx *mipi_tx;
struct resource *mem;
- struct clk *ref_clk;
const char *ref_clk_name;
+ struct clk *ref_clk;
struct clk_init_data clk_init = {
- .ops = &mtk_mipi_tx_pll_ops,
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
.flags = CLK_SET_RATE_GATE,
@@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get reference clock: %d\n", ret);
return ret;
}
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
+
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
if (IS_ERR(mipi_tx->pll)) {
@@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
-static const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8)
-};
-
-static const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8)
-};
-
static const struct of_device_id mtk_mipi_tx_match[] = {
{ .compatible = "mediatek,mt2701-mipi-tx",
.data = &mt2701_mipitx_data },
{ .compatible = "mediatek,mt8173-mipi-tx",
.data = &mt8173_mipitx_data },
- {},
+ { .compatible = "mediatek,mt8183-mipi-tx",
+ .data = &mt8183_mipitx_data },
+ { },
};
struct platform_driver mtk_mipi_tx_driver = {
@@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = {
.of_match_table = mtk_mipi_tx_match,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
new file mode 100644
index 000000000000..413f35d86219
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#ifndef _MTK_MIPI_TX_H
+#define _MTK_MIPI_TX_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+ const struct clk_ops *mipi_tx_clk_ops;
+ void (*mipi_tx_enable_signal)(struct phy *phy);
+ void (*mipi_tx_disable_signal)(struct phy *phy);
+};
+
+struct mtk_mipi_tx {
+ struct device *dev;
+ void __iomem *regs;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+};
+
+struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
+ u32 data);
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+extern const struct mtk_mipitx_data mt2701_mipitx_data;
+extern const struct mtk_mipitx_data mt8173_mipitx_data;
+extern const struct mtk_mipitx_data mt8183_mipitx_data;
+
+#endif
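
The new header reduces every per-SoC difference to one ops/data table, so the shared probe and PHY code never name a SoC directly. A rough sketch of how a further SoC would plug in (the mtxxxx names are placeholders, not a real MediaTek part):

	/* hypothetical per-SoC file, mirroring mtk_mt8183_mipi_tx.c */
	#include "mtk_mipi_tx.h"

	static const struct clk_ops mtxxxx_mipi_tx_pll_ops = {
		/* SoC-specific prepare/enable callbacks go here */
		.set_rate = mtk_mipi_tx_pll_set_rate,
		.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
	};

	static void mtxxxx_power_on_signal(struct phy *phy) { }
	static void mtxxxx_power_off_signal(struct phy *phy) { }

	const struct mtk_mipitx_data mtxxxx_mipitx_data = {
		.mipi_tx_clk_ops = &mtxxxx_mipi_tx_pll_ops,
		.mipi_tx_enable_signal = mtxxxx_power_on_signal,
		.mipi_tx_disable_signal = mtxxxx_power_off_signal,
	};

The only other change needed is a mediatek,mtxxxx-mipi-tx entry in mtk_mipi_tx_match pointing at the new data.
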
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
new file mode 100644
index 000000000000..f18db14d8b63
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_DSI_CON 0x00
+#define RG_DSI_LDOCORE_EN BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN BIT(1)
+#define RG_DSI_BCLK_SEL (3 << 2)
+#define RG_DSI_LD_IDX_SEL (7 << 4)
+#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
+#define RG_DSI_LPTX_CLMP_EN BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE 0x04
+#define MIPITX_DSI_DATA_LANE0 0x08
+#define MIPITX_DSI_DATA_LANE1 0x0c
+#define MIPITX_DSI_DATA_LANE2 0x10
+#define MIPITX_DSI_DATA_LANE3 0x14
+#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
+#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+
+#define MIPITX_DSI_TOP_CON 0x40
+#define RG_DSI_LNT_INTR_EN BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
+#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN BIT(12)
+#define RG_DSI_PRESERVE (7 << 13)
+
+#define MIPITX_DSI_BG_CON 0x44
+#define RG_DSI_BG_CORE_EN BIT(0)
+#define RG_DSI_BG_CKEN BIT(1)
+#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE BIT(4)
+#define RG_DSI_VOUT_MSK (0x3ffff << 5)
+#define RG_DSI_V12_SEL (7 << 5)
+#define RG_DSI_V10_SEL (7 << 8)
+#define RG_DSI_V072_SEL (7 << 11)
+#define RG_DSI_V04_SEL (7 << 14)
+#define RG_DSI_V032_SEL (7 << 17)
+#define RG_DSI_V02_SEL (7 << 20)
+#define RG_DSI_BG_R1_TRIM (0xf << 24)
+#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0 0x50
+#define RG_DSI_MPPLL_PLL_EN BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV (3 << 1)
+#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
+#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
+#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN BIT(11)
+#define RG_DSI_MPPLL_VOD_EN BIT(12)
+
+#define MIPITX_DSI_PLL_CON1 0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2 0x58
+
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
+#define MIPITX_DSI_PLL_PWR 0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
+
+#define MIPITX_DSI_SW_CTRL 0x80
+#define SW_CTRL_EN BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0 0x84
+#define SW_LNTC_LPTX_PRE_OE BIT(0)
+#define SW_LNTC_LPTX_OE BIT(1)
+#define SW_LNTC_LPTX_P BIT(2)
+#define SW_LNTC_LPTX_N BIT(3)
+#define SW_LNTC_HSTX_PRE_OE BIT(4)
+#define SW_LNTC_HSTX_OE BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK BIT(6)
+#define SW_LNT0_LPTX_PRE_OE BIT(7)
+#define SW_LNT0_LPTX_OE BIT(8)
+#define SW_LNT0_LPTX_P BIT(9)
+#define SW_LNT0_LPTX_N BIT(10)
+#define SW_LNT0_HSTX_PRE_OE BIT(11)
+#define SW_LNT0_HSTX_OE BIT(12)
+#define SW_LNT0_LPRX_EN BIT(13)
+#define SW_LNT1_LPTX_PRE_OE BIT(14)
+#define SW_LNT1_LPTX_OE BIT(15)
+#define SW_LNT1_LPTX_P BIT(16)
+#define SW_LNT1_LPTX_N BIT(17)
+#define SW_LNT1_HSTX_PRE_OE BIT(18)
+#define SW_LNT1_HSTX_OE BIT(19)
+#define SW_LNT2_LPTX_PRE_OE BIT(20)
+#define SW_LNT2_LPTX_OE BIT(21)
+#define SW_LNT2_LPTX_P BIT(22)
+#define SW_LNT2_LPTX_N BIT(23)
+#define SW_LNT2_HSTX_PRE_OE BIT(24)
+#define SW_LNT2_HSTX_OE BIT(25)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ u8 txdiv, txdiv0, txdiv1;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 250000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate > 62000000) {
+ txdiv = 8;
+ txdiv0 = 2;
+ txdiv1 = 1;
+ } else if (mipi_tx->data_rate >= 50000000) {
+ txdiv = 16;
+ txdiv0 = 2;
+ txdiv1 = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+ (4 << 20) | (4 << 17) | (4 << 14) |
+ (4 << 11) | (4 << 8) | (4 << 5) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ usleep_range(30, 100);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON |
+ RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ (txdiv0 << 3) | (txdiv1 << 5));
+
+	/*
+	 * PLL PCW config
+	 * PCW bits 24..30: integer part of pcw
+	 * PCW bits 0..23:  fractional part of pcw
+	 * pcw = data_rate * 4 * txdiv / (ref_clk * 2)
+	 * The post divider is fixed at 4, hence the data_rate * 4;
+	 * ref_clk is 26 MHz.
+	 */
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+ 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_FRA_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+ usleep_range(20, 100);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_SSC_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
+ return 0;
+}
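
The value written to MIPITX_DSI_PLL_CON2 is thus a 7.24 fixed-point multiplier against the 26 MHz reference. A standalone numeric check (an illustration, not driver code): at data_rate = 250 MHz the table above picks txdiv = 2, so pcw = 250e6 * 2 * 2 / 26e6 = 38.4615..., i.e. integer part 38 in bits 24..30 and fraction 0.4615 * 2^24 = 7743330 in bits 0..23.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t data_rate = 250000000, txdiv = 2; /* from the table above */
		uint64_t pcw = (data_rate * 2 * txdiv << 24) / 26000000;

		/* prints: pcw=645277538 int=38 frac=7743330 */
		printf("pcw=%llu int=%llu frac=%llu\n",
		       (unsigned long long)pcw,
		       (unsigned long long)(pcw >> 24),
		       (unsigned long long)(pcw & 0xffffff));
		return 0;
	}
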
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "unprepare\n");
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN |
+ RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1250000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .prepare = mtk_mipi_tx_pll_prepare,
+ .unprepare = mtk_mipi_tx_pll_unprepare,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
new file mode 100644
index 000000000000..91f08a351fd0
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_LANE_CON 0x000c
+#define RG_DSI_CPHY_T1DRV_EN BIT(0)
+#define RG_DSI_ANA_CK_SEL BIT(1)
+#define RG_DSI_PHY_CK_SEL BIT(2)
+#define RG_DSI_CPHY_EN BIT(3)
+#define RG_DSI_PHYCK_INV_EN BIT(4)
+#define RG_DSI_PWR04_EN BIT(5)
+#define RG_DSI_BG_LPF_EN BIT(6)
+#define RG_DSI_BG_CORE_EN BIT(7)
+#define RG_DSI_PAD_TIEL_SEL BIT(8)
+
+#define MIPITX_PLL_PWR 0x0028
+#define MIPITX_PLL_CON0 0x002c
+#define MIPITX_PLL_CON1 0x0030
+#define MIPITX_PLL_CON2 0x0034
+#define MIPITX_PLL_CON3 0x0038
+#define MIPITX_PLL_CON4 0x003c
+#define RG_DSI_PLL_IBIAS (3 << 10)
+
+#define MIPITX_D2_SW_CTL_EN 0x0144
+#define MIPITX_D0_SW_CTL_EN 0x0244
+#define MIPITX_CK_CKMODE_EN 0x0328
+#define DSI_CK_CKMODE_EN BIT(0)
+#define MIPITX_CK_SW_CTL_EN 0x0344
+#define MIPITX_D1_SW_CTL_EN 0x0444
+#define MIPITX_D3_SW_CTL_EN 0x0544
+#define DSI_SW_CTL_EN BIT(0)
+#define AD_DSI_PLL_SDM_PWR_ON BIT(0)
+#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
+
+#define RG_DSI_PLL_EN BIT(4)
+#define RG_DSI_PLL_POSDIV (0x7 << 8)
+
+static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ unsigned int txdiv, txdiv0;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 2000000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ } else if (mipi_tx->data_rate >= 1000000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ } else if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ } else if (mipi_tx->data_rate > 250000000) {
+ txdiv = 8;
+ txdiv0 = 3;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 16;
+ txdiv0 = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ udelay(1);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
+ txdiv0 << 8);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ return 0;
+}
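
Unlike the MT2701/MT8173 ladder, the MT8183 divider table is a pure power-of-two sequence, so txdiv0 is simply log2(txdiv) and goes straight into RG_DSI_PLL_POSDIV; note the PCW formula above also drops the extra factor of two used on the older SoCs. A compact equivalent of the selection logic (a sketch; the original uses a strict '>' only at the 250 MHz boundary, which this version treats as inclusive):

	static int mt8183_pick_txdiv(unsigned long data_rate,
				     unsigned int *txdiv, unsigned int *txdiv0)
	{
		unsigned int div0;

		for (div0 = 0; div0 <= 4; div0++) {
			if (data_rate >= (2000000000UL >> div0)) {
				*txdiv = 1U << div0;	/* 1, 2, 4, 8, 16 */
				*txdiv0 = div0;		/* log2(txdiv) */
				return 0;
			}
		}
		return -EINVAL;	/* below the 125 MHz floor */
	}
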
+
+static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1600000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .enable = mtk_mipi_tx_pll_enable,
+ .disable = mtk_mipi_tx_pll_disable,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* BG_LPF_EN / BG_CORE_EN */
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ usleep_range(30, 100);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+
+ /* Switch OFF each Lane */
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* Switch ON each Lane */
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+}
+
+const struct mtk_mipitx_data mt8183_mipitx_data = {
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index a24f8dec5adc..397c33182f4f 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -372,6 +372,33 @@ static const struct component_master_ops meson_drv_master_ops = {
.unbind = meson_drv_unbind,
};
+static int __maybe_unused meson_drv_pm_suspend(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int __maybe_unused meson_drv_pm_resume(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ meson_vpu_init(priv);
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
+
+ drm_mode_config_helper_resume(priv->drm);
+
+ return 0;
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -467,11 +494,16 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops meson_drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume)
+};
+
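
The __maybe_unused annotations on the two callbacks pair with this macro: when CONFIG_PM_SLEEP is disabled, SET_SYSTEM_SLEEP_PM_OPS expands to nothing and the functions would otherwise be defined but unreferenced, tripping -Wunused-function. Roughly (simplified from include/linux/pm.h, which also wires up the freeze/thaw/poweroff/restore hooks):

	#ifdef CONFIG_PM_SLEEP
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
		.suspend = suspend_fn, \
		.resume = resume_fn,
	#else
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
	#endif
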
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
+ .pm = &meson_drv_pm_ops,
},
};
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 68bbd987147b..3bb7ffe5fc39 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -802,6 +802,47 @@ static bool meson_hdmi_connector_is_available(struct device *dev)
return false;
}
+static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+{
+ struct meson_drm *priv = meson_dw_hdmi->priv;
+
+ /* Enable clocks */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+
+	/* Bring HDMITX MEM output out of power down */
+ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+
+ /* Reset HDMITX APB & TX & PHY */
+ reset_control_reset(meson_dw_hdmi->hdmitx_apb);
+ reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
+ reset_control_reset(meson_dw_hdmi->hdmitx_phy);
+
+ /* Enable APB3 fail on error */
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
+
+ /* Bring out of reset */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ msleep(20);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
+
+ /* Enable HDMI-TX Interrupt */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
+}
+
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -925,40 +966,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
DRM_DEBUG_DRIVER("encoder initialized\n");
- /* Enable clocks */
- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
-
- /* Bring HDMITX MEM output of power down */
- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
-
- /* Reset HDMITX APB & TX & PHY */
- reset_control_reset(meson_dw_hdmi->hdmitx_apb);
- reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
- reset_control_reset(meson_dw_hdmi->hdmitx_phy);
-
- /* Enable APB3 fail on error */
- if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
- }
-
- /* Bring out of reset */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_SW_RESET, 0);
-
- msleep(20);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_CLK_CNTL, 0xff);
-
- /* Enable HDMI-TX Interrupt */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi_init(meson_dw_hdmi);
/* Bridge / Connector */
@@ -969,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+ dw_plat_data->use_drm_infoframe = true;
+
platform_set_drvdata(pdev, meson_dw_hdmi);
meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
@@ -994,6 +1007,34 @@ static const struct component_ops meson_dw_hdmi_ops = {
.unbind = meson_dw_hdmi_unbind,
};
+static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ /* Reset TOP */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ return 0;
+}
+
+static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ meson_dw_hdmi_init(meson_dw_hdmi);
+
+ dw_hdmi_resume(meson_dw_hdmi->hdmi);
+
+ return 0;
+}
+
static int meson_dw_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &meson_dw_hdmi_ops);
@@ -1006,6 +1047,11 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend,
+ meson_dw_hdmi_pm_resume)
+};
+
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
@@ -1025,6 +1071,7 @@ static struct platform_driver meson_dw_hdmi_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
+ .pm = &meson_dw_hdmi_pm_ops,
},
};
module_platform_driver(meson_dw_hdmi_platform_driver);
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ac491a781952..f690793ae2d5 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -638,13 +638,18 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
if (frac >= HDMI_FRAC_MAX_GXL)
return false;
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_G12A)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 76fee0fbdcae..aed11f4f4c55 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -4,6 +4,8 @@ config DRM_MGAG200
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
This is a KMS driver for the MGA G200 server chips, it
does not support the original MGA G200 or any of the desktop
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 289ce3e29032..79711dbb5b03 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -12,35 +12,10 @@
static bool warn_transparent = true;
static bool warn_palette = true;
-/*
- Hide the cursor off screen. We can't disable the cursor hardware because it
- takes too long to re-activate and causes momentary corruption
-*/
-static void mga_hide_cursor(struct mga_device *mdev)
-{
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- if (mdev->cursor.pixels_current)
- drm_gem_vram_unpin(mdev->cursor.pixels_current);
- mdev->cursor.pixels_current = NULL;
-}
-
-int mga_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
+static int mgag200_cursor_update(struct mga_device *mdev, void *dst, void *src,
+ unsigned int width, unsigned int height)
{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
- struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1;
- struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2;
- struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current;
- struct drm_gem_vram_object *pixels_next;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo = NULL;
- int ret = 0;
- u8 *src, *dst;
+ struct drm_device *dev = mdev->dev;
unsigned int i, row, col;
uint32_t colour_set[16];
uint32_t *next_space = &colour_set[0];
@@ -48,79 +23,9 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t this_colour;
bool found = false;
int colour_count = 0;
- s64 gpu_addr;
- u64 dst_gpu;
u8 reg_index;
u8 this_row[48];
- if (!pixels_1 || !pixels_2) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -ENOTSUPP; /* Didn't allocate space for cursors */
- }
-
- if (WARN_ON(pixels_current &&
- pixels_1 != pixels_current &&
- pixels_2 != pixels_current)) {
- return -ENOTSUPP; /* inconsistent state */
- }
-
- if (!handle || !file_priv) {
- mga_hide_cursor(mdev);
- return 0;
- }
-
- if (width != 64 || height != 64) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -EINVAL;
- }
-
- if (pixels_current == pixels_1)
- pixels_next = pixels_2;
- else
- pixels_next = pixels_1;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj)
- return -ENOENT;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to lock user bo\n");
- goto err_drm_gem_object_put_unlocked;
- }
- src = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- dev_err(&dev->pdev->dev,
- "failed to kmap user buffer updates\n");
- goto err_drm_gem_vram_unpin_src;
- }
-
- /* Pin and map up-coming buffer to write colour indices */
- ret = drm_gem_vram_pin(pixels_next, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- dev_err(&dev->pdev->dev,
- "failed to pin cursor buffer: %d\n", ret);
- goto err_drm_gem_vram_kunmap_src;
- }
- dst = drm_gem_vram_kmap(pixels_next, true, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- dev_err(&dev->pdev->dev,
- "failed to kmap cursor updates: %d\n", ret);
- goto err_drm_gem_vram_unpin_dst;
- }
- gpu_addr = drm_gem_vram_offset(pixels_next);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
- dev_err(&dev->pdev->dev,
- "failed to get cursor scanout address: %d\n", ret);
- goto err_drm_gem_vram_kunmap_dst;
- }
- dst_gpu = (u64)gpu_addr;
-
memset(&colour_set[0], 0, sizeof(uint32_t)*16);
/* width*height*4 = 16384 */
for (i = 0; i < 16384; i += 4) {
@@ -133,8 +38,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_transparent = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
/* Don't need to store transparent pixels as colours */
if (this_colour>>24 == 0x0)
@@ -155,8 +59,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_palette = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
*next_space = this_colour;
next_space++;
@@ -200,54 +103,218 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
memcpy_toio(dst + row*48, &this_row[0], 48);
}
+ return 0;
+}
+
+static void mgag200_cursor_set_base(struct mga_device *mdev, u64 address)
+{
+ u8 addrl = (address >> 10) & 0xff;
+ u8 addrh = (address >> 18) & 0x3f;
+
/* Program gpu address of cursor buffer */
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((dst_gpu>>10) & 0xff));
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((dst_gpu>>18) & 0x3f));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, addrl);
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, addrh);
+}
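
The DAC takes the cursor base in 1 KiB granules: offset bits 10..17 land in the LOW register and bits 18..23 in HI, so the buffer must be 1 KiB aligned and sit in the first 16 MiB of VRAM. A tiny illustration with a made-up offset:

	u64 address = 0x40000;			/* 256 KiB into VRAM */
	u8 addrl = (address >> 10) & 0xff;	/* 0x00: offset bits 10..17 */
	u8 addrh = (address >> 18) & 0x3f;	/* 0x01: offset bits 18..23 */
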
+
+static int mgag200_show_cursor(struct mga_device *mdev, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct drm_device *dev = mdev->dev;
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+
+ gbo = mdev->cursor.gbo[mdev->cursor.next_index];
+ if (!gbo) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -ENOTSUPP; /* Didn't allocate space for cursors */
+ }
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ dev_err(&dev->pdev->dev,
+ "failed to map cursor updates: %d\n", ret);
+ return ret;
+ }
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ dev_err(&dev->pdev->dev,
+ "failed to get cursor scanout address: %d\n", ret);
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = mgag200_cursor_update(mdev, dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ mgag200_cursor_set_base(mdev, off);
/* Adjust cursor control register to turn on the cursor */
	WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palettized cursor mode */
- /* Now update internal buffer pointers */
- if (pixels_current)
- drm_gem_vram_unpin(pixels_current);
- mdev->cursor.pixels_current = pixels_next;
+ drm_gem_vram_vunmap(gbo, dst);
- drm_gem_vram_kunmap(pixels_next);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(obj);
+ ++mdev->cursor.next_index;
+ mdev->cursor.next_index %= ARRAY_SIZE(mdev->cursor.gbo);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
+
+/*
+ * Hide the cursor off screen. We can't disable the cursor hardware because
+ * it takes too long to re-activate and causes momentary corruption.
+ */
+static void mgag200_hide_cursor(struct mga_device *mdev)
+{
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+}
+
+static void mgag200_move_cursor(struct mga_device *mdev, int x, int y)
+{
+ if (WARN_ON(x <= 0))
+ return;
+ if (WARN_ON(y <= 0))
+ return;
+ if (WARN_ON(x & ~0xffff))
+ return;
+ if (WARN_ON(y & ~0xffff))
+ return;
+
+ WREG8(MGA_CURPOSXL, x & 0xff);
+ WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
+
+ WREG8(MGA_CURPOSYL, y & 0xff);
+ WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
+}
+
+int mgag200_cursor_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ size_t ncursors = ARRAY_SIZE(mdev->cursor.gbo);
+ size_t size;
+ int ret;
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ size = roundup(64 * 48, PAGE_SIZE);
+ if (size * ncursors > mdev->vram_fb_available)
+ return -ENOMEM;
+
+ for (i = 0; i < ncursors; ++i) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
+
+ mdev->cursor.gbo[i] = gbo;
+ }
+
+ /*
+ * At the high end of video memory, we reserve space for
+ * buffer objects. The cursor plane uses this memory to store
+ * a double-buffered image of the current cursor. Hence, it's
+ * not available for framebuffers.
+ */
+ mdev->vram_fb_available -= ncursors * size;
return 0;
-err_drm_gem_vram_kunmap_dst:
- drm_gem_vram_kunmap(pixels_next);
-err_drm_gem_vram_unpin_dst:
- drm_gem_vram_unpin(pixels_next);
-err_drm_gem_vram_kunmap_src:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin_src:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ mdev->cursor.gbo[i] = NULL;
+ }
+ return ret;
+}
+
+void mgag200_cursor_fini(struct mga_device *mdev)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(mdev->cursor.gbo); ++i) {
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
+}
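
For reference, the arithmetic behind the reservation (my numbers, assuming 4 KiB pages): each cursor image is 64 rows of 48 bytes (this_row[48] above), so size = roundup(64 * 48, PAGE_SIZE) = roundup(3072, 4096) = 4096 bytes, and the two buffers together take 8 KiB off vram_fb_available:

	size_t size = roundup(64 * 48, 4096);	/* 3072 -> 4096 bytes */
	size_t reserved = 2 * size;		/* 8192 bytes total */
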
+
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_gem_vram_object *gbo = NULL;
+ int ret;
+ u8 *src;
+
+ if (!handle || !file_priv) {
+ mgag200_hide_cursor(mdev);
+ return 0;
+ }
+
+ if (width != 64 || height != 64) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+ gbo = drm_gem_vram_of_gem(obj);
+ src = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ dev_err(&dev->pdev->dev,
+ "failed to map user buffer updates\n");
+ goto err_drm_gem_object_put_unlocked;
+ }
+
+ ret = mgag200_show_cursor(mdev, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+
+ /* Now update internal buffer pointers */
+ drm_gem_vram_vunmap(gbo, src);
+ drm_gem_object_put_unlocked(obj);
+
+ return 0;
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
}
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+
/* Our origin is at (64,64) */
x += 64;
y += 64;
- BUG_ON(x <= 0);
- BUG_ON(y <= 0);
- BUG_ON(x & ~0xffff);
- BUG_ON(y & ~0xffff);
+ mgag200_move_cursor(mdev, x, y);
- WREG8(MGA_CURPOSXL, x & 0xff);
- WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
-
- WREG8(MGA_CURPOSYL, y & 0xff);
- WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index afd9119b6cf1..397f8b0a9af8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb");
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -58,10 +58,7 @@ static void mga_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static const struct file_operations mgag200_driver_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 1c93f8dc08c7..0ea9a525e57d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -19,7 +19,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "mgag200_reg.h"
@@ -130,16 +129,8 @@ struct mga_connector {
};
struct mga_cursor {
- /*
- We have to have 2 buffers for the cursor to avoid occasional
- corruption while switching cursor icons.
- If either of these is NULL, then don't do hardware cursors, and
- fall back to software.
- */
- struct drm_gem_vram_object *pixels_1;
- struct drm_gem_vram_object *pixels_2;
- /* The currently displayed icon, this points to one of pixels_1, or pixels_2 */
- struct drm_gem_vram_object *pixels_current;
+ struct drm_gem_vram_object *gbo[2];
+ unsigned int next_index;
};
struct mga_mc {
@@ -174,6 +165,8 @@ struct mga_device {
struct mga_cursor cursor;
+ size_t vram_fb_available;
+
bool suspended;
int num_crtc;
enum mga_type type;
@@ -204,8 +197,10 @@ int mgag200_mm_init(struct mga_device *mdev);
void mgag200_mm_fini(struct mga_device *mdev);
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
-int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height);
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+int mgag200_cursor_init(struct mga_device *mdev);
+void mgag200_cursor_fini(struct mga_device *mdev);
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index a9773334dedf..5f74aabcd3df 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -159,7 +159,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
dev->mode_config.preferred_depth = 16;
else
dev->mode_config.preferred_depth = 32;
@@ -171,20 +171,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
goto err_modeset;
}
- /* Make small buffers to store a hardware cursor (double buffered icon updates) */
- mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) {
- mdev->cursor.pixels_1 = NULL;
- mdev->cursor.pixels_2 = NULL;
+ r = mgag200_cursor_init(mdev);
+ if (r)
dev_warn(&dev->pdev->dev,
- "Could not allocate space for cursors. Not doing hardware cursors.\n");
- }
- mdev->cursor.pixels_current = NULL;
+ "Could not initialize cursors. Not doing hardware cursors.\n");
r = drm_fbdev_generic_setup(mdev->dev, 0);
if (r)
@@ -194,6 +184,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
err_modeset:
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;
@@ -209,6 +200,7 @@ void mgag200_driver_unload(struct drm_device *dev)
return;
mgag200_modeset_fini(mdev);
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5e778b5f1a10..5ec697148fc1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1413,8 +1413,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
- .cursor_set = mga_crtc_cursor_set,
- .cursor_move = mga_crtc_cursor_move,
+ .cursor_set = mgag200_crtc_cursor_set,
+ .cursor_move = mgag200_crtc_cursor_move,
.gamma_set = mga_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = mga_crtc_destroy,
@@ -1629,7 +1629,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
bpp = connector->cmdline_mode.bpp;
}
- if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) {
if (connector->cmdline_mode.specified)
connector->cmdline_mode.specified = false;
return MODE_BAD;
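
To put the check in numbers (my example, not from the patch): a 1920x1080 mode at 32 bpp needs 1920 * 1080 * (32 / 8) = 8294400 bytes, just under 8 MiB, and that has to fit in vram_fb_available after the 8 KiB cursor reservation has already been subtracted.
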
@@ -1638,16 +1638,6 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static void mga_connector_destroy(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1659,7 +1649,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
.get_modes = mga_vga_get_modes,
.mode_valid = mga_vga_mode_valid,
- .best_encoder = mga_connector_best_encoder,
};
static const struct drm_connector_funcs mga_vga_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 73a6b848601c..99997d737362 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -37,8 +37,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- mdev->mc.vram_size,
- &drm_gem_vram_mm_funcs);
+ mdev->mc.vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
@@ -51,6 +50,8 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
+ mdev->vram_fb_available = mdev->mc.vram_size;
+
return 0;
}
@@ -58,6 +59,8 @@ void mgag200_mm_fini(struct mga_device *mdev)
{
struct drm_device *dev = mdev->dev;
+ mdev->vram_fb_available = 0;
+
drm_vram_helper_release_mm(dev);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index e686331fa089..691c1a277d91 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg = ioremap(res->start, resource_size(res));
if (cxdbg) {
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
0x76543210);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
0xFEDCBA98);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
a6xx_state->debugbus = state_kcalloc(a6xx_state,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 4c889aabdaf9..959d03e007fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -31,7 +31,7 @@
*/
#define DPU_DEBUG(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_KMS)) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
DRM_DEBUG(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
@@ -43,7 +43,7 @@
*/
#define DPU_DEBUG_DRIVER(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 0da8a4e428ad..eff1a4c61258 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -9,6 +9,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0f312ac5b624..ad4e963ccd9b 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,7 +178,9 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- encoder->bridge = edp->bridge;
+ ret = drm_bridge_attach(encoder, edp->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index f2c17858a703..eb34243dad53 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7f3dd3ffe2c9..0d9657cc70db 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -89,7 +89,6 @@ struct edp_ctrl {
/* edid raw data */
struct edid *edid;
- struct drm_dp_link dp_link;
struct drm_dp_aux *drm_aux;
/* dpcd raw data */
@@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
u32 prate;
u32 lrate;
u32 bpp;
- u8 max_lane = ctrl->dp_link.num_lanes;
+ u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
u8 lane;
prate = ctrl->pixel_rate;
@@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
* By default, use the maximum link rate and minimum lane count,
* so that we can do rate down shift during link training.
*/
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
prate *= bpp;
prate /= 8; /* in kByte */
@@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl)
data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
- if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
depth = EDP_6BIT;
@@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
rate = ctrl->link_rate;
lane = ctrl->lane_cnt;
- max_lane = ctrl->dp_link.num_lanes;
+ max_lane = drm_dp_max_lane_count(ctrl->dpcd);
bpp = ctrl->color_depth * 3;
prate = ctrl->pixel_rate;
@@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
static int edp_do_link_train(struct edp_ctrl *ctrl)
{
+ u8 values[2];
int ret;
- struct drm_dp_link dp_link;
DBG("");
/*
* Set the current link rate and lane cnt to panel. They may have been
* adjusted and the values are different from them in DPCD CAP
*/
- dp_link.num_lanes = ctrl->lane_cnt;
- dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
- dp_link.capabilities = ctrl->dp_link.capabilities;
- if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
+	values[0] = ctrl->link_rate;
+	values[1] = ctrl->lane_cnt;
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
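+	/*
+	 * DP_LANE_COUNT_SET (DPCD 0x101) immediately follows
+	 * DP_LINK_BW_SET (DPCD 0x100), so this single two-byte write
+	 * programs the link rate and the lane count together.
+	 */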
+ if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
+ sizeof(values)) < 0)
return EDP_TRAIN_FAIL;
ctrl->v_level = 0; /* start from default level */
@@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work)
{
struct edp_ctrl *ctrl = container_of(
work, struct edp_ctrl, on_work);
+ u8 value;
int ret;
mutex_lock(&ctrl->dev_mutex);
@@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work)
edp_ctrl_link_enable(ctrl, 1);
edp_ctrl_irq_enable(ctrl, 1);
- ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link);
- if (ret)
- goto fail;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret < 0)
+ goto fail;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+	 * Table 5-52, "Sink Control Field", register 0x600).
+ */
+ usleep_range(1000, 2000);
+ }
ctrl->power_on = true;
@@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work)
edp_state_ctrl(ctrl, 0);
- drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link);
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ u8 value;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret > 0) {
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ }
+ }
edp_ctrl_irq_enable(ctrl, 0);
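
Both the on and off paths now open-code the same read-modify-write of DPCD register 0x600; a helper of roughly this shape could carry both (a sketch only, not something this patch adds):

	static int edp_set_dpcd_power(struct edp_ctrl *ctrl, u8 state)
	{
		u8 value;
		ssize_t ret;

		/* DP_SET_POWER only exists on DPCD 1.1 and later */
		if (ctrl->dpcd[DP_DPCD_REV] < 0x11)
			return 0;

		ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
		if (ret < 0)
			return ret;

		value &= ~DP_SET_POWER_MASK;
		value |= state;	/* DP_SET_POWER_D0 or DP_SET_POWER_D3 */

		ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
		return ret < 0 ? ret : 0;
	}
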
@@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
edp_ctrl_irq_enable(ctrl, 1);
}
- ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link);
- if (ret) {
- pr_err("%s: read dpcd cap failed, %d\n", __func__, ret);
- goto disable_ret;
- }
-
/* Initialize link rate as panel max link rate */
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
if (!ctrl->edid) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 355afb936401..1a9b6289637d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,7 +327,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- encoder->bridge = hdmi->bridge;
+ ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
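
The point of this change: since the core took over bridge bookkeeping, drivers must go through drm_bridge_attach() and handle its failure, which a bare "encoder->bridge = ..." assignment could not express. A sketch of the pattern (the three-argument form matches the API of this series; names are illustrative):

    #include <drm/drm_bridge.h>

    static int sketch_attach_bridge(struct drm_encoder *encoder,
                                    struct drm_bridge *bridge)
    {
            /* NULL: no previous bridge, hook directly after the encoder. */
            return drm_bridge_attach(encoder, bridge, NULL);
    }
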
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bdac452b00fb..d0b84f0abee1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -14,6 +14,8 @@
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
+#include <drm/drm_bridge.h>
+
#include "msm_drv.h"
#include "hdmi.xml.h"
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 6be879578140..1c74381a4fc9 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- int ret;
-
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&show_priv->dev->struct_mutex);
gpu->funcs->gpu_state_put(show_priv->state);
mutex_unlock(&show_priv->dev->struct_mutex);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 12421567af89..b69ace8bf526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -95,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
reg = readl(mxsfb->base + LCDC_CTRL);
- if (mxsfb->connector.display_info.num_bus_formats)
- bus_format = mxsfb->connector.display_info.bus_formats[0];
+ if (mxsfb->connector->display_info.num_bus_formats)
+ bus_format = mxsfb->connector->display_info.bus_formats[0];
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
+ bus_format);
reg &= ~CTRL_BUS_WIDTH_MASK;
switch (bus_format) {
@@ -204,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb)
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
+ struct drm_device *drm = mxsfb->pipe.crtc.dev;
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
- const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 bus_flags = mxsfb->connector->display_info.bus_flags;
u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
int err;
@@ -229,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+ if (mxsfb->bridge && mxsfb->bridge->timings)
+ bus_flags = mxsfb->bridge->timings->input_bus_flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(mxsfb->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
mxsfb->base + mxsfb->devdata->transfer_count);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index e8506335cd15..497cf443a9af 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -101,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_connector *connector;
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
struct drm_device *drm = pipe->plane.dev;
+ if (!mxsfb->connector) {
+ list_for_each_entry(connector,
+ &drm->mode_config.connector_list,
+ head)
+ if (connector->encoder == &mxsfb->pipe.encoder) {
+ mxsfb->connector = connector;
+ break;
+ }
+ }
+
+ if (!mxsfb->connector) {
+ dev_warn(drm->dev, "No connector attached, using default\n");
+ mxsfb->connector = &mxsfb->panel_connector;
+ }
+
pm_runtime_get_sync(drm->dev);
drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
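
An equivalent form of the connector lookup above, written against the connector-list iterator API rather than a raw list walk; this is a hedged alternative sketch, not the patch's exact code:

    #include <drm/drm_connector.h>
    #include <drm/drm_encoder.h>

    static struct drm_connector *
    sketch_connector_for_encoder(struct drm_device *drm,
                                 struct drm_encoder *encoder)
    {
            struct drm_connector_list_iter iter;
            struct drm_connector *connector, *found = NULL;

            drm_connector_list_iter_begin(drm, &iter);
            drm_for_each_connector_iter(connector, &iter)
                    if (connector->encoder == encoder && !found)
                            found = connector;
            drm_connector_list_iter_end(&iter);

            return found;
    }
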
@@ -129,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&drm->event_lock);
+
+ if (mxsfb->connector != &mxsfb->panel_connector)
+ mxsfb->connector = NULL;
}
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -226,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
- &mxsfb->connector);
+ mxsfb->connector);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
goto err_vblank;
}
- ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
- if (ret) {
- dev_err(drm->dev, "Cannot connect panel\n");
- goto err_vblank;
+ /*
+ * Attach the panel only if there is one.
+ * If there is no panel, there must be a bridge. In that case, we need
+ * a reference to its connector for proper initialization.
+ * We will do this check in pipe->enable(), since the connector won't
+ * be attached to an encoder until then.
+ */
+
+ if (mxsfb->panel) {
+ ret = drm_panel_attach(mxsfb->panel, mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel: %d\n", ret);
+ goto err_vblank;
+ }
+ } else if (mxsfb->bridge) {
+ ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe,
+ mxsfb->bridge);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ goto err_vblank;
+ }
}
drm->mode_config.min_width = MXSFB_MIN_XRES;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
index d975300dca05..0b65b5194a9c 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -27,8 +27,10 @@ struct mxsfb_drm_private {
struct clk *clk_disp_axi;
struct drm_simple_display_pipe pipe;
- struct drm_connector connector;
+ struct drm_connector panel_connector;
+ struct drm_connector *connector;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
int mxsfb_setup_crtc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index be36f4d6cc96..4eb94744c526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -21,7 +21,8 @@
static struct mxsfb_drm_private *
drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
{
- return container_of(connector, struct mxsfb_drm_private, connector);
+ return container_of(connector, struct mxsfb_drm_private,
+ panel_connector);
}
static int mxsfb_panel_get_modes(struct drm_connector *connector)
@@ -76,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
int mxsfb_create_output(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
+ &mxsfb->panel, &mxsfb->bridge);
if (ret)
return ret;
- mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
- mxsfb->connector.polled = 0;
- drm_connector_helper_add(&mxsfb->connector,
- &mxsfb_panel_connector_helper_funcs);
- ret = drm_connector_init(drm, &mxsfb->connector,
- &mxsfb_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (!ret)
- mxsfb->panel = panel;
+ if (mxsfb->panel) {
+ mxsfb->connector = &mxsfb->panel_connector;
+ mxsfb->connector->dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector->polled = 0;
+ drm_connector_helper_add(mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ }
return ret;
}
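
mxsfb_create_output() now relies on the standard panel-or-bridge probe. A sketch of the contract, assuming only the documented drm_of_find_panel_or_bridge() behavior: on success exactly one of *panel / *bridge is set, and -EPROBE_DEFER is returned while the remote device exists in DT but has not bound yet.

    #include <drm/drm_of.h>

    static int sketch_find_output(struct device_node *np,
                                  struct drm_panel **panel,
                                  struct drm_bridge **bridge)
    {
            return drm_of_find_panel_or_bridge(np, 0 /* port */,
                                               0 /* endpoint */,
                                               panel, bridge);
    }
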
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index dc64863b5fd8..44ee82d0c9b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -256,7 +256,7 @@ nv04_display_create(struct drm_device *dev)
list_for_each_entry_safe(connector, ct,
&dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
+ if (!connector->possible_encoders) {
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index b46be8a091e9..549486f1d937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
-static const struct drm_connector_helper_funcs
-nv50_mstc_help = {
- .get_modes = nv50_mstc_get_modes,
- .mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
- .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
- .atomic_check = nv50_mstc_atomic_check,
-};
-
-static enum drm_connector_status
-nv50_mstc_detect(struct drm_connector *connector, bool force)
+static int
+nv50_mstc_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
- enum drm_connector_status conn_status;
int ret;
if (drm_connector_is_unregistered(connector))
@@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
- conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
- mstc->port);
+ ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
+ mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
- return conn_status;
+ return ret;
}
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
+ .detect_ctx = nv50_mstc_detect,
+};
+
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
@@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
- .detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
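
For context, the shape of the .detect_ctx hook that replaces .detect here: unlike .detect it receives a modeset acquire context (needed for MST probing) and may return a negative errno such as -EDEADLK in addition to a connector_status value. A hedged stub:

    #include <drm/drm_connector.h>
    #include <drm/drm_modeset_helper_vtables.h>

    static int sketch_detect_ctx(struct drm_connector *connector,
                                 struct drm_modeset_acquire_ctx *ctx,
                                 bool force)
    {
            /* A real driver queries the hardware/topology here. */
            return connector_status_disconnected;
    }
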
@@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_init(struct nv50_mstm *mstm)
+nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{
int ret;
if (!mstm || !mstm->mgr.mst_state)
return;
- ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev);
@@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
- nv50_mstm_init(nv_encoder->dp.mstm);
+ nv50_mstm_init(nv_encoder->dp.mstm, runtime);
}
}
@@ -2392,7 +2392,7 @@ nv50_display_create(struct drm_device *dev)
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
+ if (connector->possible_encoders)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 94dfa2e5a9ab..5b413588b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -365,9 +365,8 @@ find_encoder(struct drm_connector *connector, int type)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i;
- drm_connector_for_each_possible_encoder(connector, enc, i) {
+ drm_connector_for_each_possible_encoder(connector, enc) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -414,10 +413,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
- int i, ret;
+ int ret;
bool switcheroo_ddc = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
nv_encoder = nouveau_encoder(encoder);
switch (nv_encoder->dcb->type) {
@@ -1131,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
int ret;
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
+
+ return NVIF_NOTIFY_KEEP;
+ }
ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) {
@@ -1151,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
return NVIF_NOTIFY_DROP;
}
- if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
- NV_DEBUG(drm, "service %s\n", name);
- drm_dp_cec_irq(&nv_connector->aux);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
- nv50_mstm_service(nv_encoder->dp.mstm);
- } else {
- bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
-
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
- drm_dp_cec_unset_edid(&nv_connector->aux);
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
- if (!plugged)
- nv50_mstm_remove(nv_encoder->dp.mstm);
- }
-
- drm_helper_hpd_irq_event(connector->dev);
+ nv50_mstm_remove(nv_encoder->dp.mstm);
}
+ drm_helper_hpd_irq_event(connector->dev);
+
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
@@ -1415,8 +1415,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- drm_dp_cec_register_connector(&nv_connector->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6f038511a03a..53f9bceaf17a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_connector_list_iter conn_iter;
int ret;
+ /*
+ * Enable hotplug interrupts (done as early as possible, since we need
+ * them for MST)
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nvif_notify_get(&conn->hpd);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
@@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
*/
drm_kms_helper_poll_enable(dev);
- /* enable hotplug interrupts */
- drm_connector_list_iter_begin(dev, &conn_iter);
- nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nvif_notify_get(&conn->hpd);
- }
- drm_connector_list_iter_end(&conn_iter);
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f0daf958e03a..77a0c6ad3cef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -236,6 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 904101c5e79d..5950c3f52c2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -6,7 +6,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
-omapdss-y := core.o dss.o dispc.o dispc_coefs.o \
+omapdss-y := dss.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
deleted file mode 100644
index 6ac497b63711..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "CORE"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "omapdss.h"
-#include "dss.h"
-
-/* INIT */
-static struct platform_driver * const omap_dss_drivers[] = {
- &omap_dsshw_driver,
- &omap_dispchw_driver,
-#ifdef CONFIG_OMAP2_DSS_DSI
- &omap_dsihw_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- &omap_venchw_driver,
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- &omapdss_hdmi4hw_driver,
-#endif
-#ifdef CONFIG_OMAP5_DSS_HDMI
- &omapdss_hdmi5hw_driver,
-#endif
-};
-
-static int __init omap_dss_init(void)
-{
- return platform_register_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-static void __exit omap_dss_exit(void)
-{
- platform_unregister_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index ed0ccbeed70f..413dbdd1771e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -114,6 +114,7 @@ struct dispc_features {
const unsigned int num_reg_fields;
const enum omap_overlay_caps *overlay_caps;
const u32 **supported_color_modes;
+ const u32 *supported_scaler_color_modes;
unsigned int num_mgrs;
unsigned int num_ovls;
unsigned int buffer_size_unit;
@@ -184,9 +185,6 @@ struct dispc_device {
struct regmap *syscon_pol;
u32 syscon_pol_offset;
-
- /* DISPC_CONTROL & DISPC_CONFIG lock*/
- spinlock_t control_lock;
};
enum omap_color_component {
@@ -368,25 +366,17 @@ static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- return REG_GET(dispc, rfld.reg, rfld.high, rfld.low);
+ return REG_GET(dispc, rfld->reg, rfld->high, rfld->low);
}
static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld, int val)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
- const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
- unsigned long flags;
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- if (need_lock) {
- spin_lock_irqsave(&dispc->control_lock, flags);
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- spin_unlock_irqrestore(&dispc->control_lock, flags);
- } else {
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- }
+ REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
static int dispc_get_num_ovls(struct dispc_device *dispc)
@@ -2510,6 +2500,19 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
if (width == out_width && height == out_height)
return 0;
+ if (dispc->feat->supported_scaler_color_modes) {
+ const u32 *modes = dispc->feat->supported_scaler_color_modes;
+ unsigned int i;
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ break;
+ }
+
+ if (modes[i] == 0)
+ return -EINVAL;
+ }
+
if (plane == OMAP_DSS_WB) {
switch (fourcc) {
case DRM_FORMAT_NV12:
@@ -4225,6 +4228,12 @@ static const u32 *omap4_dispc_supported_color_modes[] = {
DRM_FORMAT_RGBX8888),
};
+static const u32 omap3_dispc_supported_scaler_color_modes[] = {
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ 0,
+};
+
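
The scaler-mode tables follow the driver's zero-terminated fourcc list convention, which the new check in dispc_ovl_calc_scaling() scans. A minimal sketch of that convention, with an illustrative helper name:

    #include <linux/types.h>

    static bool sketch_fourcc_in_list(const u32 *modes, u32 fourcc)
    {
            unsigned int i;

            for (i = 0; modes[i]; i++)
                    if (modes[i] == fourcc)
                            return true;

            return false;   /* reached the terminating 0 entry */
    }
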
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
@@ -4253,6 +4262,7 @@ static const struct dispc_features omap24xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
.overlay_caps = omap2_dispc_overlay_caps,
.supported_color_modes = omap2_dispc_supported_color_modes,
+ .supported_scaler_color_modes = COLOR_ARRAY(DRM_FORMAT_XRGB8888),
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4287,6 +4297,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4321,6 +4332,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4355,6 +4367,7 @@ static const struct dispc_features omap36xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3630_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4389,6 +4402,7 @@ static const struct dispc_features am43xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 1,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4768,8 +4782,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, dispc);
dispc->dss = dss;
- spin_lock_init(&dispc->control_lock);
-
/*
* The OMAP3-based models can't be told apart using the compatible
* string, use SoC device matching.
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b30fcaa2d0f5..da16ea095f13 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
static void dsi_proto_timings(struct dsi_data *dsi)
{
- unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tlpx, tclk_zero, tclk_prepare;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned int ths_trail, ths_exit;
@@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi)
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 4bdd63b57100..225ec808b01a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1598,3 +1598,40 @@ struct platform_driver omap_dsshw_driver = {
.suppress_bind_attrs = true,
},
};
+
+/* INIT */
+static struct platform_driver * const omap_dss_drivers[] = {
+ &omap_dsshw_driver,
+ &omap_dispchw_driver,
+#ifdef CONFIG_OMAP2_DSS_DSI
+ &omap_dsihw_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ &omap_venchw_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ &omapdss_hdmi4hw_driver,
+#endif
+#ifdef CONFIG_OMAP5_DSS_HDMI
+ &omapdss_hdmi5hw_driver,
+#endif
+};
+
+static int __init omap_dss_init(void)
+{
+ return platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+static void __exit omap_dss_exit(void)
+{
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+module_init(omap_dss_init);
+module_exit(omap_dss_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
+MODULE_LICENSE("GPL v2");
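
The module init moved here uses the platform_register_drivers() batch API: one call registers the whole array (unwinding on failure), and the unregister helper tears them down in reverse order. A hedged standalone sketch with hypothetical sub-drivers standing in for the DSS components:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    extern struct platform_driver sketch_core_driver;   /* hypothetical */
    extern struct platform_driver sketch_enc_driver;    /* hypothetical */

    static struct platform_driver * const sketch_drivers[] = {
            &sketch_core_driver,
            &sketch_enc_driver,
    };

    static int __init sketch_init(void)
    {
            return platform_register_drivers(sketch_drivers,
                                             ARRAY_SIZE(sketch_drivers));
    }

    static void __exit sketch_exit(void)
    {
            platform_unregister_drivers(sketch_drivers,
                                        ARRAY_SIZE(sketch_drivers));
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");
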
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 5d5d5588ebc1..ea5d5c228534 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -542,8 +542,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core,
}
/* Set ACR clock divisor */
- REG_FLD_MOD(av_base,
- HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+ if (cfg->use_mclk)
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
+ cfg->mclk_mode, 2, 0);
r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
/*
@@ -675,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -737,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
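
Background on the N/CTS pair computed by hdmi_compute_acr(): HDMI audio clock regeneration requires 128 * fs = f_TMDS * N / CTS, so with a chosen N (e.g. the recommended 6144 for 48 kHz) CTS follows from the pixel clock. A sketch of that arithmetic, not the driver's exact rounding:

    #include <linux/math64.h>
    #include <linux/types.h>

    static u32 sketch_acr_cts(u32 pclk_khz, u32 n, u32 fs_hz)
    {
            /* CTS = f_TMDS * N / (128 * fs); pixel clock given in kHz */
            return div_u64((u64)pclk_khz * 1000 * n, 128 * fs_hz);
    }
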
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 7400fb99d453..ff4d35c8771f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,24 +23,12 @@
#include "hdmi5_core.h"
-/* only 24 bit color depth used for now */
-static const struct csc_table csc_table_deepcolor[] = {
- /* HDMI_DEEP_COLOR_24BIT */
- [0] = { 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32, },
- /* HDMI_DEEP_COLOR_30BIT */
- [1] = { 7015, 0, 0, 128, 0, 7015, 0, 128, 0, 0, 7015, 128, },
- /* HDMI_DEEP_COLOR_36BIT */
- [2] = { 7010, 0, 0, 512, 0, 7010, 0, 512, 0, 0, 7010, 512, },
- /* FULL RANGE */
- [3] = { 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0, },
-};
-
static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned int ss_scl_high = 4600; /* ns */
- const unsigned int ss_scl_low = 5400; /* ns */
+ const unsigned int ss_scl_high = 4700; /* ns */
+ const unsigned int ss_scl_low = 5500; /* ns */
const unsigned int fs_scl_high = 600; /* ns */
const unsigned int fs_scl_low = 1300; /* ns */
const unsigned int sda_hold = 1000; /* ns */
@@ -397,14 +385,6 @@ static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 2, 1, 0);
}
-static void hdmi_core_config_csc(struct hdmi_core_data *core)
-{
- int clr_depth = 0; /* 24 bit color depth */
-
- /* CSC_COLORDEPTH */
- REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, clr_depth, 7, 4);
-}
-
static void hdmi_core_config_video_sampler(struct hdmi_core_data *core)
{
int video_mapping = 1; /* for 24 bit color depth */
@@ -469,47 +449,67 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0);
}
-static void hdmi_core_csc_config(struct hdmi_core_data *core,
- struct csc_table csc_coeff)
+static void hdmi_core_write_csc(struct hdmi_core_data *core,
+ const struct csc_table *csc_coeff)
{
void __iomem *base = core->base;
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff.a1 >> 8 , 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff.a1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff.a2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff.a2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff.a3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff.a3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff.a4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff.a4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff.b1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff.b1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff.b2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff.b2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff.b3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff.b3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff.b4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff.b4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff.c1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff.c1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff.c2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff.c2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff.c3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff.c3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff.c4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff.c4, 7, 0);
-
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff->a1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff->a1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff->a2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff->a2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff->a3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff->a3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff->a4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff->a4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff->b1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff->b1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff->b2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff->b2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff->b3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff->b3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff->b4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff->b4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff->c1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff->c1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff->c2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff->c2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff->c3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff->c3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff->c4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff->c4, 7, 0);
+
+ /* enable CSC */
REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0);
}
-static void hdmi_core_configure_range(struct hdmi_core_data *core)
+static void hdmi_core_configure_range(struct hdmi_core_data *core,
+ enum hdmi_quantization_range range)
{
- struct csc_table csc_coeff = { 0 };
+ static const struct csc_table csc_limited_range = {
+ 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32
+ };
+ static const struct csc_table csc_full_range = {
+ 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0
+ };
+ const struct csc_table *csc_coeff;
+
+ /* CSC_COLORDEPTH = 24 bits */
+ REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, 0, 7, 4);
+
+ switch (range) {
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ csc_coeff = &csc_full_range;
+ break;
- /* support limited range with 24 bit color depth for now */
- csc_coeff = csc_table_deepcolor[0];
+ case HDMI_QUANTIZATION_RANGE_DEFAULT:
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ default:
+ csc_coeff = &csc_limited_range;
+ break;
+ }
- hdmi_core_csc_config(core, csc_coeff);
+ hdmi_core_write_csc(core, csc_coeff);
}
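
The VIC-based selection feeding this function follows the CEA-861 rule: VIC 1 (640x480) and non-CEA/DVI modes use full quantization range, every other CEA mode uses limited range. A hedged sketch of just that decision:

    #include <linux/hdmi.h>

    static enum hdmi_quantization_range
    sketch_range_for_mode(u8 vic, bool is_hdmi)
    {
            if (!is_hdmi || vic <= 1)
                    return HDMI_QUANTIZATION_RANGE_FULL;

            return HDMI_QUANTIZATION_RANGE_LIMITED;
    }
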
static void hdmi_core_enable_video_path(struct hdmi_core_data *core)
@@ -600,9 +600,20 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
+ enum hdmi_quantization_range range;
hdmi_core_mask_interrupts(core);
+ if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
+ char vic = cfg->infoframe.video_code;
+
+ /* All CEA modes other than VIC 1 use limited quantization range. */
+ range = vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ range = HDMI_QUANTIZATION_RANGE_FULL;
+ }
+
hdmi_core_init(&v_core_cfg, cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
@@ -616,9 +627,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_wp_video_config_interface(wp, &vm);
- /* support limited range with 24 bit color depth for now */
- hdmi_core_configure_range(core);
- cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ hdmi_core_configure_range(core, range);
+ cfg->infoframe.quantization_range = range;
/*
* configure core video part, set software reset in the core
@@ -628,7 +638,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_video_config(core, &v_core_cfg);
hdmi_core_config_video_packetizer(core);
- hdmi_core_config_csc(core);
hdmi_core_config_video_sampler(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI)
@@ -798,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -841,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 14b41de44ebc..0693d34fca1b 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 835e6654fa82..43c1d096b021 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(u32 flags)
{
- switch (flags & OMAP_BO_TILED) {
+ switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
return TILFMT_8BIT;
case OMAP_BO_TILED_16:
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2983c003698e..b3e22c890c51 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6fe14111cd95..24bbe9f2a32e 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 1b8b5108caf8..9aeab81dfb90 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb,
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
- struct plane *plane = &omap_fb->planes[0];
u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
info->screen_width /= format->cpp[0];
if (fb->format->format == DRM_FORMAT_NV12) {
- plane = &omap_fb->planes[1];
-
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 08f539efddfb..e518d93ca6df 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -67,7 +67,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- u32 dma_addr_cnt;
+ refcount_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -196,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
@@ -324,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
@@ -513,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
@@ -773,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
- if (omap_obj->dma_addr_cnt == 0) {
+ if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
+ refcount_set(&omap_obj->dma_addr_cnt, 1);
+
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
@@ -813,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->block = block;
DBG("got dma address: %pad", &omap_obj->dma_addr);
+ } else {
+ refcount_inc(&omap_obj->dma_addr_cnt);
}
- omap_obj->dma_addr_cnt++;
-
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else if (omap_gem_is_contiguous(omap_obj)) {
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -832,38 +836,46 @@ fail:
}
/**
+ * omap_gem_unpin_locked() - Unpin a GEM object from memory
+ * @obj: the GEM object
+ *
+ * omap_gem_unpin() without locking.
+ */
+static void omap_gem_unpin_locked(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret;
+
+ if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->dma_addr = 0;
+ omap_obj->block = NULL;
+ }
+}
+
+/**
* omap_gem_unpin() - Unpin a GEM object from memory
* @obj: the GEM object
*
* Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
- * reference-counted, the actualy unpin will only be performed when the number
+ * reference-counted, the actual unpin will only be performed when the number
* of calls to this function matches the number of calls to omap_gem_pin().
*/
void omap_gem_unpin(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret;
mutex_lock(&omap_obj->lock);
-
- if (omap_obj->dma_addr_cnt > 0) {
- omap_obj->dma_addr_cnt--;
- if (omap_obj->dma_addr_cnt == 0) {
- ret = tiler_unpin(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not unpin pages: %d\n", ret);
- }
- ret = tiler_release(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not release unmap: %d\n", ret);
- }
- omap_obj->dma_addr = 0;
- omap_obj->block = NULL;
- }
- }
-
+ omap_gem_unpin_locked(obj);
mutex_unlock(&omap_obj->lock);
}
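
The conversion above replaces a hand-rolled counter with refcount_t: the first pin acquires the backing resource, the last unpin releases it, and the caller-held object lock is what makes the plain refcount_set() on zero safe. A minimal sketch of the pattern; "acquire/release_backing" are hypothetical stand-ins for the TILER reserve/unpin work:

    #include <linux/refcount.h>

    struct sketch_pinnable {
            refcount_t pin_cnt;     /* 0 == not pinned */
    };

    static void sketch_pin(struct sketch_pinnable *p)
    {
            if (refcount_read(&p->pin_cnt) == 0) {
                    refcount_set(&p->pin_cnt, 1);   /* first user */
                    /* acquire_backing(p); */
            } else {
                    refcount_inc(&p->pin_cnt);      /* further users */
            }
    }

    static void sketch_unpin(struct sketch_pinnable *p)
    {
            if (refcount_dec_and_test(&p->pin_cnt)) {
                    /* release_backing(p); -- last user dropped */
            }
    }
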
@@ -879,8 +891,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock);
- if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
@@ -895,7 +907,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
@@ -1030,10 +1042,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
- off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
+ off, &omap_obj->dma_addr,
+ refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
@@ -1093,7 +1106,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */
- WARN_ON(omap_obj->dma_addr_cnt > 0);
+ WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1120,6 +1133,38 @@ void omap_gem_free_object(struct drm_gem_object *obj)
kfree(omap_obj);
}
+static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ switch (flags & OMAP_BO_CACHE_MASK) {
+ case OMAP_BO_CACHED:
+ case OMAP_BO_WC:
+ case OMAP_BO_CACHE_MASK:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (flags & OMAP_BO_TILED_MASK) {
+ if (!priv->usergart)
+ return false;
+
+ switch (flags & OMAP_BO_TILED_MASK) {
+ case OMAP_BO_TILED_8:
+ case OMAP_BO_TILED_16:
+ case OMAP_BO_TILED_32:
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
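
An illustrative caller of the validated allocation path, assuming the driver's own headers ("omap_drv.h") and the UAPI union omap_gem_size; with the new check, unknown cache or tiling bits are rejected before any allocation work happens:

    #include "omap_drv.h"   /* driver-internal, assumed */

    static struct drm_gem_object *sketch_alloc_tiled(struct drm_device *dev)
    {
            union omap_gem_size gsize = {
                    .tiled = { .width = 1280, .height = 720 },
            };

            /* 16-bit tiled, write-combined buffer */
            return omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
    }
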
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
@@ -1131,18 +1176,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size_t size;
int ret;
- /* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
- if (!priv->usergart) {
- dev_err(dev->dev, "Tiled buffers require DMM\n");
- return NULL;
- }
+ if (!omap_gem_validate_flags(dev, flags))
+ return NULL;
+ /* Validate the flags and compute the memory and cache flags. */
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* Tiled buffers are always shmem paged backed. When they are
* scanned out, they are remapped into DMM/TILER.
*/
- flags &= ~OMAP_BO_SCANOUT;
flags |= OMAP_BO_MEM_SHMEM;
/*
@@ -1153,9 +1195,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
flags |= tiler_get_cpu_cache_flags();
} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
/*
- * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
- * tiled. However, to lower the pressure on memory allocation,
- * use contiguous memory only if no TILER is available.
+ * If we don't have DMM, we must allocate scanout buffers
+ * from contiguous DMA memory.
*/
flags |= OMAP_BO_MEM_DMA_API;
} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
@@ -1174,7 +1215,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index e8c3ae7ac77e..7344bb61936c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -67,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
- if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 5f72c922a04b..a0574dc03e16 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -350,9 +350,8 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel);
- vpanel->panel.dev = dev;
- vpanel->panel.funcs = &versatile_panel_drm_funcs;
+ drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&vpanel->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index dabf59e0f56f..98f184b81187 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -204,9 +204,8 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &feiyang_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd)) {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 3c58f63adbf7..24955bec1958 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -895,9 +895,8 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel);
- ili->panel.dev = dev;
- ili->panel.funcs = &ili9322_drm_funcs;
+ drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ili->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 3ad4a46c4e94..e8789e460a16 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -433,9 +433,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &ili9881c_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power)) {
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d92d1c98878c..83df1ac4211f 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -487,9 +487,8 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
- drm_panel_init(&innolux->base);
- innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = dev;
+ drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
err = drm_panel_add(&innolux->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index ff3e89e61e3f..56364a93f0b8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -437,9 +437,8 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return ret;
}
- drm_panel_init(&jdi->base);
- jdi->base.funcs = &jdi_panel_funcs;
- jdi->base.dev = &jdi->dsi->dev;
+ drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&jdi->base);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 3ac04eb8d0fe..45f96556ec8c 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -391,9 +391,8 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
if (IS_ERR(kingdisplay->backlight))
return PTR_ERR(kingdisplay->backlight);
- drm_panel_init(&kingdisplay->base);
- kingdisplay->base.funcs = &kingdisplay_panel_funcs;
- kingdisplay->base.dev = &kingdisplay->link->dev;
+ drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
+ &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&kingdisplay->base);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index ee4379729a5b..7a1385e834f0 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -196,9 +196,8 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &lb035q02_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 41bf02d122a1..db4865a4c2b9 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -259,9 +259,8 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &lg4573_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ad47cc95459e..2405f26e5d31 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
static int panel_lvds_probe(struct platform_device *pdev)
{
struct panel_lvds *lvds;
- struct device_node *np;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev)
return ret;
}
- np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
- if (np) {
- lvds->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!lvds->backlight)
- return -EPROBE_DEFER;
- }
+ lvds->backlight = devm_of_find_backlight(lvds->dev);
+ if (IS_ERR(lvds->backlight))
+ return PTR_ERR(lvds->backlight);
/*
* TODO: Handle all power supplies specified in the DT node in a generic
@@ -260,20 +254,15 @@ static int panel_lvds_probe(struct platform_device *pdev)
*/
/* Register the panel. */
- drm_panel_init(&lvds->panel);
- lvds->panel.dev = lvds->dev;
- lvds->panel.funcs = &panel_lvds_funcs;
+ drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
- goto error;
+ return ret;
dev_set_drvdata(lvds->dev, lvds);
return 0;
-
-error:
- put_device(&lvds->backlight->dev);
- return ret;
}
static int panel_lvds_remove(struct platform_device *pdev)
@@ -284,9 +273,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
panel_lvds_disable(&lvds->panel);
- if (lvds->backlight)
- put_device(&lvds->backlight->dev);
-
return 0;
}
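
The devm helper adopted above subsumes the whole open-coded phandle walk: it returns ERR_PTR(-EPROBE_DEFER) while the backlight driver has not bound, NULL when no "backlight" phandle exists, and drops the reference automatically on driver detach (hence the deleted put_device() error paths). A hedged usage sketch:

    #include <linux/backlight.h>
    #include <linux/err.h>

    static int sketch_get_backlight(struct device *dev,
                                    struct backlight_device **out)
    {
            struct backlight_device *bl = devm_of_find_backlight(dev);

            if (IS_ERR(bl))
                    return PTR_ERR(bl);     /* may be -EPROBE_DEFER */

            *out = bl;      /* may be NULL: panel without a backlight */
            return 0;
    }
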
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 20f17e46e65d..fd593532ab23 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -205,9 +205,8 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &nl8048_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 2ad1063b068d..60ccedce530c 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -292,9 +292,8 @@ static int nt39016_probe(struct spi_device *spi)
return err;
}
- drm_panel_init(&panel->drm_panel);
- panel->drm_panel.dev = dev;
- panel->drm_panel.funcs = &nt39016_funcs;
+ drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->drm_panel);
if (err < 0) {
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 2bae1db3ff34..f2a72ee6ee07 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,9 +288,8 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (IS_ERR(lcd->backlight))
return PTR_ERR(lcd->backlight);
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = dev;
- lcd->panel.funcs = &lcd_olinuxino_funcs;
+ drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c7b48df8869a..bf1f928b215f 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -455,9 +455,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &otm8009a_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dsi->host->dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index e0e20ecff916..2b40913899d8 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -166,9 +166,8 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->backlight))
return PTR_ERR(osd101t2587->backlight);
- drm_panel_init(&osd101t2587->base);
- osd101t2587->base.funcs = &osd101t2587_panel_funcs;
- osd101t2587->base.dev = &osd101t2587->dsi->dev;
+ drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
+ &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&osd101t2587->base);
}
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3dff0b3f73c2..664605071d34 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -223,9 +223,8 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
return -EPROBE_DEFER;
}
- drm_panel_init(&wuxga_nt->base);
- wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
- wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+ drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
+ &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&wuxga_nt->base);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index b5b14aa059ea..09824e92fc78 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -426,8 +426,8 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return PTR_ERR(ts->dsi);
}
- ts->base.dev = dev;
- ts->base.funcs = &rpi_touchscreen_funcs;
+ drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 6a5d37006103..fd67fc6185c4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -606,9 +606,8 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel);
- panel->panel.funcs = &rad_panel_funcs;
- panel->panel.dev = dev;
+ drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
ret = drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index ba889625ad43..994e855721f4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -404,9 +404,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &rm68200_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index b9109922397f..31234b79d3b1 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -343,9 +343,8 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &jh057n_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 3c15764f0c03..170a5cda21b9 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -173,9 +173,8 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &rb070d30_panel_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3be902dcedc0..250809ba37c7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -351,9 +351,8 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &ld9040_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index f75bef24e050..e3a0397e953e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -215,9 +215,8 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel);
- s6->panel.dev = dev;
- s6->panel.funcs = &s6d16d0_drm_funcs;
+ drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&s6->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index b923de23ed65..938ab72c5540 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -732,9 +732,8 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e3ha2_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index cd90fa700c49..a60635e9226d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -466,9 +466,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63j0x03_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 142d395ea512..ba01af0b14fd 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -473,9 +473,8 @@ static int s6e63m0_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63m0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = s6e63m0_backlight_register(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 81858267723a..dbced6501204 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1017,9 +1017,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e8aa0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 18b22b1294fb..b3619ba443bd 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -274,9 +274,8 @@ static int seiko_panel_probe(struct device *dev,
return -EPROBE_DEFER;
}
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &seiko_panel_funcs;
+ drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index e910b4ad1310..5e136c3ba185 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -329,9 +329,8 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->backlight))
return PTR_ERR(sharp->backlight);
- drm_panel_init(&sharp->base);
- sharp->base.funcs = &sharp_panel_funcs;
- sharp->base.dev = &sharp->link1->dev;
+ drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp->base);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 46cd9a250129..eeab7998c7de 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -185,9 +185,8 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &pdev->dev;
- lcd->panel.funcs = &ls037v7dw01_funcs;
+ drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index c39abde9f9f1..b963ba4ab589 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -264,9 +264,8 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
if (IS_ERR(sharp_nt->backlight))
return PTR_ERR(sharp_nt->backlight);
- drm_panel_init(&sharp_nt->base);
- sharp_nt->base.funcs = &sharp_nt_panel_funcs;
- sharp_nt->base.dev = &sharp_nt->dsi->dev;
+ drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
+ &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp_nt->base);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 28fa6ba7b767..5d487686d25c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -94,6 +94,7 @@ struct panel_desc {
u32 bus_format;
u32 bus_flags;
+ int connector_type;
};
struct panel_simple {
@@ -464,9 +465,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &panel_simple_funcs;
+ drm_panel_init(&panel->base, dev, &panel_simple_funcs,
+ desc->connector_type);
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -833,6 +833,7 @@ static const struct panel_desc auo_g133han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_g185han01_timings = {
@@ -862,6 +863,7 @@ static const struct panel_desc auo_g185han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_p320hvn03_timings = {
@@ -890,6 +892,7 @@ static const struct panel_desc auo_p320hvn03 = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -1205,6 +1208,7 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing dlc_dlc1010gig_timing = {
@@ -1235,6 +1239,7 @@ static const struct panel_desc dlc_dlc1010gig = {
.unprepare = 60,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode edt_et035012dm6_mode = {
@@ -1501,6 +1506,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
.height = 94,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -1525,6 +1531,7 @@ static const struct panel_desc hannstar_hsd100pxn1 = {
.height = 152,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
@@ -1631,6 +1638,7 @@ static const struct panel_desc innolux_g070y2_l01 = {
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g101ice_l01_timing = {
@@ -1659,6 +1667,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g121i1_l01_timing = {
@@ -1686,6 +1695,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.disable = 20,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1869,6 +1879,7 @@ static const struct panel_desc koe_tx31d200vm0baa = {
.height = 109,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing kyo_tcg121xglp_timing = {
@@ -1893,6 +1904,7 @@ static const struct panel_desc kyo_tcg121xglp = {
.height = 184,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
@@ -1941,6 +1953,7 @@ static const struct panel_desc lg_lb070wv8 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
@@ -2063,6 +2076,7 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.disable = 400,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
@@ -2091,6 +2105,7 @@ static const struct panel_desc nec_nl12880bc20_05 = {
.disable = 50,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
@@ -2193,6 +2208,7 @@ static const struct panel_desc nlt_nl192108ac18_02d = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nvd_9128_mode = {
@@ -2216,6 +2232,7 @@ static const struct panel_desc nvd_9128 = {
.height = 88,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -2381,6 +2398,7 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2628,6 +2646,7 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.height = 136,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing sharp_lq123p1jx31_timing = {
@@ -2807,6 +2826,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.height = 95,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
@@ -2831,6 +2851,7 @@ static const struct panel_desc tianma_tm070rvhg71 = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
@@ -2913,6 +2934,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2983,6 +3005,7 @@ static const struct panel_desc urt_umsh_8596md_lvds = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct panel_desc urt_umsh_8596md_parallel = {
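
panel_simple descriptors that are not touched above leave .connector_type zero-initialized, which is only safe because the UAPI defines DRM_MODE_CONNECTOR_Unknown as 0, so unconverted panels keep their previous "unknown connector" behaviour. A one-line check of that assumption:

    #include <linux/build_bug.h>
    #include <drm/drm_connector.h>

    /* Zero-initialized .connector_type must mean "unknown". */
    static_assert(DRM_MODE_CONNECTOR_Unknown == 0);
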
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 09c5d9a6f9fa..ee3f23f45755 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -369,7 +369,8 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(st7701->backlight))
return PTR_ERR(st7701->backlight);
- drm_panel_init(&st7701->panel);
+ drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/**
* Once sleep out has been issued, the ST7701 IC is required to wait 120ms
@@ -381,8 +382,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
* ts8550b and there is no valid documentation for that.
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
- st7701->panel.funcs = &st7701_funcs;
- st7701->panel.dev = &dsi->dev;
ret = drm_panel_add(&st7701->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 5e3e92ea9ea6..108a85bb6667 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -381,8 +381,8 @@ static int st7789v_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &st7789v_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ctx->power = devm_regulator_get(&spi->dev, "power");
if (IS_ERR(ctx->power))
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 3d5b9c4f68d9..d6387d8f88a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -648,9 +648,8 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &acx565akm_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index f2baff827f50..c44d6a65c0aa 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -347,9 +347,8 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td028ttec1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index ba163c779084..621b65feec07 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -458,9 +458,8 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td043mtea1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 71591e5f5938..1a5418ae2ccf 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -457,9 +457,8 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel);
- tpg->panel.dev = dev;
- tpg->panel.funcs = &tpg110_drm_funcs;
+ drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
spi_set_drvdata(spi, tpg);
return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 77e1311b7c69..0feea2456e14 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -518,9 +518,8 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
/* dual port */
gpiod_set_value(ctx->mode_gpio, 0);
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index 536a0d4f8d29..8c811a9e683b 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -10,3 +10,5 @@
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
+
+- Support core dump on job failure
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 12ff77dacc95..4c4e8a30a1ac 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -13,97 +13,42 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- struct dev_pm_opp *opp;
- unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
- unsigned long target_volt, target_rate;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
int err;
- opp = devfreq_recommended_opp(dev, freq, flags);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- target_rate = dev_pm_opp_get_freq(opp);
- target_volt = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (old_clk_rate == target_rate)
- return 0;
-
- /*
- * If frequency scaling from low to high, adjust voltage first.
- * If frequency scaling from high to low, adjust frequency first.
- */
- if (old_clk_rate < target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err) {
- dev_err(dev, "Cannot set voltage %lu uV\n",
- target_volt);
- return err;
- }
- }
-
- err = clk_set_rate(pfdev->clock, target_rate);
- if (err) {
- dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
- err);
- regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
return err;
- }
- if (old_clk_rate > target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err)
- dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
- }
-
- pfdev->devfreq.cur_freq = target_rate;
- pfdev->devfreq.cur_volt = target_volt;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
- ktime_t now = ktime_get();
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- pfdev->devfreq.slot[i].busy_time = 0;
- pfdev->devfreq.slot[i].idle_time = 0;
- pfdev->devfreq.slot[i].time_last_update = now;
- }
+ pfdev->devfreq.busy_time = 0;
+ pfdev->devfreq.idle_time = 0;
+ pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- int i;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- panfrost_devfreq_update_utilization(pfdev, i);
- }
+ panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
- pfdev->devfreq.slot[0].idle_time));
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
+ pfdev->devfreq.idle_time));
- status->busy_time = 0;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
- }
-
- /* We're scheduling only to one core atm, so don't divide for now */
- /* status->busy_time /= NUM_JOB_SLOTS; */
+ status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
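
dev_pm_opp_set_rate() performs the recommended-OPP lookup plus the voltage/clock ordering the driver used to open-code: raise the supply before the clock when scaling up, lower it after the clock when scaling down, so the rail never undershoots the running frequency. A condensed sketch of that contract for a single clock/regulator pair (illustrative only; the OPP core also handles OPP tables, shared regulators and error rollback):

    #include <linux/clk.h>
    #include <linux/regulator/consumer.h>

    static int opp_like_set_rate(struct clk *clk, struct regulator *reg,
                                 unsigned long old_rate,
                                 unsigned long new_rate, int new_uV)
    {
            int err;

            if (new_rate > old_rate) {
                    /* Scaling up: voltage first. */
                    err = regulator_set_voltage(reg, new_uV, new_uV);
                    if (err)
                            return err;
            }

            err = clk_set_rate(clk, new_rate);
            if (err)
                    return err;

            if (new_rate < old_rate)
                    /* Scaling down: voltage last. */
                    err = regulator_set_voltage(reg, new_uV, new_uV);

            return err;
    }
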
@@ -119,7 +64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- *freq = pfdev->devfreq.cur_freq;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
@@ -135,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
+ unsigned long cur_freq;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
@@ -144,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
- pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+ cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
- panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
@@ -174,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
- int i;
-
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@@ -194,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@@ -204,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
- last = pfdev->devfreq.slot[slot].time_last_update;
+ last = pfdev->devfreq.time_last_update;
- /* If we last recorded a transition to busy, we have been idle since */
- if (devfreq_slot->busy)
- pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ if (atomic_read(&pfdev->devfreq.busy_count) > 0)
+ pfdev->devfreq.busy_time += ktime_sub(now, last);
else
- pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+ pfdev->devfreq.idle_time += ktime_sub(now, last);
- pfdev->devfreq.slot[slot].time_last_update = now;
+ pfdev->devfreq.time_last_update = now;
+}
+
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
+{
+ panfrost_devfreq_update_utilization(pfdev);
+ atomic_inc(&pfdev->devfreq.busy_count);
}
-/* The job scheduler is expected to call this at every transition busy <-> idle */
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ int count;
- panfrost_devfreq_update_utilization(pfdev, slot);
- devfreq_slot->busy = !devfreq_slot->busy;
+ panfrost_devfreq_update_utilization(pfdev);
+ count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
+ WARN_ON(count < 0);
}
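
The per-slot busy flag becomes a single counter across all job slots: submission increments busy_count, completion or timeout decrements it, and elapsed time is folded into busy_time whenever the count is non-zero. atomic_dec_if_positive() keeps a double idle record from driving the counter negative. The accounting in condensed form, with the struct name invented for the sketch:

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/ktime.h>

    struct busy_stats {
            ktime_t busy_time;
            ktime_t idle_time;
            ktime_t last_update;
            atomic_t busy_count;
    };

    static void stats_fold(struct busy_stats *s)
    {
            ktime_t now = ktime_get();
            ktime_t delta = ktime_sub(now, s->last_update);

            /* Any in-flight job makes the whole interval "busy". */
            if (atomic_read(&s->busy_count) > 0)
                    s->busy_time = ktime_add(s->busy_time, delta);
            else
                    s->idle_time = ktime_add(s->idle_time, delta);

            s->last_update = now;
    }

    static void stats_record_busy(struct busy_stats *s)
    {
            stats_fold(s);
            atomic_inc(&s->busy_count);
    }

    static void stats_record_idle(struct busy_stats *s)
    {
            stats_fold(s);
            /* Refuse to go below zero even on a spurious idle. */
            WARN_ON(atomic_dec_if_positive(&s->busy_count) < 0);
    }
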
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index e3bc63e82843..0611beffc8d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 9c39b9794811..06713811b92c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -51,13 +51,6 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
-struct panfrost_devfreq_slot {
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- bool busy;
-};
-
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -93,9 +86,10 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
- unsigned long cur_freq;
- unsigned long cur_volt;
- struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ atomic_t busy_count;
} devfreq;
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f21bc8a7ee3a..9458dc6c750c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -470,7 +470,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
/*
* Panfrost driver version:
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index acb07fe06580..deca0c30bbd4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -112,7 +112,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
index cec6dcdadb5c..8e59d765bf19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_issues.h
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -13,37 +13,118 @@
* to care about.
*/
enum panfrost_hw_issue {
+ /* Need way to guarantee that all previously-translated memory accesses
+ * are committed */
HW_ISSUE_6367,
+
+ /* On job completion with a non-done status, the cache is not flushed */
HW_ISSUE_6787,
+
+ /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes an
+ * instrumentation dump if PRFCNT_TILER_EN is enabled */
HW_ISSUE_8186,
+
+ /* TIB: Reports faults from a vtile which has not yet been allocated */
HW_ISSUE_8245,
+
+ /* uTLB deadlock could occur when writing to an invalid page at the
+ * same time as access to a valid page in the same uTLB cache line (==
+ * 4 PTEs == 16K block of mapping) */
HW_ISSUE_8316,
+
+ /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is
+ * still executing */
HW_ISSUE_8394,
+
+ /* CSE: Sends a TERMINATED response for a task that should not be
+ * terminated */
HW_ISSUE_8401,
+
+ /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader,
+ * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */
HW_ISSUE_8408,
+
+ /* Disable the Pause Buffer in the LS pipe. */
HW_ISSUE_8443,
+
+ /* Change in RMUs in use causes problems related to the core's SDC */
HW_ISSUE_8987,
+
+ /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop
+ * won't complete until all 4 tasks have completed */
HW_ISSUE_9435,
+
+ /* HT: Tiler returns TERMINATED for non-terminated command */
HW_ISSUE_9510,
+
+ /* Occasionally the GPU will issue multiple page faults for the same
+ * address before the MMU page table has been read by the GPU */
HW_ISSUE_9630,
+
+ /* RA DCD load request to SDC returns invalid load ignore causing
+ * colour buffer mismatch */
HW_ISSUE_10327,
+
+ /* MMU TLB invalidation hazards */
HW_ISSUE_10649,
+
+ /* Missing cache flush in multi core-group configuration */
HW_ISSUE_10676,
+
+ /* Chicken bit on T72X for a hardware workaround in compiler */
HW_ISSUE_10797,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */
HW_ISSUE_10817,
+
+ /* Intermittent missing interrupt on job completion */
HW_ISSUE_10883,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR
+ * (similar to issue 10817) and can use #10817 workaround */
HW_ISSUE_10959,
+
+ /* Soft-stopped fragment shader job can restart with an out-of-bounds
+ * restart index */
HW_ISSUE_10969,
+
+ /* Race condition can cause tile list corruption */
HW_ISSUE_11020,
+
+ /* Write buffer can cause tile list corruption */
HW_ISSUE_11024,
+
+ /* Pause buffer can cause a fragment job hang */
HW_ISSUE_11035,
+
+ /* Dynamic Core Scaling not supported due to errata */
HW_ISSUE_11056,
+
+ /* Clear encoder state for a hard stopped fragment job which is AFBC
+ * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and
+ * r0p1_50rel0 */
HW_ISSUE_T76X_3542,
+
+ /* Keep tiler module clock on to prevent GPU stall */
HW_ISSUE_T76X_3953,
+
+ /* Must ensure L2 is not transitioning when we reset. Workaround with a
+ * busy wait until L2 completes transition; ensure there is a maximum
+ * loop count as it may never complete its transition. (On chips
+ * without this errata, it's fine if L2 transitions.) */
HW_ISSUE_TMIX_8463,
+
+ /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */
GPUCORE_1619,
+
+ /* When a hard-stop follows closely after a soft-stop, the completion
+ * code for the terminated job may be incorrectly set to STOPPED */
HW_ISSUE_TMIX_8438,
+
+ /* "Protected mode" is buggy on Mali-G31 some Bifrost chips, so the
+ * kernel must fiddle with L2 caches to prevent data leakage */
HW_ISSUE_TGOX_R1_1234,
+
HW_ISSUE_END
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 21f34d44aac2..d411eb6c8eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -404,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- /* panfrost_core_dump(pfdev); */
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
@@ -469,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_transition(pfdev, j);
+ panfrost_devfreq_record_idle(pfdev);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -570,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
+ /* Check whether the hardware is idle */
+ if (atomic_read(&pfdev->devfreq.busy_count))
+ return false;
+
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
-
- /* Check whether the hardware is idle */
- if (pfdev->devfreq.slot[i].busy)
- return false;
}
return true;
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 024771a4083e..703ddc803c55 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data)
}
static enum drm_mode_status
-pl111_mode_valid(struct drm_crtc *crtc,
+pl111_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
- struct drm_device *drm = crtc->dev;
+ struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cpp = priv->variant->fb_bpp / 8;
u64 bw;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 276b53473a84..63dfcda04147 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -150,8 +150,8 @@ static int pl111_modeset_init(struct drm_device *dev)
return -EPROBE_DEFER;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_config;
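
drm_panel_bridge_add() was split in two: the variant that forces an explicit connector type now carries the _typed suffix, while the plain call picks up the type the panel recorded through drm_panel_init(). pl111 keeps passing DRM_MODE_CONNECTOR_Unknown, so only the name changes here. A sketch of the choice, assuming the panel pointer came from of_drm_find_panel():

    #include <drm/drm_bridge.h>
    #include <drm/drm_connector.h>
    #include <drm/drm_panel.h>

    static struct drm_bridge *attach_panel_bridge(struct drm_panel *panel,
                                                  bool force_unknown)
    {
            if (force_unknown)
                    /* Explicit override, as pl111 does above. */
                    return drm_panel_bridge_add_typed(panel,
                                                      DRM_MODE_CONNECTOR_Unknown);

            /* Otherwise trust the type set in drm_panel_init(). */
            return drm_panel_bridge_add(panel);
    }
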
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d0d691b31f4a..ca3f51c2a8fe 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -4,6 +4,7 @@ config DRM_QXL
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
+ select DRM_TTM_HELPER
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 265bfe9f8016..1d601f57a6ba 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -88,7 +88,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto free_dev;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
goto disable_pci;
@@ -150,15 +150,7 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_dev_put(dev);
}
-static const struct file_operations qxl_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .mmap = qxl_mmap,
-};
+DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
@@ -276,16 +268,8 @@ static struct drm_driver qxl_driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = qxl_gem_prime_pin,
- .gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
- .gem_prime_vmap = qxl_gem_prime_vmap,
- .gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object_unlocked = qxl_gem_object_free,
- .gem_open_object = qxl_gem_object_open,
- .gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.irq_handler = qxl_irq_handler,
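
DEFINE_DRM_GEM_FOPS() generates essentially the table being deleted, except that .mmap becomes the generic drm_gem_mmap(), which dispatches through each object's drm_gem_object_funcs; together with the per-object funcs table added in qxl_object.c below, that is what lets the driver-wide gem_* hooks and the custom qxl_mmap() go. An approximate expansion, paraphrased rather than copied from include/drm/drm_gem.h:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <drm/drm_file.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_ioctl.h>

    /* Roughly what DEFINE_DRM_GEM_FOPS(qxl_fops) produces. */
    static const struct file_operations qxl_fops_sketch = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .compat_ioctl   = drm_compat_ioctl,
            .poll           = drm_poll,
            .read           = drm_read,
            .llseek         = noop_llseek,
            .mmap           = drm_gem_mmap, /* per-object funcs decide */
    };
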
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 9e034c5fa87d..27e45a2d6b52 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
@@ -354,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 548dfe6f3b26..ab72dc3476e9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+ u32 pflag = 0;
unsigned int i;
+ if (pinned)
+ pflag |= TTM_PL_FLAG_NO_EVICT;
+ if (qbo->tbo.base.size <= PAGE_SIZE)
+ pflag |= TTM_PL_FLAG_TOPDOWN;
+
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
@@ -77,6 +82,19 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
}
}
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+ .free = qxl_gem_object_free,
+ .open = qxl_gem_object_open,
+ .close = qxl_gem_object_close,
+ .pin = qxl_gem_prime_pin,
+ .unpin = qxl_gem_prime_unpin,
+ .get_sg_table = qxl_gem_prime_get_sg_table,
+ .vmap = qxl_gem_prime_vmap,
+ .vunmap = qxl_gem_prime_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
@@ -100,6 +118,7 @@ int qxl_bo_create(struct qxl_device *qdev,
kfree(bo);
return r;
}
+ bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
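
Installing qxl_object_funcs in bo->tbo.base.funcs is the pivotal line: the GEM core consults obj->funcs before falling back to struct drm_driver, so the per-object table replaces the driver-wide hooks removed from qxl_driver above. The same pattern for a hypothetical object type:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <drm/drm_gem.h>

    struct my_obj {
            struct drm_gem_object base;
    };

    static void my_obj_free(struct drm_gem_object *obj)
    {
            kfree(container_of(obj, struct my_obj, base));
    }

    static const struct drm_gem_object_funcs my_obj_funcs = {
            .free = my_obj_free,    /* called instead of driver hooks */
    };

    static void my_obj_init(struct my_obj *obj)
    {
            /* Per-object table takes precedence over drm_driver. */
            obj->base.funcs = &my_obj_funcs;
    }
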
@@ -148,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
@@ -160,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
+ ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
@@ -193,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
(bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
-
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 312216caeea2..2feca734c7b1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL, true);
+ !no_intr, NULL);
if (ret)
return ret;
@@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- glob = bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 9b24514c75aa..16a5e903533d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static struct vm_operations_struct qxl_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- vm_fault_t ret;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- ret = ttm_vm_ops->fault(vmf);
- return ret;
-}
-
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int r;
- struct drm_file *file_priv = filp->private_data;
- struct qxl_device *qdev = file_priv->minor->dev->dev_private;
-
- if (qdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- filp->private_data, vma->vm_pgoff);
-
- r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- qxl_ttm_vm_ops = *ttm_vm_ops;
- qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
- }
- vma->vm_ops = &qxl_ttm_vm_ops;
- return 0;
-}
-
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -151,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct qxl_bo *qbo = to_qxl_bo(bo);
-
- return drm_vma_node_verify_access(&qbo->tbo.base.vma_node,
- filp->private_data);
-}
-
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -310,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
- .verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
@@ -325,6 +275,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
+ qdev->ddev.vma_offset_manager,
false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -368,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct qxl_device *rdev = dev->dev_private;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 62eab82a64f9..acabeaf28732 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev)
else
actual_temp = temp & 0x1ff;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/* get temperature in millidegrees */
@@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev)
else
actual_temp = 0;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/*
@@ -6969,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* XXX this should actually be a bus address, not an MC address. same on older asics */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
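
The dummy read target programmed into INTERRUPT_CNTL2 has to be a bus address the adapter can always DMA from; the IH ring's GPU-internal MC address never was, which is the latent bug the old XXX comment flagged, and rdev->dummy_page.addr is already a DMA (bus) address. The register takes the address shifted right by 8, i.e. it must be 256-byte aligned, which any page address satisfies. A tiny helper expressing that arithmetic (illustrative only):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* INTERRUPT_CNTL2 stores a 256-byte-aligned bus address >> 8. */
    static u32 ih_dummy_rd_addr(dma_addr_t dummy_page_addr)
    {
            return lower_32_bits(dummy_page_addr >> 8);
    }
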
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e937cc01910d..033bc466a862 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b9aea5776d3d..72db2b41e96d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
return;
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
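
drm_edid_to_sad() returns a negative errno on failure and 0 when the EDID simply carries no Short Audio Descriptors; only the negative case merits DRM_ERROR, but both should return early. The rewritten flow, reduced to its skeleton:

    #include <linux/printk.h>
    #include <linux/types.h>

    /* Sketch: log real errors, bail quietly when there are no SADs. */
    static bool sads_usable(int sad_count)
    {
            if (sad_count < 0)
                    pr_err("Couldn't read SADs: %d\n", sad_count);
            return sad_count > 0;
    }
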
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b684cd719612..c07427d3c199 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -249,11 +249,10 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -269,9 +268,8 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -380,10 +378,9 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -428,14 +425,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
struct drm_encoder *enc;
- int i;
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- drm_connector_for_each_possible_encoder(conflict, enc, i) {
+ drm_connector_for_each_possible_encoder(conflict, enc) {
/* if the IDs match */
if (enc == encoder) {
if (conflict->status != connector_status_connected)
@@ -1363,9 +1359,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- int i;
-
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1443,9 +1437,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1460,7 +1453,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1603,9 +1596,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1624,10 +1616,9 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2994f07fbad9..ee28f5b3785e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
return &radeon_connector->mst_encoder->base;
}
+static int
+radeon_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ radeon_connector->port);
+}
+
static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
.get_modes = radeon_dp_mst_get_modes,
.mode_valid = radeon_dp_mst_mode_valid,
.best_encoder = radeon_mst_best_encoder,
+ .detect_ctx = radeon_dp_mst_detect,
};
-static enum drm_connector_status
-radeon_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
-}
-
static void
radeon_dp_mst_connector_destroy(struct drm_connector *connector)
{
@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
};
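
The detect hook moves from drm_connector_funcs.detect to the helper's .detect_ctx, which hands the callback the drm_modeset_acquire_ctx the probe helper already holds; drm_dp_mst_detect_port() gains the connector and ctx parameters so MST detection can take modeset locks without deadlocking, returning a connector status as an int or a negative errno such as -EDEADLK for the helper to retry. The shape of a .detect_ctx implementation, with the body a stand-in:

    #include <drm/drm_connector.h>
    #include <drm/drm_modeset_lock.h>
    #include <drm/drm_modeset_helper_vtables.h>

    static int my_detect_ctx(struct drm_connector *connector,
                             struct drm_modeset_acquire_ctx *ctx,
                             bool force)
    {
            /* Real MST code calls drm_dp_mst_detect_port() here. */
            return connector_status_connected;
    }

    static const struct drm_connector_helper_funcs my_helper_funcs = {
            .detect_ctx = my_detect_ctx,
    };
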
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4528f4dc0b2d..fd74e2611185 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -361,7 +361,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb");
if (ret)
return ret;
@@ -379,10 +379,6 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC64
- struct drm_device *ddev = pci_get_drvdata(pdev);
-#endif
-
	/* if we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown
*/
@@ -390,13 +386,14 @@ radeon_pci_shutdown(struct pci_dev *pdev)
radeon_pci_remove(pdev);
#ifdef CONFIG_PPC64
- /* Some adapters need to be suspended before a
+ /*
+ * Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
* during kexec.
	 * Make this power specific because it breaks
* some non-power boards.
*/
- radeon_suspend_kms(ddev, true, true, false);
+ radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b2b076606f54..67298a0739cb 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2abe1eab471f..140d94cc080d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
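
The two hunks above are the same mechanical conversion: ttm_eu_reserve_buffers() lost its trailing "del_lru" bool. A sketch of the resulting call shape (variable names illustrative, error path trimmed):

	struct ww_acquire_ctx ticket;
	LIST_HEAD(list);	/* list of struct ttm_validate_buffer */
	int r;

	r = ttm_eu_reserve_buffers(&ticket, &list, true /* interruptible */,
				   NULL /* no duplicates list */);
	if (r)
		return r;
	/* ... validate and use the reserved buffers ... */
	ttm_eu_backoff_reservation(&ticket, &list);
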
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a05e10724d46..098bc9f40b98 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -794,6 +794,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
+ rdev->ddev->vma_offset_manager,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 05894d198a79..1d8efb0eefdb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 9c93eb4fad8b..f266c17b907a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.dpll_mask = BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(3) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774B1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -416,6 +445,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
+ { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 0f00bdfe2366..3cd83a030a04 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,6 +9,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
@@ -84,8 +85,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
goto done;
}
- bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto done;
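
This is a rename-only conversion: devm_drm_panel_bridge_add() was split so that the plain variant derives the connector type from the panel itself, while callers that still pass an explicit DRM_MODE_CONNECTOR_* constant spell it *_typed(). Assuming that split, the two forms look like:

	/* connector type inferred from panel->connector_type */
	bridge = devm_drm_panel_bridge_add(rcdu->dev, panel);
	/* connector type forced by the caller, as in this hunk */
	bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
						 DRM_MODE_CONNECTOR_DPI);
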
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 2dc9caee8767..0d59f390de19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -585,7 +585,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsps[j].crtcs_mask |= BIT(i);
- /* Store the VSP pointer and pipe index in the CRTC. */
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'vsps' specifier isn't present, default
+ * to 0 to remain compatible with older DT bindings.
+ */
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3fc7e6899cab..8c6c172bbf2e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -16,6 +16,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
return 0;
}
+static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_LANES,
+ .pll_setup = rcar_lvds_pll_setup_gen2,
+};
+
+static const struct soc_device_attribute lvds_quirk_matches[] = {
+ {
+ .soc_id = "r8a7790", .revision = "ES1.*",
+ .data = &rcar_lvds_r8a7790es1_info,
+ },
+ { /* sentinel */ }
+};
+
static int rcar_lvds_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
struct resource *mem;
int ret;
@@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
+ attr = soc_device_match(lvds_quirk_matches);
+ if (attr)
+ lvds->info = attr->data;
+
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
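
The probe hunk above implements a common quirk pattern: take the match data from the compatible string first, then let an SoC-revision entry override it. Reduced to its skeleton (names generic, not from this driver):

	info = of_device_get_match_data(dev);		/* per-compatible default */
	attr = soc_device_match(quirk_matches);		/* NULL if no revision matches */
	if (attr)
		info = attr->data;			/* e.g. r8a7790 ES1.x quirks */
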
@@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.pll_setup = rcar_lvds_pll_setup_gen2,
};
-static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
- .gen = 2,
- .quirks = RCAR_LVDS_QUIRK_LANES,
- .pll_setup = rcar_lvds_pll_setup_gen2,
-};
-
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
@@ -929,8 +943,9 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
- { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
+ { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index d505ea7d5384..eed594bd38d3 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -477,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
cdn_dp_set_firmware_active(dp, false);
cdn_dp_clk_disable(dp);
dp->active = false;
- dp->link.rate = 0;
- dp->link.num_lanes = 0;
+ dp->max_lanes = 0;
+ dp->max_rate = 0;
if (!dp->connected) {
kfree(dp->edid);
dp->edid = NULL;
@@ -570,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
struct cdn_dp_port *port = cdn_dp_connected_port(dp);
u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
- if (!port || !dp->link.rate || !dp->link.num_lanes)
+ if (!port || !dp->max_rate || !dp->max_lanes)
return false;
if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
@@ -952,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Enabled and connected with a sink, re-train if requested */
} else if (!cdn_dp_check_link_status(dp)) {
- unsigned int rate = dp->link.rate;
- unsigned int lanes = dp->link.num_lanes;
+ unsigned int rate = dp->max_rate;
+ unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
@@ -966,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* If training result is changed, update the video config */
if (mode->clock &&
- (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+ (rate != dp->max_rate || lanes != dp->max_lanes)) {
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index b85ea89eb60b..83c4586665b4 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -92,9 +92,10 @@ struct cdn_dp_device {
struct reset_control *core_rst;
struct audio_info audio_info;
struct video_info video_info;
- struct drm_dp_link link;
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
+ u8 max_lanes;
+ u8 max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 077c87021908..7361c07cb4a7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
if (ret)
goto err_get_training_status;
- dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]);
- dp->link.num_lanes = status[1];
+ dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
+ dp->max_lanes = status[1];
err_get_training_status:
if (ret)
@@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
return ret;
}
- DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
- dp->link.num_lanes);
+ DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
+ dp->max_lanes);
return ret;
}
@@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
(video->color_depth * 2) : (video->color_depth * 3);
- link_rate = dp->link.rate / 1000;
+ link_rate = dp->max_rate / 1000;
ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
if (ret)
@@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
do {
tu_size_reg += 2;
symbol = tu_size_reg * mode->clock * bit_per_pix;
- do_div(symbol, dp->link.num_lanes * link_rate * 8);
+ do_div(symbol, dp->max_lanes * link_rate * 8);
rem = do_div(symbol, 1000);
if (tu_size_reg > 64) {
ret = -EINVAL;
DRM_DEV_ERROR(dp->dev,
"tu error, clk:%d, lanes:%d, rate:%d\n",
- mode->clock, dp->link.num_lanes,
- link_rate);
+ mode->clock, dp->max_lanes, link_rate);
goto err_config_video;
}
} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
@@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
/* set the FIFO Buffer size */
val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
- val /= (dp->link.num_lanes * link_rate);
+ val /= (dp->max_lanes * link_rate);
val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
val += 2;
ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
@@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
u32 val;
if (audio->channels == 2) {
- if (dp->link.num_lanes == 1)
+ if (dp->max_lanes == 1)
sub_pckt_num = 2;
else
sub_pckt_num = 4;
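
Aside from the field renames, the tu_size loop in cdn_dp_config_video() is worth unpacking. With illustrative numbers (not taken from this patch), the quantity it iterates on is the number of valid data symbols per transfer unit, in 1/1000ths:

	/*
	 * symbols_per_tu (x1000) = tu_size * clock[kHz] * bits_per_pixel
	 *                          / (lanes * link_rate[MHz] * 8)
	 *
	 * e.g. a 148500 kHz mode at 24 bpp over 4 lanes of HBR (270 MHz):
	 *   tu_size = 32  ->  32 * 148500 * 24 / (4 * 270 * 8) = 13200,
	 * i.e. 13.2 data symbols in a 32-symbol TU. The loop grows tu_size
	 * by 2 until the fractional part and headroom checks pass, giving
	 * up with -EINVAL once tu_size exceeds 64.
	 */
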
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 906891b03a38..7f56d8c3491d 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -450,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
.phy_force_vendor = true,
+ .use_drm_infoframe = true,
};
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
@@ -464,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
.cur_ctr = rockchip_cur_ctr,
.phy_config = rockchip_phy_config,
.phy_data = &rk3399_chip_data,
+ .use_drm_infoframe = true,
};
static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 85fc5f01f761..cdb401f4283d 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -743,7 +743,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct rk3066_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -753,12 +752,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
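
devm_platform_ioremap_resource() is simply the fused form of the two calls it replaces; assuming the helper's usual semantics, the only user-visible change is the error code for a missing resource (the helper's -EINVAL instead of this driver's old -ENXIO):

	/* open-coded equivalent of the one-liner adopted above */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* NULL res -> ERR_PTR(-EINVAL) */
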
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 291e89b4045f..7582d0e6a60a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
kfree(rk_obj);
}
-struct rockchip_gem_object *
+static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 613404f86668..d04b3492bdac 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -139,6 +139,7 @@ struct vop {
uint32_t *regsbak;
void __iomem *regs;
+ void __iomem *lut_regs;
/* physical map length of vop register */
uint32_t len;
@@ -1040,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct vop *vop = to_vop(crtc);
+ unsigned long rate;
- adjusted_mode->clock =
- DIV_ROUND_UP(clk_round_rate(vop->dclk,
- adjusted_mode->clock * 1000), 1000);
+ /*
+ * Clock craziness.
+ *
+ * Key points:
+ *
+	 * - DRM works in kHz.
+ * - Clock framework works in Hz.
+ * - Rockchip's clock driver picks the clock rate that is the
+ * same _OR LOWER_ than the one requested.
+ *
+ * Action plan:
+ *
+ * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
+ * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
+ * make 60000 kHz then the clock framework will actually give us
+ * the right clock.
+ *
+ * NOTE: if the PLL (maybe through a divider) could actually make
+ * a clock rate 999 Hz higher instead of the one we want then this
+ * could be a problem. Unfortunately there's not much we can do
+ * since it's baked into DRM to use kHz. It shouldn't matter in
+ * practice since Rockchip PLLs are controlled by tables and
+ * even if there is a divider in the middle I wouldn't expect PLL
+ * rates in the table that are just a few kHz different.
+ *
+ * 2. Get the clock framework to round the rate for us to tell us
+ * what it will actually make.
+ *
+ * 3. Store the rounded up rate so that we don't need to worry about
+ * this in the actual clk_set_rate().
+ */
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
}
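
A worked example of the "+999" trick, with made-up rates: if the PLL can produce 60,000,001 Hz and DRM hands us mode->clock = 60000 (kHz), then

	clk_round_rate(vop->dclk, 60000 * 1000);	/* rounds down: <= 60 MHz   */
	clk_round_rate(vop->dclk, 60000 * 1000 + 999);	/* 60000001 Hz now in range */

and DIV_ROUND_UP(60000001, 1000) = 60001 kHz is what lands back in adjusted_mode->clock, so the eventual clk_set_rate() asks for a rate the clock driver rounds to the same 60,000,001 Hz.
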
+static bool vop_dsp_lut_is_enabled(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
+}
+
+static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
+{
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ u32 word;
+
+ word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ writel(word, vop->lut_regs + i * 4);
+ }
+}
+
+static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_crtc_state *state = crtc->state;
+ unsigned int idle;
+ int ret;
+
+ if (!vop->lut_regs)
+ return;
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+
+ spin_lock(&vop->reg_lock);
+ vop_crtc_write_gamma_lut(vop, crtc);
+ VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ /*
+ * Only update GAMMA if the 'active' flag is not changed,
+ * otherwise it's updated by .atomic_enable.
+ */
+ if (crtc->state->color_mgmt_changed &&
+ !crtc->state->active_changed)
+ vop_crtc_gamma_set(vop, crtc, old_crtc_state);
+}
+
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1075,6 +1180,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
+
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
@@ -1085,9 +1198,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
-
- pin_pol = BIT(DCLK_INVERT);
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
+ pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
BIT(HSYNC_POSITIVE) : 0;
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
@@ -1096,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
- VOP_REG_SET(vop, output, rgb_en, 1);
+ VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, rgb_en, 1);
break;
case DRM_MODE_CONNECTOR_eDP:
+ VOP_REG_SET(vop, output, edp_dclk_pol, 1);
VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
+ VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
VOP_REG_SET(vop, output, mipi_dual_channel_en,
!!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- pin_pol &= ~BIT(DCLK_INVERT);
+ VOP_REG_SET(vop, output, dp_dclk_pol, 0);
VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, dp_en, 1);
break;
@@ -1191,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop)
synchronize_irq(vop->irq);
}
+static int vop_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ if (vop->lut_regs && crtc_state->color_mgmt_changed &&
+ crtc_state->gamma_lut) {
+ unsigned int len;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -1243,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
.mode_fixup = vop_crtc_mode_fixup,
+ .atomic_check = vop_crtc_atomic_check,
+ .atomic_begin = vop_crtc_atomic_begin,
.atomic_flush = vop_crtc_atomic_flush,
.atomic_enable = vop_crtc_atomic_enable,
.atomic_disable = vop_crtc_atomic_disable,
@@ -1361,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1518,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+ if (vop->lut_regs) {
+ drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
+ }
/*
* Create drm_planes for overlay windows with possible_crtcs restricted
@@ -1822,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ if (!vop_data->lut_size) {
+ DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ return -EINVAL;
+ }
+ vop->lut_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->lut_regs))
+ return PTR_ERR(vop->lut_regs);
+ }
+
vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
if (!vop->regsbak)
return -ENOMEM;
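
The LUT window above follows the optional-resource idiom: a missing second MEM resource is a legal configuration (a VOP variant without gamma support), not a probe failure. Skeleton of the pattern, stripped of the lut_size sanity check:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {					/* absent: silently skip */
		lut_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(lut_regs))			/* present but broken: fail */
			return PTR_ERR(lut_regs);
	}
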
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 2149a889c29d..0b3d18c457b2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -46,10 +46,15 @@ struct vop_modeset {
struct vop_output {
struct vop_reg pin_pol;
struct vop_reg dp_pin_pol;
+ struct vop_reg dp_dclk_pol;
struct vop_reg edp_pin_pol;
+ struct vop_reg edp_dclk_pol;
struct vop_reg hdmi_pin_pol;
+ struct vop_reg hdmi_dclk_pol;
struct vop_reg mipi_pin_pol;
+ struct vop_reg mipi_dclk_pol;
struct vop_reg rgb_pin_pol;
+ struct vop_reg rgb_dclk_pol;
struct vop_reg dp_en;
struct vop_reg edp_en;
struct vop_reg hdmi_en;
@@ -67,6 +72,7 @@ struct vop_common {
struct vop_reg dither_down_mode;
struct vop_reg dither_down_en;
struct vop_reg dither_up;
+ struct vop_reg dsp_lut_en;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
@@ -170,6 +176,7 @@ struct vop_data {
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
+ unsigned int lut_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
#define VOP_FEATURE_INTERNAL_RGB BIT(1)
@@ -294,8 +301,7 @@ enum dither_down_mode_sel {
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
- DEN_NEGATIVE = 2,
- DCLK_INVERT = 3
+ DEN_NEGATIVE = 2
};
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 64aefa856896..8a4c9af0ba73 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 89e0bb0fe0ab..ae730275a34f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -135,7 +136,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(bridge))
return ERR_CAST(bridge);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d1494be14471..7a9d979c8d5d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -16,6 +16,7 @@
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
+#include "rockchip_drm_drv.h"
#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
{ \
@@ -214,9 +215,11 @@ static const struct vop_modeset px30_modeset = {
};
static const struct vop_output px30_output = {
- .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
- .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
};
@@ -598,6 +601,7 @@ static const struct vop_common rk3288_common = {
.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
@@ -646,6 +650,7 @@ static const struct vop_data rk3288_vop = {
.output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
+ .lut_size = 1024,
};
static const int rk3368_vop_intrs[] = {
@@ -717,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = {
};
static const struct vop_output rk3368_output = {
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
@@ -764,11 +773,16 @@ static const struct vop_data rk3366_vop = {
};
static const struct vop_output rk3399_output = {
- .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
@@ -872,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = {
};
static const struct vop_output rk3328_output = {
+ .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
- .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28),
+ .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
};
static const struct vop_misc rk3328_misc = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 1a5153197fe9..461a7a8129f4 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/completion.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
@@ -68,6 +69,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
if (!entity->rq_list)
return -ENOMEM;
+ init_completion(&entity->entity_idle);
+
for (i = 0; i < num_rq_list; ++i)
entity->rq_list[i] = rq_list[i];
@@ -286,11 +289,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
*/
if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
+ /*
+ * Wait for thread to idle to make sure it isn't processing
+ * this entity.
*/
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
+ wait_for_completion(&entity->entity_idle);
+
}
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency,
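
The handshake this series introduces, reduced to a generic sketch (struct and function names invented; the real counterparts are entity_idle, drm_sched_rq_select_entity() and drm_sched_main()):

	struct worker {
		struct completion idle;		/* init_completion() at setup */
	};

	static void worker_picked(struct worker *w)
	{
		reinit_completion(&w->idle);	/* scheduler now owns the worker */
	}

	static void worker_job_taken(struct worker *w)
	{
		complete(&w->idle);		/* scheduler done touching it */
	}

	static void worker_fini(struct worker *w)
	{
		wait_for_completion(&w->idle);	/* cheap, targeted; no kthread_park() */
	}
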
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 54977408f574..8b45c3a1b84e 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -128,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops drm_sched_fence_ops_finished = {
+static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f39b97ed4ade..3c57e84222ca 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -47,6 +47,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/completion.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
@@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
list_for_each_entry_continue(entity, &rq->entities, list) {
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -496,8 +499,10 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
fence = sched->ops->run_job(s_job);
if (IS_ERR_OR_NULL(fence)) {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
s_job->s_fence->parent = NULL;
- dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
} else {
s_job->s_fence->parent = fence;
}
@@ -632,43 +637,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready to be destroyed.
*/
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_job *job;
unsigned long flags;
- /* Don't destroy jobs while the timeout worker is running */
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr))
- return;
-
+ /*
+	 * Don't destroy jobs while the timeout worker is running, OR while
+	 * the thread is being parked and hence assumed not to touch
+	 * ring_mirror_list.
+ */
+ if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !cancel_delayed_work(&sched->work_tdr)) ||
+ __kthread_should_park(sched->thread))
+ return NULL;
- while (!list_empty(&sched->ring_mirror_list)) {
- struct drm_sched_job *job;
+ spin_lock_irqsave(&sched->job_list_lock, flags);
- job = list_first_entry(&sched->ring_mirror_list,
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- if (!dma_fence_is_signaled(&job->s_fence->finished))
- break;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(job);
+ } else {
+ job = NULL;
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
}
- /* queue timeout for next job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ return job;
}
/**
@@ -708,17 +715,27 @@ static int drm_sched_main(void *param)
struct drm_sched_fence *s_fence;
struct drm_sched_job *sched_job;
struct dma_fence *fence;
+ struct drm_sched_job *cleanup_job = NULL;
wait_event_interruptible(sched->wake_up_worker,
- (drm_sched_cleanup_jobs(sched),
+ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop()));
+ kthread_should_stop());
+
+ if (cleanup_job) {
+ sched->ops->free_job(cleanup_job);
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
+ }
if (!entity)
continue;
sched_job = drm_sched_entity_pop_job(entity);
+
+ complete(&entity->entity_idle);
+
if (!sched_job)
continue;
@@ -741,8 +758,9 @@ static int drm_sched_main(void *param)
r);
dma_fence_put(fence);
} else {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
drm_sched_process_job(NULL, &sched_job->cb);
}
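
Both scheduler hunks enforce the same distinction: run_job() returning NULL (no hardware fence, e.g. during recovery) is not an error, and feeding PTR_ERR(NULL) == 0 into dma_fence_set_error() would trip its WARN on non-negative error codes. The guarded shape, as now used in both places:

	fence = sched->ops->run_job(s_job);
	if (IS_ERR_OR_NULL(fence)) {
		if (IS_ERR(fence))	/* real error: record it */
			dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
		/* NULL: proceed without a parent fence, no error recorded */
	}
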
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index aae88f8a016c..d2137342b371 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o
+ test-drm_damage_helper.o test-drm_dp_mst_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 464753746013..1898de0b4a4d 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -32,3 +32,5 @@ selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
+selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode)
+selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode)
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
new file mode 100644
index 000000000000..af2b2de65316
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for the DRM DP MST helpers
+ */
+
+#define PREFIX_STR "[drm_dp_mst_helper]"
+
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_print.h>
+
+#include "../drm_dp_mst_topology_internal.h"
+#include "test-drm_modeset_common.h"
+
+int igt_dp_mst_calc_pbn_mode(void *ignored)
+{
+ int pbn, i;
+ const struct {
+ int rate;
+ int bpp;
+ int expected;
+ } test_params[] = {
+ { 154000, 30, 689 },
+ { 234000, 30, 1047 },
+ { 297000, 24, 1063 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_params); i++) {
+ pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
+ test_params[i].bpp);
+ FAIL(pbn != test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
+ test_params[i].expected, test_params[i].rate,
+ test_params[i].bpp, pbn);
+ }
+
+ return 0;
+}
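
The expected values above are consistent with the DP 1.2 PBN definition (one PBN is 54/64 MB/s of payload bandwidth, padded by the spec's 0.6% margin), i.e. roughly, with the in-kernel helper as the real authority:

	/*
	 *   PBN = ceil(clock[kHz] * bpp * 64 * 1006 / (8 * 54 * 1000 * 1000))
	 *
	 * e.g. 154000 kHz at 30 bpp:
	 *   154000 * 30 * 64 * 1006 / 432000000 ~= 688.6  ->  689
	 */
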
+
+static bool
+sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
+ const struct drm_dp_sideband_msg_req_body *out)
+{
+ const struct drm_dp_remote_i2c_read_tx *txin, *txout;
+ int i;
+
+ if (in->req_type != out->req_type)
+ return false;
+
+ switch (in->req_type) {
+ /*
+ * Compare struct members manually for request types which can't be
+ * compared simply using memcmp(). This is because said request types
+	 * contain pointers to other allocated structs.
+ */
+ case DP_REMOTE_I2C_READ:
+#define IN in->u.i2c_read
+#define OUT out->u.i2c_read
+ if (IN.num_bytes_read != OUT.num_bytes_read ||
+ IN.num_transactions != OUT.num_transactions ||
+ IN.port_number != OUT.port_number ||
+ IN.read_i2c_device_id != OUT.read_i2c_device_id)
+ return false;
+
+ for (i = 0; i < IN.num_transactions; i++) {
+ txin = &IN.transactions[i];
+ txout = &OUT.transactions[i];
+
+ if (txin->i2c_dev_id != txout->i2c_dev_id ||
+ txin->no_stop_bit != txout->no_stop_bit ||
+ txin->num_bytes != txout->num_bytes ||
+ txin->i2c_transaction_delay !=
+ txout->i2c_transaction_delay)
+ return false;
+
+ if (memcmp(txin->bytes, txout->bytes,
+ txin->num_bytes) != 0)
+ return false;
+ }
+ break;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_DPCD_WRITE:
+#define IN in->u.dpcd_write
+#define OUT out->u.dpcd_write
+ if (IN.dpcd_address != OUT.dpcd_address ||
+ IN.num_bytes != OUT.num_bytes ||
+ IN.port_number != OUT.port_number)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_I2C_WRITE:
+#define IN in->u.i2c_write
+#define OUT out->u.i2c_write
+ if (IN.port_number != OUT.port_number ||
+ IN.write_i2c_device_id != OUT.write_i2c_device_id ||
+ IN.num_bytes != OUT.num_bytes)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ default:
+ return memcmp(in, out, sizeof(*in)) == 0;
+ }
+
+ return true;
+}
+
+static bool
+sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
+{
+ struct drm_dp_sideband_msg_req_body out = {0};
+ struct drm_printer p = drm_err_printer(PREFIX_STR);
+ struct drm_dp_sideband_msg_tx txmsg;
+ int i, ret;
+
+ drm_dp_encode_sideband_req(in, &txmsg);
+ ret = drm_dp_decode_sideband_req(&txmsg, &out);
+ if (ret < 0) {
+ drm_printf(&p, "Failed to decode sideband request: %d\n",
+ ret);
+ return false;
+ }
+
+ if (!sideband_msg_req_equal(in, &out)) {
+ drm_printf(&p, "Encode/decode failed, expected:\n");
+ drm_dp_dump_sideband_msg_req_body(in, 1, &p);
+ drm_printf(&p, "Got:\n");
+ drm_dp_dump_sideband_msg_req_body(&out, 1, &p);
+ return false;
+ }
+
+ switch (in->req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(out.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < out.u.i2c_read.num_transactions; i++)
+ kfree(out.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(out.u.i2c_write.bytes);
+ break;
+ }
+
+ /* Clear everything but the req_type for the input */
+ memset(&in->u, 0, sizeof(in->u));
+
+ return true;
+}
+
+int igt_dp_mst_sideband_msg_req_decode(void *unused)
+{
+ struct drm_dp_sideband_msg_req_body in = { 0 };
+ u8 data[] = { 0xff, 0x0, 0xdd };
+ int i;
+
+#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in))
+
+ in.req_type = DP_ENUM_PATH_RESOURCES;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_UP_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_DOWN_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_ALLOCATE_PAYLOAD;
+ in.u.allocate_payload.number_sdp_streams = 3;
+ for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
+ in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
+ DO_TEST();
+ in.u.allocate_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.allocate_payload.vcpi = 0x7f;
+ DO_TEST();
+ in.u.allocate_payload.pbn = U16_MAX;
+ DO_TEST();
+
+ in.req_type = DP_QUERY_PAYLOAD;
+ in.u.query_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.query_payload.vcpi = 0x7f;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_READ;
+ in.u.dpcd_read.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_read.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_read.num_bytes = U8_MAX;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_WRITE;
+ in.u.dpcd_write.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_write.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
+ in.u.dpcd_write.bytes = data;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_READ;
+ in.u.i2c_read.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_read.read_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_read.num_transactions = 3;
+ in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
+ for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
+ in.u.i2c_read.transactions[i].bytes = data;
+ in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
+ in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
+ }
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_WRITE;
+ in.u.i2c_write.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_write.write_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_write.bytes = data;
+ DO_TEST();
+
+#undef DO_TEST
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index 74d5561a862b..2d29ea6f92e2 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -126,7 +126,7 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
.handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
}
},
-{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 388f9844f4ba..9aabe82dcd3a 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node before start: node=%llx+%llu, start=%llx\n",
node->start, node->size, start);
return false;
@@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node after end: node=%llx+%llu, end=%llx\n",
node->start, node->size, end);
return false;
@@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count)
struct drm_mm_node *next = list_next_entry(hole, node_list);
const char *node1 = NULL, *node2 = NULL;
- if (hole->allocated)
+ if (drm_mm_node_allocated(hole))
node1 = kasprintf(GFP_KERNEL,
"[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
- if (next->allocated)
+ if (drm_mm_node_allocated(next))
node2 = kasprintf(GFP_KERNEL,
", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
@@ -1900,18 +1900,18 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
++*start;
node = list_next_entry(node, node_list);
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
--*end;
}
static bool colors_abutt(const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
- list_next_entry(node, node_list)->allocated) {
+ drm_mm_node_allocated(list_next_entry(node, node_list))) {
pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index 8c76f09c12d1..0fcb8bbc6a1b 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -39,5 +39,7 @@ int igt_damage_iter_damage_one_intersect(void *ignored);
int igt_damage_iter_damage_one_outside(void *ignored);
int igt_damage_iter_damage_src_moved(void *ignored);
int igt_damage_iter_damage_not_visible(void *ignored);
+int igt_dp_mst_calc_pbn_mode(void *ignored);
+int igt_dp_mst_sideband_msg_req_decode(void *ignored);
#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 0bf7c332cf0b..ea64c1dcaf63 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -47,7 +47,7 @@ struct dma_pixmap {
void *base;
};
-/**
+/*
* STI Cursor structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index e55870190bf5..68289b0b063a 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -65,7 +66,7 @@ static struct dvo_config rgb_24bit_de_cfg = {
.awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
};
-/**
+/*
* STI digital video output structure
*
* @dev: driver device
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 8e926cd6a1c8..11595c748844 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -103,7 +103,7 @@ struct sti_gdp_node_list {
dma_addr_t btm_field_paddr;
};
-/**
+/*
* STI GDP structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 94e404f13234..8f7bf33815fd 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -230,7 +231,7 @@ static const struct sti_hda_video_config hda_supported_modes[] = {
AWGi_720x480p_60, NN_720x480p_60, VID_ED}
};
-/**
+/*
* STI hd analog structure
*
* @dev: driver device
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 9862c322f0c4..814560ead4e1 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -9,11 +9,12 @@
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -333,7 +334,6 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
* Helper to concatenate infoframe in 32 bits word
*
* @ptr: pointer on the hdmi internal structure
- * @data: infoframe to write
* @size: size to write
*/
static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
@@ -543,13 +543,14 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
return 0;
}
+#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
+
/**
* Software reset of the hdmi subsystem
*
* @hdmi: pointer on the hdmi internal structure
*
*/
-#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
static void hdmi_swreset(struct sti_hdmi *hdmi)
{
u32 val;
@@ -1256,6 +1257,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hdmi_connector *connector;
+ struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
@@ -1318,6 +1320,14 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ cec_fill_conn_info_from_drm(&conn_info, drm_connector);
+ hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL,
+ &conn_info);
+ if (!hdmi->notifier) {
+ hdmi->drm_connector = NULL;
+ return -ENOMEM;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
@@ -1331,6 +1341,9 @@ err_sysfs:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ cec_notifier_conn_unregister(hdmi->notifier);
}
static const struct component_ops sti_hdmi_ops = {
@@ -1436,10 +1449,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
- hdmi->notifier = cec_notifier_get(&pdev->dev);
- if (!hdmi->notifier)
- goto release_adapter;
-
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
@@ -1459,14 +1468,11 @@ static int sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
- cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
-
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
- cec_notifier_put(hdmi->notifier);
return 0;
}
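
Taken together, the sti_hdmi hunks migrate from the old device-scoped cec_notifier_get()/cec_notifier_put() pair to the connector-scoped API. The new lifecycle, as a sketch with names taken from the hunks above and error handling trimmed:

	struct cec_connector_info conn_info;

	/* at bind time, once the drm_connector exists */
	cec_fill_conn_info_from_drm(&conn_info, drm_connector);
	hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL, &conn_info);
	if (!hdmi->notifier)
		return -ENOMEM;

	/* at unbind time */
	cec_notifier_conn_unregister(hdmi->notifier);
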
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index aba79c172512..5767e93dd1cd 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -157,9 +157,9 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
*
* @tvout: tvout structure
* @reg: register to set
- * @cr_r:
- * @y_g:
- * @cb_b:
+ * @cr_r: red chroma or red order
+ * @y_g: y or green order
+ * @cb_b: blue chroma or blue order
*/
static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
@@ -214,7 +214,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
* @tvout: tvout structure
* @reg: register to set
* @main_path: main or auxiliary path
- * @sel_input: selected_input (main/aux + conv)
+ * @video_out: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
int reg,
@@ -251,7 +251,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
*
* @tvout: tvout structure
* @reg: register to set
- * @in_vid_signed: used video input format
+ * @in_vid_fmt: used video input format
*/
static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
int reg, u32 in_vid_fmt)
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index ef4009f11396..0b17ac8a3faa 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -121,7 +121,7 @@ struct sti_vtg_sync_params {
u32 vsync_off_bot;
};
-/**
+/*
* STI VTG structure
*
* @regs: register mapping
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a03a642c147c..514efefb0016 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -260,8 +260,11 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
/* Compute requested pll out */
bpp = mipi_dsi_pixel_format_to_bpp(format);
pll_out_khz = mode->clock * bpp / lanes;
+
/* Add 20% to pll out to be higher than pixel bw (burst mode only) */
- pll_out_khz = (pll_out_khz * 12) / 10;
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ pll_out_khz = (pll_out_khz * 12) / 10;
+
if (pll_out_khz > dsi->lane_max_kbps) {
pll_out_khz = dsi->lane_max_kbps;
DRM_WARN("Warning max phy mbps is used\n");
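
Illustrative numbers for the newly gated 20% margin (not from the patch): a 148500 kHz mode at 24 bpp over 4 lanes needs

	pll_out_khz = 148500 * 24 / 4;		/* = 891000 kHz              */
	pll_out_khz = (pll_out_khz * 12) / 10;	/* = 1069200 kHz, burst only */

so only burst-mode panels pay for the headroom; plain video modes now run the PHY at the exact pixel bandwidth (subject to the lane_max_kbps clamp just below).
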
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3ab4fbf8eb0d..5b51298921cf 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -1040,6 +1041,36 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static void ltdc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+	/* Set the pinctrl to sleep state, whatever the encoder type */
+ pinctrl_pm_select_sleep_state(ddev->dev);
+}
+
+static void ltdc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /*
+	 * Set the pinctrl to default state only for the DPI type.
+	 * Other types, like DSI, don't need pinctrl because of the
+	 * internal bridge (the signals never leave the chipset).
+ */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPI)
+ pinctrl_pm_select_default_state(ddev->dev);
+}
+
+static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
+ .disable = ltdc_encoder_disable,
+ .enable = ltdc_encoder_enable,
+};
+
static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct drm_encoder *encoder;
@@ -1055,6 +1086,8 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
+ drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
+
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
@@ -1236,8 +1269,8 @@ int ltdc_load(struct drm_device *ddev)
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
- bridge[i] = drm_panel_bridge_add(panel[i],
- DRM_MODE_CONNECTOR_DPI);
+ bridge[i] = drm_panel_bridge_add_typed(panel[i],
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge[i])) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge[i]);
@@ -1280,6 +1313,8 @@ int ltdc_load(struct drm_device *ddev)
clk_disable_unprepare(ldev->pixel_clk);
+ pinctrl_pm_select_sleep_state(ddev->dev);
+
pm_runtime_enable(ddev->dev);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index eb8071a4d6d0..a7c4654445c7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -490,6 +490,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
struct resource *res;
@@ -629,8 +630,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
- hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
+ hdmi, "sun4i", CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_cleanup_connector;
@@ -649,6 +649,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
"Couldn't initialise the HDMI connector\n");
goto err_cleanup_connector;
}
+ cec_fill_conn_info_from_drm(&conn_info, &hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
/* There is no HPD interrupt, so we need to poll the controller */
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
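For reference, CEC_CAP_DEFAULTS in the CEC uAPI expands to CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | CEC_CAP_PASSTHROUGH | CEC_CAP_RC, so the new capability set is a strict superset of the flags it replaces, adding only CEC_CAP_CONNECTOR_INFO.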
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 7fbf425acb55..25ab2ef6d545 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index aac56983f208..e74b9eddca01 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 04c721d0d3b9..42651d737c55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -16,6 +16,7 @@
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
@@ -488,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 6;
+ tcon->dclk_min_div = 1;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 1636344ba9ec..c958ca9bae63 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -365,8 +366,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
- u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+ u16 delay = mode->vtotal - (mode->vsync_start - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
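As a worked example with hypothetical timings (vtotal = 525, vdisplay = 480, vsync_start = 490), the new formula gives delay = 525 - (490 - 480) + 1 = 516; since 516 < vtotal, the modulo wrap above is a no-op.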
@@ -437,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
- } else if ((mode->hsync_end - mode->hdisplay) > 20) {
+ } else if ((mode->hsync_start - mode->hdisplay) > 20) {
/* Maaaaaagic */
- u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
+ u16 drq = (mode->hsync_start - mode->hdisplay) - 20;
drq *= mipi_dsi_pixel_format_to_bpp(device->format);
drq /= 32;
@@ -569,11 +569,12 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
- * The frontporch is set using a blanking packet (4
- * bytes + payload + 2 bytes). Its minimal size is
- * therefore 6 bytes
+ * The frontporch is set using a sync event (4 bytes)
+ * and two blanking packets (each one is 4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore
+ * 16 bytes
*/
-#define HFP_PACKET_OVERHEAD 6
+#define HFP_PACKET_OVERHEAD 16
hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
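The 16-byte figure follows directly from the layout described in the comment: one 4-byte sync event plus two blanking packets at 4 + 2 bytes of overhead each, i.e. 4 + 2 * (4 + 2) = 16 bytes.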
@@ -831,8 +832,8 @@ static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
u32 pkt = msg->type;
if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
- pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
- pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
+ pkt |= ((msg->tx_len) & 0xffff) << 8;
+ pkt |= (((msg->tx_len) >> 8) & 0xffff) << 16;
} else {
pkt |= (((u8 *)msg->tx_buf)[0] << 8);
if (msg->tx_len > 1)
@@ -1100,6 +1101,12 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
+ dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
+ if (IS_ERR(dsi->regulator)) {
+ dev_err(dev, "Couldn't get VCC-DSI supply\n");
+ return PTR_ERR(dsi->regulator);
+ }
+
dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
&sun6i_dsi_regmap_config);
if (IS_ERR(dsi->regs)) {
@@ -1173,6 +1180,13 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->regulator);
+ if (err) {
+ dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+ return err;
+ }
reset_control_deassert(dsi->reset);
clk_prepare_enable(dsi->mod_clk);
@@ -1205,6 +1219,7 @@ static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
clk_disable_unprepare(dsi->mod_clk);
reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 5c3ad5be0690..3f4846f581ef 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -23,6 +23,7 @@ struct sun6i_dsi {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
+ struct regulator *regulator;
struct reset_control *reset;
struct phy *dphy;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a44dca4b0219..e8a317d5ba19 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -226,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
plat_data->mode_valid = hdmi->quirks->mode_valid;
+ plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -300,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+ .use_drm_infoframe = true,
};
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d707c9171824..8e64945167e9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,6 +179,7 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
+ unsigned int use_drm_infoframe : 1;
};
struct sun8i_dw_hdmi {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1d1269fde3c1..5043dcaf1cf9 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,7 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 33c463e8d49f..d6cf202414f0 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -5,6 +5,7 @@ tegra-drm-y := \
drm.o \
gem.o \
fb.o \
+ dp.o \
hub.o \
plane.o \
dc.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbf57bc3cdab..5b1f9ff97576 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.swap = state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
-
- window.base[i] = bo->paddr + fb->offsets[i];
+ window.base[i] = state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
@@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
@@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
return;
}
- value |= (bo->paddr >> 10) & 0x3fffff;
+ value |= (bo->iova >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (bo->paddr >> 32) & 0x3;
+ value = (bo->iova >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
@@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client)
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
- dc->group = host1x_client_iommu_attach(client, true);
- if (IS_ERR(dc->group)) {
- err = PTR_ERR(dc->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
@@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
cleanup:
@@ -2083,7 +2090,7 @@ cleanup:
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return err;
@@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
@@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 0c4d17851f47..3d8ddccd758f 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -90,8 +90,6 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
-
- struct iommu_group *group;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
new file mode 100644
index 000000000000..70dfb7d1dec5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp.h"
+
+static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
+
+static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+ caps->alternate_scrambler_reset = false;
+}
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src)
+{
+ dest->enhanced_framing = src->enhanced_framing;
+ dest->tps3_supported = src->tps3_supported;
+ dest->fast_training = src->fast_training;
+ dest->channel_coding = src->channel_coding;
+ dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
+}
+
+static void drm_dp_link_reset(struct drm_dp_link *link)
+{
+ unsigned int i;
+
+ if (!link)
+ return;
+
+ link->revision = 0;
+ link->max_rate = 0;
+ link->max_lanes = 0;
+
+ drm_dp_link_caps_reset(&link->caps);
+ link->aux_rd_interval.cr = 0;
+ link->aux_rd_interval.ce = 0;
+ link->edp = 0;
+
+ link->rate = 0;
+ link->lanes = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = 0;
+}
+
+/**
+ * drm_dp_link_add_rate() - add a rate to the list of supported rates
+ * @link: the link to add the rate to
+ * @rate: the rate to add
+ *
+ * Add a link rate to the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXIST if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i, pivot;
+
+ if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+ return -ENOSPC;
+
+ for (pivot = 0; pivot < link->num_rates; pivot++)
+ if (rate <= link->rates[pivot])
+ break;
+
+ if (pivot != link->num_rates && rate == link->rates[pivot])
+ return -EEXIST;
+
+ for (i = link->num_rates; i > pivot; i--)
+ link->rates[i] = link->rates[i - 1];
+
+ link->rates[pivot] = rate;
+ link->num_rates++;
+
+ return 0;
+}
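A minimal usage sketch for the helper above (the rates are illustrative, in kHz as elsewhere in the DRM DP helpers):

	struct drm_dp_link link = { 0 };

	/* insertions keep link.rates sorted in ascending order */
	drm_dp_link_add_rate(&link, 270000);
	drm_dp_link_add_rate(&link, 162000);
	drm_dp_link_add_rate(&link, 270000);	/* returns -EEXIST */

	/* link.rates is now { 162000, 270000 }, link.num_rates == 2 */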
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < link->num_rates; i++)
+ if (rate == link->rates[i])
+ break;
+
+ if (i == link->num_rates)
+ return -EINVAL;
+
+ link->num_rates--;
+
+ while (i < link->num_rates) {
+ link->rates[i] = link->rates[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
+ *
+ * See also:
+ * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
+ */
+void drm_dp_link_update_rates(struct drm_dp_link *link)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < link->num_rates; i++) {
+ if (link->rates[i] != 0)
+ link->rates[count++] = link->rates[i];
+ }
+
+ for (i = count; i < link->num_rates; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = count;
+}
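For example, manually zeroing an entry (link.rates[0] = 0) and then calling drm_dp_link_update_rates(&link) shifts the remaining non-zero rates to the front, clears the tail and decrements link.num_rates to match.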
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
+ unsigned int rd_interval;
+ int err;
+
+ drm_dp_link_reset(link);
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
+ if (err < 0)
+ return err;
+
+ link->revision = dpcd[DP_DPCD_REV];
+ link->max_rate = drm_dp_max_link_rate(dpcd);
+ link->max_lanes = drm_dp_max_lane_count(dpcd);
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ link->caps.alternate_scrambler_reset = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
+ if (err < 0)
+ return err;
+
+ if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
+ DRM_ERROR("unsupported eDP version: %02x\n", value);
+ else
+ link->edp = drm_dp_edp_revisions[value];
+ }
+
+ /*
+ * The DPCD stores the AUX read interval in units of 4 ms. There are
+ * two special cases:
+ *
+ * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
+ * and channel equalization should use 100 us or 400 us AUX read
+ * intervals, respectively
+ *
+ * 2) for DP v1.4 and above, clock recovery should always use 100 us
+ * AUX read intervals
+ */
+ rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4) {
+ DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
+ rd_interval);
+ rd_interval = 4;
+ }
+
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
+ link->aux_rd_interval.cr = 100;
+
+ if (rd_interval == 0)
+ link->aux_rd_interval.ce = 400;
+
+ link->rate = link->max_rate;
+ link->lanes = link->max_lanes;
+
+ /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
+ if (link->edp >= 0x14) {
+ u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
+ unsigned int i;
+ u16 rate;
+
+ err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
+ supported_rates,
+ sizeof(supported_rates));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
+ rate = supported_rates[i * 2 + 1] << 8 |
+ supported_rates[i * 2 + 0];
+
+ drm_dp_link_add_rate(link, rate * 200);
+ }
+ }
+
+ return 0;
+}
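Two unit conversions in the probe above are easy to trip over, so here is a sketch with illustrative raw DPCD values (not read from real hardware):

	/* SUPPORTED_LINK_RATES entries are 16-bit LE, in units of 200 kHz */
	u8 supported_rates[2] = { 0x2a, 0x03 };	/* LE 0x032a = 810 */
	u16 rate = supported_rates[1] << 8 | supported_rates[0];
	unsigned long khz = rate * 200;		/* 162000 kHz, i.e. RBR */

	/* TRAINING_AUX_RD_INTERVAL is stored in units of 4 ms */
	unsigned int rd_interval = 2;		/* raw DPCD field */
	unsigned long us = rd_interval * 4 * USEC_PER_MSEC;	/* 8000 us */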
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field", register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2], value;
+ int err;
+
+ if (link->ops && link->ops->configure) {
+ err = link->ops->configure(link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+ }
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->lanes;
+
+ if (link->caps.enhanced_framing)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ if (link->caps.channel_coding)
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (err < 0)
+ return err;
+
+ if (link->caps.alternate_scrambler_reset) {
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_choose() - choose the lowest possible configuration for a mode
+ * @link: DRM DP link object
+ * @mode: DRM display mode
+ * @info: DRM display information
+ *
+ * According to the eDP specification, a source should select a configuration
+ * with the lowest number of lanes and the lowest possible link rate that can
+ * match the bitrate requirements of a video mode. However it must ensure not
+ * to exceed the capabilities of the sink.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ /* available link symbol clock rates */
+ static const unsigned int rates[3] = { 162000, 270000, 540000 };
+ /* available number of lanes */
+ static const unsigned int lanes[3] = { 1, 2, 4 };
+ unsigned long requirement, capacity;
+ unsigned int rate = link->max_rate;
+ unsigned int i, j;
+
+ /* bandwidth requirement */
+ requirement = mode->clock * info->bpc * 3;
+
+ for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
+ for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
+ /*
+ * Capacity for this combination of lanes and rate,
+ * factoring in the ANSI 8B/10B encoding.
+ *
+ * Link rates in the DRM DP helpers are really link
+ * symbol frequencies, so a tenth of the actual rate
+ * of the link.
+ */
+ capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
+
+ if (capacity >= requirement) {
+ DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
+ lanes[i], rates[j], requirement,
+ capacity);
+ link->lanes = lanes[i];
+ link->rate = rates[j];
+ return 0;
+ }
+ }
+ }
+
+ return -ERANGE;
+}
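As a worked example (hypothetical 1080p60 mode, 148500 kHz pixel clock, 8 bpc): the requirement is 148500 * 8 * 3 = 3564000 kbps. A single lane offers 1 * 1620000 * 8 / 10 = 1296000 kbps at 162000 and 2160000 kbps at 270000, both too low, but 4320000 kbps at 540000, so the loop settles on one lane at HBR2 before ever trying two lanes.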
+
+/**
+ * DOC: Link training
+ *
+ * These functions contain common logic and helpers to implement DisplayPort
+ * link training.
+ */
+
+/**
+ * drm_dp_link_train_init() - initialize DisplayPort link training state
+ * @train: DisplayPort link training state
+ */
+void drm_dp_link_train_init(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = 0;
+ adjust->voltage_swing[i] = 0;
+
+ request->pre_emphasis[i] = 0;
+ adjust->pre_emphasis[i] = 0;
+
+ request->post_cursor[i] = 0;
+ adjust->post_cursor[i] = 0;
+ }
+
+ train->pattern = DP_TRAINING_PATTERN_DISABLE;
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int drm_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct drm_dp_link_train_set *request = &link->train.request;
+ unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
+ struct drm_dp_aux *aux = link->aux;
+ u8 values[4], pattern = 0;
+ int err;
+
+ err = link->ops->apply_training(link);
+ if (err < 0) {
+ DRM_ERROR("failed to apply link training: %d\n", err);
+ return err;
+ }
+
+ vs = request->voltage_swing;
+ pe = request->pre_emphasis;
+ pc = request->post_cursor;
+
+ /* write currently selected voltage-swing and pre-emphasis levels */
+ for (i = 0; i < lanes; i++)
+ values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
+ DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
+ if (err < 0) {
+ DRM_ERROR("failed to set training parameters: %d\n", err);
+ return err;
+ }
+
+ /* write currently selected post-cursor level (if supported) */
+ if (link->revision >= 0x12 && link->rate == 540000) {
+ values[0] = values[1] = 0;
+
+ for (i = 0; i < lanes; i++)
+ values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+ DIV_ROUND_UP(lanes, 2));
+ if (err < 0) {
+ DRM_ERROR("failed to set post-cursor: %d\n", err);
+ return err;
+ }
+ }
+
+ /* write link pattern */
+ if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
+ pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ pattern |= link->train.pattern;
+
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0) {
+ DRM_ERROR("failed to set training pattern: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_wait(struct drm_dp_link *link)
+{
+ unsigned long min = 0;
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_1:
+ min = link->aux_rd_interval.cr;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ min = link->aux_rd_interval.ce;
+ break;
+
+ default:
+ break;
+ }
+
+ if (min > 0)
+ usleep_range(min, 2 * min);
+}
+
+static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct drm_dp_link_train_set *adjust = &link->train.adjust;
+ unsigned int i;
+
+ for (i = 0; i < link->lanes; i++) {
+ adjust->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(status, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ adjust->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(status, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ adjust->post_cursor[i] =
+ drm_dp_get_adjust_request_post_cursor(status, i);
+ }
+}
+
+static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ if (request->voltage_swing[i] != adjust->voltage_swing[i])
+ request->voltage_swing[i] = adjust->voltage_swing[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
+ request->pre_emphasis[i] = adjust->pre_emphasis[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->post_cursor[i] != adjust->post_cursor[i])
+ request->post_cursor[i] = adjust->post_cursor[i];
+}
+
+static int drm_dp_link_recover_clock(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.clock_recovered = true;
+
+ return 0;
+}
+
+static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start clock recovery using training pattern 1 */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_recover_clock(link);
+ if (err < 0) {
+ DRM_ERROR("failed to recover clock: %d\n", err);
+ return err;
+ }
+
+ if (link->train.clock_recovered)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
+{
+ struct drm_dp_aux *aux = link->aux;
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ return 0;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.channel_equalized = true;
+
+ return 0;
+}
+
+static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start channel equalization using pattern 2 or 3 */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_equalize_channel(link);
+ if (err < 0) {
+ DRM_ERROR("failed to equalize channel: %d\n", err);
+ return err;
+ }
+
+ if (link->train.channel_equalized)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_downgrade(struct drm_dp_link *link)
+{
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+
+ case 270000:
+ link->rate = 162000;
+ break;
+
+ case 540000:
+ link->rate = 270000;
+ return 0;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_disable(struct drm_dp_link *link)
+{
+ int err;
+
+ link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ DRM_ERROR("failed to disable link training: %d\n", err);
+}
+
+static int drm_dp_link_train_full(struct drm_dp_link *link)
+{
+ int err;
+
+retry:
+ DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ err = drm_dp_link_clock_recovery(link);
+ if (err < 0) {
+ DRM_ERROR("clock recovery failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ DRM_ERROR("clock recovery failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("clock recovery succeeded\n");
+
+ err = drm_dp_link_channel_equalization(link);
+ if (err < 0) {
+ DRM_ERROR("channel equalization failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ DRM_ERROR("channel equalization failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("channel equalization succeeded\n");
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+static int drm_dp_link_train_fast(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ /* transmit training pattern 1 for 500 microseconds */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ /* transmit training pattern 2 or 3 for 500 microseconds */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery failed\n");
+ err = -EIO;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ DRM_ERROR("channel equalization failed\n");
+ err = -EIO;
+ }
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+/**
+ * drm_dp_link_train() - perform DisplayPort link training
+ * @link: a DP link object
+ *
+ * Uses the context stored in the DP link object to perform link training. It
+ * is expected that drivers will call drm_dp_link_probe() to obtain the link
+ * capabilities before performing link training.
+ *
+ * If the sink supports fast link training (no AUX CH handshake) and valid
+ * training settings are available, this function will try to perform fast
+ * link training and fall back to full link training on failure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_train(struct drm_dp_link *link)
+{
+ int err;
+
+ drm_dp_link_train_init(&link->train);
+
+ if (link->caps.fast_training) {
+ if (drm_dp_link_train_valid(&link->train)) {
+ err = drm_dp_link_train_fast(link);
+ if (err < 0)
+ DRM_ERROR("fast link training failed: %d\n",
+ err);
+ else
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("training parameters not available\n");
+ }
+ } else {
+ DRM_DEBUG_KMS("fast link training not supported\n");
+ }
+
+ err = drm_dp_link_train_full(link);
+ if (err < 0)
+ DRM_ERROR("full link training failed: %d\n", err);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
new file mode 100644
index 000000000000..cb12ed0c54e7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation.
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#ifndef DRM_TEGRA_DP_H
+#define DRM_TEGRA_DP_H 1
+
+#include <linux/types.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_dp_aux;
+struct drm_dp_link;
+
+/**
+ * struct drm_dp_link_caps - DP link capabilities
+ */
+struct drm_dp_link_caps {
+ /**
+ * @enhanced_framing:
+ *
+ * enhanced framing capability (mandatory as of DP 1.2)
+ */
+ bool enhanced_framing;
+
+ /**
+ * @tps3_supported:
+ *
+ * training pattern sequence 3 supported for equalization
+ */
+ bool tps3_supported;
+
+ /**
+ * @fast_training:
+ *
+ * AUX CH handshake not required for link training
+ */
+ bool fast_training;
+
+ /**
+ * @channel_coding:
+ *
+ * ANSI 8B/10B channel coding capability
+ */
+ bool channel_coding;
+
+ /**
+ * @alternate_scrambler_reset:
+ *
+ * eDP alternate scrambler reset capability
+ */
+ bool alternate_scrambler_reset;
+};
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src);
+
+/**
+ * struct drm_dp_link_ops - DP link operations
+ */
+struct drm_dp_link_ops {
+ /**
+ * @apply_training:
+ */
+ int (*apply_training)(struct drm_dp_link *link);
+
+ /**
+ * @configure:
+ */
+ int (*configure)(struct drm_dp_link *link);
+};
+
+#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
+#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
+#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
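A quick sketch of how DP_LANE_POST_CURSOR packs two lanes per byte (the levels are illustrative):

	/* lane 0 lands in bits 1:0, lane 1 in bits 5:4 of the same byte */
	u8 value = DP_LANE_POST_CURSOR(0, 0x2) | DP_LANE_POST_CURSOR(1, 0x1);
	/* = (0x2 << 0) | (0x1 << 4) = 0x12 */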
+
+/**
+ * struct drm_dp_link_train_set - link training settings
+ * @voltage_swing: per-lane voltage swing
+ * @pre_emphasis: per-lane pre-emphasis
+ * @post_cursor: per-lane post-cursor
+ */
+struct drm_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ unsigned int post_cursor[4];
+};
+
+/**
+ * struct drm_dp_link_train - link training state information
+ * @request: currently requested settings
+ * @adjust: adjustments requested by sink
+ * @pattern: currently requested training pattern
+ * @clock_recovered: flag to track if clock recovery has completed
+ * @channel_equalized: flag to track if channel equalization has completed
+ */
+struct drm_dp_link_train {
+ struct drm_dp_link_train_set request;
+ struct drm_dp_link_train_set adjust;
+
+ unsigned int pattern;
+
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+/**
+ * struct drm_dp_link - DP link capabilities and configuration
+ * @revision: DP specification revision supported on the link
+ * @max_rate: maximum clock rate supported on the link
+ * @max_lanes: maximum number of lanes supported on the link
+ * @caps: capabilities supported on the link (see &drm_dp_link_caps)
+ * @aux_rd_interval: AUX read interval to use for training (in microseconds)
+ * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate
+ * @lanes: currently configured number of lanes
+ * @rates: additional supported link rates in kHz (eDP 1.4)
+ * @num_rates: number of additional supported link rates (eDP 1.4)
+ */
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int max_rate;
+ unsigned int max_lanes;
+
+ struct drm_dp_link_caps caps;
+
+ /**
+ * @cr: clock recovery read interval
+ * @ce: channel equalization read interval
+ */
+ struct {
+ unsigned int cr;
+ unsigned int ce;
+ } aux_rd_interval;
+
+ unsigned char edp;
+
+ unsigned int rate;
+ unsigned int lanes;
+
+ unsigned long rates[DP_MAX_SUPPORTED_RATES];
+ unsigned int num_rates;
+
+ /**
+ * @ops: DP link operations
+ */
+ const struct drm_dp_link_ops *ops;
+
+ /**
+ * @aux: DP AUX channel
+ */
+ struct drm_dp_aux *aux;
+
+ /**
+ * @train: DP link training state
+ */
+ struct drm_dp_link_train train;
+};
+
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
+void drm_dp_link_update_rates(struct drm_dp_link *link);
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+
+void drm_dp_link_train_init(struct drm_dp_link_train *train);
+int drm_dp_link_train(struct drm_dp_link *link);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index a0f6f9b0d258..622cdf1ad246 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
@@ -22,6 +23,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
+#include "dp.h"
#include "dpaux.h"
#include "drm.h"
#include "trace.h"
@@ -29,10 +31,18 @@
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
+struct tegra_dpaux_soc {
+ unsigned int cmh;
+ unsigned int drvz;
+ unsigned int drvi;
+};
+
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
+ const struct tegra_dpaux_soc *soc;
+
void __iomem *regs;
int irq;
@@ -120,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
+ u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
@@ -214,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
- msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
- msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
- msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
- msg->reply = DP_AUX_I2C_REPLY_NACK;
+ reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
- msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
@@ -238,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
- if (WARN_ON(count != msg->size))
- count = min_t(size_t, count, msg->size);
+ /*
+ * There might be a smarter way to do this, but since
+ * the DP helpers will already retry transactions for
+ * an -EBUSY return value, simply reuse that instead.
+ */
+ if (count != msg->size) {
+ ret = -EBUSY;
+ goto out;
+ }
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
+ msg->reply = reply;
+
+out:
return ret;
}
@@ -310,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
@@ -320,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
@@ -436,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
+ dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
@@ -493,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->vdd);
}
+
+ dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
@@ -641,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
+static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x18,
+};
+
+static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x30,
+};
+
+static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x2c,
+};
+
static const struct of_device_id tegra_dpaux_of_match[] = {
- { .compatible = "nvidia,tegra194-dpaux", },
- { .compatible = "nvidia,tegra186-dpaux", },
- { .compatible = "nvidia,tegra210-dpaux", },
- { .compatible = "nvidia,tegra124-dpaux", },
+ { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
+ { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
@@ -686,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
- err = regulator_enable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_connected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_connected) {
- enable_irq(dpaux->irq);
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_connected)
+ return -ETIMEDOUT;
}
- return -ETIMEDOUT;
+ enable_irq(dpaux->irq);
+ return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
@@ -715,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
disable_irq(dpaux->irq);
- err = regulator_disable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (dpaux->output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_disconnected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_disconnected) {
- dpaux->output = NULL;
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_disconnected)
+ return -ETIMEDOUT;
+
+ dpaux->output = NULL;
}
- return -ETIMEDOUT;
+ return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
@@ -764,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
return 0;
}
-
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
-{
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern)
-{
- u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
- u8 status[DP_LINK_STATUS_SIZE], values[4];
- unsigned int i;
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
- if (err < 0)
- return err;
-
- if (tp == DP_TRAINING_PATTERN_DISABLE)
- return 0;
-
- for (i = 0; i < link->num_lanes; i++)
- values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
- DP_TRAIN_PRE_EMPH_LEVEL_0 |
- DP_TRAIN_MAX_SWING_REACHED |
- DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
- err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
- link->num_lanes);
- if (err < 0)
- return err;
-
- usleep_range(500, 1000);
-
- err = drm_dp_dpcd_read_link_status(aux, status);
- if (err < 0)
- return err;
-
- switch (tp) {
- case DP_TRAINING_PATTERN_1:
- if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- case DP_TRAINING_PATTERN_2:
- if (!drm_dp_channel_eq_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- default:
- dev_err(aux->dev, "unsupported training pattern %u\n", tp);
- return -EINVAL;
- }
-
- err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6fb7d74ff553..56e5e7a5c108 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -20,10 +20,6 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "drm.h"
#include "gem.h"
@@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra;
- int err;
-
- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- if (iommu_present(&platform_bus_type)) {
- tegra->domain = iommu_domain_alloc(&platform_bus_type);
- if (!tegra->domain) {
- err = -ENOMEM;
- goto free;
- }
-
- err = iova_cache_get();
- if (err < 0)
- goto domain;
- }
-
- mutex_init(&tegra->clients_lock);
- INIT_LIST_HEAD(&tegra->clients);
-
- drm->dev_private = tegra;
- tegra->drm = drm;
-
- drm_mode_config_init(drm);
-
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.allow_fb_modifiers = true;
-
- drm->mode_config.normalize_zpos = true;
-
- drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
- drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
-
- err = tegra_drm_fb_prepare(drm);
- if (err < 0)
- goto config;
-
- drm_kms_helper_poll_init(drm);
-
- err = host1x_device_init(device);
- if (err < 0)
- goto fbdev;
-
- if (tegra->domain) {
- u64 carveout_start, carveout_end, gem_start, gem_end;
- u64 dma_mask = dma_get_mask(&device->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- start = tegra->domain->geometry.aperture_start & dma_mask;
- end = tegra->domain->geometry.aperture_end & dma_mask;
-
- gem_start = start;
- gem_end = end - CARVEOUT_SZ;
- carveout_start = gem_end + 1;
- carveout_end = end;
-
- order = __ffs(tegra->domain->pgsize_bitmap);
- init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order);
-
- tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
- tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
-
- drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
- mutex_init(&tegra->mm_lock);
-
- DRM_DEBUG("IOMMU apertures:\n");
- DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
- DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
- carveout_end);
- }
-
- if (tegra->hub) {
- err = tegra_display_hub_prepare(tegra->hub);
- if (err < 0)
- goto device;
- }
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = true;
-
- /* syncpoints are used for full 32-bit hardware VBLANK counters */
- drm->max_vblank_count = 0xffffffff;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- goto hub;
-
- drm_mode_config_reset(drm);
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- goto hub;
-
- return 0;
-
-hub:
- if (tegra->hub)
- tegra_display_hub_cleanup(tegra->hub);
-device:
- host1x_device_exit(device);
-fbdev:
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_free(drm);
-config:
- drm_mode_config_cleanup(drm);
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- }
-domain:
- if (tegra->domain)
- iommu_domain_free(tegra->domain);
-free:
- kfree(tegra);
- return err;
-}
-
-static void tegra_drm_unload(struct drm_device *drm)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra = drm->dev_private;
- int err;
-
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
-
- err = host1x_device_exit(device);
- if (err < 0)
- return;
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- iommu_domain_free(tegra->domain);
- }
-
- kfree(tegra);
-}
-
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
@@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
+ dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
+
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
@@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
@@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared)
+int host1x_client_iommu_attach(struct host1x_client *client)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
+ /*
+ * If the host1x client is already attached to an IOMMU domain that is
+ * not the shared IOMMU domain, don't try to attach it to a different
+ * domain. This allows using the IOMMU-backed DMA API.
+ */
+ if (domain && domain != tegra->domain)
+ return 0;
+
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group) {
dev_err(client->dev, "failed to get IOMMU group\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
- if (!shared || (shared && (group != tegra->group))) {
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (client->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(client->dev);
- arm_iommu_detach_device(client->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
+ if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
- return ERR_PTR(err);
+ return err;
}
-
- if (shared && !tegra->group)
- tegra->group = group;
}
+
+ tegra->use_explicit_iommu = true;
}
- return group;
+ client->group = group;
+
+ return 0;
}
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group)
+void host1x_client_iommu_detach(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_domain *domain;
- if (group) {
- if (group == tegra->group) {
- iommu_detach_group(tegra->domain, group);
- tegra->group = NULL;
- }
+ if (client->group) {
+ /*
+ * Devices that are part of the same group may no longer be
+ * attached to a domain at this point because their group may
+ * have been detached by an earlier client.
+ */
+ domain = iommu_get_domain_for_dev(client->dev);
+ if (domain)
+ iommu_detach_group(tegra->domain, client->group);
- iommu_group_put(group);
+ iommu_group_put(client->group);
+ client->group = NULL;
}
}
@@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
+ struct iommu_domain *domain;
+ struct tegra_drm *tegra;
struct drm_device *drm;
int err;
@@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (IS_ERR(drm))
return PTR_ERR(drm);
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+ * likely to be allocated beyond the 32-bit boundary if sufficient
+ * system memory is available. This is problematic on earlier Tegra
+ * generations where host1x supports a maximum of 32 address bits in
+ * the GATHER opcode. In this case, unless host1x is behind an IOMMU
+ * as well it won't be able to process buffers allocated beyond the
+ * 32-bit boundary.
+ *
+ * The DMA API will use bounce buffers in this case, so that could
+ * perhaps still be made to work, even if less efficient, but there
+ * is another catch: in order to perform cache maintenance on pages
+ * allocated for discontiguous buffers we need to map and unmap the
+ * SG table representing these buffers. This is fine for something
+ * small like a push buffer, but it exhausts the bounce buffer pool
+ * (typically on the order of a few MiB) for framebuffers (many MiB
+ * for any modern resolution).
+ *
+ * Work around this by making sure that Tegra DRM clients only use
+ * an IOMMU if the parent host1x also uses an IOMMU.
+ *
+ * Note that there's still a small gap here that we don't cover: if
+ * the DMA API is backed by an IOMMU there's no way to control which
+ * device is attached to an IOMMU and which isn't, except via wiring
+ * up the device tree appropriately. This is considered a problem
+ * of integration, so care must be taken for the DT to be consistent.
+ */
+ domain = iommu_get_domain_for_dev(drm->dev->parent);
+
+ if (domain && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto domain;
+ }
+
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+
dev_set_drvdata(&dev->dev, drm);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
- err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.normalize_zpos = true;
+
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
+
+ err = tegra_drm_fb_prepare(drm);
if (err < 0)
- goto put;
+ goto config;
+
+ drm_kms_helper_poll_init(drm);
+
+ err = host1x_device_init(dev);
+ if (err < 0)
+ goto fbdev;
+
+ if (tegra->use_explicit_iommu) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
+ u64 dma_mask = dma_get_mask(&dev->dev);
+ dma_addr_t start, end;
+ unsigned long order;
+
+ start = tegra->domain->geometry.aperture_start & dma_mask;
+ end = tegra->domain->geometry.aperture_end & dma_mask;
+
+ gem_start = start;
+ gem_end = end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG_DRIVER("IOMMU apertures:\n");
+ DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
+ } else if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ tegra->domain = NULL;
+ iova_cache_put();
+ }
+
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK ioctl to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ goto hub;
+
+ drm_mode_config_reset(drm);
+
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
+ false);
+ if (err < 0)
+ goto hub;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
- goto put;
+ goto fb;
return 0;
+fb:
+ tegra_drm_fb_exit(drm);
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+device:
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
+
+ host1x_device_exit(dev);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+free:
+ kfree(tegra);
put:
drm_dev_put(drm);
return err;
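
The explicit-IOMMU branch in the probe path splits the usable aperture into a
GEM region and a top-end carveout. A standalone sketch of that arithmetic,
with EXAMPLE_CARVEOUT_SZ standing in for the driver's CARVEOUT_SZ (an assumed
value; the real definition lives elsewhere in drm.c), could read:

    #include <linux/sizes.h>
    #include <linux/types.h>

    /* Assumed size for illustration only. */
    #define EXAMPLE_CARVEOUT_SZ SZ_64M

    /* Split an IOMMU aperture into a GEM region and a top-end carveout. */
    static void example_split_aperture(u64 start, u64 end, u64 dma_mask,
                                       u64 *gem_start, u64 *gem_end,
                                       u64 *carveout_start, u64 *carveout_end)
    {
            /* Clamp the aperture to what the device can address. */
            start &= dma_mask;
            end &= dma_mask;

            *gem_start = start;
            *gem_end = end - EXAMPLE_CARVEOUT_SZ;
            *carveout_start = *gem_end + 1;
            *carveout_end = end;

            /*
             * With a 32-bit aperture and a 64 MiB carveout this yields
             * GEM at 0x00000000-0xfbffffff and the carveout at
             * 0xfc000000-0xffffffff.
             */
    }

The carveout is then addressed in IOVA pages via __ffs(pgsize_bitmap), which
picks the smallest page size the domain supports as the IOVA granularity.
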
@@ -1229,8 +1233,29 @@ put:
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_mode_config_cleanup(drm);
+
+ err = host1x_device_exit(dev);
+ if (err < 0)
+ dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
+
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ iommu_domain_free(tegra->domain);
+ }
+
+ kfree(tegra);
drm_dev_put(drm);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 29911eff9ceb..d941553f7a3d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -36,7 +36,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
- struct iommu_group *group;
+ bool use_explicit_iommu;
struct mutex mm_lock;
struct drm_mm mm;
@@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared);
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group);
+int host1x_client_iommu_attach(struct host1x_client *client);
+void host1x_client_iommu_detach(struct host1x_client *client);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
@@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
-struct drm_dp_link;
-
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
int drm_dp_aux_detach(struct drm_dp_aux *aux);
int drm_dp_aux_enable(struct drm_dp_aux *aux);
int drm_dp_aux_disable(struct drm_dp_aux *aux);
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f49ad36e24db..56edef06c48e 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
- u32 *firmware_vaddr = falcon->firmware.vaddr;
- dma_addr_t daddr;
+ u32 *virt = falcon->firmware.virt;
size_t i;
- int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
-
- /* ensure that caches are flushed and falcon can see the firmware */
- daddr = dma_map_single(falcon->dev, firmware_vaddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- err = dma_mapping_error(falcon->dev, daddr);
- if (err) {
- dev_err(falcon->dev, "failed to map firmware: %d\n", err);
- return;
- }
- dma_sync_single_for_device(falcon->dev, daddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
- DMA_TO_DEVICE);
+ virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
- struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
@@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
return -EINVAL;
}
- os = falcon->firmware.vaddr + bin->os_header_offset;
+ os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
+ falcon->firmware.size = falcon->firmware.firmware->size;
+
return 0;
}
@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
- falcon->firmware.size = firmware->size;
-
- /* allocate iova space for the firmware */
- falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
- &falcon->firmware.paddr);
- if (IS_ERR(falcon->firmware.vaddr)) {
- dev_err(falcon->dev, "DMA memory mapping failed\n");
- return PTR_ERR(falcon->firmware.vaddr);
- }
-
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
@@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
- goto err_setup_firmware_image;
+ return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
-
-err_setup_firmware_image:
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr, falcon->firmware.vaddr);
-
- return err;
}
int falcon_init(struct falcon *falcon)
{
- /* check mandatory ops */
- if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
- return -EINVAL;
-
- falcon->firmware.vaddr = NULL;
+ falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
- if (falcon->firmware.firmware) {
+ if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
- falcon->firmware.firmware = NULL;
- }
-
- if (falcon->firmware.vaddr) {
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr,
- falcon->firmware.vaddr);
- falcon->firmware.vaddr = NULL;
- }
}
int falcon_boot(struct falcon *falcon)
@@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
u32 value;
int err;
- if (!falcon->firmware.vaddr)
+ if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
@@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
- falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
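
The firmware image is stored little-endian, so the word-by-word copy through
le32_to_cpu() keeps falcon_copy_firmware_image() correct on big-endian kernels
as well. A self-contained sketch of the same conversion (using a __le32 source
pointer, the sparse-clean spelling of the cast in the driver):

    #include <linux/firmware.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Copy a little-endian firmware blob into a CPU-native u32 buffer. */
    static void example_copy_le32(u32 *dst, const struct firmware *fw)
    {
            const __le32 *src = (const __le32 *)fw->data;
            size_t i;

            for (i = 0; i < fw->size / sizeof(u32); i++)
                    dst[i] = le32_to_cpu(src[i]);
    }

Note also that the IOVA written to FALCON_DMATRFBASE is shifted right by 8,
which suggests the register holds the base address at 256-byte granularity.
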
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 3d1243217410..c56ee32d92ee 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
-struct falcon;
-
-struct falcon_ops {
- void *(*alloc)(struct falcon *falcon, size_t size,
- dma_addr_t *paddr);
- void (*free)(struct falcon *falcon, size_t size,
- dma_addr_t paddr, void *vaddr);
-};
-
struct falcon_firmware_section {
unsigned long offset;
size_t size;
@@ -93,8 +84,9 @@ struct falcon_firmware {
const struct firmware *firmware;
/* Raw firmware data */
- dma_addr_t paddr;
- void *vaddr;
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
size_t size;
/* Parsed firmware information */
@@ -107,8 +99,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
- const struct falcon_ops *ops;
- void *data;
struct falcon_firmware firmware;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e34325c83d28..7cea89f29a5c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+ drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
- info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+ info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index fb7667c8dd4c..746dae32c484 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct sg_table *sgt;
+ int err;
+
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make
+ * sure to return the IOVA address of our mapping.
+ */
+ if (phys && obj->mm) {
+ *phys = obj->iova;
+ return NULL;
+ }
+
+ /*
+ * If we don't have a mapping for this buffer yet, return an SG table
+ * so that host1x can do the mapping for us via the DMA API.
+ */
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- *sgt = obj->sgt;
+ if (obj->pages) {
+ err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+ 0, obj->gem.size, GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else {
+ err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
+ obj->gem.size);
+ if (err < 0)
+ goto free;
+ }
- return obj->paddr;
+ return sgt;
+
+free:
+ kfree(sgt);
+ return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
}
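
With this change, pinning returns an SG table that host1x maps through the DMA
API, and the table is built differently depending on how the buffer is backed.
A condensed sketch of that decision, with a hypothetical helper name:

    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Build an SG table for a page-backed or contiguous buffer. */
    static struct sg_table *example_make_sgt(struct device *dev,
                                             struct page **pages,
                                             unsigned int num_pages,
                                             void *vaddr, dma_addr_t iova,
                                             size_t size)
    {
            struct sg_table *sgt;
            int err;

            sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
            if (!sgt)
                    return ERR_PTR(-ENOMEM);

            if (pages) /* discontiguous: describe the individual pages */
                    err = sg_alloc_table_from_pages(sgt, pages, num_pages,
                                                    0, size, GFP_KERNEL);
            else /* contiguous DMA allocation: let the DMA API describe it */
                    err = dma_get_sgtable(dev, sgt, vaddr, iova, size);

            if (err < 0) {
                    kfree(sgt);
                    return ERR_PTR(err);
            }

            return sgt;
    }

The early return with *phys set short-circuits all of this when the buffer was
already mapped through the driver's own IOMMU domain.
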
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
goto unlock;
}
- bo->paddr = bo->mm->start;
+ bo->iova = bo->mm->start;
- bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
bo->sgt->nents, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
@@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
mutex_lock(&tegra->mm_lock);
- iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
@@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
@@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
@@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
- bo->paddr = sg_dma_address(bo->sgt->sgl);
+ bo->iova = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
@@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
@@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return NULL;
if (bo->pages) {
- struct scatterlist *sg;
- unsigned int i;
-
- if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
- goto free;
-
- for_each_sg(sgt->sgl, sg, bo->num_pages, i)
- sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+ 0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
- if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+ gem->size) < 0)
goto free;
-
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
}
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+
return sgt;
free:
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 83ffb1e14ca3..fafb5724499b 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,7 +31,7 @@ struct tegra_bo {
struct host1x_bo base;
unsigned long flags;
struct sg_table *sgt;
- dma_addr_t paddr;
+ dma_addr_t iova;
void *vaddr;
struct drm_mm_node *mm;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 641299cc85b8..1fc4e56c7cc5 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -17,7 +17,6 @@ struct gr2d_soc {
};
struct gr2d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
@@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client)
struct gr2d *gr2d = to_gr2d(drm);
int err;
- gr2d->channel = host1x_channel_request(client->dev);
+ gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
@@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
goto put;
}
- gr2d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr2d->group)) {
- err = PTR_ERR(gr2d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 8b9a35b1cbb3..24fae0f64032 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -23,7 +23,6 @@ struct gr3d_soc {
};
struct gr3d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
@@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client)
struct gr3d *gr3d = to_gr3d(drm);
int err;
- gr3d->channel = host1x_channel_request(client->dev);
+ gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
@@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
goto put;
}
- gr3d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr3d->group)) {
- err = PTR_ERR(gr3d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 839b49c40e51..2b4082d0bc9e 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = plane->state->normalized_zpos;
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_bo *bo;
dma_addr_t base;
u32 value;
@@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- bo = tegra_fb_get_plane(fb, 0);
- base = bo->paddr;
+ base = state->iova[0] + fb->offsets[0];
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index bdcaa4c7168c..34373734ff68 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
void tegra_output_connector_destroy(struct drm_connector *connector)
{
+ struct tegra_output *output = connector_to_output(connector);
+
+ if (output->cec)
+ cec_notifier_conn_unregister(output->cec);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
disable_irq(output->hpd_irq);
}
- output->cec = cec_notifier_get(output->dev);
- if (!output->cec)
- return -ENOMEM;
-
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
- if (output->cec)
- cec_notifier_put(output->cec);
-
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
@@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
+ int connector_type;
int err;
if (output->panel) {
@@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
+ connector_type = output->connector.connector_type;
+ /*
+ * Create a CEC notifier for HDMI connectors.
+ */
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ struct cec_connector_info conn_info;
+
+ cec_fill_conn_info_from_drm(&conn_info, &output->connector);
+ output->cec = cec_notifier_conn_register(output->dev, NULL,
+ &conn_info);
+ if (!output->cec)
+ return -ENOMEM;
+ }
+
return 0;
}
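
Registering the notifier at init time, and only for HDMI connector types,
replaces the unconditional cec_notifier_get() from probe. A minimal sketch of
the registration idiom, assuming a fully initialized connector:

    #include <drm/drm_connector.h>
    #include <media/cec.h>
    #include <media/cec-notifier.h>

    /* Register a CEC notifier bound to a specific connector. */
    static struct cec_notifier *
    example_register_cec(struct device *dev, struct drm_connector *connector)
    {
            struct cec_connector_info conn_info;

            cec_fill_conn_info_from_drm(&conn_info, connector);

            /* Returns NULL on failure, matching the -ENOMEM path above. */
            return cec_notifier_conn_register(dev, NULL, &conn_info);
    }

The matching cec_notifier_conn_unregister() now lives in the connector destroy
callback, tying the notifier's lifetime to the connector rather than to the
device.
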
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 6bab71d6e81d..163b590be224 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
@@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
+ unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
@@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
+
+ for (i = 0; i < 3; i++)
+ state->iova[i] = DMA_MAPPING_ERROR;
}
}
@@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
+ for (i = 0; i < 3; i++) {
+ copy->iova[i] = DMA_MAPPING_ERROR;
+ copy->sgt[i] = NULL;
+ }
+
return &copy->base;
}
@@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.format_mod_supported = tegra_plane_format_mod_supported,
};
+static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt;
+
+ sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (err == 0) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ state->iova[i] = sg_dma_address(sgt->sgl);
+ state->sgt[i] = sgt;
+ } else {
+ state->iova[i] = bo->iova;
+ }
+ }
+
+ return 0;
+
+unpin:
+ dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
+
+ while (i--) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt = state->sgt[i];
+
+ if (sgt) {
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ }
+ }
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+}
+
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (!state->fb)
+ return 0;
+
+ drm_gem_fb_prepare_fb(plane, state);
+
+ return tegra_dc_pin(dc, to_tegra_plane_state(state));
+}
+
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (dc)
+ tegra_dc_unpin(dc, to_tegra_plane_state(state));
+}
+
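
These two helpers are meant to be plugged into a plane's helper vtable, as the
hub.c hunk above does for shared planes. For reference, the wiring looks like
this (other hooks elided):

    #include <drm/drm_modeset_helper_vtables.h>

    static const struct drm_plane_helper_funcs example_plane_helpers = {
            .prepare_fb = tegra_plane_prepare_fb,
            .cleanup_fb = tegra_plane_cleanup_fb,
            /* .atomic_check, .atomic_update, .atomic_disable as before */
    };

Because cleanup_fb can run for states that never had a CRTC, the NULL check on
to_tegra_dc(state->crtc) in tegra_plane_cleanup_fb() is required.
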
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 510c394e6d9a..a158a915109a 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
+ struct sg_table *sgt[3];
+ dma_addr_t iova[3];
+
struct tegra_bo_tiling tiling;
u32 format;
u32 swap;
@@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
extern const struct drm_plane_funcs tegra_plane_funcs;
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e1669ada0a40..615cb319fa8b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -25,6 +25,7 @@
#include <drm/drm_scdc_helper.h>
#include "dc.h"
+#include "dp.h"
#include "drm.h"
#include "hda.h"
#include "sor.h"
@@ -370,10 +371,11 @@ struct tegra_sor_regs {
};
struct tegra_sor_soc {
- bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
+ bool supports_audio;
+ bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
@@ -382,6 +384,12 @@ struct tegra_sor_soc {
unsigned int num_settings;
const u8 *xbar_cfg;
+ const u8 *lane_map;
+
+ const u8 (*voltage_swing)[4][4];
+ const u8 (*pre_emphasis)[4][4];
+ const u8 (*post_cursor)[4][4];
+ const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
@@ -390,6 +398,8 @@ struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
int (*remove)(struct tegra_sor *sor);
+ void (*audio_enable)(struct tegra_sor *sor);
+ void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
@@ -412,6 +422,7 @@ struct tegra_sor {
u8 xbar_cfg[5];
+ struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
@@ -514,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_pad_parents[] = {
- "pll_d2_out0", "pll_dp"
+static const char * const tegra_clk_sor_pad_parents[2][2] = {
+ { "pll_d_out0", "pll_dp" },
+ { "pll_d2_out0", "pll_dp" },
};
+/*
+ * Implementing ->set_parent() here isn't really required because the parent
+ * will be explicitly selected in the driver code via the DP_CLK_SEL mux in
+ * the SOR_CLK_CNTRL register. This is primarily for compatibility with
+ * Tegra186 and later SoC generations, where the BPMP implements this
+ * clock and doesn't expose the mux via the common clock framework.
+ */
+
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
@@ -586,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_pad_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.parent_names = tegra_clk_sor_pad_parents[sor->index];
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
@@ -597,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
return clk;
}
-static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
- struct drm_dp_link *link)
+static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
+ struct drm_dp_link *link = &sor->link;
unsigned int i;
- u8 pattern;
+
+ /* Tegra only supports RBR, HBR and HBR2 */
+ for (i = 0; i < link->num_rates; i++) {
+ switch (link->rates[i]) {
+ case 1620000:
+ case 2700000:
+ case 5400000:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
+ link->rates[i]);
+ link->rates[i] = 0;
+ break;
+ }
+ }
+
+ drm_dp_link_update_rates(link);
+}
+
+static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
+{
+ unsigned long timeout;
u32 value;
- int err;
- /* setup lane parameters */
- value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+ /*
+ * Clear or set the PD_TXD bit corresponding to each lane, depending
+ * on whether it is used or not.
+ */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
- value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
- SOR_LANE_POSTCURSOR_LANE2(0x00) |
- SOR_LANE_POSTCURSOR_LANE1(0x00) |
- SOR_LANE_POSTCURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
- /* disable LVDS mode */
- tegra_sor_writel(sor, 0, SOR_LVDS);
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
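
The PD_TXD manipulation above powers lanes by physical pad index, translating
through the per-SoC lane_map so board-level lane swizzling is handled in one
place. A compact sketch of the mask computation (hypothetical helper; per the
driver, a set PD_TXD bit corresponds to a powered lane):

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Mask of PD_TXD bits to set for the first @lanes logical lanes. */
    static u32 example_powered_lane_mask(const u8 *lane_map,
                                         unsigned int lanes)
    {
            u32 mask = 0;
            unsigned int i;

            for (i = 0; i < lanes; i++)
                    mask |= BIT(lane_map[i]);

            return mask;
    }

The lane sequencer is then triggered and polled until the TRIGGER bit clears,
with a 250 ms timeout.
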
+static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_TX_PU_ENABLE;
- value &= ~SOR_DP_PADCTL_TX_PU_MASK;
- value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
+{
+ u32 value;
+
+ /* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
- SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- usleep_range(10, 100);
+ usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
- if (err < 0)
- return err;
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN1;
- value = (value << 8) | lane;
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_1;
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
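
The TERMADJ loop above is a successive-approximation search: each bit of the
adjustment value is tried from MSB to LSB and kept only if the comparator has
not tripped. Abstracted away from the SOR registers (the callbacks below are
hypothetical), the algorithm is:

    #include <linux/types.h>

    /* Successive-approximation search over a binary adjustment value. */
    static u32 example_sar_calibrate(u32 msb_mask,
                                     void (*apply)(u32 adj),
                                     bool (*comparator_high)(void))
    {
            u32 mask, adj = 0;

            for (mask = msb_mask; mask; mask >>= 1) {
                    adj |= mask;            /* tentatively set this bit */
                    apply(adj);             /* program the candidate */

                    if (comparator_high())
                            adj &= ~mask;   /* overshot: drop the bit */
            }

            return adj;
    }

Starting from mask = 0x08 as in the driver, this converges on a 4-bit
termination adjustment in four register round-trips.
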
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
+ const struct tegra_sor_soc *soc = sor->soc;
+ u32 pattern = 0, tx_pu = 0, value;
+ unsigned int i;
- value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- value |= SOR_DP_SPARE_SEQ_ENABLE;
- value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
- value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+ for (value = 0, i = 0; i < link->lanes; i++) {
+ u8 vs = link->train.request.voltage_swing[i];
+ u8 pe = link->train.request.pre_emphasis[i];
+ u8 pc = link->train.request.post_cursor[i];
+ u8 shift = sor->soc->lane_map[i] << 3;
+
+ voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
+ pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
+ post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
+
+ if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
+ tx_pu = sor->soc->tx_pu[pc][vs][pe];
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ value = SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ break;
+
+ case DP_TRAINING_PATTERN_1:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ break;
+
+ case DP_TRAINING_PATTERN_3:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (link->caps.channel_coding)
+ value |= SOR_DP_TPG_CHANNEL_CODING;
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN2;
- value = (value << 8) | lane;
+ pattern = pattern << 8 | value;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
+ tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
- pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
+ if (link->caps.tps3_supported)
+ tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+ tegra_sor_writel(sor, pattern, SOR_DP_TPG);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(tx_pu);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ unsigned int rate, lanes;
+ u32 value;
+ int err;
+
+ rate = drm_dp_link_rate_to_bw_code(link->rate);
+ lanes = link->lanes;
+
+ /* configure link speed and lane count */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link->caps.enhanced_framing)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ usleep_range(400, 1000);
+
+ /* configure load pulse position adjustment */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+
+ switch (rate) {
+ case DP_LINK_BW_1_62:
+ value |= SOR_PLL1_LOADADJ(0x3);
+ break;
+
+ case DP_LINK_BW_2_7:
+ value |= SOR_PLL1_LOADADJ(0x4);
+ break;
+
+ case DP_LINK_BW_5_4:
+ value |= SOR_PLL1_LOADADJ(0x6);
+ break;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_DISABLE;
+ /* use alternate scrambler reset for eDP */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
+ if (link->edp == 0)
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_power_down_lanes(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down lanes: %d\n", err);
+ return err;
+ }
+
+ /* power up and pre-charge lanes */
+ err = tegra_sor_power_up_lanes(sor, lanes);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
+ lanes, (lanes != 1) ? "s" : "", err);
return err;
+ }
+
+ tegra_sor_dp_precharge(sor, lanes);
return 0;
}
+static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
+ .apply_training = tegra_sor_dp_link_apply_training,
+ .configure = tegra_sor_dp_link_configure,
+};
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
@@ -912,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
u32 num_syms_per_line;
unsigned int i;
- if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
+ if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
- output = link_rate * 8 * link->num_lanes;
input = pclk * config->bits_per_pixel;
+ output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
@@ -959,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
- (link->num_lanes * 8);
+ (link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
@@ -976,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
- config->hblank_symbols -= 12 / link->num_lanes;
+ config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
- config->vblank_symbols -= 36 / link->num_lanes + 4;
+ config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
@@ -1200,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
- SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
- return -ETIMEDOUT;
-
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
@@ -1584,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_edp_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- err = tegra_sor_detach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-
- tegra_sor_writel(sor, 0, SOR_STATE1);
- tegra_sor_update(sor);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_sor_power_down(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-
- if (sor->aux) {
- err = drm_dp_aux_disable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- }
-
- err = tegra_io_pad_power_disable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- pm_runtime_put(sor->dev);
-}
-
-#if 0
-static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
- unsigned int *value)
-{
- unsigned int hfp, hsw, hbp, a = 0, b;
-
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
-
- b = hfp - 1;
-
- pr_info("a: %u, b: %u\n", a, b);
- pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
-
- if (a + hsw + hbp <= 11) {
- a = 1 + 11 - hsw - hbp;
- pr_info("a: %u\n", a);
- }
-
- if (a > b)
- return -EINVAL;
-
- if (hsw < 1)
- return -EINVAL;
-
- if (mode->hdisplay < 16)
- return -EINVAL;
-
- if (value) {
- if (b > a && a % 2)
- *value = a + 1;
- else
- *value = a;
- }
-
- return 0;
-}
-#endif
-
-static void tegra_sor_edp_enable(struct drm_encoder *encoder)
-{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- struct tegra_sor_config config;
- struct tegra_sor_state *state;
- struct drm_dp_link link;
- u8 rate, lanes;
- unsigned int i;
- int err = 0;
- u32 value;
-
- state = to_sor_state(output->connector.state);
-
- pm_runtime_get_sync(sor->dev);
-
- if (output->panel)
- drm_panel_prepare(output->panel);
-
- err = drm_dp_aux_enable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DP: %d\n", err);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0) {
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
- return;
- }
-
- /* switch to safe parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
- dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
-
- memset(&config, 0, sizeof(config));
- config.bits_per_pixel = state->bpc * 3;
-
- err = tegra_sor_compute_config(sor, mode, &config, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to compute configuration: %d\n", err);
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
- value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
- usleep_range(20, 100);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll3);
- value |= SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, sor->soc->regs->pll3);
-
- value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
- SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, sor->soc->regs->pll1);
-
- while (true) {
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /*
- * power up
- */
-
- /* set safe link bandwidth (1.62 Gbps) */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- /* step 1 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
- SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* step 2 */
- err = tegra_io_pad_power_enable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
-
- usleep_range(5, 100);
-
- /* step 3 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(20, 100);
-
- /* step 4 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value &= ~SOR_PLL0_VCOPD;
- value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(200, 1000);
-
- /* step 5 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /* XXX not in TRM */
- for (value = 0, i = 0; i < 5; i++)
- value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
- SOR_XBAR_CTRL_LINK1_XSEL(i, i);
-
- tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
-
- /* switch to DP parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
- if (err < 0)
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
-
- /* power DP lanes */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
-
- if (link.num_lanes <= 2)
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
- else
- value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
-
- if (link.num_lanes <= 1)
- value &= ~SOR_DP_PADCTL_PD_TXD_1;
- else
- value |= SOR_DP_PADCTL_PD_TXD_1;
-
- if (link.num_lanes == 0)
- value &= ~SOR_DP_PADCTL_PD_TXD_0;
- else
- value |= SOR_DP_PADCTL_PD_TXD_0;
-
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* start lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
- SOR_LANE_SEQ_CTL_POWER_STATE_UP;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- while (true) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- /* set link bandwidth */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- tegra_sor_apply_config(sor, &config);
-
- /* enable link */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value |= SOR_DP_LINKCTL_ENABLE;
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- for (i = 0, value = 0; i < 4; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- /* enable pad calibration logic */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
-
- err = drm_dp_link_power_up(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
-
- err = drm_dp_link_configure(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
-
- rate = drm_dp_link_rate_to_bw_code(link.rate);
- lanes = link.num_lanes;
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
-
- if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* disable training pattern generator */
-
- for (i = 0; i < link.num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- err = tegra_sor_dp_train_fast(sor, &link);
- if (err < 0)
- dev_err(sor->dev, "DP fast link training failed: %d\n", err);
-
- dev_dbg(sor->dev, "fast link training succeeded\n");
-
- err = tegra_sor_power_up(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-
- /* CSTM (LVDS, link A/B, upper) */
- value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
- SOR_CSTM_UPPER;
- tegra_sor_writel(sor, value, SOR_CSTM);
-
- /* use DP-A protocol */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
- value |= SOR_STATE_ASY_PROTOCOL_DP_A;
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- tegra_sor_mode_set(sor, mode, state);
-
- /* PWM setup */
- err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-
- tegra_sor_update(sor);
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
-
- err = tegra_sor_attach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-
- err = tegra_sor_wakeup(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DC: %d\n", err);
-
- if (output->panel)
- drm_panel_enable(output->panel);
-}
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -2030,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
- .disable = tegra_sor_edp_disable,
- .enable = tegra_sor_edp_enable,
- .atomic_check = tegra_sor_encoder_atomic_check,
-};
-
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
@@ -2160,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
tegra_sor_write_eld(sor);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
@@ -2169,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+}
+
+static void tegra_sor_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->format.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
@@ -2206,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
- value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
-
- /* select HDA audio input */
- value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
- value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
-
- /* inject null samples */
- if (sor->format.channels != 2)
- value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
- else
- value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
-
- value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
-
- tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
-
- /* enable advertising HBR capability */
- tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+ tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
@@ -2399,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
- else
- value &= ~SOR_ENABLE(sor->index);
+ value &= ~SOR1_TIMING_CYA;
+
+ value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2559,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- /* switch to parent clock */
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
return;
}
+#endif
+ /* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
- dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
return;
}
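
As a side note, the clock handover the two hunks above implement can be read as one sequence. The following is a sketch under the assumption that tegra_sor_set_parent_clock(), clk_pad and clk_parent behave as referenced above; it is illustrative, not code from this patch:

static int tegra_sor_clocks_handover(struct tegra_sor *sor)
{
	int err;

	/*
	 * Reparenting sorX_pad_clkout itself is skipped: Tegra186 and
	 * later do not implement clk_set_parent() for it, and the
	 * DP_CLK_SEL mux in SOR_CLK_CNTRL already routes the DP clock.
	 */

	/* clock the SOR module from its own pad output */
	err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
	if (err < 0)
		return err;

	/* let the module clock track the display pixel clock */
	return clk_set_parent(sor->clk, sor->clk_parent);
}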
@@ -2774,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
- else
- value |= SOR_ENABLE(sor->index);
+ value |= SOR1_TIMING_CYA;
+
+ value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2803,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_dp_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ /*
+ * Do not attempt to power down a DP link if we're not connected since
+ * the AUX transactions would just be timing out.
+ */
+ if (output->connector.status != connector_status_disconnected) {
+ err = drm_dp_link_power_down(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down link: %d\n",
+ err);
+ }
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_commit(dc);
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe clock: %d\n", err);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ err = drm_dp_aux_disable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed disable DPAUX: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ pm_runtime_put(sor->dev);
+}
+
+static void tegra_sor_dp_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ pm_runtime_get_sync(sor->dev);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
+
+ err = drm_dp_link_probe(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe DP link: %d\n", err);
+
+ tegra_sor_filter_rates(sor);
+
+ err = drm_dp_link_choose(&sor->link, mode, info);
+ if (err < 0)
+ dev_err(sor->dev, "failed to choose link: %d\n", err);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 40);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (output->panel)
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ else
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ /* XXX not in TRM */
+ if (output->panel)
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ /* XXX not in TRM */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(0x1);
+ value |= SOR_PLL0_VCOCAP(0x3);
+ value |= SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* enable port */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ err = drm_dp_link_train(&sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "link training failed: %d\n", err);
+ else
+ dev_dbg(sor->dev, "link training succeeded\n");
+
+ err = drm_dp_link_power_up(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up DP link: %d\n", err);
+
+ /* compute configuration */
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = state->bpc * 3;
+
+ err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
+
+ tegra_sor_apply_config(sor, &config);
+ tegra_sor_mode_set(sor, mode, state);
+
+ if (output->panel) {
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ }
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* attach and wake up */
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
+ .disable = tegra_sor_dp_disable,
+ .enable = tegra_sor_dp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
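
A minimal usage sketch (assumed wiring, mirroring how the HDMI helpers are attached elsewhere in this driver):

/* during initialization, after the encoder has been created */
drm_encoder_helper_add(&sor->output.encoder, &tegra_sor_dp_helpers);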
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+ .audio_enable = tegra_sor_hdmi_audio_enable,
+ .audio_disable = tegra_sor_hdmi_audio_disable,
+};
+
+static int tegra_sor_dp_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply))
+ return PTR_ERR(sor->avdd_io_supply);
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0)
+ return err;
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply))
+ return PTR_ERR(sor->vdd_pll_supply);
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_sor_dp_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_dp_ops = {
+ .name = "DP",
+ .probe = tegra_sor_dp_probe,
+ .remove = tegra_sor_dp_remove,
+};
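
For reference, a sketch of how such an ops table is consumed; the call sites elsewhere in the driver follow this pattern, and the error handling here is illustrative only:

if (sor->ops && sor->ops->probe) {
	err = sor->ops->probe(sor);
	if (err < 0)
		dev_err(sor->dev, "failed to probe %s: %d\n",
			sor->ops->name, err);
}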
+
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -2810,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
- u32 value;
int err;
if (!sor->aux) {
- if (sor->soc->supports_hdmi) {
+ if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
@@ -2823,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client)
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
- if (sor->soc->supports_edp) {
+ if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
- helpers = &tegra_sor_edp_helpers;
- } else if (sor->soc->supports_dp) {
+ helpers = &tegra_sor_dp_helpers;
+ } else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
}
+
+ sor->link.ops = &tegra_sor_dp_link_ops;
+ sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
@@ -2913,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
- /*
- * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
- * is used for interoperability between the HDA codec driver and the
- * HDMI/DP driver.
- */
- value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
- tegra_sor_writel(sor, value, SOR_INT_ENABLE);
- tegra_sor_writel(sor, value, SOR_INT_MASK);
-
return 0;
}
@@ -2930,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
- tegra_sor_writel(sor, 0, SOR_INT_MASK);
- tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
-
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -2955,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
-static const struct tegra_sor_ops tegra_sor_edp_ops = {
- .name = "eDP",
-};
-
-static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
-{
- int err;
-
- sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
- if (IS_ERR(sor->avdd_io_supply)) {
- dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
- PTR_ERR(sor->avdd_io_supply));
- return PTR_ERR(sor->avdd_io_supply);
- }
-
- err = regulator_enable(sor->avdd_io_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
- err);
- return err;
- }
-
- sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
- if (IS_ERR(sor->vdd_pll_supply)) {
- dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
- PTR_ERR(sor->vdd_pll_supply));
- return PTR_ERR(sor->vdd_pll_supply);
- }
-
- err = regulator_enable(sor->vdd_pll_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
- err);
- return err;
- }
-
- sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
- if (IS_ERR(sor->hdmi_supply)) {
- dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
- PTR_ERR(sor->hdmi_supply));
- return PTR_ERR(sor->hdmi_supply);
- }
-
- err = regulator_enable(sor->hdmi_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
- return err;
- }
-
- INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
-
- return 0;
-}
-
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
-static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
- .name = "HDMI",
- .probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
-};
-
static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
@@ -3043,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = {
.dp_padctl2 = 0x73,
};
+/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
+static const u8 tegra124_sor_lane_map[4] = {
+ 2, 1, 0, 3,
+};
+
+static const u8 tegra124_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x3c, },
+ }, {
+ { 0x12, 0x17, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x39, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x36, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x09, 0x13, 0x25 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ },
+};
+
+static const u8 tegra124_sor_post_cursor[4][4][4] = {
+ {
+ { 0x00, 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0x00, },
+ { 0x00, 0x00, },
+ { 0x00, },
+ }, {
+ { 0x02, 0x02, 0x04, 0x05 },
+ { 0x02, 0x04, 0x05, },
+ { 0x04, 0x05, },
+ { 0x05, },
+ }, {
+ { 0x04, 0x05, 0x08, 0x0b },
+ { 0x05, 0x09, 0x0b, },
+ { 0x08, 0x0a, },
+ { 0x0b, },
+ }, {
+ { 0x05, 0x09, 0x0b, 0x12 },
+ { 0x09, 0x0d, 0x12, },
+ { 0x0b, 0x0f, },
+ { 0x12, },
+ },
+};
+
+static const u8 tegra124_sor_tx_pu[4][4][4] = {
+ {
+ { 0x20, 0x30, 0x40, 0x60 },
+ { 0x30, 0x40, 0x60, },
+ { 0x40, 0x60, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x50 },
+ { 0x30, 0x40, 0x50, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x20, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x40, },
+ { 0x60, },
+ },
+};
+
static const struct tegra_sor_soc tegra124_sor = {
- .supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra132_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra132_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
@@ -3068,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = {
.dp_padctl2 = 0x73,
};
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
+};
+
+static const u8 tegra210_sor_lane_map[4] = {
+ 0, 1, 2, 3,
+};
+
static const struct tegra_sor_soc tegra210_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
- .xbar_cfg = tegra124_sor_xbar_cfg,
-};
-static const u8 tegra210_sor_xbar_cfg[5] = {
- 2, 1, 0, 3, 4
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
- .supports_edp = false,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
-
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
@@ -3112,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = {
.dp_padctl2 = 0x16a,
};
-static const struct tegra_sor_soc tegra186_sor = {
- .supports_edp = false,
- .supports_lvds = false,
- .supports_hdmi = false,
- .supports_dp = true,
-
- .regs = &tegra186_sor_regs,
- .has_nvdisplay = true,
+static const u8 tegra186_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x39, },
+ }, {
+ { 0x12, 0x16, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x37, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x35, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
- .xbar_cfg = tegra124_sor_xbar_cfg,
+static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x14, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
};
-static const struct tegra_sor_soc tegra186_sor1 = {
- .supports_edp = false,
+static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
-
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
@@ -3155,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = {
};
static const struct tegra_sor_soc tegra194_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
@@ -3167,14 +3543,19 @@ static const struct tegra_sor_soc tegra194_sor = {
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
- { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
@@ -3200,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ } else {
+ if (!sor->soc->supports_audio)
+ sor->index = 0;
+ else
+ sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
@@ -3234,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
tegra_hda_parse_format(format, &sor->format);
- tegra_sor_hdmi_audio_enable(sor);
+ if (sor->ops->audio_enable)
+ sor->ops->audio_enable(sor);
} else {
- tegra_sor_hdmi_audio_disable(sor);
+ if (sor->ops->audio_disable)
+ sor->ops->audio_disable(sor);
}
}
@@ -3273,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
+
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3287,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -ENODEV;
}
} else {
- if (sor->soc->supports_edp) {
- sor->ops = &tegra_sor_edp_ops;
- sor->pad = TEGRA_IO_PAD_LVDS;
- } else if (sor->soc->supports_dp) {
- dev_err(&pdev->dev, "DisplayPort not supported yet\n");
- return -ENODEV;
- } else {
- dev_err(&pdev->dev, "unknown (DP) support\n");
- return -ENODEV;
- }
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
+ /*
+ * No need to keep this around since we only use it as a check
+ * to see if a panel is connected (eDP) or not (DP).
+ */
+ of_node_put(np);
+
+ sor->ops = &tegra_sor_dp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
@@ -3451,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
* pad output clock.
*/
if (!sor->clk_pad) {
+ char *name;
+
err = pm_runtime_get_sync(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
@@ -3458,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->clk_pad = tegra_clk_sor_pad_register(sor,
- "sor1_pad_clkout");
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
+ if (!name) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
pm_runtime_put(&pdev->dev);
}
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f8efd8be4b7c..00e09d5dca30 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -39,6 +39,7 @@
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
@@ -283,10 +284,12 @@
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
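
A hedged sketch of how the parameterized macros pair with the per-SoC lane map introduced above (dp_padctl0 as a register-table entry is an assumption based on dp_padctl2; set-vs-clear semantics follow the driver's existing power-up convention; this is an illustration, not code from the patch):

/*
 * Translate logical DP lanes to SOR_DP_PADCTL0 bits. The lane map
 * absorbs SoCs (Tegra124/132) whose lanes 0 and 2 are swapped, so
 * callers can iterate over logical lanes.
 */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);

for (i = 0; i < lanes; i++) {
	unsigned int lane = sor->soc->lane_map[i];

	value |= SOR_DP_PADCTL_PD_TXD(lane);	/* per-lane pad bit */
}

tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);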
#define SOR_DP_PADCTL1 0x5d
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index cd0399fd8c63..9444ba183990 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -34,7 +34,6 @@ struct vic {
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
@@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
static int vic_boot(struct vic *vic)
{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+#endif
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
@@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic)
return 0;
#ifdef CONFIG_IOMMU_API
- if (vic->config->supports_sid) {
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+ if (vic->config->supports_sid && spec) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
- if (spec && spec->num_ids > 0) {
+ if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
vic_writel(vic, value, VIC_THI_STREAMID0);
@@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic)
if (err < 0)
return err;
- hdr = vic->falcon.firmware.vaddr;
+ hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.vaddr +
+ hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
@@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic)
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ (vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
@@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic)
return 0;
}
-static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
- dma_addr_t *iova)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_alloc(tegra, size, iova);
-}
-
-static void vic_falcon_free(struct falcon *falcon, size_t size,
- dma_addr_t iova, void *va)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_free(tegra, size, va, iova);
-}
-
-static const struct falcon_ops vic_falcon_ops = {
- .alloc = vic_falcon_alloc,
- .free = vic_falcon_free
-};
-
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
- if (group && tegra->domain) {
- err = iommu_attach_group(tegra->domain, group);
- if (err < 0) {
- dev_err(vic->dev, "failed to attach to domain: %d\n",
- err);
- return err;
- }
-
- vic->domain = tegra->domain;
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n", err);
+ return err;
}
- vic->channel = host1x_channel_request(client->dev);
+ vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
@@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client)
if (err < 0)
goto free_syncpt;
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
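
A small illustration of what the shared dma_parms enables (hypothetical values; dma_set_max_seg_size() and dma_get_max_seg_size() are the standard DMA API accessors):

/* a segment-size cap configured on the host1x parent... */
dma_set_max_seg_size(client->parent, SZ_4M);

/* ...is now visible through the client device as well */
WARN_ON(dma_get_max_seg_size(client->dev) != SZ_4M);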
return 0;
free_syncpt:
@@ -221,8 +201,7 @@ free_syncpt:
free_channel:
host1x_channel_put(vic->channel);
detach:
- if (group && tegra->domain)
- iommu_detach_group(tegra->domain, group);
+ host1x_client_iommu_detach(client);
return err;
}
@@ -230,22 +209,32 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(vic->channel);
-
- if (vic->domain) {
- iommu_detach_group(vic->domain, group);
- vic->domain = NULL;
+ host1x_client_iommu_detach(client);
+
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(vic->dev, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
}
return 0;
@@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
+ struct host1x_client *client = &vic->client.base;
+ struct tegra_drm *tegra = vic->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
int err;
- if (vic->falcon.data)
+ if (vic->falcon.firmware.virt)
return 0;
- vic->falcon.data = vic->client.drm;
-
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
- goto cleanup;
+ return err;
+
+ size = vic->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ if (IS_ERR(virt))
+ return PTR_ERR(virt);
+ }
+
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(vic->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ vic->falcon.firmware.phys = phys;
+ }
+
return 0;
cleanup:
- vic->falcon.data = NULL;
+ if (!client->group)
+ dma_free_coherent(vic->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
return err;
}
@@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev)
struct vic *vic;
int err;
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
@@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
- vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 43d756b7810e..51d034e095f4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -8,6 +8,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include "tilcdc_drv.h"
@@ -139,8 +140,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
}
if (panel) {
- bridge = devm_drm_panel_bridge_add(ddev->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(ddev->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 3abb9641f212..e2090020b3a0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -11,7 +11,7 @@
#include "tilcdc_drv.h"
-static struct drm_plane_funcs tilcdc_plane_funcs = {
+static const struct drm_plane_funcs tilcdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 03d0e2df6774..94fb1f593564 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -649,7 +649,7 @@ static void gm12u320_driver_release(struct drm_device *dev)
kfree(gm12u320);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops);
+DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 01fc670ce7a2..caea2a099496 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
- ttm_page_alloc_dma.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
+ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb8966..6050dc846894 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+ struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98819462f025..8d91b0428af1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
- struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
- BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->bdev->glob->bo_count);
+ atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -188,23 +187,17 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -224,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -248,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -311,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- lru = &pos->first->bdev->glob->swap_lru[i];
+ lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -475,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@@ -485,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (bo->base.resv != &bo->base._resv)
dma_resv_unlock(&bo->base._resv);
@@ -512,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
}
dma_resv_unlock(bo->base.resv);
@@ -523,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -546,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct ttm_bo_global *glob = bo->bdev->glob;
struct dma_resv *resv;
int ret;
@@ -565,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
@@ -576,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@@ -586,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@@ -595,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -603,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@@ -618,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;
@@ -676,7 +657,7 @@ static void ttm_bo_release(struct kref *kref)
if (bo->bdev->driver->release_notify)
bo->bdev->driver->release_notify(bo);
- drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -842,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@@ -880,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
kref_get(&busy_bo->list_kref);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -896,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -926,7 +900,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
int ret;
@@ -935,19 +910,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
- if (fence) {
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ if (!fence)
+ return 0;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ if (no_wait_gpu)
+ return -EBUSY;
- dma_fence_put(bo->moving);
- bo->moving = fence;
+ dma_resv_add_shared_fence(bo->base.resv, fence);
+
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
+ return ret;
}
+ dma_fence_put(bo->moving);
+ bo->moving = fence;
return 0;
}
@@ -978,7 +956,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem);
+ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1068,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@@ -1120,14 +1096,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- if (mem->mm_node) {
- ret = ttm_bo_add_move_fence(bo, man, mem);
- if (unlikely(ret)) {
- (*man->func->put_node)(man, mem);
- goto error;
- }
- return 0;
+ if (!mem->mm_node)
+ continue;
+
+ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ if (ret == -EBUSY)
+ continue;
+
+ goto error;
}
+ return 0;
}
for (i = 0; i < placement->num_busy_placement; ++i) {
@@ -1160,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
return ret;
@@ -1286,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1315,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
- atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1349,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1357,7 +1336,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
+ ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
bo->mem.num_pages);
/* passed reservation objects should already be locked,
@@ -1379,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -1481,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@@ -1650,8 +1627,6 @@ static int ttm_bo_global_init(void)
goto out;
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = &ttm_mem_glob;
- glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1675,10 +1650,10 @@ out:
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
- struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@@ -1708,8 +1683,6 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
- drm_vma_offset_manager_destroy(&bdev->vma_manager);
-
if (!ret)
ttm_bo_global_release();
@@ -1720,11 +1693,15 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
+ if (WARN_ON(vma_manager == NULL))
+ return -EINVAL;
+
ret = ttm_bo_global_init();
if (ret)
return ret;
@@ -1741,13 +1718,10 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager,
- DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1827,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
- int ret = 0;
-
- /*
- * Using ttm_bo_reserve makes sure the lru lists are updated.
- */
-
- ret = ttm_bo_reserve(bo, true, no_wait, NULL);
- if (unlikely(ret != 0))
- return ret;
- ret = ttm_bo_wait(bo, true, no_wait);
- if (likely(ret == 0))
- atomic_inc(&bo->cpu_writers);
- ttm_bo_unreserve(bo);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
- atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@@ -1951,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};
- while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
- ;
+ while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0)
+ ;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..6b0883a1776e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -153,7 +151,6 @@ retry:
}
return ret;
}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.base.resv = &fbo->base.base._resv;
- dma_resv_init(fbo->base.base.resv);
- ret = dma_resv_trylock(fbo->base.base.resv);
+ if (bo->base.resv == &bo->base._resv)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
@@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
- ttm_bo_unreserve(ghost);
+ dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 46dc3de7e81b..4b34a278d65b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (bo->moving != moving) {
- spin_lock(&bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@@ -407,16 +407,16 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
struct drm_vma_offset_node *node;
struct ttm_buffer_object *bo = NULL;
- drm_vma_offset_lock_lookup(&bdev->vma_manager);
+ drm_vma_offset_lock_lookup(bdev->vma_manager);
- node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
bo = ttm_bo_get_unless_zero(bo);
}
- drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+ drm_vma_offset_unlock_lookup(bdev->vma_manager);
if (!bo)
pr_err("Could not find buffer object to map\n");
@@ -424,6 +424,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &ttm_bo_vm_ops;
+
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = bo;
+
+ /*
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+ */
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -447,24 +469,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = &ttm_bo_vm_ops;
-
- /*
- * Note: We're transferring the bo reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = bo;
-
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
out_unref:
ttm_bo_put(bo);
@@ -472,17 +477,17 @@ out_unref:
}
EXPORT_SYMBOL(ttm_bo_mmap);
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
ttm_bo_get(bo);
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = bo;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ /*
+ * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
+ * removed. Add it back here until the rest of TTM works without it.
+ */
+ vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
+
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
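
With ttm_fbdev_mmap() renamed to ttm_bo_mmap_obj() and the -EACCES pgoff check gone, the natural caller is a &drm_gem_object_funcs.mmap hook, which DRM invokes with the fake offset already stripped. A hedged sketch of such a hook (struct my_bo and its layout are hypothetical):

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>

struct my_bo {
	struct ttm_buffer_object ttm;	/* ttm.base is the GEM object */
};

static int my_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct my_bo *mbo = container_of(obj, struct my_bo, ttm.base);

	/* Takes its own BO reference and, per the FIXME above, re-adds
	 * the fake offset before doing the usual VMA setup. */
	return ttm_bo_mmap_obj(vma, &mbo->ttm);
}
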
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 131dae8f4170..1797f04c0534 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
}
}
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
- }
-}
-
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru)
+ struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- dma_resv_unlock(bo->base.resv);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
+ if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (del_lru) {
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
+ struct ttm_buffer_object *bo = entry->bo;
+
if (entry->num_shared)
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
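
With del_lru gone from ttm_eu_reserve_buffers() and the add-vs-move branch collapsed, reserved BOs simply stay on the LRU and get bumped to the tail when fenced. A hedged end-to-end sketch of a caller updated for the new signature (validation and job submission elided):

#include <drm/ttm/ttm_execbuf_util.h>

static int my_submit(struct list_head *list, struct dma_fence *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	/* Was: ttm_eu_reserve_buffers(&ticket, list, true, NULL, true); */
	ret = ttm_eu_reserve_buffers(&ticket, list, true, NULL);
	if (ret)
		return ret;

	/* ... validate the BOs and queue the job producing @fence ... */

	/* Attaches @fence per entry->num_shared, moves every BO to the
	 * LRU tail and drops all reservations in one pass. */
	ttm_eu_fence_buffer_objects(&ticket, list, fence);
	return 0;
}
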
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6..acd63b70d814 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(glob->bo_glob, ctx);
+ ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0e..b40a4678c296 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ put_pages:
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..bf876faea592 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,7 +33,6 @@
* when freed).
*/
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
@@ -886,8 +885,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -991,8 +990,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
-
-#endif
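
The same de-globbing applies to the pool allocators: accounting goes straight to the ttm_mem_glob singleton rather than through ttm->bdev->glob->mem_glob. A hedged sketch of the pattern (assuming ttm_mem_global_alloc_page() keeps its current four-argument form):

#include <drm/ttm/ttm_memory.h>

static int my_account_page(struct page *p, struct ttm_operation_ctx *ctx)
{
	/* One global accounting instance; no per-device detour. */
	return ttm_mem_global_alloc_page(&ttm_mem_glob, p, PAGE_SIZE, ctx);
}
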
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 416f24823c0a..954b09c948eb 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -80,8 +80,8 @@ static int tve200_modeset_init(struct drm_device *dev)
if (ret && ret != -ENODEV)
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_bridge;
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ddb61a60c610..b4ae3e89a7b4 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -90,13 +90,6 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct drm_encoder*
-udl_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- return drm_encoder_find(connector->dev, NULL, enc_id);
-}
-
static int udl_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
@@ -120,7 +113,6 @@ static void udl_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
- .best_encoder = udl_best_single_encoder,
};
static const struct drm_connector_funcs udl_connector_funcs = {
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a22b75a3a533..edd299ab53d8 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/* gem_create_object function for allocating a BO struct and doing
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 3506ae2723ae..1a07462b4528 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -126,6 +126,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_CSD:
args->value = v3d_has_csd(v3d);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -169,7 +172,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
+DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 19c092d75266..549dde83408b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -530,13 +530,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_v3d_submit_cl *args = data;
struct v3d_bin_job *bin = NULL;
struct v3d_render_job *render;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0) {
- DRM_INFO("pad must be zero: %d\n", args->pad);
+ if (args->flags != 0 &&
+ args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
@@ -565,6 +568,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
+ kfree(bin);
v3d_job_put(&render->base);
return ret;
@@ -578,12 +582,31 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
bin->render = render;
}
- ret = v3d_lookup_bos(dev, file_priv, &render->base,
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+ if (!clean_job) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ if (ret) {
+ kfree(clean_job);
+ clean_job = NULL;
+ goto fail;
+ }
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
if (ret)
goto fail;
@@ -602,28 +625,44 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ if (ret)
+ goto fail_unreserve;
+ ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail_unreserve;
+ }
+
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
- &render->base,
+ last_job,
&acquire_ctx,
args->out_sync,
- render->base.done_fence);
+ last_job->done_fence);
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(render->base.bo,
- render->base.bo_count, &acquire_ctx);
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
fail:
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return ret;
}
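
The render→clean ordering relies on a fence dependency rather than scheduler magic: the render job's done fence is stored in the clean job's deps xarray before the clean job is pushed. A hedged, driver-agnostic sketch of that chaining step (my_chain_jobs() is illustrative):

#include <drm/drm_gem.h>
#include <linux/dma-fence.h>

/* Make the child job wait for the parent. drm_gem_fence_array_add()
 * consumes the reference taken here, dropping it itself on failure. */
static int my_chain_jobs(struct xarray *child_deps,
			 struct dma_fence *parent_done)
{
	return drm_gem_fence_array_add(child_deps,
				       dma_fence_get(parent_done));
}
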
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 56ba510f21a2..45fe135d6e43 100644
--- a/drivers/gpu/drm/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -4,6 +4,8 @@ config DRM_VBOXVIDEO
depends on DRM && X86 && PCI
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
select GENERIC_ALLOCATOR
help
This is a KMS driver for the virtual Graphics Card used in
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 55d798c76b21..f2e968b5ffa6 100644
--- a/drivers/gpu/drm/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
- vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
vbox_mode.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 862db495d111..8512d970a09f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
- .fb_probe = vboxfb_create,
-};
-
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct vbox_private *vbox;
@@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
- &vbox_fb_helper_funcs, 32,
- vbox->num_crtcs);
+ ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
if (ret)
goto err_irq_fini;
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
- goto err_fbdev_fini;
+ goto err_irq_fini;
return 0;
-err_fbdev_fini:
- vbox_fbdev_fini(vbox);
err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
@@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
struct vbox_private *vbox = pci_get_drvdata(pdev);
drm_dev_unregister(&vbox->ddev);
- vbox_fbdev_fini(vbox);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
@@ -189,10 +181,7 @@ static struct pci_driver vbox_pci_driver = {
#endif
};
-static const struct file_operations vbox_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(vbox_fops);
static struct drm_driver driver = {
.driver_features =
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index e8cb9efc6088..87421903816c 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -16,12 +16,9 @@
#include <linux/string.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
-
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_ch_setup.h"
@@ -48,16 +45,9 @@
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
-struct vbox_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct vbox_private {
/* Must be first; or we must define our own release callback */
struct drm_device ddev;
- struct drm_fb_helper fb_helper;
- struct vbox_framebuffer afb;
u8 __iomem *guest_heap;
u8 __iomem *vbva_buffers;
@@ -137,7 +127,6 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
-#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
@@ -148,25 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects);
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-void vbox_fbdev_fini(struct vbox_private *vbox);
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj);
-
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
deleted file mode 100644
index 8f74bcffc034..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_fb.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2013-2017 Oracle Corporation
- * This file is based on ast_fb.c
- * Copyright 2012 Red Hat Inc.
- * Authors: Dave Airlie <airlied@redhat.com>
- * Michael Thayer <michael.thayer@oracle.com,
- */
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "vbox_drv.h"
-#include "vboxvideo.h"
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
-static struct fb_deferred_io vbox_defio = {
- .delay = HZ / 30,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-#endif
-
-static struct fb_ops vboxfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
-};
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct vbox_private *vbox =
- container_of(helper, struct vbox_private, fb_helper);
- struct pci_dev *pdev = vbox->ddev.pdev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- struct drm_gem_object *gobj;
- struct drm_gem_vram_object *gbo;
- int size, ret;
- s64 gpu_addr;
- u32 pitch;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- mode_cmd.pitches[0] = pitch;
-
- size = pitch * mode_cmd.height;
-
- ret = vbox_gem_create(vbox, size, true, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
- if (ret)
- return ret;
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- info->screen_size = size;
- info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(info->screen_base))
- return PTR_ERR(info->screen_base);
-
- fb = &vbox->afb.base;
- helper->fb = fb;
-
- info->fbops = &vboxfb_ops;
-
- /*
- * This seems to be done for safety checking that the framebuffer
- * is not registered twice by different drivers.
- */
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-
- drm_fb_helper_fill_info(info, helper, sizes);
-
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0)
- return (int)gpu_addr;
- info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
- info->fix.smem_len = vbox->available_vram_size - gpu_addr;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- info->fbdefio = &vbox_defio;
- fb_deferred_io_init(info);
-#endif
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
-
- return 0;
-}
-
-void vbox_fbdev_fini(struct vbox_private *vbox)
-{
- struct vbox_framebuffer *afb = &vbox->afb;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
- fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
-#endif
-
- drm_fb_helper_unregister_fbi(&vbox->fb_helper);
-
- if (afb->obj) {
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj);
-
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
-
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&vbox->fb_helper);
-
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
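
Everything the deleted file did by hand (fb allocation, pinning, kmap, apertures, deferred I/O, teardown) is subsumed by generic fbdev emulation. A hedged sketch of the resulting probe tail, mirroring the vbox_drv.c hunk above (error unwinding trimmed):

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

static int my_probe_tail(struct drm_device *ddev)
{
	int ret;

	/* No driver fb_probe callback, no private framebuffer struct,
	 * and nothing extra to tear down on remove. */
	ret = drm_fbdev_generic_setup(ddev, 32);
	if (ret)
		return ret;

	return drm_dev_register(ddev, 0);
}
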
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 02fa8277ff1e..9dcab115a261 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,22 +11,12 @@
#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
-static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
-
- if (vbox_fb->obj)
- drm_gem_object_put_unlocked(vbox_fb->obj);
-
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
void vbox_report_caps(struct vbox_private *vbox)
{
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
@@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox)
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
-/* Send information about dirty rectangles to VBVA. */
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- struct vbox_private *vbox = fb->dev->dev_private;
- struct drm_display_mode *mode;
- struct drm_crtc *crtc;
- int crtc_x, crtc_y;
- unsigned int i;
-
- mutex_lock(&vbox->hw_mutex);
- list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
- if (crtc->primary->state->fb != fb)
- continue;
-
- mode = &crtc->state->mode;
- crtc_x = crtc->primary->state->src_x >> 16;
- crtc_y = crtc->primary->state->src_y >> 16;
-
- for (i = 0; i < num_rects; ++i) {
- struct vbva_cmd_hdr cmd_hdr;
- unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
-
- if (rects[i].x1 > crtc_x + mode->hdisplay ||
- rects[i].y1 > crtc_y + mode->vdisplay ||
- rects[i].x2 < crtc_x ||
- rects[i].y2 < crtc_y)
- continue;
-
- cmd_hdr.x = (s16)rects[i].x1;
- cmd_hdr.y = (s16)rects[i].y1;
- cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
- cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
-
- if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
- vbox->guest_pool))
- continue;
-
- vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
- &cmd_hdr, sizeof(cmd_hdr));
- vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
- }
- }
- mutex_unlock(&vbox->hw_mutex);
-}
-
-static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs vbox_fb_funcs = {
- .destroy = vbox_user_framebuffer_destroy,
- .dirty = vbox_user_framebuffer_dirty,
-};
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
- vbox_fb->obj = obj;
- ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int vbox_accel_init(struct vbox_private *vbox)
{
struct vbva_buffer *vbva;
@@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox)
gen_pool_destroy(vbox->guest_pool);
pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
-
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
- size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
-
- *obj = &gbo->bo.base;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index e1e48ba919eb..19612132c8a3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -13,7 +13,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
if (!fb1) {
fb1 = fb;
- if (to_vbox_framebuffer(fb1) == &vbox->afb)
+ if (fb1 == vbox->ddev.fb_helper->fb)
break;
} else if (fb != fb1) {
single_framebuffer = false;
@@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips, i;
vbox_crtc_set_base_and_mode(crtc, fb,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
+
+ /* Send information about dirty rectangles to VBVA. */
+
+ clips = drm_plane_get_damage_clips(plane->state);
+ num_clips = drm_plane_get_damage_clips_count(plane->state);
+
+ if (!num_clips)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ for (i = 0; i < num_clips; ++i, ++clips) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
+ cmd_hdr.x = (s16)clips->x1;
+ cmd_hdr.y = (s16)clips->y1;
+ cmd_hdr.w = (u16)clips->x2 - clips->x1;
+ cmd_hdr.h = (u16)clips->y2 - clips->y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+
+ mutex_unlock(&vbox->hw_mutex);
}
static void vbox_primary_atomic_disable(struct drm_plane *plane,
@@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
old_state->src_y >> 16);
}
-static int vbox_primary_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
-
- return ret;
-}
-
-static void vbox_primary_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static int vbox_cursor_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
u32 width = plane->state->crtc_w;
u32 height = plane->state->crtc_h;
size_t data_size, mask_size;
@@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
mutex_unlock(&vbox->hw_mutex);
}
-static int vbox_cursor_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-}
-
-static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!plane->state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static const u32 vbox_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
- .prepare_fb = vbox_cursor_prepare_fb,
- .cleanup_fb = vbox_cursor_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
@@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
.atomic_check = vbox_primary_atomic_check,
.atomic_update = vbox_primary_atomic_update,
.atomic_disable = vbox_primary_atomic_disable,
- .prepare_fb = vbox_primary_prepare_fb,
- .cleanup_fb = vbox_primary_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
@@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *vbox_user_framebuffer_create(
- struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct vbox_private *vbox =
- container_of(dev, struct vbox_private, ddev);
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- int ret = -ENOMEM;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
- if (!vbox_fb)
- goto err_unref_obj;
-
- ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
- if (ret)
- goto err_free_vbox_fb;
-
- return &vbox_fb->base;
-
-err_free_vbox_fb:
- kfree(vbox_fb);
-err_unref_obj:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
static const struct drm_mode_config_funcs vbox_mode_funcs = {
- .fb_create = vbox_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
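
Dirty-rect reporting moves from a framebuffer ->dirty hook to damage clips read off the plane state, with drm_gem_fb_create_with_dirty() providing the ->dirty plumbing on the userspace side. A hedged generic sketch of the consuming loop (my_flush_rect() is a hypothetical hardware hook):

#include <drm/drm_damage_helper.h>

static void my_flush_rect(int x, int y, int w, int h) { /* hw specific */ }

static void my_flush_damage(struct drm_plane_state *state)
{
	struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
	u32 i, num_clips = drm_plane_get_damage_clips_count(state);

	/* num_clips == 0 means userspace sent no damage hints. */
	for (i = 0; i < num_clips; i++, clips++)
		my_flush_rect(clips->x1, clips->y1,
			      clips->x2 - clips->x1,
			      clips->y2 - clips->y1);
}
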
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index b82595a9ed0f..976423d0c3cc 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -17,8 +17,7 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- vbox->available_vram_size,
- &drm_gem_vram_mm_funcs);
+ vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f1f0a7c87771..b00e20f5ce05 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -994,7 +994,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
- if (vc4_state->mm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->mm)) {
unsigned long flags;
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 8a27a6acee61..c586325de2a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -249,7 +249,8 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
}
if (panel)
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
return drm_bridge_attach(dpi->encoder, bridge, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index c78fa8144776..c9ba83ed49b9 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1575,8 +1576,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
}
if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
+ dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ee7d4e7b0ee3..1c62c6c9244b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -398,10 +398,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- frame.avi.right_bar = cstate->tv.margins.right;
- frame.avi.left_bar = cstate->tv.margins.left;
- frame.avi.top_bar = cstate->tv.margins.top;
- frame.avi.bottom_bar = cstate->tv.margins.bottom;
+ drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
}
@@ -1285,6 +1282,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+ struct cec_connector_info conn_info;
+#endif
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
@@ -1403,13 +1403,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_DRM_VC4_HDMI_CEC
hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4, "vc4",
- CEC_CAP_TRANSMIT |
- CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH |
- CEC_CAP_RC, 1);
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_destroy_conn;
+
+ cec_fill_conn_info_from_drm(&conn_info, hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
+
HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 9936b15d0bf1..5a43659da319 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -315,7 +315,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
- if (vc4->hvs->mitchell_netravali_filter.allocated)
+ if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
drm_mm_takedown(&vc4->hvs->dlist_mm);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 5e5f90810aca..4934127f0d76 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -178,7 +178,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane,
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
- if (vc4_state->lbm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
@@ -557,7 +557,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
/* Allocate the LBM memory that the HVS will use for temporary
* storage due to our scaling/format conversion.
*/
- if (!vc4_state->lbm.allocated) {
+ if (!drm_mm_node_allocated(&vc4_state->lbm)) {
int ret;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
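
Several vc4 sites stop peeking at drm_mm_node.allocated and use the accessor, which keeps working when the flag becomes a private bit. A hedged sketch of the teardown idiom shared by the hunks above:

#include <drm/drm_mm.h>

static void my_release_node(struct drm_mm_node *node, spinlock_t *lock)
{
	unsigned long flags;

	/* drm_mm_node_allocated() is the supported liveness test. */
	if (!drm_mm_node_allocated(node))
		return;

	spin_lock_irqsave(lock, flags);
	drm_mm_remove_node(node);
	spin_unlock_irqrestore(lock, flags);
}
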
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ba36e933bb49..eff3047052d4 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,7 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 458e606a936f..92aa2b3d349d 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
- virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+ virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 0fc32fa0b3c0..8dee698c90ff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -56,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- 0,
"virtiodrmfb");
/*
@@ -185,17 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-static const struct file_operations virtio_gpu_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = virtio_gpu_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
@@ -210,15 +199,10 @@ static struct drm_driver driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
- .gem_prime_vmap = virtgpu_gem_prime_vmap,
- .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
- .gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object_unlocked = virtio_gpu_gem_free_object,
- .gem_open_object = virtio_gpu_gem_object_open,
- .gem_close_object = virtio_gpu_gem_object_close,
+ .gem_create_object = virtio_gpu_create_object,
.fops = &virtio_gpu_driver_fops,
.ioctls = virtio_gpu_ioctls,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e28829661724..0b56ba005e25 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,12 +35,9 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -68,21 +65,23 @@ struct virtio_gpu_object_params {
};
struct virtio_gpu_object {
- struct drm_gem_object gem_base;
+ struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
struct sg_table *pages;
uint32_t mapped;
- void *vmap;
bool dumb;
- struct ttm_place placement_code;
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
- container_of((gobj), struct virtio_gpu_object, gem_base)
+ container_of((gobj), struct virtio_gpu_object, base.base)
+
+struct virtio_gpu_object_array {
+ struct ww_acquire_ctx ticket;
+ struct list_head next;
+ u32 nents, total;
+ struct drm_gem_object *objs[];
+};
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;
@@ -115,9 +114,9 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
-
virtio_gpu_resp_cb resp_cb;
+ struct virtio_gpu_object_array *objs;
struct list_head list;
};
@@ -147,10 +146,6 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
-struct virtio_gpu_mman {
- struct ttm_bo_device bdev;
-};
-
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -179,8 +174,6 @@ struct virtio_gpu_device {
struct virtio_device *vdev;
- struct virtio_gpu_mman mman;
-
struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
uint32_t num_scanouts;
@@ -205,6 +198,10 @@ struct virtio_gpu_device {
struct work_struct config_changed_work;
+ struct work_struct obj_free_work;
+ spinlock_t obj_free_lock;
+ struct list_head obj_free_list;
+
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
@@ -217,9 +214,6 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -240,10 +234,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -251,20 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_work(struct work_struct *work);
+
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
@@ -295,28 +300,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence);
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
@@ -339,11 +348,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* virtio_gpu_fence.c */
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
@@ -355,70 +359,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
-static inline struct virtio_gpu_object*
-virtio_gpu_object_ref(struct virtio_gpu_object *bo)
-{
- ttm_bo_get(&bo->tbo);
- return bo;
-}
-
-static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
-}
-
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- struct virtio_gpu_device *qdev =
- bo->gem_base.dev->dev_private;
- dev_err(qdev->dev, "%p reserve failed\n", bo);
- }
- return r;
- }
- return 0;
-}
-
-static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
-{
- ttm_bo_unreserve(&bo->tbo);
+ return drm_vma_node_offset_addr(&bo->base.base.vma_node);
}
/* virgl debugfs */
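
virtio_gpu_object_array replaces the TTM validate lists below: callers collect GEM objects, lock every reservation, emit the command, and let the completion path fence and free the array. A hedged sketch of the lifecycle around a single-object command (the command emission itself is elided):

static int my_use_one_bo(struct virtio_gpu_device *vgdev,
			 struct drm_gem_object *obj,
			 struct dma_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);	/* takes a GEM reference */

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		return ret;
	}

	/* ... emit the virtio_gpu_cmd_*() carrying @objs ... */

	virtio_gpu_array_add_fence(objs, fence);
	virtio_gpu_array_unlock_resv(objs);
	virtio_gpu_array_put_free_delayed(vgdev, objs);
	return 0;
}
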
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index a0514f5bd006..a4b9881ca1d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -41,6 +41,10 @@ bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ if (WARN_ON_ONCE(fence->f.seqno == 0))
+ /* leaked fence outside driver before completing
+ * initialization with virtio_gpu_fence_emit */
+ return false;
if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 292566146814..4c1f579edfb3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -28,54 +28,31 @@
#include "virtgpu_drv.h"
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
-
- if (obj)
- virtio_gpu_object_unref(&obj);
-}
-
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_object *obj;
- int ret;
-
- ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
- if (ret)
- return ERR_PTR(ret);
-
- return obj;
-}
-
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, params, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ if (ret < 0)
+ return ret;
- ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+ ret = drm_gem_handle_create(file, &obj->base.base, &handle);
if (ret) {
- drm_gem_object_release(&obj->gem_base);
+ drm_gem_object_release(&obj->base.base);
return ret;
}
- *obj_p = &obj->gem_base;
+ *obj_p = &obj->base.base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->base.base);
*handle_p = handle;
return 0;
@@ -136,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return 0;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
- return r;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
return 0;
}
@@ -157,17 +133,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
return;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
+}
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
+
+ objs = kmalloc(size, GFP_KERNEL);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = nents;
+ return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+ kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ u32 i;
+
+ objs = virtio_gpu_array_alloc(nents);
+ if (!objs)
+ return NULL;
+
+ for (i = 0; i < nents; i++) {
+ objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+ if (!objs->objs[i]) {
+ objs->nents = i;
+ virtio_gpu_array_put_free(objs);
+ return NULL;
+ }
+ }
+ objs->nents = i;
+ return objs;
+}
+
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj)
+{
+ if (WARN_ON_ONCE(objs->nents == objs->total))
+ return;
+
+ drm_gem_object_get(obj);
+ objs->objs[objs->nents] = obj;
+ objs->nents++;
+}
+
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
+{
+ int ret;
+
+ if (objs->nents == 1) {
+ ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
+ } else {
+ ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+ return ret;
+}
+
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
+{
+ if (objs->nents == 1) {
+ dma_resv_unlock(objs->objs[0]->resv);
+ } else {
+ drm_gem_unlock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+}
+
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < objs->nents; i++)
+ dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+ u32 i;
+
+ for (i = 0; i < objs->nents; i++)
+ drm_gem_object_put_unlocked(objs->objs[i]);
+ virtio_gpu_array_free(objs);
+}
+
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ spin_lock(&vgdev->obj_free_lock);
+ list_add_tail(&objs->next, &vgdev->obj_free_list);
+ spin_unlock(&vgdev->obj_free_lock);
+ schedule_work(&vgdev->obj_free_work);
+}
+
+void virtio_gpu_array_put_free_work(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device, obj_free_work);
+ struct virtio_gpu_object_array *objs;
+
+ spin_lock(&vgdev->obj_free_lock);
+ while (!list_empty(&vgdev->obj_free_list)) {
+ objs = list_first_entry(&vgdev->obj_free_list,
+ struct virtio_gpu_object_array, next);
+ list_del(&objs->next);
+ spin_unlock(&vgdev->obj_free_lock);
+ virtio_gpu_array_put_free(objs);
+ spin_lock(&vgdev->obj_free_lock);
+ }
+ spin_unlock(&vgdev->obj_free_lock);
}
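
Taken together, the helpers above form the BO-tracking API that replaces the TTM validation lists. A minimal sketch of the intended calling sequence, assuming a fence obtained from virtio_gpu_fence_alloc() and with hw_cmd_emit() as a hypothetical stand-in for whichever virtio_gpu_cmd_*() variant consumes the array:

/*
 * Illustrative sketch only, not part of the patch; hw_cmd_emit()
 * is hypothetical.
 */
static int example_submit(struct virtio_gpu_device *vgdev,
			  struct drm_gem_object *obj,
			  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	objs = virtio_gpu_array_alloc(1);	/* room for one entry */
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);	/* takes a GEM reference */

	ret = virtio_gpu_array_lock_resv(objs);	/* reserve the BOs */
	if (ret) {
		virtio_gpu_array_put_free(objs);	/* drop refs, free array */
		return ret;
	}

	hw_cmd_emit(vgdev, objs, fence);	/* queues a vbuf with vbuf->objs set */
	/*
	 * From here the queueing code fences and unlocks the array
	 * (virtio_gpu_array_add_fence + virtio_gpu_array_unlock_resv),
	 * and the dequeue worker releases it via
	 * virtio_gpu_array_put_free_delayed().
	 */
	return 0;
}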
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0a88ef11b9d3..9af1ec62434f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -29,7 +29,6 @@
#include <linux/sync_file.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
@@ -56,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
- int ret;
-
- ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
- if (ret != 0)
- return ret;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
- ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
- if (ret) {
- ttm_eu_backoff_reservation(ticket, head);
- return ret;
- }
- }
- return 0;
-}
-
-void virtio_gpu_unref_list(struct list_head *head)
-{
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
-
- drm_gem_object_put_unlocked(&qobj->gem_base);
- }
-}
-
/*
* Usage of execbuffer:
* Relocations need to take into account the full VIRTIO_GPUDrawable size.
@@ -107,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
- struct drm_gem_object *gobj;
struct virtio_gpu_fence *out_fence;
- struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
void __user *user_bo_handles = NULL;
- struct list_head validate_list;
- struct ttm_validate_buffer *buflist = NULL;
- int i;
- struct ww_acquire_ctx ticket;
+ struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
@@ -157,15 +112,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
return out_fence_fd;
}
- INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
-
bo_handles = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(uint32_t), GFP_KERNEL);
- buflist = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(struct ttm_validate_buffer),
- GFP_KERNEL | __GFP_ZERO);
- if (!bo_handles || !buflist) {
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!bo_handles) {
ret = -ENOMEM;
goto out_unused_fd;
}
@@ -177,27 +127,23 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- for (i = 0; i < exbuf->num_bo_handles; i++) {
- gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
- if (!gobj) {
- ret = -ENOENT;
- goto out_unused_fd;
- }
-
- qobj = gem_to_virtio_gpu_obj(gobj);
- buflist[i].bo = &qobj->tbo;
-
- list_add(&buflist[i].head, &validate_list);
+ buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ exbuf->num_bo_handles);
+ if (!buflist) {
+ ret = -ENOENT;
+ goto out_unused_fd;
}
kvfree(bo_handles);
bo_handles = NULL;
}
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret)
- goto out_free;
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_unused_fd;
+ }
- buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
+ buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unresv;
@@ -222,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, out_fence);
-
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
-
- /* fence the command bo */
- virtio_gpu_unref_list(&validate_list);
- kvfree(buflist);
+ vfpriv->ctx_id, buflist, out_fence);
return 0;
out_memdup:
- kfree(buf);
+ kvfree(buf);
out_unresv:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-out_free:
- virtio_gpu_unref_list(&validate_list);
+ if (buflist)
+ virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
kvfree(bo_handles);
- kvfree(buflist);
+ if (buflist)
+ virtio_gpu_array_put_free(buflist);
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
@@ -316,11 +256,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence)
return -ENOMEM;
- qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
dma_fence_put(&fence->f);
- if (IS_ERR(qobj))
- return PTR_ERR(qobj);
- obj = &qobj->gem_base;
+ if (ret < 0)
+ return ret;
+ obj = &qobj->base.base;
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
@@ -347,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
qobj = gem_to_virtio_gpu_obj(gobj);
- ri->size = qobj->gem_base.size;
+ ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
drm_gem_object_put_unlocked(gobj);
return 0;
@@ -360,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
u32 offset = args->offset;
@@ -371,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
convert_to_hw_box(&box, &args->box);
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
- goto out_unres;
+ goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, qobj->hw_res_handle,
- vfpriv->ctx_id, offset, args->level,
- &box, fence);
- dma_resv_add_excl_fence(qobj->tbo.base.resv,
- &fence->f);
-
+ (vgdev, vfpriv->ctx_id, offset, args->level,
+ &box, objs, fence);
dma_fence_put(&fence->f);
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+ return 0;
+
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
@@ -413,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
struct virtio_gpu_box box;
int ret;
u32 offset = args->offset;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
-
convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, qobj, offset,
- box.w, box.h, box.x, box.y, NULL);
+ (vgdev, offset,
+ box.w, box.h, box.x, box.y,
+ objs, NULL);
} else {
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
+
+ ret = -ENOMEM;
fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unres;
- }
+ if (!fence)
+ goto err_unlock;
+
virtio_gpu_cmd_transfer_to_host_3d
- (vgdev, qobj,
+ (vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, fence);
- dma_resv_add_excl_fence(qobj->tbo.base.resv,
- &fence->f);
+ args->level, &box, objs, fence);
dma_fence_put(&fence->f);
}
+ return 0;
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+ struct drm_file *file)
{
struct drm_virtgpu_3d_wait *args = data;
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct drm_gem_object *obj;
+ long timeout = 15 * HZ;
int ret;
- bool nowait = false;
- gobj = drm_gem_object_lookup(file, args->handle);
- if (gobj == NULL)
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (obj == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (args->flags & VIRTGPU_WAIT_NOWAIT)
- nowait = true;
- ret = virtio_gpu_object_wait(qobj, nowait);
+ if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+ ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ } else {
+ ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+ timeout);
+ }
+ if (ret == 0)
+ ret = -EBUSY;
+ else if (ret > 0)
+ ret = 0;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
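
The return-value juggling above follows the dma_resv conventions: dma_resv_test_signaled_rcu() returns a boolean, while dma_resv_wait_timeout_rcu() returns the remaining jiffies on success, 0 on timeout, or a negative error. On both paths 0 therefore means "not signaled" and is translated to -EBUSY, and any positive value becomes success.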
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index c190702fab72..2f5773e43557 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -147,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev)
INIT_WORK(&vgdev->config_changed_work,
virtio_gpu_config_changed_work_func);
+ INIT_WORK(&vgdev->obj_free_work,
+ virtio_gpu_array_put_free_work);
+ INIT_LIST_HEAD(&vgdev->obj_free_list);
+ spin_lock_init(&vgdev->obj_free_lock);
+
#ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
- DRM_INFO("virgl 3d acceleration %s\n",
- vgdev->has_virgl_3d ? "enabled" : "not supported by host");
-#else
- DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
- DRM_INFO("EDID support available.\n");
}
+ DRM_INFO("features: %cvirgl %cedid\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
@@ -173,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev)
goto err_vbufs;
}
- ret = virtio_gpu_ttm_init(vgdev);
- if (ret) {
- DRM_ERROR("failed to init ttm %d\n", ret);
- goto err_ttm;
- }
-
/* get display info */
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
num_scanouts, &num_scanouts);
@@ -210,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev)
return 0;
err_scanouts:
- virtio_gpu_ttm_fini(vgdev);
-err_ttm:
virtio_gpu_free_vbufs(vgdev);
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
@@ -234,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
+ flush_work(&vgdev->obj_free_work);
vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
@@ -242,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev)
vgdev->vdev->config->del_vqs(vgdev->vdev);
virtio_gpu_modeset_fini(vgdev);
- virtio_gpu_ttm_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
kfree(vgdev->capsets);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 09b526518f5a..017a9e0fc3bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,73 +23,83 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
+static int virtio_gpu_virglrenderer_workaround = 1;
+module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
+
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
uint32_t *resid)
{
-#if 0
- int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
-#else
- static int handle;
-
- /*
- * FIXME: dirty hack to avoid re-using IDs, virglrenderer
- * can't deal with that. Needs fixing in virglrenderer, also
- * should figure a better way to handle that in the guest.
- */
- handle++;
-#endif
-
- *resid = handle + 1;
+ if (virtio_gpu_virglrenderer_workaround) {
+ /*
+ * Hack to avoid re-using resource IDs.
+ *
+ * virglrenderer versions up to (and including) 0.7.0
+ * can't deal with that. virglrenderer commit
+ * "f91a9dd35715 Fix unlinking resources from hash
+ * table." (Feb 2019) fixes the bug.
+ */
+ static int handle;
+ handle++;
+ *resid = handle + 1;
+ } else {
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+ if (handle < 0)
+ return handle;
+ *resid = handle + 1;
+ }
return 0;
}
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
-#if 0
- ida_free(&vgdev->resource_ida, id - 1);
-#endif
+ if (!virtio_gpu_virglrenderer_workaround) {
+ ida_free(&vgdev->resource_ida, id - 1);
+ }
}
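
Because the workaround is now a module parameter registered with mode 0400, it can be disabled only at load time once the host is known to run a fixed virglrenderer, e.g. with virtio_gpu.virglhack=0 on the kernel command line (the module-prefix spelling is an assumption here); the read-only mode means it cannot be flipped at runtime.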
-static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ if (bo->pages)
+ virtio_gpu_object_detach(vgdev, bo);
if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
- if (bo->vmap)
- virtio_gpu_object_kunmap(bo);
- drm_gem_object_release(&bo->gem_base);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
+
+ drm_gem_shmem_free_object(obj);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
+static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+ .free = virtio_gpu_free_object,
+ .open = virtio_gpu_gem_object_open,
+ .close = virtio_gpu_gem_object_close,
+
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = &drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size)
{
- u32 c = 1;
-
- vgbo->placement.placement = &vgbo->placement_code;
- vgbo->placement.busy_placement = &vgbo->placement_code;
- vgbo->placement_code.fpfn = 0;
- vgbo->placement_code.lpfn = 0;
- vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_NO_EVICT;
- vgbo->placement.num_placement = c;
- vgbo->placement.num_busy_placement = c;
+ struct virtio_gpu_object *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ return &bo->base.base;
}
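
This per-object drm_gem_object_funcs table is what replaces the driver-wide GEM callbacks: only free/open/close stay driver-specific, everything else is delegated to the shmem helpers, which in turn is what allows the hand-rolled get_sg_table/vmap/vunmap/mmap wrappers in virtgpu_prime.c to be dropped further down in this patch.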
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -97,157 +107,59 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object_array *objs = NULL;
+ struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- size_t acc_size;
int ret;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
- sizeof(struct virtio_gpu_object));
+ params->size = roundup(params->size, PAGE_SIZE);
+ shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+ bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
- bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
- if (bo == NULL)
- return -ENOMEM;
ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
- if (ret < 0) {
- kfree(bo);
- return ret;
- }
- params->size = roundup(params->size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
- if (ret != 0) {
- virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
- return ret;
- }
+ if (ret < 0)
+ goto err_free_gem;
+
bo->dumb = params->dumb;
+ if (fence) {
+ ret = -ENOMEM;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ goto err_put_id;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_objs;
+ }
+
if (params->virgl) {
- virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
} else {
- virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ virtio_gpu_cmd_create_resource(vgdev, bo, params,
+ objs, fence);
}
- virtio_gpu_init_ttm_placement(bo);
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
- ttm_bo_type_device, &bo->placement, 0,
- true, acc_size, NULL, NULL,
- &virtio_gpu_ttm_bo_destroy);
- /* ttm_bo_init failure will call the destroy */
- if (ret != 0)
+ ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
return ret;
-
- if (fence) {
- struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct ww_acquire_ctx ticket;
- unsigned long irq_flags;
- bool signaled;
-
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&bo->gem_base);
- mainbuf.bo = &bo->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret == 0) {
- spin_lock_irqsave(&drv->lock, irq_flags);
- signaled = virtio_fence_signaled(&fence->f);
- if (!signaled)
- /* virtio create command still in flight */
- ttm_eu_fence_buffer_objects(&ticket, &validate_list,
- &fence->f);
- spin_unlock_irqrestore(&drv->lock, irq_flags);
- if (signaled)
- /* virtio create command finished */
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- }
- virtio_gpu_unref_list(&validate_list);
}
*bo_ptr = bo;
return 0;
-}
-
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
-{
- bo->vmap = NULL;
- ttm_bo_kunmap(&bo->kmap);
-}
-
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
-{
- bool is_iomem;
- int r;
-
- WARN_ON(bo->vmap);
-
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
- if (r)
- return r;
- bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
- return 0;
-}
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo)
-{
- int ret;
- struct page **pages = bo->tbo.ttm->pages;
- int nr_pages = bo->tbo.num_pages;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- size_t max_segment;
-
- /* wtf swapping */
- if (bo->pages)
- return 0;
-
- if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
- bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!bo->pages)
- goto out;
-
- max_segment = virtio_max_dma_size(qdev->vdev);
- max_segment &= PAGE_MASK;
- if (max_segment > SCATTERLIST_MAX_SEGMENT)
- max_segment = SCATTERLIST_MAX_SEGMENT;
- ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
- max_segment, GFP_KERNEL);
- if (ret)
- goto out;
- return 0;
-out:
- kfree(bo->pages);
- bo->pages = NULL;
- return -ENOMEM;
-}
-
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
-{
- sg_free_table(bo->pages);
- kfree(bo->pages);
- bo->pages = NULL;
-}
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0))
- return r;
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
+err_put_objs:
+ virtio_gpu_array_put_free(objs);
+err_put_id:
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ret;
}
-
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a492ac3f4a7e..390524143139 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -84,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- return 0;
+ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->fb || !state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ is_cursor, true);
+ return ret;
}
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
@@ -109,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->src_w >> 16),
- cpu_to_le32(plane->state->src_h >> 16),
- cpu_to_le32(plane->state->src_x >> 16),
- cpu_to_le32(plane->state->src_y >> 16), NULL);
+ (vgdev, 0,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ objs, NULL);
}
} else {
handle = 0;
@@ -186,7 +208,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
- int ret = 0;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -205,20 +226,20 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
/* new cursor -- update & wait */
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- 0, 0, vgfb->fence);
- ret = virtio_gpu_object_reserve(bo, false);
- if (!ret) {
- dma_resv_add_excl_fence(bo->tbo.base.resv,
- &vgfb->fence->f);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
- virtio_gpu_object_unreserve(bo);
- virtio_gpu_object_wait(bo, false);
- }
+ (vgdev, 0,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ 0, 0, objs, vgfb->fence);
+ dma_fence_wait(&vgfb->fence->f, true);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index dc642a884b88..050d24c39a8f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -30,43 +30,9 @@
* device that might share buffers with virtgpu
*/
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
- /* should not happen */
- return ERR_PTR(-EINVAL);
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
- bo->tbo.ttm->num_pages);
-}
-
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table)
{
return ERR_PTR(-ENODEV);
}
-
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- int ret;
-
- ret = virtio_gpu_object_kmap(bo);
- if (ret)
- return NULL;
- return bo->vmap;
-}
-
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
-}
-
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(obj, vma);
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
deleted file mode 100644
index f87903641847..000000000000
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Authors:
- * Dave Airlie
- * Alon Levy
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/delay.h>
-
-#include <drm/drm.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/virtgpu_drm.h>
-
-#include "virtgpu_drv.h"
-
-static struct
-virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
-{
- struct virtio_gpu_mman *mman;
- struct virtio_gpu_device *vgdev;
-
- mman = container_of(bdev, struct virtio_gpu_mman, bdev);
- vgdev = container_of(mman, struct virtio_gpu_device, mman);
- return vgdev;
-}
-
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct virtio_gpu_device *vgdev;
- int r;
-
- file_priv = filp->private_data;
- vgdev = file_priv->minor->dev->dev_private;
- if (vgdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-
- return r;
-}
-
-static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
- uint32_t flags)
-{
- return 0;
-}
-
-static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)1;
- return 0;
-}
-
-static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)NULL;
-}
-
-static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
-{
- return 0;
-}
-
-static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
-static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
-static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
- .init = ttm_bo_man_init,
- .takedown = ttm_bo_man_takedown,
- .get_node = ttm_bo_man_get_node,
- .put_node = ttm_bo_man_put_node,
- .debug = ttm_bo_man_debug
-};
-
-static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- man->func = &virtio_gpu_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- static const struct ttm_place placements = {
- .fpfn = 0,
- .lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
- };
-
- placement->placement = &placements;
- placement->busy_placement = &placements;
- placement->num_placement = 1;
- placement->num_busy_placement = 1;
-}
-
-static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- return 0;
-}
-
-static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- case TTM_PL_TT:
- /* system memory */
- return 0;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-/*
- * TTM backend functions.
- */
-struct virtio_gpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct virtio_gpu_object *obj;
-};
-
-static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
- return 0;
-}
-
-static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_detach(vgdev, gtt->obj);
- return 0;
-}
-
-static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
-
- ttm_dma_tt_fini(&gtt->ttm);
- kfree(gtt);
-}
-
-static struct ttm_backend_func virtio_gpu_tt_func = {
- .bind = &virtio_gpu_ttm_tt_bind,
- .unbind = &virtio_gpu_ttm_tt_unbind,
- .destroy = &virtio_gpu_ttm_tt_destroy,
-};
-
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct virtio_gpu_device *vgdev;
- struct virtio_gpu_ttm_tt *gtt;
-
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
- return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_tt_func;
- gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
- return NULL;
- }
- return &gtt->ttm.ttm;
-}
-
-static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
-{
- struct virtio_gpu_object *bo;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
-
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
-}
-
-static struct ttm_bo_driver virtio_gpu_bo_driver = {
- .ttm_tt_create = &virtio_gpu_ttm_tt_create,
- .invalidate_caches = &virtio_gpu_invalidate_caches,
- .init_mem_type = &virtio_gpu_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = &virtio_gpu_evict_flags,
- .verify_access = &virtio_gpu_verify_access,
- .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
- .io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .swap_notify = &virtio_gpu_bo_swap_notify,
-};
-
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
-{
- int r;
-
- /* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&vgdev->mman.bdev,
- &virtio_gpu_bo_driver,
- vgdev->ddev->anon_inode->i_mapping,
- false);
- if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
- goto err_dev_init;
- }
-
- r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
- if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
- goto err_mm_init;
- }
- return 0;
-
-err_mm_init:
- ttm_bo_device_release(&vgdev->mman.bdev);
-err_dev_init:
- return r;
-}
-
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
-{
- ttm_bo_device_release(&vgdev->mman.bdev);
- DRM_INFO("virtio_gpu: ttm finalized\n");
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7ac20490e1b4..74ad3bc3ebe8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -155,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
{
if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
kfree(vbuf->resp_buf);
- kfree(vbuf->data_buf);
+ kvfree(vbuf->data_buf);
kmem_cache_free(vgdev->vbufs, vbuf);
}
@@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
spin_unlock(&vgdev->ctrlq.qlock);
- list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
-
- list_del(&entry->list);
- free_vbuf(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
if (fence_id)
virtio_gpu_fence_event_process(vgdev, fence_id);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ if (entry->objs)
+ virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -252,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
wake_up(&vgdev->cursorq.ack_queue);
}
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+/* Create sg_table from a vmalloc'd buffer. */
+static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
+{
+ int ret, s, i;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct page *pg;
+
+ if (WARN_ON(!PAGE_ALIGNED(data)))
+ return NULL;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return NULL;
+ }
+
+ for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+ pg = vmalloc_to_page(data);
+ if (!pg) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+ }
+
+ s = min_t(int, PAGE_SIZE, size);
+ sg_set_page(sg, pg, s, 0);
+
+ size -= s;
+ data += s;
+ }
+
+ return sgt;
+}
+
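
The page-by-page walk is needed because sg_init_one(), used for the kmalloc case below, relies on the buffer being physically contiguous lowmem (it goes through virt_to_page()); a vmemdup_user() allocation may instead live in vmalloc space, where pages are only virtually contiguous and have to be resolved individually with vmalloc_to_page().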
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct scatterlist *vout)
__releases(&vgdev->ctrlq.qlock)
__acquires(&vgdev->ctrlq.qlock)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vout, vresp;
+ struct scatterlist *sgs[3], vcmd, vresp;
int outcnt = 0, incnt = 0;
+ bool notify = false;
int ret;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return notify;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
sgs[outcnt + incnt] = &vcmd;
outcnt++;
- if (vbuf->data_size) {
- sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
- sgs[outcnt + incnt] = &vout;
+ if (vout) {
+ sgs[outcnt + incnt] = vout;
outcnt++;
}
@@ -292,32 +337,35 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
-
- if (!ret)
- ret = vq->num_free;
- return ret;
-}
-
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
-{
- int rc;
-
- spin_lock(&vgdev->ctrlq.qlock);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
- spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ return notify;
}
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_ctrl_hdr *hdr,
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- int rc;
+ struct scatterlist *vout = NULL, sg;
+ struct sg_table *sgt = NULL;
+ bool notify;
+ int outcnt = 0;
+
+ if (vbuf->data_size) {
+ if (is_vmalloc_addr(vbuf->data_buf)) {
+ sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+ &outcnt);
+ if (!sgt)
+ return;
+ vout = sgt->sgl;
+ } else {
+ sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
+ vout = &sg;
+ outcnt = 1;
+ }
+ }
again:
spin_lock(&vgdev->ctrlq.qlock);
@@ -330,29 +378,47 @@ again:
* to wait for free space, which can result in fence ids being
* submitted out-of-order.
*/
- if (vq->num_free < 3) {
+ if (vq->num_free < 2 + outcnt) {
spin_unlock(&vgdev->ctrlq.qlock);
wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
goto again;
}
- if (fence)
+ if (hdr && fence) {
virtio_gpu_fence_emit(vgdev, hdr, fence);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
+ }
+ notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}
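
Two ordering details in the queueing path above are worth spelling out: virtqueue_kick_prepare() only computes whether the host must be notified, so it is cheap enough to run under ctrlq.qlock, while the potentially expensive virtqueue_notify() (typically a VM exit) happens after the lock is dropped; and the fence is emitted and attached to the reservation objects before the buffer is queued, all under the same lock, which is what keeps fence ids from reaching the host out of order.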
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ bool notify;
int ret;
int outcnt;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return;
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -370,14 +436,13 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
spin_unlock(&vgdev->cursorq.qlock);
- if (!ret)
- ret = vq->num_free;
- return ret;
+ if (notify)
+ virtqueue_notify(vq);
}
/* just create gem objects for userspace and long lived objects,
@@ -388,6 +453,7 @@ retry:
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
@@ -395,6 +461,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -481,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -498,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
- cmd_p->r.width = width;
- cmd_p->r.height = height;
- cmd_p->r.x = x;
- cmd_p->r.y = y;
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
@@ -826,34 +895,38 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -861,6 +934,7 @@ void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
@@ -868,6 +942,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -888,12 +963,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -906,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -917,20 +995,24 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->box = *box;
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -940,7 +1022,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence)
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -950,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
vbuf->data_buf = data;
vbuf->data_size = data_size;
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -965,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si, nents;
+ int si, nents, ret;
if (WARN_ON_ONCE(!obj->created))
return -EINVAL;
+ if (WARN_ON_ONCE(obj->pages))
+ return -EINVAL;
- if (!obj->pages) {
- int ret;
+ ret = drm_gem_shmem_pin(&obj->base.base);
+ if (ret < 0)
+ return -EINVAL;
- ret = virtio_gpu_object_get_sg_table(vgdev, obj);
- if (ret)
- return ret;
+ obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+ if (obj->pages == NULL) {
+ drm_gem_shmem_unpin(&obj->base.base);
+ return -EINVAL;
}
if (use_dma_api) {
@@ -1014,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ if (WARN_ON_ONCE(!obj->pages))
+ return;
+
if (use_dma_api && obj->mapped) {
struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host process it ... */
@@ -1029,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
} else {
virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
}
+
+ sg_free_table(obj->pages);
+ obj->pages = NULL;
+
+ drm_gem_shmem_unpin(&obj->base.base);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
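
With TTM gone, backing-store handling is symmetric around the shmem helpers: virtio_gpu_object_attach() pins the object and caches its sg_table in obj->pages, virtio_gpu_object_detach() frees the table and unpins, and the new WARN_ON_ONCE checks in both functions catch a second attach or a detach without pages.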
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 927dafaebc76..74f703b8d22a 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -16,17 +16,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
u64 ret_overrun;
bool ret;
- spin_lock(&output->lock);
-
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
+ spin_unlock(&output->lock);
+
if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
@@ -48,8 +49,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
DRM_DEBUG_DRIVER("Composer worker already queued\n");
}
- spin_unlock(&output->lock);
-
return HRTIMER_RESTART;
}
@@ -85,7 +84,7 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- *vblank_time = output->vblank_hrtimer.node.expires;
+ *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
return true;
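
The locking change narrows output->lock to the vblank bookkeeping and the composer_state read; forwarding the hrtimer now happens outside the lock, so the timestamp read in vkms_get_vblank_timestamp() becomes a READ_ONCE() to tolerate racing with the timer callback updating node.expires.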
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 44ab9f8ef8be..d1fe144aa289 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -11,13 +11,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
@@ -83,7 +84,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
struct vkms_crtc_state *vkms_state =
@@ -103,6 +104,8 @@ static struct drm_driver vkms_driver = {
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = vkms_prime_import_sg_table,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -157,6 +160,14 @@ static int __init vkms_init(void)
if (ret)
goto out_unregister;
+ ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+ DMA_BIT_MASK(64));
+
+ if (ret) {
+ DRM_ERROR("Could not initialize DMA support\n");
+ goto out_fini;
+ }
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5a95100fa18b..7d52e24564db 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -137,6 +137,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* Prime */
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 6489bfe0a149..2e01186fb943 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
+#include <drm/drm_prime.h>
#include "vkms_drv.h"
@@ -218,3 +220,28 @@ out:
mutex_unlock(&vkms_obj->pages_lock);
return ret;
}
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct vkms_gem_object *obj;
+ int npages;
+
+ obj = __vkms_gem_create(dev, attach->dmabuf->size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+ DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!obj->pages) {
+ vkms_gem_free_object(&obj->gem);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+ return &obj->gem;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aad8d8140259..74016a08d118 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -566,7 +566,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -682,12 +682,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = dma_resv_wait_timeout_rcu
@@ -700,15 +700,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0;
}
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_wait(bo, true, nonblock);
+ if (likely(ret == 0))
+ atomic_inc(&user_bo->vbo.cpu_writers);
+
+ ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
return ret;
}
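
The synccpu write grab is now open-coded instead of using TTM's ttm_bo_synccpu_write_grab()/_release() pair: the BO is reserved, waited on, and a driver-side cpu_writers counter is incremented; vmw_validation_bo_validate_single() below refuses validation with -EBUSY while that counter is non-zero.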
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b38bcb032c99..e962048f65d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -576,8 +576,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
else
dev_priv->map_mode = vmw_dma_map_populate;
- /* No TTM coherent page pool? FIXME: Ask TTM instead! */
- if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
(dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
@@ -827,9 +826,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+ drm_vma_offset_manager_init(&dev_priv->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
+ &dev_priv->vma_manager,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -986,6 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
(void) ttm_bo_device_release(&dev_priv->bdev);
+ drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
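
TTM no longer owns the VMA offset manager, so vmwgfx now embeds a drm_vma_offset_manager in vmw_private, initializes it before ttm_bo_device_init() (which takes it as a new argument), and tears it down after ttm_bo_device_release().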
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5eb73ded8e07..b18842f73081 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -102,6 +102,8 @@ struct vmw_fpriv {
* @base: The TTM buffer object
* @res_list: List of resources using this buffer object as a backing MOB
* @pin_count: pin depth
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
@@ -110,6 +112,7 @@ struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
+ atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
@@ -438,6 +441,7 @@ struct vmw_private {
struct vmw_fifo_state fifo;
struct drm_device *dev;
+ struct drm_vma_offset_manager vma_manager;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5581a7826b4c..6dfe36fb817c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
- true);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 29d8794f0421..de0530b4dc1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -336,7 +336,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
void *cmd;
if (res->func->destroy == vmw_gb_surface_destroy) {
@@ -360,7 +359,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = vmw_res_to_srf(res);
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f611b2290a1b..7bff3628fc54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -521,6 +521,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
};
int ret;
+ if (atomic_read(&vbo->cpu_writers))
+ return -EBUSY;
+
if (vbo->pin_count > 0)
return 0;
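
Validation now refuses buffers with outstanding synccpu write grabs. Because cpu_writers may be decremented without holding the reservation, a plain atomic_read() is enough for this gate; condensed into a hypothetical helper:

/* Sketch: any outstanding synccpu write grab makes the BO busy. */
static int vmw_bo_busy_check_sketch(struct vmw_buffer_object *vbo)
{
	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	return 0;
}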
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 0e063743dd86..71ce4b318850 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL, true);
+ NULL);
}
/**
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 21ad1c359b61..ff506bc99414 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe,
}
static enum drm_mode_status
-display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+display_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
{
struct xen_drm_front_drm_pipeline *pipeline =
- container_of(crtc, struct xen_drm_front_drm_pipeline,
- pipe.crtc);
+ container_of(pipe, struct xen_drm_front_drm_pipeline,
+ pipe);
if (mode->hdisplay != pipeline->width)
return MODE_ERROR;
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index cf987a317a55..6dab94adf25e 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,7 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 742aa9ff21b8..2c8559ff3481 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x,
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
- dma_set_max_seg_size(&device->dev, SZ_4M);
+ dma_set_max_seg_size(&device->dev, UINT_MAX);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 48c84c48299c..e8d3fda91d8a 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
*
* Must be called with the cdma lock held.
*/
-int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
- struct host1x_cdma *cdma,
- unsigned int needed)
+static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
+ struct host1x_cdma *cdma,
+ unsigned int needed)
{
while (true) {
struct push_buffer *pb = &cdma->push_buffer;
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 1436295aa450..4cd212bb570d 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
/**
* host1x_channel_request() - Allocate a channel
- * @device: Host1x unit this channel will be used to send commands to
+ * @client: Host1x client this channel will be used to send commands to
*
- * Allocates a new host1x channel for @device. May return NULL if CDMA
+ * Allocates a new host1x channel for @client. May return NULL if CDMA
* initialization fails.
*/
-struct host1x_channel *host1x_channel_request(struct device *dev)
+struct host1x_channel *host1x_channel_request(struct host1x_client *client)
{
- struct host1x *host = dev_get_drvdata(dev->parent);
+ struct host1x *host = dev_get_drvdata(client->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
struct host1x_channel *channel;
int err;
@@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
kref_init(&channel->refcount);
mutex_init(&channel->submitlock);
- channel->dev = dev;
+ channel->client = client;
+ channel->dev = client->dev;
err = host1x_hw_channel_init(host, channel, channel->id);
if (err < 0)
@@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
fail:
clear_bit(channel->id, chlist->allocated_channels);
- dev_err(dev, "failed to initialize channel\n");
+ dev_err(client->dev, "failed to initialize channel\n");
return NULL;
}
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 4fd694834f74..39044ff6c3aa 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -26,6 +26,7 @@ struct host1x_channel {
unsigned int id;
struct mutex submitlock;
void __iomem *regs;
+ struct host1x_client *client;
struct device *dev;
struct host1x_cdma cdma;
};
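
With host1x_channel_request() now keyed on a host1x client instead of a bare struct device, a caller looks roughly like the sketch below (hypothetical function; error handling abbreviated):

/* Sketch: request a channel for an already-registered host1x client. */
static int example_open_channel(struct host1x_client *client)
{
	struct host1x_channel *channel;

	channel = host1x_channel_request(client);
	if (!channel)
		return -EBUSY;	/* allocation or CDMA init failed */

	/* ... build and submit jobs on the channel ... */

	host1x_channel_put(channel);
	return 0;
}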
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 5a3f797240d4..a738ea55e407 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -18,10 +18,6 @@
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "bus.h"
#include "channel.h"
#include "debug.h"
@@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = {
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x02_info = {
@@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = {
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x04_info = {
@@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = {
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x05_info = {
@@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = {
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = {
.init = host1x06_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
@@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = {
.init = host1x07_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
@@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
+ int err;
+
+ /*
+ * If the host1x firewall is enabled, there's no need to enable IOMMU
+ * support. Similarly, if host1x is already attached to an IOMMU (via
+ * the DMA API), don't try to attach again.
+ */
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ return domain;
+
+ host->group = iommu_group_get(host->dev);
+ if (host->group) {
+ struct iommu_domain_geometry *geometry;
+ dma_addr_t start, end;
+ unsigned long order;
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto put_group;
+
+ host->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!host->domain) {
+ err = -ENOMEM;
+ goto put_cache;
+ }
+
+ err = iommu_attach_group(host->domain, host->group);
+ if (err) {
+ if (err == -ENODEV)
+ err = 0;
+
+ goto free_domain;
+ }
+
+ geometry = &host->domain->geometry;
+ start = geometry->aperture_start & host->info->dma_mask;
+ end = geometry->aperture_end & host->info->dma_mask;
+
+ order = __ffs(host->domain->pgsize_bitmap);
+ init_iova_domain(&host->iova, 1UL << order, start >> order);
+ host->iova_end = end;
+
+ domain = host->domain;
+ }
+
+ return domain;
+
+free_domain:
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+put_cache:
+ iova_cache_put();
+put_group:
+ iommu_group_put(host->group);
+ host->group = NULL;
+
+ return ERR_PTR(err);
+}
+
+static int host1x_iommu_init(struct host1x *host)
+{
+ u64 mask = host->info->dma_mask;
+ struct iommu_domain *domain;
+ int err;
+
+ domain = host1x_iommu_attach(host);
+ if (IS_ERR(domain)) {
+ err = PTR_ERR(domain);
+ dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
+ return err;
+ }
+
+ /*
+ * If we're not behind an IOMMU make sure we don't get push buffers
+ * that are allocated outside of the range addressable by the GATHER
+ * opcode.
+ *
+ * Newer generations of Tegra (Tegra186 and later) support a wide
+ * variant of the GATHER opcode that allows addressing more bits.
+ */
+ if (!domain && !host->info->has_wide_gather)
+ mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(host->dev, mask);
+ if (err < 0) {
+ dev_err(host->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void host1x_iommu_exit(struct host1x *host)
+{
+ if (host->domain) {
+ put_iova_domain(&host->iova);
+ iommu_detach_group(host->domain, host->group);
+
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+
+ iova_cache_put();
+
+ iommu_group_put(host->group);
+ host->group = NULL;
+ }
+}
+
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
@@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev)
return PTR_ERR(host->hv_regs);
}
- dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+ host->dev->dma_parms = &host->dma_parms;
+ dma_set_max_seg_size(host->dev, UINT_MAX);
if (host->info->init) {
err = host->info->init(host);
@@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (host->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(host->dev);
- arm_iommu_detach_device(host->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
- goto skip_iommu;
-
- host->group = iommu_group_get(&pdev->dev);
- if (host->group) {
- struct iommu_domain_geometry *geometry;
- u64 mask = dma_get_mask(host->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- err = iova_cache_get();
- if (err < 0)
- goto put_group;
-
- host->domain = iommu_domain_alloc(&platform_bus_type);
- if (!host->domain) {
- err = -ENOMEM;
- goto put_cache;
- }
- err = iommu_attach_group(host->domain, host->group);
- if (err) {
- if (err == -ENODEV) {
- iommu_domain_free(host->domain);
- host->domain = NULL;
- iova_cache_put();
- iommu_group_put(host->group);
- host->group = NULL;
- goto skip_iommu;
- }
-
- goto fail_free_domain;
- }
-
- geometry = &host->domain->geometry;
- start = geometry->aperture_start & mask;
- end = geometry->aperture_end & mask;
-
- order = __ffs(host->domain->pgsize_bitmap);
- init_iova_domain(&host->iova, 1UL << order, start >> order);
- host->iova_end = end;
+ err = host1x_iommu_init(host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
+ return err;
}
-skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
- goto fail_detach_device;
+ goto iommu_exit;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
- goto fail_free_channels;
+ goto free_channels;
}
err = reset_control_deassert(host->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
- goto fail_unprepare_disable;
+ goto unprepare_disable;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto fail_reset_assert;
+ goto reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
- goto fail_deinit_syncpt;
+ goto deinit_syncpt;
}
host1x_debug_init(host);
@@ -351,33 +432,22 @@ skip_iommu:
err = host1x_register(host);
if (err < 0)
- goto fail_deinit_intr;
+ goto deinit_intr;
return 0;
-fail_deinit_intr:
+deinit_intr:
host1x_intr_deinit(host);
-fail_deinit_syncpt:
+deinit_syncpt:
host1x_syncpt_deinit(host);
-fail_reset_assert:
+reset_assert:
reset_control_assert(host->rst);
-fail_unprepare_disable:
+unprepare_disable:
clk_disable_unprepare(host->clk);
-fail_free_channels:
+free_channels:
host1x_channel_list_free(&host->channel_list);
-fail_detach_device:
- if (host->group && host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- }
-fail_free_domain:
- if (host->domain)
- iommu_domain_free(host->domain);
-put_cache:
- if (host->group)
- iova_cache_put();
-put_group:
- iommu_group_put(host->group);
+iommu_exit:
+ host1x_iommu_exit(host);
return err;
}
@@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev)
struct host1x *host = platform_get_drvdata(pdev);
host1x_unregister(host);
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
-
- if (host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- iommu_domain_free(host->domain);
- iova_cache_put();
- iommu_group_put(host->group);
- }
+ host1x_iommu_exit(host);
return 0;
}
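
The heart of the new host1x_iommu_init() is the DMA mask policy: when host1x is not behind an IOMMU and the SoC lacks the wide GATHER opcode, push buffers must stay below 4 GiB. Distilled to its essence (a sketch, using the fields added in this series):

/* Sketch: DMA mask selection from host1x_iommu_init() above. */
u64 mask = host->info->dma_mask;	/* e.g. 34 bits on host1x04 */

/* No IOMMU and no GATHER_W: clamp so GATHER can address everything. */
if (!domain && !host->info->has_wide_gather)
	mask = DMA_BIT_MASK(32);

err = dma_coerce_mask_and_coherent(host->dev, mask);

So a host1x04-class SoC (34-bit mask, no wide gather) running without an IOMMU ends up clamped to 32 bits, while host1x06/host1x07 keep their full 40-bit mask.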
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index ff56f5e23a02..f781a9b0f39d 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -97,6 +97,7 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
+ bool has_wide_gather; /* supports GATHER_W opcode */
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
@@ -140,6 +141,8 @@ struct host1x {
struct list_head devices;
struct list_head list;
+
+ struct device_dma_parameters dma_parms;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 26f3c741d085..9245add23b5d 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
/* Add nr_completed to trace */
trace_host1x_channel_submit_complete(dev_name(channel->dev),
waiter->count, waiter->thresh);
-
}
static void action_wakeup(struct host1x_waitlist *waiter)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index eaa5c3352c13..25ca54de8fc5 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
+ struct host1x_client *client = job->client;
+ struct device *dev = client->dev;
unsigned int i;
int err;
@@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
+ dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
- dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
+ if (client->group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ if (sgt) {
+ unsigned long mask = HOST1X_RELOC_READ |
+ HOST1X_RELOC_WRITE;
+ enum dma_data_direction dir;
+
+ switch (reloc->flags & mask) {
+ case HOST1X_RELOC_READ:
+ dir = DMA_TO_DEVICE;
+ break;
+
+ case HOST1X_RELOC_WRITE:
+ dir = DMA_FROM_DEVICE;
+ break;
+
+ case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = dev;
+ job->unpins[job->num_unpins].dir = dir;
+ phys_addr = sg_dma_address(sgt->sgl);
+ }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(g->bo, &sgt);
+ sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
@@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- job->addr_phys[job->num_unpins] =
- iova_dma_addr(&host->iova, alloc);
job->unpins[job->num_unpins].size = gather_size;
+ phys_addr = iova_dma_addr(&host->iova, alloc);
} else {
- job->addr_phys[job->num_unpins] = phys_addr;
+ err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = host->dev;
+ phys_addr = sg_dma_address(sgt->sgl);
}
- job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
+ job->addr_phys[job->num_unpins] = phys_addr;
+ job->gather_addr_phys[i] = phys_addr;
+ job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
@@ -436,7 +494,8 @@ out:
return err;
}
-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+static inline int copy_gathers(struct device *host, struct host1x_job *job,
+ struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
@@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from the higher-priority pool first,
* as waiting for the allocation here is a major performance hit.
*/
- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
- job->gather_copy_mapped = dma_alloc_wc(dev, size,
+ job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
@@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
- err = copy_gathers(job, dev);
+ err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
@@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+ struct device *dev = unpin->dev ?: host->dev;
+ struct sg_table *sgt = unpin->sgt;
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
unpin->size && host->domain) {
@@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job)
iova_pfn(&host->iova, job->addr_phys[i]));
}
- host1x_bo_unpin(unpin->bo, unpin->sgt);
+ if (unpin->dev && sgt)
+ dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
+ unpin->dir);
+
+ host1x_bo_unpin(dev, unpin->bo, sgt);
host1x_bo_put(unpin->bo);
}
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_wc(job->channel->dev, job->gather_copy_size,
+ dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
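
One subtlety in the pin_job() hunks above is the dma_map_sg() error convention: it returns the number of mapped entries and 0 on failure, which is why a zero return is translated to -ENOMEM. The map/unmap pairing, condensed into a hypothetical helper (assuming the unpin bookkeeping added in this patch):

/* Sketch: map a pinned BO for DMA and record how to undo it later. */
static int example_map_bo(struct device *dev, struct sg_table *sgt,
			  enum dma_data_direction dir,
			  struct host1x_job_unpin_data *unpin)
{
	int count;

	count = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!count)		/* dma_map_sg() returns 0 on failure */
		return -ENOMEM;

	/* Remembered so the unpin path uses the same device/direction. */
	unpin->dev = dev;
	unpin->dir = dir;
	unpin->sgt = sgt;

	return 0;
}

The unpin side then mirrors it with dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents, unpin->dir), exactly as host1x_job_unpin() does above.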
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 62b8805e6b35..94bc2e4ae241 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -8,6 +8,8 @@
#ifndef __HOST1X_JOB_H
#define __HOST1X_JOB_H
+#include <linux/dma-direction.h>
+
struct host1x_job_gather {
unsigned int words;
dma_addr_t base;
@@ -19,7 +21,9 @@ struct host1x_job_gather {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
+ struct device *dev;
size_t size;
+ enum dma_data_direction dir;
};
/*
diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c
index fc8f57f97ce6..e3799a53a193 100644
--- a/drivers/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -361,9 +361,6 @@ static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
if (connection->mode_switch)
peer_space += sizeof(struct gb_operation_msg_hdr);
- if (!hd->driver->cport_quiesce)
- return 0;
-
ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
peer_space,
GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index fcc52797c169..6098e0cbdb4b 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -202,7 +202,7 @@ int hv_synic_init(unsigned int cpu)
{
hv_synic_enable_regs(cpu);
- hv_stimer_init(cpu);
+ hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
return 0;
}
@@ -277,7 +277,7 @@ int hv_synic_cleanup(unsigned int cpu)
if (channel_found && vmbus_connection.conn_state == CONNECTED)
return -EBUSY;
- hv_stimer_cleanup(cpu);
+ hv_stimer_legacy_cleanup(cpu);
hv_synic_disable_regs(cpu);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 53a60c81e220..8c06b3361c27 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1340,10 +1340,6 @@ static int vmbus_bus_init(void)
if (ret)
goto err_alloc;
- ret = hv_stimer_alloc(VMBUS_MESSAGE_SINT);
- if (ret < 0)
- goto err_alloc;
-
/*
* Initialize the per-cpu interrupt state and stimer state.
* Then connect to the host.
@@ -1400,9 +1396,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
- hv_stimer_free();
-err_alloc:
hv_synic_free();
+err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
@@ -2315,20 +2310,23 @@ static void hv_crash_handler(struct pt_regs *regs)
static int hv_synic_suspend(void)
{
/*
- * When we reach here, all the non-boot CPUs have been offlined, and
- * the stimers on them have been unbound in hv_synic_cleanup() ->
+ * When we reach here, all the non-boot CPUs have been offlined.
+ * If we're in a legacy configuration where stimer Direct Mode is
+ * not enabled, the stimers on the non-boot CPUs have been unbound
+ * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
* hv_stimer_cleanup() -> clockevents_unbind_device().
*
- * hv_synic_suspend() only runs on CPU0 with interrupts disabled. Here
- * we do not unbind the stimer on CPU0 because: 1) it's unnecessary
- * because the interrupts remain disabled between syscore_suspend()
- * and syscore_resume(): see create_image() and resume_target_kernel();
+ * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
+ * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
+ * 1) it's unnecessary as interrupts remain disabled between
+ * syscore_suspend() and syscore_resume(): see create_image() and
+ * resume_target_kernel()
* 2) the stimer on CPU0 is automatically disabled later by
* syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
- * -> clockevents_shutdown() -> ... -> hv_ce_shutdown(); 3) a warning
- * would be triggered if we call clockevents_unbind_device(), which
- * may sleep, in an interrupts-disabled context. So, we intentionally
- * don't call hv_stimer_cleanup(0) here.
+ * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
+ * 3) a warning would be triggered if we call
+ * clockevents_unbind_device(), which may sleep, in an
+ * interrupts-disabled context.
*/
hv_synic_disable_regs(0);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 13a6b4afb4b3..23dfe848979a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -40,7 +40,8 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
- depends on AB8500_GPADC && AB8500_BM
+ depends on AB8500_GPADC && AB8500_BM && (IIO = y)
+ default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -309,7 +310,6 @@ config SENSORS_APPLESMC
depends on INPUT && X86
select NEW_LEDS
select LEDS_CLASS
- select INPUT_POLLDEV
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -727,6 +727,33 @@ config SENSORS_LTC2945
This driver can also be built as a module. If so, the module will
be called ltc2945.
+config SENSORS_LTC2947
+ tristate
+
+config SENSORS_LTC2947_I2C
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over I2C"
+ depends on I2C
+ select REGMAP_I2C
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for Linear Technology LTC2947
+ I2C High Precision Power and Energy Monitor
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-i2c.
+
+config SENSORS_LTC2947_SPI
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over SPI"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for Linear Technology LTC2947
+ SPI High Precision Power and Energy Monitor
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-spi.
+
config SENSORS_LTC2990
tristate "Linear Technology LTC2990"
depends on I2C
@@ -1709,6 +1736,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TMP513
+ tristate "Texas Instruments TMP513 and compatibles"
+ depends on I2C
+ help
+	  If you say yes here you get support for Texas Instruments TMP512
+ and TMP513 temperature and power supply sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp513.
+
config SENSORS_VEXPRESS
tristate "Versatile Express"
depends on VEXPRESS_CONFIG
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 40c036ea45e6..6db5db9cdc29 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -106,6 +106,9 @@ obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2947) += ltc2947-core.o
+obj-$(CONFIG_SENSORS_LTC2947_I2C) += ltc2947-i2c.o
+obj-$(CONFIG_SENSORS_LTC2947_SPI) += ltc2947-spi.o
obj-$(CONFIG_SENSORS_LTC2990) += ltc2990.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
@@ -166,6 +169,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
index 207f77f85a40..53f3379d799d 100644
--- a/drivers/hwmon/ab8500.c
+++ b/drivers/hwmon/ab8500.c
@@ -17,20 +17,24 @@
#include <linux/hwmon-sysfs.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power/ab8500.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/iio/consumer.h>
#include "abx500.h"
#define DEFAULT_POWER_OFF_DELAY (HZ * 10)
#define THERMAL_VCC 1800
#define PULL_UP_RESISTOR 47000
-/* Number of monitored sensors should not greater than NUM_SENSORS */
-#define NUM_MONITORED_SENSORS 4
+
+#define AB8500_SENSOR_AUX1 0
+#define AB8500_SENSOR_AUX2 1
+#define AB8500_SENSOR_BTEMP_BALL 2
+#define AB8500_SENSOR_BAT_CTRL 3
+#define NUM_MONITORED_SENSORS 4
struct ab8500_gpadc_cfg {
const struct abx500_res_to_temp *temp_tbl;
@@ -40,7 +44,8 @@ struct ab8500_gpadc_cfg {
};
struct ab8500_temp {
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *aux1;
+ struct iio_channel *aux2;
struct ab8500_btemp *btemp;
struct delayed_work power_off_work;
struct ab8500_gpadc_cfg cfg;
@@ -82,15 +87,21 @@ static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp)
int voltage, ret;
struct ab8500_temp *ab8500_data = data->plat_data;
- if (sensor == BAT_CTRL) {
- *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
- } else if (sensor == BTEMP_BALL) {
+ if (sensor == AB8500_SENSOR_BTEMP_BALL) {
*temp = ab8500_btemp_get_temp(ab8500_data->btemp);
- } else {
- voltage = ab8500_gpadc_convert(ab8500_data->gpadc, sensor);
- if (voltage < 0)
- return voltage;
-
+ } else if (sensor == AB8500_SENSOR_BAT_CTRL) {
+ *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
+ } else if (sensor == AB8500_SENSOR_AUX1) {
+ ret = iio_read_channel_processed(ab8500_data->aux1, &voltage);
+ if (ret < 0)
+ return ret;
+ ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
+ if (ret < 0)
+ return ret;
+ } else if (sensor == AB8500_SENSOR_AUX2) {
+ ret = iio_read_channel_processed(ab8500_data->aux2, &voltage);
+ if (ret < 0)
+ return ret;
ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
if (ret < 0)
return ret;
@@ -164,10 +175,6 @@ int abx500_hwmon_init(struct abx500_temp *data)
if (!ab8500_data)
return -ENOMEM;
- ab8500_data->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- if (IS_ERR(ab8500_data->gpadc))
- return PTR_ERR(ab8500_data->gpadc);
-
ab8500_data->btemp = ab8500_btemp_get();
if (IS_ERR(ab8500_data->btemp))
return PTR_ERR(ab8500_data->btemp);
@@ -181,15 +188,25 @@ int abx500_hwmon_init(struct abx500_temp *data)
ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size;
data->plat_data = ab8500_data;
+ ab8500_data->aux1 = devm_iio_channel_get(&data->pdev->dev, "aux1");
+ if (IS_ERR(ab8500_data->aux1)) {
+ if (PTR_ERR(ab8500_data->aux1) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&data->pdev->dev, "failed to get AUX1 ADC channel\n");
+ return PTR_ERR(ab8500_data->aux1);
+ }
+ ab8500_data->aux2 = devm_iio_channel_get(&data->pdev->dev, "aux2");
+ if (IS_ERR(ab8500_data->aux2)) {
+ if (PTR_ERR(ab8500_data->aux2) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&data->pdev->dev, "failed to get AUX2 ADC channel\n");
+ return PTR_ERR(ab8500_data->aux2);
+ }
- /*
- * ADC_AUX1 and ADC_AUX2, connected to external NTC
- * BTEMP_BALL and BAT_CTRL, fixed usage
- */
- data->gpadc_addr[0] = ADC_AUX1;
- data->gpadc_addr[1] = ADC_AUX2;
- data->gpadc_addr[2] = BTEMP_BALL;
- data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[0] = AB8500_SENSOR_AUX1;
+ data->gpadc_addr[1] = AB8500_SENSOR_AUX2;
+ data->gpadc_addr[2] = AB8500_SENSOR_BTEMP_BALL;
+ data->gpadc_addr[3] = AB8500_SENSOR_BAT_CTRL;
data->monitored_sensors = NUM_MONITORED_SENSORS;
data->ops.read_sensor = ab8500_read_sensor;
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a5cf6b2a6e49..681f0623868f 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1264,7 +1264,7 @@ static int abituguru_probe(struct platform_device *pdev)
* El weirdo probe order, to keep the sysfs order identical to the
* BIOS and window-application listing order.
*/
- const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
+ static const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
0x00, 0x01, 0x03, 0x04, 0x0A, 0x08, 0x0E, 0x02,
0x09, 0x06, 0x05, 0x0B, 0x0F, 0x0D, 0x07, 0x0C };
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 183ff3d25129..ec93b8d673f5 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -140,7 +140,7 @@ static s16 rest_y;
static u8 backlight_state[2];
static struct device *hwmon_dev;
-static struct input_polled_dev *applesmc_idev;
+static struct input_dev *applesmc_idev;
/*
* Last index written to key_at_index sysfs file, and value to use for all other
@@ -681,9 +681,8 @@ static void applesmc_calibrate(void)
rest_x = -rest_x;
}
-static void applesmc_idev_poll(struct input_polled_dev *dev)
+static void applesmc_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s16 x, y;
if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x))
@@ -1134,7 +1133,6 @@ out:
/* Create accelerometer resources */
static int applesmc_create_accelerometer(void)
{
- struct input_dev *idev;
int ret;
if (!smcreg.has_accelerometer)
@@ -1144,37 +1142,38 @@ static int applesmc_create_accelerometer(void)
if (ret)
goto out;
- applesmc_idev = input_allocate_polled_device();
+ applesmc_idev = input_allocate_device();
if (!applesmc_idev) {
ret = -ENOMEM;
goto out_sysfs;
}
- applesmc_idev->poll = applesmc_idev_poll;
- applesmc_idev->poll_interval = APPLESMC_POLL_INTERVAL;
-
/* initial calibrate for the input device */
applesmc_calibrate();
/* initialize the input device */
- idev = applesmc_idev->input;
- idev->name = "applesmc";
- idev->id.bustype = BUS_HOST;
- idev->dev.parent = &pdev->dev;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X,
+ applesmc_idev->name = "applesmc";
+ applesmc_idev->id.bustype = BUS_HOST;
+ applesmc_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(applesmc_idev, ABS_X,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- input_set_abs_params(idev, ABS_Y,
+ input_set_abs_params(applesmc_idev, ABS_Y,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- ret = input_register_polled_device(applesmc_idev);
+ ret = input_setup_polling(applesmc_idev, applesmc_idev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL);
+
+ ret = input_register_device(applesmc_idev);
if (ret)
goto out_idev;
return 0;
out_idev:
- input_free_polled_device(applesmc_idev);
+ input_free_device(applesmc_idev);
out_sysfs:
applesmc_destroy_nodes(accelerometer_group);
@@ -1189,8 +1188,7 @@ static void applesmc_release_accelerometer(void)
{
if (!smcreg.has_accelerometer)
return;
- input_unregister_polled_device(applesmc_idev);
- input_free_polled_device(applesmc_idev);
+ input_unregister_device(applesmc_idev);
applesmc_destroy_nodes(accelerometer_group);
}
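
The applesmc conversion above follows the standard recipe for moving off the deprecated input_polled_dev API: allocate a plain input device, install a poll handler with input_setup_polling(), set the interval, then register normally. A minimal self-contained sketch (hypothetical device):

#include <linux/input.h>

static void example_poll(struct input_dev *idev)
{
	/* Read the hardware, then input_report_abs()/input_sync(). */
}

static int example_register(struct device *parent)
{
	struct input_dev *idev;
	int ret;

	idev = input_allocate_device();
	if (!idev)
		return -ENOMEM;

	idev->name = "example";
	idev->id.bustype = BUS_HOST;
	idev->dev.parent = parent;
	input_set_abs_params(idev, ABS_X, -256, 256, 0, 0);

	ret = input_setup_polling(idev, example_poll);
	if (ret)
		goto err_free;

	input_set_poll_interval(idev, 100);	/* ms; value assumed */

	ret = input_register_device(idev);
	if (ret)
		goto err_free;

	return 0;

err_free:
	input_free_device(idev);
	return ret;
}

Note the teardown asymmetry, visible in applesmc_release_accelerometer() above: once registered, the device is torn down by input_unregister_device() alone; input_free_device() is only for the not-yet-registered error path.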
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 40c489be62ea..33fb54845bf6 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -891,17 +891,12 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
struct device_node *np, *child;
struct aspeed_pwm_tacho_data *priv;
void __iomem *regs;
- struct resource *res;
struct device *hwmon;
struct clk *clk;
int ret;
np = dev->of_node;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 4212d022d253..17583bf8c2dc 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -68,6 +68,8 @@ static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
static bool disallow_fan_support;
+static unsigned int manual_fan;
+static unsigned int auto_fan;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -301,6 +303,20 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
}
/*
+ * Enable or disable automatic BIOS fan control support
+ */
+static int i8k_enable_fan_auto_mode(bool enable)
+{
+ struct smm_regs regs = { };
+
+ if (disallow_fan_support)
+ return -EINVAL;
+
+ regs.eax = enable ? auto_fan : manual_fan;
+ return i8k_smm(&regs);
+}
+
+/*
* Set the fan speed (off, low, high). Returns the new fan status.
*/
static int i8k_set_fan(int fan, int speed)
@@ -726,6 +742,35 @@ static ssize_t i8k_hwmon_pwm_store(struct device *dev,
return err < 0 ? -EIO : count;
}
+static ssize_t i8k_hwmon_pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ bool enable;
+ unsigned long val;
+
+ if (!auto_fan)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val == 1)
+ enable = false;
+ else if (val == 2)
+ enable = true;
+ else
+ return -EINVAL;
+
+ mutex_lock(&i8k_mutex);
+ err = i8k_enable_fan_auto_mode(enable);
+ mutex_unlock(&i8k_mutex);
+
+ return err ? err : count;
+}
+
static SENSOR_DEVICE_ATTR_RO(temp1_input, i8k_hwmon_temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_label, i8k_hwmon_temp_label, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, i8k_hwmon_temp, 1);
@@ -749,6 +794,7 @@ static SENSOR_DEVICE_ATTR_RO(temp10_label, i8k_hwmon_temp_label, 9);
static SENSOR_DEVICE_ATTR_RO(fan1_input, i8k_hwmon_fan, 0);
static SENSOR_DEVICE_ATTR_RO(fan1_label, i8k_hwmon_fan_label, 0);
static SENSOR_DEVICE_ATTR_RW(pwm1, i8k_hwmon_pwm, 0);
+static SENSOR_DEVICE_ATTR_WO(pwm1_enable, i8k_hwmon_pwm_enable, 0);
static SENSOR_DEVICE_ATTR_RO(fan2_input, i8k_hwmon_fan, 1);
static SENSOR_DEVICE_ATTR_RO(fan2_label, i8k_hwmon_fan_label, 1);
static SENSOR_DEVICE_ATTR_RW(pwm2, i8k_hwmon_pwm, 1);
@@ -780,12 +826,13 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr, /* 20 */
&sensor_dev_attr_fan1_label.dev_attr.attr, /* 21 */
&sensor_dev_attr_pwm1.dev_attr.attr, /* 22 */
- &sensor_dev_attr_fan2_input.dev_attr.attr, /* 23 */
- &sensor_dev_attr_fan2_label.dev_attr.attr, /* 24 */
- &sensor_dev_attr_pwm2.dev_attr.attr, /* 25 */
- &sensor_dev_attr_fan3_input.dev_attr.attr, /* 26 */
- &sensor_dev_attr_fan3_label.dev_attr.attr, /* 27 */
- &sensor_dev_attr_pwm3.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr, /* 23 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 24 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 25 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 26 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 27 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 29 */
NULL
};
@@ -828,16 +875,19 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP10))
return 0;
- if (index >= 20 && index <= 22 &&
+ if (index >= 20 && index <= 23 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
return 0;
- if (index >= 23 && index <= 25 &&
+ if (index >= 24 && index <= 26 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
- if (index >= 26 && index <= 28 &&
+ if (index >= 27 && index <= 29 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
return 0;
+ if (index == 23 && !auto_fan)
+ return 0;
+
return attr->mode;
}
@@ -1135,12 +1185,48 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
{ }
};
+struct i8k_fan_control_data {
+ unsigned int manual_fan;
+ unsigned int auto_fan;
+};
+
+enum i8k_fan_controls {
+ I8K_FAN_34A3_35A3,
+};
+
+static const struct i8k_fan_control_data i8k_fan_control_data[] = {
+ [I8K_FAN_34A3_35A3] = {
+ .manual_fan = 0x34a3,
+ .auto_fan = 0x35a3,
+ },
+};
+
+static struct dmi_system_id i8k_whitelist_fan_control[] __initdata = {
+ {
+ .ident = "Dell Precision 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Precision 5530"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ {
+ .ident = "Dell Latitude E6440",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
{
- const struct dmi_system_id *id;
+ const struct dmi_system_id *id, *fan_control;
int fan, ret;
/*
@@ -1200,6 +1286,15 @@ static int __init i8k_probe(void)
i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
+ fan_control = dmi_first_match(i8k_whitelist_fan_control);
+ if (fan_control && fan_control->driver_data) {
+ const struct i8k_fan_control_data *data = fan_control->driver_data;
+
+ manual_fan = data->manual_fan;
+ auto_fan = data->auto_fan;
+ pr_info("enabling support for setting automatic/manual fan control\n");
+ }
+
if (!fan_mult) {
/*
* Autodetect fan multiplier based on nominal rpm
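
The new pwm1_enable attribute sticks to the usual hwmon meaning of those values: writing 1 selects manual fan control and 2 hands control back to the BIOS; anything else is rejected. Internally that boils down to issuing one of the two whitelisted SMM commands (a sketch of the decode used above):

/* Sketch: map the hwmon pwm1_enable value onto the SMM command pair. */
static int example_decode_pwm_enable(unsigned long val, bool *enable)
{
	switch (val) {
	case 1:			/* manual fan control (manual_fan cmd) */
		*enable = false;
		return 0;
	case 2:			/* automatic BIOS control (auto_fan cmd) */
		*enable = true;
		return 0;
	default:
		return -EINVAL;
	}
}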
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 8a51dcf055ea..f335d0cb0c77 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -31,6 +31,8 @@
#define INA3221_WARN2 0x0a
#define INA3221_CRIT3 0x0b
#define INA3221_WARN3 0x0c
+#define INA3221_SHUNT_SUM 0x0d
+#define INA3221_CRIT_SUM 0x0e
#define INA3221_MASK_ENABLE 0x0f
#define INA3221_CONFIG_MODE_MASK GENMASK(2, 0)
@@ -50,6 +52,8 @@
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
+#define INA3221_MASK_ENABLE_SCC_MASK GENMASK(14, 12)
+
#define INA3221_CONFIG_DEFAULT 0x7127
#define INA3221_RSHUNT_DEFAULT 10000
@@ -60,9 +64,11 @@ enum ina3221_fields {
/* Status Flags */
F_CVRF,
- /* Alert Flags */
+ /* Warning Flags */
F_WF3, F_WF2, F_WF1,
- F_CF3, F_CF2, F_CF1,
+
+ /* Alert Flags: SF is the summation-alert flag */
+ F_SF, F_CF3, F_CF2, F_CF1,
/* sentinel */
F_MAX_FIELDS
@@ -75,6 +81,7 @@ static const struct reg_field ina3221_reg_fields[] = {
[F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
[F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
[F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+ [F_SF] = REG_FIELD(INA3221_MASK_ENABLE, 6, 6),
[F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
[F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
[F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
@@ -107,6 +114,7 @@ struct ina3221_input {
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
* @reg_config: Register value of INA3221_CONFIG
+ * @summation_shunt_resistor: equivalent shunt resistor value for summation
* @single_shot: running in single-shot operating mode
*/
struct ina3221_data {
@@ -116,16 +124,51 @@ struct ina3221_data {
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
u32 reg_config;
+ int summation_shunt_resistor;
bool single_shot;
};
static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel)
{
+ /* Summation channel checks shunt resistor values */
+ if (channel > INA3221_CHANNEL3)
+ return ina->summation_shunt_resistor != 0;
+
return pm_runtime_active(ina->pm_dev) &&
(ina->reg_config & INA3221_CONFIG_CHx_EN(channel));
}
+/**
+ * Helper function to return the resistor value for current summation.
+ *
+ * There is a condition to calculate current summation -- all the shunt
+ * resistor values should be the same, so as to simply fit the formula:
+ * current summation = shunt voltage summation / shunt resistor
+ *
+ * Returns the equivalent shunt resistor value on success or 0 on failure
+ */
+static inline int ina3221_summation_shunt_resistor(struct ina3221_data *ina)
+{
+ struct ina3221_input *input = ina->inputs;
+ int i, shunt_resistor = 0;
+
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
+ if (input[i].disconnected || !input[i].shunt_resistor)
+ continue;
+ if (!shunt_resistor) {
+ /* Found the reference shunt resistor value */
+ shunt_resistor = input[i].shunt_resistor;
+ } else {
+ /* No summation if resistor values are different */
+ if (shunt_resistor != input[i].shunt_resistor)
+ return 0;
+ }
+ }
+
+ return shunt_resistor;
+}
+
/* Lookup table for Bus and Shunt conversion times in usec */
static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
@@ -183,7 +226,14 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
if (ret)
return ret;
- *val = sign_extend32(regval >> 3, 12);
+ /*
+ * Shunt Voltage Sum register has 14-bit value with 1-bit shift
+ * Other Shunt Voltage registers have 12 bits with 3-bit shift
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ *val = sign_extend32(regval >> 1, 14);
+ else
+ *val = sign_extend32(regval >> 3, 12);
return 0;
}
@@ -195,6 +245,7 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT1,
INA3221_SHUNT2,
INA3221_SHUNT3,
+ INA3221_SHUNT_SUM,
};
static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
@@ -224,8 +275,12 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
u8 reg = ina3221_in_reg[channel];
int regval, ret;
- /* Translate shunt channel index to sensor channel index */
- channel %= INA3221_NUM_CHANNELS;
+ /*
+	 * Translate the shunt channel index to a sensor channel index,
+	 * except for the 7th channel (index 6, 0-based), which is the
+	 * summation channel.
+ */
+ if (channel != 6)
+ channel %= INA3221_NUM_CHANNELS;
switch (attr) {
case hwmon_in_input:
@@ -259,22 +314,29 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
}
}
-static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS] = {
- [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2, INA3221_SHUNT3 },
- [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3 },
- [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2, INA3221_CRIT3 },
- [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3 },
- [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3 },
+static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS + 1] = {
+ [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2,
+ INA3221_SHUNT3, INA3221_SHUNT_SUM },
+ [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3, 0 },
+ [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2,
+ INA3221_CRIT3, INA3221_CRIT_SUM },
+ [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3, 0 },
+ [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3, F_SF },
};
static int ina3221_read_curr(struct device *dev, u32 attr,
int channel, long *val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, voltage_nv, ret;
+ int resistance_uo, voltage_nv;
+ int regval, ret;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
switch (attr) {
case hwmon_curr_input:
@@ -293,6 +355,9 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
/* fall through */
case hwmon_curr_crit:
case hwmon_curr_max:
+ if (!resistance_uo)
+ return -ENODATA;
+
ret = ina3221_read_value(ina, reg, &regval);
if (ret)
return ret;
@@ -366,10 +431,18 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, current_ma, voltage_uv;
+ int resistance_uo, current_ma, voltage_uv;
+ int regval;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
+
+ if (!resistance_uo)
+ return -EOPNOTSUPP;
/* clamp current */
current_ma = clamp_val(val,
@@ -381,8 +454,21 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
/* clamp voltage */
voltage_uv = clamp_val(voltage_uv, -163800, 163800);
- /* 1 / 40uV(scale) << 3(register shift) = 5 */
- regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+ /*
+ * Formula to convert voltage_uv to register value:
+ * regval = (voltage_uv / scale) << shift
+ * Note:
+ * The scale is 40uV for all shunt voltage registers
+ * Shunt Voltage Sum register left-shifts 1 bit
+ * All other Shunt Voltage registers shift 3 bits
+ * Results:
+ * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV
+ * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
+ else
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
return regmap_write(ina->regmap, reg, regval);
}
@@ -499,7 +585,10 @@ static int ina3221_read_string(struct device *dev, enum hwmon_sensor_types type,
struct ina3221_data *ina = dev_get_drvdata(dev);
int index = channel - 1;
- *str = ina->inputs[index].label;
+ if (channel == 7)
+ *str = "sum of shunt voltages";
+ else
+ *str = ina->inputs[index].label;
return 0;
}
@@ -529,6 +618,8 @@ static umode_t ina3221_is_visible(const void *drvdata,
case hwmon_in_label:
if (channel - 1 <= INA3221_CHANNEL3)
input = &ina->inputs[channel - 1];
+ else if (channel == 7)
+ return 0444;
/* Hide label node if label is not provided */
return (input && input->label) ? 0444 : 0;
case hwmon_in_input:
@@ -573,11 +664,16 @@ static const struct hwmon_channel_info *ina3221_info[] = {
/* 4-6: shunt voltage Channels */
HWMON_I_INPUT,
HWMON_I_INPUT,
- HWMON_I_INPUT),
+ HWMON_I_INPUT,
+ /* 7: summation of shunt voltage channels */
+ HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(curr,
+			   /* 1-3: current channels */
+ INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG),
+ /* 4: summation of current channels */
+ HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM),
NULL
};
@@ -624,6 +720,9 @@ static ssize_t ina3221_shunt_store(struct device *dev,
input->shunt_resistor = val;
+ /* Update summation_shunt_resistor for summation channel */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
return count;
}
@@ -642,6 +741,7 @@ ATTRIBUTE_GROUPS(ina3221);
static const struct regmap_range ina3221_yes_ranges[] = {
regmap_reg_range(INA3221_CONFIG, INA3221_BUS3),
+ regmap_reg_range(INA3221_SHUNT_SUM, INA3221_SHUNT_SUM),
regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
};
@@ -772,6 +872,9 @@ static int ina3221_probe(struct i2c_client *client,
ina->reg_config &= ~INA3221_CONFIG_CHx_EN(i);
}
+ /* Initialize summation_shunt_resistor for summation channel control */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
ina->pm_dev = dev;
mutex_init(&ina->lock);
dev_set_drvdata(dev, ina);
@@ -875,6 +978,22 @@ static int __maybe_unused ina3221_resume(struct device *dev)
if (ret)
return ret;
+ /* Initialize summation channel control */
+ if (ina->summation_shunt_resistor) {
+ /*
+ * Take all three channels into summation by default
+ * Shunt measurements of disconnected channels should
+ * be 0, so it does not matter for summation.
+ */
+ ret = regmap_update_bits(ina->regmap, INA3221_MASK_ENABLE,
+ INA3221_MASK_ENABLE_SCC_MASK,
+ INA3221_MASK_ENABLE_SCC_MASK);
+ if (ret) {
+ dev_err(dev, "Unable to control summation channel\n");
+ return ret;
+ }
+ }
+
return 0;
}
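
The summation plumbing above reuses the 40 uV per-LSB scale of the other shunt-voltage registers; only the register layout differs (14 significant bits shifted left by 1, versus 12 bits shifted by 3). Both conversion directions, condensed into hypothetical helpers:

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Sketch: SHUNT_SUM register value -> shunt voltage in uV (40 uV LSB). */
static int example_sum_reg_to_uv(u16 regval)
{
	return sign_extend32(regval >> 1, 14) * 40;
}

/* Sketch: shunt voltage in uV -> SHUNT_SUM register value. */
static u16 example_uv_to_sum_reg(int voltage_uv)
{
	/* (1 / 40 uV) << 1 == 1 / 20 uV; bit 0 is reserved. */
	return DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
}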
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
new file mode 100644
index 000000000000..bb3f7749a0b0
--- /dev/null
+++ b/drivers/hwmon/ltc2947-core.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+/* registers */
+#define LTC2947_REG_PAGE_CTRL 0xFF
+#define LTC2947_REG_CTRL 0xF0
+#define LTC2947_REG_TBCTL 0xE9
+#define LTC2947_CONT_MODE_MASK BIT(3)
+#define LTC2947_CONT_MODE(x) FIELD_PREP(LTC2947_CONT_MODE_MASK, x)
+#define LTC2947_PRE_MASK GENMASK(2, 0)
+#define LTC2947_PRE(x) FIELD_PREP(LTC2947_PRE_MASK, x)
+#define LTC2947_DIV_MASK GENMASK(7, 3)
+#define LTC2947_DIV(x) FIELD_PREP(LTC2947_DIV_MASK, x)
+#define LTC2947_SHUTDOWN_MASK BIT(0)
+#define LTC2947_REG_ACCUM_POL 0xE1
+#define LTC2947_ACCUM_POL_1_MASK GENMASK(1, 0)
+#define LTC2947_ACCUM_POL_1(x) FIELD_PREP(LTC2947_ACCUM_POL_1_MASK, x)
+#define LTC2947_ACCUM_POL_2_MASK GENMASK(3, 2)
+#define LTC2947_ACCUM_POL_2(x) FIELD_PREP(LTC2947_ACCUM_POL_2_MASK, x)
+#define LTC2947_REG_ACCUM_DEADBAND 0xE4
+#define LTC2947_REG_GPIOSTATCTL 0x67
+#define LTC2947_GPIO_EN_MASK BIT(0)
+#define LTC2947_GPIO_EN(x) FIELD_PREP(LTC2947_GPIO_EN_MASK, x)
+#define LTC2947_GPIO_FAN_EN_MASK BIT(6)
+#define LTC2947_GPIO_FAN_EN(x) FIELD_PREP(LTC2947_GPIO_FAN_EN_MASK, x)
+#define LTC2947_GPIO_FAN_POL_MASK BIT(7)
+#define LTC2947_GPIO_FAN_POL(x) FIELD_PREP(LTC2947_GPIO_FAN_POL_MASK, x)
+#define LTC2947_REG_GPIO_ACCUM 0xE3
+/* 200 kHz */
+#define LTC2947_CLK_MIN 200000
+/* 25 MHz */
+#define LTC2947_CLK_MAX 25000000
+#define LTC2947_PAGE0 0
+#define LTC2947_PAGE1 1
+/* Voltage registers */
+#define LTC2947_REG_VOLTAGE 0xA0
+#define LTC2947_REG_VOLTAGE_MAX 0x50
+#define LTC2947_REG_VOLTAGE_MIN 0x52
+#define LTC2947_REG_VOLTAGE_THRE_H 0x90
+#define LTC2947_REG_VOLTAGE_THRE_L 0x92
+#define LTC2947_REG_DVCC 0xA4
+#define LTC2947_REG_DVCC_MAX 0x58
+#define LTC2947_REG_DVCC_MIN 0x5A
+#define LTC2947_REG_DVCC_THRE_H 0x98
+#define LTC2947_REG_DVCC_THRE_L 0x9A
+#define LTC2947_VOLTAGE_GEN_CHAN 0
+#define LTC2947_VOLTAGE_DVCC_CHAN 1
+/* in mV */
+#define VOLTAGE_MAX 15500
+#define VOLTAGE_MIN -300
+#define VDVCC_MAX 15000
+#define VDVCC_MIN 4750
+/* Current registers */
+#define LTC2947_REG_CURRENT 0x90
+#define LTC2947_REG_CURRENT_MAX 0x40
+#define LTC2947_REG_CURRENT_MIN 0x42
+#define LTC2947_REG_CURRENT_THRE_H 0x80
+#define LTC2947_REG_CURRENT_THRE_L 0x82
+/* in mA */
+#define CURRENT_MAX 30000
+#define CURRENT_MIN -30000
+/* Power registers */
+#define LTC2947_REG_POWER 0x93
+#define LTC2947_REG_POWER_MAX 0x44
+#define LTC2947_REG_POWER_MIN 0x46
+#define LTC2947_REG_POWER_THRE_H 0x84
+#define LTC2947_REG_POWER_THRE_L 0x86
+/* in uW */
+#define POWER_MAX 450000000
+#define POWER_MIN -450000000
+/* Temperature registers */
+#define LTC2947_REG_TEMP 0xA2
+#define LTC2947_REG_TEMP_MAX 0x54
+#define LTC2947_REG_TEMP_MIN 0x56
+#define LTC2947_REG_TEMP_THRE_H 0x94
+#define LTC2947_REG_TEMP_THRE_L 0x96
+#define LTC2947_REG_TEMP_FAN_THRE_H 0x9C
+#define LTC2947_REG_TEMP_FAN_THRE_L 0x9E
+#define LTC2947_TEMP_FAN_CHAN 1
+/* in millidegrees Celsius */
+#define TEMP_MAX 85000
+#define TEMP_MIN -40000
+/* Energy registers */
+#define LTC2947_REG_ENERGY1 0x06
+#define LTC2947_REG_ENERGY2 0x16
+/* Status/Alarm/Overflow registers */
+#define LTC2947_REG_STATUS 0x80
+#define LTC2947_REG_STATVT 0x81
+#define LTC2947_REG_STATIP 0x82
+#define LTC2947_REG_STATVDVCC 0x87
+
+#define LTC2947_ALERTS_SIZE (LTC2947_REG_STATVDVCC - LTC2947_REG_STATUS)
+#define LTC2947_MAX_VOLTAGE_MASK BIT(0)
+#define LTC2947_MIN_VOLTAGE_MASK BIT(1)
+#define LTC2947_MAX_CURRENT_MASK BIT(0)
+#define LTC2947_MIN_CURRENT_MASK BIT(1)
+#define LTC2947_MAX_POWER_MASK BIT(2)
+#define LTC2947_MIN_POWER_MASK BIT(3)
+#define LTC2947_MAX_TEMP_MASK BIT(2)
+#define LTC2947_MIN_TEMP_MASK BIT(3)
+#define LTC2947_MAX_TEMP_FAN_MASK BIT(4)
+#define LTC2947_MIN_TEMP_FAN_MASK BIT(5)
+
+struct ltc2947_data {
+ struct regmap *map;
+ struct device *dev;
+	/*
+	 * The mutex is needed because the device has 2 memory pages. When
+	 * reading/writing, the correct page needs to be set first, so the
+	 * complete select_page->read/write sequence must be protected.
+	 */
+ struct mutex lock;
+ u32 lsb_energy;
+ bool gpio_out;
+};
+
+static int __ltc2947_val_read16(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be16 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(__val);
+
+ return 0;
+}
+
+static int __ltc2947_val_read24(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be32 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 3);
+ if (ret)
+ return ret;
+
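+	/*
+	 * The 3 bytes land in the most significant bytes of the __be32 (the
+	 * 4th byte stays 0), so drop the low byte after the conversion.
+	 */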
+ *val = be32_to_cpu(__val) >> 8;
+
+ return 0;
+}
+
+static int __ltc2947_val_read64(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be64 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 6);
+ if (ret)
+ return ret;
+
+ *val = be64_to_cpu(__val) >> 16;
+
+ return 0;
+}
+
+static int ltc2947_val_read(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, s64 *val)
+{
+ int ret;
+ u64 __val = 0;
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Read val, reg:%02X, p:%d sz:%zu\n", reg, page,
+ size);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_read16(st, reg, &__val);
+ break;
+ case 3:
+ ret = __ltc2947_val_read24(st, reg, &__val);
+ break;
+ case 6:
+ ret = __ltc2947_val_read64(st, reg, &__val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ if (ret)
+ return ret;
+
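+	/* e.g. a 2-byte read sign-extends from bit 15, a 6-byte read from bit 47 */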
+ *val = sign_extend64(__val, (8 * size) - 1);
+
+ dev_dbg(st->dev, "Got s:%lld, u:%016llX\n", *val, __val);
+
+ return 0;
+}
+
+static int __ltc2947_val_write64(const struct ltc2947_data *st, const u8 reg,
+ const u64 val)
+{
+ __be64 __val;
+
+ __val = cpu_to_be64(val << 16);
+ return regmap_bulk_write(st->map, reg, &__val, 6);
+}
+
+static int __ltc2947_val_write16(const struct ltc2947_data *st, const u8 reg,
+ const u16 val)
+{
+ __be16 __val;
+
+ __val = cpu_to_be16(val);
+ return regmap_bulk_write(st->map, reg, &__val, 2);
+}
+
+static int ltc2947_val_write(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, const u64 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* set device on correct page */
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Write val, r:%02X, p:%d, sz:%zu, val:%016llX\n",
+ reg, page, size, val);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_write16(st, reg, val);
+ break;
+ case 6:
+ ret = __ltc2947_val_write64(st, reg, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ltc2947_reset_history(struct ltc2947_data *st, const u8 reg_h,
+ const u8 reg_l)
+{
+ int ret;
+	/*
+	 * Reset the tracking registers. All tracking registers are
+	 * 2 bytes wide.
+	 */
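+	/*
+	 * Writing the most negative value (0x8000) to the MAX register and
+	 * the most positive (0x7FFF) to the MIN register guarantees that the
+	 * next sample updates both.
+	 */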
+ ret = ltc2947_val_write(st, reg_h, LTC2947_PAGE0, 2, 0x8000U);
+ if (ret)
+ return ret;
+
+ return ltc2947_val_write(st, reg_l, LTC2947_PAGE0, 2, 0x7FFFU);
+}
+
+static int ltc2947_alarm_read(struct ltc2947_data *st, const u8 reg,
+ const u32 mask, long *val)
+{
+ u8 offset = reg - LTC2947_REG_STATUS;
+ /* +1 to include status reg */
+ char alarms[LTC2947_ALERTS_SIZE + 1];
+ int ret = 0;
+
+ memset(alarms, 0, sizeof(alarms));
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, LTC2947_PAGE0);
+ if (ret)
+ goto unlock;
+
+ dev_dbg(st->dev, "Read alarm, reg:%02X, mask:%02X\n", reg, mask);
+ /*
+ * As stated in the datasheet, when Threshold and Overflow registers
+ * are used, the status and all alert registers must be read in one
+ * multi-byte transaction.
+ */
+ ret = regmap_bulk_read(st->map, LTC2947_REG_STATUS, alarms,
+ sizeof(alarms));
+ if (ret)
+ goto unlock;
+
+ /* get the alarm */
+ *val = !!(alarms[offset] & mask);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static ssize_t ltc2947_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret;
+ s64 val = 0;
+
+ ret = ltc2947_val_read(st, attr->index, LTC2947_PAGE0, 6, &val);
+ if (ret)
+ return ret;
+
+	/* value in microjoules; st->lsb_energy was multiplied by 10^9 */
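+	/* e.g. (illustrative): 1000 counts with lsb_energy = 19890 -> 19890uJ */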
+ val = div_s64(val * st->lsb_energy, 1000);
+
+ return sprintf(buf, "%lld\n", val);
+}
+
+static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ int ret;
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_max_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_MASK, val);
+ case hwmon_temp_min_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_MASK, val);
+ case hwmon_temp_max:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_temp_min:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+	/* temperature in millidegrees Celsius is given by: */
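+	/* e.g. (illustrative): a raw reading of 120 gives 25030, i.e. ~25.03 C */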
+ *val = (__val * 204) + 550;
+
+ return 0;
+}
+
+static int ltc2947_read_power(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u32 lsb = 200000; /* in uW */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER, LTC2947_PAGE0,
+ 3, &__val);
+ lsb = 50000;
+ break;
+ case hwmon_power_input_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_input_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_POWER_MASK, val);
+ case hwmon_power_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_POWER_MASK, val);
+ case hwmon_power_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_power_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_curr(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 12; /* in mA */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT,
+ LTC2947_PAGE0, 3, &__val);
+ lsb = 3;
+ break;
+ case hwmon_curr_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_CURRENT_MASK, val);
+ case hwmon_curr_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_CURRENT_MASK, val);
+ case hwmon_curr_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_curr_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_in(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 2; /* in mV */
+ s64 __val = 0;
+
+ if (channel < 0 || channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+		dev_err(st->dev, "Invalid chan%d for voltage\n", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_highest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_lowest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_max_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MAX_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_VOLTAGE_MASK, val);
+ case hwmon_in_min_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MIN_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_VOLTAGE_MASK, val);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_read_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_read_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_read_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_read_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_temp(struct device *dev, const u32 attr,
+ long val, const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel < 0 || channel > LTC2947_TEMP_FAN_CHAN) {
+		dev_err(st->dev, "Invalid chan%d for temperature\n", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_temp_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_TEMP_MAX,
+ LTC2947_REG_TEMP_MIN);
+ case hwmon_temp_max:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ case hwmon_temp_min:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_power(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_power_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_POWER_MAX,
+ LTC2947_REG_POWER_MIN);
+ case hwmon_power_max:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ case hwmon_power_min:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_curr(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_curr_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_REG_CURRENT_MIN);
+ case hwmon_curr_max:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ case hwmon_curr_min:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_in(struct device *dev, const u32 attr, long val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+		dev_err(st->dev, "Invalid chan%d for voltage\n", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_reset_history:
+ if (val != 1)
+ return -EINVAL;
+
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_reset_history(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_REG_DVCC_MIN);
+
+ return ltc2947_reset_history(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_REG_VOLTAGE_MIN);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_write_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_write_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_write_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_write_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ *str = "DVCC";
+ else
+ *str = "VP-VM";
+ return 0;
+ case hwmon_curr:
+ *str = "IP-IM";
+ return 0;
+ case hwmon_temp:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ *str = "TEMPFAN";
+ else
+ *str = "Ambient";
+ return 0;
+ case hwmon_power:
+ *str = "Power";
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_in_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_lowest:
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_reset_history:
+ return 0200;
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_curr_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_highest:
+ case hwmon_curr_lowest:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_reset_history:
+ return 0200;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_power_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ case hwmon_power_input_lowest:
+ case hwmon_power_label:
+ case hwmon_power_max_alarm:
+ case hwmon_power_min_alarm:
+ return 0444;
+ case hwmon_power_reset_history:
+ return 0200;
+ case hwmon_power_max:
+ case hwmon_power_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_temp_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_highest:
+ case hwmon_temp_lowest:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_reset_history:
+ return 0200;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc2947_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_in_is_visible(attr);
+ case hwmon_curr:
+ return ltc2947_curr_is_visible(attr);
+ case hwmon_power:
+ return ltc2947_power_is_visible(attr);
+ case hwmon_temp:
+ return ltc2947_temp_is_visible(attr);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info *ltc2947_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LOWEST | HWMON_C_HIGHEST |
+ HWMON_C_MAX | HWMON_C_MIN | HWMON_C_RESET_HISTORY |
+ HWMON_C_MIN_ALARM | HWMON_C_MAX_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_INPUT_LOWEST |
+ HWMON_P_INPUT_HIGHEST | HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_RESET_HISTORY | HWMON_P_MAX_ALARM |
+ HWMON_P_MIN_ALARM | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MAX | HWMON_T_MIN | HWMON_T_RESET_HISTORY |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_LABEL,
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | HWMON_T_MAX |
+ HWMON_T_MIN | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops ltc2947_hwmon_ops = {
+ .is_visible = ltc2947_is_visible,
+ .read = ltc2947_read,
+ .write = ltc2947_write,
+ .read_string = ltc2947_read_labels,
+};
+
+static const struct hwmon_chip_info ltc2947_chip_info = {
+ .ops = &ltc2947_hwmon_ops,
+ .info = ltc2947_info,
+};
+
+/* energy attributes are 6 bytes wide, so we need a u64 */
+static SENSOR_DEVICE_ATTR(energy1_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY1);
+static SENSOR_DEVICE_ATTR(energy2_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY2);
+
+static struct attribute *ltc2947_attrs[] = {
+ &sensor_dev_attr_energy1_input.dev_attr.attr,
+ &sensor_dev_attr_energy2_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2947);
+
+static void ltc2947_clk_disable(void *data)
+{
+ struct clk *extclk = data;
+
+ clk_disable_unprepare(extclk);
+}
+
+static int ltc2947_setup(struct ltc2947_data *st)
+{
+ int ret;
+ struct clk *extclk;
+ u32 dummy, deadband, pol;
+ u32 accum[2];
+
+ /* clear status register by reading it */
+ ret = regmap_read(st->map, LTC2947_REG_STATUS, &dummy);
+ if (ret)
+ return ret;
+	/*
+	 * Set the power max/min thresholds here, since the default register
+	 * values multiplied by the scale would overflow on 32-bit
+	 * architectures.
+	 */
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H, LTC2947_PAGE1, 2,
+ POWER_MAX / 200000);
+ if (ret)
+ return ret;
+
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L, LTC2947_PAGE1, 2,
+ POWER_MIN / 200000);
+ if (ret)
+ return ret;
+
+ /* check external clock presence */
+ extclk = devm_clk_get(st->dev, NULL);
+ if (!IS_ERR(extclk)) {
+ unsigned long rate_hz;
+ u8 pre = 0, div, tbctl;
+ u64 aux;
+
+		/* let's calculate and set the right values in TBCTL */
+ rate_hz = clk_get_rate(extclk);
+ if (rate_hz < LTC2947_CLK_MIN || rate_hz > LTC2947_CLK_MAX) {
+			dev_err(st->dev, "Invalid rate:%lu for external clock\n",
+ rate_hz);
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(extclk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(st->dev, ltc2947_clk_disable,
+ extclk);
+ if (ret)
+ return ret;
+ /* as in table 1 of the datasheet */
+ if (rate_hz >= LTC2947_CLK_MIN && rate_hz <= 1000000)
+ pre = 0;
+ else if (rate_hz > 1000000 && rate_hz <= 2000000)
+ pre = 1;
+ else if (rate_hz > 2000000 && rate_hz <= 4000000)
+ pre = 2;
+ else if (rate_hz > 4000000 && rate_hz <= 8000000)
+ pre = 3;
+ else if (rate_hz > 8000000 && rate_hz <= 16000000)
+ pre = 4;
+ else if (rate_hz > 16000000 && rate_hz <= LTC2947_CLK_MAX)
+ pre = 5;
+ /*
+ * Div is given by:
+ * floor(fref / (2^PRE * 32768))
+ */
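+		/*
+		 * Worked example (values for illustration only): with a 4MHz
+		 * clock, PRE = 2 and DIV = floor(4000000 / (4 * 32768)) = 30.
+		 */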
+ div = rate_hz / ((1 << pre) * 32768);
+ tbctl = LTC2947_PRE(pre) | LTC2947_DIV(div);
+
+ ret = regmap_write(st->map, LTC2947_REG_TBCTL, tbctl);
+ if (ret)
+ return ret;
+		/*
+		 * The energy lsb is given by (in W*s):
+		 * 0.6416 * (1/fref) * 2^PRE * (DIV + 1)
+		 * The value is stored multiplied by 10^9.
+		 */
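+		/*
+		 * E.g. (illustrative): fref = 4MHz, PRE = 2, DIV = 30 gives
+		 * 641600000 * 4 * 31 / 4000000 = 19890, i.e. ~19.89uJ per
+		 * count.
+		 */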
+ aux = (div + 1) * ((1 << pre) * 641600000ULL);
+ st->lsb_energy = DIV_ROUND_CLOSEST_ULL(aux, rate_hz);
+ } else {
+		/* 19.89e-6 * 10^9 */
+ st->lsb_energy = 19890;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node,
+ "adi,accumulator-ctl-pol", accum,
+ ARRAY_SIZE(accum));
+ if (!ret) {
+ u32 accum_reg = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_POL, accum_reg);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32(st->dev->of_node,
+ "adi,accumulation-deadband-microamp",
+ &deadband);
+ if (!ret) {
+		/* the LSB is the same as for the current, i.e. 3mA */
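+		/* e.g. (illustrative): a 30000uA deadband -> 30000 / 3000 = 10 */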
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND,
+ deadband / (1000 * 3));
+ if (ret)
+ return ret;
+ }
+ /* check gpio cfg */
+ ret = of_property_read_u32(st->dev->of_node, "adi,gpio-out-pol", &pol);
+ if (!ret) {
+ /* setup GPIO as output */
+ u32 gpio_ctl = LTC2947_GPIO_EN(1) | LTC2947_GPIO_FAN_EN(1) |
+ LTC2947_GPIO_FAN_POL(pol);
+
+ st->gpio_out = true;
+ ret = regmap_write(st->map, LTC2947_REG_GPIOSTATCTL, gpio_ctl);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node, "adi,gpio-in-accum",
+ accum, ARRAY_SIZE(accum));
+ if (!ret) {
+		/*
+		 * Set up the accumulator options. The GPIO control register
+		 * already defaults to input.
+		 */
+ u32 accum_val = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ if (st->gpio_out) {
+ dev_err(st->dev,
+			"Cannot have input gpio config if already configured as output\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_write(st->map, LTC2947_REG_GPIO_ACCUM, accum_val);
+ if (ret)
+ return ret;
+ }
+
+	/* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+int ltc2947_core_probe(struct regmap *map, const char *name)
+{
+ struct ltc2947_data *st;
+ struct device *dev = regmap_get_device(map);
+ struct device *hwmon;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->map = map;
+ st->dev = dev;
+ dev_set_drvdata(dev, st);
+ mutex_init(&st->lock);
+
+ ret = ltc2947_setup(st);
+ if (ret)
+ return ret;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, name, st,
+ &ltc2947_chip_info,
+ ltc2947_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+EXPORT_SYMBOL_GPL(ltc2947_core_probe);
+
+static int __maybe_unused ltc2947_resume(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ u32 ctrl = 0;
+ int ret;
+
+ /* dummy read to wake the device */
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+	/*
+	 * Wait for the device. It takes 100ms to wake up, so 10ms of extra
+	 * margin should be enough.
+	 */
+ msleep(110);
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /* ctrl should be 0 */
+ if (ctrl != 0) {
+ dev_err(st->dev, "Device failed to wake up, ctl:%02X\n", ctrl);
+ return -ETIMEDOUT;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+static int __maybe_unused ltc2947_suspend(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_SHUTDOWN_MASK, 1);
+}
+
+SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
+EXPORT_SYMBOL_GPL(ltc2947_pm_ops);
+
+const struct of_device_id ltc2947_of_match[] = {
+ { .compatible = "adi,ltc2947" },
+ {}
+};
+EXPORT_SYMBOL_GPL(ltc2947_of_match);
+MODULE_DEVICE_TABLE(of, ltc2947_of_match);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 power and energy monitor core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
new file mode 100644
index 000000000000..cf6074b110ae
--- /dev/null
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over I2C
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ltc2947_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_i2c(i2c, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, i2c->name);
+}
+
+static const struct i2c_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2947_id);
+
+static struct i2c_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_i2c_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 I2C power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-spi.c b/drivers/hwmon/ltc2947-spi.c
new file mode 100644
index 000000000000..c24ca569db1b
--- /dev/null
+++ b/drivers/hwmon/ltc2947-spi.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over SPI
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+};
+
+static int ltc2947_probe(struct spi_device *spi)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_spi(spi, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, spi_get_device_id(spi)->name);
+}
+
+static const struct spi_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ltc2947_id);
+
+static struct spi_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_spi_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 SPI power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947.h b/drivers/hwmon/ltc2947.h
new file mode 100644
index 000000000000..5b8ff81a3dba
--- /dev/null
+++ b/drivers/hwmon/ltc2947.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LTC2947_H
+#define _LINUX_LTC2947_H
+
+struct regmap;
+
+extern const struct of_device_id ltc2947_of_match[];
+extern const struct dev_pm_ops ltc2947_pm_ops;
+
+int ltc2947_core_probe(struct regmap *map, const char *name);
+
+#endif
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index d62d69bb7e49..59859979571d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -36,6 +36,15 @@ config SENSORS_ADM1275
This driver can also be built as a module. If so, the module will
be called adm1275.
+config SENSORS_BEL_PFE
+ tristate "Bel PFE Compatible Power Supplies"
+ help
+ If you say yes here you get hardware monitoring support for BEL
+ PFE1100 and PFE3000 Power Supplies.
+
+ This driver can also be built as a module. If so, the module will
+ be called bel-pfe.
+
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
depends on LEDS_CLASS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 03bacfcfd660..3f8c1014938b 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
new file mode 100644
index 000000000000..f236e18f45a5
--- /dev/null
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for BEL PFE family power supplies.
+ *
+ * Copyright (c) 2019 Facebook Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+enum chips {pfe1100, pfe3000};
+
+/*
+ * Disable the status check for pfe3000 devices: some devices report a
+ * communication error (invalid command) for the VOUT_MODE command (0x20)
+ * even though the correct VOUT_MODE value (0x16) is returned, which leads
+ * to an incorrect exponent in linear mode.
+ */
+static struct pmbus_platform_data pfe3000_plat_data = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static struct pmbus_driver_info pfe_driver_info[] = {
+ [pfe1100] = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12,
+ },
+
+ [pfe3000] = {
+ .pages = 7,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ /* Page 0: V1. */
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_FAN12 |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_VCAP,
+
+ /* Page 1: Vsb. */
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_POUT,
+
+ /*
+ * Page 2: V1 Ishare.
+ * Page 3: Reserved.
+ * Page 4: V1 Cathode.
+ * Page 5: Vsb Cathode.
+ * Page 6: V1 Sense.
+ */
+ .func[2] = PMBUS_HAVE_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT,
+ },
+};
+
+static int pfe_pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int model;
+
+ model = (int)id->driver_data;
+
+	/*
+	 * PFE3000-12-069RA devices may not stay in page 0 during device
+	 * probe, which leads to probe failure (reading the status word
+	 * fails). Set the device to page 0 at the beginning.
+	 */
+ if (model == pfe3000) {
+ client->dev.platform_data = &pfe3000_plat_data;
+ i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ }
+
+ return pmbus_do_probe(client, id, &pfe_driver_info[model]);
+}
+
+static const struct i2c_device_id pfe_device_id[] = {
+ {"pfe1100", pfe1100},
+ {"pfe3000", pfe3000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pfe_device_id);
+
+static struct i2c_driver pfe_pmbus_driver = {
+ .driver = {
+ .name = "bel-pfe",
+ },
+ .probe = pfe_pmbus_probe,
+ .remove = pmbus_do_remove,
+ .id_table = pfe_device_id,
+};
+
+module_i2c_driver(pfe_pmbus_driver);
+
+MODULE_AUTHOR("Tao Ren <rentao.bupt@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for BEL PFE Family Power Supplies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d44745e498e7..d359b76bcb36 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -3,6 +3,7 @@
* Copyright 2017 IBM Corp.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -29,6 +30,10 @@
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
+#define CFFPS_CCIN_VERSION GENMASK(15, 8)
+#define CFFPS_CCIN_VERSION_1 0x2b
+#define CFFPS_CCIN_VERSION_2 0x2e
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -39,9 +44,13 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+/*
+ * LED off state actually relinquishes LED control to PSU firmware, so it can
+ * turn on the LED for faults.
+ */
+#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
-#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
@@ -54,7 +63,7 @@ enum {
CFFPS_DEBUGFS_NUM_ENTRIES
};
-enum versions { cffps1, cffps2 };
+enum versions { cffps1, cffps2, cffps_unknown };
struct ibm_cffps_input_history {
struct mutex update_lock;
@@ -292,28 +301,38 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
-static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
+static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
int rc;
+ u8 next_led_state;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
if (brightness == LED_OFF) {
- psu->led_state = CFFPS_LED_OFF;
+ next_led_state = CFFPS_LED_OFF;
} else {
brightness = LED_FULL;
+
if (psu->led_state != CFFPS_LED_BLINK)
- psu->led_state = CFFPS_LED_ON;
+ next_led_state = CFFPS_LED_ON;
+ else
+ next_led_state = CFFPS_LED_BLINK;
}
+ dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n",
+ brightness, next_led_state);
+
pmbus_set_page(psu->client, 0);
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
- psu->led_state);
+ next_led_state);
if (rc < 0)
- return;
+ return rc;
+ psu->led_state = next_led_state;
led_cdev->brightness = brightness;
+
+ return 0;
}
static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
@@ -323,10 +342,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
int rc;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
- psu->led_state = CFFPS_LED_BLINK;
-
- if (led_cdev->brightness == LED_OFF)
- return 0;
+ dev_dbg(&psu->client->dev, "LED blink set.\n");
pmbus_set_page(psu->client, 0);
@@ -335,6 +351,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
if (rc < 0)
return rc;
+ psu->led_state = CFFPS_LED_BLINK;
+ led_cdev->brightness = LED_FULL;
*delay_on = CFFPS_BLINK_RATE_MS;
*delay_off = CFFPS_BLINK_RATE_MS;
@@ -351,7 +369,7 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
client->addr);
psu->led.name = psu->led_name;
psu->led.max_brightness = LED_FULL;
- psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.brightness_set_blocking = ibm_cffps_led_brightness_set;
psu->led.blink_set = ibm_cffps_led_blink_set;
rc = devm_led_classdev_register(dev, &psu->led);
@@ -395,7 +413,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i, rc;
- enum versions vs;
+ enum versions vs = cffps_unknown;
struct dentry *debugfs;
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
@@ -405,8 +423,27 @@ static int ibm_cffps_probe(struct i2c_client *client,
vs = (enum versions)md;
else if (id)
vs = (enum versions)id->driver_data;
- else
- vs = cffps1;
+
+ if (vs == cffps_unknown) {
+ u16 ccin_version = CFFPS_CCIN_VERSION_1;
+ int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
+
+ if (ccin > 0)
+ ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
+
+ switch (ccin_version) {
+ default:
+ case CFFPS_CCIN_VERSION_1:
+ vs = cffps1;
+ break;
+ case CFFPS_CCIN_VERSION_2:
+ vs = cffps2;
+ break;
+ }
+
+ /* Set the client name to include the version number. */
+ snprintf(client->name, I2C_NAME_SIZE, "cffps%d", vs + 1);
+ }
client->dev.platform_data = &ibm_cffps_pdata;
rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
@@ -465,6 +502,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
static const struct i2c_device_id ibm_cffps_id[] = {
{ "ibm_cffps1", cffps1 },
{ "ibm_cffps2", cffps2 },
+ { "ibm_cffps", cffps_unknown },
{}
};
MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
@@ -478,6 +516,10 @@ static const struct of_device_id ibm_cffps_of_match[] = {
.compatible = "ibm,cffps2",
.data = (void *)cffps2
},
+ {
+ .compatible = "ibm,cffps",
+ .data = (void *)cffps_unknown
+ },
{}
};
MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index a94e35cff3e5..83a4fab151d2 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -127,7 +127,8 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
+ !data->valid) {
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
new file mode 100644
index 000000000000..df66e0bc1253
--- /dev/null
+++ b/drivers/hwmon/tmp513.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Texas Instruments TMP512, TMP513 power monitor chips
+ *
+ * TMP513:
+ * Thermal/Power Management with Triple Remote and
+ * Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp513
+ *
+ * TMP512:
+ * Thermal/Power Management with Dual Remote
+ * and Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp512
+ *
+ * Copyright (C) 2019 Eric Tremblay <etremblay@distech-controls.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+// Common register definitions
+#define TMP51X_SHUNT_CONFIG 0x00
+#define TMP51X_TEMP_CONFIG 0x01
+#define TMP51X_STATUS 0x02
+#define TMP51X_SMBUS_ALERT 0x03
+#define TMP51X_SHUNT_CURRENT_RESULT 0x04
+#define TMP51X_BUS_VOLTAGE_RESULT 0x05
+#define TMP51X_POWER_RESULT 0x06
+#define TMP51X_BUS_CURRENT_RESULT 0x07
+#define TMP51X_LOCAL_TEMP_RESULT 0x08
+#define TMP51X_REMOTE_TEMP_RESULT_1 0x09
+#define TMP51X_REMOTE_TEMP_RESULT_2 0x0A
+#define TMP51X_SHUNT_CURRENT_H_LIMIT 0x0C
+#define TMP51X_SHUNT_CURRENT_L_LIMIT 0x0D
+#define TMP51X_BUS_VOLTAGE_H_LIMIT 0x0E
+#define TMP51X_BUS_VOLTAGE_L_LIMIT 0x0F
+#define TMP51X_POWER_LIMIT 0x10
+#define TMP51X_LOCAL_TEMP_LIMIT 0x11
+#define TMP51X_REMOTE_TEMP_LIMIT_1 0x12
+#define TMP51X_REMOTE_TEMP_LIMIT_2 0x13
+#define TMP51X_SHUNT_CALIBRATION 0x15
+#define TMP51X_N_FACTOR_AND_HYST_1 0x16
+#define TMP51X_N_FACTOR_2 0x17
+#define TMP51X_MAN_ID_REG 0xFE
+#define TMP51X_DEVICE_ID_REG 0xFF
+
+// TMP513-specific register definitions
+#define TMP513_REMOTE_TEMP_RESULT_3 0x0B
+#define TMP513_REMOTE_TEMP_LIMIT_3 0x14
+#define TMP513_N_FACTOR_3 0x18
+
+// Expected manufacturer and device ID values
+#define TMP51X_MANUFACTURER_ID 0x55FF
+
+#define TMP512_DEVICE_ID 0x22FF
+#define TMP513_DEVICE_ID 0x23FF
+
+// Default config
+#define TMP51X_SHUNT_CONFIG_DEFAULT 0x399F
+#define TMP51X_SHUNT_VALUE_DEFAULT 1000
+#define TMP51X_VBUS_RANGE_DEFAULT TMP51X_VBUS_RANGE_32V
+#define TMP51X_PGA_DEFAULT 8
+#define TMP51X_MAX_REGISTER_ADDR 0xFF
+
+#define TMP512_TEMP_CONFIG_DEFAULT 0xBF80
+#define TMP513_TEMP_CONFIG_DEFAULT 0xFF80
+
+// Mask and shift
+#define CURRENT_SENSE_VOLTAGE_320_MASK 0x1800
+#define CURRENT_SENSE_VOLTAGE_160_MASK 0x1000
+#define CURRENT_SENSE_VOLTAGE_80_MASK 0x0800
+#define CURRENT_SENSE_VOLTAGE_40_MASK 0
+
+#define TMP51X_BUS_VOLTAGE_MASK 0x2000
+#define TMP51X_NFACTOR_MASK 0xFF00
+#define TMP51X_HYST_MASK 0x00FF
+
+#define TMP51X_BUS_VOLTAGE_SHIFT 3
+#define TMP51X_TEMP_SHIFT 3
+
+// Alarms
+#define TMP51X_SHUNT_CURRENT_H_LIMIT_POS 15
+#define TMP51X_SHUNT_CURRENT_L_LIMIT_POS 14
+#define TMP51X_BUS_VOLTAGE_H_LIMIT_POS 13
+#define TMP51X_BUS_VOLTAGE_L_LIMIT_POS 12
+#define TMP51X_POWER_LIMIT_POS 11
+#define TMP51X_LOCAL_TEMP_LIMIT_POS 10
+#define TMP51X_REMOTE_TEMP_LIMIT_1_POS 9
+#define TMP51X_REMOTE_TEMP_LIMIT_2_POS 8
+#define TMP513_REMOTE_TEMP_LIMIT_3_POS 7
+
+#define TMP51X_VBUS_RANGE_32V 32000000
+#define TMP51X_VBUS_RANGE_16V 16000000
+
+// Max and Min value
+#define MAX_BUS_VOLTAGE_32_LIMIT 32764
+#define MAX_BUS_VOLTAGE_16_LIMIT 16382
+
+// The register range is -256 to +256, but the datasheet specifies -40 to 125.
+#define MAX_TEMP_LIMIT 125000
+#define MIN_TEMP_LIMIT -40000
+
+#define MAX_TEMP_HYST 127500
+
+static const u8 TMP51X_TEMP_INPUT[4] = {
+ TMP51X_LOCAL_TEMP_RESULT,
+ TMP51X_REMOTE_TEMP_RESULT_1,
+ TMP51X_REMOTE_TEMP_RESULT_2,
+ TMP513_REMOTE_TEMP_RESULT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT,
+ TMP51X_REMOTE_TEMP_LIMIT_1,
+ TMP51X_REMOTE_TEMP_LIMIT_2,
+ TMP513_REMOTE_TEMP_LIMIT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT_ALARM[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_1_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_2_POS,
+ TMP513_REMOTE_TEMP_LIMIT_3_POS
+};
+
+static const u8 TMP51X_TEMP_CRIT_HYST[4] = {
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1
+};
+
+static const u8 TMP51X_CURR_INPUT[2] = {
+ TMP51X_SHUNT_CURRENT_RESULT,
+ TMP51X_BUS_CURRENT_RESULT
+};
+
+static struct regmap_config tmp51x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP51X_MAX_REGISTER_ADDR,
+};
+
+enum tmp51x_ids {
+ tmp512, tmp513
+};
+
+struct tmp51x_data {
+ u16 shunt_config;
+ u16 pga_gain;
+ u32 vbus_range_uvolt;
+
+ u16 temp_config;
+ u32 nfactor[3];
+
+ u32 shunt_uohms;
+
+ u32 curr_lsb_ua;
+ u32 pwr_lsb_uw;
+
+ enum tmp51x_ids id;
+ struct regmap *regmap;
+};
+
+// Map the PGA gain to the data shift: gain 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
+static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
+{
+ return 5 - ffs(data->pga_gain);
+}
+
+static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ unsigned int regval, long *val)
+{
+ switch (reg) {
+ case TMP51X_STATUS:
+ *val = (regval >> pos) & 1;
+ break;
+ case TMP51X_SHUNT_CURRENT_RESULT:
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+		/*
+		 * The value is read as a voltage by the chip but reported
+		 * to the user as a current.
+		 * It is a 2's complement number, shifted by one to four bits
+		 * depending on the PGA gain setting. 1 LSB = 10uV.
+		 */
+		*val = sign_extend32(regval, 16 - tmp51x_get_pga_shift(data));
+ *val = DIV_ROUND_CLOSEST(*val * 10000, data->shunt_uohms);
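+		/*
+		 * E.g. (illustrative): raw = 100 (i.e. 1mV) with a 1 mOhm
+		 * shunt gives 100 * 10000 / 1000 = 1000 mA.
+		 */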
+ break;
+ case TMP51X_BUS_VOLTAGE_RESULT:
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ *val = (regval >> TMP51X_BUS_VOLTAGE_SHIFT) * 4;
+ break;
+ case TMP51X_POWER_RESULT:
+ case TMP51X_POWER_LIMIT:
+ // Power = (current * BusVoltage) / 5000
+ *val = regval * data->pwr_lsb_uw;
+ break;
+ case TMP51X_BUS_CURRENT_RESULT:
+ // Current = (ShuntVoltage * CalibrationRegister) / 4096
+		*val = sign_extend32(regval, 15) * data->curr_lsb_ua;
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ break;
+ case TMP51X_LOCAL_TEMP_RESULT:
+ case TMP51X_REMOTE_TEMP_RESULT_1:
+ case TMP51X_REMOTE_TEMP_RESULT_2:
+ case TMP513_REMOTE_TEMP_RESULT_3:
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+		// 1 LSB = 0.0625 degrees Celsius
+		*val = sign_extend32(regval, 15) >> TMP51X_TEMP_SHIFT;
+ *val = DIV_ROUND_CLOSEST(*val * 625, 10);
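+		/*
+		 * E.g. (illustrative): raw = 0x0C80 -> 3200 >> 3 = 400,
+		 * 400 * 62.5 = 25000 millidegrees (25 C).
+		 */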
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+		// 1 LSB = 0.5 degrees Celsius
+ *val = (regval & TMP51X_HYST_MASK) * 500;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ *val = 0;
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
+{
+ int regval, max_val;
+ u32 mask = 0;
+
+ switch (reg) {
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+		/*
+		 * The user enters a current value, which we convert to a
+		 * voltage. 1 LSB = 10uV.
+		 */
+ val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10000);
+ max_val = U16_MAX >> tmp51x_get_pga_shift(data);
+ regval = clamp_val(val, -max_val, max_val);
+ break;
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ max_val = (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) ?
+ MAX_BUS_VOLTAGE_32_LIMIT : MAX_BUS_VOLTAGE_16_LIMIT;
+
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 4), 0, max_val);
+ regval = val << TMP51X_BUS_VOLTAGE_SHIFT;
+ break;
+ case TMP51X_POWER_LIMIT:
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, data->pwr_lsb_uw), 0,
+ U16_MAX);
+ break;
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+		// 1 LSB = 0.0625 degrees Celsius
+ val = clamp_val(val, MIN_TEMP_LIMIT, MAX_TEMP_LIMIT);
+ regval = DIV_ROUND_CLOSEST(val * 10, 625) << TMP51X_TEMP_SHIFT;
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+		// 1 LSB = 0.5 degrees Celsius
+ val = clamp_val(val, 0, MAX_TEMP_HYST);
+ regval = DIV_ROUND_CLOSEST(val, 500);
+ mask = TMP51X_HYST_MASK;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+ }
+
+ if (mask == 0)
+ return regmap_write(data->regmap, reg, regval);
+ else
+ return regmap_update_bits(data->regmap, reg, mask, regval);
+}
+
+static u8 tmp51x_get_reg(enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return TMP51X_TEMP_INPUT[channel];
+ case hwmon_temp_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_temp_crit:
+ return TMP51X_TEMP_CRIT[channel];
+ case hwmon_temp_crit_hyst:
+ return TMP51X_TEMP_CRIT_HYST[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return TMP51X_BUS_VOLTAGE_RESULT;
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_in_lcrit:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT;
+ case hwmon_in_crit:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return TMP51X_CURR_INPUT[channel];
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_curr_lcrit:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT;
+ case hwmon_curr_crit:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return TMP51X_POWER_RESULT;
+ case hwmon_power_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_power_crit:
+ return TMP51X_POWER_LIMIT;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static u8 tmp51x_get_status_pos(enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_crit_alarm:
+ return TMP51X_TEMP_CRIT_ALARM[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_lcrit_alarm:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT_POS;
+ case hwmon_in_crit_alarm:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_lcrit_alarm:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT_POS;
+ case hwmon_curr_crit_alarm:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_crit_alarm:
+ return TMP51X_POWER_LIMIT_POS;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tmp51x_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct tmp51x_data *data = dev_get_drvdata(dev);
+ int ret;
+ u32 regval;
+ u8 pos = 0, reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ if (reg == TMP51X_STATUS)
+ pos = tmp51x_get_status_pos(type, attr, channel);
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ return tmp51x_get_value(data, reg, pos, regval, val);
+}
+
+static int tmp51x_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ u8 reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ return tmp51x_set_value(dev_get_drvdata(dev), reg, val);
+}
+
+static umode_t tmp51x_is_visible(const void *_data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct tmp51x_data *data = _data;
+
+ switch (type) {
+ case hwmon_temp:
+ if (data->id == tmp512 && channel == 4)
+ return 0;
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return 0644;
+ return 0444;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return 0444;
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_curr:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return 0444;
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_power:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_crit:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *tmp51x_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM |
+ HWMON_I_CRIT | HWMON_I_CRIT_ALARM),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM |
+ HWMON_C_CRIT | HWMON_C_CRIT_ALARM,
+ HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops tmp51x_hwmon_ops = {
+ .is_visible = tmp51x_is_visible,
+ .read = tmp51x_read,
+ .write = tmp51x_write,
+};
+
+static const struct hwmon_chip_info tmp51x_chip_info = {
+ .ops = &tmp51x_hwmon_ops,
+ .info = tmp51x_info,
+};
+
+/*
+ * Calibrate the tmp51x following the datasheet method
+ */
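+/*
+ * Worked example (values for illustration only), for a 1 mOhm shunt and
+ * PGA gain 8: vshunt_max = 320 mV, max_curr_ma = 320000,
+ * curr_lsb_ua = 320000000 / 32767 = 9766, pwr_lsb_uw = 195320,
+ * div = 10, and calibration = 40960 / 10 = 4096.
+ */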
+static int tmp51x_calibrate(struct tmp51x_data *data)
+{
+ int vshunt_max = data->pga_gain * 40;
+ u64 max_curr_ma;
+ u32 div;
+
+ /*
+ * If shunt_uohms is equal to 0, the calibration should be set to 0.
+ * The consequence will be that the current and power measurement engine
+ * of the sensor will not work. Temperature and voltage sensing will
+ * continue to work.
+ */
+ if (data->shunt_uohms == 0)
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, 0);
+
+ max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * 1000 * 1000,
+ data->shunt_uohms);
+
+ /*
+ * Calculate the minimal bit resolution for the current and the power.
+ * Those values will be used during register interpretation.
+ */
+ data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * 1000, 32767);
+ data->pwr_lsb_uw = 20 * data->curr_lsb_ua;
+
+ div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms,
+ 1000 * 1000);
+
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION,
+ DIV_ROUND_CLOSEST(40960, div));
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int tmp51x_init(struct tmp51x_data *data)
+{
+ unsigned int regval;
+ int ret = regmap_write(data->regmap, TMP51X_SHUNT_CONFIG,
+ data->shunt_config);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_TEMP_CONFIG, data->temp_config);
+ if (ret < 0)
+ return ret;
+
+ // nFactor configuration
+ ret = regmap_update_bits(data->regmap, TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_NFACTOR_MASK, data->nfactor[0] << 8);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_N_FACTOR_2,
+ data->nfactor[1] << 8);
+ if (ret < 0)
+ return ret;
+
+ if (data->id == tmp513) {
+ ret = regmap_write(data->regmap, TMP513_N_FACTOR_3,
+ data->nfactor[2] << 8);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = tmp51x_calibrate(data);
+ if (ret < 0)
+ return ret;
+
+	// Read the status register before use, as the datasheet recommends
+ return regmap_read(data->regmap, TMP51X_STATUS, &regval);
+}
+
+static const struct i2c_device_id tmp51x_id[] = {
+ { "tmp512", tmp512 },
+ { "tmp513", tmp513 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp51x_id);
+
+static const struct of_device_id tmp51x_of_match[] = {
+ {
+ .compatible = "ti,tmp512",
+ .data = (void *)tmp512
+ },
+ {
+ .compatible = "ti,tmp513",
+ .data = (void *)tmp513
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp51x_of_match);
+
+static int tmp51x_vbus_range_to_reg(struct device *dev,
+ struct tmp51x_data *data)
+{
+ if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) {
+ data->shunt_config |= TMP51X_BUS_VOLTAGE_MASK;
+ } else if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_16V) {
+ data->shunt_config &= ~TMP51X_BUS_VOLTAGE_MASK;
+ } else {
+ dev_err(dev, "ti,bus-range-microvolt is invalid: %u\n",
+ data->vbus_range_uvolt);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_pga_gain_to_reg(struct device *dev, struct tmp51x_data *data)
+{
+ if (data->pga_gain == 8) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_320_MASK;
+ } else if (data->pga_gain == 4) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_160_MASK;
+ } else if (data->pga_gain == 2) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_80_MASK;
+ } else if (data->pga_gain == 1) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_40_MASK;
+ } else {
+ dev_err(dev, "ti,pga-gain is invalid: %u\n", data->pga_gain);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_read_properties(struct device *dev, struct tmp51x_data *data)
+{
+ int ret;
+ u32 nfactor[3];
+ u32 val;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms", &val);
+ data->shunt_uohms = (ret >= 0) ? val : TMP51X_SHUNT_VALUE_DEFAULT;
+
+ ret = device_property_read_u32(dev, "ti,bus-range-microvolt", &val);
+ data->vbus_range_uvolt = (ret >= 0) ? val : TMP51X_VBUS_RANGE_DEFAULT;
+ ret = tmp51x_vbus_range_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32(dev, "ti,pga-gain", &val);
+ data->pga_gain = (ret >= 0) ? val : TMP51X_PGA_DEFAULT;
+ ret = tmp51x_pga_gain_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32_array(dev, "ti,nfactor", nfactor,
+ (data->id == tmp513) ? 3 : 2);
+ if (ret >= 0)
+ memcpy(data->nfactor, nfactor,
+        sizeof(u32) * ((data->id == tmp513) ? 3 : 2));
+
+ // Check if shunt value is compatible with pga-gain
+ if (data->shunt_uohms > data->pga_gain * 40 * 1000 * 1000) {
+ dev_err(dev, "shunt-resistor: %u too big for pga_gain: %u\n",
+ data->shunt_uohms, data->pga_gain);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void tmp51x_use_default(struct tmp51x_data *data)
+{
+ data->vbus_range_uvolt = TMP51X_VBUS_RANGE_DEFAULT;
+ data->pga_gain = TMP51X_PGA_DEFAULT;
+ data->shunt_uohms = TMP51X_SHUNT_VALUE_DEFAULT;
+}
+
+static int tmp51x_configure(struct device *dev, struct tmp51x_data *data)
+{
+ data->shunt_config = TMP51X_SHUNT_CONFIG_DEFAULT;
+ data->temp_config = (data->id == tmp513) ?
+ TMP513_TEMP_CONFIG_DEFAULT : TMP512_TEMP_CONFIG_DEFAULT;
+
+ if (dev->of_node)
+ return tmp51x_read_properties(dev, data);
+
+ tmp51x_use_default(data);
+
+ return 0;
+}
+
+static int tmp51x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tmp51x_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (client->dev.of_node)
+ data->id = (enum tmp51x_ids)device_get_match_data(&client->dev);
+ else
+ data->id = id->driver_data;
+
+ ret = tmp51x_configure(dev, data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return ret;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client, &tmp51x_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = tmp51x_init(data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &tmp51x_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "power monitor %s\n", id->name);
+
+ return 0;
+}
+
+static struct i2c_driver tmp51x_driver = {
+ .driver = {
+ .name = "tmp51x",
+ .of_match_table = of_match_ptr(tmp51x_of_match),
+ },
+ .probe = tmp51x_probe,
+ .id_table = tmp51x_id,
+};
+
+module_i2c_driver(tmp51x_driver);
+
+MODULE_AUTHOR("Eric Tremblay <etremblay@distechcontrols.com>");
+MODULE_DESCRIPTION("tmp51x driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9df48b70c70c..a0307e6761b8 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -2096,7 +2096,7 @@ END:
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
struct w83793_data *data = i2c_get_clientdata(client);
- u8 res = 0xff;
+ u8 res;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7a9f5fb08330..6ff30e25af55 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
+ depends on ARM || ARM64
depends on OF || ACPI
select ARM_AMBA
select PERF_EVENTS
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 219c10eb752c..ce41482431f9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t reset_store(struct device *dev,
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
+ config->vipcssctlr = 0x0;
/* Disable seq events */
for (i = 0; i < drvdata->nrseqstate-1; i++)
@@ -238,6 +239,7 @@ static ssize_t reset_store(struct device *dev,
for (i = 0; i < drvdata->nr_resource; i++)
config->res_ctrl[i] = 0x0;
+ config->ss_idx = 0x0;
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_ctrl[i] = 0x0;
config->ss_pe_cmp[i] = 0x0;
@@ -296,8 +298,6 @@ static ssize_t mode_store(struct device *dev,
spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
@@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
+
+ /* mask off max threshold before checking min value */
+ val &= ETM_CYC_THRESHOLD_MASK;
if (val < drvdata->ccitmin)
return -EINVAL;
- config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ config->ccctlr = val;
return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
@@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
return -EINVAL;
if (!drvdata->nr_addr_cmp)
return -EINVAL;
+
/*
- * Bit[7:0] selects which address range comparator is used for
- * branch broadcast control.
+ * Bit[8] controls include(1) / exclude(0), bits[0-7] select
+ * individual range comparators. If include then at least 1
+ * range must be selected.
*/
- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
return -EINVAL;
- config->bb_ctrl = val;
+ config->bb_ctrl = val & GENMASK(8, 0);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
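The comment above changes what a valid TRCBBCTLR value looks like: bit[8] selects include mode and, when set, at least one range bit must also be set. A hedged restatement as a standalone predicate (bb_ctrl_arg_valid() is an illustrative helper, not part of the patch):

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: the TRCBBCTLR argument check described above. */
static bool bb_ctrl_arg_valid(unsigned long val)
{
	bool include = val & BIT(8);		/* include(1) / exclude(0) */
	unsigned long ranges = val & GENMASK(7, 0); /* range comparators */

	/* in include mode at least one address range must be selected */
	return !include || ranges != 0;
}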
@@ -738,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = BMVAL(config->vinst_ctrl, 16, 19);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -754,8 +759,8 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
- config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+ /* clear all EXLEVEL_S bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
config->vinst_ctrl |= (val << 16);
@@ -773,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = BMVAL(config->vinst_ctrl, 20, 23);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -789,8 +794,8 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear EXLEVEL_NS bits (bit[23] is never implemented */
- config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+ /* clear EXLEVEL_NS bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
config->vinst_ctrl |= (val << 20);
@@ -966,8 +971,12 @@ static ssize_t addr_range_store(struct device *dev,
unsigned long val1, val2;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int elements, exclude;
+
+ elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* exclude is optional, but we need at least two parameters */
+ if (elements < 2)
return -EINVAL;
/* lower address comparator cannot have a higher address value */
if (val1 > val2)
@@ -995,9 +1004,11 @@ static ssize_t addr_range_store(struct device *dev,
/*
* Program include or exclude control bits for vinst or vdata
* whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+ * (use the supplied value, or default to the bit set in 'mode')
*/
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
+ if (elements != 3)
+ exclude = config->mode & ETM_MODE_EXCLUDE;
+ etm4_set_mode_exclude(drvdata, exclude ? true : false);
spin_unlock(&drvdata->spinlock);
return size;
@@ -1054,8 +1065,6 @@ static ssize_t addr_start_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->vissctlr |= BIT(idx);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1111,8 +1120,6 @@ static ssize_t addr_stop_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->vissctlr |= BIT(idx + 16);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1228,6 +1235,131 @@ static ssize_t addr_context_store(struct device *dev,
}
static DEVICE_ATTR_RW(addr_context);
+static ssize_t addr_exlevel_s_ns_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ val = BMVAL(config->addr_acc[idx], 8, 14);
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_exlevel_s_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val & ~((GENMASK(14, 8) >> 8)))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
+ config->addr_acc[idx] &= ~(GENMASK(14, 8));
+ config->addr_acc[idx] |= (val << 8);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_exlevel_s_ns);
+
+static const char * const addr_type_names[] = {
+ "unused",
+ "single",
+ "range",
+ "start",
+ "stop"
+};
+
+static ssize_t addr_cmp_view_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx, addr_type;
+ unsigned long addr_v, addr_v2, addr_ctrl;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+ int size = 0;
+ bool exclude = false;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ addr_v = config->addr_val[idx];
+ addr_ctrl = config->addr_acc[idx];
+ addr_type = config->addr_type[idx];
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ if (idx & 0x1) {
+ idx -= 1;
+ addr_v2 = addr_v;
+ addr_v = config->addr_val[idx];
+ } else {
+ addr_v2 = config->addr_val[idx + 1];
+ }
+ exclude = config->viiectlr & BIT(idx / 2 + 16);
+ }
+ spin_unlock(&drvdata->spinlock);
+ if (addr_type) {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
+ addr_type_names[addr_type], addr_v);
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " %#lx %s", addr_v2,
+ exclude ? "exclude" : "include");
+ }
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " ctrl(%#lx)\n", addr_ctrl);
+ } else {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
+ }
+ return size;
+}
+static DEVICE_ATTR_RO(addr_cmp_view);
+
+static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+ val = config->vipcssctlr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->vipcssctlr = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
+
static ssize_t seq_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1324,8 +1456,8 @@ static ssize_t seq_event_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
- /* RST, bits[7:0] */
- config->seq_ctrl[idx] = val & 0xFF;
+ /* Seq control has two masks B[15:8] F[7:0] */
+ config->seq_ctrl[idx] = val & 0xFFFF;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1580,12 +1712,129 @@ static ssize_t res_ctrl_store(struct device *dev,
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
val &= ~BIT(21);
- config->res_ctrl[idx] = val;
+ config->res_ctrl[idx] = val & GENMASK(21, 0);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_ctrl);
+static ssize_t sshot_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ss_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_ss_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->ss_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_idx);
+
+static ssize_t sshot_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_ctrl[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_ctrl[idx] = val & GENMASK(24, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_ctrl);
+
+static ssize_t sshot_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_status[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(sshot_status);
+
+static ssize_t sshot_pe_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_pe_cmp[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_pe_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_pe_ctrl);
+
static ssize_t ctxid_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1714,6 +1963,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* Don't use contextID tracing if coming from a PID namespace. See
@@ -1729,7 +1979,9 @@ static ssize_t ctxid_masks_store(struct device *dev,
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numcidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1903,6 +2155,7 @@ static ssize_t vmid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* only implemented when vmid tracing is enabled, i.e. at least one
@@ -1910,7 +2163,9 @@ static ssize_t vmid_masks_store(struct device *dev,
*/
if (!drvdata->vmid_size || !drvdata->numvmidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
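Both mask handlers now share the same parsing rule: a single mask covers comparators 0-3, and a second mask is mandatory only when more than four comparators are implemented. A sketch of that rule in isolation (demo_parse_masks() is illustrative, and slightly stricter than the patch, which does not reject zero inputs for small comparator counts):

#include <stdio.h>

/* Illustrative: one mask for <= 4 comparators, two masks for 5 to 8. */
static int demo_parse_masks(const char *buf, int nr_cmp,
			    unsigned long *mask0, unsigned long *mask1)
{
	int nr_inputs = sscanf(buf, "%lx %lx", mask0, mask1);

	if (nr_inputs < 1)
		return -1;		/* need at least one mask */
	if (nr_cmp > 4 && nr_inputs != 2)
		return -1;		/* comparators 4-7 need mask1 */
	return 0;
}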
@@ -2033,6 +2288,13 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_addr_stop.attr,
&dev_attr_addr_ctxtype.attr,
&dev_attr_addr_context.attr,
+ &dev_attr_addr_exlevel_s_ns.attr,
+ &dev_attr_addr_cmp_view.attr,
+ &dev_attr_vinst_pe_cmp_start_stop.attr,
+ &dev_attr_sshot_idx.attr,
+ &dev_attr_sshot_ctrl.attr,
+ &dev_attr_sshot_pe_ctrl.attr,
+ &dev_attr_sshot_status.attr,
&dev_attr_seq_idx.attr,
&dev_attr_seq_state.attr,
&dev_attr_seq_event.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a128b5063f46..dc3f507e7562 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -18,6 +18,7 @@
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
@@ -26,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <asm/sections.h>
#include <asm/local.h>
#include <asm/virt.h>
@@ -37,6 +39,15 @@ static int boot_enable;
module_param(boot_enable, int, 0444);
MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
+#define PARAM_PM_SAVE_FIRMWARE 0 /* save self-hosted state as per firmware */
+#define PARAM_PM_SAVE_NEVER 1 /* never save any state */
+#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
+
+static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
+module_param(pm_save_enable, int, 0444);
+MODULE_PARM_DESC(pm_save_enable,
+ "Save/restore state on power down: 1 = never, 2 = self-hosted");
+
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
@@ -54,6 +65,14 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
isb();
}
+static void etm4_os_lock(struct etmv4_drvdata *drvdata)
+{
+ /* Writing 0x1 to TRCOSLAR locks the trace registers */
+ writel_relaxed(0x1, drvdata->base + TRCOSLAR);
+ drvdata->os_unlock = false;
+ isb();
+}
+
static bool etm4_arch_supported(u8 arch)
{
/* Mask out the minor version number */
@@ -149,6 +168,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
drvdata->base + TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ /* always clear status bit on restart if using single-shot */
+ if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
+ config->ss_status[i] &= ~BIT(31);
writel_relaxed(config->ss_ctrl[i],
drvdata->base + TRCSSCCRn(i));
writel_relaxed(config->ss_status[i],
@@ -448,6 +470,9 @@ static void etm4_disable_hw(void *info)
{
u32 control;
struct etmv4_drvdata *drvdata = info;
+ struct etmv4_config *config = &drvdata->config;
+ struct device *etm_dev = &drvdata->csdev->dev;
+ int i;
CS_UNLOCK(drvdata->base);
@@ -470,6 +495,18 @@ static void etm4_disable_hw(void *info)
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ /* wait for TRCSTATR.PMSTABLE to go to '1' */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1))
+ dev_err(etm_dev,
+ "timeout while waiting for PM stable Trace Status\n");
+
+ /* read the status of the single shot comparators */
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ config->ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
+
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
@@ -576,6 +613,7 @@ static void etm4_init_arch_data(void *info)
u32 etmidr4;
u32 etmidr5;
struct etmv4_drvdata *drvdata = info;
+ int i;
/* Make sure all registers are accessible */
etm4_os_unlock(drvdata);
@@ -629,6 +667,7 @@ static void etm4_init_arch_data(void *info)
 * TRCARCHMAJ, bits[11:8] architecture major version number
*/
drvdata->arch = BMVAL(etmidr1, 4, 11);
+ drvdata->config.arch = drvdata->arch;
/* maximum size of resources */
etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
@@ -698,9 +737,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
- * comparator control for tracing
+ * comparator control for tracing. Read any status regs as these
+ * also contain RO capability data.
*/
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ drvdata->config.ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
@@ -780,6 +824,7 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
static u64 etm4_get_access_type(struct etmv4_config *config)
{
u64 access_type = etm4_get_ns_access_type(config);
+ u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
@@ -787,7 +832,8 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
*/
access_type |= (ETM_EXLEVEL_S_APP |
ETM_EXLEVEL_S_OS |
- ETM_EXLEVEL_S_HYP);
+ s_hyp |
+ ETM_EXLEVEL_S_MON);
return access_type;
}
@@ -865,6 +911,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* in the started state
*/
config->vinst_ctrl |= BIT(9);
+ config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1085,6 +1132,288 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
+#ifdef CONFIG_CPU_PM
+static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+{
+ int i, ret = 0;
+ struct etmv4_save_state *state;
+ struct device *etm_dev = &drvdata->csdev->dev;
+
+ /*
+ * As recommended by 3.4.1 ("The procedure when powering down the PE")
+ * of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Lock the OS lock to disable trace and external debugger access */
+ etm4_os_lock(drvdata);
+
+ /* wait for TRCSTATR.PMSTABLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for PM Stable Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ state = drvdata->save_state;
+
+ state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
+ state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
+ state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
+ state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
+ state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+ state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
+ state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
+ state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
+ state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
+ state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
+ state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
+ state->trcqctlr = readl(drvdata->base + TRCQCTLR);
+
+ state->trcvictlr = readl(drvdata->base + TRCVICTLR);
+ state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
+ state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
+ state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
+ state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
+ state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+
+ state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
+ state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
+ state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
+ state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
+ state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
+ state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i));
+ state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i));
+ }
+
+ /*
+ * Data trace stream is architecturally prohibited for A profile cores
+ * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
+ * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
+ * unit") of ARM IHI 0064D.
+ */
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i));
+
+ state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+
+ state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+
+ state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+
+ state->trcpdcr = readl(drvdata->base + TRCPDCR);
+
+ /* wait for TRCSTATR.IDLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for Idle Trace Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ drvdata->state_needs_restore = true;
+
+ /*
+ * Power can be removed from the trace unit now. We do this to
+ * potentially save power on systems that respect the TRCPDCR_PU
+ * despite requesting software to save/restore state.
+ */
+ writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
+ drvdata->base + TRCPDCR);
+
+out:
+ CS_LOCK(drvdata->base);
+ return ret;
+}
+
+static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+{
+ int i;
+ struct etmv4_save_state *state = drvdata->save_state;
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
+ writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
+ writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+ writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
+ writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
+ writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
+ writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
+ writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
+ writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
+
+ writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
+ writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
+ writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
+ writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
+ writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
+ writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ writel_relaxed(state->trcseqevr[i],
+ drvdata->base + TRCSEQEVRn(i));
+
+ writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
+ writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
+ writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ writel_relaxed(state->trccntrldvr[i],
+ drvdata->base + TRCCNTRLDVRn(i));
+ writel_relaxed(state->trccntctlr[i],
+ drvdata->base + TRCCNTCTLRn(i));
+ writel_relaxed(state->trccntvr[i],
+ drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ writel_relaxed(state->trcrsctlr[i],
+ drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ writel_relaxed(state->trcssccr[i],
+ drvdata->base + TRCSSCCRn(i));
+ writel_relaxed(state->trcsscsr[i],
+ drvdata->base + TRCSSCSRn(i));
+ writel_relaxed(state->trcsspcicr[i],
+ drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ writel_relaxed(state->trcacvr[i],
+ drvdata->base + TRCACVRn(i));
+ writel_relaxed(state->trcacatr[i],
+ drvdata->base + TRCACATRn(i));
+ }
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ writel_relaxed(state->trccidcvr[i],
+ drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ writel_relaxed(state->trcvmidcvr[i],
+ drvdata->base + TRCVMIDCVRn(i));
+
+ writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+
+ writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+
+ drvdata->state_needs_restore = false;
+
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ /* Unlock the OS lock to re-enable trace and external debug access */
+ etm4_os_unlock(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
+static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct etmv4_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+
+ if (!etmdrvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = etmdrvdata[cpu];
+
+ if (!drvdata->save_state)
+ return NOTIFY_OK;
+
+ if (WARN_ON_ONCE(drvdata->cpu != cpu))
+ return NOTIFY_BAD;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* save the state if self-hosted coresight is in use */
+ if (local_read(&drvdata->mode))
+ if (etm4_cpu_save(drvdata))
+ return NOTIFY_BAD;
+ break;
+ case CPU_PM_EXIT:
+ /* fallthrough */
+ case CPU_PM_ENTER_FAILED:
+ if (drvdata->state_needs_restore)
+ etm4_cpu_restore(drvdata);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block etm4_cpu_pm_nb = {
+ .notifier_call = etm4_cpu_pm_notify,
+};
+
+static int etm4_cpu_pm_register(void)
+{
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+}
+
+static void etm4_cpu_pm_unregister(void)
+{
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+}
+#else
+static int etm4_cpu_pm_register(void) { return 0; }
+static void etm4_cpu_pm_unregister(void) { }
+#endif
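The save/restore hooks plug into the generic CPU PM notifier chain; a minimal, self-contained sketch of that pattern for reference (demo_* names are illustrative, not from this patch):

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

/* Illustrative skeleton of a cpu_pm notifier, per the pattern above. */
static int demo_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			      void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		/* save per-CPU hardware state before power is removed */
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		/* restore state; ENTER_FAILED means power was never lost */
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_cpu_pm_nb = {
	.notifier_call = demo_cpu_pm_notify,
};

/* registered once with cpu_pm_register_notifier(&demo_cpu_pm_nb) */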
+
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -1101,6 +1430,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_set_drvdata(dev, drvdata);
+ if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+ pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+ PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+
+ if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
+ drvdata->save_state = devm_kmalloc(dev,
+ sizeof(struct etmv4_save_state), GFP_KERNEL);
+ if (!drvdata->save_state)
+ return -ENOMEM;
+ }
+
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
@@ -1135,6 +1475,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (ret < 0)
goto err_arch_supported;
hp_online = ret;
+
+ ret = etm4_cpu_pm_register();
+ if (ret)
+ goto err_arch_supported;
}
cpus_read_unlock();
@@ -1185,6 +1529,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
+ etm4_cpu_pm_unregister();
+
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
@@ -1211,6 +1557,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4523f10ddd0f..4a695bf90582 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -175,22 +175,28 @@
ETM_MODE_EXCL_USER)
#define TRCSTATR_IDLE_BIT 0
+#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels */
+/* secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_S_APP BIT(8)
#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_NA BIT(10)
-#define ETM_EXLEVEL_S_HYP BIT(11)
-/* non-secure state access levels */
+#define ETM_EXLEVEL_S_HYP BIT(10)
+#define ETM_EXLEVEL_S_MON BIT(11)
+/* non-secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_NS_APP BIT(12)
#define ETM_EXLEVEL_NS_OS BIT(13)
#define ETM_EXLEVEL_NS_HYP BIT(14)
#define ETM_EXLEVEL_NS_NA BIT(15)
+/* secure / non secure masks - TRCVICTLR, IDR3 */
+#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
+/* NS MON (EL3) mode never implemented */
+#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+
/**
* struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
@@ -221,6 +227,7 @@
* @cntr_val: Sets or returns the value for a counter.
* @res_idx: Resource index selector.
* @res_ctrl: Controls the selection of the resources in the trace unit.
+ * @ss_idx: Single-shot index selector.
* @ss_ctrl: Controls the corresponding single-shot comparator resource.
* @ss_status: The status of the corresponding single-shot comparator.
* @ss_pe_cmp: Selects the PE comparator inputs for Single-shot control.
@@ -237,6 +244,7 @@
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
+ * @arch: ETM architecture version (for arch dependent config).
*/
struct etmv4_config {
u32 mode;
@@ -263,6 +271,7 @@ struct etmv4_config {
u32 cntr_val[ETMv4_MAX_CNTR];
u8 res_idx;
u32 res_ctrl[ETM_MAX_RES_SEL];
+ u8 ss_idx;
u32 ss_ctrl[ETM_MAX_SS_CMP];
u32 ss_status[ETM_MAX_SS_CMP];
u32 ss_pe_cmp[ETM_MAX_SS_CMP];
@@ -279,6 +288,66 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
+ u8 arch;
+};
+
+/**
+ * struct etmv4_save_state - state to be preserved when ETM is without power
+ */
+struct etmv4_save_state {
+ u32 trcprgctlr;
+ u32 trcprocselr;
+ u32 trcconfigr;
+ u32 trcauxctlr;
+ u32 trceventctl0r;
+ u32 trceventctl1r;
+ u32 trcstallctlr;
+ u32 trctsctlr;
+ u32 trcsyncpr;
+ u32 trcccctlr;
+ u32 trcbbctlr;
+ u32 trctraceidr;
+ u32 trcqctlr;
+
+ u32 trcvictlr;
+ u32 trcviiectlr;
+ u32 trcvissctlr;
+ u32 trcvipcssctlr;
+ u32 trcvdctlr;
+ u32 trcvdsacctlr;
+ u32 trcvdarcctlr;
+
+ u32 trcseqevr[ETM_MAX_SEQ_STATES];
+ u32 trcseqrstevr;
+ u32 trcseqstr;
+ u32 trcextinselr;
+ u32 trccntrldvr[ETMv4_MAX_CNTR];
+ u32 trccntctlr[ETMv4_MAX_CNTR];
+ u32 trccntvr[ETMv4_MAX_CNTR];
+
+ u32 trcrsctlr[ETM_MAX_RES_SEL * 2];
+
+ u32 trcssccr[ETM_MAX_SS_CMP];
+ u32 trcsscsr[ETM_MAX_SS_CMP];
+ u32 trcsspcicr[ETM_MAX_SS_CMP];
+
+ u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trccidcvr[ETMv4_MAX_CTXID_CMP];
+ u32 trcvmidcvr[ETM_MAX_VMID_CMP];
+ u32 trccidcctlr0;
+ u32 trccidcctlr1;
+ u32 trcvmidcctlr0;
+ u32 trcvmidcctlr1;
+
+ u32 trcclaimset;
+
+ u32 cntr_val[ETMv4_MAX_CNTR];
+ u32 seq_state;
+ u32 vinst_ctrl;
+ u32 ss_status[ETM_MAX_SS_CMP];
+
+ u32 trcpdcr;
};
/**
@@ -336,6 +405,8 @@ struct etmv4_config {
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state over.
* @config: structure holding configuration parameters.
+ * @save_state: State to be preserved across power loss
+ * @state_needs_restore: True when there is context to restore after PM exit
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -381,6 +452,8 @@ struct etmv4_drvdata {
bool atbtrig;
bool lpoverride;
struct etmv4_config config;
+ struct etmv4_save_state *save_state;
+ bool state_needs_restore;
};
/* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 05f7896c3a01..900690a9f7f0 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -38,12 +38,14 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
* @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
+ * @spinlock: serialize enable/disable operations.
*/
struct funnel_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
unsigned long priority;
+ spinlock_t spinlock;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
@@ -76,11 +78,21 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_funnel_enable_hw(drvdata, inport);
-
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_funnel_enable_hw(drvdata, inport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[inport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
@@ -107,11 +119,19 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ dynamic_funnel_disable_hw(drvdata, inport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_funnel_disable_hw(drvdata, inport);
-
- dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
+ if (last_disable)
+ dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
@@ -233,6 +253,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b29ba640eb25..e7dc1c31d20d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -31,11 +31,13 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* whether this one is programmable or not.
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
+ * @spinlock: serialize enable/disable operations.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
+ spinlock_t spinlock;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -97,10 +99,22 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_replicator_enable(drvdata, inport, outport);
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_replicator_enable(drvdata, inport,
+ outport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[outport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
return rc;
}
@@ -137,10 +151,19 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ dynamic_replicator_disable(drvdata, inport, outport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_replicator_disable(drvdata, inport, outport);
- dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -225,6 +248,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 807416b75ecc..d0cc3985b72a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -334,9 +334,10 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
static int tmc_enable_etf_link(struct coresight_device *csdev,
int inport, int outport)
{
- int ret;
+ int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -344,12 +345,18 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- ret = tmc_etf_enable_hw(drvdata);
+ if (atomic_read(&csdev->refcnt[0]) == 0) {
+ ret = tmc_etf_enable_hw(drvdata);
+ if (!ret) {
+ drvdata->mode = CS_MODE_SYSFS;
+ first_enable = true;
+ }
+ }
if (!ret)
- drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(&csdev->refcnt[0]);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (!ret)
+ if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
@@ -359,6 +366,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -366,11 +374,15 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- tmc_etf_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
+ tmc_etf_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ last_disable = true;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 6453c67a4d01..ef20f74c85fa 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -253,9 +253,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int ret;
+ int ret = 0;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return -EINVAL;
@@ -264,29 +264,17 @@ static int coresight_enable_link(struct coresight_device *csdev,
outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
- refport = inport;
- else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
- refport = outport;
- else
- refport = 0;
-
- if (refport < 0)
- return refport;
-
- if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
- if (link_ops(csdev)->enable) {
- ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret) {
- atomic_dec(&csdev->refcnt[refport]);
- return ret;
- }
- }
- }
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0)
+ return inport;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
+ return outport;
- csdev->enable = true;
+ if (link_ops(csdev)->enable)
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (!ret)
+ csdev->enable = true;
- return 0;
+ return ret;
}
static void coresight_disable_link(struct coresight_device *csdev,
@@ -295,7 +283,7 @@ static void coresight_disable_link(struct coresight_device *csdev,
{
int i, nr_conns;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return;
@@ -305,20 +293,15 @@ static void coresight_disable_link(struct coresight_device *csdev,
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- refport = inport;
nr_conns = csdev->pdata->nr_inport;
} else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- refport = outport;
nr_conns = csdev->pdata->nr_outport;
} else {
- refport = 0;
nr_conns = 1;
}
- if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
- if (link_ops(csdev)->disable)
- link_ops(csdev)->disable(csdev, inport, outport);
- }
+ if (link_ops(csdev)->disable)
+ link_ops(csdev)->disable(csdev, inport, outport);
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->refcnt[i]) != 0)
@@ -1308,6 +1291,12 @@ static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
return -ENOENT;
}
+bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return fwnode_property_present(dev_fwnode(dev),
+ "arm,coresight-loses-context-with-cpu");
+}
+
/*
* coresight_alloc_device_name - Get an index for a given device in the
* device index list specific to a driver. An index is allocated for a
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d5c1821b31c6..0dfd97bbde9e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -649,10 +649,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = intel_th_device_add_resources(thdev, res, subdev->nres);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_put_device;
- }
if (subdev->type == INTEL_TH_OUTPUT) {
if (subdev->mknode)
@@ -667,10 +665,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = device_add(&thdev->dev);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_free_res;
- }
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 03ca5b1bef9f..ebf3e30e989a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -210,6 +210,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Ice Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8a29),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Tiger Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a33),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Tiger Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 4b9e44b227d8..4f932a419752 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -345,7 +345,11 @@ void stp_policy_unbind(struct stp_policy *policy)
stm->policy = NULL;
policy->stm = NULL;
+ /*
+ * Drop the reference on the protocol driver and lose the link.
+ */
stm_put_protocol(stm->pdrv);
+ stm->pdrv = NULL;
stm_put_device(stm);
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 9cb2aa1e20ef..62a1c92ab803 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
int index;
u32 speed;
u32 min_speed;
+ u32 force_speed;
};
/**
@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches,
return acpi_match_device(matches, &client->dev);
}
+static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+ /*
+ * These Silead touchscreen controllers only work at 400 kHz; for
+ * some reason they do not work at 100 kHz. On some devices the ACPI
+ * tables list another device on their bus as only being capable of
+ * 100 kHz, but testing has shown that these other devices work fine
+ * at 400 kHz (as can be expected of any recent i2c hw), so we force
+ * the bus speed to 400 kHz if a Silead device is present.
+ */
+ { "MSSL1680", 0 },
+ {}
+};
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
if (lookup->speed <= lookup->min_speed)
lookup->min_speed = lookup->speed;
+ if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+ lookup->force_speed = 400000;
+
return AE_OK;
}
@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
return 0;
}
- return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+ if (lookup.force_speed) {
+ if (lookup.force_speed != lookup.min_speed)
+ dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
+ lookup.min_speed, lookup.force_speed);
+ return lookup.force_speed;
+ } else if (lookup.min_speed != UINT_MAX) {
+ return lookup.min_speed;
+ } else {
+ return 0;
+ }
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
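After this change the returned speed is resolved in a fixed order: a forced speed from the quirk table wins, then the slowest speed advertised by any device on the bus, then 0 for unknown. A sketch of just that decision (demo_resolve_speed() is illustrative):

#include <limits.h>

/* Illustrative only: the speed-selection order implemented above. */
static unsigned int demo_resolve_speed(unsigned int force_speed,
				       unsigned int min_speed)
{
	if (force_speed)		/* quirk table wins, e.g. 400000 */
		return force_speed;
	if (min_speed != UINT_MAX)	/* slowest device found on the bus */
		return min_speed;
	return 0;			/* nothing advertised in ACPI */
}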
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 6f632d543fcc..e4d296b40baa 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -50,6 +50,7 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
info->addr = addr;
info->of_node = node;
+ info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
info->flags |= I2C_CLIENT_HOST_NOTIFY;
@@ -245,14 +246,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
}
client = of_i2c_register_device(adap, rd->dn);
- put_device(&adap->dev);
-
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%pOF'\n",
rd->dn);
+ put_device(&adap->dev);
of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
+ put_device(&adap->dev);
break;
case OF_RECONFIG_CHANGE_REMOVE:
/* already depopulated? */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 5c051dba32a5..043691656245 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1763,7 +1763,7 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
static struct i3c_dev_desc *
i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
{
- struct i3c_master_controller *master = refdev->common.master;
+ struct i3c_master_controller *master = i3c_dev_get_master(refdev);
struct i3c_dev_desc *i3cdev;
i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
@@ -2493,7 +2493,7 @@ int i3c_master_register(struct i3c_master_controller *master,
/*
* We're done initializing the bus and the controller, we can now
- * register I3C devices dicovered during the initial DAA.
+ * register I3C devices discovered during the initial DAA.
*/
master->init_done = true;
i3c_bus_normaluse_lock(&master->bus);
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index a5a07ccb81a7..dbeb2605e5f6 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -15,6 +15,7 @@
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
@@ -25,13 +26,7 @@
#define DRV_NAME "falconide"
/*
- * Base of the IDE interface
- */
-
-#define ATA_HD_BASE 0xfff00000
-
- /*
- * Offsets from the above base
+ * Offsets from base address
*/
#define ATA_HD_CONTROL 0x39
@@ -114,18 +109,18 @@ static const struct ide_port_info falconide_port_info = {
.chipset = ide_generic,
};
-static void __init falconide_setup_ports(struct ide_hw *hw)
+static void __init falconide_setup_ports(struct ide_hw *hw, unsigned long base)
{
int i;
memset(hw, 0, sizeof(*hw));
- hw->io_ports.data_addr = ATA_HD_BASE;
+ hw->io_ports.data_addr = base;
for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
+ hw->io_ports_array[i] = base + 1 + i * 4;
- hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
+ hw->io_ports.ctl_addr = base + ATA_HD_CONTROL;
hw->irq = IRQ_MFP_IDE;
}
@@ -134,23 +129,29 @@ static void __init falconide_setup_ports(struct ide_hw *hw)
* Probe for a Falcon IDE interface
*/
-static int __init falconide_init(void)
+static int __init falconide_init(struct platform_device *pdev)
{
+ struct resource *res;
struct ide_host *host;
struct ide_hw hw, *hws[] = { &hw };
+ unsigned long base;
int rc;
- if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
- return -ENODEV;
+ dev_info(&pdev->dev, "Atari Falcon IDE controller\n");
- printk(KERN_INFO "ide: Falcon IDE controller\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
- if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) {
- printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
- falconide_setup_ports(&hw);
+ base = (unsigned long)res->start;
+
+ falconide_setup_ports(&hw, base);
host = ide_host_alloc(&falconide_port_info, hws, 1);
if (host == NULL) {
@@ -169,10 +170,29 @@ static int __init falconide_init(void)
err_free:
ide_host_free(host);
err:
- release_mem_region(ATA_HD_BASE, 0x40);
+ release_mem_region(res->start, resource_size(res));
return rc;
}
-module_init(falconide_init);
+static int falconide_remove(struct platform_device *pdev)
+{
+ struct ide_host *host = dev_get_drvdata(&pdev->dev);
+
+ ide_host_remove(host);
+
+ return 0;
+}
+
+static struct platform_driver ide_falcon_driver = {
+ .remove = falconide_remove,
+ .driver = {
+ .name = "atari-falcon-ide",
+ },
+};
+
+module_platform_driver_probe(ide_falcon_driver, falconide_init);
+MODULE_AUTHOR("Geert Uytterhoeven");
+MODULE_DESCRIPTION("low-level driver for Atari Falcon IDE");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atari-falcon-ide");
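For context: the conversion above moves the hard-coded ATA_HD_BASE out of the driver and into a platform device resource, so the driver only binds by name. A minimal sketch of the board-side registration it now relies on, assuming the historical 0xfff00000/0x40 window from the removed define (the actual registration belongs in the m68k Atari platform code):

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static const struct resource falcon_ide_rsrc[] = {
	{
		.start	= 0xfff00000,			/* former ATA_HD_BASE */
		.end	= 0xfff00000 + 0x40 - 1,	/* 0x40-byte register window */
		.flags	= IORESOURCE_MEM,
	},
};

static int __init falcon_ide_register(void)
{
	struct platform_device *pdev;

	/* Name must match the "atari-falcon-ide" platform driver above */
	pdev = platform_device_register_simple("atari-falcon-ide", -1,
					       falcon_ide_rsrc,
					       ARRAY_SIZE(falcon_ide_rsrc));
	return PTR_ERR_OR_ZERO(pdev);
}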
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 40a3f55b08dd..962eb92501b5 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -46,7 +46,7 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
shwt++;
if (shwt > 7) {
- pr_warning("tx4938ide: SHWT violation (%d)\n", shwt);
+ pr_warn("tx4938ide: SHWT violation (%d)\n", shwt);
shwt = 7;
}
pr_debug("tx4938ide: ebus %d, bus cycle %dns, WT %d, SHWT %d\n",
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 88d132edc4e3..d5e871fe840d 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -363,9 +363,9 @@ static int tx4939ide_dma_test_irq(ide_drive_t *drive)
case TX4939IDE_INT_HOST | TX4939IDE_INT_XFEREND:
dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
if (!(dma_stat & ATA_DMA_INTR))
- pr_warning("%s: weird interrupt status. "
- "DMA_Stat %#02x int_ctl %#04x\n",
- hwif->name, dma_stat, ctl);
+ pr_warn("%s: weird interrupt status. "
+ "DMA_Stat %#02x int_ctl %#04x\n",
+ hwif->name, dma_stat, ctl);
found = 1;
break;
}
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 2e37f8a6d8cf..7b837641f166 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f0af3a42f53c..5d8540b7b427 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -6,6 +6,16 @@
menu "Analog to digital converters"
+config AB8500_GPADC
+ bool "ST-Ericsson AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ AB8500 Analog Baseband mixed-signal integrated circuit GPADC
+ (General Purpose Analog to Digital Converter) driver, used to monitor
+ internal voltages and to convert accessory, battery, AC (charger,
+ mains) and USB voltages integral to the U8500 platform.
+
config AD_SIGMA_DELTA
tristate
select IIO_BUFFER
@@ -45,6 +55,16 @@ config AD7291
To compile this driver as a module, choose M here: the
module will be called ad7291.
+config AD7292
+ tristate "Analog Devices AD7292 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7292
+ 8 Channel ADC with temperature sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7292.
+
config AD7298
tristate "Analog Devices AD7298 ADC driver"
depends on SPI
@@ -432,6 +452,17 @@ config INGENIC_ADC
This driver can also be built as a module. If so, the module will be
called ingenic_adc.
+config INTEL_MRFLD_ADC
+ tristate "Intel Merrifield Basin Cove ADC driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ help
+ Say yes here to have support for the Basin Cove power management IC
+ (PMIC) ADC device. Depending on the platform configuration, this
+ general-purpose ADC can be used for sampling sensors such as thermal
+ resistors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_mrfld_adc.
+
config IMX7D_ADC
tristate "Freescale IMX7D ADC driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -508,8 +539,8 @@ config MAX1027
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Maxim SPI ADC models
- max1027, max1029 and max1031.
+ Say yes here to build support for Maxim SPI {10,12}-bit ADC models:
+ max1027, max1029, max1031, max1227, max1229 and max1231.
To compile this driver as a module, choose M here: the module will be
called max1027.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ef9cc485fb67..a1f1fbec0f87 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -4,10 +4,12 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7291) += ad7291.o
+obj-$(CONFIG_AD7292) += ad7292.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
@@ -42,6 +44,7 @@ obj-$(CONFIG_HX711) += hx711.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
+obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
new file mode 100644
index 000000000000..fd5b18d7f0c2
--- /dev/null
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: M'boumba Cedric Madianga
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * AB8500 General Purpose ADC driver. The AB8500 uses the reference voltages
+ * VinVADC and VADC, relative to GND, to do its job. It monitors main and backup
+ * battery voltages, AC (mains) voltage, USB cable voltage, as well as voltages
+ * representing the temperature of the chip die and battery, accessory
+ * detection by resistance measurements using relative voltages and GSM burst
+ * information.
+ *
+ * Some of the voltages are measured on external pins on the IC, such as
+ * battery temperature or "ADC aux" 1 and 2. Other voltages are internal rails
+ * from other parts of the ASIC such as main charger voltage, main and battery
+ * backup voltage or USB VBUS voltage. For this reason, drivers for other
+ * parts of the system need to obtain handles to the ADC to do work for
+ * them, and the IIO driver provides arbitration among these consumers.
+ */
+#include <linux/init.h>
+#include <linux/bits.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+
+/* GPADC register offsets and bit definitions */
+
+#define AB8500_GPADC_CTRL1_REG 0x00
+/* GPADC control register 1 bits */
+#define AB8500_GPADC_CTRL1_DISABLE 0x00
+#define AB8500_GPADC_CTRL1_ENABLE BIT(0)
+#define AB8500_GPADC_CTRL1_TRIG_ENA BIT(1)
+#define AB8500_GPADC_CTRL1_START_SW_CONV BIT(2)
+#define AB8500_GPADC_CTRL1_BTEMP_PULL_UP BIT(3)
+/* 0 = use rising edge, 1 = use falling edge */
+#define AB8500_GPADC_CTRL1_TRIG_EDGE BIT(4)
+/* 0 = use VTVOUT, 1 = use VRTC as pull-up supply for battery temp NTC */
+#define AB8500_GPADC_CTRL1_PUPSUPSEL BIT(5)
+#define AB8500_GPADC_CTRL1_BUF_ENA BIT(6)
+#define AB8500_GPADC_CTRL1_ICHAR_ENA BIT(7)
+
+#define AB8500_GPADC_CTRL2_REG 0x01
+#define AB8500_GPADC_CTRL3_REG 0x02
+/*
+ * GPADC control register 2 and 3 bits
+ * the bit layout is the same for SW and HW conversion set-up
+ */
+#define AB8500_GPADC_CTRL2_AVG_1 0x00
+#define AB8500_GPADC_CTRL2_AVG_4 BIT(5)
+#define AB8500_GPADC_CTRL2_AVG_8 BIT(6)
+#define AB8500_GPADC_CTRL2_AVG_16 (BIT(5) | BIT(6))
+
+enum ab8500_gpadc_channel {
+ AB8500_GPADC_CHAN_UNUSED = 0x00,
+ AB8500_GPADC_CHAN_BAT_CTRL = 0x01,
+ AB8500_GPADC_CHAN_BAT_TEMP = 0x02,
+ /* This is not used on AB8505 */
+ AB8500_GPADC_CHAN_MAIN_CHARGER = 0x03,
+ AB8500_GPADC_CHAN_ACC_DET_1 = 0x04,
+ AB8500_GPADC_CHAN_ACC_DET_2 = 0x05,
+ AB8500_GPADC_CHAN_ADC_AUX_1 = 0x06,
+ AB8500_GPADC_CHAN_ADC_AUX_2 = 0x07,
+ AB8500_GPADC_CHAN_VBAT_A = 0x08,
+ AB8500_GPADC_CHAN_VBUS = 0x09,
+ AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT = 0x0a,
+ AB8500_GPADC_CHAN_USB_CHARGER_CURRENT = 0x0b,
+ AB8500_GPADC_CHAN_BACKUP_BAT = 0x0c,
+ /* Only on AB8505 */
+ AB8505_GPADC_CHAN_DIE_TEMP = 0x0d,
+ AB8500_GPADC_CHAN_ID = 0x0e,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_1 = 0x0f,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_2 = 0x10,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_3 = 0x11,
+ /* FIXME: Applicable to all ASIC variants? */
+ AB8500_GPADC_CHAN_XTAL_TEMP = 0x12,
+ AB8500_GPADC_CHAN_VBAT_TRUE_MEAS = 0x13,
+ /* FIXME: Doesn't seem to work with pure AB8500 */
+ AB8500_GPADC_CHAN_BAT_CTRL_AND_IBAT = 0x1c,
+ AB8500_GPADC_CHAN_VBAT_MEAS_AND_IBAT = 0x1d,
+ AB8500_GPADC_CHAN_VBAT_TRUE_MEAS_AND_IBAT = 0x1e,
+ AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT = 0x1f,
+ /*
+ * Virtual channel used only for ibat conversion to ampere.
+ * Battery current conversion (ibat) cannot be requested as a
+ * single conversion but it is always requested in combination
+ * with other input requests.
+ */
+ AB8500_GPADC_CHAN_IBAT_VIRTUAL = 0xFF,
+};
+
+#define AB8500_GPADC_AUTO_TIMER_REG 0x03
+
+#define AB8500_GPADC_STAT_REG 0x04
+#define AB8500_GPADC_STAT_BUSY BIT(0)
+
+#define AB8500_GPADC_MANDATAL_REG 0x05
+#define AB8500_GPADC_MANDATAH_REG 0x06
+#define AB8500_GPADC_AUTODATAL_REG 0x07
+#define AB8500_GPADC_AUTODATAH_REG 0x08
+#define AB8500_GPADC_MUX_CTRL_REG 0x09
+#define AB8540_GPADC_MANDATA2L_REG 0x09
+#define AB8540_GPADC_MANDATA2H_REG 0x0A
+#define AB8540_GPADC_APEAAX_REG 0x10
+#define AB8540_GPADC_APEAAT_REG 0x11
+#define AB8540_GPADC_APEAAM_REG 0x12
+#define AB8540_GPADC_APEAAH_REG 0x13
+#define AB8540_GPADC_APEAAL_REG 0x14
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_GPADC_CAL_1 0x0F
+#define AB8500_GPADC_CAL_2 0x10
+#define AB8500_GPADC_CAL_3 0x11
+#define AB8500_GPADC_CAL_4 0x12
+#define AB8500_GPADC_CAL_5 0x13
+#define AB8500_GPADC_CAL_6 0x14
+#define AB8500_GPADC_CAL_7 0x15
+/* New calibration for 8540 */
+#define AB8540_GPADC_OTP4_REG_7 0x38
+#define AB8540_GPADC_OTP4_REG_6 0x39
+#define AB8540_GPADC_OTP4_REG_5 0x3A
+
+#define AB8540_GPADC_DIS_ZERO 0x00
+#define AB8540_GPADC_EN_VBIAS_XTAL_TEMP 0x02
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define AB8500_ADC_RESOLUTION 1024
+#define AB8500_ADC_CH_BTEMP_MIN 0
+#define AB8500_ADC_CH_BTEMP_MAX 1350
+#define AB8500_ADC_CH_DIETEMP_MIN 0
+#define AB8500_ADC_CH_DIETEMP_MAX 1350
+#define AB8500_ADC_CH_CHG_V_MIN 0
+#define AB8500_ADC_CH_CHG_V_MAX 20030
+#define AB8500_ADC_CH_ACCDET2_MIN 0
+#define AB8500_ADC_CH_ACCDET2_MAX 2500
+#define AB8500_ADC_CH_VBAT_MIN 2300
+#define AB8500_ADC_CH_VBAT_MAX 4800
+#define AB8500_ADC_CH_CHG_I_MIN 0
+#define AB8500_ADC_CH_CHG_I_MAX 1500
+#define AB8500_ADC_CH_BKBAT_MIN 0
+#define AB8500_ADC_CH_BKBAT_MAX 3200
+
+/* GPADC constants from AB8540 spec */
+#define AB8500_ADC_CH_IBAT_MIN (-6000) /* mA range measured by ADC for ibat */
+#define AB8500_ADC_CH_IBAT_MAX 6000
+#define AB8500_ADC_CH_IBAT_MIN_V (-60) /* mV range measured by ADC for ibat */
+#define AB8500_ADC_CH_IBAT_MAX_V 60
+#define AB8500_GPADC_IBAT_VDROP_L (-56) /* mV */
+#define AB8500_GPADC_IBAT_VDROP_H 56
+
+/* This is used to not lose precision when dividing to get gain and offset */
+#define AB8500_GPADC_CALIB_SCALE 1000
+/*
+ * Number of bits shift used to not lose precision
+ * when dividing to get ibat gain.
+ */
+#define AB8500_GPADC_CALIB_SHIFT_IBAT 20
+
+/* Time in ms before disabling regulator */
+#define AB8500_GPADC_AUTOSUSPEND_DELAY 1
+
+#define AB8500_GPADC_CONVERSION_TIME 500 /* ms */
+
+enum ab8500_cal_channels {
+ AB8500_CAL_VMAIN = 0,
+ AB8500_CAL_BTEMP,
+ AB8500_CAL_VBAT,
+ AB8500_CAL_IBAT,
+ AB8500_CAL_NR,
+};
+
+/**
+ * struct ab8500_adc_cal_data - Table for storing gain and offset for the
+ * calibrated ADC channels
+ * @gain: Gain of the ADC channel
+ * @offset: Offset of the ADC channel
+ * @otp_calib_hi: Calibration from OTP
+ * @otp_calib_lo: Calibration from OTP
+ */
+struct ab8500_adc_cal_data {
+ s64 gain;
+ s64 offset;
+ u16 otp_calib_hi;
+ u16 otp_calib_lo;
+};
+
+/**
+ * struct ab8500_gpadc_chan_info - per-channel GPADC info
+ * @name: name of the channel
+ * @id: the internal AB8500 ID number for the channel
+ * @hardware_control: indicates that we want to use hardware ADC control
+ * on this channel; the default is software ADC control. Hardware control
+ * is normally only used to test the battery voltage during GSM bursts
+ * and needs a hardware trigger on the GPADCTrig pin of the ASIC.
+ * @falling_edge: indicates that we want to trigger on the falling edge
+ * rather than the rising edge; rising edge is the default
+ * @avg_sample: how many samples to average: must be 1, 4, 8 or 16.
+ * @trig_timer: how long to wait for the trigger, in 32kHz periods:
+ * 0 .. 255 periods
+ */
+struct ab8500_gpadc_chan_info {
+ const char *name;
+ u8 id;
+ bool hardware_control;
+ bool falling_edge;
+ u8 avg_sample;
+ u8 trig_timer;
+};
+
+/**
+ * struct ab8500_gpadc - AB8500 GPADC device information
+ * @dev: pointer to the containing device
+ * @ab8500: pointer to the parent AB8500 device
+ * @chans: internal per-channel information container
+ * @nchans: number of channels
+ * @complete: completion that signals the end of a GPADC
+ * conversion cycle
+ * @vddadc: pointer to the regulator supplying VDDADC
+ * @irq_sw: interrupt number that is used by gpadc for software ADC conversion
+ * @irq_hw: interrupt number that is used by gpadc for hardware ADC conversion
+ * @cal_data: array of ADC calibration data structs
+ */
+struct ab8500_gpadc {
+ struct device *dev;
+ struct ab8500 *ab8500;
+ struct ab8500_gpadc_chan_info *chans;
+ unsigned int nchans;
+ struct completion complete;
+ struct regulator *vddadc;
+ int irq_sw;
+ int irq_hw;
+ struct ab8500_adc_cal_data cal_data[AB8500_CAL_NR];
+};
+
+static struct ab8500_gpadc_chan_info *
+ab8500_gpadc_get_channel(struct ab8500_gpadc *gpadc, u8 chan)
+{
+ struct ab8500_gpadc_chan_info *ch;
+ int i;
+
+ for (i = 0; i < gpadc->nchans; i++) {
+ ch = &gpadc->chans[i];
+ if (ch->id == chan)
+ break;
+ }
+ if (i == gpadc->nchans)
+ return NULL;
+
+ return ch;
+}
+
+/**
+ * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
+ * @gpadc: GPADC instance
+ * @ch: the sampled channel this raw value is coming from
+ * @ad_value: the raw value
+ */
+static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
+ enum ab8500_gpadc_channel ch,
+ int ad_value)
+{
+ int res;
+
+ switch (ch) {
+ case AB8500_GPADC_CHAN_MAIN_CHARGER:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_VMAIN].gain) {
+ res = AB8500_ADC_CH_CHG_V_MIN + (AB8500_ADC_CH_CHG_V_MAX -
+ AB8500_ADC_CH_CHG_V_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_VMAIN].gain +
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8500_GPADC_CHAN_BAT_CTRL:
+ case AB8500_GPADC_CHAN_BAT_TEMP:
+ case AB8500_GPADC_CHAN_ACC_DET_1:
+ case AB8500_GPADC_CHAN_ADC_AUX_1:
+ case AB8500_GPADC_CHAN_ADC_AUX_2:
+ case AB8500_GPADC_CHAN_XTAL_TEMP:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_BTEMP].gain) {
+ res = AB8500_ADC_CH_BTEMP_MIN + (AB8500_ADC_CH_BTEMP_MAX -
+ AB8500_ADC_CH_BTEMP_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_BTEMP].gain +
+ gpadc->cal_data[AB8500_CAL_BTEMP].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8500_GPADC_CHAN_VBAT_A:
+ case AB8500_GPADC_CHAN_VBAT_TRUE_MEAS:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_VBAT].gain) {
+ res = AB8500_ADC_CH_VBAT_MIN + (AB8500_ADC_CH_VBAT_MAX -
+ AB8500_ADC_CH_VBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_VBAT].gain +
+ gpadc->cal_data[AB8500_CAL_VBAT].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8505_GPADC_CHAN_DIE_TEMP:
+ res = AB8500_ADC_CH_DIETEMP_MIN +
+ (AB8500_ADC_CH_DIETEMP_MAX - AB8500_ADC_CH_DIETEMP_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_ACC_DET_2:
+ res = AB8500_ADC_CH_ACCDET2_MIN +
+ (AB8500_ADC_CH_ACCDET2_MAX - AB8500_ADC_CH_ACCDET2_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_VBUS:
+ res = AB8500_ADC_CH_CHG_V_MIN +
+ (AB8500_ADC_CH_CHG_V_MAX - AB8500_ADC_CH_CHG_V_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT:
+ case AB8500_GPADC_CHAN_USB_CHARGER_CURRENT:
+ res = AB8500_ADC_CH_CHG_I_MIN +
+ (AB8500_ADC_CH_CHG_I_MAX - AB8500_ADC_CH_CHG_I_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_BACKUP_BAT:
+ res = AB8500_ADC_CH_BKBAT_MIN +
+ (AB8500_ADC_CH_BKBAT_MAX - AB8500_ADC_CH_BKBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_IBAT_VIRTUAL:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_IBAT].gain) {
+ res = AB8500_ADC_CH_IBAT_MIN + (AB8500_ADC_CH_IBAT_MAX -
+ AB8500_ADC_CH_IBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_IBAT].gain +
+ gpadc->cal_data[AB8500_CAL_IBAT].offset)
+ >> AB8500_GPADC_CALIB_SHIFT_IBAT;
+ break;
+
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel ID: %d, not possible to convert\n",
+ ch);
+ res = -EINVAL;
+ break;
+
+ }
+
+ return res;
+}
+
+static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc,
+ const struct ab8500_gpadc_chan_info *ch,
+ int *ibat)
+{
+ int ret;
+ int looplimit = 0;
+ unsigned long completion_timeout;
+ u8 val;
+ u8 low_data, high_data, low_data2, high_data2;
+ u8 ctrl1;
+ u8 ctrl23;
+ unsigned int delay_min = 0;
+ unsigned int delay_max = 0;
+ u8 data_low_addr, data_high_addr;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ /* check if conversion is supported */
+ if ((gpadc->irq_sw <= 0) && !ch->hardware_control)
+ return -ENOTSUPP;
+ if ((gpadc->irq_hw <= 0) && ch->hardware_control)
+ return -ENOTSUPP;
+
+ /* Enable vddadc by grabbing PM runtime */
+ pm_runtime_get_sync(gpadc->dev);
+
+ /* Check if ADC is not busy, lock and proceed */
+ do {
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & AB8500_GPADC_STAT_BUSY))
+ break;
+ msleep(20);
+ } while (++looplimit < 10);
+ if (looplimit >= 10 && (val & AB8500_GPADC_STAT_BUSY)) {
+ dev_err(gpadc->dev, "gpadc_conversion: GPADC busy\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Enable GPADC */
+ ctrl1 = AB8500_GPADC_CTRL1_ENABLE;
+
+ /* Select the channel source and set average samples */
+ switch (ch->avg_sample) {
+ case 1:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_1;
+ break;
+ case 4:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_4;
+ break;
+ case 8:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_8;
+ break;
+ default:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_16;
+ break;
+ }
+
+ if (ch->hardware_control) {
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL3_REG, ctrl23);
+ ctrl1 |= AB8500_GPADC_CTRL1_TRIG_ENA;
+ if (ch->falling_edge)
+ ctrl1 |= AB8500_GPADC_CTRL1_TRIG_EDGE;
+ } else {
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL2_REG, ctrl23);
+ }
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set avg samples failed\n");
+ goto out;
+ }
+
+ /*
+ * Enable ADC, buffering, select rising edge and enable the ADC path
+ * charging current sense if needed; ABB 3.0 needs some special
+ * treatment too.
+ */
+ switch (ch->id) {
+ case AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT:
+ case AB8500_GPADC_CHAN_USB_CHARGER_CURRENT:
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA |
+ AB8500_GPADC_CTRL1_ICHAR_ENA;
+ break;
+ case AB8500_GPADC_CHAN_BAT_TEMP:
+ if (!is_ab8500_2p0_or_earlier(gpadc->ab8500)) {
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA |
+ AB8500_GPADC_CTRL1_BTEMP_PULL_UP;
+ /*
+ * A delay might be needed for AB8500 cut 3.0; if not,
+ * remove this when hardware is available
+ */
+ delay_min = 1000; /* Delay in microseconds */
+ delay_max = 10000; /* large range optimises sleep mode */
+ break;
+ }
+ /* Fall through */
+ default:
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA;
+ break;
+ }
+
+ /* Write configuration to control register 1 */
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ctrl1);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set Control register failed\n");
+ goto out;
+ }
+
+ if (delay_min != 0)
+ usleep_range(delay_min, delay_max);
+
+ if (ch->hardware_control) {
+ /* Set trigger delay timer */
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG,
+ ch->trig_timer);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: trig timer failed\n");
+ goto out;
+ }
+ completion_timeout = 2 * HZ;
+ data_low_addr = AB8500_GPADC_AUTODATAL_REG;
+ data_high_addr = AB8500_GPADC_AUTODATAH_REG;
+ } else {
+ /* Start SW conversion */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+ AB8500_GPADC_CTRL1_START_SW_CONV,
+ AB8500_GPADC_CTRL1_START_SW_CONV);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: start s/w conv failed\n");
+ goto out;
+ }
+ completion_timeout = msecs_to_jiffies(AB8500_GPADC_CONVERSION_TIME);
+ data_low_addr = AB8500_GPADC_MANDATAL_REG;
+ data_high_addr = AB8500_GPADC_MANDATAH_REG;
+ }
+
+ /* Wait for completion of conversion */
+ if (!wait_for_completion_timeout(&gpadc->complete,
+ completion_timeout)) {
+ dev_err(gpadc->dev,
+ "timeout: didn't receive GPADC conversion interrupt\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, data_low_addr, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, data_high_addr, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read high data failed\n");
+ goto out;
+ }
+
+ /* Check if double conversion is required */
+ if ((ch->id == AB8500_GPADC_CHAN_BAT_CTRL_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_VBAT_MEAS_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_VBAT_TRUE_MEAS_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT)) {
+
+ if (ch->hardware_control) {
+ /* not supported */
+ ret = -ENOTSUPP;
+ dev_err(gpadc->dev,
+ "gpadc_conversion: only SW double conversion supported\n");
+ goto out;
+ } else {
+ /* Read the converted RAW data 2 */
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
+ &low_data2);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read sw low data 2 failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
+ &high_data2);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read sw high data 2 failed\n");
+ goto out;
+ }
+ if (ibat != NULL) {
+ *ibat = (high_data2 << 8) | low_data2;
+ } else {
+ dev_warn(gpadc->dev,
+ "gpadc_conversion: ibat not stored\n");
+ }
+
+ }
+ }
+
+ /* Disable GPADC */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, AB8500_GPADC_CTRL1_DISABLE);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
+ goto out;
+ }
+
+ /* This eventually drops the regulator */
+ pm_runtime_mark_last_busy(gpadc->dev);
+ pm_runtime_put_autosuspend(gpadc->dev);
+
+ return (high_data << 8) | low_data;
+
+out:
+ /*
+ * It has proven necessary to turn off the GPADC if an error occurs;
+ * otherwise we might have problems waiting for the busy bit in the
+ * GPADC status register to go low. On V1.1 the wait_for_completion
+ * seems to time out when waiting for an interrupt. Not seen on V2.0.
+ */
+ (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, AB8500_GPADC_CTRL1_DISABLE);
+ pm_runtime_put(gpadc->dev);
+ dev_err(gpadc->dev,
+ "gpadc_conversion: Failed to AD convert channel %d\n", ch->id);
+
+ return ret;
+}
+
+/**
+ * ab8500_bm_gpadcconvend_handler() - isr for gpadc conversion completion
+ * @irq: irq number
+ * @data: pointer to the data passed during request irq
+ *
+ * This is an interrupt service routine for GPADC conversion completion.
+ * It signals that the GPADC conversion has completed and that the
+ * converted raw value can be read from the registers.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *data)
+{
+ struct ab8500_gpadc *gpadc = data;
+
+ complete(&gpadc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int otp_cal_regs[] = {
+ AB8500_GPADC_CAL_1,
+ AB8500_GPADC_CAL_2,
+ AB8500_GPADC_CAL_3,
+ AB8500_GPADC_CAL_4,
+ AB8500_GPADC_CAL_5,
+ AB8500_GPADC_CAL_6,
+ AB8500_GPADC_CAL_7,
+};
+
+static int otp4_cal_regs[] = {
+ AB8540_GPADC_OTP4_REG_7,
+ AB8540_GPADC_OTP4_REG_6,
+ AB8540_GPADC_OTP4_REG_5,
+};
+
+static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
+{
+ int i;
+ int ret[ARRAY_SIZE(otp_cal_regs)];
+ u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
+ int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
+ u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
+ int vmain_high, vmain_low;
+ int btemp_high, btemp_low;
+ int vbat_high, vbat_low;
+ int ibat_high, ibat_low;
+ s64 V_gain, V_offset, V2A_gain, V2A_offset;
+
+ /* First we read all OTP registers and store the error code */
+ for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
+ ret[i] = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
+ if (ret[i] < 0) {
+ /* Continue anyway: maybe the other registers are OK */
+ dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
+ __func__, otp_cal_regs[i]);
+ } else {
+ /* Put this in the entropy pool as device-unique */
+ add_device_randomness(&ret[i], sizeof(ret[i]));
+ }
+ }
+
+ /*
+ * The ADC calibration data is stored in OTP registers.
+ * The layout of the calibration data is outlined below and a more
+ * detailed description can be found in UM0836
+ *
+ * vm_h/l = vmain_high/low
+ * bt_h/l = btemp_high/low
+ * vb_h/l = vbat_high/low
+ *
+ * Data bits 8500/9540:
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h9 | vm_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ * Data bits 8540:
+ * OTP2
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ * Data bits 8540:
+ * OTP4
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | ib_h9 | ib_h8 | ib_h7
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
+ *
+ *
+ * Ideal output ADC codes corresponding to injected input voltages
+ * during manufacturing is:
+ *
+ * vmain_high: Vin = 19500mV / ADC ideal code = 997
+ * vmain_low: Vin = 315mV / ADC ideal code = 16
+ * btemp_high: Vin = 1300mV / ADC ideal code = 985
+ * btemp_low: Vin = 21mV / ADC ideal code = 16
+ * vbat_high: Vin = 4700mV / ADC ideal code = 982
+ * vbat_low: Vin = 2380mV / ADC ideal code = 33
+ */
+
+ if (is_ab8540(gpadc->ab8500)) {
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_hi =
+ (u16)vmain_high;
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_lo =
+ (u16)vmain_low;
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = AB8500_GPADC_CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset = AB8500_GPADC_CALIB_SCALE *
+ 19500 - (AB8500_GPADC_CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = 0;
+ }
+
+ /* Read IBAT calibration Data */
+ for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
+ ret_otp4[i] = abx500_get_register_interruptible(
+ gpadc->dev, AB8500_OTP_EMUL,
+ otp4_cal_regs[i], &gpadc_otp4[i]);
+ if (ret_otp4[i] < 0)
+ dev_err(gpadc->dev,
+ "%s: read otp4 reg 0x%02x failed\n",
+ __func__, otp4_cal_regs[i]);
+ }
+
+ /* Calculate gain and offset for IBAT if all reads succeeded */
+ if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
+ ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
+ ((gpadc_otp4[1] & 0xFE) >> 1));
+ ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
+ ((gpadc_otp4[2] & 0xF8) >> 3));
+
+ gpadc->cal_data[AB8500_CAL_IBAT].otp_calib_hi =
+ (u16)ibat_high;
+ gpadc->cal_data[AB8500_CAL_IBAT].otp_calib_lo =
+ (u16)ibat_low;
+
+ V_gain = ((AB8500_GPADC_IBAT_VDROP_H - AB8500_GPADC_IBAT_VDROP_L)
+ << AB8500_GPADC_CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
+
+ V_offset = (AB8500_GPADC_IBAT_VDROP_H << AB8500_GPADC_CALIB_SHIFT_IBAT) -
+ (((AB8500_GPADC_IBAT_VDROP_H - AB8500_GPADC_IBAT_VDROP_L) <<
+ AB8500_GPADC_CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
+ * ibat_high;
+ /*
+ * Result obtained is in mV (at a scale factor),
+ * we need to calculate gain and offset to get mA
+ */
+ V2A_gain = (AB8500_ADC_CH_IBAT_MAX - AB8500_ADC_CH_IBAT_MIN)/
+ (AB8500_ADC_CH_IBAT_MAX_V - AB8500_ADC_CH_IBAT_MIN_V);
+ V2A_offset = ((AB8500_ADC_CH_IBAT_MAX_V * AB8500_ADC_CH_IBAT_MIN -
+ AB8500_ADC_CH_IBAT_MAX * AB8500_ADC_CH_IBAT_MIN_V)
+ << AB8500_GPADC_CALIB_SHIFT_IBAT)
+ / (AB8500_ADC_CH_IBAT_MAX_V - AB8500_ADC_CH_IBAT_MIN_V);
+
+ gpadc->cal_data[AB8500_CAL_IBAT].gain =
+ V_gain * V2A_gain;
+ gpadc->cal_data[AB8500_CAL_IBAT].offset =
+ V_offset * V2A_gain + V2A_offset;
+ } else {
+ gpadc->cal_data[AB8500_CAL_IBAT].gain = 0;
+ }
+ } else {
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
+ ((gpadc_cal[1] & 0x3F) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_hi =
+ (u16)vmain_high;
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_lo =
+ (u16)vmain_low;
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = AB8500_GPADC_CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset = AB8500_GPADC_CALIB_SCALE *
+ 19500 - (AB8500_GPADC_CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = 0;
+ }
+ }
+
+ /* Calculate gain and offset for BTEMP if all reads succeeded */
+ if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
+ btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
+ (gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
+ btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
+
+ gpadc->cal_data[AB8500_CAL_BTEMP].otp_calib_hi = (u16)btemp_high;
+ gpadc->cal_data[AB8500_CAL_BTEMP].otp_calib_lo = (u16)btemp_low;
+
+ gpadc->cal_data[AB8500_CAL_BTEMP].gain =
+ AB8500_GPADC_CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
+ gpadc->cal_data[AB8500_CAL_BTEMP].offset = AB8500_GPADC_CALIB_SCALE * 1300 -
+ (AB8500_GPADC_CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
+ * btemp_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_BTEMP].gain = 0;
+ }
+
+ /* Calculate gain and offset for VBAT if all reads succeeded */
+ if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
+ vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
+ vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
+
+ gpadc->cal_data[AB8500_CAL_VBAT].otp_calib_hi = (u16)vbat_high;
+ gpadc->cal_data[AB8500_CAL_VBAT].otp_calib_lo = (u16)vbat_low;
+
+ gpadc->cal_data[AB8500_CAL_VBAT].gain = AB8500_GPADC_CALIB_SCALE *
+ (4700 - 2380) / (vbat_high - vbat_low);
+ gpadc->cal_data[AB8500_CAL_VBAT].offset = AB8500_GPADC_CALIB_SCALE * 4700 -
+ (AB8500_GPADC_CALIB_SCALE * (4700 - 2380) /
+ (vbat_high - vbat_low)) * vbat_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VBAT].gain = 0;
+ }
+}
+
+static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+ const struct ab8500_gpadc_chan_info *ch;
+ int raw_val;
+ int processed;
+
+ ch = ab8500_gpadc_get_channel(gpadc, chan->address);
+ if (!ch) {
+ dev_err(gpadc->dev, "no such channel %lu\n",
+ chan->address);
+ return -EINVAL;
+ }
+
+ raw_val = ab8500_gpadc_read(gpadc, ch, NULL);
+ if (raw_val < 0)
+ return raw_val;
+
+ if (mask == IIO_CHAN_INFO_RAW) {
+ *val = raw_val;
+ return IIO_VAL_INT;
+ }
+
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ processed = ab8500_gpadc_ad_to_voltage(gpadc, ch->id, raw_val);
+ if (processed < 0)
+ return processed;
+
+ /* Return millivolts, milliamps or millidegrees centigrade */
+ *val = processed * 1000;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+static const struct iio_info ab8500_gpadc_info = {
+ .of_xlate = ab8500_gpadc_of_xlate,
+ .read_raw = ab8500_gpadc_read_raw,
+};
+
+#ifdef CONFIG_PM
+static int ab8500_gpadc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+
+ regulator_disable(gpadc->vddadc);
+
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(gpadc->vddadc);
+ if (ret)
+ dev_err(dev, "Failed to enable vddadc: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+/**
+ * ab8500_gpadc_parse_channel() - process devicetree channel configuration
+ * @dev: pointer to containing device
+ * @np: device tree node for the channel to configure
+ * @ch: channel info to fill in
+ * @iio_chan: IIO channel specification to fill in
+ *
+ * The devicetree will set up the channel for use with the specific device,
+ * and define usage for things like AUX GPADC inputs more precisely.
+ */
+static int ab8500_gpadc_parse_channel(struct device *dev,
+ struct device_node *np,
+ struct ab8500_gpadc_chan_info *ch,
+ struct iio_chan_spec *iio_chan)
+{
+ const char *name = np->name;
+ u32 chan;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &chan);
+ if (ret) {
+ dev_err(dev, "invalid channel number %s\n", name);
+ return ret;
+ }
+ if (chan > AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT) {
+ dev_err(dev, "%s channel number out of range %d\n", name, chan);
+ return -EINVAL;
+ }
+
+ iio_chan->channel = chan;
+ iio_chan->datasheet_name = name;
+ iio_chan->indexed = 1;
+ iio_chan->address = chan;
+ iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED);
+ /* Most are voltages (also temperatures), some are currents */
+ if ((chan == AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT) ||
+ (chan == AB8500_GPADC_CHAN_USB_CHARGER_CURRENT))
+ iio_chan->type = IIO_CURRENT;
+ else
+ iio_chan->type = IIO_VOLTAGE;
+
+ ch->id = chan;
+
+ /* Sensible defaults */
+ ch->avg_sample = 16;
+ ch->hardware_control = false;
+ ch->falling_edge = false;
+ ch->trig_timer = 0;
+
+ return 0;
+}
+
+/**
+ * ab8500_gpadc_parse_channels() - Parse the GPADC channels from DT
+ * @gpadc: the GPADC to configure the channels for
+ * @np: device tree node containing the channel configurations
+ * @chans_parsed: the IIO channels we parsed
+ * @nchans_parsed: the number of IIO channels we parsed
+ */
+static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
+ struct device_node *np,
+ struct iio_chan_spec **chans_parsed,
+ unsigned int *nchans_parsed)
+{
+ struct device_node *child;
+ struct ab8500_gpadc_chan_info *ch;
+ struct iio_chan_spec *iio_chans;
+ unsigned int nchans;
+ int i;
+
+ nchans = of_get_available_child_count(np);
+ if (!nchans) {
+ dev_err(gpadc->dev, "no channel children\n");
+ return -ENODEV;
+ }
+ dev_info(gpadc->dev, "found %d ADC channels\n", nchans);
+
+ iio_chans = devm_kcalloc(gpadc->dev, nchans,
+ sizeof(*iio_chans), GFP_KERNEL);
+ if (!iio_chans)
+ return -ENOMEM;
+
+ gpadc->chans = devm_kcalloc(gpadc->dev, nchans,
+ sizeof(*gpadc->chans), GFP_KERNEL);
+ if (!gpadc->chans)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ struct iio_chan_spec *iio_chan;
+ int ret;
+
+ ch = &gpadc->chans[i];
+ iio_chan = &iio_chans[i];
+
+ ret = ab8500_gpadc_parse_channel(gpadc->dev, child, ch,
+ iio_chan);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ i++;
+ }
+ gpadc->nchans = nchans;
+ *chans_parsed = iio_chans;
+ *nchans_parsed = nchans;
+
+ return 0;
+}
+
+static int ab8500_gpadc_probe(struct platform_device *pdev)
+{
+ struct ab8500_gpadc *gpadc;
+ struct iio_dev *indio_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iio_chan_spec *iio_chans;
+ unsigned int n_iio_chans;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ gpadc = iio_priv(indio_dev);
+
+ gpadc->dev = dev;
+ gpadc->ab8500 = dev_get_drvdata(dev->parent);
+
+ ret = ab8500_gpadc_parse_channels(gpadc, np, &iio_chans, &n_iio_chans);
+ if (ret)
+ return ret;
+
+ gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
+ if (gpadc->irq_sw < 0) {
+ dev_err(dev, "failed to get platform sw_conv_end irq\n");
+ return gpadc->irq_sw;
+ }
+
+ gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
+ if (gpadc->irq_hw < 0) {
+ dev_err(dev, "failed to get platform hw_conv_end irq\n");
+ return gpadc->irq_hw;
+ }
+
+ /* Initialize completion used to notify completion of conversion */
+ init_completion(&gpadc->complete);
+
+ /* Request interrupts */
+ ret = devm_request_threaded_irq(dev, gpadc->irq_sw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-sw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to request sw conversion irq %d\n",
+ gpadc->irq_sw);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-hw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to request hw conversion irq: %d\n",
+ gpadc->irq_hw);
+ return ret;
+ }
+
+ /* The VTVout LDO used to power the AB8500 GPADC */
+ gpadc->vddadc = devm_regulator_get(dev, "vddadc");
+ if (IS_ERR(gpadc->vddadc)) {
+ ret = PTR_ERR(gpadc->vddadc);
+ dev_err(dev, "failed to get vddadc\n");
+ return ret;
+ }
+
+ ret = regulator_enable(gpadc->vddadc);
+ if (ret) {
+ dev_err(dev, "failed to enable vddadc: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, AB8500_GPADC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ ab8500_gpadc_read_calibration_data(gpadc);
+
+ pm_runtime_put(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = np;
+ indio_dev->name = "ab8500-gpadc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ab8500_gpadc_info;
+ indio_dev->channels = iio_chans;
+ indio_dev->num_channels = n_iio_chans;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ goto out_dis_pm;
+
+ return 0;
+
+out_dis_pm:
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ regulator_disable(gpadc->vddadc);
+
+ return ret;
+}
+
+static int ab8500_gpadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(gpadc->dev);
+ pm_runtime_put_noidle(gpadc->dev);
+ pm_runtime_disable(gpadc->dev);
+ regulator_disable(gpadc->vddadc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
+ ab8500_gpadc_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver ab8500_gpadc_driver = {
+ .probe = ab8500_gpadc_probe,
+ .remove = ab8500_gpadc_remove,
+ .driver = {
+ .name = "ab8500-gpadc",
+ .pm = &ab8500_gpadc_pm_ops,
+ },
+};
+builtin_platform_driver(ab8500_gpadc_driver);
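The fixed-point calibration in ab8500_gpadc_read_calibration_data() is easier to follow with numbers. A worked sketch of the VMAIN branch, assuming the ideal OTP codes quoted in the comment above (code 997 at 19500 mV, code 16 at 315 mV); production parts store slightly different codes:

#include <linux/types.h>

/* Mirrors the calibrated VMAIN branch of ab8500_gpadc_ad_to_voltage() */
static int vmain_code_to_mv(int ad_value)
{
	const int scale = 1000;		/* AB8500_GPADC_CALIB_SCALE */
	const int vmain_high = 997;	/* ideal ADC code at 19500 mV */
	const int vmain_low = 16;	/* ideal ADC code at 315 mV */
	s64 gain, offset;

	/* 1000 * (19500 - 315) / (997 - 16) = 19556, i.e. ~19.556 mV/code */
	gain = scale * (19500 - 315) / (vmain_high - vmain_low);
	/* offset chosen so code 997 maps back to exactly 19500 mV: 2668 */
	offset = scale * 19500 - gain * vmain_high;

	/* e.g. ad_value = 500: (500 * 19556 + 2668) / 1000 = 9780 mV */
	return (int)((ad_value * gain + offset) / scale);
}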
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
new file mode 100644
index 000000000000..a6798f7dfdb8
--- /dev/null
+++ b/drivers/iio/adc/ad7292.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AD7292 SPI ADC driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+
+#define ADI_VENDOR_ID 0x0018
+
+/* AD7292 registers definition */
+#define AD7292_REG_VENDOR_ID 0x00
+#define AD7292_REG_CONF_BANK 0x05
+#define AD7292_REG_CONV_COMM 0x0E
+#define AD7292_REG_ADC_CH(x) (0x10 + (x))
+
+/* AD7292 configuration bank subregisters definition */
+#define AD7292_BANK_REG_VIN_RNG0 0x10
+#define AD7292_BANK_REG_VIN_RNG1 0x11
+#define AD7292_BANK_REG_SAMP_MODE 0x12
+
+#define AD7292_RD_FLAG_MSK(x) (BIT(7) | ((x) & 0x3F))
+
+/* AD7292_REG_ADC_CONVERSION */
+#define AD7292_ADC_DATA_MASK GENMASK(15, 6)
+#define AD7292_ADC_DATA(x) FIELD_GET(AD7292_ADC_DATA_MASK, x)
+
+/* AD7292_CHANNEL_SAMPLING_MODE */
+#define AD7292_CH_SAMP_MODE(reg, ch) (((reg) >> 8) & BIT(ch))
+
+/* AD7292_CHANNEL_VIN_RANGE */
+#define AD7292_CH_VIN_RANGE(reg, ch) ((reg) & BIT(ch))
+
+#define AD7292_VOLTAGE_CHAN(_chan) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = _chan, \
+}
+
+static const struct iio_chan_spec ad7292_channels[] = {
+ AD7292_VOLTAGE_CHAN(0),
+ AD7292_VOLTAGE_CHAN(1),
+ AD7292_VOLTAGE_CHAN(2),
+ AD7292_VOLTAGE_CHAN(3),
+ AD7292_VOLTAGE_CHAN(4),
+ AD7292_VOLTAGE_CHAN(5),
+ AD7292_VOLTAGE_CHAN(6),
+ AD7292_VOLTAGE_CHAN(7)
+};
+
+static const struct iio_chan_spec ad7292_channels_diff[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .indexed = 1,
+ .differential = 1,
+ .channel = 0,
+ .channel2 = 1,
+ },
+ AD7292_VOLTAGE_CHAN(2),
+ AD7292_VOLTAGE_CHAN(3),
+ AD7292_VOLTAGE_CHAN(4),
+ AD7292_VOLTAGE_CHAN(5),
+ AD7292_VOLTAGE_CHAN(6),
+ AD7292_VOLTAGE_CHAN(7)
+};
+
+struct ad7292_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned short vref_mv;
+
+ __be16 d16 ____cacheline_aligned;
+ u8 d8[2];
+};
+
+static int ad7292_spi_reg_read(struct ad7292_state *st, unsigned int addr)
+{
+ int ret;
+
+ st->d8[0] = AD7292_RD_FLAG_MSK(addr);
+
+ ret = spi_write_then_read(st->spi, st->d8, 1, &st->d16, 2);
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16);
+}
+
+static int ad7292_spi_subreg_read(struct ad7292_state *st, unsigned int addr,
+ unsigned int sub_addr, unsigned int len)
+{
+ unsigned int shift = 16 - (8 * len);
+ int ret;
+
+ st->d8[0] = AD7292_RD_FLAG_MSK(addr);
+ st->d8[1] = sub_addr;
+
+ ret = spi_write_then_read(st->spi, st->d8, 2, &st->d16, len);
+ if (ret < 0)
+ return ret;
+
+ return (be16_to_cpu(st->d16) >> shift);
+}
+
+static int ad7292_single_conversion(struct ad7292_state *st,
+ unsigned int chan_addr)
+{
+ int ret;
+
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->d8,
+ .len = 4,
+ .delay_usecs = 6,
+ }, {
+ .rx_buf = &st->d16,
+ .len = 2,
+ },
+ };
+
+ st->d8[0] = chan_addr;
+ st->d8[1] = AD7292_RD_FLAG_MSK(AD7292_REG_CONV_COMM);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16);
+}
+
+static int ad7292_vin_range_multiplier(struct ad7292_state *st, int channel)
+{
+ int samp_mode, range0, range1, factor = 1;
+
+ /*
+ * Every AD7292 ADC channel may have its input range adjusted according
+ * to the settings in the ADC sampling mode and VIN range subregisters.
+ * For a given channel, the minimum input range is equal to Vref, and it
+ * may be increased by a multiplier factor of 2 or 4 according to the
+ * following rules:
+ * If the channel is being sampled with respect to AGND:
+ * factor = 4 if VIN range0 and VIN range1 both equal 0
+ * factor = 2 if only one of the VIN ranges equals 1
+ * factor = 1 if both VIN range0 and VIN range1 equal 1
+ * If the channel is being sampled with respect to AVDD:
+ * factor = 4 if VIN range0 and VIN range1 both equal 0
+ * Behavior is undefined if either VIN range doesn't equal 0
+ */
+
+ samp_mode = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_SAMP_MODE, 2);
+
+ if (samp_mode < 0)
+ return samp_mode;
+
+ range0 = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_VIN_RNG0, 2);
+
+ if (range0 < 0)
+ return range0;
+
+ range1 = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_VIN_RNG1, 2);
+
+ if (range1 < 0)
+ return range1;
+
+ if (AD7292_CH_SAMP_MODE(samp_mode, channel)) {
+ /* Sampling with respect to AGND */
+ if (!AD7292_CH_VIN_RANGE(range0, channel))
+ factor *= 2;
+
+ if (!AD7292_CH_VIN_RANGE(range1, channel))
+ factor *= 2;
+
+ } else {
+ /* Sampling with respect to AVDD */
+ if (AD7292_CH_VIN_RANGE(range0, channel) ||
+ AD7292_CH_VIN_RANGE(range1, channel))
+ return -EPERM;
+
+ factor = 4;
+ }
+
+ return factor;
+}
+
+static int ad7292_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7292_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ch_addr = AD7292_REG_ADC_CH(chan->channel);
+ ret = ad7292_single_conversion(st, ch_addr);
+ if (ret < 0)
+ return ret;
+
+ *val = AD7292_ADC_DATA(ret);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * To convert a raw value to standard units, IIO defines this
+ * formula: scaled value = (raw + offset) * scale.
+ * For the scale to be a correct multiplier for (raw + offset),
+ * it must be calculated as the input range divided by the
+ * number of possible distinct input values. Since the ADC data
+ * is 10 bits long, it may assume 2^10 distinct values.
+ * Hence, scale = range / 2^10. The IIO_VAL_FRACTIONAL_LOG2
+ * return type tells the IIO core to divide *val by 2 to the
+ * power of *val2 when returning from read_raw.
+ */
+
+ ret = ad7292_vin_range_multiplier(st, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ *val = st->vref_mv * ret;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info ad7292_info = {
+ .read_raw = ad7292_read_raw,
+};
+
+static void ad7292_regulator_disable(void *data)
+{
+ struct ad7292_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int ad7292_probe(struct spi_device *spi)
+{
+ struct ad7292_state *st;
+ struct iio_dev *indio_dev;
+ struct device_node *child;
+ bool diff_channels = false;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = ad7292_spi_reg_read(st, AD7292_REG_VENDOR_ID);
+ if (ret != ADI_VENDOR_ID) {
+ dev_err(&spi->dev, "Wrong vendor id 0x%x\n", ret);
+ return -EINVAL;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable external vref supply\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad7292_regulator_disable, st);
+ if (ret) {
+ regulator_disable(st->reg);
+ return ret;
+ }
+
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ return ret;
+
+ st->vref_mv = ret / 1000;
+ } else {
+ /* Use the internal voltage reference. */
+ st->vref_mv = 1250;
+ }
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad7292_info;
+
+ for_each_available_child_of_node(spi->dev.of_node, child) {
+ diff_channels = of_property_read_bool(child, "diff-channels");
+ if (diff_channels)
+ break;
+ }
+
+ if (diff_channels) {
+ indio_dev->num_channels = ARRAY_SIZE(ad7292_channels_diff);
+ indio_dev->channels = ad7292_channels_diff;
+ } else {
+ indio_dev->num_channels = ARRAY_SIZE(ad7292_channels);
+ indio_dev->channels = ad7292_channels;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad7292_id_table[] = {
+ { "ad7292", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7292_id_table);
+
+static const struct of_device_id ad7292_of_match[] = {
+ { .compatible = "adi,ad7292" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7292_of_match);
+
+static struct spi_driver ad7292_driver = {
+ .driver = {
+ .name = "ad7292",
+ .of_match_table = ad7292_of_match,
+ },
+ .probe = ad7292_probe,
+ .id_table = ad7292_id_table,
+};
+module_spi_driver(ad7292_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt1@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD7292 ADC driver");
+MODULE_LICENSE("GPL v2");
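To make the IIO_VAL_FRACTIONAL_LOG2 scale in ad7292_read_raw() concrete: with the internal 1250 mV reference and a range factor of 4, the driver reports *val = 5000 and *val2 = 10, which the IIO core evaluates as 5000 / 2^10, i.e. ~4.883 mV per code. A sketch of the equivalent end-to-end conversion (the helper name is illustrative, not part of the driver):

/* raw * (vref_mv * factor) / 2^10, carried out in microvolts */
static int ad7292_code_to_uv(int raw, int vref_mv, int range_factor)
{
	return (int)(((long long)raw * vref_mv * range_factor * 1000) >> 10);
}

/* e.g. raw = 512, vref = 1250 mV, factor = 4: 512 * 5000000 / 1024 = 2500000 uV */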
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index ac0ffff6c5ae..5c2b3446fa4a 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -54,38 +54,20 @@ struct ad7949_adc_chip {
u8 resolution;
u16 cfg;
unsigned int current_channel;
- u32 buffer ____cacheline_aligned;
+ u16 buffer ____cacheline_aligned;
};
-static bool ad7949_spi_cfg_is_read_back(struct ad7949_adc_chip *ad7949_adc)
-{
- if (!(ad7949_adc->cfg & AD7949_CFG_READ_BACK))
- return true;
-
- return false;
-}
-
-static int ad7949_spi_bits_per_word(struct ad7949_adc_chip *ad7949_adc)
-{
- int ret = ad7949_adc->resolution;
-
- if (ad7949_spi_cfg_is_read_back(ad7949_adc))
- ret += AD7949_CFG_REG_SIZE_BITS;
-
- return ret;
-}
-
static int ad7949_spi_write_cfg(struct ad7949_adc_chip *ad7949_adc, u16 val,
u16 mask)
{
int ret;
- int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc);
+ int bits_per_word = ad7949_adc->resolution;
int shift = bits_per_word - AD7949_CFG_REG_SIZE_BITS;
struct spi_message msg;
struct spi_transfer tx[] = {
{
.tx_buf = &ad7949_adc->buffer,
- .len = 4,
+ .len = 2,
.bits_per_word = bits_per_word,
},
};
@@ -107,13 +89,13 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
unsigned int channel)
{
int ret;
- int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc);
+ int bits_per_word = ad7949_adc->resolution;
int mask = GENMASK(ad7949_adc->resolution, 0);
struct spi_message msg;
struct spi_transfer tx[] = {
{
.rx_buf = &ad7949_adc->buffer,
- .len = 4,
+ .len = 2,
.bits_per_word = bits_per_word,
},
};
@@ -138,10 +120,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
ad7949_adc->current_channel = channel;
- if (ad7949_spi_cfg_is_read_back(ad7949_adc))
- *val = (ad7949_adc->buffer >> AD7949_CFG_REG_SIZE_BITS) & mask;
- else
- *val = ad7949_adc->buffer & mask;
+ *val = ad7949_adc->buffer & mask;
return 0;
}
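A sketch of why the ad7949 buffer can shrink, assuming AD7949_CFG_REG_SIZE_BITS is the 14-bit configuration register width: with read-back, a frame carried resolution + 14 bits (up to 28, hence the old u32 buffer and len = 4); without it, a frame is just the resolution (14 bits on the AD7949), which the SPI core packs into a single 16-bit word:

#include <linux/spi/spi.h>

static int ad7949_example_read(struct spi_device *spi, u16 *buf)
{
	/* One 14-bit frame now fits a single 16-bit SPI word */
	struct spi_transfer t = {
		.rx_buf		= buf,
		.len		= 2,	/* was 4 for read-back frames */
		.bits_per_word	= 14,	/* = ad7949_adc->resolution */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &t, 1);
	return spi_sync(spi, &msg);
}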
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 2640b75fb774..8ba90486c787 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -205,7 +205,7 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
}
EXPORT_SYMBOL_GPL(ad_sd_reset);
-static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
int ret;
@@ -242,6 +242,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(ad_sd_calibrate);
/**
* ad_sd_calibrate_all() - Performs channel calibration
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index d3fc39df535d..1e5375235cfe 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -173,7 +173,6 @@ static int aspeed_adc_probe(struct platform_device *pdev)
struct iio_dev *indio_dev;
struct aspeed_adc_data *data;
const struct aspeed_adc_model_data *model_data;
- struct resource *res;
const char *clk_parent_name;
int ret;
u32 adc_engine_control_reg_val;
@@ -185,8 +184,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index a2837a0e7cba..e1850f3d5cf3 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1483,7 +1483,7 @@ dma_free_area:
st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
dma_chan_disable:
dma_release_channel(st->dma_st.dma_chan);
- st->dma_st.dma_chan = 0;
+ st->dma_st.dma_chan = NULL;
dma_exit:
dev_info(&pdev->dev, "continuing without DMA support\n");
}
@@ -1506,7 +1506,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
dma_release_channel(st->dma_st.dma_chan);
- st->dma_st.dma_chan = 0;
+ st->dma_st.dma_chan = NULL;
dev_info(&pdev->dev, "continuing without DMA support\n");
}
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 646ebdc0a8b4..5e396104ac86 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -308,7 +308,7 @@ static int iproc_adc_do_read(struct iio_dev *indio_dev,
"IntMask set failed. Read will likely fail.");
read_len = -EIO;
goto adc_err;
- };
+ }
}
regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index f93f1d93b80d..fe9257624f16 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -310,7 +310,6 @@ static int cc10001_adc_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct cc10001_adc_device *adc_dev;
unsigned long adc_clk_rate;
- struct resource *res;
struct iio_dev *indio_dev;
unsigned long channel_map;
int ret;
@@ -340,8 +339,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
indio_dev->info = &cc10001_adc_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc_dev->reg_base)) {
ret = PTR_ERR(adc_dev->reg_base);
goto err_disable_reg;
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 2d616cafe75f..5086a337f4c9 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -1008,7 +1008,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
cpcap_adc_irq_thread,
- IRQF_TRIGGER_NONE,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"cpcap-adc", indio_dev);
if (error) {
dev_err(&pdev->dev, "could not get irq: %i\n",
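
Note: a threaded interrupt requested with a NULL primary handler must be marked one-shot, otherwise the genirq core rejects the request with -EINVAL, since the line could retrigger before the thread has run. The now-valid shape, as a sketch (example_irq_thread is hypothetical):

	/* NULL hard handler + IRQF_ONESHOT: the line stays masked
	 * until the thread function returns. */
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					example_irq_thread,
					IRQF_TRIGGER_NONE | IRQF_ONESHOT,
					"example", priv);
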
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 5fa78c273a25..65c7c9329b1c 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -524,6 +524,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
u16 conflict;
unsigned int trigger_chan;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&dln2->mutex);
/* Enable ADC */
@@ -537,6 +541,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
(int)conflict);
ret = -EBUSY;
}
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
@@ -550,6 +555,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
if (ret < 0) {
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
} else {
@@ -557,12 +563,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
}
- return iio_triggered_buffer_postenable(indio_dev);
+ return 0;
}
static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- int ret;
+ int ret, ret2;
struct dln2_adc *dln2 = iio_priv(indio_dev);
mutex_lock(&dln2->mutex);
@@ -577,12 +583,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
ret = dln2_adc_set_port_enabled(dln2, false, NULL);
mutex_unlock(&dln2->mutex);
- if (ret < 0) {
+ if (ret < 0)
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
- return ret;
- }
- return iio_triggered_buffer_predisable(indio_dev);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
}
static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
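
Note: the reworked dln2 callbacks follow a common buffer-ops idiom: postenable runs the core helper first and unwinds it on any later failure, while predisable records the first error but still calls the core helper so the buffer state is never left half torn down. The disable side reduces to this sketch (example_hw_disable is hypothetical):

	static int example_predisable(struct iio_dev *indio_dev)
	{
		int ret = example_hw_disable(indio_dev);
		int ret2 = iio_triggered_buffer_predisable(indio_dev);

		/* Report the first failure, but always run both steps. */
		return ret ? ret : ret2;
	}
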
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 42a3ced11fbd..2df7d057b249 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -651,7 +651,7 @@ static irqreturn_t exynos_ts_isr(int irq, void *dev_id)
input_sync(info->input);
usleep_range(1000, 1100);
- };
+ }
writel(0, ADC_V1_CLRINTPNDNUP(info->regs));
@@ -769,7 +769,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c2410_ts_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = NULL;
- struct resource *mem;
bool has_ts = false;
int ret = -ENODEV;
int irq;
@@ -788,8 +787,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 62e6c8badd22..c8686558429b 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -23,6 +23,7 @@
/* gain to pulse and scale conversion */
#define HX711_GAIN_MAX 3
+#define HX711_RESET_GAIN 128
struct hx711_gain_to_scale {
int gain;
@@ -185,8 +186,7 @@ static int hx711_wait_for_ready(struct hx711_data *hx711_data)
static int hx711_reset(struct hx711_data *hx711_data)
{
- int ret;
- int val = gpiod_get_value(hx711_data->gpiod_dout);
+ int val = hx711_wait_for_ready(hx711_data);
if (val) {
/*
@@ -202,22 +202,10 @@ static int hx711_reset(struct hx711_data *hx711_data)
msleep(10);
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
- ret = hx711_wait_for_ready(hx711_data);
- if (ret)
- return ret;
- /*
- * after a reset the gain is 128 so we do a dummy read
- * to set the gain for the next read
- */
- ret = hx711_read(hx711_data);
- if (ret < 0)
- return ret;
-
- /*
- * after a dummy read we need to wait vor readiness
- * for not mixing gain pulses with the clock
- */
val = hx711_wait_for_ready(hx711_data);
+
+ /* after a reset the gain is 128 */
+ hx711_data->gain_set = HX711_RESET_GAIN;
}
return val;
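
Note: the simplified hx711_reset() leans on documented part behaviour: holding PD_SCK high for more than 60 us powers the chip down, and it wakes with a gain of 128, so the old dummy read that forced the gain can be replaced by recording HX711_RESET_GAIN and letting the next regular read program the wanted gain. Condensed, assuming the driver's hx711_data fields:

	gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
	msleep(10);		/* far above the 60 us power-down minimum */
	gpiod_set_value(hx711_data->gpiod_pd_sck, 0);

	hx711_data->gain_set = HX711_RESET_GAIN;	/* 128 after reset */
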
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index e234970b7150..39c0a609fc94 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -25,9 +25,13 @@
#define JZ_ADC_REG_ADSDAT 0x20
#define JZ_ADC_REG_ADCLK 0x28
+#define JZ_ADC_REG_ENABLE_PD BIT(7)
+#define JZ_ADC_REG_CFG_AUX_MD (BIT(0) | BIT(1))
#define JZ_ADC_REG_CFG_BAT_MD BIT(4)
#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
-#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+#define JZ4725B_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+#define JZ4770_ADC_REG_ADCLK_CLKDIV10US_LSB 8
+#define JZ4770_ADC_REG_ADCLK_CLKDIVMS_LSB 16
#define JZ_ADC_AUX_VREF 3300
#define JZ_ADC_AUX_VREF_BITS 12
@@ -37,6 +41,8 @@
#define JZ4725B_ADC_BATTERY_HIGH_VREF_BITS 10
#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+#define JZ4770_ADC_BATTERY_VREF 6600
+#define JZ4770_ADC_BATTERY_VREF_BITS 12
struct ingenic_adc;
@@ -47,6 +53,8 @@ struct ingenic_adc_soc_data {
size_t battery_raw_avail_size;
const int *battery_scale_avail;
size_t battery_scale_avail_size;
+ unsigned int battery_vref_mode: 1;
+ unsigned int has_aux2: 1;
int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
};
@@ -54,6 +62,7 @@ struct ingenic_adc {
void __iomem *base;
struct clk *clk;
struct mutex lock;
+ struct mutex aux_lock;
const struct ingenic_adc_soc_data *soc_data;
bool low_vref_mode;
};
@@ -120,6 +129,8 @@ static int ingenic_adc_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->channel) {
case INGENIC_ADC_BATTERY:
+ if (!adc->soc_data->battery_vref_mode)
+ return -EINVAL;
if (val > JZ_ADC_BATTERY_LOW_VREF) {
ingenic_adc_set_config(adc,
JZ_ADC_REG_CFG_BAT_MD,
@@ -158,6 +169,14 @@ static const int jz4740_adc_battery_scale_avail[] = {
JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
};
+static const int jz4770_adc_battery_raw_avail[] = {
+ 0, 1, (1 << JZ4770_ADC_BATTERY_VREF_BITS) - 1,
+};
+
+static const int jz4770_adc_battery_scale_avail[] = {
+ JZ4770_ADC_BATTERY_VREF, JZ4770_ADC_BATTERY_VREF_BITS,
+};
+
static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
{
struct clk *parent_clk;
@@ -187,7 +206,45 @@ static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
/* We also need a divider that produces a 10us clock. */
div_10us = DIV_ROUND_UP(rate, 100000);
- writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ writel(((div_10us - 1) << JZ4725B_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
+ adc->base + JZ_ADC_REG_ADCLK);
+
+ return 0;
+}
+
+static int jz4770_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
+{
+ struct clk *parent_clk;
+ unsigned long parent_rate, rate;
+ unsigned int div_main, div_ms, div_10us;
+
+ parent_clk = clk_get_parent(adc->clk);
+ if (!parent_clk) {
+ dev_err(dev, "ADC clock has no parent\n");
+ return -ENODEV;
+ }
+ parent_rate = clk_get_rate(parent_clk);
+
+ /*
+ * The JZ4770 ADC works at 20 kHz to 200 kHz.
+ * We pick the highest rate possible.
+ */
+ div_main = DIV_ROUND_UP(parent_rate, 200000);
+ div_main = clamp(div_main, 1u, 256u);
+ rate = parent_rate / div_main;
+ if (rate < 20000 || rate > 200000) {
+ dev_err(dev, "No valid divider for ADC main clock\n");
+ return -EINVAL;
+ }
+
+ /* We also need a divider that produces a 10us clock. */
+ div_10us = DIV_ROUND_UP(rate, 10000);
+ /* And another, which produces a 1ms clock. */
+ div_ms = DIV_ROUND_UP(rate, 1000);
+
+ writel(((div_ms - 1) << JZ4770_ADC_REG_ADCLK_CLKDIVMS_LSB) |
+ ((div_10us - 1) << JZ4770_ADC_REG_ADCLK_CLKDIV10US_LSB) |
(div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
adc->base + JZ_ADC_REG_ADCLK);
@@ -201,6 +258,8 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
.battery_scale_avail = jz4725b_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
+ .battery_vref_mode = true,
+ .has_aux2 = false,
.init_clk_div = jz4725b_adc_init_clk_div,
};
@@ -211,9 +270,23 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
.battery_scale_avail = jz4740_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
+ .battery_vref_mode = true,
+ .has_aux2 = false,
.init_clk_div = NULL, /* no ADCLK register on JZ4740 */
};
+static const struct ingenic_adc_soc_data jz4770_adc_soc_data = {
+ .battery_high_vref = JZ4770_ADC_BATTERY_VREF,
+ .battery_high_vref_bits = JZ4770_ADC_BATTERY_VREF_BITS,
+ .battery_raw_avail = jz4770_adc_battery_raw_avail,
+ .battery_raw_avail_size = ARRAY_SIZE(jz4770_adc_battery_raw_avail),
+ .battery_scale_avail = jz4770_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4770_adc_battery_scale_avail),
+ .battery_vref_mode = false,
+ .has_aux2 = true,
+ .init_clk_div = jz4770_adc_init_clk_div,
+};
+
static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
const int **vals,
@@ -239,6 +312,42 @@ static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
};
}
+static int ingenic_adc_read_chan_info_raw(struct ingenic_adc *adc,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int bit, ret, engine = (chan->channel == INGENIC_ADC_BATTERY);
+
+ /* We cannot sample AUX/AUX2 in parallel. */
+ mutex_lock(&adc->aux_lock);
+ if (adc->soc_data->has_aux2 && engine == 0) {
+ bit = BIT(chan->channel == INGENIC_ADC_AUX2);
+ ingenic_adc_set_config(adc, JZ_ADC_REG_CFG_AUX_MD, bit);
+ }
+
+ clk_enable(adc->clk);
+ ret = ingenic_adc_capture(adc, engine);
+ if (ret)
+ goto out;
+
+ switch (chan->channel) {
+ case INGENIC_ADC_AUX:
+ case INGENIC_ADC_AUX2:
+ *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
+ break;
+ case INGENIC_ADC_BATTERY:
+ *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
+ break;
+ }
+
+ ret = IIO_VAL_INT;
+out:
+ clk_disable(adc->clk);
+ mutex_unlock(&adc->aux_lock);
+
+ return ret;
+}
+
static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -246,32 +355,14 @@ static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
long m)
{
struct ingenic_adc *adc = iio_priv(iio_dev);
- int ret;
switch (m) {
case IIO_CHAN_INFO_RAW:
- clk_enable(adc->clk);
- ret = ingenic_adc_capture(adc, chan->channel);
- if (ret) {
- clk_disable(adc->clk);
- return ret;
- }
-
- switch (chan->channel) {
- case INGENIC_ADC_AUX:
- *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
- break;
- case INGENIC_ADC_BATTERY:
- *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
- break;
- }
-
- clk_disable(adc->clk);
-
- return IIO_VAL_INT;
+ return ingenic_adc_read_chan_info_raw(adc, chan, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->channel) {
case INGENIC_ADC_AUX:
+ case INGENIC_ADC_AUX2:
*val = JZ_ADC_AUX_VREF;
*val2 = JZ_ADC_AUX_VREF_BITS;
break;
@@ -322,6 +413,14 @@ static const struct iio_chan_spec ingenic_channels[] = {
.indexed = 1,
.channel = INGENIC_ADC_BATTERY,
},
+ { /* Must always be last in the array. */
+ .extend_name = "aux2",
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .channel = INGENIC_ADC_AUX2,
+ },
};
static int ingenic_adc_probe(struct platform_device *pdev)
@@ -329,7 +428,6 @@ static int ingenic_adc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iio_dev *iio_dev;
struct ingenic_adc *adc;
- struct resource *mem_base;
const struct ingenic_adc_soc_data *soc_data;
int ret;
@@ -343,10 +441,10 @@ static int ingenic_adc_probe(struct platform_device *pdev)
adc = iio_priv(iio_dev);
mutex_init(&adc->lock);
+ mutex_init(&adc->aux_lock);
adc->soc_data = soc_data;
- mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->base = devm_ioremap_resource(dev, mem_base);
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
@@ -374,6 +472,7 @@ static int ingenic_adc_probe(struct platform_device *pdev)
/* Put hardware in a known passive state. */
writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
+ usleep_range(2000, 3000); /* Must wait at least 2ms. */
clk_disable(adc->clk);
ret = devm_add_action_or_reset(dev, ingenic_adc_clk_cleanup, adc->clk);
@@ -387,6 +486,9 @@ static int ingenic_adc_probe(struct platform_device *pdev)
iio_dev->modes = INDIO_DIRECT_MODE;
iio_dev->channels = ingenic_channels;
iio_dev->num_channels = ARRAY_SIZE(ingenic_channels);
+ /* Remove AUX2 from the list of supported channels. */
+ if (!adc->soc_data->has_aux2)
+ iio_dev->num_channels -= 1;
iio_dev->info = &ingenic_adc_info;
ret = devm_iio_device_register(dev, iio_dev);
@@ -400,6 +502,7 @@ static int ingenic_adc_probe(struct platform_device *pdev)
static const struct of_device_id ingenic_adc_of_match[] = {
{ .compatible = "ingenic,jz4725b-adc", .data = &jz4725b_adc_soc_data, },
{ .compatible = "ingenic,jz4740-adc", .data = &jz4740_adc_soc_data, },
+ { .compatible = "ingenic,jz4770-adc", .data = &jz4770_adc_soc_data, },
{ },
};
MODULE_DEVICE_TABLE(of, ingenic_adc_of_match);
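
Note: for a concrete feel of jz4770_adc_init_clk_div(), assume a 12 MHz parent clock (a common EXT crystal on these SoCs):

	/*
	 * div_main = DIV_ROUND_UP(12000000, 200000) = 60  -> rate = 200 kHz
	 * div_10us = DIV_ROUND_UP(200000, 10000)    = 20   (10 us tick)
	 * div_ms   = DIV_ROUND_UP(200000, 1000)     = 200  (1 ms tick)
	 *
	 * Each divider is programmed as (div - 1) into its ADCLK field,
	 * and 200 kHz sits inside the documented 20-200 kHz window.
	 */
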
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
new file mode 100644
index 000000000000..67d096f8180d
--- /dev/null
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADC driver for Basin Cove PMIC
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Bin Yang <bin.yang@intel.com>
+ *
+ * Rewritten for upstream by:
+ * Vincent Pelletier <plr.vincent@gmail.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+
+#include <asm/unaligned.h>
+
+#define BCOVE_GPADCREQ 0xDC
+#define BCOVE_GPADCREQ_BUSY BIT(0)
+#define BCOVE_GPADCREQ_IRQEN BIT(1)
+
+#define BCOVE_ADCIRQ_ALL ( \
+ BCOVE_ADCIRQ_BATTEMP | \
+ BCOVE_ADCIRQ_SYSTEMP | \
+ BCOVE_ADCIRQ_BATTID | \
+ BCOVE_ADCIRQ_VIBATT | \
+ BCOVE_ADCIRQ_CCTICK)
+
+#define BCOVE_ADC_TIMEOUT msecs_to_jiffies(1000)
+
+static const u8 mrfld_adc_requests[] = {
+ BCOVE_ADCIRQ_VIBATT,
+ BCOVE_ADCIRQ_BATTID,
+ BCOVE_ADCIRQ_VIBATT,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_BATTEMP,
+ BCOVE_ADCIRQ_BATTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+};
+
+struct mrfld_adc {
+ struct regmap *regmap;
+ struct completion completion;
+ /* Lock to protect the IPC transfers */
+ struct mutex lock;
+};
+
+static irqreturn_t mrfld_adc_thread_isr(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+
+ complete(&adc->completion);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *result)
+{
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+ struct regmap *regmap = adc->regmap;
+ unsigned int req;
+ long timeout;
+ u8 buf[2];
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0);
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0);
+
+ ret = regmap_read_poll_timeout(regmap, BCOVE_GPADCREQ, req,
+ !(req & BCOVE_GPADCREQ_BUSY),
+ 2000, 1000000);
+ if (ret)
+ goto done;
+
+ req = mrfld_adc_requests[chan->channel];
+ ret = regmap_write(regmap, BCOVE_GPADCREQ, BCOVE_GPADCREQ_IRQEN | req);
+ if (ret)
+ goto done;
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ BCOVE_ADC_TIMEOUT);
+ if (timeout < 0) {
+ ret = timeout;
+ goto done;
+ }
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ ret = regmap_bulk_read(regmap, chan->address, buf, 2);
+ if (ret)
+ goto done;
+
+ *result = get_unaligned_be16(buf);
+ ret = IIO_VAL_INT;
+
+done:
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0xff);
+ regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0xff);
+
+ return ret;
+}
+
+static int mrfld_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ ret = mrfld_adc_single_conv(indio_dev, chan, val);
+ mutex_unlock(&adc->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mrfld_adc_iio_info = {
+ .read_raw = &mrfld_adc_read_raw,
+};
+
+#define BCOVE_ADC_CHANNEL(_type, _channel, _datasheet_name, _address) \
+ { \
+ .indexed = 1, \
+ .type = _type, \
+ .channel = _channel, \
+ .address = _address, \
+ .datasheet_name = _datasheet_name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ }
+
+static const struct iio_chan_spec mrfld_adc_channels[] = {
+ BCOVE_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0", 0xE9),
+ BCOVE_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1", 0xEB),
+ BCOVE_ADC_CHANNEL(IIO_CURRENT, 2, "CH2", 0xED),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 3, "CH3", 0xCC),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 4, "CH4", 0xC8),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 5, "CH5", 0xCA),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 6, "CH6", 0xC2),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 7, "CH7", 0xC4),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 8, "CH8", 0xC6),
+};
+
+static struct iio_map iio_maps[] = {
+ IIO_MAP("CH0", "bcove-battery", "VBATRSLT"),
+ IIO_MAP("CH1", "bcove-battery", "BATTID"),
+ IIO_MAP("CH2", "bcove-battery", "IBATRSLT"),
+ IIO_MAP("CH3", "bcove-temp", "PMICTEMP"),
+ IIO_MAP("CH4", "bcove-temp", "BATTEMP0"),
+ IIO_MAP("CH5", "bcove-temp", "BATTEMP1"),
+ IIO_MAP("CH6", "bcove-temp", "SYSTEMP0"),
+ IIO_MAP("CH7", "bcove-temp", "SYSTEMP1"),
+ IIO_MAP("CH8", "bcove-temp", "SYSTEMP2"),
+ {}
+};
+
+static int mrfld_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct iio_dev *indio_dev;
+ struct mrfld_adc *adc;
+ int irq;
+ int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+
+ mutex_init(&adc->lock);
+ init_completion(&adc->completion);
+ adc->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_adc_thread_isr,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = pdev->name;
+
+ indio_dev->channels = mrfld_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mrfld_adc_channels);
+ indio_dev->info = &mrfld_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_map_array_register(indio_dev, iio_maps);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ goto err_array_unregister;
+
+ return 0;
+
+err_array_unregister:
+ iio_map_array_unregister(indio_dev);
+ return ret;
+}
+
+static int mrfld_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_map_array_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id mrfld_adc_id_table[] = {
+ { .name = "mrfld_bcove_adc" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_adc_id_table);
+
+static struct platform_driver mrfld_adc_driver = {
+ .driver = {
+ .name = "mrfld_bcove_adc",
+ },
+ .probe = mrfld_adc_probe,
+ .remove = mrfld_adc_remove,
+ .id_table = mrfld_adc_id_table,
+};
+module_platform_driver(mrfld_adc_driver);
+
+MODULE_AUTHOR("Bin Yang <bin.yang@intel.com>");
+MODULE_AUTHOR("Vincent Pelletier <plr.vincent@gmail.com>");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("ADC driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
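
Note: the new Basin Cove driver is a textbook completion-based one-shot converter: poll GPADCREQ until the BUSY bit clears, start a request with the interrupt enabled, sleep on a completion the threaded ISR signals, then bulk-read the big-endian result. Also note the private-data allocation passes sizeof(*adc): devm_iio_device_alloc() takes the size of the driver state, not of struct iio_dev. The waiting step, reduced to its shape:

	static int example_wait_result(struct mrfld_adc *adc)
	{
		long timeout;

		timeout = wait_for_completion_interruptible_timeout(&adc->completion,
								    BCOVE_ADC_TIMEOUT);
		if (timeout < 0)
			return timeout;		/* interrupted by a signal */
		if (timeout == 0)
			return -ETIMEDOUT;	/* hardware never answered */
		return 0;
	}
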
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index e400a95f553d..4c6ac6644dc0 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -119,7 +119,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct lpc18xx_adc *adc;
- struct resource *res;
unsigned int clkdiv;
unsigned long rate;
int ret;
@@ -133,8 +132,7 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
adc->dev = &pdev->dev;
mutex_init(&adc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->base = devm_ioremap_resource(&pdev->dev, res);
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 214883458582..e171db20c04a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -63,12 +63,18 @@ enum max1027_id {
max1027,
max1029,
max1031,
+ max1227,
+ max1229,
+ max1231,
};
static const struct spi_device_id max1027_id[] = {
{"max1027", max1027},
{"max1029", max1029},
{"max1031", max1031},
+ {"max1227", max1227},
+ {"max1229", max1229},
+ {"max1231", max1231},
{}
};
MODULE_DEVICE_TABLE(spi, max1027_id);
@@ -78,12 +84,15 @@ static const struct of_device_id max1027_adc_dt_ids[] = {
{ .compatible = "maxim,max1027" },
{ .compatible = "maxim,max1029" },
{ .compatible = "maxim,max1031" },
+ { .compatible = "maxim,max1227" },
+ { .compatible = "maxim,max1229" },
+ { .compatible = "maxim,max1231" },
{},
};
MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
#endif
-#define MAX1027_V_CHAN(index) \
+#define MAX1027_V_CHAN(index, depth) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -93,7 +102,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
.scan_index = index + 1, \
.scan_type = { \
.sign = 'u', \
- .realbits = 10, \
+ .realbits = depth, \
.storagebits = 16, \
.shift = 2, \
.endianness = IIO_BE, \
@@ -115,52 +124,54 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
}, \
}
+#define MAX1X27_CHANNELS(depth) \
+ MAX1027_T_CHAN, \
+ MAX1027_V_CHAN(0, depth), \
+ MAX1027_V_CHAN(1, depth), \
+ MAX1027_V_CHAN(2, depth), \
+ MAX1027_V_CHAN(3, depth), \
+ MAX1027_V_CHAN(4, depth), \
+ MAX1027_V_CHAN(5, depth), \
+ MAX1027_V_CHAN(6, depth), \
+ MAX1027_V_CHAN(7, depth)
+
+#define MAX1X29_CHANNELS(depth) \
+ MAX1X27_CHANNELS(depth), \
+ MAX1027_V_CHAN(8, depth), \
+ MAX1027_V_CHAN(9, depth), \
+ MAX1027_V_CHAN(10, depth), \
+ MAX1027_V_CHAN(11, depth)
+
+#define MAX1X31_CHANNELS(depth) \
+ MAX1X27_CHANNELS(depth), \
+ MAX1X29_CHANNELS(depth), \
+ MAX1027_V_CHAN(12, depth), \
+ MAX1027_V_CHAN(13, depth), \
+ MAX1027_V_CHAN(14, depth), \
+ MAX1027_V_CHAN(15, depth)
+
static const struct iio_chan_spec max1027_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7)
+ MAX1X27_CHANNELS(10),
};
static const struct iio_chan_spec max1029_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7),
- MAX1027_V_CHAN(8),
- MAX1027_V_CHAN(9),
- MAX1027_V_CHAN(10),
- MAX1027_V_CHAN(11)
+ MAX1X29_CHANNELS(10),
};
static const struct iio_chan_spec max1031_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7),
- MAX1027_V_CHAN(8),
- MAX1027_V_CHAN(9),
- MAX1027_V_CHAN(10),
- MAX1027_V_CHAN(11),
- MAX1027_V_CHAN(12),
- MAX1027_V_CHAN(13),
- MAX1027_V_CHAN(14),
- MAX1027_V_CHAN(15)
+ MAX1X31_CHANNELS(10),
+};
+
+static const struct iio_chan_spec max1227_channels[] = {
+ MAX1X27_CHANNELS(12),
+};
+
+static const struct iio_chan_spec max1229_channels[] = {
+ MAX1X29_CHANNELS(12),
+};
+
+static const struct iio_chan_spec max1231_channels[] = {
+ MAX1X31_CHANNELS(12),
};
static const unsigned long max1027_available_scan_masks[] = {
@@ -200,6 +211,21 @@ static const struct max1027_chip_info max1027_chip_info_tbl[] = {
.num_channels = ARRAY_SIZE(max1031_channels),
.available_scan_masks = max1031_available_scan_masks,
},
+ [max1227] = {
+ .channels = max1227_channels,
+ .num_channels = ARRAY_SIZE(max1227_channels),
+ .available_scan_masks = max1027_available_scan_masks,
+ },
+ [max1229] = {
+ .channels = max1229_channels,
+ .num_channels = ARRAY_SIZE(max1229_channels),
+ .available_scan_masks = max1029_available_scan_masks,
+ },
+ [max1231] = {
+ .channels = max1231_channels,
+ .num_channels = ARRAY_SIZE(max1231_channels),
+ .available_scan_masks = max1031_available_scan_masks,
+ },
};
struct max1027_state {
@@ -284,7 +310,7 @@ static int max1027_read_raw(struct iio_dev *indio_dev,
break;
case IIO_VOLTAGE:
*val = 2500;
- *val2 = 10;
+ *val2 = chan->scan_type.realbits;
ret = IIO_VAL_FRACTIONAL_LOG2;
break;
default:
@@ -309,8 +335,11 @@ static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
struct max1027_state *st = iio_priv(indio_dev);
u8 *val = (u8 *)st->buffer;
- if (readval != NULL)
- return -EINVAL;
+ if (readval) {
+ int ret = spi_read(st->spi, val, 2);
+ *readval = be16_to_cpu(st->buffer[0]);
+ return ret;
+ }
*val = (u8)writeval;
return spi_write(st->spi, val, 1);
@@ -427,34 +456,47 @@ static int max1027_probe(struct spi_device *spi)
return -ENOMEM;
}
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- &iio_pollfunc_store_time,
- &max1027_trigger_handler, NULL);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to setup buffer\n");
- return ret;
- }
+ if (spi->irq) {
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &max1027_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to setup buffer\n");
+ return ret;
+ }
- st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
- indio_dev->name);
- if (st->trig == NULL) {
- ret = -ENOMEM;
- dev_err(&indio_dev->dev, "Failed to allocate iio trigger\n");
- return ret;
- }
+ st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
+ indio_dev->name);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ dev_err(&indio_dev->dev,
+ "Failed to allocate iio trigger\n");
+ return ret;
+ }
- st->trig->ops = &max1027_trigger_ops;
- st->trig->dev.parent = &spi->dev;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- iio_trigger_register(st->trig);
+ st->trig->ops = &max1027_trigger_ops;
+ st->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ iio_trigger_register(st->trig);
+
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_FALLING,
+ spi->dev.driver->name,
+ st->trig);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ return ret;
+ }
+ }
- ret = devm_request_threaded_irq(&spi->dev, spi->irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
- IRQF_TRIGGER_FALLING,
- spi->dev.driver->name, st->trig);
+ /* Internal reset */
+ st->reg = MAX1027_RST_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
return ret;
}
@@ -480,5 +522,5 @@ static struct spi_driver max1027_driver = {
module_spi_driver(max1027_driver);
MODULE_AUTHOR("Philippe Reynes <tremyfr@yahoo.fr>");
-MODULE_DESCRIPTION("MAX1027/MAX1029/MAX1031 ADC");
+MODULE_DESCRIPTION("MAX1X27/MAX1X29/MAX1X31 ADC");
MODULE_LICENSE("GPL v2");
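
Note: deriving the scale denominator from chan->scan_type.realbits is what lets one read_raw() serve both chip families. IIO_VAL_FRACTIONAL_LOG2 reports val / 2^val2, so with the 2500 mV reference:

	/*
	 * 10-bit parts (max1027/29/31): 2500 / 2^10 ~= 2.441 mV per LSB
	 * 12-bit parts (max1227/29/31): 2500 / 2^12 ~= 0.610 mV per LSB
	 */
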
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 38bf10085696..465c7625a55a 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -164,7 +164,7 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
case mcp3550_60:
case mcp3551:
case mcp3553: {
- u32 raw = be32_to_cpup((u32 *)adc->rx_buf);
+ u32 raw = be32_to_cpup((__be32 *)adc->rx_buf);
if (!(adc->spi->mode & SPI_CPOL))
raw <<= 1; /* strip Data Ready bit in SPI mode 0,0 */
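
Note: casts like (__be32 *) have no runtime cost; they exist so sparse can type-check endianness conversions. The pattern in isolation (rx_buf is a hypothetical raw SPI receive buffer):

	__be32 wire;	/* value exactly as received from the bus */
	u32 raw;

	memcpy(&wire, rx_buf, sizeof(wire));
	raw = be32_to_cpu(wire);	/* native-endian result */
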
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 3b2fbb7ce431..196c8226381e 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -167,3 +167,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
MODULE_ALIAS("mcb:16z188");
+MODULE_IMPORT_NS(MCB);
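
Note: MODULE_IMPORT_NS(MCB) pairs with symbols the MCB core exports into a symbol namespace; without the import, modpost refuses to link a consumer of those symbols. The producer side looks like this sketch (mcb_example_helper is hypothetical):

	EXPORT_SYMBOL_NS_GPL(mcb_example_helper, MCB);
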
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 7b27306330a3..22a470db9ef8 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1187,7 +1187,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
const struct meson_sar_adc_data *match_data;
struct meson_sar_adc_priv *priv;
struct iio_dev *indio_dev;
- struct resource *res;
void __iomem *base;
int irq, ret;
@@ -1214,8 +1213,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &meson_sar_adc_iio_info;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 7bbb64ca3b32..a4776d924f3a 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -237,7 +237,6 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
{
struct mt6577_auxadc_device *adc_dev;
unsigned long adc_clk_rate;
- struct resource *res;
struct iio_dev *indio_dev;
int ret;
@@ -253,8 +252,7 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
indio_dev->channels = mt6577_auxadc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(mt6577_auxadc_iio_channels);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc_dev->reg_base)) {
dev_err(&pdev->dev, "failed to get auxadc base address\n");
return PTR_ERR(adc_dev->reg_base);
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index 910f3585fa54..a6170a37ebe8 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -183,7 +183,6 @@ static int npcm_adc_probe(struct platform_device *pdev)
int irq;
u32 div;
u32 reg_con;
- struct resource *res;
struct npcm_adc *info;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
@@ -196,8 +195,7 @@ static int npcm_adc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, res);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index c37f201294b2..63ce743ee7af 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -481,7 +481,6 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rcar_gyroadc *priv;
struct iio_dev *indio_dev;
- struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -491,8 +490,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
priv = iio_priv(indio_dev);
priv->dev = dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, mem);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index a6c046575ec3..66b387f9b36d 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -477,13 +477,6 @@ static void sc27xx_adc_disable(void *_data)
SC27XX_MODULE_ADC_EN, 0);
}
-static void sc27xx_adc_free_hwlock(void *_data)
-{
- struct hwspinlock *hwlock = _data;
-
- hwspin_lock_free(hwlock);
-}
-
static int sc27xx_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -520,19 +513,12 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
return ret;
}
- sc27xx_data->hwlock = hwspin_lock_request_specific(ret);
+ sc27xx_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
if (!sc27xx_data->hwlock) {
dev_err(dev, "failed to request hwspinlock\n");
return -ENXIO;
}
- ret = devm_add_action_or_reset(dev, sc27xx_adc_free_hwlock,
- sc27xx_data->hwlock);
- if (ret) {
- dev_err(dev, "failed to add hwspinlock action\n");
- return ret;
- }
-
sc27xx_data->dev = dev;
ret = sc27xx_adc_enable(sc27xx_data);
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index 592b97c464da..0ad536494e8f 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -260,7 +260,6 @@ static int spear_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct spear_adc_state *st;
- struct resource *res;
struct iio_dev *indio_dev = NULL;
int ret = -ENODEV;
int irq;
@@ -279,8 +278,7 @@ static int spear_adc_probe(struct platform_device *pdev)
* (e.g. SPEAr3xx). Let's provide two register base addresses
* to support multi-arch kernels.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- st->adc_base_spear6xx = devm_ioremap_resource(&pdev->dev, res);
+ st->adc_base_spear6xx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(st->adc_base_spear6xx))
return PTR_ERR(st->adc_base_spear6xx);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 93a096a91f8c..6537f4f776c5 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -38,12 +38,12 @@
#define HAS_ANASWVDD BIT(1)
/**
- * stm32_adc_common_regs - stm32 common registers, compatible dependent data
+ * struct stm32_adc_common_regs - stm32 common registers
* @csr: common status register offset
* @ccr: common control register offset
- * @eoc1: adc1 end of conversion flag in @csr
- * @eoc2: adc2 end of conversion flag in @csr
- * @eoc3: adc3 end of conversion flag in @csr
+ * @eoc1_msk: adc1 end of conversion flag in @csr
+ * @eoc2_msk: adc2 end of conversion flag in @csr
+ * @eoc3_msk: adc3 end of conversion flag in @csr
* @ier: interrupt enable register offset for each adc
* @eocie_msk: end of conversion interrupt enable mask in @ier
*/
@@ -60,7 +60,7 @@ struct stm32_adc_common_regs {
struct stm32_adc_priv;
/**
- * stm32_adc_priv_cfg - stm32 core compatible configuration data
+ * struct stm32_adc_priv_cfg - stm32 core compatible configuration data
* @regs: common registers for all instances
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
@@ -79,6 +79,7 @@ struct stm32_adc_priv_cfg {
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
+ * @max_clk_rate: desired maximum clock rate
* @booster: booster supply reference
* @vdd: vdd supply reference
* @vdda: vdda analog supply reference
@@ -95,6 +96,7 @@ struct stm32_adc_priv {
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
+ u32 max_clk_rate;
struct regulator *booster;
struct regulator *vdd;
struct regulator *vdda;
@@ -117,6 +119,7 @@ static int stm32f4_pclk_div[] = {2, 4, 6, 8};
/**
* stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @pdev: platform device
* @priv: stm32 ADC core private data
* Select clock prescaler used for analog conversions, before using ADC.
*/
@@ -140,7 +143,7 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
}
for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
- if ((rate / stm32f4_pclk_div[i]) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / stm32f4_pclk_div[i]) <= priv->max_clk_rate)
break;
}
if (i >= ARRAY_SIZE(stm32f4_pclk_div)) {
@@ -229,7 +232,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (ckmode)
continue;
- if ((rate / div) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / div) <= priv->max_clk_rate)
goto out;
}
}
@@ -249,7 +252,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (!ckmode)
continue;
- if ((rate / div) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / div) <= priv->max_clk_rate)
goto out;
}
@@ -654,6 +657,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct resource *res;
+ u32 max_rate;
int ret;
if (!pdev->dev.of_node)
@@ -730,6 +734,13 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->common.vref_mv = ret / 1000;
dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+ ret = of_property_read_u32(pdev->dev.of_node, "st,max-clk-rate-hz",
+ &max_rate);
+ if (!ret)
+ priv->max_clk_rate = min(max_rate, priv->cfg->max_clk_rate_hz);
+ else
+ priv->max_clk_rate = priv->cfg->max_clk_rate_hz;
+
ret = priv->cfg->clk_sel(pdev, priv);
if (ret < 0)
goto err_hw_stop;
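
Note: capping with min() means the optional st,max-clk-rate-hz property can only lower the analog clock below the datasheet limit, never raise it. Worked example for an stm32f4 (36 MHz datasheet limit, prescalers {2, 4, 6, 8}), assuming a 100 MHz bus clock and st,max-clk-rate-hz = <14000000>:

	/*
	 * max_clk_rate = min(14000000, 36000000) = 14 MHz
	 * 100 MHz / 2 = 50 MHz    (too fast)
	 * 100 MHz / 4 = 25 MHz    (too fast)
	 * 100 MHz / 6 = 16.7 MHz  (too fast)
	 * 100 MHz / 8 = 12.5 MHz  (first divider under the cap -> chosen)
	 */
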
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 73aee5949b6b..3b291d72701c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -102,7 +102,7 @@ struct stm32_adc_calib {
};
/**
- * stm32_adc_regs - stm32 ADC misc registers & bitfield desc
+ * struct stm32_adc_regs - stm32 ADC misc registers & bitfield desc
* @reg: register offset
* @mask: bitfield mask
* @shift: left shift
@@ -114,7 +114,7 @@ struct stm32_adc_regs {
};
/**
- * stm32_adc_regspec - stm32 registers definition, compatible dependent data
+ * struct stm32_adc_regspec - stm32 registers definition
* @dr: data register offset
* @ier_eoc: interrupt enable register & eocie bitfield
* @isr_eoc: interrupt status register & eoc bitfield
@@ -140,7 +140,7 @@ struct stm32_adc_regspec {
struct stm32_adc;
/**
- * stm32_adc_cfg - stm32 compatible configuration data
+ * struct stm32_adc_cfg - stm32 compatible configuration data
* @regs: registers descriptions
* @adc_info: per instance input channels definitions
* @trigs: external trigger sources
@@ -183,8 +183,8 @@ struct stm32_adc_cfg {
* @rx_buf: dma rx buffer cpu address
* @rx_dma_buf: dma rx buffer bus address
* @rx_buf_sz: dma rx buffer size
- * @difsel bitmask to set single-ended/differential channel
- * @pcsel bitmask to preselect channels on some devices
+ * @difsel: bitmask to set single-ended/differential channel
+ * @pcsel: bitmask to preselect channels on some devices
* @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
* @cal: optional calibration data on some devices
* @chan_name: channel name array
@@ -254,7 +254,7 @@ static const struct stm32_adc_info stm32h7_adc_info = {
.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
};
-/**
+/*
* stm32f4_sq - describe regular sequence registers
* - L: sequence len (register & bit field)
* - SQ1..SQ16: sequence entries (register & bit field)
@@ -301,7 +301,7 @@ static struct stm32_adc_trig_info stm32f4_adc_trigs[] = {
{}, /* sentinel */
};
-/**
+/*
* stm32f4_smp_bits[] - describe sampling time register index & bit fields
* Sorted so it can be indexed by channel number.
*/
@@ -392,7 +392,7 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{},
};
-/**
+/*
* stm32h7_smp_bits - describe sampling time register index & bit fields
* Sorted so it can be indexed by channel number.
*/
@@ -994,6 +994,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
/**
* stm32_adc_get_trig_extsel() - Get external trigger selection
+ * @indio_dev: IIO device structure
* @trig: trigger
*
* Returns trigger extsel value, if trig matches, -EINVAL otherwise.
@@ -1297,6 +1298,10 @@ static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
/**
* stm32_adc_debugfs_reg_access - read or write register value
+ * @indio_dev: IIO device structure
+ * @reg: register offset
+ * @writeval: value to write
+ * @readval: value to read
*
* To read a value from an ADC register:
* echo [ADC reg offset] > direct_reg_access
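
Note: these comment tweaks are all about scripts/kernel-doc. A comment opening with the double-star marker is parsed as kernel-doc and must name the item, including the struct keyword, and describe every member and parameter, or the script warns; plain single-star comments are ignored. A well-formed instance:

	/**
	 * struct example_regs - register offsets, compatible dependent
	 * @csr: common status register offset
	 * @ccr: common control register offset
	 */
	struct example_regs {
		u32 csr;
		u32 ccr;
	};
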
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index bd72727fc417..0f88048ea48f 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -175,7 +175,7 @@ static int stmpe_read_raw(struct iio_dev *indio_dev,
static irqreturn_t stmpe_adc_isr(int irq, void *dev_id)
{
struct stmpe_adc *info = (struct stmpe_adc *)dev_id;
- u16 data;
+ __be16 data;
if (info->channel <= STMPE_ADC_LAST_NR) {
int int_sta;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 55c5119fe575..472b08f37fea 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -495,7 +495,7 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
ret = twl4030_madc_disable_irq(madc, i);
if (ret < 0)
dev_dbg(madc->dev, "Disable interrupt failed %d\n", i);
- madc->requests[i].result_pending = 1;
+ madc->requests[i].result_pending = true;
}
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
r = &madc->requests[i];
@@ -507,8 +507,8 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
len = twl4030_madc_read_channels(madc, method->rbase,
r->channels, r->rbuf, r->raw);
/* Free request */
- r->result_pending = 0;
- r->active = 0;
+ r->result_pending = false;
+ r->active = false;
}
mutex_unlock(&madc->lock);
@@ -521,15 +521,15 @@ err_i2c:
*/
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
r = &madc->requests[i];
- if (r->active == 0)
+ if (!r->active)
continue;
method = &twl4030_conversion_methods[r->method];
/* Read results */
len = twl4030_madc_read_channels(madc, method->rbase,
r->channels, r->rbuf, r->raw);
/* Free request */
- r->result_pending = 0;
- r->active = 0;
+ r->result_pending = false;
+ r->active = false;
}
mutex_unlock(&madc->lock);
@@ -652,16 +652,16 @@ static int twl4030_madc_conversion(struct twl4030_madc_request *req)
ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
if (ret < 0)
goto out;
- twl4030_madc->requests[req->method].active = 1;
+ twl4030_madc->requests[req->method].active = true;
/* Wait until conversion is ready (ctrl register returns EOC) */
ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
if (ret) {
- twl4030_madc->requests[req->method].active = 0;
+ twl4030_madc->requests[req->method].active = false;
goto out;
}
ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
req->channels, req->rbuf, req->raw);
- twl4030_madc->requests[req->method].active = 0;
+ twl4030_madc->requests[req->method].active = false;
out:
mutex_unlock(&twl4030_madc->lock);
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 98b30475bbc6..cb7380bf07ca 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -802,7 +802,6 @@ static int vf610_adc_probe(struct platform_device *pdev)
{
struct vf610_adc *info;
struct iio_dev *indio_dev;
- struct resource *mem;
int irq;
int ret;
@@ -815,8 +814,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 4fd389678dba..ec227b358cd6 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1150,7 +1150,6 @@ static int xadc_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
- struct resource *mem;
unsigned int conf0;
struct xadc *xadc;
int ret;
@@ -1180,8 +1179,7 @@ static int xadc_probe(struct platform_device *pdev)
spin_lock_init(&xadc->lock);
INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xadc->base = devm_ioremap_resource(&pdev->dev, mem);
+ xadc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xadc->base))
return PTR_ERR(xadc->base);
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 3a20cb5d9bff..6c175eb1c7a7 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -323,16 +323,16 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
struct atlas_data *data = iio_priv(indio_dev);
int ret;
- ret = iio_triggered_buffer_predisable(indio_dev);
+ ret = atlas_set_interrupt(data, false);
if (ret)
return ret;
- ret = atlas_set_interrupt(data, false);
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
if (ret)
return ret;
- pm_runtime_mark_last_busy(&data->client->dev);
- return pm_runtime_put_autosuspend(&data->client->dev);
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 8cc8fe5e356d..403e8803471a 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -483,7 +483,7 @@ static void sgp_init(struct sgp_data *data)
data->iaq_defval_skip_jiffies =
43 * data->measure_interval_jiffies;
break;
- };
+ }
}
static const struct iio_info sgp_info = {
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index edbb956e81e8..acb9f8ecbb3d 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -117,7 +117,7 @@ static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size)
break;
case SPS30_READ_AUTO_CLEANING_PERIOD:
buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
- buf[1] = (u8)SPS30_AUTO_CLEANING_PERIOD;
+ buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff);
/* fall through */
case SPS30_READ_DATA_READY_FLAG:
case SPS30_READ_DATA:
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index cc42219a64f7..979070196da9 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -60,8 +60,8 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
- AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
+ AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
+ AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
To compile this driver as a module, choose M here: the
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 7df8b4cc295d..61c670f7fc5f 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -327,6 +327,7 @@ enum ad5446_supported_spi_device_ids {
ID_AD5541A,
ID_AD5512A,
ID_AD5553,
+ ID_AD5600,
ID_AD5601,
ID_AD5611,
ID_AD5621,
@@ -381,6 +382,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
.channel = AD5446_CHANNEL(14, 16, 0),
.write = ad5446_write,
},
+ [ID_AD5600] = {
+ .channel = AD5446_CHANNEL(16, 16, 0),
+ .write = ad5446_write,
+ },
[ID_AD5601] = {
.channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
.write = ad5446_write,
@@ -448,6 +453,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
{"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
{"ad5553", ID_AD5553},
+ {"ad5600", ID_AD5600},
{"ad5601", ID_AD5601},
{"ad5611", ID_AD5611},
{"ad5621", ID_AD5621},
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 8de9f40226e6..14bbac6bee98 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -41,6 +41,7 @@ struct ad7303_state {
struct regulator *vdd_reg;
struct regulator *vref_reg;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -79,7 +80,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (pwr_down)
st->config |= AD7303_CFG_POWER_DOWN(chan->channel);
@@ -90,7 +91,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
* mode, so just write one of the DAC channels again */
ad7303_write(st, chan->channel, st->dac_cache[chan->channel]);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return len;
}
@@ -116,7 +117,9 @@ static int ad7303_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
*val = st->dac_cache[chan->channel];
+ mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
vref_uv = ad7303_get_vref(st, chan);
@@ -144,11 +147,11 @@ static int ad7303_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad7303_write(st, chan->address, val);
if (ret == 0)
st->dac_cache[chan->channel] = val;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -211,6 +214,8 @@ static int ad7303_probe(struct spi_device *spi)
st->spi = spi;
+ mutex_init(&st->lock);
+
st->vdd_reg = devm_regulator_get(&spi->dev, "Vdd");
if (IS_ERR(st->vdd_reg))
return PTR_ERR(st->vdd_reg);
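
Note: indio_dev->mlock belongs to the IIO core; drivers are expected to serialize their own device state with a lock they define, which is what the new st->lock does for the config word and the DAC cache. The shape of the pattern:

	struct example_state {
		struct mutex lock;	/* protects config + dac_cache */
		unsigned int config;
		u16 dac_cache[2];
	};

	/* in probe: */
	mutex_init(&st->lock);
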
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 883e84e96609..0ab357bd3633 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -106,7 +106,6 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct lpc18xx_dac *dac;
- struct resource *res;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
@@ -117,8 +116,7 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
dac = iio_priv(indio_dev);
mutex_init(&dac->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dac->base = devm_ioremap_resource(&pdev->dev, res);
+ dac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dac->base))
return PTR_ERR(dac->base);
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index d0fb3124de07..9e6b4cd0a5cc 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -50,6 +51,41 @@ static const struct regmap_config stm32_dac_regmap_cfg = {
.max_register = 0x3fc,
};
+static int stm32_dac_core_hw_start(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+ int ret;
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(dev, "vref enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret < 0) {
+ dev_err(dev, "pclk enable failed: %d\n", ret);
+ goto err_regulator_disable;
+ }
+
+ return 0;
+
+err_regulator_disable:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static void stm32_dac_core_hw_stop(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+
+ clk_disable_unprepare(priv->pclk);
+ regulator_disable(priv->vref);
+}
+
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -66,6 +102,8 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, &priv->common);
+
cfg = (const struct stm32_dac_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
@@ -74,11 +112,19 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (IS_ERR(mmio))
return PTR_ERR(mmio);
- regmap = devm_regmap_init_mmio(dev, mmio, &stm32_dac_regmap_cfg);
+ regmap = devm_regmap_init_mmio_clk(dev, "pclk", mmio,
+ &stm32_dac_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
priv->common.regmap = regmap;
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ dev_err(dev, "pclk get failed\n");
+ return ret;
+ }
+
priv->vref = devm_regulator_get(dev, "vref");
if (IS_ERR(priv->vref)) {
ret = PTR_ERR(priv->vref);
@@ -86,33 +132,22 @@ static int stm32_dac_probe(struct platform_device *pdev)
return ret;
}
- ret = regulator_enable(priv->vref);
- if (ret < 0) {
- dev_err(dev, "vref enable failed\n");
- return ret;
- }
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = stm32_dac_core_hw_start(dev);
+ if (ret)
+ goto err_pm_stop;
ret = regulator_get_voltage(priv->vref);
if (ret < 0) {
dev_err(dev, "vref get voltage failed, %d\n", ret);
- goto err_vref;
+ goto err_hw_stop;
}
priv->common.vref_mv = ret / 1000;
dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
- priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- dev_err(dev, "pclk get failed\n");
- goto err_vref;
- }
-
- ret = clk_prepare_enable(priv->pclk);
- if (ret < 0) {
- dev_err(dev, "pclk enable failed\n");
- goto err_vref;
- }
-
priv->rst = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -128,39 +163,79 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv->common.hfsel ?
STM32H7_DAC_CR_HFSEL : 0);
if (ret)
- goto err_pclk;
+ goto err_hw_stop;
}
- platform_set_drvdata(pdev, &priv->common);
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, dev);
if (ret < 0) {
dev_err(dev, "failed to populate DT children\n");
- goto err_pclk;
+ goto err_hw_stop;
}
+ pm_runtime_put(dev);
+
return 0;
-err_pclk:
- clk_disable_unprepare(priv->pclk);
-err_vref:
- regulator_disable(priv->vref);
+err_hw_stop:
+ stm32_dac_core_hw_stop(dev);
+err_pm_stop:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
static int stm32_dac_remove(struct platform_device *pdev)
{
- struct stm32_dac_common *common = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ of_platform_depopulate(&pdev->dev);
+ stm32_dac_core_hw_stop(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dac_core_resume(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+ int ret;
- of_platform_depopulate(&pdev->dev);
- clk_disable_unprepare(priv->pclk);
- regulator_disable(priv->vref);
+ if (priv->common.hfsel) {
+ /* restore hfsel (maybe lost under low power state) */
+ ret = regmap_update_bits(priv->common.regmap, STM32_DAC_CR,
+ STM32H7_DAC_CR_HFSEL,
+ STM32H7_DAC_CR_HFSEL);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_dac_core_runtime_suspend(struct device *dev)
+{
+ stm32_dac_core_hw_stop(dev);
return 0;
}
+static int __maybe_unused stm32_dac_core_runtime_resume(struct device *dev)
+{
+ return stm32_dac_core_hw_start(dev);
+}
+
+static const struct dev_pm_ops stm32_dac_core_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, stm32_dac_core_resume)
+ SET_RUNTIME_PM_OPS(stm32_dac_core_runtime_suspend,
+ stm32_dac_core_runtime_resume,
+ NULL)
+};
+
static const struct stm32_dac_cfg stm32h7_dac_cfg = {
.has_hfsel = true,
};
@@ -182,6 +257,7 @@ static struct platform_driver stm32_dac_driver = {
.driver = {
.name = "stm32-dac-core",
.of_match_table = stm32_dac_of_match,
+ .pm = &stm32_dac_core_pm_ops,
},
};
module_platform_driver(stm32_dac_driver);
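
Note: the probe sequence above uses the standard runtime-PM bring-up idiom for hardware that is already powered: take a reference without resuming, declare the device active, and only then enable runtime PM, so the core's first action cannot be a spurious resume; the initial reference is dropped once setup succeeds. Reduced to its skeleton, with example_hw_start() standing in for the real start routine:

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = example_hw_start(dev);
	if (ret)
		goto err_pm_stop;
	/* ... remaining setup ... */
	pm_runtime_put(dev);
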
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index cce26a3a6627..f22c1d9129b2 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "stm32-dac-core.h"
@@ -20,6 +21,8 @@
#define STM32_DAC_CHANNEL_2 2
#define STM32_DAC_IS_CHAN_1(ch) ((ch) & STM32_DAC_CHANNEL_1)
+#define STM32_DAC_AUTO_SUSPEND_DELAY_MS 2000
+
/**
* struct stm32_dac - private data of DAC driver
* @common: reference to DAC common data
@@ -49,15 +52,34 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
bool enable)
{
struct stm32_dac *dac = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
u32 msk = STM32_DAC_IS_CHAN_1(ch) ? STM32_DAC_CR_EN1 : STM32_DAC_CR_EN2;
u32 en = enable ? msk : 0;
int ret;
+ /* already enabled / disabled ? */
+ mutex_lock(&indio_dev->mlock);
+ ret = stm32_dac_is_enabled(indio_dev, ch);
+ if (ret < 0 || enable == !!ret) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ }
+
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0) {
dev_err(&indio_dev->dev, "%s failed\n", en ?
"Enable" : "Disable");
- return ret;
+ goto err_put_pm;
}
/*
@@ -68,7 +90,20 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
if (en && dac->common->hfsel)
udelay(1);
+ if (!enable) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
return 0;
+
+err_put_pm:
+ if (enable) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
}
static int stm32_dac_get_value(struct stm32_dac *dac, int channel, int *val)
@@ -272,6 +307,7 @@ static int stm32_dac_chan_of_init(struct iio_dev *indio_dev)
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct stm32_dac *dac;
int ret;
@@ -296,9 +332,61 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- return devm_iio_device_register(&pdev->dev, indio_dev);
+ /* Get stm32-dac-core PM online */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, STM32_DAC_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_pm_put;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ return ret;
}
+static int stm32_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dac_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int channel = indio_dev->channels[0].channel;
+ int ret;
+
+ /* Ensure DAC is disabled before suspend */
+ ret = stm32_dac_is_enabled(indio_dev, channel);
+ if (ret)
+ return ret < 0 ? ret : -EBUSY;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static const struct dev_pm_ops stm32_dac_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dac_suspend, pm_runtime_force_resume)
+};
+
static const struct of_device_id stm32_dac_of_match[] = {
{ .compatible = "st,stm32-dac", },
{},
@@ -307,9 +395,11 @@ MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
static struct platform_driver stm32_dac_driver = {
.probe = stm32_dac_probe,
+ .remove = stm32_dac_remove,
.driver = {
.name = "stm32-dac",
.of_match_table = stm32_dac_of_match,
+ .pm = &stm32_dac_pm_ops,
},
};
module_platform_driver(stm32_dac_driver);
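The set_enable_state rework above holds the runtime PM usage count for as long as a channel is enabled and releases it through the autosuspend path. A sketch of that handshake under hypothetical foo_* names (an assumption, not the driver's API):

#include <linux/pm_runtime.h>

static int foo_hw_set(struct device *dev, bool enable)
{
	return 0;	/* stand-in for the register write */
}

static int foo_set_enable(struct device *dev, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_get_sync(dev);	/* resume the parent */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* rebalance on failure */
			return ret;
		}
	}

	ret = foo_hw_set(dev, enable);

	/* drop the reference on disable, or on a failed enable */
	if (!enable || ret) {
		pm_runtime_mark_last_busy(dev);	/* restart autosuspend timer */
		pm_runtime_put_autosuspend(dev);
	}

	return ret;
}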
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 0ec4d2609ef9..71f8a5c471c4 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -172,7 +172,6 @@ static int vf610_dac_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct vf610_dac *info;
- struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev,
@@ -185,8 +184,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index 236220d6de02..1b84b8e112fe 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -38,10 +38,12 @@ struct adis16080_chip_info {
* @us: actual spi_device to write data
* @info: chip specific parameters
* @buf: transmit or receive buffer
+ * @lock: lock to protect the buffer during reads
**/
struct adis16080_state {
struct spi_device *us;
const struct adis16080_chip_info *info;
+ struct mutex lock;
__be16 buf ____cacheline_aligned;
};
@@ -82,9 +84,9 @@ static int adis16080_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = adis16080_read_sample(indio_dev, chan->address, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -196,6 +198,8 @@ static int adis16080_probe(struct spi_device *spi)
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
+ mutex_init(&st->lock);
+
/* Allocate the comms buffers */
st->us = spi;
st->info = &adis16080_chip_info[id->driver_data];
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index de3f66f89496..79e63c8a2ea8 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -76,9 +76,7 @@ static int adis16130_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
/* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
ret = adis16130_spi_read(indio_dev, chan->address, &temp);
- mutex_unlock(&indio_dev->mlock);
if (ret)
return ret;
*val = temp;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 5bec7ad53d8b..d637d52d051a 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -80,19 +80,19 @@ static ssize_t adis16136_show_serial(struct file *file,
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT1, &lot1);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT2, &lot2);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT3, &lot3);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.4x%.4x%.4x-%.4x\n", lot1, lot2,
@@ -116,7 +116,7 @@ static int adis16136_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -134,7 +134,7 @@ static int adis16136_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -191,7 +191,7 @@ static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
int ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
*freq = 32768 / (t + 1);
@@ -228,7 +228,7 @@ static ssize_t adis16136_read_frequency(struct device *dev,
int ret;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
return ret;
return sprintf(buf, "%d\n", freq);
@@ -256,7 +256,7 @@ static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
int i, ret;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
return ret;
for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
@@ -277,11 +277,11 @@ static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
mutex_lock(&indio_dev->mlock);
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
- if (ret < 0)
+ if (ret)
goto err_unlock;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
goto err_unlock;
*val = freq / adis16136_3db_divisors[val16 & 0x07];
@@ -318,7 +318,7 @@ static int adis16136_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBBIAS:
ret = adis_read_reg_32(&adis16136->adis,
ADIS16136_REG_GYRO_OFF2, &val32);
- if (ret < 0)
+ if (ret)
return ret;
*val = sign_extend32(val32, 31);
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 998fb8d66fe3..981ae2291505 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -154,7 +154,7 @@ static int itg3200_write_raw(struct iio_dev *indio_dev,
t);
mutex_unlock(&indio_dev->mlock);
- return ret;
+ return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 80154bca18b6..8e908a749f95 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -543,7 +543,7 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
toread = bytes_per_datum;
offset = 1;
/* Put in some dummy value */
- fifo_values[0] = 0xAAAA;
+ fifo_values[0] = cpu_to_be16(0xAAAA);
}
ret = regmap_bulk_read(mpu3050->map,
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index c0acbb5d2ffb..57be68b291fa 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index bfe1cdb16846..963ff043eecf 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -278,31 +278,34 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
struct hdc100x_data *data = iio_priv(indio_dev);
int ret;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
/* Buffer is enabled. First set ACQ Mode, then attach poll func */
mutex_lock(&data->lock);
ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
HDC100X_REG_CONFIG_ACQ_MODE);
mutex_unlock(&data->lock);
if (ret)
- return ret;
+ iio_triggered_buffer_predisable(indio_dev);
- return iio_triggered_buffer_postenable(indio_dev);
+ return ret;
}
static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
{
struct hdc100x_data *data = iio_priv(indio_dev);
- int ret;
-
- /* First detach poll func, then reset ACQ mode. OK to disable buffer */
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret)
- return ret;
+ int ret, ret2;
mutex_lock(&data->lock);
ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
mutex_unlock(&data->lock);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
return ret;
}
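The predisable rework above runs both teardown steps even if the first fails and reports the first error. The same pattern in isolation (foo_device_stop is an illustrative stand-in):

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>

static int foo_device_stop(struct iio_dev *indio_dev)
{
	return 0;	/* stand-in for the device-side teardown */
}

static int foo_predisable(struct iio_dev *indio_dev)
{
	int ret, ret2;

	ret = foo_device_stop(indio_dev);
	ret2 = iio_triggered_buffer_predisable(indio_dev);
	if (ret == 0)
		ret = ret2;	/* first failure wins, but both steps ran */

	return ret;
}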
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index f3c7282321a8..60bb1029e759 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -40,6 +40,33 @@ config ADIS16480
source "drivers/iio/imu/bmi160/Kconfig"
+config FXOS8700
+ tristate
+
+config FXOS8700_I2C
+ tristate "NXP FXOS8700 I2C driver"
+ depends on I2C
+ select FXOS8700
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the NXP FXOS8700 m+g combo
+ sensor on I2C.
+
+ This driver can also be built as a module. If so, the module will be
+ called fxos8700_i2c.
+
+config FXOS8700_SPI
+ tristate "NXP FXOS8700 SPI driver"
+ depends on SPI
+ select FXOS8700
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the NXP FXOS8700 m+g combo
+ sensor on SPI.
+
+ This driver can also be built as a module. If so, the module will be
+ called fxos8700_spi.
+
config KMX61
tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
depends on I2C
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 4a6958865504..5237fd4bc384 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -14,6 +14,11 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+
+obj-$(CONFIG_FXOS8700) += fxos8700_core.o
+obj-$(CONFIG_FXOS8700_I2C) += fxos8700_i2c.o
+obj-$(CONFIG_FXOS8700_SPI) += fxos8700_spi.o
+
obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 1631c255deab..e14c8536fd09 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -39,24 +39,24 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
.bits_per_word = 8,
@@ -139,16 +139,16 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->read_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.rx_buf = adis->rx,
@@ -156,8 +156,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->read_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
.bits_per_word = 8,
@@ -229,7 +229,8 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev,
int ret;
ret = adis_read_reg_16(adis, reg, &val16);
- *readval = val16;
+ if (ret == 0)
+ *readval = val16;
return ret;
} else {
@@ -286,7 +287,7 @@ int adis_check_status(struct adis *adis)
int i;
ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
- if (ret < 0)
+ if (ret)
return ret;
status &= adis->data->status_error_mask;
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 0575ff706bd4..44e46dc96e00 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -217,16 +217,16 @@ static ssize_t adis16400_show_serial_number(struct file *file,
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID1, &lot1);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID2, &lot2);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_SERIAL_NUMBER,
&serial_number);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.4x-%.4x-%.4x\n", lot1, lot2,
@@ -249,7 +249,7 @@ static int adis16400_show_product_id(void *arg, u64 *val)
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16400_PRODUCT_ID, &prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -266,7 +266,7 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16400_FLASH_CNT, &flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -327,7 +327,7 @@ static int adis16334_get_freq(struct adis16400_state *st)
uint16_t t;
ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
t >>= ADIS16334_RATE_DIV_SHIFT;
@@ -359,7 +359,7 @@ static int adis16400_get_freq(struct adis16400_state *st)
uint16_t t;
ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
@@ -416,7 +416,7 @@ static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
}
ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
@@ -615,7 +615,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
ret = adis_read_reg_16(&st->adis,
ADIS16400_SENS_AVG,
&val16);
- if (ret < 0) {
+ if (ret) {
mutex_unlock(&indio_dev->mlock);
return ret;
}
@@ -626,12 +626,12 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val2 = (ret % 1000) * 1000;
}
mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = st->variant->get_freq(st);
- if (ret < 0)
+ if (ret)
return ret;
*val = ret / 1000;
*val2 = (ret % 1000) * 1000;
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 6aed9e84abbf..b55812521537 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -80,7 +80,7 @@ static int adis16460_show_serial_number(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
*val = serial;
@@ -98,7 +98,7 @@ static int adis16460_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -116,7 +116,7 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_32(&adis16460->adis, ADIS16460_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -176,7 +176,7 @@ static int adis16460_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
unsigned int freq;
ret = adis_read_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, &t);
- if (ret < 0)
+ if (ret)
return ret;
freq = 2048000 / (t + 1);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8743b2f376e2..748f8bbf184d 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -181,7 +181,7 @@ static ssize_t adis16480_show_firmware_revision(struct file *file,
int ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_REV, &rev);
- if (ret < 0)
+ if (ret)
return ret;
len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
@@ -206,11 +206,11 @@ static ssize_t adis16480_show_firmware_date(struct file *file,
int ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_Y, &year);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n",
@@ -234,7 +234,7 @@ static int adis16480_show_serial_number(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
*val = serial;
@@ -252,7 +252,7 @@ static int adis16480_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -270,7 +270,7 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_32(&adis16480->adis, ADIS16480_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -353,7 +353,7 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
struct adis16480 *st = iio_priv(indio_dev);
uint16_t t;
int ret;
- unsigned freq;
+ unsigned int freq;
unsigned int reg;
if (st->clk_mode == ADIS16480_CLK_PPS)
@@ -362,7 +362,7 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
reg = ADIS16480_REG_DEC_RATE;
ret = adis_read_reg_16(&st->adis, reg, &t);
- if (ret < 0)
+ if (ret)
return ret;
/*
@@ -454,18 +454,20 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev,
case IIO_MAGN:
case IIO_PRESSURE:
ret = adis_read_reg_16(&st->adis, reg, &val16);
- *bias = sign_extend32(val16, 15);
+ if (ret == 0)
+ *bias = sign_extend32(val16, 15);
break;
case IIO_ANGL_VEL:
case IIO_ACCEL:
ret = adis_read_reg_32(&st->adis, reg, &val32);
- *bias = sign_extend32(val32, 31);
+ if (ret == 0)
+ *bias = sign_extend32(val32, 31);
break;
default:
- ret = -EINVAL;
+ ret = -EINVAL;
}
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT;
@@ -492,7 +494,7 @@ static int adis16480_get_calibscale(struct iio_dev *indio_dev,
int ret;
ret = adis_read_reg_16(&st->adis, reg, &val16);
- if (ret < 0)
+ if (ret)
return ret;
*scale = sign_extend32(val16, 15);
@@ -538,7 +540,7 @@ static int adis16480_get_filter_freq(struct iio_dev *indio_dev,
enable_mask = BIT(offset + 2);
ret = adis_read_reg_16(&st->adis, reg, &val);
- if (ret < 0)
+ if (ret)
return ret;
if (!(val & enable_mask))
@@ -564,7 +566,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
enable_mask = BIT(offset + 2);
ret = adis_read_reg_16(&st->adis, reg, &val);
- if (ret < 0)
+ if (ret)
return ret;
if (freq == 0) {
@@ -623,9 +625,13 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
*val2 = (st->chip_info->temp_scale % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_PRESSURE:
- *val = 0;
- *val2 = 4000; /* 40ubar = 0.004 kPa */
- return IIO_VAL_INT_PLUS_MICRO;
+ /*
+ * Max scale is 1310 mbar. The max raw value is 32767,
+ * left-shifted by 16 bits to span the 32-bit range.
+ */
+ *val = 131; /* 1310mbar = 131 kPa */
+ *val2 = 32767 << 16;
+ return IIO_VAL_FRACTIONAL;
default:
return -EINVAL;
}
@@ -786,13 +792,14 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
/*
- * storing the value in rad/degree and the scale in degree
- * gives us the result in rad and better precession than
- * storing the scale directly in rad.
+ * Typically we put IIO_RAD_TO_DEGREE in the denominator, which
+ * is equivalent to IIO_DEGREE_TO_RAD in the numerator and gives
+ * a better approximation. In this case we cannot, because the
+ * result would not fit in a 32-bit variable.
*/
- .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
- .gyro_max_scale = 300,
- .accel_max_val = IIO_M_S_2_TO_G(21973),
+ .gyro_max_val = 22887 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(300),
+ .accel_max_val = IIO_M_S_2_TO_G(21973 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -802,9 +809,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16480] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(12500),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(12500 << 16),
.accel_max_scale = 10,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -814,9 +821,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16485] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(20000),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
.accel_max_scale = 5,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -826,9 +833,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(22500),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(22500 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -838,9 +845,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_1] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 125,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -851,9 +858,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_2] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -864,9 +871,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_3] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 2000,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -877,9 +884,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_1] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 125,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -890,9 +897,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_2] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -903,9 +910,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_3] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 2000,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -919,6 +926,7 @@ static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
.update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
};
static int adis16480_stop_device(struct iio_dev *indio_dev)
@@ -940,7 +948,7 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
int ret;
ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
- if (ret < 0)
+ if (ret)
return ret;
val &= ~ADIS16480_DRDY_EN_MSK;
@@ -1118,7 +1126,7 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, &val);
- if (ret < 0)
+ if (ret)
return ret;
pin = adis16480_of_get_ext_clk_pin(st, of_node);
@@ -1144,7 +1152,7 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
val |= mode;
ret = adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
- if (ret < 0)
+ if (ret)
return ret;
return clk_prepare_enable(st->ext_clk);
diff --git a/drivers/iio/imu/fxos8700.h b/drivers/iio/imu/fxos8700.h
new file mode 100644
index 000000000000..6dfb8d7099e4
--- /dev/null
+++ b/drivers/iio/imu/fxos8700.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FXOS8700_H_
+#define FXOS8700_H_
+
+extern const struct regmap_config fxos8700_regmap_config;
+
+int fxos8700_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi);
+
+#endif /* FXOS8700_H_ */
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
new file mode 100644
index 000000000000..7b47be44ea59
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU (accelerometer plus magnetometer)
+ *
+ * IIO core driver for FXOS8700, with support for I2C/SPI busses
+ *
+ * TODO: Buffer, trigger, and IRQ support
+ */
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include "fxos8700.h"
+
+/* Register Definitions */
+#define FXOS8700_STATUS 0x00
+#define FXOS8700_OUT_X_MSB 0x01
+#define FXOS8700_OUT_X_LSB 0x02
+#define FXOS8700_OUT_Y_MSB 0x03
+#define FXOS8700_OUT_Y_LSB 0x04
+#define FXOS8700_OUT_Z_MSB 0x05
+#define FXOS8700_OUT_Z_LSB 0x06
+#define FXOS8700_F_SETUP 0x09
+#define FXOS8700_TRIG_CFG 0x0a
+#define FXOS8700_SYSMOD 0x0b
+#define FXOS8700_INT_SOURCE 0x0c
+#define FXOS8700_WHO_AM_I 0x0d
+#define FXOS8700_XYZ_DATA_CFG 0x0e
+#define FXOS8700_HP_FILTER_CUTOFF 0x0f
+#define FXOS8700_PL_STATUS 0x10
+#define FXOS8700_PL_CFG 0x11
+#define FXOS8700_PL_COUNT 0x12
+#define FXOS8700_PL_BF_ZCOMP 0x13
+#define FXOS8700_PL_THS_REG 0x14
+#define FXOS8700_A_FFMT_CFG 0x15
+#define FXOS8700_A_FFMT_SRC 0x16
+#define FXOS8700_A_FFMT_THS 0x17
+#define FXOS8700_A_FFMT_COUNT 0x18
+#define FXOS8700_TRANSIENT_CFG 0x1d
+#define FXOS8700_TRANSIENT_SRC 0x1e
+#define FXOS8700_TRANSIENT_THS 0x1f
+#define FXOS8700_TRANSIENT_COUNT 0x20
+#define FXOS8700_PULSE_CFG 0x21
+#define FXOS8700_PULSE_SRC 0x22
+#define FXOS8700_PULSE_THSX 0x23
+#define FXOS8700_PULSE_THSY 0x24
+#define FXOS8700_PULSE_THSZ 0x25
+#define FXOS8700_PULSE_TMLT 0x26
+#define FXOS8700_PULSE_LTCY 0x27
+#define FXOS8700_PULSE_WIND 0x28
+#define FXOS8700_ASLP_COUNT 0x29
+#define FXOS8700_CTRL_REG1 0x2a
+#define FXOS8700_CTRL_REG2 0x2b
+#define FXOS8700_CTRL_REG3 0x2c
+#define FXOS8700_CTRL_REG4 0x2d
+#define FXOS8700_CTRL_REG5 0x2e
+#define FXOS8700_OFF_X 0x2f
+#define FXOS8700_OFF_Y 0x30
+#define FXOS8700_OFF_Z 0x31
+#define FXOS8700_M_DR_STATUS 0x32
+#define FXOS8700_M_OUT_X_MSB 0x33
+#define FXOS8700_M_OUT_X_LSB 0x34
+#define FXOS8700_M_OUT_Y_MSB 0x35
+#define FXOS8700_M_OUT_Y_LSB 0x36
+#define FXOS8700_M_OUT_Z_MSB 0x37
+#define FXOS8700_M_OUT_Z_LSB 0x38
+#define FXOS8700_CMP_X_MSB 0x39
+#define FXOS8700_CMP_X_LSB 0x3a
+#define FXOS8700_CMP_Y_MSB 0x3b
+#define FXOS8700_CMP_Y_LSB 0x3c
+#define FXOS8700_CMP_Z_MSB 0x3d
+#define FXOS8700_CMP_Z_LSB 0x3e
+#define FXOS8700_M_OFF_X_MSB 0x3f
+#define FXOS8700_M_OFF_X_LSB 0x40
+#define FXOS8700_M_OFF_Y_MSB 0x41
+#define FXOS8700_M_OFF_Y_LSB 0x42
+#define FXOS8700_M_OFF_Z_MSB 0x43
+#define FXOS8700_M_OFF_Z_LSB 0x44
+#define FXOS8700_MAX_X_MSB 0x45
+#define FXOS8700_MAX_X_LSB 0x46
+#define FXOS8700_MAX_Y_MSB 0x47
+#define FXOS8700_MAX_Y_LSB 0x48
+#define FXOS8700_MAX_Z_MSB 0x49
+#define FXOS8700_MAX_Z_LSB 0x4a
+#define FXOS8700_MIN_X_MSB 0x4b
+#define FXOS8700_MIN_X_LSB 0x4c
+#define FXOS8700_MIN_Y_MSB 0x4d
+#define FXOS8700_MIN_Y_LSB 0x4e
+#define FXOS8700_MIN_Z_MSB 0x4f
+#define FXOS8700_MIN_Z_LSB 0x50
+#define FXOS8700_TEMP 0x51
+#define FXOS8700_M_THS_CFG 0x52
+#define FXOS8700_M_THS_SRC 0x53
+#define FXOS8700_M_THS_X_MSB 0x54
+#define FXOS8700_M_THS_X_LSB 0x55
+#define FXOS8700_M_THS_Y_MSB 0x56
+#define FXOS8700_M_THS_Y_LSB 0x57
+#define FXOS8700_M_THS_Z_MSB 0x58
+#define FXOS8700_M_THS_Z_LSB 0x59
+#define FXOS8700_M_THS_COUNT 0x5a
+#define FXOS8700_M_CTRL_REG1 0x5b
+#define FXOS8700_M_CTRL_REG2 0x5c
+#define FXOS8700_M_CTRL_REG3 0x5d
+#define FXOS8700_M_INT_SRC 0x5e
+#define FXOS8700_A_VECM_CFG 0x5f
+#define FXOS8700_A_VECM_THS_MSB 0x60
+#define FXOS8700_A_VECM_THS_LSB 0x61
+#define FXOS8700_A_VECM_CNT 0x62
+#define FXOS8700_A_VECM_INITX_MSB 0x63
+#define FXOS8700_A_VECM_INITX_LSB 0x64
+#define FXOS8700_A_VECM_INITY_MSB 0x65
+#define FXOS8700_A_VECM_INITY_LSB 0x66
+#define FXOS8700_A_VECM_INITZ_MSB 0x67
+#define FXOS8700_A_VECM_INITZ_LSB 0x68
+#define FXOS8700_M_VECM_CFG 0x69
+#define FXOS8700_M_VECM_THS_MSB 0x6a
+#define FXOS8700_M_VECM_THS_LSB 0x6b
+#define FXOS8700_M_VECM_CNT 0x6c
+#define FXOS8700_M_VECM_INITX_MSB 0x6d
+#define FXOS8700_M_VECM_INITX_LSB 0x6e
+#define FXOS8700_M_VECM_INITY_MSB 0x6f
+#define FXOS8700_M_VECM_INITY_LSB 0x70
+#define FXOS8700_M_VECM_INITZ_MSB 0x71
+#define FXOS8700_M_VECM_INITZ_LSB 0x72
+#define FXOS8700_A_FFMT_THS_X_MSB 0x73
+#define FXOS8700_A_FFMT_THS_X_LSB 0x74
+#define FXOS8700_A_FFMT_THS_Y_MSB 0x75
+#define FXOS8700_A_FFMT_THS_Y_LSB 0x76
+#define FXOS8700_A_FFMT_THS_Z_MSB 0x77
+#define FXOS8700_A_FFMT_THS_Z_LSB 0x78
+#define FXOS8700_A_TRAN_INIT_MSB 0x79
+#define FXOS8700_A_TRAN_INIT_LSB_X 0x7a
+#define FXOS8700_A_TRAN_INIT_LSB_Y 0x7b
+#define FXOS8700_A_TRAN_INIT_LSB_Z 0x7d
+#define FXOS8700_TM_NVM_LOCK 0x7e
+#define FXOS8700_NVM_DATA0_35 0x80
+#define FXOS8700_NVM_DATA_BNK3 0xa4
+#define FXOS8700_NVM_DATA_BNK2 0xa5
+#define FXOS8700_NVM_DATA_BNK1 0xa6
+#define FXOS8700_NVM_DATA_BNK0 0xa7
+
+/* Bit definitions for FXOS8700_CTRL_REG1 */
+#define FXOS8700_CTRL_ODR_MSK 0x38
+#define FXOS8700_CTRL_ODR_MAX 0x00
+#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3)
+
+/* Bit definitions for FXOS8700_M_CTRL_REG1 */
+#define FXOS8700_HMS_MASK GENMASK(1, 0)
+#define FXOS8700_OS_MASK GENMASK(4, 2)
+
+/* Bit definitions for FXOS8700_M_CTRL_REG2 */
+#define FXOS8700_MAXMIN_RST BIT(2)
+#define FXOS8700_MAXMIN_DIS_THS BIT(3)
+#define FXOS8700_MAXMIN_DIS BIT(4)
+
+#define FXOS8700_ACTIVE 0x01
+#define FXOS8700_ACTIVE_MIN_USLEEP 4000 /* from table 6 in datasheet */
+
+#define FXOS8700_DEVICE_ID 0xC7
+#define FXOS8700_PRE_DEVICE_ID 0xC4
+#define FXOS8700_DATA_BUF_SIZE 3
+
+struct fxos8700_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+ __be16 buf[FXOS8700_DATA_BUF_SIZE] ____cacheline_aligned;
+};
+
+/* Regmap info */
+static const struct regmap_range read_range[] = {
+ {
+ .range_min = FXOS8700_STATUS,
+ .range_max = FXOS8700_A_FFMT_COUNT,
+ }, {
+ .range_min = FXOS8700_TRANSIENT_CFG,
+ .range_max = FXOS8700_A_FFMT_THS_Z_LSB,
+ },
+};
+
+static const struct regmap_range write_range[] = {
+ {
+ .range_min = FXOS8700_F_SETUP,
+ .range_max = FXOS8700_TRIG_CFG,
+ }, {
+ .range_min = FXOS8700_XYZ_DATA_CFG,
+ .range_max = FXOS8700_HP_FILTER_CUTOFF,
+ }, {
+ .range_min = FXOS8700_PL_CFG,
+ .range_max = FXOS8700_A_FFMT_CFG,
+ }, {
+ .range_min = FXOS8700_A_FFMT_THS,
+ .range_max = FXOS8700_TRANSIENT_CFG,
+ }, {
+ .range_min = FXOS8700_TRANSIENT_THS,
+ .range_max = FXOS8700_PULSE_CFG,
+ }, {
+ .range_min = FXOS8700_PULSE_THSX,
+ .range_max = FXOS8700_OFF_Z,
+ }, {
+ .range_min = FXOS8700_M_OFF_X_MSB,
+ .range_max = FXOS8700_M_OFF_Z_LSB,
+ }, {
+ .range_min = FXOS8700_M_THS_CFG,
+ .range_max = FXOS8700_M_THS_CFG,
+ }, {
+ .range_min = FXOS8700_M_THS_X_MSB,
+ .range_max = FXOS8700_M_CTRL_REG3,
+ }, {
+ .range_min = FXOS8700_A_VECM_CFG,
+ .range_max = FXOS8700_A_FFMT_THS_Z_LSB,
+ },
+};
+
+static const struct regmap_access_table driver_read_table = {
+ .yes_ranges = read_range,
+ .n_yes_ranges = ARRAY_SIZE(read_range),
+};
+
+static const struct regmap_access_table driver_write_table = {
+ .yes_ranges = write_range,
+ .n_yes_ranges = ARRAY_SIZE(write_range),
+};
+
+const struct regmap_config fxos8700_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXOS8700_NVM_DATA_BNK0,
+ .rd_table = &driver_read_table,
+ .wr_table = &driver_write_table,
+};
+EXPORT_SYMBOL(fxos8700_regmap_config);
+
+#define FXOS8700_CHANNEL(_type, _axis) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+enum fxos8700_accel_scale_bits {
+ MODE_2G = 0,
+ MODE_4G,
+ MODE_8G,
+};
+
+/* scan indexes follow DATA register order */
+enum fxos8700_scan_axis {
+ FXOS8700_SCAN_ACCEL_X = 0,
+ FXOS8700_SCAN_ACCEL_Y,
+ FXOS8700_SCAN_ACCEL_Z,
+ FXOS8700_SCAN_MAGN_X,
+ FXOS8700_SCAN_MAGN_Y,
+ FXOS8700_SCAN_MAGN_Z,
+ FXOS8700_SCAN_RHALL,
+ FXOS8700_SCAN_TIMESTAMP,
+};
+
+enum fxos8700_sensor {
+ FXOS8700_ACCEL = 0,
+ FXOS8700_MAGN,
+ FXOS8700_NUM_SENSORS /* must be last */
+};
+
+enum fxos8700_int_pin {
+ FXOS8700_PIN_INT1,
+ FXOS8700_PIN_INT2
+};
+
+struct fxos8700_scale {
+ u8 bits;
+ int uscale;
+};
+
+struct fxos8700_odr {
+ u8 bits;
+ int odr;
+ int uodr;
+};
+
+static const struct fxos8700_scale fxos8700_accel_scale[] = {
+ { MODE_2G, 244},
+ { MODE_4G, 488},
+ { MODE_8G, 976},
+};
+
+/*
+ * Accelerometer and magnetometer have the same ODR options, set in the
+ * CTRL_REG1 register. ODR is halved when using both sensors at once in
+ * hybrid mode.
+ */
+static const struct fxos8700_odr fxos8700_odr[] = {
+ {0x00, 800, 0},
+ {0x01, 400, 0},
+ {0x02, 200, 0},
+ {0x03, 100, 0},
+ {0x04, 50, 0},
+ {0x05, 12, 500000},
+ {0x06, 6, 250000},
+ {0x07, 1, 562500},
+};
+
+static const struct iio_chan_spec fxos8700_channels[] = {
+ FXOS8700_CHANNEL(IIO_ACCEL, X),
+ FXOS8700_CHANNEL(IIO_ACCEL, Y),
+ FXOS8700_CHANNEL(IIO_ACCEL, Z),
+ FXOS8700_CHANNEL(IIO_MAGN, X),
+ FXOS8700_CHANNEL(IIO_MAGN, Y),
+ FXOS8700_CHANNEL(IIO_MAGN, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(FXOS8700_SCAN_TIMESTAMP),
+};
+
+static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
+{
+ switch (iio_type) {
+ case IIO_ACCEL:
+ return FXOS8700_ACCEL;
+ case IIO_MAGN:
+ return FXOS8700_MAGN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxos8700_set_active_mode(struct fxos8700_data *data,
+ enum fxos8700_sensor t, bool mode)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, mode);
+ if (ret)
+ return ret;
+
+ usleep_range(FXOS8700_ACTIVE_MIN_USLEEP,
+ FXOS8700_ACTIVE_MIN_USLEEP + 1000);
+
+ return 0;
+}
+
+static int fxos8700_set_scale(struct fxos8700_data *data,
+ enum fxos8700_sensor t, int uscale)
+{
+ int i;
+ static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ if (t == FXOS8700_MAGN) {
+ dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < scale_num; i++)
+ if (fxos8700_accel_scale[i].uscale == uscale)
+ break;
+
+ if (i == scale_num)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ fxos8700_accel_scale[i].bits);
+}
+
+static int fxos8700_get_scale(struct fxos8700_data *data,
+ enum fxos8700_sensor t, int *uscale)
+{
+ int i, ret, val;
+ static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+
+ if (t == FXOS8700_MAGN) {
+ *uscale = 1200; /* Magnetometer is locked at 1200uT */
+ return 0;
+ }
+
+ ret = regmap_read(data->regmap, FXOS8700_XYZ_DATA_CFG, &val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < scale_num; i++) {
+ if (fxos8700_accel_scale[i].bits == (val & 0x3)) {
+ *uscale = fxos8700_accel_scale[i].uscale;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
+ int axis, int *val)
+{
+ u8 base, reg;
+ int ret;
+ enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
+
+ base = type == FXOS8700_ACCEL ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB;
+
+ /* Block read 6 bytes of device output registers to avoid data loss */
+ ret = regmap_bulk_read(data->regmap, base, data->buf,
+ sizeof(data->buf));
+ if (ret)
+ return ret;
+
+ /* Convert axis to buffer index */
+ reg = axis - IIO_MOD_X;
+
+ /* Convert to native endianness */
+ *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+
+ return 0;
+}
+
+static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ int odr, int uodr)
+{
+ int i, ret, val;
+ bool active_mode;
+ static const int odr_num = ARRAY_SIZE(fxos8700_odr);
+
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ active_mode = val & FXOS8700_ACTIVE;
+
+ if (active_mode) {
+ /*
+ * The device must be in standby mode to change any of the
+ * other fields within CTRL_REG1
+ */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ val & ~FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < odr_num; i++)
+ if (fxos8700_odr[i].odr == odr && fxos8700_odr[i].uodr == uodr)
+ break;
+
+ if (i >= odr_num)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
+ fxos8700_odr[i].bits << 3 | active_mode);
+}
+
+static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ int *odr, int *uodr)
+{
+ int i, val, ret;
+ static const int odr_num = ARRAY_SIZE(fxos8700_odr);
+
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ val &= FXOS8700_CTRL_ODR_MSK;
+
+ for (i = 0; i < odr_num; i++)
+ if (val == fxos8700_odr[i].bits)
+ break;
+
+ if (i >= odr_num)
+ return -EINVAL;
+
+ *odr = fxos8700_odr[i].odr;
+ *uodr = fxos8700_odr[i].uodr;
+
+ return 0;
+}
+
+static int fxos8700_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct fxos8700_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = fxos8700_get_data(data, chan->type, chan->channel2, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ ret = fxos8700_get_scale(data, fxos8700_to_sensor(chan->type),
+ val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = fxos8700_get_odr(data, fxos8700_to_sensor(chan->type),
+ val, val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxos8700_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct fxos8700_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return fxos8700_set_scale(data, fxos8700_to_sensor(chan->type),
+ val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return fxos8700_set_odr(data, fxos8700_to_sensor(chan->type),
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+ "1.5625 6.25 12.5 50 100 200 400 800");
+static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
+ "1.5625 6.25 12.5 50 100 200 400 800");
+static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+
+static struct attribute *fxos8700_attrs[] = {
+ &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_magn_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_in_magn_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fxos8700_attrs_group = {
+ .attrs = fxos8700_attrs,
+};
+
+static const struct iio_info fxos8700_info = {
+ .read_raw = fxos8700_read_raw,
+ .write_raw = fxos8700_write_raw,
+ .attrs = &fxos8700_attrs_group,
+};
+
+static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
+{
+ int ret;
+ unsigned int val;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_read(data->regmap, FXOS8700_WHO_AM_I, &val);
+ if (ret) {
+ dev_err(dev, "Error reading chip id\n");
+ return ret;
+ }
+ if (val != FXOS8700_DEVICE_ID && val != FXOS8700_PRE_DEVICE_ID) {
+ dev_err(dev, "Wrong chip id, got %x expected %x or %x\n",
+ val, FXOS8700_DEVICE_ID, FXOS8700_PRE_DEVICE_ID);
+ return -ENODEV;
+ }
+
+ ret = fxos8700_set_active_mode(data, FXOS8700_ACCEL, true);
+ if (ret)
+ return ret;
+
+ ret = fxos8700_set_active_mode(data, FXOS8700_MAGN, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The device must be in standby mode to change any of the other fields
+ * within CTRL_REG1
+ */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, 0x00);
+ if (ret)
+ return ret;
+
+ /* Set max oversample ratio (OSR) and both devices active */
+ ret = regmap_write(data->regmap, FXOS8700_M_CTRL_REG1,
+ FXOS8700_HMS_MASK | FXOS8700_OS_MASK);
+ if (ret)
+ return ret;
+
+ /* Disable and reset the min/max measurements and thresholds */
+ ret = regmap_write(data->regmap, FXOS8700_M_CTRL_REG2,
+ FXOS8700_MAXMIN_RST | FXOS8700_MAXMIN_DIS_THS |
+ FXOS8700_MAXMIN_DIS);
+ if (ret)
+ return ret;
+
+ /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+
+ /* Set for max full-scale range (+/-8G) */
+ return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+}
+
+static void fxos8700_chip_uninit(void *data)
+{
+ struct fxos8700_data *fxos8700_data = data;
+
+ fxos8700_set_active_mode(fxos8700_data, FXOS8700_ACCEL, false);
+ fxos8700_set_active_mode(fxos8700_data, FXOS8700_MAGN, false);
+}
+
+int fxos8700_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi)
+{
+ struct iio_dev *indio_dev;
+ struct fxos8700_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+
+ ret = fxos8700_chip_init(data, use_spi);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, fxos8700_chip_uninit, data);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = fxos8700_channels;
+ indio_dev->num_channels = ARRAY_SIZE(fxos8700_channels);
+ indio_dev->name = name ? name : "fxos8700";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &fxos8700_info;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(fxos8700_core_probe);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 6-Axis Acc and Mag Combo Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
new file mode 100644
index 000000000000..3ceb76366313
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU, I2C bits
+ *
+ * The 7-bit I2C slave address is determined by the SA1 and SA0
+ * logic-level inputs, as shown in the following table:
+ * SA1 | SA0 | Slave Address
+ * 0 | 0 | 0x1E
+ * 0 | 1 | 0x1D
+ * 1 | 0 | 0x1C
+ * 1 | 1 | 0x1F
+ */
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#include "fxos8700.h"
+
+static int fxos8700_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &fxos8700_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return fxos8700_core_probe(&client->dev, regmap, name, false);
+}
+
+static const struct i2c_device_id fxos8700_i2c_id[] = {
+ {"fxos8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fxos8700_i2c_id);
+
+static const struct acpi_device_id fxos8700_acpi_match[] = {
+ {"FXOS8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fxos8700_acpi_match);
+
+static const struct of_device_id fxos8700_of_match[] = {
+ { .compatible = "nxp,fxos8700" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxos8700_of_match);
+
+static struct i2c_driver fxos8700_i2c_driver = {
+ .driver = {
+ .name = "fxos8700_i2c",
+ .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .of_match_table = fxos8700_of_match,
+ },
+ .probe = fxos8700_i2c_probe,
+ .id_table = fxos8700_i2c_id,
+};
+module_i2c_driver(fxos8700_i2c_driver);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/fxos8700_spi.c b/drivers/iio/imu/fxos8700_spi.c
new file mode 100644
index 000000000000..57e7bb6444e7
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_spi.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU, SPI bits
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "fxos8700.h"
+
+static int fxos8700_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &fxos8700_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return fxos8700_core_probe(&spi->dev, regmap, id->name, true);
+}
+
+static const struct spi_device_id fxos8700_spi_id[] = {
+ {"fxos8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, fxos8700_spi_id);
+
+static const struct acpi_device_id fxos8700_acpi_match[] = {
+ {"FXOS8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fxos8700_acpi_match);
+
+static const struct of_device_id fxos8700_of_match[] = {
+ { .compatible = "nxp,fxos8700" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxos8700_of_match);
+
+static struct spi_driver fxos8700_spi_driver = {
+ .probe = fxos8700_spi_probe,
+ .id_table = fxos8700_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .of_match_table = fxos8700_of_match,
+ .name = "fxos8700_spi",
+ },
+};
+module_spi_driver(fxos8700_spi_driver);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index 70ffe0d13d8c..c103441a906b 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -4,10 +4,11 @@
#
obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
-inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
+inv-mpu6050-y := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o \
+ inv_mpu_aux.o inv_mpu_magn.o
obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o
-inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o
+inv-mpu6050-i2c-y := inv_mpu_i2c.o inv_mpu_acpi.o
obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o
-inv-mpu6050-spi-objs := inv_mpu_spi.o
+inv-mpu6050-spi-y := inv_mpu_spi.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
new file mode 100644
index 000000000000..7327e5723f96
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "inv_mpu_aux.h"
+#include "inv_mpu_iio.h"
+
+/*
+ * i2c master auxiliary bus transfer function.
+ * Requires the i2c operations to be set up correctly beforehand.
+ */
+static int inv_mpu_i2c_master_xfer(const struct inv_mpu6050_state *st)
+{
+ /* use a 50Hz sample rate for the transfer */
+ const unsigned int freq = 50;
+ const unsigned int period_ms = 1000 / freq;
+ uint8_t d;
+ unsigned int user_ctrl;
+ int ret;
+
+ /* set sample rate */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(freq);
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* start i2c master */
+ user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ goto error_restore_rate;
+
+ /* wait for xfer: 1 period + half-period margin */
+ msleep(period_ms + period_ms / 2);
+
+ /* stop i2c master */
+ user_ctrl = st->chip_config.user_ctrl;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ goto error_stop_i2c;
+
+ /* restore sample rate */
+ d = st->chip_config.divider;
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ goto error_restore_rate;
+
+ return 0;
+
+error_stop_i2c:
+ regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
+error_restore_rate:
+ regmap_write(st->map, st->reg->sample_rate_div, st->chip_config.divider);
+ return ret;
+}
+
+/**
+ * inv_mpu_aux_init() - init i2c auxiliary bus
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_init(const struct inv_mpu6050_state *st)
+{
+ unsigned int val;
+ int ret;
+
+ /* configure i2c master */
+ val = INV_MPU6050_BITS_I2C_MST_CLK_400KHZ |
+ INV_MPU6050_BIT_WAIT_FOR_ES;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_MST_CTRL, val);
+ if (ret)
+ return ret;
+
+ /* configure i2c master delay */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, 0);
+ if (ret)
+ return ret;
+
+ val = INV_MPU6050_BIT_I2C_SLV0_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV1_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV2_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV3_DLY_EN |
+ INV_MPU6050_BIT_DELAY_ES_SHADOW;
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_MST_DELAY_CTRL, val);
+}
+
+/**
+ * inv_mpu_aux_read() - read register function for i2c auxiliary bus
+ * @st: driver internal state.
+ * @addr: chip I2C address
+ * @reg: chip register address
+ * @val: buffer for storing read bytes
+ * @size: number of bytes to read
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_read(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t *val, size_t size)
+{
+ unsigned int status;
+ int ret;
+
+ if (size > 0x0F)
+ return -EINVAL;
+
+ /* set up i2c SLV0 control: i2c addr, register, enable + size */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
+ INV_MPU6050_BIT_I2C_SLV_RNW | addr);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN | size);
+ if (ret)
+ return ret;
+
+ /* do i2c xfer */
+ ret = inv_mpu_i2c_master_xfer(st);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* disable i2c slave */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* check i2c status */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK)
+ return -EIO;
+
+ /* read back the data latched in the EXT_SENS_DATA registers */
+ return regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
+ val, size);
+
+error_disable_i2c:
+ regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ return ret;
+}
+
+/**
+ * inv_mpu_aux_write() - write register function for i2c auxiliary bus
+ * @st: driver internal state.
+ * @addr: chip i2c Address
+ * @reg: chip register address
+ * @val: 1 byte value to write
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_write(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t val)
+{
+ unsigned int status;
+ int ret;
+
+ /* set up i2c SLV0 control: i2c addr, register, value, enable + size */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0), addr);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(0), val);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN | 1);
+ if (ret)
+ return ret;
+
+ /* do i2c xfer */
+ ret = inv_mpu_i2c_master_xfer(st);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* disable i2c slave */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* check i2c status */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK)
+ return -EIO;
+
+ return 0;
+
+error_disable_i2c:
+ regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ return ret;
+}
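
A minimal usage sketch of the two helpers above (a hypothetical caller, assuming a probed inv_mpu6050_state; not something this patch adds): probing the magnetometer WHOAMI register through the on-chip i2c master. Reads are capped at 15 bytes because the length field of INV_MPU6050_REG_I2C_SLV_CTRL is only 4 bits wide.

/* Hypothetical caller sketch: check the AK8963 WHOAMI over the aux bus. */
static int example_check_magn_whoami(const struct inv_mpu6050_state *st)
{
	uint8_t wia;
	int ret;

	ret = inv_mpu_aux_read(st, 0x0c, 0x00, &wia, sizeof(wia));
	if (ret)
		return ret;

	/* the AK8963 inside MPU9x50 chips reports 0x48 */
	return (wia == 0x48) ? 0 : -ENODEV;
}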
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h
new file mode 100644
index 000000000000..b66997545762
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#ifndef INV_MPU_AUX_H_
+#define INV_MPU_AUX_H_
+
+#include "inv_mpu_iio.h"
+
+int inv_mpu_aux_init(const struct inv_mpu6050_state *st);
+
+int inv_mpu_aux_read(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t *val, size_t size);
+
+int inv_mpu_aux_write(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t val);
+
+#endif /* INV_MPU_AUX_H_ */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 868281b8adb0..45e77b308238 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "inv_mpu_iio.h"
+#include "inv_mpu_magn.h"
/*
* this is the gyro scale translated from dynamic range plus/minus
@@ -103,6 +104,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
+ .magn_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
.user_ctrl = 0,
};
@@ -341,6 +343,11 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
*/
st->chip_period = NSEC_PER_MSEC;
+ /* magn chip init, noop if not present in the chip */
+ result = inv_mpu_magn_probe(st);
+ if (result)
+ goto error_power_off;
+
return inv_mpu6050_set_power_itg(st, false);
error_power_off:
@@ -420,6 +427,9 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
IIO_MOD_X, val);
break;
+ case IIO_MAGN:
+ ret = inv_mpu_magn_read(st, chan->channel2, val);
+ break;
default:
ret = -EINVAL;
break;
@@ -478,6 +488,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
*val2 = INV_MPU6050_TEMP_SCALE;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ return inv_mpu_magn_get_scale(st, chan, val, val2);
default:
return -EINVAL;
}
@@ -719,6 +731,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_power_off;
+ /* update rate for magn, noop if not present in chip */
+ result = inv_mpu_magn_set_rate(st, fifo_rate);
+ if (result)
+ goto fifo_rate_fail_power_off;
+
fifo_rate_fail_power_off:
result |= inv_mpu6050_set_power_itg(st, false);
fifo_rate_fail_unlock:
@@ -804,8 +821,14 @@ inv_get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct inv_mpu6050_state *data = iio_priv(indio_dev);
+ const struct iio_mount_matrix *matrix;
+
+ if (chan->type == IIO_MAGN)
+ matrix = &data->magn_orient;
+ else
+ matrix = &data->orientation;
- return &data->orientation;
+ return matrix;
}
static const struct iio_chan_spec_ext_info inv_ext_info[] = {
@@ -873,6 +896,98 @@ static const unsigned long inv_mpu_scan_masks[] = {
0,
};
+#define INV_MPU9X50_MAGN_CHAN(_chan2, _bits, _index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = inv_ext_info, \
+ }
+
+static const struct iio_chan_spec inv_mpu9250_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
+ /*
+	 * Note that temperature should be read via polling only,
+	 * not as part of the final scan elements output.
+ */
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_OFFSET)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ },
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+
+ /* Magnetometer resolution is 16 bits */
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_X, 16, INV_MPU9X50_SCAN_MAGN_X),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Y, 16, INV_MPU9X50_SCAN_MAGN_Y),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 16, INV_MPU9X50_SCAN_MAGN_Z),
+};
+
+static const unsigned long inv_mpu9x50_scan_masks[] = {
+ /* 3-axis accel */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ /* 3-axis gyro */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 3-axis magn */
+ BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 6-axis accel + gyro */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 6-axis accel + magn */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 6-axis gyro + magn */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 9-axis accel + gyro + magn */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ 0,
+};
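
The available_scan_masks table above restricts buffered capture to whole sensor triplets; for any channel combination requested from userspace, the IIO core programs the first listed mask that covers it. A simplified, single-word sketch of that matching, assuming word-sized masks (the in-kernel version walks arrays of unsigned long bitmaps):

/* Simplified sketch: the first available mask that is a superset wins. */
static unsigned long example_match_scan_mask(const unsigned long *avail,
					     unsigned long requested)
{
	for (; *avail; avail++) {
		if ((requested & ~*avail) == 0)
			return *avail;
	}
	return 0;	/* no usable hardware combination */
}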
+
static const struct iio_chan_spec inv_icm20602_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
{
@@ -1034,14 +1149,14 @@ error_power_off:
return result;
}
-static int inv_mpu_core_enable_regulator(struct inv_mpu6050_state *st)
+static int inv_mpu_core_enable_regulator_vddio(struct inv_mpu6050_state *st)
{
int result;
result = regulator_enable(st->vddio_supply);
if (result) {
dev_err(regmap_get_device(st->map),
- "Failed to enable regulator: %d\n", result);
+ "Failed to enable vddio regulator: %d\n", result);
} else {
/* Give the device a little bit of time to start up. */
usleep_range(35000, 70000);
@@ -1050,21 +1165,29 @@ static int inv_mpu_core_enable_regulator(struct inv_mpu6050_state *st)
return result;
}
-static int inv_mpu_core_disable_regulator(struct inv_mpu6050_state *st)
+static int inv_mpu_core_disable_regulator_vddio(struct inv_mpu6050_state *st)
{
int result;
result = regulator_disable(st->vddio_supply);
if (result)
dev_err(regmap_get_device(st->map),
- "Failed to disable regulator: %d\n", result);
+ "Failed to disable vddio regulator: %d\n", result);
return result;
}
static void inv_mpu_core_disable_regulator_action(void *_data)
{
- inv_mpu_core_disable_regulator(_data);
+ struct inv_mpu6050_state *st = _data;
+ int result;
+
+ result = regulator_disable(st->vdd_supply);
+ if (result)
+ dev_err(regmap_get_device(st->map),
+ "Failed to disable vdd regulator: %d\n", result);
+
+ inv_mpu_core_disable_regulator_vddio(st);
}
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
@@ -1133,6 +1256,15 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return -EINVAL;
}
+ st->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(st->vdd_supply)) {
+ if (PTR_ERR(st->vdd_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vdd regulator %d\n",
+ (int)PTR_ERR(st->vdd_supply));
+
+ return PTR_ERR(st->vdd_supply);
+ }
+
st->vddio_supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(st->vddio_supply)) {
if (PTR_ERR(st->vddio_supply) != -EPROBE_DEFER)
@@ -1142,9 +1274,17 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return PTR_ERR(st->vddio_supply);
}
- result = inv_mpu_core_enable_regulator(st);
- if (result)
+ result = regulator_enable(st->vdd_supply);
+ if (result) {
+ dev_err(dev, "Failed to enable vdd regulator: %d\n", result);
+ return result;
+ }
+
+ result = inv_mpu_core_enable_regulator_vddio(st);
+ if (result) {
+ regulator_disable(st->vdd_supply);
return result;
+ }
result = devm_add_action_or_reset(dev, inv_mpu_core_disable_regulator_action,
st);
@@ -1154,6 +1294,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
+ /* fill magnetometer orientation */
+ result = inv_mpu_magn_set_orient(st);
+ if (result)
+ return result;
+
	/* power is turned on inside check chip type */
result = inv_check_and_setup_chip(st);
if (result)
@@ -1165,9 +1310,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
- if (inv_mpu_bus_setup)
- inv_mpu_bus_setup(indio_dev);
-
dev_set_drvdata(dev, indio_dev);
indio_dev->dev.parent = dev;
/* name will be NULL when enumerated via ACPI */
@@ -1176,14 +1318,37 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
else
indio_dev->name = dev_name(dev);
- if (chip_type == INV_ICM20602) {
+ /* requires parent device set in indio_dev */
+ if (inv_mpu_bus_setup)
+ inv_mpu_bus_setup(indio_dev);
+
+ switch (chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /*
+ * Use magnetometer inside the chip only if there is no i2c
+ * auxiliary device in use.
+ */
+ if (!st->magn_disabled) {
+ indio_dev->channels = inv_mpu9250_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
+ } else {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
+ break;
+ case INV_ICM20602:
indio_dev->channels = inv_icm20602_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
indio_dev->available_scan_masks = inv_icm20602_scan_masks;
- } else {
+ break;
+ default:
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ break;
}
indio_dev->info = &mpu_info;
@@ -1221,7 +1386,7 @@ static int inv_mpu_resume(struct device *dev)
int result;
mutex_lock(&st->lock);
- result = inv_mpu_core_enable_regulator(st);
+ result = inv_mpu_core_enable_regulator_vddio(st);
if (result)
goto out_unlock;
@@ -1239,7 +1404,7 @@ static int inv_mpu_suspend(struct device *dev)
mutex_lock(&st->lock);
result = inv_mpu6050_set_power_itg(st, false);
- inv_mpu_core_disable_regulator(st);
+ inv_mpu_core_disable_regulator_vddio(st);
mutex_unlock(&st->lock);
return result;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 4b8b5a87398c..389cc8505e0e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -68,6 +68,56 @@ static const char *inv_mpu_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static bool inv_mpu_i2c_aux_bus(struct device *dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+
+ switch (st->chip_type) {
+ case INV_ICM20608:
+ case INV_ICM20602:
+ /* no i2c auxiliary bus on the chip */
+ return false;
+ case INV_MPU9250:
+ case INV_MPU9255:
+ if (st->magn_disabled)
+ return true;
+ else
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * MPU9xxx magnetometer support requires disabling the i2c auxiliary bus.
+ * To ensure backward compatibility with existing setups, do not disable
+ * the i2c auxiliary bus if it is already in use.
+ * Check for an i2c-gate node in the devicetree and, if one is present,
+ * mark the magnetometer as disabled.
+ * Only the MPU6500 is supported via ACPI, so no ACPI check is needed.
+ */
+static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
+ struct device_node *mux_node;
+
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ mux_node = of_get_child_by_name(dev->of_node, "i2c-gate");
+ if (mux_node != NULL) {
+ st->magn_disabled = true;
+ dev_warn(dev, "disable internal use of magnetometer\n");
+ }
+ of_node_put(mux_node);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/**
* inv_mpu_probe() - probe function.
* @client: i2c client.
@@ -112,17 +162,12 @@ static int inv_mpu_probe(struct i2c_client *client,
}
result = inv_mpu_core_probe(regmap, client->irq, name,
- NULL, chip_type);
+ inv_mpu_magn_disable, chip_type);
if (result < 0)
return result;
st = iio_priv(dev_get_drvdata(&client->dev));
- switch (st->chip_type) {
- case INV_ICM20608:
- case INV_ICM20602:
- /* no i2c auxiliary bus on the chip */
- break;
- default:
+ if (inv_mpu_i2c_aux_bus(&client->dev)) {
/* declare i2c auxiliary bus */
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
@@ -137,7 +182,6 @@ static int inv_mpu_probe(struct i2c_client *client,
result = inv_mpu_acpi_create_mux_client(client);
if (result)
goto out_del_mux;
- break;
}
return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 51235677c534..f1fb7b6bdab1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -2,6 +2,10 @@
/*
* Copyright (C) 2012 Invensense, Inc.
*/
+
+#ifndef INV_MPU_IIO_H_
+#define INV_MPU_IIO_H_
+
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/mutex.h>
@@ -82,6 +86,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
+ * @magn_fifo_enable: enable magn data output
* @divider: chip sample rate divider (sample rate divider - 1)
*/
struct inv_mpu6050_chip_config {
@@ -90,6 +95,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
+ unsigned int magn_fifo_enable:1;
u8 divider;
u8 user_ctrl;
};
@@ -126,7 +132,11 @@ struct inv_mpu6050_hw {
* @chip_period: chip internal period estimation (~1kHz).
* @it_timestamp: timestamp from previous interrupt.
* @data_timestamp: timestamp for next data sample.
- * @vddio_supply voltage regulator for the chip.
+ * @vdd_supply: VDD voltage regulator for the chip.
+ * @vddio_supply: I/O voltage regulator for the chip.
+ * @magn_disabled: magnetometer disabled for backward compatibility reason.
+ * @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
+ * @magn_orient: magnetometer sensor chip orientation if available.
*/
struct inv_mpu6050_state {
struct mutex lock;
@@ -147,7 +157,11 @@ struct inv_mpu6050_state {
s64 chip_period;
s64 it_timestamp;
s64 data_timestamp;
+ struct regulator *vdd_supply;
struct regulator *vddio_supply;
+ bool magn_disabled;
+ s32 magn_raw_to_gauss[3];
+ struct iio_mount_matrix magn_orient;
};
/*register and associated bit definition*/
@@ -160,9 +174,41 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_SLAVE_0 0x01
+#define INV_MPU6050_BIT_SLAVE_1 0x02
+#define INV_MPU6050_BIT_SLAVE_2 0x04
#define INV_MPU6050_BIT_ACCEL_OUT 0x08
#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_REG_I2C_MST_CTRL 0x24
+#define INV_MPU6050_BITS_I2C_MST_CLK_400KHZ 0x0D
+#define INV_MPU6050_BIT_I2C_MST_P_NSR 0x10
+#define INV_MPU6050_BIT_SLV3_FIFO_EN 0x20
+#define INV_MPU6050_BIT_WAIT_FOR_ES 0x40
+#define INV_MPU6050_BIT_MULT_MST_EN 0x80
+
+/* control I2C slaves from 0 to 3 */
+#define INV_MPU6050_REG_I2C_SLV_ADDR(_x) (0x25 + 3 * (_x))
+#define INV_MPU6050_BIT_I2C_SLV_RNW 0x80
+
+#define INV_MPU6050_REG_I2C_SLV_REG(_x) (0x26 + 3 * (_x))
+
+#define INV_MPU6050_REG_I2C_SLV_CTRL(_x) (0x27 + 3 * (_x))
+#define INV_MPU6050_BIT_SLV_GRP 0x10
+#define INV_MPU6050_BIT_SLV_REG_DIS 0x20
+#define INV_MPU6050_BIT_SLV_BYTE_SW 0x40
+#define INV_MPU6050_BIT_SLV_EN 0x80
+
+/* I2C master delay register */
+#define INV_MPU6050_REG_I2C_SLV4_CTRL 0x34
+#define INV_MPU6050_BITS_I2C_MST_DLY(_x) ((_x) & 0x1F)
+
+#define INV_MPU6050_REG_I2C_MST_STATUS 0x36
+#define INV_MPU6050_BIT_I2C_SLV0_NACK 0x01
+#define INV_MPU6050_BIT_I2C_SLV1_NACK 0x02
+#define INV_MPU6050_BIT_I2C_SLV2_NACK 0x04
+#define INV_MPU6050_BIT_I2C_SLV3_NACK 0x08
+
#define INV_MPU6050_REG_INT_ENABLE 0x38
#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
#define INV_MPU6050_BIT_DMP_INT_EN 0x02
@@ -175,6 +221,18 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_FIFO_OVERFLOW_INT 0x10
#define INV_MPU6050_BIT_RAW_DATA_RDY_INT 0x01
+#define INV_MPU6050_REG_EXT_SENS_DATA 0x49
+
+/* I2C slaves data output from 0 to 3 */
+#define INV_MPU6050_REG_I2C_SLV_DO(_x) (0x63 + (_x))
+
+#define INV_MPU6050_REG_I2C_MST_DELAY_CTRL 0x67
+#define INV_MPU6050_BIT_I2C_SLV0_DLY_EN 0x01
+#define INV_MPU6050_BIT_I2C_SLV1_DLY_EN 0x02
+#define INV_MPU6050_BIT_I2C_SLV2_DLY_EN 0x04
+#define INV_MPU6050_BIT_I2C_SLV3_DLY_EN 0x08
+#define INV_MPU6050_BIT_DELAY_ES_SHADOW 0x80
+
#define INV_MPU6050_REG_USER_CTRL 0x6A
#define INV_MPU6050_BIT_FIFO_RST 0x04
#define INV_MPU6050_BIT_DMP_RST 0x08
@@ -202,6 +260,9 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
+/* magnetometer data for MPU9X50 9-axis chips */
+#define INV_MPU9X50_BYTES_MAGN 7
+
/* ICM20602 FIFO samples include temperature readings */
#define INV_ICM20602_BYTES_PER_TEMP_SENSOR 2
@@ -229,8 +290,8 @@ struct inv_mpu6050_state {
#define INV_ICM20602_TEMP_OFFSET 8170
#define INV_ICM20602_TEMP_SCALE 3060
-/* 6 + 6 round up and plus 8 */
-#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+/* 6 + 6 + 7 (for MPU9x50) = 19, rounded up to 24, plus 8 */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 32
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
#define INV_MPU6050_ACTIVE_HIGH 0x00
@@ -279,6 +340,11 @@ enum inv_mpu6050_scan {
INV_MPU6050_SCAN_GYRO_Y,
INV_MPU6050_SCAN_GYRO_Z,
INV_MPU6050_SCAN_TIMESTAMP,
+
+ INV_MPU9X50_SCAN_MAGN_X = INV_MPU6050_SCAN_GYRO_Z + 1,
+ INV_MPU9X50_SCAN_MAGN_Y,
+ INV_MPU9X50_SCAN_MAGN_Z,
+ INV_MPU9X50_SCAN_TIMESTAMP,
};
/* scan element definition for ICM20602, which includes temperature */
@@ -344,3 +410,5 @@ void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type);
extern const struct dev_pm_ops inv_mpu_pmops;
+
+#endif
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
new file mode 100644
index 000000000000..02735af152c8
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+#include "inv_mpu_aux.h"
+#include "inv_mpu_iio.h"
+#include "inv_mpu_magn.h"
+
+/*
+ * MPU9250 magnetometer is an AKM AK8963 chip on I2C aux bus
+ */
+#define INV_MPU_MAGN_I2C_ADDR 0x0C
+
+#define INV_MPU_MAGN_REG_WIA 0x00
+#define INV_MPU_MAGN_BITS_WIA 0x48
+
+#define INV_MPU_MAGN_REG_ST1 0x02
+#define INV_MPU_MAGN_BIT_DRDY 0x01
+#define INV_MPU_MAGN_BIT_DOR 0x02
+
+#define INV_MPU_MAGN_REG_DATA 0x03
+
+#define INV_MPU_MAGN_REG_ST2 0x09
+#define INV_MPU_MAGN_BIT_HOFL 0x08
+#define INV_MPU_MAGN_BIT_BITM 0x10
+
+#define INV_MPU_MAGN_REG_CNTL1 0x0A
+#define INV_MPU_MAGN_BITS_MODE_PWDN 0x00
+#define INV_MPU_MAGN_BITS_MODE_SINGLE 0x01
+#define INV_MPU_MAGN_BITS_MODE_FUSE 0x0F
+#define INV_MPU_MAGN_BIT_OUTPUT_BIT 0x10
+
+#define INV_MPU_MAGN_REG_CNTL2 0x0B
+#define INV_MPU_MAGN_BIT_SRST 0x01
+
+#define INV_MPU_MAGN_REG_ASAX 0x10
+#define INV_MPU_MAGN_REG_ASAY 0x11
+#define INV_MPU_MAGN_REG_ASAZ 0x12
+
+/* Magnetometer maximum frequency */
+#define INV_MPU_MAGN_FREQ_HZ_MAX 50
+
+static bool inv_magn_supported(const struct inv_mpu6050_state *st)
+{
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* init magnetometer chip */
+static int inv_magn_init(struct inv_mpu6050_state *st)
+{
+ uint8_t val;
+ uint8_t asa[3];
+ int ret;
+
+ /* check whoami */
+ ret = inv_mpu_aux_read(st, INV_MPU_MAGN_I2C_ADDR, INV_MPU_MAGN_REG_WIA,
+ &val, sizeof(val));
+ if (ret)
+ return ret;
+ if (val != INV_MPU_MAGN_BITS_WIA)
+ return -ENODEV;
+
+ /* reset chip */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL2,
+ INV_MPU_MAGN_BIT_SRST);
+ if (ret)
+ return ret;
+
+ /* read fuse ROM data */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL1,
+ INV_MPU_MAGN_BITS_MODE_FUSE);
+ if (ret)
+ return ret;
+
+ ret = inv_mpu_aux_read(st, INV_MPU_MAGN_I2C_ADDR, INV_MPU_MAGN_REG_ASAX,
+ asa, sizeof(asa));
+ if (ret)
+ return ret;
+
+ /* switch back to power-down */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL1,
+ INV_MPU_MAGN_BITS_MODE_PWDN);
+ if (ret)
+ return ret;
+
+ /*
+	 * Sensitivity adjustment and scale to Gauss
+	 *
+	 * Hadj = H * (((ASA - 128) * 0.5 / 128) + 1)
+	 * Factor simplification:
+	 * Hadj = H * ((ASA + 128) / 256)
+	 *
+	 * Sensor sensitivity
+	 * 0.15 uT in 16 bits mode
+	 * 1 uT = 0.01 G and the value is expressed in micro-Gauss (1e6)
+	 * sensitivity = 0.15 uT * 0.01 * 1e6 = 1500
+ *
+ * raw_to_gauss = Hadj * 1500
+ */
+ st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * 1500) / 256;
+
+ return 0;
+}
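
As a sanity check on the arithmetic above, a worked example with a hypothetical fuse-ROM value:

/*
 * Worked example (ASA = 176 is a hypothetical fuse-ROM value):
 *   raw_to_gauss = (176 + 128) * 1500 / 256 = 1781
 * i.e. about 0.001781 G per LSB once reported via
 * IIO_VAL_INT_PLUS_MICRO. The neutral ASA = 128 yields exactly the
 * nominal 1500 (0.15 uT = 1.5 mG per LSB).
 */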
+
+/**
+ * inv_mpu_magn_probe() - probe and setup magnetometer chip
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Probes the chip and sets up all needed i2c transfers.
+ * Noop if there is no magnetometer in the chip.
+ */
+int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
+{
+ int ret;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return 0;
+
+ /* configure i2c master aux port */
+ ret = inv_mpu_aux_init(st);
+ if (ret)
+ return ret;
+
+ /* check and init mag chip */
+ ret = inv_magn_init(st);
+ if (ret)
+ return ret;
+
+ /*
+ * configure mpu i2c master accesses
+	 * i2c SLV0: read sensor data, 7 bytes: data (6 bytes) + ST2
+	 * Byte-swap the data to store it big-endian in odd address groups
+ */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
+ INV_MPU6050_BIT_I2C_SLV_RNW | INV_MPU_MAGN_I2C_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0),
+ INV_MPU_MAGN_REG_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN |
+ INV_MPU6050_BIT_SLV_BYTE_SW |
+ INV_MPU6050_BIT_SLV_GRP |
+ INV_MPU9X50_BYTES_MAGN);
+ if (ret)
+ return ret;
+
+ /* i2c SLV1: launch single measurement */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(1),
+ INV_MPU_MAGN_I2C_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(1),
+ INV_MPU_MAGN_REG_CNTL1);
+ if (ret)
+ return ret;
+
+ /* add 16 bits mode */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1),
+ INV_MPU_MAGN_BITS_MODE_SINGLE |
+ INV_MPU_MAGN_BIT_OUTPUT_BIT);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(1),
+ INV_MPU6050_BIT_SLV_EN | 1);
+}
+
+/**
+ * inv_mpu_magn_set_rate() - set magnetometer sampling rate
+ * @st: driver internal state
+ * @fifo_rate: mpu set fifo rate
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Limit the sampling frequency to the maximum value supported by the
+ * magnetometer chip; higher frequencies result in duplicated data.
+ * Noop if there is no magnetometer in the chip.
+ */
+int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate)
+{
+ uint8_t d;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return 0;
+
+ /*
+ * update i2c master delay to limit mag sampling to max frequency
+ * compute fifo_rate divider d: rate = fifo_rate / (d + 1)
+ */
+ if (fifo_rate > INV_MPU_MAGN_FREQ_HZ_MAX)
+ d = fifo_rate / INV_MPU_MAGN_FREQ_HZ_MAX - 1;
+ else
+ d = 0;
+
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, d);
+}
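
A worked example of the divider computation above:

/*
 * With fifo_rate = 200 Hz: d = 200 / 50 - 1 = 3, so the magnetometer
 * is sampled at 200 / (3 + 1) = 50 Hz and its latest value is simply
 * repeated in the 200 Hz FIFO stream.
 */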
+
+/**
+ * inv_mpu_magn_set_orient() - fill magnetometer mounting matrix
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Fill magnetometer mounting matrix using the provided chip matrix.
+ */
+int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
+{
+ const char *orient;
+ char *str;
+ int i;
+
+ /* fill magnetometer orientation */
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /* x <- y */
+ st->magn_orient.rotation[0] = st->orientation.rotation[3];
+ st->magn_orient.rotation[1] = st->orientation.rotation[4];
+ st->magn_orient.rotation[2] = st->orientation.rotation[5];
+ /* y <- x */
+ st->magn_orient.rotation[3] = st->orientation.rotation[0];
+ st->magn_orient.rotation[4] = st->orientation.rotation[1];
+ st->magn_orient.rotation[5] = st->orientation.rotation[2];
+ /* z <- -z */
+ for (i = 0; i < 3; ++i) {
+ orient = st->orientation.rotation[6 + i];
+			/* length + 2 leaves room for an added minus sign and NUL */
+ str = devm_kzalloc(regmap_get_device(st->map),
+ strlen(orient) + 2, GFP_KERNEL);
+ if (str == NULL)
+ return -ENOMEM;
+ if (strcmp(orient, "0") == 0) {
+ strcpy(str, orient);
+ } else if (orient[0] == '-') {
+ strcpy(str, &orient[1]);
+ } else {
+ str[0] = '-';
+ strcpy(&str[1], orient);
+ }
+ st->magn_orient.rotation[6 + i] = str;
+ }
+ break;
+ default:
+ st->magn_orient = st->orientation;
+ break;
+ }
+
+ return 0;
+}
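
To make the remap concrete, a worked example assuming the identity mounting matrix for the accel/gyro frame:

/*
 * Assuming orientation = "1 0 0 / 0 1 0 / 0 0 1", the code above
 * yields magn_orient = "0 1 0 / 1 0 0 / 0 0 -1", i.e. x <- y,
 * y <- x, z <- -z, matching the AK8963 die orientation inside the
 * MPU9250/MPU9255 package.
 */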
+
+/**
+ * inv_mpu_magn_read() - read magnetometer data
+ * @st: driver internal state
+ * @axis: IIO modifier axis value
+ * @val: store corresponding axis value
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val)
+{
+ unsigned int user_ctrl, status;
+ __be16 data[3];
+ uint8_t addr;
+ uint8_t d;
+ unsigned int period_ms;
+ int ret;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return -ENODEV;
+
+ /* Mag data: X - Y - Z */
+ switch (axis) {
+ case IIO_MOD_X:
+ addr = 0;
+ break;
+ case IIO_MOD_Y:
+ addr = 1;
+ break;
+ case IIO_MOD_Z:
+ addr = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set sample rate to max mag freq */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU_MAGN_FREQ_HZ_MAX);
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* start i2c master, wait for xfer, stop */
+ user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+
+ /* need to wait 2 periods + half-period margin */
+ period_ms = 1000 / INV_MPU_MAGN_FREQ_HZ_MAX;
+ msleep(period_ms * 2 + period_ms / 2);
+ user_ctrl = st->chip_config.user_ctrl;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+
+ /* restore sample rate */
+ d = st->chip_config.divider;
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* check i2c status and read raw data */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK ||
+ status & INV_MPU6050_BIT_I2C_SLV1_NACK)
+ return -EIO;
+
+ ret = regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
+ data, sizeof(data));
+ if (ret)
+ return ret;
+
+ *val = (int16_t)be16_to_cpu(data[addr]);
+
+ return IIO_VAL_INT;
+}
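
A worked example of the wait above:

/*
 * With INV_MPU_MAGN_FREQ_HZ_MAX = 50: period_ms = 1000 / 50 = 20,
 * so the function sleeps 2 * 20 + 20 / 2 = 50 ms, long enough for
 * the i2c master to trigger the single-shot measurement and fetch
 * the result into EXT_SENS_DATA.
 */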
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
new file mode 100644
index 000000000000..b41bd0578478
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#ifndef INV_MPU_MAGN_H_
+#define INV_MPU_MAGN_H_
+
+#include <linux/kernel.h>
+
+#include "inv_mpu_iio.h"
+
+int inv_mpu_magn_probe(struct inv_mpu6050_state *st);
+
+/**
+ * inv_mpu_magn_get_scale() - get magnetometer scale value
+ * @st: driver internal state
+ * @chan: IIO channel
+ * @val: store scale value integer part
+ * @val2: store scale value fractional part (micro)
+ *
+ * Returns IIO value format (IIO_VAL_INT_PLUS_MICRO).
+ */
+static inline int inv_mpu_magn_get_scale(const struct inv_mpu6050_state *st,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2)
+{
+ *val = 0;
+ *val2 = st->magn_raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate);
+
+int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st);
+
+int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val);
+
+#endif /* INV_MPU_MAGN_H_ */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 72d8c5790076..10d16ec5104b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -124,7 +124,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
- st->chip_config.gyro_fifo_enable) {
+ st->chip_config.gyro_fifo_enable ||
+ st->chip_config.magn_fifo_enable) {
result = regmap_write(st->map, st->reg->int_enable,
INV_MPU6050_BIT_DATA_RDY_EN);
if (result)
@@ -141,6 +142,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
+ if (st->chip_config.magn_fifo_enable)
+ d |= INV_MPU6050_BIT_SLAVE_0;
result = regmap_write(st->map, st->reg->fifo_en, d);
if (result)
goto reset_fifo_fail;
@@ -187,7 +190,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
}
if (!(st->chip_config.accl_fifo_enable |
- st->chip_config.gyro_fifo_enable))
+ st->chip_config.gyro_fifo_enable |
+ st->chip_config.magn_fifo_enable))
goto end_session;
bytes_per_datum = 0;
if (st->chip_config.accl_fifo_enable)
@@ -199,6 +203,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (st->chip_type == INV_ICM20602)
bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR;
+ if (st->chip_config.magn_fifo_enable)
+ bytes_per_datum += INV_MPU9X50_BYTES_MAGN;
+
/*
* read fifo_count register to know how many bytes are inside the FIFO
* right now
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index dd55e70b6f77..d7d951927a44 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -5,7 +5,7 @@
#include "inv_mpu_iio.h"
-static void inv_scan_query(struct iio_dev *indio_dev)
+static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -26,6 +26,60 @@ static void inv_scan_query(struct iio_dev *indio_dev)
indio_dev->active_scan_mask);
}
+static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ inv_scan_query_mpu6050(indio_dev);
+
+ /* no magnetometer if i2c auxiliary bus is used */
+ if (st->magn_disabled)
+ return;
+
+ st->chip_config.magn_fifo_enable =
+ test_bit(INV_MPU9X50_SCAN_MAGN_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU9X50_SCAN_MAGN_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU9X50_SCAN_MAGN_Z,
+ indio_dev->active_scan_mask);
+}
+
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return inv_scan_query_mpu9x50(indio_dev);
+ default:
+ return inv_scan_query_mpu6050(indio_dev);
+ }
+}
+
+static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st)
+{
+ unsigned int gyro_skip = 0;
+ unsigned int magn_skip = 0;
+ unsigned int skip_samples;
+
+ /* gyro first sample is out of specs, skip it */
+ if (st->chip_config.gyro_fifo_enable)
+ gyro_skip = 1;
+
+	/* first magnetometer sample is never ready, skip it */
+ if (st->chip_config.magn_fifo_enable)
+ magn_skip = 1;
+
+ /* compute first samples to skip */
+ skip_samples = gyro_skip;
+ if (magn_skip > skip_samples)
+ skip_samples = magn_skip;
+
+ return skip_samples;
+}
+
/**
* inv_mpu6050_set_enable() - enable chip functions.
* @indio_dev: Device driver instance.
@@ -34,6 +88,7 @@ static void inv_scan_query(struct iio_dev *indio_dev)
static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ uint8_t d;
int result;
if (enable) {
@@ -41,14 +96,11 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
return result;
inv_scan_query(indio_dev);
- st->skip_samples = 0;
if (st->chip_config.gyro_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
goto error_power_off;
- /* gyro first sample is out of specs, skip it */
- st->skip_samples = 1;
}
if (st->chip_config.accl_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
@@ -56,22 +108,32 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
goto error_gyro_off;
}
+ if (st->chip_config.magn_fifo_enable) {
+ d = st->chip_config.user_ctrl |
+ INV_MPU6050_BIT_I2C_MST_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
+ if (result)
+ goto error_accl_off;
+ st->chip_config.user_ctrl = d;
+ }
+ st->skip_samples = inv_compute_skip_samples(st);
result = inv_reset_fifo(indio_dev);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
} else {
result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
result = regmap_write(st->map, st->reg->int_enable, 0);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
- result = regmap_write(st->map, st->reg->user_ctrl,
- st->chip_config.user_ctrl);
+ d = st->chip_config.user_ctrl & ~INV_MPU6050_BIT_I2C_MST_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
+ st->chip_config.user_ctrl = d;
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_ACCL_STBY);
@@ -90,6 +152,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
return 0;
+error_magn_off:
+ /* always restore user_ctrl to disable fifo properly */
+ st->chip_config.user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN;
+ regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
error_accl_off:
if (st->chip_config.accl_fifo_enable)
inv_mpu6050_switch_engine(st, false,
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 77aa0e77212d..28f59d09208a 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -12,7 +12,8 @@ config IIO_ST_LSM6DSX
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
- ism330dhcx and the accelerometer/gyroscope of lsm9ds1.
+ ism330dhcx, lsm6dsrx, lsm6ds0 and the accelerometer/gyroscope
+ of lsm9ds1.
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 0fe6999b8257..c605b153be41 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -12,6 +12,7 @@
#define ST_LSM6DSX_H
#include <linux/device.h>
+#include <linux/iio/iio.h>
#define ST_LSM6DS3_DEV_NAME "lsm6ds3"
#define ST_LSM6DS3H_DEV_NAME "lsm6ds3h"
@@ -25,6 +26,8 @@
#define ST_LSM6DS3TRC_DEV_NAME "lsm6ds3tr-c"
#define ST_ISM330DHCX_DEV_NAME "ism330dhcx"
#define ST_LSM9DS1_DEV_NAME "lsm9ds1-imu"
+#define ST_LSM6DS0_DEV_NAME "lsm6ds0"
+#define ST_LSM6DSRX_DEV_NAME "lsm6dsrx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -39,6 +42,8 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DS3TRC_ID,
ST_ISM330DHCX_ID,
ST_LSM9DS1_ID,
+ ST_LSM6DS0_ID,
+ ST_LSM6DSRX_ID,
ST_LSM6DSX_MAX_ID,
};
@@ -54,6 +59,26 @@ enum st_lsm6dsx_hw_id {
* ST_LSM6DSX_TAGGED_SAMPLE_SIZE)
#define ST_LSM6DSX_SHIFT_VAL(val, mask) (((val) << __ffs(mask)) & (mask))
+#define ST_LSM6DSX_CHANNEL_ACC(chan_type, addr, mod, scan_idx) \
+{ \
+ .type = chan_type, \
+ .address = addr, \
+ .modified = 1, \
+ .channel2 = mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = scan_idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .event_spec = &st_lsm6dsx_event, \
+ .num_event_specs = 1, \
+}
+
#define ST_LSM6DSX_CHANNEL(chan_type, addr, mod, scan_idx) \
{ \
.type = chan_type, \
@@ -81,14 +106,16 @@ struct st_lsm6dsx_sensor;
struct st_lsm6dsx_hw;
struct st_lsm6dsx_odr {
- u16 hz;
+ u32 milli_hz;
u8 val;
};
#define ST_LSM6DSX_ODR_LIST_SIZE 6
struct st_lsm6dsx_odr_table_entry {
struct st_lsm6dsx_reg reg;
+
struct st_lsm6dsx_odr odr_avl[ST_LSM6DSX_ODR_LIST_SIZE];
+ int odr_len;
};
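
The hz to milli_hz change above moves the ODR tables to fixed-point milli-hertz, so non-integer rates can be expressed exactly; a sketch of the new representation (a hypothetical standalone entry, matching the tables later in this patch):

/* 12.5 Hz, previously truncated to 13, is now stored exactly. */
static const struct st_lsm6dsx_odr example_odr = {
	.milli_hz = 12500,
	.val = 0x01,
};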
struct st_lsm6dsx_fs {
@@ -132,12 +159,14 @@ struct st_lsm6dsx_fifo_ops {
* @hr_timer: Hw timer resolution register info (addr + mask).
* @fifo_en: Hw timer FIFO enable register info (addr + mask).
* @decimator: Hw timer FIFO decimator register info (addr + mask).
 * @freq_fine: Difference in % of ODR with respect to the typical value.
*/
struct st_lsm6dsx_hw_ts_settings {
struct st_lsm6dsx_reg timer_en;
struct st_lsm6dsx_reg hr_timer;
struct st_lsm6dsx_reg fifo_en;
struct st_lsm6dsx_reg decimator;
+ u8 freq_fine;
};
/**
@@ -164,6 +193,16 @@ struct st_lsm6dsx_shub_settings {
u8 batch_en;
};
+struct st_lsm6dsx_event_settings {
+ struct st_lsm6dsx_reg enable_reg;
+ struct st_lsm6dsx_reg wakeup_reg;
+ u8 wakeup_src_reg;
+ u8 wakeup_src_status_mask;
+ u8 wakeup_src_z_mask;
+ u8 wakeup_src_y_mask;
+ u8 wakeup_src_x_mask;
+};
+
enum st_lsm6dsx_ext_sensor_id {
ST_LSM6DSX_ID_MAGN,
};
@@ -207,12 +246,14 @@ struct st_lsm6dsx_ext_dev_settings {
/**
* struct st_lsm6dsx_settings - ST IMU sensor settings
* @wai: Sensor WhoAmI default value.
- * @int1_addr: Control Register address for INT1
- * @int2_addr: Control Register address for INT2
- * @reset_addr: register address for reset/reboot
+ * @reset: register address for reset.
+ * @boot: register address for boot.
+ * @bdu: register address for Block Data Update.
* @max_fifo_size: Sensor max fifo length in FIFO words.
* @id: List of hw id/device name supported by the driver configuration.
* @channels: IIO channels supported by the device.
+ * @irq_config: interrupts related registers.
+ * @drdy_mask: register info for data-ready mask (addr + mask).
* @odr_table: Hw sensors odr table (Hz + val).
* @fs_table: Hw sensors gain table (gain + val).
* @decimator: List of decimator register info (addr + mask).
@@ -223,9 +264,9 @@ struct st_lsm6dsx_ext_dev_settings {
*/
struct st_lsm6dsx_settings {
u8 wai;
- u8 int1_addr;
- u8 int2_addr;
- u8 reset_addr;
+ struct st_lsm6dsx_reg reset;
+ struct st_lsm6dsx_reg boot;
+ struct st_lsm6dsx_reg bdu;
u16 max_fifo_size;
struct {
enum st_lsm6dsx_hw_id hw_id;
@@ -235,6 +276,17 @@ struct st_lsm6dsx_settings {
const struct iio_chan_spec *chan;
int len;
} channels[2];
+ struct {
+ struct st_lsm6dsx_reg irq1;
+ struct st_lsm6dsx_reg irq2;
+ struct st_lsm6dsx_reg irq1_func;
+ struct st_lsm6dsx_reg irq2_func;
+ struct st_lsm6dsx_reg lir;
+ struct st_lsm6dsx_reg clear_on_read;
+ struct st_lsm6dsx_reg hla;
+ struct st_lsm6dsx_reg od;
+ } irq_config;
+ struct st_lsm6dsx_reg drdy_mask;
struct st_lsm6dsx_odr_table_entry odr_table[2];
struct st_lsm6dsx_fs_table_entry fs_table[2];
struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
@@ -242,6 +294,7 @@ struct st_lsm6dsx_settings {
struct st_lsm6dsx_fifo_ops fifo_ops;
struct st_lsm6dsx_hw_ts_settings ts_settings;
struct st_lsm6dsx_shub_settings shub_settings;
+ struct st_lsm6dsx_event_settings event_settings;
};
enum st_lsm6dsx_sensor_id {
@@ -277,7 +330,7 @@ struct st_lsm6dsx_sensor {
struct st_lsm6dsx_hw *hw;
u32 gain;
- u16 odr;
+ u32 odr;
u16 watermark;
u8 sip;
@@ -301,9 +354,13 @@ struct st_lsm6dsx_sensor {
* @fifo_mode: FIFO operating mode supported by the device.
* @suspend_mask: Suspended sensor bitmask.
* @enable_mask: Enabled sensor bitmask.
+ * @ts_gain: Hw timestamp rate after internal calibration.
* @ts_sip: Total number of timestamp samples in a given pattern.
* @sip: Total number of samples (acc/gyro/ts) in a given pattern.
* @buff: Device read buffer.
+ * @irq_routing: pointer to interrupt routing configuration.
+ * @event_threshold: wakeup event threshold.
+ * @enable_event: enabled event bitmask.
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
*/
@@ -319,9 +376,14 @@ struct st_lsm6dsx_hw {
enum st_lsm6dsx_fifo_mode fifo_mode;
u8 suspend_mask;
u8 enable_mask;
+ s64 ts_gain;
u8 ts_sip;
u8 sip;
+ const struct st_lsm6dsx_reg *irq_routing;
+ u8 event_threshold;
+ u8 enable_event;
+
u8 *buff;
struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
@@ -329,6 +391,13 @@ struct st_lsm6dsx_hw {
const struct st_lsm6dsx_settings *settings;
};
+static const struct iio_event_spec st_lsm6dsx_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE)
+};
+
static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
@@ -346,7 +415,7 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode);
int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw);
int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw);
-int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val);
+int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val);
int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name);
int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable);
int st_lsm6dsx_set_page(struct st_lsm6dsx_hw *hw, bool enable);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index b0f3da1976e4..d416990ae309 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -14,10 +14,10 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX: The FIFO buffer can be
- * configured to store data from gyroscope and accelerometer. Each sample
- * is queued with a tag (1B) indicating data source (gyroscope, accelerometer,
- * hw timer).
+ * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX:
+ * The FIFO buffer can be configured to store data from gyroscope and
+ * accelerometer. Each sample is queued with a tag (1B) indicating data
+ * source (gyroscope, accelerometer, hw timer).
*
* FIFO supported modes:
* - BYPASS: FIFO disabled
@@ -30,8 +30,6 @@
* Denis Ciocca <denis.ciocca@st.com>
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -42,10 +40,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12
-#define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5)
-#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12
-#define ST_LSM6DSX_REG_PP_OD_MASK BIT(4)
#define ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a
#define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0)
#define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3)
@@ -56,7 +50,6 @@
#define ST_LSM6DSX_MAX_FIFO_ODR_VAL 0x08
-#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
#define ST_LSM6DSX_TS_RESET_VAL 0xaa
struct st_lsm6dsx_decimator_entry {
@@ -98,7 +91,7 @@ static int st_lsm6dsx_get_decimator_val(u8 val)
}
static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
- u16 *max_odr, u16 *min_odr)
+ u32 *max_odr, u32 *min_odr)
{
struct st_lsm6dsx_sensor *sensor;
int i;
@@ -113,16 +106,17 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
- *max_odr = max_t(u16, *max_odr, sensor->odr);
- *min_odr = min_t(u16, *min_odr, sensor->odr);
+ *max_odr = max_t(u32, *max_odr, sensor->odr);
+ *min_odr = min_t(u32, *min_odr, sensor->odr);
}
}
static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
{
- u16 max_odr, min_odr, sip = 0, ts_sip = 0;
const struct st_lsm6dsx_reg *ts_dec_reg;
struct st_lsm6dsx_sensor *sensor;
+ u16 sip = 0, ts_sip = 0;
+ u32 max_odr, min_odr;
int err = 0, i;
u8 data;
@@ -429,7 +423,7 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
*/
if (!reset_ts && ts >= 0xff0000)
reset_ts = true;
- ts *= ST_LSM6DSX_TS_SENSITIVITY;
+ ts *= hw->ts_gain;
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
@@ -456,13 +450,19 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
return read_len;
}
+#define ST_LSM6DSX_INVALID_SAMPLE 0x7ffd
static int
st_lsm6dsx_push_tagged_data(struct st_lsm6dsx_hw *hw, u8 tag,
u8 *data, s64 ts)
{
+ s16 val = le16_to_cpu(*(__le16 *)data);
struct st_lsm6dsx_sensor *sensor;
struct iio_dev *iio_dev;
+ /* invalid sample during bootstrap phase */
+ if (val >= ST_LSM6DSX_INVALID_SAMPLE)
+ return -EINVAL;
+
/*
* EXT_TAG are managed in FIFO fashion so ST_LSM6DSX_EXT0_TAG
* corresponds to the first enabled channel, ST_LSM6DSX_EXT1_TAG
@@ -572,7 +572,7 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
*/
if (!reset_ts && ts >= 0xffff0000)
reset_ts = true;
- ts *= ST_LSM6DSX_TS_SENSITIVITY;
+ ts *= hw->ts_gain;
} else {
st_lsm6dsx_push_tagged_data(hw, tag, iio_buff,
ts);
@@ -592,6 +592,9 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
{
int err;
+ if (!hw->settings->fifo_ops.read_fifo)
+ return -ENOTSUPP;
+
mutex_lock(&hw->fifo_lock);
hw->settings->fifo_ops.read_fifo(hw);
@@ -654,25 +657,6 @@ out:
return err;
}
-static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
-{
- struct st_lsm6dsx_hw *hw = private;
-
- return hw->sip > 0 ? IRQ_WAKE_THREAD : IRQ_NONE;
-}
-
-static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
-{
- struct st_lsm6dsx_hw *hw = private;
- int count;
-
- mutex_lock(&hw->fifo_lock);
- count = hw->settings->fifo_ops.read_fifo(hw);
- mutex_unlock(&hw->fifo_lock);
-
- return count ? IRQ_HANDLED : IRQ_NONE;
-}
-
static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
@@ -702,59 +686,8 @@ static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = {
int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
{
- struct device_node *np = hw->dev->of_node;
- struct st_sensors_platform_data *pdata;
struct iio_buffer *buffer;
- unsigned long irq_type;
- bool irq_active_low;
- int i, err;
-
- irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
-
- switch (irq_type) {
- case IRQF_TRIGGER_HIGH:
- case IRQF_TRIGGER_RISING:
- irq_active_low = false;
- break;
- case IRQF_TRIGGER_LOW:
- case IRQF_TRIGGER_FALLING:
- irq_active_low = true;
- break;
- default:
- dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
- return -EINVAL;
- }
-
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_HLACTIVE_ADDR,
- ST_LSM6DSX_REG_HLACTIVE_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_HLACTIVE_MASK,
- irq_active_low));
- if (err < 0)
- return err;
-
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
- (pdata && pdata->open_drain)) {
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_PP_OD_ADDR,
- ST_LSM6DSX_REG_PP_OD_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_PP_OD_MASK,
- 1));
- if (err < 0)
- return err;
-
- irq_type |= IRQF_SHARED;
- }
-
- err = devm_request_threaded_irq(hw->dev, hw->irq,
- st_lsm6dsx_handler_irq,
- st_lsm6dsx_handler_thread,
- irq_type | IRQF_ONESHOT,
- "lsm6dsx", hw);
- if (err) {
- dev_err(hw->dev, "failed to request trigger irq %d\n",
- hw->irq);
- return err;
- }
+ int i;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
if (!hw->iio_devs[i])
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index fd5ebe1e1594..11b2c7bc8041 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -32,7 +32,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM9DS1:
+ * - LSM9DS1/LSM6DS0:
* - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported ODR [Hz]: 15, 60, 119, 238, 476, 952
@@ -48,8 +48,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
@@ -58,17 +61,14 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK BIT(3)
#define ST_LSM6DSX_REG_WHOAMI_ADDR 0x0f
-#define ST_LSM6DSX_REG_RESET_MASK BIT(0)
-#define ST_LSM6DSX_REG_BOOT_MASK BIT(7)
-#define ST_LSM6DSX_REG_BDU_ADDR 0x12
-#define ST_LSM6DSX_REG_BDU_MASK BIT(6)
+
+#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
static const struct iio_chan_spec st_lsm6dsx_acc_channels[] = {
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -89,14 +89,26 @@ static const struct iio_chan_spec st_lsm6ds0_gyro_channels[] = {
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
.wai = 0x68,
- .int1_addr = 0x0c,
- .int2_addr = 0x0d,
- .reset_addr = 0x22,
+ .reset = {
+ .addr = 0x22,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x22,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x22,
+ .mask = BIT(6),
+ },
.max_fifo_size = 32,
.id = {
{
.hw_id = ST_LSM9DS1_ID,
.name = ST_LSM9DS1_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DS0_ID,
+ .name = ST_LSM6DS0_DEV_NAME,
},
},
.channels = {
@@ -115,24 +127,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x20,
.mask = GENMASK(7, 5),
},
- .odr_avl[0] = { 10, 0x01 },
- .odr_avl[1] = { 50, 0x02 },
- .odr_avl[2] = { 119, 0x03 },
- .odr_avl[3] = { 238, 0x04 },
- .odr_avl[4] = { 476, 0x05 },
- .odr_avl[5] = { 952, 0x06 },
+ .odr_avl[0] = { 10000, 0x01 },
+ .odr_avl[1] = { 50000, 0x02 },
+ .odr_avl[2] = { 119000, 0x03 },
+ .odr_avl[3] = { 238000, 0x04 },
+ .odr_avl[4] = { 476000, 0x05 },
+ .odr_avl[5] = { 952000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 5),
},
- .odr_avl[0] = { 15, 0x01 },
- .odr_avl[1] = { 60, 0x02 },
- .odr_avl[2] = { 119, 0x03 },
- .odr_avl[3] = { 238, 0x04 },
- .odr_avl[4] = { 476, 0x05 },
- .odr_avl[5] = { 952, 0x06 },
+ .odr_avl[0] = { 14900, 0x01 },
+ .odr_avl[1] = { 59500, 0x02 },
+ .odr_avl[2] = { 119000, 0x03 },
+ .odr_avl[3] = { 238000, 0x04 },
+ .odr_avl[4] = { 476000, 0x05 },
+ .odr_avl[5] = { 952000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -152,18 +166,46 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(4, 3),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
.fs_len = 3,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0c,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .hla = {
+ .addr = 0x22,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x22,
+ .mask = BIT(4),
+ },
+ },
},
{
.wai = 0x69,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 1365,
.id = {
{
@@ -187,24 +229,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -231,6 +275,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -272,12 +346,32 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x69,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 682,
.id = {
{
@@ -301,24 +395,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -345,6 +441,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -386,12 +512,32 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6a,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 682,
.id = {
{
@@ -424,24 +570,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -468,6 +616,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -509,12 +687,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6c,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -535,30 +737,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -585,6 +793,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -617,6 +859,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
},
.shub_settings = {
.page_mux = {
@@ -643,13 +886,37 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
- }
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5b,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6b,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -667,30 +934,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -717,6 +990,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -749,13 +1056,38 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
},
},
{
.wai = 0x6b,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -764,6 +1096,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
}, {
.hw_id = ST_ISM330DHCX_ID,
.name = ST_ISM330DHCX_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DSRX_ID,
+ .name = ST_LSM6DSRX_DEV_NAME,
},
},
.channels = {
@@ -776,30 +1111,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -826,6 +1167,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -858,6 +1233,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
},
.shub_settings = {
.page_mux = {
@@ -884,6 +1260,21 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
}
},
};
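
The tables above move every ODR entry from Hz to milli-Hz so fractional rates such as 12.5 Hz can be expressed exactly, and add an explicit odr_len so lookups iterate only the populated entries. A minimal sketch of the entry layout implied by this usage (the real definitions live in st_lsm6dsx.h and may differ in detail):

	struct st_lsm6dsx_odr {
		u32 milli_hz;	/* rate in milli-Hz, e.g. 12500 for 12.5 Hz */
		u8 val;		/* register value selecting this rate */
	};

	struct st_lsm6dsx_odr_table_entry {
		struct st_lsm6dsx_reg reg;		/* ODR bitfield: addr + mask */
		struct st_lsm6dsx_odr odr_avl[6];	/* available rates */
		int odr_len;				/* populated entries */
	};
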
@@ -967,36 +1358,37 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
return 0;
}
-int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val)
+int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val)
{
const struct st_lsm6dsx_odr_table_entry *odr_table;
int i;
odr_table = &sensor->hw->settings->odr_table[sensor->id];
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
+ for (i = 0; i < odr_table->odr_len; i++) {
/*
 * ext devices can run at a different ODR with respect to
 * the accel sensor
*/
- if (odr_table->odr_avl[i].hz >= odr)
+ if (odr_table->odr_avl[i].milli_hz >= odr)
break;
+ }
- if (i == ST_LSM6DSX_ODR_LIST_SIZE)
+ if (i == odr_table->odr_len)
return -EINVAL;
*val = odr_table->odr_avl[i].val;
-
- return 0;
+ return odr_table->odr_avl[i].milli_hz;
}
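
st_lsm6dsx_check_odr() now returns the milli-Hz rate it actually selected (the first table entry at or above the request) rather than 0, so callers can record the effective rate. A hypothetical call against the tables above:

	u8 reg_val;
	int milli_hz = st_lsm6dsx_check_odr(sensor, 20000, &reg_val);
	/* selects the 26 Hz entry: milli_hz == 26000, reg_val == 0x02;
	 * returns -EINVAL only if the request exceeds every table entry
	 */
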
-static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr,
- enum st_lsm6dsx_sensor_id id)
+static int
+st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u32 odr,
+ enum st_lsm6dsx_sensor_id id)
{
struct st_lsm6dsx_sensor *ref = iio_priv(hw->iio_devs[id]);
if (odr > 0) {
if (hw->enable_mask & BIT(id))
- return max_t(u16, ref->odr, odr);
+ return max_t(u32, ref->odr, odr);
else
return odr;
} else {
@@ -1004,7 +1396,8 @@ static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr,
}
}
-static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
+static int
+st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
{
struct st_lsm6dsx_sensor *ref_sensor = sensor;
struct st_lsm6dsx_hw *hw = sensor->hw;
@@ -1018,7 +1411,7 @@ static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
case ST_LSM6DSX_ID_EXT1:
case ST_LSM6DSX_ID_EXT2:
case ST_LSM6DSX_ID_ACC: {
- u16 odr;
+ u32 odr;
int i;
/*
@@ -1058,7 +1451,7 @@ int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
bool enable)
{
struct st_lsm6dsx_hw *hw = sensor->hw;
- u16 odr = enable ? sensor->odr : 0;
+ u32 odr = enable ? sensor->odr : 0;
int err;
err = st_lsm6dsx_set_odr(sensor, odr);
@@ -1084,14 +1477,15 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000 / sensor->odr;
+ delay = 1000000000 / sensor->odr;
usleep_range(delay, 2 * delay);
err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
if (err < 0)
return err;
- st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (!hw->enable_event)
+ st_lsm6dsx_sensor_set_enable(sensor, false);
*val = (s16)le16_to_cpu(data);
@@ -1115,8 +1509,9 @@ static int st_lsm6dsx_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr;
- ret = IIO_VAL_INT;
+ *val = sensor->odr / 1000;
+ *val2 = (sensor->odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -1149,8 +1544,11 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ: {
u8 data;
- err = st_lsm6dsx_check_odr(sensor, val, &data);
- if (!err)
+ val = val * 1000 + val2 / 1000;
+ val = st_lsm6dsx_check_odr(sensor, val, &data);
+ if (val < 0)
+ err = val;
+ else
sensor->odr = val;
break;
}
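
The write path folds the IIO integer/micro pair into milli-Hz before the table lookup; a worked example of the conversion:

	/* userspace writes "12.5" to sampling_frequency, so IIO passes
	 * val = 12, val2 = 500000 (IIO_VAL_INT_PLUS_MICRO)
	 */
	int milli_hz = 12 * 1000 + 500000 / 1000;	/* 12500 -> 12.5 Hz */
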
@@ -1164,6 +1562,144 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
return err;
}
+static int st_lsm6dsx_event_setup(struct st_lsm6dsx_hw *hw, int state)
+{
+ const struct st_lsm6dsx_reg *reg;
+ unsigned int data;
+ int err;
+
+ if (!hw->settings->irq_config.irq1_func.addr)
+ return -ENOTSUPP;
+
+ reg = &hw->settings->event_settings.enable_reg;
+ if (reg->addr) {
+ data = ST_LSM6DSX_SHIFT_VAL(state, reg->mask);
+ err = st_lsm6dsx_update_bits_locked(hw, reg->addr,
+ reg->mask, data);
+ if (err < 0)
+ return err;
+ }
+
+ /* Enable wakeup interrupt */
+ data = ST_LSM6DSX_SHIFT_VAL(state, hw->irq_routing->mask);
+ return st_lsm6dsx_update_bits_locked(hw, hw->irq_routing->addr,
+ hw->irq_routing->mask, data);
+}
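
ST_LSM6DSX_SHIFT_VAL() shifts a value into the bit position of a register mask before a regmap_update_bits() call. Its definition lives in st_lsm6dsx.h; the sketch below is an assumption based on how it is used here:

	#define ST_LSM6DSX_SHIFT_VAL(val, mask)	(((val) << __ffs(mask)) & (mask))

	/* ST_LSM6DSX_SHIFT_VAL(1, BIT(5))        == 0x20
	 * ST_LSM6DSX_SHIFT_VAL(3, GENMASK(5, 4)) == 0x30
	 */
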
+
+static int st_lsm6dsx_read_event(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ *val2 = 0;
+ *val = hw->event_threshold;
+
+ return IIO_VAL_INT;
+}
+
+static int
+st_lsm6dsx_write_event(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_reg *reg;
+ unsigned int data;
+ int err;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (val < 0 || val > 31)
+ return -EINVAL;
+
+ reg = &hw->settings->event_settings.wakeup_reg;
+ data = ST_LSM6DSX_SHIFT_VAL(val, reg->mask);
+ err = st_lsm6dsx_update_bits_locked(hw, reg->addr,
+ reg->mask, data);
+ if (err < 0)
+ return -EINVAL;
+
+ hw->event_threshold = val;
+
+ return 0;
+}
+
+static int
+st_lsm6dsx_read_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ return !!(hw->enable_event & BIT(chan->channel2));
+}
+
+static int
+st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ u8 enable_event;
+ int err = 0;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (state) {
+ enable_event = hw->enable_event | BIT(chan->channel2);
+
+ /* do not enable events if they are already enabled */
+ if (hw->enable_event)
+ goto out;
+ } else {
+ enable_event = hw->enable_event & ~BIT(chan->channel2);
+
+ /* only turn off the sensor if no events are enabled */
+ if (enable_event)
+ goto out;
+ }
+
+ /* stop here if no changes have been made */
+ if (hw->enable_event == enable_event)
+ return 0;
+
+ err = st_lsm6dsx_event_setup(hw, state);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&hw->conf_lock);
+ err = st_lsm6dsx_sensor_set_enable(sensor, state);
+ mutex_unlock(&hw->conf_lock);
+ if (err < 0)
+ return err;
+
+out:
+ hw->enable_event = enable_event;
+
+ return 0;
+}
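
hw->enable_event keeps one bit per axis, keyed by the channel modifier, which is why this function and the wake-up handler below both test BIT(chan->channel2). A hypothetical state:

	/* X and Z wake-up events enabled, Y disabled */
	hw->enable_event = BIT(IIO_MOD_X) | BIT(IIO_MOD_Z);
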
+
int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
@@ -1193,13 +1729,14 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
char *buf)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
- enum st_lsm6dsx_sensor_id id = sensor->id;
- struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_odr_table_entry *odr_table;
int i, len = 0;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- hw->settings->odr_table[id].odr_avl[i].hz);
+ odr_table = &sensor->hw->settings->odr_table[sensor->id];
+ for (i = 0; i < odr_table->odr_len; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
+ odr_table->odr_avl[i].milli_hz / 1000,
+ odr_table->odr_avl[i].milli_hz % 1000);
buf[len - 1] = '\n';
return len;
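
With milli-Hz entries the available-frequency list is rendered as a decimal with three fractional digits; for example:

	/* milli_hz = 12500 prints "12.500", milli_hz = 416000 prints "416.000" */
	len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
			 12500 / 1000, 12500 % 1000);
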
@@ -1243,6 +1780,10 @@ static const struct iio_info st_lsm6dsx_acc_info = {
.attrs = &st_lsm6dsx_acc_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
+ .read_event_value = st_lsm6dsx_read_event,
+ .write_event_value = st_lsm6dsx_write_event,
+ .read_event_config = st_lsm6dsx_read_event_config,
+ .write_event_config = st_lsm6dsx_write_event_config,
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
};
@@ -1273,7 +1814,9 @@ static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
}
-static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
+static int
+st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
+ const struct st_lsm6dsx_reg **drdy_reg)
{
int err = 0, drdy_pin;
@@ -1287,10 +1830,12 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
switch (drdy_pin) {
case 1:
- *drdy_reg = hw->settings->int1_addr;
+ hw->irq_routing = &hw->settings->irq_config.irq1_func;
+ *drdy_reg = &hw->settings->irq_config.irq1;
break;
case 2:
- *drdy_reg = hw->settings->int2_addr;
+ hw->irq_routing = &hw->settings->irq_config.irq2_func;
+ *drdy_reg = &hw->settings->irq_config.irq2;
break;
default:
dev_err(hw->dev, "unsupported data ready pin\n");
@@ -1381,51 +1926,95 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
}
+
+ /* calibrate timestamp sensitivity */
+ hw->ts_gain = ST_LSM6DSX_TS_SENSITIVITY;
+ if (ts_settings->freq_fine) {
+ err = regmap_read(hw->regmap, ts_settings->freq_fine, &val);
+ if (err < 0)
+ return err;
+
+ /*
+ * linearize the AN5192 formula:
+ * 1 / (1 + x) ~= 1 - x (first-order Taylor series)
+ * ttrim[s] = 1 / (40000 * (1 + 0.0015 * val))
+ * ttrim[ns] ~= 25000 - 37.5 * val
+ * ttrim[ns] ~= 25000 - (37500 * val) / 1000
+ */
+ hw->ts_gain -= ((s8)val * 37500) / 1000;
+ }
+
return 0;
}
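
Plugging a sample trim value into the linearized formula above (the readout is hypothetical):

	s8 val = 16;					/* FREQ_FINE register readout */
	u32 ts_gain = 25000 - ((s8)val * 37500) / 1000;	/* 25000 - 600 = 24400 ns */
	/* a negative trim lengthens the tick instead: val = -16 gives 25600 ns */
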
static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
{
- u8 drdy_int_reg;
+ const struct st_lsm6dsx_reg *reg;
int err;
/* device sw reset */
- err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
- ST_LSM6DSX_REG_RESET_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_RESET_MASK, 1));
+ reg = &hw->settings->reset;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
msleep(50);
/* reload trimming parameter */
- err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
- ST_LSM6DSX_REG_BOOT_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_BOOT_MASK, 1));
+ reg = &hw->settings->boot;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
msleep(50);
/* enable Block Data Update */
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_BDU_ADDR,
- ST_LSM6DSX_REG_BDU_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_BDU_MASK, 1));
+ reg = &hw->settings->bdu;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
 /* enable FIFO watermark interrupt */
- err = st_lsm6dsx_get_drdy_reg(hw, &drdy_int_reg);
+ err = st_lsm6dsx_get_drdy_reg(hw, &reg);
if (err < 0)
return err;
- err = regmap_update_bits(hw->regmap, drdy_int_reg,
- ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
- 1));
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
+ /* enable latched interrupts for device events */
+ if (hw->settings->irq_config.lir.addr) {
+ reg = &hw->settings->irq_config.lir;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+
+ /* enable clear on read for latched interrupts */
+ if (hw->settings->irq_config.clear_on_read.addr) {
+ reg = &hw->settings->irq_config.clear_on_read;
+ err = regmap_update_bits(hw->regmap,
+ reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+ }
+ }
+
+ /* enable drdy_mask if available */
+ if (hw->settings->drdy_mask.addr) {
+ reg = &hw->settings->drdy_mask;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+ }
+
err = st_lsm6dsx_init_shub(hw);
if (err < 0)
return err;
@@ -1453,7 +2042,7 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = hw->settings->odr_table[id].odr_avl[0].hz;
+ sensor->odr = hw->settings->odr_table[id].odr_avl[0].milli_hz;
sensor->gain = hw->settings->fs_table[id].fs_avl[0].gain;
sensor->watermark = 1;
@@ -1476,10 +2065,138 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
return iio_dev;
}
+static bool
+st_lsm6dsx_report_motion_event(struct st_lsm6dsx_hw *hw)
+{
+ const struct st_lsm6dsx_event_settings *event_settings;
+ int err, data;
+ s64 timestamp;
+
+ if (!hw->enable_event)
+ return false;
+
+ event_settings = &hw->settings->event_settings;
+ err = st_lsm6dsx_read_locked(hw, event_settings->wakeup_src_reg,
+ &data, sizeof(data));
+ if (err < 0)
+ return false;
+
+ timestamp = iio_get_time_ns(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ if ((data & hw->settings->event_settings.wakeup_src_z_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_Z)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ if ((data & hw->settings->event_settings.wakeup_src_y_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_Y)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ if ((data & hw->settings->event_settings.wakeup_src_x_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_X)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ return data & event_settings->wakeup_src_status_mask;
+}
+
+static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
+{
+ struct st_lsm6dsx_hw *hw = private;
+ bool event;
+ int count;
+
+ event = st_lsm6dsx_report_motion_event(hw);
+
+ if (!hw->settings->fifo_ops.read_fifo)
+ return event ? IRQ_HANDLED : IRQ_NONE;
+
+ mutex_lock(&hw->fifo_lock);
+ count = hw->settings->fifo_ops.read_fifo(hw);
+ mutex_unlock(&hw->fifo_lock);
+
+ return count || event ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
+{
+ struct device_node *np = hw->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+ const struct st_lsm6dsx_reg *reg;
+ unsigned long irq_type;
+ bool irq_active_low;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ irq_active_low = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ irq_active_low = true;
+ break;
+ default:
+ dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
+ return -EINVAL;
+ }
+
+ reg = &hw->settings->irq_config.hla;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(irq_active_low,
+ reg->mask));
+ if (err < 0)
+ return err;
+
+ pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
+ if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ (pdata && pdata->open_drain)) {
+ reg = &hw->settings->irq_config.od;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+
+ irq_type |= IRQF_SHARED;
+ }
+
+ err = devm_request_threaded_irq(hw->dev, hw->irq,
+ NULL,
+ st_lsm6dsx_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ "lsm6dsx", hw);
+ if (err) {
+ dev_err(hw->dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ return 0;
+}
+
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
struct regmap *regmap)
{
+ struct st_sensors_platform_data *pdata = dev->platform_data;
const struct st_lsm6dsx_shub_settings *hub_settings;
+ struct device_node *np = dev->of_node;
struct st_lsm6dsx_hw *hw;
const char *name = NULL;
int i, err;
@@ -1524,6 +2241,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
}
if (hw->irq > 0) {
+ err = st_lsm6dsx_irq_setup(hw);
+ if (err < 0)
+ return err;
+
err = st_lsm6dsx_fifo_setup(hw);
if (err < 0)
return err;
@@ -1538,6 +2259,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
+ if ((np && of_property_read_bool(np, "wakeup-source")) ||
+ (pdata && pdata->wakeup_source))
+ device_init_wakeup(dev, true);
+
return 0;
}
EXPORT_SYMBOL(st_lsm6dsx_probe);
@@ -1556,6 +2281,13 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
+ if (device_may_wakeup(dev) &&
+ sensor->id == ST_LSM6DSX_ID_ACC && hw->enable_event) {
+ /* Enable wake from IRQ */
+ enable_irq_wake(hw->irq);
+ continue;
+ }
+
if (sensor->id == ST_LSM6DSX_ID_EXT0 ||
sensor->id == ST_LSM6DSX_ID_EXT1 ||
sensor->id == ST_LSM6DSX_ID_EXT2)
@@ -1585,6 +2317,10 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
continue;
sensor = iio_priv(hw->iio_devs[i]);
+ if (device_may_wakeup(dev) &&
+ sensor->id == ST_LSM6DSX_ID_ACC && hw->enable_event)
+ disable_irq_wake(hw->irq);
+
if (!(hw->suspend_mask & BIT(sensor->id)))
continue;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index f52511059545..cd47ec1fedcb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -87,6 +87,14 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm9ds1-imu",
.data = (void *)ST_LSM9DS1_ID,
},
+ {
+ .compatible = "st,lsm6ds0",
+ .data = (void *)ST_LSM6DS0_ID,
+ },
+ {
+ .compatible = "st,lsm6dsrx",
+ .data = (void *)ST_LSM6DSRX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -104,6 +112,8 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
{ ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
{ ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
+ { ST_LSM6DS0_DEV_NAME, ST_LSM6DS0_ID },
+ { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index ea472cf6db7b..fa5d1001a46c 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -51,10 +51,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.addr = 0x60,
.mask = GENMASK(3, 2),
},
- .odr_avl[0] = { 10, 0x0 },
- .odr_avl[1] = { 20, 0x1 },
- .odr_avl[2] = { 50, 0x2 },
- .odr_avl[3] = { 100, 0x3 },
+ .odr_avl[0] = { 10000, 0x0 },
+ .odr_avl[1] = { 20000, 0x1 },
+ .odr_avl[2] = { 50000, 0x2 },
+ .odr_avl[3] = { 100000, 0x3 },
+ .odr_len = 4,
},
.fs_table = {
.fs_avl[0] = {
@@ -93,11 +94,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *sensor;
- u16 odr;
+ u32 odr;
sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
- msleep((2000U / odr) + 1);
+ odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 12500;
+ msleep((2000000U / odr) + 1);
}
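
Since odr is now in milli-Hz, 2000000U / odr is two ODR periods expressed in milliseconds; for instance:

	unsigned int wait_ms = 2000000U / 12500 + 1;	/* 12.5 Hz: two 80 ms periods, 161 ms */
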
/**
@@ -317,17 +318,18 @@ st_lsm6dsx_shub_write_with_mask(struct st_lsm6dsx_sensor *sensor,
static int
st_lsm6dsx_shub_get_odr_val(struct st_lsm6dsx_sensor *sensor,
- u16 odr, u16 *val)
+ u32 odr, u16 *val)
{
const struct st_lsm6dsx_ext_dev_settings *settings;
int i;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- if (settings->odr_table.odr_avl[i].hz == odr)
+ for (i = 0; i < settings->odr_table.odr_len; i++) {
+ if (settings->odr_table.odr_avl[i].milli_hz == odr)
break;
+ }
- if (i == ST_LSM6DSX_ODR_LIST_SIZE)
+ if (i == settings->odr_table.odr_len)
return -EINVAL;
*val = settings->odr_table.odr_avl[i].val;
@@ -335,7 +337,7 @@ st_lsm6dsx_shub_get_odr_val(struct st_lsm6dsx_sensor *sensor,
}
static int
-st_lsm6dsx_shub_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
+st_lsm6dsx_shub_set_odr(struct st_lsm6dsx_sensor *sensor, u32 odr)
{
const struct st_lsm6dsx_ext_dev_settings *settings;
u16 val;
@@ -440,7 +442,7 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000 / sensor->odr;
+ delay = 1000000000 / sensor->odr;
usleep_range(delay, 2 * delay);
len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3);
@@ -480,8 +482,9 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr;
- ret = IIO_VAL_INT;
+ *val = sensor->odr / 1000;
+ *val2 = (sensor->odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -512,6 +515,7 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ: {
u16 data;
+ val = val * 1000 + val2 / 1000;
err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data);
if (!err)
sensor->odr = val;
@@ -537,12 +541,11 @@ st_lsm6dsx_shub_sampling_freq_avail(struct device *dev,
int i, len = 0;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++) {
- u16 val = settings->odr_table.odr_avl[i].hz;
+ for (i = 0; i < settings->odr_table.odr_len; i++) {
+ u32 val = settings->odr_table.odr_avl[i].milli_hz;
- if (val > 0)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- val);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
+ val / 1000, val % 1000);
}
buf[len - 1] = '\n';
@@ -607,7 +610,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = info->odr_table.odr_avl[0].hz;
+ sensor->odr = info->odr_table.odr_avl[0].milli_hz;
sensor->gain = info->fs_table.fs_avl[0].gain;
sensor->ext_info.settings = info;
sensor->ext_info.addr = i2c_addr;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 344b28dddebb..67ff36eac247 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -87,6 +87,14 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm9ds1-imu",
.data = (void *)ST_LSM9DS1_ID,
},
+ {
+ .compatible = "st,lsm6ds0",
+ .data = (void *)ST_LSM6DS0_ID,
+ },
+ {
+ .compatible = "st,lsm6dsrx",
+ .data = (void *)ST_LSM6DSRX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -104,6 +112,8 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
{ ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
{ ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
+ { ST_LSM6DS0_DEV_NAME, ST_LSM6DS0_ID },
+ { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 524a686077ca..f72c2dc5f703 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1238,6 +1238,16 @@ static ssize_t iio_show_dev_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+static ssize_t iio_show_dev_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", indio_dev->label);
+}
+
+static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);
+
static ssize_t iio_show_timestamp_clock(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1354,6 +1364,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
if (indio_dev->name)
attrcount++;
+ if (indio_dev->label)
+ attrcount++;
if (clk)
attrcount++;
@@ -1376,6 +1388,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
if (indio_dev->name)
indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+ if (indio_dev->label)
+ indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
if (clk)
indio_dev->chan_attr_group.attrs[attrn++] = clk;
@@ -1647,6 +1661,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
+ indio_dev->label = of_get_property(indio_dev->dev.of_node, "label",
+ NULL);
+
ret = iio_check_unique_scan_index(indio_dev);
if (ret < 0)
return ret;
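
The core now picks up an optional "label" property from the device's firmware node and exposes it read-only in sysfs; a hedged sketch of the resulting behaviour (node name and path are illustrative):

	/* with a devicetree node containing: label = "proximity-front";
	 * userspace can read /sys/bus/iio/devices/iio:deviceX/label
	 * and gets back "proximity-front"
	 */
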
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 4a1a883dc061..9968f982fbc7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -32,6 +32,17 @@ config ADJD_S311
This driver can also be built as a module. If so, the module
will be called adjd_s311.
+config ADUX1020
+ tristate "ADUX1020 photometric sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Analog Devices
+ ADUX1020 photometric sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adux1020.
+
config AL3320A
tristate "AL3320A ambient light sensor"
depends on I2C
@@ -496,6 +507,17 @@ config VCNL4035
To compile this driver as a module, choose M here: the
module will be called vcnl4035.
+config VEML6030
+ tristate "VEML6030 ambient light sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6030
+ ambient light sensor (ALS).
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6030.
+
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 00d1f9b98f39..c98d1cefb861 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ACPI_ALS) += acpi-als.o
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
+obj-$(CONFIG_ADUX1020) += adux1020.o
obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
@@ -48,6 +49,7 @@ obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VCNL4035) += vcnl4035.o
+obj-$(CONFIG_VEML6030) += veml6030.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VL6180) += vl6180.o
obj-$(CONFIG_ZOPT2201) += zopt2201.o
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
new file mode 100644
index 000000000000..b07797ac10d7
--- /dev/null
+++ b/drivers/iio/light/adux1020.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adux1020.c - Support for Analog Devices ADUX1020 photometric sensor
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: Triggered buffer support
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+#define ADUX1020_REGMAP_NAME "adux1020_regmap"
+#define ADUX1020_DRV_NAME "adux1020"
+
+/* System registers */
+#define ADUX1020_REG_CHIP_ID 0x08
+#define ADUX1020_REG_SLAVE_ADDRESS 0x09
+
+#define ADUX1020_REG_SW_RESET 0x0f
+#define ADUX1020_REG_INT_ENABLE 0x1c
+#define ADUX1020_REG_INT_POLARITY 0x1d
+#define ADUX1020_REG_PROX_TH_ON1 0x2a
+#define ADUX1020_REG_PROX_TH_OFF1 0x2b
+#define ADUX1020_REG_PROX_TYPE 0x2f
+#define ADUX1020_REG_TEST_MODES_3 0x32
+#define ADUX1020_REG_FORCE_MODE 0x33
+#define ADUX1020_REG_FREQUENCY 0x40
+#define ADUX1020_REG_LED_CURRENT 0x41
+#define ADUX1020_REG_OP_MODE 0x45
+#define ADUX1020_REG_INT_MASK 0x48
+#define ADUX1020_REG_INT_STATUS 0x49
+#define ADUX1020_REG_DATA_BUFFER 0x60
+
+/* Chip ID bits */
+#define ADUX1020_CHIP_ID_MASK GENMASK(11, 0)
+#define ADUX1020_CHIP_ID 0x03fc
+
+#define ADUX1020_SW_RESET BIT(1)
+#define ADUX1020_FIFO_FLUSH BIT(15)
+#define ADUX1020_OP_MODE_MASK GENMASK(3, 0)
+#define ADUX1020_DATA_OUT_MODE_MASK GENMASK(7, 4)
+#define ADUX1020_DATA_OUT_PROX_I FIELD_PREP(ADUX1020_DATA_OUT_MODE_MASK, 1)
+
+#define ADUX1020_MODE_INT_MASK GENMASK(7, 0)
+#define ADUX1020_INT_ENABLE 0x2094
+#define ADUX1020_INT_DISABLE 0x2090
+#define ADUX1020_PROX_INT_ENABLE 0x00f0
+#define ADUX1020_PROX_ON1_INT BIT(0)
+#define ADUX1020_PROX_OFF1_INT BIT(1)
+#define ADUX1020_FIFO_INT_ENABLE 0x7f
+#define ADUX1020_MODE_INT_DISABLE 0xff
+#define ADUX1020_MODE_INT_STATUS_MASK GENMASK(7, 0)
+#define ADUX1020_FIFO_STATUS_MASK GENMASK(15, 8)
+#define ADUX1020_INT_CLEAR 0xff
+#define ADUX1020_PROX_TYPE BIT(15)
+
+#define ADUX1020_INT_PROX_ON1 BIT(0)
+#define ADUX1020_INT_PROX_OFF1 BIT(1)
+
+#define ADUX1020_FORCE_CLOCK_ON 0x0f4f
+#define ADUX1020_FORCE_CLOCK_RESET 0x0040
+#define ADUX1020_ACTIVE_4_STATE 0x0008
+
+#define ADUX1020_PROX_FREQ_MASK GENMASK(7, 4)
+#define ADUX1020_PROX_FREQ(x) FIELD_PREP(ADUX1020_PROX_FREQ_MASK, x)
+
+#define ADUX1020_LED_CURRENT_MASK GENMASK(3, 0)
+#define ADUX1020_LED_PIREF_EN BIT(12)
+
+/* Operating modes */
+enum adux1020_op_modes {
+ ADUX1020_MODE_STANDBY,
+ ADUX1020_MODE_PROX_I,
+ ADUX1020_MODE_PROX_XY,
+ ADUX1020_MODE_GEST,
+ ADUX1020_MODE_SAMPLE,
+ ADUX1020_MODE_FORCE = 0x0e,
+ ADUX1020_MODE_IDLE = 0x0f,
+};
+
+struct adux1020_data {
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+struct adux1020_mode_data {
+ u8 bytes;
+ u8 buf_len;
+ u16 int_en;
+};
+
+static const struct adux1020_mode_data adux1020_modes[] = {
+ [ADUX1020_MODE_PROX_I] = {
+ .bytes = 2,
+ .buf_len = 1,
+ .int_en = ADUX1020_PROX_INT_ENABLE,
+ },
+};
+
+static const struct regmap_config adux1020_regmap_config = {
+ .name = ADUX1020_REGMAP_NAME,
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0x6F,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct reg_sequence adux1020_def_conf[] = {
+ { 0x000c, 0x000f },
+ { 0x0010, 0x1010 },
+ { 0x0011, 0x004c },
+ { 0x0012, 0x5f0c },
+ { 0x0013, 0xada5 },
+ { 0x0014, 0x0080 },
+ { 0x0015, 0x0000 },
+ { 0x0016, 0x0600 },
+ { 0x0017, 0x0000 },
+ { 0x0018, 0x2693 },
+ { 0x0019, 0x0004 },
+ { 0x001a, 0x4280 },
+ { 0x001b, 0x0060 },
+ { 0x001c, 0x2094 },
+ { 0x001d, 0x0020 },
+ { 0x001e, 0x0001 },
+ { 0x001f, 0x0100 },
+ { 0x0020, 0x0320 },
+ { 0x0021, 0x0A13 },
+ { 0x0022, 0x0320 },
+ { 0x0023, 0x0113 },
+ { 0x0024, 0x0000 },
+ { 0x0025, 0x2412 },
+ { 0x0026, 0x2412 },
+ { 0x0027, 0x0022 },
+ { 0x0028, 0x0000 },
+ { 0x0029, 0x0300 },
+ { 0x002a, 0x0700 },
+ { 0x002b, 0x0600 },
+ { 0x002c, 0x6000 },
+ { 0x002d, 0x4000 },
+ { 0x002e, 0x0000 },
+ { 0x002f, 0x0000 },
+ { 0x0030, 0x0000 },
+ { 0x0031, 0x0000 },
+ { 0x0032, 0x0040 },
+ { 0x0033, 0x0008 },
+ { 0x0034, 0xE400 },
+ { 0x0038, 0x8080 },
+ { 0x0039, 0x8080 },
+ { 0x003a, 0x2000 },
+ { 0x003b, 0x1f00 },
+ { 0x003c, 0x2000 },
+ { 0x003d, 0x2000 },
+ { 0x003e, 0x0000 },
+ { 0x0040, 0x8069 },
+ { 0x0041, 0x1f2f },
+ { 0x0042, 0x4000 },
+ { 0x0043, 0x0000 },
+ { 0x0044, 0x0008 },
+ { 0x0046, 0x0000 },
+ { 0x0048, 0x00ef },
+ { 0x0049, 0x0000 },
+ { 0x0045, 0x0000 },
+};
+
+static const int adux1020_rates[][2] = {
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 500000 },
+ { 1, 0 },
+ { 2, 0 },
+ { 5, 0 },
+ { 10, 0 },
+ { 20, 0 },
+ { 50, 0 },
+ { 100, 0 },
+ { 190, 0 },
+ { 450, 0 },
+ { 820, 0 },
+ { 1400, 0 },
+};
+
+static const int adux1020_led_currents[][2] = {
+ { 0, 25000 },
+ { 0, 40000 },
+ { 0, 55000 },
+ { 0, 70000 },
+ { 0, 85000 },
+ { 0, 100000 },
+ { 0, 115000 },
+ { 0, 130000 },
+ { 0, 145000 },
+ { 0, 160000 },
+ { 0, 175000 },
+ { 0, 190000 },
+ { 0, 205000 },
+ { 0, 220000 },
+ { 0, 235000 },
+ { 0, 250000 },
+};
+
+static int adux1020_flush_fifo(struct adux1020_data *data)
+{
+ int ret;
+
+ /* Force Idle mode */
+ ret = regmap_write(data->regmap, ADUX1020_REG_FORCE_MODE,
+ ADUX1020_ACTIVE_4_STATE);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK, ADUX1020_MODE_FORCE);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK, ADUX1020_MODE_IDLE);
+ if (ret < 0)
+ return ret;
+
+ /* Flush FIFO */
+ ret = regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_STATUS,
+ ADUX1020_FIFO_FLUSH);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_RESET);
+}
+
+static int adux1020_read_fifo(struct adux1020_data *data, u16 *buf, u8 buf_len)
+{
+ unsigned int regval;
+ int i, ret;
+
+ /* Enable 32MHz clock */
+ ret = regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_ON);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < buf_len; i++) {
+ ret = regmap_read(data->regmap, ADUX1020_REG_DATA_BUFFER,
+ &regval);
+ if (ret < 0)
+ return ret;
+
+ buf[i] = regval;
+ }
+
+ /* Set 32MHz clock to be controlled by internal state machine */
+ return regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_RESET);
+}
+
+static int adux1020_set_mode(struct adux1020_data *data,
+ enum adux1020_op_modes mode)
+{
+ int ret;
+
+ /* Switch to standby mode before changing the mode */
+ ret = regmap_write(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_MODE_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ /* Set data out and switch to the desired mode */
+ switch (mode) {
+ case ADUX1020_MODE_PROX_I:
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_DATA_OUT_MODE_MASK,
+ ADUX1020_DATA_OUT_PROX_I);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK,
+ ADUX1020_MODE_PROX_I);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adux1020_measure(struct adux1020_data *data,
+ enum adux1020_op_modes mode,
+ u16 *val)
+{
+ unsigned int status;
+ int ret, tries = 50;
+
+ /* Disable INT pin as polling is going to be used */
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_ENABLE,
+ ADUX1020_INT_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /* Enable mode interrupt */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK,
+ adux1020_modes[mode].int_en);
+ if (ret < 0)
+ return ret;
+
+ while (tries--) {
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ status &= ADUX1020_FIFO_STATUS_MASK;
+ if (status >= adux1020_modes[mode].bytes)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0)
+ return -EIO;
+
+ ret = adux1020_read_fifo(data, val, adux1020_modes[mode].buf_len);
+ if (ret < 0)
+ return ret;
+
+ /* Clear mode interrupt */
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_STATUS,
+ (~adux1020_modes[mode].int_en));
+ if (ret < 0)
+ return ret;
+
+ /* Disable mode interrupts */
+ return regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK,
+ ADUX1020_MODE_INT_DISABLE);
+}
+
+static int adux1020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u16 buf[3];
+ int ret = -EINVAL;
+ unsigned int regval;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = adux1020_set_mode(data, ADUX1020_MODE_PROX_I);
+ if (ret < 0)
+ goto fail;
+
+ ret = adux1020_measure(data, ADUX1020_MODE_PROX_I, buf);
+ if (ret < 0)
+ goto fail;
+
+ *val = buf[0];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ ret = regmap_read(data->regmap,
+ ADUX1020_REG_LED_CURRENT, &regval);
+ if (ret < 0)
+ goto fail;
+
+ regval = regval & ADUX1020_LED_CURRENT_MASK;
+
+ *val = adux1020_led_currents[regval][0];
+ *val2 = adux1020_led_currents[regval][1];
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = regmap_read(data->regmap, ADUX1020_REG_FREQUENCY,
+ &regval);
+ if (ret < 0)
+ goto fail;
+
+ regval = FIELD_GET(ADUX1020_PROX_FREQ_MASK, regval);
+
+ *val = adux1020_rates[regval][0];
+ *val2 = adux1020_rates[regval][1];
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static inline int adux1020_find_index(const int array[][2], int count, int val,
+ int val2)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (val == array[i][0] && val2 == array[i][1])
+ return i;
+
+ return -EINVAL;
+}
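
The helper maps an IIO {integer, micro} pair back to its table index; against adux1020_rates above (values hypothetical):

	/* userspace writes "0.5" -> val = 0, val2 = 500000 -> index 2, which
	 * is then programmed with ADUX1020_PROX_FREQ(2); pairs not in the
	 * table yield -EINVAL
	 */
	int idx = adux1020_find_index(adux1020_rates,
				      ARRAY_SIZE(adux1020_rates), 0, 500000);
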
+
+static int adux1020_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int i, ret = -EINVAL;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type == IIO_PROXIMITY) {
+ i = adux1020_find_index(adux1020_rates,
+ ARRAY_SIZE(adux1020_rates),
+ val, val2);
+ if (i < 0) {
+ ret = i;
+ goto fail;
+ }
+
+ ret = regmap_update_bits(data->regmap,
+ ADUX1020_REG_FREQUENCY,
+ ADUX1020_PROX_FREQ_MASK,
+ ADUX1020_PROX_FREQ(i));
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_CURRENT) {
+ i = adux1020_find_index(adux1020_led_currents,
+ ARRAY_SIZE(adux1020_led_currents),
+ val, val2);
+ if (i < 0) {
+ ret = i;
+ goto fail;
+ }
+
+ ret = regmap_update_bits(data->regmap,
+ ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_CURRENT_MASK, i);
+ }
+ break;
+ default:
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int adux1020_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, mask;
+
+ mutex_lock(&data->lock);
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_ENABLE,
+ ADUX1020_INT_ENABLE);
+ if (ret < 0)
+ goto fail;
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_POLARITY, 0);
+ if (ret < 0)
+ goto fail;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = ADUX1020_PROX_ON1_INT;
+ else
+ mask = ADUX1020_PROX_OFF1_INT;
+
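+ /* a cleared bit in INT_MASK enables the interrupt, hence the inversion below */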
+ if (state)
+ state = 0;
+ else
+ state = mask;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ mask, state);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * Trigger proximity interrupt when the intensity is above
+ * or below the threshold
+ */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_PROX_TYPE,
+ ADUX1020_PROX_TYPE,
+ ADUX1020_PROX_TYPE);
+ if (ret < 0)
+ goto fail;
+
+ /* Set proximity mode */
+ ret = adux1020_set_mode(data, ADUX1020_MODE_PROX_I);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int adux1020_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, mask;
+ unsigned int regval;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = ADUX1020_PROX_ON1_INT;
+ else
+ mask = ADUX1020_PROX_OFF1_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_MASK, &regval);
+ if (ret < 0)
+ return ret;
+
+ return !(regval & mask);
+}
+
+static int adux1020_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u8 reg;
+ int ret;
+ unsigned int regval;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = ADUX1020_REG_PROX_TH_ON1;
+ else
+ reg = ADUX1020_REG_PROX_TH_OFF1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = regval;
+
+ return IIO_VAL_INT;
+}
+
+static int adux1020_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u8 reg;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = ADUX1020_REG_PROX_TH_ON1;
+ else
+ reg = ADUX1020_REG_PROX_TH_OFF1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Full scale threshold value is 0-65535 */
+ if (val < 0 || val > 65535)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, reg, val);
+}
+
+static const struct iio_event_spec adux1020_proximity_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec adux1020_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = adux1020_proximity_event,
+ .num_event_specs = ARRAY_SIZE(adux1020_proximity_event),
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .extend_name = "led",
+ .output = 1,
+ },
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "0.1 0.2 0.5 1 2 5 10 20 50 100 190 450 820 1400");
+
+static struct attribute *adux1020_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adux1020_attribute_group = {
+ .attrs = adux1020_attributes,
+};
+
+static const struct iio_info adux1020_info = {
+ .attrs = &adux1020_attribute_group,
+ .read_raw = adux1020_read_raw,
+ .write_raw = adux1020_write_raw,
+ .read_event_config = adux1020_read_event_config,
+ .write_event_config = adux1020_write_event_config,
+ .read_event_value = adux1020_read_thresh,
+ .write_event_value = adux1020_write_thresh,
+};
+
+static irqreturn_t adux1020_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, status;
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_STATUS, &status);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ status &= ADUX1020_MODE_INT_STATUS_MASK;
+
+ if (status & ADUX1020_INT_PROX_ON1) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (status & ADUX1020_INT_PROX_OFF1) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ regmap_update_bits(data->regmap, ADUX1020_REG_INT_STATUS,
+ ADUX1020_MODE_INT_MASK, ADUX1020_INT_CLEAR);
+
+ return IRQ_HANDLED;
+}
+
+static int adux1020_chip_init(struct adux1020_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_CHIP_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if ((val & ADUX1020_CHIP_ID_MASK) != ADUX1020_CHIP_ID) {
+ dev_err(&client->dev, "invalid chip id 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "Detected ADUX1020 with chip id: 0x%04x\n", val);
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_SW_RESET,
+ ADUX1020_SW_RESET, ADUX1020_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Load default configuration */
+ ret = regmap_multi_reg_write(data->regmap, adux1020_def_conf,
+ ARRAY_SIZE(adux1020_def_conf));
+ if (ret < 0)
+ return ret;
+
+ ret = adux1020_flush_fifo(data);
+ if (ret < 0)
+ return ret;
+
+ /* Use LED_IREF for proximity mode */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_PIREF_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Mask all interrupts */
+ return regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK, ADUX1020_MODE_INT_DISABLE);
+}
+
+static int adux1020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adux1020_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &adux1020_info;
+ indio_dev->name = ADUX1020_DRV_NAME;
+ indio_dev->channels = adux1020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adux1020_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data = iio_priv(indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &adux1020_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ data->client = client;
+ data->indio_dev = indio_dev;
+ mutex_init(&data->lock);
+
+ ret = adux1020_chip_init(data);
+ if (ret)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, adux1020_interrupt_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ ADUX1020_DRV_NAME, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ return ret;
+ }
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id adux1020_id[] = {
+ { "adux1020", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adux1020_id);
+
+static const struct of_device_id adux1020_of_match[] = {
+ { .compatible = "adi,adux1020" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adux1020_of_match);
+
+static struct i2c_driver adux1020_driver = {
+ .driver = {
+ .name = ADUX1020_DRV_NAME,
+ .of_match_table = adux1020_of_match,
+ },
+ .probe = adux1020_probe,
+ .id_table = adux1020_id,
+};
+module_i2c_driver(adux1020_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("ADUX1020 photometric sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 28347df78cff..adb5ab9e3439 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -59,9 +59,9 @@ struct bh1750_chip_info {
u16 int_time_low_mask;
u16 int_time_high_mask;
-}
+};
-static const bh1750_chip_info_tbl[] = {
+static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
[BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
[BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
[BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 1019d625adb1..90e38fcc974b 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -532,7 +532,7 @@ static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
int state)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
- int cmd, ret = -EINVAL;
+ int cmd, ret;
mutex_lock(&cm36651->lock);
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 7c0291c5fe76..b542e5619ead 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -240,32 +240,42 @@ static const struct iio_info tcs3414_info = {
.attrs = &tcs3414_attribute_group,
};
-static int tcs3414_buffer_preenable(struct iio_dev *indio_dev)
+static int tcs3414_buffer_postenable(struct iio_dev *indio_dev)
{
struct tcs3414_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
data->control |= TCS3414_CONTROL_ADC_EN;
- return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
data->control);
+ if (ret)
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
}
static int tcs3414_buffer_predisable(struct iio_dev *indio_dev)
{
struct tcs3414_data *data = iio_priv(indio_dev);
- int ret;
-
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- return ret;
+ int ret, ret2;
data->control &= ~TCS3414_CONTROL_ADC_EN;
- return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
data->control);
+
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (!ret)
+ ret = ret2;
+
+ return ret;
}
static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = {
- .preenable = tcs3414_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
+ .postenable = tcs3414_buffer_postenable,
.predisable = tcs3414_buffer_predisable,
};
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
new file mode 100644
index 000000000000..aa25b87fca8f
--- /dev/null
+++ b/drivers/iio/light/veml6030.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VEML6030 Ambient Light Sensor
+ *
+ * Copyright (c) 2019, Rishi Gupta <gupt21@gmail.com>
+ *
+ * Datasheet: https://www.vishay.com/docs/84366/veml6030.pdf
+ * Appnote-84367: https://www.vishay.com/docs/84367/designingveml6030.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+/* Device registers */
+#define VEML6030_REG_ALS_CONF 0x00
+#define VEML6030_REG_ALS_WH 0x01
+#define VEML6030_REG_ALS_WL 0x02
+#define VEML6030_REG_ALS_PSM 0x03
+#define VEML6030_REG_ALS_DATA 0x04
+#define VEML6030_REG_WH_DATA 0x05
+#define VEML6030_REG_ALS_INT 0x06
+
+/* Bit masks for specific functionality */
+#define VEML6030_ALS_IT GENMASK(9, 6)
+#define VEML6030_PSM GENMASK(2, 1)
+#define VEML6030_ALS_PERS GENMASK(5, 4)
+#define VEML6030_ALS_GAIN GENMASK(12, 11)
+#define VEML6030_PSM_EN BIT(0)
+#define VEML6030_INT_TH_LOW BIT(15)
+#define VEML6030_INT_TH_HIGH BIT(14)
+#define VEML6030_ALS_INT_EN BIT(1)
+#define VEML6030_ALS_SD BIT(0)
+
+/*
+ * The resolution depends on both gain and integration time. The
+ * cur_resolution stores one of the resolution mentioned in the
+ * table during startup and gets updated whenever integration time
+ * or gain is changed.
+ *
+ * Table 'resolution and maximum detection range' in appnote 84367
+ * is visualized as a 2D array. The cur_gain stores index of gain
+ * in this table (0-3) while the cur_integration_time holds index
+ * of integration time (0-5).
+ */
+struct veml6030_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int cur_resolution;
+ int cur_gain;
+ int cur_integration_time;
+};
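+
+/*
+ * e.g. the startup defaults set in veml6030_hw_init() are gain index 3
+ * (1/8) and integration time index 3 (100 ms), which per the appnote
+ * table give 0.4608 lx/count, cached as cur_resolution = 4608
+ * (units of 1/10000 lx).
+ */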
+
+/* Integration time available in seconds */
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ "0.025 0.05 0.1 0.2 0.4 0.8");
+
+/*
+ * Scale is 1/gain. A value of 0.125 selects ALS gain 1/8, 0.25
+ * selects gain 1/4, 1.0 selects gain 1 and 2.0 selects gain 2.
+ */
+static IIO_CONST_ATTR(in_illuminance_scale_available,
+ "0.125 0.25 1.0 2.0");
+
+static struct attribute *veml6030_attributes[] = {
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group veml6030_attr_group = {
+ .attrs = veml6030_attributes,
+};
+
+/*
+ * Persistence = 1/2/4/8 x integration time. This is the minimum time
+ * for which light readings must stay outside the configured
+ * thresholds before the interrupt is asserted.
+ */
+static const char * const period_values[] = {
+ "0.1 0.2 0.4 0.8",
+ "0.2 0.4 0.8 1.6",
+ "0.4 0.8 1.6 3.2",
+ "0.8 1.6 3.2 6.4",
+ "0.05 0.1 0.2 0.4",
+ "0.025 0.050 0.1 0.2"
+};
+
+/*
+ * Return list of valid period values in seconds corresponding to
+ * the currently active integration time.
+ */
+static ssize_t in_illuminance_period_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret, reg, x;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
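+ /*
+ * ALS_IT field codes 0-3 select 100/200/400/800 ms and codes 8/12
+ * select 50/25 ms; map them onto the indexing of period_values[].
+ */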
+ ret = ((reg >> 6) & 0xF);
+ switch (ret) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ x = ret;
+ break;
+ case 8:
+ x = 4;
+ break;
+ case 12:
+ x = 5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", period_values[x]);
+}
+
+static IIO_DEVICE_ATTR_RO(in_illuminance_period_available, 0);
+
+static struct attribute *veml6030_event_attributes[] = {
+ &iio_dev_attr_in_illuminance_period_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group veml6030_event_attr_group = {
+ .attrs = veml6030_event_attributes,
+};
+
+static int veml6030_als_pwr_on(struct veml6030_data *data)
+{
+ return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD, 0);
+}
+
+static int veml6030_als_shut_down(struct veml6030_data *data)
+{
+ return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD, 1);
+}
+
+static void veml6030_als_shut_down_action(void *data)
+{
+ veml6030_als_shut_down(data);
+}
+
+static const struct iio_event_spec veml6030_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* Channel number */
+enum veml6030_chan {
+ CH_ALS,
+ CH_WHITE,
+};
+
+static const struct iio_chan_spec veml6030_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .channel = CH_ALS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = veml6030_event_spec,
+ .num_event_specs = ARRAY_SIZE(veml6030_event_spec),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel = CH_WHITE,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const struct regmap_config veml6030_regmap_config = {
+ .name = "veml6030_regmap",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = VEML6030_REG_ALS_INT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int veml6030_get_intgrn_tm(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ switch ((reg >> 6) & 0xF) {
+ case 0:
+ *val2 = 100000;
+ break;
+ case 1:
+ *val2 = 200000;
+ break;
+ case 2:
+ *val2 = 400000;
+ break;
+ case 3:
+ *val2 = 800000;
+ break;
+ case 8:
+ *val2 = 50000;
+ break;
+ case 12:
+ *val2 = 25000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_set_intgrn_tm(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, new_int_time, int_idx;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val)
+ return -EINVAL;
+
+ switch (val2) {
+ case 25000:
+ new_int_time = 0x300;
+ int_idx = 5;
+ break;
+ case 50000:
+ new_int_time = 0x200;
+ int_idx = 4;
+ break;
+ case 100000:
+ new_int_time = 0x00;
+ int_idx = 3;
+ break;
+ case 200000:
+ new_int_time = 0x40;
+ int_idx = 2;
+ break;
+ case 400000:
+ new_int_time = 0x80;
+ int_idx = 1;
+ break;
+ case 800000:
+ new_int_time = 0xC0;
+ int_idx = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_IT, new_int_time);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't update als integration time %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Cache current integration time and update resolution. For every
+ * increase in integration time to next level, resolution is halved
+ * and vice-versa.
+ */
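+ /*
+ * e.g. with the startup default of 4608 (0.4608 lx/count at 100 ms,
+ * index 3), switching to 25 ms (index 5) shifts left by 2:
+ * 4608 << 2 = 18432, i.e. 1.8432 lx/count.
+ */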
+ if (data->cur_integration_time < int_idx)
+ data->cur_resolution <<= int_idx - data->cur_integration_time;
+ else if (data->cur_integration_time > int_idx)
+ data->cur_resolution >>= data->cur_integration_time - int_idx;
+
+ data->cur_integration_time = int_idx;
+
+ return ret;
+}
+
+static int veml6030_read_persistence(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg, period, x, y;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ /* integration time multiplied by 1/2/4/8 */
+ period = y * (1 << ((reg >> 4) & 0x03));
+
+ *val = period / 1000000;
+ *val2 = period % 1000000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_write_persistence(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, period, x, y;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ if (ret < 0)
+ return ret;
+
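+ /*
+ * Convert the requested period to a 1/2/4/8 multiple of the current
+ * integration time y (in microseconds). Periods longer than 1 s
+ * (1.6, 3.2 and 6.4 s) are matched explicitly because val carries
+ * the integer seconds and val2 only the fractional microseconds.
+ */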
+ if (!val) {
+ period = val2 / y;
+ } else {
+ if ((val == 1) && (val2 == 600000))
+ period = 1600000 / y;
+ else if ((val == 3) && (val2 == 200000))
+ period = 3200000 / y;
+ else if ((val == 6) && (val2 == 400000))
+ period = 6400000 / y;
+ else
+ period = -1;
+ }
+
+ if (period <= 0 || period > 8 || hweight8(period) != 1)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_PERS, (ffs(period) - 1) << 4);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set persistence value %d\n", ret);
+
+ return ret;
+}
+
+static int veml6030_set_als_gain(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, new_gain, gain_idx;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val == 0 && val2 == 125000) {
+ new_gain = 0x1000; /* 0x02 << 11 */
+ gain_idx = 3;
+ } else if (val == 0 && val2 == 250000) {
+ new_gain = 0x1800;
+ gain_idx = 2;
+ } else if (val == 1 && val2 == 0) {
+ new_gain = 0x00;
+ gain_idx = 1;
+ } else if (val == 2 && val2 == 0) {
+ new_gain = 0x800;
+ gain_idx = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_GAIN, new_gain);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't set als gain %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Cache currently set gain & update resolution. For every
+ * increase in the gain to next level, resolution is halved
+ * and vice-versa.
+ */
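+ /*
+ * e.g. moving from gain x1 (index 1) to gain x2 (index 0) shifts the
+ * cached resolution right by 1, halving the lx/count value.
+ */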
+ if (data->cur_gain < gain_idx)
+ data->cur_resolution <<= gain_idx - data->cur_gain;
+ else if (data->cur_gain > gain_idx)
+ data->cur_resolution >>= data->cur_gain - gain_idx;
+
+ data->cur_gain = gain_idx;
+
+ return ret;
+}
+
+static int veml6030_get_als_gain(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ switch ((reg >> 11) & 0x03) {
+ case 0:
+ *val = 1;
+ *val2 = 0;
+ break;
+ case 1:
+ *val = 2;
+ *val2 = 0;
+ break;
+ case 2:
+ *val = 0;
+ *val2 = 125000;
+ break;
+ case 3:
+ *val = 0;
+ *val2 = 250000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_read_thresh(struct iio_dev *indio_dev,
+ int *val, int *val2, int dir)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (dir == IIO_EV_DIR_RISING)
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_WH, &reg);
+ else
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_WL, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als threshold value %d\n", ret);
+ return ret;
+ }
+
+ *val = reg & 0xffff;
+ return IIO_VAL_INT;
+}
+
+static int veml6030_write_thresh(struct iio_dev *indio_dev,
+ int val, int val2, int dir)
+{
+ int ret;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val > 0xFFFF || val < 0 || val2)
+ return -EINVAL;
+
+ if (dir == IIO_EV_DIR_RISING) {
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WH, val);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set high threshold %d\n", ret);
+ } else {
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WL, val);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set low threshold %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Provide both the raw count and the processed light reading in lux.
+ * light (in lux) = resolution * raw count
+ */
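+/*
+ * e.g. a raw count of 100 at the startup resolution of 4608 reads as
+ * 100 * 4608 / 10000 = 46.08 lx.
+ */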
+static int veml6030_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct regmap *regmap = data->regmap;
+ struct device *dev = &data->client->dev;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = regmap_read(regmap, VEML6030_REG_ALS_DATA, &reg);
+ if (ret < 0) {
+ dev_err(dev, "can't read als data %d\n", ret);
+ return ret;
+ }
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ *val = (reg * data->cur_resolution) / 10000;
+ /* the remainder is in 1/10000 lx, scale it to micro-lux */
+ *val2 = (reg * data->cur_resolution) % 10000 * 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ ret = regmap_read(regmap, VEML6030_REG_WH_DATA, &reg);
+ if (ret < 0) {
+ dev_err(dev, "can't read white data %d\n", ret);
+ return ret;
+ }
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ *val = (reg * data->cur_resolution) / 10000;
+ /* the remainder is in 1/10000 lx, scale it to micro-lux */
+ *val2 = (reg * data->cur_resolution) % 10000 * 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT)
+ return veml6030_get_intgrn_tm(indio_dev, val, val2);
+ return -EINVAL;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_LIGHT)
+ return veml6030_get_als_gain(indio_dev, val, val2);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return veml6030_set_intgrn_tm(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return veml6030_set_als_gain(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ case IIO_EV_DIR_FALLING:
+ return veml6030_read_thresh(indio_dev, val, val2, dir);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_EV_INFO_PERIOD:
+ return veml6030_read_persistence(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return veml6030_write_thresh(indio_dev, val, val2, dir);
+ case IIO_EV_INFO_PERIOD:
+ return veml6030_write_persistence(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_read_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ return !!(reg & VEML6030_ALS_INT_EN);
+}
+
+/*
+ * The sensor must not be measuring light while the interrupt is being
+ * configured, so the correct sequence is:
+ * shut down -> enable/disable interrupt -> power on
+ *
+ * state = 1 enables the interrupt, state = 0 disables it
+ */
+static int veml6030_write_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ int ret;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (state < 0 || state > 1)
+ return -EINVAL;
+
+ ret = veml6030_als_shut_down(data);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "can't disable als to configure interrupt %d\n", ret);
+ return ret;
+ }
+
+ /* enable interrupt + power on */
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_INT_EN | VEML6030_ALS_SD, state << 1);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't enable interrupt & poweron als %d\n", ret);
+
+ return ret;
+}
+
+static const struct iio_info veml6030_info = {
+ .read_raw = veml6030_read_raw,
+ .write_raw = veml6030_write_raw,
+ .read_event_value = veml6030_read_event_val,
+ .write_event_value = veml6030_write_event_val,
+ .read_event_config = veml6030_read_interrupt_config,
+ .write_event_config = veml6030_write_interrupt_config,
+ .attrs = &veml6030_attr_group,
+ .event_attrs = &veml6030_event_attr_group,
+};
+
+static const struct iio_info veml6030_info_no_irq = {
+ .read_raw = veml6030_read_raw,
+ .write_raw = veml6030_write_raw,
+ .attrs = &veml6030_attr_group,
+};
+
+static irqreturn_t veml6030_event_handler(int irq, void *private)
+{
+ int ret, reg, evtdir;
+ struct iio_dev *indio_dev = private;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_INT, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als interrupt register %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ /* Spurious interrupt handling */
+ if (!(reg & (VEML6030_INT_TH_HIGH | VEML6030_INT_TH_LOW)))
+ return IRQ_NONE;
+
+ if (reg & VEML6030_INT_TH_HIGH)
+ evtdir = IIO_EV_DIR_RISING;
+ else
+ evtdir = IIO_EV_DIR_FALLING;
+
+ iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
+ 0, IIO_EV_TYPE_THRESH, evtdir),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set ALS gain to 1/8, integration time to 100 ms, PSM to mode 2,
+ * persistence to 1 x integration time and the threshold
+ * interrupt disabled by default. First shut down the sensor, update
+ * the registers and then power it back on.
+ */
+static int veml6030_hw_init(struct iio_dev *indio_dev)
+{
+ int ret, val;
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = veml6030_als_shut_down(data);
+ if (ret) {
+ dev_err(&client->dev, "can't shutdown als %d\n", ret);
+ return ret;
+ }
+
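+ /*
+ * 0x1001: gain bits [12:11] = 10b (1/8), integration time bits
+ * [9:6] = 0 (100 ms), persistence bits [5:4] = 0 (1x), interrupt
+ * disabled and ALS_SD set (kept shut down until powered on below).
+ */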
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_CONF, 0x1001);
+ if (ret) {
+ dev_err(&client->dev, "can't setup als configs %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_PSM,
+ VEML6030_PSM | VEML6030_PSM_EN, 0x03);
+ if (ret) {
+ dev_err(&client->dev, "can't setup default PSM %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WH, 0xFFFF);
+ if (ret) {
+ dev_err(&client->dev, "can't setup high threshold %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WL, 0x0000);
+ if (ret) {
+ dev_err(&client->dev, "can't setup low threshold %d\n", ret);
+ return ret;
+ }
+
+ ret = veml6030_als_pwr_on(data);
+ if (ret) {
+ dev_err(&client->dev, "can't poweron als %d\n", ret);
+ return ret;
+ }
+
+ /* Wait 4 ms to let processor & oscillator start correctly */
+ usleep_range(4000, 4002);
+
+ /* Reading the interrupt register clears any stale status bits */
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_INT, &val);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "can't clear als interrupt status %d\n", ret);
+ return ret;
+ }
+
+ /* Cache currently active measurement parameters */
+ data->cur_gain = 3;
+ data->cur_resolution = 4608;
+ data->cur_integration_time = 3;
+
+ return ret;
+}
+
+static int veml6030_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct veml6030_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c adapter doesn't support plain i2c\n");
+ return -EOPNOTSUPP;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &veml6030_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "can't setup regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = "veml6030";
+ indio_dev->channels = veml6030_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6030_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, veml6030_event_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "veml6030", indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "irq %d request failed\n", client->irq);
+ return ret;
+ }
+ indio_dev->info = &veml6030_info;
+ } else {
+ indio_dev->info = &veml6030_info_no_irq;
+ }
+
+ ret = veml6030_hw_init(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ veml6030_als_shut_down_action, data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused veml6030_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_als_shut_down(data);
+ if (ret < 0)
+ dev_err(&data->client->dev, "can't suspend als %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused veml6030_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_als_pwr_on(data);
+ if (ret < 0)
+ dev_err(&data->client->dev, "can't resume als %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops veml6030_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(veml6030_runtime_suspend,
+ veml6030_runtime_resume, NULL)
+};
+
+static const struct of_device_id veml6030_of_match[] = {
+ { .compatible = "vishay,veml6030" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, veml6030_of_match);
+
+static const struct i2c_device_id veml6030_id[] = {
+ { "veml6030", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6030_id);
+
+static struct i2c_driver veml6030_driver = {
+ .driver = {
+ .name = "veml6030",
+ .of_match_table = veml6030_of_match,
+ .pm = &veml6030_pm_ops,
+ },
+ .probe = veml6030_probe,
+ .id_table = veml6030_id,
+};
+module_i2c_driver(veml6030_driver);
+
+MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
+MODULE_DESCRIPTION("VEML6030 Ambient Light Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index a3a268ee2896..e68184a93a6d 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 8d0f15f27dc5..29c209cc1108 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -74,6 +74,12 @@ struct bmp280_calib {
s8 H6;
};
+static const char *const bmp280_supply_names[] = {
+ "vddd", "vdda"
+};
+
+#define BMP280_NUM_SUPPLIES ARRAY_SIZE(bmp280_supply_names)
+
struct bmp280_data {
struct device *dev;
struct mutex lock;
@@ -85,8 +91,7 @@ struct bmp280_data {
struct bmp180_calib bmp180;
struct bmp280_calib bmp280;
} calib;
- struct regulator *vddd;
- struct regulator *vdda;
+ struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
unsigned int start_up_time; /* in microseconds */
/* log of base 2 of oversampling rate */
@@ -148,6 +153,8 @@ static int bmp280_read_calib(struct bmp280_data *data,
{
int ret;
unsigned int tmp;
+ __le16 l16;
+ __be16 b16;
struct device *dev = data->dev;
__le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
__le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
@@ -207,12 +214,12 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H1 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &l16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
- calib->H2 = sign_extend32(le16_to_cpu(tmp), 15);
+ calib->H2 = sign_extend32(le16_to_cpu(l16), 15);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
if (ret < 0) {
@@ -221,20 +228,20 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H3 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &b16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
- calib->H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
- (be16_to_cpu(tmp) & 0xf), 11);
+ calib->H4 = sign_extend32(((be16_to_cpu(b16) >> 4) & 0xff0) |
+ (be16_to_cpu(b16) & 0xf), 11);
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &l16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- calib->H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+ calib->H5 = sign_extend32(((le16_to_cpu(l16) >> 4) & 0xfff), 11);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
if (ret < 0) {
@@ -979,6 +986,22 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
return 0;
}
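+/*
+ * Teardown helper run on driver detach: wake the device, drop the
+ * usage count without idling and disable runtime PM so the devm
+ * unwind leaves the PM state balanced.
+ */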
+static void bmp280_pm_disable(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+}
+
+static void bmp280_regulators_disable(void *data)
+{
+ struct regulator_bulk_data *supplies = data;
+
+ regulator_bulk_disable(BMP280_NUM_SUPPLIES, supplies);
+}
+
int bmp280_common_probe(struct device *dev,
struct regmap *regmap,
unsigned int chip,
@@ -1033,27 +1056,28 @@ int bmp280_common_probe(struct device *dev,
}
/* Bring up regulators */
- data->vddd = devm_regulator_get(dev, "vddd");
- if (IS_ERR(data->vddd)) {
- dev_err(dev, "failed to get VDDD regulator\n");
- return PTR_ERR(data->vddd);
- }
- ret = regulator_enable(data->vddd);
+ regulator_bulk_set_supply_names(data->supplies,
+ bmp280_supply_names,
+ BMP280_NUM_SUPPLIES);
+
+ ret = devm_regulator_bulk_get(dev,
+ BMP280_NUM_SUPPLIES, data->supplies);
if (ret) {
- dev_err(dev, "failed to enable VDDD regulator\n");
+ dev_err(dev, "failed to get regulators\n");
return ret;
}
- data->vdda = devm_regulator_get(dev, "vdda");
- if (IS_ERR(data->vdda)) {
- dev_err(dev, "failed to get VDDA regulator\n");
- ret = PTR_ERR(data->vdda);
- goto out_disable_vddd;
- }
- ret = regulator_enable(data->vdda);
+
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret) {
- dev_err(dev, "failed to enable VDDA regulator\n");
- goto out_disable_vddd;
+ dev_err(dev, "failed to enable regulators\n");
+ return ret;
}
+
+ ret = devm_add_action_or_reset(dev, bmp280_regulators_disable,
+ data->supplies);
+ if (ret)
+ return ret;
+
/* Wait to make sure we started up properly */
usleep_range(data->start_up_time, data->start_up_time + 100);
@@ -1068,17 +1092,16 @@ int bmp280_common_probe(struct device *dev,
data->regmap = regmap;
ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
if (ret < 0)
- goto out_disable_vdda;
+ return ret;
if (chip_id != chip) {
dev_err(dev, "bad chip id: expected %x got %x\n",
chip, chip_id);
- ret = -EINVAL;
- goto out_disable_vdda;
+ return -EINVAL;
}
ret = data->chip_info->chip_config(data);
if (ret < 0)
- goto out_disable_vdda;
+ return ret;
dev_set_drvdata(dev, indio_dev);
@@ -1092,14 +1115,14 @@ int bmp280_common_probe(struct device *dev,
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
- goto out_disable_vdda;
+ return ret;
}
} else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
- goto out_disable_vdda;
+ return ret;
}
}
@@ -1111,7 +1134,7 @@ int bmp280_common_probe(struct device *dev,
if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
if (ret)
- goto out_disable_vdda;
+ return ret;
}
/* Enable runtime PM */
@@ -1126,51 +1149,21 @@ int bmp280_common_probe(struct device *dev,
pm_runtime_use_autosuspend(dev);
pm_runtime_put(dev);
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(dev, bmp280_pm_disable, dev);
if (ret)
- goto out_runtime_pm_disable;
-
-
- return 0;
+ return ret;
-out_runtime_pm_disable:
- pm_runtime_get_sync(data->dev);
- pm_runtime_put_noidle(data->dev);
- pm_runtime_disable(data->dev);
-out_disable_vdda:
- regulator_disable(data->vdda);
-out_disable_vddd:
- regulator_disable(data->vddd);
- return ret;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL(bmp280_common_probe);
-int bmp280_common_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct bmp280_data *data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- pm_runtime_get_sync(data->dev);
- pm_runtime_put_noidle(data->dev);
- pm_runtime_disable(data->dev);
- regulator_disable(data->vdda);
- regulator_disable(data->vddd);
- return 0;
-}
-EXPORT_SYMBOL(bmp280_common_remove);
-
#ifdef CONFIG_PM
static int bmp280_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmp280_data *data = iio_priv(indio_dev);
- int ret;
- ret = regulator_disable(data->vdda);
- if (ret)
- return ret;
- return regulator_disable(data->vddd);
+ return regulator_bulk_disable(BMP280_NUM_SUPPLIES, data->supplies);
}
static int bmp280_runtime_resume(struct device *dev)
@@ -1179,10 +1172,7 @@ static int bmp280_runtime_resume(struct device *dev)
struct bmp280_data *data = iio_priv(indio_dev);
int ret;
- ret = regulator_enable(data->vddd);
- if (ret)
- return ret;
- ret = regulator_enable(data->vdda);
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret)
return ret;
usleep_range(data->start_up_time, data->start_up_time + 100);
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index acd9a3784fb4..3109c8e2cc11 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -38,11 +38,6 @@ static int bmp280_i2c_probe(struct i2c_client *client,
client->irq);
}
-static int bmp280_i2c_remove(struct i2c_client *client)
-{
- return bmp280_common_remove(&client->dev);
-}
-
static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
{"BMP0280", BMP280_CHIP_ID },
{"BMP0180", BMP180_CHIP_ID },
@@ -82,7 +77,6 @@ static struct i2c_driver bmp280_i2c_driver = {
.pm = &bmp280_dev_pm_ops,
},
.probe = bmp280_i2c_probe,
- .remove = bmp280_i2c_remove,
.id_table = bmp280_i2c_id,
};
module_i2c_driver(bmp280_i2c_driver);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 9d57b7a3b134..625b86878ad8 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -86,11 +86,6 @@ static int bmp280_spi_probe(struct spi_device *spi)
spi->irq);
}
-static int bmp280_spi_remove(struct spi_device *spi)
-{
- return bmp280_common_remove(&spi->dev);
-}
-
static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp085", },
{ .compatible = "bosch,bmp180", },
@@ -118,7 +113,6 @@ static struct spi_driver bmp280_spi_driver = {
},
.id_table = bmp280_spi_id,
.probe = bmp280_spi_probe,
- .remove = bmp280_spi_remove,
};
module_spi_driver(bmp280_spi_driver);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index eda50ef65706..57ba0e85db91 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -112,7 +112,6 @@ int bmp280_common_probe(struct device *dev,
unsigned int chip,
const char *name,
int irq);
-int bmp280_common_remove(struct device *dev);
/* PM ops */
extern const struct dev_pm_ops bmp280_dev_pm_ops;
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 2354302375de..52f53f3123b1 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -114,6 +114,7 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_baro_info = {
.read_raw = &cros_ec_baro_read,
.write_raw = &cros_ec_baro_write,
+ .read_avail = &cros_ec_sensors_core_read_avail,
};
static int cros_ec_baro_probe(struct platform_device *pdev)
@@ -149,6 +150,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_FREQUENCY);
+ channel->info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
channel->scan_type.shift = 0;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index ca6863b32a5f..bd972cec4830 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 9d0d07930236..99dfe33ee402 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -1243,6 +1243,11 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
const struct zpa2326_private *priv = iio_priv(indio_dev);
int err;
+ /* Plug our own trigger event handler. */
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ goto err;
+
if (!priv->waken) {
/*
* We were already power supplied. Just clear hardware FIFO to
@@ -1250,7 +1255,7 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
*/
err = zpa2326_clear_fifo(indio_dev, 0);
if (err)
- goto err;
+ goto err_buffer_predisable;
}
if (!iio_trigger_using_own(indio_dev) && priv->waken) {
@@ -1260,16 +1265,13 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
*/
err = zpa2326_config_oneshot(indio_dev, priv->irq);
if (err)
- goto err;
+ goto err_buffer_predisable;
}
- /* Plug our own trigger event handler. */
- err = iio_triggered_buffer_postenable(indio_dev);
- if (err)
- goto err;
-
return 0;
+err_buffer_predisable:
+ iio_triggered_buffer_predisable(indio_dev);
err:
zpa2326_err(indio_dev, "failed to enable buffering (%d)", err);
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 47af54f14756..5b369645ef49 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -136,12 +136,13 @@ static inline int lidar_write_power(struct lidar_data *data, int val)
static int lidar_read_measurement(struct lidar_data *data, u16 *reg)
{
+ __be16 value;
int ret = data->xfer(data, LIDAR_REG_DATA_HBYTE |
(data->i2c_enabled ? LIDAR_REG_DATA_WORD_READ : 0),
- (u8 *) reg, 2);
+ (u8 *) &value, 2);
if (!ret)
- *reg = be16_to_cpu(*reg);
+ *reg = be16_to_cpu(value);
return ret;
}
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 612f79c53cfc..287d288e40c2 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -675,11 +675,15 @@ out:
return IRQ_HANDLED;
}
-static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
+static int sx9500_buffer_postenable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
int ret = 0, i;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->mutex);
for (i = 0; i < SX9500_NUM_CHANNELS; i++)
@@ -696,6 +700,9 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
mutex_unlock(&data->mutex);
+ if (ret)
+ iio_triggered_buffer_predisable(indio_dev);
+
return ret;
}
@@ -704,8 +711,6 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
struct sx9500_data *data = iio_priv(indio_dev);
int ret = 0, i;
- iio_triggered_buffer_predisable(indio_dev);
-
mutex_lock(&data->mutex);
for (i = 0; i < SX9500_NUM_CHANNELS; i++)
@@ -722,12 +727,13 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
mutex_unlock(&data->mutex);
+ iio_triggered_buffer_predisable(indio_dev);
+
return ret;
}
static const struct iio_buffer_setup_ops sx9500_buffer_setup_ops = {
- .preenable = sx9500_buffer_preenable,
- .postenable = iio_triggered_buffer_postenable,
+ .postenable = sx9500_buffer_postenable,
.predisable = sx9500_buffer_predisable,
};
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 737faa0901fe..e1ccb4003015 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -4,6 +4,17 @@
#
menu "Temperature sensors"
+config LTC2983
+ tristate "Analog Devices Multi-Sensor Digital Temperature Measurement System"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the LTC2983 Multi-Sensor
+ high accuracy digital temperature measurement system.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ltc2983.
+
config MAXIM_THERMOCOUPLE
tristate "Maxim thermocouple sensors"
depends on SPI
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index baca4776ca0d..d6b850b0cf63 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -3,6 +3,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_LTC2983) += ltc2983.o
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MAX31856) += max31856.o
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
new file mode 100644
index 000000000000..ddf47023364b
--- /dev/null
+++ b/drivers/iio/temperature/ltc2983.c
@@ -0,0 +1,1557 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2983 Multi-Sensor Digital Temperature Measurement System
+ * driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+/* register map */
+#define LTC2983_STATUS_REG 0x0000
+#define LTC2983_TEMP_RES_START_REG 0x0010
+#define LTC2983_TEMP_RES_END_REG 0x005F
+#define LTC2983_GLOBAL_CONFIG_REG 0x00F0
+#define LTC2983_MULT_CHANNEL_START_REG 0x00F4
+#define LTC2983_MULT_CHANNEL_END_REG 0x00F7
+#define LTC2983_MUX_CONFIG_REG 0x00FF
+#define LTC2983_CHAN_ASSIGN_START_REG 0x0200
+#define LTC2983_CHAN_ASSIGN_END_REG 0x024F
+#define LTC2983_CUST_SENS_TBL_START_REG 0x0250
+#define LTC2983_CUST_SENS_TBL_END_REG 0x03CF
+
+#define LTC2983_DIFFERENTIAL_CHAN_MIN 2
+#define LTC2983_MAX_CHANNELS_NR 20
+#define LTC2983_MIN_CHANNELS_NR 1
+#define LTC2983_SLEEP 0x97
+#define LTC2983_CUSTOM_STEINHART_SIZE 24
+#define LTC2983_CUSTOM_SENSOR_ENTRY_SZ 6
+#define LTC2983_CUSTOM_STEINHART_ENTRY_SZ 4
+
+#define LTC2983_CHAN_START_ADDR(chan) \
+ (((chan - 1) * 4) + LTC2983_CHAN_ASSIGN_START_REG)
+#define LTC2983_CHAN_RES_ADDR(chan) \
+ (((chan - 1) * 4) + LTC2983_TEMP_RES_START_REG)
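+/*
+ * e.g. channel 1 maps to assignment register 0x0200 and result
+ * register 0x0010; channel 20 maps to 0x024C and 0x005C. Each entry
+ * is 4 bytes wide.
+ */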
+#define LTC2983_THERMOCOUPLE_DIFF_MASK BIT(3)
+#define LTC2983_THERMOCOUPLE_SGL(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_DIFF_MASK, x)
+#define LTC2983_THERMOCOUPLE_OC_CURR_MASK GENMASK(1, 0)
+#define LTC2983_THERMOCOUPLE_OC_CURR(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_OC_CURR_MASK, x)
+#define LTC2983_THERMOCOUPLE_OC_CHECK_MASK BIT(2)
+#define LTC2983_THERMOCOUPLE_OC_CHECK(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_OC_CHECK_MASK, x)
+
+#define LTC2983_THERMISTOR_DIFF_MASK BIT(2)
+#define LTC2983_THERMISTOR_SGL(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_DIFF_MASK, x)
+#define LTC2983_THERMISTOR_R_SHARE_MASK BIT(1)
+#define LTC2983_THERMISTOR_R_SHARE(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_R_SHARE_MASK, x)
+#define LTC2983_THERMISTOR_C_ROTATE_MASK BIT(0)
+#define LTC2983_THERMISTOR_C_ROTATE(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_C_ROTATE_MASK, x)
+
+#define LTC2983_DIODE_DIFF_MASK BIT(2)
+#define LTC2983_DIODE_SGL(x) \
+ FIELD_PREP(LTC2983_DIODE_DIFF_MASK, x)
+#define LTC2983_DIODE_3_CONV_CYCLE_MASK BIT(1)
+#define LTC2983_DIODE_3_CONV_CYCLE(x) \
+ FIELD_PREP(LTC2983_DIODE_3_CONV_CYCLE_MASK, x)
+#define LTC2983_DIODE_AVERAGE_ON_MASK BIT(0)
+#define LTC2983_DIODE_AVERAGE_ON(x) \
+ FIELD_PREP(LTC2983_DIODE_AVERAGE_ON_MASK, x)
+
+#define LTC2983_RTD_4_WIRE_MASK BIT(3)
+#define LTC2983_RTD_ROTATION_MASK BIT(1)
+#define LTC2983_RTD_C_ROTATE(x) \
+ FIELD_PREP(LTC2983_RTD_ROTATION_MASK, x)
+#define LTC2983_RTD_KELVIN_R_SENSE_MASK GENMASK(3, 2)
+#define LTC2983_RTD_N_WIRES_MASK GENMASK(3, 2)
+#define LTC2983_RTD_N_WIRES(x) \
+ FIELD_PREP(LTC2983_RTD_N_WIRES_MASK, x)
+#define LTC2983_RTD_R_SHARE_MASK BIT(0)
+#define LTC2983_RTD_R_SHARE(x) \
+ FIELD_PREP(LTC2983_RTD_R_SHARE_MASK, x)
+
+#define LTC2983_COMMON_HARD_FAULT_MASK GENMASK(31, 30)
+#define LTC2983_COMMON_SOFT_FAULT_MASK GENMASK(27, 25)
+
+#define LTC2983_STATUS_START_MASK BIT(7)
+#define LTC2983_STATUS_START(x) FIELD_PREP(LTC2983_STATUS_START_MASK, x)
+
+#define LTC2983_STATUS_CHAN_SEL_MASK GENMASK(4, 0)
+#define LTC2983_STATUS_CHAN_SEL(x) \
+ FIELD_PREP(LTC2983_STATUS_CHAN_SEL_MASK, x)
+
+#define LTC2983_TEMP_UNITS_MASK BIT(2)
+#define LTC2983_TEMP_UNITS(x) FIELD_PREP(LTC2983_TEMP_UNITS_MASK, x)
+
+#define LTC2983_NOTCH_FREQ_MASK GENMASK(1, 0)
+#define LTC2983_NOTCH_FREQ(x) FIELD_PREP(LTC2983_NOTCH_FREQ_MASK, x)
+
+#define LTC2983_RES_VALID_MASK BIT(24)
+#define LTC2983_DATA_MASK GENMASK(23, 0)
+#define LTC2983_DATA_SIGN_BIT 23
+
+#define LTC2983_CHAN_TYPE_MASK GENMASK(31, 27)
+#define LTC2983_CHAN_TYPE(x) FIELD_PREP(LTC2983_CHAN_TYPE_MASK, x)
+
+/* cold junction for thermocouples and rsense for rtd's and thermistor's */
+#define LTC2983_CHAN_ASSIGN_MASK GENMASK(26, 22)
+#define LTC2983_CHAN_ASSIGN(x) FIELD_PREP(LTC2983_CHAN_ASSIGN_MASK, x)
+
+#define LTC2983_CUSTOM_LEN_MASK GENMASK(5, 0)
+#define LTC2983_CUSTOM_LEN(x) FIELD_PREP(LTC2983_CUSTOM_LEN_MASK, x)
+
+#define LTC2983_CUSTOM_ADDR_MASK GENMASK(11, 6)
+#define LTC2983_CUSTOM_ADDR(x) FIELD_PREP(LTC2983_CUSTOM_ADDR_MASK, x)
+
+#define LTC2983_THERMOCOUPLE_CFG_MASK GENMASK(21, 18)
+#define LTC2983_THERMOCOUPLE_CFG(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_CFG_MASK, x)
+#define LTC2983_THERMOCOUPLE_HARD_FAULT_MASK GENMASK(31, 29)
+#define LTC2983_THERMOCOUPLE_SOFT_FAULT_MASK GENMASK(28, 25)
+
+#define LTC2983_RTD_CFG_MASK GENMASK(21, 18)
+#define LTC2983_RTD_CFG(x) FIELD_PREP(LTC2983_RTD_CFG_MASK, x)
+#define LTC2983_RTD_EXC_CURRENT_MASK GENMASK(17, 14)
+#define LTC2983_RTD_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_RTD_EXC_CURRENT_MASK, x)
+#define LTC2983_RTD_CURVE_MASK GENMASK(13, 12)
+#define LTC2983_RTD_CURVE(x) FIELD_PREP(LTC2983_RTD_CURVE_MASK, x)
+
+#define LTC2983_THERMISTOR_CFG_MASK GENMASK(21, 19)
+#define LTC2983_THERMISTOR_CFG(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_CFG_MASK, x)
+#define LTC2983_THERMISTOR_EXC_CURRENT_MASK GENMASK(18, 15)
+#define LTC2983_THERMISTOR_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_EXC_CURRENT_MASK, x)
+
+#define LTC2983_DIODE_CFG_MASK GENMASK(26, 24)
+#define LTC2983_DIODE_CFG(x) FIELD_PREP(LTC2983_DIODE_CFG_MASK, x)
+#define LTC2983_DIODE_EXC_CURRENT_MASK GENMASK(23, 22)
+#define LTC2983_DIODE_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_DIODE_EXC_CURRENT_MASK, x)
+#define LTC2983_DIODE_IDEAL_FACTOR_MASK GENMASK(21, 0)
+#define LTC2983_DIODE_IDEAL_FACTOR(x) \
+ FIELD_PREP(LTC2983_DIODE_IDEAL_FACTOR_MASK, x)
+
+#define LTC2983_R_SENSE_VAL_MASK GENMASK(26, 0)
+#define LTC2983_R_SENSE_VAL(x) FIELD_PREP(LTC2983_R_SENSE_VAL_MASK, x)
+
+#define LTC2983_ADC_SINGLE_ENDED_MASK BIT(26)
+#define LTC2983_ADC_SINGLE_ENDED(x) \
+ FIELD_PREP(LTC2983_ADC_SINGLE_ENDED_MASK, x)
+
+enum {
+ LTC2983_SENSOR_THERMOCOUPLE = 1,
+ LTC2983_SENSOR_THERMOCOUPLE_CUSTOM = 9,
+ LTC2983_SENSOR_RTD = 10,
+ LTC2983_SENSOR_RTD_CUSTOM = 18,
+ LTC2983_SENSOR_THERMISTOR = 19,
+ LTC2983_SENSOR_THERMISTOR_STEINHART = 26,
+ LTC2983_SENSOR_THERMISTOR_CUSTOM = 27,
+ LTC2983_SENSOR_DIODE = 28,
+ LTC2983_SENSOR_SENSE_RESISTOR = 29,
+ LTC2983_SENSOR_DIRECT_ADC = 30,
+};
+
+#define to_thermocouple(_sensor) \
+ container_of(_sensor, struct ltc2983_thermocouple, sensor)
+
+#define to_rtd(_sensor) \
+ container_of(_sensor, struct ltc2983_rtd, sensor)
+
+#define to_thermistor(_sensor) \
+ container_of(_sensor, struct ltc2983_thermistor, sensor)
+
+#define to_diode(_sensor) \
+ container_of(_sensor, struct ltc2983_diode, sensor)
+
+#define to_rsense(_sensor) \
+ container_of(_sensor, struct ltc2983_rsense, sensor)
+
+#define to_adc(_sensor) \
+ container_of(_sensor, struct ltc2983_adc, sensor)
+
+struct ltc2983_data {
+ struct regmap *regmap;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct completion completion;
+ struct iio_chan_spec *iio_chan;
+ struct ltc2983_sensor **sensors;
+ u32 mux_delay_config;
+ u32 filter_notch_freq;
+ u16 custom_table_size;
+ u8 num_channels;
+ u8 iio_channels;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * This buffer holds the converted temperature.
+ */
+ __be32 temp ____cacheline_aligned;
+};
+
+struct ltc2983_sensor {
+ int (*fault_handler)(const struct ltc2983_data *st, const u32 result);
+ int (*assign_chan)(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor);
+ /* specifies the sensor channel */
+ u32 chan;
+ /* sensor type */
+ u32 type;
+};
+
+struct ltc2983_custom_sensor {
+ /* raw table sensor data */
+ u8 *table;
+ size_t size;
+ /* address offset */
+ s8 offset;
+ bool is_steinhart;
+};
+
+struct ltc2983_thermocouple {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 cold_junction_chan;
+};
+
+struct ltc2983_rtd {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 r_sense_chan;
+ u32 excitation_current;
+ u32 rtd_curve;
+};
+
+struct ltc2983_thermistor {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 r_sense_chan;
+ u32 excitation_current;
+};
+
+struct ltc2983_diode {
+ struct ltc2983_sensor sensor;
+ u32 sensor_config;
+ u32 excitation_current;
+ u32 ideal_factor_value;
+};
+
+struct ltc2983_rsense {
+ struct ltc2983_sensor sensor;
+ u32 r_sense_val;
+};
+
+struct ltc2983_adc {
+ struct ltc2983_sensor sensor;
+ bool single_ended;
+};
+
+/*
+ * Convert to Q format numbers. These numbers are integers where the
+ * number of integer and fractional bits is specified. The resolution
+ * is given by 1/@resolution and tells us the number of fractional
+ * bits. For instance a resolution of 2^-10 means we have 10
+ * fractional bits.
+ */
+static u32 __convert_to_raw(const u64 val, const u32 resolution)
+{
+ u64 __res = val * resolution;
+
+ /* all values are multiplied by 1000000 to remove the fraction */
+ do_div(__res, 1000000);
+
+ return __res;
+}
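+
+/* e.g. val = 1500000 (i.e. 1.5) at resolution 1024 (2^-10) yields 1536 */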
+
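+/*
+ * Same conversion for negative values: strip the sign, scale the
+ * magnitude and return its two's complement.
+ */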
+static u32 __convert_to_raw_sign(const u64 val, const u32 resolution)
+{
+ s64 __res = -(s32)val;
+
+ __res = __convert_to_raw(__res, resolution);
+
+ return (u32)-__res;
+}
+
+static int __ltc2983_fault_handler(const struct ltc2983_data *st,
+ const u32 result, const u32 hard_mask,
+ const u32 soft_mask)
+{
+ const struct device *dev = &st->spi->dev;
+
+ if (result & hard_mask) {
+ dev_err(dev, "Invalid conversion: Sensor HARD fault\n");
+ return -EIO;
+ } else if (result & soft_mask) {
+ /* just print a warning */
+ dev_warn(dev, "Suspicious conversion: Sensor SOFT fault\n");
+ }
+
+ return 0;
+}
+
+static int __ltc2983_chan_assign_common(const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor,
+ u32 chan_val)
+{
+ u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan);
+ __be32 __chan_val;
+
+ chan_val |= LTC2983_CHAN_TYPE(sensor->type);
+ dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg,
+ chan_val);
+ __chan_val = cpu_to_be32(chan_val);
+ return regmap_bulk_write(st->regmap, reg, &__chan_val,
+ sizeof(__chan_val));
+}
+
+static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
+ struct ltc2983_custom_sensor *custom,
+ u32 *chan_val)
+{
+ u32 reg;
+ u8 mult = custom->is_steinhart ? LTC2983_CUSTOM_STEINHART_ENTRY_SZ :
+ LTC2983_CUSTOM_SENSOR_ENTRY_SZ;
+ const struct device *dev = &st->spi->dev;
+ /*
+ * custom->size holds the raw size of the table. However, when
+ * configuring the sensor channel, we must write the number of
+ * entries of the table minus 1. For steinhart sensors 0 is written
+ * since the size is constant!
+ */
+ const u8 len = custom->is_steinhart ? 0 :
+ (custom->size / LTC2983_CUSTOM_SENSOR_ENTRY_SZ) - 1;
+ /*
+ * Check if the offset was assigned already. It should be for steinhart
+ * sensors. When coming from sleep, it should be assigned for all.
+ */
+ if (custom->offset < 0) {
+ /*
+ * This needs to be done again here because, from the moment
+ * when this test was done (successfully) for this custom
+ * sensor, a steinhart sensor might have been added changing
+ * custom_table_size...
+ */
+ if (st->custom_table_size + custom->size >
+ (LTC2983_CUST_SENS_TBL_END_REG -
+ LTC2983_CUST_SENS_TBL_START_REG) + 1) {
+ dev_err(dev,
+ "Not space left(%d) for new custom sensor(%zu)",
+ st->custom_table_size,
+ custom->size);
+ return -EINVAL;
+ }
+
+ custom->offset = st->custom_table_size /
+ LTC2983_CUSTOM_SENSOR_ENTRY_SZ;
+ st->custom_table_size += custom->size;
+ }
+
+ reg = (custom->offset * mult) + LTC2983_CUST_SENS_TBL_START_REG;
+
+ *chan_val |= LTC2983_CUSTOM_LEN(len);
+ *chan_val |= LTC2983_CUSTOM_ADDR(custom->offset);
+ dev_dbg(dev, "Assign custom sensor, reg:0x%04X, off:%d, sz:%zu",
+ reg, custom->offset,
+ custom->size);
+ /* write custom sensor table */
+ return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
+}
+
+static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
+ struct ltc2983_data *st,
+ const struct device_node *np,
+ const char *propname,
+ const bool is_steinhart,
+ const u32 resolution,
+ const bool has_signed)
+{
+ struct ltc2983_custom_sensor *new_custom;
+ u8 index, n_entries, tbl = 0;
+ struct device *dev = &st->spi->dev;
+ /*
+ * For custom steinhart, the full u32 is taken. For all the others
+ * the MSB is discarded.
+ */
+ const u8 n_size = is_steinhart ? 4 : 3;
+ const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
+
+ n_entries = of_property_count_elems_of_size(np, propname, e_size);
+ /* n_entries must be an even number */
+ if (!n_entries || (n_entries % 2) != 0) {
+ dev_err(dev, "Number of entries either 0 or not even\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ new_custom = devm_kzalloc(dev, sizeof(*new_custom), GFP_KERNEL);
+ if (!new_custom)
+ return ERR_PTR(-ENOMEM);
+
+ new_custom->size = n_entries * n_size;
+ /* check Steinhart size */
+ if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
+ dev_err(dev, "Steinhart sensors size(%zu) must be 24",
+ new_custom->size);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Check space on the table. */
+ if (st->custom_table_size + new_custom->size >
+ (LTC2983_CUST_SENS_TBL_END_REG -
+ LTC2983_CUST_SENS_TBL_START_REG) + 1) {
+ dev_err(dev, "No space left(%d) for new custom sensor(%zu)",
+ st->custom_table_size, new_custom->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* allocate the table */
+ new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL);
+ if (!new_custom->table)
+ return ERR_PTR(-ENOMEM);
+
+ for (index = 0; index < n_entries; index++) {
+ u64 temp = 0, j;
+ /*
+ * Steinhart sensors are configured with raw values in the
+ * devicetree. For the other sensors we must convert the
+ * value to raw. The odd indexes correspond to temperatures
+ * and always have a resolution of 1/1024. Temperatures also
+ * come in kelvin, so signed values are not possible.
+ */
+ if (!is_steinhart) {
+ of_property_read_u64_index(np, propname, index, &temp);
+
+ if ((index % 2) != 0)
+ temp = __convert_to_raw(temp, 1024);
+ else if (has_signed && (s64)temp < 0)
+ temp = __convert_to_raw_sign(temp, resolution);
+ else
+ temp = __convert_to_raw(temp, resolution);
+ } else {
+ of_property_read_u32_index(np, propname, index,
+ (u32 *)&temp);
+ }
+
+ for (j = 0; j < n_size; j++)
+ new_custom->table[tbl++] =
+ temp >> (8 * (n_size - j - 1));
+ }
+
+ new_custom->is_steinhart = is_steinhart;
+ /*
+ * Steinhart sensors are added to the table first in order to
+ * maximize table usage. If steinhart sensors were mixed with the
+ * other sensors, we might have to round up to make sure that
+ * sensor_addr - 0x250 (the start address) is a multiple of 4 (for
+ * steinhart) and a multiple of 6 for all the other sensors. Since
+ * steinhart entries are a constant 24 bytes and 24 is also a
+ * multiple of 6, the first non-steinhart sensor is guaranteed to
+ * sit at a correct address without any padding.
+ */
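+ /*
+ * e.g. two steinhart entries fill bytes 0-47 of the table; a
+ * following 6-byte-entry sensor then gets offset 48 / 6 = 8, i.e.
+ * register 0x250 + 8 * 6 = 0x280.
+ */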
+ if (is_steinhart) {
+ new_custom->offset = st->custom_table_size /
+ LTC2983_CUSTOM_STEINHART_ENTRY_SZ;
+ st->custom_table_size += new_custom->size;
+ } else {
+ /* mark as unset. This is checked later on the assign phase */
+ new_custom->offset = -1;
+ }
+
+ return new_custom;
+}
+
+static int ltc2983_thermocouple_fault_handler(const struct ltc2983_data *st,
+ const u32 result)
+{
+ return __ltc2983_fault_handler(st, result,
+ LTC2983_THERMOCOUPLE_HARD_FAULT_MASK,
+ LTC2983_THERMOCOUPLE_SOFT_FAULT_MASK);
+}
+
+static int ltc2983_common_fault_handler(const struct ltc2983_data *st,
+ const u32 result)
+{
+ return __ltc2983_fault_handler(st, result,
+ LTC2983_COMMON_HARD_FAULT_MASK,
+ LTC2983_COMMON_SOFT_FAULT_MASK);
+}
+
+static int ltc2983_thermocouple_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermocouple *thermo = to_thermocouple(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(thermo->cold_junction_chan);
+ chan_val |= LTC2983_THERMOCOUPLE_CFG(thermo->sensor_config);
+
+ if (thermo->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st, thermo->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_rtd_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rtd *rtd = to_rtd(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(rtd->r_sense_chan);
+ chan_val |= LTC2983_RTD_CFG(rtd->sensor_config);
+ chan_val |= LTC2983_RTD_EXC_CURRENT(rtd->excitation_current);
+ chan_val |= LTC2983_RTD_CURVE(rtd->rtd_curve);
+
+ if (rtd->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st, rtd->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_thermistor_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermistor *thermistor = to_thermistor(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(thermistor->r_sense_chan);
+ chan_val |= LTC2983_THERMISTOR_CFG(thermistor->sensor_config);
+ chan_val |=
+ LTC2983_THERMISTOR_EXC_CURRENT(thermistor->excitation_current);
+
+ if (thermistor->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st,
+ thermistor->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_diode_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_diode *diode = to_diode(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_DIODE_CFG(diode->sensor_config);
+ chan_val |= LTC2983_DIODE_EXC_CURRENT(diode->excitation_current);
+ chan_val |= LTC2983_DIODE_IDEAL_FACTOR(diode->ideal_factor_value);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_r_sense_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rsense *rsense = to_rsense(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_R_SENSE_VAL(rsense->r_sense_val);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_adc_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_adc *adc = to_adc(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_ADC_SINGLE_ENDED(adc->single_ended);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static struct ltc2983_sensor *ltc2983_thermocouple_new(
+ const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermocouple *thermo;
+ struct device_node *phandle;
+ u32 oc_current;
+ int ret;
+
+ thermo = devm_kzalloc(&st->spi->dev, sizeof(*thermo), GFP_KERNEL);
+ if (!thermo)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1);
+
+ ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp",
+ &oc_current);
+ if (!ret) {
+ switch (oc_current) {
+ case 10:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(0);
+ break;
+ case 100:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(1);
+ break;
+ case 500:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(2);
+ break;
+ case 1000:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(3);
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid open circuit current:%u", oc_current);
+ return ERR_PTR(-EINVAL);
+ }
+
+ thermo->sensor_config |= LTC2983_THERMOCOUPLE_OC_CHECK(1);
+ }
+ /* validate channel index */
+ if (!(thermo->sensor_config & LTC2983_THERMOCOUPLE_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+			"Invalid chan:%d for differential thermocouple",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0);
+ if (phandle) {
+ int ret;
+
+ ret = of_property_read_u32(phandle, "reg",
+ &thermo->cold_junction_chan);
+ if (ret) {
+			/*
+			 * This would be caught later but we can just return
+			 * the error right away.
+			 */
+ dev_err(&st->spi->dev, "Property reg must be given\n");
+ of_node_put(phandle);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* check custom sensor */
+ if (sensor->type == LTC2983_SENSOR_THERMOCOUPLE_CUSTOM) {
+ const char *propname = "adi,custom-thermocouple";
+
+ thermo->custom = __ltc2983_custom_sensor_new(st, child,
+ propname, false,
+ 16384, true);
+ if (IS_ERR(thermo->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(thermo->custom);
+ }
+ }
+
+ /* set common parameters */
+ thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler;
+ thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan;
+
+ of_node_put(phandle);
+ return &thermo->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rtd *rtd;
+ int ret = 0;
+ struct device *dev = &st->spi->dev;
+ struct device_node *phandle;
+ u32 excitation_current = 0, n_wires = 0;
+
+ rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ return ERR_PTR(-ENOMEM);
+
+ phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
+ if (!phandle) {
+ dev_err(dev, "Property adi,rsense-handle missing or invalid");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan);
+ if (ret) {
+ dev_err(dev, "Property reg must be given\n");
+ goto fail;
+ }
+
+ ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires);
+ if (!ret) {
+ switch (n_wires) {
+ case 2:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(0);
+ break;
+ case 3:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(1);
+ break;
+ case 4:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(2);
+ break;
+ case 5:
+ /* 4 wires, Kelvin Rsense */
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(3);
+ break;
+ default:
+ dev_err(dev, "Invalid number of wires:%u\n", n_wires);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (of_property_read_bool(child, "adi,rsense-share")) {
+ /* Current rotation is only available with rsense sharing */
+ if (of_property_read_bool(child, "adi,current-rotate")) {
+ if (n_wires == 2 || n_wires == 3) {
+ dev_err(dev,
+ "Rotation not allowed for 2/3 Wire RTDs");
+ ret = -EINVAL;
+ goto fail;
+ }
+ rtd->sensor_config |= LTC2983_RTD_C_ROTATE(1);
+ } else {
+ rtd->sensor_config |= LTC2983_RTD_R_SHARE(1);
+ }
+ }
+	/*
+	 * RTD channel indexes are a bit more complicated to validate.
+	 * For a 4-wire RTD with rotation, the channel selection cannot be
+	 * greater than 19 since chan + 1 is used in this configuration.
+	 * For 4-wire RTDs with Kelvin rsense, the rsense channel cannot be
+	 * <= 2 since channel - 1 and channel - 2 are used.
+	 */
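+	/*
+	 * Example (channel numbers assumed for illustration): a 4-wire RTD
+	 * with rotation on channel 20 would be rejected because channel 21
+	 * does not exist, and a Kelvin rsense on rsense channel 2 would be
+	 * rejected because it would need channels 1 and 0.
+	 */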
+ if (rtd->sensor_config & LTC2983_RTD_4_WIRE_MASK) {
+ /* 4-wire */
+ u8 min = LTC2983_DIFFERENTIAL_CHAN_MIN,
+ max = LTC2983_MAX_CHANNELS_NR;
+
+ if (rtd->sensor_config & LTC2983_RTD_ROTATION_MASK)
+ max = LTC2983_MAX_CHANNELS_NR - 1;
+
+ if (((rtd->sensor_config & LTC2983_RTD_KELVIN_R_SENSE_MASK)
+ == LTC2983_RTD_KELVIN_R_SENSE_MASK) &&
+ (rtd->r_sense_chan <= min)) {
+			/* Kelvin rsense */
+			dev_err(dev,
+				"Invalid rsense chan:%d for Kelvin rsense",
+ rtd->r_sense_chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (sensor->chan < min || sensor->chan > max) {
+			dev_err(dev, "Invalid chan:%d for the RTD config",
+ sensor->chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /* same as differential case */
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+				"Invalid chan:%d for RTD", sensor->chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* check custom sensor */
+ if (sensor->type == LTC2983_SENSOR_RTD_CUSTOM) {
+ rtd->custom = __ltc2983_custom_sensor_new(st, child,
+ "adi,custom-rtd",
+ false, 2048, false);
+ if (IS_ERR(rtd->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(rtd->custom);
+ }
+ }
+
+ /* set common parameters */
+ rtd->sensor.fault_handler = ltc2983_common_fault_handler;
+ rtd->sensor.assign_chan = ltc2983_rtd_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
+ if (ret) {
+ /* default to 5uA */
+ rtd->excitation_current = 1;
+ } else {
+ switch (excitation_current) {
+ case 5:
+ rtd->excitation_current = 0x01;
+ break;
+ case 10:
+ rtd->excitation_current = 0x02;
+ break;
+ case 25:
+ rtd->excitation_current = 0x03;
+ break;
+ case 50:
+ rtd->excitation_current = 0x04;
+ break;
+ case 100:
+ rtd->excitation_current = 0x05;
+ break;
+ case 250:
+ rtd->excitation_current = 0x06;
+ break;
+ case 500:
+ rtd->excitation_current = 0x07;
+ break;
+ case 1000:
+ rtd->excitation_current = 0x08;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
+
+ of_node_put(phandle);
+ return &rtd->sensor;
+fail:
+ of_node_put(phandle);
+ return ERR_PTR(ret);
+}
+
+static struct ltc2983_sensor *ltc2983_thermistor_new(
+ const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermistor *thermistor;
+ struct device *dev = &st->spi->dev;
+ struct device_node *phandle;
+ u32 excitation_current = 0;
+ int ret = 0;
+
+ thermistor = devm_kzalloc(dev, sizeof(*thermistor), GFP_KERNEL);
+ if (!thermistor)
+ return ERR_PTR(-ENOMEM);
+
+ phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
+ if (!phandle) {
+ dev_err(dev, "Property adi,rsense-handle missing or invalid");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan);
+ if (ret) {
+		dev_err(dev, "rsense channel must be configured\n");
+ goto fail;
+ }
+
+ if (of_property_read_bool(child, "adi,single-ended")) {
+ thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
+ } else if (of_property_read_bool(child, "adi,rsense-share")) {
+ /* rotation is only possible if sharing rsense */
+ if (of_property_read_bool(child, "adi,current-rotate"))
+ thermistor->sensor_config =
+ LTC2983_THERMISTOR_C_ROTATE(1);
+ else
+ thermistor->sensor_config =
+ LTC2983_THERMISTOR_R_SHARE(1);
+ }
+ /* validate channel index */
+ if (!(thermistor->sensor_config & LTC2983_THERMISTOR_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+			"Invalid chan:%d for differential thermistor",
+ sensor->chan);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* check custom sensor */
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ bool steinhart = false;
+ const char *propname;
+
+ if (sensor->type == LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ steinhart = true;
+ propname = "adi,custom-steinhart";
+ } else {
+ propname = "adi,custom-thermistor";
+ }
+
+ thermistor->custom = __ltc2983_custom_sensor_new(st, child,
+ propname,
+ steinhart,
+ 64, false);
+ if (IS_ERR(thermistor->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(thermistor->custom);
+ }
+ }
+ /* set common parameters */
+ thermistor->sensor.fault_handler = ltc2983_common_fault_handler;
+ thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-nanoamp",
+ &excitation_current);
+ if (ret) {
+ /* Auto range is not allowed for custom sensors */
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
+ /* default to 1uA */
+ thermistor->excitation_current = 0x03;
+ else
+ /* default to auto-range */
+ thermistor->excitation_current = 0x0c;
+ } else {
+ switch (excitation_current) {
+ case 0:
+ /* auto range */
+ if (sensor->type >=
+ LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ dev_err(&st->spi->dev,
+ "Auto Range not allowed for custom sensors\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ thermistor->excitation_current = 0x0c;
+ break;
+ case 250:
+ thermistor->excitation_current = 0x01;
+ break;
+ case 500:
+ thermistor->excitation_current = 0x02;
+ break;
+ case 1000:
+ thermistor->excitation_current = 0x03;
+ break;
+ case 5000:
+ thermistor->excitation_current = 0x04;
+ break;
+ case 10000:
+ thermistor->excitation_current = 0x05;
+ break;
+ case 25000:
+ thermistor->excitation_current = 0x06;
+ break;
+ case 50000:
+ thermistor->excitation_current = 0x07;
+ break;
+ case 100000:
+ thermistor->excitation_current = 0x08;
+ break;
+ case 250000:
+ thermistor->excitation_current = 0x09;
+ break;
+ case 500000:
+ thermistor->excitation_current = 0x0a;
+ break;
+ case 1000000:
+ thermistor->excitation_current = 0x0b;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ of_node_put(phandle);
+ return &thermistor->sensor;
+fail:
+ of_node_put(phandle);
+ return ERR_PTR(ret);
+}
+
+static struct ltc2983_sensor *ltc2983_diode_new(
+ const struct device_node *child,
+ const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_diode *diode;
+ u32 temp = 0, excitation_current = 0;
+ int ret;
+
+ diode = devm_kzalloc(&st->spi->dev, sizeof(*diode), GFP_KERNEL);
+ if (!diode)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ diode->sensor_config = LTC2983_DIODE_SGL(1);
+
+ if (of_property_read_bool(child, "adi,three-conversion-cycles"))
+ diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1);
+
+ if (of_property_read_bool(child, "adi,average-on"))
+ diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1);
+
+ /* validate channel index */
+ if (!(diode->sensor_config & LTC2983_DIODE_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+			"Invalid chan:%d for differential diode",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+ /* set common parameters */
+ diode->sensor.fault_handler = ltc2983_common_fault_handler;
+ diode->sensor.assign_chan = ltc2983_diode_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
+ if (!ret) {
+ switch (excitation_current) {
+ case 10:
+ diode->excitation_current = 0x00;
+ break;
+ case 20:
+ diode->excitation_current = 0x01;
+ break;
+ case 40:
+ diode->excitation_current = 0x02;
+ break;
+ case 80:
+ diode->excitation_current = 0x03;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ of_property_read_u32(child, "adi,ideal-factor-value", &temp);
+
+ /* 2^20 resolution */
+ diode->ideal_factor_value = __convert_to_raw(temp, 1048576);
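+	/*
+	 * Worked example (illustrative): an ideality factor of 1.004 is
+	 * given in the devicetree as 1004000 (scaled by 1000000) and maps
+	 * to 1004000 * 1048576 / 1000000 ~= 1052770 in raw 2^20 format.
+	 */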
+
+ return &diode->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rsense *rsense;
+ int ret;
+ u32 temp;
+
+ rsense = devm_kzalloc(&st->spi->dev, sizeof(*rsense), GFP_KERNEL);
+ if (!rsense)
+ return ERR_PTR(-ENOMEM);
+
+ /* validate channel index */
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+		dev_err(&st->spi->dev, "Invalid chan:%d for r_sense",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
+ if (ret) {
+ dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * Times 1000 because we have milli-ohms and __convert_to_raw
+ * expects scales of 1000000 which are used for all other
+ * properties.
+ * 2^10 resolution
+ */
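+	/*
+	 * Worked example (illustrative): a 100 ohm sense resistor is given
+	 * as 100000 milli-ohms, so the raw value becomes
+	 * (100000 * 1000) * 1024 / 1000000 = 102400.
+	 */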
+ rsense->r_sense_val = __convert_to_raw((u64)temp * 1000, 1024);
+
+ /* set common parameters */
+ rsense->sensor.assign_chan = ltc2983_r_sense_assign_chan;
+
+ return &rsense->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_adc *adc;
+
+ adc = devm_kzalloc(&st->spi->dev, sizeof(*adc), GFP_KERNEL);
+ if (!adc)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ adc->single_ended = true;
+
+ if (!adc->single_ended &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev, "Invalid chan:%d for differential adc\n",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+ /* set common parameters */
+ adc->sensor.assign_chan = ltc2983_adc_assign_chan;
+ adc->sensor.fault_handler = ltc2983_common_fault_handler;
+
+ return &adc->sensor;
+}
+
+static int ltc2983_chan_read(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor, int *val)
+{
+ u32 start_conversion = 0;
+ int ret;
+ unsigned long time;
+
+ start_conversion = LTC2983_STATUS_START(true);
+ start_conversion |= LTC2983_STATUS_CHAN_SEL(sensor->chan);
+ dev_dbg(&st->spi->dev, "Start conversion on chan:%d, status:%02X\n",
+ sensor->chan, start_conversion);
+ /* start conversion */
+ ret = regmap_write(st->regmap, LTC2983_STATUS_REG, start_conversion);
+ if (ret)
+ return ret;
+
+ reinit_completion(&st->completion);
+	/*
+	 * Wait for the conversion to complete.
+	 * 300 ms should be more than enough: depending on the sensor
+	 * configuration, there are 2 or 3 conversion cycles of 82 ms each,
+	 * so the worst case is about 246 ms.
+	 */
+ time = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(300));
+ if (!time) {
+ dev_warn(&st->spi->dev, "Conversion timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* read the converted data */
+ ret = regmap_bulk_read(st->regmap, LTC2983_CHAN_RES_ADDR(sensor->chan),
+ &st->temp, sizeof(st->temp));
+ if (ret)
+ return ret;
+
+ *val = __be32_to_cpu(st->temp);
+
+ if (!(LTC2983_RES_VALID_MASK & *val)) {
+ dev_err(&st->spi->dev, "Invalid conversion detected\n");
+ return -EIO;
+ }
+
+ ret = sensor->fault_handler(st, *val);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32((*val) & LTC2983_DATA_MASK, LTC2983_DATA_SIGN_BIT);
+ return 0;
+}
+
+static int ltc2983_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2983_data *st = iio_priv(indio_dev);
+ int ret;
+
+ /* sanity check */
+ if (chan->address >= st->num_channels) {
+		dev_err(&st->spi->dev, "Invalid chan address:%lu",
+ chan->address);
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ltc2983_chan_read(st, st->sensors[chan->address], val);
+ mutex_unlock(&st->lock);
+ return ret ?: IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* value in milli degrees */
+ *val = 1000;
+ /* 2^10 */
+ *val2 = 1024;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_VOLTAGE:
+ /* value in millivolt */
+ *val = 1000;
+ /* 2^21 */
+ *val2 = 2097152;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
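+/*
+ * Worked example for the scale factors above (illustrative): a raw
+ * temperature word of 25600 reads as 25600 * 1000 / 1024 = 25000
+ * milli-degrees (25 degrees C), and a raw voltage word of 2097152
+ * reads as 2097152 * 1000 / 2097152 = 1000 millivolts (1 V).
+ */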
+
+static int ltc2983_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltc2983_data *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ else
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static irqreturn_t ltc2983_irq_handler(int irq, void *data)
+{
+ struct ltc2983_data *st = data;
+
+ complete(&st->completion);
+ return IRQ_HANDLED;
+}
+
+#define LTC2983_CHAN(__type, index, __address) ({ \
+ struct iio_chan_spec __chan = { \
+ .type = __type, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = __address, \
+ }; \
+ __chan; \
+})
+
+static int ltc2983_parse_dt(struct ltc2983_data *st)
+{
+ struct device_node *child;
+ struct device *dev = &st->spi->dev;
+ int ret = 0, chan = 0, channel_avail_mask = 0;
+
+ of_property_read_u32(dev->of_node, "adi,mux-delay-config-us",
+ &st->mux_delay_config);
+
+ of_property_read_u32(dev->of_node, "adi,filter-notch-freq",
+ &st->filter_notch_freq);
+
+ st->num_channels = of_get_available_child_count(dev->of_node);
+ st->sensors = devm_kcalloc(dev, st->num_channels, sizeof(*st->sensors),
+ GFP_KERNEL);
+ if (!st->sensors)
+ return -ENOMEM;
+
+ st->iio_channels = st->num_channels;
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct ltc2983_sensor sensor;
+
+ ret = of_property_read_u32(child, "reg", &sensor.chan);
+ if (ret) {
+			dev_err(dev, "reg property must be given for child nodes\n");
+ return ret;
+ }
+
+ /* check if we have a valid channel */
+ if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
+ sensor.chan > LTC2983_MAX_CHANNELS_NR) {
+ dev_err(dev,
+ "chan:%d must be from 1 to 20\n", sensor.chan);
+ return -EINVAL;
+ } else if (channel_avail_mask & BIT(sensor.chan)) {
+ dev_err(dev, "chan:%d already in use\n", sensor.chan);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(child, "adi,sensor-type",
+ &sensor.type);
+ if (ret) {
+ dev_err(dev,
+				"adi,sensor-type property must be given for child nodes\n");
+ return ret;
+ }
+
+		dev_dbg(dev, "Create new sensor, type %u, chan %u",
+ sensor.type,
+ sensor.chan);
+
+ if (sensor.type >= LTC2983_SENSOR_THERMOCOUPLE &&
+ sensor.type <= LTC2983_SENSOR_THERMOCOUPLE_CUSTOM) {
+ st->sensors[chan] = ltc2983_thermocouple_new(child, st,
+ &sensor);
+ } else if (sensor.type >= LTC2983_SENSOR_RTD &&
+ sensor.type <= LTC2983_SENSOR_RTD_CUSTOM) {
+ st->sensors[chan] = ltc2983_rtd_new(child, st, &sensor);
+ } else if (sensor.type >= LTC2983_SENSOR_THERMISTOR &&
+ sensor.type <= LTC2983_SENSOR_THERMISTOR_CUSTOM) {
+ st->sensors[chan] = ltc2983_thermistor_new(child, st,
+ &sensor);
+ } else if (sensor.type == LTC2983_SENSOR_DIODE) {
+ st->sensors[chan] = ltc2983_diode_new(child, st,
+ &sensor);
+ } else if (sensor.type == LTC2983_SENSOR_SENSE_RESISTOR) {
+ st->sensors[chan] = ltc2983_r_sense_new(child, st,
+ &sensor);
+ /* don't add rsense to iio */
+ st->iio_channels--;
+ } else if (sensor.type == LTC2983_SENSOR_DIRECT_ADC) {
+ st->sensors[chan] = ltc2983_adc_new(child, st, &sensor);
+ } else {
+ dev_err(dev, "Unknown sensor type %d\n", sensor.type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(st->sensors[chan])) {
+ dev_err(dev, "Failed to create sensor %ld",
+ PTR_ERR(st->sensors[chan]));
+ return PTR_ERR(st->sensors[chan]);
+ }
+ /* set generic sensor parameters */
+ st->sensors[chan]->chan = sensor.chan;
+ st->sensors[chan]->type = sensor.type;
+
+ channel_avail_mask |= BIT(sensor.chan);
+ chan++;
+ }
+
+ return 0;
+}
+
+static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
+{
+ u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0;
+ int ret;
+ unsigned long time;
+
+ /* make sure the device is up */
+ time = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(250));
+
+ if (!time) {
+ dev_err(&st->spi->dev, "Device startup timed out\n");
+ return -ETIMEDOUT;
+ }
+
+	st->iio_chan = devm_kcalloc(&st->spi->dev, st->iio_channels,
+				    sizeof(*st->iio_chan), GFP_KERNEL);
+
+ if (!st->iio_chan)
+ return -ENOMEM;
+
+ ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG,
+ LTC2983_NOTCH_FREQ_MASK,
+ LTC2983_NOTCH_FREQ(st->filter_notch_freq));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, LTC2983_MUX_CONFIG_REG,
+ st->mux_delay_config);
+ if (ret)
+ return ret;
+
+ for (chan = 0; chan < st->num_channels; chan++) {
+ u32 chan_type = 0, *iio_chan;
+
+ ret = st->sensors[chan]->assign_chan(st, st->sensors[chan]);
+ if (ret)
+ return ret;
+ /*
+ * The assign_iio flag is necessary for when the device is
+ * coming out of sleep. In that case, we just need to
+ * re-configure the device channels.
+ * We also don't assign iio channels for rsense.
+ */
+ if (st->sensors[chan]->type == LTC2983_SENSOR_SENSE_RESISTOR ||
+ !assign_iio)
+ continue;
+
+ /* assign iio channel */
+ if (st->sensors[chan]->type != LTC2983_SENSOR_DIRECT_ADC) {
+ chan_type = IIO_TEMP;
+ iio_chan = &iio_chan_t;
+ } else {
+ chan_type = IIO_VOLTAGE;
+ iio_chan = &iio_chan_v;
+ }
+
+		/*
+		 * Add chan as the iio .address so that we can directly
+		 * reference the sensor given the iio_chan_spec.
+		 */
+ st->iio_chan[iio_idx++] = LTC2983_CHAN(chan_type, (*iio_chan)++,
+ chan);
+ }
+
+ return 0;
+}
+
+static const struct regmap_range ltc2983_reg_ranges[] = {
+ regmap_reg_range(LTC2983_STATUS_REG, LTC2983_STATUS_REG),
+ regmap_reg_range(LTC2983_TEMP_RES_START_REG, LTC2983_TEMP_RES_END_REG),
+ regmap_reg_range(LTC2983_GLOBAL_CONFIG_REG, LTC2983_GLOBAL_CONFIG_REG),
+ regmap_reg_range(LTC2983_MULT_CHANNEL_START_REG,
+ LTC2983_MULT_CHANNEL_END_REG),
+ regmap_reg_range(LTC2983_MUX_CONFIG_REG, LTC2983_MUX_CONFIG_REG),
+ regmap_reg_range(LTC2983_CHAN_ASSIGN_START_REG,
+ LTC2983_CHAN_ASSIGN_END_REG),
+ regmap_reg_range(LTC2983_CUST_SENS_TBL_START_REG,
+ LTC2983_CUST_SENS_TBL_END_REG),
+};
+
+static const struct regmap_access_table ltc2983_reg_table = {
+ .yes_ranges = ltc2983_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltc2983_reg_ranges),
+};
+
+/*
+ * Only 12 register address bits are actually needed, but the device
+ * expects a full command byte (R/W) followed by a 16-bit address, hence
+ * reg_bits = 24.
+ */
+static const struct regmap_config ltc2983_regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 8,
+ .wr_table = &ltc2983_reg_table,
+ .rd_table = &ltc2983_reg_table,
+ .read_flag_mask = GENMASK(1, 0),
+ .write_flag_mask = BIT(1),
+};
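+/*
+ * Illustrative transaction layout, assuming regmap ORs the flag masks
+ * into the top byte of the 24-bit register field: a read of the status
+ * register would put 0x03 (read command) on the wire followed by the
+ * 16-bit address, while a write would use 0x02.
+ */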
+
+static const struct iio_info ltc2983_iio_info = {
+ .read_raw = ltc2983_read_raw,
+ .debugfs_reg_access = ltc2983_reg_access,
+};
+
+static int ltc2983_probe(struct spi_device *spi)
+{
+ struct ltc2983_data *st;
+ struct iio_dev *indio_dev;
+ const char *name = spi_get_device_id(spi)->name;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->regmap = devm_regmap_init_spi(spi, &ltc2983_regmap_config);
+ if (IS_ERR(st->regmap)) {
+ dev_err(&spi->dev, "Failed to initialize regmap\n");
+ return PTR_ERR(st->regmap);
+ }
+
+ mutex_init(&st->lock);
+ init_completion(&st->completion);
+ st->spi = spi;
+ spi_set_drvdata(spi, st);
+
+ ret = ltc2983_parse_dt(st);
+ if (ret)
+ return ret;
+	/*
+	 * Request the irq now so it can be used to sync the device
+	 * startup in ltc2983_setup().
+	 */
+ ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
+ IRQF_TRIGGER_RISING, name, st);
+ if (ret) {
+ dev_err(&spi->dev, "failed to request an irq, %d", ret);
+ return ret;
+ }
+
+ ret = ltc2983_setup(st, true);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = name;
+ indio_dev->num_channels = st->iio_channels;
+ indio_dev->channels = st->iio_chan;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ltc2983_iio_info;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static int __maybe_unused ltc2983_resume(struct device *dev)
+{
+ struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
+ int dummy;
+
+ /* dummy read to bring the device out of sleep */
+ regmap_read(st->regmap, LTC2983_STATUS_REG, &dummy);
+ /* we need to re-assign the channels */
+ return ltc2983_setup(st, false);
+}
+
+static int __maybe_unused ltc2983_suspend(struct device *dev)
+{
+ struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
+
+ return regmap_write(st->regmap, LTC2983_STATUS_REG, LTC2983_SLEEP);
+}
+
+static SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend, ltc2983_resume);
+
+static const struct spi_device_id ltc2983_id_table[] = {
+ { "ltc2983" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ltc2983_id_table);
+
+static const struct of_device_id ltc2983_of_match[] = {
+ { .compatible = "adi,ltc2983" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltc2983_of_match);
+
+static struct spi_driver ltc2983_driver = {
+ .driver = {
+ .name = "ltc2983",
+ .of_match_table = ltc2983_of_match,
+ .pm = &ltc2983_pm_ops,
+ },
+ .probe = ltc2983_probe,
+ .id_table = ltc2983_id_table,
+};
+
+module_spi_driver(ltc2983_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices LTC2983 SPI Temperature sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index f184ba5601d9..73ed550e3fc9 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -284,6 +284,8 @@ static int max31856_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->info = &max31856_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = max31856_channels;
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 2ab68282d0b6..d1360605209c 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -194,7 +194,7 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
default:
*val = 250; /* 1000 * 0.25 */
ret = IIO_VAL_INT;
- };
+ }
break;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b44b1c322ec8..ade86388434f 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -83,7 +83,6 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 09881bd5f12d..9a8871e21545 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- nldev.o restrack.o counters.o
+ nldev.o restrack.o counters.o ib_core_uverbs.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 00fb3eacda19..d535995711c3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -819,22 +819,16 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table)
{
int i;
- bool deleted = false;
if (!table)
return;
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (is_gid_entry_valid(table->data_vec[i])) {
+ if (is_gid_entry_valid(table->data_vec[i]))
del_gid(ib_dev, port, table, i);
- deleted = true;
- }
}
mutex_unlock(&table->lock);
-
- if (deleted)
- dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5920c0085d35..455b3659d84b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,36 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/completion.h>
@@ -246,7 +220,7 @@ struct cm_work {
};
struct cm_timewait_info {
- struct cm_work work; /* Must be first. */
+ struct cm_work work;
struct list_head list;
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
@@ -263,7 +237,7 @@ struct cm_id_private {
struct rb_node sidr_id_node;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
* Protected by the cm.lock spinlock. */
int listen_sharecount;
@@ -308,7 +282,7 @@ static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
- if (atomic_dec_and_test(&cm_id_priv->refcount))
+ if (refcount_dec_and_test(&cm_id_priv->refcount))
complete(&cm_id_priv->comp);
}
@@ -365,7 +339,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m->ah = ah;
m->retries = cm_id_priv->max_cm_retries;
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
@@ -626,7 +600,7 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -883,7 +857,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
INIT_LIST_HEAD(&cm_id_priv->prim_list);
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
+ refcount_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
error:
@@ -1230,7 +1204,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
spin_unlock_irqrestore(&cm.lock, flags);
return ERR_PTR(-EINVAL);
}
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
++cm_id_priv->listen_sharecount;
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1525,14 +1499,6 @@ static int cm_issue_rej(struct cm_port *port,
return ret;
}
-static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
- __be32 local_qpn, __be32 remote_qpn)
-{
- return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
- ((local_ca_guid == remote_ca_guid) &&
- (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
-}
-
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
return ((req_msg->alt_local_lid) ||
@@ -1895,8 +1861,8 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
NULL, 0);
goto out;
}
- atomic_inc(&listen_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&listen_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
@@ -2052,7 +2018,7 @@ static int cm_req_handler(struct cm_work *work)
return 0;
rejected:
- atomic_dec(&cm_id_priv->refcount);
+ refcount_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
free_timeinfo:
kfree(cm_id_priv->timewait_info);
@@ -2826,7 +2792,7 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
cm_local_id(timewait_info->work.local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -3434,7 +3400,7 @@ static int cm_timewait_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
int ret;
- timewait_info = (struct cm_timewait_info *)work;
+ timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
@@ -3596,8 +3562,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
- atomic_inc(&cur_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cur_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 3d16d614aff6..92d7260ac913 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING the madirectory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use source and binary forms, with or
- * withmodification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retathe above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHWARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(CM_MSGS_H)
+#ifndef CM_MSGS_H
#define CM_MSGS_H
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d78f67623f24..25f2b70fd8ef 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1,36 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
- * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
+ * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/completion.h>
@@ -2531,7 +2504,9 @@ EXPORT_SYMBOL(rdma_set_service_type);
* This function should be called before rdma_connect() on active side,
* and on passive side before rdma_accept(). It is applicable to primary
* path only. The timeout will affect the local side of the QP, it is not
- * negotiated with remote side and zero disables the timer.
+ * negotiated with remote side and zero disables the timer. In case it is
+ * set before rdma_resolve_route, the value will also be used to determine
+ * PacketLifeTime for RoCE.
*
* Return: 0 for success
*/
@@ -2828,22 +2803,65 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
return 0;
}
-static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
- int prio;
struct net_device *dev;
- prio = rt_tos2priority(tos);
- dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ dev = vlan_dev_real_dev(vlan_ndev);
if (dev->num_tc)
return netdev_get_prio_tc_map(dev, prio);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+struct iboe_prio_tc_map {
+ int input_prio;
+ int output_tc;
+ bool found;
+};
+
+static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
+{
+ struct iboe_prio_tc_map *map = data;
+
+ if (is_vlan_dev(dev))
+ map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
+ else if (dev->num_tc)
+ map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
+ else
+ map->output_tc = 0;
+	/* We are interested only in the first-level VLAN device, so always
+	 * return 1 to stop iterating over the next-level devices.
+	 */
+ map->found = true;
+ return 1;
+}
+
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ struct iboe_prio_tc_map prio_tc_map = {};
+ int prio = rt_tos2priority(tos);
+
+ /* If VLAN device, get it directly from the VLAN netdev */
if (is_vlan_dev(ndev))
- return (vlan_dev_get_egress_qos_mask(ndev, prio) &
- VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-#endif
- return 0;
+ return get_vlan_ndev_tc(ndev, prio);
+
+ prio_tc_map.input_prio = prio;
+ rcu_read_lock();
+ netdev_walk_all_lower_dev_rcu(ndev,
+ get_lower_vlan_dev_tc,
+ &prio_tc_map);
+ rcu_read_unlock();
+	/* If a map is found from a lower device, use it; otherwise
+	 * continue with the current netdevice to get the priority-to-tc map.
+	 */
+ if (prio_tc_map.found)
+ return prio_tc_map.output_tc;
+ else if (ndev->num_tc)
+ return netdev_get_prio_tc_map(ndev, prio);
+ else
+ return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
@@ -2897,7 +2915,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->rate = iboe_get_rate(ndev);
dev_put(ndev);
route->path_rec->packet_life_time_selector = IB_SA_EQ;
- route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+	/* In case ACK timeout is set, use this value to calculate
+	 * PacketLifeTime. As per IBTA 12.7.34,
+	 * local ACK timeout = (2 * PacketLifeTime + Local CA's ACK delay).
+	 * Assuming a negligible local ACK delay, we can use
+	 * PacketLifeTime = local ACK timeout/2
+	 * as a reasonable approximation for RoCE networks. Since both
+	 * fields are 4.096 us * 2^value encodings, halving the timeout is
+	 * the same as subtracting one from the encoded value.
+	 */
+ route->path_rec->packet_life_time = id_priv->timeout_set ?
+ id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
+
if (!route->path_rec->mtu) {
ret = -EINVAL;
goto err2;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 9d07378b5b42..3645e092e1c7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -388,4 +388,15 @@ int ib_device_set_netns_put(struct sk_buff *skb,
int rdma_nl_net_init(struct rdma_dev_net *rnet);
void rdma_nl_net_exit(struct rdma_dev_net *rnet);
+
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+ struct rdma_user_mmap_entry *entry;
+};
+
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 680ad27f497d..8434ec082c3a 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,11 +149,18 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (!rdma_is_visible_in_pid_ns(&qp->res))
- return false;
-
- /* Ensure that counter belongs to the right PID */
- if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
+	/*
+	 * Ensure that the counter belongs to the right PID. This operation
+	 * can race with user space, which may kill the process and leave
+	 * the QP and counters orphaned.
+	 *
+	 * It is not a big deal because the exited task will leave both the
+	 * QP and the counter in the same bucket of the zombie process. Just
+	 * ensure that the process is still alive before proceeding.
+	 */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task) ||
+ !task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -229,9 +236,6 @@ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
rt = &dev->res[RDMA_RESTRACK_COUNTER];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
counter = container_of(res, struct rdma_counter, res);
if ((counter->device != qp->device) || (counter->port != port))
goto next;
@@ -412,9 +416,6 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res))
- goto err;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
goto err;
@@ -445,11 +446,6 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res)) {
- rdma_restrack_put(res);
- return NULL;
- }
-
counter = container_of(res, struct rdma_counter, res);
kref_get(&counter->kref);
rdma_restrack_put(res);
@@ -463,10 +459,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
u32 qp_num, u32 counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
+ port_counter = &dev->port_data[port].port_counter;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
@@ -503,6 +504,7 @@ err:
int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
u32 qp_num, u32 *counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
@@ -510,9 +512,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
- if (!dev->port_data[port].port_counter.hstats)
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
return -EOPNOTSUPP;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 50a92442c4f7..9ebb09a75049 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -128,17 +128,14 @@ module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
"Share device among net namespaces; default=1 (shared)");
/**
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
+ * rdma_dev_access_netns() - Return whether an rdma device can be accessed
* from a specified net namespace or not.
- * @device: Pointer to rdma device which needs to be checked
+ * @dev: Pointer to rdma device which needs to be checked
* @net: Pointer to net namesapce for which access to be checked
*
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
- * from a specified net namespace or not. When
- * rdma device is in shared mode, it ignores the
- * net namespace. When rdma device is exclusive
- * to a net namespace, rdma device net namespace is
- * checked against the specified one.
+ * When the rdma device is in shared mode, it ignores the net namespace.
+ * When the rdma device is exclusive to a net namespace, rdma device net
+ * namespace is checked against the specified one.
*/
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
@@ -1199,9 +1196,21 @@ static void setup_dma_device(struct ib_device *device)
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
- /* Setup default max segment size for all IB devices */
- dma_set_max_seg_size(device->dma_device, SZ_2G);
+ if (!device->dev.dma_parms) {
+ if (parent) {
+			/*
+			 * The caller did not provide DMA parameters, so
+			 * 'parent' probably represents a PCI device. The
+			 * PCI core sets the maximum segment size to 64 KB.
+			 * Increase this parameter to 2 GB.
+			 */
+ device->dev.dma_parms = parent->dma_parms;
+ dma_set_max_seg_size(device->dma_device, SZ_2G);
+ } else {
+ WARN_ON_ONCE(true);
+ }
+ }
}
/*
@@ -1317,7 +1326,9 @@ out:
/**
* ib_register_device - Register an IB device with IB core
- * @device:Device to register
+ * @device: Device to register
+ * @name: unique string device name. This may include a '%' which will
+ * cause a unique index to be added to the passed device name.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
@@ -1444,7 +1455,7 @@ out:
/**
* ib_unregister_device - Unregister an IB device
- * @device: The device to unregister
+ * @ib_dev: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
*
@@ -1466,7 +1477,7 @@ EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_unregister_device_and_put - Unregister a device while holding a 'get'
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This is the same as ib_unregister_device(), except it includes an internal
* ib_device_put() that should match a 'get' obtained by the caller.
@@ -1536,7 +1547,7 @@ static void ib_unregister_work(struct work_struct *work)
/**
* ib_unregister_device_queued - Unregister a device using a work queue
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This schedules an asynchronous unregistration using a WQ for the device. A
* driver should use this to avoid holding locks while doing unregistration,
@@ -2366,7 +2377,7 @@ int ib_modify_device(struct ib_device *device,
struct ib_device_modify *device_modify)
{
if (!device->ops.modify_device)
- return -ENOSYS;
+ return -EOPNOTSUPP;
return device->ops.modify_device(device, device_modify_mask,
device_modify);
@@ -2397,8 +2408,12 @@ int ib_modify_port(struct ib_device *device,
rc = device->ops.modify_port(device, port_num,
port_modify_mask,
port_modify);
+ else if (rdma_protocol_roce(device, port_num) &&
+ ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
+ (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
+ rc = 0;
else
- rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
+ rc = -EOPNOTSUPP;
return rc;
}
EXPORT_SYMBOL(ib_modify_port);
@@ -2607,6 +2622,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, drain_sq);
SET_DEVICE_OP(dev_ops, enable_driver);
SET_DEVICE_OP(dev_ops, fill_res_entry);
+ SET_DEVICE_OP(dev_ops, fill_stat_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
@@ -2615,6 +2631,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
+ SET_DEVICE_OP(dev_ops, get_vf_guid);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, init_port);
SET_DEVICE_OP(dev_ops, invalidate_range);
@@ -2630,6 +2647,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
+ SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
new file mode 100644
index 000000000000..f509c478b469
--- /dev/null
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2019 Marvell. All rights reserved.
+ */
+#include <linux/xarray.h>
+#include "uverbs.h"
+#include "core_priv.h"
+
+/**
+ * rdma_umap_priv_init() - Initialize the private data of a vma
+ *
+ * @priv: The already allocated private data
+ * @vma: The vm area struct that needs private data
+ * @entry: entry into the mmap_xa that needs to be linked with
+ * this vma
+ *
+ * Each time we map IO memory into user space this keeps track of the
+ * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
+ * to point to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ *
+ */
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ if (entry) {
+ kref_get(&entry->ref);
+ priv->entry = entry;
+ }
+ vma->vm_private_data = priv;
+ /* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+EXPORT_SYMBOL(rdma_umap_priv_init);
+
+/**
+ * rdma_user_mmap_io() - Map IO memory into a process
+ *
+ * @ucontext: associated user context
+ * @vma: the vma related to the current mmap call
+ * @pfn: pfn to map
+ * @size: size to map
+ * @prot: pgprot to use in remap call
+ * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
+ * if mmap_entry is not used by the driver
+ *
+ * This is to be called by drivers as part of their mmap() functions if they
+ * wish to send something like PCI-E BAR memory to userspace.
+ *
+ * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map. 0 on
+ * success.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return -EINVAL;
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return -EINVAL;
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma, entry);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/**
+ * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @pgoff: The mmap offset >> PAGE_SHIFT
+ *
+ * This function is called when a user tries to mmap with an offset (returned
+ * by rdma_user_mmap_get_offset()) that it initially received from the driver.
+ * The rdma_user_mmap_entry was created by rdma_user_mmap_entry_insert().
+ * This function increases the refcnt of the entry so that it won't be
+ * deleted from the xarray in the meantime.
+ *
+ * Return a reference to the entry if it exists, or NULL if there is no
+ * match. rdma_user_mmap_entry_put() must be called to put the reference.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (pgoff > U32_MAX)
+ return NULL;
+
+ xa_lock(&ucontext->mmap_xa);
+
+ entry = xa_load(&ucontext->mmap_xa, pgoff);
+
+ /*
+ * If the refcount is zero, the entry is already being deleted;
+ * driver_removed indicates that no further mmaps are possible and we are
+ * waiting for the active VMAs to be closed.
+ */
+ if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
+ !kref_get_unless_zero(&entry->ref))
+ goto err;
+
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
+ pgoff, entry->npages);
+
+ return entry;
+
+err:
+ xa_unlock(&ucontext->mmap_xa);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
+
+/**
+ * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @vma: the vma being mmap'd into
+ *
+ * This function is like rdma_user_mmap_entry_get_pgoff() except that it also
+ * checks that the VMA is correct.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return NULL;
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
+ if (!entry)
+ return NULL;
+ if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
+ rdma_user_mmap_entry_put(entry);
+ return NULL;
+ }
+ return entry;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get);
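
A sketch of the entry-based variant of the same mmap handler, assuming a
hypothetical my_mmap_entry wrapper that embeds the core entry and records
the pfn to map:

static int my_drv_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rentry;
	struct my_mmap_entry *entry;
	int ret;

	rentry = rdma_user_mmap_entry_get(ibctx, vma);
	if (!rentry)
		return -EINVAL;
	entry = container_of(rentry, struct my_mmap_entry, rdma_entry);

	ret = rdma_user_mmap_io(ibctx, vma, entry->pfn,
				rentry->npages * PAGE_SIZE,
				pgprot_noncached(vma->vm_page_prot), rentry);

	/* On success rdma_umap_priv_init() took its own kref on the entry */
	rdma_user_mmap_entry_put(rentry);
	return ret;
}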
+
+static void rdma_user_mmap_entry_free(struct kref *kref)
+{
+ struct rdma_user_mmap_entry *entry =
+ container_of(kref, struct rdma_user_mmap_entry, ref);
+ struct ib_ucontext *ucontext = entry->ucontext;
+ unsigned long i;
+
+ /*
+ * Erase all xarray slots occupied by this single entry. This is deferred
+ * until all VMAs are closed so that the mmap offsets remain unique.
+ */
+ xa_lock(&ucontext->mmap_xa);
+ for (i = 0; i < entry->npages; i++)
+ __xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
+ entry->start_pgoff, entry->npages);
+
+ if (ucontext->device->ops.mmap_free)
+ ucontext->device->ops.mmap_free(entry);
+}
+
+/**
+ * rdma_user_mmap_entry_put() - Drop reference to the mmap entry
+ *
+ * @entry: an entry in the mmap_xa
+ *
+ * This function is called when an IO mapping is closed, or when the
+ * driver is otherwise done with the entry. It should be called after
+ * rdma_user_mmap_entry_get() once the entry is no longer needed; the
+ * entry is erased and freed when its refcount reaches zero.
+ */
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
+{
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_put);
+
+/**
+ * rdma_user_mmap_entry_remove() - Drop reference to entry and
+ * mark it as unmmapable
+ *
+ * @entry: the entry to remove from the mmap_xa
+ *
+ * Drivers can call this to prevent userspace from creating more mappings for
+ * entry, however existing mmaps continue to exist and ops->mmap_free() will
+ * not be called until all user mmaps are destroyed.
+ */
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->driver_removed = true;
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
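
For the teardown side, a sketch under the same hypothetical my_mmap_entry
assumption; note the entry memory may only be freed from ops.mmap_free(),
which the core calls once the last user mapping is gone:

static void my_drv_mmap_free(struct rdma_user_mmap_entry *rentry)
{
	struct my_mmap_entry *entry =
		container_of(rentry, struct my_mmap_entry, rdma_entry);

	kfree(entry);	/* safe: no mmaps and no xarray slots remain */
}

static void my_drv_destroy_uobj(struct my_uobj *uobj)
{
	/* NULL-safe; blocks new mmaps, frees via mmap_free() when idle */
	rdma_user_mmap_entry_remove(uobj->entry);
}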
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the region that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface to implement their mmap syscall. A database of mmap offsets is
+ * maintained in the core, and helper functions are provided to insert
+ * entries into the database and to extract entries when the user calls
+ * mmap with the given offset. The function allocates a unique page offset
+ * that should be provided to userspace; userspace will use the offset to
+ * retrieve information such as the address to be mapped and how to map it.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ XA_STATE(xas, &ucontext->mmap_xa, 0);
+ u32 xa_first, xa_last, npages;
+ int err;
+ u32 i;
+
+ if (!entry)
+ return -EINVAL;
+
+ kref_init(&entry->ref);
+ entry->ucontext = ucontext;
+
+ /*
+ * We want the whole allocation to be done without interruption from a
+ * different thread. The allocation requires finding a free range and then
+ * storing into it. During xa_insert() the lock could be released, possibly
+ * allowing another thread to choose the same range.
+ */
+ mutex_lock(&ufile->umap_lock);
+
+ xa_lock(&ucontext->mmap_xa);
+
+ /* We want to find an empty range */
+ npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
+ entry->npages = npages;
+ while (true) {
+ /* First find an empty index */
+ xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ goto err_unlock;
+
+ xa_first = xas.xa_index;
+
+ /* Is there enough room to have the range? */
+ if (check_add_overflow(xa_first, npages, &xa_last))
+ goto err_unlock;
+
+ /*
+ * Now look for the next present entry. If an entry doesn't
+ * exist, we found an empty range and can proceed.
+ */
+ xas_next_entry(&xas, xa_last - 1);
+ if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
+ break;
+ }
+
+ for (i = xa_first; i < xa_last; i++) {
+ err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
+ if (err)
+ goto err_undo;
+ }
+
+ /*
+ * Internally the kernel uses a page offset; in libc this is a byte
+ * offset. Drivers should not return the raw pgoff to userspace, but
+ * convert it with rdma_user_mmap_get_offset() first.
+ */
+ entry->start_pgoff = xa_first;
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
+ entry->start_pgoff, npages);
+
+ return 0;
+
+err_undo:
+ for (; i > xa_first; i--)
+ __xa_erase(&ucontext->mmap_xa, i - 1);
+
+err_unlock:
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
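
And a sketch of the allocation side (hypothetical driver fields): the driver
inserts the entry, then hands the byte offset from
rdma_user_mmap_get_offset() back to userspace, which later passes it to
mmap():

static int my_drv_alloc_db(struct ib_ucontext *ibctx, unsigned long db_pfn,
			   u64 *mmap_key)
{
	struct my_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->pfn = db_pfn;

	ret = rdma_user_mmap_entry_insert(ibctx, &entry->rdma_entry,
					  PAGE_SIZE);
	if (ret) {
		kfree(entry);
		return ret;
	}

	/* Returned to userspace, later passed back as the mmap() offset */
	*mmap_key = rdma_user_mmap_get_offset(&entry->rdma_entry);
	return 0;
}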
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 7e2bcc72f66c..1bf87d9fd0bd 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -210,8 +210,10 @@ int iwpm_mapinfo_available(void);
/**
* iwpm_compare_sockaddr - Compare two sockaddr storage structs
+ * @a_sockaddr: first sockaddr to compare
+ * @b_sockaddr: second sockaddr to compare
*
- * Returns 0 if they are holding the same ip/tcp address info,
+ * Return: 0 if they are holding the same ip/tcp address info,
* otherwise returns 1
*/
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
@@ -272,6 +274,7 @@ void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
* iwpm_send_hello - Send hello response to iwpmd
*
* @nl_client: The index of the netlink client
+ * @iwpm_pid: The pid of the user space port mapper
* @abi_version: The kernel's abi_version
*
* Returns 0 on success or a negative error code
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9947d16edef2..c54db13fa9b0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -913,9 +913,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
/* No GRH for DR SMP */
ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
- (const struct ib_mad_hdr *)smp, mad_size,
- (struct ib_mad_hdr *)mad_priv->mad,
- &mad_size, &out_mad_pkey_index);
+ (const struct ib_mad *)smp,
+ (struct ib_mad *)mad_priv->mad, &mad_size,
+ &out_mad_pkey_index);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -1397,25 +1397,6 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
}
EXPORT_SYMBOL(ib_free_recv_mad);
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- return ERR_PTR(-EINVAL); /* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc)
-{
- dev_err(&mad_agent->device->dev,
- "ib_process_mad_wc() not implemented yet\n");
- return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);
-
static int method_in_use(struct ib_mad_mgmt_method_table **method,
struct ib_mad_reg_req *mad_reg_req)
{
@@ -2340,9 +2321,9 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (port_priv->device->ops.process_mad) {
ret = port_priv->device->ops.process_mad(
port_priv->device, 0, port_priv->port_num, wc,
- &recv->grh, (const struct ib_mad_hdr *)recv->mad,
- recv->mad_size, (struct ib_mad_hdr *)response->mad,
- &mad_size, &resp_mad_pkey_index);
+ &recv->grh, (const struct ib_mad *)recv->mad,
+ (struct ib_mad *)response->mad, &mad_size,
+ &resp_mad_pkey_index);
if (opa)
wc->pkey_index = resp_mad_pkey_index;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index c03af08b80e7..cbf6041a5d4a 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -42,6 +42,9 @@
#include "cma_priv.h"
#include "restrack.h"
+typedef int (*res_fill_func_t)(struct sk_buff*, bool,
+ struct rdma_restrack_entry*, uint32_t);
+
/*
* Sort array elements by the netlink attribute name
*/
@@ -180,6 +183,19 @@ static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
return 0;
}
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str)
+{
+ if (put_driver_name_print_type(msg, name,
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
+ return -EMSGSIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_string);
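
A sketch of a driver restrack callback built on the newly exported helper,
following the convention that returning true signals an error to the core
dumper; the attribute name and value are illustrative:

static bool my_fill_res_mr_entry(struct sk_buff *msg,
				 struct rdma_restrack_entry *res)
{
	/* true == message is full, core will abort this entry */
	return rdma_nl_put_driver_string(msg, "state", "valid") != 0;
}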
+
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
@@ -399,20 +415,34 @@ err:
static int fill_res_name_pid(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
+ int err = 0;
+
/*
	 * For user resources, the user should read /proc/PID/comm to get the
	 * name of the task.
*/
if (rdma_is_kernel_res(res)) {
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
- res->kern_name))
- return -EMSGSIZE;
+ err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
+ res->kern_name);
} else {
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
- task_pid_vnr(res->task)))
- return -EMSGSIZE;
+ pid_t pid;
+
+ pid = task_pid_vnr(res->task);
+ /*
+ * Task is dead and in zombie state.
+ * There is no need to print PID anymore.
+ */
+ if (pid)
+ /*
+ * This part is racy: the task can be killed and the PID
+ * may become zero right here, but that is OK. The next
+ * query simply won't return a PID. We don't promise
+ * real-time reflection of SW objects.
+ */
+ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
}
- return 0;
+
+ return err ? -EMSGSIZE : 0;
}
static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
@@ -423,6 +453,14 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
return dev->ops.fill_res_entry(msg, res);
}
+static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (!dev->ops.fill_stat_entry)
+ return false;
+ return dev->ops.fill_stat_entry(msg, res);
+}
+
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
@@ -698,9 +736,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
rt = &counter->device->res[RDMA_RESTRACK_QP];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
continue;
@@ -723,8 +758,8 @@ err:
return ret;
}
-static int fill_stat_hwcounter_entry(struct sk_buff *msg,
- const char *name, u64 value)
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value)
{
struct nlattr *entry_attr;
@@ -746,6 +781,25 @@ err:
nla_nest_cancel(msg, entry_attr);
return -EMSGSIZE;
}
+EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
+
+static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ goto err;
+
+ if (fill_stat_entry(dev, msg, res))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
static int fill_stat_counter_hwcounters(struct sk_buff *msg,
struct rdma_counter *counter)
@@ -759,7 +813,7 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg,
return -EMSGSIZE;
for (i = 0; i < st->num_counters; i++)
- if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
+ if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
goto err;
nla_nest_end(msg, table_attr);
@@ -1117,8 +1171,6 @@ static int nldev_res_get_dumpit(struct sk_buff *skb,
}
struct nldev_fill_res_entry {
- int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
- struct rdma_restrack_entry *res, u32 port);
enum rdma_nldev_attr nldev_attr;
enum rdma_nldev_command nldev_cmd;
u8 flags;
@@ -1132,21 +1184,18 @@ enum nldev_res_flags {
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_QP] = {
- .fill_res_func = fill_res_qp_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_LQPN,
},
[RDMA_RESTRACK_CM_ID] = {
- .fill_res_func = fill_res_cm_id_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
},
[RDMA_RESTRACK_CQ] = {
- .fill_res_func = fill_res_cq_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
.flags = NLDEV_PER_DEV,
@@ -1154,7 +1203,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_CQN,
},
[RDMA_RESTRACK_MR] = {
- .fill_res_func = fill_res_mr_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
.flags = NLDEV_PER_DEV,
@@ -1162,7 +1210,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_MRN,
},
[RDMA_RESTRACK_PD] = {
- .fill_res_func = fill_res_pd_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
.flags = NLDEV_PER_DEV,
@@ -1170,7 +1217,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_PDN,
},
[RDMA_RESTRACK_COUNTER] = {
- .fill_res_func = fill_res_counter_entry,
.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
@@ -1180,7 +1226,8 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1222,11 +1269,6 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err;
}
- if (!rdma_is_visible_in_pid_ns(res)) {
- ret = -ENOENT;
- goto err_get;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
@@ -1243,7 +1285,9 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
}
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
- ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
+
+ ret = fill_func(msg, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret)
goto err_free;
@@ -1263,7 +1307,8 @@ err:
static int res_get_common_dumpit(struct sk_buff *skb,
struct netlink_callback *cb,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1334,9 +1379,6 @@ static int res_get_common_dumpit(struct sk_buff *skb,
* objects.
*/
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
if (idx < start || !rdma_restrack_get(res))
goto next;
@@ -1351,7 +1393,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto msg_full;
}
- ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
+ ret = fill_func(skb, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret) {
@@ -1394,17 +1437,19 @@ err_index:
return ret;
}
-#define RES_GET_FUNCS(name, type) \
- static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
+#define RES_GET_FUNCS(name, type) \
+ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
struct netlink_callback *cb) \
- { \
- return res_get_common_dumpit(skb, cb, type); \
- } \
- static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
- struct nlmsghdr *nlh, \
+ { \
+ return res_get_common_dumpit(skb, cb, type, \
+ fill_res_##name##_entry); \
+ } \
+ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
+ struct nlmsghdr *nlh, \
struct netlink_ext_ack *extack) \
- { \
- return res_get_common_doit(skb, nlh, extack, type); \
+ { \
+ return res_get_common_doit(skb, nlh, extack, type, \
+ fill_res_##name##_entry); \
}
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
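
For illustration, with the fill function now threaded through explicitly,
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) expands to roughly:

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP,
				     fill_res_qp_entry);
}

static int nldev_res_get_qp_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP,
				   fill_res_qp_entry);
}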
@@ -1880,7 +1925,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
for (i = 0; i < num_cnts; i++) {
v = stats->value[i] +
rdma_counter_get_hwstat_value(device, port, i);
- if (fill_stat_hwcounter_entry(msg, stats->names[i], v)) {
+ if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
ret = -EMSGSIZE;
goto err_table;
}
@@ -1989,7 +2034,10 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
case RDMA_NLDEV_ATTR_RES_QP:
ret = stat_get_doit_qp(skb, nlh, extack, tb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
@@ -2013,7 +2061,10 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
case RDMA_NLDEV_ATTR_RES_QP:
ret = nldev_res_get_counter_dumpit(skb, cb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index ccf4d069c25c..6c72773faf29 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -817,6 +817,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
rdma_restrack_del(&ucontext->res);
ib_dev->ops.dealloc_ucontext(ucontext);
+ WARN_ON(!xa_empty(&ucontext->mmap_xa));
kfree(ucontext);
ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index a07665f7ef8c..62fbb0ae9cb4 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -116,11 +116,8 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
u32 cnt = 0;
xa_lock(&rt->xa);
- xas_for_each(&xas, e, U32_MAX) {
- if (!rdma_is_visible_in_pid_ns(e))
- continue;
+ xas_for_each(&xas, e, U32_MAX)
cnt++;
- }
xa_unlock(&rt->xa);
return cnt;
}
@@ -346,18 +343,3 @@ out:
}
}
EXPORT_SYMBOL(rdma_restrack_del);
-
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
-{
- /*
- * 1. Kern resources should be visible in init
- * namespace only
- * 2. Present only resources visible in the current
- * namespace
- */
- if (rdma_is_kernel_res(res))
- return task_active_pid_ns(current) == &init_pid_ns;
-
- /* PID 0 means that resource is not found in current namespace */
- return task_pid_vnr(res->task);
-}
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
index 7bd177cc0a61..d084e5f89849 100644
--- a/drivers/infiniband/core/restrack.h
+++ b/drivers/infiniband/core/restrack.h
@@ -27,5 +27,4 @@ int rdma_restrack_init(struct ib_device *dev);
void rdma_restrack_clean(struct ib_device *dev);
void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
struct task_struct *task);
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res);
#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5337393d4dfe..4fad732f9b3c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -20,14 +20,17 @@ module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
- * Check if the device might use memory registration. This is currently only
- * true for iWarp devices. In the future we can hopefully fine tune this based
- * on HCA driver input.
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWarp devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
*/
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
if (rdma_protocol_iwarp(dev, port_num))
return true;
+ if (dev->attrs.max_sgl_rd)
+ return true;
if (unlikely(rdma_rw_force_mr))
return true;
return false;
@@ -35,17 +38,19 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
/*
* Check if the device will use memory registration for this RW operation.
- * We currently always use memory registrations for iWarp RDMA READs, and
- * have a debug option to force usage of MRs.
- *
- * XXX: In the future we can hopefully fine tune this based on HCA driver
- * input.
+ * For RDMA READs we must use MRs on iWarp and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help testing this code path.
*/
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
enum dma_data_direction dir, int dma_nents)
{
- if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
- return true;
+ if (dir == DMA_FROM_DEVICE) {
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+ return true;
+ }
if (unlikely(rdma_rw_force_mr))
return true;
return false;
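
As a concrete illustration of the new policy: on a RoCE device reporting,
say, max_sgl_rd = 16, an RDMA READ (DMA_FROM_DEVICE) whose buffer maps to
32 SG entries would now be registered through an MR, while an 8-entry
buffer would keep using a plain SGE list; on iWarp, READs use an MR
regardless.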
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 17fc2936c077..8917125ea16d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1246,7 +1246,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
* @port_num: Port on the specified device.
* @rec: path record entry to use for ah attributes initialization.
* @ah_attr: address handle attributes to initialization from path record.
- * @sgid_attr: SGID attribute to consider during initialization.
+ * @gid_attr: SGID attribute to consider during initialization.
*
* When ib_init_ah_attr_from_path() returns success,
* (a) for IB link layer it optionally contains a reference to SGID attribute
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7a50cedcef1f..087682e6969e 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -481,8 +481,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (!dev->ops.process_mad)
return -ENOSYS;
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
@@ -497,10 +497,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (attr != IB_PMA_CLASS_PORT_INFO)
in_mad->data[41] = port_num; /* PortSelect field */
- if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
- port_num, NULL, NULL,
- (const struct ib_mad_hdr *)in_mad, mad_size,
- (struct ib_mad_hdr *)out_mad, &mad_size,
+ if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL,
+ in_mad, out_mad, &mad_size,
&out_mad_pkey_index) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
@@ -1268,7 +1266,7 @@ static ssize_t node_desc_store(struct device *device,
int ret;
if (!dev->ops.modify_device)
- return -EIO;
+ return -EOPNOTSUPP;
memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 24244a2f68cc..7a3b99597ead 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
*/
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync)
+ size_t size, int access)
{
struct ib_ucontext *context;
struct ib_umem *umem;
@@ -199,7 +198,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct mm_struct *mm;
unsigned long npages;
int ret;
- unsigned long dma_attrs = 0;
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
@@ -211,9 +209,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
if (!context)
return ERR_PTR(-EIO);
- if (dmasync)
- dma_attrs |= DMA_ATTR_WRITE_BARRIER;
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -294,11 +289,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg_attrs(context->device,
+ umem->nmap = ib_dma_map_sg(context->device,
umem->sg_head.sgl,
umem->sg_nents,
- DMA_BIDIRECTIONAL,
- dma_attrs);
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 163ff7ba92b7..d7d5fadf0899 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -508,7 +508,6 @@ static int ib_umem_odp_map_dma_single_page(
{
struct ib_device *dev = umem_odp->umem.ibdev;
dma_addr_t dma_addr;
- int remove_existing_mapping = 0;
int ret = 0;
/*
@@ -534,28 +533,29 @@ static int ib_umem_odp_map_dma_single_page(
} else if (umem_odp->page_list[page_index] == page) {
umem_odp->dma_list[page_index] |= access_mask;
} else {
- pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem_odp->page_list[page_index], page);
- /* Better remove the mapping now, to prevent any further
- * damage. */
- remove_existing_mapping = 1;
+ /*
+ * There is a race here where we could have done:
+ *
+ * CPU0 CPU1
+ * get_user_pages()
+ * invalidate()
+ * page_fault()
+ * mutex_lock(umem_mutex)
+ * page from GUP != page in ODP
+ *
+ * It should be prevented by the retry test above as reading
+ * the seq number should be reliable under the
+ * umem_mutex. Thus something is really not working right if
+ * things get here.
+ */
+ WARN(true,
+ "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem_odp->page_list[page_index], page);
+ ret = -EAGAIN;
}
out:
put_user_page(page);
-
- if (remove_existing_mapping) {
- ib_umem_notifier_start_account(umem_odp);
- dev->ops.invalidate_range(
- umem_odp,
- ib_umem_start(umem_odp) +
- (page_index << umem_odp->page_shift),
- ib_umem_start(umem_odp) +
- ((page_index + 1) << umem_odp->page_shift));
- ib_umem_notifier_end_account(umem_odp);
- ret = -EAGAIN;
- }
-
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 14a80fd9f464..06ed32c8662f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -252,6 +252,8 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
ucontext->closing = false;
ucontext->cleanup_retryable = false;
+ xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+
ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0)
goto err_free;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 61758201d9b2..269938f59d3f 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -795,6 +795,9 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
if (size < attr->ptr_attr.len) {
if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
attr->ptr_attr.len - size))
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index db98111b47f4..9aa7ffc1d12a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -772,6 +772,8 @@ out_unlock:
return (ret) ? : count;
}
+static const struct vm_operations_struct rdma_umap_ops;
+
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
@@ -785,7 +787,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
ret = PTR_ERR(ucontext);
goto out;
}
-
+ vma->vm_ops = &rdma_umap_ops;
ret = ucontext->device->ops.mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
@@ -793,38 +795,6 @@ out:
}
/*
- * Each time we map IO memory into user space this keeps track of the mapping.
- * When the device is hot-unplugged we 'zap' the mmaps in user space to point
- * to the zero page and allow the hot unplug to proceed.
- *
- * This is necessary for cases like PCI physical hot unplug as the actual BAR
- * memory may vanish after this and access to it from userspace could MCE.
- *
- * RDMA drivers supporting disassociation must have their user space designed
- * to cope in some way with their IO pages going to the zero page.
- */
-struct rdma_umap_priv {
- struct vm_area_struct *vma;
- struct list_head list;
-};
-
-static const struct vm_operations_struct rdma_umap_ops;
-
-static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
- struct vm_area_struct *vma)
-{
- struct ib_uverbs_file *ufile = vma->vm_file->private_data;
-
- priv->vma = vma;
- vma->vm_private_data = priv;
- vma->vm_ops = &rdma_umap_ops;
-
- mutex_lock(&ufile->umap_lock);
- list_add(&priv->list, &ufile->umaps);
- mutex_unlock(&ufile->umap_lock);
-}
-
-/*
* The VMA has been dup'd, initialize the vm_private_data with a new tracking
* struct
*/
@@ -849,7 +819,7 @@ static void rdma_umap_open(struct vm_area_struct *vma)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto out_unlock;
- rdma_umap_priv_init(priv, vma);
+ rdma_umap_priv_init(priv, vma, opriv->entry);
up_read(&ufile->hw_destroy_rwsem);
return;
@@ -880,6 +850,9 @@ static void rdma_umap_close(struct vm_area_struct *vma)
* this point.
*/
mutex_lock(&ufile->umap_lock);
+ if (priv->entry)
+ rdma_user_mmap_entry_put(priv->entry);
+
list_del(&priv->list);
mutex_unlock(&ufile->umap_lock);
kfree(priv);
@@ -931,44 +904,6 @@ static const struct vm_operations_struct rdma_umap_ops = {
.fault = rdma_umap_fault,
};
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
- */
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- struct ib_uverbs_file *ufile = ucontext->ufile;
- struct rdma_umap_priv *priv;
-
- if (!(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- if (vma->vm_end - vma->vm_start != size)
- return -EINVAL;
-
- /* Driver is using this wrong, must be called by ib_uverbs_mmap */
- if (WARN_ON(!vma->vm_file ||
- vma->vm_file->private_data != ufile))
- return -EINVAL;
- lockdep_assert_held(&ufile->device->disassociate_srcu);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- vma->vm_page_prot = prot;
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
- kfree(priv);
- return -EAGAIN;
- }
-
- rdma_umap_priv_init(priv, vma);
- return 0;
-}
-EXPORT_SYMBOL(rdma_user_mmap_io);
-
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
struct rdma_umap_priv *priv, *next_priv;
@@ -1018,6 +953,11 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
zap_vma_ptes(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
+
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 35c2841a569e..dd765e176cdd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -244,6 +244,8 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
/**
* ib_alloc_pd - Allocates an unused protection domain.
* @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ * @caller: caller's build-time module name
*
* A protection domain object provides an association between QPs, shared
* receive queues, address handles, memory regions, and memory windows.
@@ -2459,6 +2461,16 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
}
EXPORT_SYMBOL(ib_set_vf_guid);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ if (!device->ops.get_vf_guid)
+ return -EOPNOTSUPP;
+
+ return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
+}
+EXPORT_SYMBOL(ib_get_vf_guid);
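
A hedged caller sketch for the new verb (names illustrative); on devices
whose driver does not implement ops.get_vf_guid the call simply reports
-EOPNOTSUPP:

static int my_query_vf_guids(struct ib_device *dev, int vf, u8 port)
{
	struct ifla_vf_guid node_guid = {}, port_guid = {};
	int err;

	err = ib_get_vf_guid(dev, vf, port, &node_guid, &port_guid);
	if (err == -EOPNOTSUPP)
		pr_debug("VF GUID query not supported by %s\n",
			 dev_name(&dev->dev));
	return err;
}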
/**
* ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
* information) and set an appropriate memory region for registration.
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 433fca59febd..0aeccd984889 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
-obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index ab8779d23382..b83f1cc38c52 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_BNXT_RE
- tristate "Broadcom Netxtreme HCA support"
- depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
- ---help---
+ tristate "Broadcom Netxtreme HCA support"
+ depends on 64BIT
+ depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ select NET_VENDOR_BROADCOM
+ select BNXT
+ ---help---
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
the module will be called bnxt_re.
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index e55a1666c0cd..725b2350e349 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -108,6 +108,7 @@ struct bnxt_re_sqp_entries {
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
+#define BNXT_RE_GEN_P5_MAX_VF 64
struct bnxt_re_dev {
struct ib_device ibdev;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index b4149dc9e824..9b6ca15a183c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -191,24 +191,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
return 0;
}
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- switch (device_modify_mask) {
- case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
- /* Modify the GUID requires the modification of the GID table */
- /* GUID should be made as READ-ONLY */
- break;
- case IB_DEVICE_MODIFY_NODE_DESC:
- /* Node Desc should be made as READ-ONLY */
- break;
- default:
- break;
- }
- return 0;
-}
-
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr)
@@ -855,7 +837,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -869,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(udata, ureq.qprva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
@@ -1322,7 +1304,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2565,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->umem = ib_umem_get(udata, req.cq_va,
entries * sizeof(struct cq_base),
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto fail;
@@ -3530,7 +3512,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 31662b1ee35a..23d972da5652 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -145,9 +145,6 @@ struct bnxt_re_ucontext {
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 30a54f8aa42c..e7e8a0f49464 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -119,61 +119,76 @@ static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
* reserved for the function. The driver may choose to allocate fewer
* resources than the firmware maximum.
*/
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
- u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
- u32 i;
- u32 vf_pct;
- u32 num_vfs;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *ctx;
+ int i;
- rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
- dev_attr->max_qp);
+ attr = &rdev->dev_attr;
+ ctx = &rdev->qplib_ctx;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ attr->max_qp);
+ ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
/* Use max_mr from fw since max_mrw does not get set */
- rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
- dev_attr->max_mr);
- rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
- dev_attr->max_srq);
- rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
- dev_attr->max_cq);
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-
- if (rdev->num_vfs) {
- /*
- * Reserve a set of resources for the PF. Divide the remaining
- * resources among the VFs
- */
- vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
- num_vfs = 100 * rdev->num_vfs;
- vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
- vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
- vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
- /*
- * The driver allows many more MRs than other resources. If the
- * firmware does also, then reserve a fixed amount for the PF
- * and divide the rest among VFs. VFs may use many MRs for NFS
- * mounts, ISER, NVME applications, etc. If the firmware
- * severely restricts the number of MRs, then let PF have
- * half and divide the rest among VFs, as for the other
- * resource types.
- */
- if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
- vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
- else
- vf_mrws = (rdev->qplib_ctx.mrw_count -
- BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
- vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+{
+ struct bnxt_qplib_vf_res *vf_res;
+ u32 mrws = 0;
+ u32 vf_pct;
+ u32 nvfs;
+
+ vf_res = &qplib_ctx->vf_res;
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ nvfs = num_vf;
+ num_vf = 100 * num_vf;
+ vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
+ vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
+ vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF and
+ * divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware severely
+ * restricts the number of MRs, then let PF have half and divide
+ * the rest among VFs, as for the other resource types.
+ */
+ if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
+ mrws = qplib_ctx->mrw_count * vf_pct;
+ nvfs = num_vf;
+ } else {
+ mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
}
- rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
- rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
- rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
- rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
- rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+ vf_res->max_mrw_per_vf = (mrws / nvfs);
+ vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
+}
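
To make the split concrete with hypothetical numbers: taking
BNXT_RE_PCT_RSVD_FOR_PF as 50 with 16 VFs and qpc_count = 65536, vf_pct is
50 and num_vf is scaled to 1600, so max_qp_per_vf = 65536 * 50 / 1600 =
2048 QPs per VF, leaving half of the QP contexts reserved for the PF.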
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 num_vfs;
+
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
/* for handling bnxt_en callbacks later */
@@ -193,9 +208,11 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- bnxt_re_set_resource_limits(rdev);
- bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+ }
}
static void bnxt_re_shutdown(void *p)
@@ -477,6 +494,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -625,7 +643,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
.modify_ah = bnxt_re_modify_ah,
- .modify_device = bnxt_re_modify_device,
.modify_qp = bnxt_re_modify_qp,
.modify_srq = bnxt_re_modify_srq,
.poll_cq = bnxt_re_poll_cq,
@@ -895,10 +912,14 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
return 0;
}
+#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
+#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
- 0x10000 : rdev->msix_entries[indx].db_offset;
+ (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
+ BNXT_RE_GEN_P5_PF_NQ_DB) :
+ rdev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -1270,10 +1291,10 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
return;
}
rdev->qplib_ctx.hwrm_intf_ver =
- (u64)resp.hwrm_intf_major << 48 |
- (u64)resp.hwrm_intf_minor << 32 |
- (u64)resp.hwrm_intf_build << 16 |
- resp.hwrm_intf_patch;
+ (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
+ (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
+ (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(resp.hwrm_intf_patch);
}
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
@@ -1408,8 +1429,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- if (!rdev->is_virtfn)
- bnxt_re_set_resource_limits(rdev);
+
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 60c8f76aab33..5cdfa84faf85 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -494,8 +494,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (is_virtfn)
goto skip_ctx_setup;
+ if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ goto config_vf_res;
level = ctx->qpc_tbl.level;
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
@@ -540,6 +542,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
+config_vf_res:
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index fbda11a7ab1a..aaa76d792185 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -186,7 +186,9 @@ struct bnxt_qplib_chip_ctx {
u8 chip_metal;
};
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
struct bnxt_qplib_res {
struct pci_dev *pdev;
@@ -203,7 +205,9 @@ struct bnxt_qplib_res {
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
- return (cctx->chip_num == CHIP_NUM_57500);
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+ cctx->chip_num == CHIP_NUM_57504 ||
+ cctx->chip_num == CHIP_NUM_57502);
}
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
deleted file mode 100644
index 8c1a72bff447..000000000000
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_CXGB3
- tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3
- select GENERIC_ALLOCATOR
- ---help---
- This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
- 10GbE adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called iw_cxgb3.
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
deleted file mode 100644
index 34bb86a6ae3a..000000000000
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb3
-
-obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
-
-iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
- iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
deleted file mode 100644
index 95b22a651673..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <asm/delay.h>
-
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-#include "sge_defs.h"
-
-static LIST_HEAD(rdev_list);
-static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (!strcmp(rdev->dev_name, dev_name))
- return rdev;
- return NULL;
-}
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (rdev->t3cdev_p == tdev)
- return rdev;
- return NULL;
-}
-
-int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit)
-{
- int ret;
- struct t3_cqe *cqe;
- u32 rptr;
-
- struct rdma_cq_op setup;
- setup.id = cq->cqid;
- setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
- setup.op = op;
- ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
-
- if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
- return ret;
-
- /*
- * If the rearm returned an index other than our current index,
- * then there might be CQE's in flight (being DMA'd). We must wait
- * here for them to complete or the consumer can miss a notification.
- */
- if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
- int i=0;
-
- rptr = cq->rptr;
-
- /*
- * Keep the generation correct by bumping rptr until it
- * matches the index returned by the rearm - 1.
- */
- while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
- rptr++;
-
- /*
- * Now rptr is the index for the (last) cqe that was
- * in-flight at the time the HW rearmed the CQ. We
- * spin until that CQE is valid.
- */
- cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
- while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
- udelay(1);
- if (i++ > 1000000) {
- pr_err("%s: stalled rnic\n", rdev_p->dev_name);
- return -EIO;
- }
- }
-
- return 1;
- }
-
- return 0;
-}
-
-static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
-{
- struct rdma_cq_setup setup;
- setup.id = cqid;
- setup.base_addr = 0; /* NULL address */
- setup.size = 0; /* disaable the CQ */
- setup.credits = 0;
- setup.credit_thres = 0;
- setup.ovfl_mode = 0;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
-{
- u64 sge_cmd;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
- T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
- T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = qpid << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
-{
- struct rdma_cq_setup setup;
- int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
-
- size += 1; /* one extra page for storing cq-in-err state */
- cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
- if (!cq->cqid)
- return -ENOMEM;
- if (kernel) {
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- }
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
- &(cq->dma_addr), GFP_KERNEL);
- if (!cq->queue) {
- kfree(cq->sw_queue);
- return -ENOMEM;
- }
- dma_unmap_addr_set(cq, mapping, cq->dma_addr);
- setup.id = cq->cqid;
- setup.base_addr = (u64) (cq->dma_addr);
- setup.size = 1UL << cq->size_log2;
- setup.credits = 65535;
- setup.credit_thres = 1;
- if (rdev_p->t3cdev_p->type != T3A)
- setup.ovfl_mode = 0;
- else
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
- u32 qpid;
- int i;
-
- mutex_lock(&uctx->lock);
- if (!list_empty(&uctx->qpids)) {
- entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
- entry);
- list_del(&entry->entry);
- qpid = entry->qpid;
- kfree(entry);
- } else {
- qpid = cxio_hal_get_qpid(rdev_p->rscp);
- if (!qpid)
- goto out;
- for (i = qpid+1; i & rdev_p->qpmask; i++) {
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
- entry->qpid = i;
- list_add_tail(&entry->entry, &uctx->qpids);
- }
- }
-out:
- mutex_unlock(&uctx->lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
- struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- entry->qpid = qpid;
- mutex_lock(&uctx->lock);
- list_add_tail(&entry->entry, &uctx->qpids);
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct list_head *pos, *nxt;
- struct cxio_qpid_list *entry;
-
- mutex_lock(&uctx->lock);
- list_for_each_safe(pos, nxt, &uctx->qpids) {
- entry = list_entry(pos, struct cxio_qpid_list, entry);
- list_del_init(&entry->entry);
- if (!(entry->qpid & rdev_p->qpmask))
- cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
- kfree(entry);
- }
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- INIT_LIST_HEAD(&uctx->qpids);
- mutex_init(&uctx->lock);
-}
-
-int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
- struct t3_wq *wq, struct cxio_ucontext *uctx)
-{
- int depth = 1UL << wq->size_log2;
- int rqsize = 1UL << wq->rq_size_log2;
-
- wq->qpid = get_qpid(rdev_p, uctx);
- if (!wq->qpid)
- return -ENOMEM;
-
- wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
- if (!wq->rq)
- goto err1;
-
- wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
- if (!wq->rq_addr)
- goto err2;
-
- wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
- if (!wq->sq)
- goto err3;
-
- wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
- if (!wq->queue)
- goto err4;
-
- dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
- if (!kernel_domain)
- wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
- (wq->qpid << rdev_p->qpshift);
- wq->rdev = rdev_p;
- pr_debug("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n",
- __func__, wq->qpid, wq->doorbell, (unsigned long long)wq->udb);
- return 0;
-err4:
- kfree(wq->sq);
-err3:
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
-err2:
- kfree(wq->rq);
-err1:
- put_qpid(rdev_p, wq->qpid, uctx);
- return -ENOMEM;
-}
-
-void cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
-{
- cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
- kfree(cq->sw_queue);
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2))
- * sizeof(struct t3_cqe) + 1, cq->queue,
- dma_unmap_addr(cq, mapping));
- cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
-}
-
-int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
- struct cxio_ucontext *uctx)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (wq->size_log2))
- * sizeof(union t3_wr), wq->queue,
- dma_unmap_addr(wq, mapping));
- kfree(wq->sq);
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
- kfree(wq->rq);
- put_qpid(rdev_p, wq->qpid, uctx);
- return 0;
-}
-
-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(T3_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- u32 ptr;
- int flushed = 0;
-
- pr_debug("%s wq %p cq %p\n", __func__, wq, cq);
-
- /* flush RQ */
- pr_debug("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
- wq->rq_rptr, wq->rq_wptr, count);
- ptr = wq->rq_rptr + count;
- while (ptr++ != wq->rq_wptr) {
- insert_recv_cqe(wq, cq);
- flushed++;
- }
- return flushed;
-}
-
-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
- struct t3_swsq *sqp)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(sqp->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- cqe.u.scqe.wrid_hi = sqp->sq_wptr;
-
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- __u32 ptr = wq->sq_rptr + count;
- int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
-
- while (ptr != wq->sq_wptr) {
- sqp->signaled = 0;
- insert_sq_cqe(wq, cq, sqp);
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- flushed++;
- }
- return flushed;
-}
-
-/*
- * Move all CQEs from the HWCQ into the SWCQ.
- */
-void cxio_flush_hw_cq(struct t3_cq *cq)
-{
- struct t3_cqe *cqe, *swcqe;
-
- pr_debug("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
- cqe = cxio_next_hw_cqe(cq);
- while (cqe) {
- pr_debug("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
- __func__, cq->rptr, cq->sw_wptr);
- swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
- *swcqe = *cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
- cq->sw_wptr++;
- cq->rptr++;
- cqe = cxio_next_hw_cqe(cq);
- }
-}
-
-static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
-{
- if (CQE_OPCODE(*cqe) == T3_TERMINATE)
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
- return 0;
-
- if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
- return 0;
-
- return 1;
-}
-
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if ((SQ_TYPE(*cqe) ||
- ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
- (CQE_QPID(*cqe) == wq->qpid))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- pr_debug("%s count zero %d\n", __func__, *count);
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
- (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
-{
- struct rdma_cq_setup setup;
- setup.id = 0;
- setup.base_addr = 0; /* NULL address */
- setup.size = 1; /* enable the CQ */
- setup.credits = 0;
-
- /* force SGE to redirect to RspQ and interrupt */
- setup.credit_thres = 0;
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- int err;
- u64 sge_cmd, ctx0, ctx1;
- u64 base_addr;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb;
-
- skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- err = cxio_hal_init_ctrl_cq(rdev_p);
- if (err) {
- pr_debug("%s err %d initializing ctrl_cq\n", __func__, err);
- goto err;
- }
- rdev_p->ctrl_qp.workq = dma_alloc_coherent(
- &(rdev_p->rnic_info.pdev->dev),
- (1 << T3_CTRL_QP_SIZE_LOG2) *
- sizeof(union t3_wr),
- &(rdev_p->ctrl_qp.dma_addr),
- GFP_KERNEL);
- if (!rdev_p->ctrl_qp.workq) {
- pr_debug("%s dma_alloc_coherent failed\n", __func__);
- err = -ENOMEM;
- goto err;
- }
- dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
- rdev_p->ctrl_qp.dma_addr);
- rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
-
- mutex_init(&rdev_p->ctrl_qp.lock);
- init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
-
- /* update HW Ctrl QP context */
- base_addr = rdev_p->ctrl_qp.dma_addr;
- base_addr >>= 12;
- ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
- V_EC_BASE_LO((u32) base_addr & 0xffff));
- ctx0 <<= 32;
- ctx0 |= V_EC_CREDITS(FW_WR_NUM);
- base_addr >>= 16;
- ctx1 = (u32) base_addr;
- base_addr >>= 32;
- ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
- V_EC_TYPE(0) | V_EC_GEN(1) |
- V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
- T3_CTL_QP_TID, 7, T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- wqe->ctx1 = cpu_to_be64(ctx1);
- wqe->ctx0 = cpu_to_be64(ctx0);
- pr_debug("CtrlQP dma_addr %pad workq %p size %d\n",
- &rdev_p->ctrl_qp.dma_addr, rdev_p->ctrl_qp.workq,
- 1 << T3_CTRL_QP_SIZE_LOG2);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-err:
- kfree_skb(skb);
- return err;
-}
-
-static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << T3_CTRL_QP_SIZE_LOG2)
- * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
- return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
-}
-
-/* write len bytes of data into addr (32B aligned address)
- * If data is NULL, clear len bytes of memory to zero.
- * The caller must acquire the ctrl_qp lock before the call.
- */
-static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- u32 len, void *data)
-{
- u32 i, nr_wqe, copy_len;
- u8 *copy_data;
- u8 wr_len, utx_len; /* length in 8 byte flit */
- enum t3_wr_flags flag;
- __be64 *wqe;
- u64 utx_cmd;
- addr &= 0x7FFFFFF;
- nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
- pr_debug("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
- __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
- nr_wqe, data, addr);
- utx_len = 3; /* in 32B unit */
- for (i = 0; i < nr_wqe; i++) {
- if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2)) {
- pr_debug("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, wait for more space i %d\n",
- __func__,
- rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- !Q_FULL(rdev_p->ctrl_qp.rptr,
- rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2))) {
- pr_debug("%s ctrl_qp workq interrupted\n",
- __func__);
- return -ERESTARTSYS;
- }
- pr_debug("%s ctrl_qp wakeup, continue posting work request i %d\n",
- __func__, i);
- }
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
- flag = 0;
- if (i == (nr_wqe - 1)) {
- /* last WQE */
- flag = T3_COMPLETION_FLAG;
- if (len % 32)
- utx_len = len / 32 + 1;
- else
- utx_len = len / 32;
- }
-
- /*
- * Force a CQE to return the credit to the workq in case
- * we posted more than half the max QP size of WRs
- */
- if ((i != 0) &&
- (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
- flag = T3_COMPLETION_FLAG;
- pr_debug("%s force completion at i %d\n", __func__, i);
- }
-
- /* build the utx mem command */
- wqe += (sizeof(struct t3_bypass_wr) >> 3);
- utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
- utx_cmd <<= 32;
- utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
- *wqe = cpu_to_be64(utx_cmd);
- wqe++;
- copy_data = (u8 *) data + i * 96;
- copy_len = len > 96 ? 96 : len;
-
- /* clear memory content if data is NULL */
- if (data)
- memcpy(wqe, copy_data, copy_len);
- else
- memset(wqe, 0, copy_len);
- if (copy_len % 32)
- memset(((u8 *) wqe) + copy_len, 0,
- 32 - (copy_len % 32));
- wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
- (utx_len << 2);
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
-
- /* wptr in the WRID[31:0] */
- ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
-
- /*
- * This must be the last write with a memory barrier
- * for the genbit
- */
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
- Q_GENBIT(rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
- wr_len, T3_SOPEOP);
- if (flag == T3_COMPLETION_FLAG)
- ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
- len -= 96;
- rdev_p->ctrl_qp.wptr++;
- }
- return 0;
-}
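
The loop above packs at most 96 bytes of payload per bypass WQE (three 32-byte UTX units), so nr_wqe is a ceiling division and only the final WQE carries a partial utx_len. A standalone sketch of that arithmetic, with an assumed payload length:

    #include <stdio.h>

    int main(void)
    {
        unsigned int len = 250; /* assumed payload size in bytes */
        unsigned int nr_wqe = len % 96 ? len / 96 + 1 : len / 96;
        unsigned int tail = len - (nr_wqe - 1) * 96;
        unsigned int utx_len = tail % 32 ? tail / 32 + 1 : tail / 32;

        /* 250B -> 3 WQEs (96 + 96 + 58); the 58B tail needs 2 UTX units */
        printf("nr_wqe %u tail %u utx_len %u\n", nr_wqe, tail, utx_len);
        return 0;
    }
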
-
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
- * OUT: stag index
- * TBD: shared memory region support
- */
-static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
- u32 *stag, u8 stag_state, u32 pdid,
- enum tpt_mem_type type, enum tpt_mem_perm perm,
- u32 zbva, u64 to, u32 len, u8 page_size,
- u32 pbl_size, u32 pbl_addr)
-{
- int err;
- struct tpt_entry tpt;
- u32 stag_idx;
- u32 wptr;
-
- if (cxio_fatal_error(rdev_p))
- return -EIO;
-
- stag_state = stag_state > 0;
- stag_idx = (*stag) >> 8;
-
- if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
- stag_idx = cxio_hal_get_stag(rdev_p->rscp);
- if (!stag_idx)
- return -ENOMEM;
- *stag = (stag_idx << 8) | ((*stag) & 0xFF);
- }
- pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
-
- /* write TPT entry */
- if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
- else {
- tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
- V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
- V_TPT_STAG_STATE(stag_state) |
- V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
- BUG_ON(page_size >= 28);
- tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
- ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
- V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
- V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
- tpt.len = cpu_to_be32(len);
- tpt.va_hi = cpu_to_be32((u32) (to >> 32));
- tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
- tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
- }
- err = cxio_hal_ctrl_qp_write_mem(rdev_p,
- stag_idx +
- (rdev_p->rnic_info.tpt_base >> 5),
- sizeof(tpt), &tpt);
-
- /* release the stag index to free pool */
- if (reset_tpt_entry)
- cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (!err)
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
- return err;
-}
-
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
-{
- u32 wptr;
- int err;
-
- pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
- pbl_size);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
- err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
- pbl);
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (err)
- return err;
-
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
- u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- pbl_size, pbl_addr);
-}
-
-int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
- 0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
-}
-
-int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
-{
- struct t3_rdma_init_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
- pr_debug("%s rdev_p %p\n", __func__, rdev_p);
- wqe = __skb_put(skb, sizeof(*wqe));
- wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
- wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
- V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
- wqe->wrid.id1 = 0;
- wqe->qpid = cpu_to_be32(attr->qpid);
- wqe->pdid = cpu_to_be32(attr->pdid);
- wqe->scqid = cpu_to_be32(attr->scqid);
- wqe->rcqid = cpu_to_be32(attr->rcqid);
- wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
- wqe->rq_size = cpu_to_be32(attr->rq_size);
- wqe->mpaattrs = attr->mpaattrs;
- wqe->qpcaps = attr->qpcaps;
- wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
- wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags |
- V_RTR_TYPE(attr->rtr_type) |
- V_CHAN(attr->chan));
- wqe->ord = cpu_to_be32(attr->ord);
- wqe->ird = cpu_to_be32(attr->ird);
- wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
- wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
- wqe->irs = cpu_to_be32(attr->irs);
- skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = ev_cb;
-}
-
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = NULL;
-}
-
-static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
-{
- static int cnt;
- struct cxio_rdev *rdev_p = NULL;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- pr_debug("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x se %0x notify %0x cqbranch %0x creditth %0x\n",
- cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
- RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
- RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
- RSPQ_CREDIT_THRESH(rsp_msg));
- pr_debug("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
- rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
- if (!rdev_p) {
- pr_debug("%s called by t3cdev %p with null ulp\n", __func__,
- t3cdev_p);
- return 0;
- }
- if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
- rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
- wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
- dev_kfree_skb_irq(skb);
- } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
- dev_kfree_skb_irq(skb);
- else if (cxio_ev_cb)
- (*cxio_ev_cb) (rdev_p, skb);
- else
- dev_kfree_skb_irq(skb);
- cnt++;
- return 0;
-}
-
-/* Caller takes care of locking if needed */
-int cxio_rdev_open(struct cxio_rdev *rdev_p)
-{
- struct net_device *netdev_p = NULL;
- int err = 0;
- if (strlen(rdev_p->dev_name)) {
- if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
- return -EBUSY;
- }
- netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
- if (!netdev_p) {
- return -EINVAL;
- }
- dev_put(netdev_p);
- } else if (rdev_p->t3cdev_p) {
- if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
- return -EBUSY;
- }
- netdev_p = rdev_p->t3cdev_p->lldev;
- strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
- T3_MAX_DEV_NAME_LEN);
- } else {
- pr_debug("%s t3cdev_p or dev_name must be set\n", __func__);
- return -EINVAL;
- }
-
- list_add_tail(&rdev_p->entry, &rdev_list);
-
- pr_debug("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
- memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
- if (!rdev_p->t3cdev_p)
- rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
- rdev_p->t3cdev_p->ulp = (void *) rdev_p;
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
- &(rdev_p->fw_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
- pr_err("fatal firmware version mismatch: need version %u but adapter has version %u\n",
- CXIO_FW_MAJ,
- G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
- err = -EINVAL;
- goto err1;
- }
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
- &(rdev_p->rnic_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
- &(rdev_p->port_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
-
- /*
- * qpshift is the number of bits to shift the qpid left in order
- * to get the correct address of the doorbell for that qp.
- */
- cxio_init_ucontext(rdev_p, &rdev_p->uctx);
- rdev_p->qpshift = PAGE_SHIFT -
- ilog2(65536 >>
- ilog2(rdev_p->rnic_info.udbell_len >>
- PAGE_SHIFT));
- rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
- rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
- pr_debug("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
- __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
- rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
- rdev_p->rnic_info.pbl_base,
- rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
- rdev_p->rnic_info.rqt_top);
- pr_debug("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu qpnr %d qpmask 0x%x\n",
- rdev_p->rnic_info.udbell_len,
- rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
- rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
-
- err = cxio_hal_init_ctrl_qp(rdev_p);
- if (err) {
- pr_err("%s error %d initializing ctrl_qp\n", __func__, err);
- goto err1;
- }
- err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
- 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
- T3_MAX_NUM_PD);
- if (err) {
- pr_err("%s error %d initializing hal resources\n",
- __func__, err);
- goto err2;
- }
- err = cxio_hal_pblpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing pbl mem pool\n",
- __func__, err);
- goto err3;
- }
- err = cxio_hal_rqtpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing rqt mem pool\n",
- __func__, err);
- goto err4;
- }
- return 0;
-err4:
- cxio_hal_pblpool_destroy(rdev_p);
-err3:
- cxio_hal_destroy_resource(rdev_p->rscp);
-err2:
- cxio_hal_destroy_ctrl_qp(rdev_p);
-err1:
- rdev_p->t3cdev_p->ulp = NULL;
- list_del(&rdev_p->entry);
- return err;
-}
-
-void cxio_rdev_close(struct cxio_rdev *rdev_p)
-{
- if (rdev_p) {
- cxio_hal_pblpool_destroy(rdev_p);
- cxio_hal_rqtpool_destroy(rdev_p);
- list_del(&rdev_p->entry);
- cxio_hal_destroy_ctrl_qp(rdev_p);
- cxio_hal_destroy_resource(rdev_p->rscp);
- rdev_p->t3cdev_p->ulp = NULL;
- }
-}
-
-int __init cxio_hal_init(void)
-{
- if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
- return -ENOMEM;
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
- return 0;
-}
-
-void __exit cxio_hal_exit(void)
-{
- struct cxio_rdev *rdev, *tmp;
-
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
- list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
- cxio_rdev_close(rdev);
- cxio_hal_destroy_rhdl_resource();
-}
-
-static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_swsq *sqp;
- __u32 ptr = wq->sq_rptr;
- int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
-
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- while (count--)
- if (!sqp->signaled) {
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- } else if (sqp->complete) {
-
- /*
- * Insert this completed cqe into the swcq.
- */
- pr_debug("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
- __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
- Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
- sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
- = sqp->cqe;
- cq->sw_wptr++;
- sqp->signaled = 0;
- break;
- } else
- break;
-}
-
-static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
- struct t3_cqe *read_cqe)
-{
- read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
- read_cqe->len = wq->oldest_read->read_len;
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
- V_CQE_OPCODE(T3_READ_REQ) |
- V_CQE_TYPE(1));
-}
-
-/*
- * Advance wq->oldest_read to the next read WR in the SWSQ, or to NULL if none remains.
- */
-static void advance_oldest_read(struct t3_wq *wq)
-{
-
- u32 rptr = wq->oldest_read - wq->sq + 1;
- u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
-
- while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
- wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
-
- if (wq->oldest_read->opcode == T3_READ_REQ)
- return;
- rptr++;
- }
- wq->oldest_read = NULL;
-}
-
-/*
- * cxio_poll_cq
- *
- * Caller must:
- * check the validity of the first CQE,
- * supply the wq associated with the qpid.
- *
- * credit: cq credit to return to sge.
- * cqe_flushed: 1 iff the CQE is flushed.
- * cqe: copy of the polled CQE.
- *
- * return value:
- * 0 CQE returned,
- * -1 CQE skipped, try again.
- */
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
-{
- int ret = 0;
- struct t3_cqe *hw_cqe, read_cqe;
-
- *cqe_flushed = 0;
- *credit = 0;
- hw_cqe = cxio_next_cqe(cq);
-
- pr_debug("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
- CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
- CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
- CQE_WRID_LOW(*hw_cqe));
-
- /*
- * skip CQEs not affiliated with a QP.
- */
- if (wq == NULL) {
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Gotta tweak READ completions:
- * 1) the cqe doesn't contain the sq_wptr from the wr.
- * 2) opcode not reflected from the wr.
- * 3) read_len not reflected from the wr.
- * 4) cq_type is RQ_TYPE not SQ_TYPE.
- */
- if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
-
- /*
- * If this is an unsolicited read response, then the read
- * was generated by the kernel driver as part of peer-2-peer
- * connection setup. So ignore the completion.
- */
- if (!wq->oldest_read) {
- if (CQE_STATUS(*hw_cqe))
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Don't write to the HWCQ, so create a new read req CQE
- * in local memory.
- */
- create_read_req_cqe(wq, hw_cqe, &read_cqe);
- hw_cqe = &read_cqe;
- advance_oldest_read(wq);
- }
-
- /*
- * T3A: Discard TERMINATE CQEs.
- */
- if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
- ret = -1;
- wq->error = 1;
- goto skip_cqe;
- }
-
- if (CQE_STATUS(*hw_cqe) || wq->error) {
- *cqe_flushed = wq->error;
- wq->error = 1;
-
- /*
- * T3A inserts errors into the CQE. We cannot return
- * these as work completions.
- */
- /* incoming write failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
- && RQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
- /* incoming read request failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
-
- /* incoming SEND with no receive posted failures */
- if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- ret = -1;
- goto skip_cqe;
- }
- BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
- goto proc_cqe;
- }
-
- /*
- * RECV completion.
- */
- if (RQ_TYPE(*hw_cqe)) {
-
- /*
- * HW only validates 4 bits of MSN. So we must validate that
- * the MSN in the SEND is the next expected MSN. If its not,
- * then we complete this with TPT_ERR_MSN and mark the wq in
- * error.
- */
-
- if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
- wq->error = 1;
- hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
- goto proc_cqe;
- }
- goto proc_cqe;
- }
-
- /*
- * If we get here it's a send completion.
- *
- * Handle out of order completion. These get stuffed
- * in the SW SQ. Then the SW SQ is walked to move any
- * now in-order completions into the SW CQ. This handles
- * 2 cases:
- * 1) reaping unsignaled WRs when the first subsequent
- * signaled WR is completed.
- * 2) out of order read completions.
- */
- if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
- struct t3_swsq *sqp;
-
- pr_debug("%s out of order completion going in swsq at idx %ld\n",
- __func__,
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe),
- wq->sq_size_log2));
- sqp = wq->sq +
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
- sqp->cqe = *hw_cqe;
- sqp->complete = 1;
- ret = -1;
- goto flush_wq;
- }
-
-proc_cqe:
- *cqe = *hw_cqe;
-
- /*
- * Reap the associated WR(s) that are freed up with this
- * completion.
- */
- if (SQ_TYPE(*hw_cqe)) {
- wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
- pr_debug("%s completing sq idx %ld\n", __func__,
- Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
- *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
- wq->sq_rptr++;
- } else {
- pr_debug("%s completing rq idx %ld\n", __func__,
- Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
- *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
- if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
- cxio_hal_pblpool_free(wq->rdev,
- wq->rq[Q_PTR2IDX(wq->rq_rptr,
- wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
- BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
- wq->rq_rptr++;
- }
-
-flush_wq:
- /*
- * Flush any completed cqes that are now in-order.
- */
- flush_completed_wrs(wq, cq);
-
-skip_cqe:
- if (SW_CQE(*hw_cqe)) {
- pr_debug("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->sw_rptr);
- ++cq->sw_rptr;
- } else {
- pr_debug("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->rptr);
- ++cq->rptr;
-
- /*
- * T3A: compute credits.
- */
- if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
- || ((cq->rptr - cq->wptr) >= 128)) {
- *credit = cq->rptr - cq->wptr;
- cq->wptr = cq->rptr;
- }
- }
- return ret;
-}
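
The credit logic at the end of cxio_poll_cq() batches returns: credits go back to the SGE only once the consumed-but-unreturned span exceeds half the CQ or reaches 128 entries. A standalone sketch with assumed pointer values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rptr = 300, wptr = 100; /* assumed free-running counters */
        unsigned int size_log2 = 8;          /* 256-entry CQ */
        unsigned int credit = 0;

        /* same predicate as the skip_cqe path above */
        if ((rptr - wptr) > (1u << (size_log2 - 1)) || (rptr - wptr) >= 128) {
            credit = rptr - wptr;
            wptr = rptr;
        }
        printf("returned %u credits\n", credit); /* 200 */
        return 0;
    }
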
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
deleted file mode 100644
index 40c029ffa425..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_HAL_H__
-#define __CXIO_HAL_H__
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kfifo.h>
-
-#include "t3_cpl.h"
-#include "t3cdev.h"
-#include "cxgb3_ctl_defs.h"
-#include "cxio_wr.h"
-
-#define T3_CTRL_QP_ID FW_RI_SGEEC_START
-#define T3_CTL_QP_TID FW_RI_TID_START
-#define T3_CTRL_QP_SIZE_LOG2 8
-#define T3_CTRL_CQ_ID 0
-
-#define T3_MAX_NUM_RI (1<<15)
-#define T3_MAX_NUM_QP (1<<15)
-#define T3_MAX_NUM_CQ (1<<15)
-#define T3_MAX_NUM_PD (1<<15)
-#define T3_MAX_PBL_SIZE 256
-#define T3_MAX_RQ_SIZE 1024
-#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 65536
-#define T3_MAX_NUM_STAG (1<<15)
-#define T3_MAX_MR_SIZE 0x100000000ULL
-#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
-
-#define T3_STAG_UNSET 0xffffffff
-
-#define T3_MAX_DEV_NAME_LEN 32
-
-#define CXIO_FW_MAJ 7
-
-struct cxio_hal_ctrl_qp {
- u32 wptr;
- u32 rptr;
- struct mutex lock; /* for the wptr, can sleep */
- wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
- union t3_wr *workq; /* the work request queue */
- dma_addr_t dma_addr; /* pci bus address of the workq */
- DEFINE_DMA_UNMAP_ADDR(mapping);
- void __iomem *doorbell;
-};
-
-struct cxio_hal_resource {
- struct kfifo tpt_fifo;
- spinlock_t tpt_fifo_lock;
- struct kfifo qpid_fifo;
- spinlock_t qpid_fifo_lock;
- struct kfifo cqid_fifo;
- spinlock_t cqid_fifo_lock;
- struct kfifo pdid_fifo;
- spinlock_t pdid_fifo_lock;
-};
-
-struct cxio_qpid_list {
- struct list_head entry;
- u32 qpid;
-};
-
-struct cxio_ucontext {
- struct list_head qpids;
- struct mutex lock;
-};
-
-struct cxio_rdev {
- char dev_name[T3_MAX_DEV_NAME_LEN];
- struct t3cdev *t3cdev_p;
- struct rdma_info rnic_info;
- struct adap_ports port_info;
- struct cxio_hal_resource *rscp;
- struct cxio_hal_ctrl_qp ctrl_qp;
- void *ulp;
- unsigned long qpshift;
- u32 qpnr;
- u32 qpmask;
- struct cxio_ucontext uctx;
- struct gen_pool *pbl_pool;
- struct gen_pool *rqt_pool;
- struct list_head entry;
- struct ch_embedded_info fw_info;
- u32 flags;
-#define CXIO_ERROR_FATAL 1
-};
-
-static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
-{
- return rdev_p->flags & CXIO_ERROR_FATAL;
-}
-
-static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
-{
- return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
-}
-
-typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
- struct sk_buff * skb);
-
-#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
-#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
-#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
-#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
-#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
-#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
-#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
-#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
-#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
-
-struct respQ_msg_t {
- __be32 flags; /* flit 0 */
- __be32 cq_ptrid;
- __be64 rsvd; /* flit 1 */
- struct t3_cqe cqe; /* flits 2-3 */
-};
-
-enum t3_cq_opcode {
- CQ_ARM_AN = 0x2,
- CQ_ARM_SE = 0x6,
- CQ_FORCE_AN = 0x3,
- CQ_CREDIT_UPDATE = 0x7
-};
-
-int cxio_rdev_open(struct cxio_rdev *rdev);
-void cxio_rdev_close(struct cxio_rdev *rdev);
-int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
-void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
-void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size);
-int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr);
-int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
-int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
-int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
-int __init cxio_hal_init(void);
-void __exit cxio_hal_exit(void);
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_flush_hw_cq(struct t3_cq *cq);
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit);
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
deleted file mode 100644
index c6e7bc4420b6..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-
-static struct kfifo rhdl_fifo;
-static spinlock_t rhdl_fifo_lock;
-
-#define RANDOM_SIZE 16
-
-static int __cxio_init_resource_fifo(struct kfifo *fifo,
- spinlock_t *fifo_lock,
- u32 nr, u32 skip_low,
- u32 skip_high,
- int random)
-{
- u32 i, j, entry = 0, idx;
- u32 random_bytes;
- u32 rarray[16];
- spin_lock_init(fifo_lock);
-
- if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 0; i < skip_low + skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
- if (random) {
- j = 0;
- random_bytes = prandom_u32();
- for (i = 0; i < RANDOM_SIZE; i++)
- rarray[i] = i + skip_low;
- for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
- if (j >= RANDOM_SIZE) {
- j = 0;
- random_bytes = prandom_u32();
- }
- idx = (random_bytes >> (j * 2)) & 0xF;
- kfifo_in(fifo,
- (unsigned char *) &rarray[idx],
- sizeof(u32));
- rarray[idx] = i;
- j++;
- }
- for (i = 0; i < RANDOM_SIZE; i++)
- kfifo_in(fifo,
- (unsigned char *) &rarray[i],
- sizeof(u32));
- } else
- for (i = skip_low; i < nr - skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
- for (i = 0; i < skip_low + skip_high; i++)
- if (kfifo_out_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), fifo_lock) != sizeof(u32))
- break;
- return 0;
-}
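
The randomized path above is a 16-slot shuffle window: rarray is primed with the first ids, each later id evicts a randomly chosen slot, and the evicted occupant is what actually enters the kfifo. A portable sketch of just the window mechanics, with rand() standing in for prandom_u32():

    #include <stdio.h>
    #include <stdlib.h>

    #define WINDOW 16

    int main(void)
    {
        unsigned int window[WINDOW], i, idx;

        srand(1); /* assumed stand-in for prandom_u32() */
        for (i = 0; i < WINDOW; i++)
            window[i] = i; /* prime the window with ids 0..15 */

        for (i = WINDOW; i < 32; i++) {
            idx = (unsigned int)rand() & 0xF; /* pick a slot to evict */
            printf("emit %u\n", window[idx]);
            window[idx] = i; /* the new id takes the slot */
        }
        for (i = 0; i < WINDOW; i++)
            printf("drain %u\n", window[i]); /* flush the window last */
        return 0;
    }
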
-
-static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 0));
-}
-
-static int cxio_init_resource_fifo_random(struct kfifo *fifo,
- spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
-
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 1));
-}
-
-static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
-{
- u32 i;
-
- spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
-
- if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 16; i < T3_MAX_NUM_QP; i++)
- if (!(i & rdev_p->qpmask))
- kfifo_in(&rdev_p->rscp->qpid_fifo,
- (unsigned char *) &i, sizeof(u32));
- return 0;
-}
-
-int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
-{
- return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
- 0);
-}
-
-void cxio_hal_destroy_rhdl_resource(void)
-{
- kfifo_free(&rhdl_fifo);
-}
-
-/* nr_* must be a power of 2 */
-int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
-{
- int err = 0;
- struct cxio_hal_resource *rscp;
-
- rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
- if (!rscp)
- return -ENOMEM;
- rdev_p->rscp = rscp;
- err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
- &rscp->tpt_fifo_lock,
- nr_tpt, 1, 0);
- if (err)
- goto tpt_err;
- err = cxio_init_qpid_fifo(rdev_p);
- if (err)
- goto qpid_err;
- err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
- nr_cqid, 1, 0);
- if (err)
- goto cqid_err;
- err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
- nr_pdid, 1, 0);
- if (err)
- goto pdid_err;
- return 0;
-pdid_err:
- kfifo_free(&rscp->cqid_fifo);
-cqid_err:
- kfifo_free(&rscp->qpid_fifo);
-qpid_err:
- kfifo_free(&rscp->tpt_fifo);
-tpt_err:
- return -ENOMEM;
-}
-
-/*
- * returns 0 if no resource available
- */
-static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
-{
- u32 entry;
- if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
- return entry;
- else
- return 0; /* fifo empty */
-}
-
-static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
- u32 entry)
-{
- BUG_ON(
- kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
- == 0);
-}
-
-u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
-}
-
-void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
-{
- cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
-}
-
-u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
-{
- u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
- &rscp->qpid_fifo_lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
-{
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
-}
-
-u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
-}
-
-void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
-{
- cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
-}
-
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
-}
-
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
-{
- cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
-}
-
-void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
-{
- kfifo_free(&rscp->tpt_fifo);
- kfifo_free(&rscp->cqid_fifo);
- kfifo_free(&rscp->qpid_fifo);
- kfifo_free(&rscp->pdid_fifo);
- kfree(rscp);
-}
-
-/*
- * PBL Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
-
-u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- return (u32)addr;
-}
-
-void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
- gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
-}
-
-int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned pbl_start, pbl_chunk;
-
- rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
- if (!rdev_p->pbl_pool)
- return -ENOMEM;
-
- pbl_start = rdev_p->rnic_info.pbl_base;
- pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
-
- while (pbl_start < rdev_p->rnic_info.pbl_top) {
- pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
- pbl_chunk);
- if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
- pr_debug("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- pr_warn("%s: Failed to add all PBL chunks (%x/%x)\n",
- __func__, pbl_start,
- rdev_p->rnic_info.pbl_top - pbl_start);
- return 0;
- }
- pbl_chunk >>= 1;
- } else {
- pr_debug("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- pbl_start += pbl_chunk;
- }
- }
-
- return 0;
-}
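
cxio_hal_pblpool_create() uses binary backoff: it offers the whole remaining PBL range to the gen_pool, halves the chunk on each failed add, and gives up below a floor of 1024 << MIN_PBL_SHIFT. A toy sketch of the halving walk over an assumed range, with a fake stand-in for gen_pool_add():

    #include <stdio.h>

    /* assumed stand-in: pretend the pool only accepts chunks <= 256KB */
    static int fake_gen_pool_add(unsigned int start, unsigned int chunk)
    {
        return chunk <= 256 * 1024 ? 0 : -1;
    }

    int main(void)
    {
        unsigned int start = 0, top = 1024 * 1024 - 1; /* assumed range */
        unsigned int chunk = top - start + 1;

        while (start < top) {
            if (fake_gen_pool_add(start, chunk)) {
                chunk >>= 1; /* backoff: retry with half the size */
            } else {
                printf("added chunk at %#x len %#x\n", start, chunk);
                start += chunk;
            }
        }
        return 0;
    }
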
-
-void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->pbl_pool);
-}
-
-/*
- * RQT Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
-#define RQT_CHUNK 2*1024*1024
-
-u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- return (u32)addr;
-}
-
-void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
- gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
-}
-
-int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned long i;
- rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
- if (rdev_p->rqt_pool)
- for (i = rdev_p->rnic_info.rqt_base;
- i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
- i += RQT_CHUNK)
- gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
- return rdev_p->rqt_pool ? 0 : -ENOMEM;
-}
-
-void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->rqt_pool);
-}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.h b/drivers/infiniband/hw/cxgb3/cxio_resource.h
deleted file mode 100644
index a2703a3d882d..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_RESOURCE_H__
-#define __CXIO_RESOURCE_H__
-
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/genalloc.h>
-#include "cxio_hal.h"
-
-extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
-extern void cxio_hal_destroy_rhdl_resource(void);
-extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
- u32 nr_pdid);
-extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
-extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
-extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
-extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
-
-#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
-extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-
-#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
-extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
deleted file mode 100644
index 53aa5c36247a..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_WR_H__
-#define __CXIO_WR_H__
-
-#include <asm/io.h>
-#include <linux/pci.h>
-#include <linux/timer.h>
-#include "firmware_exports.h"
-
-#define T3_MAX_SGE 4
-#define T3_MAX_INLINE 64
-#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
-#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
-#define T3_STAG0_PAGE_SHIFT 15
-
-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
-#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
- ((rptr)!=(wptr)) )
-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
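
These macros treat rptr and wptr as free-running 32-bit counters that are never masked; only Q_PTR2IDX reduces a counter to a slot index, and Q_GENBIT flips each time a counter crosses a queue-size boundary, which is how consumers detect wrap-around. A minimal sketch with assumed values:

    #include <stdio.h>

    #define Q_COUNT(rptr, wptr)       ((wptr) - (rptr))
    #define Q_GENBIT(ptr, size_log2)  (!(((ptr) >> (size_log2)) & 0x1))
    #define Q_PTR2IDX(ptr, size_log2) ((ptr) & ((1UL << (size_log2)) - 1))

    int main(void)
    {
        unsigned int rptr = 254, wptr = 259; /* assumed, 8-slot queue */
        unsigned int size_log2 = 3;

        printf("count %u\n", Q_COUNT(rptr, wptr));       /* 5 */
        printf("idx %lu\n", Q_PTR2IDX(wptr, size_log2)); /* 259 & 7 = 3 */
        printf("gen %d\n", Q_GENBIT(wptr, size_log2));   /* 1: (259 >> 3) & 1 == 0 */
        return 0;
    }
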
-
-static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
-{
- writel(((1<<31) | qpid), doorbell);
-}
-
-#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
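
SEQ32_GE is a wrap-safe "greater or equal" on serial numbers: x >= y iff the unsigned difference x - y falls in the lower half of the 32-bit space. A standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define SEQ32_GE(x, y) (!((((uint32_t)(x)) - ((uint32_t)(y))) & 0x80000000u))

    int main(void)
    {
        printf("%d\n", SEQ32_GE(5u, 0xfffffffbu)); /* 1: 5 is 10 steps ahead */
        printf("%d\n", SEQ32_GE(0xfffffffbu, 5u)); /* 0: it lags behind */
        return 0;
    }
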
-
-enum t3_wr_flags {
- T3_COMPLETION_FLAG = 0x01,
- T3_NOTIFY_FLAG = 0x02,
- T3_SOLICITED_EVENT_FLAG = 0x04,
- T3_READ_FENCE_FLAG = 0x08,
- T3_LOCAL_FENCE_FLAG = 0x10
-} __packed;
-
-enum t3_wr_opcode {
- T3_WR_BP = FW_WROPCODE_RI_BYPASS,
- T3_WR_SEND = FW_WROPCODE_RI_SEND,
- T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
- T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
- T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
- T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
- T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
- T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
- T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
- T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __packed;
-
-enum t3_rdma_opcode {
- T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
- T3_READ_REQ,
- T3_READ_RESP,
- T3_SEND,
- T3_SEND_WITH_INV,
- T3_SEND_WITH_SE,
- T3_SEND_WITH_SE_INV,
- T3_TERMINATE,
- T3_RDMA_INIT, /* CHELSIO RI specific ... */
- T3_BIND_MW,
- T3_FAST_REGISTER,
- T3_LOCAL_INV,
- T3_QP_MOD,
- T3_BYPASS,
- T3_RDMA_READ_REQ_WITH_INV,
-} __packed;
-
-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
-{
- switch (wrop) {
- case T3_WR_BP: return T3_BYPASS;
- case T3_WR_SEND: return T3_SEND;
- case T3_WR_WRITE: return T3_RDMA_WRITE;
- case T3_WR_READ: return T3_READ_REQ;
- case T3_WR_INV_STAG: return T3_LOCAL_INV;
- case T3_WR_BIND: return T3_BIND_MW;
- case T3_WR_INIT: return T3_RDMA_INIT;
- case T3_WR_QP_MOD: return T3_QP_MOD;
- case T3_WR_FASTREG: return T3_FAST_REGISTER;
- default: break;
- }
- return -1;
-}
-
-
-/* Work request id */
-union t3_wrid {
- struct {
- u32 hi;
- u32 low;
- } id0;
- u64 id1;
-};
-
-#define WRID(wrid) (wrid.id1)
-#define WRID_GEN(wrid) (wrid.id0.wr_gen)
-#define WRID_IDX(wrid) (wrid.id0.wr_idx)
-#define WRID_LO(wrid) (wrid.id0.wr_lo)
-
-struct fw_riwrh {
- __be32 op_seop_flags;
- __be32 gen_tid_len;
-};
-
-#define S_FW_RIWR_OP 24
-#define M_FW_RIWR_OP 0xff
-#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
-#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
-
-#define S_FW_RIWR_SOPEOP 22
-#define M_FW_RIWR_SOPEOP 0x3
-#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
-
-#define S_FW_RIWR_FLAGS 8
-#define M_FW_RIWR_FLAGS 0x3fffff
-#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
-#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
-
-#define S_FW_RIWR_TID 8
-#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
-
-#define S_FW_RIWR_LEN 0
-#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
-
-#define S_FW_RIWR_GEN 31
-#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
-
-struct t3_sge {
- __be32 stag;
- __be32 len;
- __be64 to;
-};
-
-/* If num_sgle is zero, flit 5+ contains immediate data.*/
-struct t3_send_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
-
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 rem_stag;
- __be32 plen; /* 3 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
-};
-
-#define T3_MAX_FASTREG_DEPTH 10
-#define T3_MAX_FASTREG_FRAG 10
-
-struct t3_fastreg_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 len;
- __be32 va_base_hi; /* 3 */
- __be32 va_base_lo_fbo;
- __be32 page_type_perms; /* 4 */
- __be32 reserved1;
- __be64 pbl_addrs[0]; /* 5+ */
-};
-
-/*
- * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
- */
-struct t3_pbl_frag {
- struct fw_riwrh wrh; /* 0 */
- __be64 pbl_addrs[14]; /* 1..14 */
-};
-
-#define S_FR_PAGE_COUNT 24
-#define M_FR_PAGE_COUNT 0xff
-#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
-#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
-
-#define S_FR_PAGE_SIZE 16
-#define M_FR_PAGE_SIZE 0x1f
-#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
-#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
-
-#define S_FR_TYPE 8
-#define M_FR_TYPE 0x1
-#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
-#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
-
-#define S_FR_PERMS 0
-#define M_FR_PERMS 0xff
-#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
-#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
-
-struct t3_local_inv_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 reserved;
-};
-
-struct t3_rdma_write_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 stag_sink;
- __be64 to_sink; /* 3 */
- __be32 plen; /* 4 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
-};
-
-struct t3_rdma_read_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 local_inv;
- u8 reserved[2];
- __be32 rem_stag;
- __be64 rem_to; /* 3 */
- __be32 local_stag; /* 4 */
- __be32 local_len;
- __be64 local_to; /* 5 */
-};
-
-struct t3_bind_mw_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u16 reserved; /* 2 */
- u8 type;
- u8 perms;
- __be32 mr_stag;
- __be32 mw_stag; /* 3 */
- __be32 mw_len;
- __be64 mw_va; /* 4 */
- __be32 mr_pbl_addr; /* 5 */
- u8 reserved2[3];
- u8 mr_pagesz;
-};
-
-struct t3_receive_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 pagesz[T3_MAX_SGE];
- __be32 num_sgle; /* 2 */
- struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
- __be32 pbl_addr[T3_MAX_SGE];
-};
-
-struct t3_bypass_wr {
- struct fw_riwrh wrh;
- union t3_wrid wrid; /* 1 */
-};
-
-struct t3_modify_qp_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 flags; /* 2 */
- __be32 quiesce; /* 2 */
- __be32 max_ird; /* 3 */
- __be32 max_ord; /* 3 */
- __be64 sge_cmd; /* 4 */
- __be64 ctx1; /* 5 */
- __be64 ctx0; /* 6 */
-};
-
-enum t3_modify_qp_flags {
- MODQP_QUIESCE = 0x01,
- MODQP_MAX_IRD = 0x02,
- MODQP_MAX_ORD = 0x04,
- MODQP_WRITE_EC = 0x08,
- MODQP_READ_EC = 0x10,
-};
-
-
-enum t3_mpa_attrs {
- uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
- uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
- uP_RI_MPA_CRC_ENABLE = 0x4,
- uP_RI_MPA_IETF_ENABLE = 0x8
-} __packed;
-
-enum t3_qp_caps {
- uP_RI_QP_RDMA_READ_ENABLE = 0x01,
- uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
- uP_RI_QP_BIND_ENABLE = 0x04,
- uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
- uP_RI_QP_STAG0_ENABLE = 0x10
-} __packed;
-
-enum rdma_init_rtr_types {
- RTR_READ = 1,
- RTR_WRITE = 2,
- RTR_SEND = 3,
-};
-
-#define S_RTR_TYPE 2
-#define M_RTR_TYPE 0x3
-#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
-#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
-
-#define S_CHAN 4
-#define M_CHAN 0x3
-#define V_CHAN(x) ((x) << S_CHAN)
-#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
-
-struct t3_rdma_init_attr {
- u32 tid;
- u32 qpid;
- u32 pdid;
- u32 scqid;
- u32 rcqid;
- u32 rq_addr;
- u32 rq_size;
- enum t3_mpa_attrs mpaattrs;
- enum t3_qp_caps qpcaps;
- u16 tcp_emss;
- u32 ord;
- u32 ird;
- u64 qp_dma_addr;
- u32 qp_dma_size;
- enum rdma_init_rtr_types rtr_type;
- u16 flags;
- u16 rqe_count;
- u32 irs;
- u32 chan;
-};
-
-struct t3_rdma_init_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 qpid; /* 2 */
- __be32 pdid;
- __be32 scqid; /* 3 */
- __be32 rcqid;
- __be32 rq_addr; /* 4 */
- __be32 rq_size;
- u8 mpaattrs; /* 5 */
- u8 qpcaps;
- __be16 ulpdu_size;
- __be16 flags_rtr_type;
- __be16 rqe_count;
- __be32 ord; /* 6 */
- __be32 ird;
- __be64 qp_dma_addr; /* 7 */
- __be32 qp_dma_size; /* 8 */
- __be32 irs;
-};
-
-struct t3_genbit {
- u64 flit[15];
- __be64 genbit;
-};
-
-struct t3_wq_in_err {
- u64 flit[13];
- u64 err;
-};
-
-enum rdma_init_wr_flags {
- MPA_INITIATOR = (1<<0),
- PRIV_QP = (1<<1),
-};
-
-union t3_wr {
- struct t3_send_wr send;
- struct t3_rdma_write_wr write;
- struct t3_rdma_read_wr read;
- struct t3_receive_wr recv;
- struct t3_fastreg_wr fastreg;
- struct t3_pbl_frag pbl_frag;
- struct t3_local_inv_wr local_inv;
- struct t3_bind_mw_wr bind;
- struct t3_bypass_wr bypass;
- struct t3_rdma_init_wr init;
- struct t3_modify_qp_wr qp_mod;
- struct t3_genbit genbit;
- struct t3_wq_in_err wq_in_err;
- __be64 flit[16];
-};
-
-#define T3_SQ_CQE_FLIT 13
-#define T3_SQ_COOKIE_FLIT 14
-
-#define T3_RQ_COOKIE_FLIT 13
-#define T3_RQ_CQE_FLIT 14
-
-static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
-{
- return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
-}
-
-enum t3_wr_hdr_bits {
- T3_EOP = 1,
- T3_SOP = 2,
- T3_SOPEOP = T3_EOP|T3_SOP,
-};
-
-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
- enum t3_wr_flags flags, u8 genbit, u32 tid,
- u8 len, u8 sopeop)
-{
- wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
- V_FW_RIWR_SOPEOP(sopeop) |
- V_FW_RIWR_FLAGS(flags));
- wmb();
- wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
- V_FW_RIWR_TID(tid) |
- V_FW_RIWR_LEN(len));
- /* 2nd gen bit... */
- ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
-}
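-
-/*
- * Illustrative sketch (hypothetical caller, not in the original
- * header): a sender fills the WR body first and stamps the header
- * last; the wmb() in build_fw_riwrh() makes the opcode word visible
- * before the generation bits so the hardware never sees a valid
- * genbit ahead of the header:
- */
-static inline void build_send_hdr_demo(union t3_wr *wqe, u32 tid, u8 genbit)
-{
- /* ... wqe->send body filled in by the caller ... */
- build_fw_riwrh(&wqe->send.wrh, T3_WR_SEND, T3_COMPLETION_FLAG,
- genbit, tid, (u8)(sizeof(wqe->send) >> 3), T3_SOPEOP);
-}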
-
-/*
- * T3 ULP2_TX commands
- */
-enum t3_utx_mem_op {
- T3_UTX_MEM_READ = 2,
- T3_UTX_MEM_WRITE = 3
-};
-
-/* T3 MC7 RDMA TPT entry format */
-
-enum tpt_mem_type {
- TPT_NON_SHARED_MR = 0x0,
- TPT_SHARED_MR = 0x1,
- TPT_MW = 0x2,
- TPT_MW_RELAXED_PROTECTION = 0x3
-};
-
-enum tpt_addr_type {
- TPT_ZBTO = 0,
- TPT_VATO = 1
-};
-
-enum tpt_mem_perm {
- TPT_MW_BIND = 0x10,
- TPT_LOCAL_READ = 0x8,
- TPT_LOCAL_WRITE = 0x4,
- TPT_REMOTE_READ = 0x2,
- TPT_REMOTE_WRITE = 0x1
-};
-
-struct tpt_entry {
- __be32 valid_stag_pdid;
- __be32 flags_pagesize_qpid;
-
- __be32 rsvd_pbl_addr;
- __be32 len;
- __be32 va_hi;
- __be32 va_low_or_fbo;
-
- __be32 rsvd_bind_cnt_or_pstag;
- __be32 rsvd_pbl_size;
-};
-
-#define S_TPT_VALID 31
-#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
-#define F_TPT_VALID V_TPT_VALID(1U)
-
-#define S_TPT_STAG_KEY 23
-#define M_TPT_STAG_KEY 0xFF
-#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
-#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
-
-#define S_TPT_STAG_STATE 22
-#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
-#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
-
-#define S_TPT_STAG_TYPE 20
-#define M_TPT_STAG_TYPE 0x3
-#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
-#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
-
-#define S_TPT_PDID 0
-#define M_TPT_PDID 0xFFFFF
-#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
-#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
-
-#define S_TPT_PERM 28
-#define M_TPT_PERM 0xF
-#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
-#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
-
-#define S_TPT_REM_INV_DIS 27
-#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
-#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
-
-#define S_TPT_ADDR_TYPE 26
-#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
-#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
-
-#define S_TPT_MW_BIND_ENABLE 25
-#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
-#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
-
-#define S_TPT_PAGE_SIZE 20
-#define M_TPT_PAGE_SIZE 0x1F
-#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
-#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
-
-#define S_TPT_PBL_ADDR 0
-#define M_TPT_PBL_ADDR 0x1FFFFFFF
-#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
-#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
-
-#define S_TPT_QPID 0
-#define M_TPT_QPID 0xFFFFF
-#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
-#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
-
-#define S_TPT_PSTAG 0
-#define M_TPT_PSTAG 0xFFFFFF
-#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
-#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
-
-#define S_TPT_PBL_SIZE 0
-#define M_TPT_PBL_SIZE 0xFFFFF
-#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
-#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
-
-/*
- * CQE defs
- */
-struct t3_cqe {
- __be32 header;
- __be32 len;
- union {
- struct {
- __be32 stag;
- __be32 msn;
- } rcqe;
- struct {
- u32 wrid_hi;
- u32 wrid_low;
- } scqe;
- } u;
-};
-
-#define S_CQE_OOO 31
-#define M_CQE_OOO 0x1
-#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
-#define V_CQE_OOO(x) ((x)<<S_CQE_OOO)
-
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0x7FFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_GENBIT 10
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
-#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
-#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
-#define SQ_TYPE(x) (CQE_TYPE((x)))
-#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
-
-#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
-
-#define CQE_LEN(x) (be32_to_cpu((x).len))
-
-/* used for RQ completion processing */
-#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
-#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
-
-/* used for SQ completion processing */
-#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
-
-/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
-
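-/*
- * Illustrative sketch (hypothetical helper, not in the original
- * header): decoding one completion with the accessors above. SQ and
- * RQ completions share the header layout but interpret the union
- * differently:
- */
-static inline void cqe_decode_demo(struct t3_cqe cqe)
-{
- if (SQ_TYPE(cqe))
- pr_debug("SQ cqe: qpid %u op %u status %u wptr %u\n",
- CQE_QPID(cqe), CQE_OPCODE(cqe),
- CQE_STATUS(cqe), CQE_WRID_SQ_WPTR(cqe));
- else
- pr_debug("RQ cqe: qpid %u len %u msn %u\n",
- CQE_QPID(cqe), CQE_LEN(cqe), CQE_WRID_MSN(cqe));
-}
-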
-#define TPT_ERR_SUCCESS 0x0
-#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
- /* STAG is out of limit, zero, */
- /* or the STAG key mismatches */
-#define TPT_ERR_PDID 0x2 /* PDID mismatch */
-#define TPT_ERR_QPID 0x3 /* QPID mismatch */
-#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
-#define TPT_ERR_WRAP 0x5 /* Wrap error */
-#define TPT_ERR_BOUND 0x6 /* base and bounds violation */
-#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate an */
- /* MR with a bound MW */
-#define TPT_ERR_ECC 0x9 /* ECC error detected */
-#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
- /* reading PSTAG for a MW */
- /* Invalidate */
-#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
- /* software error */
-#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
-#define TPT_ERR_CRC 0x10 /* CRC error */
-#define TPT_ERR_MARKER 0x11 /* Marker error */
-#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
-#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
-#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
-#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
-#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
-#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
-#define TPT_ERR_MSN 0x18 /* MSN error */
-#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
-#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
- /* or READ_REQ */
-#define TPT_ERR_MSN_GAP 0x1B
-#define TPT_ERR_MSN_RANGE 0x1C
-#define TPT_ERR_IRD_OVERFLOW 0x1D
-#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
- /* software error */
-#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
- /* mismatch) */
-
-struct t3_swsq {
- __u64 wr_id;
- struct t3_cqe cqe;
- __u32 sq_wptr;
- __be32 read_len;
- int opcode;
- int complete;
- int signaled;
-};
-
-struct t3_swrq {
- __u64 wr_id;
- __u32 pbl_addr;
-};
-
-/*
- * A T3 WQ implements both the SQ and RQ.
- */
-struct t3_wq {
- union t3_wr *queue; /* DMA accessible memory */
- dma_addr_t dma_addr; /* DMA address for HW */
- DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap cruft */
- u32 error; /* 1 once we go to ERROR */
- u32 qpid;
- u32 wptr; /* idx to next available WR slot */
- u32 size_log2; /* total wq size */
- struct t3_swsq *sq; /* SW SQ */
- struct t3_swsq *oldest_read; /* tracks oldest pending read */
- u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
- u32 sq_rptr; /* pending wrs */
- u32 sq_size_log2; /* sq size */
- struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids) */
- u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
- u32 rq_rptr; /* pending wrs */
- struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
- u32 rq_size_log2; /* rq size */
- u32 rq_addr; /* rq adapter address */
- void __iomem *doorbell; /* kernel db */
- u64 udb; /* user db if any */
- struct cxio_rdev *rdev;
-};
-
-struct t3_cq {
- u32 cqid;
- u32 rptr;
- u32 wptr;
- u32 size_log2;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- struct t3_cqe *queue;
- struct t3_cqe *sw_queue;
- u32 sw_rptr;
- u32 sw_wptr;
-};
-
-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
- CQE_GENBIT(*cqe))
-
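-/*
- * Illustrative note (not in the original header): Q_GENBIT() flips
- * each time the ring pointer wraps, so CQ_VLD_ENTRY() accepts an
- * entry only while its generation bit matches the current pass and
- * rejects stale entries left over from the previous wrap.
- */
-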
-struct t3_cq_status_page {
- u32 cq_err;
-};
-
-static inline int cxio_cq_in_error(struct t3_cq *cq)
-{
- return ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err;
-}
-
-static inline void cxio_set_cq_in_error(struct t3_cq *cq)
-{
- ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err = 1;
-}
-
-static inline void cxio_set_wq_in_error(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 1;
-}
-
-static inline void cxio_disable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 2;
-}
-
-static inline void cxio_enable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err &= ~2;
-}
-
-static inline int cxio_wq_db_enabled(struct t3_wq *wq)
-{
- return !(wq->queue->wq_in_err.err & 2);
-}
-
-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
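-/*
- * Illustrative sketch (hypothetical helper, not in the original
- * header): draining a CQ with cxio_next_cqe(). Software CQEs are
- * consumed ahead of hardware CQEs, matching the order the lookup
- * checks them:
- */
-static inline void cq_drain_demo(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- while ((cqe = cxio_next_cqe(cq)) != NULL) {
- /* ... process *cqe ... */
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr))
- ++cq->sw_rptr; /* consumed a software CQE */
- else
- ++cq->rptr; /* consumed a hardware CQE */
- }
-}
-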
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
deleted file mode 100644
index 56a8ab6210cf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-#include <rdma/cxgb3-abi.h>
-#include "iwch.h"
-#include "iwch_cm.h"
-
-#define DRV_VERSION "1.1"
-
-MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
-MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static void open_rnic_dev(struct t3cdev *);
-static void close_rnic_dev(struct t3cdev *);
-static void iwch_event_handler(struct t3cdev *, u32, u32);
-
-struct cxgb3_client t3c_client = {
- .name = "iw_cxgb3",
- .add = open_rnic_dev,
- .remove = close_rnic_dev,
- .handlers = t3c_handlers,
- .redirect = iwch_ep_redirect,
- .event_handler = iwch_event_handler
-};
-
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(dev_mutex);
-
-static void disable_dbs(struct iwch_dev *rnicp)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp)
- cxio_disable_wq_db(&qhp->wq);
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp) {
- if (ring_db)
- ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
- qhp->wq.qpid);
- cxio_enable_wq_db(&qhp->wq);
- }
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void iwch_db_drop_task(struct work_struct *work)
-{
- struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
- db_drop_task.work);
- enable_dbs(rnicp, 1);
-}
-
-static void rnic_init(struct iwch_dev *rnicp)
-{
- pr_debug("%s iwch_dev %p\n", __func__, rnicp);
- xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
- INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
-
- rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
- rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
- rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
- rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
- rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
- rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
- rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
- rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
- rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
- rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
- rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
- rnicp->attr.can_resize_wq = 0;
- rnicp->attr.max_rdma_reads_per_qp = 8;
- rnicp->attr.max_rdma_read_resources =
- rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
- rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
- rnicp->attr.max_rdma_read_depth =
- rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
- rnicp->attr.rq_overflow_handled = 0;
- rnicp->attr.can_modify_ird = 0;
- rnicp->attr.can_modify_ord = 0;
- rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
- rnicp->attr.stag0_value = 1;
- rnicp->attr.zbva_support = 1;
- rnicp->attr.local_invalidate_fence = 1;
- rnicp->attr.cq_overflow_detection = 1;
- return;
-}
-
-static void open_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *rnicp;
-
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
- rnicp = ib_alloc_device(iwch_dev, ibdev);
- if (!rnicp) {
- pr_err("Cannot allocate ib device\n");
- return;
- }
- rnicp->rdev.ulp = rnicp;
- rnicp->rdev.t3cdev_p = tdev;
-
- mutex_lock(&dev_mutex);
-
- if (cxio_rdev_open(&rnicp->rdev)) {
- mutex_unlock(&dev_mutex);
- pr_err("Unable to open CXIO rdev\n");
- ib_dealloc_device(&rnicp->ibdev);
- return;
- }
-
- rnic_init(rnicp);
-
- list_add_tail(&rnicp->entry, &dev_list);
- mutex_unlock(&dev_mutex);
-
- if (iwch_register_device(rnicp)) {
- pr_err("Unable to register device\n");
- close_rnic_dev(tdev);
- }
- pr_info("Initialized device %s\n",
- pci_name(rnicp->rdev.rnic_info.pdev));
- return;
-}
-
-static void close_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *dev, *tmp;
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
- if (dev->rdev.t3cdev_p == tdev) {
- dev->rdev.flags = CXIO_ERROR_FATAL;
- synchronize_net();
- cancel_delayed_work_sync(&dev->db_drop_task);
- list_del(&dev->entry);
- iwch_unregister_device(dev);
- cxio_rdev_close(&dev->rdev);
- WARN_ON(!xa_empty(&dev->cqs));
- WARN_ON(!xa_empty(&dev->qps));
- WARN_ON(!xa_empty(&dev->mrs));
- ib_dealloc_device(&dev->ibdev);
- break;
- }
- }
- mutex_unlock(&dev_mutex);
-}
-
-static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
-{
- struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp;
- struct ib_event event;
- u32 portnum = port_id + 1;
- int dispatch = 0;
-
- if (!rdev)
- return;
- rnicp = rdev_to_iwch_dev(rdev);
- switch (evt) {
- case OFFLOAD_STATUS_DOWN: {
- rdev->flags = CXIO_ERROR_FATAL;
- synchronize_net();
- event.event = IB_EVENT_DEVICE_FATAL;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_DOWN: {
- event.event = IB_EVENT_PORT_ERR;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_UP: {
- event.event = IB_EVENT_PORT_ACTIVE;
- dispatch = 1;
- break;
- }
- case OFFLOAD_DB_FULL: {
- disable_dbs(rnicp);
- break;
- }
- case OFFLOAD_DB_EMPTY: {
- enable_dbs(rnicp, 1);
- break;
- }
- case OFFLOAD_DB_DROP: {
- unsigned long delay = 1000;
- unsigned short r;
-
- disable_dbs(rnicp);
- get_random_bytes(&r, 2);
- delay += r & 1023;
-
- /*
- * delay is between 1000-2023 usecs.
- */
- schedule_delayed_work(&rnicp->db_drop_task,
- usecs_to_jiffies(delay));
- break;
- }
- }
-
- if (dispatch) {
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
- }
-
- return;
-}
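-
-/*
- * Illustrative sketch (hypothetical helper, not in the original
- * source): OFFLOAD_DB_DROP above re-enables doorbells only after a
- * randomized 1000-2023 usec backoff, which spreads out the doorbell
- * rings when many QPs recover at once:
- */
-static inline unsigned long db_drop_delay_usecs(void)
-{
- unsigned short r;
-
- get_random_bytes(&r, 2);
- return 1000 + (r & 1023); /* 1000..2023 usecs */
-}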
-
-static int __init iwch_init_module(void)
-{
- int err;
-
- err = cxio_hal_init();
- if (err)
- return err;
- err = iwch_cm_init();
- if (err)
- return err;
- cxio_register_ev_cb(iwch_ev_dispatch);
- cxgb3_register_client(&t3c_client);
- return 0;
-}
-
-static void __exit iwch_exit_module(void)
-{
- cxgb3_unregister_client(&t3c_client);
- cxio_unregister_ev_cb(iwch_ev_dispatch);
- iwch_cm_term();
- cxio_hal_exit();
-}
-
-module_init(iwch_init_module);
-module_exit(iwch_exit_module);
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
deleted file mode 100644
index 310a937bffcf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_H__
-#define __IWCH_H__
-
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/xarray.h>
-#include <linux/workqueue.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-
-struct iwch_pd;
-struct iwch_cq;
-struct iwch_qp;
-struct iwch_mr;
-
-struct iwch_rnic_attributes {
- u32 max_qps;
- u32 max_wrs; /* Max for any SQ/RQ */
- u32 max_sge_per_wr;
- u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
- u32 max_cqs;
- u32 max_cqes_per_cq;
- u32 max_mem_regs;
- u32 max_phys_buf_entries; /* for phys buf list */
- u32 max_pds;
-
- /*
- * The memory page sizes supported by this RNIC.
- * Bit position i in bitmap indicates page of
- * size (4k)^i. Phys block list mode unsupported.
- */
- u32 mem_pgsizes_bitmask;
- u64 max_mr_size;
- u8 can_resize_wq;
-
- /*
- * The maximum number of RDMA Reads that can be outstanding
- * per QP with this RNIC as the target.
- */
- u32 max_rdma_reads_per_qp;
-
- /*
- * The maximum number of resources used for RDMA Reads
- * with this RNIC as the target.
- */
- u32 max_rdma_read_resources;
-
- /*
- * The max depth per QP for initiation of RDMA Read
- * by this RNIC.
- */
- u32 max_rdma_read_qp_depth;
-
- /*
- * The maximum depth for initiation of RDMA Read
- * operations by this RNIC on all QPs
- */
- u32 max_rdma_read_depth;
- u8 rq_overflow_handled;
- u32 can_modify_ird;
- u32 can_modify_ord;
- u32 max_mem_windows;
- u32 stag0_value;
- u8 zbva_support;
- u8 local_invalidate_fence;
- u32 cq_overflow_detection;
-};
-
-struct iwch_dev {
- struct ib_device ibdev;
- struct cxio_rdev rdev;
- u32 device_cap_flags;
- struct iwch_rnic_attributes attr;
- struct xarray cqs;
- struct xarray qps;
- struct xarray mrs;
- struct list_head entry;
- struct delayed_work db_drop_task;
-};
-
-static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct iwch_dev, ibdev);
-}
-
-static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
-{
- return container_of(rdev, struct iwch_dev, rdev);
-}
-
-static inline int t3b_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3B;
-}
-
-static inline int t3a_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3A;
-}
-
-static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
-{
- return xa_load(&rhp->cqs, cqid);
-}
-
-static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
-{
- return xa_load(&rhp->qps, qpid);
-}
-
-static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
-{
- return xa_load(&rhp->mrs, mmid);
-}
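-
-/*
- * Illustrative sketch (hypothetical helper, not in the original
- * header): the lookups above index the xarrays by object id, e.g.
- * resolving the QP for a completion event:
- */
-static inline int qp_exists_demo(struct iwch_dev *rhp, u32 qpid)
-{
- return get_qhp(rhp, qpid) != NULL;
-}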
-
-extern struct cxgb3_client t3c_client;
-extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
deleted file mode 100644
index 0bca72cb4d9a..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ /dev/null
@@ -1,2258 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
-#include <linux/inetdevice.h>
-
-#include <net/neighbour.h>
-#include <net/netevent.h>
-#include <net/route.h>
-
-#include "tcb.h"
-#include "cxgb3_offload.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-
-static char *states[] = {
- "idle",
- "listen",
- "connecting",
- "mpa_wait_req",
- "mpa_req_sent",
- "mpa_req_rcvd",
- "mpa_rep_sent",
- "fpdu_mode",
- "aborting",
- "closing",
- "moribund",
- "dead",
- NULL,
-};
-
-int peer2peer = 0;
-module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
-
-static int ep_timeout_secs = 60;
-module_param(ep_timeout_secs, int, 0644);
-MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
- "in seconds (default=60)");
-
-static int mpa_rev = 1;
-module_param(mpa_rev, int, 0644);
-MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is spec compliant. (default=1)");
-
-static int markers_enabled = 0;
-module_param(markers_enabled, int, 0644);
-MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
-
-static int crc_enabled = 1;
-module_param(crc_enabled, int, 0644);
-MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
-
-static int rcv_win = 256 * 1024;
-module_param(rcv_win, int, 0644);
-MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-
-static int snd_win = 32 * 1024;
-module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
-
-static unsigned int nocong = 0;
-module_param(nocong, uint, 0644);
-MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
-
-static unsigned int cong_flavor = 1;
-module_param(cong_flavor, uint, 0644);
-MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-
-static struct workqueue_struct *workq;
-
-static struct sk_buff_head rxq;
-
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(struct timer_list *t);
-static void connect_reply_upcall(struct iwch_ep *ep, int status);
-
-static void start_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (timer_pending(&ep->timer)) {
- pr_debug("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- get_ep(&ep->com);
- ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- add_timer(&ep->timer);
-}
-
-static void stop_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
- WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
- __func__, ep, ep->com.state);
- return;
- }
- del_timer_sync(&ep->timer);
- put_ep(&ep->com);
-}
-
-static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = l2t_send(tdev, skb, l2e);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = cxgb3_ofld_send(tdev, skb);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
-{
- struct cpl_tid_release *req;
-
- skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
- if (!skb)
- return;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_cxgb3_ofld_send(tdev, skb);
- return;
-}
-
-int iwch_quiesce_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-int iwch_resume_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = 0;
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static void set_emss(struct iwch_ep *ep, u16 opt)
-{
- pr_debug("%s ep %p opt %u\n", __func__, ep, opt);
- ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
- if (G_TCPOPT_TSTAMP(opt))
- ep->emss -= 12;
- if (ep->emss < 128)
- ep->emss = 128;
- pr_debug("emss=%d\n", ep->emss);
-}
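-
-/*
- * Illustrative worked example (hypothetical values, not in the
- * original source): with a 1500-byte MTU table entry, emss becomes
- * 1500 - 40 = 1460 bytes of payload; TCP timestamps cost another
- * 12 bytes, giving 1448.
- */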
-
-static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
-{
- unsigned long flags;
- enum iwch_ep_state state;
-
- spin_lock_irqsave(&epc->lock, flags);
- state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
- return state;
-}
-
-static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- epc->state = new;
-}
-
-static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
- __state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
- return;
-}
-
-static void *alloc_ep(int size, gfp_t gfp)
-{
- struct iwch_ep_common *epc;
-
- epc = kzalloc(size, gfp);
- if (epc) {
- kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
- }
- pr_debug("%s alloc ep %p\n", __func__, epc);
- return epc;
-}
-
-void __free_ep(struct kref *kref)
-{
- struct iwch_ep *ep;
- ep = container_of(container_of(kref, struct iwch_ep_common, kref),
- struct iwch_ep, com);
- pr_debug("%s ep %p state %s\n",
- __func__, ep, states[state_read(&ep->com)]);
- if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
- cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- }
- kfree(ep);
-}
-
-static void release_ep_resources(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- set_bit(RELEASE_RESOURCES, &ep->com.flags);
- put_ep(&ep->com);
-}
-
-static int status2errno(int status)
-{
- switch (status) {
- case CPL_ERR_NONE:
- return 0;
- case CPL_ERR_CONN_RESET:
- return -ECONNRESET;
- case CPL_ERR_ARP_MISS:
- return -EHOSTUNREACH;
- case CPL_ERR_CONN_TIMEDOUT:
- return -ETIMEDOUT;
- case CPL_ERR_TCAM_FULL:
- return -ENOMEM;
- case CPL_ERR_CONN_EXIST:
- return -EADDRINUSE;
- default:
- return -EIO;
- }
-}
-
-/*
- * Try and reuse skbs already allocated...
- */
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
-{
- if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
- skb_trim(skb, 0);
- skb_get(skb);
- } else {
- skb = alloc_skb(len, gfp);
- }
- return skb;
-}
-
-static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- return rt;
-}
-
-static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
-{
- int i = 0;
-
- while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
- ++i;
- return i;
-}
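-
-/*
- * Illustrative worked example (hypothetical table, not in the
- * original source): with mtus[] = { 1500, 4096, 9000 } and a path
- * MTU of 4500, find_best_mtu() returns index 1 (4096), the largest
- * entry that still fits.
- */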
-
-static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p\n", __func__, dev);
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for an active open.
- */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_err("ARP failure during connect\n");
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
- * and send it along.
- */
-static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- struct cpl_abort_req *req = cplhdr(skb);
-
- pr_debug("%s t3cdev %p\n", __func__, dev);
- req->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(dev, skb);
-}
-
-static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
-{
- struct cpl_close_con_req *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- struct cpl_abort_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(skb, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, abort_arp_failure);
- req = skb_put_zero(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_connect(struct iwch_ep *ep)
-{
- struct cpl_act_open_req *req;
- struct sk_buff *skb;
- u32 opt0h, opt0l, opt2;
- unsigned int mtu_idx;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
- skb->priority = CPL_PRIORITY_SETUP;
- set_arp_failure_handler(skb, act_open_req_arp_failure);
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0h = htonl(opt0h);
- req->opt0l = htonl(opt0l);
- req->params = 0;
- req->opt2 = htonl(opt2);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
-
- pr_debug("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
-
- BUG_ON(skb_cloned(skb));
-
- mpalen = sizeof(*mpa) + ep->plen;
- if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
- kfree_skb(skb);
- skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- connect_reply_upcall(ep, -ENOMEM);
- return;
- }
- }
- skb_trim(skb, 0);
- skb_reserve(skb, sizeof(*req));
- skb_put(skb, mpalen);
- skb->priority = CPL_PRIORITY_DATA;
- mpa = (struct mpa_message *) skb->data;
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->private_data_size = htons(ep->plen);
- mpa->revision = mpa_rev;
-
- if (ep->plen)
- memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
- start_ep_timer(ep);
- state_set(&ep->com, MPA_REQ_SENT);
- return;
-}
-
-static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb again. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(mpalen);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- ep->mpa_skb = skb;
- state_set(&ep->com, MPA_REP_SENT);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
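-
-/*
- * Illustrative note (hypothetical sizes, not in the original
- * source): an MPA reply carrying 16 bytes of private data occupies
- * sizeof(struct mpa_message) + 16 bytes on the wire; skb_reserve()
- * above leaves headroom for the struct tx_data_wr header that
- * skb_push() later prepends in front of it.
- */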
-
-static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_establish *req = cplhdr(skb);
- unsigned int tid = GET_TID(req);
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, tid);
-
- dst_confirm(ep->dst);
-
- /* setup the hwtid for this connection */
- ep->hwtid = tid;
- cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
-
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- /* dealloc the atid */
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-
- /* start MPA negotiation */
- send_mpa_req(ep, skb);
-
- return 0;
-}
-
-static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- pr_debug("%s ep %p\n", __FILE__, ep);
- state_set(&ep->com, ABORTING);
- send_abort(ep, skb, gfp);
-}
-
-static void close_complete_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- if (ep->com.cm_id) {
- pr_debug("close complete delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void peer_close_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_DISCONNECT;
- if (ep->com.cm_id) {
- pr_debug("peer close delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static void peer_abort_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- event.status = -ECONNRESET;
- if (ep->com.cm_id) {
- pr_debug("abort delivered ep %p cm_id %p tid %d\n", ep,
- ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_reply_upcall(struct iwch_ep *ep, int status)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p status %d\n", __func__, ep, status);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REPLY;
- event.status = status;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-
- if ((status == 0) || (status == -ECONNREFUSED)) {
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- }
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
- if (status < 0) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_request_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REQUEST;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- event.provider_data = ep;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (state_read(&ep->parent_ep->com) != DEAD) {
- get_ep(&ep->com);
- ep->parent_ep->com.cm_id->event_handler(
- ep->parent_ep->com.cm_id,
- &event);
- }
- put_ep(&ep->parent_ep->com);
- ep->parent_ep = NULL;
-}
-
-static void established_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_ESTABLISHED;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static int update_rx_credits(struct iwch_ep *ep, u32 credits)
-{
- struct cpl_rx_data_ack *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("update_rx_credits - cannot alloc skb!\n");
- return 0;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
- req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
- skb->priority = CPL_PRIORITY_ACK;
- iwch_cxgb3_ofld_send(ep->com.tdev, skb);
- return credits;
-}
-
-static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- int err;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_SENT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- err = -EINVAL;
- goto err;
- }
-
- /*
- * copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * if we don't even have the mpa message, then bail.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /* Validate MPA header. */
- if (mpa->revision != mpa_rev) {
- err = -EPROTO;
- goto err;
- }
- if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
- err = -EPROTO;
- goto err;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- err = -EPROTO;
- goto err;
- }
-
- /*
- * Fail if plen does not account for the packet size.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- err = -EPROTO;
- goto err;
- }
-
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- if (mpa->flags & MPA_REJECT) {
- err = -ECONNREFUSED;
- goto err;
- }
-
- /*
- * If we get here we have accumulated the entire mpa
- * start reply message including private data. And
- * the MPA header is valid.
- */
- state_set(&ep->com, FPDU_MODE);
- ep->mpa_attr.initiator = 1;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
-
- /* bind QP and TID with INIT_WR */
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err;
-
- if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
- iwch_post_zb_read(ep);
- }
-
- goto out;
-err:
- abort_connection(ep, skb, GFP_KERNEL);
-out:
- connect_reply_upcall(ep, err);
- return;
-}
-
-static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_WAIT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
-
- /*
- * Copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * If we don't even have the mpa message, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /*
- * Validate MPA Header.
- */
- if (mpa->revision != mpa_rev) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- /*
- * Fail if plen does not account for the packet size.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- /*
- * If we get here we have accumulated the entire mpa
- * start request message including private data.
- */
- ep->mpa_attr.initiator = 0;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- connect_request_upcall(ep);
- return;
-}
-
-static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_rx_data *hdr = cplhdr(skb);
- unsigned int dlen = ntohs(hdr->len);
-
- pr_debug("%s ep %p dlen %u\n", __func__, ep, dlen);
-
- skb_pull(skb, sizeof(*hdr));
- skb_trim(skb, dlen);
-
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
- switch (state_read(&ep->com)) {
- case MPA_REQ_SENT:
- process_mpa_reply(ep, skb);
- break;
- case MPA_REQ_WAIT:
- process_mpa_request(ep, skb);
- break;
- case MPA_REP_SENT:
- break;
- default:
- pr_err("%s Unexpected streaming data. ep %p state %d tid %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
- break;
- }
-
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Upcall from the adapter indicating data has been transmitted.
- * For us it's just the single MPA request or reply. We can now free
- * the skb holding the mpa message.
- */
-static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_wr_ack *hdr = cplhdr(skb);
- unsigned int credits = ntohs(hdr->credits);
- unsigned long flags;
- int post_zb = 0;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
-
- if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- BUG_ON(credits != 1);
- dst_confirm(ep->dst);
- if (!ep->mpa_skb) {
- pr_debug("%s rdma_init wr_ack ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->mpa_attr.initiator) {
- pr_debug("%s initiator ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (peer2peer && ep->com.state == FPDU_MODE)
- post_zb = 1;
- } else {
- pr_debug("%s responder ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->com.state == MPA_REQ_RCVD) {
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- }
- }
- } else {
- pr_debug("%s lsm ack ep %p state %u freeing skb\n",
- __func__, ep, ep->com.state);
- kfree_skb(ep->mpa_skb);
- ep->mpa_skb = NULL;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (post_zb)
- iwch_post_zb_read(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /*
- * We get 2 abort replies from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case ABORTING:
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- default:
- pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Return whether a failed active open has allocated a TID
- */
-static inline int act_open_has_tid(int status)
-{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
-}
-
-static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
- status2errno(rpl->status));
- connect_reply_upcall(ep, status2errno(rpl->status));
- state_set(&ep->com, DEAD);
- if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
- release_tid(ep->com.tdev, GET_TID(rpl), NULL);
- cxgb3_free_atid(ep->com.tdev, ep->atid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- put_ep(&ep->com);
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_start(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_pass_open_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("t3c_listen_start failed to alloc skb!\n");
- return -ENOMEM;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
- req->local_port = ep->com.local_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_port = 0;
- req->peer_ip = 0;
- req->peer_netmask = 0;
- req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
- req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
- req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
-
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
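listen_start() fills a wire structure with htonl()/htons() before handing it off. A small standalone reminder of that byte-order discipline; struct wire_req and the port value are invented stand-ins, not a real CPL layout.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct wire_req {			/* invented stand-in for a CPL request */
	uint16_t local_port;		/* big-endian on the wire */
	uint32_t local_ip;		/* big-endian on the wire */
};

int main(void)
{
	struct wire_req req;
	const unsigned char *p = (const unsigned char *)&req.local_port;

	req.local_port = htons(4791);		/* host -> network, 16 bits */
	req.local_ip   = htonl(0x7f000001);	/* 127.0.0.1,      32 bits */

	/* Most-significant byte first, regardless of host endianness. */
	printf("port on the wire: %02x %02x\n", p[0], p[1]);
	return 0;
}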
-
-static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_stop(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_close_listserv_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->cpu_idx = 0;
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
- void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- return CPL_RET_BUF_DONE;
-}
-
-static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
-{
- struct cpl_pass_accept_rpl *rpl;
- unsigned int mtu_idx;
- u32 opt0h, opt0l, opt2;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
- skb_get(skb);
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
-
- rpl = cplhdr(skb);
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(opt0h);
- rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
- rpl->opt2 = htonl(opt2);
- rpl->rsvd = rpl->opt2; /* workaround for HW bug */
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-
- return;
-}
-
-static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
- struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
- peer_ip);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
-
- if (tdev->type != T3A)
- release_tid(tdev, hwtid, skb);
- else {
- struct cpl_pass_accept_rpl *rpl;
-
- rpl = cplhdr(skb);
- skb->priority = CPL_PRIORITY_SETUP;
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(F_TCAM_BYPASS);
- rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
- rpl->opt2 = 0;
- rpl->rsvd = rpl->opt2;
- iwch_cxgb3_ofld_send(tdev, skb);
- }
-}
-
-static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *child_ep, *parent_ep = ctx;
- struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int hwtid = GET_TID(req);
- struct dst_entry *dst;
- struct l2t_entry *l2t;
- struct rtable *rt;
- struct iff_mac tim;
-
- pr_debug("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
- if (state_read(&parent_ep->com) != LISTEN) {
- pr_err("%s - listening ep not in LISTEN\n", __func__);
- goto reject;
- }
-
- /*
- * Find the netdev for this connection request.
- */
- tim.mac_addr = req->dst_mac;
- tim.vlan_tag = ntohs(req->vlan_tag);
- if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- pr_err("%s bad dst mac %pM\n", __func__, req->dst_mac);
- goto reject;
- }
-
- /* Find output route */
- rt = find_route(tdev,
- req->local_ip,
- req->peer_ip,
- req->local_port,
- req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
- if (!rt) {
- pr_err("%s - failed to find dst entry!\n", __func__);
- goto reject;
- }
- dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
- if (!l2t) {
- pr_err("%s - failed to allocate l2t entry!\n", __func__);
- dst_release(dst);
- goto reject;
- }
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- pr_err("%s - failed to allocate ep entry!\n", __func__);
- l2t_release(tdev, l2t);
- dst_release(dst);
- goto reject;
- }
- state_set(&child_ep->com, CONNECTING);
- child_ep->com.tdev = tdev;
- child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = AF_INET;
- child_ep->com.local_addr.sin_port = req->local_port;
- child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = AF_INET;
- child_ep->com.remote_addr.sin_port = req->peer_port;
- child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
- get_ep(&parent_ep->com);
- child_ep->parent_ep = parent_ep;
- child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
- child_ep->l2t = l2t;
- child_ep->dst = dst;
- child_ep->hwtid = hwtid;
- timer_setup(&child_ep->timer, ep_timeout, 0);
- cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
- accept_cr(child_ep, req->peer_ip, skb);
- goto out;
-reject:
- reject_cr(tdev, hwtid, req->peer_ip, skb);
-out:
- return CPL_RET_BUF_DONE;
-}
-
-static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_pass_establish *req = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
- start_ep_timer(ep);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int disconnect = 1;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- dst_confirm(ep->dst);
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- __state_set(&ep->com, CLOSING);
- break;
- case MPA_REQ_SENT:
- __state_set(&ep->com, CLOSING);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REP_SENT:
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case FPDU_MODE:
- start_ep_timer(ep);
- __state_set(&ep->com, CLOSING);
- attrs.next_state = IWCH_QP_STATE_CLOSING;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- break;
- case ABORTING:
- disconnect = 0;
- break;
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- disconnect = 0;
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- disconnect = 0;
- break;
- case DEAD:
- disconnect = 0;
- break;
- default:
- BUG_ON(1);
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (disconnect)
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
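peer_close() is one arm of the endpoint state machine: each state maps to a successor when the peer half-closes. A standalone, table-driven model of a few of those transitions; the real handler also drives QP transitions and upcalls, which this sketch omits.

#include <stdio.h>

enum ep_state { MPA_REQ_WAIT, FPDU_MODE, CLOSING, MORIBUND, DEAD, NSTATES };

/* Successor state when the peer half-closes the connection. */
static const enum ep_state on_peer_close[NSTATES] = {
	[MPA_REQ_WAIT] = CLOSING,
	[FPDU_MODE]    = CLOSING,
	[CLOSING]      = MORIBUND,
	[MORIBUND]     = DEAD,
	[DEAD]         = DEAD,
};

int main(void)
{
	enum ep_state s = FPDU_MODE;

	s = on_peer_close[s];	/* FPDU_MODE -> CLOSING  */
	s = on_peer_close[s];	/* CLOSING   -> MORIBUND */
	s = on_peer_close[s];	/* MORIBUND  -> DEAD     */
	printf("final state %d (DEAD = %d)\n", s, DEAD);
	return 0;
}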
-
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
-static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct iwch_ep *ep = ctx;
- struct cpl_abort_rpl *rpl;
- struct sk_buff *rpl_skb;
- struct iwch_qp_attributes attrs;
- int ret;
- int release = 0;
- unsigned long flags;
-
- if (is_neg_adv_abort(req->status)) {
- pr_debug("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
- ep->hwtid);
- t3_l2t_send_event(ep->com.tdev, ep->l2t);
- return CPL_RET_BUF_DONE;
- }
-
- /*
- * We get 2 peer aborts from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p state %u\n", __func__, ep, ep->com.state);
- switch (ep->com.state) {
- case CONNECTING:
- break;
- case MPA_REQ_WAIT:
- stop_ep_timer(ep);
- break;
- case MPA_REQ_SENT:
- stop_ep_timer(ep);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MORIBUND:
- case CLOSING:
- stop_ep_timer(ep);
- /*FALLTHROUGH*/
- case FPDU_MODE:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- ret = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (ret)
- pr_err("%s - qp <- error failed!\n", __func__);
- }
- peer_abort_upcall(ep);
- break;
- case ABORTING:
- break;
- case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
- return CPL_RET_BUF_DONE;
- default:
- BUG_ON(1);
- break;
- }
- dst_confirm(ep->dst);
- if (ep->com.state != ABORTING) {
- __state_set(&ep->com, DEAD);
- release = 1;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- pr_err("%s - cannot allocate skb!\n", __func__);
- release = 1;
- goto out;
- }
- rpl_skb->priority = CPL_PRIORITY_DATA;
- rpl = skb_put(rpl_skb, sizeof(*rpl));
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
-out:
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if ((ep->com.cm_id) && (ep->com.qp)) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- case ABORTING:
- case DEAD:
- break;
- default:
- BUG_ON(1);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * T3A does 3 things when a TERM is received:
- * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
- * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcode cqe into the associated CQ.
- *
- * For (1), we save the message in the qp for later consumption.
- * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
- * For (3), we toss the CQE in cxio_poll_cq().
- *
- * terminate() handles case (1)...
- */
-static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
-
- if (state_read(&ep->com) != FPDU_MODE)
- return CPL_RET_BUF_DONE;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb_pull(skb, sizeof(struct cpl_rdma_terminate));
- pr_debug("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
- return CPL_RET_BUF_DONE;
-}
-
-static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_rdma_ec_status *rep = cplhdr(skb);
- struct iwch_ep *ep = ctx;
-
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
- rep->status);
- if (rep->status) {
- struct iwch_qp_attributes attrs;
-
- pr_err("%s BAD CLOSE - Aborting tid %u\n",
- __func__, ep->hwtid);
- stop_ep_timer(ep);
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- abort_connection(ep, NULL, GFP_KERNEL);
- }
- return CPL_RET_BUF_DONE;
-}
-
-static void ep_timeout(struct timer_list *t)
-{
- struct iwch_ep *ep = from_timer(ep, t, timer);
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int abort = 1;
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
- switch (ep->com.state) {
- case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
- connect_reply_upcall(ep, -ETIMEDOUT);
- break;
- case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
- break;
- case CLOSING:
- case MORIBUND:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- __state_set(&ep->com, ABORTING);
- break;
- default:
- WARN(1, "%s unexpected state ep %p state %u\n",
- __func__, ep, ep->com.state);
- abort = 0;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (abort)
- abort_connection(ep, NULL, GFP_ATOMIC);
- put_ep(&ep->com);
-}
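ep_timeout() recovers the endpoint from the embedded timer with from_timer(), which is container_of() under the hood. A standalone model of that pointer recovery, with invented stand-in types for timer_list and the endpoint:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer {			/* stand-in for struct timer_list */
	void (*fn)(struct timer *t);
};

struct endpoint {		/* stand-in for struct iwch_ep */
	int hwtid;
	struct timer timer;	/* embedded, like ep->timer */
};

static void ep_timeout(struct timer *t)
{
	struct endpoint *ep = container_of(t, struct endpoint, timer);

	printf("timeout on tid %d\n", ep->hwtid);
}

int main(void)
{
	struct endpoint ep = { .hwtid = 42, .timer = { .fn = ep_timeout } };

	ep.timer.fn(&ep.timer);	/* the timer core only sees &ep.timer */
	return 0;
}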
-
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct iwch_ep *ep = to_ep(cm_id);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-
- if (state_read(&ep->com) == DEAD) {
- put_ep(&ep->com);
- return -ECONNRESET;
- }
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
- else {
- send_mpa_reject(ep, pdata, pdata_len);
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- }
- put_ep(&ep->com);
- return 0;
-}
-
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- int err;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- struct iwch_ep *ep = to_ep(cm_id);
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
- err = -ECONNRESET;
- goto err;
- }
-
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- BUG_ON(!qp);
-
- if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
- (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
- abort_connection(ep, NULL, GFP_KERNEL);
- err = -EINVAL;
- goto err;
- }
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = qp;
-
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ird == 0)
- ep->ird = 1;
-
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
-
- /* bind QP to EP and move to RTS */
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- /* bind QP and TID with INIT_WR */
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_MAX_ORD;
-
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err1;
-
- /* if needed, wait for wr_ack */
- if (iwch_rqes_posted(qp)) {
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (err)
- goto err1;
- }
-
- err = send_mpa_reply(ep, conn_param->private_data,
- conn_param->private_data_len);
- if (err)
- goto err1;
-
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- put_ep(&ep->com);
- return 0;
-err1:
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
-err:
- put_ep(&ep->com);
- return err;
-}
-
-static int is_loopback_dst(struct iw_cm_id *cm_id)
-{
- struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
- if (!dev)
- return 0;
- dev_put(dev);
- return 1;
-}
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_ep *ep;
- struct rtable *rt;
- int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- if (cm_id->m_remote_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto out;
- }
-
- if (is_loopback_dst(cm_id)) {
- err = -ENOSYS;
- goto out;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto out;
- }
- timer_setup(&ep->timer, ep_timeout, 0);
- ep->plen = conn_param->private_data_len;
- if (ep->plen)
- memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
- conn_param->private_data, ep->plen);
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ord == 0)
- ep->ord = 1;
-
- ep->com.tdev = h->rdev.t3cdev_p;
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = get_qhp(h, conn_param->qpn);
- BUG_ON(!ep->com.qp);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->atid == -1) {
- pr_err("%s - cannot alloc atid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, IPTOS_LOWDELAY);
- if (!rt) {
- pr_err("%s - cannot find route\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
- &raddr->sin_addr.s_addr);
- if (!ep->l2t) {
- pr_err("%s - cannot alloc l2e\n", __func__);
- err = -ENOMEM;
- goto fail4;
- }
-
- state_set(&ep->com, CONNECTING);
- ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
- sizeof(ep->com.remote_addr));
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- l2t_release(h->rdev.t3cdev_p, ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-out:
- return err;
-}
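iwch_connect() uses the kernel's classic label-based unwind: resources are released in strict reverse order of acquisition, each failure jumping past the cleanups it does not need. A generic standalone sketch of that shape, with invented acquire()/release() helpers:

#include <stdio.h>
#include <stdlib.h>

struct res { int id; };

static struct res *acquire(int id)
{
	struct res *r = malloc(sizeof(*r));

	if (r)
		r->id = id;
	return r;
}

static void release(struct res *r)
{
	free(r);
}

static int connect_like(void)
{
	struct res *a, *b, *c;

	a = acquire(1);
	if (!a)
		return -1;
	b = acquire(2);
	if (!b)
		goto fail_a;
	c = acquire(3);
	if (!c)
		goto fail_b;
	return 0;		/* success: a, b, c stay live (leaked in this toy) */

fail_b:				/* unwind in reverse acquisition order */
	release(b);
fail_a:
	release(a);
	return -1;
}

int main(void)
{
	printf("connect_like() = %d\n", connect_like());
	return 0;
}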
-
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- int err = 0;
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_listen_ep *ep;
-
- might_sleep();
-
- if (cm_id->m_local_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto fail1;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto fail1;
- }
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.tdev = h->rdev.t3cdev_p;
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
-
- /*
- * Allocate a server TID.
- */
- ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->stid == -1) {
- pr_err("%s - cannot alloc atid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- state_set(&ep->com, LISTEN);
- err = listen_start(ep);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (!err) {
- cm_id->provider_data = ep;
- goto out;
- }
-fail3:
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-fail1:
-out:
- return err;
-}
-
-int iwch_destroy_listen(struct iw_cm_id *cm_id)
-{
- int err;
- struct iwch_listen_ep *ep = to_listen_ep(cm_id);
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- might_sleep();
- state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
- err = listen_stop(ep);
- if (err)
- goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-done:
- err = ep->com.rpl_err;
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
- return err;
-}
-
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
-{
-	int ret = 0;
- unsigned long flags;
- int close = 0;
- int fatal = 0;
- struct t3cdev *tdev;
- struct cxio_rdev *rdev;
-
- spin_lock_irqsave(&ep->com.lock, flags);
-
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
-
- tdev = (struct t3cdev *)ep->com.tdev;
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- fatal = 1;
- close_complete_upcall(ep);
- ep->com.state = DEAD;
- }
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- case MPA_REQ_SENT:
- case MPA_REQ_RCVD:
- case MPA_REP_SENT:
- case FPDU_MODE:
- close = 1;
- if (abrupt)
- ep->com.state = ABORTING;
- else {
- ep->com.state = CLOSING;
- start_ep_timer(ep);
- }
- set_bit(CLOSE_SENT, &ep->com.flags);
- break;
- case CLOSING:
- if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
- }
- break;
- case MORIBUND:
- case ABORTING:
- case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
- break;
- default:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (close) {
- if (abrupt)
- ret = send_abort(ep, NULL, gfp);
- else
- ret = send_halfclose(ep, gfp);
- if (ret)
- fatal = 1;
- }
- if (fatal)
- release_ep_resources(ep);
- return ret;
-}
-
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t)
-{
- struct iwch_ep *ep = ctx;
-
- if (ep->dst != old)
- return 0;
-
- pr_debug("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
- l2t);
- dst_hold(new);
- l2t_release(ep->com.tdev, ep->l2t);
- ep->l2t = l2t;
- dst_release(old);
- ep->dst = new;
- return 1;
-}
-
-/*
- * All the CM events are handled on a work queue to have a safe context.
- * These are the real handlers that are called from the work queue.
- */
-static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = act_establish,
- [CPL_ACT_OPEN_RPL] = act_open_rpl,
- [CPL_RX_DATA] = rx_data,
- [CPL_TX_DMA_ACK] = tx_ack,
- [CPL_ABORT_RPL_RSS] = abort_rpl,
- [CPL_ABORT_RPL] = abort_rpl,
- [CPL_PASS_OPEN_RPL] = pass_open_rpl,
- [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
- [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
- [CPL_PASS_ESTABLISH] = pass_establish,
- [CPL_PEER_CLOSE] = peer_close,
- [CPL_ABORT_REQ_RSS] = peer_abort,
- [CPL_CLOSE_CON_RPL] = close_con_rpl,
- [CPL_RDMA_TERMINATE] = terminate,
- [CPL_RDMA_EC_STATUS] = ec_status,
-};
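Both work_handlers[] above and t3c_handlers[] below use the same opcode-indexed function-pointer table. A minimal standalone model of that dispatch pattern, with invented opcodes and handlers:

#include <stdio.h>

enum { OP_DATA, OP_CLOSE, NUM_OPS };

typedef int (*handler_fn)(int arg);

static int on_data(int arg)  { printf("data %d\n", arg);  return 0; }
static int on_close(int arg) { printf("close %d\n", arg); return 0; }

static const handler_fn handlers[NUM_OPS] = {
	[OP_DATA]  = on_data,	/* designated initializers keep the  */
	[OP_CLOSE] = on_close,	/* table sparse and self-documenting */
};

static int dispatch(int op, int arg)
{
	if (op < 0 || op >= NUM_OPS || !handlers[op])
		return -1;	/* unhandled opcode */
	return handlers[op](arg);
}

int main(void)
{
	return dispatch(OP_DATA, 7);
}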
-
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
-static DECLARE_WORK(skb_work, process_work);
-
-static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep_common *epc = ctx;
-
- get_ep(epc);
-
- /*
- * Save ctx and tdev in the skb->cb area.
- */
- *((void **) skb->cb) = ctx;
- *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
-
- /*
- * Queue the skb and schedule the worker thread.
- */
- skb_queue_tail(&rxq, skb);
- queue_work(workq, &skb_work);
- return 0;
-}
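sched() smuggles the context and device pointers through the skb->cb scratch area. A standalone model of that pack/unpack, using memcpy() instead of the driver's direct pointer casts to sidestep alignment concerns; struct fake_skb is invented for illustration.

#include <assert.h>
#include <string.h>

struct fake_skb {
	unsigned char cb[48];		/* same size as skb->cb */
};

static void stash(struct fake_skb *skb, void *ctx, void *dev)
{
	memcpy(skb->cb, &ctx, sizeof(ctx));
	memcpy(skb->cb + sizeof(ctx), &dev, sizeof(dev));
}

static void unstash(struct fake_skb *skb, void **ctx, void **dev)
{
	memcpy(ctx, skb->cb, sizeof(*ctx));
	memcpy(dev, skb->cb + sizeof(*ctx), sizeof(*dev));
}

int main(void)
{
	struct fake_skb skb;
	int ep, tdev;			/* any two objects to point at */
	void *c, *d;

	stash(&skb, &ep, &tdev);
	unstash(&skb, &c, &d);
	assert(c == &ep && d == &tdev);
	return 0;
}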
-
-static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
- if (rpl->status != CPL_ERR_NONE) {
- pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
- rpl->status, GET_TID(rpl));
- }
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * All upcalls from the T3 Core go to sched() to schedule the
- * processing on a work queue.
- */
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = sched,
- [CPL_ACT_OPEN_RPL] = sched,
- [CPL_RX_DATA] = sched,
- [CPL_TX_DMA_ACK] = sched,
- [CPL_ABORT_RPL_RSS] = sched,
- [CPL_ABORT_RPL] = sched,
- [CPL_PASS_OPEN_RPL] = sched,
- [CPL_CLOSE_LISTSRV_RPL] = sched,
- [CPL_PASS_ACCEPT_REQ] = sched,
- [CPL_PASS_ESTABLISH] = sched,
- [CPL_PEER_CLOSE] = sched,
- [CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
- [CPL_RDMA_TERMINATE] = sched,
- [CPL_RDMA_EC_STATUS] = sched,
- [CPL_SET_TCB_RPL] = set_tcb_rpl,
-};
-
-int __init iwch_cm_init(void)
-{
- skb_queue_head_init(&rxq);
-
- workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
- if (!workq)
- return -ENOMEM;
-
- return 0;
-}
-
-void __exit iwch_cm_term(void)
-{
- flush_workqueue(workq);
- destroy_workqueue(workq);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
deleted file mode 100644
index cc7fe644d260..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _IWCH_CM_H_
-#define _IWCH_CM_H_
-
-#include <linux/inet.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/kref.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-
-#define MPA_KEY_REQ "MPA ID Req Frame"
-#define MPA_KEY_REP "MPA ID Rep Frame"
-
-#define MPA_MAX_PRIVATE_DATA 256
-#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
-#define MPA_REJECT 0x20
-#define MPA_CRC 0x40
-#define MPA_MARKERS 0x80
-#define MPA_FLAGS_MASK 0xE0
-
-#define put_ep(ep) { \
- pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- WARN_ON(kref_read(&((ep)->kref)) < 1); \
- kref_put(&((ep)->kref), __free_ep); \
-}
-
-#define get_ep(ep) { \
- pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- kref_get(&((ep)->kref)); \
-}
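get_ep()/put_ep() wrap struct kref; the discipline is that the creator holds the first reference, every handoff takes one, and the last put frees. A standalone model with a plain non-atomic counter, purely illustrative (the real kref is atomic and SMP-safe):

#include <stdlib.h>

struct obj {
	int refcnt;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcnt = 1;		/* creator holds the first ref */
	return o;
}

static void obj_get(struct obj *o) { o->refcnt++; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0)		/* last ref frees, like __free_ep */
		free(o);
}

int main(void)
{
	struct obj *o = obj_new();

	obj_get(o);	/* e.g. handing the object to a work item */
	obj_put(o);	/* the work item drops its ref when done  */
	obj_put(o);	/* creator's ref: object is freed here    */
	return 0;
}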
-
-struct mpa_message {
- u8 key[16];
- u8 flags;
- u8 revision;
- __be16 private_data_size;
- u8 private_data[0];
-};
-
-struct terminate_message {
- u8 layer_etype;
- u8 ecode;
- __be16 hdrct_rsvd;
- u8 len_hdrs[0];
-};
-
-#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
-
-enum iwch_layers_types {
- LAYER_RDMAP = 0x00,
- LAYER_DDP = 0x10,
- LAYER_MPA = 0x20,
- RDMAP_LOCAL_CATA = 0x00,
- RDMAP_REMOTE_PROT = 0x01,
- RDMAP_REMOTE_OP = 0x02,
- DDP_LOCAL_CATA = 0x00,
- DDP_TAGGED_ERR = 0x01,
- DDP_UNTAGGED_ERR = 0x02,
- DDP_LLP = 0x03
-};
-
-enum iwch_rdma_ecodes {
- RDMAP_INV_STAG = 0x00,
- RDMAP_BASE_BOUNDS = 0x01,
- RDMAP_ACC_VIOL = 0x02,
- RDMAP_STAG_NOT_ASSOC = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_VERS = 0x05,
- RDMAP_INV_OPCODE = 0x06,
- RDMAP_STREAM_CATA = 0x07,
- RDMAP_GLOBAL_CATA = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum iwch_ddp_ecodes {
- DDPT_INV_STAG = 0x00,
- DDPT_BASE_BOUNDS = 0x01,
- DDPT_STAG_NOT_ASSOC = 0x02,
- DDPT_TO_WRAP = 0x03,
- DDPT_INV_VERS = 0x04,
- DDPU_INV_QN = 0x01,
- DDPU_INV_MSN_NOBUF = 0x02,
- DDPU_INV_MSN_RANGE = 0x03,
- DDPU_INV_MO = 0x04,
- DDPU_MSG_TOOBIG = 0x05,
- DDPU_INV_VERS = 0x06
-};
-
-enum iwch_mpa_ecodes {
- MPA_CRC_ERR = 0x02,
- MPA_MARKER_ERR = 0x03
-};
-
-enum iwch_ep_state {
- IDLE = 0,
- LISTEN,
- CONNECTING,
- MPA_REQ_WAIT,
- MPA_REQ_SENT,
- MPA_REQ_RCVD,
- MPA_REP_SENT,
- FPDU_MODE,
- ABORTING,
- CLOSING,
- MORIBUND,
- DEAD,
-};
-
-enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = 0,
- ABORT_REQ_IN_PROGRESS = 1,
- RELEASE_RESOURCES = 2,
- CLOSE_SENT = 3,
-};
-
-struct iwch_ep_common {
- struct iw_cm_id *cm_id;
- struct iwch_qp *qp;
- struct t3cdev *tdev;
- enum iwch_ep_state state;
- struct kref kref;
- spinlock_t lock;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
- unsigned long flags;
-};
-
-struct iwch_listen_ep {
- struct iwch_ep_common com;
- unsigned int stid;
- int backlog;
-};
-
-struct iwch_ep {
- struct iwch_ep_common com;
- struct iwch_ep *parent_ep;
- struct timer_list timer;
- unsigned int atid;
- u32 hwtid;
- u32 snd_seq;
- u32 rcv_seq;
- struct l2t_entry *l2t;
- struct dst_entry *dst;
- struct sk_buff *mpa_skb;
- struct iwch_mpa_attributes mpa_attr;
- unsigned int mpa_pkt_len;
- u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
- u8 tos;
- u16 emss;
- u16 plen;
- u32 ird;
- u32 ord;
-};
-
-static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
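compute_wscale() picks the smallest RFC 1323 window-scale shift such that 65535 << wscale covers the requested receive window, capped at 14. A standalone copy with one worked value:

#include <stdio.h>

static int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	/* 256 KiB receive window: 65535<<2 = 262140 < 262144, but
	 * 65535<<3 = 524280 >= 262144, so the answer is 3. */
	printf("wscale(262144) = %d\n", compute_wscale(262144));
	return 0;
}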
-
-/* CM prototypes */
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
-int iwch_destroy_listen(struct iw_cm_id *cm_id);
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
-int iwch_quiesce_tid(struct iwch_ep *ep);
-int iwch_resume_tid(struct iwch_ep *ep);
-void __free_ep(struct kref *kref);
-void iwch_rearp(struct iwch_ep *ep);
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
-
-int __init iwch_cm_init(void);
-void __exit iwch_cm_term(void);
-extern int peer2peer;
-
-#endif /* _IWCH_CM_H_ */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
deleted file mode 100644
index a098c0140580..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "iwch_provider.h"
-#include "iwch.h"
-
-static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct iwch_qp *qhp, struct ib_wc *wc)
-{
- struct t3_wq *wq = qhp ? &qhp->wq : NULL;
- struct t3_cqe cqe;
- u32 credit = 0;
- u8 cqe_flushed;
- u64 cookie;
- int ret = 1;
-
- ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
- &credit);
- if (t3a_device(chp->rhp) && credit) {
- pr_debug("%s updating %d cq credits on id %d\n", __func__,
- credit, chp->cq.cqid);
- cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
- }
-
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
- ret = 1;
-
- wc->wr_id = cookie;
- wc->qp = qhp ? &qhp->ibqp : NULL;
- wc->vendor_err = CQE_STATUS(cqe);
- wc->wc_flags = 0;
-
- pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
- __func__,
- CQE_QPID(cqe), CQE_TYPE(cqe),
- CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
- CQE_WRID_LOW(cqe), (unsigned long long)cookie);
-
- if (CQE_TYPE(cqe) == 0) {
- if (!CQE_STATUS(cqe))
- wc->byte_len = CQE_LEN(cqe);
- else
- wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
- CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
- wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- }
- } else {
- switch (CQE_OPCODE(cqe)) {
- case T3_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case T3_READ_REQ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = CQE_LEN(cqe);
- break;
- case T3_SEND:
- case T3_SEND_WITH_SE:
- case T3_SEND_WITH_INV:
- case T3_SEND_WITH_SE_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case T3_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case T3_FAST_REGISTER:
- wc->opcode = IB_WC_REG_MR;
- break;
- default:
- pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
- CQE_OPCODE(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- goto out;
- }
- }
-
- if (cqe_flushed)
- wc->status = IB_WC_WR_FLUSH_ERR;
- else {
-
- switch (CQE_STATUS(cqe)) {
- case TPT_ERR_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case TPT_ERR_STAG:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_PDID:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_WRAP:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- case TPT_ERR_BOUND:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- case TPT_ERR_OPCODE:
- wc->status = IB_WC_FATAL_ERR;
- break;
- case TPT_ERR_SWFLUSH:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- default:
- pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
- CQE_STATUS(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- }
- }
-out:
- return ret;
-}
-
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- *	0		cq empty
- * 1 cqe returned
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
-{
- struct iwch_qp *qhp;
- struct t3_cqe *rd_cqe;
- int ret;
-
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (qhp) {
- spin_lock(&qhp->lock);
- ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
- spin_unlock(&qhp->lock);
- } else {
- ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
- }
- return ret;
-}
-
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- unsigned long flags;
- int npolled;
- int err = 0;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
-
- spin_lock_irqsave(&chp->lock, flags);
- for (npolled = 0; npolled < num_entries; ++npolled) {
-
- /*
- * Because T3 can post CQEs that are _not_ associated
- * with a WR, we might have to poll again after removing
- * one of these.
- */
- do {
- err = iwch_poll_cq_one(rhp, chp, wc + npolled);
- } while (err == -EAGAIN);
- if (err <= 0)
- break;
- }
- spin_unlock_irqrestore(&chp->lock, flags);
-
-	if (err < 0)
-		return err;
-	return npolled;
-}
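The do/while(-EAGAIN) inside iwch_poll_cq() exists because T3 can post CQEs with no associated WR; the poll-one step consumes those and reports "try again". A standalone model of that retry loop over a fake CQ, with invented helpers:

#include <stdio.h>

#define POLL_AGAIN -1		/* stands in for -EAGAIN */

/* Fake CQ: 0 terminates, POLL_AGAIN means "CQE without a WR",
 * any other value is a work-request id. */
static int fake_poll_one(const int **cq)
{
	int v = **cq;

	if (v)
		(*cq)++;
	return v;
}

int main(void)
{
	static const int cqes[] = { 7, POLL_AGAIN, 9, 0 };
	const int *cq = cqes;
	int npolled, ret = 0;

	for (npolled = 0; npolled < 8; npolled++) {
		do {
			ret = fake_poll_one(&cq);
		} while (ret == POLL_AGAIN);	/* skip WR-less CQEs */
		if (ret <= 0)
			break;
		printf("polled wr %d\n", ret);
	}
	return ret < 0 ? 1 : 0;
}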
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
deleted file mode 100644
index 9d356c1301c7..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/gfp.h>
-#include <linux/mman.h>
-#include <net/sock.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_wr.h"
-
-static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
- struct respQ_msg_t *rsp_msg,
- enum ib_event_type ib_event,
- int send_term)
-{
- struct ib_event event;
- struct iwch_qp_attributes attrs;
- struct iwch_qp *qhp;
- unsigned long flag;
-
- xa_lock(&rnicp->qps);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
-
- if (!qhp) {
- pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
- __func__, CQE_STATUS(rsp_msg->cqe),
- CQE_QPID(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
- (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
- pr_debug("%s AE received after RTS - qp state %d qpid 0x%x status 0x%x\n",
- __func__,
- qhp->attr.state, qhp->wq.qpid,
- CQE_STATUS(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- pr_err("%s - AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- __func__,
- CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
- CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
- atomic_inc(&qhp->refcnt);
- xa_unlock(&rnicp->qps);
-
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
-
- event.event = ib_event;
- event.device = chp->ibcq.device;
- if (ib_event == IB_EVENT_CQ_ERR)
- event.element.cq = &chp->ibcq;
- else
- event.element.qp = &qhp->ibqp;
-
- if (qhp->ibqp.event_handler)
- (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
-
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-}
-
-void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
-{
- struct iwch_dev *rnicp;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- struct iwch_cq *chp;
- struct iwch_qp *qhp;
- u32 cqid = RSPQ_CQID(rsp_msg);
- unsigned long flag;
-
- rnicp = (struct iwch_dev *) rdev_p->ulp;
- xa_lock(&rnicp->qps);
- chp = get_chp(rnicp, cqid);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
- if (!chp || !qhp) {
- pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- cqid, CQE_QPID(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
- CQE_WRID_LOW(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- goto out;
- }
- iwch_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
- xa_unlock(&rnicp->qps);
-
- /*
- * A TERMINATE CQE means one of two things:
- * 1) completion of our sending a TERMINATE.
- * 2) incoming TERMINATE message.
- */
- if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
- (CQE_STATUS(rsp_msg->cqe) == 0)) {
- if (SQ_TYPE(rsp_msg->cqe)) {
- pr_debug("%s QPID 0x%x ep %p disconnecting\n",
- __func__, qhp->wq.qpid, qhp->ep);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- } else {
- pr_debug("%s post REQ_ERR AE QPID 0x%x\n", __func__,
- qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg,
- IB_EVENT_QP_REQ_ERR, 0);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- }
- goto done;
- }
-
- /* Bad incoming Read request */
- if (SQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- /* Bad incoming write */
- if (RQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- switch (CQE_STATUS(rsp_msg->cqe)) {
-
- /* Completion Events */
- case TPT_ERR_SUCCESS:
-
- /*
- * Confirm the destination entry if this is a RECV completion.
- */
- if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
- dst_confirm(qhp->ep->dst);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- break;
-
- case TPT_ERR_STAG:
- case TPT_ERR_PDID:
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- case TPT_ERR_WRAP:
- case TPT_ERR_BOUND:
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
- break;
-
- /* Device Fatal Errors */
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
- break;
-
- /* QP Fatal Errors */
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_PBL_ADDR_BOUND:
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_OPCODE:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_RQE_ADDR_BOUND:
- case TPT_ERR_IRD_OVERFLOW:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
-
- default:
- pr_err("Unknown T3 status 0x%x QPID 0x%x\n",
- CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
- }
-done:
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
- iwch_qp_rem_ref(&qhp->ibqp);
-out:
- dev_kfree_skb_irq(skb);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
deleted file mode 100644
index ce0f2741821d..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-
-static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
-{
- u32 mmid;
-
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
-}
-
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift)
-{
- u32 stag;
- int ret;
-
- if (cxio_register_phys_mem(&rhp->rdev,
- &stag, mhp->attr.pdid,
- mhp->attr.perms,
- mhp->attr.zbva,
- mhp->attr.va_fbo,
- mhp->attr.len,
- shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr))
- return -ENOMEM;
-
- ret = iwch_finish_mem_reg(mhp, stag);
- if (ret)
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- return ret;
-}
-
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
-{
- mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
- npages << 3);
-
- if (!mhp->attr.pbl_addr)
- return -ENOMEM;
-
- mhp->attr.pbl_size = npages;
-
- return 0;
-}
-
-void iwch_free_pbl(struct iwch_mr *mhp)
-{
- cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
- mhp->attr.pbl_size << 3);
-}
-
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
-{
- return cxio_write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (offset << 3), npages);
-}
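The << 3 shifts in the PBL helpers above convert between entry counts and byte offsets, since each page-list entry is a __be64 (8 bytes). A trivial standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int npages = 16, offset = 4;

	/* Each PBL entry is 8 bytes, hence the << 3 in the helpers. */
	printf("%d entries occupy %d bytes\n", npages, npages << 3);
	printf("entry %d begins at byte offset %d\n", offset, offset << 3);
	return 0;
}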
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
deleted file mode 100644
index dcf02ec02810..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ /dev/null
@@ -1,1321 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/sched/mm.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-#include <linux/inetdevice.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/uverbs_ioctl.h>
-
-#include "cxio_hal.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-#include <rdma/cxgb3-abi.h>
-#include "common.h"
-
-static void iwch_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct iwch_dev *rhp = to_iwch_dev(context->device);
- struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
- struct iwch_mm_entry *mm, *tmp;
-
- pr_debug("%s context %p\n", __func__, context);
- list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
- kfree(mm);
- cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
-}
-
-static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ucontext->device;
- struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
- struct iwch_dev *rhp = to_iwch_dev(ibdev);
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- cxio_init_ucontext(&rhp->rdev, &context->uctx);
- INIT_LIST_HEAD(&context->mmaps);
- spin_lock_init(&context->mmap_lock);
- return 0;
-}
-
-static void iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
-{
- struct iwch_cq *chp;
-
- pr_debug("%s ib_cq %p\n", __func__, ib_cq);
- chp = to_iwch_cq(ib_cq);
-
- xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
-
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
-}
-
-static int iwch_create_cq(struct ib_cq *ibcq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ibcq->device;
- int entries = attr->cqe;
- struct iwch_dev *rhp = to_iwch_dev(ibcq->device);
- struct iwch_cq *chp = to_iwch_cq(ibcq);
- struct iwch_create_cq_resp uresp;
- struct iwch_create_cq_req ureq;
- static int warned;
- size_t resplen;
-
- pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
- if (attr->flags)
- return -EINVAL;
-
- if (udata) {
- if (!t3a_device(rhp)) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
-
- chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
- }
- }
-
- if (t3a_device(rhp)) {
-
- /*
- * T3A: Add some fluff to handle extra CQEs inserted
- * for various errors.
- * Additional CQE possibilities:
- * TERMINATE,
- * incoming RDMA WRITE failures,
- * incoming RDMA READ REQUEST failures.
- * NOTE: We cannot ensure the CQ won't overflow.
- */
- entries += 16;
- }
- entries = roundup_pow_of_two(entries);
- chp->cq.size_log2 = ilog2(entries);
-
- if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
- return -ENOMEM;
-
- chp->rhp = rhp;
- chp->ibcq.cqe = 1 << chp->cq.size_log2;
- spin_lock_init(&chp->lock);
- spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
- if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
- return -ENOMEM;
- }
-
- if (udata) {
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct iwch_ucontext, ibucontext);
-
- mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- iwch_destroy_cq(&chp->ibcq, udata);
- return -ENOMEM;
- }
- uresp.cqid = chp->cq.cqid;
- uresp.size_log2 = chp->cq.size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- if (udata->outlen < sizeof(uresp)) {
- if (!warned++)
- pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof(struct t3_cqe));
- resplen = sizeof(struct iwch_create_cq_resp_v0);
- } else {
- mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
- sizeof(struct t3_cqe));
- uresp.memsize = mm->len;
- uresp.reserved = 0;
- resplen = sizeof(uresp);
- }
- if (ib_copy_to_udata(udata, &uresp, resplen)) {
- kfree(mm);
- iwch_destroy_cq(&chp->ibcq, udata);
- return -EFAULT;
- }
- insert_mmap(ucontext, mm);
- }
- pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr %pad\n",
- chp->cq.cqid, chp, (1 << chp->cq.size_log2),
- &chp->cq.dma_addr);
- return 0;
-}
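-
-/*
- * Editorial note (not part of the original source): the CQ mmap
- * handshake above works in two steps.  iwch_create_cq() stashes
- * {key, phys addr, len} in the ucontext and returns uresp.key to
- * userspace, which then maps the queue by using that key as the mmap
- * offset.  A hypothetical libcxgb3-style consumer might do:
- *
- *	cq_buf = mmap(NULL, resp.memsize, PROT_READ | PROT_WRITE,
- *		      MAP_SHARED, ctx->cmd_fd, resp.key);
- */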
-
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- enum t3_cq_opcode cq_op;
- int err;
- unsigned long flag;
- u32 rptr;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
- if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- cq_op = CQ_ARM_SE;
- else
- cq_op = CQ_ARM_AN;
- if (chp->user_rptr_addr) {
- if (get_user(rptr, chp->user_rptr_addr))
- return -EFAULT;
- spin_lock_irqsave(&chp->lock, flag);
- chp->cq.rptr = rptr;
- } else
- spin_lock_irqsave(&chp->lock, flag);
- pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
- err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
- spin_unlock_irqrestore(&chp->lock, flag);
- if (err < 0)
- pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
- if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- err = 0;
- return err;
-}
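-
-/*
- * Illustrative usage (an assumption, not from this file): with
- * IB_CQ_REPORT_MISSED_EVENTS, a positive return from the re-arm means
- * completions may already be pending, so a ULP typically re-polls
- * rather than waiting for the next event:
- *
- *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
- *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
- *		goto repoll;
- */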
-
-static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- int len = vma->vm_end - vma->vm_start;
- u32 key = vma->vm_pgoff << PAGE_SHIFT;
- struct cxio_rdev *rdev_p;
- int ret = 0;
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext;
- u64 addr;
-
- pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
-
- if (vma->vm_start & (PAGE_SIZE-1)) {
- return -EINVAL;
- }
-
- rdev_p = &(to_iwch_dev(context->device)->rdev);
- ucontext = to_iwch_ucontext(context);
-
- mm = remove_mmap(ucontext, key, len);
- if (!mm)
- return -EINVAL;
- addr = mm->addr;
- kfree(mm);
-
- if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
- (addr < (rdev_p->rnic_info.udbell_physbase +
- rdev_p->rnic_info.udbell_len))) {
-
- /*
- * Map T3 DB register.
- */
- if (vma->vm_flags & VM_READ) {
- return -EPERM;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contiguous DMA memory.
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
- cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
-}
-
-static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_pd *php = to_iwch_pd(pd);
- struct ib_device *ibdev = pd->device;
- u32 pdid;
- struct iwch_dev *rhp;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- rhp = to_iwch_dev(ibdev);
- pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
- if (!pdid)
- return -EINVAL;
-
- php->pdid = pdid;
- php->rhp = rhp;
- if (udata) {
- struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
-
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- iwch_deallocate_pd(&php->ibpd, udata);
- return -EFAULT;
- }
- }
- pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return 0;
-}
-
-static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_mr *mhp;
- u32 mmid;
-
- pr_debug("%s ib_mr %p\n", __func__, ib_mr);
-
- mhp = to_iwch_mr(ib_mr);
- kfree(mhp->pages);
- rhp = mhp->rhp;
- mmid = mhp->attr.stag >> 8;
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- iwch_free_pbl(mhp);
- xa_erase_irq(&rhp->mrs, mmid);
- if (mhp->kva)
- kfree((void *) (unsigned long) mhp->kva);
- ib_umem_release(mhp->umem);
- pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
-{
- const u64 total_size = 0xffffffff;
- const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct iwch_pd *php = to_iwch_pd(pd);
- struct iwch_dev *rhp = php->rhp;
- struct iwch_mr *mhp;
- __be64 *page_list;
- int shift = 26, npages, ret, i;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- /*
- * T3 only supports 32 bits of size.
- */
- if (sizeof(phys_addr_t) > 4) {
- pr_warn_once("Cannot support dma_mrs on this platform\n");
- return ERR_PTR(-ENOTSUPP);
- }
-
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- npages = (total_size + (1ULL << shift) - 1) >> shift;
- if (!npages) {
- ret = -EINVAL;
- goto err;
- }
-
- page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
- if (!page_list) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < npages; i++)
- page_list[i] = cpu_to_be64((u64)i << shift);
-
- pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
- __func__, mask, shift, total_size, npages);
-
- ret = iwch_alloc_pbl(mhp, npages);
- if (ret) {
- kfree(page_list);
- goto err_pbl;
- }
-
- ret = iwch_write_pbl(mhp, page_list, npages, 0);
- kfree(page_list);
- if (ret)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
-
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = 0;
- mhp->attr.page_size = shift - 12;
-
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- ret = iwch_register_mem(rhp, php, mhp, shift);
- if (ret)
- goto err_pbl;
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- kfree(mhp);
- return ERR_PTR(ret);
-}
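-
-/*
- * Worked example for the sizing above (editorial, not in the
- * original): with shift = 26 each PBL entry covers a 64MB region, so
- * npages = (0xffffffff + (1ULL << 26) - 1) >> 26 = 64 entries,
- * page_list[i] = i << 26, and attr.page_size = 26 - 12 = 14 (page
- * sizes are encoded as a shift relative to 4KB).
- */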
-
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- __be64 *pages;
- int shift, n, i;
- int err = 0;
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- struct iwch_reg_user_mr_resp uresp;
- struct sg_dma_page_iter sg_iter;
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
- }
-
- shift = PAGE_SHIFT;
-
- n = ib_umem_num_pages(mhp->umem);
-
- err = iwch_alloc_pbl(mhp, n);
- if (err)
- goto err;
-
- pages = (__be64 *) __get_free_page(GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err_pbl;
- }
-
- i = n = 0;
-
- for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
- pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
- if (i == PAGE_SIZE / sizeof(*pages)) {
- err = iwch_write_pbl(mhp, pages, i, n);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- err = iwch_write_pbl(mhp, pages, i, n);
-
-pbl_done:
- free_page((unsigned long) pages);
- if (err)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = virt;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
-
- err = iwch_register_mem(rhp, php, mhp, shift);
- if (err)
- goto err_pbl;
-
- if (udata && !t3a_device(rhp)) {
- uresp.pbl_addr = (mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3;
- pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
- uresp.pbl_addr);
-
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- iwch_dereg_mr(&mhp->ibmr, udata);
- err = -EFAULT;
- goto err;
- }
- }
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- ib_umem_release(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
-}
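-
-/*
- * Editorial note: the loop above batches PBL writes through a single
- * scratch page.  With 4KB pages the buffer holds
- * PAGE_SIZE / sizeof(*pages) = 512 DMA addresses, so a 4MB user
- * registration (1024 pages) is flushed to adapter memory in two
- * iwch_write_pbl() calls, at offsets 0 and 512.
- */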
-
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mw *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
- ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
- if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmw);
-}
-
-static int iwch_dealloc_mw(struct ib_mw *mw)
-{
- struct iwch_dev *rhp;
- struct iwch_mw *mhp;
- u32 mmid;
-
- mhp = to_iwch_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- xa_erase_irq(&rhp->mrs, mmid);
- pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret = -ENOMEM;
-
- if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > T3_MAX_FASTREG_DEPTH)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- goto err;
-
- mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages)
- goto pl_err;
-
- mhp->rhp = rhp;
- ret = iwch_alloc_pbl(mhp, max_num_sg);
- if (ret)
- goto err1;
- mhp->attr.pbl_size = max_num_sg;
- ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret)
- goto err2;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_NON_SHARED_MR;
- mhp->attr.stag = stag;
- mhp->attr.state = 1;
- mmid = (stag) >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
- if (ret)
- goto err3;
-
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmr);
-err3:
- cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err2:
- iwch_free_pbl(mhp);
-err1:
- kfree(mhp->pages);
-pl_err:
- kfree(mhp);
-err:
- return ERR_PTR(ret);
-}
-
-static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- if (unlikely(mhp->npages == mhp->attr.pbl_size))
- return -ENOMEM;
-
- mhp->pages[mhp->npages++] = addr;
-
- return 0;
-}
-
-static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- mhp->npages = 0;
-
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
-}
-
-static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_qp_attributes attrs;
- struct iwch_ucontext *ucontext;
-
- qhp = to_iwch_qp(ib_qp);
- rhp = qhp->rhp;
-
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
- wait_event(qhp->wait, !qhp->ep);
-
- xa_erase_irq(&rhp->qps, qhp->wq.qpid);
-
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
-
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
- pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
- ib_qp, qhp->wq.qpid, qhp);
- kfree(qhp);
- return 0;
-}
-
-static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_pd *php;
- struct iwch_cq *schp;
- struct iwch_cq *rchp;
- struct iwch_create_qp_resp uresp;
- int wqsize, sqsize, rqsize;
- struct iwch_ucontext *ucontext;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
- if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
- rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
- if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
-
- /* The RQT size must be # of entries + 1 rounded up to a power of two */
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (rqsize == attrs->cap.max_recv_wr)
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
-
- /* T3 doesn't support RQT depth < 16 */
- if (rqsize < 16)
- rqsize = 16;
-
- if (rqsize > T3_MAX_RQ_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (attrs->cap.max_inline_data > T3_MAX_INLINE)
- return ERR_PTR(-EINVAL);
-
- /*
- * NOTE: The SQ and total WQ sizes don't need to be
- * a power of two. However, all the code assumes
- * they are, e.g. Q_FREECNT() and friends.
- */
- sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
- wqsize = roundup_pow_of_two(rqsize + sqsize);
-
- /*
- * Kernel users need more wq space for fastreg WRs which can take
- * 2 WR fragments.
- */
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
- wqsize = roundup_pow_of_two(rqsize +
- roundup_pow_of_two(attrs->cap.max_send_wr * 2));
- pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
- wqsize, sqsize, rqsize);
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
- qhp->wq.size_log2 = ilog2(wqsize);
- qhp->wq.rq_size_log2 = ilog2(rqsize);
- qhp->wq.sq_size_log2 = ilog2(sqsize);
- if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- attrs->cap.max_recv_wr = rqsize - 1;
- attrs->cap.max_send_wr = sqsize;
- attrs->cap.max_inline_data = T3_MAX_INLINE;
-
- qhp->rhp = rhp;
- qhp->attr.pd = php->pdid;
- qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
- qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
- qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
- qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
- qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.next_state = IWCH_QP_STATE_IDLE;
-
- /*
- * XXX - These don't get passed in from the OpenIB user
- * at create time; the CM sets them via a QP modify.
- * Ideally they would be settable at create time as well.
- */
- qhp->attr.enable_rdma_read = 1;
- qhp->attr.enable_rdma_write = 1;
- qhp->attr.enable_bind = 1;
- qhp->attr.max_ord = 1;
- qhp->attr.max_ird = 1;
-
- spin_lock_init(&qhp->lock);
- init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
-
- if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- if (udata) {
-
- struct iwch_mm_entry *mm1, *mm2;
-
- mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
- if (!mm1) {
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
- if (!mm2) {
- kfree(mm1);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- uresp.qpid = qhp->wq.qpid;
- uresp.size_log2 = qhp->wq.size_log2;
- uresp.sq_size_log2 = qhp->wq.sq_size_log2;
- uresp.rq_size_log2 = qhp->wq.rq_size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- uresp.db_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- kfree(mm1);
- kfree(mm2);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-EFAULT);
- }
- mm1->key = uresp.key;
- mm1->addr = virt_to_phys(qhp->wq.queue);
- mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.db_key;
- mm2->addr = qhp->wq.udb & PAGE_MASK;
- mm2->len = PAGE_SIZE;
- insert_mmap(ucontext, mm2);
- }
- qhp->ibqp.qp_num = qhp->wq.qpid;
- pr_debug(
- "%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr %pad size %d rq_addr 0x%x\n",
- __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
- qhp->wq.qpid, qhp, &qhp->wq.dma_addr, 1 << qhp->wq.size_log2,
- qhp->wq.rq_addr);
- return &qhp->ibqp;
-}
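-
-/*
- * Worked sizing example (editorial): for max_recv_wr = 16 and
- * max_send_wr = 64, the RQT needs entries + 1 rounded up, so
- * rqsize = roundup_pow_of_two(17) = 32 and sqsize = 64, giving a
- * user-mode wqsize = roundup_pow_of_two(96) = 128.  A kernel QP with
- * the same caps trips the wqsize < rqsize + 2 * sqsize (= 160) check
- * and is bumped to wqsize = roundup_pow_of_two(32 + 128) = 256 to
- * leave room for 2-fragment fastreg WRs.
- */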
-
-static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- enum iwch_qp_attr_mask mask = 0;
- struct iwch_qp_attributes attrs = {};
-
- pr_debug("%s ib_qp %p\n", __func__, ibqp);
-
- /* iWARP does not support the RTR state */
- if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
- attr_mask &= ~IB_QP_STATE;
-
- /* Make sure we still have something left to do */
- if (!attr_mask)
- return 0;
-
- qhp = to_iwch_qp(ibqp);
- rhp = qhp->rhp;
-
- attrs.next_state = iwch_convert_state(attr->qp_state);
- attrs.enable_rdma_read = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ) ? 1 : 0;
- attrs.enable_rdma_write = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
- attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
-
- mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
- mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
- (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
-
- return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_iwch_qp(qp)->refcnt));
-}
-
-void iwch_qp_rem_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
- wake_up(&(to_iwch_qp(qp)->wait));
-}
-
-static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
-{
- pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
- return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-}
-
-static int iwch_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 *pkey)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- *pkey = 0;
- return 0;
-}
-
-static int iwch_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
- dev = to_iwch_dev(ibdev);
- BUG_ON(port == 0 || port > 2);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
- return 0;
-}
-
-static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
-{
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
- char *cp, *next;
- unsigned fw_maj, fw_min, fw_mic;
-
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
-
- next = info.fw_version + 1;
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_maj);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_min);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_mic);
-
- return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
- (fw_mic & 0xffff);
-}
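-
-/*
- * Editorial example (assuming a "<type> maj.min.mic" style string
- * from the cxgb3 LLD, with the leading revision character skipped):
- * "T 7.12.0" parses to fw_maj = 7, fw_min = 12, fw_mic = 0 and packs
- * as ((u64)7 << 32) | (12 << 16) | 0 == 0x7000c0000.
- */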
-
-static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
-
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- dev = to_iwch_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- props->hw_ver = dev->rdev.t3cdev_p->type;
- props->fw_ver = fw_vers_string_to_u64(dev);
- props->device_cap_flags = dev->device_cap_flags;
- props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
- props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
- props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
- props->max_mr_size = dev->attr.max_mr_size;
- props->max_qp = dev->attr.max_qps;
- props->max_qp_wr = dev->attr.max_wrs;
- props->max_send_sge = dev->attr.max_sge_per_wr;
- props->max_recv_sge = dev->attr.max_sge_per_wr;
- props->max_sge_rd = 1;
- props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_cq = dev->attr.max_cqs;
- props->max_cqe = dev->attr.max_cqes_per_cq;
- props->max_mr = dev->attr.max_mem_regs;
- props->max_pd = dev->attr.max_pds;
- props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
-
- return 0;
-}
-
-static int iwch_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
- props->max_msg_sz = -1;
-
- return 0;
-}
-
-static ssize_t hw_rev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t hca_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
-}
-static DEVICE_ATTR_RO(hca_type);
-
-static ssize_t board_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
- iwch_dev->rdev.rnic_info.pdev->device);
-}
-static DEVICE_ATTR_RO(board_id);
-
-enum counters {
- IPINRECEIVES,
- IPINHDRERRORS,
- IPINADDRERRORS,
- IPINUNKNOWNPROTOS,
- IPINDISCARDS,
- IPINDELIVERS,
- IPOUTREQUESTS,
- IPOUTDISCARDS,
- IPOUTNOROUTES,
- IPREASMTIMEOUT,
- IPREASMREQDS,
- IPREASMOKS,
- IPREASMFAILS,
- TCPACTIVEOPENS,
- TCPPASSIVEOPENS,
- TCPATTEMPTFAILS,
- TCPESTABRESETS,
- TCPCURRESTAB,
- TCPINSEGS,
- TCPOUTSEGS,
- TCPRETRANSSEGS,
- TCPINERRS,
- TCPOUTRSTS,
- TCPRTOMIN,
- TCPRTOMAX,
- NR_COUNTERS
-};
-
-static const char * const names[] = {
- [IPINRECEIVES] = "ipInReceives",
- [IPINHDRERRORS] = "ipInHdrErrors",
- [IPINADDRERRORS] = "ipInAddrErrors",
- [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
- [IPINDISCARDS] = "ipInDiscards",
- [IPINDELIVERS] = "ipInDelivers",
- [IPOUTREQUESTS] = "ipOutRequests",
- [IPOUTDISCARDS] = "ipOutDiscards",
- [IPOUTNOROUTES] = "ipOutNoRoutes",
- [IPREASMTIMEOUT] = "ipReasmTimeout",
- [IPREASMREQDS] = "ipReasmReqds",
- [IPREASMOKS] = "ipReasmOKs",
- [IPREASMFAILS] = "ipReasmFails",
- [TCPACTIVEOPENS] = "tcpActiveOpens",
- [TCPPASSIVEOPENS] = "tcpPassiveOpens",
- [TCPATTEMPTFAILS] = "tcpAttemptFails",
- [TCPESTABRESETS] = "tcpEstabResets",
- [TCPCURRESTAB] = "tcpCurrEstab",
- [TCPINSEGS] = "tcpInSegs",
- [TCPOUTSEGS] = "tcpOutSegs",
- [TCPRETRANSSEGS] = "tcpRetransSegs",
- [TCPINERRS] = "tcpInErrs",
- [TCPOUTRSTS] = "tcpOutRsts",
- [TCPRTOMIN] = "tcpRtoMin",
- [TCPRTOMAX] = "tcpRtoMax",
-};
-
-static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
- u8 port_num)
-{
- BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
-
- /* Our driver only supports device level stats */
- if (port_num != 0)
- return NULL;
-
- return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
-}
-
-static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port, int index)
-{
- struct iwch_dev *dev;
- struct tp_mib_stats m;
- int ret;
-
- if (port != 0 || !stats)
- return -ENOSYS;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- dev = to_iwch_dev(ibdev);
- ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
- if (ret)
- return -ENOSYS;
-
- stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
- stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
- stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
- stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
- stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
- stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
- stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
- stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
- stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
- stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
- stats->value[IPREASMREQDS] = m.ipReasmReqds;
- stats->value[IPREASMOKS] = m.ipReasmOKs;
- stats->value[IPREASMFAILS] = m.ipReasmFails;
- stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
- stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
- stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
- stats->value[TCPESTABRESETS] = m.tcpEstabResets;
- stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
- stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
- stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
- stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
- stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
- stats->value[TCPOUTRSTS] = m.tcpOutRsts;
- stats->value[TCPRTOMIN] = m.tcpRtoMin;
- stats->value[TCPRTOMAX] = m.tcpRtoMax;
-
- return stats->num_counters;
-}
-
-static struct attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- NULL
-};
-
-static const struct attribute_group iwch_attr_group = {
- .attrs = iwch_class_attributes,
-};
-
-static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
-{
- struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
-}
-
-static const struct ib_device_ops iwch_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_CXGB3,
- .uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION,
- .uverbs_no_driver_id_binding = 1,
-
- .alloc_hw_stats = iwch_alloc_stats,
- .alloc_mr = iwch_alloc_mr,
- .alloc_mw = iwch_alloc_mw,
- .alloc_pd = iwch_allocate_pd,
- .alloc_ucontext = iwch_alloc_ucontext,
- .create_cq = iwch_create_cq,
- .create_qp = iwch_create_qp,
- .dealloc_mw = iwch_dealloc_mw,
- .dealloc_pd = iwch_deallocate_pd,
- .dealloc_ucontext = iwch_dealloc_ucontext,
- .dereg_mr = iwch_dereg_mr,
- .destroy_cq = iwch_destroy_cq,
- .destroy_qp = iwch_destroy_qp,
- .get_dev_fw_str = get_dev_fw_ver_str,
- .get_dma_mr = iwch_get_dma_mr,
- .get_hw_stats = iwch_get_mib,
- .get_port_immutable = iwch_port_immutable,
- .iw_accept = iwch_accept_cr,
- .iw_add_ref = iwch_qp_add_ref,
- .iw_connect = iwch_connect,
- .iw_create_listen = iwch_create_listen,
- .iw_destroy_listen = iwch_destroy_listen,
- .iw_get_qp = iwch_get_qp,
- .iw_reject = iwch_reject_cr,
- .iw_rem_ref = iwch_qp_rem_ref,
- .map_mr_sg = iwch_map_mr_sg,
- .mmap = iwch_mmap,
- .modify_qp = iwch_ib_modify_qp,
- .poll_cq = iwch_poll_cq,
- .post_recv = iwch_post_receive,
- .post_send = iwch_post_send,
- .query_device = iwch_query_device,
- .query_gid = iwch_query_gid,
- .query_pkey = iwch_query_pkey,
- .query_port = iwch_query_port,
- .reg_user_mr = iwch_reg_user_mr,
- .req_notify_cq = iwch_arm_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
- INIT_RDMA_OBJ_SIZE(ib_cq, iwch_cq, ibcq),
- INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
-};
-
-static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
-{
- int ret;
- int i;
-
- for (i = 0; i < rdev->port_info.nports; i++) {
- ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
- i + 1);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-int iwch_register_device(struct iwch_dev *dev)
-{
- int err;
-
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- /* cxgb3 supports STag 0. */
- dev->ibdev.local_dma_lkey = 0;
-
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
- memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
- dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
-
- memcpy(dev->ibdev.iw_ifname, dev->rdev.t3cdev_p->lldev->name,
- sizeof(dev->ibdev.iw_ifname));
-
- rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
- ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
- err = set_netdevs(&dev->ibdev, &dev->rdev);
- if (err)
- return err;
-
- return ib_register_device(&dev->ibdev, "cxgb3_%d");
-}
-
-void iwch_unregister_device(struct iwch_dev *dev)
-{
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- ib_unregister_device(&dev->ibdev);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
deleted file mode 100644
index 8adbe9658935..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_PROVIDER_H__
-#define __IWCH_PROVIDER_H__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <rdma/ib_verbs.h>
-#include <asm/types.h>
-#include "t3cdev.h"
-#include "iwch.h"
-#include "cxio_wr.h"
-#include "cxio_hal.h"
-
-struct iwch_pd {
- struct ib_pd ibpd;
- u32 pdid;
- struct iwch_dev *rhp;
-};
-
-static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct iwch_pd, ibpd);
-}
-
-struct tpt_attributes {
- u32 stag;
- u32 state:1;
- u32 type:2;
- u32 rsvd:1;
- enum tpt_mem_perm perms;
- u32 remote_invaliate_disable:1;
- u32 zbva:1;
- u32 mw_bind_enable:1;
- u32 page_size:5;
-
- u32 pdid;
- u32 qpid;
- u32 pbl_addr;
- u32 len;
- u64 va_fbo;
- u32 pbl_size;
-};
-
-struct iwch_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
- u64 *pages;
- u32 npages;
-};
-
-typedef struct iwch_mw iwch_mw_handle;
-
-static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct iwch_mr, ibmr);
-}
-
-struct iwch_mw {
- struct ib_mw ibmw;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
-};
-
-static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct iwch_mw, ibmw);
-}
-
-struct iwch_cq {
- struct ib_cq ibcq;
- struct iwch_dev *rhp;
- struct t3_cq cq;
- spinlock_t lock;
- spinlock_t comp_handler_lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- u32 __user *user_rptr_addr;
-};
-
-static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct iwch_cq, ibcq);
-}
-
-enum IWCH_QP_FLAGS {
- QP_QUIESCED = 0x01
-};
-
-struct iwch_mpa_attributes {
- u8 initiator;
- u8 recv_marker_enabled;
- u8 xmit_marker_enabled; /* iWARP: enable transmit markers */
- u8 crc_enabled;
- u8 version; /* 0 or 1 */
-};
-
-struct iwch_qp_attributes {
- u32 scq;
- u32 rcq;
- u32 sq_num_entries;
- u32 rq_num_entries;
- u32 sq_max_sges;
- u32 sq_max_sges_rdma_write;
- u32 rq_max_sges;
- u32 state;
- u8 enable_rdma_read;
- u8 enable_rdma_write; /* enable inbound Read Resp. */
- u8 enable_bind;
- u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
- u32 max_ord;
- u32 max_ird;
- u32 pd; /* IN */
- /*
-  * Next QP state. If the current state is specified, only the
-  * QP attributes will be modified.
-  */
- u32 next_state;
- char terminate_buffer[52];
- u32 terminate_msg_len;
- u8 is_terminate_local;
- struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
- struct iwch_ep *llp_stream_handle;
- char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
- u32 stream_msg_buf_len; /* Only on Idle -> RTS */
-};
-
-struct iwch_qp {
- struct ib_qp ibqp;
- struct iwch_dev *rhp;
- struct iwch_ep *ep;
- struct iwch_qp_attributes attr;
- struct t3_wq wq;
- spinlock_t lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- enum IWCH_QP_FLAGS flags;
-};
-
-static inline int qp_quiesced(struct iwch_qp *qhp)
-{
- return qhp->flags & QP_QUIESCED;
-}
-
-static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct iwch_qp, ibqp);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp);
-void iwch_qp_rem_ref(struct ib_qp *qp);
-
-struct iwch_ucontext {
- struct ib_ucontext ibucontext;
- struct cxio_ucontext uctx;
- u32 key;
- spinlock_t mmap_lock;
- struct list_head mmaps;
-};
-
-static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
-{
- return container_of(c, struct iwch_ucontext, ibucontext);
-}
-
-struct iwch_mm_entry {
- struct list_head entry;
- u64 addr;
- u32 key;
- unsigned len;
-};
-
-static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
- u32 key, unsigned len)
-{
- struct list_head *pos, *nxt;
- struct iwch_mm_entry *mm;
-
- spin_lock(&ucontext->mmap_lock);
- list_for_each_safe(pos, nxt, &ucontext->mmaps) {
-
- mm = list_entry(pos, struct iwch_mm_entry, entry);
- if (mm->key == key && mm->len == len) {
- list_del_init(&mm->entry);
- spin_unlock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, key,
- (unsigned long long)mm->addr, mm->len);
- return mm;
- }
- }
- spin_unlock(&ucontext->mmap_lock);
- return NULL;
-}
-
-static inline void insert_mmap(struct iwch_ucontext *ucontext,
- struct iwch_mm_entry *mm)
-{
- spin_lock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, mm->key, (unsigned long long)mm->addr, mm->len);
- list_add_tail(&mm->entry, &ucontext->mmaps);
- spin_unlock(&ucontext->mmap_lock);
-}
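-
-/*
- * Editorial note: these helpers implement a one-shot rendezvous.  A
- * verb (create_cq/create_qp) insert_mmap()s {key, addr, len} and
- * hands the key to userspace; the subsequent mmap() call arrives in
- * iwch_mmap(), which derives key = vm_pgoff << PAGE_SHIFT and must
- * remove_mmap() an exact {key, len} match, so each key can be mapped
- * at most once.
- */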
-
-enum iwch_qp_attr_mask {
- IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
- IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
- IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
- IWCH_QP_ATTR_MAX_ORD = 1 << 11,
- IWCH_QP_ATTR_MAX_IRD = 1 << 12,
- IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
- IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
- IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
- IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_MAX_ORD |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_STREAM_MSG_BUFFER |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
-};
-
-int iwch_modify_qp(struct iwch_dev *rhp,
- struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal);
-
-enum iwch_qp_state {
- IWCH_QP_STATE_IDLE,
- IWCH_QP_STATE_RTS,
- IWCH_QP_STATE_ERROR,
- IWCH_QP_STATE_TERMINATE,
- IWCH_QP_STATE_CLOSING,
- IWCH_QP_STATE_TOT
-};
-
-static inline int iwch_convert_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- case IB_QPS_INIT:
- return IWCH_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return IWCH_QP_STATE_RTS;
- case IB_QPS_SQD:
- return IWCH_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return IWCH_QP_STATE_TERMINATE;
- case IB_QPS_ERR:
- return IWCH_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static inline u32 iwch_ib_to_tpt_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
- (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
- TPT_LOCAL_READ;
-}
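-
-/*
- * Example (editorial): acc = IB_ACCESS_LOCAL_WRITE |
- * IB_ACCESS_REMOTE_READ maps to TPT_LOCAL_WRITE | TPT_REMOTE_READ |
- * TPT_LOCAL_READ; local read permission is always granted.
- */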
-
-static inline u32 iwch_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
-}
-
-enum iwch_mmid_state {
- IWCH_STAG_STATE_VALID,
- IWCH_STAG_STATE_INVALID
-};
-
-enum iwch_qp_query_flags {
- IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
- IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
- IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
-
- /*
- * Quiesce QP context; Consumer
- * will NOT replay outstanding WR
- */
- IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
- IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
- IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
-};
-
-u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_ep *ep);
-int iwch_register_device(struct iwch_dev *dev);
-void iwch_unregister_device(struct iwch_dev *dev);
-void stop_read_rep_timer(struct iwch_qp *qhp);
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift);
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
-void iwch_free_pbl(struct iwch_mr *mhp);
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
-
-#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
deleted file mode 100644
index c649faad63f9..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ /dev/null
@@ -1,1082 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-
-#define NO_SUPPORT -1
-
-static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
-
- switch (wr->opcode) {
- case IB_WR_SEND:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE;
- else
- wqe->send.rdmaop = T3_SEND;
- wqe->send.rem_stag = 0;
- break;
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
- else
- wqe->send.rdmaop = T3_SEND_WITH_INV;
- wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
- break;
- default:
- return -EINVAL;
- }
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->send.reserved[0] = 0;
- wqe->send.reserved[1] = 0;
- wqe->send.reserved[2] = 0;
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
-
- plen += wr->sg_list[i].length;
- wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 4 + ((wr->num_sge) << 1);
- wqe->send.plen = cpu_to_be32(plen);
- return 0;
-}
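-
-/*
- * Editorial note on the flit arithmetic above: a flit is 8 bytes and
- * each t3_sge (stag + len + to) occupies two flits, so a send with
- * num_sge = 3 yields *flit_cnt = 4 + (3 << 1) = 10.
- */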
-
-static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->write.rdmaop = T3_RDMA_WRITE;
- wqe->write.reserved[0] = 0;
- wqe->write.reserved[1] = 0;
- wqe->write.reserved[2] = 0;
- wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
-
- if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
- plen = 4;
- wqe->write.sgl[0].stag = wr->ex.imm_data;
- wqe->write.sgl[0].len = cpu_to_be32(0);
- wqe->write.num_sgle = cpu_to_be32(0);
- *flit_cnt = 6;
- } else {
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- wqe->write.sgl[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->write.sgl[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->write.sgl[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 5 + ((wr->num_sge) << 1);
- }
- wqe->write.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- if (wr->num_sge > 1)
- return -EINVAL;
- wqe->read.rdmaop = T3_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- wqe->read.local_inv = 1;
- else
- wqe->read.local_inv = 0;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
- wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
- wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
- wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
- *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
- return 0;
-}
-
-static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
-{
- struct iwch_mr *mhp = to_iwch_mr(wr->mr);
- int i;
- __be64 *p;
-
- if (mhp->npages > T3_MAX_FASTREG_DEPTH)
- return -EINVAL;
- *wr_cnt = 1;
- wqe->fastreg.stag = cpu_to_be32(wr->key);
- wqe->fastreg.len = cpu_to_be32(mhp->ibmr.length);
- wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
- wqe->fastreg.va_base_lo_fbo =
- cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
- wqe->fastreg.page_type_perms = cpu_to_be32(
- V_FR_PAGE_COUNT(mhp->npages) |
- V_FR_PAGE_SIZE(ilog2(wr->mr->page_size) - 12) |
- V_FR_TYPE(TPT_VATO) |
- V_FR_PERMS(iwch_ib_to_tpt_access(wr->access)));
- p = &wqe->fastreg.pbl_addrs[0];
- for (i = 0; i < mhp->npages; i++, p++) {
-
- /* If we need a 2nd WR, then set it up */
- if (i == T3_MAX_FASTREG_FRAG) {
- *wr_cnt = 2;
- wqe = (union t3_wr *)(wq->queue +
- Q_PTR2IDX((wq->wptr+1), wq->size_log2));
- build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
- Q_GENBIT(wq->wptr + 1, wq->size_log2),
- 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG,
- T3_EOP);
-
- p = &wqe->pbl_frag.pbl_addrs[0];
- }
- *p = cpu_to_be64((u64)mhp->pages[i]);
- }
- *flit_cnt = 5 + mhp->npages;
- if (*flit_cnt > 15)
- *flit_cnt = 15;
- return 0;
-}
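-
-/*
- * Editorial note: a fastreg WR carries at most T3_MAX_FASTREG_FRAG
- * PBL addresses inline; when mhp->npages exceeds that, the loop above
- * spills the remaining addresses into a second T3_WR_FASTREG WR
- * (wr_cnt = 2) sized for the 1 + npages - T3_MAX_FASTREG_FRAG
- * overflow entries, and the final flit count is capped at 15.
- */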
-
-static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
- wqe->local_inv.reserved = 0;
- *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
- return 0;
-}
-
-static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
- u32 num_sgle, u32 *pbl_addr, u8 *page_size)
-{
- int i;
- struct iwch_mr *mhp;
- u64 offset;
- for (i = 0; i < num_sgle; i++) {
-
- mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
- if (!mhp) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (!mhp->attr.state) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (mhp->attr.zbva) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
-
- if (sg_list[i].addr < mhp->attr.va_fbo) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) <
- sg_list[i].addr) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) >
- mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- offset = sg_list[i].addr - mhp->attr.va_fbo;
- offset += mhp->attr.va_fbo &
- ((1UL << (12 + mhp->attr.page_size)) - 1);
- pbl_addr[i] = ((mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3) +
- (offset >> (12 + mhp->attr.page_size));
- page_size[i] = mhp->attr.page_size;
- }
- return 0;
-}
-
-static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i, err = 0;
- u32 pbl_addr[T3_MAX_SGE];
- u8 page_size[T3_MAX_SGE];
-
- err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
- page_size);
- if (err)
- return err;
- wqe->recv.pagesz[0] = page_size[0];
- wqe->recv.pagesz[1] = page_size[1];
- wqe->recv.pagesz[2] = page_size[2];
- wqe->recv.pagesz[3] = page_size[3];
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
- for (i = 0; i < wr->num_sge; i++) {
- wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
- /* 'to' in the WQE is the offset into the page */
- wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
- ((1UL << (12 + page_size[i])) - 1));
-
- /* pbl_addr is the adapter's address in the PBL */
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = 0;
- return 0;
-}
-
-static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i;
- u32 pbl_addr;
- u32 pbl_offset;
-
- /*
- * The T3 HW requires the PBL in the HW recv descriptor to reference
- * a PBL entry. So we allocate the max needed PBL memory here and pass
- * it to the uP in the recv WR. The uP will build the PBL and set up
- * the HW recv descriptor.
- */
- pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
- if (!pbl_addr)
- return -ENOMEM;
-
- /*
- * Compute the 8B aligned offset.
- */
- pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
-
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
-
- for (i = 0; i < wr->num_sge; i++) {
-
- /*
- * Use a 128MB page size. This and an imposed 128MB
- * SGE length limit allow us to require only a 2-entry HW
- * PBL for each SGE. This restriction is acceptable
- * since it is not possible to allocate 128MB of contiguous
- * DMA coherent memory!
- */
- if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
- return -EINVAL;
- wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
-
- /*
- * T3 restricts a recv to all zero-stag or all non-zero-stag.
- */
- if (wr->sg_list[i].lkey != 0)
- return -EINVAL;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
- pbl_offset += 2;
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.pagesz[i] = 0;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
- return 0;
-}
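-
-/*
- * Editorial note: pbl_offset advances by 2 per SGE because each SGE
- * is described by a 2-entry hardware PBL: with a 128MB page size and
- * a 128MB length cap, any buffer can span at most two 128MB-aligned
- * regions.
- */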
-
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- int err = 0;
- u8 uninitialized_var(t3_wr_flit_cnt);
- enum t3_wr_opcode t3_wr_opcode = 0;
- enum t3_wr_flags t3_wr_flags;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
- struct t3_swsq *sqp;
- int wr_cnt = 1;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
- qhp->wq.sq_size_log2);
- if (num_wrs == 0) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (num_wrs == 0) {
- err = -ENOMEM;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- t3_wr_flags = 0;
- if (wr->send_flags & IB_SEND_SOLICITED)
- t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
- t3_wr_flags |= T3_COMPLETION_FLAG;
- sqp = qhp->wq.sq +
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
- switch (wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_READ_FENCE_FLAG;
- t3_wr_opcode = T3_WR_SEND;
- err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- t3_wr_opcode = T3_WR_WRITE;
- err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- t3_wr_opcode = T3_WR_READ;
- t3_wr_flags = 0; /* T3 reads are always signaled */
- err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
- if (err)
- break;
- sqp->read_len = wqe->read.local_len;
- if (!qhp->wq.oldest_read)
- qhp->wq.oldest_read = sqp;
- break;
- case IB_WR_REG_MR:
- t3_wr_opcode = T3_WR_FASTREG;
- err = build_memreg(wqe, reg_wr(wr), &t3_wr_flit_cnt,
- &wr_cnt, &qhp->wq);
- break;
- case IB_WR_LOCAL_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
- t3_wr_opcode = T3_WR_INV_STAG;
- err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
- break;
- default:
- pr_debug("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
- err = -EINVAL;
- }
- if (err)
- break;
- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
- sqp->wr_id = wr->wr_id;
- sqp->opcode = wr2opcode(t3_wr_opcode);
- sqp->sq_wptr = qhp->wq.sq_wptr;
- sqp->complete = 0;
- sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);
-
- build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, t3_wr_flit_cnt,
- (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
- pr_debug("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
- __func__, (unsigned long long)wr->wr_id, idx,
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
- sqp->opcode);
- wr = wr->next;
- num_wrs--;
- qhp->wq.wptr += wr_cnt;
- ++(qhp->wq.sq_wptr);
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- int err = 0;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2) - 1;
- if (!wr) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (wr->num_sge > T3_MAX_SGE) {
- err = -EINVAL;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- if (num_wrs)
- if (wr->sg_list[0].lkey)
- err = build_rdma_recv(qhp, wqe, wr);
- else
- err = build_zero_stag_recv(qhp, wqe, wr);
- else
- err = -ENOMEM;
-
- if (err)
- break;
-
- build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
- pr_debug("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x wqe %p\n",
- __func__, (unsigned long long)wr->wr_id,
- idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
- ++(qhp->wq.rq_wptr);
- ++(qhp->wq.wptr);
- wr = wr->next;
- num_wrs--;
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
- u8 *layer_type, u8 *ecode)
-{
- int status = TPT_ERR_INTERNAL_ERR;
- int tagged = 0;
- int opcode = -1;
- int rqtype = 0;
- int send_inv = 0;
-
- if (rsp_msg) {
- status = CQE_STATUS(rsp_msg->cqe);
- opcode = CQE_OPCODE(rsp_msg->cqe);
- rqtype = RQ_TYPE(rsp_msg->cqe);
- send_inv = (opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV);
- tagged = (opcode == T3_RDMA_WRITE) ||
- (rqtype && (opcode == T3_READ_RESP));
- }
-
- switch (status) {
- case TPT_ERR_STAG:
- if (send_inv) {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_INV_STAG;
- }
- break;
- case TPT_ERR_PDID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- if ((opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV))
- *ecode = RDMAP_CANT_INV_STAG;
- else
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_QPID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_ACCESS:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_ACC_VIOL;
- break;
- case TPT_ERR_WRAP:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_TO_WRAP;
- break;
- case TPT_ERR_BOUND:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_BASE_BOUNDS;
- }
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- break;
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_OUT_OF_RQE:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_NOBUF;
- break;
- case TPT_ERR_PBL_ADDR_BOUND:
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- break;
- case TPT_ERR_CRC:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_CRC_ERR;
- break;
- case TPT_ERR_MARKER:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_MARKER_ERR;
- break;
- case TPT_ERR_PDU_LEN_ERR:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_MSG_TOOBIG;
- break;
- case TPT_ERR_DDP_VERSION:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_INV_VERS;
- } else {
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_VERS;
- }
- break;
- case TPT_ERR_RDMA_VERSION:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_VERS;
- break;
- case TPT_ERR_OPCODE:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_OPCODE;
- break;
- case TPT_ERR_DDP_QUEUE_NUM:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_QN;
- break;
- case TPT_ERR_MSN:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_RANGE;
- break;
- case TPT_ERR_TBIT:
- *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_MO:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MO;
- break;
- default:
- *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- }
-}
-
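/*
 * Editor's note: build_term_codes() above translates adapter CQE status
 * values into the layer/etype and error-code fields of an iWARP
 * TERMINATE message; the LAYER_RDMAP/LAYER_DDP/LAYER_MPA groupings
 * track the protocol layering specified for RDMAP, DDP and MPA
 * (RFC 5040/5041/5044).
 */
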
-int iwch_post_zb_read(struct iwch_ep *ep)
-{
- union t3_wr *wqe;
- struct sk_buff *skb;
- u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
-
- pr_debug("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- pr_err("%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
- wqe->read.rdmaop = T3_READ_REQ;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(1);
- wqe->read.rem_to = cpu_to_be64(1);
- wqe->read.local_stag = cpu_to_be32(1);
- wqe->read.local_len = cpu_to_be32(0);
- wqe->read.local_to = cpu_to_be64(1);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
- V_FW_RIWR_LEN(flit_cnt));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * This posts a TERMINATE with layer=RDMA, type=catastrophic.
- */
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
-{
- union t3_wr *wqe;
- struct terminate_message *term;
- struct sk_buff *skb;
-
- pr_debug("%s %d\n", __func__, __LINE__);
- skb = alloc_skb(40, GFP_ATOMIC);
- if (!skb) {
- pr_err("%s cannot send TERMINATE!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, 40);
- wqe->send.rdmaop = T3_TERMINATE;
-
- /* immediate data length */
- wqe->send.plen = htonl(4);
-
- /* immediate data starts here. */
- term = (struct terminate_message *)wqe->send.sgl;
- build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
- V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * Assumes qhp lock is held.
- */
-static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
- struct iwch_cq *schp)
- __releases(&qhp->lock)
- __acquires(&qhp->lock)
-{
- int count;
- int flushed;
-
- lockdep_assert_held(&qhp->lock);
-
- pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock(&qhp->lock);
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&rchp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&rchp->cq);
- cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&rchp->lock);
- if (flushed) {
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- }
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&schp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&schp->cq);
- cxio_count_scqes(&schp->cq, &qhp->wq, &count);
- flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&schp->lock);
- if (flushed) {
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock(&qhp->lock);
-}
-
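/*
 * Editor's note: __flush_qp() above enters with only the QP lock held
 * but must respect the CQ-before-QP lock order, so it drops the QP lock
 * and re-takes both locks in order for each CQ. The reference taken on
 * the QP keeps it from being freed across that unlock window.
 */
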
-static void flush_qp(struct iwch_qp *qhp)
-{
- struct iwch_cq *rchp, *schp;
-
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
-
- if (qhp->ibqp.uobject) {
- cxio_set_wq_in_error(&qhp->wq);
- cxio_set_cq_in_error(&rchp->cq);
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- if (schp != rchp) {
- cxio_set_cq_in_error(&schp->cq);
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
- return;
- }
- __flush_qp(qhp, rchp, schp);
-}
-
-
-/*
- * Return count of RECV WRs posted
- */
-u16 iwch_rqes_posted(struct iwch_qp *qhp)
-{
- union t3_wr *wqe = qhp->wq.queue;
- u16 count = 0;
-
- while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
- count++;
- wqe++;
- }
- pr_debug("%s qhp %p count %u\n", __func__, qhp, count);
- return count;
-}
-
-static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs)
-{
- struct t3_rdma_init_attr init_attr;
- int ret;
-
- init_attr.tid = qhp->ep->hwtid;
- init_attr.qpid = qhp->wq.qpid;
- init_attr.pdid = qhp->attr.pd;
- init_attr.scqid = qhp->attr.scq;
- init_attr.rcqid = qhp->attr.rcq;
- init_attr.rq_addr = qhp->wq.rq_addr;
- init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
- init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
- qhp->attr.mpa_attr.recv_marker_enabled |
- (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
- (qhp->attr.mpa_attr.crc_enabled << 2);
-
- init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
- uP_RI_QP_RDMA_WRITE_ENABLE |
- uP_RI_QP_BIND_ENABLE;
- if (!qhp->ibqp.uobject)
- init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
- uP_RI_QP_FAST_REGISTER_ENABLE;
-
- init_attr.tcp_emss = qhp->ep->emss;
- init_attr.ord = qhp->attr.max_ord;
- init_attr.ird = qhp->attr.max_ird;
- init_attr.qp_dma_addr = qhp->wq.dma_addr;
- init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
- init_attr.rqe_count = iwch_rqes_posted(qhp);
- init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
- init_attr.chan = qhp->ep->l2t->smt_idx;
- if (peer2peer) {
- init_attr.rtr_type = RTR_READ;
- if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
- init_attr.ord = 1;
- if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
- init_attr.ird = 1;
- } else
- init_attr.rtr_type = 0;
- init_attr.irs = qhp->ep->rcv_seq;
- pr_debug("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d flags 0x%x qpcaps 0x%x\n",
- __func__,
- init_attr.rq_addr, init_attr.rq_size,
- init_attr.flags, init_attr.qpcaps);
- ret = cxio_rdma_init(&rhp->rdev, &init_attr);
- pr_debug("%s ret %d\n", __func__, ret);
- return ret;
-}
-
-int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal)
-{
- int ret = 0;
- struct iwch_qp_attributes newattr = qhp->attr;
- unsigned long flag;
- int disconnect = 0;
- int terminate = 0;
- int abort = 0;
- int free = 0;
- struct iwch_ep *ep = NULL;
-
- pr_debug("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
- (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
-
- spin_lock_irqsave(&qhp->lock, flag);
-
- /* Process attr changes if in IDLE */
- if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
- if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
- ret = -EIO;
- goto out;
- }
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
- newattr.enable_rdma_read = attrs->enable_rdma_read;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
- newattr.enable_rdma_write = attrs->enable_rdma_write;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
- newattr.enable_bind = attrs->enable_bind;
- if (mask & IWCH_QP_ATTR_MAX_ORD) {
- if (attrs->max_ord >
- rhp->attr.max_rdma_read_qp_depth) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ord = attrs->max_ord;
- }
- if (mask & IWCH_QP_ATTR_MAX_IRD) {
- if (attrs->max_ird >
- rhp->attr.max_rdma_reads_per_qp) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ird = attrs->max_ird;
- }
- qhp->attr = newattr;
- }
-
- if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
- goto out;
- if (qhp->attr.state == attrs->next_state)
- goto out;
-
- switch (qhp->attr.state) {
- case IWCH_QP_STATE_IDLE:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_RTS:
- if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
- ret = -EINVAL;
- goto out;
- }
- if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.mpa_attr = attrs->mpa_attr;
- qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
- qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = IWCH_QP_STATE_RTS;
-
- /*
- * Ref the endpoint here and deref when we
- * disassociate the endpoint from the QP. This
- * happens in CLOSING->IDLE transition or *->ERROR
- * transition.
- */
- get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
- ret = rdma_init(rhp, qhp, mask, attrs);
- spin_lock_irqsave(&qhp->lock, flag);
- if (ret)
- goto err;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- flush_qp(qhp);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_RTS:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_CLOSING:
- BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
- qhp->attr.state = IWCH_QP_STATE_CLOSING;
- if (!internal) {
- abort = 0;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- break;
- case IWCH_QP_STATE_TERMINATE:
- qhp->attr.state = IWCH_QP_STATE_TERMINATE;
- if (qhp->ibqp.uobject)
- cxio_set_wq_in_error(&qhp->wq);
- if (!internal)
- terminate = 1;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- if (!internal) {
- abort = 1;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- goto err;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_CLOSING:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- switch (attrs->next_state) {
- case IWCH_QP_STATE_IDLE:
- flush_qp(qhp);
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.llp_stream_handle = NULL;
- put_ep(&qhp->ep->com);
- qhp->ep = NULL;
- wake_up(&qhp->wait);
- break;
- case IWCH_QP_STATE_ERROR:
- goto err;
- default:
- ret = -EINVAL;
- goto err;
- }
- break;
- case IWCH_QP_STATE_ERROR:
- if (attrs->next_state != IWCH_QP_STATE_IDLE) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
- !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- break;
- case IWCH_QP_STATE_TERMINATE:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- goto err;
- break;
- default:
- pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
- ret = -EINVAL;
- goto err;
- break;
- }
- goto out;
-err:
- pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.qpid);
-
- /* disassociate the LLP connection */
- qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
- qhp->ep = NULL;
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- free = 1;
- wake_up(&qhp->wait);
- BUG_ON(!ep);
- flush_qp(qhp);
-out:
- spin_unlock_irqrestore(&qhp->lock, flag);
-
- if (terminate)
- iwch_post_terminate(qhp, NULL);
-
- /*
- * If disconnect is 1, then we need to initiate a disconnect
- * on the EP. This can be a normal close (RTS->CLOSING) or
- * an abnormal close (RTS/CLOSING->ERROR).
- */
- if (disconnect) {
- iwch_ep_disconnect(ep, abort, GFP_KERNEL);
- put_ep(&ep->com);
- }
-
- /*
- * If free is 1, then we've disassociated the EP from the QP
- * and we need to dereference the EP.
- */
- if (free)
- put_ep(&ep->com);
-
- pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
- return ret;
-}
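
/*
 * Editor's summary of the modify-QP state machine implemented above,
 * derived from the switch statements (not authoritative documentation):
 *
 *   IDLE      -> RTS       requires LLP stream handle and MPA attributes
 *   IDLE      -> ERROR     flushes both work queues
 *   RTS       -> CLOSING   graceful close; disconnects unless internal
 *   RTS       -> TERMINATE posts a TERMINATE unless internal
 *   RTS       -> ERROR     aborts the connection
 *   CLOSING   -> IDLE      internal transitions only
 *   ERROR     -> IDLE      allowed only once the SQ and RQ are empty
 *   TERMINATE -> ERROR     internal transitions only
 */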
diff --git a/drivers/infiniband/hw/cxgb3/tcb.h b/drivers/infiniband/hw/cxgb3/tcb.h
deleted file mode 100644
index c702dc199e18..000000000000
--- a/drivers/infiniband/hw/cxgb3/tcb.h
+++ /dev/null
@@ -1,632 +0,0 @@
-/*
- * Copyright (c) 2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _TCB_DEFS_H
-#define _TCB_DEFS_H
-
-#define W_TCB_T_STATE 0
-#define S_TCB_T_STATE 0
-#define M_TCB_T_STATE 0xfULL
-#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
-
-#define W_TCB_TIMER 0
-#define S_TCB_TIMER 4
-#define M_TCB_TIMER 0x1ULL
-#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
-
-#define W_TCB_DACK_TIMER 0
-#define S_TCB_DACK_TIMER 5
-#define M_TCB_DACK_TIMER 0x1ULL
-#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
-
-#define W_TCB_DEL_FLAG 0
-#define S_TCB_DEL_FLAG 6
-#define M_TCB_DEL_FLAG 0x1ULL
-#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
-
-#define W_TCB_L2T_IX 0
-#define S_TCB_L2T_IX 7
-#define M_TCB_L2T_IX 0x7ffULL
-#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
-
-#define W_TCB_SMAC_SEL 0
-#define S_TCB_SMAC_SEL 18
-#define M_TCB_SMAC_SEL 0x3ULL
-#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
-
-#define W_TCB_TOS 0
-#define S_TCB_TOS 20
-#define M_TCB_TOS 0x3fULL
-#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
-
-#define W_TCB_MAX_RT 0
-#define S_TCB_MAX_RT 26
-#define M_TCB_MAX_RT 0xfULL
-#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
-
-#define W_TCB_T_RXTSHIFT 0
-#define S_TCB_T_RXTSHIFT 30
-#define M_TCB_T_RXTSHIFT 0xfULL
-#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
-
-#define W_TCB_T_DUPACKS 1
-#define S_TCB_T_DUPACKS 2
-#define M_TCB_T_DUPACKS 0xfULL
-#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
-
-#define W_TCB_T_MAXSEG 1
-#define S_TCB_T_MAXSEG 6
-#define M_TCB_T_MAXSEG 0xfULL
-#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
-
-#define W_TCB_T_FLAGS1 1
-#define S_TCB_T_FLAGS1 10
-#define M_TCB_T_FLAGS1 0xffffffffULL
-#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
-
-#define W_TCB_T_MIGRATION 1
-#define S_TCB_T_MIGRATION 20
-#define M_TCB_T_MIGRATION 0x1ULL
-#define V_TCB_T_MIGRATION(x) ((x) << S_TCB_T_MIGRATION)
-
-#define W_TCB_T_FLAGS2 2
-#define S_TCB_T_FLAGS2 10
-#define M_TCB_T_FLAGS2 0x7fULL
-#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
-
-#define W_TCB_SND_SCALE 2
-#define S_TCB_SND_SCALE 17
-#define M_TCB_SND_SCALE 0xfULL
-#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
-
-#define W_TCB_RCV_SCALE 2
-#define S_TCB_RCV_SCALE 21
-#define M_TCB_RCV_SCALE 0xfULL
-#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
-
-#define W_TCB_SND_UNA_RAW 2
-#define S_TCB_SND_UNA_RAW 25
-#define M_TCB_SND_UNA_RAW 0x7ffffffULL
-#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
-
-#define W_TCB_SND_NXT_RAW 3
-#define S_TCB_SND_NXT_RAW 20
-#define M_TCB_SND_NXT_RAW 0x7ffffffULL
-#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
-
-#define W_TCB_RCV_NXT 4
-#define S_TCB_RCV_NXT 15
-#define M_TCB_RCV_NXT 0xffffffffULL
-#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
-
-#define W_TCB_RCV_ADV 5
-#define S_TCB_RCV_ADV 15
-#define M_TCB_RCV_ADV 0xffffULL
-#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
-
-#define W_TCB_SND_MAX_RAW 5
-#define S_TCB_SND_MAX_RAW 31
-#define M_TCB_SND_MAX_RAW 0x7ffffffULL
-#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
-
-#define W_TCB_SND_CWND 6
-#define S_TCB_SND_CWND 26
-#define M_TCB_SND_CWND 0x7ffffffULL
-#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
-
-#define W_TCB_SND_SSTHRESH 7
-#define S_TCB_SND_SSTHRESH 21
-#define M_TCB_SND_SSTHRESH 0x7ffffffULL
-#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
-
-#define W_TCB_T_RTT_TS_RECENT_AGE 8
-#define S_TCB_T_RTT_TS_RECENT_AGE 16
-#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
-#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
-
-#define W_TCB_T_RTSEQ_RECENT 9
-#define S_TCB_T_RTSEQ_RECENT 16
-#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
-#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
-
-#define W_TCB_T_SRTT 10
-#define S_TCB_T_SRTT 16
-#define M_TCB_T_SRTT 0xffffULL
-#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
-
-#define W_TCB_T_RTTVAR 11
-#define S_TCB_T_RTTVAR 0
-#define M_TCB_T_RTTVAR 0xffffULL
-#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
-
-#define W_TCB_TS_LAST_ACK_SENT_RAW 11
-#define S_TCB_TS_LAST_ACK_SENT_RAW 16
-#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
-#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
-
-#define W_TCB_DIP 12
-#define S_TCB_DIP 11
-#define M_TCB_DIP 0xffffffffULL
-#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
-
-#define W_TCB_SIP 13
-#define S_TCB_SIP 11
-#define M_TCB_SIP 0xffffffffULL
-#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
-
-#define W_TCB_DP 14
-#define S_TCB_DP 11
-#define M_TCB_DP 0xffffULL
-#define V_TCB_DP(x) ((x) << S_TCB_DP)
-
-#define W_TCB_SP 14
-#define S_TCB_SP 27
-#define M_TCB_SP 0xffffULL
-#define V_TCB_SP(x) ((x) << S_TCB_SP)
-
-#define W_TCB_TIMESTAMP 15
-#define S_TCB_TIMESTAMP 11
-#define M_TCB_TIMESTAMP 0xffffffffULL
-#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
-
-#define W_TCB_TIMESTAMP_OFFSET 16
-#define S_TCB_TIMESTAMP_OFFSET 11
-#define M_TCB_TIMESTAMP_OFFSET 0xfULL
-#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
-
-#define W_TCB_TX_MAX 16
-#define S_TCB_TX_MAX 15
-#define M_TCB_TX_MAX 0xffffffffULL
-#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
-
-#define W_TCB_TX_HDR_PTR_RAW 17
-#define S_TCB_TX_HDR_PTR_RAW 15
-#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
-
-#define W_TCB_TX_LAST_PTR_RAW 18
-#define S_TCB_TX_LAST_PTR_RAW 0
-#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
-
-#define W_TCB_TX_COMPACT 18
-#define S_TCB_TX_COMPACT 17
-#define M_TCB_TX_COMPACT 0x1ULL
-#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
-
-#define W_TCB_RX_COMPACT 18
-#define S_TCB_RX_COMPACT 18
-#define M_TCB_RX_COMPACT 0x1ULL
-#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
-
-#define W_TCB_RCV_WND 18
-#define S_TCB_RCV_WND 19
-#define M_TCB_RCV_WND 0x7ffffffULL
-#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
-
-#define W_TCB_RX_HDR_OFFSET 19
-#define S_TCB_RX_HDR_OFFSET 14
-#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
-#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
-
-#define W_TCB_RX_FRAG0_START_IDX_RAW 20
-#define S_TCB_RX_FRAG0_START_IDX_RAW 9
-#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
-
-#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
-#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
-#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
-#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
-
-#define W_TCB_RX_FRAG0_LEN 21
-#define S_TCB_RX_FRAG0_LEN 31
-#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
-
-#define W_TCB_RX_FRAG1_LEN 22
-#define S_TCB_RX_FRAG1_LEN 26
-#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
-
-#define W_TCB_NEWRENO_RECOVER 23
-#define S_TCB_NEWRENO_RECOVER 21
-#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
-#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
-
-#define W_TCB_PDU_HAVE_LEN 24
-#define S_TCB_PDU_HAVE_LEN 16
-#define M_TCB_PDU_HAVE_LEN 0x1ULL
-#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
-
-#define W_TCB_PDU_LEN 24
-#define S_TCB_PDU_LEN 17
-#define M_TCB_PDU_LEN 0xffffULL
-#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
-
-#define W_TCB_RX_QUIESCE 25
-#define S_TCB_RX_QUIESCE 1
-#define M_TCB_RX_QUIESCE 0x1ULL
-#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
-
-#define W_TCB_RX_PTR_RAW 25
-#define S_TCB_RX_PTR_RAW 2
-#define M_TCB_RX_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
-
-#define W_TCB_CPU_NO 25
-#define S_TCB_CPU_NO 19
-#define M_TCB_CPU_NO 0x7fULL
-#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
-
-#define W_TCB_ULP_TYPE 25
-#define S_TCB_ULP_TYPE 26
-#define M_TCB_ULP_TYPE 0xfULL
-#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
-
-#define W_TCB_RX_FRAG1_PTR_RAW 25
-#define S_TCB_RX_FRAG1_PTR_RAW 30
-#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
-#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
-#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
-
-#define W_TCB_RX_FRAG2_PTR_RAW 27
-#define S_TCB_RX_FRAG2_PTR_RAW 10
-#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_LEN_RAW 27
-#define S_TCB_RX_FRAG2_LEN_RAW 27
-#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_PTR_RAW 28
-#define S_TCB_RX_FRAG3_PTR_RAW 22
-#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
-
-#define W_TCB_RX_FRAG3_LEN_RAW 29
-#define S_TCB_RX_FRAG3_LEN_RAW 7
-#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
-#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
-#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
-
-#define W_TCB_PDU_HDR_LEN 30
-#define S_TCB_PDU_HDR_LEN 29
-#define M_TCB_PDU_HDR_LEN 0xffULL
-#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
-
-#define W_TCB_SLUSH1 31
-#define S_TCB_SLUSH1 5
-#define M_TCB_SLUSH1 0x7ffffULL
-#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
-
-#define W_TCB_ULP_RAW 31
-#define S_TCB_ULP_RAW 24
-#define M_TCB_ULP_RAW 0xffULL
-#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
-
-#define W_TCB_DDP_RDMAP_VERSION 25
-#define S_TCB_DDP_RDMAP_VERSION 30
-#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
-#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
-
-#define W_TCB_MARKER_ENABLE_RX 25
-#define S_TCB_MARKER_ENABLE_RX 31
-#define M_TCB_MARKER_ENABLE_RX 0x1ULL
-#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
-
-#define W_TCB_MARKER_ENABLE_TX 26
-#define S_TCB_MARKER_ENABLE_TX 0
-#define M_TCB_MARKER_ENABLE_TX 0x1ULL
-#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
-
-#define W_TCB_CRC_ENABLE 26
-#define S_TCB_CRC_ENABLE 1
-#define M_TCB_CRC_ENABLE 0x1ULL
-#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
-
-#define W_TCB_IRS_ULP 26
-#define S_TCB_IRS_ULP 2
-#define M_TCB_IRS_ULP 0x1ffULL
-#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
-
-#define W_TCB_ISS_ULP 26
-#define S_TCB_ISS_ULP 11
-#define M_TCB_ISS_ULP 0x1ffULL
-#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
-
-#define W_TCB_TX_PDU_LEN 26
-#define S_TCB_TX_PDU_LEN 20
-#define M_TCB_TX_PDU_LEN 0x3fffULL
-#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
-
-#define W_TCB_TX_PDU_OUT 27
-#define S_TCB_TX_PDU_OUT 2
-#define M_TCB_TX_PDU_OUT 0x1ULL
-#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
-
-#define W_TCB_CQ_IDX_SQ 27
-#define S_TCB_CQ_IDX_SQ 3
-#define M_TCB_CQ_IDX_SQ 0xffffULL
-#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
-
-#define W_TCB_CQ_IDX_RQ 27
-#define S_TCB_CQ_IDX_RQ 19
-#define M_TCB_CQ_IDX_RQ 0xffffULL
-#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
-
-#define W_TCB_QP_ID 28
-#define S_TCB_QP_ID 3
-#define M_TCB_QP_ID 0xffffULL
-#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
-
-#define W_TCB_PD_ID 28
-#define S_TCB_PD_ID 19
-#define M_TCB_PD_ID 0xffffULL
-#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
-
-#define W_TCB_STAG 29
-#define S_TCB_STAG 3
-#define M_TCB_STAG 0xffffffffULL
-#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
-
-#define W_TCB_RQ_START 30
-#define S_TCB_RQ_START 3
-#define M_TCB_RQ_START 0x3ffffffULL
-#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
-
-#define W_TCB_RQ_MSN 30
-#define S_TCB_RQ_MSN 29
-#define M_TCB_RQ_MSN 0x3ffULL
-#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
-
-#define W_TCB_RQ_MAX_OFFSET 31
-#define S_TCB_RQ_MAX_OFFSET 7
-#define M_TCB_RQ_MAX_OFFSET 0xfULL
-#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
-
-#define W_TCB_RQ_WRITE_PTR 31
-#define S_TCB_RQ_WRITE_PTR 11
-#define M_TCB_RQ_WRITE_PTR 0x3ffULL
-#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
-
-#define W_TCB_INB_WRITE_PERM 31
-#define S_TCB_INB_WRITE_PERM 21
-#define M_TCB_INB_WRITE_PERM 0x1ULL
-#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
-
-#define W_TCB_INB_READ_PERM 31
-#define S_TCB_INB_READ_PERM 22
-#define M_TCB_INB_READ_PERM 0x1ULL
-#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
-
-#define W_TCB_ORD_L_BIT_VLD 31
-#define S_TCB_ORD_L_BIT_VLD 23
-#define M_TCB_ORD_L_BIT_VLD 0x1ULL
-#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
-
-#define W_TCB_RDMAP_OPCODE 31
-#define S_TCB_RDMAP_OPCODE 24
-#define M_TCB_RDMAP_OPCODE 0xfULL
-#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
-
-#define W_TCB_TX_FLUSH 31
-#define S_TCB_TX_FLUSH 28
-#define M_TCB_TX_FLUSH 0x1ULL
-#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
-
-#define W_TCB_TX_OOS_RXMT 31
-#define S_TCB_TX_OOS_RXMT 29
-#define M_TCB_TX_OOS_RXMT 0x1ULL
-#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
-
-#define W_TCB_TX_OOS_TXMT 31
-#define S_TCB_TX_OOS_TXMT 30
-#define M_TCB_TX_OOS_TXMT 0x1ULL
-#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
-
-#define W_TCB_SLUSH_AUX2 31
-#define S_TCB_SLUSH_AUX2 31
-#define M_TCB_SLUSH_AUX2 0x1ULL
-#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
-
-#define W_TCB_RX_FRAG1_PTR_RAW2 25
-#define S_TCB_RX_FRAG1_PTR_RAW2 30
-#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
-
-#define W_TCB_RX_DDP_FLAGS 26
-#define S_TCB_RX_DDP_FLAGS 15
-#define M_TCB_RX_DDP_FLAGS 0x3ffULL
-#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
-
-#define W_TCB_SLUSH_AUX3 26
-#define S_TCB_SLUSH_AUX3 31
-#define M_TCB_SLUSH_AUX3 0x1ffULL
-#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
-
-#define W_TCB_RX_DDP_BUF0_OFFSET 27
-#define S_TCB_RX_DDP_BUF0_OFFSET 8
-#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
-
-#define W_TCB_RX_DDP_BUF0_LEN 27
-#define S_TCB_RX_DDP_BUF0_LEN 30
-#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
-
-#define W_TCB_RX_DDP_BUF1_OFFSET 28
-#define S_TCB_RX_DDP_BUF1_OFFSET 20
-#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
-
-#define W_TCB_RX_DDP_BUF1_LEN 29
-#define S_TCB_RX_DDP_BUF1_LEN 10
-#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
-
-#define W_TCB_RX_DDP_BUF0_TAG 30
-#define S_TCB_RX_DDP_BUF0_TAG 0
-#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
-
-#define W_TCB_RX_DDP_BUF1_TAG 31
-#define S_TCB_RX_DDP_BUF1_TAG 0
-#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-
-#define S_TF_DACK 10
-#define V_TF_DACK(x) ((x) << S_TF_DACK)
-
-#define S_TF_NAGLE 11
-#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
-
-#define S_TF_RECV_SCALE 12
-#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
-
-#define S_TF_RECV_TSTMP 13
-#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
-
-#define S_TF_RECV_SACK 14
-#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
-
-#define S_TF_TURBO 15
-#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
-
-#define S_TF_KEEPALIVE 16
-#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
-
-#define S_TF_TCAM_BYPASS 17
-#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
-
-#define S_TF_CORE_FIN 18
-#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
-
-#define S_TF_CORE_MORE 19
-#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
-
-#define S_TF_MIGRATING 20
-#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
-
-#define S_TF_ACTIVE_OPEN 21
-#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
-
-#define S_TF_ASK_MODE 22
-#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
-
-#define S_TF_NON_OFFLOAD 23
-#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
-
-#define S_TF_MOD_SCHD 24
-#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
-
-#define S_TF_MOD_SCHD_REASON0 25
-#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
-
-#define S_TF_MOD_SCHD_REASON1 26
-#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
-
-#define S_TF_MOD_SCHD_RX 27
-#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
-
-#define S_TF_CORE_PUSH 28
-#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
-
-#define S_TF_RCV_COALESCE_ENABLE 29
-#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
-
-#define S_TF_RCV_COALESCE_PUSH 30
-#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
-
-#define S_TF_RCV_COALESCE_LAST_PSH 31
-#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
-
-#define S_TF_RCV_COALESCE_HEARTBEAT 32
-#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
-
-#define S_TF_HALF_CLOSE 33
-#define V_TF_HALF_CLOSE(x) ((x) << S_TF_HALF_CLOSE)
-
-#define S_TF_DACK_MSS 34
-#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
-
-#define S_TF_CCTRL_SEL0 35
-#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
-
-#define S_TF_CCTRL_SEL1 36
-#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
-
-#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
-#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
-
-#define S_TF_TX_PACE_AUTO 38
-#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
-
-#define S_TF_PEER_FIN_HELD 39
-#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
-
-#define S_TF_CORE_URG 40
-#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
-
-#define S_TF_RDMA_ERROR 41
-#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
-
-#define S_TF_SSWS_DISABLED 42
-#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
-
-#define S_TF_DUPACK_COUNT_ODD 43
-#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
-
-#define S_TF_TX_CHANNEL 44
-#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
-
-#define S_TF_RX_CHANNEL 45
-#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
-
-#define S_TF_TX_PACE_FIXED 46
-#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
-
-#define S_TF_RDMA_FLM_ERROR 47
-#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
-
-#define S_TF_RX_FLOW_CONTROL_DISABLE 48
-#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
-
-#endif /* _TCB_DEFS_H */
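
/*
 * Editor's note on the header above: each W_/S_/M_/V_ quad follows the
 * usual Chelsio convention -- W_ names the 32-bit word index into the
 * TCB, S_ the bit offset, M_ the unshifted mask, and V_(x) shifts a
 * value into position. A hedged sketch of the matching field extraction
 * (illustrative only; the driver defines its own accessors elsewhere):
 */
static inline u64 tcb_field_get(u64 word, unsigned int shift, u64 mask)
{
	/* Inverse of V_*(x): shift the word down, then apply the mask. */
	return (word >> shift) & mask;
}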
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 347dc242fb88..ee1182f9b627 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3379,7 +3379,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
@@ -3401,7 +3401,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 35c284af574d..fe3a7e8561df 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
+ mhp->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index d373ac0fe2cb..ba83d942997c 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -305,7 +305,10 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
+ int ret = 0;
pr_debug("ibdev %p\n", ibdev);
+ ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
props->port_cap_flags =
IB_PORT_CM_SUP |
@@ -315,11 +318,9 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
- return 0;
+ return ret;
}
static ssize_t hw_rev_show(struct device *dev,
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 2283e432693e..aa7396a1588a 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -60,8 +60,6 @@ struct efa_dev {
u64 mem_bar_len;
u64 db_bar_addr;
u64 db_bar_len;
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
int admin_msix_vector_idx;
struct efa_irq admin_irq;
@@ -71,8 +69,6 @@ struct efa_dev {
struct efa_ucontext {
struct ib_ucontext ibucontext;
- struct xarray mmap_xa;
- u32 mmap_xa_page;
u16 uarn;
};
@@ -91,6 +87,7 @@ struct efa_cq {
struct efa_ucontext *ucontext;
dma_addr_t dma_addr;
void *cpu_addr;
+ struct rdma_user_mmap_entry *mmap_entry;
size_t size;
u16 cq_idx;
};
@@ -101,6 +98,13 @@ struct efa_qp {
void *rq_cpu_addr;
size_t rq_size;
enum ib_qp_state state;
+
+ /* Used for saving mmap_xa entries */
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *llq_desc_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_mmap_entry;
+
u32 qp_handle;
u32 max_send_wr;
u32 max_recv_wr;
@@ -147,6 +151,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
struct rdma_ah_attr *ah_attr,
u32 flags,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 2be0469d545f..e96bcb16bd2b 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -362,9 +362,13 @@ struct efa_admin_reg_mr_cmd {
/*
* permissions
- * 0 : local_write_enable - Write permissions: value
- * of 1 needed for RQ buffers and for RDMA write
- * 7:1 : reserved1 - remote access flags, etc
+ * 0 : local_write_enable - Local write permissions:
+ * must be set for RQ buffers and buffers posted for
+ * RDMA Read requests
+ * 1 : reserved1 - MBZ
+ * 2 : remote_read_enable - Remote read permissions:
+ * must be set to enable RDMA read from the region
+ * 7:3 : reserved2 - MBZ
*/
u8 permissions;
@@ -558,6 +562,16 @@ struct efa_admin_feature_device_attr_desc {
/* Indicates how many bits are used virtual address access */
u8 virt_addr_width;
+
+ /*
+ * 0 : rdma_read - If set, RDMA Read is supported on
+ * TX queues
+ * 31:1 : reserved - MBZ
+ */
+ u32 device_caps;
+
+ /* Max RDMA transfer size in bytes */
+ u32 max_rdma_size;
};
struct efa_admin_feature_queue_attr_desc {
@@ -604,6 +618,9 @@ struct efa_admin_feature_queue_attr_desc {
/* The maximum size of LLQ in bytes */
u32 max_llq_size;
+
+ /* Maximum number of SGEs for a single RDMA read WQE */
+ u16 max_wr_rdma_sges;
};
struct efa_admin_feature_aenq_desc {
@@ -618,6 +635,7 @@ struct efa_admin_feature_network_attr_desc {
/* Raw address data in network byte order */
u8 addr[16];
+ /* max packet payload size in bytes */
u32 mtu;
};
@@ -780,6 +798,8 @@ struct efa_admin_mmio_req_read_less_resp {
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
@@ -791,4 +811,7 @@ struct efa_admin_mmio_req_read_less_resp {
/* get_set_feature_common_desc */
#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+/* feature_device_attr_desc */
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
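
/*
 * Editor's sketch: composing the reg_mr permissions byte from the
 * SHIFT/MASK definitions added above (illustrative helper, not part of
 * the EFA driver):
 */
static inline u8 efa_mr_permissions(bool local_write, bool remote_read)
{
	u8 perm = 0;

	/* Needed for RQ buffers and buffers posted for RDMA Read requests. */
	if (local_write)
		perm |= EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
	/* Enables RDMA Read from the registered region. */
	if (remote_read)
		perm |= EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK;

	return perm;
}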
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 3c412bc5b94f..0778f4f7dccd 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -317,6 +317,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
struct efa_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
+ struct efa_admin_aq_entry *aqe;
struct efa_comp_ctx *comp_ctx;
u16 queue_size_mask;
u16 cmd_id;
@@ -350,7 +351,9 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
reinit_completion(&comp_ctx->wait_event);
- memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);
+ aqe = &aq->sq.entries[pi];
+ memset(aqe, 0, sizeof(*aqe));
+ memcpy(aqe, cmd, cmd_size_in_bytes);
aq->sq.pc++;
atomic64_inc(&aq->stats.submitted_cmd);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index c079f1332082..e20bd84a1014 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -230,8 +230,7 @@ int efa_com_register_mr(struct efa_com_dev *edev,
mr_cmd.flags |= params->page_shift &
EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
mr_cmd.iova = params->iova;
- mr_cmd.permissions |= params->permissions &
- EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+ mr_cmd.permissions = params->permissions;
if (params->inline_pbl) {
memcpy(mr_cmd.pbl.inline_pbl_array,
@@ -423,28 +422,6 @@ static int efa_com_get_feature(struct efa_com_dev *edev,
return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
}
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result)
-{
- struct efa_admin_get_feature_resp resp;
- int err;
-
- err = efa_com_get_feature(edev, &resp,
- EFA_ADMIN_NETWORK_ATTR);
- if (err) {
- ibdev_err_ratelimited(edev->efa_dev,
- "Failed to get network attributes %d\n",
- err);
- return err;
- }
-
- memcpy(result->addr, resp.u.network_attr.addr,
- sizeof(resp.u.network_attr.addr));
- result->mtu = resp.u.network_attr.mtu;
-
- return 0;
-}
-
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result)
{
@@ -467,6 +444,8 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->phys_addr_width = resp.u.device_attr.phys_addr_width;
result->virt_addr_width = resp.u.device_attr.virt_addr_width;
result->db_bar = resp.u.device_attr.db_bar;
+ result->max_rdma_size = resp.u.device_attr.max_rdma_size;
+ result->device_caps = resp.u.device_attr.device_caps;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
@@ -500,6 +479,19 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->max_ah = resp.u.queue_attr.max_ah;
result->max_llq_size = resp.u.queue_attr.max_llq_size;
result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
+ result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
+ return err;
+ }
+
+ memcpy(result->addr, resp.u.network_attr.addr,
+ sizeof(resp.u.network_attr.addr));
+ result->mtu = resp.u.network_attr.mtu;
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 7f6c13052f49..31db5a0cbd5b 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -100,14 +100,11 @@ struct efa_com_destroy_ah_params {
u16 pdn;
};
-struct efa_com_get_network_attr_result {
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
-};
-
struct efa_com_get_device_attr_result {
+ u8 addr[EFA_GID_SIZE];
u64 page_size_cap;
u64 max_mr_pages;
+ u32 mtu;
u32 fw_version;
u32 admin_api_version;
u32 device_version;
@@ -124,9 +121,12 @@ struct efa_com_get_device_attr_result {
u32 max_pd;
u32 max_ah;
u32 max_llq_size;
+ u32 max_rdma_size;
+ u32 device_caps;
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
+ u16 max_wr_rdma_sge;
u8 db_bar;
};
@@ -181,12 +181,7 @@ struct efa_com_reg_mr_params {
* address mapping
*/
u8 page_shift;
- /*
- * permissions
- * 0: local_write_enable - Write permissions: value of 1 needed
- * for RQ buffers and for RDMA write:1: reserved1 - remote
- * access flags, etc
- */
+ /* see permissions field of struct efa_admin_reg_mr_cmd */
u8 permissions;
u8 inline_pbl;
u8 indirect;
@@ -271,8 +266,6 @@ int efa_com_create_ah(struct efa_com_dev *edev,
struct efa_com_create_ah_result *result);
int efa_com_destroy_ah(struct efa_com_dev *edev,
struct efa_com_destroy_ah_params *params);
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result);
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 83858f7e83d0..faf3ff1bca2a 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -30,15 +30,6 @@ MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-static void efa_update_network_attr(struct efa_dev *dev,
- struct efa_com_get_network_attr_result *network_attr)
-{
- memcpy(dev->addr, network_attr->addr, sizeof(network_attr->addr));
- dev->mtu = network_attr->mtu;
-
- dev_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr);
-}
-
/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
struct efa_admin_aenq_entry *aenq_e)
@@ -217,6 +208,7 @@ static const struct ib_device_ops efa_dev_ops = {
.get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap,
+ .mmap_free = efa_mmap_free,
.modify_qp = efa_modify_qp,
.query_device = efa_query_device,
.query_gid = efa_query_gid,
@@ -233,7 +225,6 @@ static const struct ib_device_ops efa_dev_ops = {
static int efa_ib_device_add(struct efa_dev *dev)
{
- struct efa_com_get_network_attr_result network_attr;
struct efa_com_get_hw_hints_result hw_hints;
struct pci_dev *pdev = dev->pdev;
int err;
@@ -249,12 +240,6 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
return err;
- err = efa_com_get_network_attr(&dev->edev, &network_attr);
- if (err)
- goto err_release_doorbell_bar;
-
- efa_update_network_attr(dev, &network_attr);
-
err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
if (err)
goto err_release_doorbell_bar;
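
/*
 * Editor's note: across the efa changes in this series the driver's
 * hand-rolled per-ucontext xarray of mmap entries is replaced by the
 * core rdma_user_mmap_entry API (rdma_user_mmap_entry_insert()/_remove()
 * plus the ->mmap_free() hook), so entry lifetime is tracked by the RDMA
 * core instead of being deferred to dealloc_ucontext.
 */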
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 4edae89e8e3c..c9d294caa27a 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -13,10 +13,6 @@
#include "efa.h"
-#define EFA_MMAP_FLAG_SHIFT 56
-#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
-#define EFA_MMAP_INVALID U64_MAX
-
enum {
EFA_MMAP_DMA_PAGE = 0,
EFA_MMAP_IO_WC,
@@ -27,20 +23,12 @@ enum {
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-struct efa_mmap_entry {
- void *obj;
+struct efa_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
u64 address;
- u64 length;
- u32 mmap_page;
u8 mmap_flag;
};
-static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
-{
- return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
- ((u64)efa->mmap_page << PAGE_SHIFT);
-}
-
#define EFA_DEFINE_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \
@@ -82,8 +70,6 @@ static const char *const efa_stats_names[] = {
#define EFA_CHUNK_USED_SIZE \
((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
-#define EFA_SUPPORTED_ACCESS_FLAGS IB_ACCESS_LOCAL_WRITE
-
struct pbl_chunk {
dma_addr_t dma_addr;
u64 *buf;
@@ -147,6 +133,17 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
return container_of(ibah, struct efa_ah, ibah);
}
+static inline struct efa_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
+}
+
+static inline bool is_rdma_read_cap(struct efa_dev *dev)
+{
+ return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
+}
+
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
FIELD_SIZEOF(typeof(x), fld) <= (sz))
@@ -172,106 +169,6 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
return addr;
}
-/*
- * This is only called when the ucontext is destroyed and there can be no
- * concurrent query via mmap or allocate on the xarray, thus we can be sure no
- * other thread is using the entry pointer. We also know that all the BAR
- * pages have either been zap'd or munmapped at this point. Normal pages are
- * refcounted and will be freed at the proper time.
- */
-static void mmap_entries_remove_free(struct efa_dev *dev,
- struct efa_ucontext *ucontext)
-{
- struct efa_mmap_entry *entry;
- unsigned long mmap_page;
-
- xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
- xa_erase(&ucontext->mmap_xa, mmap_page);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, get_mmap_key(entry), entry->address,
- entry->length);
- if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
- /* DMA mapping is already gone, now free the pages */
- free_pages_exact(phys_to_virt(entry->address),
- entry->length);
- kfree(entry);
- }
-}
-
-static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
- struct efa_ucontext *ucontext,
- u64 key, u64 len)
-{
- struct efa_mmap_entry *entry;
- u64 mmap_page;
-
- mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
- if (mmap_page > U32_MAX)
- return NULL;
-
- entry = xa_load(&ucontext->mmap_xa, mmap_page);
- if (!entry || get_mmap_key(entry) != key || entry->length != len)
- return NULL;
-
- ibdev_dbg(&dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, key, entry->address, entry->length);
-
- return entry;
-}
-
-/*
- * Note this locking scheme cannot support removal of entries, except during
- * ucontext destruction when the core code guarantees no concurrency.
- */
-static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
- void *obj, u64 address, u64 length, u8 mmap_flag)
-{
- struct efa_mmap_entry *entry;
- u32 next_mmap_page;
- int err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return EFA_MMAP_INVALID;
-
- entry->obj = obj;
- entry->address = address;
- entry->length = length;
- entry->mmap_flag = mmap_flag;
-
- xa_lock(&ucontext->mmap_xa);
- if (check_add_overflow(ucontext->mmap_xa_page,
- (u32)(length >> PAGE_SHIFT),
- &next_mmap_page))
- goto err_unlock;
-
- entry->mmap_page = ucontext->mmap_xa_page;
- ucontext->mmap_xa_page = next_mmap_page;
- err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
- GFP_KERNEL);
- if (err)
- goto err_unlock;
-
- xa_unlock(&ucontext->mmap_xa);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
- entry->obj, entry->address, entry->length, get_mmap_key(entry));
-
- return get_mmap_key(entry);
-
-err_unlock:
- xa_unlock(&ucontext->mmap_xa);
- kfree(entry);
- return EFA_MMAP_INVALID;
-
-}
-
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
@@ -306,12 +203,17 @@ int efa_query_device(struct ib_device *ibdev,
dev_attr->max_rq_depth);
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
+ props->max_sge_rd = dev_attr->max_wr_rdma_sge;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
resp.max_rq_sge = dev_attr->max_rq_sge;
resp.max_sq_wr = dev_attr->max_sq_depth;
resp.max_rq_wr = dev_attr->max_rq_depth;
+ resp.max_rdma_size = dev_attr->max_rdma_size;
+
+ if (is_rdma_read_cap(dev))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
@@ -338,9 +240,9 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->active_speed = IB_SPEED_EDR;
props->active_width = IB_WIDTH_4X;
- props->max_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->active_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->max_msg_sz = dev->mtu;
+ props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->max_msg_sz = dev->dev_attr.mtu;
props->max_vl_num = 1;
return 0;
@@ -401,7 +303,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct efa_dev *dev = to_edev(ibdev);
- memcpy(gid->raw, dev->addr, sizeof(dev->addr));
+ memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
return 0;
}
@@ -485,8 +387,19 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
return efa_com_destroy_qp(&dev->edev, &params);
}
+static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
+ struct efa_qp *qp)
+{
+ rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
+}
+
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
+ struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct efa_ucontext, ibucontext);
struct efa_dev *dev = to_edev(ibqp->pd->device);
struct efa_qp *qp = to_eqp(ibqp);
int err;
@@ -505,61 +418,101 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
DMA_TO_DEVICE);
}
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
kfree(qp);
return 0;
}
+static struct rdma_user_mmap_entry*
+efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ u8 mmap_flag, u64 *offset)
+{
+ struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int err;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_flag = mmap_flag;
+
+ err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
+ length);
+ if (err) {
+ kfree(entry);
+ return NULL;
+ }
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
static int qp_mmap_entries_setup(struct efa_qp *qp,
struct efa_dev *dev,
struct efa_ucontext *ucontext,
struct efa_com_create_qp_params *params,
struct efa_ibv_create_qp_resp *resp)
{
- /*
- * Once an entry is inserted it might be mmapped, hence cannot be
- * cleaned up until dealloc_ucontext.
- */
- resp->sq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->sq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
+ size_t length;
+ u64 address;
+
+ address = dev->db_bar_addr + resp->sq_db_offset;
+ qp->sq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->sq_db_mmap_key);
+ if (!qp->sq_db_mmap_entry)
return -ENOMEM;
resp->sq_db_offset &= ~PAGE_MASK;
- resp->llq_desc_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->mem_bar_addr + resp->llq_desc_offset,
- PAGE_ALIGN(params->sq_ring_size_in_bytes +
- (resp->llq_desc_offset & ~PAGE_MASK)),
- EFA_MMAP_IO_WC);
- if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->mem_bar_addr + resp->llq_desc_offset;
+ length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
+ (resp->llq_desc_offset & ~PAGE_MASK));
+
+ qp->llq_desc_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, length,
+ EFA_MMAP_IO_WC,
+ &resp->llq_desc_mmap_key);
+ if (!qp->llq_desc_mmap_entry)
+ goto err_remove_mmap;
resp->llq_desc_offset &= ~PAGE_MASK;
if (qp->rq_size) {
- resp->rq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->rq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->db_bar_addr + resp->rq_db_offset;
+
+ qp->rq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, PAGE_SIZE,
+ EFA_MMAP_IO_NC,
+ &resp->rq_db_mmap_key);
+ if (!qp->rq_db_mmap_entry)
+ goto err_remove_mmap;
resp->rq_db_offset &= ~PAGE_MASK;
- resp->rq_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- virt_to_phys(qp->rq_cpu_addr),
- qp->rq_size, EFA_MMAP_DMA_PAGE);
- if (resp->rq_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = virt_to_phys(qp->rq_cpu_addr);
+ qp->rq_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, qp->rq_size,
+ EFA_MMAP_DMA_PAGE,
+ &resp->rq_mmap_key);
+ if (!qp->rq_mmap_entry)
+ goto err_remove_mmap;
resp->rq_mmap_size = qp->rq_size;
}
return 0;
+
+err_remove_mmap:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
+
+ return -ENOMEM;
}
static int efa_qp_validate_cap(struct efa_dev *dev,
@@ -634,7 +587,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_ibv_create_qp_resp resp = {};
struct efa_ibv_create_qp cmd = {};
- bool rq_entry_inserted = false;
struct efa_ucontext *ucontext;
struct efa_qp *qp;
int err;
@@ -742,7 +694,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
if (err)
goto err_destroy_qp;
- rq_entry_inserted = true;
qp->qp_handle = create_qp_resp.qp_handle;
qp->ibqp.qp_num = create_qp_resp.qp_num;
qp->ibqp.qp_type = init_attr->qp_type;
@@ -759,7 +710,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
ibdev_dbg(&dev->ibdev,
"Failed to copy udata for qp[%u]\n",
create_qp_resp.qp_num);
- goto err_destroy_qp;
+ goto err_remove_mmap_entries;
}
}
@@ -767,13 +718,16 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
return &qp->ibqp;
+err_remove_mmap_entries:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
if (qp->rq_size) {
dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
DMA_TO_DEVICE);
- if (!rq_entry_inserted)
+
+ if (!qp->rq_mmap_entry)
free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
}
err_free_qp:
@@ -897,16 +851,18 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
efa_destroy_cq_idx(dev, cq->cq_idx);
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
struct efa_ibv_create_cq_resp *resp)
{
resp->q_mmap_size = cq->size;
- resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
- virt_to_phys(cq->cpu_addr),
- cq->size, EFA_MMAP_DMA_PAGE);
- if (resp->q_mmap_key == EFA_MMAP_INVALID)
+ cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ virt_to_phys(cq->cpu_addr),
+ cq->size, EFA_MMAP_DMA_PAGE,
+ &resp->q_mmap_key);
+ if (!cq->mmap_entry)
return -ENOMEM;
return 0;
@@ -924,7 +880,6 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_dev *dev = to_edev(ibdev);
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
- bool cq_entry_inserted = false;
int entries = attr->cqe;
int err;
@@ -1013,15 +968,13 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_destroy_cq;
}
- cq_entry_inserted = true;
-
if (udata->outlen) {
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy udata for create_cq\n");
- goto err_destroy_cq;
+ goto err_remove_mmap;
}
}
@@ -1030,13 +983,16 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
+err_remove_mmap:
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
- if (!cq_entry_inserted)
+ if (!cq->mmap_entry)
free_pages_exact(cq->cpu_addr, cq->size);
+
err_out:
atomic64_inc(&dev->stats.sw_stats.create_cq_err);
return err;
@@ -1396,6 +1352,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct efa_com_reg_mr_params params = {};
struct efa_com_reg_mr_result result = {};
struct pbl_context pbl;
+ int supp_access_flags;
unsigned int pg_sz;
struct efa_mr *mr;
int inline_size;
@@ -1409,10 +1366,14 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) {
+ supp_access_flags =
+ IB_ACCESS_LOCAL_WRITE |
+ (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
+
+ if (access_flags & ~supp_access_flags) {
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
- access_flags, EFA_SUPPORTED_ACCESS_FLAGS);
+ access_flags, supp_access_flags);
err = -EOPNOTSUPP;
goto err_out;
}
@@ -1423,7 +1384,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
@@ -1434,7 +1395,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
params.mr_length_in_bytes = length;
- params.permissions = access_flags & 0x1;
+ params.permissions = access_flags;
pg_sz = ib_umem_find_best_pgsz(mr->umem,
dev->dev_attr.page_size_cap,
@@ -1556,7 +1517,6 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
goto err_out;
ucontext->uarn = result.uarn;
- xa_init(&ucontext->mmap_xa);
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
@@ -1585,38 +1545,56 @@ void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- mmap_entries_remove_free(dev, ucontext);
efa_dealloc_uar(dev, ucontext->uarn);
}
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ /* DMA mapping is already gone, now free the pages */
+ if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
+ free_pages_exact(phys_to_virt(entry->address),
+ entry->rdma_entry.npages * PAGE_SIZE);
+ kfree(entry);
+}
+
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
- struct vm_area_struct *vma, u64 key, u64 length)
+ struct vm_area_struct *vma)
{
- struct efa_mmap_entry *entry;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct efa_user_mmap_entry *entry;
unsigned long va;
+ int err = 0;
u64 pfn;
- int err;
- entry = mmap_entry_get(dev, ucontext, key, length);
- if (!entry) {
- ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
- key);
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev,
+ "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
+ entry = to_emmap(rdma_entry);
ibdev_dbg(&dev->ibdev,
- "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
- entry->address, length, entry->mmap_flag);
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag);
pfn = entry->address >> PAGE_SHIFT;
switch (entry->mmap_flag) {
case EFA_MMAP_IO_NC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_noncached(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_IO_WC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_writecombine(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_DMA_PAGE:
for (va = vma->vm_start; va < vma->vm_end;
@@ -1633,12 +1611,13 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
if (err) {
ibdev_dbg(
&dev->ibdev,
- "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
- entry->address, length, entry->mmap_flag, err);
- return err;
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag, err);
}
- return 0;
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
}
int efa_mmap(struct ib_ucontext *ibucontext,
@@ -1646,26 +1625,13 @@ int efa_mmap(struct ib_ucontext *ibucontext,
{
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- u64 length = vma->vm_end - vma->vm_start;
- u64 key = vma->vm_pgoff << PAGE_SHIFT;
+ size_t length = vma->vm_end - vma->vm_start;
ibdev_dbg(&dev->ibdev,
- "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
- vma->vm_start, vma->vm_end, length, key);
-
- if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
- ibdev_dbg(&dev->ibdev,
- "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
- length, PAGE_SIZE, vma->vm_flags);
- return -EINVAL;
- }
-
- if (vma->vm_flags & VM_EXEC) {
- ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
- return -EPERM;
- }
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- return __efa_mmap(dev, ucontext, vma, key, length);
+ return __efa_mmap(dev, ucontext, vma);
}
static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
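The efa_verbs.c conversion above retires the driver-private xarray of mmap keys in favour of the core rdma_user_mmap_entry machinery: entries are inserted when a queue is created, looked up by page offset in .mmap, refcounted for as long as a mapping exists, and finally freed through the new efa_mmap_free() hook. A minimal sketch of that producer/consumer pattern, with hypothetical foo_* names (only the rdma_user_mmap_* calls are the real core API):

	/* Sketch: driver-side state embedding the core's entry. */
	struct foo_mmap_entry {
		struct rdma_user_mmap_entry rdma_entry;
		u64 address;		/* pfn source for the later mmap */
	};

	static int foo_expose(struct ib_ucontext *uctx, u64 addr,
			      size_t length, u64 *offset)
	{
		struct foo_mmap_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
		int err;

		if (!e)
			return -ENOMEM;
		e->address = addr;
		err = rdma_user_mmap_entry_insert(uctx, &e->rdma_entry, length);
		if (err) {
			kfree(e);	/* not yet owned by the core */
			return err;
		}
		/* User space passes this offset back through mmap(2). */
		*offset = rdma_user_mmap_get_offset(&e->rdma_entry);
		return 0;
	}

In the .mmap handler, rdma_user_mmap_entry_get() resolves the vma's pgoff to an entry and takes a reference, the driver maps entry->address, and rdma_user_mmap_entry_put() drops the reference, exactly the shape of __efa_mmap() above. rdma_user_mmap_entry_remove() at teardown defers the final kfree() to the .mmap_free hook until the last mapping is gone.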
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 71cb9525c074..26b792bb1027 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
goto bail_dev;
}
- hfi1_compute_tid_rdma_flow_wt();
/*
* These must be called before the driver is registered with
* the PCI subsystem.
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index d8ff063a5419..a51bcd2b4391 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4915,16 +4915,11 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*/
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
- switch (in_mad->base_version) {
+ switch (in_mad->mad_hdr.base_version) {
case OPA_MGMT_BASE_VERSION:
- if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
- dev_err(ibdev->dev.parent, "invalid in_mad_size\n");
- return IB_MAD_RESULT_FAILURE;
- }
return hfi1_process_opa_mad(ibdev, mad_flags, port,
in_wc, in_grh,
(struct opa_mad *)in_mad,
@@ -4932,10 +4927,8 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
out_mad_size,
out_mad_pkey_index);
case IB_MGMT_BASE_VERSION:
- return hfi1_process_ib_mad(ibdev, mad_flags, port,
- in_wc, in_grh,
- (const struct ib_mad *)in_mad,
- (struct ib_mad *)out_mad);
+ return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
+ in_grh, in_mad, out_mad);
default:
break;
}
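The hfi1_process_mad() rework above leans on the MAD core now validating the buffer size and passing a fully typed struct ib_mad, which is why the driver-local in_mad_size check could be deleted. Because struct opa_mad (as used in this file) starts with the same struct ib_mad_hdr, dispatch reduces to reading the base version and casting, roughly (hypothetical process_* names):

	const struct ib_mad_hdr *hdr = &in_mad->mad_hdr;

	switch (hdr->base_version) {
	case OPA_MGMT_BASE_VERSION:
		/* Safe: opa_mad shares the leading ib_mad_hdr. */
		return process_opa((const struct opa_mad *)in_mad);
	case IB_MGMT_BASE_VERSION:
		return process_ib(in_mad);
	default:
		return IB_MAD_RESULT_FAILURE;
	}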
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61aa5504d7c3..61362bd6d3ce 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
/*
* bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
*/
- if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+ if (parent &&
+ (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+ dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
dd->link_gen3_capable = 0;
}
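The pcie_speeds() fix above matters because bus->max_bus_speed can legitimately be above Gen3: the old test, written as "anything other than PCIE_SPEED_8_0GT", would flag a Gen4 (16 GT/s) parent bridge as not Gen3-capable. The new condition only trips on speeds strictly below Gen3, equivalent to this sketch (hypothetical helper; enum pci_bus_speed is the stock one from linux/pci.h):

	/* Hypothetical helper: true only for pre-Gen3 link speeds. */
	static bool bus_speed_below_gen3(enum pci_bus_speed speed)
	{
		return speed == PCIE_SPEED_2_5GT ||	/* Gen1 */
		       speed == PCIE_SPEED_5_0GT;	/* Gen2 */
	}

so an 8 GT/s or faster parent leaves dd->link_gen3_capable alone.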
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index cbf7faa5038c..36593f2efe26 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -634,7 +634,7 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
u32 config_data, const char *message)
{
u8 i;
- int ret = HCMD_SUCCESS;
+ int ret;
for (i = 0; i < 4; i++) {
ret = load_8051_config(ppd->dd, field_id, i, config_data);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 513a8aac9ccd..1a3c647675a7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail_stop;
rdi = ib_to_rvt(qp->ibqp.device);
- if (qp->s_rnr_retry == 0 &&
- !((rdi->post_parms[wqe->wr.opcode].flags &
- RVT_OPERATION_IGN_RNR_CNT) &&
- qp->s_rnr_retry_cnt == 0)) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
+ if (!(rdi->post_parms[wqe->wr.opcode].flags &
+ RVT_OPERATION_IGN_RNR_CNT)) {
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+ qp->s_rnr_retry--;
}
- if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
- qp->s_rnr_retry--;
/*
* The last valid PSN is the previous PSN. For TID RDMA WRITE
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index c61b6022575e..5774dfc22e18 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -881,8 +881,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
cpu_id = smp_processor_id();
rcu_read_lock();
- rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
- sdma_rht_params);
+ rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
+ sdma_rht_params);
if (rht_node && rht_node->map[vl]) {
struct sdma_rht_map_elem *map = rht_node->map[vl];
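The sdma_select_user_engine() hunk above is correct-by-context: rhashtable_lookup_fast() wraps the lookup in its own rcu_read_lock()/rcu_read_unlock() pair, which is redundant here since the caller entered an RCU read-side section a few lines earlier. The calling convention, per the standard rhashtable API:

	rcu_read_lock();
	/* Already inside an RCU read-side section: use the bare lookup. */
	node = rhashtable_lookup(ht, &key, params);
	/* ... use node ... */
	rcu_read_unlock();

	/* No RCU section held: the _fast variant takes the lock itself. */
	node = rhashtable_lookup_fast(ht, &key, params);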
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index f21fca3617d5..e53f542b60af 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
* C - Capcode
*/
-static u32 tid_rdma_flow_wt;
-
static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
struct tid_rdma_flow *flow,
bool fecn);
+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ priv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+ validate_r_tid_ack(qp->priv);
+ tid_rdma_schedule_ack(qp);
+}
+
static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
return
@@ -3005,10 +3023,7 @@ nak_psn:
qpriv->s_nak_state = IB_NAK_PSN_ERROR;
/* We are NAK'ing the next expected PSN */
qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
- qpriv->r_tid_ack = qpriv->r_tid_tail;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
}
goto unlock;
}
@@ -3371,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
}
-void hfi1_compute_tid_rdma_flow_wt(void)
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
{
/*
* Heuristic for computing the RNR timeout when waiting on the flow
* queue. Rather than a computationally expensive exact estimate of when
* a flow will be available, we assume that if a QP is at position N in
* the flow queue it has to wait approximately (N + 1) * (number of
- * segments between two sync points), assuming PMTU of 4K. The rationale
- * for this is that flows are released and recycled at each sync point.
+ * segments between two sync points). The rationale for this is that
+ * flows are released and recycled at each sync point.
*/
- tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
- TID_RDMA_MAX_SEGMENT_SIZE;
+ return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
}
static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
@@ -3505,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
if (ret) {
- to_seg = tid_rdma_flow_wt *
+ to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
position_in_queue(qpriv,
&rcd->flow_queue);
break;
@@ -3526,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
/*
* If overtaking req->acked_tail, send an RNR NAK. Because the
* QP is not queued in this case, and the issue can only be
- * caused due a delay in scheduling the second leg which we
+ * caused by a delay in scheduling the second leg which we
* cannot estimate, we use a rather arbitrary RNR timeout of
* (MAX_FLOWS / 2) segments
*/
@@ -3534,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
MAX_FLOWS)) {
ret = -EAGAIN;
to_seg = MAX_FLOWS >> 1;
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
break;
}
@@ -4335,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
req);
trace_hfi1_tid_write_rsp_rcv_data(qp);
- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
- priv->r_tid_ack = priv->r_tid_tail;
+ validate_r_tid_ack(priv);
if (opcode == TID_OP(WRITE_DATA_LAST)) {
release_rdma_sge_mr(e);
@@ -4375,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
}
done:
- priv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_schedule_ack(qp);
exit:
priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
if (fecn)
@@ -4388,10 +4399,7 @@ send_nak:
if (!priv->s_nak_state) {
priv->s_nak_state = IB_NAK_PSN_ERROR;
priv->s_nak_psn = flow->flow_state.r_next_psn;
- priv->s_flags |= RVT_S_ACK_PENDING;
- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
- priv->r_tid_ack = priv->r_tid_tail;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
}
goto done;
}
@@ -4939,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
qpriv->resync = true;
/* RESYNC request always gets a TID RDMA ACK. */
qpriv->s_nak_state = 0;
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
bail:
if (fecn)
qp->s_flags |= RVT_S_ECN;
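Two things fall out of the tid_rdma.c hunks above: the repeated "set RVT_S_ACK_PENDING, validate r_tid_ack, schedule the TID send engine" sequence is folded into tid_rdma_trigger_ack()/tid_rdma_schedule_ack(), and the flow-wait heuristic is computed per QP from its real PMTU instead of a module-init constant that hard-coded 4 KiB. The shift form is a strength-reduced divide; a worked sketch of the arithmetic (TID_RDMA_SEGMENT_SHIFT == 18 per the header change below, values otherwise illustrative):

	/* A 256 KiB segment takes (256K / pmtu) packets, one PSN each, so
	 * (psns * pmtu) >> 18 is the number of whole segments that psns
	 * packet sequence numbers span at this QP's MTU:
	 *   pmtu == 4096 -> psns / 64, pmtu == 8192 -> psns / 32.
	 */
	static u32 flow_wt_sketch(u32 psns, u32 pmtu)
	{
		return (psns * pmtu) >> 18;
	}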
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 1c536185261e..6e82df2190b7 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -17,6 +17,7 @@
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT 18
/*
* Bit definitions for priv->s_flags.
@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
-void hfi1_compute_tid_rdma_flow_wt(void);
-
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index ae9582ddbc8f..b0e9bf7cd150 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -330,9 +330,8 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
/*
* The PSN_MASK and PSN_SHIFT allow for
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index d602b698b57e..4921c1e40ccd 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,23 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- bool "HNS RoCE Driver"
+ tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on (HNS_DSAF && HNS_ENET) || HNS3
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hip06 and later ICT SoCs based on the
platform device.
+ To compile the HIP06 or HIP08 driver as a module, choose M here.
+
config INFINIBAND_HNS_HIP06
- tristate "Hisilicon Hip06 Family RoCE support"
+ bool "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+ depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.
+ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+ module will be called hns-roce-hw-v1.
+
config INFINIBAND_HNS_HIP08
- tristate "Hisilicon Hip08 Family RoCE support"
+ bool "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
+ depends on INFINIBAND_HNS=m || HNS3=y
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
+
+ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+ module will be called hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 449a2d81319d..e105945b94a1 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
+ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
+endif
+ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 90e08c0c332d..8a522e14ef62 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -46,32 +46,32 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
- u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u16 vlan_id = 0xffff;
bool vlan_en = false;
int ret;
gid_attr = ah_attr->grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- if (vlan_tag < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
vlan_en = true;
- vlan_tag |= (rdma_ah_get_sl(ah_attr) &
+ vlan_id |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
}
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
- ah->av.vlan = vlan_tag;
+ ah->av.vlan_id = vlan_id;
ah->av.vlan_en = vlan_en;
- dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
- ah->av.vlan);
+ dev_dbg(dev, "gid_index = 0x%x,vlan_id = 0x%x\n", ah->av.gid_index,
+ ah->av.vlan_id);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 8c063c598d2a..da574c26e063 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -55,7 +55,7 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
bitmap->last = 0;
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
@@ -100,7 +100,7 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
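The -1 to -EINVAL change above is more than cosmetic: under the kernel's negative-errno convention a bare -1 is literally -EPERM, so callers either misreport the failure or must compare against the magic value, which is exactly the "ret == -1" test the hns_roce_alloc_cqc() hunk below deletes. Caller-side sketch (hypothetical alloc_obj()):

	int ret = alloc_obj(&obj);

	/* With -1 the only honest check was "ret == -1", and the caller
	 * had to invent an errno of its own to return. With a real errno
	 * the idiomatic form works and the cause propagates unchanged:
	 */
	if (ret)
		return ret;	/* -EINVAL straight from the allocator */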
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 2b6ac646ca9a..1915bacaded0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -115,12 +115,12 @@ enum {
enum {
/* TPT commands */
- HNS_ROCE_CMD_SW2HW_MPT = 0xd,
- HNS_ROCE_CMD_HW2SW_MPT = 0xf,
+ HNS_ROCE_CMD_CREATE_MPT = 0xd,
+ HNS_ROCE_CMD_DESTROY_MPT = 0xf,
/* CQ commands */
- HNS_ROCE_CMD_SW2HW_CQ = 0x16,
- HNS_ROCE_CMD_HW2SW_CQ = 0x17,
+ HNS_ROCE_CMD_CREATE_CQC = 0x16,
+ HNS_ROCE_CMD_DESTROY_CQC = 0x17,
/* QP/EE commands */
HNS_ROCE_CMD_RST2INIT_QP = 0x19,
@@ -129,14 +129,14 @@ enum {
HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
HNS_ROCE_CMD_2ERR_QP = 0x1e,
HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
- HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
HNS_ROCE_CMD_2RST_QP = 0x21,
HNS_ROCE_CMD_QUERY_QP = 0x22,
- HNS_ROCE_CMD_SW2HW_SRQ = 0x70,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_CREATE_SRQ = 0x70,
HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
HNS_ROCE_CMD_QUERY_SRQC = 0x73,
- HNS_ROCE_CMD_HW2SW_SRQ = 0x74,
+ HNS_ROCE_CMD_DESTROY_SRQ = 0x74,
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 22541d19cd09..af1d8823b3f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,8 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
- struct ib_cq *ibcq = &hr_cq->ib_cq;
-
- ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
- enum hns_roce_event event_type)
-{
- struct hns_roce_dev *hr_dev;
- struct ib_event event;
- struct ib_cq *ibcq;
-
- ibcq = &hr_cq->ib_cq;
- hr_dev = to_hr_dev(ibcq->device);
-
- if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(hr_dev->dev,
- "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
- event_type, hr_cq->cqn);
- return;
- }
-
- if (ibcq->event_handler) {
- event.device = ibcq->device;
- event.event = IB_EVENT_CQ_ERR;
- event.element.cq = ibcq;
- ibcq->event_handler(&event, ibcq->cq_context);
- }
-}
-
-static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
- struct hns_roce_mtt *hr_mtt,
- struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_table *mtt_table;
@@ -101,35 +58,32 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
else
mtt_table = &hr_dev->mr_table.mtt_table;
- mtts = hns_roce_table_find(hr_dev, mtt_table,
- hr_mtt->first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
- return -EINVAL;
- }
+ mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
+ &dma_handle);
- if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "CQ alloc.Invalid vector.\n");
+ if (!mtts) {
+ dev_err(dev, "Failed to find mtt for CQ buf.\n");
return -EINVAL;
}
- hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret == -1) {
- dev_err(dev, "CQ alloc.Failed to alloc index.\n");
- return -ENOMEM;
+ if (ret) {
+ dev_err(dev, "Num of CQ out of range.\n");
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ dev_err(dev,
+ "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "CQ alloc failed xa_store.\n");
+ dev_err(dev, "Failed to xa_store CQ.\n");
goto err_put;
}
@@ -140,14 +94,16 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
goto err_xa;
}
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
- nent, vector);
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
/* Send mailbox to hw */
- ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+ HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ dev_err(dev,
+ "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_xa;
}
@@ -170,24 +126,17 @@ err_out:
return ret;
}
-static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_DESTROY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
xa_erase(&cq_table->array, hr_cq->cqn);
@@ -204,103 +153,91 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
- struct ib_udata *udata,
- struct hns_roce_cq_buf *buf,
- struct ib_umem **umem, u64 buf_addr, int cqe)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct hns_roce_ib_create_cq ucmd,
+ struct ib_udata *udata)
{
- int ret;
- u32 page_shift;
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
+ struct ib_umem **umem = &hr_cq->umem;
u32 npages;
+ int ret;
- *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
- if (hr_dev->caps.cqe_buf_pg_sz) {
- npages = (ib_umem_page_count(*umem) +
- (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.cqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
- &buf->hr_mtt);
- } else {
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- PAGE_SHIFT, &buf->hr_mtt);
- }
+ mtt->mtt_type = MTT_TYPE_WQE;
+
+ npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
+ 1 << hr_dev->caps.cqe_buf_pg_sz);
+ ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
ib_umem_release(*umem);
return ret;
}
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, u32 nent)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
int ret;
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- (1 << page_shift) * 2, &buf->hr_buf,
- page_shift);
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
+ buf, buf->page_shift);
if (ret)
goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+ mtt->mtt_type = MTT_TYPE_WQE;
- ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
- buf->hr_buf.page_shift, &buf->hr_mtt);
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
- hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+
out:
return ret;
}
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, int cqe)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp,
- int cq_entries)
+ struct hns_roce_ib_create_cq_resp *resp)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
@@ -314,9 +251,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
}
/* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
- &hr_cq->umem, ucmd.buf_addr,
- cq_entries);
+ ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
@@ -337,17 +272,16 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, int cq_entries)
+ struct hns_roce_cq *hr_cq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_uar *uar;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
@@ -361,15 +295,14 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
}
/* Init mtt table and write buff address to mtt table */
- ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+ ret = alloc_cq_buf(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
- uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * uar->index;
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
return 0;
@@ -392,64 +325,69 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(context, &hr_cq->db);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct device *dev = hr_dev->dev;
int vector = attr->comp_vector;
- int cq_entries = attr->cqe;
+ u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
- if (hr_dev->caps.min_cqes)
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
- cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1;
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+ hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+ ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
if (ret) {
dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq;
}
} else {
- ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+ ret = create_kernel_cq(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Create cq failed in kernel mode!\n");
goto err_cq;
}
}
- /* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- hr_cq, vector);
+ ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
if (ret) {
- dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ dev_err(dev, "Alloc CQ failed(%d).\n", ret);
goto err_dbmap;
}
@@ -462,11 +400,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
- /* Get created cq handler and carry out event */
- hr_cq->comp = hns_roce_ib_cq_comp;
- hr_cq->event = hns_roce_ib_cq_event;
- hr_cq->cq_depth = cq_entries;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -477,7 +410,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
return 0;
err_cqc:
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
err_dbmap:
if (udata)
@@ -489,7 +422,7 @@ err_cq:
return ret;
}
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -499,8 +432,8 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
return;
}
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_free_cqc(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (udata) {
@@ -512,7 +445,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
&hr_cq->db);
} else {
/* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
@@ -520,38 +453,57 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *ibcq;
- cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
- if (!cq) {
- dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ cqn);
return;
}
- ++cq->arm_sn;
- cq->comp(cq);
+ ++hr_cq->arm_sn;
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->comp_handler)
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
- struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_event event;
+ struct ib_cq *ibcq;
- cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
- if (!cq) {
- dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ event_type, cqn);
return;
}
- cq->event(cq, (enum hns_roce_event)event_type);
+ atomic_inc(&hr_cq->refcount);
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
}
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index c00714c2f16a..10af6958ab69 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
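Every ib_umem_get() call touched in this series drops the same trailing argument (the old dmasync flag), here passing 0 for access as before. The prototype these hunks are written against is, as far as the diff shows:

	struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
				    size_t size, int access);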
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 96d1302abde1..5617434cbfb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,7 +45,7 @@
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
-#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
+#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
@@ -53,8 +53,6 @@
#define BA_BYTE_LEN 8
-#define BITS_PER_BYTE 8
-
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
@@ -426,7 +424,6 @@ struct hns_roce_wq {
u64 *wrid; /* Work request ID */
spinlock_t lock;
int wqe_cnt; /* WQE num */
- u32 max_post;
int max_gs;
int offset;
int wqe_shift; /* WQE size */
@@ -451,6 +448,7 @@ struct hns_roce_buf {
struct hns_roce_buf_list *page_list;
int nbufs;
u32 npages;
+ u32 size;
int page_shift;
};
@@ -482,22 +480,14 @@ struct hns_roce_db {
int order;
};
-struct hns_roce_cq_buf {
- struct hns_roce_buf hr_buf;
- struct hns_roce_mtt hr_mtt;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_cq_buf hr_buf;
+ struct hns_roce_buf buf;
+ struct hns_roce_mtt mtt;
struct hns_roce_db db;
u8 db_en;
spinlock_t lock;
struct ib_umem *umem;
- void (*comp)(struct hns_roce_cq *cq);
- void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);
-
- struct hns_roce_uar *uar;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -521,9 +511,8 @@ struct hns_roce_idx_que {
struct hns_roce_srq {
struct ib_srq ibsrq;
- void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
unsigned long srqn;
- int max;
+ u32 wqe_cnt;
int max_gs;
int wqe_shift;
void __iomem *db_reg_l;
@@ -539,8 +528,8 @@ struct hns_roce_srq {
spinlock_t lock;
int head;
int tail;
- u16 wqe_ctr;
struct mutex mutex;
+ void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
struct hns_roce_uar_table {
@@ -582,7 +571,7 @@ struct hns_roce_av {
u8 tclass;
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN];
- u16 vlan;
+ u16 vlan_id;
bool vlan_en;
};
@@ -695,10 +684,6 @@ struct hns_roce_qp {
struct hns_roce_rinl_buf rq_inl_buf;
};
-struct hns_roce_sqp {
- struct hns_roce_qp hr_qp;
-};
-
struct hns_roce_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
@@ -821,8 +806,8 @@ struct hns_roce_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int num_cqs;
- int max_cqes;
- int min_cqes;
+ u32 max_cqes;
+ u32 min_cqes;
u32 min_wqes;
int reserved_cqs;
int reserved_srqs;
@@ -953,7 +938,7 @@ struct hns_roce_hw {
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
- dma_addr_t dma_handle, int nent, u32 vector);
+ dma_addr_t dma_handle);
int (*set_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
@@ -1092,11 +1077,6 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
-static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
-{
- return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
-}
-
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
__raw_writeq(*(u64 *) val, dest);
@@ -1198,9 +1178,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index);
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
@@ -1257,12 +1237,11 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 86783276fb1f..3bb8f78fb7b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -59,7 +59,7 @@ enum {
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
- (sizeof(struct scatterlist)))
+ (sizeof(struct scatterlist) + sizeof(void *)))
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
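The HNS_ROCE_HEM_CHUNK_LEN fix above exists because a chunk carries a void * per entry alongside its scatterlist array, so each slot really costs sizeof(struct scatterlist) + sizeof(void *) out of the 256-byte budget; dividing by the scatterlist size alone over-counts. A worked example, assuming a typical 64-bit build without scatterlist debugging (all sizes config-dependent):

	/* budget  = 256 - sizeof(struct list_head) - 2 * sizeof(int)
	 *         = 256 - 16 - 8                   = 232 bytes
	 * old len = 232 / 32                       = 7 entries
	 *           7 * (32 + 8)                   = 280 > 232, overflow
	 * new len = 232 / (32 + 8)                 = 5 entries
	 *           5 * (32 + 8)                   = 200 <= 232, fits
	 */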
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5f74bf55f471..2a2b2112f886 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -732,7 +732,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
if (!cq)
return -ENOMEM;
- ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
+ ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
if (ret) {
dev_err(dev, "Create cq for reserved loop qp failed!");
goto alloc_cq_failed;
@@ -868,7 +868,7 @@ alloc_pd_failed:
kfree(pd);
alloc_mem_failed:
- hns_roce_ib_destroy_cq(cq, NULL);
+ hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
kfree(cq);
return ret;
@@ -897,7 +897,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
i, ret);
}
- hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
+ hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
kfree(&free_mr->mr_free_cq->ib_cq);
hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
kfree(&free_mr->mr_free_pd->ibpd);
@@ -1114,9 +1114,10 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
free_mr = &priv->free_mr;
if (mr->enabled) {
- if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "HW2SW_MPT failed!\n");
+ if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1)))
+ dev_warn(dev, "DESTROY_MPT failed!\n");
}
mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
@@ -1979,8 +1980,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -1989,7 +1989,7 @@ static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
/* Get cqe when the owner bit is opposite to the MSB of cons_idx */
return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
}
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
@@ -2072,8 +2072,7 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_cq_context *cq_context = NULL;
struct hns_roce_buf_list *tptr_buf;
@@ -2108,9 +2107,9 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_12,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2((unsigned int)nent));
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+ CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
@@ -3644,10 +3643,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- kfree(hr_qp);
- else
- kfree(hr_to_hr_sqp(hr_qp));
+ kfree(hr_qp);
return 0;
}
@@ -3658,10 +3654,9 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct device *dev = &hr_dev->pdev->dev;
u32 cqe_cnt_ori;
u32 cqe_cnt_cur;
- u32 cq_buf_size;
int wait_time = 0;
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
@@ -3686,13 +3681,12 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
wait_time++;
}
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (!udata) {
/* Free the buff of stored cq */
- cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
- hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
}
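The get_sw_cqe() rewrites in both hw_v1.c above and hw_v2.c below swap "(ib_cq.cqe + 1)" for the equivalent cq_depth now that the depth is stored explicitly; the test itself is the usual ring-ownership trick for a power-of-two ring. A sketch:

	/* Hardware toggles the CQE owner bit on every wrap of the ring,
	 * so software compares it against the wrap parity of its consumer
	 * index, which for a power-of-two depth is just the cq_depth bit:
	 */
	static bool cqe_is_new(u32 owner_bit, u32 ci, u32 cq_depth)
	{
		return owner_bit ^ !!(ci & cq_depth);
	}
	/* cq_depth == 8: ci 0..7 -> parity 0, ci 8..15 -> parity 1, ... */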
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index e82567fcdeb7..cb8071a3e0d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -389,7 +389,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_VLAN_M,
V2_UD_SEND_WQE_BYTE_36_VLAN_S,
- le16_to_cpu(ah->av.vlan));
+ ah->av.vlan_id);
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
@@ -2447,8 +2447,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2457,7 +2456,7 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
/* Get cqe when the owner bit is opposite to the MSB of cons_idx */
return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
@@ -2550,8 +2549,7 @@ static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_v2_cq_context *cq_context;
@@ -2563,9 +2561,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
- V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+ V2_CQC_BYTE_4_SHIFT_S,
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
- V2_CQC_BYTE_4_CEQN_S, vector);
+ V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
@@ -4061,8 +4060,8 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
+ u16 vlan_id = 0xffff;
bool is_udp = false;
- u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
int ret;
@@ -4074,7 +4073,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
@@ -4083,7 +4082,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
IB_GID_TYPE_ROCE_UDP_ENCAP);
}
- if (vlan < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
@@ -4095,7 +4094,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
@@ -4650,16 +4649,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
{
struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
- int ret;
+ int ret = 0;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
- if (ret) {
+ if (ret)
ibdev_err(ibdev, "modify QP to Reset failed.\n");
- return ret;
- }
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
@@ -4715,7 +4712,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
kfree(hr_qp->rq_inl_buf.wqe_list);
}
- return 0;
+ return ret;
}
static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
@@ -4725,16 +4722,11 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
int ret;
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
- if (ret) {
+ if (ret)
ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
- return ret;
- }
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- kfree(hr_to_hr_sqp(hr_qp));
- else
- kfree(hr_qp);
+ kfree(hr_qp);
return 0;
}
@@ -4951,10 +4943,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- __le32 doorbell[2];
-
- doorbell[0] = 0;
- doorbell[1] = 0;
+ __le32 doorbell[2] = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
@@ -6047,7 +6036,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
hr_dev->caps.srqwqe_hop_num));
roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->max));
+ ilog2(srq->wqe_cnt));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
SRQC_BYTE_4_SRQN_S, srq->srqn);
@@ -6092,11 +6081,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz);
+ hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz);
+ hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
srq_context->idx_nxt_blk_addr =
cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
@@ -6133,7 +6122,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->max)
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -6193,7 +6182,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
- attr->max_wr = srq->max - 1;
+ attr->max_wr = srq->wqe_cnt - 1;
attr->max_sge = srq->max_gs;
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
@@ -6246,7 +6235,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
spin_lock_irqsave(&srq->lock, flags);
- ind = srq->head & (srq->max - 1);
+ ind = srq->head & (srq->wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) {
@@ -6261,7 +6250,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
break;
}
- wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
if (wqe_idx < 0) {
ret = -ENOMEM;
*bad_wr = wr;
@@ -6285,7 +6274,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->max - 1);
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
}
if (likely(nreq)) {
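
Because srq->wqe_cnt is rounded up to a power of two at create time, the "& (wqe_cnt - 1)" above is a cheap modulo for ring-index wraparound. A standalone demo of the idiom (hypothetical sizes, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int wqe_cnt = 16;	/* always a power of two here */
	unsigned int ind = 14;
	int i;

	for (i = 0; i < 4; i++) {
		printf("%u ", ind);
		ind = (ind + 1) & (wqe_cnt - 1);	/* prints 14 15 0 1 */
	}
	printf("\n");
	return 0;
}
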
@@ -6380,12 +6369,14 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
-static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int i;
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
@@ -6410,8 +6401,6 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
priv->handle = handle;
-
- return 0;
}
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
@@ -6429,14 +6418,7 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_kzalloc;
}
- hr_dev->pci_dev = handle->pdev;
- hr_dev->dev = &handle->pdev->dev;
-
- ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
- if (ret) {
- dev_err(hr_dev->dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
+ hns_roce_hw_v2_get_cfg(hr_dev, handle);
ret = hns_roce_init(hr_dev);
if (ret) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 43219d2f7de0..76a14db7028d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -87,8 +87,8 @@
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
-#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
-#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b5d196c119ee..854ef6e74788 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -111,7 +111,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "port(%d) can't find netdev\n", port);
+ dev_err(dev, "Can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -253,7 +253,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "find netdev %d failed!\r\n", port);
+ dev_err(dev, "Find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -301,12 +301,6 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}
-static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -359,7 +353,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
/* vm_pgoff: 1 -- TPTR */
case 1:
@@ -372,7 +367,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
hr_dev->tptr_size,
- vma->vm_page_prot);
+ vma->vm_page_prot,
+ NULL);
default:
return -EINVAL;
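
These two mmap hunks only adapt to a core API change: rdma_user_mmap_io() grew a trailing struct rdma_user_mmap_entry * parameter, and drivers that do not track mmap entries pass NULL. A compile-only sketch of the new call shape; every declaration below is a stub, not the real kernel header:

#include <stddef.h>

struct ib_ucontext;
struct vm_area_struct;
struct rdma_user_mmap_entry;
typedef unsigned long pgprot_stub_t;	/* stand-in for pgprot_t */

/* stubbed signature and dummy body; the real one lives in ib_core */
static int rdma_user_mmap_io(struct ib_ucontext *ucontext,
			     struct vm_area_struct *vma, unsigned long pfn,
			     unsigned long size, pgprot_stub_t prot,
			     struct rdma_user_mmap_entry *entry)
{
	(void)ucontext; (void)vma; (void)pfn;
	(void)size; (void)prot; (void)entry;
	return 0;
}

static int map_uar(struct ib_ucontext *ctx, struct vm_area_struct *vma,
		   unsigned long pfn, pgprot_stub_t prot)
{
	return rdma_user_mmap_io(ctx, vma, pfn, 4096, prot,
				 NULL /* no mmap entry tracking */);
}

int main(void)
{
	return map_uar(NULL, NULL, 0x1000, 0);
}
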
@@ -423,14 +419,14 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
.create_ah = hns_roce_create_ah,
- .create_cq = hns_roce_ib_create_cq,
+ .create_cq = hns_roce_create_cq,
.create_qp = hns_roce_create_qp,
.dealloc_pd = hns_roce_dealloc_pd,
.dealloc_ucontext = hns_roce_dealloc_ucontext,
.del_gid = hns_roce_del_gid,
.dereg_mr = hns_roce_dereg_mr,
.destroy_ah = hns_roce_destroy_ah,
- .destroy_cq = hns_roce_ib_destroy_cq,
+ .destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
.fill_res_entry = hns_roce_fill_res_entry,
.get_dma_mr = hns_roce_get_dma_mr,
@@ -438,7 +434,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
.modify_device = hns_roce_modify_device,
- .modify_port = hns_roce_modify_port,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
.query_device = hns_roce_query_device,
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 5f8416ba09a9..9ad19170c3f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -48,21 +48,21 @@ unsigned long key_to_hw_index(u32 key)
return (key << 24) | (key >> 8);
}
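
key_to_hw_index() above is an 8-bit right rotation; under the assumption that keys are minted by rotating the hardware index left by 8 bits, the round trip looks like this (standalone sketch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t key_to_hw_index_demo(uint32_t key)
{
	return (key << 24) | (key >> 8);	/* rotate right by 8 */
}

static uint32_t hw_index_to_key_demo(uint32_t ind)
{
	return (ind << 8) | (ind >> 24);	/* inverse rotation */
}

int main(void)
{
	uint32_t index = 0x00012345;
	uint32_t key = hw_index_to_key_demo(index);

	printf("key=0x%08" PRIx32 " index=0x%08" PRIx32 "\n",
	       key, key_to_hw_index_demo(key));
	return 0;
}
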
-static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
- HNS_ROCE_CMD_SW2HW_MPT,
+ HNS_ROCE_CMD_CREATE_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
- mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
+ mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -83,7 +83,7 @@ static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
}
}
spin_unlock(&buddy->lock);
- return -1;
+ return -EINVAL;
found:
clear_bit(*seg, buddy->bits[o]);
@@ -206,13 +206,14 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
}
ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret == -1)
- return -1;
+ if (ret)
+ return ret;
- if (hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1)) {
+ ret = hns_roce_table_get_range(hr_dev, table, *seg,
+ *seg + (1 << order) - 1);
+ if (ret) {
hns_roce_buddy_free(buddy, *seg, order);
- return -1;
+ return ret;
}
return 0;
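
This hunk and the bitmap-allocation hunks below move from "return -1 on failure" to the usual 0-or-negative-errno convention, letting callers test if (ret) and pass the real reason up. A standalone sketch of the pattern with a stub allocator:

#include <errno.h>
#include <stdio.h>

static int buddy_alloc_stub(int order, unsigned long *seg)
{
	if (order > 4)
		return -EINVAL;	/* a real errno, not a bare -1 */
	*seg = 0;
	return 0;
}

static int alloc_range(int order)
{
	unsigned long seg;
	int ret = buddy_alloc_stub(order, &seg);

	if (ret)
		return ret;	/* propagate the errno unchanged */
	return 0;
}

int main(void)
{
	printf("%d %d\n", alloc_range(2), alloc_range(8));	/* 0 -22 */
	return 0;
}
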
@@ -578,7 +579,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
mr->iova = iova; /* MR va starting addr */
@@ -707,10 +708,11 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
int ret;
if (mr->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
}
if (mr->size != ~0ULL) {
@@ -763,10 +765,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
@@ -1143,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
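
The many ib_umem_get() hunks in this patch are mechanical: the trailing dmasync flag was removed from the core API, so callers pass only (udata, addr, size, access). A compile-only sketch of the new shape; the declarations are stubs, not the real <rdma/ib_umem.h>:

#include <stddef.h>

struct ib_udata;
struct ib_umem;

/* stubbed new signature: no 5th "dmasync" argument */
static struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
				   unsigned long size, int access)
{
	(void)udata; (void)addr; (void)size; (void)access;
	return NULL;	/* dummy body so the sketch links */
}

static struct ib_umem *reg_buf(struct ib_udata *udata, unsigned long addr,
			       unsigned long size)
{
	return ib_umem_get(udata, addr, size, 0);
}

int main(void)
{
	(void)reg_buf(NULL, 0x1000, 4096);
	return 0;
}
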
@@ -1228,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
@@ -1308,9 +1310,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
if (ret)
goto free_cmd_mbox;
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
mr->enabled = 0;
@@ -1332,9 +1334,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
goto free_cmd_mbox;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
ib_umem_release(mr->umem);
goto free_cmd_mbox;
}
@@ -1448,10 +1450,11 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
int ret;
if (mw->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mw->rkey) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mw->rkey));
@@ -1487,10 +1490,10 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 912b89b4da34..780c780fdb22 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -96,7 +96,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
/* Using a bitmap to manage the UAR index */
ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index bd78ff90d998..a6565b674801 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -318,7 +318,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
* hr_qp->rq.max_gs);
}
- cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+ cap->max_recv_wr = hr_qp->rq.wqe_cnt;
cap->max_recv_sge = hr_qp->rq.max_gs;
return 0;
@@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
u8 max_sq_stride = ilog2(roundup_sq_stride);
/* Sanity check SQ size before proceeding */
- if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
- ucmd->log_sq_stride > max_sq_stride ||
- ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ if (ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
return -EINVAL;
}
@@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
u32 max_cnt;
int ret;
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
+ hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ return -EINVAL;
+
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
return ret;
}
- hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
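
The new guard rejects a user-controlled shift before it can overflow: check_shl_overflow() (from <linux/overflow.h>) fails if 1 << log_sq_bb_count does not fit the destination type. A userspace sketch with a hand-rolled stand-in for the macro:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool shl_overflows_u32(uint32_t val, unsigned int shift, uint32_t *res)
{
	if (shift >= 32)
		return true;
	if (shift && (val >> (32 - shift)))
		return true;	/* high bits would be shifted out */
	*res = val << shift;
	return false;
}

int main(void)
{
	uint32_t wqe_cnt;
	unsigned int log_sq_bb_count = 40;	/* hostile user input */

	if (shl_overflows_u32(1, log_sq_bb_count, &wqe_cnt)) {
		fprintf(stderr, "rejected: shift overflows u32\n");
		return 1;
	}
	printf("wqe_cnt=%u\n", wqe_cnt);
	return 0;
}
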
@@ -391,37 +393,37 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size; SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (ex_sge_num) {
- hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
hr_qp->rq.offset = hr_qp->sge.offset +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift),
page_size);
} else {
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
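
Beyond the ALOGN -> ALIGN spelling fix, the layout math here packs page-aligned SQ, SGE and RQ regions back to back. A standalone sketch of the two-region case with assumed sizes:

#include <stdio.h>

#define ALIGN_UP(a, b) (((a) + (b) - 1) / (b) * (b))

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long sq_bytes = 256UL << 6;	/* wqe_cnt << wqe_shift */
	unsigned long rq_bytes = 128UL << 5;

	unsigned long sq_off = 0;
	unsigned long rq_off = ALIGN_UP(sq_bytes, page_size);
	unsigned long buff_size = rq_off + ALIGN_UP(rq_bytes, page_size);

	printf("sq@%lu rq@%lu total=%lu\n", sq_off, rq_off, buff_size);
	return 0;
}
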
@@ -591,24 +593,24 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+ size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
- size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+ size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
}
hr_qp->rq.offset = size;
- size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+ size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
page_size);
hr_qp->buff_size = size;
/* Get the number of send WRs and SGEs */
- cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+ cap->max_send_wr = hr_qp->sq.wqe_cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -743,7 +745,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
- hr_qp->buff_size, 0, 0);
+ hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
@@ -1017,7 +1019,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -1030,7 +1031,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp);
if (ret) {
- ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
+ ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp);
return ERR_PTR(ret);
@@ -1047,11 +1048,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
- if (!hr_sqp)
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
return ERR_PTR(-ENOMEM);
- hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1066,7 +1066,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
ibdev_err(ibdev, "Create GSI QP failed!\n");
- kfree(hr_sqp);
+ kfree(hr_qp);
return ERR_PTR(ret);
}
@@ -1289,7 +1289,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
u32 cur;
cur = hr_wq->head - hr_wq->tail;
- if (likely(cur + nreq < hr_wq->max_post))
+ if (likely(cur + nreq < hr_wq->wqe_cnt))
return false;
hr_cq = to_hr_cq(ib_cq);
@@ -1297,7 +1297,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
cur = hr_wq->head - hr_wq->tail;
spin_unlock(&hr_cq->lock);
- return cur + nreq >= hr_wq->max_post;
+ return cur + nreq >= hr_wq->wqe_cnt;
}
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 0a31d0a3d657..06871731ac43 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -98,11 +98,15 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr)
+ if (!table_attr) {
+ ret = -EMSGSIZE;
goto err;
+ }
- if (hns_roce_fill_cq(msg, context))
+ if (hns_roce_fill_cq(msg, context)) {
+ ret = -EMSGSIZE;
goto err_cancel_table;
+ }
nla_nest_end(msg, table_attr);
kfree(context);
@@ -113,7 +117,7 @@ err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
kfree(context);
- return -EMSGSIZE;
+ return ret;
}
int hns_roce_fill_res_entry(struct sk_buff *msg,
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 9591457eb768..7113ebfdb4f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -59,21 +59,21 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
}
}
-static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
- HNS_ROCE_CMD_SW2HW_SRQ,
+ HNS_ROCE_CMD_CREATE_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
+ mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -95,8 +95,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
srq->mtt.first_seg,
&dma_handle_wqe);
if (!mtts_wqe) {
- dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find srq buf addr.\n");
+ dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
return -EINVAL;
}
@@ -106,13 +105,14 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
&dma_handle_idx);
if (!mtts_idx) {
dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find idx que buf addr.\n");
+ "Failed to find mtt for srq idx queue buf.\n");
return -EINVAL;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
- if (ret == -1) {
- dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Failed to alloc a bit from srq bitmap.\n");
return -ENOMEM;
}
@@ -134,7 +134,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
mtts_wqe, mtts_idx, dma_handle_wqe,
dma_handle_idx);
- ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
+ ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
goto err_xa;
@@ -160,9 +160,9 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
- ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
+ ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
if (ret)
- dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
+ dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
xa_erase(&srq_table->xa, srq->srqn);
@@ -180,22 +180,23 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_ib_create_srq ucmd;
- u32 page_shift;
- u32 npages;
+ struct hns_roce_buf *buf;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
- npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
+ buf = &srq->buf;
+ buf->npages = (ib_umem_page_count(srq->umem) +
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->mtt);
if (ret)
goto err_user_buf;
@@ -205,16 +206,19 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
- srq->idx_que.buf_size, 0, 0);
+ srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_user_srq_mtt;
}
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
- PAGE_SHIFT, &srq->idx_que.mtt);
-
+ buf = &srq->idx_que.idx_buf;
+ buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
+ 1 << hr_dev->caps.idx_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->idx_que.mtt);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
@@ -251,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
@@ -277,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
return -ENOMEM;
srq->head = 0;
- srq->tail = srq->max - 1;
+ srq->tail = srq->wqe_cnt - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
@@ -308,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
if (ret)
goto err_kernel_idx_buf;
- srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
@@ -354,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
- struct ib_srq_init_attr *srq_init_attr,
+ struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
@@ -366,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
- if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
- srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
+ if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+ init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
- srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
- srq->max_gs = srq_init_attr->attr.max_sge;
+ srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->max_gs = init_attr->attr.max_sge;
- srq_desc_size = max(16, 16 * srq->max_gs);
+ srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
- srq_buf_size = srq->max * srq_desc_size;
+ srq_buf_size = srq->wqe_cnt * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
+ srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
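
Rounding srq_desc_size up to a power of two before ilog2() matters because ilog2() truncates: with max_gs = 3 the raw size is 48 bytes, ilog2(48) = 5 would imply 32-byte strides and overlapping WQEs, while rounding to 64 first keeps wqe_shift consistent with the allocation. Standalone sketch with portable stand-ins for the kernel helpers:

#include <stdio.h>

static unsigned int ilog2_demo(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int roundup_pow_of_two_demo(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int raw = 16 * 3;			/* max_gs = 3 */
	unsigned int rounded = roundup_pow_of_two_demo(raw);

	printf("ilog2(%u)=%u ilog2(%u)=%u\n",
	       raw, ilog2_demo(raw), rounded, ilog2_demo(rounded));
	return 0;
}
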
@@ -401,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
- to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
+ cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
@@ -449,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
} else {
kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
+ hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
&srq->buf);
}
ib_umem_release(srq->idx_que.umem);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 2d6a378e8560..bb78d3280acc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2079,9 +2079,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
if (!dst || dst->error) {
if (dst) {
- dst_release(dst);
i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return rc;
}
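
The i40iw hunk is a use-after-free fix: dst->error was logged after dst_release() could have dropped the last reference. Generic sketch of the corrected ordering, read from the object first, then release it:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
	int error;
};

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0)
		free(o);	/* o must not be touched after this */
}

static void report_and_put(struct obj *o)
{
	fprintf(stderr, "error = %d\n", o->error);	/* read first... */
	obj_put(o);					/* ...then release */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->refcnt = 1;
	o->error = -22;
	report_and_put(o);
	return 0;
}
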
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index cd9ee1664a69..86375947bc67 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1763,7 +1763,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc, 0);
+ region = ib_umem_get(udata, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a7d238d312f0..306b21281fa2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -145,7 +145,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int n;
*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 0f390351cef0..714f9df5bf39 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,7 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57079110af9b..abe68708d6d6 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -966,7 +966,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
mutex_unlock(&dev->counters_table[port_num - 1].mutex);
if (stats_avail) {
- memset(out_mad->data, 0, sizeof out_mad->data);
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(&counter_stats,
@@ -984,38 +983,31 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
/* iboe_process_mad(), which uses the HCA flow counters to implement IB PMA
* queries, should be called only by VFs and for that specific purpose
*/
if (link == IB_LINK_LAYER_INFINIBAND) {
if (mlx4_is_slave(dev->dev) &&
- (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
- in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
- in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+ in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+ in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+ return iboe_process_mad(ibdev, mad_flags, port_num,
+ in_wc, in_grh, in, out);
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in, out);
}
if (link == IB_LINK_LAYER_ETHERNET)
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ in_grh, in, out);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2f1e38b891..0b5dc1d5928f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -256,6 +256,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
int hw_update = 0;
int i;
struct gid_entry *gids = NULL;
+ u16 vlan_id = 0xffff;
+ u8 mac[ETH_ALEN];
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -266,12 +268,16 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
if (!context)
return -EINVAL;
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
if (!memcmp(&port_gid_table->gids[i].gid,
&attr->gid, sizeof(attr->gid)) &&
- port_gid_table->gids[i].gid_type == attr->gid_type) {
+ port_gid_table->gids[i].gid_type == attr->gid_type &&
+ port_gid_table->gids[i].vlan_id == vlan_id) {
found = i;
break;
}
@@ -291,6 +297,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
memcpy(&port_gid_table->gids[free].gid,
&attr->gid, sizeof(attr->gid));
port_gid_table->gids[free].gid_type = attr->gid_type;
+ port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
hw_update = 1;
@@ -1146,7 +1153,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return rdma_user_mmap_io(context, vma,
to_mucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case 1:
if (dev->dev->caps.bf_reg_size == 0)
@@ -1155,7 +1163,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
context, vma,
to_mucontext(context)->uar.pfn +
dev->dev->caps.num_uars,
- PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+ NULL);
case 3: {
struct mlx4_clock_params params;
@@ -1171,7 +1180,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
params.bar) +
params.offset) >>
PAGE_SHIFT,
- PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+ NULL);
}
default:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index eb53bb4c0c91..d188573187fa 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -508,6 +508,7 @@ struct gid_entry {
union ib_gid gid;
enum ib_gid_type gid_type;
struct gid_cache_context *ctx;
+ u16 vlan_id;
};
struct mlx4_port_gid_table {
@@ -786,11 +787,10 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 6ae503cfc526..dfa17bcdcdbc 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags, 0);
+ return ib_umem_get(udata, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bd4aa04416c6..85f57b76e446 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,8 +1110,7 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem =
- ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 848db7264cc9..8dcf6e3d9ae2 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,7 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 9924be8384d8..d0a043ccbe58 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o
+ cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 45f48cde6b9d..dd8d24ee8e1d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -423,9 +423,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
- struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
@@ -519,27 +516,29 @@ repoll:
}
}
break;
- case MLX5_CQE_SIG_ERR:
- sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+ case MLX5_CQE_SIG_ERR: {
+ struct mlx5_sig_err_cqe *sig_err_cqe =
+ (struct mlx5_sig_err_cqe *)cqe64;
+ struct mlx5_core_sig_ctx *sig;
- xa_lock(&dev->mdev->priv.mkey_table);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ xa_lock(&dev->sig_mrs);
+ sig = xa_load(&dev->sig_mrs,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- mr = to_mibmr(mmkey);
- get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
- mr->sig->sig_err_exists = true;
- mr->sig->sigerr_count++;
+ get_sig_err_item(sig_err_cqe, &sig->err_item);
+ sig->sig_err_exists = true;
+ sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
- cq->mcq.cqn, mr->sig->err_item.key,
- mr->sig->err_item.err_type,
- mr->sig->err_item.sig_err_offset,
- mr->sig->err_item.expected,
- mr->sig->err_item.actual);
+ cq->mcq.cqn, sig->err_item.key,
+ sig->err_item.err_type,
+ sig->err_item.sig_err_offset,
+ sig->err_item.expected,
+ sig->err_item.actual);
- xa_unlock(&dev->mdev->priv.mkey_table);
+ xa_unlock(&dev->sig_mrs);
goto repoll;
}
+ }
return 0;
}
@@ -710,7 +709,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->buf.umem =
ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1111,7 +1110,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
umem = ib_umem_get(udata, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index d609f4659afb..9d0a18cf9e5e 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -100,6 +100,7 @@ struct devx_obj {
struct mlx5_ib_devx_mr devx_mr;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
+ u32 flow_counter_bulk_size;
};
struct list_head event_sub; /* holds devx_event_subscription entries */
};
@@ -192,15 +193,20 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
+
+ if (offset && offset >= devx_obj->flow_counter_bulk_size)
+ return false;
+
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
+ *counter_id += offset;
return true;
}
@@ -1265,8 +1271,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
- return xa_err(xa_store(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
+ GFP_KERNEL));
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1345,9 +1351,9 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+ xa_erase(&obj->ib_dev->odp_mkeys,
mlx5_base_mkey(obj->devx_mr.mmkey.key));
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
@@ -1463,6 +1469,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (err)
goto obj_free;
+ if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+ u8 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = 128UL * bulk;
+ }
+
uobj->object = obj;
INIT_LIST_HEAD(&obj->event_sub);
obj->ib_dev = dev;
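
Together with the offset check added to mlx5_ib_devx_is_flow_counter() above, this records that a DEVX flow counter object created with flow_counter_bulk = N spans 128 * N consecutive counter IDs, so a caller-supplied offset must stay inside that range before being added to the base ID. Standalone sketch with made-up values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool counter_id_from_offset(uint32_t base_id, uint32_t bulk_size,
				   uint32_t offset, uint32_t *counter_id)
{
	if (offset && offset >= bulk_size)
		return false;	/* offset outside the allocated bulk */
	*counter_id = base_id + offset;
	return true;
}

int main(void)
{
	uint32_t bulk_size = 128 * 2;	/* flow_counter_bulk = 2 */
	uint32_t id;

	if (counter_id_from_offset(0x1000, bulk_size, 130, &id))
		printf("counter_id=0x%x\n", (unsigned)id);	/* 0x1082 */
	if (!counter_id_from_offset(0x1000, bulk_size, 300, &id))
		printf("offset 300 rejected\n");
	return 0;
}
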
@@ -2121,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
+ obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 8f4e5f22b84c..12737c509aa2 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b198ff10cde9..dbee17d22d50 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -85,6 +85,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
int len, ret, i;
u32 counter_id = 0;
+ u32 *offset_attr;
+ u32 offset = 0;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -151,8 +153,27 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
if (len) {
devx_obj = arr_flow_actions[0]->object;
- if (!mlx5_ib_devx_is_flow_counter(devx_obj, &counter_id))
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
+
+ int num_offsets = uverbs_attr_ptr_get_array_size(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ sizeof(u32));
+
+ if (num_offsets != 1)
+ return -EINVAL;
+
+ offset_attr = uverbs_attr_get_alloced_ptr(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
+ offset = *offset_attr;
+ }
+
+ if (!mlx5_ib_devx_is_flow_counter(devx_obj, offset,
+ &counter_id))
return -EINVAL;
+
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -598,7 +619,11 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
UVERBS_ACCESS_READ, 1, 1,
- UA_OPTIONAL));
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 4950df3f71b6..ac4d8d1b9a07 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -263,7 +263,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
},
.sq_sig_type = gsi->sq_sig_type,
.qp_type = IB_QPT_UD,
- .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+ .create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
};
return ib_create_qp(pd, &init_attr);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 74ce9249e75a..5c3d052ac30b 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -35,7 +35,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
int vport_index;
if (rep->vport == MLX5_VPORT_UPLINK)
- profile = &uplink_rep_profile;
+ profile = &raw_eth_profile;
else
return mlx5_ib_set_vport_rep(dev, rep);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index de43b423bafc..3b6750cba796 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -10,7 +10,7 @@
#include "mlx5_ib.h"
#ifdef CONFIG_MLX5_ESWITCH
-extern const struct mlx5_ib_profile uplink_rep_profile;
+extern const struct mlx5_ib_profile raw_eth_profile;
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 649a3364f838..4f0edd4832bd 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -201,3 +201,27 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
return -EINVAL;
}
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
+ if (err)
+ goto ex;
+
+ port_guid->guid = rep->port_guid;
+ node_guid->guid = rep->node_guid;
+ex:
+ kfree(rep);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 348c1df69cdc..14e0c17de6a9 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -74,58 +74,6 @@ static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
port);
}
-static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
- u16 slid;
- int err;
-
- slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
- return IB_MAD_RESULT_SUCCESS;
-
- /* Don't process SMInfo queries -- the SMA can't handle them.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
- return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
- return IB_MAD_RESULT_SUCCESS;
- } else {
- return IB_MAD_RESULT_SUCCESS;
- }
-
- err = mlx5_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
- if (err)
- return IB_MAD_RESULT_FAILURE;
-
- /* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
- /* no response for trap repress */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
void *out)
{
@@ -271,30 +219,66 @@ done:
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
- int ret;
+ u8 mgmt_class = in->mad_hdr.mgmt_class;
+ u8 method = in->mad_hdr.method;
+ u16 slid;
+ int err;
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
+ be16_to_cpu(IB_LID_PERMISSIVE);
- memset(out_mad->data, 0, sizeof(out_mad->data));
+ if (method == IB_MGMT_METHOD_TRAP && !slid)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
- } else {
- ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
- in_mad, out_mad);
+ switch (mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /* Don't process SMInfo queries -- the SMA can't handle them.
+ */
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ method == IB_MGMT_METHOD_GET)
+ return process_pma_cmd(dev, port_num, in, out);
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS1:
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS2:
+ case IB_MGMT_CLASS_CONG_MGMT: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ default:
+ return IB_MAD_RESULT_SUCCESS;
}
- return ret;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* set return bit in status of directed route responses */
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 831539419c30..51100350b688 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -67,6 +67,7 @@
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -693,21 +694,6 @@ static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
get_atomic_caps(dev, atomic_size_qp, props);
}
-static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
- struct ib_device_attr *props)
-{
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
-
- get_atomic_caps(dev, atomic_size_qp, props);
-}
-
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
-{
- struct ib_device_attr props = {};
-
- get_atomic_caps_dc(dev, &props);
- return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
-}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -844,8 +830,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
if (uhw->outlen && uhw->outlen < resp_len)
return -EINVAL;
- else
- resp.response_length = resp_len;
+
+ resp.response_length = resp_len;
if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
@@ -1011,6 +997,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
props->max_pi_fast_reg_page_list_len =
props->max_fast_reg_page_list_len / 2;
+ props->max_sgl_rd =
+ MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -1031,7 +1019,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
- if (!mlx5_core_is_pf(mdev))
+ if (mlx5_core_is_vf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
@@ -1161,8 +1149,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
- resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ else
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.supported_qpts =
@@ -1808,7 +1802,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
return -EINVAL;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ if (dev->wc_support)
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
@@ -2168,7 +2162,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
- prot);
+ prot, NULL);
if (err) {
mlx5_ib_err(dev,
"rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
@@ -2210,7 +2204,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
PAGE_SHIFT) +
page_idx;
return rdma_user_mmap_io(context, vma, pfn, map_size,
- pgprot_writecombine(vma->vm_page_prot));
+ pgprot_writecombine(vma->vm_page_prot),
+ NULL);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,7 +2243,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
PAGE_SHIFT;
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -5145,8 +5141,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
- if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
@@ -5249,11 +5244,9 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
- if (MLX5_CAP_GEN(dev->mdev, roce)) {
- err = mlx5_nic_vport_enable_roce(dev->mdev);
- if (err)
- return err;
- }
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
err = mlx5_eth_lag_init(dev);
if (err)
@@ -5262,8 +5255,7 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
return 0;
err_disable_roce:
- if (MLX5_CAP_GEN(dev->mdev, roce))
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_nic_vport_disable_roce(dev->mdev);
return err;
}
@@ -5271,8 +5263,7 @@ err_disable_roce:
static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
mlx5_eth_lag_cleanup(dev);
- if (MLX5_CAP_GEN(dev->mdev, roce))
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_nic_vport_disable_roce(dev->mdev);
}
struct mlx5_ib_counter {
@@ -5710,11 +5701,10 @@ static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
- if (!dev->delay_drop.dbg)
+ if (!dev->delay_drop.dir_debugfs)
return;
- debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
- kfree(dev->delay_drop.dbg);
- dev->delay_drop.dbg = NULL;
+ debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
+ dev->delay_drop.dir_debugfs = NULL;
}
static void cancel_delay_drop(struct mlx5_ib_dev *dev)
@@ -5765,52 +5755,22 @@ static const struct file_operations fops_delay_drop_timeout = {
.read = delay_drop_timeout_read,
};
-static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
+static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_ib_dbg_delay_drop *dbg;
+ struct dentry *root;
if (!mlx5_debugfs_root)
- return 0;
-
- dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
- if (!dbg)
- return -ENOMEM;
-
- dev->delay_drop.dbg = dbg;
-
- dbg->dir_debugfs =
- debugfs_create_dir("delay_drop",
- dev->mdev->priv.dbg_root);
- if (!dbg->dir_debugfs)
- goto out_debugfs;
-
- dbg->events_cnt_debugfs =
- debugfs_create_atomic_t("num_timeout_events", 0400,
- dbg->dir_debugfs,
- &dev->delay_drop.events_cnt);
- if (!dbg->events_cnt_debugfs)
- goto out_debugfs;
-
- dbg->rqs_cnt_debugfs =
- debugfs_create_atomic_t("num_rqs", 0400,
- dbg->dir_debugfs,
- &dev->delay_drop.rqs_cnt);
- if (!dbg->rqs_cnt_debugfs)
- goto out_debugfs;
-
- dbg->timeout_debugfs =
- debugfs_create_file("timeout", 0600,
- dbg->dir_debugfs,
- &dev->delay_drop,
- &fops_delay_drop_timeout);
- if (!dbg->timeout_debugfs)
- goto out_debugfs;
+ return;
- return 0;
+ root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
+ dev->delay_drop.dir_debugfs = root;
-out_debugfs:
- delay_drop_debugfs_cleanup(dev);
- return -ENOMEM;
+ debugfs_create_atomic_t("num_timeout_events", 0400, root,
+ &dev->delay_drop.events_cnt);
+ debugfs_create_atomic_t("num_rqs", 0400, root,
+ &dev->delay_drop.rqs_cnt);
+ debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
+ &fops_delay_drop_timeout);
}
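The rewrite relies on the modern debugfs contract: creation helpers are best-effort, their return values are not checked, and a single debugfs_remove_recursive() on the saved directory dentry tears the whole tree down. A stand-alone sketch of the same pattern, with hypothetical names:

#include <linux/debugfs.h>

static struct dentry *example_dir;	/* hypothetical module state */
static atomic_t example_events = ATOMIC_INIT(0);

static void example_debugfs_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	/* Return values deliberately ignored: debugfs is best-effort. */
	debugfs_create_atomic_t("events", 0400, example_dir, &example_events);
}

static void example_debugfs_cleanup(void)
{
	debugfs_remove_recursive(example_dir);
	example_dir = NULL;
}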
static void init_delay_drop(struct mlx5_ib_dev *dev)
@@ -5826,8 +5786,7 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
atomic_set(&dev->delay_drop.rqs_cnt, 0);
atomic_set(&dev->delay_drop.events_cnt, 0);
- if (delay_drop_debugfs_init(dev))
- mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
+ delay_drop_debugfs_init(dev);
}
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
@@ -6145,11 +6104,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- srcu_barrier(&dev->mr_srcu);
- cleanup_srcu_struct(&dev->mr_srcu);
- }
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ cleanup_srcu_struct(&dev->odp_srcu);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}
@@ -6201,15 +6159,15 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
+ xa_init(&dev->odp_mkeys);
+ xa_init(&dev->sig_mrs);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- err = init_srcu_struct(&dev->mr_srcu);
- if (err)
- goto err_mp;
- }
+ err = init_srcu_struct(&dev->odp_srcu);
+ if (err)
+ goto err_mp;
return 0;
@@ -6269,6 +6227,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
+ .enable_driver = mlx5_ib_enable_driver,
+ .fill_res_entry = mlx5_ib_fill_res_entry,
+ .fill_stat_entry = mlx5_ib_fill_stat_entry,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
.get_link_layer = mlx5_ib_port_link_layer,
@@ -6315,6 +6276,7 @@ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
.get_vf_config = mlx5_ib_get_vf_config,
+ .get_vf_guid = mlx5_ib_get_vf_guid,
.get_vf_stats = mlx5_ib_get_vf_stats,
.set_vf_guid = mlx5_ib_set_vf_guid,
.set_vf_link_state = mlx5_ib_set_vf_link_state,
@@ -6444,7 +6406,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.query_port = mlx5_ib_rep_query_port,
};
-static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
{
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
return 0;
@@ -6484,7 +6446,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_remove_netdev_notifier(dev, port_num);
}
-static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
@@ -6500,7 +6462,7 @@ static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
return err;
}
-static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_stage_common_roce_cleanup(dev);
}
@@ -6710,6 +6672,18 @@ static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
}
}
+int mlx5_ib_enable_driver(struct ib_device *dev)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(dev);
+ int ret;
+
+ ret = mlx5_ib_test_wc(mdev);
+ mlx5_ib_dbg(mdev, "Write-Combining %s",
+ mdev->wc_support ? "supported" : "not supported");
+
+ return ret;
+}
+
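For context, .enable_driver (hooked into mlx5_ib_dev_ops earlier in this patch) is invoked by the RDMA core once device registration is otherwise complete. A simplified sketch of the call-site shape, not the core's exact code:

static int enable_device(struct ib_device *dev)
{
	if (dev->ops.enable_driver)
		return dev->ops.enable_driver(dev);
	return 0;
}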
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
@@ -6807,7 +6781,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_delay_drop_cleanup),
};
-const struct mlx5_ib_profile uplink_rep_profile = {
+const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -6818,11 +6792,11 @@ const struct mlx5_ib_profile uplink_rep_profile = {
mlx5_ib_stage_caps_init,
NULL),
STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
- mlx5_ib_stage_rep_non_default_cb,
+ mlx5_ib_stage_raw_eth_non_default_cb,
NULL),
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
- mlx5_ib_stage_rep_roce_init,
- mlx5_ib_stage_rep_roce_cleanup),
+ mlx5_ib_stage_raw_eth_roce_init,
+ mlx5_ib_stage_raw_eth_roce_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -6898,6 +6872,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
+ const struct mlx5_ib_profile *profile;
enum rdma_link_layer ll;
struct mlx5_ib_dev *dev;
int port_type_cap;
@@ -6933,7 +6908,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->mdev = mdev;
dev->num_ports = num_ports;
- return __mlx5_ib_add(dev, &pf_profile);
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+ profile = &raw_eth_profile;
+ else
+ profile = &pf_profile;
+
+ return __mlx5_ib_add(dev, profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b5aece786b36..048f4e974a61 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -34,6 +34,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
+#include <linux/jiffies.h>
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
@@ -216,3 +217,201 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
*offset = buf_off >> ilog2(off_size);
return 0;
}
+
+#define WR_ID_BF 0xBF
+#define WR_ID_END 0xBAD
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ bool signaled)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_bf *bf = &qp->bf;
+ __be32 mmio_wqe[16] = {};
+ unsigned long flags;
+ unsigned int idx;
+ int i;
+
+ if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+
+ memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
+ ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
+ (qp->trans_qp.base.mqp.qpn << 8));
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
+ qp->sq.wqe_head[idx] = qp->sq.head + 1;
+ qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+ qp->sq.head++;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
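+	/* Only the doorbell copy asks for a CQE: if the write-combined
+	 * 64-byte BlueFlame write reaches the HCA intact, each NOP
+	 * completes with wr_id WR_ID_BF; if the HCA falls back to
+	 * fetching the WQE from memory, the unsignaled NOPs complete
+	 * silently and only the final WR_ID_END generates a CQE.
+	 */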
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell
+ */
+ wmb();
+ for (i = 0; i < 8; i++)
+ mlx5_write64(&mmio_wqe[i * 2],
+ bf->bfreg->map + bf->offset + i * 8);
+
+ bf->offset ^= bf->buf_size;
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return 0;
+}
+
+static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
+{
+ int ret;
+ struct ib_wc wc = {};
+ unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+
+ do {
+ ret = ib_poll_cq(cq, 1, &wc);
+ if (ret < 0 || wc.status)
+ return ret < 0 ? ret : -EINVAL;
+ if (ret)
+ break;
+ } while (!time_after(jiffies, end));
+
+ if (!ret)
+ return -ETIMEDOUT;
+
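+	/* A WR_ID_BF completion proves a BlueFlame doorbell arrived
+	 * intact; seeing WR_ID_END first means the write-combined
+	 * writes were lost, so report 0 (no WC support).
+	 */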
+ if (wc.wr_id != WR_ID_BF)
+ ret = 0;
+
+ return ret;
+}
+
+static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ int err, i;
+
+ for (i = 0; i < TEST_WC_NUM_WQES; i++) {
+ err = post_send_nop(dev, qp, WR_ID_BF, false);
+ if (err)
+ return err;
+ }
+
+ return post_send_nop(dev, qp, WR_ID_END, true);
+}
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
+{
+ struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+ struct ib_qp_init_attr qp_init_attr = {
+ .cap = { .max_send_wr = TEST_WC_NUM_WQES },
+ .qp_type = IB_QPT_UD,
+ .sq_sig_type = IB_SIGNAL_REQ_WR,
+ .create_flags = MLX5_IB_QP_CREATE_WC_TEST,
+ };
+ struct ib_qp_attr qp_attr = { .port_num = 1 };
+ struct ib_device *ibdev = &dev->ib_dev;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, bf))
+ return 0;
+
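+	/* Assumption (rationale not stated in this patch): with RoCE
+	 * disabled on an Ethernet port the UD QP used by the test
+	 * cannot be created, so WC is presumed functional on the PF.
+	 */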
+ if (!dev->mdev->roce.roce_en &&
+ port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
+ if (mlx5_core_is_pf(dev->mdev))
+ dev->wc_support = true;
+ return 0;
+ }
+
+ ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
+ if (ret)
+ goto print_err;
+
+ if (!dev->wc_bfreg.wc)
+ goto out1;
+
+ pd = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto out1;
+ }
+
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out2;
+ }
+
+ qp_init_attr.recv_cq = cq;
+ qp_init_attr.send_cq = cq;
+ qp = ib_create_qp(pd, &qp_init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out3;
+ }
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_modify_qp(qp, &qp_attr,
+ IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
+ IB_QP_QKEY);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret)
+ goto out4;
+
+ ret = test_wc_do_send(dev, qp);
+ if (ret < 0)
+ goto out4;
+
+ ret = test_wc_poll_cq_result(dev, cq);
+ if (ret > 0) {
+ dev->wc_support = true;
+ ret = 0;
+ }
+
+out4:
+ ib_destroy_qp(qp);
+out3:
+ ib_destroy_cq(cq);
+out2:
+ ib_dealloc_pd(pd);
+out1:
+ mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
+print_err:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Error %d while trying to test write-combining support\n",
+ ret);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 1a98ee2e01c4..03af54b53bf7 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -247,12 +247,8 @@ struct mlx5_ib_flow_db {
* These flags are intended for internal use by the mlx5_ib driver, and they
* rely on the range reserved for that use in the ib_qp_create_flags enum.
*/
-
-/* Create a UD QP whose source QP number is 1 */
-static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
-{
- return IB_QP_CREATE_RESERVED_START;
-}
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
struct wr_list {
u16 opcode;
@@ -295,6 +291,7 @@ enum mlx5_ib_wq_flags {
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
struct mlx5_ib_rwq {
struct ib_wq ibwq;
@@ -585,6 +582,9 @@ struct mlx5_ib_dm {
IB_ACCESS_REMOTE_READ |\
IB_ZERO_BASED)
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+ atomic64_add(value, &((mr)->odp_stats.counter_name))
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
void *descs;
@@ -606,7 +606,6 @@ struct mlx5_ib_mr {
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
- unsigned int live;
void *descs_alloc;
int access_flags; /* Needed for rereg MR */
@@ -618,10 +617,18 @@ struct mlx5_ib_mr {
u64 data_iova;
u64 pi_iova;
- atomic_t num_leaf_free;
- wait_queue_head_t q_leaf_free;
+ /* For ODP and implicit */
+ atomic_t num_deferred_work;
+ struct xarray implicit_children;
+ union {
+ struct rcu_head rcu;
+ struct list_head elm;
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+
struct mlx5_async_work cb_work;
- atomic_t num_pending_prefetch;
};
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -792,13 +799,6 @@ enum {
MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
-struct mlx5_ib_dbg_delay_drop {
- struct dentry *dir_debugfs;
- struct dentry *rqs_cnt_debugfs;
- struct dentry *events_cnt_debugfs;
- struct dentry *timeout_debugfs;
-};
-
struct mlx5_ib_delay_drop {
struct mlx5_ib_dev *dev;
struct work_struct delay_drop_work;
@@ -808,7 +808,7 @@ struct mlx5_ib_delay_drop {
bool activate;
atomic_t events_cnt;
atomic_t rqs_cnt;
- struct mlx5_ib_dbg_delay_drop *dbg;
+ struct dentry *dir_debugfs;
};
enum mlx5_ib_stages {
@@ -957,7 +957,11 @@ struct mlx5_ib_dev {
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
- bool ib_active;
+ u8 ib_active:1;
+ u8 fill_delay:1;
+ u8 is_rep:1;
+ u8 lag_active:1;
+ u8 wc_support:1;
struct umr_common umrc;
/* sync used page count stats
*/
@@ -966,7 +970,6 @@ struct mlx5_ib_dev {
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
- int fill_delay;
struct ib_odp_caps odp_caps;
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
@@ -975,7 +978,9 @@ struct mlx5_ib_dev {
* Sleepable RCU that prevents destruction of MRs while they are still
* being used by a page fault handler.
*/
- struct srcu_struct mr_srcu;
+ struct srcu_struct odp_srcu;
+ struct xarray odp_mkeys;
+
u32 null_mkey;
struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
@@ -984,11 +989,10 @@ struct mlx5_ib_dev {
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg wc_bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
- bool is_rep;
- int lag_active;
struct mlx5_ib_lb_state lb;
u8 umr_fence;
@@ -999,6 +1003,8 @@ struct mlx5_ib_dev {
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
+
+ struct xarray sig_mrs;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1162,6 +1168,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1179,9 +1186,8 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
@@ -1223,6 +1229,8 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
@@ -1235,7 +1243,6 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
@@ -1300,6 +1307,9 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
@@ -1334,6 +1344,10 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
@@ -1349,7 +1363,7 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_flow_act *flow_act, u32 counter_id,
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id);
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
@@ -1491,4 +1505,7 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
return true;
}
+
+int mlx5_ib_enable_driver(struct ib_device *dev);
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 7019c12005f4..60d39b9ec41c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,7 +50,6 @@ enum {
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -59,13 +58,9 @@ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-
- return err;
+ return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -94,8 +89,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct xarray *mkeys = &dev->mdev->priv.mkey_table;
- int err;
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
@@ -122,13 +115,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
- xa_lock_irqsave(mkeys, flags);
- err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_ATOMIC));
- xa_unlock_irqrestore(mkeys, flags);
- if (err)
- pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-
if (!completion_done(&ent->compl))
complete(&ent->compl);
}
@@ -218,9 +204,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- synchronize_srcu(&dev->mr_srcu);
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -428,7 +411,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
ent = &cache->ent[entry];
@@ -511,7 +494,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
c = order2idx(dev, mr->order);
WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr)) {
+ if (mlx5_mr_cache_invalidate(mr)) {
mr->allocated_from_cache = false;
destroy_mkey(dev, mr);
ent = &cache->ent[c];
@@ -555,10 +538,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- synchronize_srcu(&dev->mr_srcu);
-#endif
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -679,6 +658,20 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
return 0;
}
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -702,16 +695,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET(mkc, mkc, length64, 1);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, 0);
+ set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -779,7 +764,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags, 0);
+ u = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -1169,16 +1154,8 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, start_addr);
+ set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -1337,10 +1314,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
- atomic_set(&mr->num_pending_prefetch, 0);
+ atomic_set(&mr->num_deferred_work, 0);
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
+ GFP_KERNEL));
+ if (err) {
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+ }
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- smp_store_release(&mr->live, 1);
return &mr->ibmr;
error:
@@ -1348,22 +1330,29 @@ error:
return ERR_PTR(err);
}
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+/**
+ * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA inprogress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_umr_wr umrwr = {};
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
- umrwr.pd = dev->umrc.pd;
+ umrwr.pd = mr->dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
umrwr.ignore_free_state = 1;
- return mlx5_ib_post_send_wait(dev, &umrwr);
+ return mlx5_ib_post_send_wait(mr->dev, &umrwr);
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
@@ -1447,7 +1436,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* UMR can't be used - MKey needs to be replaced.
*/
if (mr->allocated_from_cache)
- err = unreg_umr(dev, mr);
+ err = mlx5_mr_cache_invalidate(mr);
else
err = destroy_mkey(dev, mr);
if (err)
@@ -1560,6 +1549,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
+ xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
kfree(mr->sig);
mr->sig = NULL;
}
@@ -1575,54 +1565,20 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
- if (is_odp_mr(mr)) {
- struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
-
- /* Prevent new page faults and
- * prefetch requests from succeeding
- */
- WRITE_ONCE(mr->live, 0);
-
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
-
- /* dequeue pending prefetch requests for the mr */
- if (atomic_read(&mr->num_pending_prefetch))
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&mr->num_pending_prefetch));
-
- /* Destroy all page mappings */
- if (!umem_odp->is_implicit_odp)
- mlx5_ib_invalidate_range(umem_odp,
- ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- else
- mlx5_ib_free_implicit_mr(mr);
- /*
- * We kill the umem before the MR for ODP,
- * so that there will not be any invalidations in
- * flight, looking at the *mr struct.
- */
- ib_umem_odp_release(umem_odp);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
- /* Avoid double-freeing the umem. */
- umem = NULL;
- }
+ /* Stop all DMA */
+ if (is_odp_mr(mr))
+ mlx5_ib_fence_odp_mr(mr);
+ else
+ clean_mr(dev, mr);
- clean_mr(dev, mr);
+ if (mr->allocated_from_cache)
+ mlx5_mr_cache_free(dev, mr);
+ else
+ kfree(mr);
- /*
- * We should unregister the DMA address from the HCA before
- * remove the DMA mapping.
- */
- mlx5_mr_cache_free(dev, mr);
ib_umem_release(umem);
- if (umem)
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
- if (!mr->allocated_from_cache)
- kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1634,6 +1590,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
}
+ if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
+ mlx5_ib_free_implicit_mr(mmr);
+ return 0;
+ }
+
dereg_mr(to_mdev(ibmr->device), mmr);
return 0;
@@ -1797,8 +1758,15 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
if (err)
goto err_free_mtt_mr;
+ err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, GFP_KERNEL));
+ if (err)
+ goto err_free_descs;
return 0;
+err_free_descs:
+ destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
err_free_mtt_mr:
dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
mr->mtt_mr = NULL;
@@ -1951,9 +1919,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
}
}
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
+ GFP_KERNEL));
+ if (err)
+ goto free_mkey;
+ }
+
kfree(in);
return &mw->ibmw;
+free_mkey:
+ mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
kfree(mw);
kfree(in);
@@ -1967,13 +1945,12 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase_irq(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mmw->mmkey.key));
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
/*
* pagefault_single_data_segment() may be accessing mmw under
* SRCU if the user bound an ODP MR to this MW.
*/
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3f9478d19376..45ee40c2f36e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,158 +93,152 @@ struct mlx5_pagefault {
static u64 mlx5_imr_ksm_entries;
-static int check_parent(struct ib_umem_odp *odp,
- struct mlx5_ib_mr *parent)
+void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
{
- struct mlx5_ib_mr *mr = odp->private;
-
- return mr && mr->parent == parent && !odp->dying;
-}
-
-static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
-{
- if (WARN_ON(!mr || !is_odp_mr(mr)))
- return NULL;
-
- return to_ib_umem_odp(mr->umem)->per_mm;
-}
-
-static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
-{
- struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext_per_mm *per_mm = odp->per_mm;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- while (1) {
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (check_parent(odp, parent))
- goto end;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
- struct mlx5_ib_mr *parent)
-{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
- struct ib_umem_odp *odp;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
- if (!odp)
- goto end;
-
- while (1) {
- if (check_parent(odp, parent))
- goto end;
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp) > start + length)
- goto not_found;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
- size_t nentries, struct mlx5_ib_mr *mr, int flags)
-{
- struct ib_pd *pd = mr->ibmr.pd;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_umem_odp *odp;
- unsigned long va;
- int i;
+ struct mlx5_klm *end = pklm + nentries;
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (i = 0; i < nentries; i++, pklm++) {
+ for (; pklm != end; pklm++, idx++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
pklm->va = 0;
}
return;
}
/*
- * The locking here is pretty subtle. Ideally the implicit children
- * list would be protected by the umem_mutex, however that is not
+ * The locking here is pretty subtle. Ideally the implicit_children
+	 * xarray would be protected by the umem_mutex; however, that is not
* possible. Instead this uses a weaker update-then-lock pattern:
*
* srcu_read_lock()
- * <change children list>
+ * xa_store()
* mutex_lock(umem_mutex)
* mlx5_ib_update_xlt()
* mutex_unlock(umem_mutex)
* destroy lkey
*
- * ie any change the children list must be followed by the locked
- * update_xlt before destroying.
+	 * i.e. any change to the xarray must be followed by the locked update_xlt
+ * before destroying.
*
* The umem_mutex provides the acquire/release semantic needed to make
- * the children list visible to a racing thread. While SRCU is not
+ * the xa_store() visible to a racing thread. While SRCU is not
* technically required, using it gives consistent use of the SRCU
- * locking around the children list.
+ * locking around the xarray.
*/
- lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
- lockdep_assert_held(&mr->dev->mr_srcu);
+ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
+ lockdep_assert_held(&imr->dev->odp_srcu);
- odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ for (; pklm != end; pklm++, idx++) {
+ struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
- for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && ib_umem_start(odp) == va) {
- struct mlx5_ib_mr *mtt = odp->private;
-
+ if (mtt) {
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
- odp = odp_next(odp);
+ pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
+ pklm->va = 0;
}
- mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
- i, va, be32_to_cpu(pklm->key));
}
}
-static void mr_leaf_free_action(struct work_struct *work)
+static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ /* Ensure mlx5_ib_invalidate_range() will not touch the MR any more */
+ mutex_lock(&odp->umem_mutex);
+ if (odp->npages) {
+ mlx5_mr_cache_invalidate(mr);
+ ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),
+ ib_umem_end(odp));
+ WARN_ON(odp->npages);
+ }
+ odp->private = NULL;
+ mutex_unlock(&odp->umem_mutex);
+
+ if (!mr->allocated_from_cache) {
+ mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
+ WARN_ON(mr->descs);
+ }
+}
+
+/*
+ * This must be called after the mr has been removed from implicit_children
+ * and the SRCU synchronized. NOTE: The MR does not necessarily have to be
+ * empty here; parallel page faults could have raced with the free process and
+ * added pages to it.
+ */
+static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
{
- struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
- struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+ struct mlx5_ib_mr *imr = mr->parent;
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
int srcu_key;
- mr->parent = NULL;
- synchronize_srcu(&mr->dev->mr_srcu);
+ /* implicit_child_mr's are not allowed to have deferred work */
+ WARN_ON(atomic_read(&mr->num_deferred_work));
- if (smp_load_acquire(&imr->live)) {
- srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+ if (need_imr_xlt) {
+ srcu_key = srcu_read_lock(&mr->dev->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
- mlx5_ib_update_xlt(imr, idx, 1, 0,
+ mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&mr->dev->odp_srcu, srcu_key);
}
- ib_umem_odp_release(odp);
+
+ dma_fence_odp_mr(mr);
+
+ mr->parent = NULL;
mlx5_mr_cache_free(mr->dev, mr);
+ ib_umem_odp_release(odp);
+ atomic_dec(&imr->num_deferred_work);
+}
+
+static void free_implicit_child_mr_work(struct work_struct *work)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
- if (atomic_dec_and_test(&imr->num_leaf_free))
- wake_up(&imr->q_leaf_free);
+ free_implicit_child_mr(mr, true);
+}
+
+static void free_implicit_child_mr_rcu(struct rcu_head *head)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
+
+	/* Freeing an MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
+}
+
+static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ struct mlx5_ib_mr *imr = mr->parent;
+
+ xa_lock(&imr->implicit_children);
+ /*
+ * This can race with mlx5_ib_free_implicit_mr(), the first one to
+ * reach the xa lock wins the race and destroys the MR.
+ */
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
+ mr)
+ goto out_unlock;
+
+ atomic_inc(&imr->num_deferred_work);
+ call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu,
+ free_implicit_child_mr_rcu);
+
+out_unlock:
+ xa_unlock(&imr->implicit_children);
}
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
@@ -254,19 +248,19 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ u64 invalidations = 0;
int in_block = 0;
u64 addr;
- if (!umem_odp) {
- pr_err("invalidation called on NULL umem or non-ODP umem\n");
- return;
- }
-
+ mutex_lock(&umem_odp->umem_mutex);
+ /*
+ * If npages is zero then umem_odp->private may not be setup yet. This
+	 * If npages is zero then umem_odp->private may not be set up yet. This
+ */
+ if (!umem_odp->npages)
+ goto out;
mr = umem_odp->private;
- if (!mr || !mr->ibmr.pd)
- return;
-
start = max_t(u64, ib_umem_start(umem_odp), start);
end = min_t(u64, ib_umem_end(umem_odp), end);
@@ -276,7 +270,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
	 * overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
- mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -291,6 +284,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
blk_start_idx = idx;
in_block = 1;
}
+
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
} else {
u64 umr_offset = idx & umr_block_mask;
@@ -308,6 +304,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+
+ mlx5_update_odp_stats(mr, invalidations, invalidations);
+
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -316,13 +315,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
- if (unlikely(!umem_odp->npages && mr->parent &&
- !umem_odp->dying)) {
- WRITE_ONCE(mr->live, 0);
- umem_odp->dying = 1;
- atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem_odp->work);
- }
+ if (unlikely(!umem_odp->npages && mr->parent))
+ destroy_unused_implicit_child_mr(mr);
+out:
mutex_unlock(&umem_odp->umem_mutex);
}
@@ -390,8 +385,6 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
!MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
-
- return;
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
@@ -416,237 +409,213 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
wq_num, err);
}
-static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
- struct ib_umem_odp *umem_odp,
- bool ksm, int access_flags)
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ unsigned long idx)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem_odp *odp;
struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *ret;
int err;
- mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
- MLX5_IMR_MTT_CACHE_ENTRY);
+ odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+ idx * MLX5_IMR_MTT_SIZE,
+ MLX5_IMR_MTT_SIZE);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+ ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
if (IS_ERR(mr))
- return mr;
-
- mr->ibmr.pd = pd;
-
- mr->dev = dev;
- mr->access_flags = access_flags;
- mr->mmkey.iova = 0;
- mr->umem = &umem_odp->umem;
-
- if (ksm) {
- err = mlx5_ib_update_xlt(mr, 0,
- mlx5_imr_ksm_entries,
- MLX5_KSM_PAGE_SHIFT,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE);
-
- } else {
- err = mlx5_ib_update_xlt(mr, 0,
- MLX5_IMR_MTT_ENTRIES,
- PAGE_SHIFT,
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE |
- MLX5_IB_UPD_XLT_ATOMIC);
- }
-
- if (err)
- goto fail;
+ goto out_umem;
+ mr->ibmr.pd = imr->ibmr.pd;
+ mr->access_flags = imr->access_flags;
+ mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
-
- mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
- mr->mmkey.key, dev->mdev, mr);
-
- return mr;
-
-fail:
- mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
- mlx5_mr_cache_free(dev, mr);
-
- return ERR_PTR(err);
-}
-
-static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt)
-{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
- struct ib_umem_odp *odp, *result = NULL;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
- u64 addr = io_virt & MLX5_IMR_MTT_MASK;
- int nentries = 0, start_idx = 0, ret;
- struct mlx5_ib_mr *mtt;
-
- mutex_lock(&odp_mr->umem_mutex);
- odp = odp_lookup(addr, 1, mr);
-
- mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
- io_virt, bcnt, addr, odp);
-
-next_mr:
- if (likely(odp)) {
- if (nentries)
- nentries++;
- } else {
- odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(odp)) {
- mutex_unlock(&odp_mr->umem_mutex);
- return ERR_CAST(odp);
- }
-
- mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
- mr->access_flags);
- if (IS_ERR(mtt)) {
- mutex_unlock(&odp_mr->umem_mutex);
- ib_umem_odp_release(odp);
- return ERR_CAST(mtt);
- }
-
- odp->private = mtt;
- mtt->umem = &odp->umem;
- mtt->mmkey.iova = addr;
- mtt->parent = mr;
- INIT_WORK(&odp->work, mr_leaf_free_action);
-
- smp_store_release(&mtt->live, 1);
-
- if (!nentries)
- start_idx = addr >> MLX5_IMR_MTT_SHIFT;
- nentries++;
- }
-
- /* Return first odp if region not covered by single one */
- if (likely(!result))
- result = odp;
-
- addr += MLX5_IMR_MTT_SIZE;
- if (unlikely(addr < io_virt + bcnt)) {
- odp = odp_next(odp);
- if (odp && ib_umem_start(odp) != addr)
- odp = NULL;
- goto next_mr;
+ mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->parent = imr;
+ odp->private = mr;
+
+ err = mlx5_ib_update_xlt(mr, 0,
+ MLX5_IMR_MTT_ENTRIES,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out_mr;
}
- if (unlikely(nentries)) {
- ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ATOMIC);
- if (ret) {
- mlx5_ib_err(dev, "Failed to update PAS\n");
- result = ERR_PTR(ret);
+ /*
+	 * Once the store to either xarray completes, any error unwind has to
+	 * use synchronize_srcu(). Avoid this with xa_reserve().
+ */
+ ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
+ if (unlikely(ret)) {
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto out_mr;
}
+ /*
+ * Another thread beat us to creating the child mr, use
+ * theirs.
+ */
+ goto out_mr;
}
- mutex_unlock(&odp_mr->umem_mutex);
- return result;
+ mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
+ return mr;
+
+out_mr:
+ mlx5_mr_cache_free(imr->dev, mr);
+out_umem:
+ ib_umem_odp_release(odp);
+ return ret;
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags)
{
- struct mlx5_ib_mr *imr;
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *imr;
+ int err;
umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
+ imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
if (IS_ERR(imr)) {
- ib_umem_odp_release(umem_odp);
- return ERR_CAST(imr);
+ err = PTR_ERR(imr);
+ goto out_umem;
}
+ imr->ibmr.pd = &pd->ibpd;
+ imr->access_flags = access_flags;
+ imr->mmkey.iova = 0;
imr->umem = &umem_odp->umem;
- init_waitqueue_head(&imr->q_leaf_free);
- atomic_set(&imr->num_leaf_free, 0);
- atomic_set(&imr->num_pending_prefetch, 0);
- smp_store_release(&imr->live, 1);
+ imr->ibmr.lkey = imr->mmkey.key;
+ imr->ibmr.rkey = imr->mmkey.key;
+ imr->umem = &umem_odp->umem;
+ imr->is_odp_implicit = true;
+ atomic_set(&imr->num_deferred_work, 0);
+ xa_init(&imr->implicit_children);
+
+ err = mlx5_ib_update_xlt(imr, 0,
+ mlx5_imr_ksm_entries,
+ MLX5_KSM_PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err)
+ goto out_mr;
+ err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
+ &imr->mmkey, GFP_KERNEL));
+ if (err)
+ goto out_mr;
+
+ mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
return imr;
+out_mr:
+ mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+ mlx5_mr_cache_free(dev, imr);
+out_umem:
+ ib_umem_odp_release(umem_odp);
+ return ERR_PTR(err);
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- struct rb_node *node;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct mlx5_ib_dev *dev = imr->dev;
+ struct list_head destroy_list;
+ struct mlx5_ib_mr *mtt;
+ struct mlx5_ib_mr *tmp;
+ unsigned long idx;
- down_read(&per_mm->umem_rwsem);
- for (node = rb_first_cached(&per_mm->umem_tree); node;
- node = rb_next(node)) {
- struct ib_umem_odp *umem_odp =
- rb_entry(node, struct ib_umem_odp, interval_tree.rb);
- struct mlx5_ib_mr *mr = umem_odp->private;
+ INIT_LIST_HEAD(&destroy_list);
- if (mr->parent != imr)
- continue;
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
+ /*
+ * This stops the SRCU protected page fault path from touching either
+ * the imr or any children. The page fault path can only reach the
+ * children xarray via the imr.
+ */
+ synchronize_srcu(&dev->odp_srcu);
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
+ xa_lock(&imr->implicit_children);
+ xa_for_each (&imr->implicit_children, idx, mtt) {
+ __xa_erase(&imr->implicit_children, idx);
+ list_add(&mtt->odp_destroy.elm, &destroy_list);
+ }
+ xa_unlock(&imr->implicit_children);
- if (umem_odp->dying) {
- mutex_unlock(&umem_odp->umem_mutex);
- continue;
- }
+ /*
+ * num_deferred_work can only be incremented inside the odp_srcu, or
+ * under xa_lock while the child is in the xarray. Thus at this point
+ * it is only decreasing, and all work holding it is now on the wq.
+ */
+ if (atomic_read(&imr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&imr->num_deferred_work));
+ }
+
+ /*
+ * Fence the imr before we destroy the children. This allows us to
+ * skip updating the XLT of the imr during destroy of the child mkey
+ * the imr points to.
+ */
+ mlx5_mr_cache_invalidate(imr);
+
+ list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
+ free_implicit_child_mr(mtt, false);
+
+ mlx5_mr_cache_free(dev, imr);
+ ib_umem_odp_release(odp_imr);
+}
- umem_odp->dying = 1;
- atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem_odp->work);
- mutex_unlock(&umem_odp->umem_mutex);
+/**
+ * mlx5_ib_fence_odp_mr - Stop all access to the ODP MR
+ * @mr: to fence
+ *
+ * On return no parallel threads will be touching this MR and no DMA will be
+ * active.
+ */
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ /* Prevent new page faults and prefetch requests from succeeding */
+ xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&mr->dev->odp_srcu);
+
+ if (atomic_read(&mr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&mr->num_deferred_work));
}
- up_read(&per_mm->umem_rwsem);
- wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
+ dma_fence_odp_mr(mr);
}
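The fence condenses the unpublish-then-quiesce ordering that recurs throughout this patch. A generic sketch of just that ordering, with hypothetical parameters:

static void quiesce_object(struct xarray *lookup, unsigned long id,
			   struct srcu_struct *srcu, atomic_t *deferred)
{
	xa_erase(lookup, id);		/* 1: no new readers can find it */
	synchronize_srcu(srcu);		/* 2: in-flight readers drain */
	if (atomic_read(deferred))	/* 3: deferred work completes */
		flush_workqueue(system_unbound_wq);
	/* 4: only now is it safe to stop DMA and free the object */
}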
-#define MLX5_PF_FLAGS_PREFETCH BIT(0)
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
-static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt, u32 *bytes_mapped,
- u32 flags)
+static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ u64 user_va, size_t bcnt, u32 *bytes_mapped,
+ u32 flags)
{
- int npages = 0, current_seq, page_shift, ret, np;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
+ int current_seq, page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
u64 access_mask;
u64 start_idx, page_mask;
- struct ib_umem_odp *odp;
- size_t size;
-
- if (odp_mr->is_implicit_odp) {
- odp = implicit_mr_get_data(mr, io_virt, bcnt);
-
- if (IS_ERR(odp))
- return PTR_ERR(odp);
- mr = odp->private;
- } else {
- odp = odp_mr;
- }
-
-next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
page_shift = odp->page_shift;
page_mask = ~(BIT(page_shift) - 1);
- start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
- if (prefetch && !downgrade && !odp->umem.writable) {
- /* prefetch with write-access must
- * be supported by the MR
- */
- ret = -EINVAL;
- goto out;
- }
-
if (odp->umem.writable && !downgrade)
access_mask |= ODP_WRITE_ALLOWED_BIT;
@@ -657,13 +626,10 @@ next_mr:
*/
smp_rmb();
- ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
- current_seq);
-
- if (ret < 0)
- goto out;
-
- np = ret;
+ np = ib_umem_odp_map_dma_pages(odp, user_va, bcnt, access_mask,
+ current_seq);
+ if (np < 0)
+ return np;
mutex_lock(&odp->umem_mutex);
if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
@@ -681,35 +647,19 @@ next_mr:
if (ret < 0) {
if (ret != -EAGAIN)
- mlx5_ib_err(dev, "Failed to update mkey page tables\n");
+ mlx5_ib_err(mr->dev,
+ "Failed to update mkey page tables\n");
goto out;
}
if (bytes_mapped) {
u32 new_mappings = (np << page_shift) -
- (io_virt - round_down(io_virt, 1 << page_shift));
- *bytes_mapped += min_t(u32, new_mappings, size);
- }
-
- npages += np << (page_shift - PAGE_SHIFT);
- bcnt -= size;
-
- if (unlikely(bcnt)) {
- struct ib_umem_odp *next;
+ (user_va - round_down(user_va, 1 << page_shift));
- io_virt += size;
- next = odp_next(odp);
- if (unlikely(!next || ib_umem_start(next) != io_virt)) {
- mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
- io_virt, next);
- return -EAGAIN;
- }
- odp = next;
- mr = odp->private;
- goto next_mr;
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
}
- return npages;
+ return np << (page_shift - PAGE_SHIFT);
out:
if (ret == -EAGAIN) {
@@ -718,7 +668,7 @@ out:
if (!wait_for_completion_timeout(&odp->notifier_completion,
timeout)) {
mlx5_ib_warn(
- dev,
+ mr->dev,
"timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
current_seq, odp->notifiers_seq,
odp->notifiers_count);
@@ -728,6 +678,109 @@ out:
return ret;
}
+static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
+ struct ib_umem_odp *odp_imr, u64 user_va,
+ size_t bcnt, u32 *bytes_mapped, u32 flags)
+{
+ unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long upd_start_idx = end_idx + 1;
+ unsigned long upd_len = 0;
+ unsigned long npages = 0;
+ int err;
+ int ret;
+
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
+ mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ return -EFAULT;
+
+ /* Fault each child mr that intersects with our interval. */
+ while (bcnt) {
+ unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *mtt;
+ u64 len;
+
+ mtt = xa_load(&imr->implicit_children, idx);
+ if (unlikely(!mtt)) {
+ mtt = implicit_get_child_mr(imr, idx);
+ if (IS_ERR(mtt)) {
+ ret = PTR_ERR(mtt);
+ goto out;
+ }
+ upd_start_idx = min(upd_start_idx, idx);
+ upd_len = idx - upd_start_idx + 1;
+ }
+
+ umem_odp = to_ib_umem_odp(mtt->umem);
+ len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
+ user_va;
+
+ ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
+ bytes_mapped, flags);
+ if (ret < 0)
+ goto out;
+ user_va += len;
+ bcnt -= len;
+ npages += ret;
+ }
+
+ ret = npages;
+
+ /*
+ * Any time the implicit_children are changed we must perform an
+ * update of the xlt before exiting to ensure the HW and the
+ * implicit_children remain synchronized.
+ */
+out:
+ if (likely(!upd_len))
+ return ret;
+
+ /*
+ * Notice the ordering here is not strict: the KSM is updated after
+ * the implicit_children is updated, so a parallel page fault could
+ * see an MR that is not yet visible in the KSM. This is similar to a
+ * parallel page fault seeing an MR that is being concurrently removed
+ * from the KSM. Both of these improbable situations are resolved
+ * safely by resuming the HW and then taking another page fault. The
+ * next pagefault handler will see the new information.
+ */
+ mutex_lock(&odp_imr->umem_mutex);
+ err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ if (err) {
+ mlx5_ib_err(imr->dev, "Failed to update PAS\n");
+ return err;
+ }
+ return ret;
+}
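The loop above splits the faulted interval into child-MR-sized chunks via shift arithmetic, walking one MLX5_IMR_MTT_SIZE window at a time. A minimal runnable userspace sketch of the same index/length math, using an illustrative DEMO_MTT_SHIFT value (the real MLX5_IMR_MTT_SHIFT comes from the driver headers):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_MTT_SHIFT 25                     /* illustrative value only */
    #define DEMO_MTT_SIZE  (1ULL << DEMO_MTT_SHIFT)

    int main(void)
    {
    	uint64_t user_va = (1ULL << DEMO_MTT_SHIFT) - 0x10; /* crosses a child boundary */
    	uint64_t bcnt = 0x30;

    	while (bcnt) {
    		uint64_t idx = user_va >> DEMO_MTT_SHIFT;
    		uint64_t end = (idx + 1) * DEMO_MTT_SIZE;   /* ib_umem_end() analogue */
    		uint64_t len = (user_va + bcnt < end ? user_va + bcnt : end) - user_va;

    		printf("child %llu: va=%#llx len=%#llx\n",
    		       (unsigned long long)idx,
    		       (unsigned long long)user_va,
    		       (unsigned long long)len);
    		user_va += len;
    		bcnt -= len;
    	}
    	return 0;
    }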
+
+/*
+ * Returns:
+ * -EFAULT: The io_virt..io_virt+bcnt range is not within the MR, it covers
+ * pages that are not accessible, or the MR is no longer valid.
+ * -EAGAIN/-ENOMEM: The operation should be retried
+ *
+ * -EINVAL/others: General internal malfunction
+ * >0: Number of pages mapped
+ */
+static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ if (!odp->is_implicit_odp) {
+ if (unlikely(io_virt < ib_umem_start(odp) ||
+ ib_umem_end(odp) - io_virt < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+}
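The return-value contract documented above drives the callers' retry logic; a hypothetical caller-side sketch:

    /* Hypothetical caller, following the contract documented above:
     *
     *   ret = pagefault_mr(mr, io_virt, bcnt, &bytes_mapped, 0);
     *   if (ret == -EAGAIN || ret == -ENOMEM)
     *           // transient: resume the QP and let the fault be retried
     *   else if (ret < 0)
     *           // -EFAULT and others: fatal for this WQE
     *   else
     *           // ret pages were mapped; continue processing
     */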
+
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -775,10 +828,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
struct ib_pd *pd, u32 key,
u64 io_virt, size_t bcnt,
u32 *bytes_committed,
- u32 *bytes_mapped, u32 flags)
+ u32 *bytes_mapped)
{
int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -787,58 +839,49 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
size_t offset;
int ndescs;
- srcu_key = srcu_read_lock(&dev->mr_srcu);
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
next_mr:
- mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+ * The user could specify a SGL with multiple lkeys and only
+ * some of them are ODP. Treat the non-ODP ones as fully
+ * faulted.
+ */
+ ret = 0;
+ goto srcu_unlock;
+ }
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
goto srcu_unlock;
}
- if (prefetch && mmkey->type != MLX5_MKEY_MR) {
- mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
- ret = -EINVAL;
- goto srcu_unlock;
- }
-
switch (mmkey->type) {
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
- mlx5_ib_dbg(dev, "got dead MR\n");
- ret = -EFAULT;
- goto srcu_unlock;
- }
- if (prefetch) {
- if (!is_odp_mr(mr) ||
- mr->ibmr.pd != pd) {
- mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
- is_odp_mr(mr) ? "MR is not ODP" :
- "PD is not of the MR");
- ret = -EINVAL;
- goto srcu_unlock;
- }
- }
-
- if (!is_odp_mr(mr)) {
- mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped += bcnt;
- ret = 0;
- goto srcu_unlock;
- }
-
- ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
if (ret < 0)
goto srcu_unlock;
+ /*
+ * When prefetching a page, a page fault is generated
+ * in order to bring the page into main memory.
+ * In the current flow, these page faults are counted.
+ */
+ mlx5_update_odp_stats(mr, faults, ret);
+
npages += ret;
ret = 0;
break;
@@ -928,7 +971,7 @@ srcu_unlock:
}
kfree(out);
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
*bytes_committed = 0;
return ret ? ret : npages;
}
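The switch from mr_srcu to odp_srcu keeps the same read-side discipline: lookups in the odp_mkeys xarray are only stable inside the SRCU read section. A hedged sketch of that pattern (the demo_* names are hypothetical):

    #include <linux/srcu.h>
    #include <linux/xarray.h>

    struct demo_dev {
    	struct srcu_struct odp_srcu;
    	struct xarray odp_mkeys;
    };

    static int demo_lookup(struct demo_dev *dev, unsigned long key)
    {
    	void *obj;
    	int srcu_key;

    	srcu_key = srcu_read_lock(&dev->odp_srcu);
    	obj = xa_load(&dev->odp_mkeys, key);
    	if (!obj) {
    		srcu_read_unlock(&dev->odp_srcu, srcu_key);
    		return -ENOENT;
    	}
    	/* use obj here; writers call synchronize_srcu() before freeing */
    	srcu_read_unlock(&dev->odp_srcu, srcu_key);
    	return 0;
    }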
@@ -1009,7 +1052,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, key,
io_virt, bcnt,
&pfault->bytes_committed,
- bytes_mapped, 0);
+ bytes_mapped);
if (ret < 0)
break;
npages += ret;
@@ -1292,8 +1335,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
}
ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
- &pfault->bytes_committed, NULL,
- 0);
+ &pfault->bytes_committed, NULL);
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
@@ -1320,8 +1362,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, rkey, address,
prefetch_len,
- &bytes_committed, NULL,
- 0);
+ &bytes_committed, NULL);
if (ret < 0 && ret != -EAGAIN) {
mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
ret, pfault->token, address, prefetch_len);
@@ -1624,114 +1665,128 @@ int mlx5_ib_odp_init(void)
struct prefetch_mr_work {
struct work_struct work;
- struct ib_pd *pd;
u32 pf_flags;
u32 num_sge;
- struct ib_sge sg_list[0];
+ struct {
+ u64 io_virt;
+ struct mlx5_ib_mr *mr;
+ size_t length;
+ } frags[];
};
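The fixed sg_list[0] copy is replaced by a trailing flexible array of per-fragment state; the kvzalloc() later in this patch sizes the header and array together with struct_size(). A minimal sketch under those assumptions (demo_* names hypothetical):

    #include <linux/mm.h>
    #include <linux/overflow.h>
    #include <linux/workqueue.h>

    struct demo_work {
    	struct work_struct work;
    	u32 num_sge;
    	struct {
    		u64 io_virt;
    		size_t length;
    	} frags[];                        /* flexible array member */
    };

    static struct demo_work *demo_alloc(u32 num_sge)
    {
    	struct demo_work *w;

    	/* struct_size() adds sizeof(*w) and num_sge * sizeof(w->frags[0])
    	 * with overflow checking.
    	 */
    	w = kvzalloc(struct_size(w, frags, num_sge), GFP_KERNEL);
    	if (w)
    		w->num_sge = num_sge;
    	return w;
    }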
-static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
- struct ib_sge *sg_list, u32 num_sge,
- u32 from)
+static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
u32 i;
- int srcu_key;
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
- for (i = from; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
-
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- atomic_dec(&mr->num_pending_prefetch);
- }
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ for (i = 0; i < work->num_sge; ++i)
+ atomic_dec(&work->frags[i].mr->num_deferred_work);
+ kvfree(work);
}
-static bool num_pending_prefetch_inc(struct ib_pd *pd,
- struct ib_sge *sg_list, u32 num_sge)
+static struct mlx5_ib_mr *
+get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- bool ret = true;
- u32 i;
+ struct mlx5_core_mkey *mmkey;
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
- for (i = 0; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
+ lockdep_assert_held(&dev->odp_srcu);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- if (!mmkey || mmkey->key != sg_list[i].lkey) {
- ret = false;
- break;
- }
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
+ if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ return NULL;
- if (mmkey->type != MLX5_MKEY_MR) {
- ret = false;
- break;
- }
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ if (mr->ibmr.pd != pd)
+ return NULL;
- if (!smp_load_acquire(&mr->live)) {
- ret = false;
- break;
- }
+ odp = to_ib_umem_odp(mr->umem);
- if (mr->ibmr.pd != pd) {
- ret = false;
- break;
- }
+ /* prefetch with write-access must be supported by the MR */
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !odp->umem.writable)
+ return NULL;
- atomic_inc(&mr->num_pending_prefetch);
- }
+ return mr;
+}
- if (!ret)
- num_pending_prefetch_dec(dev, sg_list, i, 0);
+static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ u32 bytes_mapped = 0;
+ u32 i;
- return ret;
+ for (i = 0; i < work->num_sge; ++i)
+ pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+ work->pf_flags);
+
+ destroy_prefetch_work(work);
}
-static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
- struct ib_sge *sg_list, u32 num_sge)
+static bool init_prefetch_work(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct prefetch_mr_work *work,
+ struct ib_sge *sg_list, u32 num_sge)
{
u32 i;
- int ret = 0;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- struct ib_sge *sg = &sg_list[i];
- int bytes_committed = 0;
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr =
+ get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!work->frags[i].mr) {
+ work->num_sge = i;
+ destroy_prefetch_work(work);
+ return false;
+ }
- ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
- sg->length,
- &bytes_committed, NULL,
- pf_flags);
- if (ret < 0)
- break;
+ /* Keep the MR pointer valid outside the SRCU read lock */
+ atomic_inc(&work->frags[i].mr->num_deferred_work);
}
-
- return ret < 0 ? ret : 0;
+ work->num_sge = num_sge;
+ return true;
}
-static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
{
- struct prefetch_mr_work *w =
- container_of(work, struct prefetch_mr_work, work);
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 bytes_mapped = 0;
+ int srcu_key;
+ int ret = 0;
+ u32 i;
+
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
- if (ib_device_try_get(w->pd->device)) {
- mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
- w->num_sge);
- ib_device_put(w->pd->device);
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!mr) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+ &bytes_mapped, pf_flags);
+ if (ret < 0)
+ goto out;
}
+ ret = 0;
- num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
- w->num_sge, 0);
- kvfree(w);
+out:
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return ret;
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1739,43 +1794,27 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
+ u32 pf_flags = 0;
struct prefetch_mr_work *work;
- bool valid_req;
int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
- return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
+ return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
num_sge);
- work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
if (!work)
return -ENOMEM;
- memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
-
- /* It is guaranteed that the pd when work is executed is the pd when
- * work was queued since pd can't be destroyed while it holds MRs and
- * destroying a MR leads to flushing the workquque
- */
- work->pd = pd;
- work->pf_flags = pf_flags;
- work->num_sge = num_sge;
-
- INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
-
- valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
- if (valid_req)
- queue_work(system_unbound_wq, &work->work);
- else
- kvfree(work);
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
-
- return valid_req ? 0 : -EINVAL;
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return -EINVAL;
+ }
+ queue_work(system_unbound_wq, &work->work);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5fd071c05944..7e51870e9e01 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0, 0);
+ *umem = ib_umem_get(udata, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
+ rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -1041,11 +1041,14 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
IB_QP_CREATE_NETIF_QP |
- mlx5_ib_create_qp_sqpn_qp1()))
+ MLX5_IB_QP_CREATE_SQPN_QP1 |
+ MLX5_IB_QP_CREATE_WC_TEST))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
+ else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1104,7 +1107,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+ if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
@@ -2140,7 +2143,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EINVAL;
}
if (init_attr->create_flags &
- mlx5_ib_create_qp_sqpn_qp1()) {
+ MLX5_IB_QP_CREATE_SQPN_QP1) {
mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
return -EINVAL;
}
@@ -5330,7 +5333,6 @@ out:
* we hit doorbell */
wmb();
- /* currently we support only regular doorbells */
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
@@ -5825,7 +5827,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
+ qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -5957,12 +5959,21 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ /*
+ * In firmware, the number of strides in each WQE is:
+ * "512 * 2^single_wqe_log_num_of_strides"
+ * Values 3 to 8 are accepted as 10 to 15, and values
+ * 9 to 18 are accepted as 0 to 9.
+ */
+ static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9 };
MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
MLX5_SET(wq, wq, log_wqe_stride_size,
rwq->single_stride_log_num_of_bytes -
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
- MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ fw_map[rwq->log_num_strides -
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
}
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
@@ -6037,6 +6048,19 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
return 0;
}
+static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
+{
+ if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
+ (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ return true;
+}
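The fw_map table and this validity check encode the same translation between the driver's log_num_strides and the firmware encoding. A runnable userspace demonstration of the mapping, with the bounds taken as assumptions from this patch's comment and constants:

    #include <stdio.h>

    #define EXT_MIN_LOG_STRIDES 3    /* MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES (assumed) */
    #define MIN_LOG_STRIDES     9    /* MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES (assumed) */
    #define MAX_LOG_STRIDES     16   /* MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES (assumed) */

    static const unsigned char fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
    					    2,  3,  4,  5,  6,  7, 8, 9 };

    int main(void)
    {
    	unsigned int log;

    	for (log = EXT_MIN_LOG_STRIDES; log <= MAX_LOG_STRIDES; log++)
    		printf("log_num_strides %2u -> fw value %2u (ext-range only: %s)\n",
    		       log, fw_map[log - EXT_MIN_LOG_STRIDES],
    		       log < MIN_LOG_STRIDES ? "yes" : "no");
    	return 0;
    }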
+
static int prepare_user_rq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata,
@@ -6084,14 +6108,16 @@ static int prepare_user_rq(struct ib_pd *pd,
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
return -EINVAL;
}
- if ((ucmd.single_wqe_log_num_of_strides >
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
- (ucmd.single_wqe_log_num_of_strides <
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
- mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
- ucmd.single_wqe_log_num_of_strides,
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ if (!log_of_strides_valid(dev,
+ ucmd.single_wqe_log_num_of_strides)) {
+ mlx5_ib_dbg(
+ dev,
+ "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
return -EINVAL;
}
rwq->single_stride_log_num_of_bytes =
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
new file mode 100644
index 000000000000..8f6c04f12531
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <uapi/rdma/rdma_netlink.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/restrack.h>
+#include "mlx5_ib.h"
+
+static int fill_stat_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+
+ if (!table_attr)
+ goto err;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
+ atomic64_read(&mr->odp_stats.faults)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations",
+ atomic64_read(&mr->odp_stats.invalidations)))
+ goto err_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (mr->is_odp_implicit) {
+ if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
+ goto err;
+ } else {
+ if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_res_mr_entry(msg, res);
+
+ return 0;
+}
+
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_stat_mr_entry(msg, res);
+
+ return 0;
+}
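Both fill helpers above follow the standard netlink nesting discipline: open a nest, emit attributes, and cancel the nest on any mid-fill failure so no partial table reaches user space. A condensed sketch of the pattern (demo_fill is hypothetical; header locations assumed):

    #include <net/netlink.h>
    #include <rdma/rdma_netlink.h>

    static int demo_fill(struct sk_buff *msg, u64 value)
    {
    	struct nlattr *table;

    	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
    	if (!table)
    		return -EMSGSIZE;

    	if (rdma_nl_stat_hwcounter_entry(msg, "demo_counter", value)) {
    		nla_nest_cancel(msg, table);   /* roll back the open nest */
    		return -EMSGSIZE;
    	}

    	nla_nest_end(msg, table);              /* commit the nest */
    	return 0;
    }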
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4e7fde86c96b..62939df3c692 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index bfd4eebc1182..599794c5a78f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -576,14 +576,10 @@ enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7ad517da4917..99aa8183a7f2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -196,30 +196,19 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int err;
u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
- slid == 0) {
- forward_trap(to_mdev(ibdev), port_num, in_mad);
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
+ forward_trap(to_mdev(ibdev), port_num, in);
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
@@ -229,40 +218,39 @@ int mthca_process_mad(struct ib_device *ibdev,
* Only handle PMA and Mellanox vendor-specific class gets and
* sets for other classes.
*/
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/*
* Don't process SMInfo queries or vendor-specific
* MADs -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+ ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
IB_SMP_ATTR_VENDOR_MASK))
return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
} else
return IB_MAD_RESULT_SUCCESS;
- if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ in->mad_hdr.method == IB_MGMT_METHOD_SET &&
+ in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
prev_lid = ib_lid_cpu16(pattr.lid);
- err = mthca_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
+ err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
else if (err) {
@@ -270,16 +258,16 @@ int mthca_process_mad(struct ib_device *ibdev,
return IB_MAD_RESULT_FAILURE;
}
- if (!out_mad->mad_hdr.status) {
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
- node_desc_override(ibdev, out_mad);
+ if (!out->mad_hdr.status) {
+ smp_snoop(ibdev, port_num, in, prev_lid);
+ node_desc_override(ibdev, out);
}
/* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 23554d8bf241..33002530fee7 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,9 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
-
+ mr->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 8d3e36d548aa..2b7f00ac41b0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -247,35 +247,20 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-int ocrdma_process_mad(struct ib_device *ibdev,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
- int status;
+ int status = IB_MAD_RESULT_SUCCESS;
struct ocrdma_dev *dev;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_PERF_MGMT:
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
dev = get_ocrdma_dev(ibdev);
- if (!ocrdma_pma_counters(dev, out_mad))
- status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
- else
- status = IB_MAD_RESULT_SUCCESS;
- break;
- default:
- status = IB_MAD_RESULT_SUCCESS;
- break;
+ ocrdma_pma_counters(dev, out);
+ status |= IB_MAD_RESULT_REPLY;
}
+
return status;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 64cb82c08664..9780afcde780 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -56,12 +56,9 @@ int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_process_mad(struct ib_device *,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
#endif /* __OCRDMA_AH_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c15cfc6cef81..d8c47d24d6d6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -166,7 +166,6 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.get_port_immutable = ocrdma_port_immutable,
.map_mr_sg = ocrdma_map_mr_sg,
.mmap = ocrdma_mmap,
- .modify_port = ocrdma_modify_port,
.modify_qp = ocrdma_modify_qp,
.poll_cq = ocrdma_poll_cq,
.post_recv = ocrdma_post_recv,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6ef89c226ad8..c2e0d0fa44be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -2034,7 +2034,7 @@ struct ocrdma_rx_stats {
};
struct ocrdma_rx_qp_err_stats {
- u32 nak_invalid_requst_errors;
+ u32 nak_invalid_request_errors;
u32 nak_remote_operation_errors;
u32 nak_count_remote_access_errors;
u32 local_length_errors;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index a902942adb5d..5f831e3bdbad 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -423,8 +423,8 @@ static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
pcur = stats;
- pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
- (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
+ (u64)rx_qp_err_stats->nak_invalid_request_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
(u64)rx_qp_err_stats->nak_remote_operation_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
@@ -670,12 +670,10 @@ err:
return -EFAULT;
}
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad)
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
- memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
@@ -683,7 +681,6 @@ int ocrdma_pma_counters(struct ocrdma_dev *dev,
pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
- return 0;
}
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index bba1fec4f11f..98feca26ac55 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -69,7 +69,6 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad);
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad);
#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e8267e590772..9bc1ca6f6f9e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -190,12 +190,6 @@ int ocrdma_query_port(struct ib_device *ibdev,
return 0;
}
-int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
unsigned long len)
{
@@ -875,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 32488da1b752..3a5010881be5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -54,8 +54,6 @@ int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u8 port_num);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dc71b6e16a07..dcdc85a1ab25 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -212,7 +212,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.get_link_layer = qedr_link_layer,
.map_mr_sg = qedr_map_mr_sg,
.mmap = qedr_mmap,
- .modify_port = qedr_modify_port,
+ .mmap_free = qedr_mmap_free,
.modify_qp = qedr_modify_qp,
.modify_srq = qedr_modify_srq,
.poll_cq = qedr_poll_cq,
@@ -357,9 +357,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
return -ENOMEM;
spin_lock_init(&dev->sgid_lock);
+ xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
if (IS_IWARP(dev)) {
- xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 0cfd849b13d6..5488dbd59d3c 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -40,6 +40,7 @@
#include <linux/qed/qed_rdma_if.h>
#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
+#include <linux/completion.h>
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
@@ -230,14 +231,16 @@ struct qedr_ucontext {
struct qedr_dev *dev;
struct qedr_pd *pd;
void __iomem *dpi_addr;
+ struct rdma_user_mmap_entry *db_mmap_entry;
u64 dpi_phys_addr;
u32 dpi_size;
u16 dpi;
+ bool db_rec;
+};
- struct list_head mm_head;
-
- /* Lock to protect mm list */
- struct mutex mm_list_lock;
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
};
union db_prod64 {
@@ -265,6 +268,13 @@ struct qedr_userq {
struct qedr_pbl *pbl_tbl;
u64 buf_addr;
size_t buf_len;
+
+ /* doorbell recovery */
+ void __iomem *db_addr;
+ struct qedr_user_db_rec *db_rec_data;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ void __iomem *db_rec_db2_addr;
+ union db_prod32 db_rec_db2_data;
};
struct qedr_cq {
@@ -300,19 +310,6 @@ struct qedr_pd {
struct qedr_ucontext *uctx;
};
-struct qedr_mm {
- struct {
- u64 phy_addr;
- unsigned long len;
- } key;
- struct list_head entry;
-};
-
-union db_prod32 {
- struct rdma_pwm_val16_data data;
- u32 raw;
-};
-
struct qedr_qp_hwq_info {
/* WQE Elements */
struct qed_chain pbl;
@@ -377,10 +374,20 @@ enum qedr_qp_err_bitmap {
QEDR_QP_ERR_RQ_PBL_FULL = 32,
};
+enum qedr_qp_create_type {
+ QEDR_QP_CREATE_NONE,
+ QEDR_QP_CREATE_USER,
+ QEDR_QP_CREATE_KERNEL,
+};
+
+enum qedr_iwarp_cm_flags {
+ QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0),
+ QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
+};
+
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
- struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@@ -395,6 +402,7 @@ struct qedr_qp {
u32 id;
struct qedr_pd *pd;
enum ib_qp_type qp_type;
+ enum qedr_qp_create_type create_type;
struct qed_rdma_qp *qed_qp;
u32 qp_id;
u16 icid;
@@ -437,8 +445,11 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
- atomic_t refcnt;
- bool destroyed;
+
+ /* synchronization objects used with iwarp ep */
+ struct kref refcnt;
+ struct completion iwarp_cm_comp;
+ unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
struct qedr_ah {
@@ -476,6 +487,18 @@ struct qedr_mr {
u32 npages;
};
+struct qedr_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ struct qedr_dev *dev;
+ union {
+ u64 io_address;
+ void *address;
+ };
+ size_t length;
+ u16 dpi;
+ u8 mmap_flag;
+};
+
#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
@@ -531,7 +554,7 @@ struct qedr_iw_ep {
struct iw_cm_id *cm_id;
struct qedr_qp *qp;
void *qed_context;
- u8 during_connect;
+ struct kref refcnt;
};
static inline
@@ -574,4 +597,11 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct qedr_srq, ibsrq);
}
+
+static inline struct qedr_user_mmap_entry *
+get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct qedr_user_mmap_entry,
+ rdma_entry);
+}
#endif
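get_qedr_mmap_entry() above is the usual embedded-struct accessor; the recovery of the outer pointer from the inner one works like this userspace re-implementation of the kernel's container_of macro:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdma_entry { int id; };

    struct user_entry {
    	struct rdma_entry rdma_entry;    /* embedded member, as above */
    	int mmap_flag;
    };

    int main(void)
    {
    	struct user_entry ue = { .rdma_entry = { .id = 7 }, .mmap_flag = 1 };
    	struct rdma_entry *re = &ue.rdma_entry;
    	struct user_entry *back = container_of(re, struct user_entry, rdma_entry);

    	printf("flag=%d id=%d\n", back->mmap_flag, back->rdma_entry.id);
    	return 0;
    }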
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 22881d4442b9..792eecd206b6 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
}
}
+static void qedr_iw_free_qp(struct kref *ref)
+{
+ struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
+
+ kfree(qp);
+}
+
+static void
+qedr_iw_free_ep(struct kref *ref)
+{
+ struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
+
+ if (ep->qp)
+ kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
+
+ if (ep->cm_id)
+ ep->cm_id->rem_ref(ep->cm_id);
+
+ kfree(ep);
+}
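The during_connect flag is gone; lifetimes are now expressed with kref, whose release callback runs exactly once on the final put. A minimal sketch of the idiom used by qedr_iw_free_qp()/qedr_iw_free_ep() above (demo_* names hypothetical):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_obj {
    	struct kref refcnt;
    };

    static void demo_release(struct kref *ref)
    {
    	struct demo_obj *obj = container_of(ref, struct demo_obj, refcnt);

    	kfree(obj);
    }

    static void demo_defer(struct demo_obj *obj)
    {
    	kref_get(&obj->refcnt);                /* pin across the deferred context */
    	/* ... hand obj to a workqueue or event handler ... */
    	kref_put(&obj->refcnt, demo_release);  /* the last put frees */
    }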
+
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
ep->dev = dev;
ep->qed_context = params->ep_context;
+ kref_init(&ep->refcnt);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
@@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
- if (ep->cm_id) {
+ if (ep->cm_id)
qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
struct qedr_qp *qp = ep->qp;
struct iw_cm_event event;
- if (qp->destroyed) {
- kfree(dwork);
- qedr_iw_qp_rem_ref(&qp->ibqp);
- return;
- }
+ /* The qp won't be released until we release the ep.
+ * The ep's refcnt was increased before calling this
+ * function, therefore it is safe to access the qp.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ goto out;
memset(&event, 0, sizeof(event));
event.status = dwork->status;
@@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
else
qp_params.new_state = QED_ROCE_QP_STATE_SQD;
- kfree(dwork);
if (ep->cm_id)
ep->cm_id->event_handler(ep->cm_id, &event);
@@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
- qedr_iw_qp_rem_ref(&qp->ibqp);
+ complete(&ep->qp->iwarp_cm_comp);
+out:
+ kfree(dwork);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context,
struct qedr_discon_work *work;
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
struct qedr_dev *dev = ep->dev;
- struct qedr_qp *qp = ep->qp;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
- qedr_iw_qp_add_ref(&qp->ibqp);
+ /* We can't get a close event before disconnect, but since
+ * we're scheduling work on a workqueue, we need to make sure
+ * close won't delete the ep, so we increase the refcnt
+ */
+ kref_get(&ep->refcnt);
+
work->ep = ep;
work->event = params->event;
work->status = params->status;
@@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context,
if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
DP_DEBUG(dev, QEDR_MSG_IWARP,
"PASSIVE connection refused releasing ep...\n");
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return;
}
+ complete(&ep->qp->iwarp_cm_comp);
qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
if (params->status < 0)
qedr_iw_close_event(context, params);
}
+static void
+qedr_iw_active_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ complete(&ep->qp->iwarp_cm_comp);
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
+
+ if (params->status < 0)
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
qedr_iw_mpa_reply(context, params);
break;
case QED_IWARP_EVENT_PASSIVE_COMPLETE:
- ep->during_connect = 0;
qedr_iw_passive_complete(context, params);
break;
-
case QED_IWARP_EVENT_ACTIVE_COMPLETE:
- ep->during_connect = 0;
- qedr_iw_issue_event(context,
- params,
- IW_CM_EVENT_CONNECT_REPLY);
- if (params->status < 0) {
- struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
-
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ qedr_iw_active_complete(context, params);
break;
case QED_IWARP_EVENT_DISCONNECT:
qedr_iw_disconnect_event(context, params);
break;
case QED_IWARP_EVENT_CLOSE:
- ep->during_connect = 0;
qedr_iw_close_event(context, params);
break;
case QED_IWARP_EVENT_RQ_EMPTY:
@@ -451,10 +481,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
if ((!dst) || dst->error) {
if (dst) {
- dst_release(dst);
DP_ERR(dev,
"ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return -EINVAL;
}
@@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev,
return rc;
}
+static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
+{
+ struct qedr_qp *qp;
+
+ xa_lock(&dev->qps);
+ qp = xa_load(&dev->qps, qpn);
+ if (qp)
+ kref_get(&qp->refcnt);
+ xa_unlock(&dev->qps);
+
+ return qp;
+}
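qedr_iw_load_qp() makes lookup-and-get atomic with respect to destruction; a note on why the lock is held across both steps:

    /* Sketch of the race the xa_lock() above closes:
     *
     *   qp = xa_load(&dev->qps, qpn);      // unlocked lookup
     *      <- concurrent destroy: xa_erase() + final kref_put() ->
     *   kref_get(&qp->refcnt);             // would be a use-after-free
     *
     * Holding the xarray lock across xa_load() and kref_get() ensures
     * the final put cannot run between the two steps.
     */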
+
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct qedr_dev *dev = get_qedr_dev(cm_id->device);
@@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = xa_load(&dev->qps, conn_param->qpn);
- if (unlikely(!qp))
- return -EINVAL;
-
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
@@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -ENOMEM;
ep->dev = dev;
+ kref_init(&ep->refcnt);
+
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ if (!qp) {
+ rc = -EINVAL;
+ goto err;
+ }
+
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
in_params.qp = qp->qed_qp;
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already being destroyed */
+
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
err:
- cm_id->rem_ref(cm_id);
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return rc;
}
@@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct qedr_dev *dev = ep->dev;
struct qedr_qp *qp;
struct qed_iwarp_accept_in params;
- int rc;
+ int rc = 0;
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = xa_load(&dev->qps, conn_param->qpn);
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
}
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
params.ird = conn_param->ird;
params.ord = conn_param->ord;
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already destroyed */
+
rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
+
err:
- ep->during_connect = 0;
- cm_id->rem_ref(cm_id);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+
return rc;
}
@@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- atomic_inc(&qp->refcnt);
+ kref_get(&qp->refcnt);
}
void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- if (atomic_dec_and_test(&qp->refcnt)) {
- xa_erase_irq(&qp->dev->qps, qp->qp_id);
- kfree(qp);
- }
+ kref_put(&qp->refcnt, qedr_iw_free_qp);
}
struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6f3ce86019b7..4cd292966aa9 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -51,6 +51,7 @@
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
+#include "qedr_iw_cm.h"
#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ (4)
@@ -58,6 +59,11 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+enum {
+ QEDR_USER_MMAP_IO_WC = 0,
+ QEDR_USER_MMAP_PHYS_PAGE,
+};
+
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
size_t len)
{
@@ -250,78 +256,31 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
return 0;
}
-int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
-static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- struct qedr_mm *mm;
-
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm)
- return -ENOMEM;
-
- mm->key.phy_addr = phy_addr;
- /* This function might be called with a length which is not a multiple
- * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
- * forces this granularity by increasing the requested size if needed.
- * When qedr_mmap is called, it will search the list with the updated
- * length as a key. To prevent search failures, the length is rounded up
- * in advance to PAGE_SIZE.
- */
- mm->key.len = roundup(len, PAGE_SIZE);
- INIT_LIST_HEAD(&mm->entry);
-
- mutex_lock(&uctx->mm_list_lock);
- list_add(&mm->entry, &uctx->mm_head);
- mutex_unlock(&uctx->mm_list_lock);
-
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- (unsigned long long)mm->key.phy_addr,
- (unsigned long)mm->key.len, uctx);
-
- return 0;
-}
-
-static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- bool found = false;
- struct qedr_mm *mm;
-
- mutex_lock(&uctx->mm_list_lock);
- list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
- continue;
-
- found = true;
- break;
- }
- mutex_unlock(&uctx->mm_list_lock);
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
- mm->key.phy_addr, mm->key.len, uctx, found);
-
- return found;
-}
-
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
struct ib_device *ibdev = uctx->device;
int rc;
struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
struct qedr_alloc_ucontext_resp uresp = {};
+ struct qedr_alloc_ucontext_req ureq = {};
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_add_user_out_params oparams;
+ struct qedr_user_mmap_entry *entry;
if (!udata)
return -EFAULT;
+ if (udata->inlen) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return -EFAULT;
+ }
+
+ ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
+ }
+
rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
if (rc) {
DP_ERR(dev,
@@ -334,13 +293,29 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
ctx->dpi_addr = oparams.dpi_addr;
ctx->dpi_phys_addr = oparams.dpi_phys_addr;
ctx->dpi_size = oparams.dpi_size;
- INIT_LIST_HEAD(&ctx->mm_head);
- mutex_init(&ctx->mm_list_lock);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ entry->io_address = ctx->dpi_phys_addr;
+ entry->length = ctx->dpi_size;
+ entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
+ entry->dpi = ctx->dpi;
+ entry->dev = dev;
+ rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
+ ctx->dpi_size);
+ if (rc) {
+ kfree(entry);
+ goto err;
+ }
+ ctx->db_mmap_entry = &entry->rdma_entry;
uresp.dpm_enabled = dev->user_dpm_enabled;
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
- uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
uresp.db_size = ctx->dpi_size;
uresp.max_send_wr = dev->attr.max_sqe;
uresp.max_recv_wr = dev->attr.max_rqe;
@@ -352,82 +327,92 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
- return rc;
+ goto err;
ctx->dev = dev;
- rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
- if (rc)
- return rc;
-
DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
&ctx->ibucontext);
return 0;
+
+err:
+ if (!ctx->db_mmap_entry)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
+ else
+ rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
+
+ return rc;
}
void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
- struct qedr_mm *mm, *tmp;
DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
uctx);
- uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
- list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- mm->key.phy_addr, mm->key.len, uctx);
- list_del(&mm->entry);
- kfree(mm);
- }
+ rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}
-int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
- struct qedr_dev *dev = get_qedr_dev(context->device);
- unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long len = (vma->vm_end - vma->vm_start);
- unsigned long dpi_start;
+ struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
+ struct qedr_dev *dev = entry->dev;
- dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
+ if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
+ free_page((unsigned long)entry->address);
+ else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
- DP_DEBUG(dev, QEDR_MSG_INIT,
- "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
- (void *)vma->vm_start, (void *)vma->vm_end,
- (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+ kfree(entry);
+}
- if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
- DP_ERR(dev,
- "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
- (void *)vma->vm_start, (void *)vma->vm_end);
- return -EINVAL;
- }
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
+{
+ struct ib_device *dev = ucontext->device;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct qedr_user_mmap_entry *entry;
+ int rc = 0;
+ u64 pfn;
- if (!qedr_search_mmap(ucontext, phys_addr, len)) {
- DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
- vma->vm_pgoff);
- return -EINVAL;
- }
+ ibdev_dbg(dev,
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- if (phys_addr < dpi_start ||
- ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
- DP_ERR(dev,
- "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
- (void *)phys_addr, (void *)dpi_start,
- ucontext->dpi_size);
+ rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
-
- if (vma->vm_flags & VM_READ) {
- DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
- return -EINVAL;
+ entry = get_qedr_mmap_entry(rdma_entry);
+ ibdev_dbg(dev,
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->io_address, length, entry->mmap_flag);
+
+ switch (entry->mmap_flag) {
+ case QEDR_USER_MMAP_IO_WC:
+ pfn = entry->io_address >> PAGE_SHIFT;
+ rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case QEDR_USER_MMAP_PHYS_PAGE:
+ rc = vm_insert_page(vma, vma->vm_start,
+ virt_to_page(entry->address));
+ break;
+ default:
+ rc = -EINVAL;
}
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
- vma->vm_page_prot);
+ if (rc)
+ ibdev_dbg(dev,
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->io_address, length, entry->mmap_flag, rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
}
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
@@ -657,16 +642,50 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
}
}
+static int qedr_db_recovery_add(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return 0;
+ }
+
+ return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
+ db_width, db_space);
+}
+
+static void qedr_db_recovery_del(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return;
+ }
+
+ /* Ignore the return code as there is not much we can do about it. An
+ * error message will be logged inside the callee.
+ */
+ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
+}
+
static int qedr_copy_cq_uresp(struct qedr_dev *dev,
- struct qedr_cq *cq, struct ib_udata *udata)
+ struct qedr_cq *cq, struct ib_udata *udata,
+ u32 db_offset)
{
struct qedr_create_cq_uresp uresp;
int rc;
memset(&uresp, 0, sizeof(uresp));
- uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.db_offset = db_offset;
uresp.icid = cq->icid;
+ if (cq->q.db_mmap_entry)
+ uresp.db_rec_addr =
+ rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
@@ -694,10 +713,58 @@ static inline int qedr_align_cq_entries(int entries)
return aligned_size / QEDR_CQE_SIZE;
}
+static int qedr_init_user_db_rec(struct ib_udata *udata,
+ struct qedr_dev *dev, struct qedr_userq *q,
+ bool requires_db_rec)
+{
+ struct qedr_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ struct qedr_user_mmap_entry *entry;
+ int rc;
+
+ /* Abort for a non-doorbell user queue (SRQ) or a non-supporting lib */
+ if (!requires_db_rec || !uctx->db_rec)
+ return 0;
+
+ /* Allocate a page for doorbell recovery, add to mmap */
+ q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
+ if (!q->db_rec_data) {
+ DP_ERR(dev, "get_zeroed_page failed\n");
+ return -ENOMEM;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err_free_db_data;
+
+ entry->address = q->db_rec_data;
+ entry->length = PAGE_SIZE;
+ entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
+ rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
+ &entry->rdma_entry,
+ PAGE_SIZE);
+ if (rc)
+ goto err_free_entry;
+
+ q->db_mmap_entry = &entry->rdma_entry;
+
+ return 0;
+
+err_free_entry:
+ kfree(entry);
+
+err_free_db_data:
+ free_page((unsigned long)q->db_rec_data);
+ q->db_rec_data = NULL;
+ return -ENOMEM;
+}
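
The zeroed page allocated here is exported to userspace through the same mmap-entry machinery as the doorbell BAR. The intent is that userspace mirrors every doorbell value into this page so the kernel's doorbell-recovery logic can replay the last value after an error. A hypothetical userspace-side sketch (all names invented for illustration):

	/* uresp.db_rec_addr is the mmap key returned at CQ/QP creation. */
	uint64_t *db_rec = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				MAP_SHARED, cmd_fd, uresp.db_rec_addr);
	if (db_rec == MAP_FAILED)
		return errno;

	*db_rec = db_value;              /* mirror the value for recovery */
	mmio_write64(db_addr, db_value); /* then ring the real doorbell */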
+
static inline int qedr_init_user_queue(struct ib_udata *udata,
struct qedr_dev *dev,
struct qedr_userq *q, u64 buf_addr,
- size_t buf_len, int access, int dmasync,
+ size_t buf_len, bool requires_db_rec,
+ int access,
int alloc_and_init)
{
u32 fw_pages;
@@ -705,7 +772,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
+ q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -735,7 +802,8 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
}
}
- return 0;
+ /* Set up the mmap entry for the page where userspace stores doorbell data for recovery */
+ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
err0:
ib_umem_release(q->umem);
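
Interleaved with the doorbell work, this series also picks up the ib_umem_get() API change that removed the unused dmasync argument; every caller below changes mechanically. Before/after, as an illustration only:

	/* old: trailing dmasync flag */
	umem = ib_umem_get(udata, addr, len, IB_ACCESS_LOCAL_WRITE, 0);

	/* new: dmasync is gone from the signature */
	umem = ib_umem_get(udata, addr, len, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);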
@@ -821,6 +889,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int entries = attr->cqe;
struct qedr_cq *cq = get_qedr_cq(ibcq);
int chain_entries;
+ u32 db_offset;
int page_cnt;
u64 pbl_ptr;
u16 icid;
@@ -840,8 +909,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ /* calc db offset. user will add DPI base, kernel will add db addr */
+ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create cq: problem copying data from user space\n");
goto err0;
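
Bounding the copy by udata->inlen is the standard way to stay compatible with an older userspace that passes a shorter request structure: only the bytes the user actually supplied are read, and the remaining fields keep their zeroed defaults. As a sketch:

	struct qedr_create_cq_ureq ureq = {};

	/* Read no more than userspace actually provided. */
	if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen)))
		return -EFAULT;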
@@ -856,7 +929,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE, 1,
+ ureq.len, true, IB_ACCESS_LOCAL_WRITE,
1);
if (rc)
goto err0;
@@ -865,6 +938,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
page_cnt = cq->q.pbl_info.num_pbes;
cq->ibcq.cqe = chain_entries;
+ cq->q.db_addr = ctx->dpi_addr + db_offset;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -876,7 +950,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
sizeof(union rdma_cqe),
&cq->pbl, NULL);
if (rc)
- goto err1;
+ goto err0;
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
@@ -888,21 +962,28 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
if (rc)
- goto err2;
+ goto err1;
cq->icid = icid;
cq->sig = QEDR_CQ_MAGIC_NUMBER;
spin_lock_init(&cq->cq_lock);
if (udata) {
- rc = qedr_copy_cq_uresp(dev, cq, udata);
+ rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
+ if (rc)
+ goto err2;
+
+ rc = qedr_db_recovery_add(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data,
+ DB_REC_WIDTH_64B,
+ DB_REC_USER);
if (rc)
- goto err3;
+ goto err2;
+
} else {
/* Generate doorbell address. */
- cq->db_addr = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
cq->db.data.icid = cq->icid;
+ cq->db_addr = dev->db_addr + db_offset;
cq->db.data.params = DB_AGG_CMD_SET <<
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
@@ -912,6 +993,11 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->latest_cqe = NULL;
consume_cqe(cq);
cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ if (rc)
+ goto err2;
}
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -920,18 +1006,19 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
-err3:
+err2:
destroy_iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
&destroy_oparams);
-err2:
- if (udata)
- qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
- else
- dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
- if (udata)
+ if (udata) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+ if (cq->q.db_mmap_entry)
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ } else {
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
err0:
return -EINVAL;
}
@@ -962,8 +1049,10 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->destroyed = 1;
/* GSIs CQs are handled by driver, so they don't exist in the FW */
- if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
return;
+ }
iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
@@ -972,6 +1061,14 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
if (udata) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+
+ if (cq->q.db_rec_data) {
+ qedr_db_recovery_del(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ }
+ } else {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
}
/* We don't want the IRQ handler to handle a non-existing CQ so we
@@ -1136,8 +1233,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev,
}
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
- struct qedr_create_qp_uresp *uresp,
- struct qedr_qp *qp)
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
{
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
@@ -1150,6 +1247,9 @@ static void qedr_copy_rq_uresp(struct qedr_dev *dev,
}
uresp->rq_icid = qp->icid;
+ if (qp->urq.db_mmap_entry)
+ uresp->rq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
}
static void qedr_copy_sq_uresp(struct qedr_dev *dev,
@@ -1163,22 +1263,26 @@ static void qedr_copy_sq_uresp(struct qedr_dev *dev,
uresp->sq_icid = qp->icid;
else
uresp->sq_icid = qp->icid + 1;
+
+ if (qp->usq.db_mmap_entry)
+ uresp->sq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
- struct qedr_qp *qp, struct ib_udata *udata)
+ struct qedr_qp *qp, struct ib_udata *udata,
+ struct qedr_create_qp_uresp *uresp)
{
- struct qedr_create_qp_uresp uresp;
int rc;
- memset(&uresp, 0, sizeof(uresp));
- qedr_copy_sq_uresp(dev, &uresp, qp);
- qedr_copy_rq_uresp(dev, &uresp, qp);
+ memset(uresp, 0, sizeof(*uresp));
+ qedr_copy_sq_uresp(dev, uresp, qp);
+ qedr_copy_rq_uresp(dev, uresp, qp);
- uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
- uresp.qp_id = qp->qp_id;
+ uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp->qp_id = qp->qp_id;
- rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
@@ -1193,7 +1297,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
- atomic_set(&qp->refcnt, 1);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ kref_init(&qp->refcnt);
+ init_completion(&qp->iwarp_cm_comp);
+ }
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1222,16 +1329,35 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->sq.max_sges, qp->sq_cq->icid);
}
-static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
if (!qp->srq) {
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ qedr_db_recovery_del(dev, qp->sq.db,
+ &qp->sq.db_data);
}
+
+ return rc;
}
static int qedr_check_srq_params(struct qedr_dev *dev,
@@ -1279,19 +1405,19 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
static int qedr_init_srq_user_params(struct ib_udata *udata,
struct qedr_srq *srq,
struct qedr_create_srq_ureq *ureq,
- int access, int dmasync)
+ int access)
{
struct scatterlist *sg;
int rc;
rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
- ureq->srq_len, access, dmasync, 1);
+ ureq->srq_len, false, access, 1);
if (rc)
return rc;
srq->prod_umem =
ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access, dmasync);
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -1381,13 +1507,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
hw_srq->max_sges = init_attr->attr.max_sge;
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create srq: problem copying data from user space\n");
goto err0;
}
- rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
if (rc)
goto err0;
@@ -1570,13 +1697,39 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
&qp->urq.pbl_info, FW_PAGE_SHIFT);
}
-static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
+static void qedr_cleanup_user(struct qedr_dev *dev,
+ struct qedr_ucontext *ctx,
+ struct qedr_qp *qp)
{
ib_umem_release(qp->usq.umem);
qp->usq.umem = NULL;
ib_umem_release(qp->urq.umem);
qp->urq.umem = NULL;
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ kfree(qp->urq.pbl_tbl);
+ }
+
+ if (qp->usq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
+ }
+
+ if (qp->urq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data);
}
static int qedr_create_user_qp(struct qedr_dev *dev,
@@ -1588,27 +1741,30 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_create_qp_uresp uresp;
+ struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
+ qp->create_type = QEDR_QP_CREATE_USER;
memset(&ureq, 0, sizeof(ureq));
- rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
if (rc) {
DP_ERR(dev, "Problem copying data from user space\n");
return rc;
}
- /* SQ - read access only (0), dma sync not required (0) */
+ /* SQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, 0, 0, alloc_and_init);
+ ureq.sq_len, true, 0, alloc_and_init);
if (rc)
return rc;
if (!qp->srq) {
- /* RQ - read access only (0), dma sync not required (0) */
+ /* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0, alloc_and_init);
+ ureq.rq_len, true, 0, alloc_and_init);
if (rc)
return rc;
}
@@ -1638,29 +1794,76 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- rc = qedr_copy_qp_uresp(dev, qp, udata);
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+
+ /* db offset was calculated in copy_qp_uresp, now set in the user q */
+ ctx = pd->uctx;
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* calculate the db_rec_db2 data since it is constant, so there
+ * is no need to reflect it from user space
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+ }
+
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
if (rc)
goto err;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
qedr_qp_user_print(dev, qp);
- return 0;
+ return rc;
err:
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
if (rc)
DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
err1:
- qedr_cleanup_user(dev, qp);
+ qedr_cleanup_user(dev, ctx, qp);
return rc;
}
-static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
@@ -1668,6 +1871,19 @@ static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
qp->rq.iwarp_db2_data.data.icid = qp->icid;
qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
}
static int
@@ -1715,8 +1931,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_roce_db_info(dev, qp);
- return rc;
+ return qedr_set_roce_db_info(dev, qp);
}
static int
@@ -1774,8 +1989,7 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_iwarp_db_info(dev, qp);
- return rc;
+ return qedr_set_iwarp_db_info(dev, qp);
err:
dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -1790,6 +2004,20 @@ static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
kfree(qp->rqe_wr_id);
+
+ /* The GSI QP is not registered with the db recovery mechanism, so there is nothing to delete */
+ if (qp->qp_type == IB_QPT_GSI)
+ return;
+
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
+
+ if (!qp->srq) {
+ qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data);
+ }
}
static int qedr_create_kernel_qp(struct qedr_dev *dev,
@@ -1805,6 +2033,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_entries;
memset(&in_params, 0, sizeof(in_params));
+ qp->create_type = QEDR_QP_CREATE_KERNEL;
/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
* the ring. The ring should allow at least a single WR, even if the
@@ -1918,7 +2147,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
- rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
goto err;
}
@@ -2429,7 +2658,10 @@ err:
static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
struct ib_udata *udata)
{
- int rc = 0;
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
if (qp->qp_type != IB_QPT_GSI) {
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -2437,8 +2669,8 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
return rc;
}
- if (udata)
- qedr_cleanup_user(dev, qp);
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
else
qedr_cleanup_kernel(dev, qp);
@@ -2467,34 +2699,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
}
} else {
- /* Wait for the connect/accept to complete */
- if (qp->ep) {
- int wait_count = 1;
-
- while (qp->ep->during_connect) {
- DP_DEBUG(dev, QEDR_MSG_QP,
- "Still in during connect/accept\n");
-
- msleep(100);
- if (wait_count++ > 200) {
- DP_NOTICE(dev,
- "during connect timeout\n");
- break;
- }
- }
- }
+ /* If connection establishment started, the WAIT_FOR_CONNECT
+ * bit will be on and we need to wait for the establishment
+ * to complete before destroying the QP.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
+
+ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
+ * bit will be on, and we need to wait for the disconnect to
+ * complete before continuing. We can use the same completion,
+ * iwarp_cm_comp, since this is the only place that waits for
+ * this completion and it is sequential. In addition,
+ * disconnect can't occur before the connection is fully
+ * established, therefore if WAIT_FOR_DISCONNECT is on it
+ * means WAIT_FOR_CONNECT is also on and the completion for
+ * CONNECT already occurred.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
}
if (qp->qp_type == IB_QPT_GSI)
qedr_destroy_gsi_qp(dev);
+ /* We need to remove the entry from the xarray before we release the
+ * qp_id to avoid a race where the qp_id is reallocated and xa_insert
+ * fails.
+ */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ xa_erase(&dev->qps, qp->qp_id);
+
qedr_free_qp_resources(dev, qp, udata);
- if (atomic_dec_and_test(&qp->refcnt) &&
- rdma_protocol_iwarp(&dev->ibdev, 1)) {
- xa_erase_irq(&dev->qps, qp->qp_id);
- kfree(qp);
- }
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+
return 0;
}
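
The destroy path above only works because the connection-manager callbacks perform the mirror-image operation on the same bit and completion: whichever side loses the test_and_set_bit() race hands off to the other. A simplified sketch of the CM side, with invented my_cm_* names (the real handlers live in qedr_iw_cm.c):

	static int my_cm_connect_start(struct qedr_qp *qp)
	{
		/* Destroy already set the bit: don't start establishment. */
		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
				     &qp->iwarp_cm_flags))
			return -EINVAL;
		return 0;
	}

	static void my_cm_connect_done(struct qedr_qp *qp)
	{
		/* Wake a qedr_destroy_qp() blocked in wait_for_completion(). */
		complete(&qp->iwarp_cm_comp);
	}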
@@ -2597,7 +2839,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -2673,8 +2915,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+ if (mr->type != QEDR_MR_DMA)
+ free_mr_info(dev, &mr->info);
/* it could be user registered memory. */
ib_umem_release(mr->umem);
@@ -4106,19 +4348,10 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *mad_hdr,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index)
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
{
- struct qedr_dev *dev = get_qedr_dev(ibdev);
-
- DP_DEBUG(dev, QEDR_MSG_GSI,
- "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
- mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
- mad_hdr->class_specific, mad_hdr->class_version,
- mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 9aaa90283d6e..18027844eb87 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -35,8 +35,6 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int qedr_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid);
@@ -46,7 +44,8 @@ int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
-int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
@@ -93,10 +92,9 @@ int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index);
+ const struct ib_grh *in_grh, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 531d8a1db2c3..ca5ea734e3d0 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1417,7 +1417,6 @@ static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
*
* The exact combo of LEDs if on is true is determined by looking
* at the ibcstatus.
-
* These LEDs indicate the physical and logical state of IB link.
* For this chip (at least with recommended board pinouts), LED1
* is Yellow (logical state) and LED2 is Green (physical state),
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index f92faf5ec369..79bb83222e8d 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2098,8 +2098,6 @@ static int cc_get_classportinfo(struct ib_cc_mad *ccp,
struct ib_cc_classportinfo_attr *p =
(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->base_version = 1;
p->class_version = 1;
p->cap_mask = 0;
@@ -2120,8 +2118,6 @@ static int cc_get_congestion_info(struct ib_cc_mad *ccp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
@@ -2138,8 +2134,6 @@ static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_cc_congestion_entry_shadow *entries;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
entries = ppd->congestion_entries_shadow->entries;
@@ -2176,8 +2170,6 @@ static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
max_cct_block =
@@ -2296,18 +2288,11 @@ bail:
return reply_failure((struct ib_smp *) ccp);
}
-static int check_cc_key(struct qib_ibport *ibp,
- struct ib_cc_mad *ccp, int mad_flags)
-{
- return 0;
-}
-
static int process_cc(struct ib_device *ibdev, int mad_flags,
u8 port, const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
int ret;
*out_mad = *in_mad;
@@ -2318,10 +2303,6 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
goto bail;
}
- ret = check_cc_key(ibp, ccp, mad_flags);
- if (ret)
- goto bail;
-
switch (ccp->method) {
case IB_MGMT_METHOD_GET:
switch (ccp->attr_id) {
@@ -2405,28 +2386,21 @@ bail:
*/
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
+ switch (in->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_subn(ibdev, mad_flags, port, in, out);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in_mad, out_mad);
+ ret = process_perf(ibdev, port, in, out);
goto bail;
case IB_MGMT_CLASS_CONG_MGMT:
@@ -2435,7 +2409,7 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_cc(ibdev, mad_flags, port, in, out);
goto bail;
default:
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3926be78036e..568b21eb6ea1 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->show)
+ return -EIO;
+
return pattr->show(ppd, buf);
}
@@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->store)
+ return -EIO;
+
return pattr->store(ppd, buf, len);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 17bdf8acee2f..8bf414b47b96 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -245,9 +245,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 7800e6930502..a26a4fd86bf4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -136,7 +136,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
goto err_cq;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index 8f9749d54688..86a6c054ea26 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -58,7 +58,8 @@
#define PVRDMA_ROCEV1_VERSION 17
#define PVRDMA_ROCEV2_VERSION 18
#define PVRDMA_PPN64_VERSION 19
-#define PVRDMA_VERSION PVRDMA_PPN64_VERSION
+#define PVRDMA_QPHANDLE_VERSION 20
+#define PVRDMA_VERSION PVRDMA_QPHANDLE_VERSION
#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1
@@ -581,6 +582,17 @@ struct pvrdma_cmd_create_qp_resp {
u32 max_inline_data;
};
+struct pvrdma_cmd_create_qp_resp_v2 {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
struct pvrdma_cmd_modify_qp {
struct pvrdma_cmd_hdr hdr;
u32 qp_handle;
@@ -663,6 +675,7 @@ union pvrdma_cmd_resp {
struct pvrdma_cmd_create_cq_resp create_cq_resp;
struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
struct pvrdma_cmd_query_qp_resp query_qp_resp;
struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
struct pvrdma_cmd_create_srq_resp create_srq_resp;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index f3a3d22ee8d7..c61e665ff261 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags, 0);
+ umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index bca6a58a442e..f15809c28f67 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -52,6 +52,9 @@
#include "pvrdma.h"
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp);
+
static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
struct pvrdma_cq **recv_cq)
{
@@ -195,7 +198,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
struct pvrdma_create_qp ucmd;
+ struct pvrdma_create_qp_resp qp_resp = {};
unsigned long flags;
int ret;
bool is_srq = !!init_attr->srq;
@@ -260,10 +265,19 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
+ /* Does userspace support receiving both the qpn and the qp handle? */
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+ udata->outlen < sizeof(qp_resp)) {
+ dev_warn(&dev->pdev->dev,
+ "create queuepair not supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_qp;
+ }
+
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -275,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
- ucmd.sbuf_size, 0, 0);
+ ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
ib_umem_release(qp->rumem);
@@ -379,13 +393,33 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
- qp->qp_handle = resp->qpn;
qp->port = init_attr->port_num;
- qp->ibqp.qp_num = resp->qpn;
+
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+ qp->ibqp.qp_num = resp_v2->qpn;
+ qp->qp_handle = resp_v2->qp_handle;
+ } else {
+ qp->ibqp.qp_num = resp->qpn;
+ qp->qp_handle = resp->qpn;
+ }
+
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+ if (udata) {
+ qp_resp.qpn = qp->ibqp.qp_num;
+ qp_resp.qp_handle = qp->qp_handle;
+
+ if (ib_copy_to_udata(udata, &qp_resp,
+ min(udata->outlen, sizeof(qp_resp)))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ __pvrdma_destroy_qp(dev, qp);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
return &qp->ibqp;
err_pdir:
@@ -400,27 +434,15 @@ err_qp:
return ERR_PTR(ret);
}
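
Two compatibility checks cooperate here: creation is refused outright (EOPNOTSUPP) when the device is new enough to return separate handles but userspace passed a response buffer too small to receive them, and the eventual copy-back is still clamped to udata->outlen so nothing past the user's buffer is ever written. The copy-back pattern, schematically:

	struct pvrdma_create_qp_resp qp_resp = {};

	qp_resp.qpn = qp->ibqp.qp_num;
	qp_resp.qp_handle = qp->qp_handle;
	/* Never write more than userspace allocated. */
	if (ib_copy_to_udata(udata, &qp_resp,
			     min(udata->outlen, sizeof(qp_resp))))
		return -EINVAL;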
-static void pvrdma_free_qp(struct pvrdma_qp *qp)
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
{
+ unsigned long flags;
struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
- struct pvrdma_cq *scq;
- struct pvrdma_cq *rcq;
- unsigned long flags, scq_flags, rcq_flags;
-
- /* In case cq is polling */
- get_cqs(qp, &scq, &rcq);
- pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
- _pvrdma_flush_cqe(qp, scq);
- if (scq != rcq)
- _pvrdma_flush_cqe(qp, rcq);
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle] = NULL;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
- pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
@@ -435,34 +457,71 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
atomic_dec(&dev->num_qps);
}
-/**
- * pvrdma_destroy_qp - destroy a queue pair
- * @qp: the queue pair to destroy
- * @udata: user data or null for kernel object
- *
- * @return: 0 on success.
- */
-int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* In case cq is polling */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ /*
+ * We're now unlocking the CQs before clearing out the qp handle; this
+ * should still be safe. We have destroyed the backend QP and flushed
+ * the CQEs, so there should be no other completions for this QP.
+ */
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_free_qp(qp);
+}
+
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+ u32 qp_handle)
{
- struct pvrdma_qp *vqp = to_vqp(qp);
union pvrdma_cmd_req req;
struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
int ret;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
- cmd->qp_handle = vqp->qp_handle;
+ cmd->qp_handle = qp_handle;
- ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
if (ret < 0)
- dev_warn(&to_vdev(qp->device)->pdev->dev,
+ dev_warn(&dev->pdev->dev,
"destroy queuepair failed, error: %d\n", ret);
+}
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+
+ _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
pvrdma_free_qp(vqp);
return 0;
}
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp)
+{
+ _pvrdma_destroy_qp_work(dev, qp->qp_handle);
+ _pvrdma_free_qp(qp);
+}
+
/**
* pvrdma_modify_qp - modify queue pair attributes
* @ibqp: the queue pair
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 36cdfbdbd325..98c8be71d91d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index fe99da0ff060..ee02c6176007 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -129,7 +129,6 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
* rvt_destroy_ah - Destroy an address handle
* @ibah: address handle
* @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
- * @udata: user data or NULL for kernel object
*
* Return: 0 on success
*/
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index a85571a4cf57..13d7f66eadab 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -552,7 +552,6 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
/**
* rvt_driver_cq_init - Init cq resources on behalf of driver
- * @rdi: rvt dev structure
*
* Return: 0 on success
*/
@@ -568,7 +567,6 @@ int rvt_driver_cq_init(void)
/**
* rvt_cq_exit - tear down cq reources
- * @rdi: rvt dev structure
*/
void rvt_cq_exit(void)
{
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index a6a39f01dca3..b9a76bf74857 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0b0a241c57ff..3cdf75d0c7a4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2563,10 +2563,9 @@ void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
- * rvt_add_rnr_timer - add/start an rnr timer
- * @qp - the QP
- * @aeth - aeth of RNR timeout, simulated aeth for loopback
- * add an rnr timer on the QP
+ * rvt_add_rnr_timer - add/start an rnr timer on the QP
+ * @qp: the QP
+ * @aeth: aeth of RNR timeout, simulated aeth for loopback
*/
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
@@ -2583,7 +2582,7 @@ EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
* rvt_stop_rc_timers - stop all timers
- * @qp - the QP
+ * @qp: the QP
* stop any pending timers
*/
void rvt_stop_rc_timers(struct rvt_qp *qp)
@@ -2617,7 +2616,7 @@ static void rvt_stop_rnr_timer(struct rvt_qp *qp)
/**
* rvt_del_timers_sync - wait for any timeout routines to exit
- * @qp - the QP
+ * @qp: the QP
*/
void rvt_del_timers_sync(struct rvt_qp *qp)
{
@@ -2626,7 +2625,7 @@ void rvt_del_timers_sync(struct rvt_qp *qp)
}
EXPORT_SYMBOL(rvt_del_timers_sync);
-/**
+/*
* This is called from s_timer for missing responses.
*/
static void rvt_rc_timeout(struct timer_list *t)
@@ -2676,12 +2675,13 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
* rvt_qp_iter_init - initial for QP iteration
* @rdi: rvt devinfo
* @v: u64 value
+ * @cb: user-defined callback
*
* This returns an iterator suitable for iterating QPs
* in the system.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* @cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
@@ -2712,7 +2712,7 @@ EXPORT_SYMBOL(rvt_qp_iter_init);
/**
* rvt_qp_iter_next - return the next QP in iter
- * @iter - the iterator
+ * @iter: the iterator
*
* Fine grained QP iterator suitable for use
* with debugfs seq_file mechanisms.
@@ -2775,14 +2775,14 @@ EXPORT_SYMBOL(rvt_qp_iter_next);
/**
* rvt_qp_iter - iterate all QPs
- * @rdi - rvt devinfo
- * @v - a 64 bit value
- * @cb - a callback
+ * @rdi: rvt devinfo
+ * @v: a 64-bit value
+ * @cb: a callback
*
* This provides a way for iterating all QPs.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 18da1e1ea979..986265ad6e79 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -683,9 +683,10 @@ EXPORT_SYMBOL(rvt_unregister_device);
/**
* rvt_init_port - init internal data for driver port
- * @rdi: rvt dev strut
+ * @rdi: rvt_dev_info struct
* @port: rvt port
* @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey_table for @port
*
* Keep track of a list of ports. No need to have a detach port.
* They persist until the driver goes away.
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index a8c11b5e1e94..0946a301a5c5 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
- rxe->attr.fw_ver = RXE_FW_VER;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
- rxe->attr.vendor_id = RXE_VENDOR_ID;
- rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
- rxe->attr.hw_ver = RXE_HW_VER;
rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
@@ -94,22 +90,13 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_mr = RXE_MAX_MR;
rxe->attr.max_pd = RXE_MAX_PD;
rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
- rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
- rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
rxe->attr.atomic_cap = IB_ATOMIC_HCA;
- rxe->attr.max_ee = RXE_MAX_EE;
- rxe->attr.max_rdd = RXE_MAX_RDD;
- rxe->attr.max_mw = RXE_MAX_MW;
- rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
- rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
rxe->attr.max_ah = RXE_MAX_AH;
- rxe->attr.max_fmr = RXE_MAX_FMR;
- rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
rxe->attr.max_srq = RXE_MAX_SRQ;
rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index ea6a819b7167..35a2baf2f364 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access, 0);
+ umem = ib_umem_get(udata, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index fe5207386700..353c6668249e 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -60,12 +60,8 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
/* default/initial rxe device parameter settings */
enum rxe_device_param {
- RXE_FW_VER = 0,
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_VENDOR_ID = 0,
- RXE_VENDOR_PART_ID = 0,
- RXE_HW_VER = 0,
RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
RXE_MAX_INLINE_DATA = 400,
@@ -87,21 +83,12 @@ enum rxe_device_param {
RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
- RXE_MAX_EE_RD_ATOM = 0,
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
- RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_MAX_EE = 0,
- RXE_MAX_RDD = 0,
- RXE_MAX_MW = 0,
- RXE_MAX_RAW_IPV6_QP = 0,
- RXE_MAX_RAW_ETHY_QP = 0,
RXE_MAX_MCAST_GRP = 8192,
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
RXE_MAX_AH = 100,
- RXE_MAX_FMR = 0,
- RXE_MAX_MAP_PER_FMR = 0,
RXE_MAX_SRQ = 960,
RXE_MAX_SRQ_WR = 0x4000,
RXE_MIN_SRQ_WR = 1,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 623129f27f5a..9dd4bd7aea92 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,6 +106,10 @@ static int rxe_modify_device(struct ib_device *dev,
{
struct rxe_dev *rxe = to_rdev(dev);
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC))
+ return -EOPNOTSUPP;
+
if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -1171,6 +1175,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dev->dev.dma_parms = &rxe->dma_parms;
+ rxe->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
dma_coerce_mask_and_coherent(&dev->dev,
dma_get_required_mask(&dev->dev));
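
Without a dma_parms structure the DMA core falls back to a 64K maximum segment size, and the DMA-API debug code warns when an SG segment exceeds what the device claims to support; pointing dma_parms at a driver-owned structure with a 2G limit effectively lifts that cap for these software devices. What a consumer observes (sketch, assuming the core's SZ_64K fallback):

	#include <linux/dma-mapping.h>

	unsigned int seg = dma_get_max_seg_size(&dev->dev);
	/* before this change: seg == SZ_64K (core fallback)
	 * after this change:  seg == SZ_2G
	 */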
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5c4b2239129c..95834206c80c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -384,6 +384,7 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
+ struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index dba4535494ab..b939f489cd46 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -70,6 +70,7 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
+ struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;
@@ -98,18 +99,9 @@ struct siw_device {
struct work_struct netdev_down;
};
-struct siw_uobj {
- void *addr;
- u32 size;
-};
-
struct siw_ucontext {
struct ib_ucontext base_ucontext;
struct siw_device *sdev;
-
- /* xarray of user mappable objects */
- struct xarray xa;
- u32 uobj_nextkey;
};
/*
@@ -149,8 +141,6 @@ struct siw_pbl {
struct siw_pble pbe[1];
};
-struct siw_mr;
-
/*
* Generic memory representation for registered siw memory.
* Memory lookup always via higher 24 bit of STag (STag index).
@@ -220,7 +210,7 @@ struct siw_cq {
u32 cq_get;
u32 num_cqe;
bool kernel_verbs;
- u32 xa_cq_index; /* mmap information for CQE array */
+ struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
u32 id; /* For debugging only */
};
@@ -263,7 +253,7 @@ struct siw_srq {
u32 rq_put;
u32 rq_get;
u32 num_rqe; /* max # of wqe's allowed */
- u32 xa_srq_index; /* mmap information for SRQ array */
+ struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
char armed; /* inform user if limit hit */
char kernel_verbs; /* '1' if kernel client */
};
@@ -477,8 +467,8 @@ struct siw_qp {
u8 layer : 4, etype : 4;
u8 ecode;
} term_info;
- u32 xa_sq_index; /* mmap information for SQE array */
- u32 xa_rq_index; /* mmap information for RQE array */
+ struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
+ struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
struct rcu_head rcu;
};
@@ -503,6 +493,11 @@ struct iwarp_msg_info {
int (*rx_data)(struct siw_qp *qp);
};
+struct siw_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ void *address;
+};
+
/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
@@ -607,6 +602,12 @@ static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
return container_of(base_mr, struct siw_mr, base_mr);
}
+static inline struct siw_user_mmap_entry *
+to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
+{
+ return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
+}
+
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
struct siw_qp *qp;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 8c1931a57f4a..3bccfef40e7e 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1373,22 +1373,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
rv = -EINVAL;
goto error;
}
- if (v4)
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- pd_len,
- &((struct sockaddr_in *)(laddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(laddr))->sin_port),
- &((struct sockaddr_in *)(raddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(raddr))->sin_port));
- else
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- pd_len,
- &((struct sockaddr_in6 *)(laddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
- &((struct sockaddr_in6 *)(raddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(raddr))->sin6_port));
+ siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
+ raddr);
rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
@@ -1867,14 +1853,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
- if (addr_family == AF_INET)
- siw_dbg(id->device, "Listen at laddr %pI4 %u\n",
- &(((struct sockaddr_in *)laddr)->sin_addr),
- ((struct sockaddr_in *)laddr)->sin_port);
- else
- siw_dbg(id->device, "Listen at laddr %pI6 %u\n",
- &(((struct sockaddr_in6 *)laddr)->sin6_addr),
- ((struct sockaddr_in6 *)laddr)->sin6_port);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
return 0;
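
The %pISp format specifier takes a struct sockaddr pointer and renders either an IPv4 or an IPv6 address together with its port, which is what lets the per-family branches above collapse into a single call. A minimal illustration:

	struct sockaddr_storage ss;	/* filled in by the caller */

	/* Prints e.g. "peer 192.0.2.1:4791" or "peer [2001:db8::1]:4791" */
	pr_info("peer %pISp\n", &ss);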
@@ -1935,7 +1914,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
/*
* siw_create_listen - Create resources for a listener's IWCM ID @id
*
- * Listens on the socket addresses id->local_addr and id->remote_addr.
+ * Listens on the socket address id->local_addr.
*
* If the listener's @id provides a specific local IP address, at most one
* listening socket is created and associated with @id.
@@ -1959,7 +1938,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
*/
if (id->local_addr.ss_family == AF_INET) {
struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr, *s_raddr;
+ struct sockaddr_in s_laddr;
const struct in_ifaddr *ifa;
if (!in_dev) {
@@ -1967,12 +1946,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
goto out;
}
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
- s_raddr = (struct sockaddr_in *)&id->remote_addr;
- siw_dbg(id->device,
- "laddr %pI4:%d, raddr %pI4:%d\n",
- &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
- &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
+ siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
rtnl_lock();
in_dev_for_each_ifa_rtnl(ifa, in_dev) {
@@ -1992,17 +1967,13 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
} else if (id->local_addr.ss_family == AF_INET6) {
struct inet6_dev *in6_dev = in6_dev_get(dev);
struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
- *s_raddr = &to_sockaddr_in6(id->remote_addr);
+ struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
if (!in6_dev) {
rv = -ENODEV;
goto out;
}
- siw_dbg(id->device,
- "laddr %pI6:%d, raddr %pI6:%d\n",
- &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
- &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
+ siw_dbg(id->device, "laddr %pISp\n", s_laddr);
rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 05a92f997f60..c147f0613d95 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_netlink.h>
@@ -248,24 +249,6 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
return NULL;
}
-static void siw_verbs_sq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_sq_flush(qp);
- up_write(&qp->state_lock);
-}
-
-static void siw_verbs_rq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_rq_flush(qp);
- up_write(&qp->state_lock);
-}
-
static const struct ib_device_ops siw_device_ops = {
.owner = THIS_MODULE,
.uverbs_abi_ver = SIW_ABI_VERSION,
@@ -284,8 +267,6 @@ static const struct ib_device_ops siw_device_ops = {
.destroy_cq = siw_destroy_cq,
.destroy_qp = siw_destroy_qp,
.destroy_srq = siw_destroy_srq,
- .drain_rq = siw_verbs_rq_flush,
- .drain_sq = siw_verbs_sq_flush,
.get_dma_mr = siw_get_dma_mr,
.get_port_immutable = siw_get_port_immutable,
.iw_accept = siw_accept,
@@ -298,6 +279,7 @@ static const struct ib_device_ops siw_device_ops = {
.iw_rem_ref = siw_qp_put_ref,
.map_mr_sg = siw_map_mr_sg,
.mmap = siw_mmap,
+ .mmap_free = siw_mmap_free,
.modify_qp = siw_verbs_modify_qp,
.modify_srq = siw_modify_srq,
.poll_cq = siw_poll_cq,
@@ -350,15 +332,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->netdev = netdev;
if (netdev->type != ARPHRD_LOOPBACK) {
- memcpy(&base_dev->node_guid, netdev->dev_addr, 6);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ netdev->dev_addr);
} else {
/*
* The loopback device does not have a HW address,
* but connection management lib expects gid != 0
*/
- size_t gidlen = min_t(size_t, strlen(base_dev->name), 6);
+ size_t len = min_t(size_t, strlen(base_dev->name), 6);
+ char addr[6] = { };
- memcpy(&base_dev->node_guid, base_dev->name, gidlen);
+ memcpy(addr, base_dev->name, len);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ addr);
}
base_dev->uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -397,6 +383,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
base_dev->dev.dma_ops = &dma_virt_ops;
+ base_dev->dev.dma_parms = &sdev->dma_parms;
+ sdev->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
ib_set_device_ops(base_dev, &siw_device_ops);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index b18a677832e1..5fd6d6499b3d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -34,44 +34,19 @@ static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
[IB_QPS_ERR] = "ERR"
};
-static u32 siw_create_uobj(struct siw_ucontext *uctx, void *vaddr, u32 size)
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct siw_uobj *uobj;
- struct xa_limit limit = XA_LIMIT(0, SIW_UOBJ_MAX_KEY);
- u32 key;
+ struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);
- uobj = kzalloc(sizeof(*uobj), GFP_KERNEL);
- if (!uobj)
- return SIW_INVAL_UOBJ_KEY;
-
- if (xa_alloc_cyclic(&uctx->xa, &key, uobj, limit, &uctx->uobj_nextkey,
- GFP_KERNEL) < 0) {
- kfree(uobj);
- return SIW_INVAL_UOBJ_KEY;
- }
- uobj->size = PAGE_ALIGN(size);
- uobj->addr = vaddr;
-
- return key;
-}
-
-static struct siw_uobj *siw_get_uobj(struct siw_ucontext *uctx,
- unsigned long off, u32 size)
-{
- struct siw_uobj *uobj = xa_load(&uctx->xa, off);
-
- if (uobj && uobj->size == size)
- return uobj;
-
- return NULL;
+ kfree(entry);
}
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
struct siw_ucontext *uctx = to_siw_ctx(ctx);
- struct siw_uobj *uobj;
- unsigned long off = vma->vm_pgoff;
- int size = vma->vm_end - vma->vm_start;
+ size_t size = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct siw_user_mmap_entry *entry;
int rv = -EINVAL;
/*
@@ -79,18 +54,25 @@ int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
*/
if (vma->vm_start & (PAGE_SIZE - 1)) {
pr_warn("siw: mmap not page aligned\n");
- goto out;
+ return -EINVAL;
}
- uobj = siw_get_uobj(uctx, off, size);
- if (!uobj) {
- siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %u\n",
- off, size);
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
+ if (!rdma_entry) {
+ siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
+ vma->vm_pgoff, size);
+ return -EINVAL;
+ }
+ entry = to_siw_mmap_entry(rdma_entry);
+
+ rv = remap_vmalloc_range(vma, entry->address, 0);
+ if (rv) {
+ pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
+ size);
goto out;
}
- rv = remap_vmalloc_range(vma, uobj->addr, 0);
- if (rv)
- pr_warn("remap_vmalloc_range failed: %lu, %u\n", off, size);
out:
+ rdma_user_mmap_entry_put(rdma_entry);
+
return rv;
}
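
With the driver-private xarray replaced by the core rdma_user_mmap_entry machinery, the key returned to userspace is whatever rdma_user_mmap_get_offset() computed, and userspace hands it back unchanged as the mmap offset on the uverbs file. A hedged sketch of the userspace counterpart (the fd and key names are illustrative):

#include <stdint.h>
#include <sys/mman.h>

/* Map a queue the driver exposed: sq_key comes back in the
 * create-QP response and len must match the length registered via
 * rdma_user_mmap_entry_insert() on the kernel side.
 */
static void *map_queue(int cmd_fd, uint64_t sq_key, size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		    cmd_fd, (off_t)sq_key);
}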
@@ -105,8 +87,6 @@ int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
rv = -ENOMEM;
goto err_out;
}
- xa_init_flags(&ctx->xa, XA_FLAGS_ALLOC);
- ctx->uobj_nextkey = 0;
ctx->sdev = sdev;
uresp.dev_id = sdev->vendor_part_id;
@@ -135,19 +115,7 @@ err_out:
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
- void *entry;
- unsigned long index;
- /*
- * Make sure all user mmap objects are gone. Since QP, CQ
- * and SRQ destroy routines destroy related objects, nothing
- * should be found here.
- */
- xa_for_each(&uctx->xa, index, entry) {
- kfree(xa_erase(&uctx->xa, index));
- pr_warn("siw: dropping orphaned uobj at %lu\n", index);
- }
- xa_destroy(&uctx->xa);
atomic_dec(&uctx->sdev->num_ctx);
}
@@ -293,6 +261,33 @@ void siw_qp_put_ref(struct ib_qp *base_qp)
siw_qp_put(to_siw_qp(base_qp));
}
+static struct rdma_user_mmap_entry *
+siw_mmap_entry_insert(struct siw_ucontext *uctx,
+ void *address, size_t length,
+ u64 *offset)
+{
+ struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int rv;
+
+ *offset = SIW_INVAL_UOBJ_KEY;
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+
+ rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
+ &entry->rdma_entry,
+ length);
+ if (rv) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
/*
* siw_create_qp()
*
@@ -317,6 +312,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
+ size_t length;
siw_dbg(base_dev, "create new QP\n");
@@ -380,8 +376,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->orq_lock);
qp->kernel_verbs = !udata;
- qp->xa_sq_index = SIW_INVAL_UOBJ_KEY;
- qp->xa_rq_index = SIW_INVAL_UOBJ_KEY;
rv = siw_qp_add(sdev, qp);
if (rv)
@@ -458,22 +452,27 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
uresp.qp_id = qp_id(qp);
if (qp->sendq) {
- qp->xa_sq_index =
- siw_create_uobj(uctx, qp->sendq,
- num_sqe * sizeof(struct siw_sqe));
+ length = num_sqe * sizeof(struct siw_sqe);
+ qp->sq_entry =
+ siw_mmap_entry_insert(uctx, qp->sendq,
+ length, &uresp.sq_key);
+ if (!qp->sq_entry) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
+
if (qp->recvq) {
- qp->xa_rq_index =
- siw_create_uobj(uctx, qp->recvq,
- num_rqe * sizeof(struct siw_rqe));
- }
- if (qp->xa_sq_index == SIW_INVAL_UOBJ_KEY ||
- qp->xa_rq_index == SIW_INVAL_UOBJ_KEY) {
- rv = -ENOMEM;
- goto err_out_xa;
+ length = num_rqe * sizeof(struct siw_rqe);
+ qp->rq_entry =
+ siw_mmap_entry_insert(uctx, qp->recvq,
+ length, &uresp.rq_key);
+ if (!qp->rq_entry) {
+ uresp.sq_key = SIW_INVAL_UOBJ_KEY;
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
- uresp.sq_key = qp->xa_sq_index << PAGE_SHIFT;
- uresp.rq_key = qp->xa_rq_index << PAGE_SHIFT;
if (udata->outlen < sizeof(uresp)) {
rv = -EINVAL;
@@ -501,11 +500,10 @@ err_out:
kfree(siw_base_qp);
if (qp) {
- if (qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
-
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
vfree(qp->sendq);
vfree(qp->recvq);
kfree(qp);
@@ -618,10 +616,10 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->attrs.flags |= SIW_QP_IN_DESTROY;
qp->rx_stream.rx_suspend = 1;
- if (uctx && qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (uctx && qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
down_write(&qp->state_lock);
@@ -685,6 +683,47 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
return bytes;
}
+/* Complete SQ WR's without processing */
+static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct siw_sqe sqe = {};
+ int rv = 0;
+
+ while (wr) {
+ sqe.id = wr->wr_id;
+ sqe.opcode = wr->opcode;
+ rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
+/* Complete RQ WR's without processing */
+static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_rqe rqe = {};
+ int rv = 0;
+
+ while (wr) {
+ rqe.id = wr->wr_id;
+ rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
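
These helpers exist so ib_drain_sq()/ib_drain_rq() can make progress on a QP that is already in ERROR: each drain WR is completed straight to the CQ with the flush status instead of being queued for processing. A sketch of the ULP-side teardown that relies on this, under the usual verbs semantics (error handling omitted):

#include <rdma/ib_verbs.h>

/* Move the QP to ERROR, then block until every outstanding and
 * newly posted WR has been flushed to the CQs. ib_drain_qp() posts
 * one marker WR per queue and waits for its flush completion.
 */
static void ulp_teardown_qp(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	ib_modify_qp(qp, &attr, IB_QP_STATE);
	ib_drain_qp(qp);
}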
/*
* siw_post_send()
*
@@ -703,26 +742,54 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
unsigned long flags;
int rv = 0;
+ if (wr && !qp->kernel_verbs) {
+ siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state);
- return -ENOTCONN;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_sq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. SQ is guaranteed to
+ * be empty, so WR completes in-order.
+ *
+ * Typically triggered by ib_drain_sq().
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state);
- return -ENOTCONN;
- }
- if (wr && !qp->kernel_verbs) {
- siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
spin_lock_irqsave(&qp->sq_lock, flags);
@@ -917,24 +984,54 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
*bad_wr = wr;
return -EOPNOTSUPP; /* what else from errno.h? */
}
+ if (!qp->kernel_verbs) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- return -ENOTCONN;
- }
- if (!qp->kernel_verbs) {
- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_rq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (qp->attrs.state > SIW_QP_STATE_RTS) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. RQ is guaranteed to
+ * be empty, so WR completes in-order.
+ *
+ * Typically triggered by ib_drain_rq().
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
/*
* Serialize potentially multiple producers.
@@ -991,8 +1088,8 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
siw_cq_flush(cq);
- if (ctx && cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
atomic_dec(&sdev->num_cq);
@@ -1029,7 +1126,6 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
size = roundup_pow_of_two(size);
cq->base_cq.cqe = size;
cq->num_cqe = size;
- cq->xa_cq_index = SIW_INVAL_UOBJ_KEY;
if (!udata) {
cq->kernel_verbs = 1;
@@ -1055,16 +1151,17 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
+ size_t length = size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl);
- cq->xa_cq_index =
- siw_create_uobj(ctx, cq->queue,
- size * sizeof(struct siw_cqe) +
- sizeof(struct siw_cq_ctrl));
- if (cq->xa_cq_index == SIW_INVAL_UOBJ_KEY) {
+ cq->cq_entry =
+ siw_mmap_entry_insert(ctx, cq->queue,
+ length, &uresp.cq_key);
+ if (!cq->cq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.cq_key = cq->xa_cq_index << PAGE_SHIFT;
+
uresp.cq_id = cq->id;
uresp.num_cqe = size;
@@ -1085,8 +1182,8 @@ err_out:
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
vfree(cq->queue);
}
atomic_dec(&sdev->num_cq);
@@ -1490,7 +1587,6 @@ int siw_create_srq(struct ib_srq *base_srq,
}
srq->max_sge = attrs->max_sge;
srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
- srq->xa_srq_index = SIW_INVAL_UOBJ_KEY;
srq->limit = attrs->srq_limit;
if (srq->limit)
srq->armed = 1;
@@ -1509,15 +1605,16 @@ int siw_create_srq(struct ib_srq *base_srq,
}
if (udata) {
struct siw_uresp_create_srq uresp = {};
+ size_t length = srq->num_rqe * sizeof(struct siw_rqe);
- srq->xa_srq_index = siw_create_uobj(
- ctx, srq->recvq, srq->num_rqe * sizeof(struct siw_rqe));
-
- if (srq->xa_srq_index == SIW_INVAL_UOBJ_KEY) {
+ srq->srq_entry =
+ siw_mmap_entry_insert(ctx, srq->recvq,
+ length, &uresp.srq_key);
+ if (!srq->srq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.srq_key = srq->xa_srq_index;
+
uresp.num_rqe = srq->num_rqe;
if (udata->outlen < sizeof(uresp)) {
@@ -1536,8 +1633,8 @@ int siw_create_srq(struct ib_srq *base_srq,
err_out:
if (srq->recvq) {
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
}
atomic_dec(&sdev->num_srq);
@@ -1623,9 +1720,8 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
-
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
atomic_dec(&sdev->num_srq);
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 1910869281cb..1a731989fad6 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -83,6 +83,7 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac0583ff280d..e5f438ab716c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2019,6 +2019,15 @@ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
static int ipoib_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
@@ -2045,6 +2054,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_vf_link_state = ipoib_set_vf_link_state,
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2e72fc5af157..3690e28cc7ea 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -646,13 +646,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- shost->virt_boundary_mask = ~MASK_4K;
+ shost->virt_boundary_mask = SZ_4K - 1;
if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -785,7 +786,7 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* iscsi_iser_ep_connect() - Initiate iSER connection establishment
* @shost: scsi_host
* @dst_addr: destination address
- * @non-blocking: indicate if routine can block
+ * @non_blocking: indicate if routine can block
*
* Allocate an iscsi endpoint, an iser_conn structure and bind them.
* After that start RDMA connection establishment via rdma_cm. We
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 52ce63592dcf..029c00163442 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -96,16 +96,12 @@
#define iser_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define SHIFT_4K 12
-#define SIZE_4K (1ULL << SHIFT_4K)
-#define MASK_4K (~(SIZE_4K-1))
-
/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS 1024
#define ISCSI_ISER_DEF_SG_TABLESIZE \
- ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K)
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
/* Maximum support is 16MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K)
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
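
Dropping the private SHIFT_4K/SIZE_4K/MASK_4K trio for the generic SZ_4K and ilog2() keeps the arithmetic identical, since ilog2() collapses to a compile-time constant for a constant argument. A worked check using the defines above:

#include <linux/log2.h>
#include <linux/sizes.h>

/* ilog2(SZ_4K) == 12, so with ISER_DEF_MAX_SECTORS == 1024 and
 * SECTOR_SIZE == 512:
 *   (1024 * 512) >> 12 == 128 default sg-table entries.
 * The old MASK_4K is likewise just ~(SZ_4K - 1).
 */
#define EXAMPLE_DEF_SG_TABLESIZE ((1024 * 512) >> ilog2(SZ_4K))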
@@ -232,15 +228,16 @@ enum iser_desc_type {
* @iser_header: iser header
* @iscsi_header: iscsi header
* @type: command/control/dataout
- * @dam_addr: header buffer dma_address
+ * @dma_addr: header buffer dma_address
* @tx_sg: sg[0] points to iser/iscsi headers
* sg[1] optionally points to immediate data,
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
+ * @cqe: completion handler
* @mapped: Is the task header mapped
- * reg_wr: registration WR
- * send_wr: send WR
- * inv_wr: invalidate WR
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
*/
struct iser_tx_desc {
struct iser_ctrl iser_header;
@@ -267,6 +264,7 @@ struct iser_tx_desc {
* @data: received data segment
* @dma_addr: receive buffer dma address
* @rx_sg: ib_sge of receive buffer
+ * @cqe: completion handler
* @pad: for sense data TODO: Modify to maximum sense length supported
*/
struct iser_rx_desc {
@@ -283,9 +281,9 @@ struct iser_rx_desc {
* struct iser_login_desc - iSER login descriptor
*
* @req: pointer to login request buffer
- * @resp: pointer to login response buffer
+ * @rsp: pointer to login response buffer
* @req_dma: DMA address of login request buffer
- * @rsp_dma: DMA address of login response buffer
+ * @rsp_dma: DMA address of login response buffer
* @sge: IB sge for login post recv
* @cqe: completion handler
*/
@@ -315,12 +313,12 @@ struct iser_comp {
};
/**
- * struct iser_device - Memory registration operations
+ * struct iser_reg_ops - Memory registration operations
* per-device registration schemes
*
* @alloc_reg_res: Allocate registration resources
* @free_reg_res: Free registration resources
- * @fast_reg_mem: Register memory buffers
+ * @reg_mem: Register memory buffers
* @unreg_mem: Un-register memory buffers
* @reg_desc_get: Get a registration descriptor for pool
* @reg_desc_put: Return a registration descriptor to pool
@@ -369,7 +367,7 @@ struct iser_device {
};
/**
- * struct iser_reg_resources - Fast registration recources
+ * struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
* @fmr_pool: pool of fmrs
@@ -402,7 +400,7 @@ struct iser_fr_desc {
};
/**
- * struct iser_fr_pool: connection fast registration pool
+ * struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
* @lock: protects fmr/fastreg pool
@@ -427,6 +425,7 @@ struct iser_fr_pool {
* @comp: iser completion context
* @fr_pool: connection fast registration pool
* @pi_support: Indicate device T10-PI support
+ * @reg_cqe: completion handler
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -467,6 +466,7 @@ struct ib_conn {
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
* @pages_per_mr: maximum pages available for registration
+ * @snd_w_inv: connection uses remote invalidation
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -525,7 +525,7 @@ struct iser_page_vec {
};
/**
- * struct iser_global: iSER global context
+ * struct iser_global - iSER global context
*
* @device_list_mutex: protects device_list
* @device_list: iser devices global list
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5cbb4b3a0566..4a7045bb0831 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -358,6 +358,8 @@ static inline bool iser_signal_comp(u8 sig_count)
/**
* iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
*/
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
@@ -429,6 +431,9 @@ send_command_error:
/**
* iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
*/
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2cc89a9b9e9b..0f74dc6d12fa 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -170,7 +170,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
- if (data->dma_nents == 0) {
+ if (unlikely(data->dma_nents == 0)) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
@@ -237,7 +237,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
int ret, plen;
page_vec->npages = 0;
- page_vec->fake_mr.page_size = SIZE_4K;
+ page_vec->fake_mr.page_size = SZ_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
mem->dma_nents, NULL, iser_set_page);
if (unlikely(plen < mem->dma_nents)) {
@@ -451,7 +451,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->dma_nents);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a6548de0e218..1f4a37a3c2b3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -58,12 +58,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
dev_name(&event->device->dev), event->element.port_num);
}
-/**
+/*
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
@@ -124,9 +124,9 @@ comps_err:
return -1;
}
-/**
+/*
* iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
@@ -149,8 +149,11 @@ static void iser_free_device_ib_res(struct iser_device *device)
/**
* iser_alloc_fmr_pool - Creates FMR pool and page_vector
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
*
- * returns 0 on success, or errno code on failure
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -180,7 +183,7 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
page_vec->pages = (u64 *)(page_vec + 1);
- params.page_shift = SHIFT_4K;
+ params.page_shift = ilog2(SZ_4K);
params.max_pages_per_fmr = size;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
@@ -215,6 +218,7 @@ err_frpl:
/**
* iser_free_fmr_pool - releases the FMR pool and page vec
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
@@ -295,7 +299,11 @@ static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
/**
* iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -332,6 +340,7 @@ err:
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
@@ -355,10 +364,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
fr_pool->size - i);
}
-/**
+/*
* iser_create_ib_conn_res - Queue-Pair (QP)
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
@@ -436,7 +445,7 @@ out_err:
return ret;
}
-/**
+/*
* based on the resolved device node GUID see if there is already an allocated
* device for this device. If there's no such, create one.
*/
@@ -487,9 +496,9 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
enum iser_conn_state comp,
enum iser_conn_state exch)
@@ -561,7 +570,8 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
}
/**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
*/
void iser_conn_release(struct iser_conn *iser_conn)
{
@@ -595,7 +605,10 @@ void iser_conn_release(struct iser_conn *iser_conn)
}
/**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
* Called with state mutex held
*/
int iser_conn_terminate(struct iser_conn *iser_conn)
@@ -632,9 +645,9 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
return 1;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
@@ -670,7 +683,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
else
max_num_sg = attr->max_fast_reg_page_list_len;
- sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
+ sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
@@ -684,9 +697,9 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
@@ -732,9 +745,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
@@ -1019,7 +1032,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
ib_conn->post_recv_buf_count += count;
ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (ib_ret) {
+ if (unlikely(ib_ret)) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count -= count;
} else
@@ -1030,9 +1043,12 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
/**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ * @signal: true to send work request as SIGNALED
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
@@ -1060,7 +1076,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
first_wr = wr;
ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (ib_ret)
+ if (unlikely(ib_ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ib_ret, wr->opcode);
@@ -1081,7 +1097,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ iser_err("ib_check_mr_status failed, ret %d\n", ret);
/* Not a lot we can do, return ambiguous guard error */
*sector = 0;
return 0x1;
@@ -1093,7 +1109,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
sector_div(sector_off, sector_size + 8);
*sector = scsi_get_lba(iser_task->sc) + sector_off;
- pr_err("PI error found type %d at sector %llx "
+ iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
mr_status.sig_err.err_type,
(unsigned long long)*sector,
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 43ac61ffef4a..6dbc08e1a6a6 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -70,7 +70,7 @@
struct opa_vnic_adapter;
-/**
+/*
* struct __opa_vesw_info - OPA vnic virtual switch info
*
* Same as opa_vesw_info without bitwise attribute.
@@ -96,7 +96,7 @@ struct __opa_vesw_info {
u8 rsvd4[2];
} __packed;
-/**
+/*
* struct __opa_per_veswport_info - OPA vnic per port info
*
* Same as opa_per_veswport_info without bitwise attribute.
@@ -136,7 +136,7 @@ struct __opa_per_veswport_info {
u8 rsvd3[8];
} __packed;
-/**
+/*
* struct __opa_veswport_info - OPA vnic port info
*
* Same as opa_veswport_info without bitwise attribute.
@@ -146,7 +146,7 @@ struct __opa_veswport_info {
struct __opa_per_veswport_info vport;
};
-/**
+/*
* struct __opa_veswport_trap - OPA vnic trap info
*
* Same as opa_veswport_trap without bitwise attribute.
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b5960351bec0..b7f7a5f7bd98 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -174,9 +174,9 @@ static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
int tmo = *(int *)kp->arg;
if (tmo >= 0)
- return sprintf(buffer, "%d", tmo);
+ return sprintf(buffer, "%d\n", tmo);
else
- return sprintf(buffer, "off");
+ return sprintf(buffer, "off\n");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -352,11 +352,11 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
init_completion(&ch->done);
ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
- (struct sockaddr *)&target->rdma_cm.src : NULL,
- (struct sockaddr *)&target->rdma_cm.dst,
+ &target->rdma_cm.src.sa : NULL,
+ &target->rdma_cm.dst.sa,
SRP_PATH_REC_TIMEOUT_MS);
if (ret) {
- pr_err("No route available from %pIS to %pIS (%d)\n",
+ pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
&target->rdma_cm.src, &target->rdma_cm.dst, ret);
goto out;
}
@@ -366,7 +366,7 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret) {
- pr_err("Resolving address %pIS failed (%d)\n",
+ pr_err("Resolving address %pISpsc failed (%d)\n",
&target->rdma_cm.dst, ret);
goto out;
}
@@ -552,6 +552,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ const struct ib_device_attr *attr = &dev->dev->attrs;
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
@@ -583,12 +584,14 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size + 1;
init_attr->cap.max_recv_sge = 1;
- init_attr->cap.max_send_sge = SRP_MAX_SGE;
+ init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr->qp_type = IB_QPT_RC;
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
+ ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
+
if (target->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
qp = ch->rdma_cm.cm_id->qp;
@@ -1362,7 +1365,8 @@ static void srp_terminate_io(struct srp_rport *rport)
}
/* Calculate maximum initiator to target information unit length. */
-static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+ uint32_t max_it_iu_size)
{
uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
sizeof(struct srp_indirect_buf) +
@@ -1372,6 +1376,11 @@ static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
srp_max_imm_data);
+ if (max_it_iu_size)
+ max_iu_len = min(max_iu_len, max_it_iu_size);
+
+ pr_debug("max_iu_len = %d\n", max_iu_len);
+
return max_iu_len;
}
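
The new max_it_iu_size argument can only shrink the computed IU length, with zero meaning "no limit"; it is fed from the new max_it_iu_size= option parsed further down in the add_target string. A small sketch of the clamping rule in isolation (names and numbers illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

/* With a computed maximum of e.g. 8260 bytes and a limit of 4096,
 * the initiator must use 4096; a limit of 0 leaves the computed
 * value untouched.
 */
static u32 example_clamp_iu_len(u32 computed, u32 max_it_iu_size)
{
	return max_it_iu_size ? min(computed, max_it_iu_size) : computed;
}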
@@ -1389,7 +1398,8 @@ static int srp_rport_reconnect(struct srp_rport *rport)
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- srp_use_imm_data);
+ srp_use_imm_data,
+ target->max_it_iu_size);
int i, j, ret = 0;
bool multich = false;
@@ -1838,7 +1848,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
return -EIO;
if (ch->use_imm_data &&
- count <= SRP_MAX_IMM_SGE &&
+ count <= ch->max_imm_sge &&
SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
scmnd->sc_data_direction == DMA_TO_DEVICE) {
struct srp_imm_buf *buf;
@@ -2538,7 +2548,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- ch->use_imm_data);
+ ch->use_imm_data,
+ target->max_it_iu_size);
WARN_ON_ONCE(ch->max_it_iu_len >
be32_to_cpu(lrsp->max_it_iu_len));
@@ -3411,6 +3422,7 @@ enum {
SRP_OPT_IP_SRC = 1 << 15,
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+ SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3443,6 +3455,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
+ { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -3736,6 +3749,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->tl_retry_count = token;
break;
+ case SRP_OPT_MAX_IT_IU_SIZE:
+ if (match_int(args, &token) || token < 0) {
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+ goto out;
+ }
+ target->max_it_iu_size = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3887,7 +3908,9 @@ static ssize_t srp_create_target(struct device *dev,
target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
- max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
+ max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ srp_use_imm_data,
+ target->max_it_iu_size);
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index b2861cd2087a..5359ece561ca 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,6 +161,7 @@ struct srp_rdma_ch {
};
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
+ u8 max_imm_sge;
bool use_imm_data;
/* Everything above this point is used in the hot path of
@@ -209,6 +210,7 @@ struct srp_target_port {
u32 ch_count;
u32 lkey;
enum srp_target_state state;
+ uint32_t max_it_iu_size;
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
@@ -245,11 +247,13 @@ struct srp_target_port {
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} src;
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} dst;
bool src_specified;
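
Adding struct sockaddr to these unions lets callers such as srp_new_rdma_cm_id() pass &target->rdma_cm.dst.sa directly instead of casting the union's address, with no change in storage layout. The idiom in isolation (example names are not from the patch):

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>

/* One storage blob, several typed views: writers fill ip4 or ip6,
 * readers that only need the generic header take .sa, cast-free.
 */
union example_sockaddr {
	struct sockaddr_in	ip4;
	struct sockaddr_in6	ip6;
	struct sockaddr		sa;
	struct sockaddr_storage	ss;
};

static sa_family_t example_family(const union example_sockaddr *addr)
{
	return addr->sa.sa_family;
}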
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e25c70a56be6..23c782e3d49a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -556,34 +556,41 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_port_attr port_attr;
int ret;
- memset(&port_modify, 0, sizeof(port_modify));
- port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- port_modify.clr_port_cap_mask = 0;
-
- ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
- if (ret)
- goto err_mod_port;
-
ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
if (ret)
- goto err_query_port;
+ return ret;
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
- goto err_query_port;
+ return ret;
- sport->port_guid_wwn.priv = sport;
- srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ sport->port_guid_id.wwn.priv = sport;
+ srpt_format_guid(sport->port_guid_id.name,
+ sizeof(sport->port_guid_id.name),
&sport->gid.global.interface_id);
- sport->port_gid_wwn.priv = sport;
- snprintf(sport->port_gid, sizeof(sport->port_gid),
+ sport->port_gid_id.wwn.priv = sport;
+ snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
+ if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+ return 0;
+
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret) {
+ pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port, ret);
+ return 0;
+ }
+
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -599,23 +606,14 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(sport->mad_agent)) {
- ret = PTR_ERR(sport->mad_agent);
+ pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
- goto err_query_port;
}
}
return 0;
-
-err_query_port:
-
- port_modify.set_port_cap_mask = 0;
- port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
- return ret;
}
/**
@@ -1364,9 +1362,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx, u64 tag,
int status)
{
+ struct se_cmd *cmd = &ioctx->cmd;
struct srp_rsp *srp_rsp;
const u8 *sense_data;
int sense_data_len, max_sense_len;
+ u32 resid = cmd->residual_count;
/*
* The lowest bit of all SAM-3 status codes is zero (see also
@@ -1388,6 +1388,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->tag = tag;
srp_rsp->status = status;
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ }
+
if (sense_data_len) {
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
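
The new block maps SCSI residuals onto SRP response fields: underflow/overflow plus data direction selects one of the four SRP_RSP_FLAG_D{I,O}{UNDER,OVER} bits, and the residual byte count goes out big-endian. A worked example, assuming a 4 KiB read that transferred only 512 bytes (numbers illustrative):

#include <linux/types.h>
#include <scsi/srp.h>

/* Underflow on a DMA_FROM_DEVICE command: 4096 bytes requested,
 * 512 transferred, so 3584 residual bytes are reported as DIUNDER.
 */
static void example_fill_underflow(struct srp_rsp *rsp)
{
	rsp->flags = SRP_RSP_FLAG_DIUNDER;
	rsp->data_in_res_cnt = cpu_to_be32(4096 - 512);
}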
@@ -1931,41 +1953,22 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
-{
- struct srpt_nexus *nexus;
- struct srpt_rdma_ch *ch2;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry) {
- list_for_each_entry(ch2, &nexus->ch_list, list) {
- if (ch2 == ch) {
- res = false;
- goto done;
- }
- }
- }
-done:
- rcu_read_unlock();
-
- return res;
-}
-
/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
+ DECLARE_COMPLETION_ONSTACK(closed);
struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
+ ch->closed = &closed;
+
mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
- 5 * HZ) == 0)
+ while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
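
Replacing the ch_releaseQ waitqueue with an on-stack completion removes the repeated scan over all nexus lists: the waiter publishes a pointer to its completion before starting the disconnect, and the release path fires it exactly once. The pattern reduced to its essentials (structure and function names illustrative):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct example_ch {
	struct completion *closed;	/* set by the waiter */
};

static void example_wait_closed(struct example_ch *ch)
{
	DECLARE_COMPLETION_ONSTACK(closed);

	ch->closed = &closed;	/* publish before triggering teardown */
	/* ... initiate the disconnect here ... */
	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
		pr_info("still waiting for channel close ...\n");
}

/* Release path: runs once the channel is off all lists. */
static void example_release(struct example_ch *ch)
{
	if (ch->closed)
		complete(ch->closed);
}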
@@ -2045,10 +2048,17 @@ static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
__srpt_close_all_ch(sport);
}
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+ if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+ complete(sport->freed_channels);
+}
+
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+ srpt_drop_sport_ref(ch->sport);
kfree_rcu(ch, rcu);
}
@@ -2092,6 +2102,9 @@ static void srpt_release_channel_work(struct work_struct *w)
list_del_rcu(&ch->list);
mutex_unlock(&sport->mutex);
+ if (ch->closed)
+ complete(ch->closed);
+
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2106,8 +2119,6 @@ static void srpt_release_channel_work(struct work_struct *w)
kmem_cache_destroy(ch->req_buf_cache);
- wake_up(&sport->ch_releaseQ);
-
kref_put(&ch->kref, srpt_free_ch);
}
@@ -2144,6 +2155,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
char i_port_id[36];
u32 it_iu_len;
int i, tag_num, tag_size, ret;
+ struct srpt_tpg *stpg;
WARN_ON_ONCE(irqs_disabled());
@@ -2296,23 +2308,38 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
- pr_debug("registering session %s\n", ch->sess_name);
+ pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+ i_port_id);
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
+
+ mutex_lock(&sport->port_guid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ }
+ mutex_unlock(&sport->port_guid_id.mutex);
+
+ mutex_lock(&sport->port_gid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- /* Retry without leading "0x" */
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->port_gid_id.mutex);
+
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
@@ -2325,6 +2352,12 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto destroy_ib;
}
+ /*
+ * Once a session has been created destruction of srpt_rdma_ch objects
+ * will decrement sport->refcount. Hence increment sport->refcount now.
+ */
+ atomic_inc(&sport->refcount);
+
mutex_lock(&sport->mutex);
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
@@ -2505,6 +2538,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
struct srpt_device *sdev;
struct srp_login_req req;
const struct srp_login_req_rdma *req_rdma;
+ struct sa_path_rec *path_rec = cm_id->route.path_rec;
char src_addr[40];
sdev = ib_get_client_data(cm_id->device, &srpt_client);
@@ -2530,7 +2564,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
&cm_id->route.addr.src_addr);
return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
- cm_id->route.path_rec->pkey, &req, src_addr);
+ path_rec ? path_rec->pkey : 0, &req, src_addr);
}
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
@@ -2906,39 +2940,29 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
-static bool srpt_ch_list_empty(struct srpt_port *sport)
-{
- struct srpt_nexus *nexus;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry)
- if (!list_empty(&nexus->ch_list))
- res = false;
- rcu_read_unlock();
-
- return res;
-}
-
/**
* srpt_release_sport - disable login and wait for associated channels
* @sport: SRPT HCA port.
*/
static int srpt_release_sport(struct srpt_port *sport)
{
+ DECLARE_COMPLETION_ONSTACK(c);
struct srpt_nexus *nexus, *next_n;
struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
+ sport->freed_channels = &c;
+
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, false);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ,
- srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
- pr_info("%s_%d: waiting for session unregistration ...\n",
- dev_name(&sport->sdev->device->dev), sport->port);
+ while (atomic_read(&sport->refcount) > 0 &&
+ wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ atomic_read(&sport->refcount));
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2975,10 +2999,10 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid, name) == 0)
- return &sport->port_guid_wwn;
- if (strcmp(sport->port_gid, name) == 0)
- return &sport->port_gid_wwn;
+ if (strcmp(sport->port_guid_id.name, name) == 0)
+ return &sport->port_guid_id.wwn;
+ if (strcmp(sport->port_gid_id.name, name) == 0)
+ return &sport->port_gid_id.wwn;
}
}
@@ -3147,7 +3171,6 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
- init_waitqueue_head(&sport->ch_releaseQ);
mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
@@ -3156,6 +3179,10 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
+ mutex_init(&sport->port_guid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
+ mutex_init(&sport->port_gid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
@@ -3258,14 +3285,23 @@ static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
return tpg->se_tpg_wwn->priv;
}
+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = wwn->priv;
+
+ if (wwn == &sport->port_guid_id.wwn)
+ return &sport->port_guid_id;
+ if (wwn == &sport->port_gid_id.wwn)
+ return &sport->port_gid_id;
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
- struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
- WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
- tpg != &sport->port_gid_tpg);
- return tpg == &sport->port_guid_tpg ? sport->port_guid :
- sport->port_gid;
+ return stpg->sport_id->name;
}
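
srpt_get_fabric_wwn() now recovers its private data from the embedded LIO TPG with container_of() instead of comparing the pointer against two fixed members. The idiom, stripped down (example names are not from the patch):

#include <linux/kernel.h>
#include <target/target_core_base.h>

struct example_tpg {
	int			id;
	struct se_portal_group	tpg;	/* embedded LIO object */
};

/* Given a pointer to the embedded member, step back to the wrapper. */
static struct example_tpg *to_example_tpg(struct se_portal_group *tpg)
{
	return container_of(tpg, struct example_tpg, tpg);
}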
static u16 srpt_get_tag(struct se_portal_group *tpg)
@@ -3721,19 +3757,25 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
const char *name)
{
- struct srpt_port *sport = wwn->priv;
- struct se_portal_group *tpg;
- int res;
-
- WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
- wwn != &sport->port_gid_wwn);
- tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
- &sport->port_gid_tpg;
- res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
- if (res)
+ struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+ struct srpt_tpg *stpg;
+ int res = -ENOMEM;
+
+ stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+ if (!stpg)
return ERR_PTR(res);
+ stpg->sport_id = sport_id;
+ res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+ if (res) {
+ kfree(stpg);
+ return ERR_PTR(res);
+ }
- return tpg;
+ mutex_lock(&sport_id->mutex);
+ list_add_tail(&stpg->entry, &sport_id->tpg_list);
+ mutex_unlock(&sport_id->mutex);
+
+ return &stpg->tpg;
}
/**
@@ -3742,10 +3784,17 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+ struct srpt_port_id *sport_id = stpg->sport_id;
struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ mutex_lock(&sport_id->mutex);
+ list_del(&stpg->entry);
+ mutex_unlock(&sport_id->mutex);
+
sport->enabled = false;
core_tpg_deregister(tpg);
+ kfree(stpg);
}
/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ee9f20e9177a..2e1a69840857 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -264,6 +264,8 @@ enum rdma_ch_state {
* @zw_cqe: Zero-length write CQE.
* @rcu: RCU head.
* @kref: kref for this channel.
+ * @closed: Completion object that will be signaled as soon as a new
+ * channel object with the same identity can be created.
* @rq_size: IB receive queue size.
* @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
@@ -306,6 +308,7 @@ struct srpt_rdma_ch {
struct ib_cqe zw_cqe;
struct rcu_head rcu;
struct kref kref;
+ struct completion *closed;
int rq_size;
u32 max_rsp_size;
atomic_t sq_wr_avail;
@@ -361,24 +364,52 @@ struct srpt_port_attrib {
};
/**
+ * struct srpt_tpg - information about a single "target portal group"
+ * @entry: Entry in @sport_id->tpg_list.
+ * @sport_id: Port name this TPG is associated with.
+ * @tpg: LIO TPG data structure.
+ *
+ * Zero or more target portal groups are associated with each port name
+ * (srpt_port_id). With each TPG an ACL list is associated.
+ */
+struct srpt_tpg {
+ struct list_head entry;
+ struct srpt_port_id *sport_id;
+ struct se_portal_group tpg;
+};
+
+/**
+ * struct srpt_port_id - information about an RDMA port name
+ * @mutex: Protects @tpg_list changes.
+ * @tpg_list: TPGs associated with the RDMA port name.
+ * @wwn: WWN associated with the RDMA port name.
+ * @name: ASCII representation of the port name.
+ *
+ * Multiple sysfs directories can be associated with a single RDMA port. This
+ * data structure represents a single (port, name) pair.
+ */
+struct srpt_port_id {
+ struct mutex mutex;
+ struct list_head tpg_list;
+ struct se_wwn wwn;
+ char name[64];
+};
+
+/**
* struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
- * @port_guid: ASCII representation of Port GUID
- * @port_gid: ASCII representation of Port GID
* @port: one-based port number.
* @sm_lid: cached value of the port's sm_lid.
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
- * @port_acl_lock spinlock for port_acl_list:
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_tpg: TPG associated with target port GUID.
- * @port_guid_wwn: WWN associated with target port GUID.
- * @port_gid_tpg: TPG associated with target port GID.
- * @port_gid_wwn: WWN associated with target port GID.
+ * @port_guid_id: target port GUID
+ * @port_gid_id: target port GID
* @port_attrib: Port attributes that can be accessed through configfs.
- * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @refcount: Number of objects associated with this port.
+ * @freed_channels: Completion that will be signaled once @refcount becomes 0.
* @mutex: Protects nexus_list.
* @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
@@ -386,19 +417,16 @@ struct srpt_port {
struct srpt_device *sdev;
struct ib_mad_agent *mad_agent;
bool enabled;
- u8 port_guid[24];
- u8 port_gid[64];
u8 port;
u32 sm_lid;
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct se_portal_group port_guid_tpg;
- struct se_wwn port_guid_wwn;
- struct se_portal_group port_gid_tpg;
- struct se_wwn port_gid_wwn;
+ struct srpt_port_id port_guid_id;
+ struct srpt_port_id port_gid_id;
struct srpt_port_attrib port_attrib;
- wait_queue_head_t ch_releaseQ;
+ atomic_t refcount;
+ struct completion *freed_channels;
struct mutex mutex;
struct list_head nexus_list;
};
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 1cb40c7475af..8229a9006917 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
{
struct ml_device *ml = ff->private;
+ /*
+ * Even though we stop all playing effects when tearing down
+ * an input device (via input_device_flush() that calls into
+ * input_ff_flush() that stops and erases all effects), we
+ * do not actually stop the timer, and therefore we should
+ * do it here.
+ */
+ del_timer_sync(&ml->timer);
+
kfree(ml->private);
}
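
The rule this fix encodes: an object embedding a timer must stop the timer synchronously before the object, or anything its handler touches, is freed; otherwise the handler can race with the free. Reduced to a sketch (names illustrative):

#include <linux/slab.h>
#include <linux/timer.h>

struct example_dev {
	struct timer_list timer;
	void *state;			/* touched by the timer handler */
};

static void example_destroy(struct example_dev *dev)
{
	del_timer_sync(&dev->timer);	/* waits out a running handler */
	kfree(dev->state);		/* now safe: handler cannot run */
	kfree(dev);
}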
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f28a7158b2ef..bbf9ae9f3f0c 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -510,7 +510,6 @@ struct f11_data {
struct rmi_2d_sensor_platform_data sensor_pdata;
unsigned long *abs_mask;
unsigned long *rel_mask;
- unsigned long *result_bits;
};
enum f11_finger_state {
@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
/*
** init instance data, fill in values and create any sysfs files
*/
- f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+ f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
GFP_KERNEL);
if (!f11)
return -ENOMEM;
@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
+ sizeof(struct f11_data));
f11->rel_mask = (unsigned long *)((char *)f11
+ sizeof(struct f11_data) + mask_size);
- f11->result_bits = (unsigned long *)((char *)f11
- + sizeof(struct f11_data) + mask_size * 2);
set_bit(fn->irq_pos, f11->abs_mask);
set_bit(fn->irq_pos + 1, f11->rel_mask);
@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
valid_bytes = f11->sensor.attn_size;
memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
valid_bytes);
- drvdata->attn_data.data += f11->sensor.attn_size;
- drvdata->attn_data.size -= f11->sensor.attn_size;
+ drvdata->attn_data.data += valid_bytes;
+ drvdata->attn_data.size -= valid_bytes;
} else {
error = rmi_read_block(rmi_dev,
data_base_addr, f11->sensor.data_pkt,
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index d20a5d6780d1..7e97944f7616 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -55,6 +55,9 @@ struct f12_data {
const struct rmi_register_desc_item *data15;
u16 data15_offset;
+
+ unsigned long *abs_mask;
+ unsigned long *rel_mask;
};
static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
valid_bytes = sensor->attn_size;
memcpy(sensor->data_pkt, drvdata->attn_data.data,
valid_bytes);
- drvdata->attn_data.data += sensor->attn_size;
- drvdata->attn_data.size -= sensor->attn_size;
+ drvdata->attn_data.data += valid_bytes;
+ drvdata->attn_data.size -= valid_bytes;
} else {
retval = rmi_read_block(rmi_dev, f12->data_addr,
sensor->data_pkt, sensor->pkt_size);
@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
static int rmi_f12_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ struct rmi_2d_sensor *sensor;
int ret;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ sensor = &f12->sensor;
+
+ if (!sensor->report_abs)
+ drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
+
+ drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);
ret = rmi_f12_write_control_regs(fn);
if (ret)
@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
u16 data_offset = 0;
+ int mask_size;
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
+ mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
ret = rmi_read(fn->rmi_dev, query_addr, &buf);
if (ret < 0) {
dev_err(&fn->dev, "Failed to read general info register: %d\n",
@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
return -ENODEV;
}
- f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+ f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+ GFP_KERNEL);
if (!f12)
return -ENOMEM;
+ f12->abs_mask = (unsigned long *)((char *)f12
+ + sizeof(struct f12_data));
+ f12->rel_mask = (unsigned long *)((char *)f12
+ + sizeof(struct f12_data) + mask_size);
+
+ set_bit(fn->irq_pos, f12->abs_mask);
+ set_bit(fn->irq_pos + 1, f12->rel_mask);
+
f12->has_dribble = !!(buf & BIT(3));
if (fn->dev.of_node) {
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 710b02595486..897105b9a98b 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
static const struct vb2_queue rmi_f54_queue = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
- .buf_struct_size = sizeof(struct vb2_buffer),
+ .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
.ops = &rmi_f54_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
return 0;
}
@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
video_unregister_device(&f54->vdev);
v4l2_device_unregister(&f54->v4l2);
+ destroy_workqueue(f54->workqueue);
}
struct rmi_function_handler rmi_f54_handler = {
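The buf_struct_size fix matters because vb2 allocates exactly that many bytes per buffer, while the V4L2 glue up-casts every vb2_buffer to the vb2_v4l2_buffer that embeds it; sizing for the inner struct under-allocates, leaving the v4l2-specific fields past the end of the buffer. A sketch of the up-cast (the kernel provides an equivalent to_vb2_v4l2_buffer() helper):

#include <media/videobuf2-v4l2.h>

/* Illustrative: what the V4L2 layer effectively does with each buffer. */
static struct vb2_v4l2_buffer *demo_to_v4l2_buf(struct vb2_buffer *vb)
{
	/* only safe if buf_struct_size covered the outer struct */
	return container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
}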
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 4b22d49a0f49..6bcffc930384 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1990,11 +1990,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
/* get sysinfo */
md->si = &cd->sysinfo;
- if (!md->si) {
- dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
- __func__, md->si);
- goto error_get_sysinfo;
- }
rc = cyttsp4_setup_input_device(cd);
if (rc)
@@ -2004,8 +1999,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
error_init_input:
input_free_device(md->input);
-error_get_sysinfo:
- input_set_drvdata(md->input, NULL);
error_alloc_failed:
dev_err(dev, "%s failed.\n", __func__);
return rc;
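The deleted check could never fire: md->si is assigned the address of a member embedded in *cd, and the address of an embedded member of a valid object is never NULL. A minimal illustration with hypothetical names:

struct demo_core { int sysinfo; };

static void demo(struct demo_core *cd)
{
	int *si = &cd->sysinfo;	/* non-NULL for any valid cd */

	(void)si;
}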
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 6ab4012a059a..c49afbea3458 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_MSM8974
+ tristate "Qualcomm MSM8974 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8974-based
+ platforms.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 67dafb783dec..9adf9e380545 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-msm8974-objs := msm8974.o
qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
new file mode 100644
index 000000000000..ce599a0c83d9
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ *
+ * Based on MSM bus code from downstream MSM kernel sources.
+ * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
+ *
+ * Based on qcs404.c
+ * Copyright (C) 2019 Linaro Ltd
+ *
+ * Here's a rough representation that shows the various buses that form the
+ * Network On Chip (NOC) for the msm8974:
+ *
+ * Multimedia Subsystem (MMSS)
+ * |----------+-----------------------------------+-----------|
+ * | |
+ * | |
+ * Config | Bus Interface | Memory Controller
+ * |------------+-+-----------| |------------+-+-----------|
+ * | |
+ * | |
+ * | System |
+ * |--------------+-+---------------------------------+-+-------------|
+ * | |
+ * | |
+ * Peripheral | On Chip | Memory (OCMEM)
+ * |------------+-------------| |------------+-------------|
+ */
+
+#include <dt-bindings/interconnect/qcom,msm8974.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+enum {
+ MSM8974_BIMC_MAS_AMPSS_M0 = 1,
+ MSM8974_BIMC_MAS_AMPSS_M1,
+ MSM8974_BIMC_MAS_MSS_PROC,
+ MSM8974_BIMC_TO_MNOC,
+ MSM8974_BIMC_TO_SNOC,
+ MSM8974_BIMC_SLV_EBI_CH0,
+ MSM8974_BIMC_SLV_AMPSS_L2,
+ MSM8974_CNOC_MAS_RPM_INST,
+ MSM8974_CNOC_MAS_RPM_DATA,
+ MSM8974_CNOC_MAS_RPM_SYS,
+ MSM8974_CNOC_MAS_DEHR,
+ MSM8974_CNOC_MAS_QDSS_DAP,
+ MSM8974_CNOC_MAS_SPDM,
+ MSM8974_CNOC_MAS_TIC,
+ MSM8974_CNOC_SLV_CLK_CTL,
+ MSM8974_CNOC_SLV_CNOC_MSS,
+ MSM8974_CNOC_SLV_SECURITY,
+ MSM8974_CNOC_SLV_TCSR,
+ MSM8974_CNOC_SLV_TLMM,
+ MSM8974_CNOC_SLV_CRYPTO_0_CFG,
+ MSM8974_CNOC_SLV_CRYPTO_1_CFG,
+ MSM8974_CNOC_SLV_IMEM_CFG,
+ MSM8974_CNOC_SLV_MESSAGE_RAM,
+ MSM8974_CNOC_SLV_BIMC_CFG,
+ MSM8974_CNOC_SLV_BOOT_ROM,
+ MSM8974_CNOC_SLV_PMIC_ARB,
+ MSM8974_CNOC_SLV_SPDM_WRAPPER,
+ MSM8974_CNOC_SLV_DEHR_CFG,
+ MSM8974_CNOC_SLV_MPM,
+ MSM8974_CNOC_SLV_QDSS_CFG,
+ MSM8974_CNOC_SLV_RBCPR_CFG,
+ MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG,
+ MSM8974_CNOC_TO_SNOC,
+ MSM8974_CNOC_SLV_CNOC_ONOC_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_CFG,
+ MSM8974_CNOC_SLV_PNOC_CFG,
+ MSM8974_CNOC_SLV_SNOC_MPU_CFG,
+ MSM8974_CNOC_SLV_SNOC_CFG,
+ MSM8974_CNOC_SLV_EBI1_DLL_CFG,
+ MSM8974_CNOC_SLV_PHY_APU_CFG,
+ MSM8974_CNOC_SLV_EBI1_PHY_CFG,
+ MSM8974_CNOC_SLV_RPM,
+ MSM8974_CNOC_SLV_SERVICE_CNOC,
+ MSM8974_MNOC_MAS_GRAPHICS_3D,
+ MSM8974_MNOC_MAS_JPEG,
+ MSM8974_MNOC_MAS_MDP_PORT0,
+ MSM8974_MNOC_MAS_VIDEO_P0,
+ MSM8974_MNOC_MAS_VIDEO_P1,
+ MSM8974_MNOC_MAS_VFE,
+ MSM8974_MNOC_TO_CNOC,
+ MSM8974_MNOC_TO_BIMC,
+ MSM8974_MNOC_SLV_CAMERA_CFG,
+ MSM8974_MNOC_SLV_DISPLAY_CFG,
+ MSM8974_MNOC_SLV_OCMEM_CFG,
+ MSM8974_MNOC_SLV_CPR_CFG,
+ MSM8974_MNOC_SLV_CPR_XPU_CFG,
+ MSM8974_MNOC_SLV_MISC_CFG,
+ MSM8974_MNOC_SLV_MISC_XPU_CFG,
+ MSM8974_MNOC_SLV_VENUS_CFG,
+ MSM8974_MNOC_SLV_GRAPHICS_3D_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG,
+ MSM8974_MNOC_SLV_MNOC_MPU_CFG,
+ MSM8974_MNOC_SLV_ONOC_MPU_CFG,
+ MSM8974_MNOC_SLV_SERVICE_MNOC,
+ MSM8974_OCMEM_NOC_TO_OCMEM_VNOC,
+ MSM8974_OCMEM_MAS_JPEG_OCMEM,
+ MSM8974_OCMEM_MAS_MDP_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM,
+ MSM8974_OCMEM_MAS_VFE_OCMEM,
+ MSM8974_OCMEM_MAS_CNOC_ONOC_CFG,
+ MSM8974_OCMEM_SLV_SERVICE_ONOC,
+ MSM8974_OCMEM_VNOC_TO_SNOC,
+ MSM8974_OCMEM_VNOC_TO_OCMEM_NOC,
+ MSM8974_OCMEM_VNOC_MAS_GFX3D,
+ MSM8974_OCMEM_SLV_OCMEM,
+ MSM8974_PNOC_MAS_PNOC_CFG,
+ MSM8974_PNOC_MAS_SDCC_1,
+ MSM8974_PNOC_MAS_SDCC_3,
+ MSM8974_PNOC_MAS_SDCC_4,
+ MSM8974_PNOC_MAS_SDCC_2,
+ MSM8974_PNOC_MAS_TSIF,
+ MSM8974_PNOC_MAS_BAM_DMA,
+ MSM8974_PNOC_MAS_BLSP_2,
+ MSM8974_PNOC_MAS_USB_HSIC,
+ MSM8974_PNOC_MAS_BLSP_1,
+ MSM8974_PNOC_MAS_USB_HS,
+ MSM8974_PNOC_TO_SNOC,
+ MSM8974_PNOC_SLV_SDCC_1,
+ MSM8974_PNOC_SLV_SDCC_3,
+ MSM8974_PNOC_SLV_SDCC_2,
+ MSM8974_PNOC_SLV_SDCC_4,
+ MSM8974_PNOC_SLV_TSIF,
+ MSM8974_PNOC_SLV_BAM_DMA,
+ MSM8974_PNOC_SLV_BLSP_2,
+ MSM8974_PNOC_SLV_USB_HSIC,
+ MSM8974_PNOC_SLV_BLSP_1,
+ MSM8974_PNOC_SLV_USB_HS,
+ MSM8974_PNOC_SLV_PDM,
+ MSM8974_PNOC_SLV_PERIPH_APU_CFG,
+ MSM8974_PNOC_SLV_PNOC_MPU_CFG,
+ MSM8974_PNOC_SLV_PRNG,
+ MSM8974_PNOC_SLV_SERVICE_PNOC,
+ MSM8974_SNOC_MAS_LPASS_AHB,
+ MSM8974_SNOC_MAS_QDSS_BAM,
+ MSM8974_SNOC_MAS_SNOC_CFG,
+ MSM8974_SNOC_TO_BIMC,
+ MSM8974_SNOC_TO_CNOC,
+ MSM8974_SNOC_TO_PNOC,
+ MSM8974_SNOC_TO_OCMEM_VNOC,
+ MSM8974_SNOC_MAS_CRYPTO_CORE0,
+ MSM8974_SNOC_MAS_CRYPTO_CORE1,
+ MSM8974_SNOC_MAS_LPASS_PROC,
+ MSM8974_SNOC_MAS_MSS,
+ MSM8974_SNOC_MAS_MSS_NAV,
+ MSM8974_SNOC_MAS_OCMEM_DMA,
+ MSM8974_SNOC_MAS_WCSS,
+ MSM8974_SNOC_MAS_QDSS_ETR,
+ MSM8974_SNOC_MAS_USB3,
+ MSM8974_SNOC_SLV_AMPSS,
+ MSM8974_SNOC_SLV_LPASS,
+ MSM8974_SNOC_SLV_USB3,
+ MSM8974_SNOC_SLV_WCSS,
+ MSM8974_SNOC_SLV_OCIMEM,
+ MSM8974_SNOC_SLV_SNOC_OCMEM,
+ MSM8974_SNOC_SLV_SERVICE_SNOC,
+ MSM8974_SNOC_SLV_QDSS_STM,
+};
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+#define to_msm8974_icc_provider(_provider) \
+ container_of(_provider, struct msm8974_icc_provider, provider)
+
+static const struct clk_bulk_data msm8974_icc_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct msm8974_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8974_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define MSM8974_ICC_MAX_LINKS 3
+
+/**
+ * struct msm8974_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8974_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[MSM8974_ICC_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct msm8974_icc_desc {
+ struct msm8974_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct msm8974_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(mas_ampss_m0, MSM8974_BIMC_MAS_AMPSS_M0, 8, 0, -1);
+DEFINE_QNODE(mas_ampss_m1, MSM8974_BIMC_MAS_AMPSS_M1, 8, 0, -1);
+DEFINE_QNODE(mas_mss_proc, MSM8974_BIMC_MAS_MSS_PROC, 8, 1, -1);
+DEFINE_QNODE(bimc_to_mnoc, MSM8974_BIMC_TO_MNOC, 8, 2, -1, MSM8974_BIMC_SLV_EBI_CH0);
+DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC, MSM8974_BIMC_SLV_EBI_CH0, MSM8974_BIMC_MAS_AMPSS_M0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
+DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
+
+static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+ [BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
+ [BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
+ [BIMC_MAS_MSS_PROC] = &mas_mss_proc,
+ [BIMC_TO_MNOC] = &bimc_to_mnoc,
+ [BIMC_TO_SNOC] = &bimc_to_snoc,
+ [BIMC_SLV_EBI_CH0] = &slv_ebi_ch0,
+ [BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
+};
+
+static struct msm8974_icc_desc msm8974_bimc = {
+ .nodes = msm8974_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
+};
+
+DEFINE_QNODE(mas_rpm_inst, MSM8974_CNOC_MAS_RPM_INST, 8, 45, -1);
+DEFINE_QNODE(mas_rpm_data, MSM8974_CNOC_MAS_RPM_DATA, 8, 46, -1);
+DEFINE_QNODE(mas_rpm_sys, MSM8974_CNOC_MAS_RPM_SYS, 8, 47, -1);
+DEFINE_QNODE(mas_dehr, MSM8974_CNOC_MAS_DEHR, 8, 48, -1);
+DEFINE_QNODE(mas_qdss_dap, MSM8974_CNOC_MAS_QDSS_DAP, 8, 49, -1);
+DEFINE_QNODE(mas_spdm, MSM8974_CNOC_MAS_SPDM, 8, 50, -1);
+DEFINE_QNODE(mas_tic, MSM8974_CNOC_MAS_TIC, 8, 51, -1);
+DEFINE_QNODE(slv_clk_ctl, MSM8974_CNOC_SLV_CLK_CTL, 8, -1, 47);
+DEFINE_QNODE(slv_cnoc_mss, MSM8974_CNOC_SLV_CNOC_MSS, 8, -1, 48);
+DEFINE_QNODE(slv_security, MSM8974_CNOC_SLV_SECURITY, 8, -1, 49);
+DEFINE_QNODE(slv_tcsr, MSM8974_CNOC_SLV_TCSR, 8, -1, 50);
+DEFINE_QNODE(slv_tlmm, MSM8974_CNOC_SLV_TLMM, 8, -1, 51);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8974_CNOC_SLV_CRYPTO_0_CFG, 8, -1, 52);
+DEFINE_QNODE(slv_crypto_1_cfg, MSM8974_CNOC_SLV_CRYPTO_1_CFG, 8, -1, 53);
+DEFINE_QNODE(slv_imem_cfg, MSM8974_CNOC_SLV_IMEM_CFG, 8, -1, 54);
+DEFINE_QNODE(slv_message_ram, MSM8974_CNOC_SLV_MESSAGE_RAM, 8, -1, 55);
+DEFINE_QNODE(slv_bimc_cfg, MSM8974_CNOC_SLV_BIMC_CFG, 8, -1, 56);
+DEFINE_QNODE(slv_boot_rom, MSM8974_CNOC_SLV_BOOT_ROM, 8, -1, 57);
+DEFINE_QNODE(slv_pmic_arb, MSM8974_CNOC_SLV_PMIC_ARB, 8, -1, 59);
+DEFINE_QNODE(slv_spdm_wrapper, MSM8974_CNOC_SLV_SPDM_WRAPPER, 8, -1, 60);
+DEFINE_QNODE(slv_dehr_cfg, MSM8974_CNOC_SLV_DEHR_CFG, 8, -1, 61);
+DEFINE_QNODE(slv_mpm, MSM8974_CNOC_SLV_MPM, 8, -1, 62);
+DEFINE_QNODE(slv_qdss_cfg, MSM8974_CNOC_SLV_QDSS_CFG, 8, -1, 63);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8974_CNOC_SLV_RBCPR_CFG, 8, -1, 64);
+DEFINE_QNODE(slv_rbcpr_qdss_apu_cfg, MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG, 8, -1, 65);
+DEFINE_QNODE(cnoc_to_snoc, MSM8974_CNOC_TO_SNOC, 8, 52, 75);
+DEFINE_QNODE(slv_cnoc_onoc_cfg, MSM8974_CNOC_SLV_CNOC_ONOC_CFG, 8, -1, 68);
+DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG, 8, -1, 58);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_CFG, 8, -1, 66);
+DEFINE_QNODE(slv_pnoc_cfg, MSM8974_CNOC_SLV_PNOC_CFG, 8, -1, 69);
+DEFINE_QNODE(slv_snoc_mpu_cfg, MSM8974_CNOC_SLV_SNOC_MPU_CFG, 8, -1, 67);
+DEFINE_QNODE(slv_snoc_cfg, MSM8974_CNOC_SLV_SNOC_CFG, 8, -1, 70);
+DEFINE_QNODE(slv_ebi1_dll_cfg, MSM8974_CNOC_SLV_EBI1_DLL_CFG, 8, -1, 71);
+DEFINE_QNODE(slv_phy_apu_cfg, MSM8974_CNOC_SLV_PHY_APU_CFG, 8, -1, 72);
+DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
+DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
+DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
+
+static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+ [CNOC_MAS_RPM_INST] = &mas_rpm_inst,
+ [CNOC_MAS_RPM_DATA] = &mas_rpm_data,
+ [CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
+ [CNOC_MAS_DEHR] = &mas_dehr,
+ [CNOC_MAS_QDSS_DAP] = &mas_qdss_dap,
+ [CNOC_MAS_SPDM] = &mas_spdm,
+ [CNOC_MAS_TIC] = &mas_tic,
+ [CNOC_SLV_CLK_CTL] = &slv_clk_ctl,
+ [CNOC_SLV_CNOC_MSS] = &slv_cnoc_mss,
+ [CNOC_SLV_SECURITY] = &slv_security,
+ [CNOC_SLV_TCSR] = &slv_tcsr,
+ [CNOC_SLV_TLMM] = &slv_tlmm,
+ [CNOC_SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [CNOC_SLV_CRYPTO_1_CFG] = &slv_crypto_1_cfg,
+ [CNOC_SLV_IMEM_CFG] = &slv_imem_cfg,
+ [CNOC_SLV_MESSAGE_RAM] = &slv_message_ram,
+ [CNOC_SLV_BIMC_CFG] = &slv_bimc_cfg,
+ [CNOC_SLV_BOOT_ROM] = &slv_boot_rom,
+ [CNOC_SLV_PMIC_ARB] = &slv_pmic_arb,
+ [CNOC_SLV_SPDM_WRAPPER] = &slv_spdm_wrapper,
+ [CNOC_SLV_DEHR_CFG] = &slv_dehr_cfg,
+ [CNOC_SLV_MPM] = &slv_mpm,
+ [CNOC_SLV_QDSS_CFG] = &slv_qdss_cfg,
+ [CNOC_SLV_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [CNOC_SLV_RBCPR_QDSS_APU_CFG] = &slv_rbcpr_qdss_apu_cfg,
+ [CNOC_TO_SNOC] = &cnoc_to_snoc,
+ [CNOC_SLV_CNOC_ONOC_CFG] = &slv_cnoc_onoc_cfg,
+ [CNOC_SLV_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
+ [CNOC_SLV_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [CNOC_SLV_PNOC_CFG] = &slv_pnoc_cfg,
+ [CNOC_SLV_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
+ [CNOC_SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [CNOC_SLV_EBI1_DLL_CFG] = &slv_ebi1_dll_cfg,
+ [CNOC_SLV_PHY_APU_CFG] = &slv_phy_apu_cfg,
+ [CNOC_SLV_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
+ [CNOC_SLV_RPM] = &slv_rpm,
+ [CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
+};
+
+static struct msm8974_icc_desc msm8974_cnoc = {
+ .nodes = msm8974_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
+};
+
+DEFINE_QNODE(mas_graphics_3d, MSM8974_MNOC_MAS_GRAPHICS_3D, 16, 6, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_jpeg, MSM8974_MNOC_MAS_JPEG, 16, 7, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_mdp_port0, MSM8974_MNOC_MAS_MDP_PORT0, 16, 8, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_video_p0, MSM8974_MNOC_MAS_VIDEO_P0, 16, 9, -1);
+DEFINE_QNODE(mas_video_p1, MSM8974_MNOC_MAS_VIDEO_P1, 16, 10, -1);
+DEFINE_QNODE(mas_vfe, MSM8974_MNOC_MAS_VFE, 16, 11, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mnoc_to_cnoc, MSM8974_MNOC_TO_CNOC, 16, 4, -1);
+DEFINE_QNODE(mnoc_to_bimc, MSM8974_MNOC_TO_BIMC, 16, -1, 16, MSM8974_BIMC_TO_MNOC);
+DEFINE_QNODE(slv_camera_cfg, MSM8974_MNOC_SLV_CAMERA_CFG, 16, -1, 3);
+DEFINE_QNODE(slv_display_cfg, MSM8974_MNOC_SLV_DISPLAY_CFG, 16, -1, 4);
+DEFINE_QNODE(slv_ocmem_cfg, MSM8974_MNOC_SLV_OCMEM_CFG, 16, -1, 5);
+DEFINE_QNODE(slv_cpr_cfg, MSM8974_MNOC_SLV_CPR_CFG, 16, -1, 6);
+DEFINE_QNODE(slv_cpr_xpu_cfg, MSM8974_MNOC_SLV_CPR_XPU_CFG, 16, -1, 7);
+DEFINE_QNODE(slv_misc_cfg, MSM8974_MNOC_SLV_MISC_CFG, 16, -1, 8);
+DEFINE_QNODE(slv_misc_xpu_cfg, MSM8974_MNOC_SLV_MISC_XPU_CFG, 16, -1, 9);
+DEFINE_QNODE(slv_venus_cfg, MSM8974_MNOC_SLV_VENUS_CFG, 16, -1, 10);
+DEFINE_QNODE(slv_graphics_3d_cfg, MSM8974_MNOC_SLV_GRAPHICS_3D_CFG, 16, -1, 11);
+DEFINE_QNODE(slv_mmss_clk_cfg, MSM8974_MNOC_SLV_MMSS_CLK_CFG, 16, -1, 12);
+DEFINE_QNODE(slv_mmss_clk_xpu_cfg, MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG, 16, -1, 13);
+DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
+DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
+DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
+
+static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+ [MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
+ [MNOC_MAS_JPEG] = &mas_jpeg,
+ [MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
+ [MNOC_MAS_VIDEO_P0] = &mas_video_p0,
+ [MNOC_MAS_VIDEO_P1] = &mas_video_p1,
+ [MNOC_MAS_VFE] = &mas_vfe,
+ [MNOC_TO_CNOC] = &mnoc_to_cnoc,
+ [MNOC_TO_BIMC] = &mnoc_to_bimc,
+ [MNOC_SLV_CAMERA_CFG] = &slv_camera_cfg,
+ [MNOC_SLV_DISPLAY_CFG] = &slv_display_cfg,
+ [MNOC_SLV_OCMEM_CFG] = &slv_ocmem_cfg,
+ [MNOC_SLV_CPR_CFG] = &slv_cpr_cfg,
+ [MNOC_SLV_CPR_XPU_CFG] = &slv_cpr_xpu_cfg,
+ [MNOC_SLV_MISC_CFG] = &slv_misc_cfg,
+ [MNOC_SLV_MISC_XPU_CFG] = &slv_misc_xpu_cfg,
+ [MNOC_SLV_VENUS_CFG] = &slv_venus_cfg,
+ [MNOC_SLV_GRAPHICS_3D_CFG] = &slv_graphics_3d_cfg,
+ [MNOC_SLV_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
+ [MNOC_SLV_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
+ [MNOC_SLV_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [MNOC_SLV_ONOC_MPU_CFG] = &slv_onoc_mpu_cfg,
+ [MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
+};
+
+static struct msm8974_icc_desc msm8974_mnoc = {
+ .nodes = msm8974_mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
+};
+
+DEFINE_QNODE(ocmem_noc_to_ocmem_vnoc, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC, 16, 54, 78, MSM8974_OCMEM_SLV_OCMEM);
+DEFINE_QNODE(mas_jpeg_ocmem, MSM8974_OCMEM_MAS_JPEG_OCMEM, 16, 13, -1);
+DEFINE_QNODE(mas_mdp_ocmem, MSM8974_OCMEM_MAS_MDP_OCMEM, 16, 14, -1);
+DEFINE_QNODE(mas_video_p0_ocmem, MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM, 16, 15, -1);
+DEFINE_QNODE(mas_video_p1_ocmem, MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM, 16, 16, -1);
+DEFINE_QNODE(mas_vfe_ocmem, MSM8974_OCMEM_MAS_VFE_OCMEM, 16, 17, -1);
+DEFINE_QNODE(mas_cnoc_onoc_cfg, MSM8974_OCMEM_MAS_CNOC_ONOC_CFG, 16, 12, -1);
+DEFINE_QNODE(slv_service_onoc, MSM8974_OCMEM_SLV_SERVICE_ONOC, 16, -1, 19);
+DEFINE_QNODE(slv_ocmem, MSM8974_OCMEM_SLV_OCMEM, 16, -1, 18);
+
+/* Virtual NoC is needed for connection to OCMEM */
+DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
+DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+
+static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+ [OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
+ [OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
+ [OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
+ [OCMEM_MAS_VIDEO_P0_OCMEM] = &mas_video_p0_ocmem,
+ [OCMEM_MAS_VIDEO_P1_OCMEM] = &mas_video_p1_ocmem,
+ [OCMEM_MAS_VFE_OCMEM] = &mas_vfe_ocmem,
+ [OCMEM_MAS_CNOC_ONOC_CFG] = &mas_cnoc_onoc_cfg,
+ [OCMEM_SLV_SERVICE_ONOC] = &slv_service_onoc,
+ [OCMEM_VNOC_TO_SNOC] = &ocmem_vnoc_to_snoc,
+ [OCMEM_VNOC_TO_OCMEM_NOC] = &ocmem_vnoc_to_onoc,
+ [OCMEM_VNOC_MAS_GFX3D] = &mas_v_ocmem_gfx3d,
+ [OCMEM_SLV_OCMEM] = &slv_ocmem,
+};
+
+static struct msm8974_icc_desc msm8974_onoc = {
+ .nodes = msm8974_onoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
+};
+
+DEFINE_QNODE(mas_pnoc_cfg, MSM8974_PNOC_MAS_PNOC_CFG, 8, 43, -1);
+DEFINE_QNODE(mas_sdcc_1, MSM8974_PNOC_MAS_SDCC_1, 8, 33, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_3, MSM8974_PNOC_MAS_SDCC_3, 8, 34, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_4, MSM8974_PNOC_MAS_SDCC_4, 8, 36, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_2, MSM8974_PNOC_MAS_SDCC_2, 8, 35, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_tsif, MSM8974_PNOC_MAS_TSIF, 8, 37, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_bam_dma, MSM8974_PNOC_MAS_BAM_DMA, 8, 38, -1);
+DEFINE_QNODE(mas_blsp_2, MSM8974_PNOC_MAS_BLSP_2, 8, 39, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hsic, MSM8974_PNOC_MAS_USB_HSIC, 8, 40, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_blsp_1, MSM8974_PNOC_MAS_BLSP_1, 8, 41, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hs, MSM8974_PNOC_MAS_USB_HS, 8, 42, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(pnoc_to_snoc, MSM8974_PNOC_TO_SNOC, 8, 44, 45, MSM8974_SNOC_TO_PNOC, MSM8974_PNOC_SLV_PRNG);
+DEFINE_QNODE(slv_sdcc_1, MSM8974_PNOC_SLV_SDCC_1, 8, -1, 31);
+DEFINE_QNODE(slv_sdcc_3, MSM8974_PNOC_SLV_SDCC_3, 8, -1, 32);
+DEFINE_QNODE(slv_sdcc_2, MSM8974_PNOC_SLV_SDCC_2, 8, -1, 33);
+DEFINE_QNODE(slv_sdcc_4, MSM8974_PNOC_SLV_SDCC_4, 8, -1, 34);
+DEFINE_QNODE(slv_tsif, MSM8974_PNOC_SLV_TSIF, 8, -1, 35);
+DEFINE_QNODE(slv_bam_dma, MSM8974_PNOC_SLV_BAM_DMA, 8, -1, 36);
+DEFINE_QNODE(slv_blsp_2, MSM8974_PNOC_SLV_BLSP_2, 8, -1, 37);
+DEFINE_QNODE(slv_usb_hsic, MSM8974_PNOC_SLV_USB_HSIC, 8, -1, 38);
+DEFINE_QNODE(slv_blsp_1, MSM8974_PNOC_SLV_BLSP_1, 8, -1, 39);
+DEFINE_QNODE(slv_usb_hs, MSM8974_PNOC_SLV_USB_HS, 8, -1, 40);
+DEFINE_QNODE(slv_pdm, MSM8974_PNOC_SLV_PDM, 8, -1, 41);
+DEFINE_QNODE(slv_periph_apu_cfg, MSM8974_PNOC_SLV_PERIPH_APU_CFG, 8, -1, 42);
+DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
+DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
+
+static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+ [PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
+ [PNOC_MAS_SDCC_1] = &mas_sdcc_1,
+ [PNOC_MAS_SDCC_3] = &mas_sdcc_3,
+ [PNOC_MAS_SDCC_4] = &mas_sdcc_4,
+ [PNOC_MAS_SDCC_2] = &mas_sdcc_2,
+ [PNOC_MAS_TSIF] = &mas_tsif,
+ [PNOC_MAS_BAM_DMA] = &mas_bam_dma,
+ [PNOC_MAS_BLSP_2] = &mas_blsp_2,
+ [PNOC_MAS_USB_HSIC] = &mas_usb_hsic,
+ [PNOC_MAS_BLSP_1] = &mas_blsp_1,
+ [PNOC_MAS_USB_HS] = &mas_usb_hs,
+ [PNOC_TO_SNOC] = &pnoc_to_snoc,
+ [PNOC_SLV_SDCC_1] = &slv_sdcc_1,
+ [PNOC_SLV_SDCC_3] = &slv_sdcc_3,
+ [PNOC_SLV_SDCC_2] = &slv_sdcc_2,
+ [PNOC_SLV_SDCC_4] = &slv_sdcc_4,
+ [PNOC_SLV_TSIF] = &slv_tsif,
+ [PNOC_SLV_BAM_DMA] = &slv_bam_dma,
+ [PNOC_SLV_BLSP_2] = &slv_blsp_2,
+ [PNOC_SLV_USB_HSIC] = &slv_usb_hsic,
+ [PNOC_SLV_BLSP_1] = &slv_blsp_1,
+ [PNOC_SLV_USB_HS] = &slv_usb_hs,
+ [PNOC_SLV_PDM] = &slv_pdm,
+ [PNOC_SLV_PERIPH_APU_CFG] = &slv_periph_apu_cfg,
+ [PNOC_SLV_PNOC_MPU_CFG] = &slv_pnoc_mpu_cfg,
+ [PNOC_SLV_PRNG] = &slv_prng,
+ [PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
+};
+
+static struct msm8974_icc_desc msm8974_pnoc = {
+ .nodes = msm8974_pnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
+};
+
+DEFINE_QNODE(mas_lpass_ahb, MSM8974_SNOC_MAS_LPASS_AHB, 8, 18, -1);
+DEFINE_QNODE(mas_qdss_bam, MSM8974_SNOC_MAS_QDSS_BAM, 8, 19, -1);
+DEFINE_QNODE(mas_snoc_cfg, MSM8974_SNOC_MAS_SNOC_CFG, 8, 20, -1);
+DEFINE_QNODE(snoc_to_bimc, MSM8974_SNOC_TO_BIMC, 8, 21, 24, MSM8974_BIMC_TO_SNOC);
+DEFINE_QNODE(snoc_to_cnoc, MSM8974_SNOC_TO_CNOC, 8, 22, 25);
+DEFINE_QNODE(snoc_to_pnoc, MSM8974_SNOC_TO_PNOC, 8, 29, 28, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(snoc_to_ocmem_vnoc, MSM8974_SNOC_TO_OCMEM_VNOC, 8, 53, 77, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+DEFINE_QNODE(mas_crypto_core0, MSM8974_SNOC_MAS_CRYPTO_CORE0, 8, 23, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(mas_crypto_core1, MSM8974_SNOC_MAS_CRYPTO_CORE1, 8, 24, -1);
+DEFINE_QNODE(mas_lpass_proc, MSM8974_SNOC_MAS_LPASS_PROC, 8, 25, -1, MSM8974_SNOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(mas_mss, MSM8974_SNOC_MAS_MSS, 8, 26, -1);
+DEFINE_QNODE(mas_mss_nav, MSM8974_SNOC_MAS_MSS_NAV, 8, 27, -1);
+DEFINE_QNODE(mas_ocmem_dma, MSM8974_SNOC_MAS_OCMEM_DMA, 8, 28, -1);
+DEFINE_QNODE(mas_wcss, MSM8974_SNOC_MAS_WCSS, 8, 30, -1);
+DEFINE_QNODE(mas_qdss_etr, MSM8974_SNOC_MAS_QDSS_ETR, 8, 31, -1);
+DEFINE_QNODE(mas_usb3, MSM8974_SNOC_MAS_USB3, 8, 32, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(slv_ampss, MSM8974_SNOC_SLV_AMPSS, 8, -1, 20);
+DEFINE_QNODE(slv_lpass, MSM8974_SNOC_SLV_LPASS, 8, -1, 21);
+DEFINE_QNODE(slv_usb3, MSM8974_SNOC_SLV_USB3, 8, -1, 22);
+DEFINE_QNODE(slv_wcss, MSM8974_SNOC_SLV_WCSS, 8, -1, 23);
+DEFINE_QNODE(slv_ocimem, MSM8974_SNOC_SLV_OCIMEM, 8, -1, 26);
+DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
+DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
+DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
+
+static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+ [SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
+ [SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
+ [SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
+ [SNOC_TO_BIMC] = &snoc_to_bimc,
+ [SNOC_TO_CNOC] = &snoc_to_cnoc,
+ [SNOC_TO_PNOC] = &snoc_to_pnoc,
+ [SNOC_TO_OCMEM_VNOC] = &snoc_to_ocmem_vnoc,
+ [SNOC_MAS_CRYPTO_CORE0] = &mas_crypto_core0,
+ [SNOC_MAS_CRYPTO_CORE1] = &mas_crypto_core1,
+ [SNOC_MAS_LPASS_PROC] = &mas_lpass_proc,
+ [SNOC_MAS_MSS] = &mas_mss,
+ [SNOC_MAS_MSS_NAV] = &mas_mss_nav,
+ [SNOC_MAS_OCMEM_DMA] = &mas_ocmem_dma,
+ [SNOC_MAS_WCSS] = &mas_wcss,
+ [SNOC_MAS_QDSS_ETR] = &mas_qdss_etr,
+ [SNOC_MAS_USB3] = &mas_usb3,
+ [SNOC_SLV_AMPSS] = &slv_ampss,
+ [SNOC_SLV_LPASS] = &slv_lpass,
+ [SNOC_SLV_USB3] = &slv_usb3,
+ [SNOC_SLV_WCSS] = &slv_wcss,
+ [SNOC_SLV_OCIMEM] = &slv_ocimem,
+ [SNOC_SLV_SNOC_OCMEM] = &slv_snoc_ocmem,
+ [SNOC_SLV_SERVICE_SNOC] = &slv_service_snoc,
+ [SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
+};
+
+static struct msm8974_icc_desc msm8974_snoc = {
+ .nodes = msm8974_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
+};
+
+static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
+ char *name, int id, u64 val)
+{
+ int ret;
+
+ if (id == -1)
+ return;
+
+ /*
+	 * Setting the bandwidth requests for some nodes fails, and the same
+	 * behavior occurs on the downstream MSM 3.4 kernel sources, which
+	 * report errors like these:
+ *
+ * msm_rpm_get_error_from_ack(): RPM NACK Unsupported resource
+ * AXI: msm_bus_rpm_req(): RPM: Ack failed
+ * AXI: msm_bus_rpm_commit_arb(): RPM: Req fail: mas:32, bw:240000000
+ *
+ * Since there's no publicly available documentation for this hardware,
+ * and the bandwidth for some nodes in the path can be set properly,
+ * let's not return an error.
+ */
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, rsc_type, id,
+ val);
+ if (ret)
+ dev_dbg(dev, "Cannot set bandwidth for node %s (%d): %d\n",
+ name, id, ret);
+}
+
+static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct msm8974_icc_node *src_qn, *dst_qn;
+ struct msm8974_icc_provider *qp;
+ u64 sum_bw, max_peak_bw, rate;
+ u32 agg_avg = 0, agg_peak = 0;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int ret, i;
+
+ src_qn = src->data;
+ dst_qn = dst->data;
+ provider = src->provider;
+ qp = to_msm8974_icc_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* Set bandwidth on source node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ src_qn->name, src_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ src_qn->name, src_qn->slv_rpm_id, sum_bw);
+
+ /* Set bandwidth on destination node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ dst_qn->name, dst_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ dst_qn->name, dst_qn->slv_rpm_id, sum_bw);
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, src_qn->buswidth);
+
+ if (src_qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ dev_err(provider->dev, "%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ ret = 0;
+ }
+ }
+
+ src_qn->rate = rate;
+
+ return 0;
+}
+
+static int msm8974_icc_probe(struct platform_device *pdev)
+{
+ const struct msm8974_icc_desc *desc;
+ struct msm8974_icc_node **qnodes;
+ struct msm8974_icc_provider *qp;
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, msm8974_icc_bus_clocks,
+ sizeof(msm8974_icc_bus_clocks), GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(msm8974_icc_bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8974_icc_set;
+ provider->aggregate = msm8974_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ goto err_disable_clks;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err_del_icc;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(dev, "registered node %s\n", node->name);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+err_del_icc:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(provider);
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+}
+
+static int msm8974_icc_remove(struct platform_device *pdev)
+{
+ struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id msm8974_noc_of_match[] = {
+ { .compatible = "qcom,msm8974-bimc", .data = &msm8974_bimc},
+ { .compatible = "qcom,msm8974-cnoc", .data = &msm8974_cnoc},
+ { .compatible = "qcom,msm8974-mmssnoc", .data = &msm8974_mnoc},
+ { .compatible = "qcom,msm8974-ocmemnoc", .data = &msm8974_onoc},
+ { .compatible = "qcom,msm8974-pnoc", .data = &msm8974_pnoc},
+ { .compatible = "qcom,msm8974-snoc", .data = &msm8974_snoc},
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
+
+static struct platform_driver msm8974_noc_driver = {
+ .probe = msm8974_icc_probe,
+ .remove = msm8974_icc_remove,
+ .driver = {
+ .name = "qnoc-msm8974",
+ .of_match_table = msm8974_noc_of_match,
+ },
+};
+module_platform_driver(msm8974_noc_driver);
+MODULE_DESCRIPTION("Qualcomm MSM8974 NoC driver");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_LICENSE("GPL v2");
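A quick worked example of the arithmetic in msm8974_icc_aggregate() and msm8974_icc_set(), using made-up request values (interconnect bandwidths are in kbps, and icc_units_to_bps() scales by 1000):

/*
 * Two requests on one bus, hypothetical numbers:
 *   avg  = 100000 kbps and 200000 kbps
 *   peak = 150000 kbps and 400000 kbps
 *
 * aggregate:  agg_avg  = 100000 + 200000      = 300000 kbps
 *             agg_peak = max(150000, 400000)  = 400000 kbps
 * set:        sum_bw      = 300000 * 1000     = 300000000 bps
 *             max_peak_bw = 400000 * 1000     = 400000000 bps
 *             rate        = max(sum_bw, max_peak_bw) / buswidth
 *                         = 400000000 / 8     = 50000000 Hz
 *
 * so both the "bus" and "bus_a" clocks would be asked for 50 MHz.
 */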
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dd555078258c..12e5039a7a25 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -124,30 +124,6 @@ static struct lock_class_key reserved_rbtree_key;
*
****************************************************************************/
-static inline int match_hid_uid(struct device *dev,
- struct acpihid_map_entry *entry)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *hid, *uid;
-
- if (!adev)
- return -ENODEV;
-
- hid = acpi_device_hid(adev);
- uid = acpi_device_uid(adev);
-
- if (!hid || !(*hid))
- return -ENODEV;
-
- if (!uid || !(*uid))
- return strcmp(hid, entry->hid);
-
- if (!(*entry->uid))
- return strcmp(hid, entry->hid);
-
- return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
-}
-
static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -158,10 +134,14 @@ static inline u16 get_pci_device_id(struct device *dev)
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpihid_map_entry *p;
+ if (!adev)
+ return -ENODEV;
+
list_for_each_entry(p, &acpihid_map, list) {
- if (!match_hid_uid(dev, p)) {
+ if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
if (entry)
*entry = p;
return p->devid;
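The open-coded matcher is replaced by acpi_dev_hid_uid_match(), which encapsulates the same policy: the HID must match, and the UID is only compared when the caller actually supplies one. A rough model of that policy (a sketch, not the kernel's implementation):

static bool demo_hid_uid_match(const char *dev_hid, const char *dev_uid,
			       const char *hid, const char *uid)
{
	if (!dev_hid || !*dev_hid || strcmp(dev_hid, hid))
		return false;
	if (!uid || !*uid)	/* caller does not constrain the UID */
		return true;
	return dev_uid && !strcmp(dev_uid, uid);
}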
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1edc99335a94..6bb1f682f78b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -87,6 +87,15 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+/*
+ * Global static key controlling whether an update to PMR allowing more
+ * interrupts needs to be propagated to the redistributor (DSB SY).
+ * And this needs to be exported for modules to be able to enable
+ * interrupts...
+ */
+DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
+EXPORT_SYMBOL(gic_pmr_sync);
+
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;
@@ -1502,6 +1511,17 @@ static void gic_enable_nmi_support(void)
for (i = 0; i < gic_data.ppi_nr; i++)
refcount_set(&ppi_nmi_refs[i], 0);
+ /*
+ * Linux itself doesn't use 1:N distribution, so has no need to
+ * set PMHE. The only reason to have it set is if EL3 requires it
+ * (and we can't change it).
+ */
+ if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
+ static_branch_enable(&gic_pmr_sync);
+
+ pr_info("%s ICC_PMR_EL1 synchronisation\n",
+ static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing");
+
static_branch_enable(&supports_pseudo_nmis);
if (static_branch_likely(&supports_deactivate_key))
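gic_pmr_sync is consumed on the interrupt-unmask fast path: a PMR write that allows more interrupts only needs the expensive dsb(sy) when EL3 has left ICC_CTLR_EL1.PMHE set. A hedged sketch of such a consumer (illustrative; the real arch helpers differ):

static inline void demo_pmr_unmask(u32 pmr_val)
{
	write_sysreg_s(pmr_val, SYS_ICC_PMR_EL1);
	if (static_branch_unlikely(&gic_pmr_sync))
		dsb(sy);	/* propagate the PMR update (PMHE set) */
}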
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 563e87ed0766..45969927cc81 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
struct its_cmd_info info;
+ int ret;
WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
- return its_send_vpe_cmd(vpe, &info);
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->resident = on;
+
+ return ret;
}
int its_invall_vpe(struct its_vpe *vpe)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 7d0a12fe2714..8df547d2d935 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -181,7 +181,7 @@ static void plic_handle_irq(struct pt_regs *regs)
WARN_ON_ONCE(!handler->present);
- csr_clear(sie, SIE_SEIE);
+ csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(plic_irqdomain, hwirq);
@@ -191,7 +191,7 @@ static void plic_handle_irq(struct pt_regs *regs)
else
generic_handle_irq(irq);
}
- csr_set(sie, SIE_SEIE);
+ csr_set(CSR_IE, IE_EIE);
}
/*
@@ -252,8 +252,11 @@ static int __init plic_init(struct device_node *node,
continue;
}
- /* skip contexts other than supervisor external interrupt */
- if (parent.args[0] != IRQ_S_EXT)
+ /*
+ * Skip contexts other than external interrupts for our
+ * privilege level.
+ */
+ if (parent.args[0] != IRQ_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
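For context, the claim/complete protocol that the handler loop above drives: reading the claim register atomically claims the highest-priority pending source (0 means nothing is pending), and writing the same ID back signals completion. A hedged sketch (the driver itself completes from its ->irq_eoi callback):

static void demo_plic_drain(void __iomem *claim)
{
	u32 hwirq;

	while ((hwirq = readl(claim))) {
		/* ... dispatch hwirq to its Linux handler ... */
		writel(hwirq, claim);	/* complete the interrupt */
	}
}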
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index 304f50c08da2..078eeadf707a 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -10,7 +10,7 @@ config MISDN_HFCPCI
depends on PCI
help
Enable support for cards with Cologne Chip AG's
- HFC PCI chip.
+ HFC PCI chip.
config MISDN_HFCMULTI
tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 1137dd152b5c..ecc1ef6c386d 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -402,8 +402,8 @@ hdlc_empty_fifo(struct bchannel *bch, int count)
} else {
cnt = bchannel_get_rxbuf(bch, count);
if (cnt < 0) {
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- fc->name, bch->nr, count);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ fc->name, bch->nr, count);
return;
}
p = skb_put(bch->rx_skb, count);
@@ -538,8 +538,8 @@ HDLC_irq(struct bchannel *bch, u32 stat)
}
if (stat & HDLC_INT_RPR) {
if (stat & HDLC_STAT_RDO) {
- pr_warning("%s: ch%d stat %x RDO\n",
- fc->name, bch->nr, stat);
+ pr_warn("%s: ch%d stat %x RDO\n",
+ fc->name, bch->nr, stat);
hdlc->ctrl.sr.xml = 0;
hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS;
write_ctrl(bch, 1);
@@ -561,8 +561,8 @@ HDLC_irq(struct bchannel *bch, u32 stat)
HDLC_STAT_CRCVFR) {
recv_Bchannel(bch, 0, false);
} else {
- pr_warning("%s: got invalid frame\n",
- fc->name);
+ pr_warn("%s: got invalid frame\n",
+ fc->name);
skb_trim(bch->rx_skb, 0);
}
}
@@ -574,8 +574,8 @@ handle_tx:
* restart transmitting the whole frame on HDLC
* in transparent mode we send the next data
*/
- pr_warning("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
- stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
+ pr_warn("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
+ stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
if (bch->tx_skb && bch->tx_skb->len) {
if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
bch->tx_idx = 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 86669ec8b977..7013a3f08429 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2248,8 +2248,8 @@ next_frame:
if (bch) {
maxlen = bchannel_get_rxbuf(bch, Zsize);
if (maxlen < 0) {
- pr_warning("card%d.B%d: No bufferspace for %d bytes\n",
- hc->id + 1, bch->nr, Zsize);
+ pr_warn("card%d.B%d: No bufferspace for %d bytes\n",
+ hc->id + 1, bch->nr, Zsize);
return;
}
sp = &bch->rx_skb;
@@ -2260,8 +2260,8 @@ next_frame:
if (*sp == NULL) {
*sp = mI_alloc_skb(maxlen, GFP_ATOMIC);
if (*sp == NULL) {
- pr_warning("card%d: No mem for dch rx_skb\n",
- hc->id + 1);
+ pr_warn("card%d: No mem for dch rx_skb\n",
+ hc->id + 1);
return;
}
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 2330a7d24267..abdf787c1a71 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -566,8 +566,7 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
}
maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
if (maxlen < 0) {
- pr_warning("B%d: No bufferspace for %d bytes\n",
- bch->nr, fcnt_rx);
+ pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
} else {
ptr = skb_put(bch->rx_skb, fcnt_rx);
if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 008a74a1ed44..621364bb6b12 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -841,8 +841,8 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
if (maxlen < 0) {
if (rx_skb)
skb_trim(rx_skb, 0);
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- hw->name, fifo->bch->nr, len);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ hw->name, fifo->bch->nr, len);
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index e4fa2a2824af..7e2bc5068019 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -173,8 +173,8 @@ symbolic(struct hfcusb_symbolic_list list[], const int num)
/*
- * List of all supported enpoints configiration sets, used to find the
- * best matching endpoint configuration within a devices' USB descriptor.
+ * List of all supported endpoint configuration sets, used to find the
+ * best matching endpoint configuration within a device's USB descriptor.
* We need at least 3 RX endpoints, and 3 TX endpoints, either
* INT-in and ISO-out, or ISO-in and ISO-out)
* with 4 RX endpoints even E-Channel logging is possible
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index bca880213e91..ec475087fbf9 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -936,8 +936,8 @@ hscx_empty_fifo(struct hscx_hw *hscx, u8 count)
hscx_cmdr(hscx, 0x80); /* RMC */
if (hscx->bch.rx_skb)
skb_trim(hscx->bch.rx_skb, 0);
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- hscx->ip->name, hscx->bch.nr, count);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ hscx->ip->name, hscx->bch.nr, count);
return;
}
p = skb_put(hscx->bch.rx_skb, count);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 4a3e748a1c26..e325e87c0593 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -27,7 +27,6 @@ MODULE_VERSION(ISAR_REV);
#define DEBUG_HW_FIRMWARE_FIFO 0x10000
-static const u8 faxmodulation_s[] = "3,24,48,72,73,74,96,97,98,121,122,145,146";
static const u8 faxmodulation[] = {3, 24, 48, 72, 73, 74, 96, 97, 98, 121,
122, 145, 146};
#define FAXMODCNT 13
@@ -222,7 +221,7 @@ load_firmware(struct isar_hw *isar, const u8 *buf, int size)
goto reterror;
}
if (!poll_mbox(isar, 1000)) {
- pr_warning("ISAR poll_mbox dkey failed\n");
+ pr_warn("ISAR poll_mbox dkey failed\n");
ret = -ETIME;
goto reterror;
}
@@ -432,8 +431,8 @@ isar_rcv_frame(struct isar_ch *ch)
case ISDN_P_B_MODEM_ASYNC:
maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
if (maxlen < 0) {
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- ch->is->name, ch->bch.nr, ch->is->clsb);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ ch->is->name, ch->bch.nr, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
@@ -443,8 +442,8 @@ isar_rcv_frame(struct isar_ch *ch)
case ISDN_P_B_HDLC:
maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
if (maxlen < 0) {
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- ch->is->name, ch->bch.nr, ch->is->clsb);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ ch->is->name, ch->bch.nr, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 61caa7e50b9a..6aae97e827b7 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -380,8 +380,8 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
stat = bchannel_get_rxbuf(&bc->bch, cnt);
/* only transparent mode uses the count here, HDLC overrun is detected later */
if (stat == -ENOMEM) {
- pr_warning("%s.B%d: No memory for %d bytes\n",
- card->name, bc->bch.nr, cnt);
+ pr_warn("%s.B%d: No memory for %d bytes\n",
+ card->name, bc->bch.nr, cnt);
return;
}
if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
@@ -420,8 +420,8 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
recv_Bchannel(&bc->bch, 0, false);
stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
if (stat < 0) {
- pr_warning("%s.B%d: No memory for %d bytes\n",
- card->name, bc->bch.nr, cnt);
+ pr_warn("%s.B%d: No memory for %d bytes\n",
+ card->name, bc->bch.nr, cnt);
return;
}
} else if (stat == -HDLC_CRC_ERROR) {
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index bad55fdacd36..f3b8db7b48fe 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -466,8 +466,8 @@ W6692_empty_Bfifo(struct w6692_ch *wch, int count)
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
if (wch->bch.rx_skb)
skb_trim(wch->bch.rx_skb, 0);
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- card->name, wch->bch.nr, count);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ card->name, wch->bch.nr, count);
return;
}
ptr = skb_put(wch->bch.rx_skb, count);
@@ -729,8 +729,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
wch->bch.nr, star);
}
if (star & W_B_STAR_XDOW) {
- pr_warning("%s: B%d XDOW proto=%x\n", card->name,
- wch->bch.nr, wch->bch.state);
+ pr_warn("%s: B%d XDOW proto=%x\n", card->name,
+ wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_xdu++;
#endif
@@ -747,8 +747,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
return; /* handle XDOW only once */
}
if (stat & W_B_EXI_XDUN) {
- pr_warning("%s: B%d XDUN proto=%x\n", card->name,
- wch->bch.nr, wch->bch.state);
+ pr_warn("%s: B%d XDUN proto=%x\n", card->name,
+ wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_xdu++;
#endif
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index f378173bcf6f..8c93af06ed02 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -474,8 +474,8 @@ bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
if (bch->rx_skb) {
len = skb_tailroom(bch->rx_skb);
if (len < reqlen) {
- pr_warning("B%d no space for %d (only %d) bytes\n",
- bch->nr, reqlen, len);
+ pr_warn("B%d no space for %d (only %d) bytes\n",
+ bch->nr, reqlen, len);
if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
/* send what we have now and try a new buffer */
recv_Bchannel(bch, 0, true);
@@ -508,8 +508,7 @@ bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
}
bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!bch->rx_skb) {
- pr_warning("B%d receive no memory for %d bytes\n",
- bch->nr, len);
+ pr_warn("B%d receive no memory for %d bytes\n", bch->nr, len);
len = -ENOMEM;
}
return len;
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 6a72b7e13719..14ba7faaed9e 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -57,11 +57,15 @@ static void led_activity_function(struct timer_list *t)
curr_used = 0;
for_each_possible_cpu(i) {
- curr_used += kcpustat_cpu(i).cpustat[CPUTIME_USER]
- + kcpustat_cpu(i).cpustat[CPUTIME_NICE]
- + kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]
- + kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]
- + kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ struct kernel_cpustat kcpustat;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
+
+ curr_used += kcpustat.cpustat[CPUTIME_USER]
+ + kcpustat.cpustat[CPUTIME_NICE]
+ + kcpustat.cpustat[CPUTIME_SYSTEM]
+ + kcpustat.cpustat[CPUTIME_SOFTIRQ]
+ + kcpustat.cpustat[CPUTIME_IRQ];
cpus++;
}
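The switch to kcpustat_cpu_fetch() snapshots the whole per-CPU cpustat array in one call, which stays coherent when vtime-based accounting updates the fields concurrently; the fields are then summed from the local copy. Usage boils down to this hedged sketch:

static u64 demo_busy_time(int cpu)
{
	struct kernel_cpustat snap;

	kcpustat_cpu_fetch(&snap, cpu);	/* one coherent snapshot */

	return snap.cpustat[CPUTIME_USER] +
	       snap.cpustat[CPUTIME_SYSTEM] +
	       snap.cpustat[CPUTIME_IRQ];
}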
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 4134e580f786..60311e8d6240 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,14 @@ static int rackmeter_ignore_nice;
*/
static inline u64 get_cpu_idle_time(unsigned int cpu)
{
+ struct kernel_cpustat *kcpustat = &kcpustat_cpu(cpu);
u64 retval;
- retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
- kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+ retval = kcpustat->cpustat[CPUTIME_IDLE] +
+ kcpustat->cpustat[CPUTIME_IOWAIT];
if (rackmeter_ignore_nice)
- retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+ retval += kcpustat_field(kcpustat, CPUTIME_NICE, cpu);
return retval;
}
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 3c971297b6dc..67daeec94b44 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -468,9 +468,7 @@ static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
else
id = ((*reg) - 0x30) / 2;
if (id > 7) {
- pr_warning("wf_fcu: Can't parse "
- "fan ID in device-tree for %pOF\n",
- np);
+ pr_warn("wf_fcu: Can't parse fan ID in device-tree for %pOF\n", np);
break;
}
wf_fcu_add_fan(pv, name, type, id);
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index e44525b19071..b03a33b803b7 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -124,8 +124,8 @@ static int wf_lm87_probe(struct i2c_client *client,
}
}
if (!name) {
- pr_warning("wf_lm87: Unsupported sensor %pOF\n",
- client->dev.of_node);
+ pr_warn("wf_lm87: Unsupported sensor %pOF\n",
+ client->dev.of_node);
return -ENODEV;
}
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index c5da0fc24884..e81746b87cff 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -285,8 +285,8 @@ static void cpu_fans_tick_split(void)
/* Apply result directly to exhaust fan */
err = wf_control_set(cpu_rear_fans[cpu], sp->target);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_rear_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_rear_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
break;
}
@@ -296,8 +296,8 @@ static void cpu_fans_tick_split(void)
DBG_LOTS(" CPU%d: intake = %d RPM\n", cpu, intake);
err = wf_control_set(cpu_front_fans[cpu], intake);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_front_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_front_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
break;
}
@@ -367,22 +367,22 @@ static void cpu_fans_tick_combined(void)
for (cpu = 0; cpu < nr_chips; cpu++) {
err = wf_control_set(cpu_rear_fans[cpu], sp->target);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_rear_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_rear_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
}
err = wf_control_set(cpu_front_fans[cpu], intake);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_front_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_front_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
}
err = 0;
if (cpu_pumps[cpu])
err = wf_control_set(cpu_pumps[cpu], pump);
if (err) {
- pr_warning("wf_pm72: Pump %s reports error %d\n",
- cpu_pumps[cpu]->name, err);
+ pr_warn("wf_pm72: Pump %s reports error %d\n",
+ cpu_pumps[cpu]->name, err);
failure_state |= FAILURE_FAN;
}
}
@@ -561,7 +561,7 @@ static void drives_fan_tick(void)
err = wf_sensor_get(drives_temp, &temp);
if (err) {
- pr_warning("wf_pm72: drive bay temp sensor error %d\n", err);
+ pr_warn("wf_pm72: drive bay temp sensor error %d\n", err);
failure_state |= FAILURE_SENSOR;
wf_control_set_max(drives_fan);
return;
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 8456eb67184b..7acd1684c451 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -281,8 +281,8 @@ static void cpu_fans_tick(void)
for (i = 0; i < 3; i++) {
err = wf_control_set(cpu_fans[cpu][i], speed);
if (err) {
- pr_warning("wf_rm31: Fan %s reports error %d\n",
- cpu_fans[cpu][i]->name, err);
+ pr_warn("wf_rm31: Fan %s reports error %d\n",
+ cpu_fans[cpu][i]->name, err);
failure_state |= FAILURE_FAN;
}
}
@@ -465,7 +465,7 @@ static void slots_fan_tick(void)
err = wf_sensor_get(slots_temp, &temp);
if (err) {
- pr_warning("wf_rm31: slots temp sensor error %d\n", err);
+ pr_warn("wf_rm31: slots temp sensor error %d\n", err);
failure_state |= FAILURE_SENSOR;
wf_control_set_max(slots_fan);
return;
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 4c5ba35d48d4..834b35dc3b13 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
- err = platform_get_irq_byname(pdev, "doorbell");
+ err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
hsp->doorbell_irq = err;
@@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- err = platform_get_irq_byname(pdev, name);
+ err = platform_get_irq_byname_optional(pdev, name);
if (err >= 0) {
hsp->shared_irqs[i] = err;
count++;
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b72e82efaee5..38fbb3b59873 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -191,7 +191,7 @@ int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(__mcb_register_driver);
+EXPORT_SYMBOL_NS_GPL(__mcb_register_driver, MCB);
/**
* mcb_unregister_driver() - Unregister a @mcb_driver from the system
@@ -203,7 +203,7 @@ void mcb_unregister_driver(struct mcb_driver *drv)
{
driver_unregister(&drv->driver);
}
-EXPORT_SYMBOL_GPL(mcb_unregister_driver);
+EXPORT_SYMBOL_NS_GPL(mcb_unregister_driver, MCB);
static void mcb_release_dev(struct device *dev)
{
@@ -249,7 +249,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(mcb_device_register);
+EXPORT_SYMBOL_NS_GPL(mcb_device_register, MCB);
static void mcb_free_bus(struct device *dev)
{
@@ -301,7 +301,7 @@ err_free:
kfree(bus);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(mcb_alloc_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
static int __mcb_devices_unregister(struct device *dev, void *data)
{
@@ -323,7 +323,7 @@ void mcb_release_bus(struct mcb_bus *bus)
{
mcb_devices_unregister(bus);
}
-EXPORT_SYMBOL_GPL(mcb_release_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
/**
* mcb_bus_get() - Increment refcnt
@@ -338,7 +338,7 @@ struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
return bus;
}
-EXPORT_SYMBOL_GPL(mcb_bus_get);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_get, MCB);
/**
* mcb_bus_put() - Decrement refcnt
@@ -351,7 +351,7 @@ void mcb_bus_put(struct mcb_bus *bus)
if (bus)
put_device(&bus->dev);
}
-EXPORT_SYMBOL_GPL(mcb_bus_put);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_put, MCB);
/**
* mcb_alloc_dev() - Allocate a device
@@ -371,7 +371,7 @@ struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
return dev;
}
-EXPORT_SYMBOL_GPL(mcb_alloc_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_dev, MCB);
/**
* mcb_free_dev() - Free @mcb_device
@@ -383,7 +383,7 @@ void mcb_free_dev(struct mcb_device *dev)
{
kfree(dev);
}
-EXPORT_SYMBOL_GPL(mcb_free_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
@@ -412,7 +412,7 @@ void mcb_bus_add_devices(const struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
}
-EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_add_devices, MCB);
/**
* mcb_get_resource() - get a resource for a mcb device
@@ -428,7 +428,7 @@ struct resource *mcb_get_resource(struct mcb_device *dev, unsigned int type)
else
return NULL;
}
-EXPORT_SYMBOL_GPL(mcb_get_resource);
+EXPORT_SYMBOL_NS_GPL(mcb_get_resource, MCB);
/**
* mcb_request_mem() - Request memory
@@ -454,7 +454,7 @@ struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
return mem;
}
-EXPORT_SYMBOL_GPL(mcb_request_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
/**
* mcb_release_mem() - Release memory requested by device
@@ -469,7 +469,7 @@ void mcb_release_mem(struct resource *mem)
size = resource_size(mem);
release_mem_region(mem->start, size);
}
-EXPORT_SYMBOL_GPL(mcb_release_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_release_mem, MCB);
static int __mcb_get_irq(struct mcb_device *dev)
{
@@ -495,7 +495,7 @@ int mcb_get_irq(struct mcb_device *dev)
return __mcb_get_irq(dev);
}
-EXPORT_SYMBOL_GPL(mcb_get_irq);
+EXPORT_SYMBOL_NS_GPL(mcb_get_irq, MCB);
static int mcb_init(void)
{
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index 8f1bde437a7e..506676754538 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -168,3 +168,4 @@ module_exit(mcb_lpc_exit);
MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over LPC support");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 3b69e6aa3d88..0266bfddfbe2 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -253,4 +253,4 @@ free_header:
return ret;
}
-EXPORT_SYMBOL_GPL(chameleon_parse_cells);
+EXPORT_SYMBOL_NS_GPL(chameleon_parse_cells, MCB);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 14866aa22f75..dc88232d9af8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -131,3 +131,4 @@ module_pci_driver(mcb_pci_driver);
MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over PCI support");
+MODULE_IMPORT_NS(MCB);
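
The EXPORT_SYMBOL_NS_GPL() conversions above place every MCB export into a named symbol namespace; modpost then refuses to link a module against those symbols unless it declares MODULE_IMPORT_NS() for that namespace, which is why the LPC and PCI carrier modules gain the import. A minimal sketch of the provider/consumer pairing, with illustrative names rather than real MCB symbols:

#include <linux/module.h>

/* provider module */
int example_get_value(void)
{
	return 42;
}
EXPORT_SYMBOL_NS_GPL(example_get_value, EXAMPLE);

/* consumer module: without this declaration, modpost fails the build
 * because the symbol lives in the EXAMPLE namespace */
MODULE_IMPORT_NS(EXAMPLE);
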
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index aa98953f4462..d6d5ab23c088 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -38,9 +38,9 @@ config MD_AUTODETECT
default y
---help---
If you say Y here, then the kernel will try to autodetect raid
- arrays as part of its boot process.
+ arrays as part of its boot process.
- If you don't use raid and say Y, this autodetection can cause
+ If you don't use raid and say Y, this autodetection can cause
a several-second delay in the boot time due to various
synchronisation steps that are part of this process.
@@ -290,7 +290,7 @@ config DM_SNAPSHOT
depends on BLK_DEV_DM
select DM_BUFIO
---help---
- Allow volume managers to take writable snapshots of a device.
+ Allow volume managers to take writable snapshots of a device.
config DM_THIN_PROVISIONING
tristate "Thin provisioning target"
@@ -298,7 +298,7 @@ config DM_THIN_PROVISIONING
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- Provides thin provisioning and snapshots that share a data store.
+ Provides thin provisioning and snapshots that share a data store.
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
@@ -307,23 +307,23 @@ config DM_CACHE
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- dm-cache attempts to improve performance of a block device by
- moving frequently used data to a smaller, higher performance
- device. Different 'policy' plugins can be used to change the
- algorithms used to select which blocks are promoted, demoted,
- cleaned etc. It supports writeback and writethrough modes.
+ dm-cache attempts to improve performance of a block device by
+ moving frequently used data to a smaller, higher performance
+ device. Different 'policy' plugins can be used to change the
+ algorithms used to select which blocks are promoted, demoted,
+ cleaned etc. It supports writeback and writethrough modes.
config DM_CACHE_SMQ
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
default y
---help---
- A cache policy that uses a multiqueue ordered by recent hits
- to select which blocks should be promoted and demoted.
- This is meant to be a general purpose policy. It prioritises
- reads over writes. This SMQ policy (vs MQ) offers the promise
- of less memory utilization, improved performance and increased
- adaptability in the face of changing workloads.
+ A cache policy that uses a multiqueue ordered by recent hits
+ to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes. This SMQ policy (vs MQ) offers the promise
+ of less memory utilization, improved performance and increased
+ adaptability in the face of changing workloads.
config DM_WRITECACHE
tristate "Writecache target"
@@ -343,9 +343,9 @@ config DM_ERA
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- dm-era tracks which parts of a block device are written to
- over time. Useful for maintaining cache coherency when using
- vendor snapshots.
+ dm-era tracks which parts of a block device are written to
+ over time. Useful for maintaining cache coherency when using
+ vendor snapshots.
config DM_CLONE
tristate "Clone target (EXPERIMENTAL)"
@@ -353,20 +353,20 @@ config DM_CLONE
default n
select DM_PERSISTENT_DATA
---help---
- dm-clone produces a one-to-one copy of an existing, read-only source
- device into a writable destination device. The cloned device is
- visible/mountable immediately and the copy of the source device to the
- destination device happens in the background, in parallel with user
- I/O.
+ dm-clone produces a one-to-one copy of an existing, read-only source
+ device into a writable destination device. The cloned device is
+ visible/mountable immediately and the copy of the source device to the
+ destination device happens in the background, in parallel with user
+ I/O.
- If unsure, say N.
+ If unsure, say N.
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
---help---
- Allow volume managers to mirror logical volumes, also
- needed for live data migration tools such as 'pvmove'.
+ Allow volume managers to mirror logical volumes, also
+ needed for live data migration tools such as 'pvmove'.
config DM_LOG_USERSPACE
tristate "Mirror userspace logging"
@@ -483,7 +483,7 @@ config DM_FLAKEY
tristate "Flakey target"
depends on BLK_DEV_DM
---help---
- A target that intermittently fails I/O for debugging purposes.
+ A target that intermittently fails I/O for debugging purposes.
config DM_VERITY
tristate "Verity target support"
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index d26b35195825..fd714628da6a 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -5,5 +5,3 @@ obj-$(CONFIG_BCACHE) += bcache.o
bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
util.o writeback.o
-
-CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 6f776823b9ba..a1df0d95151c 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -377,7 +377,10 @@ retry_invalidate:
if (!fifo_full(&ca->free_inc))
goto retry_invalidate;
- bch_prio_write(ca);
+ if (bch_prio_write(ca, false) < 0) {
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ }
}
}
out:
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 013e35a9e317..9198c1b480d9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -582,6 +582,7 @@ struct cache_set {
*/
wait_queue_head_t btree_cache_wait;
struct task_struct *btree_cache_alloc_lock;
+ spinlock_t btree_cannibalize_lock;
/*
* When we free a btree node, we increment the gen of the bucket the
@@ -723,6 +724,7 @@ struct cache_set {
unsigned int gc_always_rewrite:1;
unsigned int shrinker_disabled:1;
unsigned int copy_gc_enabled:1;
+ unsigned int idle_max_writeback_rate_enabled:1;
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@@ -977,7 +979,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
-void bch_prio_write(struct cache *ca);
+int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 08768796b543..cffcdc9feefb 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -155,6 +155,7 @@ int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
return 0;
}
+/* Pop the top key of keylist by pointing l->top to its previous key */
struct bkey *bch_keylist_pop(struct keylist *l)
{
struct bkey *k = l->keys;
@@ -168,6 +169,7 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k;
}
+/* Pop the bottom key of keylist and update l->top_p */
void bch_keylist_pop_front(struct keylist *l)
{
l->top_p -= bkey_u64s(l->keys);
@@ -309,7 +311,6 @@ void bch_btree_keys_free(struct btree_keys *b)
t->tree = NULL;
t->data = NULL;
}
-EXPORT_SYMBOL(bch_btree_keys_free);
int bch_btree_keys_alloc(struct btree_keys *b,
unsigned int page_order,
@@ -342,7 +343,6 @@ err:
bch_btree_keys_free(b);
return -ENOMEM;
}
-EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
@@ -361,7 +361,6 @@ void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
* any more.
*/
}
-EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
@@ -678,7 +677,6 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
bch_bset_build_unwritten_tree(b);
}
-EXPORT_SYMBOL(bch_bset_init_next);
/*
* Build auxiliary binary tree 'struct bset_tree *t', this tree is used to
@@ -732,7 +730,6 @@ void bch_bset_build_written_tree(struct btree_keys *b)
j = inorder_next(j, t->size))
make_bfloat(t, j);
}
-EXPORT_SYMBOL(bch_bset_build_written_tree);
/* Insert */
@@ -780,7 +777,6 @@ fix_right: do {
j = j * 2 + 1;
} while (j < t->size);
}
-EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
static void bch_bset_fix_lookup_table(struct btree_keys *b,
struct bset_tree *t,
@@ -855,7 +851,6 @@ bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
return b->ops->key_merge(b, l, r);
}
-EXPORT_SYMBOL(bch_bkey_try_merge);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
struct bkey *insert)
@@ -875,7 +870,6 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
bkey_copy(where, insert);
bch_bset_fix_lookup_table(b, t, where);
}
-EXPORT_SYMBOL(bch_bset_insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key)
@@ -931,7 +925,6 @@ copy: bkey_copy(m, k);
merged:
return status;
}
-EXPORT_SYMBOL(bch_btree_insert_key);
/* Lookup */
@@ -1077,7 +1070,6 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
return i.l;
}
-EXPORT_SYMBOL(__bch_bset_search);
/* Btree iterator */
@@ -1132,7 +1124,6 @@ struct bkey *bch_btree_iter_init(struct btree_keys *b,
{
return __bch_btree_iter_init(b, iter, search, b->set);
}
-EXPORT_SYMBOL(bch_btree_iter_init);
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
btree_iter_cmp_fn *cmp)
@@ -1165,7 +1156,6 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
return __bch_btree_iter_next(iter, btree_iter_cmp);
}
-EXPORT_SYMBOL(bch_btree_iter_next);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
struct btree_keys *b, ptr_filter_fn fn)
@@ -1196,7 +1186,6 @@ int bch_bset_sort_state_init(struct bset_sort_state *state,
return mempool_init_page_pool(&state->pool, 1, page_order);
}
-EXPORT_SYMBOL(bch_bset_sort_state_init);
static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
@@ -1313,7 +1302,6 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
-EXPORT_SYMBOL(bch_btree_sort_partial);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
struct btree_iter *iter,
@@ -1366,7 +1354,6 @@ void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
out:
bch_bset_build_written_tree(b);
}
-EXPORT_SYMBOL(bch_btree_sort_lazy);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ba434d9ac720..14d6c33b0957 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -543,6 +543,11 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
set_btree_node_dirty(b);
+ /*
+ * w->journal is always the oldest journal pin of all bkeys
+ * in the leaf node, to make sure the oldest jset seq won't
+ * be increased before this btree node is flushed.
+ */
if (journal_ref) {
if (w->journal &&
journal_pin_cmp(b->c, w->journal, journal_ref)) {
@@ -723,6 +728,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));
i = 0;
@@ -884,15 +891,17 @@ out:
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
- struct task_struct *old;
-
- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
- if (old && old != current) {
+ spin_lock(&c->btree_cannibalize_lock);
+ if (likely(c->btree_cache_alloc_lock == NULL)) {
+ c->btree_cache_alloc_lock = current;
+ } else if (c->btree_cache_alloc_lock != current) {
if (op)
prepare_to_wait(&c->btree_cache_wait, &op->wait,
TASK_UNINTERRUPTIBLE);
+ spin_unlock(&c->btree_cannibalize_lock);
return -EINTR;
}
+ spin_unlock(&c->btree_cannibalize_lock);
return 0;
}
@@ -927,10 +936,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
*/
static void bch_cannibalize_unlock(struct cache_set *c)
{
+ spin_lock(&c->btree_cannibalize_lock);
if (c->btree_cache_alloc_lock == current) {
c->btree_cache_alloc_lock = NULL;
wake_up(&c->btree_cache_wait);
}
+ spin_unlock(&c->btree_cannibalize_lock);
}
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
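
The cmpxchg-based try-lock this replaces had a lost-wakeup window: a task could see the lock held, but before it reached prepare_to_wait() the holder could clear btree_cache_alloc_lock and issue wake_up() with nobody on the waitqueue yet. Taking btree_cannibalize_lock around both the try-lock and the unlock guarantees a waiter is queued on btree_cache_wait before the holder can release. A hedged reconstruction of the racy interleaving, as a comment timeline rather than literal code:

/*
 *   waiter                             holder
 *   ------                             ------
 *   cmpxchg() sees lock held
 *                                      btree_cache_alloc_lock = NULL;
 *                                      wake_up(&btree_cache_wait);  <- no waiter queued yet
 *   prepare_to_wait(&btree_cache_wait, ...);
 *   sleeps; the wakeup has already been lost
 */
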
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index c12cd809ab19..0164a1fe94a9 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -45,7 +45,6 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL(closure_sub);
/*
* closure_put - decrement a closure's refcount
@@ -54,7 +53,6 @@ void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL(closure_put);
/*
* closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -76,7 +74,6 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL(__closure_wake_up);
/**
* closure_wait - add a closure to a waitlist
@@ -96,7 +93,6 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
return true;
}
-EXPORT_SYMBOL(closure_wait);
struct closure_syncer {
struct task_struct *task;
@@ -131,7 +127,6 @@ void __sched __closure_sync(struct closure *cl)
__set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(__closure_sync);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -149,7 +144,6 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -162,7 +156,6 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *closure_debug;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 41adcd1546f1..73478a91a342 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -62,18 +62,6 @@ static void bch_data_insert_keys(struct closure *cl)
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;
- /*
- * If we're looping, might already be waiting on
- * another journal write - can't wait on more than one journal write at
- * a time
- *
- * XXX: this looks wrong
- */
-#if 0
- while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
- closure_sync(&s->cl);
-#endif
-
if (!op->replace)
journal_ref = bch_journal(op->c, &op->insert_keys,
op->flush_journal ? cl : NULL);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 20ed838e9413..77e9869345e7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -92,10 +92,11 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
sb->version, sb->flags, sb->seq, sb->keys);
- err = "Not a bcache superblock";
+ err = "Not a bcache superblock (bad offset)";
if (sb->offset != SB_SECTOR)
goto err;
+ err = "Not a bcache superblock (bad magic)";
if (memcmp(sb->magic, bcache_magic, 16))
goto err;
@@ -529,12 +530,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_sync(cl);
}
-void bch_prio_write(struct cache *ca)
+int bch_prio_write(struct cache *ca, bool wait)
{
int i;
struct bucket *b;
struct closure cl;
+ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
+ fifo_used(&ca->free[RESERVE_PRIO]),
+ fifo_used(&ca->free[RESERVE_NONE]),
+ fifo_used(&ca->free_inc));
+
+ /*
+ * Pre-check if there are enough free buckets. In the non-blocking
+	 * scenario it's better to fail early than to start allocating
+	 * buckets and clean up later in case of failure.
+ */
+ if (!wait) {
+ size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
+ fifo_used(&ca->free[RESERVE_NONE]);
+ if (prio_buckets(ca) > avail)
+ return -ENOMEM;
+ }
+
closure_init_stack(&cl);
lockdep_assert_held(&ca->set->bucket_lock);
@@ -544,9 +562,6 @@ void bch_prio_write(struct cache *ca)
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
- //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
- // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
-
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
struct prio_set *p = ca->disk_buckets;
@@ -564,7 +579,7 @@ void bch_prio_write(struct cache *ca)
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
+ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -593,6 +608,7 @@ void bch_prio_write(struct cache *ca)
ca->prio_last_buckets[i] = ca->prio_buckets[i];
}
+ return 0;
}
static void prio_read(struct cache *ca, uint64_t bucket)
@@ -761,20 +777,28 @@ static inline int idx_to_first_minor(int idx)
static void bcache_device_free(struct bcache_device *d)
{
+ struct gendisk *disk = d->disk;
+
lockdep_assert_held(&bch_register_lock);
- pr_info("%s stopped", d->disk->disk_name);
+ if (disk)
+ pr_info("%s stopped", disk->disk_name);
+ else
+ pr_err("bcache device (NULL gendisk) stopped");
if (d->c)
bcache_device_detach(d);
- if (d->disk && d->disk->flags & GENHD_FL_UP)
- del_gendisk(d->disk);
- if (d->disk && d->disk->queue)
- blk_cleanup_queue(d->disk->queue);
- if (d->disk) {
+
+ if (disk) {
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+
ida_simple_remove(&bcache_device_idx,
- first_minor_to_idx(d->disk->first_minor));
- put_disk(d->disk);
+ first_minor_to_idx(disk->first_minor));
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -1769,6 +1793,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
+ spin_lock_init(&c->btree_cannibalize_lock);
init_waitqueue_head(&c->bucket_wait);
init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
@@ -1809,6 +1834,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->congested_read_threshold_us = 2000;
c->congested_write_threshold_us = 20000;
c->error_limit = DEFAULT_IO_ERROR_LIMIT;
+ c->idle_max_writeback_rate_enabled = 1;
WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
return c;
@@ -1954,7 +1980,7 @@ static int run_cache_set(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i)
- bch_prio_write(ca);
+ bch_prio_write(ca, true);
mutex_unlock(&c->bucket_lock);
err = "cannot allocate new UUID bucket";
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 627dcea0f5b6..733e2ddf3c78 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -134,6 +134,7 @@ rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
+rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);
@@ -747,6 +748,8 @@ SHOW(__bch_cache_set)
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+ sysfs_printf(idle_max_writeback_rate, "%i",
+ c->idle_max_writeback_rate_enabled);
sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
sysfs_printf(io_disable, "%i",
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -864,6 +867,9 @@ STORE(__bch_cache_set)
sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
+ sysfs_strtoul_bool(idle_max_writeback_rate,
+ c->idle_max_writeback_rate_enabled);
+
/*
* write gc_after_writeback here may overwrite an already set
* BCH_DO_AUTO_GC, it doesn't matter because this flag will be
@@ -954,6 +960,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
&sysfs_copy_gc_enabled,
+ &sysfs_idle_max_writeback_rate,
&sysfs_gc_after_writeback,
&sysfs_io_disable,
&sysfs_cutoff_writeback,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d60268fe49e1..4a40f9eadeaf 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -122,6 +122,10 @@ static void __update_writeback_rate(struct cached_dev *dc)
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
+	/* Don't set max writeback rate if it is disabled */
+ if (!c->idle_max_writeback_rate_enabled)
+ return false;
+
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index b5389890bbc3..1f8f98efd97a 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -150,11 +150,10 @@ static int bio_detain(struct dm_bio_prison *prison,
struct dm_bio_prison_cell **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -198,11 +197,9 @@ void dm_cell_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
__cell_release(prison, cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
@@ -250,12 +247,10 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
void *context,
struct dm_bio_prison_cell *cell)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
visit_fn(context, cell);
rb_erase(&cell->node, &prison->cells);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
@@ -275,11 +270,10 @@ int dm_cell_promote_or_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __promote_or_release(prison, cell);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -379,10 +373,9 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
- unsigned long flags;
unsigned next_entry;
- spin_lock_irqsave(&ds->lock, flags);
+ spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
!ds->entries[ds->current_entry].count)
r = 0;
@@ -392,7 +385,7 @@ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
if (!ds->entries[next_entry].count)
ds->current_entry = next_entry;
}
- spin_unlock_irqrestore(&ds->lock, flags);
+ spin_unlock_irq(&ds->lock);
return r;
}
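
Every one of these prison entry points runs in process context with interrupts enabled, so the plain spin_lock_irq()/spin_unlock_irq() pair suffices; the _irqsave/_irqrestore forms are only needed when a caller might already have interrupts disabled. A minimal sketch of the two idioms (lock name illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Known process context, IRQs on: _irq is enough and skips the
 * flags save/restore; note that unlock re-enables IRQs outright. */
static void from_process_context(void)
{
	spin_lock_irq(&example_lock);
	/* critical section */
	spin_unlock_irq(&example_lock);
}

/* Context unknown (IRQs may already be off): preserve caller state. */
static void from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* critical section */
	spin_unlock_irqrestore(&example_lock, flags);
}
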
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index b092cdc8e1ae..8ee019eda32d 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -177,11 +177,10 @@ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -261,11 +260,10 @@ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -285,11 +283,9 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
struct work_struct *continuation)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
__quiesce(prison, cell, continuation);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
@@ -309,11 +305,10 @@ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
unsigned new_lock_level)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __promote(prison, cell, new_lock_level);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -342,11 +337,10 @@ bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
struct bio_list *bios)
{
bool r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __unlock(prison, cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8346e6d1816c..2d32821b3a5b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
bool r;
- unsigned long flags;
- spin_lock_irqsave(&iot->lock, flags);
+ spin_lock_irq(&iot->lock);
r = __iot_idle_for(iot, jifs);
- spin_unlock_irqrestore(&iot->lock, flags);
+ spin_unlock_irq(&iot->lock);
return r;
}
static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
- unsigned long flags;
-
- spin_lock_irqsave(&iot->lock, flags);
+ spin_lock_irq(&iot->lock);
iot->in_flight += len;
- spin_unlock_irqrestore(&iot->lock, flags);
+ spin_unlock_irq(&iot->lock);
}
static void __iot_io_end(struct io_tracker *iot, sector_t len)
@@ -172,7 +169,6 @@ static void __commit(struct work_struct *_ws)
{
struct batcher *b = container_of(_ws, struct batcher, commit_work);
blk_status_t r;
- unsigned long flags;
struct list_head work_items;
struct work_struct *ws, *tmp;
struct continuation *k;
@@ -186,12 +182,12 @@ static void __commit(struct work_struct *_ws)
* We have to grab these before the commit_op to avoid a race
* condition.
*/
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
list_splice_init(&b->work_items, &work_items);
bio_list_merge(&bios, &b->bios);
bio_list_init(&b->bios);
b->commit_scheduled = false;
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
r = b->commit_op(b->commit_context);
@@ -238,13 +234,12 @@ static void async_commit(struct batcher *b)
static void continue_after_commit(struct batcher *b, struct continuation *k)
{
- unsigned long flags;
bool commit_scheduled;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
commit_scheduled = b->commit_scheduled;
list_add_tail(&k->ws.entry, &b->work_items);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (commit_scheduled)
async_commit(b);
@@ -255,13 +250,12 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
*/
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
- unsigned long flags;
bool commit_scheduled;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
commit_scheduled = b->commit_scheduled;
bio_list_add(&b->bios, bio);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (commit_scheduled)
async_commit(b);
@@ -273,12 +267,11 @@ static void issue_after_commit(struct batcher *b, struct bio *bio)
static void schedule_commit(struct batcher *b)
{
bool immediate;
- unsigned long flags;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
b->commit_scheduled = true;
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (immediate)
async_commit(b);
@@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio)
static void defer_bio(struct cache *cache, struct bio *bio)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_add(&cache->deferred_bios, bio);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
}
static void defer_bios(struct cache *cache, struct bio_list *bios)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_merge(&cache->deferred_bios, bios);
bio_list_init(bios);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
}
@@ -756,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
static void set_discard(struct cache *cache, dm_dblock_t b)
{
- unsigned long flags;
-
BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
atomic_inc(&cache->stats.discard_count);
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
set_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static void clear_discard(struct cache *cache, dm_dblock_t b)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
clear_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
return r;
}
@@ -790,12 +773,10 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
return r;
}
@@ -827,17 +808,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
- unsigned long flags;
struct per_bio_data *pb;
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb = get_per_bio_data(bio);
pb->tick = true;
cache->need_tick_bio = false;
}
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
@@ -1889,17 +1869,16 @@ static void process_deferred_bios(struct work_struct *ws)
{
struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
- unsigned long flags;
bool commit_needed = false;
struct bio_list bios;
struct bio *bio;
bio_list_init(&bios);
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_merge(&bios, &cache->deferred_bios);
bio_list_init(&cache->deferred_bios);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
while ((bio = bio_list_pop(&bios))) {
if (bio->bi_opf & REQ_PREFLUSH)
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 6bc8c1d1c351..08c552e5e41b 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -712,7 +712,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
{
int r;
- unsigned long word, flags;
+ unsigned long word;
word = 0;
do {
@@ -736,9 +736,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
return r;
/* Update the changed flag */
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
dmap->changed = 0;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
return 0;
}
@@ -746,7 +746,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
{
int r = -EPERM;
- unsigned long flags;
struct dirty_map *dmap, *next_dmap;
down_write(&cmd->lock);
@@ -770,9 +769,9 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
}
/* Swap dirty bitmaps */
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->current_dmap = next_dmap;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
/*
* No one is accessing the old dirty bitmap anymore, so we can flush
@@ -817,9 +816,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
{
int r = 0;
struct dirty_map *dmap;
- unsigned long word, region_nr, flags;
+ unsigned long word, region_nr;
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
if (cmd->read_only) {
r = -EPERM;
@@ -836,7 +835,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
}
}
out:
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
return r;
}
@@ -903,13 +902,11 @@ out:
void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
{
- unsigned long flags;
-
down_write(&cmd->lock);
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->read_only = 1;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
if (!cmd->fail_io)
dm_bm_set_read_only(cmd->bm);
@@ -919,13 +916,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
{
- unsigned long flags;
-
down_write(&cmd->lock);
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->read_only = 0;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
if (!cmd->fail_io)
dm_bm_set_read_write(cmd->bm);
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 434bff08508b..3fe50a781c11 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
* @start: Starting region number
* @nr_regions: Number of regions in the range
*
- * This function doesn't block, so it's safe to call it from interrupt context.
+ * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
+ * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
+ * from interrupt context.
*/
int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
unsigned long nr_regions);
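
The rewritten comment is worth making concrete: spin_unlock_irq() re-enables interrupts unconditionally, so a caller that already holds interrupts disabled would get them switched back on behind its back. A hedged sketch of the misuse the comment now forbids (some_other_lock is illustrative):

#include "dm-clone-metadata.h"
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(some_other_lock);

/* BROKEN caller, for illustration only */
static void broken_caller(struct dm_clone_metadata *cmd,
			  unsigned long start, unsigned long nr_regions)
{
	unsigned long flags;

	spin_lock_irqsave(&some_other_lock, flags); /* IRQs now off */
	dm_clone_cond_set_range(cmd, start, nr_regions);
	/* the spin_unlock_irq() inside has re-enabled interrupts, so
	 * some_other_lock is now held with IRQs on */
	spin_unlock_irqrestore(&some_other_lock, flags);
}
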
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 4ca8f1977222..b3d89072d21c 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -332,8 +332,6 @@ static void submit_bios(struct bio_list *bios)
*/
static void issue_bio(struct clone *clone, struct bio *bio)
{
- unsigned long flags;
-
if (!bio_triggers_commit(clone, bio)) {
generic_make_request(bio);
return;
@@ -352,9 +350,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
* Batch together any bios that trigger commits and then issue a single
* commit for them in process_deferred_flush_bios().
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_add(&clone->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
wake_worker(clone);
}
@@ -469,7 +467,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
static void process_discard_bio(struct clone *clone, struct bio *bio)
{
- unsigned long rs, re, flags;
+ unsigned long rs, re;
bio_region_range(clone, bio, &rs, &re);
BUG_ON(re > clone->nr_regions);
@@ -501,9 +499,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
/*
* Defer discard processing.
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_add(&clone->deferred_discard_bios, bio);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
wake_worker(clone);
}
@@ -554,6 +552,12 @@ struct hash_table_bucket {
#define bucket_unlock_irqrestore(bucket, flags) \
spin_unlock_irqrestore(&(bucket)->lock, flags)
+#define bucket_lock_irq(bucket) \
+ spin_lock_irq(&(bucket)->lock)
+
+#define bucket_unlock_irq(bucket) \
+ spin_unlock_irq(&(bucket)->lock)
+
static int hash_table_init(struct clone *clone)
{
unsigned int i, sz;
@@ -851,7 +855,6 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
*/
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
- unsigned long flags;
unsigned long region_nr;
struct hash_table_bucket *bucket;
struct dm_clone_region_hydration *hd, *hd2;
@@ -859,19 +862,19 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
region_nr = bio_to_region(clone, bio);
bucket = get_hash_table_bucket(clone, region_nr);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
hd = __hash_find(bucket, region_nr);
if (hd) {
/* Someone else is hydrating the region */
bio_list_add(&hd->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
return;
}
if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
/* The region has been hydrated */
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
issue_bio(clone, bio);
return;
}
@@ -880,16 +883,16 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
* We must allocate a hydration descriptor and start the hydration of
* the corresponding region.
*/
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hd = alloc_hydration(clone);
hydration_init(hd, region_nr);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
/* Check if the region has been hydrated in the meantime. */
if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
issue_bio(clone, bio);
return;
@@ -899,7 +902,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
if (hd2 != hd) {
/* Someone else started the region's hydration. */
bio_list_add(&hd2->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
return;
}
@@ -911,7 +914,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
*/
if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
hlist_del(&hd->h);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
bio_io_error(bio);
return;
@@ -925,11 +928,11 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
* to the destination device.
*/
if (is_overwrite_bio(clone, bio)) {
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hydration_overwrite(hd, bio);
} else {
bio_list_add(&hd->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hydration_copy(hd, 1);
}
}
@@ -996,7 +999,6 @@ static unsigned long __start_next_hydration(struct clone *clone,
unsigned long offset,
struct batch_info *batch)
{
- unsigned long flags;
struct hash_table_bucket *bucket;
struct dm_clone_region_hydration *hd;
unsigned long nr_regions = clone->nr_regions;
@@ -1010,13 +1012,13 @@ static unsigned long __start_next_hydration(struct clone *clone,
break;
bucket = get_hash_table_bucket(clone, offset);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
!__hash_find(bucket, offset)) {
hydration_init(hd, offset);
__insert_region_hydration(bucket, hd);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
/* Batch hydration */
__batch_hydration(batch, hd);
@@ -1024,7 +1026,7 @@ static unsigned long __start_next_hydration(struct clone *clone,
return (offset + 1);
}
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
} while (++offset < nr_regions);
@@ -1140,13 +1142,13 @@ static void process_deferred_discards(struct clone *clone)
int r = -EPERM;
struct bio *bio;
struct blk_plug plug;
- unsigned long rs, re, flags;
+ unsigned long rs, re;
struct bio_list discards = BIO_EMPTY_LIST;
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&discards, &clone->deferred_discard_bios);
bio_list_init(&clone->deferred_discard_bios);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&discards))
return;
@@ -1176,13 +1178,12 @@ out:
static void process_deferred_bios(struct clone *clone)
{
- unsigned long flags;
struct bio_list bios = BIO_EMPTY_LIST;
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&bios, &clone->deferred_bios);
bio_list_init(&clone->deferred_bios);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios))
return;
@@ -1193,7 +1194,6 @@ static void process_deferred_bios(struct clone *clone)
static void process_deferred_flush_bios(struct clone *clone)
{
struct bio *bio;
- unsigned long flags;
struct bio_list bios = BIO_EMPTY_LIST;
struct bio_list bio_completions = BIO_EMPTY_LIST;
@@ -1201,13 +1201,13 @@ static void process_deferred_flush_bios(struct clone *clone)
* If there are any deferred flush bios, we must commit the metadata
* before issuing them or signaling their completion.
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&bios, &clone->deferred_flush_bios);
bio_list_init(&clone->deferred_flush_bios);
bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
bio_list_init(&clone->deferred_flush_completions);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f87f6495652f..eb9782fc93fe 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
- 1, devname);
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname);
else
cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
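
Dropping WQ_HIGHPRI (and WQ_CPU_INTENSIVE for the I/O queue) lets the encryption work be scheduled like ordinary kernel work instead of competing with interactive tasks at raised priority. For reference, alloc_workqueue() takes a printf-style name, a flag mask, and a max_active limit; a hedged sketch of the resulting queue shapes, with illustrative names:

#include <linux/errno.h>
#include <linux/workqueue.h>

static int example_init(const char *devname)
{
	struct workqueue_struct *io_wq, *crypt_wq;

	/* single-threaded, rescuer-backed queue for I/O submission */
	io_wq = alloc_workqueue("example_io/%s", WQ_MEM_RECLAIM, 1, devname);

	/* unbound queue, one worker per online CPU, for the heavy lifting */
	crypt_wq = alloc_workqueue("example_crypt/%s",
				   WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
				   num_online_cpus(), devname);

	if (!io_wq || !crypt_wq)
		return -ENOMEM;
	return 0;
}
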
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 8288887b7f94..eb37584427a4 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -17,6 +17,7 @@
struct badblock {
struct rb_node node;
sector_t bb;
+ unsigned char wr_fail_cnt;
};
struct dust_device {
@@ -101,7 +102,8 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
return 0;
}
-static int dust_add_block(struct dust_device *dd, unsigned long long block)
+static int dust_add_block(struct dust_device *dd, unsigned long long block,
+ unsigned char wr_fail_cnt)
{
struct badblock *bblock;
unsigned long flags;
@@ -115,6 +117,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
spin_lock_irqsave(&dd->dust_lock, flags);
bblock->bb = block;
+ bblock->wr_fail_cnt = wr_fail_cnt;
if (!dust_rb_insert(&dd->badblocklist, bblock)) {
if (!dd->quiet_mode) {
DMERR("%s: block %llu already in badblocklist",
@@ -126,8 +129,10 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
}
dd->badblock_count++;
- if (!dd->quiet_mode)
- DMINFO("%s: badblock added at block %llu", __func__, block);
+ if (!dd->quiet_mode) {
+ DMINFO("%s: badblock added at block %llu with write fail count %hhu",
+ __func__, block, wr_fail_cnt);
+ }
spin_unlock_irqrestore(&dd->dust_lock, flags);
return 0;
@@ -163,22 +168,27 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
- int ret = DM_MAPIO_REMAPPED;
+ int r = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- ret = __dust_map_read(dd, thisblock);
+ r = __dust_map_read(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return ret;
+ return r;
}
-static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
+static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+ if (bblk && bblk->wr_fail_cnt > 0) {
+ bblk->wr_fail_cnt--;
+ return DM_MAPIO_KILL;
+ }
+
if (bblk) {
rb_erase(&bblk->node, &dd->badblocklist);
dd->badblock_count--;
@@ -189,37 +199,40 @@ static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
(unsigned long long)thisblock);
}
}
+
+ return DM_MAPIO_REMAPPED;
}
static int dust_map_write(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
+ int ret = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- __dust_map_write(dd, thisblock);
+ ret = __dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return DM_MAPIO_REMAPPED;
+ return ret;
}
static int dust_map(struct dm_target *ti, struct bio *bio)
{
struct dust_device *dd = ti->private;
- int ret;
+ int r;
bio_set_dev(bio, dd->dev->bdev);
bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
if (bio_data_dir(bio) == READ)
- ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
else
- ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
- return ret;
+ return r;
}
static bool __dust_clear_badblocks(struct rb_root *tree,
@@ -375,8 +388,10 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
struct dust_device *dd = ti->private;
sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
bool invalid_msg = false;
- int result = -EINVAL;
+ int r = -EINVAL;
unsigned long long tmp, block;
+ unsigned char wr_fail_cnt;
+ unsigned int tmp_ui;
unsigned long flags;
char dummy;
@@ -388,45 +403,69 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
} else if (!strcasecmp(argv[0], "disable")) {
DMINFO("disabling read failures on bad sectors");
dd->fail_read_on_bb = false;
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "enable")) {
DMINFO("enabling read failures on bad sectors");
dd->fail_read_on_bb = true;
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "countbadblocks")) {
spin_lock_irqsave(&dd->dust_lock, flags);
DMINFO("countbadblocks: %llu badblock(s) found",
dd->badblock_count);
spin_unlock_irqrestore(&dd->dust_lock, flags);
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "clearbadblocks")) {
- result = dust_clear_badblocks(dd);
+ r = dust_clear_badblocks(dd);
} else if (!strcasecmp(argv[0], "quiet")) {
if (!dd->quiet_mode)
dd->quiet_mode = true;
else
dd->quiet_mode = false;
- result = 0;
+ r = 0;
} else {
invalid_msg = true;
}
} else if (argc == 2) {
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
- return result;
+ return r;
block = tmp;
sector_div(size, dd->sect_per_block);
if (block > size) {
DMERR("selected block value out of range");
- return result;
+ return r;
}
if (!strcasecmp(argv[0], "addbadblock"))
- result = dust_add_block(dd, block);
+ r = dust_add_block(dd, block, 0);
else if (!strcasecmp(argv[0], "removebadblock"))
- result = dust_remove_block(dd, block);
+ r = dust_remove_block(dd, block);
else if (!strcasecmp(argv[0], "queryblock"))
- result = dust_query_block(dd, block);
+ r = dust_query_block(dd, block);
+ else
+ invalid_msg = true;
+
+ } else if (argc == 3) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
+ return r;
+
+ if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
+ return r;
+
+ block = tmp;
+ if (tmp_ui > 255) {
+ DMERR("selected write fail count out of range");
+ return r;
+ }
+ wr_fail_cnt = tmp_ui;
+ sector_div(size, dd->sect_per_block);
+ if (block > size) {
+ DMERR("selected block value out of range");
+ return r;
+ }
+
+ if (!strcasecmp(argv[0], "addbadblock"))
+ r = dust_add_block(dd, block, wr_fail_cnt);
else
invalid_msg = true;
@@ -436,7 +475,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
if (invalid_msg)
DMERR("unrecognized message '%s' received", argv[0]);
- return result;
+ return r;
}
static void dust_status(struct dm_target *ti, status_type_t type,
@@ -499,12 +538,12 @@ static struct target_type dust_target = {
static int __init dm_dust_init(void)
{
- int result = dm_register_target(&dust_target);
+ int r = dm_register_target(&dust_target);
- if (result < 0)
- DMERR("dm_register_target failed %d", result);
+ if (r < 0)
+ DMERR("dm_register_target failed %d", r);
- return result;
+ return r;
}
static void __exit dm_dust_exit(void)
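
With the new third argument, addbadblock arms a per-block write-failure countdown: the next wr_fail_cnt writes to that block are failed with an error, and only once the counter drains does a write clear the bad block as before. A hedged usage sketch (device name illustrative), asking block 60 to fail its next three writes:

dmsetup message dust1 0 addbadblock 60 3
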
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 2900fbde89b3..a2cc9e45cbba 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -280,7 +280,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
struct flakey_c *fc = ti->private;
bio_set_dev(bio, fc->dev->bdev);
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -322,8 +322,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
- /* Do not fail reset zone */
- if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (op_is_zone_mgmt(bio_op(bio)))
goto map_bio;
/* Are we alive ? */
@@ -384,7 +383,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (op_is_zone_mgmt(bio_op(bio)))
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
@@ -460,21 +459,15 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
}
#ifdef CONFIG_BLK_DEV_ZONED
-static int flakey_report_zones(struct dm_target *ti, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+static int flakey_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
{
struct flakey_c *fc = ti->private;
- int ret;
+ sector_t sector = flakey_map_sector(ti, args->next_sector);
- /* Do report and remap it */
- ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
- zones, nr_zones);
- if (ret != 0)
- return ret;
-
- if (*nr_zones)
- dm_remap_zone_report(ti, fc->start, zones, nr_zones);
- return 0;
+ args->start = fc->start;
+ return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
+ dm_report_zones_cb, args);
}
#endif
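
op_is_zone_mgmt() widens the old REQ_OP_ZONE_RESET special case to the whole family of zone-management commands, so zone open/close/finish are now remapped and passed through just like reset. Roughly what the helper covers in this kernel generation (a hedged reconstruction, not quoted from blk_types.h):

static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}
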
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dab4446fe7d8..b225b3e445fa 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -53,6 +53,7 @@
#define SB_VERSION_1 1
#define SB_VERSION_2 2
#define SB_VERSION_3 3
+#define SB_VERSION_4 4
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -73,6 +74,7 @@ struct superblock {
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
#define SB_FLAG_DIRTY_BITMAP 0x4
+#define SB_FLAG_FIXED_PADDING 0x8
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -250,6 +252,7 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
+ bool fix_padding;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -463,7 +466,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
+ ic->sb->version = SB_VERSION_4;
+ else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
ic->sb->version = SB_VERSION_3;
else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic->sb->version = SB_VERSION_2;
@@ -2955,6 +2960,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
+ arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -2974,6 +2980,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
}
+ if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
+ DMEMIT(" fix_padding");
#define EMIT_ALG(a, n) \
do { \
@@ -3042,8 +3050,14 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
if (!ic->meta_dev) {
sector_t last_sector, last_area, last_offset;
- ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
- (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
+ /* we have to maintain excessive padding for compatibility with existing volumes */
+ __u64 metadata_run_padding =
+ ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
+ (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
+ (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
+
+ ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
+ metadata_run_padding) >> SECTOR_SHIFT;
if (!(ic->metadata_run & (ic->metadata_run - 1)))
ic->log2_metadata_run = __ffs(ic->metadata_run);
else
@@ -3086,6 +3100,8 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
journal_sections = 1;
if (!ic->meta_dev) {
+ if (ic->fix_padding)
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
ic->sb->journal_sections = cpu_to_le32(journal_sections);
if (!interleave_sectors)
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
@@ -3725,6 +3741,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "fix_padding")) {
+ ic->fix_padding = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3867,7 +3885,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -4182,7 +4200,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
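
The padding fix is easiest to see numerically. Assuming the mainline values METADATA_PADDING_SECTORS == 8 and SECTOR_SHIFT == 9, the two rounding granularities used above work out to:

	/* Legacy granularity: 512 bytes shifted left by 8 bit positions. */
	(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)	/* 512 << 8 = 131072 B (128 KiB) */

	/* fix_padding granularity: 8 padding sectors of 512 bytes each. */
	(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT)	/* 8 << 9 = 4096 B (4 KiB) */

Rounding every metadata run up to 128 KiB instead of 4 KiB wastes space, but changing it silently would shift the data layout of existing volumes; hence the opt-in fix_padding argument, persisted via SB_FLAG_FIXED_PADDING and SB_VERSION_4 so that older kernels reject such volumes as "Unknown version" rather than misreading them.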
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index ecefe6703736..8d07fdf63a47 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -90,7 +90,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
struct linear_c *lc = ti->private;
bio_set_dev(bio, lc->dev->bdev);
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -136,21 +136,15 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
}
#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_report_zones(struct dm_target *ti, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+static int linear_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
{
- struct linear_c *lc = (struct linear_c *) ti->private;
- int ret;
-
- /* Do report and remap it */
- ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
- zones, nr_zones);
- if (ret != 0)
- return ret;
+ struct linear_c *lc = ti->private;
+ sector_t sector = linear_map_sector(ti, args->next_sector);
- if (*nr_zones)
- dm_remap_zone_report(ti, lc->start, zones, nr_zones);
- return 0;
+ args->start = lc->start;
+ return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
+ dm_report_zones_cb, args);
}
#endif
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0aa595e4375..c412eaa975fc 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -209,6 +209,7 @@ struct raid_dev {
#define RT_FLAG_RS_SUSPENDED 5
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
+#define RT_FLAG_RS_GROW 8
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -241,6 +242,9 @@ struct raid_set {
struct raid_type *raid_type;
struct dm_target_callbacks callbacks;
+ sector_t array_sectors;
+ sector_t dev_sectors;
+
/* Optional raid4/5/6 journal device */
struct journal_dev {
struct dm_dev *dev;
@@ -616,7 +620,6 @@ static int raid10_format_to_md_layout(struct raid_set *rs,
} else if (algorithm == ALGORITHM_RAID10_FAR) {
f = copies;
- r = !RAID10_OFFSET;
if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
r |= RAID10_USE_FAR_SETS;
@@ -1615,13 +1618,12 @@ static int _check_data_dev_sectors(struct raid_set *rs)
}
/* Calculate the sectors per device and per array used for @rs */
-static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
+static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
int delta_disks;
unsigned int data_stripes;
+ sector_t array_sectors = sectors, dev_sectors = sectors;
struct mddev *mddev = &rs->md;
- struct md_rdev *rdev;
- sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
if (use_mddev) {
delta_disks = mddev->delta_disks;
@@ -1656,12 +1658,9 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
/* Striped layouts */
array_sectors = (data_stripes + delta_disks) * dev_sectors;
- rdev_for_each(rdev, mddev)
- if (!test_bit(Journal, &rdev->flags))
- rdev->sectors = dev_sectors;
-
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
+ rs_set_rdev_sectors(rs);
return _check_data_dev_sectors(rs);
bad:
@@ -1670,7 +1669,7 @@ bad:
}
/* Setup recovery on @rs */
-static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
/* raid0 does not recover */
if (rs_is_raid0(rs))
@@ -1691,22 +1690,6 @@ static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
? MaxSector : dev_sectors;
}
-/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
-static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
-{
- if (!dev_sectors)
- /* New raid set or 'sync' flag provided */
- __rs_setup_recovery(rs, 0);
- else if (dev_sectors == MaxSector)
- /* Prevent recovery */
- __rs_setup_recovery(rs, MaxSector);
- else if (__rdev_sectors(rs) < dev_sectors)
- /* Grown raid set */
- __rs_setup_recovery(rs, __rdev_sectors(rs));
- else
- __rs_setup_recovery(rs, MaxSector);
-}
-
static void do_table_event(struct work_struct *ws)
{
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
@@ -2474,7 +2457,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
- /* Enable bitmap creation for RAID levels != 0 */
+ /* Enable bitmap creation on @rs unless it has no metadevs, is raid0, or is a journaled raid4/5/6 set. */
mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
@@ -2911,7 +2894,7 @@ static int rs_setup_reshape(struct raid_set *rs)
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
- r = rs_set_dev_and_array_sectors(rs, true);
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
/* Change layout and/or chunk size */
@@ -3008,7 +2991,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
+ sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3067,11 +3050,13 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*
* Any existing superblock will overwrite the array and device sizes
*/
- r = rs_set_dev_and_array_sectors(rs, false);
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
if (r)
goto bad;
- calculated_dev_sectors = rs->md.dev_sectors;
+ /* Memorize the just-calculated, potentially larger sizes to grow the raid set in preresume */
+ rs->array_sectors = rs->md.array_sectors;
+ rs->dev_sectors = rs->md.dev_sectors;
/*
* Backup any new raid set level, layout, ...
@@ -3084,6 +3069,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
+ /* All in-core metadata is now current with the superblocks after calling analyse_superblocks() */
+ sb_array_sectors = rs->md.array_sectors;
rdev_sectors = __rdev_sectors(rs);
if (!rdev_sectors) {
ti->error = "Invalid rdev size";
@@ -3093,8 +3080,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
reshape_sectors = _get_reshape_sectors(rs);
- if (calculated_dev_sectors != rdev_sectors)
- resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
+ if (rs->dev_sectors != rdev_sectors) {
+ resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
+ if (rs->dev_sectors > rdev_sectors - reshape_sectors)
+ set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
+ }
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3121,13 +3111,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_set_new(rs);
} else if (rs_is_recovering(rs)) {
- /* Rebuild particular devices */
- if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- rs_setup_recovery(rs, MaxSector);
- }
/* A recovering raid set may be resized */
- ; /* skip setup rs */
+ goto size_check;
} else if (rs_is_reshaping(rs)) {
/* Have to reject size change request during reshape */
if (resize) {
@@ -3171,6 +3156,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs_setup_recovery(rs, MaxSector);
rs_set_new(rs);
} else if (rs_reshape_requested(rs)) {
+ /* Only request a grow for raid set size extensions, not for reshapes. */
+ clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
+
/*
* No need to check for 'ongoing' takeover here, because takeover
* is an instant operation as opposed to an ongoing reshape.
@@ -3201,13 +3189,31 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
rs_set_cur(rs);
} else {
+size_check:
/* May not set recovery when a device rebuild is requested */
if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- rs_setup_recovery(rs, MaxSector);
+ clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- } else
- rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
- 0 : (resize ? calculated_dev_sectors : MaxSector));
+ rs_setup_recovery(rs, MaxSector);
+ } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
+ /*
+ * Set the raid set to its current size, i.e. the size recorded in
+ * the superblocks; it is grown to the larger size in preresume.
+ */
+ r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
+ if (r)
+ goto bad;
+
+ rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
+ } else {
+ /* No size change, or shrinking: update the size and record it in the superblocks */
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
+ if (r)
+ goto bad;
+
+ if (sb_array_sectors > rs->array_sectors)
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ }
rs_set_cur(rs);
}
@@ -3406,10 +3412,9 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
/* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resyncing */
static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
- sector_t resync_max_sectors)
+ enum sync_state state, sector_t resync_max_sectors)
{
sector_t r;
- enum sync_state state;
struct mddev *mddev = &rs->md;
clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3420,8 +3425,6 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- state = decipher_sync_action(mddev, recovery);
-
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
r = mddev->recovery_cp;
else
@@ -3439,18 +3442,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
/*
* In case we are recovering, the array is not in sync
* and health chars should show the recovering legs.
+ *
+ * Already retrieved recovery offset from curr_resync_completed above.
*/
;
- else if (state == st_resync)
- /*
- * If "resync" is occurring, the raid set
- * is or may be out of sync hence the health
- * characters shall be 'a'.
- */
- set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
- else if (state == st_reshape)
+
+ else if (state == st_resync || state == st_reshape)
/*
- * If "reshape" is occurring, the raid set
+ * If "resync/reshape" is occurring, the raid set
* is or may be out of sync hence the health
* characters shall be 'a'.
*/
@@ -3464,22 +3463,22 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
- else {
- struct md_rdev *rdev;
-
+ else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
/*
* We are idle and recovery is needed, prevent 'A' chars race
* caused by components still set to in-sync by constructor.
*/
- if (test_bit(MD_RECOVERY_NEEDED, &recovery))
- set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+ else {
/*
- * The raid set may be doing an initial sync, or it may
- * be rebuilding individual components. If all the
- * devices are In_sync, then it is the raid set that is
- * being initialized.
+ * We are idle and the raid set may be doing an initial
+ * sync, or it may be rebuilding individual components.
+ * If all the devices are In_sync, then it is the raid set
+ * that is being initialized.
*/
+ struct md_rdev *rdev;
+
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
@@ -3512,7 +3511,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
unsigned int rebuild_disks;
unsigned int write_mostly_params = 0;
sector_t progress, resync_max_sectors, resync_mismatches;
- const char *sync_action;
+ enum sync_state state;
struct raid_type *rt;
switch (type) {
@@ -3526,14 +3525,14 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
- recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, recovery, resync_max_sectors);
+ recovery = rs->md.recovery;
+ state = decipher_sync_action(mddev, recovery);
+ progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
@@ -3561,7 +3560,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* See Documentation/admin-guide/device-mapper/dm-raid.rst for
* information on each of these states.
*/
- DMEMIT(" %s", sync_action);
+ DMEMIT(" %s", sync_str(state));
/*
* v1.5.0+:
@@ -3955,11 +3954,22 @@ static int raid_preresume(struct dm_target *ti)
if (r)
return r;
- /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
- mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
- r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
- to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+ /* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
+ mddev->array_sectors = rs->array_sectors;
+ mddev->dev_sectors = rs->dev_sectors;
+ rs_set_rdev_sectors(rs);
+ rs_set_capacity(rs);
+ }
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
+ (rs->requested_bitmap_chunk_sectors &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
+ int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
+
+ r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
if (r)
DMERR("Failed to resize bitmap");
}
@@ -3968,8 +3978,10 @@ static int raid_preresume(struct dm_target *ti)
/* Be prepared for mddev_resume() in raid_resume() */
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
+ mddev->resync_max_sectors = mddev->dev_sectors;
}
/* Check for any reshape request unless new raid set */
@@ -4017,7 +4029,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
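
A note on the grow path above: raid_ctr() memorizes the larger table-derived sizes, runs the set at the superblock sizes, and only raid_preresume() installs the new size and kicks a sync. That sync is flagged MD_RECOVERY_REQUESTED rather than plain MD_RECOVERY_SYNC because only a requested sync honors ->resync_min; a sketch paraphrased from md_do_sync() in md.c (not part of this patch):

	sector_t j = 0;	/* first sector the sync thread will process */

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		j = mddev->resync_min;	/* raid_preresume() set this to recovery_cp */
	else if (!mddev->bitmap)
		j = mddev->recovery_cp;	/* a plain sync ignores the checkpoint when a bitmap exists */

So after a grow only the region from the old checkpoint onward, i.e. the freshly added space, gets synchronized.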
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 8547d7594338..63bbcc20f49a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -55,19 +55,6 @@ static void trigger_event(struct work_struct *work)
dm_table_event(sc->ti->table);
}
-static inline struct stripe_c *alloc_context(unsigned int stripes)
-{
- size_t len;
-
- if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
- stripes))
- return NULL;
-
- len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
-
- return kmalloc(len, GFP_KERNEL);
-}
-
/*
* Parse a single <dev> <sector> pair
*/
@@ -142,7 +129,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- sc = alloc_context(stripes);
+ sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
if (!sc) {
ti->error = "Memory allocation for striped context "
"failed";
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 52e049554f5c..2ae0c1913766 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -918,21 +918,15 @@ bool dm_table_supports_dax(struct dm_table *t,
static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-struct verify_rq_based_data {
- unsigned sq_count;
- unsigned mq_count;
-};
-
-static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- struct verify_rq_based_data *v = data;
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
- if (queue_is_mq(q))
- v->mq_count++;
- else
- v->sq_count++;
+ /* request-based cannot stack on partitions! */
+ if (bdev != bdev->bd_contains)
+ return false;
return queue_is_mq(q);
}
@@ -941,7 +935,6 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -1045,14 +1038,10 @@ verify_rq_based:
/* Non-request-stackable devices can't be used for request-based dm */
if (!tgt->type->iterate_devices ||
- !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
- if (v.sq_count > 0) {
- DMERR("table load rejected: not all devices are blk-mq request-stackable");
- return -EINVAL;
- }
return 0;
}
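
The new partition test relies on a long-standing block layer invariant: for a partition, bd_contains points at the whole disk's block_device, while for the whole disk it points at itself. Reduced to a sketch:

	/*
	 * True if @bdev is a partition. Request-based dm clones requests
	 * below the partition remapping, so it must be handed whole disks;
	 * bio-based dm remaps bios above it and is unaffected.
	 */
	static bool is_partition_sketch(struct block_device *bdev)
	{
		return bdev != bdev->bd_contains;
	}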
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fcd887703f95..5a2c494cb552 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -609,13 +609,12 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
blk_status_t error)
{
struct bio_list bios;
- unsigned long flags;
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
__merge_bio_list(&bios, master);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
}
@@ -623,15 +622,14 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
static void requeue_deferred_cells(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct list_head cells;
struct dm_bio_prison_cell *cell, *tmp;
INIT_LIST_HEAD(&cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice_init(&tc->deferred_cells, &cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
list_for_each_entry_safe(cell, tmp, &cells, user_list)
cell_requeue(pool, cell);
@@ -640,14 +638,13 @@ static void requeue_deferred_cells(struct thin_c *tc)
static void requeue_io(struct thin_c *tc)
{
struct bio_list bios;
- unsigned long flags;
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
__merge_bio_list(&bios, &tc->deferred_bio_list);
__merge_bio_list(&bios, &tc->retry_on_resume_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
requeue_deferred_cells(tc);
@@ -756,7 +753,6 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- unsigned long flags;
if (!bio_triggers_commit(tc, bio)) {
generic_make_request(bio);
@@ -777,9 +773,9 @@ static void issue(struct thin_c *tc, struct bio *bio)
* Batch together any bios that trigger commits and then issue a
* single commit for them in process_deferred_bios().
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_add(&pool->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
@@ -886,12 +882,15 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
{
struct pool *pool = tc->pool;
unsigned long flags;
+ int has_work;
spin_lock_irqsave(&tc->lock, flags);
cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+ has_work = !bio_list_empty(&tc->deferred_bio_list);
spin_unlock_irqrestore(&tc->lock, flags);
- wake_worker(pool);
+ if (has_work)
+ wake_worker(pool);
}
static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
@@ -960,7 +959,6 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- unsigned long flags;
/*
* If the bio has the REQ_FUA flag set we must commit the metadata
@@ -985,9 +983,9 @@ static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
* Batch together any bios that trigger commits and then issue a
* single commit for them in process_deferred_bios().
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_add(&pool->deferred_flush_completions, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
@@ -1226,14 +1224,13 @@ static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
static void process_prepared(struct pool *pool, struct list_head *head,
process_mapping_fn *fn)
{
- unsigned long flags;
struct list_head maps;
struct dm_thin_new_mapping *m, *tmp;
INIT_LIST_HEAD(&maps);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
list_splice_init(head, &maps);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
list_for_each_entry_safe(m, tmp, &maps, list)
(*fn)(m);
@@ -1510,14 +1507,12 @@ static int commit(struct pool *pool)
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
- unsigned long flags;
-
if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
DMWARN("%s: reached low water mark for data device: sending event.",
dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->low_water_triggered = true;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
dm_table_event(pool->ti->table);
}
}
@@ -1593,11 +1588,10 @@ static void retry_on_resume(struct bio *bio)
{
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
- unsigned long flags;
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->retry_on_resume_list, bio);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
}
static blk_status_t should_error_unserviceable_bio(struct pool *pool)
@@ -2170,7 +2164,6 @@ static void __sort_thin_deferred_bios(struct thin_c *tc)
static void process_thin_deferred_bios(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -2184,10 +2177,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
if (bio_list_empty(&tc->deferred_bio_list)) {
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
return;
}
@@ -2196,7 +2189,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
bio_list_merge(&bios, &tc->deferred_bio_list);
bio_list_init(&tc->deferred_bio_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -2206,10 +2199,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
* prepared mappings to process.
*/
if (ensure_next_mapping(pool)) {
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->deferred_bio_list, bio);
bio_list_merge(&tc->deferred_bio_list, &bios);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
break;
}
@@ -2264,16 +2257,15 @@ static unsigned sort_cells(struct pool *pool, struct list_head *cells)
static void process_thin_deferred_cells(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct list_head cells;
struct dm_bio_prison_cell *cell;
unsigned i, j, count;
INIT_LIST_HEAD(&cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice_init(&tc->deferred_cells, &cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
if (list_empty(&cells))
return;
@@ -2294,9 +2286,9 @@ static void process_thin_deferred_cells(struct thin_c *tc)
for (j = i; j < count; j++)
list_add(&pool->cell_sort_array[j]->user_list, &cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice(&cells, &tc->deferred_cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
return;
}
@@ -2349,7 +2341,6 @@ static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
static void process_deferred_bios(struct pool *pool)
{
- unsigned long flags;
struct bio *bio;
struct bio_list bios, bio_completions;
struct thin_c *tc;
@@ -2368,13 +2359,13 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&bios);
bio_list_init(&bio_completions);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_merge(&bios, &pool->deferred_flush_bios);
bio_list_init(&pool->deferred_flush_bios);
bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
bio_list_init(&pool->deferred_flush_completions);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
@@ -2657,12 +2648,11 @@ static void metadata_operation_failed(struct pool *pool, const char *op, int r)
*/
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
- unsigned long flags;
struct pool *pool = tc->pool;
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->deferred_bio_list, bio);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
wake_worker(pool);
}
@@ -2678,13 +2668,12 @@ static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
- unsigned long flags;
struct pool *pool = tc->pool;
throttle_lock(&pool->throttle);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_add_tail(&cell->user_list, &tc->deferred_cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
throttle_unlock(&pool->throttle);
wake_worker(pool);
@@ -2810,15 +2799,14 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
static void requeue_bios(struct pool *pool)
{
- unsigned long flags;
struct thin_c *tc;
rcu_read_lock();
list_for_each_entry_rcu(tc, &pool->active_thins, list) {
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
bio_list_init(&tc->retry_on_resume_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
}
rcu_read_unlock();
}
@@ -3412,15 +3400,14 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
/*
* As this is a singleton target, ti->begin is always zero.
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_set_dev(bio, pt->data_dev->bdev);
r = DM_MAPIO_REMAPPED;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
return r;
}
@@ -3591,7 +3578,6 @@ static void pool_resume(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
/*
* Must requeue active_thins' bios and then resume
@@ -3600,10 +3586,10 @@ static void pool_resume(struct dm_target *ti)
requeue_bios(pool);
pool_resume_active_thins(pool);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->low_water_triggered = false;
pool->suspended = false;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
do_waker(&pool->waker.work);
}
@@ -3612,11 +3598,10 @@ static void pool_presuspend(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->suspended = true;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
pool_suspend_active_thins(pool);
}
@@ -3625,13 +3610,12 @@ static void pool_presuspend_undo(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
pool_resume_active_thins(pool);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->suspended = false;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void pool_postsuspend(struct dm_target *ti)
@@ -4110,11 +4094,10 @@ static void thin_put(struct thin_c *tc)
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irq(&tc->pool->lock);
list_del_rcu(&tc->list);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
synchronize_rcu();
thin_put(tc);
@@ -4150,7 +4133,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct thin_c *tc;
struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
- unsigned long flags;
mutex_lock(&dm_thin_pool_table.mutex);
@@ -4244,9 +4226,9 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
mutex_unlock(&dm_thin_pool_table.mutex);
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irq(&tc->pool->lock);
if (tc->pool->suspended) {
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
ti->error = "Unable to activate thin device while pool is suspended";
r = -EINVAL;
@@ -4255,7 +4237,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
refcount_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
/*
* This synchronize_rcu() call is needed here otherwise we risk a
* wake_worker() call finding no bios to process (because the newly
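
All the irqsave-to-irq conversions in this file rest on one fact: these paths run only in process context (the worker, constructors, suspend/resume), never with interrupts already disabled. The trade-off, in sketch form:

	unsigned long flags;

	/* Safe from any context; saves and restores the caller's IRQ state. */
	spin_lock_irqsave(&tc->lock, flags);
	spin_unlock_irqrestore(&tc->lock, flags);

	/*
	 * Slightly cheaper, but unconditionally re-enables interrupts on
	 * unlock -- correct only when the caller is known to run with IRQs
	 * enabled, which holds for every site converted above. Sites that
	 * can run from bio completion, such as cell_defer_no_holder(),
	 * keep the irqsave form.
	 */
	spin_lock_irq(&tc->lock);
	spin_unlock_irq(&tc->lock);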
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d06b8aa41e26..7d727a72aa13 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1218,7 +1218,8 @@ bio_copy:
}
} while (bio->bi_iter.bi_size);
- if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks))
+ if (unlikely(bio->bi_opf & REQ_FUA ||
+ wc->uncommitted_blocks >= wc->autocommit_blocks))
writecache_flush(wc);
else
writecache_schedule_autocommit(wc);
@@ -1561,7 +1562,7 @@ static void writecache_writeback(struct work_struct *work)
{
struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
struct blk_plug plug;
- struct wc_entry *f, *g, *e = NULL;
+ struct wc_entry *f, *uninitialized_var(g), *e = NULL;
struct rb_node *node, *next_node;
struct list_head skipped;
struct writeback_list wbl;
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 595a73110e17..22b3cb0050a7 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -554,6 +554,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
dmz_release_mblock(zmd, mblk);
+ dmz_check_bdev(zmd->dev);
return ERR_PTR(-EIO);
}
@@ -625,6 +626,8 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
ret = submit_bio_wait(bio);
bio_put(bio);
+ if (ret)
+ dmz_check_bdev(zmd->dev);
return ret;
}
@@ -691,6 +694,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
+ dmz_check_bdev(zmd->dev);
ret = -EIO;
}
nr_mblks_submitted--;
@@ -768,7 +772,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
- goto out;
+ goto err;
}
/*
@@ -778,7 +782,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
ret = dmz_log_dirty_mblocks(zmd, &write_list);
if (ret)
- goto out;
+ goto err;
/*
* The log is on disk. It is now safe to update in place
@@ -786,11 +790,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
if (ret)
- goto out;
+ goto err;
ret = dmz_write_sb(zmd, zmd->mblk_primary);
if (ret)
- goto out;
+ goto err;
while (!list_empty(&write_list)) {
mblk = list_first_entry(&write_list, struct dmz_mblock, link);
@@ -805,16 +809,20 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
zmd->sb_gen++;
out:
- if (ret && !list_empty(&write_list)) {
- spin_lock(&zmd->mblk_lock);
- list_splice(&write_list, &zmd->mblk_dirty_list);
- spin_unlock(&zmd->mblk_lock);
- }
-
dmz_unlock_flush(zmd);
up_write(&zmd->mblk_sem);
return ret;
+
+err:
+ if (!list_empty(&write_list)) {
+ spin_lock(&zmd->mblk_lock);
+ list_splice(&write_list, &zmd->mblk_dirty_list);
+ spin_unlock(&zmd->mblk_lock);
+ }
+ if (!dmz_check_bdev(zmd->dev))
+ ret = -EIO;
+ goto out;
}
/*
@@ -1080,9 +1088,10 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
/*
* Initialize a zone descriptor.
*/
-static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
- struct blk_zone *blkz)
+static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
{
+ struct dmz_metadata *zmd = data;
+ struct dm_zone *zone = &zmd->zones[idx];
struct dmz_dev *dev = zmd->dev;
/* Ignore the eventual last runt (smaller) zone */
@@ -1096,26 +1105,29 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
atomic_set(&zone->refcount, 0);
zone->chunk = DMZ_MAP_UNMAPPED;
- if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ switch (blkz->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
zmd->nr_rnd_zones++;
- } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
- blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
set_bit(DMZ_SEQ, &zone->flags);
- } else
+ break;
+ default:
return -ENXIO;
-
- if (blkz->cond == BLK_ZONE_COND_OFFLINE)
- set_bit(DMZ_OFFLINE, &zone->flags);
- else if (blkz->cond == BLK_ZONE_COND_READONLY)
- set_bit(DMZ_READ_ONLY, &zone->flags);
+ }
if (dmz_is_rnd(zone))
zone->wp_block = 0;
else
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
- if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
+ else {
zmd->nr_useable_zones++;
if (dmz_is_rnd(zone)) {
zmd->nr_rnd_zones++;
@@ -1139,23 +1151,13 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
}
/*
- * The size of a zone report in number of zones.
- * This results in 4096*64B=256KB report zones commands.
- */
-#define DMZ_REPORT_NR_ZONES 4096
-
-/*
* Allocate and initialize zone descriptors using the zone
* information from disk.
*/
static int dmz_init_zones(struct dmz_metadata *zmd)
{
struct dmz_dev *dev = zmd->dev;
- struct dm_zone *zone;
- struct blk_zone *blkz;
- unsigned int nr_blkz;
- sector_t sector = 0;
- int i, ret = 0;
+ int ret;
/* Init */
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
@@ -1169,54 +1171,38 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
dmz_dev_info(dev, "Using %zu B for zone information",
sizeof(struct dm_zone) * dev->nr_zones);
- /* Get zone information */
- nr_blkz = DMZ_REPORT_NR_ZONES;
- blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
- if (!blkz) {
- ret = -ENOMEM;
- goto out;
- }
-
/*
- * Get zone information and initialize zone descriptors.
- * At the same time, determine where the super block
- * should be: first block of the first randomly writable
- * zone.
+ * Get zone information and initialize zone descriptors. At the same
+ * time, determine where the super block should be: first block of the
+ * first randomly writable zone.
*/
- zone = zmd->zones;
- while (sector < dev->capacity) {
- /* Get zone information */
- nr_blkz = DMZ_REPORT_NR_ZONES;
- ret = blkdev_report_zones(dev->bdev, sector, blkz, &nr_blkz);
- if (ret) {
- dmz_dev_err(dev, "Report zones failed %d", ret);
- goto out;
- }
+ ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
+ zmd);
+ if (ret < 0) {
+ dmz_drop_zones(zmd);
+ return ret;
+ }
- if (!nr_blkz)
- break;
+ return 0;
+}
- /* Process report */
- for (i = 0; i < nr_blkz; i++) {
- ret = dmz_init_zone(zmd, zone, &blkz[i]);
- if (ret)
- goto out;
- sector += dev->zone_nr_sectors;
- zone++;
- }
- }
+static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
+ void *data)
+{
+ struct dm_zone *zone = data;
- /* The entire zone configuration of the disk should now be known */
- if (sector < dev->capacity) {
- dmz_dev_err(dev, "Failed to get correct zone information");
- ret = -ENXIO;
- }
-out:
- kfree(blkz);
- if (ret)
- dmz_drop_zones(zmd);
+ clear_bit(DMZ_OFFLINE, &zone->flags);
+ clear_bit(DMZ_READ_ONLY, &zone->flags);
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
- return ret;
+ if (dmz_is_seq(zone))
+ zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
+ else
+ zone->wp_block = 0;
+ return 0;
}
/*
@@ -1224,9 +1210,7 @@ out:
*/
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- unsigned int nr_blkz = 1;
unsigned int noio_flag;
- struct blk_zone blkz;
int ret;
/*
@@ -1236,29 +1220,19 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz);
+ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
+ dmz_update_zone_cb, zone);
memalloc_noio_restore(noio_flag);
- if (!nr_blkz)
+
+ if (ret == 0)
ret = -EIO;
- if (ret) {
+ if (ret < 0) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
+ dmz_check_bdev(zmd->dev);
return ret;
}
- clear_bit(DMZ_OFFLINE, &zone->flags);
- clear_bit(DMZ_READ_ONLY, &zone->flags);
- if (blkz.cond == BLK_ZONE_COND_OFFLINE)
- set_bit(DMZ_OFFLINE, &zone->flags);
- else if (blkz.cond == BLK_ZONE_COND_READONLY)
- set_bit(DMZ_READ_ONLY, &zone->flags);
-
- if (dmz_is_seq(zone))
- zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
- else
- zone->wp_block = 0;
-
return 0;
}
@@ -1312,9 +1286,9 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
struct dmz_dev *dev = zmd->dev;
- ret = blkdev_reset_zones(dev->bdev,
- dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
+ dmz_start_sect(zmd, zone),
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
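
The rewritten dmz_init_zones() and dmz_update_zone() use the new callback form of blkdev_report_zones(), which invokes a report_zones_cb per zone instead of filling a caller-allocated array -- this is what removes the DMZ_REPORT_NR_ZONES batching above. A minimal self-contained sketch (helper names hypothetical):

	/* Count the zones of a zoned block device via the callback API. */
	static int count_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
	{
		unsigned int *count = data;

		(*count)++;
		return 0;	/* a non-zero return stops the iteration */
	}

	static int count_zones(struct block_device *bdev, unsigned int *count)
	{
		*count = 0;
		/* Returns the number of zones reported, or a negative errno. */
		return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
					   count_zone_cb, count);
	}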
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index d240d7ca8a8a..e7ace908a9b7 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -82,6 +82,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
dmz_id(zmd, zone), (unsigned long long)wp_block,
(unsigned long long)block, nr_blocks, ret);
+ dmz_check_bdev(zrc->dev);
return ret;
}
@@ -489,12 +490,7 @@ static void dmz_reclaim_work(struct work_struct *work)
ret = dmz_do_reclaim(zrc);
if (ret) {
dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
- if (ret == -EIO)
- /*
- * LLD might be performing some error handling sequence
- * at the underlying device. To not interfere, do not
- * attempt to schedule the next reclaim run immediately.
- */
+ if (!dmz_check_bdev(zrc->dev))
return;
}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index d3bcc4197f5d..4574e0dedbd6 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -80,6 +80,8 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
+ if (bio->bi_status != BLK_STS_OK)
+ bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
@@ -565,32 +567,52 @@ out:
}
/*
- * Check the backing device availability. If it's on the way out,
+ * Check if the backing device is being removed. If it's on the way out,
* start failing I/O. Reclaim and metadata components also call this
* function to cleanly abort operation in the event of such failure.
*/
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
- struct gendisk *disk;
+ if (dmz_dev->flags & DMZ_BDEV_DYING)
+ return true;
- if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
- disk = dmz_dev->bdev->bd_disk;
- if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
- dmz_dev_warn(dmz_dev, "Backing device queue dying");
- dmz_dev->flags |= DMZ_BDEV_DYING;
- } else if (disk->fops->check_events) {
- if (disk->fops->check_events(disk, 0) &
- DISK_EVENT_MEDIA_CHANGE) {
- dmz_dev_warn(dmz_dev, "Backing device offline");
- dmz_dev->flags |= DMZ_BDEV_DYING;
- }
- }
+ if (dmz_dev->flags & DMZ_CHECK_BDEV)
+ return !dmz_check_bdev(dmz_dev);
+
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
}
return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
+ * Check the backing device availability. This detects events such as
+ * the backing device going offline due to errors or media removal.
+ * This check is less efficient than dmz_bdev_is_dying() and should
+ * only be performed as part of error handling.
+ */
+bool dmz_check_bdev(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ dmz_dev->flags &= ~DMZ_CHECK_BDEV;
+
+ if (dmz_bdev_is_dying(dmz_dev))
+ return false;
+
+ disk = dmz_dev->bdev->bd_disk;
+ if (disk->fops->check_events &&
+ disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+
+ return !(dmz_dev->flags & DMZ_BDEV_DYING);
+}
+
+/*
* Process a new BIO.
*/
static int dmz_map(struct dm_target *ti, struct bio *bio)
@@ -902,8 +924,8 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
- if (dmz_bdev_is_dying(dmz->dev))
- return -ENODEV;
+ if (!dmz_check_bdev(dmz->dev))
+ return -EIO;
*bdev = dmz->dev->bdev;
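
Taken together, the dmz_bdev_is_dying()/dmz_check_bdev() split implements a deferred availability check: the bio completion hot path merely sets DMZ_CHECK_BDEV, and the comparatively expensive check_events() call is paid later, during error handling. The pattern, reduced:

	/* Hot path (dmz_bio_endio): only mark that a check is pending. */
	if (bio->bi_status != BLK_STS_OK)
		dmz_dev->flags |= DMZ_CHECK_BDEV;

	/* Cold path (error handling, next liveness query): resolve it. */
	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		dead = !dmz_check_bdev(dmz_dev);	/* clears DMZ_CHECK_BDEV,
							 * may set DMZ_BDEV_DYING */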
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index d8e70b0ade35..5b5e493d479c 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -72,6 +72,7 @@ struct dmz_dev {
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
+#define DMZ_CHECK_BDEV (1 << 1)
/*
* Zone descriptor.
@@ -255,5 +256,6 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
* Functions defined in dm-zoned-target.c
*/
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+bool dmz_check_bdev(struct dmz_dev *dmz_dev);
#endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1a5e328c443a..e8f9661a10a1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -440,14 +440,48 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
+{
+ struct dm_report_zones_args *args = data;
+ sector_t sector_diff = args->tgt->begin - args->start;
+
+ /*
+ * Ignore zones beyond the target range.
+ */
+ if (zone->start >= args->start + args->tgt->len)
+ return 0;
+
+ /*
+ * Remap the start sector and write pointer position of the zone
+ * to match its position in the target range.
+ */
+ zone->start += sector_diff;
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp += sector_diff;
+ }
+
+ args->next_sector = zone->start + zone->len;
+ return args->orig_cb(zone, args->zone_idx++, args->orig_data);
+}
+EXPORT_SYMBOL_GPL(dm_report_zones_cb);
+
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
-#ifdef CONFIG_BLK_DEV_ZONED
struct mapped_device *md = disk->private_data;
- struct dm_target *tgt;
struct dm_table *map;
int srcu_idx, ret;
+ struct dm_report_zones_args args = {
+ .next_sector = sector,
+ .orig_data = data,
+ .orig_cb = cb,
+ };
if (dm_suspended_md(md))
return -EAGAIN;
@@ -456,38 +490,30 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
if (!map)
return -EIO;
- tgt = dm_table_find_target(map, sector);
- if (!tgt) {
- ret = -EIO;
- goto out;
- }
+ do {
+ struct dm_target *tgt;
- /*
- * If we are executing this, we already know that the block device
- * is a zoned device and so each target should have support for that
- * type of drive. A missing report_zones method means that the target
- * driver has a problem.
- */
- if (WARN_ON(!tgt->type->report_zones)) {
- ret = -EIO;
- goto out;
- }
+ tgt = dm_table_find_target(map, args.next_sector);
+ if (WARN_ON_ONCE(!tgt->type->report_zones)) {
+ ret = -EIO;
+ goto out;
+ }
- /*
- * blkdev_report_zones() will loop and call this again to cover all the
- * zones of the target, eventually moving on to the next target.
- * So there is no need to loop here trying to fill the entire array
- * of zones.
- */
- ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);
+ args.tgt = tgt;
+ ret = tgt->type->report_zones(tgt, &args, nr_zones);
+ if (ret < 0)
+ goto out;
+ } while (args.zone_idx < nr_zones &&
+ args.next_sector < get_capacity(disk));
+ ret = args.zone_idx;
out:
dm_put_live_table(md, srcu_idx);
return ret;
-#else
- return -ENOTSUPP;
-#endif
}
+#else
+#define dm_blk_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
@@ -1174,7 +1200,8 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
+ * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1212,54 +1239,6 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
-/*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the underlying device of the target. The zone
- * descriptors must be remapped to match their position within the dm device.
- * The caller target should obtain the zones information using
- * blkdev_report_zones() to ensure that remapping for partition offset is
- * already handled.
- */
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
- struct blk_zone *zones, unsigned int *nr_zones)
-{
-#ifdef CONFIG_BLK_DEV_ZONED
- struct blk_zone *zone;
- unsigned int nrz = *nr_zones;
- int i;
-
- /*
- * Remap the start sector and write pointer position of the zones in
- * the array. Since we may have obtained from the target underlying
- * device more zones that the target size, also adjust the number
- * of zones.
- */
- for (i = 0; i < nrz; i++) {
- zone = zones + i;
- if (zone->start >= start + ti->len) {
- memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
- break;
- }
-
- zone->start = zone->start + ti->begin - start;
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- continue;
-
- if (zone->cond == BLK_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
- else if (zone->cond == BLK_ZONE_COND_EMPTY)
- zone->wp = zone->start;
- else
- zone->wp = zone->wp + ti->begin - start;
- }
-
- *nr_zones = i;
-#else /* !CONFIG_BLK_DEV_ZONED */
- *nr_zones = 0;
-#endif
-}
-EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
@@ -1627,7 +1606,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
- } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
+ } else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio;
ci.sector_count = 0;
error = __split_and_process_non_flush(&ci);
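
One subtlety in dm_report_zones_cb() above: sector_diff = args->tgt->begin - args->start can be logically negative, but sector_t is unsigned, so the value wraps and the later additions come out right modulo 2^64. A worked example with hypothetical numbers -- a target beginning at dm sector 131072 whose data starts at device sector 524288:

	sector_diff = 131072 - 524288;	/* wraps to 2^64 - 393216 */

	/* A zone reported at device sector 655360: */
	zone->start += sector_diff;	/* 655360 - 393216 = 262144, the dm-relative start */

The FULL/EMPTY special cases then pin the write pointer to the zone end or start instead of trusting the device-reported value, which need not be meaningful in those conditions.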
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b092c7b5282f..3ad18246fcb3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2139,6 +2139,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
sizeof(bitmap_super_t));
+ spin_lock_irq(&bitmap->counts.lock);
md_bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
@@ -2154,7 +2155,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
blocks = min(old_counts.chunks << old_counts.chunkshift,
chunks << chunkshift);
- spin_lock_irq(&bitmap->counts.lock);
/* For cluster raid, need to pre-allocate bitmap */
if (mddev_is_clustered(bitmap->mddev)) {
unsigned long page;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index c766c559d36d..26c75c0199fa 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -244,10 +244,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
sector_t start_sector, end_sector, data_offset;
sector_t bio_sector = bio->bi_iter.bi_sector;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
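
md-linear, md-multipath, raid0, raid1 and raid10 all converge on this same call pattern, whose contract is spelled out by the new kerneldoc on md_flush_request() in the md.c hunk below:

	/*
	 * md_flush_request() returns true when the bio was consumed
	 * (completed, or queued behind an in-flight flush); false means
	 * the flush part is done and the caller must still submit the
	 * data part, with REQ_PREFLUSH already cleared for it.
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;	/* bio handled (or will be) elsewhere */
	/* fall through: process the now flush-less data bio */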
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 6780938d2991..152f9e65a226 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -104,10 +104,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1be7abeb24fd..805b33e27496 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -550,7 +550,13 @@ static void md_submit_flush_data(struct work_struct *ws)
}
}
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+/*
+ * Manages the consolidation of flushes and submits any flush needed for
+ * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
+ * being finished in another context. Returns false if the flushing is
+ * complete but the I/O portion of the bio still needs to be processed.
+ */
+bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
ktime_t start = ktime_get_boottime();
spin_lock_irq(&mddev->lock);
@@ -575,9 +581,10 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
else {
bio->bi_opf &= ~REQ_PREFLUSH;
- mddev->pers->make_request(mddev, bio);
+ return false;
}
}
+ return true;
}
EXPORT_SYMBOL(md_flush_request);
@@ -1098,6 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
+ bool spare_disk = true;
/*
* Calculate the position of the superblock (512-byte sectors),
@@ -1148,8 +1156,18 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
else
rdev->desc_nr = sb->this_disk.number;
+ /* not spare disk, or LEVEL_MULTIPATH */
+ if (sb->level == LEVEL_MULTIPATH ||
+ (rdev->desc_nr >= 0 &&
+ sb->disks[rdev->desc_nr].state &
+ ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
+ spare_disk = false;
+
if (!refdev) {
- ret = 1;
+ if (!spare_disk)
+ ret = 1;
+ else
+ ret = 0;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
@@ -1165,7 +1183,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
- if (ev1 > ev2)
+
+ if (!spare_disk && ev1 > ev2)
ret = 1;
else
ret = 0;
@@ -1525,6 +1544,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
+ bool spare_disk = true;
/*
* Calculate the position of the superblock in 512-byte sectors.
@@ -1658,8 +1678,19 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != 0)
return -EINVAL;
+ /* not spare disk, or LEVEL_MULTIPATH */
+ if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
+ (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
+ spare_disk = false;
+
if (!refdev) {
- ret = 1;
+ if (!spare_disk)
+ ret = 1;
+ else
+ ret = 0;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
@@ -1676,7 +1707,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
- if (ev1 > ev2)
+ if (!spare_disk && ev1 > ev2)
ret = 1;
else
ret = 0;
@@ -3597,7 +3628,7 @@ abort_free:
* Check a full RAID array for plausibility
*/
-static void analyze_sbs(struct mddev *mddev)
+static int analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
@@ -3618,6 +3649,12 @@ static void analyze_sbs(struct mddev *mddev)
md_kick_rdev_from_array(rdev);
}
+ /* Cannot find a valid fresh disk */
+ if (!freshest) {
+ pr_warn("md: cannot find a valid disk\n");
+ return -EINVAL;
+ }
+
super_types[mddev->major_version].
validate_super(mddev, freshest);
@@ -3652,6 +3689,8 @@ static void analyze_sbs(struct mddev *mddev)
clear_bit(In_sync, &rdev->flags);
}
}
+
+ return 0;
}
/* Read a fixed-point number.
@@ -5570,7 +5609,9 @@ int md_run(struct mddev *mddev)
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
- analyze_sbs(mddev);
+ err = analyze_sbs(mddev);
+ if (err)
+ return -EINVAL;
}
if (mddev->level != LEVEL_NONE)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c5e3ff398b59..5f86f8adb0a4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -550,7 +550,7 @@ struct md_personality
int level;
struct list_head list;
struct module *owner;
- bool (*make_request)(struct mddev *mddev, struct bio *bio);
+ bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
/*
* start up works that do NOT require md_thread. tasks that
* requires md_thread should go into start()
@@ -703,7 +703,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
extern int mddev_congested(struct mddev *mddev, int bits);
-extern void md_flush_request(struct mddev *mddev, struct bio *bio);
+extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 1e772287b1c8..b7c20979bd19 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -575,10 +575,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
unsigned chunk_sects;
unsigned sectors;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
raid0_handle_discard(mddev, bio);
@@ -615,7 +614,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
tmp_dev = map_sector(mddev, zone, sector, &sector);
break;
default:
- WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
+ WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
bio_io_error(bio);
return true;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0466ee2453b4..a409ab6f30bc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -819,6 +819,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
else
generic_make_request(bio);
bio = next;
+ cond_resched();
}
}
@@ -1567,10 +1568,9 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
sector_t sectors;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
/*
* There is a limit to the maximum size, but
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 299c7b1c9718..ec136e44aef7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
out_free_pages:
while (--j >= 0)
- resync_free_pages(&rps[j * 2]);
+ resync_free_pages(&rps[j]);
j = 0;
out_free_bio:
@@ -1525,10 +1525,9 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
int chunk_sects = chunk_mask + 1;
int sectors = bio_sectors(bio);
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
if (!md_write_start(mddev, bio))
return false;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 18a4064a61a8..cab5b1352892 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1404,7 +1404,7 @@ int ppl_init_log(struct r5conf *conf)
atomic64_set(&ppl_conf->seq, 0);
INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
- ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;
+ ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
if (!mddev->external) {
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 223e97ab27e6..f0fc538bfe59 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1134,7 +1134,7 @@ again:
bi->bi_iter.bi_size = STRIPE_SIZE;
bi->bi_write_hint = sh->dev[i].write_hint;
if (!rrdev)
- sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
+ sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -1187,7 +1187,7 @@ again:
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_iter.bi_size = STRIPE_SIZE;
rbi->bi_write_hint = sh->dev[i].write_hint;
- sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
+ sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -5592,8 +5592,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
if (ret == 0)
return true;
if (ret == -ENODEV) {
- md_flush_request(mddev, bi);
- return true;
+ if (md_flush_request(mddev, bi))
+ return true;
}
/* ret == -EAGAIN, fallback */
/*
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 5ef7daeb8cbd..9340435a94a0 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -319,6 +319,8 @@ static void cec_post_state_event(struct cec_adapter *adap)
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event(adap, &ev);
}
@@ -1976,7 +1978,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Play function, this message can have variable length
* depending on the specific play function that is used.
*/
- case 0x60:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
if (msg->len == 2)
rc_keydown(adap->rc, RC_PROTO_CEC,
msg->msg[2], 0);
@@ -1993,8 +1995,12 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* For the time being these messages are not processed by the
* framework and are simply forwarded to the user space.
*/
- case 0x56: case 0x57:
- case 0x67: case 0x68: case 0x69: case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
break;
default:
rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 12d676484472..17d1cb2e5f97 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -187,6 +187,21 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return 0;
}
+static long cec_adap_g_connector_info(struct cec_adapter *adap,
+ struct cec_log_addrs __user *parg)
+{
+ int ret = 0;
+
+ if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
+ return -ENOTTY;
+
+ mutex_lock(&adap->lock);
+ if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
+ ret = -EFAULT;
+ mutex_unlock(&adap->lock);
+ return ret;
+}
+
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_msg __user *parg)
{
@@ -506,6 +521,9 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case CEC_ADAP_S_LOG_ADDRS:
return cec_adap_s_log_addrs(adap, fh, block, parg);
+ case CEC_ADAP_G_CONNECTOR_INFO:
+ return cec_adap_g_connector_info(adap, parg);
+
case CEC_TRANSMIT:
return cec_transmit(adap, fh, block, parg);
@@ -578,6 +596,8 @@ static int cec_open(struct inode *inode, struct file *filp)
/* Queue up initial state events */
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
if (adap->pin && adap->pin->ops->read_hpd) {
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 9c610e1e99b8..db7adffcdc76 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -257,11 +257,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
struct cec_adapter *adap;
int res;
- /*
- * Disable this capability until the connector info public API
- * is ready.
- */
- caps &= ~CEC_CAP_CONNECTOR_INFO;
#ifndef CONFIG_MEDIA_CEC_RC
caps &= ~CEC_CAP_RC;
#endif
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 4d82a5522072..7cf42b133dbc 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -153,13 +153,14 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
}
EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_register);
-void cec_notifier_cec_adap_unregister(struct cec_notifier *n)
+void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap)
{
if (!n)
return;
mutex_lock(&n->lock);
- n->cec_adap->notifier = NULL;
+ adap->notifier = NULL;
n->cec_adap = NULL;
n->callback = NULL;
mutex_unlock(&n->lock);
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 8f987bc0dd88..660fe111f540 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -1279,6 +1279,15 @@ static void cec_pin_adap_free(struct cec_adapter *adap)
kfree(pin);
}
+static int cec_pin_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct cec_pin *pin = adap->pin;
+
+ if (pin->ops->received)
+ return pin->ops->received(adap, msg);
+ return -ENOMSG;
+}
+
void cec_pin_changed(struct cec_adapter *adap, bool value)
{
struct cec_pin *pin = adap->pin;
@@ -1301,6 +1310,7 @@ static const struct cec_adap_ops cec_pin_adap_ops = {
.error_inj_parse_line = cec_pin_error_inj_parse_line,
.error_inj_show = cec_pin_error_inj_show,
#endif
+ .received = cec_pin_received,
};
struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 0ba51dacc580..c1511094fdc7 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -230,8 +230,8 @@ static char *siano_msgs[] = {
[MSG_SMS_FLASH_DL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FLASH_DL_REQ",
[MSG_SMS_EXEC_TEST_1_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_REQ",
[MSG_SMS_EXEC_TEST_1_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_RES",
- [MSG_SMS_ENBALE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_REQ",
- [MSG_SMS_ENBALE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_RES",
+ [MSG_SMS_ENABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_REQ",
+ [MSG_SMS_ENABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_RES",
[MSG_SMS_SPI_SET_BUS_WIDTH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_REQ",
[MSG_SMS_SPI_SET_BUS_WIDTH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_RES",
[MSG_SMS_SEND_EMM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_REQ",
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index a2f95f4899c2..b3b793b5caf3 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -434,8 +434,8 @@ enum msg_types {
MSG_SMS_FLASH_DL_REQ = 732,
MSG_SMS_EXEC_TEST_1_REQ = 734,
MSG_SMS_EXEC_TEST_1_RES = 735,
- MSG_SMS_ENBALE_TS_INTERFACE_REQ = 736,
- MSG_SMS_ENBALE_TS_INTERFACE_RES = 737,
+ MSG_SMS_ENABLE_TS_INTERFACE_REQ = 736,
+ MSG_SMS_ENABLE_TS_INTERFACE_RES = 737,
MSG_SMS_SPI_SET_BUS_WIDTH_REQ = 738,
MSG_SMS_SPI_SET_BUS_WIDTH_RES = 739,
MSG_SMS_SEND_EMM_REQ = 740,
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index b2c54c256e86..ada41d5c4e83 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
*
* Siano Mobile Silicon, Inc.
* MDTV receiver kernel modules.
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 5a9ba3846f0a..e652f4318284 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -49,8 +49,11 @@ module_param(debug, int, 0644);
V4L2_BUF_FLAG_REQUEST_FD | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
-#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
- V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | \
+ V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | \
+ V4L2_BUF_FLAG_TIMECODE | \
+ V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)
/*
* __verify_planes_array() - verify that the planes array passed in struct
@@ -194,6 +197,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
vbuf->sequence = 0;
vbuf->request_fd = -1;
+ vbuf->is_held = false;
if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
switch (b->memory) {
@@ -321,6 +325,8 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
*/
vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
vbuf->field = b->field;
+ if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
} else {
/* Zero any output buffer flags as this is a capture buffer */
vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
@@ -654,6 +660,8 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 6f7eedb4c00e..0ba382948c51 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -298,7 +298,7 @@ int cxd2820r_sleep_c(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index d56c6f788196..fbdfa6bf38dc 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -392,7 +392,7 @@ int cxd2820r_sleep_t(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index f924a80b968a..34ef2bb2de34 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -386,7 +386,7 @@ int cxd2820r_sleep_t2(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 1b30cf570803..758c95bc3b11 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -60,6 +60,7 @@ struct cxd2841er_priv {
enum cxd2841er_xtal xtal;
enum fe_caps caps;
u32 flags;
+ unsigned long stats_time;
};
static const struct cxd2841er_cnr_data s_cn_data[] = {
@@ -3279,9 +3280,15 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
if (status & FE_HAS_LOCK) {
+ if (priv->stats_time &&
+ (!time_after(jiffies, priv->stats_time)))
+ return 0;
+
+ /* Prevent retrieving stats faster than once per second */
+ priv->stats_time = jiffies + msecs_to_jiffies(1000);
+
cxd2841er_read_snr(fe);
cxd2841er_read_ucblocks(fe);
-
cxd2841er_read_ber(fe);
} else {
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
@@ -3360,6 +3367,9 @@ done:
p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ /* Reset the jiffies-based rate limiting */
+ priv->stats_time = 0;
+
return ret;
}
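The once-per-second gate above uses time_after() rather than a plain jiffies comparison because the jiffies counter wraps; time_after() stays correct across wraparound since it reduces to a signed difference, roughly:

	#define time_after(a, b)	((long)((b) - (a)) < 0)	/* simplified */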
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 2f5af4813a74..ac7be872f460 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4201,7 +4201,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
u16 *data, u32 flags)
{
u8 buf[2] = { 0 };
- int rc = -EIO;
+ int rc;
u16 word = 0;
if (!data)
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 4e50441c247a..a7faf0cf8788 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -517,7 +517,7 @@ static void mb86a20s_reset_frontend_cache(struct dvb_frontend *fe)
* Estimates the bit rate using the per-segment bit rate given by
* ABNT/NBR 15601 spec (table 4).
*/
-static u32 isdbt_rate[3][5][4] = {
+static const u32 isdbt_rate[3][5][4] = {
{ /* DQPSK/QPSK */
{ 280850, 312060, 330420, 340430 }, /* 1/2 */
{ 374470, 416080, 440560, 453910 }, /* 2/3 */
@@ -539,13 +539,9 @@ static u32 isdbt_rate[3][5][4] = {
}
};
-static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
- u32 modulation, u32 forward_error_correction,
- u32 guard_interval,
- u32 segment)
+static u32 isdbt_layer_min_bitrate(struct dtv_frontend_properties *c,
+ u32 layer)
{
- struct mb86a20s_state *state = fe->demodulator_priv;
- u32 rate;
int mod, fec, guard;
/*
@@ -553,7 +549,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
* to consider the lowest bit rate, to avoid taking too long time
* to get BER.
*/
- switch (modulation) {
+ switch (c->layer[layer].modulation) {
case DQPSK:
case QPSK:
default:
@@ -567,7 +563,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (forward_error_correction) {
+ switch (c->layer[layer].fec) {
default:
case FEC_1_2:
case FEC_AUTO:
@@ -587,7 +583,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (guard_interval) {
+ switch (c->guard_interval) {
default:
case GUARD_INTERVAL_1_4:
guard = 0;
@@ -603,29 +599,14 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- /* Samples BER at BER_SAMPLING_RATE seconds */
- rate = isdbt_rate[mod][fec][guard] * segment * BER_SAMPLING_RATE;
-
- /* Avoids sampling too quickly or to overflow the register */
- if (rate < 256)
- rate = 256;
- else if (rate > (1 << 24) - 1)
- rate = (1 << 24) - 1;
-
- dev_dbg(&state->i2c->dev,
- "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
- __func__, 'A' + layer,
- segment * isdbt_rate[mod][fec][guard]/1000,
- rate, rate);
-
- state->estimated_rate[layer] = rate;
+ return isdbt_rate[mod][fec][guard] * c->layer[layer].segment_count;
}
static int mb86a20s_get_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int layer, rc;
+ int layer, rc, rate, counter;
dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
@@ -676,10 +657,21 @@ static int mb86a20s_get_frontend(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev, "%s: interleaving %d.\n",
__func__, rc);
c->layer[layer].interleaving = rc;
- mb86a20s_layer_bitrate(fe, layer, c->layer[layer].modulation,
- c->layer[layer].fec,
- c->guard_interval,
- c->layer[layer].segment_count);
+
+ rate = isdbt_layer_min_bitrate(c, layer);
+ counter = rate * BER_SAMPLING_RATE;
+
+ /* Avoids sampling too quickly or to overflow the register */
+ if (counter < 256)
+ counter = 256;
+ else if (counter > (1 << 24) - 1)
+ counter = (1 << 24) - 1;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
+ __func__, 'A' + layer, rate / 1000, counter, counter);
+
+ state->estimated_rate[layer] = counter;
}
rc = mb86a20s_writereg(state, 0x6d, 0x84);
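Worked numbers for the clamp above, assuming for illustration that BER_SAMPLING_RATE is 1 (its actual value is defined earlier in this driver): a QPSK layer with FEC 1/2, guard interval 1/4 and one segment gives

	rate    = isdbt_rate[0][0][0] * 1;	/* 280850 bit/s */
	counter = rate * BER_SAMPLING_RATE;	/* 280850 */
	/* 256 <= 280850 <= (1 << 24) - 1, so the clamp changes nothing */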
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index 7cae7d632030..d43a67045dbe 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -135,11 +135,6 @@ static inline int mt312_writereg(struct mt312_state *state,
return mt312_write(state, reg, &tmp, 1);
}
-static inline u32 mt312_div(u32 a, u32 b)
-{
- return (a + (b / 2)) / b;
-}
-
static int mt312_reset(struct mt312_state *state, const u8 full)
{
return mt312_writereg(state, RESET, full ? 0x80 : 0x40);
@@ -187,7 +182,7 @@ static int mt312_get_symbol_rate(struct mt312_state *state, u32 *sr)
monitor = (buf[0] << 8) | buf[1];
dprintk("sr(auto) = %u\n",
- mt312_div(monitor * 15625, 4));
+ DIV_ROUND_CLOSEST(monitor * 15625, 4));
} else {
ret = mt312_writereg(state, MON_CTRL, 0x05);
if (ret < 0)
@@ -291,10 +286,10 @@ static int mt312_initfe(struct dvb_frontend *fe)
}
/* SYS_CLK */
- buf[0] = mt312_div(state->xtal * state->freq_mult * 2, 1000000);
+ buf[0] = DIV_ROUND_CLOSEST(state->xtal * state->freq_mult * 2, 1000000);
/* DISEQC_RATIO */
- buf[1] = mt312_div(state->xtal, 22000 * 4);
+ buf[1] = DIV_ROUND_CLOSEST(state->xtal, 22000 * 4);
ret = mt312_write(state, SYS_CLK, buf, sizeof(buf));
if (ret < 0)
@@ -610,7 +605,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
}
/* sr = (u16)(sr * 256.0 / 1000000.0) */
- sr = mt312_div(p->symbol_rate * 4, 15625);
+ sr = DIV_ROUND_CLOSEST(p->symbol_rate * 4, 15625);
/* SYM_RATE */
buf[0] = (sr >> 8) & 0x3f;
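DIV_ROUND_CLOSEST(a, b) computes the same rounded quotient as the removed helper, (a + b / 2) / b for non-negative operands. A worked example for the symbol-rate conversion above, with a typical DVB-S rate of 27.5 MS/s:

	/* (27500000 * 4 + 7812) / 15625 = 7040 */
	sr = DIV_ROUND_CLOSEST(27500000 * 4, 15625);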
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 50dccb394efa..ecd21adf8950 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -9,38 +9,43 @@
#define SI2168_H
#include <linux/dvb/frontend.h>
-/*
- * I2C address
- * 0x64
+/**
+ * struct si2168_config - configuration parameters for si2168
+ *
+ * @fe:
+ * frontend returned by driver
+ * @i2c_adapter:
+ * tuner I2C adapter returned by driver
+ * @ts_mode:
+ * Transport Stream mode. Can be:
+ * - %SI2168_TS_PARALLEL
+ * - %SI2168_TS_SERIAL
+ * - %SI2168_TS_TRISTATE
+ * - %SI2168_TS_CLK_MANUAL
+ * @ts_clock_inv:
+ * TS clock inverted
+ * @ts_clock_gapped:
+ * TS clock gapped
+ * @spectral_inversion:
+ * Inverted spectrum
+ *
+ * Note:
+ * The I2C address of this demod is 0x64.
*/
struct si2168_config {
- /*
- * frontend
- * returned by driver
- */
struct dvb_frontend **fe;
-
- /*
- * tuner I2C adapter
- * returned by driver
- */
struct i2c_adapter **i2c_adapter;
- /* TS mode */
#define SI2168_TS_PARALLEL 0x06
#define SI2168_TS_SERIAL 0x03
#define SI2168_TS_TRISTATE 0x00
#define SI2168_TS_CLK_MANUAL 0x20
u8 ts_mode;
- /* TS clock inverted */
- bool ts_clock_inv;
-
- /* TS clock gapped */
- bool ts_clock_gapped;
-
- /* Inverted spectrum */
- bool spectral_inversion;
+ /* Flags */
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
#endif
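With the flags now one-bit bitfields instead of bools, they pack into a single word and are set with 0/1. A usage sketch (field names are from this header; the rest of the setup is hypothetical):

	struct si2168_config cfg = {
		.ts_mode = SI2168_TS_PARALLEL,
		.ts_clock_inv = 1,	/* bitfield flag, formerly bool */
	};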
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 804d5b30c697..18bea5222082 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -34,12 +34,12 @@ struct si2168_dev {
unsigned int chip_id;
unsigned int version;
const char *firmware_name;
- bool active;
- bool warm;
u8 ts_mode;
- bool ts_clock_inv;
- bool ts_clock_gapped;
- bool spectral_inversion;
+ unsigned int active:1;
+ unsigned int warm:1;
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
/* firmware command struct */
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 849d63dbc279..e83836b29715 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -685,10 +685,33 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
p += new_msgs[j].len;
}
- if (i < num)
+ if (i < num) {
ret = -ENOMEM;
- else
+ } else if (!state->cfg.split_tuner_read_i2c || rd_num == 0) {
ret = i2c_transfer(state->i2c_client->adapter, new_msgs, j);
+ } else {
+ /*
+ * Split transactions at each I2C_M_RD message.
+ * Some parent devices require this,
+ * such as Friio (see dvb-usb-gl861).
+ */
+ int from, to;
+
+ ret = 0;
+ from = 0;
+ do {
+ int r;
+
+ to = from + 1;
+ while (to < j && !(new_msgs[to].flags & I2C_M_RD))
+ to++;
+ r = i2c_transfer(state->i2c_client->adapter,
+ &new_msgs[from], to - from);
+ ret = (r <= 0) ? r : ret + r;
+ from = to;
+ } while (from < j && ret > 0);
+ }
+
if (ret >= 0 && ret < j)
ret = -EIO;
kfree(new_msgs);
diff --git a/drivers/media/dvb-frontends/tc90522.h b/drivers/media/dvb-frontends/tc90522.h
index ac0e2ab51924..07e3813bf590 100644
--- a/drivers/media/dvb-frontends/tc90522.h
+++ b/drivers/media/dvb-frontends/tc90522.h
@@ -28,6 +28,9 @@ struct tc90522_config {
/* [OUT] tuner I2C adapter returned by driver */
struct i2c_adapter *tuner_i2c;
+
+ /* [IN] use two separate I2C transactions for one tuner read */
+ bool split_tuner_read_i2c;
};
#endif /* TC90522_H */
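A sketch of how a bridge driver might request the split behavior added above (only the new flag is shown; the remaining config fields and registration are unchanged and omitted):

	struct tc90522_config cfg = {
		/* bridge cannot combine the write and read halves of a
		 * tuner read into one transaction, e.g. dvb-usb-gl861 */
		.split_tuner_read_i2c = true,
	};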
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 7eee1812bba3..c68e002d26ea 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -566,10 +566,23 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+if MEDIA_CAMERA_SUPPORT
+
+config VIDEO_HI556
+ tristate "Hynix Hi-556 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-556 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi556.
+
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
@@ -581,7 +594,6 @@ config VIDEO_IMX214
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -592,16 +604,25 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
+config VIDEO_IMX290
+ tristate "Sony IMX290 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX290 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx290.
+
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX319 camera.
@@ -612,7 +633,6 @@ config VIDEO_IMX319
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX355 camera.
@@ -623,7 +643,6 @@ config VIDEO_IMX355
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
@@ -633,8 +652,7 @@ config VIDEO_OV2640
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
- depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
+ depends on VIDEO_V4L2 && I2C && GPIOLIB
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -646,7 +664,6 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -658,7 +675,6 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -671,7 +687,6 @@ config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Omnivision
@@ -681,7 +696,6 @@ config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -693,7 +707,6 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -705,7 +718,6 @@ config VIDEO_OV5647
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
@@ -716,7 +728,6 @@ config VIDEO_OV6650
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -729,7 +740,6 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -742,7 +752,7 @@ config VIDEO_OV5675
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
@@ -753,7 +763,6 @@ config VIDEO_OV5695
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -765,7 +774,6 @@ config VIDEO_OV7251
config VIDEO_OV772X
tristate "OmniVision OV772x sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -777,7 +785,6 @@ config VIDEO_OV772X
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -788,7 +795,6 @@ config VIDEO_OV7640
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -798,7 +804,6 @@ config VIDEO_OV7670
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
@@ -806,7 +811,6 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -833,7 +837,6 @@ config VIDEO_OV9650
config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -842,7 +845,6 @@ config VIDEO_OV13858
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the ST VS6624
camera.
@@ -853,7 +855,6 @@ config VIDEO_VS6624
config VIDEO_MT9M001
tristate "mt9m001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports MT9M001 cameras from Micron, monochrome
and colour models.
@@ -861,7 +862,6 @@ config VIDEO_MT9M001
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This driver supports MT9M032 camera sensors from Aptina, monochrome
@@ -878,7 +878,6 @@ config VIDEO_MT9M111
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This is a Video4Linux2 sensor driver for the Aptina
@@ -887,7 +886,6 @@ config VIDEO_MT9P031
config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T001 3 Mpixel camera.
@@ -895,7 +893,6 @@ config VIDEO_MT9T001
config VIDEO_MT9T112
tristate "Aptina MT9T111/MT9T112 support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
@@ -906,7 +903,6 @@ config VIDEO_MT9T112
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Micron
MT9V011 1.3 Mpixel camera. It currently only works with the
@@ -915,7 +911,6 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -925,7 +920,6 @@ config VIDEO_MT9V032
config VIDEO_MT9V111
tristate "Aptina MT9V111 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina/Micron
MT9V111 sensor.
@@ -936,14 +930,12 @@ config VIDEO_MT9V111
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports SR030PC30 VGA camera from Siliconfile
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports NOON010PC30 CIF camera from Siliconfile
@@ -952,7 +944,6 @@ source "drivers/media/i2c/m5mols/Kconfig"
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
sensor.
@@ -962,7 +953,6 @@ config VIDEO_RJ54N1
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
@@ -970,7 +960,6 @@ config VIDEO_S5K6AA
config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
@@ -1002,12 +991,15 @@ config VIDEO_S5C73M3
help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
+endif
comment "Lens drivers"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on GPIOLIB && I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
@@ -1042,12 +1034,15 @@ config VIDEO_DW9807_VCM
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+endif
+
comment "Flash devices"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -1055,7 +1050,6 @@ config VIDEO_ADP1653
config VIDEO_LM3560
tristate "LM3560 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3560 dual flash controllers. It controls
@@ -1064,12 +1058,13 @@ config VIDEO_LM3560
config VIDEO_LM3646
tristate "LM3646 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
+endif
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -1113,6 +1108,7 @@ comment "SDR tuner chips"
config SDR_MAX2175
tristate "Maxim 2175 RF to Bits tuner"
depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
+ select REGMAP_I2C
help
Support for Maxim 2175 tuner. It is an advanced analog/digital
radio receiver with RF-to-Bits front-end designed for SDR solutions.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index beb170b002dc..c147bb9d28db 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -109,9 +109,11 @@ obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
+obj-$(CONFIG_VIDEO_IMX290) += imx290.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 925c171e7797..19c74db0649f 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -19,13 +19,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-#define AD5820_NAME "ad5820"
-
/* Register definitions */
#define AD5820_POWER_DOWN (1 << 15)
#define AD5820_DAC_SHIFT 4
@@ -47,6 +46,8 @@ struct ad5820_device {
u32 focus_ramp_time;
u32 focus_ramp_mode;
+ struct gpio_desc *enable_gpio;
+
struct mutex power_lock;
int power_count;
@@ -114,6 +115,8 @@ static int ad5820_power_off(struct ad5820_device *coil, bool standby)
ret = ad5820_update_hw(coil);
}
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
+
ret2 = regulator_disable(coil->vana);
if (ret)
return ret;
@@ -128,6 +131,8 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
if (ret < 0)
return ret;
+ gpiod_set_value_cansleep(coil->enable_gpio, 1);
+
if (restore) {
/* Restore the hardware settings. */
coil->standby = false;
@@ -138,6 +143,7 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
return 0;
fail:
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
coil->standby = true;
regulator_disable(coil->vana);
@@ -304,11 +310,21 @@ static int ad5820_probe(struct i2c_client *client,
return ret;
}
+ coil->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(coil->enable_gpio)) {
+ ret = PTR_ERR(coil->enable_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "could not get enable gpio\n");
+ return ret;
+ }
+
mutex_init(&coil->power_lock);
v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
coil->subdev.internal_ops = &ad5820_internal_ops;
+ coil->subdev.entity.function = MEDIA_ENT_F_LENS;
strscpy(coil->subdev.name, "ad5820 focus", sizeof(coil->subdev.name));
ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
@@ -341,17 +357,28 @@ static int ad5820_remove(struct i2c_client *client)
}
static const struct i2c_device_id ad5820_id_table[] = {
- { AD5820_NAME, 0 },
+ { "ad5820", 0 },
+ { "ad5821", 0 },
+ { "ad5823", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+static const struct of_device_id ad5820_of_table[] = {
+ { .compatible = "adi,ad5820" },
+ { .compatible = "adi,ad5821" },
+ { .compatible = "adi,ad5823" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5820_of_table);
+
static SIMPLE_DEV_PM_OPS(ad5820_pm, ad5820_suspend, ad5820_resume);
static struct i2c_driver ad5820_i2c_driver = {
.driver = {
- .name = AD5820_NAME,
+ .name = "ad5820",
.pm = &ad5820_pm,
+ .of_match_table = ad5820_of_table,
},
.probe = ad5820_probe,
.remove = ad5820_remove,
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index e780969cc2f2..6528e2343fc8 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1309,9 +1309,6 @@ static int adv7180_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr, client->adapter->name);
-
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -1382,6 +1379,9 @@ static int adv7180_probe(struct i2c_client *client,
if (ret)
goto err_free_irq;
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr, client->adapter->name);
+
return 0;
err_free_irq:
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 885619841719..0855f648416d 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2547,7 +2547,7 @@ struct adv7842_cfg_read_infoframe {
u8 payload_addr;
};
-static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infoframe *cri)
+static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_infoframe *cri)
{
int i;
u8 buffer[32];
@@ -2585,7 +2585,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
static void adv7842_log_infoframes(struct v4l2_subdev *sd)
{
int i;
- struct adv7842_cfg_read_infoframe cri[] = {
+ static const struct adv7842_cfg_read_infoframe cri[] = {
{ "AVI", 0x01, 0xe0, 0x00 },
{ "Audio", 0x02, 0xe3, 0x1c },
{ "SDP", 0x04, 0xe6, 0x2a },
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 43336175c7d9..73bc50c919d7 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -157,7 +157,7 @@ static int bt819_init(struct v4l2_subdev *sd)
0x12, 0x04, /* 0x12 Output Format */
0x13, 0x20, /* 0x13 Vertical Scaling msb 0x00
chroma comb OFF, line drop scaling, interlace scaling
- BUG? Why does turning the chroma comb on fuck up color?
+ BUG? Why does turning the chroma comb on screw up color?
Bug in the bt819 stepping on my board?
*/
0x14, 0x00, /* 0x14 Vertical Scaling lsb */
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
new file mode 100644
index 000000000000..c66cd1446c0f
--- /dev/null
+++ b/drivers/media/i2c/hi556.c
@@ -0,0 +1,1200 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI556_REG_VALUE_08BIT 1
+#define HI556_REG_VALUE_16BIT 2
+#define HI556_REG_VALUE_24BIT 3
+
+#define HI556_LINK_FREQ_437MHZ 437000000ULL
+#define HI556_MCLK 19200000
+#define HI556_DATA_LANES 2
+#define HI556_RGB_DEPTH 10
+
+#define HI556_REG_CHIP_ID 0x0f16
+#define HI556_CHIP_ID 0x0556
+
+#define HI556_REG_MODE_SELECT 0x0a00
+#define HI556_MODE_STANDBY 0x0000
+#define HI556_MODE_STREAMING 0x0100
+
+/* vertical-timings from sensor */
+#define HI556_REG_FLL 0x0006
+#define HI556_FLL_30FPS 0x0814
+#define HI556_FLL_30FPS_MIN 0x0814
+#define HI556_FLL_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define HI556_REG_LLP 0x0008
+
+/* Exposure controls from sensor */
+#define HI556_REG_EXPOSURE 0x0074
+#define HI556_EXPOSURE_MIN 6
+#define HI556_EXPOSURE_MAX_MARGIN 2
+#define HI556_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI556_REG_ANALOG_GAIN 0x0077
+#define HI556_ANAL_GAIN_MIN 0
+#define HI556_ANAL_GAIN_MAX 240
+#define HI556_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define HI556_REG_MWB_GR_GAIN 0x0078
+#define HI556_REG_MWB_GB_GAIN 0x007a
+#define HI556_REG_MWB_R_GAIN 0x007c
+#define HI556_REG_MWB_B_GAIN 0x007e
+#define HI556_DGTL_GAIN_MIN 0
+#define HI556_DGTL_GAIN_MAX 2048
+#define HI556_DGTL_GAIN_STEP 1
+#define HI556_DGTL_GAIN_DEFAULT 256
+
+/* Test Pattern Control */
+#define HI556_REG_ISP 0x0a05
+#define HI556_REG_ISP_TPG_EN 0x01
+#define HI556_REG_TEST_PATTERN 0x0201
+
+enum {
+ HI556_LINK_FREQ_437MHZ_INDEX,
+};
+
+struct hi556_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi556_reg_list {
+ u32 num_of_regs;
+ const struct hi556_reg *regs;
+};
+
+struct hi556_link_freq_config {
+ const struct hi556_reg_list reg_list;
+};
+
+struct hi556_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Default vertical timing size */
+ u32 fll_def;
+
+ /* Min vertical timing size */
+ u32 fll_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct hi556_reg_list reg_list;
+};
+
+#define to_hi556(_sd) container_of(_sd, struct hi556, sd)
+
+/* Sensor initialization */
+static const struct hi556_reg mipi_data_rate_874mbps[] = {
+ {0x0e00, 0x0102},
+ {0x0e02, 0x0102},
+ {0x0e0c, 0x0100},
+ {0x2000, 0x7400},
+ {0x2002, 0x001c},
+ {0x2004, 0x0242},
+ {0x2006, 0x0942},
+ {0x2008, 0x7007},
+ {0x200a, 0x0fd9},
+ {0x200c, 0x0259},
+ {0x200e, 0x7008},
+ {0x2010, 0x160e},
+ {0x2012, 0x0047},
+ {0x2014, 0x2118},
+ {0x2016, 0x0041},
+ {0x2018, 0x00d8},
+ {0x201a, 0x0145},
+ {0x201c, 0x0006},
+ {0x201e, 0x0181},
+ {0x2020, 0x13cc},
+ {0x2022, 0x2057},
+ {0x2024, 0x7001},
+ {0x2026, 0x0fca},
+ {0x2028, 0x00cb},
+ {0x202a, 0x009f},
+ {0x202c, 0x7002},
+ {0x202e, 0x13cc},
+ {0x2030, 0x019b},
+ {0x2032, 0x014d},
+ {0x2034, 0x2987},
+ {0x2036, 0x2766},
+ {0x2038, 0x0020},
+ {0x203a, 0x2060},
+ {0x203c, 0x0e5d},
+ {0x203e, 0x181d},
+ {0x2040, 0x2066},
+ {0x2042, 0x20c4},
+ {0x2044, 0x5000},
+ {0x2046, 0x0005},
+ {0x2048, 0x0000},
+ {0x204a, 0x01db},
+ {0x204c, 0x025a},
+ {0x204e, 0x00c0},
+ {0x2050, 0x0005},
+ {0x2052, 0x0006},
+ {0x2054, 0x0ad9},
+ {0x2056, 0x0259},
+ {0x2058, 0x0618},
+ {0x205a, 0x0258},
+ {0x205c, 0x2266},
+ {0x205e, 0x20c8},
+ {0x2060, 0x2060},
+ {0x2062, 0x707b},
+ {0x2064, 0x0fdd},
+ {0x2066, 0x81b8},
+ {0x2068, 0x5040},
+ {0x206a, 0x0020},
+ {0x206c, 0x5060},
+ {0x206e, 0x3143},
+ {0x2070, 0x5081},
+ {0x2072, 0x025c},
+ {0x2074, 0x7800},
+ {0x2076, 0x7400},
+ {0x2078, 0x001c},
+ {0x207a, 0x0242},
+ {0x207c, 0x0942},
+ {0x207e, 0x0bd9},
+ {0x2080, 0x0259},
+ {0x2082, 0x7008},
+ {0x2084, 0x160e},
+ {0x2086, 0x0047},
+ {0x2088, 0x2118},
+ {0x208a, 0x0041},
+ {0x208c, 0x00d8},
+ {0x208e, 0x0145},
+ {0x2090, 0x0006},
+ {0x2092, 0x0181},
+ {0x2094, 0x13cc},
+ {0x2096, 0x2057},
+ {0x2098, 0x7001},
+ {0x209a, 0x0fca},
+ {0x209c, 0x00cb},
+ {0x209e, 0x009f},
+ {0x20a0, 0x7002},
+ {0x20a2, 0x13cc},
+ {0x20a4, 0x019b},
+ {0x20a6, 0x014d},
+ {0x20a8, 0x2987},
+ {0x20aa, 0x2766},
+ {0x20ac, 0x0020},
+ {0x20ae, 0x2060},
+ {0x20b0, 0x0e5d},
+ {0x20b2, 0x181d},
+ {0x20b4, 0x2066},
+ {0x20b6, 0x20c4},
+ {0x20b8, 0x50a0},
+ {0x20ba, 0x0005},
+ {0x20bc, 0x0000},
+ {0x20be, 0x01db},
+ {0x20c0, 0x025a},
+ {0x20c2, 0x00c0},
+ {0x20c4, 0x0005},
+ {0x20c6, 0x0006},
+ {0x20c8, 0x0ad9},
+ {0x20ca, 0x0259},
+ {0x20cc, 0x0618},
+ {0x20ce, 0x0258},
+ {0x20d0, 0x2266},
+ {0x20d2, 0x20c8},
+ {0x20d4, 0x2060},
+ {0x20d6, 0x707b},
+ {0x20d8, 0x0fdd},
+ {0x20da, 0x86b8},
+ {0x20dc, 0x50e0},
+ {0x20de, 0x0020},
+ {0x20e0, 0x5100},
+ {0x20e2, 0x3143},
+ {0x20e4, 0x5121},
+ {0x20e6, 0x7800},
+ {0x20e8, 0x3140},
+ {0x20ea, 0x01c4},
+ {0x20ec, 0x01c1},
+ {0x20ee, 0x01c0},
+ {0x20f0, 0x01c4},
+ {0x20f2, 0x2700},
+ {0x20f4, 0x3d40},
+ {0x20f6, 0x7800},
+ {0x20f8, 0xffff},
+ {0x27fe, 0xe000},
+ {0x3000, 0x60f8},
+ {0x3002, 0x187f},
+ {0x3004, 0x7060},
+ {0x3006, 0x0114},
+ {0x3008, 0x60b0},
+ {0x300a, 0x1473},
+ {0x300c, 0x0013},
+ {0x300e, 0x140f},
+ {0x3010, 0x0040},
+ {0x3012, 0x100f},
+ {0x3014, 0x60f8},
+ {0x3016, 0x187f},
+ {0x3018, 0x7060},
+ {0x301a, 0x0114},
+ {0x301c, 0x60b0},
+ {0x301e, 0x1473},
+ {0x3020, 0x0013},
+ {0x3022, 0x140f},
+ {0x3024, 0x0040},
+ {0x3026, 0x000f},
+
+ {0x0b00, 0x0000},
+ {0x0b02, 0x0045},
+ {0x0b04, 0xb405},
+ {0x0b06, 0xc403},
+ {0x0b08, 0x0081},
+ {0x0b0a, 0x8252},
+ {0x0b0c, 0xf814},
+ {0x0b0e, 0xc618},
+ {0x0b10, 0xa828},
+ {0x0b12, 0x004c},
+ {0x0b14, 0x4068},
+ {0x0b16, 0x0000},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x0954, 0x0009},
+ {0x0956, 0x0000},
+ {0x0958, 0xbb80},
+ {0x095a, 0x5140},
+ {0x0c00, 0x1110},
+ {0x0c02, 0x0011},
+ {0x0c04, 0x0000},
+ {0x0c06, 0x0200},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a10, 0x4000},
+ {0x3068, 0xf800},
+ {0x306a, 0xf876},
+ {0x006c, 0x0000},
+ {0x005e, 0x0200},
+ {0x000e, 0x0100},
+ {0x0e0a, 0x0001},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x07bc},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x080e},
+ {0x0070, 0x0407},
+ {0x0002, 0x0000},
+ {0x0a02, 0x0100},
+ {0x0a24, 0x0100},
+ {0x0046, 0x0000},
+ {0x0076, 0x0000},
+ {0x0060, 0x0000},
+ {0x0062, 0x0530},
+ {0x0064, 0x0500},
+ {0x0066, 0x0530},
+ {0x0068, 0x0500},
+ {0x0122, 0x0300},
+ {0x015a, 0xff08},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x005c, 0x0102},
+ {0x0a1a, 0x0800},
+};
+
+static const struct hi556_reg mode_2592x1944_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8252},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x0a04, 0x014a},
+ {0x090c, 0x0fdc},
+ {0x090e, 0x002d},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc10a},
+ {0x0916, 0x071f},
+ {0x0918, 0x0408},
+ {0x091a, 0x0c0d},
+ {0x091c, 0x0f09},
+ {0x091e, 0x0a00},
+ {0x0958, 0xbb80},
+};
+
+static const struct hi556_reg mode_1296x972_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8259},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7167},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0122},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0404},
+ {0x0012, 0x000c},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0022},
+ {0x002a, 0x002b},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x3311},
+ {0x0030, 0x3311},
+ {0x0032, 0x3311},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0510},
+ {0x0a14, 0x03cc},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0308},
+ {0x0806, 0x0100},
+ {0x0a04, 0x016a},
+ {0x090e, 0x0010},
+ {0x090c, 0x09c0},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc106},
+ {0x0916, 0x040e},
+ {0x0918, 0x0304},
+ {0x091a, 0x0708},
+ {0x091c, 0x0e06},
+ {0x091e, 0x0300},
+ {0x0958, 0xbb80},
+};
+
+static const char * const hi556_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+};
+
+static const s64 link_freq_menu_items[] = {
+ HI556_LINK_FREQ_437MHZ,
+};
+
+static const struct hi556_link_freq_config link_freq_configs[] = {
+ [HI556_LINK_FREQ_437MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_874mbps),
+ .regs = mipi_data_rate_874mbps,
+ }
+ }
+};
+
+static const struct hi556_mode supported_modes[] = {
+ {
+ .width = 2592,
+ .height = 1944,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ },
+ {
+ .width = 1296,
+ .height = 972,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x972_regs),
+ .regs = mode_1296x972_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ }
+};
+
+struct hi556 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct hi556_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HI556_DATA_LANES;
+
+ do_div(pixel_rate, HI556_RGB_DEPTH);
+
+ return pixel_rate;
+}
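For the single supported link frequency this evaluates to 437000000 Hz * 2 (double data rate) * 2 lanes / 10 bits per pixel:

	/* to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX) == 174800000 pix/s */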
+
+static int hi556_read_reg(struct hi556 *hi556, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
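Byte layout example for the helper above: writing HI556_MODE_STREAMING (0x0100) to HI556_REG_MODE_SELECT (0x0a00) as a 16-bit value shifts the value left by 8 * (4 - 2) bits so its two significant bytes follow the big-endian address:

	hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
			HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
	/* buf = {0x0a, 0x00, 0x01, 0x00}; 4 bytes go on the wire */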
+
+static int hi556_write_reg_list(struct hi556 *hi556,
+ const struct hi556_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = hi556_write_reg(hi556, r_list->regs[i].address,
+ HI556_REG_VALUE_16BIT,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hi556_update_digital_gain(struct hi556 *hi556, u32 d_gain)
+{
+ int ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GR_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GB_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_R_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return hi556_write_reg(hi556, HI556_REG_MWB_B_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+}
+
+static int hi556_test_pattern(struct hi556 *hi556, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ if (pattern) {
+ ret = hi556_read_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT,
+ val | HI556_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN,
+ HI556_REG_VALUE_08BIT, pattern);
+}
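+
+/*
+ * Note on the sequence above: selecting a non-zero pattern first sets
+ * the TPG enable bit in the ISP register via a read-modify-write, then
+ * picks the pattern; selecting pattern 0 only clears the pattern
+ * register and leaves the enable bit as it was.
+ */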
+
+static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi556 *hi556 = container_of(ctrl->handler,
+ struct hi556, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi556->cur_mode->height + ctrl->val -
+ HI556_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi556->exposure,
+ hi556->exposure->minimum,
+ exposure_max, hi556->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi556_write_reg(hi556, HI556_REG_ANALOG_GAIN,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi556_update_digital_gain(hi556, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = hi556_write_reg(hi556, HI556_REG_EXPOSURE,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ ret = hi556_write_reg(hi556, HI556_REG_FLL,
+ HI556_REG_VALUE_16BIT,
+ hi556->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi556_test_pattern(hi556, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
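+
+/*
+ * A worked example of the V4L2_CID_VBLANK propagation above, for the
+ * 2592x1944 mode: with a vblank value v, the exposure limit becomes
+ * 1944 + v - HI556_EXPOSURE_MAX_MARGIN, i.e. exposure may never reach
+ * into the last few lines of the frame (the margin value is defined
+ * earlier in this file).
+ */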
+
+static const struct v4l2_ctrl_ops hi556_ctrl_ops = {
+ .s_ctrl = hi556_set_ctrl,
+};
+
+static int hi556_init_controls(struct hi556 *hi556)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &hi556->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi556->mutex;
+ hi556->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (hi556->link_freq)
+ hi556->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi556->pixel_rate = v4l2_ctrl_new_std
+ (ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX),
+ 1,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX));
+ hi556->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi556->cur_mode->fll_min -
+ hi556->cur_mode->height,
+ HI556_FLL_MAX -
+ hi556->cur_mode->height, 1,
+ hi556->cur_mode->fll_def -
+ hi556->cur_mode->height);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ hi556->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi556->hblank)
+ hi556->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI556_ANAL_GAIN_MIN, HI556_ANAL_GAIN_MAX,
+ HI556_ANAL_GAIN_STEP, HI556_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI556_DGTL_GAIN_MIN, HI556_DGTL_GAIN_MAX,
+ HI556_DGTL_GAIN_STEP, HI556_DGTL_GAIN_DEFAULT);
+ exposure_max = hi556->cur_mode->fll_def - HI556_EXPOSURE_MAX_MARGIN;
+ hi556->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI556_EXPOSURE_MIN, exposure_max,
+ HI556_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi556_test_pattern_menu) - 1,
+ 0, 0, hi556_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ hi556->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void hi556_assign_pad_format(const struct hi556_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int hi556_start_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ const struct hi556_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = hi556->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &hi556->cur_mode->reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(hi556->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hi556_stop_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+
+ if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi556->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi556->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&hi556->mutex);
+ return ret;
+ }
+
+ ret = hi556_start_streaming(hi556);
+ if (ret) {
+ enable = 0;
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+
+ hi556->streaming = enable;
+ mutex_unlock(&hi556->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused hi556_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming)
+ hi556_stop_streaming(hi556);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused hi556_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+ int ret;
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming) {
+ ret = hi556_start_streaming(hi556);
+ if (ret)
+ goto error;
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+
+error:
+ hi556_stop_streaming(hi556);
+ hi556->streaming = 0;
+ mutex_unlock(&hi556->mutex);
+ return ret;
+}
+
+static int hi556_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ const struct hi556_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ hi556->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(hi556->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->fll_def - mode->height;
+ __v4l2_ctrl_modify_range(hi556->vblank,
+ mode->fll_min - mode->height,
+ HI556_FLL_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi556->vblank, vblank_def);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi556->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
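+
+/*
+ * v4l2_find_nearest_size() picks the supported mode closest to the
+ * requested size, so e.g. a request for 1600x900 resolves to the
+ * 1296x972 mode rather than 2592x1944.
+ */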
+
+static int hi556_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg,
+ fmt->pad);
+ else
+ hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int hi556_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi556_video_ops = {
+ .s_stream = hi556_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi556_pad_ops = {
+ .set_fmt = hi556_set_format,
+ .get_fmt = hi556_get_format,
+ .enum_mbus_code = hi556_enum_mbus_code,
+ .enum_frame_size = hi556_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops hi556_subdev_ops = {
+ .video = &hi556_video_ops,
+ .pad = &hi556_pad_ops,
+};
+
+static const struct media_entity_operations hi556_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops hi556_internal_ops = {
+ .open = hi556_open,
+};
+
+static int hi556_identify_module(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ int ret;
+ u32 val;
+
+ ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID,
+ HI556_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != HI556_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HI556_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int hi556_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret = 0;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != HI556_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
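+
+/*
+ * The nested loop above requires every entry of link_freq_menu_items
+ * to be present in the firmware-provided link-frequencies list;
+ * probing fails if any advertised menu frequency is missing.
+ */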
+
+static int hi556_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_probe(struct i2c_client *client)
+{
+ struct hi556 *hi556;
+ int ret;
+
+ ret = hi556_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL);
+ if (!hi556)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
+ ret = hi556_identify_module(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&hi556->mutex);
+ hi556->cur_mode = &supported_modes[0];
+ ret = hi556_init_controls(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ hi556->sd.internal_ops = &hi556_internal_ops;
+ hi556->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi556->sd.entity.ops = &hi556_subdev_entity_ops;
+ hi556->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&hi556->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&hi556->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi556->sd.ctrl_handler);
+ mutex_destroy(&hi556->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops hi556_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hi556_acpi_ids[] = {
+ {"INT3537"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids);
+#endif
+
+static struct i2c_driver hi556_i2c_driver = {
+ .driver = {
+ .name = "hi556",
+ .pm = &hi556_pm_ops,
+ .acpi_match_table = ACPI_PTR(hi556_acpi_ids),
+ },
+ .probe_new = hi556_probe,
+ .remove = hi556_remove,
+};
+
+module_i2c_driver(hi556_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("Hynix HI556 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 159a3a604f0e..adcaaa8c86d1 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -47,6 +47,7 @@ struct imx214 {
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *unit_size;
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
@@ -948,6 +949,10 @@ static int imx214_probe(struct i2c_client *client)
static const s64 link_freq[] = {
IMX214_DEFAULT_LINK_FREQ,
};
+ static const struct v4l2_area unit_size = {
+ .width = 1120,
+ .height = 1120,
+ };
int ret;
ret = imx214_parse_fwnode(dev);
@@ -1029,6 +1034,10 @@ static int imx214_probe(struct i2c_client *client)
V4L2_CID_EXPOSURE,
0, 3184, 1, 0x0c70);
+ imx214->unit_size = v4l2_ctrl_new_std_compound(&imx214->ctrls,
+ NULL,
+ V4L2_CID_UNIT_CELL_SIZE,
+ v4l2_ctrl_ptr_create((void *)&unit_size));
ret = imx214->ctrls.error;
if (ret) {
dev_err(&client->dev, "%s control init failed (%d)\n",
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
new file mode 100644
index 000000000000..f7678e5a5d87
--- /dev/null
+++ b/drivers/media/i2c/imx290.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony IMX290 CMOS Image Sensor Driver
+ *
+ * Copyright (C) 2019 FRAMOS GmbH.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define IMX290_STANDBY 0x3000
+#define IMX290_REGHOLD 0x3001
+#define IMX290_XMSTA 0x3002
+#define IMX290_GAIN 0x3014
+
+#define IMX290_DEFAULT_LINK_FREQ 445500000
+
+static const char * const imx290_supply_name[] = {
+ "vdda",
+ "vddd",
+ "vdddo",
+};
+
+#define IMX290_NUM_SUPPLIES ARRAY_SIZE(imx290_supply_name)
+
+struct imx290_regval {
+ u16 reg;
+ u8 val;
+};
+
+struct imx290_mode {
+ u32 width;
+ u32 height;
+ u32 pixel_rate;
+ u32 link_freq_index;
+
+ const struct imx290_regval *data;
+ u32 data_size;
+};
+
+struct imx290 {
+ struct device *dev;
+ struct clk *xclk;
+ struct regmap *regmap;
+
+ struct v4l2_subdev sd;
+ struct v4l2_fwnode_endpoint ep;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt current_format;
+ const struct imx290_mode *current_mode;
+
+ struct regulator_bulk_data supplies[IMX290_NUM_SUPPLIES];
+ struct gpio_desc *rst_gpio;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+
+ struct mutex lock;
+};
+
+struct imx290_pixfmt {
+ u32 code;
+};
+
+static const struct imx290_pixfmt imx290_formats[] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10 },
+};
+
+static const struct regmap_config imx290_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct imx290_regval imx290_global_init_settings[] = {
+ { 0x3007, 0x00 },
+ { 0x3009, 0x00 },
+ { 0x3018, 0x65 },
+ { 0x3019, 0x04 },
+ { 0x301a, 0x00 },
+ { 0x3443, 0x03 },
+ { 0x3444, 0x20 },
+ { 0x3445, 0x25 },
+ { 0x3407, 0x03 },
+ { 0x303a, 0x0c },
+ { 0x3040, 0x00 },
+ { 0x3041, 0x00 },
+ { 0x303c, 0x00 },
+ { 0x303d, 0x00 },
+ { 0x3042, 0x9c },
+ { 0x3043, 0x07 },
+ { 0x303e, 0x49 },
+ { 0x303f, 0x04 },
+ { 0x304b, 0x0a },
+ { 0x300f, 0x00 },
+ { 0x3010, 0x21 },
+ { 0x3012, 0x64 },
+ { 0x3016, 0x09 },
+ { 0x3070, 0x02 },
+ { 0x3071, 0x11 },
+ { 0x309b, 0x10 },
+ { 0x309c, 0x22 },
+ { 0x30a2, 0x02 },
+ { 0x30a6, 0x20 },
+ { 0x30a8, 0x20 },
+ { 0x30aa, 0x20 },
+ { 0x30ac, 0x20 },
+ { 0x30b0, 0x43 },
+ { 0x3119, 0x9e },
+ { 0x311c, 0x1e },
+ { 0x311e, 0x08 },
+ { 0x3128, 0x05 },
+ { 0x313d, 0x83 },
+ { 0x3150, 0x03 },
+ { 0x317e, 0x00 },
+ { 0x32b8, 0x50 },
+ { 0x32b9, 0x10 },
+ { 0x32ba, 0x00 },
+ { 0x32bb, 0x04 },
+ { 0x32c8, 0x50 },
+ { 0x32c9, 0x10 },
+ { 0x32ca, 0x00 },
+ { 0x32cb, 0x04 },
+ { 0x332c, 0xd3 },
+ { 0x332d, 0x10 },
+ { 0x332e, 0x0d },
+ { 0x3358, 0x06 },
+ { 0x3359, 0xe1 },
+ { 0x335a, 0x11 },
+ { 0x3360, 0x1e },
+ { 0x3361, 0x61 },
+ { 0x3362, 0x10 },
+ { 0x33b0, 0x50 },
+ { 0x33b2, 0x1a },
+ { 0x33b3, 0x04 },
+};
+
+static const struct imx290_regval imx290_1080p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x00 },
+ { 0x303a, 0x0c },
+ { 0x3414, 0x0a },
+ { 0x3472, 0x80 },
+ { 0x3473, 0x07 },
+ { 0x3418, 0x38 },
+ { 0x3419, 0x04 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x18 },
+ { 0x305d, 0x03 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x57 },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x37 },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x1f },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x1f },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x1f },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x77 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x1f },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0x98 },
+ { 0x301d, 0x08 },
+};
+
+static const struct imx290_regval imx290_720p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x10 },
+ { 0x303a, 0x06 },
+ { 0x3414, 0x04 },
+ { 0x3472, 0x00 },
+ { 0x3473, 0x05 },
+ { 0x3418, 0xd0 },
+ { 0x3419, 0x02 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x20 },
+ { 0x305d, 0x00 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x4f },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x2f },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x17 },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x17 },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x17 },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x57 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x17 },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0xe4 },
+ { 0x301d, 0x0c },
+};
+
+static const struct imx290_regval imx290_10bit_settings[] = {
+ { 0x3005, 0x00},
+ { 0x3046, 0x00},
+ { 0x3129, 0x1d},
+ { 0x317c, 0x12},
+ { 0x31ec, 0x37},
+ { 0x3441, 0x0a},
+ { 0x3442, 0x0a},
+ { 0x300a, 0x3c},
+ { 0x300b, 0x00},
+};
+
+/* supported link frequencies */
+static const s64 imx290_link_freq[] = {
+ IMX290_DEFAULT_LINK_FREQ,
+};
+
+/* Mode configs */
+static const struct imx290_mode imx290_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .data = imx290_1080p_settings,
+ .data_size = ARRAY_SIZE(imx290_1080p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .data = imx290_720p_settings,
+ .data_size = ARRAY_SIZE(imx290_720p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+};
+
+static inline struct imx290 *to_imx290(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx290, sd);
+}
+
+static inline int imx290_read_reg(struct imx290 *imx290, u16 addr, u8 *value)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(imx290->regmap, addr, &regval);
+ if (ret) {
+ dev_err(imx290->dev, "I2C read failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ *value = regval & 0xff;
+
+ return 0;
+}
+
+static int imx290_write_reg(struct imx290 *imx290, u16 addr, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(imx290->regmap, addr, value);
+ if (ret) {
+ dev_err(imx290->dev, "I2C write failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx290_set_register_array(struct imx290 *imx290,
+ const struct imx290_regval *settings,
+ unsigned int num_settings)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_settings; ++i, ++settings) {
+ ret = imx290_write_reg(imx290, settings->reg, settings->val);
+ if (ret < 0)
+ return ret;
+
+ /* Settle time is 10ms for all registers */
+ msleep(10);
+ }
+
+ return 0;
+}
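+
+/*
+ * The unconditional 10 ms settle per register makes table programming
+ * the dominant start-up cost: the ~60-entry
+ * imx290_global_init_settings table alone takes roughly 600 ms.
+ */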
+
+static int imx290_write_buffered_reg(struct imx290 *imx290, u16 address_low,
+ u8 nr_regs, u32 value)
+{
+ unsigned int i;
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x01);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ for (i = 0; i < nr_regs; i++) {
+ ret = imx290_write_reg(imx290, address_low + i,
+ (u8)(value >> (i * 8)));
+ if (ret) {
+ dev_err(imx290->dev, "Error writing buffered registers\n");
+ return ret;
+ }
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x00);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ return ret;
+}
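+
+/*
+ * REGHOLD acts as a group hold: the bytes of a multi-register value
+ * are latched together when the hold is released. The value is split
+ * little-endian across consecutive addresses, e.g. nr_regs = 2 with
+ * value 0x0465 writes 0x65 to address_low and 0x04 to address_low + 1.
+ */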
+
+static int imx290_set_gain(struct imx290 *imx290, u32 value)
+{
+ int ret;
+
+ ret = imx290_write_buffered_reg(imx290, IMX290_GAIN, 1, value);
+ if (ret)
+ dev_err(imx290->dev, "Unable to write gain\n");
+
+ return ret;
+}
+
+/* Stop streaming */
+static int imx290_stop_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x01);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x01);
+}
+
+static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx290 *imx290 = container_of(ctrl->handler,
+ struct imx290, ctrls);
+ int ret = 0;
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(imx290->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ret = imx290_set_gain(imx290, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(imx290->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx290_ctrl_ops = {
+ .s_ctrl = imx290_set_ctrl,
+};
+
+static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(imx290_formats))
+ return -EINVAL;
+
+ code->code = imx290_formats[code->index].code;
+
+ return 0;
+}
+
+static int imx290_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&imx290->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ framefmt = v4l2_subdev_get_try_format(&imx290->sd, cfg,
+ fmt->pad);
+ else
+ framefmt = &imx290->current_format;
+
+ fmt->format = *framefmt;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ const struct imx290_mode *mode;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int i;
+
+ mutex_lock(&imx290->lock);
+
+ mode = v4l2_find_nearest_size(imx290_modes,
+ ARRAY_SIZE(imx290_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+
+ for (i = 0; i < ARRAY_SIZE(imx290_formats); i++)
+ if (imx290_formats[i].code == fmt->format.code)
+ break;
+
+ if (i >= ARRAY_SIZE(imx290_formats))
+ i = 0;
+
+ fmt->format.code = imx290_formats[i].code;
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ } else {
+ format = &imx290->current_format;
+ __v4l2_ctrl_s_ctrl(imx290->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(imx290->pixel_rate, mode->pixel_rate);
+
+ imx290->current_mode = mode;
+ }
+
+ *format = fmt->format;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.width = 1920;
+ fmt.format.height = 1080;
+
+ imx290_set_fmt(subdev, cfg, &fmt);
+
+ return 0;
+}
+
+static int imx290_write_current_format(struct imx290 *imx290,
+ struct v4l2_mbus_framefmt *format)
+{
+ int ret;
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ ret = imx290_set_register_array(imx290, imx290_10bit_settings,
+ ARRAY_SIZE(
+ imx290_10bit_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set format registers\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(imx290->dev, "Unknown pixel format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx290_start_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ /* Set init register settings */
+ ret = imx290_set_register_array(imx290, imx290_global_init_settings,
+ ARRAY_SIZE(
+ imx290_global_init_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set init registers\n");
+ return ret;
+ }
+
+ /* Set current frame format */
+ ret = imx290_write_current_format(imx290, &imx290->current_format);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set frame format\n");
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ ret = imx290_set_register_array(imx290, imx290->current_mode->data,
+ imx290->current_mode->data_size);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set current mode\n");
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = v4l2_ctrl_handler_setup(imx290->sd.ctrl_handler);
+ if (ret) {
+ dev_err(imx290->dev, "Could not sync v4l2 controls\n");
+ return ret;
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x00);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ /* Start streaming */
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x00);
+}
+
+static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret = 0;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(imx290->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(imx290->dev);
+ goto unlock_and_return;
+ }
+
+ ret = imx290_start_streaming(imx290);
+ if (ret) {
+ dev_err(imx290->dev, "Start stream failed\n");
+ pm_runtime_put(imx290->dev);
+ goto unlock_and_return;
+ }
+ } else {
+ imx290_stop_streaming(imx290);
+ pm_runtime_put(imx290->dev);
+ }
+
+unlock_and_return:
+
+ return ret;
+}
+
+static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX290_NUM_SUPPLIES; i++)
+ imx290->supplies[i].supply = imx290_supply_name[i];
+
+ return devm_regulator_bulk_get(dev, IMX290_NUM_SUPPLIES,
+ imx290->supplies);
+}
+
+static int imx290_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret;
+
+ ret = clk_prepare_enable(imx290->xclk);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(IMX290_NUM_SUPPLIES, imx290->supplies);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable regulators\n");
+ clk_disable_unprepare(imx290->xclk);
+ return ret;
+ }
+
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 1);
+ usleep_range(30000, 31000);
+
+ return 0;
+}
+
+static int imx290_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ clk_disable_unprepare(imx290->xclk);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 0);
+ regulator_bulk_disable(IMX290_NUM_SUPPLIES, imx290->supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx290_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx290_power_on, imx290_power_off, NULL)
+};
+
+static const struct v4l2_subdev_video_ops imx290_video_ops = {
+ .s_stream = imx290_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx290_pad_ops = {
+ .init_cfg = imx290_entity_init_cfg,
+ .enum_mbus_code = imx290_enum_mbus_code,
+ .get_fmt = imx290_get_fmt,
+ .set_fmt = imx290_set_fmt,
+};
+
+static const struct v4l2_subdev_ops imx290_subdev_ops = {
+ .video = &imx290_video_ops,
+ .pad = &imx290_pad_ops,
+};
+
+static const struct media_entity_operations imx290_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int imx290_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *endpoint;
+ struct imx290 *imx290;
+ u32 xclk_freq;
+ int ret;
+
+ imx290 = devm_kzalloc(dev, sizeof(*imx290), GFP_KERNEL);
+ if (!imx290)
+ return -ENOMEM;
+
+ imx290->dev = dev;
+ imx290->regmap = devm_regmap_init_i2c(client, &imx290_regmap_config);
+ if (IS_ERR(imx290->regmap)) {
+ dev_err(dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &imx290->ep);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "Parsing endpoint node failed\n");
+ goto free_err;
+ }
+
+ if (!imx290->ep.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ if (imx290->ep.link_frequencies[0] != IMX290_DEFAULT_LINK_FREQ) {
+ dev_err(dev, "Unsupported link frequency\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Only CSI2 is supported for now */
+ if (imx290->ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Unsupported bus type, should be CSI2\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Set default mode to max resolution */
+ imx290->current_mode = &imx290_modes[0];
+
+ /* get system clock (xclk) */
+ imx290->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(imx290->xclk)) {
+ dev_err(dev, "Could not get xclk");
+ ret = PTR_ERR(imx290->xclk);
+ goto free_err;
+ }
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not get xclk frequency\n");
+ goto free_err;
+ }
+
+ /* external clock must be 37.125 MHz */
+ if (xclk_freq != 37125000) {
+ dev_err(dev, "External clock frequency %u is not supported\n",
+ xclk_freq);
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ ret = clk_set_rate(imx290->xclk, xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not set xclk frequency\n");
+ goto free_err;
+ }
+
+ ret = imx290_get_regulators(dev, imx290);
+ if (ret < 0) {
+ dev_err(dev, "Cannot get regulators\n");
+ goto free_err;
+ }
+
+ imx290->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(imx290->rst_gpio)) {
+ dev_err(dev, "Cannot get reset gpio\n");
+ ret = PTR_ERR(imx290->rst_gpio);
+ goto free_err;
+ }
+
+ mutex_init(&imx290->lock);
+
+ v4l2_ctrl_handler_init(&imx290->ctrls, 3);
+
+ v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_GAIN, 0, 72, 1, 0);
+ imx290->link_freq =
+ v4l2_ctrl_new_int_menu(&imx290->ctrls,
+ &imx290_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(imx290_link_freq) - 1,
+ 0, imx290_link_freq);
+ if (imx290->link_freq)
+ imx290->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ imx290->pixel_rate = v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1,
+ imx290_modes[0].pixel_rate);
+
+ imx290->sd.ctrl_handler = &imx290->ctrls;
+
+ if (imx290->ctrls.error) {
+ dev_err(dev, "Control initialization error %d\n",
+ imx290->ctrls.error);
+ ret = imx290->ctrls.error;
+ goto free_ctrl;
+ }
+
+ v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
+ imx290->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx290->sd.dev = &client->dev;
+ imx290->sd.entity.ops = &imx290_subdev_entity_ops;
+ imx290->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ imx290->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&imx290->sd.entity, 1, &imx290->pad);
+ if (ret < 0) {
+ dev_err(dev, "Could not register media entity\n");
+ goto free_ctrl;
+ }
+
+ ret = v4l2_async_register_subdev(&imx290->sd);
+ if (ret < 0) {
+ dev_err(dev, "Could not register v4l2 device\n");
+ goto free_entity;
+ }
+
+ /* Power on the device to match runtime PM state below */
+ ret = imx290_power_on(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not power on the device\n");
+ goto free_entity;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return 0;
+
+free_entity:
+ media_entity_cleanup(&imx290->sd.entity);
+free_ctrl:
+ v4l2_ctrl_handler_free(&imx290->ctrls);
+ mutex_destroy(&imx290->lock);
+free_err:
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return ret;
+}
+
+static int imx290_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ mutex_destroy(&imx290->lock);
+
+ pm_runtime_disable(imx290->dev);
+ if (!pm_runtime_status_suspended(imx290->dev))
+ imx290_power_off(imx290->dev);
+ pm_runtime_set_suspended(imx290->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx290_of_match[] = {
+ { .compatible = "sony,imx290" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx290_of_match);
+
+static struct i2c_driver imx290_i2c_driver = {
+ .probe_new = imx290_probe,
+ .remove = imx290_remove,
+ .driver = {
+ .name = "imx290",
+ .pm = &imx290_pm_ops,
+ .of_match_table = of_match_ptr(imx290_of_match),
+ },
+};
+
+module_i2c_driver(imx290_i2c_driver);
+
+MODULE_DESCRIPTION("Sony IMX290 CMOS Image Sensor Driver");
+MODULE_AUTHOR("FRAMOS GmbH");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index d8a8853f9a2b..c76ccf67a909 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -134,7 +134,7 @@ static int lm3646_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct lm3646_flash *flash = to_lm3646_flash(ctrl);
unsigned int reg_val;
- int rval = -EINVAL;
+ int rval;
switch (ctrl->id) {
case V4L2_CID_FLASH_LED_MODE:
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 19a3ceea3bc2..506a30e69ced 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -591,8 +591,8 @@ static int max2175_set_lo_freq(struct max2175 *ctx, u32 lo_freq)
lo_freq *= lo_mult;
int_desired = lo_freq / ctx->xtal_freq;
- frac_desired = div_u64((u64)(lo_freq % ctx->xtal_freq) << 20,
- ctx->xtal_freq);
+ frac_desired = div64_ul((u64)(lo_freq % ctx->xtal_freq) << 20,
+ ctx->xtal_freq);
/* Check CSM is not busy */
ret = max2175_poll_csm_ready(ctx);
diff --git a/drivers/media/i2c/max2175.h b/drivers/media/i2c/max2175.h
index 1ece587c153d..4c722ea3e5f1 100644
--- a/drivers/media/i2c/max2175.h
+++ b/drivers/media/i2c/max2175.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Maxim Integrated MAX2175 RF to Bits tuner driver
*
* This driver & most of the hard coded values are based on the reference
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 5613072908ac..210ea76adb53 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -167,7 +167,7 @@ static int multi_reg_write(struct i2c_client *client,
static int mt9m001_init(struct i2c_client *client)
{
- const struct mt9m001_reg init_regs[] = {
+ static const struct mt9m001_reg init_regs[] = {
/*
* Issue a soft reset. This returns all registers to their
* default values.
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index f4ded0669ff9..42f64175a6df 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Omnivision OV2659 CMOS Image Sensor driver
*
@@ -5,46 +6,21 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/media.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
-#include <media/media-entity.h>
#include <media/i2c/ov2659.h>
-#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
-#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#define DRIVER_NAME "ov2659"
@@ -232,6 +208,10 @@ struct ov2659 {
struct sensor_register *format_ctrl_regs;
struct ov2659_pll_ctrl pll;
int streaming;
+ /* used to control the sensor PWDN pin */
+ struct gpio_desc *pwdn_gpio;
+ /* used to control the sensor RESETB pin */
+ struct gpio_desc *resetb_gpio;
};
static const struct sensor_register ov2659_init_regs[] = {
@@ -419,10 +399,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};
@@ -661,7 +645,7 @@ static struct sensor_register ov2659_vga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -709,7 +693,7 @@ static struct sensor_register ov2659_qvga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -1198,14 +1182,27 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
/* Stop Streaming Sequence */
ov2659_set_streaming(ov2659, 0);
ov2659->streaming = on;
+ pm_runtime_put(&client->dev);
goto unlock;
}
- ov2659_set_pixel_clock(ov2659);
- ov2659_set_frame_size(ov2659);
- ov2659_set_format(ov2659);
- ov2659_set_streaming(ov2659, 1);
- ov2659->streaming = on;
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto unlock;
+ }
+
+ ret = ov2659_init(sd, 0);
+ if (!ret)
+ ret = ov2659_set_pixel_clock(ov2659);
+ if (!ret)
+ ret = ov2659_set_frame_size(ov2659);
+ if (!ret)
+ ret = ov2659_set_format(ov2659);
+ if (!ret) {
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+ }
unlock:
mutex_unlock(&ov2659->lock);
@@ -1239,12 +1236,18 @@ static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2659 *ov2659 =
container_of(ctrl->handler, struct ov2659, ctrls);
+ struct i2c_client *client = ov2659->client;
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
return ov2659_set_test_pattern(ov2659, ctrl->val);
}
+ pm_runtime_put(&client->dev);
return 0;
}
@@ -1257,6 +1260,39 @@ static const char * const ov2659_test_pattern_menu[] = {
"Vertical Color Bars",
};
+static int ov2659_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 1);
+
+ return 0;
+}
+
+static int ov2659_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 0);
+
+ if (ov2659->resetb_gpio) {
+ gpiod_set_value(ov2659->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(ov2659->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 subdev internal operations
*/
@@ -1330,13 +1366,13 @@ static int ov2659_detect(struct v4l2_subdev *sd)
unsigned short id;
id = OV265X_ID(pid, ver);
- if (id != OV2659_ID)
+ if (id != OV2659_ID) {
dev_err(&client->dev,
"Sensor detection failed (%04X, %d)\n",
id, ret);
- else {
+ ret = -ENODEV;
+ } else {
dev_info(&client->dev, "Found OV%04X sensor\n", id);
- ret = ov2659_init(sd, 0);
}
}
@@ -1413,6 +1449,18 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->xvclk_frequency > 27000000)
return -EINVAL;
+ /* Optional gpio, don't fail if not present */
+ ov2659->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ov2659->pwdn_gpio))
+ return PTR_ERR(ov2659->pwdn_gpio);
+
+ /* Optional gpio, don't fail if not present */
+ ov2659->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2659->resetb_gpio))
+ return PTR_ERR(ov2659->resetb_gpio);
+
v4l2_ctrl_handler_init(&ov2659->ctrls, 2);
ov2659->link_frequency =
v4l2_ctrl_new_std(&ov2659->ctrls, &ov2659_ctrl_ops,
@@ -1458,6 +1506,8 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->frame_size = &ov2659_framesizes[2];
ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
+ ov2659_power_on(&client->dev);
+
ret = ov2659_detect(sd);
if (ret < 0)
goto error;
@@ -1471,10 +1521,15 @@ static int ov2659_probe(struct i2c_client *client)
dev_info(&client->dev, "%s sensor driver registered !!\n", sd->name);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
return 0;
error:
v4l2_ctrl_handler_free(&ov2659->ctrls);
+ ov2659_power_off(&client->dev);
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
return ret;
@@ -1490,9 +1545,18 @@ static int ov2659_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov2659_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
return 0;
}
+static const struct dev_pm_ops ov2659_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov2659_power_off, ov2659_power_on, NULL)
+};
+
static const struct i2c_device_id ov2659_id[] = {
{ "ov2659", 0 },
{ /* sentinel */ },
@@ -1510,6 +1574,7 @@ MODULE_DEVICE_TABLE(of, ov2659_of_match);
static struct i2c_driver ov2659_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
+ .pm = &ov2659_pm_ops,
.of_match_table = of_match_ptr(ov2659_of_match),
},
.probe_new = ov2659_probe,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 500d9bbff10b..5e495c833d32 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -193,6 +193,7 @@ struct ov5640_mode_info {
struct ov5640_ctrls {
struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *pixel_rate;
struct {
struct v4l2_ctrl *auto_exp;
struct v4l2_ctrl *exposure;
@@ -489,7 +490,6 @@ static const struct reg_value ov5640_setting_720P_1280_720[] = {
};
static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
- {0x3008, 0x42, 0, 0},
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
@@ -517,7 +517,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
{0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0},
{0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
- {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
+ {0x4005, 0x1a, 0, 0},
};
static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
@@ -1611,9 +1611,24 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
!(mode->hact == 640 && mode->vact == 480))
return NULL;
+ /* 2592x1944 only works at 15fps max */
+ if ((mode->hact == 2592 && mode->vact == 1944) &&
+ fr > OV5640_15_FPS)
+ return NULL;
+
return mode;
}
+static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
+{
+ u64 rate;
+
+ rate = sensor->current_mode->vtot * sensor->current_mode->htot;
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ return rate;
+}
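+
+/*
+ * The rate returned here is in pixels per second; ov5640_set_mode()
+ * below multiplies it by 16 bits per pixel to derive the link rate.
+ * With hypothetical mode totals htot = 2500 and vtot = 1120 at 30 fps
+ * this gives 2500 * 1120 * 30 = 84000000 pixels/s.
+ */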
+
/*
* sensor changes between scaling and subsampling, go through
* exposure calculation
@@ -1818,8 +1833,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
* All the formats we support have 16 bits per pixel and seem to require
* the same rate as YUV, so we can just use 16 bpp all the time.
*/
- rate = mode->vtot * mode->htot * 16;
- rate *= ov5640_framerates[sensor->current_fr];
+ rate = ov5640_calc_pixel_rate(sensor) * 16;
if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
ret = ov5640_set_mipi_pclk(sensor, rate);
@@ -2233,6 +2247,8 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2657,6 +2673,11 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* we can use our own mutex for the ctrl lock */
hdl->lock = &sensor->lock;
+ /* Clock related controls */
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ 0, INT_MAX, 1,
+ ov5640_calc_pixel_rate(sensor));
+
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
V4L2_CID_AUTO_WHITE_BALANCE,
@@ -2704,6 +2725,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
goto free_ctrls;
}
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2816,6 +2838,9 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->frame_interval = fi->interval;
sensor->current_mode = mode;
sensor->pending_mode_change = true;
+
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
}
out:
mutex_unlock(&sensor->lock);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 34b7046d9702..d6cd15bb699a 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1325,7 +1325,7 @@ static int ov5695_probe(struct i2c_client *client,
goto err_power_off;
#endif
- ret = v4l2_async_register_subdev(sd);
+ ret = v4l2_async_register_subdev_sensor_common(sd);
if (ret) {
dev_err(dev, "v4l2 async register subdev failed\n");
goto err_clean_entity;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 5b9af5e5b7f1..91906b94f978 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -124,12 +124,13 @@
#define DEF_AECH 0x4D
-#define CLKRC_6MHz 0x00
+#define CLKRC_8MHz 0x00
#define CLKRC_12MHz 0x40
#define CLKRC_16MHz 0x80
#define CLKRC_24MHz 0xc0
#define CLKRC_DIV_MASK 0x3f
#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
+#define DEF_CLKRC 0x00
#define COMA_RESET BIT(7)
#define COMA_QCIF BIT(5)
@@ -196,13 +197,33 @@ struct ov6650 {
struct v4l2_clk *clk;
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
- unsigned long pclk_limit; /* from host */
- unsigned long pclk_max; /* from resolution and format */
struct v4l2_fract tpf; /* as requested with s_frame_interval */
u32 code;
- enum v4l2_colorspace colorspace;
};
+struct ov6650_xclk {
+ unsigned long rate;
+ u8 clkrc;
+};
+
+static const struct ov6650_xclk ov6650_xclk[] = {
+{
+ .rate = 8000000,
+ .clkrc = CLKRC_8MHz,
+},
+{
+ .rate = 12000000,
+ .clkrc = CLKRC_12MHz,
+},
+{
+ .rate = 16000000,
+ .clkrc = CLKRC_16MHz,
+},
+{
+ .rate = 24000000,
+ .clkrc = CLKRC_24MHz,
+},
+};
static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
@@ -213,6 +234,17 @@ static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_Y8_1X8,
};
+static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
+ .width = W_CIF,
+ .height = H_CIF,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
/* read a register */
static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -465,38 +497,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect rect = sel->r;
int ret;
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
- &rect.height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
- &rect.top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
- 0);
+ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
+ &sel->r.height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
+ &sel->r.top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
+ 1, 0);
- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
+ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
- priv->rect.left = rect.left;
+ priv->rect.width += priv->rect.left - sel->r.left;
+ priv->rect.left = sel->r.left;
ret = ov6650_reg_write(client, REG_HSTOP,
- (rect.left + rect.width) >> 1);
+ (sel->r.left + sel->r.width) >> 1);
}
if (!ret) {
- priv->rect.width = rect.width;
- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
+ priv->rect.width = sel->r.width;
+ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
}
if (!ret) {
- priv->rect.top = rect.top;
+ priv->rect.height += priv->rect.top - sel->r.top;
+ priv->rect.top = sel->r.top;
ret = ov6650_reg_write(client, REG_VSTOP,
- (rect.top + rect.height) >> 1);
+ (sel->r.top + sel->r.height) >> 1);
}
if (!ret)
- priv->rect.height = rect.height;
+ priv->rect.height = sel->r.height;
return ret;
}
@@ -512,12 +545,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- mf->colorspace = priv->colorspace;
- mf->field = V4L2_FIELD_NONE;
+ /* initialize response with default media bus frame format */
+ *mf = ov6650_def_fmt;
+
+ /* update media bus format code and frame size */
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+ } else {
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -526,22 +567,7 @@ static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
return width > rect->width >> 1 || height > rect->height >> 1;
}
-static u8 to_clkrc(struct v4l2_fract *timeperframe,
- unsigned long pclk_limit, unsigned long pclk_max)
-{
- unsigned long pclk;
-
- if (timeperframe->numerator && timeperframe->denominator)
- pclk = pclk_max * timeperframe->denominator /
- (FRAME_RATE_MAX * timeperframe->numerator);
- else
- pclk = pclk_max;
-
- if (pclk_limit && pclk_limit < pclk)
- pclk = pclk_limit;
-
- return (pclk_max - 1) / pclk;
-}
+#define to_clkrc(div) ((div) - 1)
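+/*
+ * CLKRC stores (divider - 1) in its CLKRC_DIV_MASK bits, so
+ * GET_CLKRC_DIV(to_clkrc(div)) == div. E.g. div = 4 is written as 3
+ * and scales the frame rate down to FRAME_RATE_MAX / 4.
+ */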
/* set the format we will capture in */
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
@@ -560,8 +586,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
.r.height = mf->height << half_scale,
};
u32 code = mf->code;
- unsigned long mclk, pclk;
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
+ u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
int ret;
/* select color matrix configuration for given color encoding */
@@ -610,58 +635,35 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
- priv->code = code;
if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {
coml_mask = COML_ONE_CHANNEL;
coml_set = 0;
- priv->pclk_max = 4000000;
} else {
coml_mask = 0;
coml_set = COML_ONE_CHANNEL;
- priv->pclk_max = 8000000;
}
- if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
- priv->colorspace = V4L2_COLORSPACE_SRGB;
- else if (code != 0)
- priv->colorspace = V4L2_COLORSPACE_JPEG;
-
if (half_scale) {
dev_dbg(&client->dev, "max resolution: QCIF\n");
coma_set |= COMA_QCIF;
- priv->pclk_max /= 2;
} else {
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
- priv->half_scale = half_scale;
-
- clkrc = CLKRC_12MHz;
- mclk = 12000000;
- priv->pclk_limit = 1334000;
- dev_dbg(&client->dev, "using 12MHz input clock\n");
-
- clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc);
- dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
- mclk / pclk, 10 * mclk % pclk / pclk);
ret = ov6650_set_selection(sd, NULL, &sel);
if (!ret)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
-
if (!ret) {
- mf->colorspace = priv->colorspace;
- mf->width = priv->rect.width >> half_scale;
- mf->height = priv->rect.height >> half_scale;
+ priv->half_scale = half_scale;
+
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
}
+ if (!ret)
+ priv->code = code;
+
return ret;
}
@@ -680,8 +682,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
&mf->height, 2, H_CIF, 1, 0);
- mf->field = V4L2_FIELD_NONE;
-
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
@@ -691,20 +691,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
/* fall through */
case MEDIA_BUS_FMT_SBGGR8_1X8:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov6650_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ /* store media bus format code and frame size in pad config */
+ cfg->try_fmt.width = mf->width;
+ cfg->try_fmt.height = mf->height;
+ cfg->try_fmt.code = mf->code;
+ /* return default mbus frame format updated with pad config */
+ *mf = ov6650_def_fmt;
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+
+ } else {
+ /* apply new media bus format code and frame size */
+ int ret = ov6650_s_fmt(sd, mf);
+
+ if (ret)
+ return ret;
+
+ /* return default format updated with active size and code */
+ *mf = ov6650_def_fmt;
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -725,9 +744,7 @@ static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- ival->interval.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf,
- priv->pclk_limit, priv->pclk_max));
- ival->interval.denominator = FRAME_RATE_MAX;
+ ival->interval = priv->tpf;
dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
ival->interval.numerator, ival->interval.denominator);
@@ -742,7 +759,6 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
struct ov6650 *priv = to_ov6650(client);
struct v4l2_fract *tpf = &ival->interval;
int div, ret;
- u8 clkrc;
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
@@ -754,19 +770,12 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
- /*
- * Keep result to be used as tpf limit
- * for subsequent clock divider calculations
- */
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
if (!ret) {
- tpf->numerator = GET_CLKRC_DIV(clkrc);
- tpf->denominator = FRAME_RATE_MAX;
+ priv->tpf.numerator = div;
+ priv->tpf.denominator = FRAME_RATE_MAX;
+
+ *tpf = priv->tpf;
}
return ret;
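[Editor's note] Note the ordering in the rework above: the divider is clamped, written to CLKRC, and only on success latched into priv->tpf, so the cached frame interval can never disagree with the hardware. A hedged condensation of that commit-on-success shape, reusing the driver's own symbols from the hunk:

/* Hedged sketch: commit cached state only after the register write
 * succeeds, so driver cache and silicon never diverge.
 */
static int ov6650_commit_div(struct i2c_client *client, struct ov6650 *priv,
			     int div)
{
	int ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div),
				 CLKRC_DIV_MASK);

	if (!ret) {
		priv->tpf.numerator = div;	/* commit on success */
		priv->tpf.denominator = FRAME_RATE_MAX;
	}
	return ret;
}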
@@ -788,7 +797,7 @@ static int ov6650_reset(struct i2c_client *client)
}
/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client)
+static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
{
int ret;
@@ -796,6 +805,8 @@ static int ov6650_prog_dflt(struct i2c_client *client)
ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+ if (!ret)
ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
return ret;
@@ -805,8 +816,10 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- u8 pidh, pidl, midh, midl;
- int ret;
+ const struct ov6650_xclk *xclk = NULL;
+ unsigned long rate;
+ u8 pidh, pidl, midh, midl;
+ int i, ret = 0;
priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -815,6 +828,33 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
return ret;
}
+ rate = v4l2_clk_get_rate(priv->clk);
+ for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ if (rate != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using host default clock rate %lukHz\n",
+ rate / 1000);
+ break;
+ }
+ for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ ret = v4l2_clk_set_rate(priv->clk, ov6650_xclk[i].rate);
+ if (ret || v4l2_clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
+ xclk->rate / 1000);
+ break;
+ }
+ if (!xclk) {
+ dev_err(&client->dev, "unable to get supported clock rate\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto eclkput;
+ }
+
ret = ov6650_s_power(sd, 1);
if (ret < 0)
goto eclkput;
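[Editor's note] The probe now negotiates the external clock in two passes: accept the host's current rate if it matches a table entry, otherwise actively try to set each supported rate in turn. A hedged, self-contained sketch of the pattern, with the v4l2_clk accessors replaced by a caller-supplied setter that returns 0 on success:

/* Hedged sketch of two-pass clock rate negotiation over a table. */
struct xclk_entry { unsigned long rate; unsigned char clkrc; };

static const struct xclk_entry *pick_xclk(const struct xclk_entry *tbl,
					  int n, unsigned long cur_rate,
					  int (*try_set)(unsigned long))
{
	int i;

	for (i = 0; i < n; i++)		/* pass 1: host default matches */
		if (cur_rate == tbl[i].rate)
			return &tbl[i];

	for (i = 0; i < n; i++)		/* pass 2: negotiate a rate */
		if (!try_set(tbl[i].rate))
			return &tbl[i];

	return NULL;			/* no supported rate available */
}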
@@ -848,7 +888,12 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
ret = ov6650_reset(client);
if (!ret)
- ret = ov6650_prog_dflt(client);
+ ret = ov6650_prog_dflt(client, xclk->clkrc);
+ if (!ret) {
+ struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
+
+ ret = ov6650_s_fmt(sd, &mf);
+ }
if (!ret)
ret = v4l2_ctrl_handler_setup(&priv->hdl);
@@ -989,8 +1034,10 @@ static int ov6650_probe(struct i2c_client *client,
V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto ectlhdlfree;
+ }
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
@@ -1001,15 +1048,18 @@ static int ov6650_probe(struct i2c_client *client,
priv->rect.top = DEF_VSTRT << 1;
priv->rect.width = W_CIF;
priv->rect.height = H_CIF;
- priv->half_scale = false;
- priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
- priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ /* Hardware default frame interval */
+ priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
+ priv->tpf.denominator = FRAME_RATE_MAX;
priv->subdev.internal_ops = &ov6650_internal_ops;
ret = v4l2_async_register_subdev(&priv->subdev);
- if (ret)
- v4l2_ctrl_handler_free(&priv->hdl);
+ if (!ret)
+ return 0;
+ectlhdlfree:
+ v4l2_ctrl_handler_free(&priv->hdl);
return ret;
}
@@ -1041,6 +1091,6 @@ static struct i2c_driver ov6650_i2c_driver = {
module_i2c_driver(ov6650_i2c_driver);
-MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650");
-MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
+MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h
index 44fabe08234d..4b5f6985710b 100644
--- a/drivers/media/i2c/saa711x_regs.h
+++ b/drivers/media/i2c/saa711x_regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
* saa711x - Philips SAA711x video decoder register specifications
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 9adf8e034e7d..84f9771b5fed 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -682,66 +682,6 @@ static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
return 0;
}
-static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- static u32 const limits[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN,
- };
- static u32 const limits_replace[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN,
- };
- unsigned int i;
- int rval;
-
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY] ==
- SMIAPP_BINNING_CAPABILITY_NO) {
- for (i = 0; i < ARRAY_SIZE(limits); i++)
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
-
- return 0;
- }
-
- rval = smiapp_get_limits(sensor, limits, ARRAY_SIZE(limits));
- if (rval < 0)
- return rval;
-
- /*
- * Sanity check whether the binning limits are valid. If not,
- * use the non-binning ones.
- */
- if (sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN])
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(limits); i++) {
- dev_dbg(&client->dev,
- "replace limit 0x%8.8x \"%s\" = %d, 0x%x\n",
- smiapp_reg_limits[limits[i]].addr,
- smiapp_reg_limits[limits[i]].what,
- sensor->limits[limits_replace[i]],
- sensor->limits[limits_replace[i]]);
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
- }
-
- return 0;
-}
-
static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -891,60 +831,47 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor)
{
struct v4l2_ctrl *vblank = sensor->vblank;
struct v4l2_ctrl *hblank = sensor->hblank;
+ uint16_t min_fll, max_fll, min_llp, max_llp, min_lbp;
int min, max;
+ if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN];
+ } else {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK];
+ }
+
min = max_t(int,
sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ min_fll -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
- max = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
+ max = max_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
__v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
min = max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ min_llp -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
- max = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
+ min_lbp);
+ max = max_llp - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
__v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
__smiapp_update_exposure_limits(sensor);
}
-static int smiapp_update_mode(struct smiapp_sensor *sensor)
+static int smiapp_pll_blanking_update(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned int binning_mode;
int rval;
- /* Binning has to be set up here; it affects limits */
- if (sensor->binning_horizontal == 1 &&
- sensor->binning_vertical == 1) {
- binning_mode = 0;
- } else {
- u8 binning_type =
- (sensor->binning_horizontal << 4)
- | sensor->binning_vertical;
-
- rval = smiapp_write(
- sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
- if (rval < 0)
- return rval;
-
- binning_mode = 1;
- }
- rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
- if (rval < 0)
- return rval;
-
- /* Get updated limits due to binning */
- rval = smiapp_get_limits_binning(sensor);
- if (rval < 0)
- return rval;
-
rval = smiapp_pll_update(sensor);
if (rval < 0)
return rval;
@@ -970,62 +897,91 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
* SMIA++ NVM handling
*
*/
-static int smiapp_read_nvm(struct smiapp_sensor *sensor,
- unsigned char *nvm)
+
+static int smiapp_read_nvm_page(struct smiapp_sensor *sensor, u32 p, u8 *nvm,
+ u8 *status)
{
- u32 i, s, p, np, v;
- int rval = 0, rval2;
+ unsigned int i;
+ int rval;
+ u32 s;
- np = sensor->nvm_size / SMIAPP_NVM_PAGE_SIZE;
- for (p = 0; p < np; p++) {
- rval = smiapp_write(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
- if (rval)
- goto out;
+ *status = 0;
- rval = smiapp_write(sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN |
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN);
- if (rval)
- goto out;
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
+ if (rval)
+ return rval;
- for (i = 1000; i > 0; i--) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN);
+ if (rval)
+ return rval;
- if (rval)
- goto out;
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
+ if (rval)
+ return rval;
+
+ if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE) {
+ *status = s;
+ return -ENODATA;
+ }
+ if (sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL) {
+ for (i = 1000; i > 0; i--) {
if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
break;
- }
- if (!i) {
- rval = -ETIMEDOUT;
- goto out;
- }
-
- for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
rval = smiapp_read(
sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
- &v);
- if (rval)
- goto out;
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
- *nvm++ = v;
+ if (rval)
+ return rval;
}
+
+ if (!i)
+ return -ETIMEDOUT;
}
-out:
+ for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
+ u32 v;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
+ &v);
+ if (rval)
+ return rval;
+
+ *nvm++ = v;
+ }
+
+ return 0;
+}
+
+static int smiapp_read_nvm(struct smiapp_sensor *sensor, unsigned char *nvm,
+ size_t nvm_size)
+{
+ u8 status = 0;
+ u32 p;
+ int rval = 0, rval2;
+
+ for (p = 0; p < nvm_size / SMIAPP_NVM_PAGE_SIZE && !rval; p++) {
+ rval = smiapp_read_nvm_page(sensor, p, nvm, &status);
+ nvm += SMIAPP_NVM_PAGE_SIZE;
+ }
+
+ if (rval == -ENODATA &&
+ status & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE)
+ rval = 0;
+
rval2 = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL, 0);
if (rval < 0)
return rval;
else
- return rval2;
+ return rval2 ?: p * SMIAPP_NVM_PAGE_SIZE;
}
/*
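[Editor's note] The new return statement uses the GNU C conditional with an omitted middle operand: a ?: b yields a when a is non-zero (evaluating a only once), otherwise b. Here a write-back error is reported if one occurred, otherwise the number of NVM bytes read is returned. A tiny user-space illustration (gcc/clang extension):

/* Hedged illustration of the GNU ?: ("elvis") operator. */
#include <stdio.h>

int main(void)
{
	int err = 0, bytes = 128;

	printf("%d\n", err ?: bytes);	/* prints 128: err is zero */
	err = -5;
	printf("%d\n", err ?: bytes);	/* prints -5: the error wins */
	return 0;
}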
@@ -1324,10 +1280,6 @@ static int smiapp_power_on(struct device *dev)
rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
if (rval)
goto out_cci_addr_fail;
-
- rval = smiapp_update_mode(sensor);
- if (rval < 0)
- goto out_cci_addr_fail;
}
mutex_unlock(&sensor->mutex);
@@ -1387,6 +1339,7 @@ static int smiapp_power_off(struct device *dev)
static int smiapp_start_streaming(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int binning_mode;
int rval;
mutex_lock(&sensor->mutex);
@@ -1397,6 +1350,27 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if (rval)
goto out;
+ /* Binning configuration */
+ if (sensor->binning_horizontal == 1 &&
+ sensor->binning_vertical == 1) {
+ binning_mode = 0;
+ } else {
+ u8 binning_type =
+ (sensor->binning_horizontal << 4)
+ | sensor->binning_vertical;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
+ if (rval < 0)
+ goto out;
+
+ binning_mode = 1;
+ }
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
+ if (rval < 0)
+ goto out;
+
+ /* Set up PLL */
rval = smiapp_pll_configure(sensor);
if (rval)
goto out;
@@ -2073,7 +2047,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return smiapp_update_mode(sensor);
+ return smiapp_pll_blanking_update(sensor);
return 0;
}
@@ -2312,41 +2286,34 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- unsigned int nbytes;
+ int rval;
if (!sensor->dev_init_done)
return -EBUSY;
- if (!sensor->nvm_size) {
- int rval;
-
- /* NVM not read yet - read it now */
- sensor->nvm_size = sensor->hwcfg->nvm_size;
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ return -ENODEV;
+ }
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(&client->dev);
- pm_runtime_put(&client->dev);
- return -ENODEV;
- }
+ rval = smiapp_read_nvm(sensor, buf, PAGE_SIZE);
+ if (rval < 0) {
+ pm_runtime_put(&client->dev);
+ dev_err(&client->dev, "nvm read failed\n");
+ return -ENODEV;
+ }
- if (smiapp_read_nvm(sensor, sensor->nvm)) {
- dev_err(&client->dev, "nvm read failed\n");
- return -ENODEV;
- }
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- pm_runtime_mark_last_busy(&client->dev);
- pm_runtime_put_autosuspend(&client->dev);
- }
/*
* NVM is still way below a PAGE_SIZE, so we can safely
* assume this for now.
*/
- nbytes = min_t(unsigned int, sensor->nvm_size, PAGE_SIZE);
- memcpy(buf, sensor->nvm, nbytes);
-
- return nbytes;
+ return rval;
}
static DEVICE_ATTR(nvm, S_IRUGO, smiapp_sysfs_nvm_read, NULL);
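[Editor's note] The reworked handler follows the usual runtime-PM read bracket: take a usage reference, bail out if resume fails (dropping the reference with the _noidle variant so the usage count stays balanced), and release via autosuspend on every exit path. A hedged, simplified skeleton of that bracket (kernel context; do_hw_read() is a hypothetical helper):

/* Hedged sketch of the runtime-PM bracket used above. */
static ssize_t read_with_rpm(struct device *dev, char *buf)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance usage count */
		return ret;
	}

	ret = do_hw_read(buf);			/* hypothetical */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}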
@@ -2810,16 +2777,13 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
}
}
- /* NVM size is not mandatory */
- fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
-
rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
if (rval)
dev_info(dev, "can't get clock-frequency\n");
- dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
- hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
+ dev_dbg(dev, "clk %d, mode %d\n", hwcfg->ext_clk,
+ hwcfg->csi_signalling_mode);
if (!bus_cfg.nr_of_link_frequencies) {
dev_warn(dev, "no link frequencies defined\n");
@@ -2862,7 +2826,6 @@ static int smiapp_probe(struct i2c_client *client)
return -ENOMEM;
sensor->hwcfg = hwcfg;
- mutex_init(&sensor->mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
@@ -2926,6 +2889,8 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
return rval;
+ mutex_init(&sensor->mutex);
+
rval = smiapp_identify_module(sensor);
if (rval) {
rval = -ENODEV;
@@ -3003,17 +2968,10 @@ static int smiapp_probe(struct i2c_client *client)
rval = -ENOENT;
goto out_power_off;
}
- /* SMIA++ NVM initialization - it will be read from the sensor
- * when it is first requested by userspace.
- */
- if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
- sensor->nvm = devm_kzalloc(&client->dev,
- sensor->hwcfg->nvm_size, GFP_KERNEL);
- if (sensor->nvm == NULL) {
- rval = -ENOMEM;
- goto out_cleanup;
- }
+ if (sensor->minfo.smiapp_version &&
+ sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED) {
if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
dev_err(&client->dev, "sysfs nvm entry failed\n");
rval = -EBUSY;
@@ -3086,7 +3044,7 @@ static int smiapp_probe(struct i2c_client *client)
}
mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
+ rval = smiapp_pll_blanking_update(sensor);
mutex_unlock(&sensor->mutex);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -3101,19 +3059,23 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
- if (rval < 0)
- goto out_media_entity_cleanup;
-
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
+
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
+ if (rval < 0)
+ goto out_disable_runtime_pm;
+
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
+out_disable_runtime_pm:
+ pm_runtime_disable(&client->dev);
+
out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
@@ -3122,6 +3084,7 @@ out_cleanup:
out_power_off:
smiapp_power_off(&client->dev);
+ mutex_destroy(&sensor->mutex);
return rval;
}
@@ -3144,6 +3107,7 @@ static int smiapp_remove(struct i2c_client *client)
media_entity_cleanup(&sensor->ssds[i].sd.entity);
}
smiapp_cleanup(sensor);
+ mutex_destroy(&sensor->mutex);
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 2804a4d9a4e1..43505cd0616e 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -11,25 +11,29 @@
#ifndef __SMIAPP_REG_H_
#define __SMIAPP_REG_H_
+#include <linux/bits.h>
+
#include "smiapp-reg-defs.h"
/* Bits for above register */
-#define SMIAPP_IMAGE_ORIENTATION_HFLIP (1 << 0)
-#define SMIAPP_IMAGE_ORIENTATION_VFLIP (1 << 1)
-
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN (0 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE (1 << 3)
-
-#define SMIAPP_SOFTWARE_RESET (1 << 0)
-
-#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE (1 << 0)
-#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE (1 << 1)
+#define SMIAPP_IMAGE_ORIENTATION_HFLIP BIT(0)
+#define SMIAPP_IMAGE_ORIENTATION_VFLIP BIT(1)
+
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE BIT(3)
+
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL BIT(2)
+
+#define SMIAPP_SOFTWARE_RESET BIT(0)
+
+#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0)
+#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE BIT(1)
#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
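[Editor's note] BIT(n) from <linux/bits.h> expands to (1UL << (n)), so the conversion above is mechanical. Note that SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN, formerly (0 << 1), named the value zero rather than any bit, which is why it is dropped instead of converted. A user-space equivalence check:

/* Hedged sketch: BIT(n) equivalence with the old open-coded shifts. */
#include <assert.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	assert(BIT(0) == (1 << 0));	/* ..._CTRL_EN */
	assert(BIT(3) == (1 << 3));	/* ..._STATUS_EUSAGE */
	assert((0 << 1) == 0);		/* old RD_EN named no bit at all */
	return 0;
}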
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index ecf8a17dbe37..3ab874a5deba 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -208,9 +208,6 @@ struct smiapp_sensor {
bool dev_init_done;
u8 compressed_min_bpp;
- u8 *nvm; /* nvm memory buffer */
- unsigned int nvm_size; /* bytes */
-
struct smiapp_module_info minfo;
struct smiapp_pll pll;
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 81285b8d5cfb..003ba22334cd 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -971,6 +971,11 @@ static int mipid02_probe(struct i2c_client *client)
bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
+ if (IS_ERR(bridge->reset_gpio)) {
+ dev_err(dev, "failed to get reset GPIO\n");
+ return PTR_ERR(bridge->reset_gpio);
+ }
+
ret = mipid02_get_regulators(bridge);
if (ret) {
dev_err(dev, "failed to get regulators %d", ret);
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index ecf87534613b..d9b3daada07d 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 Gateworks Corporation
*/
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 9088186c24d1..f716129adf09 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
*
* tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
*
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 39f66e7a0e42..8be03fe5928c 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -375,7 +375,7 @@ static int vpx3220_s_routing(struct v4l2_subdev *sd,
input = 1: COMPOSITE input
input = 2: SVHS input */
- const int input_vals[3][2] = {
+ static const int input_vals[3][2] = {
{0x0c, 0},
{0x0d, 0},
{0x0e, 1}
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index e19df5165e78..da8088351135 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode)
dev_dbg(devnode->parent, "Media device released\n");
}
+static void __media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_link *link, *tmp;
+ struct media_interface *intf;
+ unsigned int i;
+
+ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+
+ /* Remove all interface links pointing to this entity */
+ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+ list_for_each_entry_safe(link, tmp, &intf->links, list) {
+ if (link->entity == entity)
+ __media_remove_intf_link(link);
+ }
+ }
+
+ /* Remove all data links that belong to this entity */
+ __media_entity_remove_links(entity);
+
+ /* Remove all pads that belong to this entity */
+ for (i = 0; i < entity->num_pads; i++)
+ media_gobj_destroy(&entity->pads[i].graph_obj);
+
+ /* Remove the entity */
+ media_gobj_destroy(&entity->graph_obj);
+
+ /* invoke entity_notify callbacks to handle entity removal?? */
+
+ entity->graph_obj.mdev = NULL;
+}
+
/**
* media_device_register_entity - Register an entity with a media device
* @mdev: The media device
@@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
*/
ret = media_graph_walk_init(&new, mdev);
if (ret) {
+ __media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
@@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
-static void __media_device_unregister_entity(struct media_entity *entity)
-{
- struct media_device *mdev = entity->graph_obj.mdev;
- struct media_link *link, *tmp;
- struct media_interface *intf;
- unsigned int i;
-
- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
-
- /* Remove all interface links pointing to this entity */
- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
- list_for_each_entry_safe(link, tmp, &intf->links, list) {
- if (link->entity == entity)
- __media_remove_intf_link(link);
- }
- }
-
- /* Remove all data links that belong to this entity */
- __media_entity_remove_links(entity);
-
- /* Remove all pads that belong to this entity */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_destroy(&entity->pads[i].graph_obj);
-
- /* Remove the entity */
- media_gobj_destroy(&entity->graph_obj);
-
- /* invoke entity_notify callbacks to handle entity removal?? */
-
- entity->graph_obj.mdev = NULL;
-}
-
void media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 85f3e7307538..fa57e12f2ac8 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -664,7 +664,7 @@ static int _cx18_process_idx_data(struct cx18_buffer *buf,
struct cx18_enc_idx_entry *e_buf;
/* Frame type lookup: 1=I, 2=P, 4=B */
- const int mapping[8] = {
+ static const int mapping[8] = {
-1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P,
-1, V4L2_ENC_IDX_FRAME_B, -1, -1, -1
};
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index e880afe37f15..d59ca3601785 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
+ GFP_KERNEL)) {
+ kfree(state);
return -ENOMEM;
+ }
state->dev = dev;
sd = &state->sd;
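[Editor's note] The hunk above closes a leak: state was allocated a few lines earlier, and the old early return on kfifo_alloc() failure dropped it. The general rule is that every error return must unwind whatever earlier steps allocated. A hedged miniature of the corrected shape (kernel context; demo_state is illustrative):

/* Hedged sketch: unwind a prior allocation when a later step fails. */
#include <linux/kfifo.h>
#include <linux/slab.h>

struct demo_state { struct kfifo fifo; };

static int demo_probe(void)
{
	struct demo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	if (kfifo_alloc(&state->fifo, 4096, GFP_KERNEL)) {
		kfree(state);		/* undo the earlier allocation */
		return -ENOMEM;
	}
	return 0;
}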
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 3cd87626cd79..9fa388626bae 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1781,6 +1781,41 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_NOTONLYTV_LV3H] = {
+ .name = "NotOnlyTV LV3H",
+ .tuner_type = TUNER_XC2028,
+ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
+ /* if gpio1:bit9 is enabled, DVB-T won't work */
+
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO] = {
.name = "DViCO FusionHDTV DVB-T PRO",
.tuner_type = TUNER_XC2028,
@@ -2654,6 +2689,7 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6f18,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
+ /* Also NotOnlyTV LV3H (version 1.11 is silkscreened on the board) */
.subvendor = 0x14f1,
.subdevice = 0x8852,
.card = CX88_BOARD_GENIATECH_X8000_MT,
@@ -3121,6 +3157,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
@@ -3322,6 +3359,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
udelay(1000);
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
@@ -3378,6 +3416,11 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
*/
ctl->disable_power_mgmt = 1;
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ ctl->read_not_reliable = 1;
+ break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 0292d0947cc7..202ff9e8c257 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1378,6 +1378,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index dcc0f02aeb70..b8abcd550604 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1277,7 +1277,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core = cx88_core_get(dev->pci);
if (!core) {
err = -EINVAL;
- goto fail_free;
+ goto fail_disable;
}
dev->core = core;
@@ -1323,7 +1323,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->audio_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
}
@@ -1337,7 +1337,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->video_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
if (vc->id == V4L2_CID_CHROMA_AGC)
@@ -1509,11 +1509,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
fail_unreg:
cx8800_unregister_video(dev);
- free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
+fail_irq:
+ free_irq(pci_dev->irq, dev);
fail_core:
core->v4ldev = NULL;
cx88_core_put(core, dev->pci);
+fail_disable:
+ pci_disable_device(pci_dev);
fail_free:
kfree(dev);
return err;
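[Editor's note] The relabelled error path restores the kernel's unwind-in-reverse-order discipline: each label releases exactly the resources acquired before the failing step, so pci_disable_device() now pairs with the earlier enable and the IRQ is freed only on paths that actually requested it. A hedged skeleton of the pattern (step_a/step_b/step_c and their undo helpers are hypothetical):

/* Hedged skeleton of ordered goto unwinding in an init function. */
static int init_like(void)
{
	int err;

	err = step_a();			/* e.g. pci_enable_device() */
	if (err)
		return err;

	err = step_b();			/* e.g. request_irq() */
	if (err)
		goto undo_a;

	err = step_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	undo_step_b();			/* e.g. free_irq() */
undo_a:
	undo_step_a();			/* e.g. pci_disable_device() */
	return err;
}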
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 744a22328ebc..ce4acf6de6aa 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -228,6 +228,7 @@ extern const struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36 89
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43 90
+#define CX88_BOARD_NOTONLYTV_LV3H 91
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index bb3a8cc9de0c..9dce31d2b525 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 6d22c0107d33..80478b026d75 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -325,7 +325,7 @@ static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
- struct v4l2_decode_vbi_line vbi;
+ struct v4l2_decode_vbi_line vbi = {};
int i;
unsigned lines = 0;
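[Editor's note] The = {} initializer zeroes every member of the on-stack struct before the decoder callback fills in only some of them, so no stack garbage can leak into the unset fields. A short user-space illustration (empty braces are a gcc/clang extension, standardized in C23):

/* Hedged illustration: empty-brace init zeroes the whole object. */
#include <assert.h>
#include <stddef.h>

struct vbi_like { int is_second_field; void *p; unsigned int line; };

int main(void)
{
	struct vbi_like v = {};		/* all members become zero */

	assert(v.is_second_field == 0 && v.p == NULL && v.line == 0);
	return 0;
}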
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 67aebe759232..c0bd5d7e148b 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -60,10 +60,8 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(!mantis)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index deadd0b92233..906e4500d87d 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -69,10 +69,8 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 9ae04e18e6c6..126d085be9a7 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -13,12 +13,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <asm/div64.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
#include "saa7164.h"
MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards");
@@ -1045,92 +1043,138 @@ static void saa7164_dev_unregister(struct saa7164_dev *dev)
return;
}
-#ifdef CONFIG_PROC_FS
-static int saa7164_proc_show(struct seq_file *m, void *v)
+#ifdef CONFIG_DEBUG_FS
+static void *saa7164_seq_start(struct seq_file *s, loff_t *pos)
{
struct saa7164_dev *dev;
- struct tmComResBusInfo *b;
- struct list_head *list;
- int i, c;
+ loff_t index = *pos;
- if (saa7164_devcount == 0)
- return 0;
+ mutex_lock(&devlist);
+ list_for_each_entry(dev, &saa7164_devlist, devlist) {
+ if (index-- == 0) {
+ mutex_unlock(&devlist);
+ return dev;
+ }
+ }
+ mutex_unlock(&devlist);
- list_for_each(list, &saa7164_devlist) {
- dev = list_entry(list, struct saa7164_dev, devlist);
- seq_printf(m, "%s = %p\n", dev->name, dev);
+ return NULL;
+}
- /* Lock the bus from any other access */
- b = &dev->bus;
- mutex_lock(&b->lock);
+static void *saa7164_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct saa7164_dev *dev = v;
+ void *ret;
- seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
- b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+ mutex_lock(&devlist);
+ if (list_is_last(&dev->devlist, &saa7164_devlist))
+ ret = NULL;
+ else
+ ret = list_next_entry(dev, devlist);
+ mutex_unlock(&devlist);
- seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
- b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+ ++*pos;
- seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
- b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+ return ret;
+}
- seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
- b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
- c = 0;
- seq_printf(m, "\n Set Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeSetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+static void saa7164_seq_stop(struct seq_file *s, void *v)
+{
+}
- seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+static int saa7164_seq_show(struct seq_file *m, void *v)
+{
+ struct saa7164_dev *dev = v;
+ struct tmComResBusInfo *b;
+ int i, c;
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
- }
+ seq_printf(m, "%s = %p\n", dev->name, dev);
- c = 0;
- seq_printf(m, "\n Get Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeGetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+ /* Lock the bus from any other access */
+ b = &dev->bus;
+ mutex_lock(&b->lock);
- seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+ seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
+ seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+ c = 0;
+ seq_puts(m, "\n Set Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeSetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
}
+ }
- mutex_unlock(&b->lock);
+ c = 0;
+ seq_puts(m, "\n Get Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeGetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+ seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
+ }
}
+ mutex_unlock(&b->lock);
+
return 0;
}
-static struct proc_dir_entry *saa7164_pe;
+static const struct seq_operations saa7164_seq_ops = {
+ .start = saa7164_seq_start,
+ .next = saa7164_seq_next,
+ .stop = saa7164_seq_stop,
+ .show = saa7164_seq_show,
+};
-static int saa7164_proc_create(void)
+static int saa7164_open(struct inode *inode, struct file *file)
{
- saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
- if (!saa7164_pe)
- return -ENOMEM;
+ return seq_open(file, &saa7164_seq_ops);
+}
- return 0;
+static const struct file_operations saa7164_operations = {
+ .owner = THIS_MODULE,
+ .open = saa7164_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct dentry *saa7164_dentry;
+
+static void __init saa7164_debugfs_create(void)
+{
+ saa7164_dentry = debugfs_create_file("saa7164", 0444, NULL, NULL,
+ &saa7164_operations);
}
-static void saa7164_proc_destroy(void)
+static void __exit saa7164_debugfs_remove(void)
{
- if (saa7164_pe)
- remove_proc_entry("saa7164", NULL);
+ debugfs_remove(saa7164_dentry);
}
#else
-static int saa7164_proc_create(void) { return 0; }
-static void saa7164_proc_destroy(void) {}
+static void saa7164_debugfs_create(void) { }
+static void saa7164_debugfs_remove(void) { }
#endif
static int saa7164_thread_function(void *data)
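[Editor's note] The debugfs conversion implements the seq_file iterator contract: start() returns the element at *pos or NULL to stop, next() advances *pos and returns the following element, stop() releases whatever start() took, and show() formats one element; seq_read() drives these in a loop, so the output no longer has to fit one buffer. A hedged sketch of the contract over a plain array (kernel context):

/* Hedged sketch of the seq_file iterator contract over an array. */
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int demo[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *s, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo) ? &demo[*pos] : NULL;
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return *pos < ARRAY_SIZE(demo) ? &demo[*pos] : NULL;
}

static void demo_stop(struct seq_file *s, void *v) { }

static int demo_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};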
@@ -1507,7 +1551,7 @@ static int __init saa7164_init(void)
if (ret)
return ret;
- saa7164_proc_create();
+ saa7164_debugfs_create();
pr_info("saa7164 driver loaded\n");
@@ -1516,7 +1560,7 @@ static int __init saa7164_init(void)
static void __exit saa7164_fini(void)
{
- saa7164_proc_destroy();
+ saa7164_debugfs_remove();
pci_unregister_driver(&saa7164_pci_driver);
}
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 65bc7e29450b..2b5e0154814c 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 30c8f2ec9c3c..eaa57d835ea8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -353,7 +353,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
G723_PERIOD_BYTES * PERIODS,
G723_PERIOD_BYTES * PERIODS);
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 40373bd23381..7786e51d19ae 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -300,7 +300,7 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(dev->pci_dev),
+ &dev->pci_dev->dev,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX);
return 0;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f1f61419fd29..e84f35d3a68e 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -483,6 +483,7 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
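[Editor's note] The added "depends on INTERCONNECT || !INTERCONNECT" is the standard Kconfig idiom for an optional subsystem: under tristate logic the expression evaluates to m only when INTERCONNECT=m, so it forbids exactly the broken combination VENUS=y with INTERCONNECT=m (a built-in driver cannot call into a module) while allowing every other pairing. A hedged miniature with hypothetical symbols:

config FOO
	tristate "Foo driver"		# hypothetical
	depends on BAR || !BAR		# only FOO=y with BAR=m is ruled out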
@@ -493,6 +494,19 @@ config VIDEO_QCOM_VENUS
on various Qualcomm SoCs.
To compile this driver as a module choose m here.
+config VIDEO_SUN8I_DEINTERLACE
+ tristate "Allwinner Deinterlace driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner deinterlace unit with scaling
+ capability found on some SoCs, like H3.
+ To compile this driver as a module choose m here.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
@@ -585,9 +599,10 @@ config VIDEO_MESON_G12A_AO_CEC
config CEC_GPIO
tristate "Generic GPIO-based CEC driver"
- depends on PREEMPT || COMPILE_TEST
+ depends on PREEMPTION || COMPILE_TEST
select CEC_CORE
select CEC_PIN
+ select CEC_NOTIFIER
select GPIOLIB
help
This is a generic GPIO-based CEC driver.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 6ee7eb0d36f4..d13db96e3015 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
-obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
-
-obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/
+obj-y += ti-vpe/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 2b42ba1f5949..09104304bd06 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI VPFE capture Driver
*
@@ -5,19 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/delay.h>
@@ -69,125 +57,64 @@ static const struct vpfe_standard vpfe_standards[] = {
{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};
-struct bus_format {
- unsigned int width;
- unsigned int bpp;
-};
-
-/*
- * struct vpfe_fmt - VPFE media bus format information
- * @code: V4L2 media bus format code
- * @shifted: V4L2 media bus format code for the same pixel layout but
- * shifted to be 8 bits per pixel. =0 if format is not shiftable.
- * @pixelformat: V4L2 pixel format FCC identifier
- * @width: Bits per pixel (when transferred over a bus)
- * @bpp: Bytes per pixel (when stored in memory)
- * @supported: Indicates format supported by subdev
- */
-struct vpfe_fmt {
- u32 fourcc;
- u32 code;
- struct bus_format l;
- struct bus_format s;
- bool supported;
- u32 index;
-};
-
-static struct vpfe_fmt formats[] = {
+static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
.code = MEDIA_BUS_FMT_YVYU8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
.code = MEDIA_BUS_FMT_VYUY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X,
.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
},
};
-static int
-__vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp);
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt);
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f);
-static struct vpfe_fmt *find_format_by_code(unsigned int code)
+static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe,
+ unsigned int code)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->code == code)
return fmt;
}
@@ -195,13 +122,14 @@ static struct vpfe_fmt *find_format_by_code(unsigned int code)
return NULL;
}
-static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe,
+ unsigned int pixelformat)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->fourcc == pixelformat)
return fmt;
}
@@ -209,48 +137,18 @@ static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
return NULL;
}
-static void
-mbus_to_pix(struct vpfe_device *vpfe,
- const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix, unsigned int *bpp)
+static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt)
{
struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
unsigned int bus_width = sdinfo->vpfe_param.bus_width;
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_code(mbus->code);
- if (WARN_ON(fmt == NULL)) {
- pr_err("Invalid mbus code set\n");
- *bpp = 1;
- return;
- }
-
- memset(pix, 0, sizeof(*pix));
- v4l2_fill_pix_format(pix, mbus);
- pix->pixelformat = fmt->fourcc;
- *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+ u32 bpp, bus_width_bytes, clocksperpixel;
- /* pitch should be 32 bytes aligned */
- pix->bytesperline = ALIGN(pix->width * *bpp, 32);
- pix->sizeimage = pix->bytesperline * pix->height;
-}
-
-static void pix_to_mbus(struct vpfe_device *vpfe,
- struct v4l2_pix_format *pix_fmt,
- struct v4l2_mbus_framefmt *mbus_fmt)
-{
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_pix(pix_fmt->pixelformat);
- if (!fmt) {
- /* default to first entry */
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- pix_fmt->pixelformat);
- fmt = &formats[0];
- }
+ bus_width_bytes = ALIGN(bus_width, 8) >> 3;
+ clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width);
+ bpp = clocksperpixel * bus_width_bytes;
- memset(mbus_fmt, 0, sizeof(*mbus_fmt));
- v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+ return bpp;
}
/* Print Four-character-code (FOURCC) */
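[Editor's note] __get_bytesperpixel() replaces the old per-format l/s bus tables with arithmetic: round the bus width up to whole bytes, count how many bus clocks one pixel needs, and multiply. Worked through: a 16-bit-per-pixel format on a 10-bit bus gives bus_width_bytes = 2 and clocksperpixel = DIV_ROUND_UP(16, 10) = 2, hence 4 bytes (the old l.bpp); on an 8-bit bus the same format needs 2 clocks of 1 byte, hence 2 bytes (the old s.bpp). A user-space check:

/* Hedged check of the bytes-per-pixel arithmetic used above. */
#include <assert.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int bytes_per_pixel(unsigned int bus_width,
				    unsigned int bitsperpixel)
{
	unsigned int bus_width_bytes = ALIGN_UP(bus_width, 8) >> 3;

	return DIV_ROUND_UP(bitsperpixel, bus_width) * bus_width_bytes;
}

int main(void)
{
	assert(bytes_per_pixel(10, 16) == 4);	/* matches old l.bpp */
	assert(bytes_per_pixel(8, 16) == 2);	/* matches old s.bpp */
	assert(bytes_per_pixel(8, 8) == 1);	/* 8-bit Bayer */
	return 0;
}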
@@ -267,20 +165,6 @@ static char *print_fourcc(u32 fmt)
return code;
}
-static int
-cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
-{
- return lhs->type == rhs->type &&
- lhs->fmt.pix.width == rhs->fmt.pix.width &&
- lhs->fmt.pix.height == rhs->fmt.pix.height &&
- lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
- lhs->fmt.pix.field == rhs->fmt.pix.field &&
- lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
- lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
- lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
- lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
-}
-
static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
{
return ioread32(ccdc->ccdc_cfg.base_addr + offset);
@@ -345,13 +229,9 @@ static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
vert_nr_lines = (image_win->height >> 1) - 1;
vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
/* configure VDINT0 */
val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
} else {
- /* Since first line doesn't have any data */
- vert_start += 1;
vert_nr_lines = image_win->height - 1;
/*
* configure VDINT0 and VDINT1. VDINT1 will be at half
@@ -405,7 +285,6 @@ vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
- ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
max_gamma > max_data) {
vpfe_dbg(1, vpfe, "Invalid data line select\n");
return -EINVAL;
@@ -445,40 +324,25 @@ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
- int dma_cntl, i, pcr;
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ u32 dma_cntl, pcr;
- /* If the CCDC module is still busy wait for it to be done */
- for (i = 0; i < 10; i++) {
- usleep_range(5000, 6000);
- pcr = vpfe_reg_read(ccdc, VPFE_PCR);
- if (!pcr)
- break;
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (pcr)
+ vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr);
- /* make sure it it is disabled */
- vpfe_pcr_enable(ccdc, 0);
- }
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if (dma_cntl & VPFE_DMA_CNTL_OVERFLOW)
+ vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)\n",
+ dma_cntl);
/* Disable CCDC by resetting all register to default POR values */
vpfe_ccdc_restore_defaults(ccdc);
- /* if DMA_CNTL overflow bit is set. Clear it
- * It appears to take a while for this to become quiescent ~20ms
- */
- for (i = 0; i < 10; i++) {
- dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
- if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
- break;
-
- /* Clear the overflow bit */
- vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
- usleep_range(5000, 6000);
- }
-
/* Disabled the module at the CONFIG level */
vpfe_config_enable(ccdc, 0);
pm_runtime_put_sync(dev);
-
return 0;
}
@@ -494,8 +358,8 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
x = copy_from_user(&raw_params, params, sizeof(raw_params));
if (x) {
vpfe_dbg(1, vpfe,
- "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
- x);
+ "%s: error in copying ccdc params, %d\n",
+ __func__, x);
return -EFAULT;
}
@@ -513,11 +377,9 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
*/
static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
u32 syn_mode;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
/*
* first restore the CCDC registers to default values
* This is important since we assume default values to be set in
@@ -649,8 +511,6 @@ static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
unsigned int syn_mode;
unsigned int val;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
-
/* Reset CCDC */
vpfe_ccdc_restore_defaults(ccdc);
@@ -751,8 +611,8 @@ static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
- vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
- ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+ vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
+ __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
@@ -1036,10 +896,9 @@ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ u32 bpp;
int ret = 0;
- vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
-
vpfe_dbg(1, vpfe, "pixelformat: %s\n",
print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
@@ -1050,7 +909,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
}
/* configure the image window */
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp);
switch (vpfe->fmt.fmt.pix.field) {
case V4L2_FIELD_INTERLACED:
@@ -1094,7 +954,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
static int vpfe_config_image_format(struct vpfe_device *vpfe,
v4l2_std_id std_id)
{
- struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ struct vpfe_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
int i, ret;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
@@ -1116,26 +977,29 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe,
return -EINVAL;
}
- vpfe->crop.top = vpfe->crop.left = 0;
- vpfe->crop.width = vpfe->std_info.active_pixels;
- vpfe->crop.height = vpfe->std_info.active_lines;
- pix->width = vpfe->crop.width;
- pix->height = vpfe->crop.height;
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
-
- /* first field and frame format based on standard frame format */
- if (vpfe->std_info.frame_format)
- pix->field = V4L2_FIELD_INTERLACED;
- else
- pix->field = V4L2_FIELD_NONE;
-
- ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ ret = __subdev_get_format(vpfe, &mbus_fmt);
if (ret)
return ret;
+ fmt = find_format_by_code(vpfe, mbus_fmt.code);
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current subdev format */
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc;
+ vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt);
+ vpfe->current_vpfe_fmt = fmt;
+
/* Update the crop window based on found values */
- vpfe->crop.width = pix->width;
- vpfe->crop.height = pix->height;
+ vpfe->crop.top = 0;
+ vpfe->crop.left = 0;
+ vpfe->crop.width = mbus_fmt.width;
+ vpfe->crop.height = mbus_fmt.height;
return vpfe_config_ccdc_image_format(vpfe);
}
@@ -1237,22 +1101,29 @@ unlock:
* This function will get next buffer from the dma queue and
* set the buffer address in the vpfe register for capture.
* the buffer is marked active
- *
- * Assumes caller is holding vpfe->dma_queue_lock already
*/
-static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
+ dma_addr_t addr;
+
+ spin_lock(&vpfe->dma_queue_lock);
+ if (list_empty(&vpfe->dma_queue)) {
+ spin_unlock(&vpfe->dma_queue_lock);
+ return;
+ }
+
vpfe->next_frm = list_entry(vpfe->dma_queue.next,
struct vpfe_cap_buffer, list);
list_del(&vpfe->next_frm->list);
+ spin_unlock(&vpfe->dma_queue_lock);
- vpfe_set_sdr_addr(&vpfe->ccdc,
- vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}
static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
- unsigned long addr;
+ dma_addr_t addr;
addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
vpfe->field_off;
@@ -1277,6 +1148,58 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
vpfe->cur_frm = vpfe->next_frm;
}
+static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
+ enum v4l2_field field)
+{
+ int fid;
+
+	/* Interlaced or TB capture: check which field
+	 * the hardware is in
+	 */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+		/* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+
+ if (vpfe->stopping)
+ return;
+
+ /*
+			 * Based on whether the two fields are stored
+			 * interleaved or separately in memory,
+			 * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+ } else {
+ /*
+			 * If one field is just being captured, configure
+			 * the next frame: get the next frame from the
+			 * empty queue; if no frame is available, hold on
+			 * to the current buffer.
+ */
+ if (vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ }
+ } else if (fid == 0) {
+ /*
+		 * Out of sync: recover from the hardware out-of-sync
+		 * condition. May lose one frame.
+ */
+ vpfe->field = fid;
+ }
+}
+
/*
* vpfe_isr : ISR handler for vpfe capture (VINT0)
* @irq: irq number
@@ -1288,76 +1211,28 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
static irqreturn_t vpfe_isr(int irq, void *dev)
{
struct vpfe_device *vpfe = (struct vpfe_device *)dev;
- enum v4l2_field field;
- int intr_status;
- int fid;
+ enum v4l2_field field = vpfe->fmt.fmt.pix.field;
+ int intr_status, stopping = vpfe->stopping;
intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
if (intr_status & VPFE_VDINT0) {
- field = vpfe->fmt.fmt.pix.field;
-
if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
if (vpfe->cur_frm != vpfe->next_frm)
vpfe_process_buffer_complete(vpfe);
- goto next_intr;
+ } else {
+ vpfe_handle_interlaced_irq(vpfe, field);
}
-
- /* interlaced or TB capture check which field
- we are in hardware */
- fid = vpfe_ccdc_getfid(&vpfe->ccdc);
-
- /* switch the software maintained field id */
- vpfe->field ^= 1;
- if (fid == vpfe->field) {
- /* we are in-sync here,continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the
- * next frame is available, release the
- * current frame and move on
- */
- if (vpfe->cur_frm != vpfe->next_frm)
- vpfe_process_buffer_complete(vpfe);
- /*
- * based on whether the two fields are stored
- * interleave or separately in memory,
- * reconfigure the CCDC memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_schedule_bottom_field(vpfe);
-
- goto next_intr;
- }
- /*
- * if one field is just being captured configure
- * the next frame get the next frame from the empty
- * queue if no frame is available hold on to the
- * current buffer
- */
- spin_lock(&vpfe->dma_queue_lock);
- if (!list_empty(&vpfe->dma_queue) &&
- vpfe->cur_frm == vpfe->next_frm)
- vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May loose one frame
- */
- vpfe->field = fid;
+ if (stopping) {
+ vpfe->stopping = false;
+ complete(&vpfe->capture_stop);
}
}
-next_intr:
- if (intr_status & VPFE_VDINT1) {
- spin_lock(&vpfe->dma_queue_lock);
- if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
- !list_empty(&vpfe->dma_queue) &&
+ if (intr_status & VPFE_VDINT1 && !stopping) {
+ if (field == V4L2_FIELD_NONE &&
vpfe->cur_frm == vpfe->next_frm)
vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
}
vpfe_clear_intr(&vpfe->ccdc, intr_status);
@@ -1394,8 +1269,6 @@ static int vpfe_querycap(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_querycap\n");
-
strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
@@ -1404,83 +1277,74 @@ static int vpfe_querycap(struct file *file, void *priv,
}
/* get the format set at output pad of the adjacent subdev */
-static int __vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct v4l2_mbus_framefmt mbus_fmt;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
- ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
return ret;
- if (!ret) {
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
- } else {
- ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
- sdinfo->grp_id,
- pad, get_fmt,
- NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
- v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
- mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
- }
-
- format->type = vpfe->fmt.type;
+ *fmt = *mbus_fmt;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
return 0;
}
/* set the format at output pad of the adjacent subdev */
-static int __vpfe_set_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_set_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
- pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+ return 0;
+}
- ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
- if (ret)
- return ret;
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f)
+{
+ u32 bpp;
+
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n");
+ return -EINVAL;
+ }
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ bpp = __get_bytesperpixel(vpfe, fmt);
- format->type = vpfe->fmt.type;
+	/* pitch should be 32-byte aligned */
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, print_fourcc(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
return 0;
}
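
The computation above pins bytesperline to a 32-byte boundary before
deriving sizeimage. A standalone sketch of the arithmetic (plain C;
ALIGN() rounds up to the next multiple of a power of two):

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int bpp = 2;			/* e.g. YUYV */
		unsigned int width = 642, height = 480;
		unsigned int bpl = ALIGN(width * bpp, 32);

		/* 642 * 2 = 1284 rounds up to 1312 */
		printf("bytesperline=%u sizeimage=%u\n", bpl, bpl * height);
		return 0;
	}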
@@ -1490,8 +1354,6 @@ static int vpfe_g_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
-
*fmt = vpfe->fmt;
return 0;
@@ -1502,82 +1364,124 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- struct vpfe_fmt *fmt = NULL;
- unsigned int k;
-
- vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
- f->index);
+ struct vpfe_fmt *fmt;
sdinfo = vpfe->current_subdev;
if (!sdinfo->sd)
return -EINVAL;
- if (f->index > ARRAY_SIZE(formats))
+ if (f->index >= vpfe->num_active_fmt)
return -EINVAL;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- if (formats[k].index == f->index) {
- fmt = &formats[k];
- break;
- }
- }
- if (!fmt)
- return -EINVAL;
+ fmt = vpfe->active_fmt[f->index];
f->pixelformat = fmt->fourcc;
- vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s\n",
- f->index, fmt->code, print_fourcc(fmt->fourcc));
+ vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n",
+ __func__, f->index, fmt->code, print_fourcc(fmt->fourcc));
return 0;
}
static int vpfe_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
+ struct v4l2_format *f)
{
struct vpfe_device *vpfe = video_drvdata(file);
- unsigned int bpp;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ const struct vpfe_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret, found;
+
+ fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ f->fmt.pix.pixelformat);
+ fmt = vpfe->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = vpfe->fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if (f->fmt.pix.width == fse.max_width &&
+ f->fmt.pix.height == fse.max_height) {
+ found = true;
+ break;
+ } else if (f->fmt.pix.width >= fse.min_width &&
+ f->fmt.pix.width <= fse.max_width &&
+ f->fmt.pix.height >= fse.min_height &&
+ f->fmt.pix.height <= fse.max_height) {
+ found = true;
+ break;
+ }
+ }
- vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = vpfe->fmt.fmt.pix.width;
+ f->fmt.pix.height = vpfe->fmt.fmt.pix.height;
+ }
- return __vpfe_get_format(vpfe, fmt, &bpp);
+ /*
+	 * Use the current colorspace for now; it will get
+	 * updated properly during s_fmt.
+ */
+ f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace;
+ return vpfe_calc_format_size(vpfe, fmt, f);
}
static int vpfe_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vpfe_device *vpfe = video_drvdata(file);
- struct v4l2_format format;
- unsigned int bpp;
+ struct vpfe_fmt *f;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
}
- ret = __vpfe_get_format(vpfe, &format, &bpp);
- if (ret)
+ ret = vpfe_try_fmt(file, priv, fmt);
+ if (ret < 0)
return ret;
+ f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat);
- if (!cmp_v4l2_format(fmt, &format)) {
- /* Sensor format is different from the requested format
- * so we need to change it
- */
- ret = __vpfe_set_format(vpfe, fmt, &bpp);
- if (ret)
- return ret;
- } else /* Just make sure all of the fields are consistent */
- *fmt = format;
+ v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code);
- /* First detach any IRQ if currently attached */
- vpfe_detach_irq(vpfe);
- vpfe->fmt = *fmt;
- vpfe->bpp = bpp;
+ ret = __subdev_set_format(vpfe, &mbus_fmt);
+ if (ret)
+ return ret;
+
+	/* Just double-check that nothing has gone wrong */
+ if (mbus_fmt.code != f->code) {
+ vpfe_dbg(3, vpfe,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = f->fourcc;
+ vpfe_calc_format_size(vpfe, f, &vpfe->fmt);
+ *fmt = vpfe->fmt;
+ vpfe->current_vpfe_fmt = f;
/* Update the crop window based on found values */
vpfe->crop.width = fmt->fmt.pix.width;
@@ -1592,57 +1496,40 @@ static int vpfe_enum_size(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_subdev_frame_size_enum fse;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_mbus_framefmt mbus;
- struct v4l2_pix_format pix;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
struct vpfe_fmt *fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
-
/* check for valid format */
- fmt = find_format_by_pix(fsize->pixel_format);
+ fmt = find_format_by_pix(vpfe, fsize->pixel_format);
if (!fmt) {
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- fsize->pixel_format);
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
return -EINVAL;
}
memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- memset(&pix, 0x0, sizeof(pix));
- /* Construct pix from parameter and use default for the rest */
- pix.pixelformat = fsize->pixel_format;
- pix.width = 640;
- pix.height = 480;
- pix.colorspace = V4L2_COLORSPACE_SRGB;
- pix.field = V4L2_FIELD_NONE;
- pix_to_mbus(vpfe, &pix, &mbus);
-
memset(&fse, 0x0, sizeof(fse));
fse.index = fsize->index;
fse.pad = 0;
- fse.code = mbus.code;
+ fse.code = fmt->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
if (ret)
- return -EINVAL;
+ return ret;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
- fse.index, fse.code, fse.min_width, fse.max_width,
- fse.min_height, fse.max_height);
+ vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width;
fsize->discrete.height = fse.max_height;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
- fsize->index, print_fourcc(fsize->pixel_format),
- fsize->discrete.width, fsize->discrete.height);
+ vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n",
+ __func__, fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
return 0;
}
@@ -1707,8 +1594,6 @@ static int vpfe_enum_input(struct file *file, void *priv,
struct vpfe_subdev_info *sdinfo;
int subdev, index;
- vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
-
if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
inp->index) < 0) {
vpfe_dbg(1, vpfe,
@@ -1725,8 +1610,6 @@ static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_input\n");
-
return vpfe_get_app_input_index(vpfe, index);
}
@@ -1739,8 +1622,6 @@ static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
u32 input, output;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1796,9 +1677,6 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe,
- "vpfe_s_input: index: %d\n", index);
-
return vpfe_set_input(vpfe, index);
}
@@ -1807,8 +1685,6 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_querystd\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
@@ -1824,12 +1700,14 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
struct vpfe_subdev_info *sdinfo;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_std\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
+ /* if trying to set the same std then nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1853,8 +1731,6 @@ static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_g_std\n");
-
sdinfo = vpfe->current_subdev;
if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
return -ENODATA;
@@ -1872,8 +1748,6 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
{
struct v4l2_rect image_win;
- vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
-
vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
vpfe->field_off = image_win.height * image_win.width;
}
@@ -1957,6 +1831,29 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
+static void vpfe_return_all_buffers(struct vpfe_device *vpfe,
+ enum vb2_buffer_state state)
+{
+ struct vpfe_cap_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ if (vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state);
+
+ if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state);
+
+ vpfe->cur_frm = NULL;
+ vpfe->next_frm = NULL;
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
/*
* vpfe_start_streaming : Starts the DMA engine for streaming
* @vb: ptr to vb2_buffer
@@ -1965,7 +1862,6 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
- struct vpfe_cap_buffer *buf, *tmp;
struct vpfe_subdev_info *sdinfo;
unsigned long flags;
unsigned long addr;
@@ -1980,6 +1876,9 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
vpfe_attach_irq(vpfe);
+ vpfe->stopping = false;
+ init_completion(&vpfe->capture_stop);
+
if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
vpfe_ccdc_config_raw(&vpfe->ccdc);
else
@@ -2008,11 +1907,8 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err:
- list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- }
-
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED);
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
return ret;
}
@@ -2027,11 +1923,15 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
struct vpfe_subdev_info *sdinfo;
- unsigned long flags;
int ret;
vpfe_pcr_enable(&vpfe->ccdc, 0);
+ /* Wait for the last frame to be captured */
+ vpfe->stopping = true;
+ wait_for_completion_timeout(&vpfe->capture_stop,
+ msecs_to_jiffies(250));
+
vpfe_detach_irq(vpfe);
sdinfo = vpfe->current_subdev;
@@ -2040,27 +1940,7 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
/* release all active buffers */
- spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
- if (vpfe->cur_frm == vpfe->next_frm) {
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- } else {
- if (vpfe->cur_frm != NULL)
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- if (vpfe->next_frm != NULL)
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&vpfe->dma_queue)) {
- vpfe->next_frm = list_entry(vpfe->dma_queue.next,
- struct vpfe_cap_buffer, list);
- list_del(&vpfe->next_frm->list);
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR);
}
static int vpfe_g_pixelaspect(struct file *file, void *priv,
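
The stop path above pairs the new stopping flag with a completion:
vpfe_stop_streaming() raises the flag and waits up to 250 ms, and
vpfe_isr() completes once the frame in flight has finished, so stream-off
cannot hang on a wedged capture. A stripped-down sketch of the handshake
with hypothetical names:

	#include <linux/completion.h>
	#include <linux/interrupt.h>
	#include <linux/jiffies.h>

	struct capture {
		bool stopping;			/* set by the stop path */
		struct completion capture_stop;	/* init at start_streaming */
	};

	static irqreturn_t capture_irq(int irq, void *arg)
	{
		struct capture *cap = arg;

		/* ... per-frame handling ... */
		if (cap->stopping) {
			cap->stopping = false;
			complete(&cap->capture_stop);	/* last frame done */
		}
		return IRQ_HANDLED;
	}

	static void capture_stop_streaming(struct capture *cap)
	{
		cap->stopping = true;
		/* bounded wait: do not hang if no interrupt ever arrives */
		wait_for_completion_timeout(&cap->capture_stop,
					    msecs_to_jiffies(250));
	}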
@@ -2068,8 +1948,6 @@ static int vpfe_g_pixelaspect(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_pixelaspect\n");
-
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
return -EINVAL;
@@ -2128,6 +2006,7 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_rect cr = vpfe->crop;
struct v4l2_rect r = s->r;
+ u32 bpp;
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
@@ -2153,10 +2032,12 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
s->r = vpfe->crop = r;
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp);
vpfe->fmt.fmt.pix.width = r.width;
vpfe->fmt.fmt.pix.height = r.height;
- vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.bytesperline =
+ vpfe_ccdc_get_line_length(&vpfe->ccdc);
vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
vpfe->fmt.fmt.pix.height;
@@ -2172,8 +2053,6 @@ static long vpfe_ioctl_default(struct file *file, void *priv,
struct vpfe_device *vpfe = video_drvdata(file);
int ret;
- vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
-
if (!valid_prio) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
@@ -2279,10 +2158,10 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
struct vpfe_device, v4l2_dev);
struct v4l2_subdev_mbus_code_enum mbus_code;
struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt;
+ int ret = 0;
bool found = false;
- int i, j;
-
- vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+ int i, j, k;
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
if (vpfe->cfg->asd[i]->match.fwnode ==
@@ -2302,27 +2181,37 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
- /* setup the supported formats & indexes */
- for (j = 0, i = 0; ; ++j) {
- struct vpfe_fmt *fmt;
- int ret;
-
+ vpfe->num_active_fmt = 0;
+	for (j = 0, i = 0; ret != -EINVAL; ++j) {
memset(&mbus_code, 0, sizeof(mbus_code));
mbus_code.index = j;
mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
- NULL, &mbus_code);
+ NULL, &mbus_code);
if (ret)
- break;
-
- fmt = find_format_by_code(mbus_code.code);
- if (!fmt)
continue;
- fmt->supported = true;
- fmt->index = i++;
+ vpfe_dbg(3, vpfe,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, j);
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (mbus_code.code != fmt->code)
+ continue;
+ vpfe->active_fmt[i] = fmt;
+ vpfe_dbg(3, vpfe,
+ "matched fourcc: %s code: %04x idx: %d\n",
+ print_fourcc(fmt->fourcc), mbus_code.code, i);
+ vpfe->num_active_fmt = ++i;
+ }
}
+ if (!i) {
+ vpfe_err(vpfe, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
return 0;
}
@@ -2605,8 +2494,6 @@ static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe = platform_get_drvdata(pdev);
- vpfe_dbg(2, vpfe, "vpfe_remove\n");
-
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vpfe->notifier);
@@ -2653,22 +2540,21 @@ static int vpfe_suspend(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full suspend if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Save VPFE context */
- vpfe_save_context(ccdc);
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
- /* Disable CCDC */
- vpfe_pcr_enable(ccdc, 0);
- vpfe_config_enable(ccdc, 0);
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
- /* Disable both master and slave clock */
- pm_runtime_put_sync(dev);
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+ }
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -2710,19 +2596,18 @@ static int vpfe_resume(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- /* Enable both master and slave clock */
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full resume if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Restore VPFE context */
- vpfe_restore_context(ccdc);
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
- vpfe_config_enable(ccdc, 0);
- pm_runtime_put_sync(dev);
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+ }
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 4678285f34c6..05ee37db0273 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 - 2014 Texas Instruments, Inc.
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef AM437X_VPFE_H
@@ -23,6 +11,7 @@
#include <linux/am437x-vpfe.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/i2c.h>
@@ -214,6 +203,25 @@ struct vpfe_ccdc {
u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
};
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * @fourcc: V4L2 pixel format code
+ * @code: V4L2 media bus format code
+ * @bitsperpixel: Bits per pixel over the bus
+ */
+struct vpfe_fmt {
+ u32 fourcc;
+ u32 code;
+ u32 bitsperpixel;
+};
+
+/*
+ * When formats[] is modified, make sure to adjust this value too.
+ * Expect compile-time warnings if VPFE_NUM_FORMATS is smaller than
+ * the number of elements in formats[].
+ */
+#define VPFE_NUM_FORMATS 10
+
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
@@ -249,8 +257,11 @@ struct vpfe_device {
struct vpfe_cap_buffer *next_frm;
/* Used to store pixel format */
struct v4l2_format fmt;
- /* Used to store current bytes per pixel based on current format */
- unsigned int bpp;
+ /* Used to keep a reference to the current vpfe_fmt */
+ struct vpfe_fmt *current_vpfe_fmt;
+ struct vpfe_fmt *active_fmt[VPFE_NUM_FORMATS];
+ unsigned int num_active_fmt;
+
/*
* used when IMP is chained to store the crop window which
* is different from the image window
@@ -270,6 +281,8 @@ struct vpfe_device {
*/
u32 field_off;
struct vpfe_ccdc ccdc;
+ int stopping;
+ struct completion capture_stop;
};
#endif /* AM437X_VPFE_H */
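
The VPFE_NUM_FORMATS comment above leans on compile-time warnings when
active_fmt[] is sized too small. A stricter, purely hypothetical
alternative would be a static assert next to the formats[] table in the
.c file, failing the build outright instead of warning:

	#include <linux/build_bug.h>
	#include <linux/kernel.h>

	/* hypothetical guard, not part of this patch */
	static_assert(ARRAY_SIZE(formats) <= VPFE_NUM_FORMATS,
		      "formats[] has outgrown VPFE_NUM_FORMATS");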
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
index 0746c48ec23f..63ecdca3b908 100644
--- a/drivers/media/platform/am437x/am437x-vpfe_regs.h
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI AM437x Image Sensor Interface Registers
*
@@ -5,15 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef AM437X_VPFE_REGS_H
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index eb12f3793062..d8593cb2ae84 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -606,6 +606,16 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
+ /*
+ * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
+ * are disabled in the VE_INTERRUPT_CTRL register so clear them to
+	 * are disabled in the VE_INTERRUPT_CTRL register, so clear them to
+ */
+ if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
+ sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
+ if (sts & VE_INTERRUPT_FRAME_COMPLETE)
+ sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
+
return sts ? IRQ_NONE : IRQ_HANDLED;
}
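
The hunk above follows the ack-what-you-serviced pattern: every
recognised bit is cleared from the local status word, and IRQ_NONE is
returned only when unknown bits remain, which lets the IRQ core detect a
screaming line. A sketch with hypothetical event bits and a hypothetical
status accessor:

	#include <linux/bits.h>
	#include <linux/interrupt.h>

	#define EVT_FRAME	BIT(0)
	#define EVT_CAPTURE	BIT(1)

	static u32 demo_read_status(void *arg);	/* hypothetical */

	static irqreturn_t demo_irq(int irq, void *arg)
	{
		u32 sts = demo_read_status(arg);

		if (sts & EVT_FRAME) {
			/* ... service the frame ... */
			sts &= ~EVT_FRAME;
		}
		if (sts & EVT_CAPTURE)
			sts &= ~EVT_CAPTURE;	/* ack only, no work needed */

		/* anything still set is not ours: let the core know */
		return sts ? IRQ_NONE : IRQ_HANDLED;
	}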
@@ -614,7 +624,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
int i;
int hsync_counter = 0;
int vsync_counter = 0;
- u32 sts;
+ u32 sts, ctrl;
for (i = 0; i < NUM_POLARITY_CHECKS; ++i) {
sts = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
@@ -629,30 +639,29 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
hsync_counter++;
}
- if (hsync_counter < 0 || vsync_counter < 0) {
- u32 ctrl = 0;
+ ctrl = aspeed_video_read(video, VE_CTRL);
- if (hsync_counter < 0) {
- ctrl = VE_CTRL_HSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_HSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_HSYNC_POS_POL;
- }
-
- if (vsync_counter < 0) {
- ctrl = VE_CTRL_VSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_VSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_VSYNC_POS_POL;
- }
+ if (hsync_counter < 0) {
+ ctrl |= VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_HSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_HSYNC_POS_POL;
+ }
- if (ctrl)
- aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ if (vsync_counter < 0) {
+ ctrl |= VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_VSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_VSYNC_POS_POL;
}
+
+ aspeed_video_write(video, VE_CTRL, ctrl);
}
static bool aspeed_video_alloc_buf(struct aspeed_video *video,
@@ -741,6 +750,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
}
set_bit(VIDEO_RES_DETECT, &video->flags);
+ aspeed_video_update(video, VE_CTRL,
+ VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0);
aspeed_video_enable_mode_detect(video);
rc = wait_event_interruptible_timeout(video->wait,
@@ -1646,7 +1657,8 @@ static int aspeed_video_probe(struct platform_device *pdev)
{
int rc;
struct resource *res;
- struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL);
+ struct aspeed_video *video =
+ devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
if (!video)
return -ENOMEM;
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 31ace114eda1..be9ec59774d6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
*/
for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
unsigned int idx = find_first_zero_bit(&lanes_used,
- sizeof(lanes_used));
+ csi2rx->max_lanes);
set_bit(idx, &lanes_used);
reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
}
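
The one-liner above fixes a bits-versus-bytes mix-up: the second argument
of find_first_zero_bit() is a limit in bits, while sizeof(lanes_used)
yields bytes (8 on 64-bit), silently capping the search at bit 7. A
minimal sketch of the corrected call, with a hypothetical helper name:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	static unsigned int pick_free_lane(unsigned long *lanes_used,
					   unsigned int max_lanes)
	{
		/* the limit counts bits, so pass the real lane count */
		unsigned int idx = find_first_zero_bit(lanes_used, max_lanes);

		if (idx < max_lanes)
			set_bit(idx, lanes_used);
		return idx;	/* == max_lanes when every lane is taken */
	}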
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index 5b17d3a31896..42d2c2cd9a78 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -8,10 +8,12 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <media/cec-notifier.h>
#include <media/cec-pin.h>
struct cec_gpio {
struct cec_adapter *adap;
+ struct cec_notifier *notifier;
struct device *dev;
struct gpio_desc *cec_gpio;
@@ -173,9 +175,17 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
static int cec_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device *hdmi_dev;
struct cec_gpio *cec;
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
int ret;
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
+ if (PTR_ERR(hdmi_dev) == -EPROBE_DEFER)
+ return PTR_ERR(hdmi_dev);
+ if (IS_ERR(hdmi_dev))
+ caps |= CEC_CAP_PHYS_ADDR;
+
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
@@ -196,8 +206,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
return PTR_ERR(cec->v5_gpio);
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
- cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
- CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ cec, pdev->name, caps);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
@@ -205,7 +214,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
cec->adap->name, cec);
if (ret)
- return ret;
+ goto del_adap;
cec_gpio_disable_irq(cec->adap);
@@ -218,7 +227,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"hpd-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
if (cec->v5_gpio) {
@@ -230,23 +239,37 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"v5-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
+ if (!IS_ERR(hdmi_dev)) {
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto del_adap;
+ }
}
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto unreg_notifier;
+
platform_set_drvdata(pdev, cec);
return 0;
+
+unreg_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
+del_adap:
+ cec_delete_adapter(cec->adap);
+ return ret;
}
static int cec_gpio_remove(struct platform_device *pdev)
{
struct cec_gpio *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
}
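
The probe rework above is the usual goto-unwind idiom: each acquisition
that can fail jumps to a label that releases everything obtained so far,
in reverse order, so a single teardown sequence covers all error paths.
The shape, as a generic sketch with hypothetical acquire/release pairs:

	static int demo_probe(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;	/* nothing to undo yet */

		ret = acquire_b();
		if (ret)
			goto err_a;

		ret = acquire_c();
		if (ret)
			goto err_b;

		return 0;

	err_b:
		release_b();
	err_a:
		release_a();
		return ret;
	}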
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 73222c0615c0..94fb4d2ecc43 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -933,7 +933,8 @@ static int coda_g_selection(struct file *file, void *fh,
rsel = &r;
/* fallthrough */
case V4L2_SEL_TGT_CROP:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ ctx->inst_type == CODA_INST_DECODER)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -942,7 +943,8 @@ static int coda_g_selection(struct file *file, void *fh,
/* fallthrough */
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ ctx->inst_type == CODA_INST_ENCODER)
return -EINVAL;
break;
default:
@@ -1084,16 +1086,16 @@ static int coda_decoder_cmd(struct file *file, void *fh,
switch (dc->cmd) {
case V4L2_DEC_CMD_START:
- mutex_lock(&ctx->bitstream_mutex);
mutex_lock(&dev->coda_mutex);
+ mutex_lock(&ctx->bitstream_mutex);
coda_bitstream_flush(ctx);
- mutex_unlock(&dev->coda_mutex);
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_clear_last_buffer_dequeued(dst_vq);
ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
+ mutex_unlock(&dev->coda_mutex);
break;
case V4L2_DEC_CMD_STOP:
stream_end = false;
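
The reordering above establishes a lock hierarchy: dev->coda_mutex is now
taken before ctx->bitstream_mutex and the two are released in reverse, so
every path nests the locks the same way and an ABBA deadlock cannot form.
The shape, as a sketch:

	#include <linux/mutex.h>

	static void flush_under_locks(struct mutex *dev_lock,
				      struct mutex *ctx_lock)
	{
		mutex_lock(dev_lock);		/* outer lock first ... */
		mutex_lock(ctx_lock);		/* ... then the inner one */

		/* ... work that needs both locks held ... */

		mutex_unlock(ctx_lock);		/* release in reverse order */
		mutex_unlock(dev_lock);
	}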
@@ -2387,6 +2389,7 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->mem_ops = &vb2_dma_contig_memops;
return coda_queue_init(priv, dst_vq);
@@ -2959,8 +2962,6 @@ static int coda_probe(struct platform_device *pdev)
else
return -EINVAL;
- spin_lock_init(&dev->irqlock);
-
dev->dev = &pdev->dev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(dev->clk_per)) {
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 848bf1da401e..9f226140b486 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -86,7 +86,6 @@ struct coda_dev {
struct gen_pool *iram_pool;
struct coda_aux_buf iram;
- spinlock_t irqlock;
struct mutex dev_mutex;
struct mutex coda_mutex;
struct workqueue_struct *workqueue;
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index 4a3b3810fd89..f048e8994785 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -314,7 +314,8 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
return 0;
out_probe_notify:
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
+ cros_ec_cec->adap);
out_probe_adapter:
cec_delete_adapter(cros_ec_cec->adap);
return ret;
@@ -335,7 +336,8 @@ static int cros_ec_cec_remove(struct platform_device *pdev)
return ret;
}
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
+ cros_ec_cec->adap);
cec_unregister_adapter(cros_ec_cec->adap);
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 378cc302e1f8..d2cbcdca0463 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -313,7 +313,7 @@ static int isp_video_release(struct file *file)
ivc->streaming = 0;
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
if (v4l2_fh_is_singular_file(file)) {
fimc_pipeline_call(&ivc->ve, close);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a838189d4490..9aaf3b8060d5 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1457,12 +1457,12 @@ static int fimc_md_probe(struct platform_device *pdev)
ret = v4l2_device_register(dev, &fmd->v4l2_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
- return ret;
+ goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err_md;
+ goto err_v4l2dev;
ret = fimc_md_get_pinctrl(fmd);
if (ret < 0) {
@@ -1519,9 +1519,10 @@ err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
fimc_md_put_clocks(fmd);
+err_v4l2dev:
+ v4l2_device_unregister(&fmd->v4l2_dev);
err_md:
media_device_cleanup(&fmd->media_dev);
- v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
index 3b39e875292e..891533060d49 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -662,34 +662,27 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_ao_cec_g12a_regmap_conf);
if (IS_ERR(ao_cec->regmap)) {
ret = PTR_ERR(ao_cec->regmap);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
&meson_ao_cec_g12a_cec_regmap_conf);
if (IS_ERR(ao_cec->regmap_cec)) {
ret = PTR_ERR(ao_cec->regmap_cec);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -699,45 +692,52 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
if (IS_ERR(ao_cec->oscin)) {
dev_err(&pdev->dev, "oscin clock request failed\n");
ret = PTR_ERR(ao_cec->oscin);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = meson_ao_cec_g12a_setup_clk(ao_cec);
if (ret)
- goto out_probe_notify;
+ goto out_probe_adapter;
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
device_reset_optional(&pdev->dev);
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_core_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_core_clk;
+ goto out_probe_notify;
/* Setup Hardware */
regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
return 0;
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
+
out_probe_core_clk:
clk_disable_unprepare(ao_cec->core);
-out_probe_notify:
- cec_notifier_cec_adap_unregister(ao_cec->notify);
-
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
@@ -752,7 +752,7 @@ static int meson_ao_cec_g12a_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_notifier_cec_adap_unregister(ao_cec->notify);
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index 64ed549bf012..09aff82c3773 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -624,20 +624,13 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -647,20 +640,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ao_cec->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(ao_cec->core);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
@@ -674,9 +667,16 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->pdev = pdev;
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_clk;
+ goto out_probe_notify;
/* Setup Hardware */
writel_relaxed(CEC_GEN_CNTL_RESET,
@@ -684,12 +684,12 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
return 0;
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
+
out_probe_clk:
clk_disable_unprepare(ao_cec->core);
-out_probe_notify:
- cec_notifier_cec_adap_unregister(ao_cec->notify);
-
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
@@ -704,7 +704,7 @@ static int meson_ao_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_notifier_cec_adap_unregister(ao_cec->notify);
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 26a55c3e807e..858727824889 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -284,7 +284,7 @@ static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
fmt = &mtk_video_formats[k];
if (fmt->fourcc == pixelformat) {
mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt.fourcc, pixelformat);
+ dst_q_data->fmt->fourcc, pixelformat);
dst_q_data->fmt = fmt;
return;
}
@@ -841,12 +841,20 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
+ /*
+ * Setting OUTPUT format after OUTPUT buffers are allocated is invalid
+ * if using the stateful API.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
}
+ /*
+ * Setting CAPTURE format after CAPTURE buffers are allocated is
+ * invalid.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
mtk_v4l2_err("cap_q_ctx buffers already requested");
@@ -865,6 +873,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
fmt = mtk_vdec_find_format(f);
}
}
+ if (fmt == NULL)
+ return -EINVAL;
q_data->fmt = fmt;
vidioc_try_fmt(f, q_data->fmt);
@@ -873,10 +883,10 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
q_data->coded_width = pix_mp->width;
q_data->coded_height = pix_mp->height;
- ctx->colorspace = f->fmt.pix_mp.colorspace;
- ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
- ctx->quantization = f->fmt.pix_mp.quantization;
- ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ ctx->xfer_func = pix_mp->xfer_func;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 00d090df11bb..944771ee5f5c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -253,13 +253,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res == NULL) {
- dev_err(&pdev->dev, "get memory resource failed.");
- ret = -ENXIO;
- goto err_res;
- }
- dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR((__force void *)dev->reg_base[i])) {
ret = PTR_ERR((__force void *)dev->reg_base[i]);
goto err_res;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 49aa85a9bb5a..50048c170b99 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -283,7 +283,6 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_H264;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 63a8708ce682..6011fdd60a22 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -402,7 +402,6 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP8;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5066c283d86d..24c1f0bf2147 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -793,7 +793,6 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP9;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
if (vpu_dec_init(&inst->vpu)) {
mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 3f38cc4509ef..70abfd4cd4b9 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -25,10 +25,16 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
}
/*
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len : length of ipi message
+ * @priv: callback private data which is passed by decoder when register.
+ *
* This function runs in interrupt context and it means there's an IPI MSG
* from VPU.
*/
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
{
struct vdec_vpu_ipi_ack *msg = data;
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
@@ -102,6 +108,7 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
mtk_vcodec_debug_enter(vpu);
init_waitqueue_head(&vpu->wq);
+ vpu->handler = vpu_dec_ipi_handler;
err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
if (err != 0) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index b76f717e4fd7..f779b0676fbd 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -76,13 +76,4 @@ int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
*/
int vpu_dec_reset(struct vdec_vpu_inst *vpu);
-/**
- * vpu_dec_ipi_handler - Handler for VPU ipi message.
- *
- * @data: ipi message
- * @len : length of ipi message
- * @priv: callback private data which is passed by decoder when register.
- */
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
-
#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index cc2ff40d060d..a768707abb94 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -273,7 +273,7 @@ int vpu_ipi_register(struct platform_device *pdev,
return -EPROBE_DEFER;
}
- if (id >= 0 && id < IPI_MAX && handler) {
+ if (id < IPI_MAX && handler) {
ipi_desc = vpu->ipi_desc;
ipi_desc[id].name = name;
ipi_desc[id].handler = handler;
@@ -398,7 +398,7 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
handler = vpu->wdt.handler;
- if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
+ if (id < VPU_RST_MAX && wdt_reset) {
dev_dbg(vpu->dev, "wdt register id %d\n", id);
mutex_lock(&vpu->vpu_mutex);
handler[id].reset_func = wdt_reset;
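
Both hunks above drop the "id >= 0" half of the range check because id is
unsigned, so the comparison is always true (and draws a -Wtype-limits
warning); "id < MAX" alone covers the whole valid range. A tiny
standalone illustration:

	#include <stdio.h>

	static int in_range(unsigned int id, unsigned int max)
	{
		return id < max;	/* "id >= 0 && id < max" is identical */
	}

	int main(void)
	{
		printf("%d %d\n", in_range(0, 4), in_range(5, 4));	/* 1 0 */
		return 0;
	}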
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index e6eff512a8a1..07312a2fab24 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -239,6 +240,14 @@ static int venus_probe(struct platform_device *pdev)
if (IS_ERR(core->base))
return PTR_ERR(core->base);
+ core->video_path = of_icc_get(dev, "video-mem");
+ if (IS_ERR(core->video_path))
+ return PTR_ERR(core->video_path);
+
+ core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
+ if (IS_ERR(core->cpucfg_path))
+ return PTR_ERR(core->cpucfg_path);
+
core->irq = platform_get_irq(pdev, 0);
if (core->irq < 0)
return core->irq;
@@ -273,6 +282,10 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
+ if (ret)
+ return ret;
+
ret = hfi_create(core, &venus_core_ops);
if (ret)
return ret;
@@ -355,6 +368,9 @@ static int venus_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ icc_put(core->video_path);
+ icc_put(core->cpucfg_path);
+
v4l2_device_unregister(&core->v4l2_dev);
return ret;
@@ -427,10 +443,11 @@ static const struct venus_resources msm8916_res = {
};
static const struct freq_tbl msm8996_freq_table[] = {
- { 1944000, 490000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 150000000 }, /* 1080p @ 60 */
- { 244800, 75000000 }, /* 1080p @ 30 */
+ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
+ { 972000, 520000000 }, /* 4k UHD @ 30 */
+ { 489600, 346666667 }, /* 1080p @ 60 */
+ { 244800, 150000000 }, /* 1080p @ 30 */
+ { 108000, 75000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8996_reg_preset[] = {
@@ -464,9 +481,40 @@ static const struct freq_tbl sdm845_freq_table[] = {
{ 244800, 100000000 }, /* 1920x1080@30 */
};
+static const struct codec_freq_data sdm845_codec_freq_data[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
+};
+
+static const struct bw_tbl sdm845_bw_table_enc[] = {
+ { 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */
+ { 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */
+ { 489600, 723000, 0, 973000, 0 }, /* 1920x1080@60 */
+ { 244800, 370000, 0, 495000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sdm845_bw_table_dec[] = {
+ { 2073600, 3929000, 0, 5551000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1987000, 0, 2797000, 0 }, /* 4096x2160@30 */
+ { 489600, 1040000, 0, 1298000, 0 }, /* 1920x1080@60 */
+ { 244800, 530000, 0, 659000, 0 }, /* 1920x1080@30 */
+};
+
static const struct venus_resources sdm845_res = {
.freq_tbl = sdm845_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .bw_tbl_enc = sdm845_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
+ .bw_tbl_dec = sdm845_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
+ .codec_freq_data = sdm845_codec_freq_data,
+ .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.max_load = 3110400, /* 4096x2160@90 */
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 922cb7e64bfa..11585fb3cae3 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -26,12 +26,33 @@ struct reg_val {
u32 value;
};
+struct codec_freq_data {
+ u32 pixfmt;
+ u32 session_type;
+ unsigned long vpp_freq;
+ unsigned long vsp_freq;
+};
+
+struct bw_tbl {
+ u32 mbs_per_sec;
+ u32 avg;
+ u32 peak;
+ u32 avg_10bit;
+ u32 peak_10bit;
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
unsigned int freq_tbl_size;
+ const struct bw_tbl *bw_tbl_enc;
+ unsigned int bw_tbl_enc_size;
+ const struct bw_tbl *bw_tbl_dec;
+ unsigned int bw_tbl_dec_size;
const struct reg_val *reg_tbl;
unsigned int reg_tbl_size;
+ const struct codec_freq_data *codec_freq_data;
+ unsigned int codec_freq_data_size;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
enum hfi_version hfi_version;
@@ -115,6 +136,8 @@ struct venus_core {
struct clk *core1_clk;
struct clk *core0_bus_clk;
struct clk *core1_bus_clk;
+ struct icc_path *video_path;
+ struct icc_path *cpucfg_path;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -208,6 +231,12 @@ struct venus_buffer {
struct list_head ref_list;
};
+struct clock_data {
+ u32 core_id;
+ unsigned long freq;
+ const struct codec_freq_data *codec_freq_data;
+};
+
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
enum venus_dec_state {
@@ -288,6 +317,7 @@ struct venus_inst {
struct list_head list;
struct mutex lock;
struct venus_core *core;
+ struct clock_data clk_data;
struct list_head dpbbufs;
struct list_head internalbufs;
struct list_head registeredbufs;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 1ad96c25ab09..a172f1ac0b35 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
+#include <linux/interconnect.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -388,12 +389,91 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
return mbs_per_sec;
}
-int venus_helper_load_scale_clocks(struct venus_core *core)
+static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
{
+ const struct venus_resources *res = inst->core->res;
+ const struct bw_tbl *bw_tbl;
+ unsigned int num_rows, i;
+
+ *avg = 0;
+ *peak = 0;
+
+ if (mbs == 0)
+ return;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
+ num_rows = res->bw_tbl_enc_size;
+ bw_tbl = res->bw_tbl_enc;
+ } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ num_rows = res->bw_tbl_dec_size;
+ bw_tbl = res->bw_tbl_dec;
+ } else {
+ return;
+ }
+
+ if (!bw_tbl || num_rows == 0)
+ return;
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs > bw_tbl[i].mbs_per_sec)
+ break;
+
+ if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
+ *avg = bw_tbl[i].avg_10bit;
+ *peak = bw_tbl[i].peak_10bit;
+ } else {
+ *avg = bw_tbl[i].avg;
+ *peak = bw_tbl[i].peak;
+ }
+ }
+}
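
For reference, a standalone model of the mbs_to_bw() scan above, using the sdm845 decoder rows from core.c (peak and 10-bit columns omitted for brevity). The table is sorted by descending mbs_per_sec, so the loop keeps overwriting the result with the last row whose capacity still covers the requested load, then breaks:

#include <stdio.h>

struct bw_row { unsigned int mbs_per_sec, avg; };

/* sdm845 decoder rows from the patch above */
static const struct bw_row dec_tbl[] = {
	{ 2073600, 3929000 },
	{ 1036800, 1987000 },
	{  489600, 1040000 },
	{  244800,  530000 },
};

static unsigned int mbs_to_avg_bw(unsigned int mbs)
{
	unsigned int i, avg = 0;

	for (i = 0; i < sizeof(dec_tbl) / sizeof(dec_tbl[0]); i++) {
		if (mbs > dec_tbl[i].mbs_per_sec)
			break;
		avg = dec_tbl[i].avg;	/* last row that still covers mbs */
	}
	return avg;
}

int main(void)
{
	printf("1080p30 (244800 mbs): %u\n", mbs_to_avg_bw(244800)); /* 530000 */
	printf("between rows (600000): %u\n", mbs_to_avg_bw(600000)); /* 1987000 */
	return 0;
}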
+
+static int load_scale_bw(struct venus_core *core)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ mbs_per_sec = load_per_instance(inst);
+ mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
+ total_avg += avg;
+ total_peak += peak;
+ }
+ mutex_unlock(&core->lock);
+
+ dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
+ total_avg, total_peak);
+
+ return icc_set_bw(core->video_path, total_avg, total_peak);
+}
+
+static int set_clk_freq(struct venus_core *core, unsigned long freq)
+{
+ struct clk *clk = core->clks[0];
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int scale_clocks(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
unsigned long freq = table[0].freq;
- struct clk *clk = core->clks[0];
struct device *dev = core->dev;
u32 mbs_per_sec;
unsigned int i;
@@ -419,23 +499,124 @@ int venus_helper_load_scale_clocks(struct venus_core *core)
set_freq:
- ret = clk_set_rate(clk, freq);
- if (ret)
- goto err;
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core0_clk, freq);
- if (ret)
- goto err;
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core1_clk, freq);
- if (ret)
- goto err;
+ return 0;
+}
+
+static unsigned long calculate_inst_freq(struct venus_inst *inst,
+ unsigned long filled_len)
+{
+ unsigned long vpp_freq = 0, vsp_freq = 0;
+ u32 fps = (u32)inst->fps;
+ u32 mbs_per_sec;
+
+ mbs_per_sec = load_per_instance(inst) / fps;
+
+ vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
+ /* 21 / 20 is overhead factor */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
+
+ /* 10 / 7 is overhead factor */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
+ else
+ vsp_freq += ((fps * filled_len * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
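
A worked, standalone version of the calculate_inst_freq() arithmetic for a hypothetical sdm845 1080p H.264 decode (vpp_freq = 200 cycles per macroblock per sdm845_codec_freq_data above); load_per_instance() multiplies the per-frame macroblock count by fps, so dividing by fps recovers macroblocks per frame:

#include <stdio.h>

int main(void)
{
	/* 1080 lines align up to 1088, hence 120 x 68 = 8160 macroblocks */
	unsigned long mbs_per_frame = (1920 / 16) * (1088 / 16);
	unsigned long vpp_cycles_per_mb = 200;	/* sdm845 H.264 decode row */
	unsigned long vpp_freq;

	vpp_freq = mbs_per_frame * vpp_cycles_per_mb;	/* 1632000 */
	vpp_freq += vpp_freq / 20;	/* the 21/20 overhead factor */

	printf("vpp component: %lu\n", vpp_freq);	/* 1713600 */
	/* the vsp component is built the same way from vsp_freq = 10 plus
	 * a bitstream term, and the larger of the two is requested */
	return 0;
}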
+
+static int scale_clocks_v4(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct device *dev = core->dev;
+ unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
+ unsigned long filled_len = 0;
+ struct venus_buffer *buf, *n;
+ struct vb2_buffer *vb;
+ int i, ret;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ vb = &buf->vb.vb2_buf;
+ filled_len = max(filled_len, vb2_get_plane_payload(vb, 0));
+ }
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
+ return 0;
+
+ freq = calculate_inst_freq(inst, filled_len);
+ inst->clk_data.freq = freq;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
+ freq_core1 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
+ freq_core2 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ freq_core1 += inst->clk_data.freq;
+ freq_core2 += inst->clk_data.freq;
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ freq = max(freq_core1, freq_core2);
+
+	if (freq > table[0].freq) {
+		dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
+			 freq, table[0].freq);
+		freq = table[0].freq;
+		goto set_freq;
+ }
+
+	for (i = num_rows - 1; i >= 0; i--) {
+ if (freq <= table[i].freq) {
+ freq = table[i].freq;
+ break;
+ }
+ }
+
+set_freq:
+
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
+
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
return 0;
+}
-err:
- dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
- return ret;
+int venus_helper_load_scale_clocks(struct venus_inst *inst)
+{
+ if (IS_V4(inst->core))
+ return scale_clocks_v4(inst);
+
+ return scale_clocks(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
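
A standalone model of the per-core aggregation in scale_clocks_v4(): instances pinned to core 1 or core 2 add their frequency to that core's total, VIDC_CORE_ID_3 (both cores, stated here as an assumption about the HFI encoding) adds to both, and the busier core drives set_clk_freq(). Instance values are illustrative:

#include <stdio.h>

enum { CORE_ID_1 = 1, CORE_ID_2 = 2, CORE_ID_3 = 3 };

struct inst_demo { int core_id; unsigned long freq; };

int main(void)
{
	struct inst_demo insts[] = {
		{ CORE_ID_1, 100000000UL },
		{ CORE_ID_2, 150000000UL },
		{ CORE_ID_3,  50000000UL },	/* counted on both cores */
	};
	unsigned long core1 = 0, core2 = 0, freq;
	unsigned int i;

	for (i = 0; i < sizeof(insts) / sizeof(insts[0]); i++) {
		if (insts[i].core_id == CORE_ID_1) {
			core1 += insts[i].freq;
		} else if (insts[i].core_id == CORE_ID_2) {
			core2 += insts[i].freq;
		} else if (insts[i].core_id == CORE_ID_3) {
			core1 += insts[i].freq;
			core2 += insts[i].freq;
		}
	}
	freq = core1 > core2 ? core1 : core2;
	printf("core1 %lu core2 %lu -> request %lu Hz\n", core1, core2, freq);
	return 0;
}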
@@ -541,6 +722,8 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
put_ts_metadata(inst, vbuf);
+
+ venus_helper_load_scale_clocks(inst);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
@@ -809,6 +992,7 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct hfi_videocores_usage_type cu;
+ inst->clk_data.core_id = usage;
if (!IS_V4(inst->core))
return 0;
@@ -818,6 +1002,36 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
}
EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst)
+{
+ const struct codec_freq_data *data;
+ unsigned int i, data_size;
+ u32 pixfmt;
+ int ret = 0;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ data = inst->core->res->codec_freq_data;
+ data_size = inst->core->res->codec_freq_data_size;
+ pixfmt = inst->session_type == VIDC_SESSION_TYPE_DEC ?
+ inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
+
+ for (i = 0; i < data_size; i++) {
+ if (data[i].pixfmt == pixfmt &&
+ data[i].session_type == inst->session_type) {
+ inst->clk_data.codec_freq_data = &data[i];
+ break;
+ }
+ }
+
+ if (!inst->clk_data.codec_freq_data)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_init_codec_freq_data);
+
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs)
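
The lookup in venus_helper_init_codec_freq_data() above is a plain linear search keyed on the (pixfmt, session_type) pair, failing with -EINVAL when the resource table lacks an entry. A minimal standalone sketch with stand-in table entries:

#include <stdio.h>
#include <string.h>

struct cfd { const char *fourcc; int is_enc; unsigned long vpp, vsp; };

static const struct cfd table[] = {
	{ "H264", 1, 675, 10 },
	{ "H264", 0, 200, 10 },
	{ "VP9",  0, 200, 10 },
};

static const struct cfd *find_cfd(const char *fourcc, int is_enc)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].fourcc, fourcc) &&
		    table[i].is_enc == is_enc)
			return &table[i];
	return NULL;	/* the caller turns this into -EINVAL */
}

int main(void)
{
	const struct cfd *c = find_cfd("H264", 0);

	printf("H264 decode vpp cycles/mb: %lu\n", c ? c->vpp : 0);
	return 0;
}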
@@ -1140,7 +1354,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -1193,7 +1407,6 @@ EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
- struct venus_core *core = inst->core;
int ret;
ret = venus_helper_intbufs_alloc(inst);
@@ -1204,7 +1417,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_bufs_free;
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_load_res(inst);
if (ret)
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 01f411b12f81..34dcd0c13f06 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,6 +33,7 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height,
u32 buftype);
int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst);
int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
@@ -59,7 +60,7 @@ int venus_helper_intbufs_free(struct venus_inst *inst);
int venus_helper_intbufs_realloc(struct venus_inst *inst);
int venus_helper_queue_dpb_bufs(struct venus_inst *inst);
int venus_helper_unregister_bufs(struct venus_inst *inst);
-int venus_helper_load_scale_clocks(struct venus_core *core);
+int venus_helper_load_scale_clocks(struct venus_inst *inst);
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 7129a2aea09a..0d8855014ab3 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -1472,6 +1472,7 @@ static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
+ u32 ctrl_status;
bool val;
int ret;
@@ -1487,6 +1488,10 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ goto power_off;
+
/*
* Power collapse sequence for Venus 3xx and 4xx versions:
* 1. Check for ARM9 and video core to be idle by checking WFI bit
@@ -1511,6 +1516,7 @@ static int venus_suspend_3xx(struct venus_core *core)
if (ret)
return ret;
+power_off:
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 7f4660555ddb..8feaf5daece9 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -685,6 +685,10 @@ static int vdec_session_init(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
return 0;
deinit:
hfi_session_deinit(inst);
@@ -864,7 +868,7 @@ reconfigure:
if (ret)
goto free_dpb_bufs;
- venus_helper_load_scale_clocks(inst->core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_continue(inst);
if (ret)
@@ -1072,7 +1076,7 @@ static void vdec_session_release(struct venus_inst *inst)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
mutex_unlock(&inst->lock);
@@ -1412,9 +1416,6 @@ static const struct v4l2_file_operations vdec_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int vdec_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 1b7fb2d5887c..453edf966d4f 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -842,6 +842,10 @@ static int venc_init_session(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
ret = venc_set_properties(inst);
if (ret)
goto deinit;
@@ -1235,9 +1239,6 @@ static const struct v4l2_file_operations venc_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int venc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 6993484ff0f3..7440c8965d27 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -983,6 +983,7 @@ static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
static const struct rvin_info rcar_info_r8a7795 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7795_routes,
@@ -1077,6 +1078,7 @@ static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
static const struct rvin_info rcar_info_r8a7796 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7796_routes,
@@ -1121,6 +1123,7 @@ static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
static const struct rvin_info rcar_info_r8a77965 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77965_routes,
@@ -1168,6 +1171,7 @@ static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
static const struct rvin_info rcar_info_r8a77980 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77980_routes,
@@ -1184,6 +1188,7 @@ static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
static const struct rvin_info rcar_info_r8a77990 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77990_routes,
@@ -1196,6 +1201,7 @@ static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
static const struct rvin_info rcar_info_r8a77995 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77995_routes,
@@ -1207,6 +1213,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a774b1",
+ .data = &rcar_info_r8a77965,
+ },
+ {
.compatible = "renesas,vin-r8a774c0",
.data = &rcar_info_r8a77990,
},
@@ -1282,7 +1292,6 @@ static int rcar_vin_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rvin_dev *vin;
- struct resource *mem;
int irq, ret;
vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
@@ -1301,11 +1310,7 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (attr)
vin->info = attr->data;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL)
- return -EINVAL;
-
- vin->base = devm_ioremap_resource(vin->dev, mem);
+ vin->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vin->base))
return PTR_ERR(vin->base);
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index c14af1b929df..faa9fb23a2e9 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -1082,6 +1082,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.data = &rcar_csi2_info_r8a7796,
},
{
+ .compatible = "renesas,r8a774b1-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
.compatible = "renesas,r8a774c0-csi2",
.data = &rcar_csi2_info_r8a77990,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 3cb29b2e0b2b..cf9029efeb04 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -118,6 +118,7 @@
#define VNDMR_ABIT (1 << 2)
#define VNDMR_DTMD_YCSEP (1 << 1)
#define VNDMR_DTMD_ARGB (1 << 0)
+#define VNDMR_DTMD_YCSEP_420 (3 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -529,12 +530,17 @@ static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
{
+ unsigned int crop_height;
u32 xs, ys;
/* Set scaling coefficient */
+ crop_height = vin->crop.height;
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
+ crop_height *= 2;
+
ys = 0;
- if (vin->crop.height != vin->compose.height)
- ys = (4096 * vin->crop.height) / vin->compose.height;
+ if (crop_height != vin->compose.height)
+ ys = (4096 * crop_height) / vin->compose.height;
rvin_write(vin, ys, VNYS_REG);
xs = 0;
@@ -557,16 +563,11 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
rvin_write(vin, 0, VNSPPOC_REG);
rvin_write(vin, 0, VNSLPOC_REG);
rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
+
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
- break;
- default:
+ else
rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
- break;
- }
vin_dbg(vin,
"Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
@@ -583,21 +584,9 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
/* Set Start/End Pixel/Line Pre-Clip */
rvin_write(vin, vin->crop.left, VNSPPRC_REG);
rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1, VNELPRC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- rvin_write(vin, vin->crop.top / 2, VNSLPRC_REG);
- rvin_write(vin, (vin->crop.top + vin->crop.height) / 2 - 1,
- VNELPRC_REG);
- break;
- default:
- rvin_write(vin, vin->crop.top, VNSLPRC_REG);
- rvin_write(vin, vin->crop.top + vin->crop.height - 1,
- VNELPRC_REG);
- break;
- }
/* TODO: Add support for the UDS scaler. */
if (vin->info->model != RCAR_GEN3)
@@ -641,6 +630,9 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
+ case V4L2_FIELD_ALTERNATE:
+ vnmc = VNMC_IM_ODD_EVEN;
+ break;
default:
vnmc = VNMC_IM_ODD;
break;
@@ -710,11 +702,13 @@ static int rvin_setup(struct rvin_dev *vin)
* Output format
*/
switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
rvin_write(vin,
- ALIGN(vin->format.width * vin->format.height, 0x80),
- VNUVAOF_REG);
- dmr = VNDMR_DTMD_YCSEP;
+ ALIGN(vin->format.bytesperline * vin->format.height,
+ 0x80), VNUVAOF_REG);
+ dmr = vin->format.pixelformat == V4L2_PIX_FMT_NV12 ?
+ VNDMR_DTMD_YCSEP_420 : VNDMR_DTMD_YCSEP;
output_is_yuv = true;
break;
case V4L2_PIX_FMT_YUYV:
@@ -799,6 +793,18 @@ static bool rvin_capture_active(struct rvin_dev *vin)
return rvin_read(vin, VNMS_REG) & VNMS_CA;
}
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ /* If FS is set it is an Even field. */
+ if (vnms & VNMS_FS)
+ return V4L2_FIELD_BOTTOM;
+ return V4L2_FIELD_TOP;
+ }
+
+ return vin->format.field;
+}
+
static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
{
const struct rvin_video_format *fmt;
@@ -948,7 +954,7 @@ static irqreturn_t rvin_irq(int irq, void *data)
/* Capture frame */
if (vin->queue_buf[slot]) {
- vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
vin->queue_buf[slot]->sequence = vin->sequence;
vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf,
@@ -1075,6 +1081,7 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
break;
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index cbc1c07f0a96..9e2e63ffcc47 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -31,6 +31,10 @@
static const struct rvin_video_format rvin_formats[] = {
{
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_NV16,
.bpp = 1,
},
@@ -72,6 +76,9 @@ const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32)
return NULL;
+ if (pixelformat == V4L2_PIX_FMT_NV12 && !vin->info->nv12)
+ return NULL;
+
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
if (rvin_formats[i].fourcc == pixelformat)
return rvin_formats + i;
@@ -90,17 +97,29 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
if (WARN_ON(!fmt))
return -EINVAL;
- align = pix->pixelformat == V4L2_PIX_FMT_NV16 ? 0x20 : 0x10;
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ align = 0x20;
+ break;
+ default:
+ align = 0x10;
+ break;
+ }
return ALIGN(pix->width, align) * fmt->bpp;
}
static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
{
- if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ return pix->bytesperline * pix->height * 3 / 2;
+ case V4L2_PIX_FMT_NV16:
return pix->bytesperline * pix->height * 2;
-
- return pix->bytesperline * pix->height;
+ default:
+ return pix->bytesperline * pix->height;
+ }
}
static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
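
A standalone check of the rvin_format_sizeimage() cases above: NV12 adds a half-height chroma plane (factor 3/2), NV16 a full-height one (factor 2):

#include <stdio.h>

static unsigned int sizeimage(unsigned int bpl, unsigned int h, int fmt)
{
	switch (fmt) {
	case 0: /* NV12 */
		return bpl * h * 3 / 2;
	case 1: /* NV16 */
		return bpl * h * 2;
	default:
		return bpl * h;
	}
}

int main(void)
{
	/* 1920x1080 with bpp = 1 for the luma plane */
	printf("NV12: %u bytes\n", sizeimage(1920, 1080, 0)); /* 3110400 */
	printf("NV16: %u bytes\n", sizeimage(1920, 1080, 1)); /* 4147200 */
	return 0;
}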
@@ -117,23 +136,23 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- break;
case V4L2_FIELD_ALTERNATE:
- /*
- * Driver does not (yet) support outputting ALTERNATE to a
- * userspace. It does support outputting INTERLACED so use
- * the VIN hardware to combine the two fields.
- */
- pix->field = V4L2_FIELD_INTERLACED;
- pix->height *= 2;
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
break;
}
- /* HW limit width to a multiple of 32 (2^5) for NV16 else 2 (2^1) */
- walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+	/* HW limits width to a multiple of 32 (2^5) for NV12/NV16, else 2 (2^1) */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ walign = 5;
+ break;
+ default:
+ walign = 1;
+ break;
+ }
/* Limit to VIN capabilities */
v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
@@ -164,22 +183,32 @@ static int rvin_reset_format(struct rvin_dev *vin)
v4l2_fill_pix_format(&vin->format, &fmt.format);
+ vin->src_rect.top = 0;
+ vin->src_rect.left = 0;
+ vin->src_rect.width = vin->format.width;
+ vin->src_rect.height = vin->format.height;
+
+ /* Make use of the hardware interlacer by default. */
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ vin->format.field = V4L2_FIELD_INTERLACED;
+ vin->format.height *= 2;
+ }
+
rvin_format_align(vin, &vin->format);
- vin->source.top = 0;
- vin->source.left = 0;
- vin->source.width = vin->format.width;
- vin->source.height = vin->format.height;
+ vin->crop = vin->src_rect;
- vin->crop = vin->source;
- vin->compose = vin->source;
+ vin->compose.top = 0;
+ vin->compose.left = 0;
+ vin->compose.width = vin->format.width;
+ vin->compose.height = vin->format.height;
return 0;
}
static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_pix_format *pix,
- struct v4l2_rect *crop, struct v4l2_rect *compose)
+ struct v4l2_rect *src_rect)
{
struct v4l2_subdev *sd = vin_to_source(vin);
struct v4l2_subdev_pad_config *pad_cfg;
@@ -208,21 +237,15 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
+ ret = 0;
v4l2_fill_pix_format(pix, &format.format);
- if (crop) {
- crop->top = 0;
- crop->left = 0;
- crop->width = pix->width;
- crop->height = pix->height;
-
- /*
- * If source is ALTERNATE the driver will use the VIN hardware
- * to INTERLACE it. The crop height then needs to be doubled.
- */
- if (pix->field == V4L2_FIELD_ALTERNATE)
- crop->height *= 2;
+ if (src_rect) {
+ src_rect->top = 0;
+ src_rect->left = 0;
+ src_rect->width = pix->width;
+ src_rect->height = pix->height;
}
if (field != V4L2_FIELD_ANY)
@@ -232,17 +255,10 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
pix->height = height;
rvin_format_align(vin, pix);
-
- if (compose) {
- compose->top = 0;
- compose->left = 0;
- compose->width = pix->width;
- compose->height = pix->height;
- }
done:
v4l2_subdev_free_pad_config(pad_cfg);
- return 0;
+ return ret;
}
static int rvin_querycap(struct file *file, void *priv,
@@ -262,29 +278,34 @@ static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
{
struct rvin_dev *vin = video_drvdata(file);
- return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL,
- NULL);
+ return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
}
static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_rect crop, compose;
+ struct v4l2_rect fmt_rect, src_rect;
int ret;
if (vb2_is_busy(&vin->queue))
return -EBUSY;
ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
- &crop, &compose);
+ &src_rect);
if (ret)
return ret;
vin->format = f->fmt.pix;
- vin->crop = crop;
- vin->compose = compose;
- vin->source = crop;
+
+ fmt_rect.top = 0;
+ fmt_rect.left = 0;
+ fmt_rect.width = vin->format.width;
+ fmt_rect.height = vin->format.height;
+
+ v4l2_rect_map_inside(&vin->crop, &src_rect);
+ v4l2_rect_map_inside(&vin->compose, &fmt_rect);
+ vin->src_rect = src_rect;
return 0;
}
@@ -302,12 +323,22 @@ static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index >= ARRAY_SIZE(rvin_formats))
- return -EINVAL;
-
- f->pixelformat = rvin_formats[f->index].fourcc;
+ struct rvin_dev *vin = video_drvdata(file);
+ unsigned int i;
+ int matched;
+
+ matched = -1;
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
+ if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
+ matched++;
+
+ if (matched == f->index) {
+ f->pixelformat = rvin_formats[i].fourcc;
+ return 0;
+ }
+ }
- return 0;
+ return -EINVAL;
}
static int rvin_g_selection(struct file *file, void *fh,
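
A standalone model of the rewritten enumeration loop above: f->index now counts only formats the current VIN model supports, so NV12 never appears on hardware without the nv12 flag. The support predicate here is a stand-in:

#include <stdio.h>
#include <string.h>

static const char *formats[] = { "NV12", "NV16", "YUYV" };

static int enum_fmt(unsigned int index, int has_nv12, const char **out)
{
	unsigned int i;
	int matched = -1;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
		if (has_nv12 || strcmp(formats[i], "NV12"))
			matched++;	/* this entry is visible */

		if (matched == (int)index) {
			*out = formats[i];
			return 0;
		}
	}
	return -1;	/* -EINVAL in the driver */
}

int main(void)
{
	const char *f;

	if (!enum_fmt(0, 0, &f))	/* no NV12 support */
		printf("index 0 -> %s\n", f);	/* NV16 */
	return 0;
}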
@@ -322,8 +353,8 @@ static int rvin_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = s->r.top = 0;
- s->r.width = vin->source.width;
- s->r.height = vin->source.height;
+ s->r.width = vin->src_rect.width;
+ s->r.height = vin->src_rect.height;
break;
case V4L2_SEL_TGT_CROP:
s->r = vin->crop;
@@ -365,21 +396,22 @@ static int rvin_s_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP:
/* Can't crop outside of source input */
max_rect.top = max_rect.left = 0;
- max_rect.width = vin->source.width;
- max_rect.height = vin->source.height;
+ max_rect.width = vin->src_rect.width;
+ max_rect.height = vin->src_rect.height;
v4l2_rect_map_inside(&r, &max_rect);
- v4l_bound_align_image(&r.width, 6, vin->source.width, 0,
- &r.height, 2, vin->source.height, 0, 0);
+ v4l_bound_align_image(&r.width, 6, vin->src_rect.width, 0,
+ &r.height, 2, vin->src_rect.height, 0, 0);
- r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height);
- r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);
+ r.top = clamp_t(s32, r.top, 0,
+ vin->src_rect.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, vin->src_rect.width - r.width);
vin->crop = s->r = r;
vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
r.width, r.height, r.left, r.top,
- vin->source.width, vin->source.height);
+ vin->src_rect.width, vin->src_rect.height);
break;
case V4L2_SEL_TGT_COMPOSE:
/* Make sure compose rect fits inside output format */
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index e562c2ff21ec..a36b0824f81d 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,6 +126,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @nv12: support outputting the NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
* @routes: list of possible routes from the CSI-2 receivers to
@@ -134,6 +135,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool nv12;
unsigned int max_width;
unsigned int max_height;
@@ -176,7 +178,7 @@ struct rvin_info {
*
* @crop: active cropping
* @compose: active composing
- * @source: active size of the video source
+ * @src_rect: active size of the video source
* @std: active video standard of the video source
*
* @alpha: Alpha component to fill in for supported pixel formats
@@ -215,7 +217,7 @@ struct rvin_dev {
struct v4l2_rect crop;
struct v4l2_rect compose;
- struct v4l2_rect source;
+ struct v4l2_rect src_rect;
v4l2_std_id std;
unsigned int alpha;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 608e5217ccd5..0f267a237b42 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index cb93a13e1777..97bed45360f0 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2369,7 +2369,7 @@ static int fdp1_probe(struct platform_device *pdev)
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
case FD1_IP_M3N:
- dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
break;
case FD1_IP_E3:
dprintk(fdp1, "FDP1 Version R-Car E3\n");
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index 1a65532dc36d..e80204f5720c 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -553,7 +553,7 @@ void camif_hw_disable_capture(struct camif_vp *vp)
void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
{
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 6ddcc35b0bbd..2a3e7ffefe0a 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -239,7 +239,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
@@ -250,7 +250,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
{
struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 8dbbd5f2a40a..ac2162235cef 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1236,7 +1236,6 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
}
result->sof = sof;
result->sof_len = sof_len;
- result->components = components;
return true;
}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 3bc52f83f5bc..4407fe775afa 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -190,7 +190,6 @@ struct s5p_jpeg_marker {
* @dqt: DQT markers' positions relative to the buffer beginning
* @sof: SOF0 marker's position relative to the buffer beginning
* @sof_len: SOF0 marker's payload length (without length field itself)
- * @components: number of image components
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
@@ -202,7 +201,6 @@ struct s5p_jpeg_q_data {
struct s5p_jpeg_marker dqt;
u32 sof;
u32 sof_len;
- u32 components;
u32 size;
};
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index 9cd60fe1867c..2ff62a488b27 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -671,10 +671,11 @@ static int secocec_probe(struct platform_device *pdev)
return ret;
err_notifier:
- cec_notifier_cec_adap_unregister(secocec->notifier);
+ cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
err_delete_adapter:
cec_delete_adapter(secocec->cec_adap);
err:
+ release_region(BRA_SMB_BASE_ADDR, 7);
dev_err(dev, "%s device probe failed\n", dev_name(dev));
return ret;
@@ -692,7 +693,7 @@ static int secocec_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "IR disabled");
}
- cec_notifier_cec_adap_unregister(secocec->notifier);
+ cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
cec_unregister_adapter(secocec->cec_adap);
release_region(BRA_SMB_BASE_ADDR, 7);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index e90f1ba30574..675b5f2b4c2e 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -651,8 +651,7 @@ static int bdisp_release(struct file *file)
dev_dbg(bdisp->dev, "%s\n", __func__);
- if (mutex_lock_interruptible(&bdisp->lock))
- return -ERESTARTSYS;
+ mutex_lock(&bdisp->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
index 8f0ddcbeed9d..301fa10f419b 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -225,36 +225,16 @@ static const struct debugfs_reg32 fei_sys_regs[] = {
void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
{
- struct dentry *root;
- struct dentry *file;
-
- root = debugfs_create_dir("c8sectpfe", NULL);
- if (!root)
- goto err;
-
- fei->root = root;
-
fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
if (!fei->regset)
- goto err;
+ return;
fei->regset->regs = fei_sys_regs;
fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
fei->regset->base = fei->io;
- file = debugfs_create_regset32("registers", S_IRUGO, root,
- fei->regset);
- if (!file) {
- dev_err(fei->dev,
- "%s not able to create 'registers' debugfs\n"
- , __func__);
- goto err;
- }
-
- return;
-
-err:
- debugfs_remove_recursive(root);
+ fei->root = debugfs_create_dir("c8sectpfe", NULL);
+ debugfs_create_regset32("registers", S_IRUGO, fei->root, fei->regset);
}
void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
index 8118c7365d3f..f0c73e64b586 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -359,7 +359,7 @@ static int stih_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
@@ -370,7 +370,7 @@ static int stih_cec_remove(struct platform_device *pdev)
{
struct stih_cec *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index a05127529006..3878cb4efdc2 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -1,2 +1,3 @@
obj-y += sun4i-csi/
obj-y += sun6i-csi/
+obj-y += sun8i-di/
diff --git a/drivers/media/platform/sunxi/sun8i-di/Makefile b/drivers/media/platform/sunxi/sun8i-di/Makefile
new file mode 100644
index 000000000000..109f7e5442b7
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUN8I_DEINTERLACE) += sun8i-di.o
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
new file mode 100644
index 000000000000..aaa1dc159ac2
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner sun8i deinterlacer with scaler driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * Based on vim2m driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "sun8i-di.h"
+
+#define FLAG_SIZE (DEINTERLACE_MAX_WIDTH * DEINTERLACE_MAX_HEIGHT / 4)
+
+static u32 deinterlace_formats[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline u32 deinterlace_read(struct deinterlace_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline void deinterlace_write(struct deinterlace_dev *dev,
+ u32 reg, u32 value)
+{
+ writel(value, dev->base + reg);
+}
+
+static inline void deinterlace_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 bits)
+{
+ writel(readl(dev->base + reg) | bits, dev->base + reg);
+}
+
+static inline void deinterlace_clr_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 clr, u32 set)
+{
+ u32 val = readl(dev->base + reg);
+
+ val &= ~clr;
+ val |= set;
+
+ writel(val, dev->base + reg);
+}
+
+static void deinterlace_device_run(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *dev = ctx->dev;
+ u32 size, stride, width, height, val;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned int hstep, vstep;
+ dma_addr_t addr;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE,
+ DEINTERLACE_MOD_ENABLE_EN);
+
+ if (ctx->field) {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag1_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag2_buf_dma);
+ } else {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag2_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag1_buf_dma);
+ }
+ deinterlace_write(dev, DEINTERLACE_FLAG_LINE_STRIDE, 0x200);
+
+ width = ctx->src_fmt.width;
+ height = ctx->src_fmt.height;
+ stride = ctx->src_fmt.bytesperline;
+ size = stride * height;
+
+ addr = vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR2, 0);
+
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE1, stride);
+
+ deinterlace_write(dev, DEINTERLACE_CH0_IN_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_IN_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ val = DEINTERLACE_IN_FMT_FMT(DEINTERLACE_IN_FMT_YUV420) |
+ DEINTERLACE_IN_FMT_MOD(DEINTERLACE_MODE_UV_COMBINED);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_IN_FMT, val);
+
+ if (ctx->prev)
+ addr = vb2_dma_contig_plane_dma_addr(&ctx->prev->vb2_buf, 0);
+
+ deinterlace_write(dev, DEINTERLACE_PRELUMA, addr);
+ deinterlace_write(dev, DEINTERLACE_PRECHROMA, addr + size);
+
+ val = DEINTERLACE_OUT_FMT_FMT(DEINTERLACE_OUT_FMT_YUV420SP);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_OUT_FMT, val);
+
+ width = ctx->dst_fmt.width;
+ height = ctx->dst_fmt.height;
+ stride = ctx->dst_fmt.bytesperline;
+ size = stride * height;
+
+ deinterlace_write(dev, DEINTERLACE_CH0_OUT_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_OUT_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE1, stride);
+
+ addr = vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR2, 0);
+
+ hstep = (ctx->src_fmt.width << 16) / ctx->dst_fmt.width;
+ vstep = (ctx->src_fmt.height << 16) / ctx->dst_fmt.height;
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_FACT, vstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_REG_READY);
+
+ deinterlace_set_bits(dev, DEINTERLACE_INT_ENABLE,
+ DEINTERLACE_INT_ENABLE_WB_EN);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_WB_EN);
+}
+
+static int deinterlace_job_ready(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 1 &&
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 2;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+static irqreturn_t deinterlace_irq(int irq, void *data)
+{
+ struct deinterlace_dev *dev = data;
+ struct vb2_v4l2_buffer *src, *dst;
+ enum vb2_buffer_state state;
+ struct deinterlace_ctx *ctx;
+ unsigned int val;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_NONE;
+ }
+
+ val = deinterlace_read(dev, DEINTERLACE_INT_STATUS);
+ if (!(val & DEINTERLACE_INT_STATUS_WRITEBACK))
+ return IRQ_NONE;
+
+ deinterlace_write(dev, DEINTERLACE_INT_ENABLE, 0);
+ deinterlace_set_bits(dev, DEINTERLACE_INT_STATUS,
+ DEINTERLACE_INT_STATUS_WRITEBACK);
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE, 0);
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START, 0);
+
+ val = deinterlace_read(dev, DEINTERLACE_STATUS);
+ if (val & DEINTERLACE_STATUS_WB_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(dst, state);
+
+ if (ctx->field != ctx->first_field || ctx->aborting) {
+ ctx->field = ctx->first_field;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+ ctx->prev = src;
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ ctx->field = !ctx->first_field;
+ deinterlace_device_run(ctx);
+ }
+
+ return IRQ_HANDLED;
+}
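
A standalone model of the field sequencing in deinterlace_irq(): each interlaced source buffer is run through the engine twice, once per field, producing two progressive capture frames, which is also why deinterlace_job_ready() demands two destination buffers per source buffer:

#include <stdio.h>

int main(void)
{
	int first_field = 0;	/* 0: top field first, 1: bottom first */
	int field = first_field;
	int pass;

	for (pass = 1; pass <= 2; pass++) {
		printf("pass %d: write out field %d\n", pass, field);
		if (field != first_field) {
			printf("both fields done, release source buffer\n");
			field = first_field;	/* reset for the next frame */
		} else {
			field = !first_field;	/* schedule the second pass */
		}
	}
	return 0;
}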
+
+static void deinterlace_init(struct deinterlace_dev *dev)
+{
+ u32 val;
+ int i;
+
+ deinterlace_write(dev, DEINTERLACE_BYPASS,
+ DEINTERLACE_BYPASS_CSC);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE_CTRL,
+ DEINTERLACE_WB_LINE_STRIDE_CTRL_EN);
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_OUT_CTRL);
+ deinterlace_write(dev, DEINTERLACE_AGTH_SEL,
+ DEINTERLACE_AGTH_SEL_LINEBUF);
+
+ val = DEINTERLACE_CTRL_EN |
+ DEINTERLACE_CTRL_MODE_MIXED |
+ DEINTERLACE_CTRL_DIAG_INTP_EN |
+ DEINTERLACE_CTRL_TEMP_DIFF_EN;
+ deinterlace_write(dev, DEINTERLACE_CTRL, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_LUMA_TH,
+ DEINTERLACE_LUMA_TH_MIN_LUMA_MSK,
+ DEINTERLACE_LUMA_TH_MIN_LUMA(4));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_SPAT_COMP,
+ DEINTERLACE_SPAT_COMP_TH2_MSK,
+ DEINTERLACE_SPAT_COMP_TH2(5));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_TEMP_DIFF,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(5));
+
+ val = DEINTERLACE_DIAG_INTP_TH0(60) |
+ DEINTERLACE_DIAG_INTP_TH1(0) |
+ DEINTERLACE_DIAG_INTP_TH3(30);
+ deinterlace_write(dev, DEINTERLACE_DIAG_INTP, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
+ DEINTERLACE_CHROMA_DIFF_TH_MSK,
+ DEINTERLACE_CHROMA_DIFF_TH(5));
+
+ /* neutral filter coefficients */
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+
+ for (i = 0; i < 32; i++) {
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ }
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+}
+
+static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct deinterlace_ctx, fh);
+}
+
+static bool deinterlace_check_format(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(deinterlace_formats); i++)
+ if (deinterlace_formats[i] == pixelformat)
+ return true;
+
+ return false;
+}
+
+static void deinterlace_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int height = pix_fmt->height;
+ unsigned int width = pix_fmt->width;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+
+ width = clamp(width, DEINTERLACE_MIN_WIDTH,
+ DEINTERLACE_MAX_WIDTH);
+ height = clamp(height, DEINTERLACE_MIN_HEIGHT,
+ DEINTERLACE_MAX_HEIGHT);
+
+ bytesperline = ALIGN(width, 2);
+ /* luma */
+ sizeimage = bytesperline * height;
+ /* chroma */
+ sizeimage += bytesperline * height / 2;
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
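
A quick standalone model of deinterlace_prepare_format() above: dimensions are clamped to the hardware limits, the stride is the width aligned to 2 (one byte per luma pixel), and the 4:2:0 sizeimage is luma plus half-size chroma. The 2048x1100 limits are assumed from sun8i-di.h:

#include <stdio.h>

#define MIN_W 2U
#define MAX_W 2048U	/* assumed DEINTERLACE_MAX_WIDTH */
#define MIN_H 2U
#define MAX_H 1100U	/* assumed DEINTERLACE_MAX_HEIGHT */

static unsigned int clampu(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int w = 4096, h = 2160;	/* too large: gets clamped */
	unsigned int bpl, size;

	w = clampu(w, MIN_W, MAX_W);
	h = clampu(h, MIN_H, MAX_H);
	bpl = (w + 1) & ~1U;			/* ALIGN(width, 2) */
	size = bpl * h + bpl * h / 2;		/* luma + 4:2:0 chroma */

	printf("%ux%u bpl %u sizeimage %u\n", w, h, bpl, size);
	return 0;
}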
+
+static int deinterlace_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DEINTERLACE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DEINTERLACE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DEINTERLACE_NAME);
+
+ return 0;
+}
+
+static int deinterlace_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index < ARRAY_SIZE(deinterlace_formats)) {
+ f->pixelformat = deinterlace_formats[f->index];
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int deinterlace_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ if (!deinterlace_check_format(fsize->pixel_format))
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = DEINTERLACE_MIN_WIDTH;
+ fsize->stepwise.min_height = DEINTERLACE_MIN_HEIGHT;
+ fsize->stepwise.max_width = DEINTERLACE_MAX_WIDTH;
+ fsize->stepwise.max_height = DEINTERLACE_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_NONE)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+ .vidioc_querycap = deinterlace_querycap,
+
+ .vidioc_enum_framesizes = deinterlace_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_cap = deinterlace_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = deinterlace_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = deinterlace_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_out = deinterlace_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = deinterlace_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = deinterlace_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void deinterlace_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+
+ do {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, state);
+ } while (vbuf);
+
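+	/*
+	 * A source buffer may still be held back as the previous-field
+	 * reference; give it back as well.
+	 */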
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+}
+
+static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx->dev->dev;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "Failed to enable module\n");
+			pm_runtime_put_noidle(dev);
+
+			goto err_runtime_get;
+		}
+
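+		/* Start with the bottom field for BT content, top otherwise. */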
+ ctx->first_field =
+ ctx->src_fmt.field == V4L2_FIELD_INTERLACED_BT;
+ ctx->field = ctx->first_field;
+
+ ctx->prev = NULL;
+ ctx->aborting = 0;
+
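+		/*
+		 * The engine needs two small DMA-coherent flag buffers
+		 * while streaming; allocate them here and free them in
+		 * stop_streaming.
+		 */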
+ ctx->flag1_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag1_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag1_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem1;
+ }
+
+ ctx->flag2_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag2_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag2_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem2;
+ }
+ }
+
+ return 0;
+
+err_no_mem2:
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+err_no_mem1:
+ pm_runtime_put(dev);
+err_runtime_get:
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void deinterlace_stop_streaming(struct vb2_queue *vq)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ struct device *dev = ctx->dev->dev;
+
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag2_buf,
+ ctx->flag2_buf_dma);
+
+ pm_runtime_put(dev);
+ }
+
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops deinterlace_qops = {
+ .queue_setup = deinterlace_queue_setup,
+ .buf_prepare = deinterlace_buf_prepare,
+ .buf_queue = deinterlace_buf_queue,
+ .start_streaming = deinterlace_start_streaming,
+ .stop_streaming = deinterlace_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &deinterlace_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
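+	/*
+	 * Each interlaced source buffer produces two progressive frames,
+	 * so the capture queue needs at least two buffers ready per run.
+	 */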
+ dst_vq->min_buffers_needed = 2;
+ dst_vq->ops = &deinterlace_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int deinterlace_open(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ /* default output format */
+ ctx->src_fmt.pixelformat = deinterlace_formats[0];
+ ctx->src_fmt.field = V4L2_FIELD_INTERLACED;
+ ctx->src_fmt.width = 640;
+ ctx->src_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->src_fmt);
+
+ /* default capture format */
+ ctx->dst_fmt.pixelformat = deinterlace_formats[0];
+ ctx->dst_fmt.field = V4L2_FIELD_NONE;
+ ctx->dst_fmt.width = 640;
+ ctx->dst_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->dst_fmt);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &deinterlace_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_free;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int deinterlace_release(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = container_of(file->private_data,
+ struct deinterlace_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+ .owner = THIS_MODULE,
+ .open = deinterlace_open,
+ .release = deinterlace_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device deinterlace_video_device = {
+ .name = DEINTERLACE_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &deinterlace_fops,
+ .ioctl_ops = &deinterlace_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops deinterlace_m2m_ops = {
+ .device_run = deinterlace_device_run,
+ .job_ready = deinterlace_job_ready,
+ .job_abort = deinterlace_job_abort,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int irq, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = deinterlace_video_device;
+ dev->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev->dev, "Failed to get IRQ\n");
+
+		return irq ? irq : -ENXIO;
+ }
+
+ ret = devm_request_irq(dev->dev, irq, deinterlace_irq,
+ 0, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
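+	/* Configure DMA (mask, IOMMU mapping) from the device tree node. */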
+ ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->base)) {
+ dev_err(dev->dev, "Failed to map registers\n");
+
+ return PTR_ERR(dev->base);
+ }
+
+ dev->bus_clk = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->bus_clk)) {
+ dev_err(dev->dev, "Failed to get bus clock\n");
+
+ return PTR_ERR(dev->bus_clk);
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ dev_err(dev->dev, "Failed to get mod clock\n");
+
+ return PTR_ERR(dev->mod_clk);
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ dev_err(dev->dev, "Failed to get ram clock\n");
+
+ return PTR_ERR(dev->ram_clk);
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ dev_err(dev->dev, "Failed to get reset control\n");
+
+ return PTR_ERR(dev->rstc);
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register V4L2 device\n");
+
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ deinterlace_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->m2m_dev = v4l2_m2m_init(&deinterlace_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
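+	/*
+	 * Power is handled per streaming session: the module is resumed
+	 * in start_streaming and suspended again in stop_streaming.
+	 */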
+ pm_runtime_enable(dev->dev);
+
+ return 0;
+
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int deinterlace_runtime_resume(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+ int ret;
+
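+	/*
+	 * Claim the module clock at 300 MHz exclusively so that no other
+	 * consumer can change its rate while the engine is running.
+	 */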
+ ret = clk_set_rate_exclusive(dev->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev->dev, "Failed to set exclusive mod clock rate\n");
+
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev->bus_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable bus clock\n");
+
+		goto err_exclusive_rate;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable mod clock\n");
+
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable ram clock\n");
+
+ goto err_mod_clk;
+ }
+
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+		dev_err(dev->dev, "Failed to deassert reset\n");
+
+ goto err_ram_clk;
+ }
+
+ deinterlace_init(dev);
+
+ return 0;
+
+err_ram_clk:
+	clk_disable_unprepare(dev->ram_clk);
+err_mod_clk:
+	clk_disable_unprepare(dev->mod_clk);
+err_bus_clk:
+	clk_disable_unprepare(dev->bus_clk);
+err_exclusive_rate:
+	clk_rate_exclusive_put(dev->mod_clk);
+
+	return ret;
+}
+
+static int deinterlace_runtime_suspend(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->bus_clk);
+ clk_rate_exclusive_put(dev->mod_clk);
+
+ return 0;
+}
+
+static const struct of_device_id deinterlace_dt_match[] = {
+ { .compatible = "allwinner,sun8i-h3-deinterlace" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, deinterlace_dt_match);
+
+static const struct dev_pm_ops deinterlace_pm_ops = {
+ .runtime_resume = deinterlace_runtime_resume,
+ .runtime_suspend = deinterlace_runtime_suspend,
+};
+
+static struct platform_driver deinterlace_driver = {
+ .probe = deinterlace_probe,
+ .remove = deinterlace_remove,
+ .driver = {
+ .name = DEINTERLACE_NAME,
+ .of_match_table = deinterlace_dt_match,
+ .pm = &deinterlace_pm_ops,
+ },
+};
+module_platform_driver(deinterlace_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner Deinterlace driver");
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
new file mode 100644
index 000000000000..0254251d8687
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Allwinner Deinterlace driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#ifndef _SUN8I_DEINTERLACE_H_
+#define _SUN8I_DEINTERLACE_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define DEINTERLACE_NAME "sun8i-di"
+
+#define DEINTERLACE_MOD_ENABLE 0x00
+#define DEINTERLACE_MOD_ENABLE_EN BIT(0)
+
+#define DEINTERLACE_FRM_CTRL 0x04
+#define DEINTERLACE_FRM_CTRL_REG_READY BIT(0)
+#define DEINTERLACE_FRM_CTRL_WB_EN BIT(2)
+#define DEINTERLACE_FRM_CTRL_OUT_CTRL BIT(11)
+#define DEINTERLACE_FRM_CTRL_START BIT(16)
+#define DEINTERLACE_FRM_CTRL_COEF_ACCESS BIT(23)
+
+#define DEINTERLACE_BYPASS 0x08
+#define DEINTERLACE_BYPASS_CSC BIT(1)
+
+#define DEINTERLACE_AGTH_SEL 0x0c
+#define DEINTERLACE_AGTH_SEL_LINEBUF BIT(8)
+
+#define DEINTERLACE_LINT_CTRL 0x10
+#define DEINTERLACE_TRD_PRELUMA 0x1c
+#define DEINTERLACE_BUF_ADDR0 0x20
+#define DEINTERLACE_BUF_ADDR1 0x24
+#define DEINTERLACE_BUF_ADDR2 0x28
+
+#define DEINTERLACE_FIELD_CTRL 0x2c
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT(v) ((v) & 0xff)
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK (0xff)
+
+#define DEINTERLACE_TB_OFFSET0 0x30
+#define DEINTERLACE_TB_OFFSET1 0x34
+#define DEINTERLACE_TB_OFFSET2 0x38
+#define DEINTERLACE_TRD_PRECHROMA 0x3c
+#define DEINTERLACE_LINE_STRIDE0 0x40
+#define DEINTERLACE_LINE_STRIDE1 0x44
+#define DEINTERLACE_LINE_STRIDE2 0x48
+
+#define DEINTERLACE_IN_FMT 0x4c
+#define DEINTERLACE_IN_FMT_PS(v) ((v) & 3)
+#define DEINTERLACE_IN_FMT_FMT(v) (((v) & 7) << 4)
+#define DEINTERLACE_IN_FMT_MOD(v) (((v) & 7) << 8)
+
+#define DEINTERLACE_WB_ADDR0 0x50
+#define DEINTERLACE_WB_ADDR1 0x54
+#define DEINTERLACE_WB_ADDR2 0x58
+
+#define DEINTERLACE_OUT_FMT 0x5c
+#define DEINTERLACE_OUT_FMT_FMT(v) ((v) & 0xf)
+#define DEINTERLACE_OUT_FMT_PS(v) (((v) & 3) << 5)
+
+#define DEINTERLACE_INT_ENABLE 0x60
+#define DEINTERLACE_INT_ENABLE_WB_EN BIT(7)
+
+#define DEINTERLACE_INT_STATUS 0x64
+#define DEINTERLACE_INT_STATUS_WRITEBACK BIT(7)
+
+#define DEINTERLACE_STATUS 0x68
+#define DEINTERLACE_STATUS_COEF_STATUS BIT(11)
+#define DEINTERLACE_STATUS_WB_ERROR BIT(12)
+
+#define DEINTERLACE_CSC_COEF 0x70 /* 12 registers */
+
+#define DEINTERLACE_CTRL 0xa0
+#define DEINTERLACE_CTRL_EN BIT(0)
+#define DEINTERLACE_CTRL_FLAG_OUT_EN BIT(8)
+#define DEINTERLACE_CTRL_MODE_PASSTROUGH (0 << 16)
+#define DEINTERLACE_CTRL_MODE_WEAVE (1 << 16)
+#define DEINTERLACE_CTRL_MODE_BOB (2 << 16)
+#define DEINTERLACE_CTRL_MODE_MIXED (3 << 16)
+#define DEINTERLACE_CTRL_DIAG_INTP_EN BIT(24)
+#define DEINTERLACE_CTRL_TEMP_DIFF_EN BIT(25)
+
+#define DEINTERLACE_DIAG_INTP 0xa4
+#define DEINTERLACE_DIAG_INTP_TH0(v) ((v) & 0x7f)
+#define DEINTERLACE_DIAG_INTP_TH0_MSK (0x7f)
+#define DEINTERLACE_DIAG_INTP_TH1(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_DIAG_INTP_TH1_MSK (0x7f << 8)
+#define DEINTERLACE_DIAG_INTP_TH3(v) (((v) & 0xff) << 24)
+#define DEINTERLACE_DIAG_INTP_TH3_MSK (0xff << 24)
+
+#define DEINTERLACE_TEMP_DIFF 0xa8
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH(v) ((v) & 0x7f)
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH_MSK (0x7f)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK (0x7f << 8)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH(v) (((v) & 0x7ff) << 16)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH_MSK (0x7ff << 16)
+
+#define DEINTERLACE_LUMA_TH 0xac
+#define DEINTERLACE_LUMA_TH_MIN_LUMA(v) ((v) & 0xff)
+#define DEINTERLACE_LUMA_TH_MIN_LUMA_MSK (0xff)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA(v) (((v) & 0xff) << 8)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA_MSK (0xff << 8)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT_MSK (0xff << 16)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC(v) (((v) & 3) << 24)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC_MSK (3 << 24)
+
+#define DEINTERLACE_SPAT_COMP 0xb0
+#define DEINTERLACE_SPAT_COMP_TH2(v) ((v) & 0xff)
+#define DEINTERLACE_SPAT_COMP_TH2_MSK (0xff)
+#define DEINTERLACE_SPAT_COMP_TH3(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_SPAT_COMP_TH3_MSK (0xff << 16)
+
+#define DEINTERLACE_CHROMA_DIFF 0xb4
+#define DEINTERLACE_CHROMA_DIFF_TH(v) ((v) & 0xff)
+#define DEINTERLACE_CHROMA_DIFF_TH_MSK (0xff)
+#define DEINTERLACE_CHROMA_DIFF_LUMA(v) (((v) & 0x3f) << 16)
+#define DEINTERLACE_CHROMA_DIFF_LUMA_MSK (0x3f << 16)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA(v) (((v) & 0x3f) << 24)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA_MSK (0x3f << 24)
+
+#define DEINTERLACE_PRELUMA 0xb8
+#define DEINTERLACE_PRECHROMA 0xbc
+#define DEINTERLACE_TILE_FLAG0 0xc0
+#define DEINTERLACE_TILE_FLAG1 0xc4
+#define DEINTERLACE_FLAG_LINE_STRIDE 0xc8
+#define DEINTERLACE_FLAG_SEQ 0xcc
+
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL 0xd0
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL_EN BIT(0)
+
+#define DEINTERLACE_WB_LINE_STRIDE0 0xd4
+#define DEINTERLACE_WB_LINE_STRIDE1 0xd8
+#define DEINTERLACE_WB_LINE_STRIDE2 0xdc
+#define DEINTERLACE_TRD_CTRL 0xe0
+#define DEINTERLACE_TRD_BUF_ADDR0 0xe4
+#define DEINTERLACE_TRD_BUF_ADDR1 0xe8
+#define DEINTERLACE_TRD_BUF_ADDR2 0xec
+#define DEINTERLACE_TRD_TB_OFF0 0xf0
+#define DEINTERLACE_TRD_TB_OFF1 0xf4
+#define DEINTERLACE_TRD_TB_OFF2 0xf8
+#define DEINTERLACE_TRD_WB_STRIDE 0xfc
+#define DEINTERLACE_CH0_IN_SIZE 0x100
+#define DEINTERLACE_CH0_OUT_SIZE 0x104
+#define DEINTERLACE_CH0_HORZ_FACT 0x108
+#define DEINTERLACE_CH0_VERT_FACT 0x10c
+#define DEINTERLACE_CH0_HORZ_PHASE 0x110
+#define DEINTERLACE_CH0_VERT_PHASE0 0x114
+#define DEINTERLACE_CH0_VERT_PHASE1 0x118
+#define DEINTERLACE_CH0_HORZ_TAP0 0x120
+#define DEINTERLACE_CH0_HORZ_TAP1 0x124
+#define DEINTERLACE_CH0_VERT_TAP 0x128
+#define DEINTERLACE_CH1_IN_SIZE 0x200
+#define DEINTERLACE_CH1_OUT_SIZE 0x204
+#define DEINTERLACE_CH1_HORZ_FACT 0x208
+#define DEINTERLACE_CH1_VERT_FACT 0x20c
+#define DEINTERLACE_CH1_HORZ_PHASE 0x210
+#define DEINTERLACE_CH1_VERT_PHASE0 0x214
+#define DEINTERLACE_CH1_VERT_PHASE1 0x218
+#define DEINTERLACE_CH1_HORZ_TAP0 0x220
+#define DEINTERLACE_CH1_HORZ_TAP1 0x224
+#define DEINTERLACE_CH1_VERT_TAP 0x228
+#define DEINTERLACE_CH0_HORZ_COEF0 0x400 /* 32 registers */
+#define DEINTERLACE_CH0_HORZ_COEF1 0x480 /* 32 registers */
+#define DEINTERLACE_CH0_VERT_COEF 0x500 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF0 0x600 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF1 0x680 /* 32 registers */
+#define DEINTERLACE_CH1_VERT_COEF 0x700 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF0 0x800 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF1 0x880 /* 32 registers */
+#define DEINTERLACE_CH3_VERT_COEF 0x900 /* 32 registers */
+
+#define DEINTERLACE_MIN_WIDTH 2U
+#define DEINTERLACE_MIN_HEIGHT 2U
+#define DEINTERLACE_MAX_WIDTH 2048U
+#define DEINTERLACE_MAX_HEIGHT 1100U
+
+#define DEINTERLACE_MODE_UV_COMBINED 2
+
+#define DEINTERLACE_IN_FMT_YUV420 2
+
+#define DEINTERLACE_OUT_FMT_YUV420SP 13
+
+#define DEINTERLACE_PS_UVUV 0
+#define DEINTERLACE_PS_VUVU 1
+
+#define DEINTERLACE_IDENTITY_COEF 0x4000
+
+#define DEINTERLACE_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+struct deinterlace_ctx {
+ struct v4l2_fh fh;
+ struct deinterlace_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+
+ void *flag1_buf;
+ dma_addr_t flag1_buf_dma;
+
+ void *flag2_buf;
+ dma_addr_t flag2_buf_dma;
+
+ struct vb2_v4l2_buffer *prev;
+
+ unsigned int first_field;
+ unsigned int field;
+
+ int aborting;
+};
+
+struct deinterlace_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+
+ void __iomem *base;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+};
+
+#endif
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index a632602131f2..a99caac59f44 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -409,7 +409,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_adapter:
cec_delete_adapter(cec->adap);
err_clk:
@@ -423,7 +423,7 @@ static int tegra_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(cec->clk);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index eda2a5985da7..834114a4eebe 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -15,76 +15,96 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
#include "csc.h"
/*
- * 16 coefficients in the order:
+ * 12 coefficients in the order:
* a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
- * (we may need to pass non-default values from user space later on, we might
- * need to make the coefficient struct more easy to populate)
*/
-struct colorspace_coeffs {
- u16 sd[12];
- u16 hd[12];
+struct quantization {
+ u16 coeff[12];
};
-/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
-#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
-#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
-#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
-#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+struct colorspace {
+ struct quantization limited;
+ struct quantization full;
+};
+
+struct encoding_direction {
+ struct colorspace r601;
+ struct colorspace r709;
+};
+
+struct csc_coeffs {
+ struct encoding_direction y2r;
+ struct encoding_direction r2y;
+};
/* default colorspace coefficients */
-static struct colorspace_coeffs colorspace_coeffs[4] = {
- [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
- 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+static struct csc_coeffs csc_coeffs = {
+ .y2r = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ }
+ },
},
- {
- /* HDTV */
- 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
- 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ }
+ },
+ .full = {
+ { /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ }
+ },
},
},
- [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
- 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ .r2y = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
- },
- },
- [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
- 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
- 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
- },
- },
- [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
- 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* HDTV */
+				0x00BB, 0x0275, 0x003F, 0x1F99, 0x1EA5, 0x01C2,
+				0x01C2, 0x1E67, 0x1FD7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
},
+
};
void csc_dump_regs(struct csc_data *csc)
@@ -117,46 +137,114 @@ EXPORT_SYMBOL(csc_set_coeff_bypass);
* set the color space converter coefficient shadow register values
*/
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace)
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt)
{
u32 *csc_reg5 = csc_reg0 + 5;
u32 *shadow_csc = csc_reg0;
- struct colorspace_coeffs *sd_hd_coeffs;
u16 *coeff, *end_coeff;
- enum v4l2_colorspace yuv_colorspace;
- int sel = 0;
-
- /*
- * support only graphics data range(full range) for now, a control ioctl
- * would be nice here
- */
- /* Y2R */
- if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
- (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- src_colorspace == V4L2_COLORSPACE_REC709)) {
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_format_info *src_finfo, *dst_finfo;
+ enum v4l2_ycbcr_encoding src_ycbcr_enc, dst_ycbcr_enc;
+ enum v4l2_quantization src_quantization, dst_quantization;
+ u32 src_pixelformat, dst_pixelformat;
+
+ switch (src_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &src_fmt->fmt.pix;
+ src_pixelformat = pix->pixelformat;
+ src_ycbcr_enc = pix->ycbcr_enc;
+ src_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &src_fmt->fmt.pix_mp;
+ src_pixelformat = mp->pixelformat;
+ src_ycbcr_enc = mp->ycbcr_enc;
+ src_quantization = mp->quantization;
+ break;
+ }
+
+ switch (dst_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &dst_fmt->fmt.pix;
+ dst_pixelformat = pix->pixelformat;
+ dst_ycbcr_enc = pix->ycbcr_enc;
+ dst_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &dst_fmt->fmt.pix_mp;
+ dst_pixelformat = mp->pixelformat;
+ dst_ycbcr_enc = mp->ycbcr_enc;
+ dst_quantization = mp->quantization;
+ break;
+ }
+
+ src_finfo = v4l2_format_info(src_pixelformat);
+ dst_finfo = v4l2_format_info(dst_pixelformat);
+
+ if (v4l2_is_format_yuv(src_finfo) &&
+ v4l2_is_format_rgb(dst_finfo)) {
/* Y2R */
- sel = 1;
- yuv_colorspace = src_colorspace;
- } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- dst_colorspace == V4L2_COLORSPACE_REC709) &&
- src_colorspace == V4L2_COLORSPACE_SRGB) {
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ src_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (src_quantization == V4L2_QUANTIZATION_DEFAULT)
+ src_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r601.limited.coeff;
+ } else if (src_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r709.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ }
+ } else if (v4l2_is_format_rgb(src_finfo) &&
+ v4l2_is_format_yuv(dst_finfo)) {
/* R2Y */
- sel = 3;
- yuv_colorspace = dst_colorspace;
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ dst_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (dst_quantization == V4L2_QUANTIZATION_DEFAULT)
+ dst_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r601.limited.coeff;
+ } else if (dst_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r709.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ }
} else {
*csc_reg5 |= CSC_BYPASS;
return;
}
- sd_hd_coeffs = &colorspace_coeffs[sel];
-
- /* select between SD or HD coefficients */
- if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
- coeff = sd_hd_coeffs->sd;
- else
- coeff = sd_hd_coeffs->hd;
-
end_coeff = coeff + 12;
for (; coeff < end_coeff; coeff += 2)
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index de9a58af2ca8..af2e86bccf57 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -58,8 +58,8 @@ struct csc_data {
void csc_dump_regs(struct csc_data *csc);
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace);
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt);
+
struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 53d27cd6e10a..2e5148ae7a0f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -56,6 +56,11 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_C420,
.depth = 4,
},
+ [VPDMA_DATA_FMT_CB420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CB420,
+ .depth = 4,
+ },
[VPDMA_DATA_FMT_YCR422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YCR422,
@@ -759,7 +764,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word1: line_length = %d, xfer_height = %d\n",
dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
- pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
+ pr_debug("word2: start_addr = %x\n", dtd->start_addr);
pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
dtd_get_pkt_type(dtd),
@@ -825,7 +830,8 @@ void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
@@ -893,7 +899,8 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = chan_info[chan].num;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 28bc94129348..393fcbb3cb40 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -57,6 +57,7 @@ struct vpdma_data_format {
* line stride of source and dest
* buffers should be 16 byte aligned
*/
+#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
@@ -71,6 +72,7 @@ enum vpdma_yuv_formats {
VPDMA_DATA_FMT_C444,
VPDMA_DATA_FMT_C422,
VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_CB420,
VPDMA_DATA_FMT_YCR422,
VPDMA_DATA_FMT_YC444,
VPDMA_DATA_FMT_CRY422,
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c488609bc162..0bbee45338bd 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -92,6 +92,7 @@
#define DATA_TYPE_C444 0x4
#define DATA_TYPE_C422 0x5
#define DATA_TYPE_C420 0x6
+#define DATA_TYPE_CB420 0x16
#define DATA_TYPE_YC444 0x8
#define DATA_TYPE_YCB422 0x7
#define DATA_TYPE_YCR422 0x17
@@ -165,11 +166,11 @@ struct vpdma_dtd {
u32 xfer_length_height;
u32 w1;
};
- dma_addr_t start_addr;
+ u32 start_addr;
u32 pkt_ctl;
union {
u32 frame_width_height; /* inbound */
- dma_addr_t desc_write_addr; /* outbound */
+ u32 desc_write_addr; /* outbound */
};
union {
u32 start_h_v; /* inbound */
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 60b575bb44c4..65c2c048b018 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -52,7 +52,7 @@
#define MIN_W 32
#define MIN_H 32
#define MAX_W 2048
-#define MAX_H 1184
+#define MAX_H 2048
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
@@ -249,6 +249,14 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420],
+ },
+ },
+ {
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
@@ -311,14 +319,9 @@ static struct vpe_fmt vpe_formats[] = {
* there is one source queue and one destination queue for each m2m context.
*/
struct vpe_q_data {
- unsigned int width; /* frame width */
- unsigned int height; /* frame height */
- unsigned int nplanes; /* Current number of planes */
- unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
- enum v4l2_colorspace colorspace;
- enum v4l2_field field; /* supported field value */
+ /* current v4l2 format info */
+ struct v4l2_format format;
unsigned int flags;
- unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
struct v4l2_rect c_rect; /* crop/compose rectangle */
struct vpe_fmt *fmt; /* format info */
};
@@ -328,9 +331,14 @@ struct vpe_q_data {
#define Q_DATA_MODE_TILED BIT(1)
#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+#define Q_DATA_INTERLACED_SEQ_BT BIT(4)
+
+#define Q_IS_SEQ_XX (Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
- Q_DATA_INTERLACED_SEQ_TB)
+ Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
enum {
Q_DATA_SRC = 0,
@@ -338,20 +346,25 @@ enum {
};
/* find our format description corresponding to the passed v4l2_format */
-static struct vpe_fmt *find_format(struct v4l2_format *f)
+static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.
@@ -681,7 +694,8 @@ static void set_cfg_modes(struct vpe_ctx *ctx)
* Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
*/
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
cfg_mode = 0;
write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
@@ -696,7 +710,8 @@ static void set_line_modes(struct vpe_ctx *ctx)
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
int line_mode = 1;
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
line_mode = 0; /* double lines to line buffer */
/* regs for now */
@@ -741,11 +756,12 @@ static void set_src_registers(struct vpe_ctx *ctx)
static void set_dst_registers(struct vpe_ctx *ctx)
{
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ const struct v4l2_format_info *finfo;
u32 val = 0;
- if (clrspc == V4L2_COLORSPACE_SRGB) {
+ finfo = v4l2_format_info(fmt->fourcc);
+ if (v4l2_is_format_rgb(finfo)) {
val |= VPE_RGB_OUT_SELECT;
vpdma_set_bg_color(ctx->dev->vpdma,
(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
@@ -758,7 +774,8 @@ static void set_dst_registers(struct vpe_ctx *ctx)
*/
val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
- if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12 &&
+ fmt->fourcc != V4L2_PIX_FMT_NV21)
val |= VPE_DS_BYPASS;
mmr_adb->out_fmt_reg[0] = val;
@@ -847,11 +864,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
unsigned int src_h = s_q_data->c_rect.height;
unsigned int dst_w = d_q_data->c_rect.width;
unsigned int dst_h = d_q_data->c_rect.height;
+ struct v4l2_pix_format_mplane *spix;
size_t mv_buf_size;
int ret;
ctx->sequence = 0;
ctx->field = V4L2_FIELD_TOP;
+ spix = &s_q_data->format.fmt.pix_mp;
if ((s_q_data->flags & Q_IS_INTERLACED) &&
!(d_q_data->flags & Q_IS_INTERLACED)) {
@@ -866,9 +885,9 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
* extra space will not be used by the de-interlacer, but will
* ensure that vpdma operates correctly
*/
- bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
- VPDMA_STRIDE_ALIGN);
- mv_buf_size = bytes_per_line * s_q_data->height;
+ bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * spix->height;
ctx->deinterlacing = true;
src_h <<= 1;
@@ -888,7 +907,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
set_dei_regs(ctx);
csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
- s_q_data->colorspace, d_q_data->colorspace);
+ &s_q_data->format, &d_q_data->format);
sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
@@ -901,14 +920,6 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
}
/*
- * Return the vpe_ctx structure for a given struct file
- */
-static struct vpe_ctx *file2ctx(struct file *file)
-{
- return container_of(file->private_data, struct vpe_ctx, fh);
-}
-
-/*
* mem2mem callbacks
*/
@@ -1010,27 +1021,33 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
struct vpe_fmt *fmt = q_data->fmt;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = !ctx->src_mv_buf_selector;
+ struct v4l2_pix_format_mplane *pix;
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
q_data = &ctx->q_data[Q_DATA_SRC];
+ pix = &q_data->format.fmt.pix_mp;
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
+ pix = &q_data->format.fmt.pix_mp;
vpdma_fmt = fmt->vpdma_fmt[plane];
/*
* If we are using a single plane buffer and
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1044,6 +1061,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
}
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1054,8 +1072,8 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
MAX_W, MAX_H);
- vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
+ vpdma_add_out_dtd(&ctx->desc_list, pix->width,
+ stride, &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
@@ -1067,6 +1085,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_fmt *fmt = q_data->fmt;
+ struct v4l2_pix_format_mplane *pix;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = ctx->src_mv_buf_selector;
int field = vbuf->field == V4L2_FIELD_BOTTOM;
@@ -1074,10 +1093,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
+ pix = &q_data->format.fmt.pix_mp;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
@@ -1087,10 +1110,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
* If we are using a single plane buffer and
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1104,26 +1127,39 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
- /*
- * Use top or bottom field from same vb alternately
- * f,f-1,f-2 = TBT when seq is even
- * f,f-1,f-2 = BTB when seq is odd
- */
- field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+	/*
+	 * The field in the VPDMA descriptor is 0 (top) or 1 (bottom).
+	 * Use the top and bottom fields of the same vb alternately: for
+	 * each de-interlacing operation, f, f-1 and f-2 should form one
+	 * of TBT or BTB.
+	 */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB ||
+ q_data->flags & Q_DATA_INTERLACED_SEQ_BT) {
+ /* Select initial value based on format */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT)
+ field = 1;
+ else
+ field = 0;
+
+ /* Toggle for each vb_index and each operation */
+ field = (field + p_data->vb_index + ctx->sequence) % 2;
if (field) {
- /*
- * bottom field of a SEQ_TB buffer
- * Skip the top field data by
- */
- int height = q_data->height / 2;
- int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
- 1 : (vpdma_fmt->depth >> 3);
+ int height = pix->height / 2;
+ int bpp;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
+ bpp = 1;
+ else
+ bpp = vpdma_fmt->depth >> 3;
+
if (plane)
height /= 2;
- dma_addr += q_data->width * height * bpp;
+
+ dma_addr += pix->width * height * bpp;
}
}
}
@@ -1136,13 +1172,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
frame_width = q_data->c_rect.width;
frame_height = q_data->c_rect.height;
- if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21))
frame_height /= 2;
- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
- frame_height, 0, 0);
+ vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
+ &q_data->c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, field, flags, frame_width,
+ frame_height, 0, 0);
}
/*
@@ -1176,13 +1213,18 @@ static void device_run(void *priv)
struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
-
- if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
- ctx->sequence % 2 == 0) {
- /* When using SEQ_TB buffers, When using it first time,
- * No need to remove the buffer as the next field is present
- * in the same buffer. (so that job_ready won't fail)
- * It will be removed when using bottom field
+ const struct v4l2_format_info *d_finfo;
+
+ d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);
+
+ if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
+ ctx->sequence % 2 == 0) {
+		/* When using SEQ_XX type buffers, each buffer holds two
+		 * fields (top & bottom), so removing one buffer from the
+		 * queue actually yields two fields. Alternate between the
+		 * two operations:
+		 * Even : consume one field but DO NOT REMOVE from queue
+		 * Odd : consume other field and REMOVE from queue
*/
ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[0] == NULL);
@@ -1246,7 +1288,7 @@ static void device_run(void *priv)
if (ctx->deinterlacing)
add_out_dtd(ctx, VPE_PORT_MV_OUT);
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
add_out_dtd(ctx, VPE_PORT_RGB_OUT);
} else {
add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
@@ -1288,7 +1330,7 @@ static void device_run(void *priv)
}
/* sync on channel control descriptors for output ports */
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
VPE_CHAN_RGB_OUT);
} else {
@@ -1391,9 +1433,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
/* the previous dst mv buffer becomes the next src mv buffer */
ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
- if (ctx->aborting)
- goto finished;
-
s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;
@@ -1404,6 +1443,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;
d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_IS_INTERLACED) {
@@ -1457,6 +1497,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[0] = NULL;
ctx->dst_vb = NULL;
+ if (ctx->aborting)
+ goto finished;
+
ctx->bufs_completed++;
if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
@@ -1519,38 +1562,32 @@ static int vpe_enum_fmt(struct file *file, void *priv,
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vb2_queue *vq;
struct vpe_q_data *q_data;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
- pix->width = q_data->width;
- pix->height = q_data->height;
- pix->pixelformat = q_data->fmt->fourcc;
- pix->field = q_data->field;
+ *f = q_data->format;
- if (V4L2_TYPE_IS_OUTPUT(f->type)) {
- pix->colorspace = q_data->colorspace;
- } else {
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
struct vpe_q_data *s_q_data;
+ struct v4l2_pix_format_mplane *spix;
- /* get colorspace from the source queue */
+ /* get colorimetry from the source queue */
s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ spix = &s_q_data->format.fmt.pix_mp;
- pix->colorspace = s_q_data->colorspace;
- }
-
- pix->num_planes = q_data->nplanes;
-
- for (i = 0; i < pix->num_planes; i++) {
- pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
- pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ pix->colorspace = spix->colorspace;
+ pix->xfer_func = spix->xfer_func;
+ pix->ycbcr_enc = spix->ycbcr_enc;
+ pix->quantization = spix->quantization;
}
return 0;
@@ -1564,15 +1601,18 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
unsigned int w_align;
int i, depth, depth_bytes, height;
unsigned int stride = 0;
+ const struct v4l2_format_info *finfo;
if (!fmt || !(fmt->types & type)) {
- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
- return -EINVAL;
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
}
- if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
- && pix->field != V4L2_FIELD_SEQ_TB)
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ pix->field != V4L2_FIELD_SEQ_TB &&
+ pix->field != V4L2_FIELD_SEQ_BT)
pix->field = V4L2_FIELD_NONE;
depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1615,27 +1655,25 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- if (!pix->num_planes)
+ if (!pix->num_planes || pix->num_planes > 2)
pix->num_planes = fmt->coplanar ? 2 : 1;
else if (pix->num_planes > 1 && !fmt->coplanar)
pix->num_planes = 1;
pix->pixelformat = fmt->fourcc;
+ finfo = v4l2_format_info(fmt->fourcc);
/*
* For the actual image parameters, we need to consider the field
- * height of the image for SEQ_TB buffers.
+ * height of the image for SEQ_XX buffers.
*/
- if (pix->field == V4L2_FIELD_SEQ_TB)
+ if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT)
height = pix->height / 2;
else
height = pix->height;
if (!pix->colorspace) {
- if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
- fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ if (v4l2_is_format_rgb(finfo)) {
pix->colorspace = V4L2_COLORSPACE_SRGB;
} else {
if (height > 1280) /* HD */
@@ -1654,6 +1692,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (stride > plane_fmt->bytesperline)
plane_fmt->bytesperline = stride;
+ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
+ stride,
+ VPDMA_MAX_STRIDE);
+
plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
VPDMA_STRIDE_ALIGN);
@@ -1679,7 +1721,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_fmt *fmt = find_format(f);
if (V4L2_TYPE_IS_OUTPUT(f->type))
@@ -1691,10 +1733,9 @@ static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct v4l2_plane_pix_format *plane_fmt;
+ struct v4l2_pix_format_mplane *qpix;
struct vpe_q_data *q_data;
struct vb2_queue *vq;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -1709,42 +1750,34 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
if (!q_data)
return -EINVAL;
+ qpix = &q_data->format.fmt.pix_mp;
q_data->fmt = find_format(f);
- q_data->width = pix->width;
- q_data->height = pix->height;
- q_data->colorspace = pix->colorspace;
- q_data->field = pix->field;
- q_data->nplanes = pix->num_planes;
-
- for (i = 0; i < pix->num_planes; i++) {
- plane_fmt = &pix->plane_fmt[i];
-
- q_data->bytesperline[i] = plane_fmt->bytesperline;
- q_data->sizeimage[i] = plane_fmt->sizeimage;
- }
+ q_data->format = *f;
q_data->c_rect.left = 0;
q_data->c_rect.top = 0;
- q_data->c_rect.width = q_data->width;
- q_data->c_rect.height = q_data->height;
+ q_data->c_rect.width = pix->width;
+ q_data->c_rect.height = pix->height;
- if (q_data->field == V4L2_FIELD_ALTERNATE)
+ if (qpix->field == V4L2_FIELD_ALTERNATE)
q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
- else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ else if (qpix->field == V4L2_FIELD_SEQ_TB)
q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
+ else if (qpix->field == V4L2_FIELD_SEQ_BT)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_BT;
else
q_data->flags &= ~Q_IS_INTERLACED;
- /* the crop height is halved for the case of SEQ_TB buffers */
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ /* the crop height is halved for the case of SEQ_XX buffers */
+ if (q_data->flags & Q_IS_SEQ_XX)
q_data->c_rect.height /= 2;
vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
- f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
- q_data->bytesperline[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ f->type, pix->width, pix->height, pix->pixelformat,
+ pix->plane_fmt[0].bytesperline);
+ if (pix->num_planes == 2)
vpe_dbg(ctx->dev, " bpl_uv %d\n",
- q_data->bytesperline[VPE_CHROMA]);
+ pix->plane_fmt[1].bytesperline);
return 0;
}
@@ -1752,7 +1785,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
int ret;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
ret = vpe_try_fmt(file, priv, f);
if (ret)
@@ -1773,6 +1806,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
int height;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1783,6 +1817,8 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE:
/*
@@ -1809,27 +1845,27 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
}
/*
- * For SEQ_TB buffers, crop height should be less than the height of
+ * For SEQ_XX buffers, crop height should be less than the height of
* the field height, not the buffer height
*/
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
- height = q_data->height / 2;
+ if (q_data->flags & Q_IS_SEQ_XX)
+ height = pix->height / 2;
else
- height = q_data->height;
+ height = pix->height;
if (s->r.top < 0 || s->r.left < 0) {
vpe_err(ctx->dev, "negative values for top and left\n");
s->r.top = s->r.left = 0;
}
- v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
+ v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1,
&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
/* adjust left/top if cropping rectangle is out of bounds */
- if (s->r.left + s->r.width > q_data->width)
- s->r.left = q_data->width - s->r.width;
- if (s->r.top + s->r.height > q_data->height)
- s->r.top = q_data->height - s->r.height;
+ if (s->r.left + s->r.width > pix->width)
+ s->r.left = pix->width - s->r.width;
+ if (s->r.top + s->r.height > pix->height)
+ s->r.top = pix->height - s->r.height;
return 0;
}
@@ -1837,8 +1873,9 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
static int vpe_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
bool use_c_rect = false;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1849,6 +1886,8 @@ static int vpe_g_selection(struct file *file, void *fh,
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -1887,8 +1926,8 @@ static int vpe_g_selection(struct file *file, void *fh,
*/
s->r.left = 0;
s->r.top = 0;
- s->r.width = q_data->width;
- s->r.height = q_data->height;
+ s->r.width = pix->width;
+ s->r.height = pix->height;
}
return 0;
@@ -1898,7 +1937,7 @@ static int vpe_g_selection(struct file *file, void *fh,
static int vpe_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
struct v4l2_selection sel = *s;
int ret;
@@ -1991,17 +2030,21 @@ static int vpe_queue_setup(struct vb2_queue *vq,
int i;
struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
- *nplanes = q_data->nplanes;
+ pix = &q_data->format.fmt.pix_mp;
+ *nplanes = pix->num_planes;
for (i = 0; i < *nplanes; i++)
- sizes[i] = q_data->sizeimage[i];
+ sizes[i] = pix->plane_fmt[i].sizeimage;
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ if (*nplanes == 2)
vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
return 0;
@@ -2012,12 +2055,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vpe_q_data *q_data;
- int i, num_planes;
+ struct v4l2_pix_format_mplane *pix;
+ int i;
vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- num_planes = q_data->nplanes;
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (!(q_data->flags & Q_IS_INTERLACED)) {
@@ -2025,23 +2072,24 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
} else {
if (vbuf->field != V4L2_FIELD_TOP &&
vbuf->field != V4L2_FIELD_BOTTOM &&
- vbuf->field != V4L2_FIELD_SEQ_TB)
+ vbuf->field != V4L2_FIELD_SEQ_TB &&
+ vbuf->field != V4L2_FIELD_SEQ_BT)
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++) {
- if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ for (i = 0; i < pix->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
vpe_err(ctx->dev,
"data will not fit into plane (%lu < %lu)\n",
vb2_plane_size(vb, i),
- (long) q_data->sizeimage[i]);
+ (long)pix->plane_fmt[i].sizeimage);
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++)
- vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+ for (i = 0; i < pix->num_planes; i++)
+ vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
return 0;
}
@@ -2226,6 +2274,7 @@ static int vpe_open(struct file *file)
struct vpe_q_data *s_q_data;
struct v4l2_ctrl_handler *hdl;
struct vpe_ctx *ctx;
+ struct v4l2_pix_format_mplane *pix;
int ret;
vpe_dbg(dev, "vpe_open\n");
@@ -2261,7 +2310,7 @@ static int vpe_open(struct file *file)
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
+ file->private_data = ctx;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 1);
@@ -2274,23 +2323,32 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
s_q_data = &ctx->q_data[Q_DATA_SRC];
- s_q_data->fmt = &vpe_formats[2];
- s_q_data->width = 1920;
- s_q_data->height = 1080;
- s_q_data->nplanes = 1;
- s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
+ pix = &s_q_data->format.fmt.pix_mp;
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ pix->pixelformat = s_q_data->fmt->fourcc;
+ s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ pix->width = 1920;
+ pix->height = 1080;
+ pix->num_planes = 1;
+ pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
- s_q_data->height);
- s_q_data->colorspace = V4L2_COLORSPACE_REC709;
- s_q_data->field = V4L2_FIELD_NONE;
+ pix->plane_fmt[VPE_LUMA].sizeimage =
+ pix->plane_fmt[VPE_LUMA].bytesperline *
+ pix->height;
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pix->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
- s_q_data->c_rect.width = s_q_data->width;
- s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->c_rect.width = pix->width;
+ s_q_data->c_rect.height = pix->height;
s_q_data->flags = 0;
ctx->q_data[Q_DATA_DST] = *s_q_data;
+ ctx->q_data[Q_DATA_DST].format.type =
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
set_dei_shadow_registers(ctx);
set_src_registers(ctx);
@@ -2346,12 +2404,18 @@ free_ctx:
static int vpe_release(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
vpe_dbg(dev, "releasing instance %p\n", ctx);
mutex_lock(&dev->dev_mutex);
free_mv_buffers(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
@@ -2459,6 +2523,13 @@ static int vpe_probe(struct platform_device *pdev)
struct vpe_dev *dev;
int ret, irq, func;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return ret;
+ }
+
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -2473,7 +2544,12 @@ static int vpe_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "vpe_top");
+ "vpe_top");
+ if (!dev->res) {
+ dev_err(&pdev->dev, "missing 'vpe_top' resources data\n");
+ return -ENODEV;
+ }
+
/*
* HACK: we get resource info from device tree in the form of a list of
* VPE sub blocks, the driver currently uses only the base of vpe_top
@@ -2568,7 +2644,7 @@ static int vpe_remove(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
{
- .compatible = "ti,vpe",
+ .compatible = "ti,dra7-vpe",
},
{},
};
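
A minimal sketch of the probe-time pattern introduced above, assuming a
generic platform driver: the 32-bit coherent DMA mask must be set before
the first DMA allocation, and probe should fail early when the platform
cannot honour it. Everything except the dma_coerce_mask_and_coherent()
call is illustrative.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Set streaming and coherent masks before any DMA allocation */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
		return ret;
	}

	/* ... device allocation and registration would follow ... */
	return 0;
}
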
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 0ee143ae0f6b..82350097503e 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -2139,6 +2139,9 @@ static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev)
v4l2_m2m_release(dev->stateful_enc.m2m_dev);
v4l2_m2m_release(dev->stateful_dec.m2m_dev);
v4l2_m2m_release(dev->stateless_dec.m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -2250,7 +2253,6 @@ static int vicodec_remove(struct platform_device *pdev)
v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->stateful_enc.vfd);
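
The vicodec hunks above (and the matching vim2m hunks below) encode a
lifetime rule rather than a simple move: media_device_cleanup() may only
run once the last file handle has dropped, so it belongs in the
v4l2_device release callback instead of remove(). A minimal sketch, with
'struct example_dev' as an assumed container type:

static void example_v4l2_release(struct v4l2_device *v4l2_dev)
{
	struct example_dev *dev =
		container_of(v4l2_dev, struct example_dev, v4l2_dev);

#ifdef CONFIG_MEDIA_CONTROLLER
	/* Safe here: no open file handle can reference mdev any more */
	media_device_cleanup(&dev->mdev);
#endif
	kfree(dev);
}
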
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index acd3bd48c7e2..8d6b09623d88 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1073,6 +1073,9 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
if (!q_data)
return -EINVAL;
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->aborting = 0;
+
q_data->sequence = 0;
return 0;
}
@@ -1272,6 +1275,9 @@ static void vim2m_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_m2m_release(dev->m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -1343,6 +1349,7 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
+ dev->m2m_dev = NULL;
goto error_dev;
}
@@ -1395,7 +1402,6 @@ static int vim2m_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->vfd);
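
The 'dev->m2m_dev = NULL' hunk above guards the shared release path shown
earlier: once PTR_ERR() has been extracted, the ERR_PTR value must not
reach v4l2_m2m_release(), and kfree(NULL) inside the release path is a
harmless no-op. A hedged sketch of the idiom (names illustrative):

	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		ret = PTR_ERR(dev->m2m_dev);
		dev->m2m_dev = NULL;	/* release path tolerates NULL */
		goto error_dev;
	}
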
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 96d06f030c31..a53b2b532e9f 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-vimc-y := vimc-core.o vimc-common.o vimc-streamer.o
+vimc-y := vimc-core.o vimc-common.o vimc-streamer.o vimc-capture.o \
+ vimc-debayer.o vimc-scaler.o vimc-sensor.o

-obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc-capture.o vimc-debayer.o \
-	vimc-scaler.o vimc-sensor.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 1d56b91830ba..76c015898cfd 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
@@ -16,12 +12,9 @@
#include "vimc-common.h"
#include "vimc-streamer.h"
-#define VIMC_CAP_DRV_NAME "vimc-capture"
-
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
- struct device *dev;
struct v4l2_pix_format format;
struct vb2_queue queue;
struct list_head buf_list;
@@ -36,6 +29,7 @@ struct vimc_cap_device {
struct mutex lock;
u32 sequence;
struct vimc_stream stream;
+ struct media_pad pad;
};
static const struct v4l2_pix_format fmt_default = {
@@ -130,7 +124,7 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- dev_dbg(vcap->dev, "%s: format update: "
+ dev_dbg(vcap->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
/* old */
@@ -306,7 +300,7 @@ static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
unsigned long size = vcap->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(vcap->dev, "%s: buffer too small (%lu < %lu)\n",
+ dev_err(vcap->ved.dev, "%s: buffer too small (%lu < %lu)\n",
vcap->vdev.name, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
@@ -328,7 +322,7 @@ static const struct vb2_ops vimc_cap_qops = {
};
static const struct media_entity_operations vimc_cap_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = vimc_vdev_link_validate,
};
static void vimc_cap_release(struct video_device *vdev)
@@ -336,19 +330,16 @@ static void vimc_cap_release(struct video_device *vdev)
struct vimc_cap_device *vcap =
container_of(vdev, struct vimc_cap_device, vdev);
- vimc_pads_cleanup(vcap->ved.pads);
+ media_entity_cleanup(vcap->ved.ent);
kfree(vcap);
}
-static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
- ved);
+ struct vimc_cap_device *vcap;
+ vcap = container_of(ved, struct vimc_cap_device, ved);
vb2_queue_release(&vcap->queue);
- media_entity_cleanup(ved->ent);
video_unregister_device(&vcap->vdev);
}
@@ -391,11 +382,10 @@ static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
return NULL;
}
-static int vimc_cap_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
@@ -405,23 +395,16 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Allocate the vimc_cap_device struct */
vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
if (!vcap)
- return -ENOMEM;
-
- /* Allocate the pads */
- vcap->ved.pads =
- vimc_pads_init(1, (const unsigned long[1]) {MEDIA_PAD_FL_SINK});
- if (IS_ERR(vcap->ved.pads)) {
- ret = PTR_ERR(vcap->ved.pads);
- goto err_free_vcap;
- }
+ return NULL;
/* Initialize the media entity */
- vcap->vdev.entity.name = pdata->entity_name;
+ vcap->vdev.entity.name = vcfg_name;
vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ vcap->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vcap->vdev.entity,
- 1, vcap->ved.pads);
+ 1, &vcap->pad);
if (ret)
- goto err_clean_pads;
+ goto err_free_vcap;
/* Initialize the lock */
mutex_init(&vcap->lock);
@@ -440,8 +423,8 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
ret = vb2_queue_init(q);
if (ret) {
- dev_err(comp, "%s: vb2 queue init failed (err=%d)\n",
- pdata->entity_name, ret);
+ dev_err(&vimc->pdev.dev, "%s: vb2 queue init failed (err=%d)\n",
+ vcfg_name, ret);
goto err_clean_m_ent;
}
@@ -460,8 +443,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vcap->ved.ent = &vcap->vdev.entity;
vcap->ved.process_frame = vimc_cap_process_frame;
vcap->ved.vdev_get_format = vimc_cap_get_format;
- dev_set_drvdata(comp, &vcap->ved);
- vcap->dev = comp;
+ vcap->ved.dev = &vimc->pdev.dev;
/* Initialize the video_device struct */
vdev = &vcap->vdev;
@@ -474,68 +456,25 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vdev->queue = q;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
- strscpy(vdev->name, pdata->entity_name, sizeof(vdev->name));
+ strscpy(vdev->name, vcfg_name, sizeof(vdev->name));
video_set_drvdata(vdev, &vcap->ved);
/* Register the video_device with the v4l2 and the media framework */
ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
- dev_err(comp, "%s: video register failed (err=%d)\n",
+ dev_err(&vimc->pdev.dev, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
goto err_release_queue;
}
- return 0;
+ return &vcap->ved;
err_release_queue:
vb2_queue_release(q);
err_clean_m_ent:
media_entity_cleanup(&vcap->vdev.entity);
-err_clean_pads:
- vimc_pads_cleanup(vcap->ved.pads);
err_free_vcap:
kfree(vcap);
- return ret;
-}
-
-static const struct component_ops vimc_cap_comp_ops = {
- .bind = vimc_cap_comp_bind,
- .unbind = vimc_cap_comp_unbind,
-};
-
-static int vimc_cap_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_cap_comp_ops);
-}
-
-static int vimc_cap_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_cap_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_cap_driver_ids[] = {
- {
- .name = VIMC_CAP_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_cap_pdrv = {
- .probe = vimc_cap_probe,
- .remove = vimc_cap_remove,
- .id_table = vimc_cap_driver_ids,
- .driver = {
- .name = VIMC_CAP_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_cap_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Capture");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 7e1ae0b12f1e..16ce9f3b7c75 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -164,6 +164,16 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
},
};
+bool vimc_is_source(struct media_entity *ent)
+{
+ unsigned int i;
+
+ for (i = 0; i < ent->num_pads; i++)
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SINK)
+ return false;
+ return true;
+}
+
const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
{
if (i >= ARRAY_SIZE(vimc_pix_map_list))
@@ -171,7 +181,6 @@ const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
return &vimc_pix_map_list[i];
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
{
@@ -183,7 +192,6 @@ const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
{
@@ -195,87 +203,37 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
-
-/* Helper function to allocate and initialize pads */
-struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
-{
- struct media_pad *pads;
- unsigned int i;
-
- /* Allocate memory for the pads */
- pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
- if (!pads)
- return ERR_PTR(-ENOMEM);
-
- /* Initialize the pads */
- for (i = 0; i < num_pads; i++) {
- pads[i].index = i;
- pads[i].flags = pads_flag[i];
- }
-
- return pads;
-}
-EXPORT_SYMBOL_GPL(vimc_pads_init);
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
-{
- struct v4l2_subdev *sd;
- struct media_pad *pad;
- unsigned int i;
- int ret;
-
- for (i = 0; i < ent->num_pads; i++) {
- if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
- continue;
-
- /* Start the stream in the subdevice direct connected */
- pad = media_entity_remote_pad(&ent->pads[i]);
- if (!pad)
- continue;
-
- if (!is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
-
- sd = media_entity_to_v4l2_subdev(pad->entity);
- ret = v4l2_subdev_call(sd, video, s_stream, enable);
- if (ret && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vimc_pipeline_s_stream);
-
-static int vimc_get_mbus_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
+static int vimc_get_pix_format(struct media_pad *pad,
+ struct v4l2_pix_format *fmt)
{
if (is_media_entity_v4l2_subdev(pad->entity)) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(pad->entity);
+ struct v4l2_subdev_format sd_fmt;
+ const struct vimc_pix_map *pix_map;
int ret;
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = pad->index;
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
if (ret)
return ret;
+ v4l2_fill_pix_format(fmt, &sd_fmt.format);
+ pix_map = vimc_pix_map_by_code(sd_fmt.format.code);
+ fmt->pixelformat = pix_map->pixelformat;
} else if (is_media_entity_v4l2_video_device(pad->entity)) {
struct video_device *vdev = container_of(pad->entity,
struct video_device,
entity);
struct vimc_ent_device *ved = video_get_drvdata(vdev);
- const struct vimc_pix_map *vpix;
- struct v4l2_pix_format vdev_fmt;
if (!ved->vdev_get_format)
return -ENOIOCTLCMD;
- ved->vdev_get_format(ved, &vdev_fmt);
- vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
- v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ ved->vdev_get_format(ved, fmt);
} else {
return -EINVAL;
}
@@ -283,16 +241,16 @@ static int vimc_get_mbus_format(struct media_pad *pad,
return 0;
}
-int vimc_link_validate(struct media_link *link)
+int vimc_vdev_link_validate(struct media_link *link)
{
- struct v4l2_subdev_format source_fmt, sink_fmt;
+ struct v4l2_pix_format source_fmt, sink_fmt;
int ret;
- ret = vimc_get_mbus_format(link->source, &source_fmt);
+ ret = vimc_get_pix_format(link->source, &source_fmt);
if (ret)
return ret;
- ret = vimc_get_mbus_format(link->sink, &sink_fmt);
+ ret = vimc_get_pix_format(link->sink, &sink_fmt);
if (ret)
return ret;
@@ -301,21 +259,21 @@ int vimc_link_validate(struct media_link *link)
"%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n",
/* src */
link->source->entity->name,
- source_fmt.format.width, source_fmt.format.height,
- source_fmt.format.code, source_fmt.format.colorspace,
- source_fmt.format.quantization, source_fmt.format.xfer_func,
- source_fmt.format.ycbcr_enc,
+ source_fmt.width, source_fmt.height,
+ source_fmt.pixelformat, source_fmt.colorspace,
+ source_fmt.quantization, source_fmt.xfer_func,
+ source_fmt.ycbcr_enc,
/* sink */
link->sink->entity->name,
- sink_fmt.format.width, sink_fmt.format.height,
- sink_fmt.format.code, sink_fmt.format.colorspace,
- sink_fmt.format.quantization, sink_fmt.format.xfer_func,
- sink_fmt.format.ycbcr_enc);
-
- /* The width, height and code must match. */
- if (source_fmt.format.width != sink_fmt.format.width
- || source_fmt.format.height != sink_fmt.format.height
- || source_fmt.format.code != sink_fmt.format.code)
+ sink_fmt.width, sink_fmt.height,
+ sink_fmt.pixelformat, sink_fmt.colorspace,
+ sink_fmt.quantization, sink_fmt.xfer_func,
+ sink_fmt.ycbcr_enc);
+
+ /* The width, height and pixelformat must match. */
+ if (source_fmt.width != sink_fmt.width ||
+ source_fmt.height != sink_fmt.height ||
+ source_fmt.pixelformat != sink_fmt.pixelformat)
return -EPIPE;
/*
@@ -323,44 +281,43 @@ int vimc_link_validate(struct media_link *link)
* to support interlaced hardware connected to bridges that support
* progressive formats only.
*/
- if (source_fmt.format.field != sink_fmt.format.field &&
- sink_fmt.format.field != V4L2_FIELD_NONE)
+ if (source_fmt.field != sink_fmt.field &&
+ sink_fmt.field != V4L2_FIELD_NONE)
return -EPIPE;
/*
* If colorspace is DEFAULT, then assume all the colorimetry is also
* DEFAULT, return 0 to skip comparing the other colorimetry parameters
*/
- if (source_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT
- || sink_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ if (source_fmt.colorspace == V4L2_COLORSPACE_DEFAULT ||
+ sink_fmt.colorspace == V4L2_COLORSPACE_DEFAULT)
return 0;
/* Colorspace must match. */
- if (source_fmt.format.colorspace != sink_fmt.format.colorspace)
+ if (source_fmt.colorspace != sink_fmt.colorspace)
return -EPIPE;
/* Colorimetry must match if they are not set to DEFAULT */
- if (source_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && sink_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && source_fmt.format.ycbcr_enc != sink_fmt.format.ycbcr_enc)
+ if (source_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ sink_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ source_fmt.ycbcr_enc != sink_fmt.ycbcr_enc)
return -EPIPE;
- if (source_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && sink_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && source_fmt.format.quantization != sink_fmt.format.quantization)
+ if (source_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ sink_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ source_fmt.quantization != sink_fmt.quantization)
return -EPIPE;
- if (source_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && sink_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && source_fmt.format.xfer_func != sink_fmt.format.xfer_func)
+ if (source_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ sink_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ source_fmt.xfer_func != sink_fmt.xfer_func)
return -EPIPE;
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_link_validate);
static const struct media_entity_operations vimc_ent_sd_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = v4l2_subdev_link_validate,
};
int vimc_ent_sd_register(struct vimc_ent_device *ved,
@@ -369,17 +326,12 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops)
{
int ret;
- /* Allocate the pads */
- ved->pads = vimc_pads_init(num_pads, pads_flag);
- if (IS_ERR(ved->pads))
- return PTR_ERR(ved->pads);
-
/* Fill the vimc_ent_device struct */
ved->ent = &sd->entity;
@@ -398,9 +350,9 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
/* Initialize the media entity */
- ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ ret = media_entity_pads_init(&sd->entity, num_pads, pads);
if (ret)
- goto err_clean_pads;
+ return ret;
/* Register the subdev with the v4l2 and the media framework */
ret = v4l2_device_register_subdev(v4l2_dev, sd);
@@ -415,16 +367,5 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
err_clean_m_ent:
media_entity_cleanup(&sd->entity);
-err_clean_pads:
- vimc_pads_cleanup(ved->pads);
return ret;
}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
-
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
-{
- media_entity_cleanup(ved->ent);
- vimc_pads_cleanup(ved->pads);
- v4l2_device_unregister_subdev(sd);
-}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 9c2e0e216c6b..87eb8259c2a8 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -8,6 +8,7 @@
#ifndef _VIMC_COMMON_H_
#define _VIMC_COMMON_H_
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -18,6 +19,7 @@
#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
#define VIMC_CID_TEST_PATTERN (VIMC_CID_VIMC_BASE + 0)
+#define VIMC_CID_MEAN_WIN_SIZE (VIMC_CID_VIMC_BASE + 1)
#define VIMC_FRAME_MAX_WIDTH 4096
#define VIMC_FRAME_MAX_HEIGHT 2160
@@ -26,6 +28,10 @@
#define VIMC_FRAME_INDEX(lin, col, width, bpp) ((lin * width + col) * bpp)
+/* Source and sink pad checks */
+#define VIMC_IS_SRC(pad) (pad)
+#define VIMC_IS_SINK(pad) (!(pad))
+
/**
* struct vimc_colorimetry_clamp - Adjust colorimetry parameters
*
@@ -53,21 +59,6 @@ do { \
} while (0)
/**
- * struct vimc_platform_data - platform data to components
- *
- * @entity_name: The name of the entity to be created
- *
- * Board setup code will often provide additional information using the device's
- * platform_data field to hold additional information.
- * When injecting a new platform_device in the component system the core needs
- * to provide to the corresponding submodules the name of the entity that should
- * be used when registering the subdevice in the Media Controller system.
- */
-struct vimc_platform_data {
- char entity_name[32];
-};
-
-/**
* struct vimc_pix_map - maps media bus code with v4l2 pixel format
*
* @code: media bus format code defined by MEDIA_BUS_FMT_* macros
@@ -85,10 +76,11 @@ struct vimc_pix_map {
};
/**
- * struct vimc_ent_device - core struct that represents a node in the topology
+ * struct vimc_ent_device - core struct that represents an entity in the
+ * topology
*
+ * @dev: pointer to the struct device of the driver
* @ent: the pointer to struct media_entity for the node
- * @pads: the list of pads of the node
* @process_frame: callback that sends a frame to that node
* @vdev_get_format: callback that returns the current format of a pad, used
* only when is_media_entity_v4l2_video_device(ent) returns
@@ -103,8 +95,8 @@ struct vimc_pix_map {
* media_entity
*/
struct vimc_ent_device {
+ struct device *dev;
struct media_entity *ent;
- struct media_pad *pads;
void * (*process_frame)(struct vimc_ent_device *ved,
const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
@@ -112,38 +104,65 @@ struct vimc_ent_device {
};
/**
- * vimc_pads_init - initialize pads
- *
- * @num_pads: number of pads to initialize
- * @pads_flags: flags to use in each pad
+ * struct vimc_device - main device for vimc driver
*
- * Helper functions to allocate/initialize pads
+ * @pdev: pointer to the platform device
+ * @pipe_cfg: pointer to the vimc pipeline configuration structure
+ * @ent_devs: array of vimc_ent_device pointers
+ * @mdev: the associated media_device parent
+ * @v4l2_dev: internal v4l2 parent device
*/
-struct media_pad *vimc_pads_init(u16 num_pads,
- const unsigned long *pads_flag);
+struct vimc_device {
+ struct platform_device pdev;
+ const struct vimc_pipeline_config *pipe_cfg;
+ struct vimc_ent_device **ent_devs;
+ struct media_device mdev;
+ struct v4l2_device v4l2_dev;
+};
/**
- * vimc_pads_cleanup - free pads
- *
- * @pads: pointer to the pads
- *
- * Helper function to free the pads initialized with vimc_pads_init
+ * struct vimc_ent_config - describes the configuration of each entity
+ *
+ * @name: entity name
+ * @add: subdev add hook - initializes and registers the subdev,
+ *       called from vimc-core
+ * @rm: subdev rm hook - unregisters and frees the subdev,
+ *      called from vimc-core
*/
-static inline void vimc_pads_cleanup(struct media_pad *pads)
-{
- kfree(pads);
-}
+struct vimc_ent_config {
+ const char *name;
+ struct vimc_ent_device *(*add)(struct vimc_device *vimc,
+ const char *vcfg_name);
+ void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved);
+};
/**
- * vimc_pipeline_s_stream - start stream through the pipeline
+ * vimc_is_source - returns true if the entity has only source pads
*
- * @ent: the pointer to struct media_entity for the node
- * @enable: 1 to start the stream and 0 to stop
+ * @ent: pointer to &struct media_entity
*
- * Helper function to call the s_stream of the subdevices connected
- * in all the sink pads of the entity
*/
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
+bool vimc_is_source(struct media_entity *ent);
+
+/* prototypes for vimc_ent_config add and rm hooks */
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
@@ -176,7 +195,8 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* unique.
* @function: media entity function defined by MEDIA_ENT_F_* macros
* @num_pads: number of pads to initialize
- * @pads_flag: flags to use in each pad
+ * @pads: the array of pads of the entity; the caller should set the
+ *        flags of the pads
* @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
@@ -189,29 +209,17 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops);
/**
- * vimc_ent_sd_unregister - cleanup and unregister a subdev node
- *
- * @ved: the vimc_ent_device struct to be cleaned up
- * @sd: the v4l2_subdev struct to be unregistered
- *
- * Helper function cleanup and unregister the struct vimc_ent_device and struct
- * v4l2_subdev which represents a subdev node in the topology
- */
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved,
- struct v4l2_subdev *sd);
-
-/**
- * vimc_link_validate - validates a media link
+ * vimc_vdev_link_validate - validates a media link
*
* @link: pointer to &struct media_link
*
* This function checks whether a media link is valid for streaming.
*/
-int vimc_link_validate(struct media_link *link);
+int vimc_vdev_link_validate(struct media_link *link);
#endif
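
Taken together, the header changes above replace dynamically allocated
pads with caller-owned arrays. A minimal sketch of the new registration
contract, assuming the pad array lives in the per-entity device struct
so it outlives the media entity (it is shown as a designated-initializer
array here only for brevity, and error handling is trimmed):

	struct media_pad pads[2] = {
		[0] = { .flags = MEDIA_PAD_FL_SINK },
		[1] = { .flags = MEDIA_PAD_FL_SOURCE },
	};

	ret = vimc_ent_sd_register(&ved, &sd, v4l2_dev, "Example Filter",
				   MEDIA_ENT_F_PROC_VIDEO_SCALER, 2, pads,
				   &int_ops, &sd_ops);
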
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 571c55aa0e16..97a272f3350a 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -5,7 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -24,29 +23,6 @@
.flags = link_flags, \
}
-struct vimc_device {
- /* The platform device */
- struct platform_device pdev;
-
- /* The pipeline configuration */
- const struct vimc_pipeline_config *pipe_cfg;
-
- /* The Associated media_device parent */
- struct media_device mdev;
-
- /* Internal v4l2 parent device*/
- struct v4l2_device v4l2_dev;
-
- /* Subdevices */
- struct platform_device **subdevs;
-};
-
-/* Structure which describes individual configuration for each entity */
-struct vimc_ent_config {
- const char *name;
- const char *drv;
-};
-
/* Structure which describes links between entities */
struct vimc_ent_link {
unsigned int src_ent;
@@ -68,43 +44,52 @@ struct vimc_pipeline_config {
* Topology Configuration
*/
-static const struct vimc_ent_config ent_config[] = {
+static struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Sensor B",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Debayer A",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Debayer B",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Raw Capture 0",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
.name = "Raw Capture 1",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
- .name = "RGB/YUV Input",
/* TODO: change this to vimc-input when it is implemented */
- .drv = "vimc-sensor",
+ .name = "RGB/YUV Input",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Scaler",
- .drv = "vimc-scaler",
+ .add = vimc_sca_add,
+ .rm = vimc_sca_rm,
},
{
.name = "RGB/YUV Capture",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
};
@@ -127,7 +112,7 @@ static const struct vimc_ent_link ent_links[] = {
VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
};
-static const struct vimc_pipeline_config pipe_cfg = {
+static struct vimc_pipeline_config pipe_cfg = {
.ents = ent_config,
.num_ents = ARRAY_SIZE(ent_config),
.links = ent_links,
@@ -136,6 +121,14 @@ static const struct vimc_pipeline_config pipe_cfg = {
/* -------------------------------------------------------------------------- */
+static void vimc_rm_links(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ media_entity_remove_links(vimc->ent_devs[i]->ent);
+}
+
static int vimc_create_links(struct vimc_device *vimc)
{
unsigned int i;
@@ -144,32 +137,56 @@ static int vimc_create_links(struct vimc_device *vimc)
/* Initialize the links between entities */
for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
- /*
- * TODO: Check another way of retrieving ved struct without
- * relying on platform_get_drvdata
- */
+
struct vimc_ent_device *ved_src =
- platform_get_drvdata(vimc->subdevs[link->src_ent]);
+ vimc->ent_devs[link->src_ent];
struct vimc_ent_device *ved_sink =
- platform_get_drvdata(vimc->subdevs[link->sink_ent]);
+ vimc->ent_devs[link->sink_ent];
ret = media_create_pad_link(ved_src->ent, link->src_pad,
ved_sink->ent, link->sink_pad,
link->flags);
if (ret)
- return ret;
+ goto err_rm_links;
}
return 0;
+
+err_rm_links:
+ vimc_rm_links(vimc);
+ return ret;
}
-static int vimc_comp_bind(struct device *master)
+static int vimc_add_subdevs(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
- int ret;
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(&vimc->pdev.dev, "new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc,
+ vimc->pipe_cfg->ents[i].name);
+ if (!vimc->ent_devs[i]) {
+ dev_err(&vimc->pdev.dev, "add new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void vimc_rm_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ if (vimc->ent_devs[i])
+ vimc->pipe_cfg->ents[i].rm(vimc, vimc->ent_devs[i]);
+}
- dev_dbg(master, "bind");
+static int vimc_register_devices(struct vimc_device *vimc)
+{
+ int ret;
/* Register the v4l2 struct */
ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
@@ -179,22 +196,31 @@ static int vimc_comp_bind(struct device *master)
return ret;
}
- /* Bind subdevices */
- ret = component_bind_all(master, &vimc->v4l2_dev);
- if (ret)
+ /* allocate ent_devs */
+ vimc->ent_devs = kcalloc(vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->ent_devs), GFP_KERNEL);
+ if (!vimc->ent_devs) {
+ ret = -ENOMEM;
goto err_v4l2_unregister;
+ }
+
+ /* Invoke entity config hooks to initialize and register subdevs */
+ ret = vimc_add_subdevs(vimc);
+ if (ret)
+ /* remove subdevs that got added */
+ goto err_rm_subdevs;
/* Initialize links */
ret = vimc_create_links(vimc);
if (ret)
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
/* Register the media device */
ret = media_device_register(&vimc->mdev);
if (ret) {
dev_err(vimc->mdev.dev,
"media device register failed (err=%d)\n", ret);
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
}
/* Expose all subdevs' nodes */
@@ -211,98 +237,32 @@ static int vimc_comp_bind(struct device *master)
err_mdev_unregister:
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
-err_comp_unbind_all:
- component_unbind_all(master, NULL);
+err_rm_subdevs:
+ vimc_rm_subdevs(vimc);
+ kfree(vimc->ent_devs);
err_v4l2_unregister:
v4l2_device_unregister(&vimc->v4l2_dev);
return ret;
}
-static void vimc_comp_unbind(struct device *master)
+static void vimc_unregister(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
-
- dev_dbg(master, "unbind");
-
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
- component_unbind_all(master, NULL);
v4l2_device_unregister(&vimc->v4l2_dev);
+ kfree(vimc->ent_devs);
}
-static int vimc_comp_compare(struct device *comp, void *data)
-{
- return comp == data;
-}
-
-static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
-{
- struct component_match *match = NULL;
- struct vimc_platform_data pdata;
- int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- dev_dbg(&vimc->pdev.dev, "new pdev for %s\n",
- vimc->pipe_cfg->ents[i].drv);
-
- strscpy(pdata.entity_name, vimc->pipe_cfg->ents[i].name,
- sizeof(pdata.entity_name));
-
- vimc->subdevs[i] = platform_device_register_data(&vimc->pdev.dev,
- vimc->pipe_cfg->ents[i].drv,
- PLATFORM_DEVID_AUTO,
- &pdata,
- sizeof(pdata));
- if (IS_ERR(vimc->subdevs[i])) {
- match = ERR_CAST(vimc->subdevs[i]);
- while (--i >= 0)
- platform_device_unregister(vimc->subdevs[i]);
-
- return match;
- }
-
- component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
- &vimc->subdevs[i]->dev);
- }
-
- return match;
-}
-
-static void vimc_rm_subdevs(struct vimc_device *vimc)
-{
- unsigned int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
- platform_device_unregister(vimc->subdevs[i]);
-}
-
-static const struct component_master_ops vimc_comp_ops = {
- .bind = vimc_comp_bind,
- .unbind = vimc_comp_unbind,
-};
-
static int vimc_probe(struct platform_device *pdev)
{
struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
- struct component_match *match = NULL;
int ret;
dev_dbg(&pdev->dev, "probe");
memset(&vimc->mdev, 0, sizeof(vimc->mdev));
- /* Create platform_device for each entity in the topology*/
- vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
- sizeof(*vimc->subdevs), GFP_KERNEL);
- if (!vimc->subdevs)
- return -ENOMEM;
-
- match = vimc_add_subdevs(vimc);
- if (IS_ERR(match))
- return PTR_ERR(match);
-
/* Link the media device within the v4l2_device */
vimc->v4l2_dev.mdev = &vimc->mdev;
@@ -314,12 +274,9 @@ static int vimc_probe(struct platform_device *pdev)
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
- /* Add self to the component system */
- ret = component_master_add_with_match(&pdev->dev, &vimc_comp_ops,
- match);
+ ret = vimc_register_devices(vimc);
if (ret) {
media_device_cleanup(&vimc->mdev);
- vimc_rm_subdevs(vimc);
return ret;
}
@@ -332,8 +289,8 @@ static int vimc_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove");
- component_master_del(&pdev->dev, &vimc_comp_ops);
vimc_rm_subdevs(vimc);
+ vimc_unregister(vimc);
return 0;
}
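
With the component framework gone, adding a new entity type reduces to
implementing an add/rm pair and listing it in ent_config[]. A hedged
sketch for the hypothetical "vimc-input" entity mentioned in the TODO
above (the vimc_inp_* names are illustrative, not part of this commit):

	struct vimc_ent_device *vimc_inp_add(struct vimc_device *vimc,
					     const char *vcfg_name);
	void vimc_inp_rm(struct vimc_device *vimc,
			 struct vimc_ent_device *ved);

	/* corresponding ent_config[] entry: */
	{
		.name = "RGB/YUV Input",
		.add = vimc_inp_add,
		.rm = vimc_inp_rm,
	},
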
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index b72b8385067b..5d1b67d684bb 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -5,28 +5,16 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_DEB_DRV_NAME "vimc-debayer"
-
-static unsigned int deb_mean_win_size = 3;
-module_param(deb_mean_win_size, uint, 0000);
-MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
- "NOTE: the window size needs to be an odd number, as the main pixel "
- "stays in the center of the window, otherwise the next odd number "
- "is considered");
-
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
-
enum vimc_deb_rgb_colors {
VIMC_DEB_RED = 0,
VIMC_DEB_GREEN = 1,
@@ -41,7 +29,6 @@ struct vimc_deb_pix_map {
struct vimc_deb_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* The active format */
struct v4l2_mbus_framefmt sink_fmt;
u32 src_code;
@@ -51,6 +38,9 @@ struct vimc_deb_device {
u8 *src_frame;
const struct vimc_deb_pix_map *sink_pix_map;
unsigned int sink_bpp;
+ unsigned int mean_win_size;
+ struct v4l2_ctrl_handler hdl;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -159,7 +149,7 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
/* We only support one format for source pads */
- if (IS_SRC(code->pad)) {
+ if (VIMC_IS_SRC(code->pad)) {
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
if (code->index)
@@ -185,7 +175,7 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index)
return -EINVAL;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
const struct vimc_deb_pix_map *vpix =
vimc_deb_pix_map_by_code(fse->code);
@@ -215,7 +205,7 @@ static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
vdeb->sink_fmt;
/* Set the right code for the source pad */
- if (IS_SRC(fmt->pad))
+ if (VIMC_IS_SRC(fmt->pad))
fmt->format.code = vdeb->src_code;
return 0;
@@ -262,7 +252,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
/* TODO: Add support for other formats */
fmt->format.code = vdeb->src_code;
@@ -270,7 +260,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_deb_adjust_sink_fmt(&fmt->format);
- dev_dbg(vdeb->dev, "%s: sink format update: "
+ dev_dbg(vdeb->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
/* old */
@@ -351,11 +341,18 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+static const struct v4l2_subdev_core_ops vimc_deb_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops vimc_deb_video_ops = {
.s_stream = vimc_deb_s_stream,
};
static const struct v4l2_subdev_ops vimc_deb_ops = {
+ .core = &vimc_deb_core_ops,
.pad = &vimc_deb_pad_ops,
.video = &vimc_deb_video_ops,
};
@@ -389,11 +386,11 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* the top left corner of the mean window (considering the current
* pixel as the center)
*/
- seek = deb_mean_win_size / 2;
+ seek = vdeb->mean_win_size / 2;
/* Sum the values of the colors in the mean window */
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
vdeb->sd.name, lin, col, vdeb->sink_fmt.height, seek);
@@ -426,7 +423,7 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
vdeb->sink_fmt.width,
vdeb->sink_bpp);
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
vdeb->sd.name, index, wlin, wcol, color);
@@ -437,21 +434,21 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
/* Save how many values we already added */
n_rgb[color]++;
- dev_dbg(vdeb->dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ dev_dbg(vdeb->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n",
vdeb->sd.name, rgb[color], n_rgb[color]);
}
}
/* Calculate the mean */
for (i = 0; i < 3; i++) {
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
if (n_rgb[i])
rgb[i] = rgb[i] / n_rgb[i];
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
vdeb->sd.name, lin, col, i, rgb[i]);
}
@@ -476,14 +473,34 @@ static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
}
return vdeb->src_frame;
+}
+static int vimc_deb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vimc_deb_device *vdeb =
+ container_of(ctrl->handler, struct vimc_deb_device, hdl);
+
+ switch (ctrl->id) {
+ case VIMC_CID_MEAN_WIN_SIZE:
+ vdeb->mean_win_size = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
+static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
+ .s_ctrl = vimc_deb_s_ctrl,
+};
+
static void vimc_deb_release(struct v4l2_subdev *sd)
{
struct vimc_deb_device *vdeb =
container_of(sd, struct vimc_deb_device, sd);
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+ media_entity_cleanup(vdeb->ved.ent);
kfree(vdeb);
}
@@ -491,44 +508,69 @@ static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = {
.release = vimc_deb_release,
};
-static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
- ved);
+ struct vimc_deb_device *vdeb;
- vimc_ent_sd_unregister(ved, &vdeb->sd);
+ vdeb = container_of(ved, struct vimc_deb_device, ved);
+ v4l2_device_unregister_subdev(&vdeb->sd);
}
-static int vimc_deb_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+static const struct v4l2_ctrl_config vimc_deb_ctrl_class = {
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIMC_CID_VIMC_CLASS,
+ .name = "VIMC Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
+ .ops = &vimc_deb_ctrl_ops,
+ .id = VIMC_CID_MEAN_WIN_SIZE,
+ .name = "Debayer Mean Window Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 25,
+ .step = 2,
+ .def = 3,
+};
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_deb_device *vdeb;
int ret;
/* Allocate the vdeb struct */
vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
if (!vdeb)
- return -ENOMEM;
+ return NULL;
+
+ /* Create controls: */
+ v4l2_ctrl_handler_init(&vdeb->hdl, 2);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_mean_win_size, NULL);
+ vdeb->sd.ctrl_handler = &vdeb->hdl;
+ if (vdeb->hdl.error) {
+ ret = vdeb->hdl.error;
+ goto err_free_vdeb;
+ }
/* Initialize ved and sd */
+ vdeb->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vdeb->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vdeb->pads,
&vimc_deb_int_ops, &vimc_deb_ops);
- if (ret) {
- kfree(vdeb);
- return ret;
- }
+ if (ret)
+ goto err_free_hdl;
vdeb->ved.process_frame = vimc_deb_process_frame;
- dev_set_drvdata(comp, &vdeb->ved);
- vdeb->dev = comp;
+ vdeb->ved.dev = &vimc->pdev.dev;
+ vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def;
/* Initialize the frame format */
vdeb->sink_fmt = sink_fmt_default;
@@ -541,46 +583,12 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
- return 0;
-}
-
-static const struct component_ops vimc_deb_comp_ops = {
- .bind = vimc_deb_comp_bind,
- .unbind = vimc_deb_comp_unbind,
-};
+ return &vdeb->ved;
-static int vimc_deb_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_deb_comp_ops);
-}
-
-static int vimc_deb_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_deb_comp_ops);
+err_free_hdl:
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+err_free_vdeb:
+ kfree(vdeb);
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_deb_driver_ids[] = {
- {
- .name = VIMC_DEB_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_deb_pdrv = {
- .probe = vimc_deb_probe,
- .remove = vimc_deb_remove,
- .id_table = vimc_deb_driver_ids,
- .driver = {
- .name = VIMC_DEB_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_deb_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Debayer");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 49ab8d9dd9c9..2f88a7d9d67b 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -5,30 +5,22 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_SCA_DRV_NAME "vimc-scaler"
-
static unsigned int sca_mult = 3;
module_param(sca_mult, uint, 0000);
MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
#define MAX_ZOOM 8
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* NOTE: the source fmt is the same as the sink
* with the width and height multiplied by mult
*/
@@ -37,6 +29,7 @@ struct vimc_sca_device {
u8 *src_frame;
unsigned int src_line_size;
unsigned int bpp;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -98,7 +91,7 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
fse->max_width = VIMC_FRAME_MAX_WIDTH;
fse->max_height = VIMC_FRAME_MAX_HEIGHT;
} else {
@@ -121,7 +114,7 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
vsca->sink_fmt;
/* Scale the frame size for the source pad */
- if (IS_SRC(format->pad)) {
+ if (VIMC_IS_SRC(format->pad)) {
format->format.width = vsca->sink_fmt.width * sca_mult;
format->format.height = vsca->sink_fmt.height * sca_mult;
}
@@ -170,7 +163,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
fmt->format.width = sink_fmt->width * sca_mult;
fmt->format.height = sink_fmt->height * sca_mult;
@@ -178,7 +171,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_sca_adjust_sink_fmt(&fmt->format);
- dev_dbg(vsca->dev, "%s: sink format update: "
+ dev_dbg(vsca->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
/* old */
@@ -278,7 +271,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
vsca->bpp);
pixel = &sink_frame[index];
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
vsca->sd.name, lin, col, index);
@@ -288,7 +281,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
vsca->sink_fmt.width * sca_mult, vsca->bpp);
- dev_dbg(vsca->dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
+ dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
vsca->sd.name, lin * sca_mult, col * sca_mult, index);
/* Repeat this pixel mult times */
@@ -297,7 +290,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
* pixel repetition in a line
*/
for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: sca: scale_pix src pos %d\n",
vsca->sd.name, index + j);
@@ -343,6 +336,7 @@ static void vimc_sca_release(struct v4l2_subdev *sd)
struct vimc_sca_device *vsca =
container_of(sd, struct vimc_sca_device, sd);
+ media_entity_cleanup(vsca->ved.ent);
kfree(vsca);
}
@@ -350,89 +344,45 @@ static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = {
.release = vimc_sca_release,
};
-static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
- ved);
+ struct vimc_sca_device *vsca;
- vimc_ent_sd_unregister(ved, &vsca->sd);
+ vsca = container_of(ved, struct vimc_sca_device, ved);
+ v4l2_device_unregister_subdev(&vsca->sd);
}
-
-static int vimc_sca_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sca_device *vsca;
int ret;
/* Allocate the vsca struct */
vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
if (!vsca)
- return -ENOMEM;
+ return NULL;
/* Initialize ved and sd */
+ vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vsca->pads,
&vimc_sca_int_ops, &vimc_sca_ops);
if (ret) {
kfree(vsca);
- return ret;
+ return NULL;
}
vsca->ved.process_frame = vimc_sca_process_frame;
- dev_set_drvdata(comp, &vsca->ved);
- vsca->dev = comp;
+ vsca->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsca->sink_fmt = sink_fmt_default;
- return 0;
-}
-
-static const struct component_ops vimc_sca_comp_ops = {
- .bind = vimc_sca_comp_bind,
- .unbind = vimc_sca_comp_unbind,
-};
-
-static int vimc_sca_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sca_comp_ops);
-}
-
-static int vimc_sca_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sca_comp_ops);
-
- return 0;
+ return &vsca->ved;
}
-
-static const struct platform_device_id vimc_sca_driver_ids[] = {
- {
- .name = VIMC_SCA_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sca_pdrv = {
- .probe = vimc_sca_probe,
- .remove = vimc_sca_remove,
- .id_table = vimc_sca_driver_ids,
- .driver = {
- .name = VIMC_SCA_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sca_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Scaler");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 6c53b9fc1617..32380f504591 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
@@ -18,18 +14,15 @@
#include "vimc-common.h"
-#define VIMC_SEN_DRV_NAME "vimc-sensor"
-
struct vimc_sen_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
struct tpg_data tpg;
- struct task_struct *kthread_sen;
u8 *frame;
/* The active format */
struct v4l2_mbus_framefmt mbus_format;
struct v4l2_ctrl_handler hdl;
+ struct media_pad pad;
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -164,7 +157,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
/* Set the new format */
vimc_sen_adjust_fmt(&fmt->format);
- dev_dbg(vsen->dev, "%s: format update: "
+ dev_dbg(vsen->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
/* old */
@@ -208,10 +201,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (vsen->kthread_sen)
- /* tpg is already executing */
- return 0;
-
/* Calculate the frame size */
vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
frame_size = vsen->mbus_format.width * vpix->bpp *
@@ -297,6 +286,7 @@ static void vimc_sen_release(struct v4l2_subdev *sd)
v4l2_ctrl_handler_free(&vsen->hdl);
tpg_free(&vsen->tpg);
+ media_entity_cleanup(vsen->ved.ent);
kfree(vsen);
}
@@ -304,14 +294,12 @@ static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = {
.release = vimc_sen_release,
};
-static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sen_device *vsen =
- container_of(ved, struct vimc_sen_device, ved);
+ struct vimc_sen_device *vsen;
- vimc_ent_sd_unregister(ved, &vsen->sd);
+ vsen = container_of(ved, struct vimc_sen_device, ved);
+ v4l2_device_unregister_subdev(&vsen->sd);
}
/* Image Processing Controls */
@@ -331,18 +319,17 @@ static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
.qmenu = tpg_pattern_strings,
};
-static int vimc_sen_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sen_device *vsen;
int ret;
/* Allocate the vsen struct */
vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
if (!vsen)
- return -ENOMEM;
+ return NULL;
v4l2_ctrl_handler_init(&vsen->hdl, 4);
@@ -366,78 +353,36 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
goto err_free_vsen;
}
+ /* Initialize the test pattern generator */
+ tpg_init(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height);
+ ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
+ if (ret)
+ goto err_free_hdl;
+
/* Initialize ved and sd */
+ vsen->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
- pdata->entity_name,
- MEDIA_ENT_F_CAM_SENSOR, 1,
- (const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
+ vcfg_name,
+ MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad,
&vimc_sen_int_ops, &vimc_sen_ops);
if (ret)
- goto err_free_hdl;
+ goto err_free_tpg;
vsen->ved.process_frame = vimc_sen_process_frame;
- dev_set_drvdata(comp, &vsen->ved);
- vsen->dev = comp;
+ vsen->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsen->mbus_format = fmt_default;
- /* Initialize the test pattern generator */
- tpg_init(&vsen->tpg, vsen->mbus_format.width,
- vsen->mbus_format.height);
- ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
- if (ret)
- goto err_unregister_ent_sd;
-
- return 0;
+ return &vsen->ved;
-err_unregister_ent_sd:
- vimc_ent_sd_unregister(&vsen->ved, &vsen->sd);
+err_free_tpg:
+ tpg_free(&vsen->tpg);
err_free_hdl:
v4l2_ctrl_handler_free(&vsen->hdl);
err_free_vsen:
kfree(vsen);
- return ret;
-}
-
-static const struct component_ops vimc_sen_comp_ops = {
- .bind = vimc_sen_comp_bind,
- .unbind = vimc_sen_comp_unbind,
-};
-
-static int vimc_sen_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sen_comp_ops);
-}
-
-static int vimc_sen_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sen_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_sen_driver_ids[] = {
- {
- .name = VIMC_SEN_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sen_pdrv = {
- .probe = vimc_sen_probe,
- .remove = vimc_sen_remove,
- .id_table = vimc_sen_driver_ids,
- .driver = {
- .name = VIMC_SEN_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sen_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Sensor");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
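[Annotation] The vimc hunks above drop the per-entity platform drivers and the component bind/unbind glue; each entity now exposes a plain add/rm pair (vimc_sen_add()/vimc_sen_rm() here, with scaler equivalents implied by the vimc-scaler.c hunk) that the vimc core calls directly. A minimal sketch of how a core can drive such pairs follows; vimc-core.c is not part of this diff, so the ent_config table and the ent_devs[] array are illustrative assumptions, not the commit's actual code.

    /*
     * Sketch only: one way a core driver can walk add/rm pairs once the
     * component framework is gone. ent_config[] and vimc->ent_devs[] are
     * assumed names; vimc-core.c is not shown in this diff.
     */
    struct vimc_ent_config {
    	const char *name;
    	struct vimc_ent_device *(*add)(struct vimc_device *vimc,
    				       const char *vcfg_name);
    	void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved);
    };

    static const struct vimc_ent_config ent_config[] = {
    	{ "Sensor A", vimc_sen_add, vimc_sen_rm },
    	{ "Scaler", vimc_sca_add, vimc_sca_rm },
    };

    static int vimc_add_subdevs(struct vimc_device *vimc)
    {
    	unsigned int i;

    	for (i = 0; i < ARRAY_SIZE(ent_config); i++) {
    		vimc->ent_devs[i] = ent_config[i].add(vimc,
    						      ent_config[i].name);
    		if (!vimc->ent_devs[i])
    			return -EINVAL;	/* add() returns NULL on failure */
    	}
    	return 0;
    }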
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index 048d770e498b..cd6b55433c9e 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -7,7 +7,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -97,17 +96,26 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
sd = media_entity_to_v4l2_subdev(ved->ent);
ret = v4l2_subdev_call(sd, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
- pr_err("subdev_call error %s\n",
- ved->ent->name);
+ dev_err(ved->dev, "subdev_call error %s\n",
+ ved->ent->name);
vimc_streamer_pipeline_terminate(stream);
return ret;
}
}
entity = vimc_get_source_entity(ved->ent);
- /* Check if the end of the pipeline was reached*/
- if (!entity)
+ /* Check if the end of the pipeline was reached */
+ if (!entity) {
+ /* the first entity of the pipeline must be a source */
+ if (!vimc_is_source(ved->ent)) {
+ dev_err(ved->dev,
+ "first entity in the pipe '%s' is not a source\n",
+ ved->ent->name);
+ vimc_streamer_pipeline_terminate(stream);
+ return -EPIPE;
+ }
return 0;
+ }
/* Get the next device in the pipeline */
if (is_media_entity_v4l2_subdev(entity)) {
@@ -217,4 +225,3 @@ int vimc_streamer_s_stream(struct vimc_stream *stream,
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 2f5762e3309a..e8a50c506dc9 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,7 +3,7 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o
+ vivid-osd.o vivid-meta-cap.o vivid-meta-out.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index 4d822dbed972..4d2413e87730 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -276,12 +276,11 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source)
{
- char name[sizeof(dev->vid_out_dev.name) + 2];
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
+ char name[32];
- snprintf(name, sizeof(name), "%s%d",
- is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
- idx);
+ snprintf(name, sizeof(name), "vivid-%03d-vid-%s%d",
+ dev->inst, is_source ? "out" : "cap", idx);
return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
name, caps, 1);
}
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 53315c8dd2bb..c184f9b0be69 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -37,6 +37,8 @@
#include "vivid-osd.h"
#include "vivid-cec.h"
#include "vivid-ctrls.h"
+#include "vivid-meta-cap.h"
+#include "vivid-meta-out.h"
#define VIVID_MODULE_NAME "vivid"
@@ -79,6 +81,14 @@ static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(radio_tx_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");
+static int meta_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_cap_nr, " videoX start number, -1 is autodetect");
+
+static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
+
static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_cap_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
@@ -95,10 +105,15 @@ static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1
module_param_array(multiplanar, uint, NULL, 0444);
MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
-/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
-static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
+/*
+ * Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr +
+ * vbi-out + vid-out + meta-cap + meta-out
+ */
+static unsigned int node_types[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 0x61d3d
+};
module_param_array(node_types, uint, NULL, 0444);
-MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the following meaning:\n"
+MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the following meaning:\n"
"\t\t bit 0: Video Capture node\n"
"\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 4: Radio Receiver node\n"
@@ -106,7 +121,9 @@ MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the f
"\t\t bit 8: Video Output node\n"
"\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 12: Radio Transmitter node\n"
- "\t\t bit 16: Framebuffer for testing overlays");
+ "\t\t bit 16: Framebuffer for testing overlays\n"
+ "\t\t bit 17: Metadata Capture node\n"
+ "\t\t bit 18: Metadata Output node\n");
/* Default: 4 inputs */
static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
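[Annotation] Worked example of the new default: 0x61d3d is the old 0x1d3d with bit 17 (0x20000, metadata capture) and bit 18 (0x40000, metadata output) added, matching the two new lines in the bitmask description above. A standalone check of the arithmetic:

    /* Userspace sanity check of the node_types default (plain C). */
    #include <assert.h>

    int main(void)
    {
    	unsigned int node_types = 0x61d3d;

    	assert(node_types & 0x00001);	/* bit 0:  video capture node */
    	assert(node_types & 0x00100);	/* bit 8:  video output node */
    	assert(node_types & 0x20000);	/* bit 17: metadata capture node */
    	assert(node_types & 0x40000);	/* bit 18: metadata output node */
    	/* new default == old default plus both metadata nodes */
    	assert(node_types == (0x1d3d | 0x20000 | 0x40000));
    	return 0;
    }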
@@ -205,7 +222,8 @@ static int vidioc_querycap(struct file *file, void *priv,
cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
dev->vbi_cap_caps | dev->vbi_out_caps |
dev->radio_rx_caps | dev->radio_tx_caps |
- dev->sdr_cap_caps | V4L2_CAP_DEVICE_CAPS;
+ dev->sdr_cap_caps | dev->meta_cap_caps |
+ dev->meta_out_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -433,7 +451,9 @@ static bool vivid_is_last_user(struct vivid_dev *dev)
vivid_is_in_use(&dev->vbi_out_dev) +
vivid_is_in_use(&dev->sdr_cap_dev) +
vivid_is_in_use(&dev->radio_rx_dev) +
- vivid_is_in_use(&dev->radio_tx_dev);
+ vivid_is_in_use(&dev->radio_tx_dev) +
+ vivid_is_in_use(&dev->meta_cap_dev) +
+ vivid_is_in_use(&dev->meta_out_dev);
return uses == 1;
}
@@ -459,6 +479,8 @@ static int vivid_fop_release(struct file *file)
set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
}
mutex_unlock(&dev->mutex);
if (file->private_data == dev->overlay_cap_owner)
@@ -604,6 +626,16 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_enum_fmt_meta_cap = vidioc_enum_fmt_meta_cap,
+ .vidioc_g_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_s_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_try_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+
+ .vidioc_enum_fmt_meta_out = vidioc_enum_fmt_meta_out,
+ .vidioc_g_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = vidioc_g_fmt_meta_out,
};
/* -----------------------------------------------------------------
@@ -616,6 +648,9 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
vivid_free_controls(dev);
v4l2_device_unregister(&dev->v4l2_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
vfree(dev->scaled_line);
vfree(dev->blended_line);
vfree(dev->edid);
@@ -645,14 +680,44 @@ static const struct media_device_ops vivid_media_ops = {
};
#endif
+static int vivid_create_queue(struct vivid_dev *dev,
+ struct vb2_queue *q,
+ u32 buf_type,
+ unsigned int min_buffers_needed,
+ const struct vb2_ops *ops)
+{
+ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE && !dev->has_raw_vbi_cap)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_OUTPUT && !dev->has_raw_vbi_out)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+
+ q->type = buf_type;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ;
+ if (allocators[dev->inst] != 1)
+ q->io_modes |= VB2_USERPTR;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = ops;
+ q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
+ &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = min_buffers_needed;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
+
+ return vb2_queue_init(q);
+}
+
static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
V4L2_DV_BT_CEA_1280X720P60;
- static const struct vb2_mem_ops * const vivid_mem_ops[2] = {
- &vb2_vmalloc_memops,
- &vb2_dma_contig_memops,
- };
unsigned in_type_counter[4] = { 0, 0, 0, 0 };
unsigned out_type_counter[4] = { 0, 0, 0, 0 };
int ccs_cap = ccs_cap_mode[inst];
@@ -661,9 +726,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
bool has_modulator;
struct vivid_dev *dev;
struct video_device *vfd;
- struct vb2_queue *q;
unsigned node_type = node_types[inst];
- unsigned int allocator = allocators[inst];
v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
int ret;
int i;
@@ -758,6 +821,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
}
+ /* do we create a meta capture device? */
+ dev->has_meta_cap = node_type & 0x20000;
+
+ /* sanity checks */
+ if ((in_type_counter[WEBCAM] || in_type_counter[HDMI]) &&
+ !dev->has_vid_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "Webcam or HDMI input without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if ((in_type_counter[TV] || in_type_counter[SVID]) &&
+ !dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "TV or S-Video input without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a video output device? */
dev->has_vid_out = node_type & 0x0100;
@@ -768,6 +850,24 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
}
+ /* do we create a metadata output device? */
+ dev->has_meta_out = node_type & 0x40000;
+
+ /* sanity checks */
+ if (out_type_counter[SVID] &&
+ !dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "S-Video output without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "HDMI output without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a radio receiver device? */
dev->has_radio_rx = node_type & 0x0010;
@@ -777,6 +877,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* do we create a software defined radio capture device? */
dev->has_sdr_cap = node_type & 0x0020;
+ /* do we have a TV tuner? */
+ dev->has_tv_tuner = in_type_counter[TV];
+
/* do we have a tuner? */
has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
dev->has_radio_rx || dev->has_sdr_cap;
@@ -828,7 +931,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vid_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vid_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vid_out) {
@@ -849,7 +952,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vbi_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vbi_out) {
@@ -875,6 +978,23 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
V4L2_CAP_READWRITE;
+ /* set up the capabilities of the metadata capture device */
+ if (dev->has_meta_cap) {
+ dev->meta_cap_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->meta_cap_caps |= V4L2_CAP_AUDIO;
+ if (dev->has_tv_tuner)
+ dev->meta_cap_caps |= V4L2_CAP_TUNER;
+ }
+ /* set up the capabilities of the metadata output device */
+ if (dev->has_meta_out) {
+ dev->meta_out_caps = V4L2_CAP_META_OUTPUT |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->meta_out_caps |= V4L2_CAP_AUDIO;
+ }
+
ret = -ENOMEM;
/* initialize the test pattern generator */
tpg_init(&dev->tpg, 640, 360);
@@ -934,6 +1054,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_ENUMAUDIO);
}
if (!dev->has_audio_outputs) {
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT);
@@ -942,6 +1065,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_ENUMAUDOUT);
}
if (!in_type_counter[TV] && !in_type_counter[SVID]) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD);
@@ -959,12 +1085,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_FREQUENCY);
}
if (!has_tuner) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_TUNER);
}
if (in_type_counter[HDMI] == 0) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID);
@@ -990,12 +1120,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
/* configure internal data */
dev->fmt_cap = &vivid_formats[0];
@@ -1078,6 +1211,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->vbi_cap_active);
INIT_LIST_HEAD(&dev->vbi_out_active);
INIT_LIST_HEAD(&dev->sdr_cap_active);
+ INIT_LIST_HEAD(&dev->meta_cap_active);
+ INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->cec_work_list);
spin_lock_init(&dev->cec_slock);
@@ -1092,126 +1227,69 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
goto unreg_dev;
}
- if (allocator == 1)
+ if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- else if (allocator >= ARRAY_SIZE(vivid_mem_ops))
- allocator = 0;
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
- snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name),
- "vivid-%03d-vid-cap", inst);
/* initialize vid_cap queue */
- q = &dev->vb_vid_cap_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
- V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_cap_q,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, 2,
+ &vivid_vid_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vid_out) {
- snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name),
- "vivid-%03d-vid-out", inst);
/* initialize vid_out queue */
- q = &dev->vb_vid_out_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
- V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_out_q,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, 2,
+ &vivid_vid_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_cap) {
/* initialize vbi_cap queue */
- q = &dev->vb_vbi_cap_q;
- q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
- V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_cap_q,
+ V4L2_BUF_TYPE_VBI_CAPTURE, 2,
+ &vivid_vbi_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_out) {
/* initialize vbi_out queue */
- q = &dev->vb_vbi_out_q;
- q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
- V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_out_q,
+ V4L2_BUF_TYPE_VBI_OUTPUT, 2,
+ &vivid_vbi_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_sdr_cap) {
/* initialize sdr_cap queue */
- q = &dev->vb_sdr_cap_q;
- q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_sdr_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 8;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_sdr_cap_q,
+ V4L2_BUF_TYPE_SDR_CAPTURE, 8,
+ &vivid_sdr_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_cap) {
+ /* initialize meta_cap queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_cap_q,
+ V4L2_BUF_TYPE_META_CAPTURE, 2,
+ &vivid_meta_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_out) {
+ /* initialize meta_out queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_out_q,
+ V4L2_BUF_TYPE_META_OUTPUT, 1,
+ &vivid_meta_out_qops);
if (ret)
goto unreg_dev;
}
@@ -1222,7 +1300,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
- dev->fb_info.node);
+ dev->fb_info.node);
}
#ifdef CONFIG_VIDEO_VIVID_CEC
@@ -1265,10 +1343,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vid_cap_caps;
@@ -1314,6 +1396,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vid_out) {
vfd = &dev->vid_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1492,6 +1576,65 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+ if (dev->has_meta_cap) {
+ vfd = &dev->meta_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_cap_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_cap;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_meta_out) {
+ vfd = &dev->meta_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-out", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_out_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_out_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_out;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata output device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device */
ret = media_device_register(&dev->mdev);
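[Annotation] With the two registration blocks above, the metadata nodes appear as ordinary /dev/videoX devices (VFL_TYPE_GRABBER) whose device_caps carry V4L2_CAP_META_CAPTURE or V4L2_CAP_META_OUTPUT. A small userspace check, assuming a hypothetical node number:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
    	struct v4l2_capability cap = {0};
    	int fd = open("/dev/video4", O_RDWR);	/* hypothetical node */

    	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap))
    		return 1;
    	if (cap.device_caps & V4L2_CAP_META_CAPTURE)
    		printf("%s: metadata capture node\n", cap.card);
    	if (cap.device_caps & V4L2_CAP_META_OUTPUT)
    		printf("%s: metadata output node\n", cap.card);
    	return 0;
    }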
@@ -1508,6 +1651,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
return 0;
unreg_dev:
+ video_unregister_device(&dev->meta_out_dev);
+ video_unregister_device(&dev->meta_cap_dev);
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1580,7 +1725,6 @@ static int vivid_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
- media_device_cleanup(&dev->mdev);
#endif
if (dev->has_vid_cap) {
@@ -1624,6 +1768,16 @@ static int vivid_remove(struct platform_device *pdev)
unregister_framebuffer(&dev->fb_info);
vivid_fb_release_buffers(dev);
}
+ if (dev->has_meta_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_cap_dev));
+ video_unregister_device(&dev->meta_cap_dev);
+ }
+ if (dev->has_meta_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_out_dev));
+ video_unregister_device(&dev->meta_out_dev);
+ }
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 7ebb14673c75..59192b67231c 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -131,6 +131,8 @@ struct vivid_dev {
struct media_pad vbi_cap_pad;
struct media_pad vbi_out_pad;
struct media_pad sdr_cap_pad;
+ struct media_pad meta_cap_pad;
+ struct media_pad meta_out_pad;
#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
@@ -153,6 +155,11 @@ struct vivid_dev {
struct v4l2_ctrl_handler ctrl_hdl_radio_tx;
struct video_device sdr_cap_dev;
struct v4l2_ctrl_handler ctrl_hdl_sdr_cap;
+ struct video_device meta_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_cap;
+ struct video_device meta_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_out;
+
spinlock_t slock;
struct mutex mutex;
@@ -164,6 +171,8 @@ struct vivid_dev {
u32 sdr_cap_caps;
u32 radio_rx_caps;
u32 radio_tx_caps;
+ u32 meta_cap_caps;
+ u32 meta_out_caps;
/* supported features */
bool multiplanar;
@@ -189,6 +198,9 @@ struct vivid_dev {
bool has_radio_tx;
bool has_sdr_cap;
bool has_fb;
+ bool has_meta_cap;
+ bool has_meta_out;
+ bool has_tv_tuner;
bool can_loop_video;
@@ -390,6 +402,8 @@ struct vivid_dev {
struct list_head vid_cap_active;
struct vb2_queue vb_vbi_cap_q;
struct list_head vbi_cap_active;
+ struct vb2_queue vb_meta_cap_q;
+ struct list_head meta_cap_active;
/* thread for generating video capture stream */
struct task_struct *kthread_vid_cap;
@@ -407,6 +421,9 @@ struct vivid_dev {
u32 vbi_cap_seq_count;
bool vbi_cap_streaming;
bool stream_sliced_vbi_cap;
+ u32 meta_cap_seq_start;
+ u32 meta_cap_seq_count;
+ bool meta_cap_streaming;
/* video output */
const struct vivid_fmt *fmt_out;
@@ -421,6 +438,8 @@ struct vivid_dev {
struct list_head vid_out_active;
struct vb2_queue vb_vbi_out_q;
struct list_head vbi_out_active;
+ struct vb2_queue vb_meta_out_q;
+ struct list_head meta_out_active;
/* video loop precalculated rectangles */
@@ -461,6 +480,9 @@ struct vivid_dev {
u32 vbi_out_seq_count;
bool vbi_out_streaming;
bool stream_sliced_vbi_out;
+ u32 meta_out_seq_start;
+ u32 meta_out_seq_count;
+ bool meta_out_streaming;
/* SDR capture */
struct vb2_queue vb_sdr_cap_q;
@@ -527,6 +549,9 @@ struct vivid_dev {
/* CEC OSD String */
char osd[14];
unsigned long osd_jiffies;
+
+ bool meta_pts;
+ bool meta_scr;
};
static inline bool vivid_is_webcam(const struct vivid_dev *dev)
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index cb19a9a73092..68e8124c7973 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -32,6 +32,7 @@
#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
+#define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -94,6 +95,9 @@
#define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110)
+#define VIVID_CID_META_CAP_GENERATE_PTS (VIVID_CID_VIVID_BASE + 111)
+#define VIVID_CID_META_CAP_GENERATE_SCR (VIVID_CID_VIVID_BASE + 112)
+
/* General User Controls */
static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -110,6 +114,7 @@ static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
break;
case VIVID_CID_BUTTON:
dev->button_pressed = 30;
@@ -262,6 +267,18 @@ static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
.type = V4L2_CTRL_TYPE_BUTTON,
};
+static const struct v4l2_area area = {
+ .width = 1000,
+ .height = 2000,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_area = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_AREA,
+ .name = "Area",
+ .type = V4L2_CTRL_TYPE_AREA,
+ .p_def.p_const = &area,
+};
/* Framebuffer Controls */
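[Annotation] The "Area" control above exercises compound-control defaults: a V4L2_CTRL_TYPE_AREA control whose default value (1000x2000) is supplied through p_def.p_const instead of the usual integer .def. Reading it back needs the extended-control API. In this sketch the control ID is an assumption: VIVID_CID_CUSTOM_BASE is defined outside this hunk, and (V4L2_CID_USER_BASE | 0xf000) + 11 should be verified against vivid-ctrls.c.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
    	/* assumed: VIVID_CID_CUSTOM_BASE == (V4L2_CID_USER_BASE | 0xf000) */
    	unsigned int cid_area = (V4L2_CID_USER_BASE | 0xf000) + 11;
    	struct v4l2_area area = {0};
    	struct v4l2_ext_control ctrl = {
    		.id = cid_area, .size = sizeof(area), .ptr = &area,
    	};
    	struct v4l2_ext_controls ctrls = {
    		.which = V4L2_CTRL_WHICH_CUR_VAL,
    		.count = 1, .controls = &ctrl,
    	};
    	int fd = open("/dev/video0", O_RDWR);	/* hypothetical node */

    	if (fd < 0 || ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls))
    		return 1;
    	printf("area: %ux%u\n", area.width, area.height);
    	return 0;
    }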
@@ -1421,6 +1438,47 @@ static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = {
.step = 1,
};
+/* Metadata Capture Controls */
+
+static int vivid_meta_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev,
+ ctrl_hdl_meta_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_META_CAP_GENERATE_PTS:
+ dev->meta_pts = ctrl->val;
+ break;
+ case VIVID_CID_META_CAP_GENERATE_SCR:
+ dev->meta_scr = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_meta_cap_ctrl_ops = {
+ .s_ctrl = vivid_meta_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_pts = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_PTS,
+ .name = "Generate PTS",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_src_clk = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_SCR,
+ .name = "Generate SCR",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
static const struct v4l2_ctrl_config vivid_ctrl_class = {
.ops = &vivid_user_gen_ctrl_ops,
@@ -1448,6 +1506,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx;
struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx;
struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
+ struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap;
+ struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out;
+
struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
.ops = &vivid_vid_cap_ctrl_ops,
.id = VIVID_CID_DV_TIMINGS,
@@ -1486,6 +1547,10 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_sdr_cap, 19);
v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_cap, 2);
+ v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_out, 2);
+ v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL);
/* User Controls */
dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
@@ -1522,6 +1587,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
@@ -1743,6 +1809,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_sdr_cap,
&vivid_ctrl_sdr_cap_fm_deviation, NULL);
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_pts, NULL);
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_src_clk, NULL);
+ }
+
if (hdl_user_gen->error)
return hdl_user_gen->error;
if (hdl_user_vid->error)
@@ -1817,6 +1890,20 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
return hdl_sdr_cap->error;
dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_streaming, NULL, false);
+ if (hdl_meta_cap->error)
+ return hdl_meta_cap->error;
+ dev->meta_cap_dev.ctrl_handler = hdl_meta_cap;
+ }
+ if (dev->has_meta_out) {
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_streaming, NULL, false);
+ if (hdl_meta_out->error)
+ return hdl_meta_out->error;
+ dev->meta_out_dev.ctrl_handler = hdl_meta_out;
+ }
return 0;
}
@@ -1836,4 +1923,6 @@ void vivid_free_controls(struct vivid_dev *dev)
v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out);
}
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 003319d7816d..01a9d671b947 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -39,6 +39,7 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
@@ -677,6 +678,7 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
{
struct vivid_buffer *vid_cap_buf = NULL;
struct vivid_buffer *vbi_cap_buf = NULL;
+ struct vivid_buffer *meta_cap_buf = NULL;
u64 f_time = 0;
dprintk(dev, 1, "Video Capture Thread Tick\n");
@@ -704,15 +706,19 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
list_del(&vbi_cap_buf->list);
}
}
+ if (!list_empty(&dev->meta_cap_active)) {
+ meta_cap_buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_cap_buf->list);
+ }
+
spin_unlock(&dev->slock);
- if (!vid_cap_buf && !vbi_cap_buf)
+ if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
goto update_mv;
f_time = dev->cap_frame_period * dev->vid_cap_seq_count +
dev->cap_stream_start + dev->time_wrap_offset;
- if (!dev->tstamp_src_is_soe)
- f_time += dev->cap_frame_eof_offset;
if (vid_cap_buf) {
v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
@@ -735,6 +741,8 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
vid_cap_buf->vb.vb2_buf.index);
vid_cap_buf->vb.vb2_buf.timestamp = f_time;
+ if (!dev->tstamp_src_is_soe)
+ vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset;
}
if (vbi_cap_buf) {
@@ -756,8 +764,22 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
/* If capturing a VBI, offset by 0.05 */
vbi_period = dev->cap_frame_period * 5;
do_div(vbi_period, 100);
- vbi_cap_buf->vb.vb2_buf.timestamp = f_time + vbi_period;
+ vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period;
+ }
+
+ if (meta_cap_buf) {
+ v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time);
+ v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_cap %d done\n",
+ meta_cap_buf->vb.vb2_buf.index);
+ meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset;
}
+
dev->dqbuf_error = false;
update_mv:
@@ -796,7 +818,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies;
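[Annotation] The mutex_trylock() dance above (and its twins in vivid-kthread-out.c and vivid-sdr-cap.c below) pairs with the removal of the unlock/lock around kthread_stop() at the end of each stop path: kthread_stop() is now called with dev->mutex held, so a worker blocked in mutex_lock() could never reach its kthread_should_stop() check and the stop path would deadlock. Polling with trylock plus a one-jiffy backoff breaks that cycle. A condensed sketch of the pattern (not verbatim driver code):

    static int worker(void *data)
    {
    	struct vivid_dev *dev = data;

    	for (;;) {
    		if (kthread_should_stop())
    			break;
    		if (!mutex_trylock(&dev->mutex)) {
    			/* the stop path may own the mutex; back off one
    			 * jiffy so kthread_should_stop() is re-checked */
    			schedule_timeout_uninterruptible(1);
    			continue;
    		}
    		/* ... produce one tick worth of buffers ... */
    		mutex_unlock(&dev->mutex);
    	}
    	return 0;
    }

    static void stop_worker(struct vivid_dev *dev) /* caller holds dev->mutex */
    {
    	/* safe now: the worker never sleeps on dev->mutex */
    	kthread_stop(dev->kthread_vid_cap);
    	dev->kthread_vid_cap = NULL;
    }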
@@ -835,6 +861,7 @@ static int vivid_thread_vid_cap(void *data)
dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
+ dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start;
vivid_thread_vid_cap_tick(dev, dropped_bufs);
@@ -883,8 +910,10 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_cap_streaming)
dev->vid_cap_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_cap_streaming)
dev->vbi_cap_seq_start = seq_count;
+ else
+ dev->meta_cap_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -894,6 +923,7 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
dev->vid_cap_seq_start = dev->seq_wrap * 128;
dev->vbi_cap_seq_start = dev->seq_wrap * 128;
+ dev->meta_cap_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
"%s-vid-cap", dev->v4l2_dev.name);
@@ -951,13 +981,27 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
+ if (pstreaming == &dev->meta_cap_streaming) {
+ while (!list_empty(&dev->meta_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_cap_streaming || dev->vbi_cap_streaming ||
+ dev->meta_cap_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index ce5bcda2348c..6780687978f9 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -38,11 +38,13 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
{
struct vivid_buffer *vid_out_buf = NULL;
struct vivid_buffer *vbi_out_buf = NULL;
+ struct vivid_buffer *meta_out_buf = NULL;
dprintk(dev, 1, "Video Output Thread Tick\n");
@@ -69,9 +71,14 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
struct vivid_buffer, list);
list_del(&vbi_out_buf->list);
}
+ if (!list_empty(&dev->meta_out_active)) {
+ meta_out_buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_out_buf->list);
+ }
spin_unlock(&dev->slock);
- if (!vid_out_buf && !vbi_out_buf)
+ if (!vid_out_buf && !vbi_out_buf && !meta_out_buf)
return;
if (vid_out_buf) {
@@ -111,6 +118,21 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
dprintk(dev, 2, "vbi_out buffer %d done\n",
vbi_out_buf->vb.vb2_buf.index);
}
+ if (meta_out_buf) {
+ v4l2_ctrl_request_setup(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_request_complete(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vivid_meta_out_process(dev, meta_out_buf);
+ meta_out_buf->vb.sequence = dev->meta_out_seq_count;
+ meta_out_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&meta_out_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ meta_out_buf->vb.vb2_buf.index);
+ }
+
dev->dqbuf_error = false;
}
@@ -136,6 +158,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = 0xffffff80U;
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
+ dev->meta_out_seq_start = 0;
dev->out_seq_resync = false;
for (;;) {
@@ -143,7 +166,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies;
@@ -178,6 +205,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = buffers_since_start + dev->out_seq_offset;
dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start;
dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start;
+ dev->meta_out_seq_count = dev->out_seq_count - dev->meta_out_seq_start;
vivid_thread_vid_out_tick(dev);
mutex_unlock(&dev->mutex);
@@ -229,8 +257,10 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_out_streaming)
dev->vid_out_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_out_streaming)
dev->vbi_out_seq_start = seq_count;
+ else
+ dev->meta_out_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -239,6 +269,7 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->seq_wrap * 128;
dev->vbi_out_seq_start = dev->seq_wrap * 128;
+ dev->meta_out_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev,
"%s-vid-out", dev->v4l2_dev.name);
@@ -296,13 +327,27 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_out_streaming || dev->vbi_out_streaming)
+ if (pstreaming == &dev->meta_out_streaming) {
+ while (!list_empty(&dev->meta_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_out_streaming || dev->vbi_out_streaming ||
+ dev->meta_out_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.c b/drivers/media/platform/vivid/vivid-meta-cap.c
new file mode 100644
index 000000000000..780f96860a6d
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-cap.c - meta capture support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
+
+static int meta_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev,
+ &dev->meta_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->meta_cap_streaming);
+}
+
+static void meta_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_cap);
+}
+
+const struct vb2_ops vivid_meta_cap_qops = {
+ .queue_setup = meta_cap_queue_setup,
+ .buf_prepare = meta_cap_buf_prepare,
+ .buf_queue = meta_cap_buf_queue,
+ .start_streaming = meta_cap_start_streaming,
+ .stop_streaming = meta_cap_stop_streaming,
+ .buf_request_complete = meta_cap_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->pixelformat = V4L2_META_FMT_UVC;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_cap)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_UVC;
+ meta->buffersize = sizeof(struct vivid_uvc_meta_buf);
+ return 0;
+}
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe)
+{
+ struct vivid_uvc_meta_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ int buf_off = 0;
+
+ buf->vb.sequence = dev->meta_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.sequence /= 2;
+ memset(meta, 1, vb2_plane_size(&buf->vb.vb2_buf, 0));
+
+ meta->ns = ktime_get_ns();
+ meta->sof = buf->vb.sequence * 30;
+ meta->length = sizeof(*meta) - offsetof(struct vivid_uvc_meta_buf, length);
+ meta->flags = UVC_STREAM_EOH | UVC_STREAM_EOF;
+
+ if ((buf->vb.sequence % 2) == 0)
+ meta->flags |= UVC_STREAM_FID;
+
+ dprintk(dev, 2, "%s ns:%llu sof:%4d len:%u flags: 0x%02x",
+ __func__, meta->ns, meta->sof, meta->length, meta->flags);
+ if (dev->meta_pts) {
+ meta->flags |= UVC_STREAM_PTS;
+ *(__u32 *)(meta->buf) = div_u64(soe, VIVID_META_CLOCK_UNIT);
+ buf_off = 4;
+ dprintk(dev, 2, " pts: %u\n", *(__u32 *)(meta->buf));
+ }
+
+ if (dev->meta_scr) {
+ meta->flags |= UVC_STREAM_SCR;
+ *(__u32 *)(meta->buf + buf_off) =
+ div_u64((soe + dev->cap_frame_eof_offset),
+ VIVID_META_CLOCK_UNIT);
+
+ *(__u16 *)(meta->buf + buf_off + 4) = (buf->vb.sequence * 30) % 1000;
+ dprintk(dev, 2, " stc: %u, sof counter: %u\n",
+ *(__u32 *)(meta->buf + buf_off),
+ *(__u16 *)(meta->buf + buf_off + 4));
+ }
+ dprintk(dev, 2, "\n");
+}
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.h b/drivers/media/platform/vivid/vivid-meta-cap.h
new file mode 100644
index 000000000000..4670d00d1576
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-cap.h - meta capture support functions.
+ */
+#ifndef _VIVID_META_CAP_H_
+#define _VIVID_META_CAP_H_
+
+#define VIVID_META_CLOCK_UNIT 10 /* 100 MHz */
+
+struct vivid_uvc_meta_buf {
+ __u64 ns;
+ __u16 sof;
+ __u8 length;
+ __u8 flags;
+ __u8 buf[10]; /* PTS(4)+STC(4)+SOF(2) */
+} __packed;
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe);
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_cap_qops;
+
+#endif
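[Annotation] Since the meta capture node advertises V4L2_CAP_READWRITE, one full vivid_uvc_meta_buf can be pulled with a plain read() and decoded the way vivid_meta_cap_fillbuff() packs it. VIVID_META_CLOCK_UNIT is 10, so PTS/STC tick at 100 MHz and multiplying by 10 recovers nanoseconds. The device path is a hypothetical example; the mirrored struct follows the layout declared above.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    struct uvc_meta {		/* mirrors struct vivid_uvc_meta_buf */
    	uint64_t ns;
    	uint16_t sof;
    	uint8_t  length;
    	uint8_t  flags;
    	uint8_t  buf[10];	/* PTS(4) + STC(4) + SOF(2) */
    } __attribute__((packed));

    int main(void)
    {
    	struct uvc_meta m;
    	int fd = open("/dev/video4", O_RDONLY);	/* hypothetical node */

    	if (fd < 0 || read(fd, &m, sizeof(m)) != sizeof(m))
    		return 1;
    	printf("ns=%llu sof=%u flags=0x%02x\n",
    	       (unsigned long long)m.ns, m.sof, m.flags);
    	if (m.flags & 0x04) {	/* UVC_STREAM_PTS, from linux/usb/video.h */
    		uint32_t pts;

    		memcpy(&pts, m.buf, 4);
    		printf("pts=%u ticks (%llu ns)\n",
    		       pts, (unsigned long long)pts * 10ULL);
    	}
    	return 0;
    }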
diff --git a/drivers/media/platform/vivid/vivid-meta-out.c b/drivers/media/platform/vivid/vivid-meta-out.c
new file mode 100644
index 000000000000..ff8a039aba72
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-out.c - meta output support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
+
+static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_out_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_out_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev,
+ &dev->meta_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->meta_out_streaming);
+}
+
+static void meta_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_out);
+}
+
+const struct vb2_ops vivid_meta_out_qops = {
+ .queue_setup = meta_out_queue_setup,
+ .buf_prepare = meta_out_buf_prepare,
+ .buf_queue = meta_out_buf_queue,
+ .start_streaming = meta_out_start_streaming,
+ .stop_streaming = meta_out_stop_streaming,
+ .buf_request_complete = meta_out_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_OUTPUT;
+ f->pixelformat = V4L2_META_FMT_VIVID;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_out)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_VIVID;
+ meta->buffersize = sizeof(struct vivid_meta_out_buf);
+ return 0;
+}
+
+void vivid_meta_out_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
+{
+ struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+ tpg_s_brightness(&dev->tpg, meta->brightness);
+ tpg_s_contrast(&dev->tpg, meta->contrast);
+ tpg_s_saturation(&dev->tpg, meta->saturation);
+ tpg_s_hue(&dev->tpg, meta->hue);
+ dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
+ __func__, meta->brightness, meta->contrast,
+ meta->saturation, meta->hue);
+}
diff --git a/drivers/media/platform/vivid/vivid-meta-out.h b/drivers/media/platform/vivid/vivid-meta-out.h
new file mode 100644
index 000000000000..0c639b7c2842
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-out.h - meta output support functions.
+ */
+#ifndef _VIVID_META_OUT_H_
+#define _VIVID_META_OUT_H_
+
+struct vivid_meta_out_buf {
+ u16 brightness;
+ u16 contrast;
+ u16 saturation;
+ s16 hue;
+};
+
+void vivid_meta_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_out_qops;
+
+#endif
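
A minimal userspace sketch (not part of the patch; device handling and
values are illustrative) of how an application could fill and queue one
metadata buffer of this layout; the driver then applies it in
vivid_meta_out_process():

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

struct vivid_meta_out_buf {		/* mirrors the kernel layout above */
	uint16_t brightness, contrast, saturation;
	int16_t hue;
};

static void queue_one_meta_buffer(int fd)
{
	struct v4l2_requestbuffers req = {
		.count = 2,
		.type = V4L2_BUF_TYPE_META_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
	};
	struct v4l2_buffer buf = {
		.type = V4L2_BUF_TYPE_META_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
		.index = 0,
	};
	struct vivid_meta_out_buf *meta;

	ioctl(fd, VIDIOC_REQBUFS, &req);	/* error handling omitted */
	ioctl(fd, VIDIOC_QUERYBUF, &buf);
	meta = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, buf.m.offset);

	meta->brightness = 128;			/* device-defined values */
	meta->contrast = 128;
	meta->saturation = 128;
	meta->hue = 0;

	ioctl(fd, VIDIOC_QBUF, &buf);		/* VIDIOC_STREAMON starts
						   consumption */
}
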
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 9acc709b0740..2b7522e16efc 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies;
@@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
}
/* shutdown control thread */
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL;
- mutex_lock(&dev->mutex);
}
static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
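
The trylock change above follows a common kthread pattern: the worker
polls for the lock instead of blocking on it, so a path that holds
dev->mutex (such as stop_streaming) can call kthread_stop() without
deadlocking. A hedged sketch with illustrative names:

#include <linux/kthread.h>
#include <linux/mutex.h>

static int worker_thread(void *data)
{
	struct my_dev *dev = data;		/* illustrative type */

	while (!kthread_should_stop()) {
		if (!mutex_trylock(&dev->mutex)) {
			/* lock holder may be about to stop this thread */
			schedule_timeout_uninterruptible(1);
			continue;
		}
		do_one_iteration(dev);		/* hypothetical helper */
		mutex_unlock(&dev->mutex);
	}
	return 0;
}
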
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 8cbaa0c998ed..e94beef008c8 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_cap)
- return 0;
-
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++)
@@ -1359,7 +1356,9 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
if (i == dev->input)
return 0;
- if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ if (vb2_is_busy(&dev->vb_vid_cap_q) ||
+ vb2_is_busy(&dev->vb_vbi_cap_q) ||
+ vb2_is_busy(&dev->vb_meta_cap_q))
return -EBUSY;
dev->input = i;
@@ -1369,6 +1368,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
}
dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
+ dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
vivid_update_format_cap(dev, false);
if (dev->colorspace) {
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 148b663a6075..ee3446e3217c 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_out)
- return 0;
-
dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) {
@@ -1082,7 +1079,9 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
if (o == dev->output)
return 0;
- if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ if (vb2_is_busy(&dev->vb_vid_out_q) ||
+ vb2_is_busy(&dev->vb_vbi_out_q) ||
+ vb2_is_busy(&dev->vb_meta_out_q))
return -EBUSY;
dev->output = o;
@@ -1093,6 +1092,7 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
dev->vid_out_dev.tvnorms = 0;
dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
+ dev->meta_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
vivid_update_format_out(dev);
v4l2_ctrl_activate(dev->ctrl_display_present, vivid_is_hdmi_out(dev));
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 5aec4d17eb21..2378bdae57ae 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video DMA
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index f71e2b650453..a528a32ea1dc 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Core
*
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index e65fce9538f9..cc52c1854dbd 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Composite Device
*
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 90cf44245283..855845911ffc 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video Timing Controller
*
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 104ac41c6f96..112376873167 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1148,8 +1148,7 @@ static int wl1273_fm_fops_release(struct file *file)
if (radio->rds_users > 0) {
radio->rds_users--;
if (radio->rds_users == 0) {
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
+ mutex_lock(&core->lock);
radio->irq_flags &= ~WL1273_RDS_EVENT;
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 7541698a0be1..f491420d7b53 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -482,6 +482,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
if (radio->gpio_reset)
gpiod_set_value(radio->gpio_reset, 0);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
return 0;
}
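
The two added calls plug a leak in the remove path: everything set up in
probe() must be released again, in reverse order. A hedged sketch of the
resulting teardown shape (the structure is illustrative; the helpers are
the real V4L2 core functions):

static int my_i2c_remove(struct i2c_client *client)
{
	struct my_radio *radio = i2c_get_clientdata(client);

	video_unregister_device(&radio->videodev); /* stop new users */
	/* ... quiesce the hardware ... */
	v4l2_ctrl_handler_free(&radio->hdl);	   /* free control state */
	v4l2_device_unregister(&radio->v4l2_dev);  /* drop the v4l2 root */
	return 0;
}
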
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 37a850421fbb..ed95244da894 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -83,6 +83,7 @@ struct imon_usb_dev_descr {
__u16 flags;
#define IMON_NO_FLAGS 0
#define IMON_NEED_20MS_PKT_DELAY 1
+#define IMON_SUPPRESS_REPEATED_KEYS 2
struct imon_panel_key_table key_table[];
};
@@ -149,8 +150,9 @@ struct imon_context {
struct timer_list ttimer; /* touch screen timer */
int touch_x; /* x coordinate on touchscreen */
int touch_y; /* y coordinate on touchscreen */
- struct imon_usb_dev_descr *dev_descr; /* device description with key
- table for front panels */
+ const struct imon_usb_dev_descr *dev_descr;

+ /* device description with key table */
+ /* for front panels */
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -315,6 +317,32 @@ static const struct imon_usb_dev_descr imon_DH102 = {
}
};
+/* imon ultrabay front panel key table */
+static const struct imon_usb_dev_descr ultrabay_table = {
+ .flags = IMON_SUPPRESS_REPEATED_KEYS,
+ .key_table = {
+ { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */
+ { 0x000000000100ffeell, KEY_UP },
+ { 0x000000000001ffeell, KEY_DOWN },
+ { 0x000000160000ffeell, KEY_ENTER },
+ { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */
+ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */
+ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */
+ { 0x000000270000ffeell, KEY_DVD }, /* DVD */
+ { 0x000000230000ffeell, KEY_TV }, /* TV */
+ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */
+ { 0x000000070000ffeell, KEY_REWIND },
+ { 0x000000040000ffeell, KEY_STOP },
+ { 0x000000020000ffeell, KEY_PLAYPAUSE },
+ { 0x000000080000ffeell, KEY_FASTFORWARD },
+ { 0x000000060000ffeell, KEY_NEXT }, /* Next */
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000010000ffeell, KEY_MUTE },
+ { 0, KEY_RESERVED },
+ }
+};
+
/*
* USB Device ID for iMON USB Control Boards
*
@@ -1264,9 +1292,11 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code)
{
- int i;
+ const struct imon_panel_key_table *key_table;
u32 keycode = KEY_RESERVED;
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ int i;
+
+ key_table = ictx->dev_descr->key_table;
for (i = 0; key_table[i].hw_code != 0; i++) {
if (key_table[i].hw_code == (code | 0xffee)) {
@@ -1550,7 +1580,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
u32 kc;
u64 scancode;
int press_type = 0;
- long msec;
ktime_t t;
static ktime_t prev_time;
u8 ktype;
@@ -1598,8 +1627,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
- buf[7] == 0x86) {
+ if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;
@@ -1653,14 +1681,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_lock_irqsave(&ictx->kc_lock, flags);
t = ktime_get();
- /* KEY_MUTE repeats from knob need to be suppressed */
- if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- msec = ktime_ms_delta(t, prev_time);
- if (msec < ictx->idev->rep[REP_DELAY]) {
+ /* KEY repeats from knob and panel that need to be suppressed */
+ if (ictx->kc == KEY_MUTE ||
+ ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) {
+ if (ictx->kc == ictx->last_keycode &&
+ ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+
prev_time = t;
kc = ictx->kc;
@@ -1848,6 +1878,14 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR");
ictx->display_supported = false;
break;
+ /* Soundgraph iMON UltraBay */
+ case 0x98:
+ dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
+ ictx->dev_descr = &ultrabay_table;
+ break;
+
default:
dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1979,10 +2017,12 @@ out:
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ const struct imon_panel_key_table *key_table;
struct input_dev *idev;
int ret, i;
+ key_table = ictx->dev_descr->key_table;
+
idev = input_allocate_device();
if (!idev)
goto out;
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index d4aedcf76418..aae0a3cc9479 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -57,32 +57,18 @@ static void imon_ir_data(struct imon *imon)
* fls will tell us the highest bit set plus 1 (or 0 if no
* bits are set).
*/
+ rawir.pulse = !rawir.pulse;
bit = fls64(data & (BIT_ULL(offset) - 1));
if (bit < offset) {
- dev_dbg(imon->dev, "pulse: %d bits", offset - bit);
- rawir.pulse = true;
+ dev_dbg(imon->dev, "%s: %d bits",
+ rawir.pulse ? "pulse" : "space", offset - bit);
rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
- if (bit == 0)
- break;
-
offset = bit;
}
- /*
- * Find highest clear bit which is less than offset.
- *
- * Just invert the data and use same trick as above.
- */
- bit = fls64(~data & (BIT_ULL(offset) - 1));
- dev_dbg(imon->dev, "space: %d bits", offset - bit);
-
- rawir.pulse = false;
- rawir.duration = (offset - bit) * BIT_DURATION;
- ir_raw_event_store_with_filter(imon->rcdev, &rawir);
-
- offset = bit;
+ data = ~data;
} while (offset > 0);
if (packet_no == 0x0a && !imon->rcdev->idle) {
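
The rewritten loop replaces two near-identical scan blocks with one:
fls64() finds the highest set bit below offset, the gap is one pulse or
space run, and inverting the word flips which signal level the next pass
searches for. A standalone sketch of the same scan (fls64 reimplemented
so it compiles outside the kernel; offset must stay below 64):

#include <stdint.h>
#include <stdio.h>

static int fls64_sw(uint64_t x)		/* 1-based; 0 when x == 0 */
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static void decode_runs(uint64_t data, int offset)
{
	int pulse = 0;			/* toggles every iteration */

	do {
		int bit;

		pulse = !pulse;
		bit = fls64_sw(data & ((1ULL << offset) - 1));
		if (bit < offset) {
			printf("%s: %d bits\n",
			       pulse ? "pulse" : "space", offset - bit);
			offset = bit;
		}
		data = ~data;		/* next run is the other level */
	} while (offset > 0);
}

int main(void)
{
	decode_runs(0x73, 8);	/* pulse:1 space:3 pulse:2 space:2 */
	return 0;
}
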
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index 64fb65a9a19f..028df5cb1828 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -79,7 +79,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT))
break;
data->state = STATE_LOW;
@@ -91,7 +91,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT))
break;
data->state = STATE_BUMP;
@@ -164,6 +164,8 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
break;
}
+ dev_dbg(&dev->dev, "RC-MM decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
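
Relaxing the third argument from RCMM_UNIT / 2 to RCMM_UNIT doubles the
accepted timing window around each nominal duration, so marginal
transmitters still decode. eq_margin() is essentially a symmetric window
test; a sketch (not quoted from rc-core-priv.h, and assuming
margin <= d2):

static inline bool eq_margin(unsigned int d1, unsigned int d2,
			     unsigned int margin)
{
	return d1 >= d2 - margin && d1 <= d2 + margin;
}
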
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 3ab6cec0dc3b..07667c04c1d2 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -382,7 +382,7 @@ static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
ite_dbg("%s called", __func__);
/* clear the array just in case */
- memset(last_sent, 0, ARRAY_SIZE(last_sent));
+ memset(last_sent, 0, sizeof(last_sent));
spin_lock_irqsave(&dev->lock, flags);
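
The one-liner fixes a classic size bug: ARRAY_SIZE() yields the element
count, not the byte count, so the old call cleared only a quarter of the
array for a 4-byte element type. In isolation:

#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int last_sent[16];

static void clear_state(void)
{
	memset(last_sent, 0, ARRAY_SIZE(last_sent)); /* bug: 16 bytes */
	memset(last_sent, 0, sizeof(last_sent));     /* fix: 64 bytes */
}
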
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index a56fc634d2d6..63261ef6380a 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia-rm-ks.o \
rc-avertv-303.o \
rc-azurewave-ad-tu700.o \
+ rc-beelink-gs1.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
@@ -114,6 +115,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-tt-1500.o \
rc-twinhan-dtv-cab-ci.o \
rc-twinhan1027.o \
+ rc-vega-s9x.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
diff --git a/drivers/media/rc/keymaps/rc-beelink-gs1.c b/drivers/media/rc/keymaps/rc-beelink-gs1.c
new file mode 100644
index 000000000000..cedbd5d20bc7
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-beelink-gs1.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2019 Clément Péron
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * Keymap for the Beelink GS1 remote control
+ */
+
+static struct rc_map_table beelink_gs1_table[] = {
+ /*
+ * TV Keys (Power, Learn and Volume)
+ * { 0x40400d, KEY_TV },
+ * { 0x80f1, KEY_TV },
+ * { 0x80f3, KEY_TV },
+ * { 0x80f4, KEY_TV },
+ */
+
+ { 0x8051, KEY_POWER },
+ { 0x804d, KEY_MUTE },
+ { 0x8040, KEY_CONFIG },
+
+ { 0x8026, KEY_UP },
+ { 0x8028, KEY_DOWN },
+ { 0x8025, KEY_LEFT },
+ { 0x8027, KEY_RIGHT },
+ { 0x800d, KEY_OK },
+
+ { 0x8053, KEY_HOME },
+ { 0x80bc, KEY_MEDIA },
+ { 0x801b, KEY_BACK },
+ { 0x8049, KEY_MENU },
+
+ { 0x804e, KEY_VOLUMEUP },
+ { 0x8056, KEY_VOLUMEDOWN },
+
+ { 0x8054, KEY_SUBTITLE }, /* Web */
+ { 0x8052, KEY_EPG }, /* Media */
+
+ { 0x8041, KEY_CHANNELUP },
+ { 0x8042, KEY_CHANNELDOWN },
+
+ { 0x8031, KEY_1 },
+ { 0x8032, KEY_2 },
+ { 0x8033, KEY_3 },
+
+ { 0x8034, KEY_4 },
+ { 0x8035, KEY_5 },
+ { 0x8036, KEY_6 },
+
+ { 0x8037, KEY_7 },
+ { 0x8038, KEY_8 },
+ { 0x8039, KEY_9 },
+
+ { 0x8044, KEY_DELETE },
+ { 0x8030, KEY_0 },
+ { 0x8058, KEY_MODE }, /* # Input Method */
+};
+
+static struct rc_map_list beelink_gs1_map = {
+ .map = {
+ .scan = beelink_gs1_table,
+ .size = ARRAY_SIZE(beelink_gs1_table),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_BEELINK_GS1,
+ }
+};
+
+static int __init init_rc_map_beelink_gs1(void)
+{
+ return rc_map_register(&beelink_gs1_map);
+}
+
+static void __exit exit_rc_map_beelink_gs1(void)
+{
+ rc_map_unregister(&beelink_gs1_map);
+}
+
+module_init(init_rc_map_beelink_gs1)
+module_exit(exit_rc_map_beelink_gs1)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Clément Péron <peron.clem@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-vega-s9x.c b/drivers/media/rc/keymaps/rc-vega-s9x.c
new file mode 100644
index 000000000000..bf210c4dc535
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-vega-s9x.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the Tronsmart Vega S9x remote control
+//
+
+static struct rc_map_table vega_s9x[] = {
+ { 0x18, KEY_POWER },
+ { 0x17, KEY_MUTE }, // mouse
+
+ { 0x46, KEY_UP },
+ { 0x47, KEY_LEFT },
+ { 0x55, KEY_OK },
+ { 0x15, KEY_RIGHT },
+ { 0x16, KEY_DOWN },
+
+ { 0x06, KEY_HOME },
+ { 0x42, KEY_PLAYPAUSE },
+ { 0x40, KEY_BACK },
+
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x04, KEY_MENU },
+ { 0x10, KEY_VOLUMEUP },
+};
+
+static struct rc_map_list vega_s9x_map = {
+ .map = {
+ .scan = vega_s9x,
+ .size = ARRAY_SIZE(vega_s9x),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_VEGA_S9X,
+ }
+};
+
+static int __init init_rc_map_vega_s9x(void)
+{
+ return rc_map_register(&vega_s9x_map);
+}
+
+static void __exit exit_rc_map_vega_s9x(void)
+{
+ rc_map_unregister(&vega_s9x_map);
+}
+
+module_init(init_rc_map_vega_s9x)
+module_exit(exit_rc_map_vega_s9x)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3fc9829a9233..f9616158bcf4 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
datasize = 4;
break;
case MCE_CMD_G_REVISION:
- datasize = 2;
+ datasize = 4;
break;
case MCE_RSP_EQWAKESUPPORT:
case MCE_RSP_GETWAKESOURCE:
@@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
char *inout;
u8 cmd, subcmd, *data;
struct device *dev = ir->dev;
- int start, skip = 0;
u32 carrier, period;
- /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
- if (ir->flags.microsoft_gen1 && !out && !offset)
- skip = 2;
-
- if (len <= skip)
+ if (offset < 0 || offset >= buf_len)
return;
dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
@@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
inout = out ? "Request" : "Got";
- start = offset + skip;
- cmd = buf[start] & 0xff;
- subcmd = buf[start + 1] & 0xff;
- data = buf + start + 2;
+ cmd = buf[offset];
+ subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
+ data = &buf[offset] + 2;
+
+ /* Trace meaningless 0xb1 0x60 header bytes on original receiver */
+ if (ir->flags.microsoft_gen1 && !out && !offset) {
+ dev_dbg(dev, "MCE gen 1 header");
+ return;
+ }
+
+ /* Trace IR data header or trailer */
+ if (cmd != MCE_CMD_PORT_IR &&
+ (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
+ if (cmd == MCE_IRDATA_TRAILER)
+ dev_dbg(dev, "End of raw IR data");
+ else
+ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+ cmd & MCE_PACKET_LENGTH_MASK);
+ return;
+ }
+
+ /* Unexpected end of buffer? */
+ if (offset + len > buf_len)
+ return;
+ /* Decode MCE command/response */
switch (cmd) {
case MCE_CMD_NULL:
if (subcmd == MCE_CMD_NULL)
@@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
dev_dbg(dev, "Get hw/sw rev?");
else
dev_dbg(dev, "hw/sw rev %*ph",
- 4, &buf[start + 2]);
+ 4, &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
@@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
default:
break;
}
-
- if (cmd == MCE_IRDATA_TRAILER)
- dev_dbg(dev, "End of raw IR data");
- else if ((cmd != MCE_CMD_PORT_IR) &&
- ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
- dev_dbg(dev, "Raw IR data, %d pulse/space samples",
- cmd & MCE_PACKET_LENGTH_MASK);
#endif
}
@@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
}
/*
+ * Handle PORT_SYS/IR command response received from the MCE device.
+ *
+ * Assumes single response with all its data (not truncated)
+ * in buf_in[]. The response itself determines its total length
+ * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
+ *
* We don't do anything but print debug spew for many of the command bits
* we receive from the hardware, but some of them are useful information
* we want to store so that we can use them.
*/
-static void mceusb_handle_command(struct mceusb_dev *ir, int index)
+static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{
+ u8 cmd = buf_in[0];
+ u8 subcmd = buf_in[1];
+ u8 *hi = &buf_in[2]; /* read only when required */
+ u8 *lo = &buf_in[3]; /* read only when required */
struct ir_raw_event rawir = {};
- u8 hi = ir->buf_in[index + 1] & 0xff;
- u8 lo = ir->buf_in[index + 2] & 0xff;
u32 carrier_cycles;
u32 cycles_fix;
- switch (ir->buf_in[index]) {
- /* the one and only 5-byte return value command */
- case MCE_RSP_GETPORTSTATUS:
- if ((ir->buf_in[index + 4] & 0xff) == 0x00)
- ir->txports_cabled |= 1 << hi;
- break;
+ if (cmd == MCE_CMD_PORT_SYS) {
+ switch (subcmd) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+ if (buf_in[5] == 0)
+ ir->txports_cabled |= 1 << *hi;
+ break;
+
+ /* 1-byte return value commands */
+ case MCE_RSP_EQEMVER:
+ ir->emver = *hi;
+ break;
+
+ /* No return value commands */
+ case MCE_RSP_CMD_ILLEGAL:
+ ir->need_reset = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return;
+ }
+ if (cmd != MCE_CMD_PORT_IR)
+ return;
+
+ switch (subcmd) {
/* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT:
- ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
+ ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
break;
case MCE_RSP_EQIRNUMPORTS:
- ir->num_txports = hi;
- ir->num_rxports = lo;
+ ir->num_txports = *hi;
+ ir->num_rxports = *lo;
break;
case MCE_RSP_EQIRRXCFCNT:
/*
@@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
*/
if (ir->carrier_report_enabled && ir->learning_active &&
ir->pulse_tunit > 0) {
- carrier_cycles = (hi << 8 | lo);
+ carrier_cycles = (*hi << 8 | *lo);
/*
* Adjust carrier cycle count by adding
* 1 missed count per pulse "on"
@@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
break;
/* 1-byte return value commands */
- case MCE_RSP_EQEMVER:
- ir->emver = hi;
- break;
case MCE_RSP_EQIRTXPORTS:
- ir->tx_mask = hi;
+ ir->tx_mask = *hi;
break;
case MCE_RSP_EQIRRXPORTEN:
- ir->learning_active = ((hi & 0x02) == 0x02);
- if (ir->rxports_active != hi) {
+ ir->learning_active = ((*hi & 0x02) == 0x02);
+ if (ir->rxports_active != *hi) {
dev_info(ir->dev, "%s-range (0x%x) receiver active",
- ir->learning_active ? "short" : "long", hi);
- ir->rxports_active = hi;
+ ir->learning_active ? "short" : "long", *hi);
+ ir->rxports_active = *hi;
}
break;
+
+ /* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true;
break;
+
default:
break;
}
@@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false);
- mceusb_handle_command(ir, i);
+ if (i + ir->rem < buf_len)
+ mceusb_handle_command(ir, &ir->buf_in[i - 1]);
ir->parser_state = CMD_DATA;
break;
case PARSE_IRDATA:
@@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem--;
break;
case CMD_HEADER:
- /* decode mce packets of the form (84),AA,BB,CC,DD */
- /* IR data packets can span USB messages - rem */
ir->cmd = ir->buf_in[i];
if ((ir->cmd == MCE_CMD_PORT_IR) ||
((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) {
+ /*
+ * got PORT_SYS, PORT_IR, or unknown
+ * command response prefix
+ */
ir->parser_state = SUBCMD;
continue;
}
+ /*
+ * got IR data prefix (0x80 + num_bytes)
+ * decode MCE packets of the form {0x83, AA, BB, CC}
+ * IR data packets can span USB messages
+ */
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false);
@@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
+
+ /*
+ * Accept IR data spanning multiple rx buffers.
+ * Reject MCE command response spanning multiple rx buffers.
+ */
+ if (ir->parser_state != PARSE_IRDATA || !ir->rem)
+ ir->parser_state = CMD_HEADER;
+
if (event) {
dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc);
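
The mceusb rework centres on one contract: a response is handed to
mceusb_handle_command() only when the whole response, whose length the
command itself declares, is known to sit inside the receive buffer. A
hedged sketch of that guard (the wrapper is illustrative; the named
functions are the driver's own):

static void handle_one_response(struct mceusb_dev *ir, u8 *buf_in,
				int i, int buf_len)
{
	int datalen;

	if (i + 1 >= buf_len)
		return;				/* need cmd and subcmd */
	datalen = mceusb_cmd_datasize(buf_in[i], buf_in[i + 1]);
	if (i + 1 + datalen >= buf_len)
		return;				/* truncated response */
	mceusb_handle_command(ir, &buf_in[i]);	/* cmd at buf_in[i] */
}
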
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 9f21b3e8b377..5f36244cc34f 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Remote Controller core raw events header
*
* Copyright (C) 2010 by Mauro Carvalho Chehab
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 13da4c5c7d17..7741151606ef 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
set_bit(MSC_SCAN, dev->input_dev->mscbit);
/* Pointer/mouse events */
+ set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
set_bit(EV_REL, dev->input_dev->evbit);
set_bit(REL_X, dev->input_dev->relbit);
set_bit(REL_Y, dev->input_dev->relbit);
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
index 451ec4e9dcfa..b8eb5bc4d9be 100644
--- a/drivers/media/rc/tango-ir.c
+++ b/drivers/media/rc/tango-ir.c
@@ -157,20 +157,10 @@ static int tango_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rc_dev *rc;
struct tango_ir *ir;
- struct resource *rc5_res;
- struct resource *rc6_res;
u64 clkrate, clkdiv;
int irq, err;
u32 val;
- rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!rc5_res)
- return -EINVAL;
-
- rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!rc6_res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -EINVAL;
@@ -179,11 +169,11 @@ static int tango_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ ir->rc5_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->rc5_base))
return PTR_ERR(ir->rc5_base);
- ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ ir->rc6_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ir->rc6_base))
return PTR_ERR(ir->rc6_base);
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 83ca5dc047ea..0e26d22f0b26 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -206,7 +206,7 @@ static int qm1d1c0042_set_params(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- a = (freq + state->cfg.xtal_freq / 2) / state->cfg.xtal_freq;
+ a = DIV_ROUND_CLOSEST(freq, state->cfg.xtal_freq);
state->regs[0x06] &= 0x40;
state->regs[0x06] |= (a - 12) / 4;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index e87040d6eca7..898e0f9f8b70 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -118,6 +118,11 @@ static int si2157_init(struct dvb_frontend *fe)
goto err;
}
+ if (dev->dont_load_firmware) {
+ dev_info(&client->dev, "device is buggy, skipping firmware download\n");
+ goto skip_fw_download;
+ }
+
/* query chip revision */
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
@@ -440,6 +445,7 @@ static int si2157_probe(struct i2c_client *client,
i2c_set_clientdata(client, dev);
dev->fe = cfg->fe;
dev->inversion = cfg->inversion;
+ dev->dont_load_firmware = cfg->dont_load_firmware;
dev->if_port = cfg->if_port;
dev->chiptype = (u8)id->driver_data;
dev->if_frequency = 5000000; /* default value of property 0x0706 */
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index c22ca784f43f..ffdece3c2eaa 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -11,29 +11,34 @@
#include <media/media-device.h>
#include <media/dvb_frontend.h>
-/*
- * I2C address
- * 0x60
+/**
+ * struct si2157_config - configuration parameters for si2157
+ *
+ * @fe:
+ * frontend returned by driver
+ * @mdev:
+ * media device returned by driver
+ * @inversion:
+ * spectral inversion
+ * @dont_load_firmware:
+ * Instead of uploading new firmware, use the existing one
+ * @if_port:
+ * Port selection
+ * Select the RF interface to use (pins 9+11 or 12+13)
+ *
+ * Note:
+ * The I2C address of this demod is 0x60.
*/
struct si2157_config {
- /*
- * frontend
- */
struct dvb_frontend *fe;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_device *mdev;
#endif
- /*
- * Spectral Inversion
- */
- bool inversion;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
- /*
- * Port selection
- * Select the RF interface to use (pins 9+11 or 12+13)
- */
u8 if_port;
};
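
With the flags packed into one-bit fields, a bridge driver configures
the tuner along these lines (a hedged example: the attach helper and
values are illustrative, dvb_module_probe() is the usual registration
path):

static void attach_tuner(struct i2c_adapter *i2c, struct dvb_frontend *fe)
{
	struct si2157_config cfg = {
		.fe = fe,
		.if_port = 1,
		.inversion = 0,
		.dont_load_firmware = 1, /* keep the on-chip firmware */
	};

	/* 0x60 per the I2C address note in the kerneldoc above */
	dvb_module_probe("si2157", NULL, i2c, 0x60, &cfg);
}
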
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 2bda903358da..778f81b39996 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -23,8 +23,9 @@ enum si2157_pads {
struct si2157_dev {
struct mutex i2c_mutex;
struct dvb_frontend *fe;
- bool active;
- bool inversion;
+ unsigned int active:1;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
u8 chiptype;
u8 if_port;
u32 if_frequency;
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 50d017a4822a..fcca39d3e006 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028_types
*
 * This file includes internal types to be used inside tuner-xc2028.
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 7b58bc06e35c..2dd45d0765d7 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 1826ff825c2e..039963a7765b 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
mutex_unlock(&fc_usb->data_mutex);
- return 0;
+ return ret;
}
/* actual bus specific access functions,
@@ -504,7 +504,13 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
 /* use the alternate setting with the largest buffer */
- usb_set_interface(fc_usb->udev,0,1);
+ int ret = usb_set_interface(fc_usb->udev, 0, 1);
+
+ if (ret) {
+ err("set interface failed.");
+ return ret;
+ }
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
@@ -538,6 +544,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n");
return -ENOMEM;
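
The added bNumEndpoints test is the usual hardening against malformed or
hostile USB descriptors: validate what the device claims before any code
indexes cur_altsetting->endpoint[]. The pattern in isolation (driver and
helper names illustrative):

static int my_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	/* refuse interfaces that lack the endpoint we will index */
	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
		return -ENODEV;

	/* intf->cur_altsetting->endpoint[0] is safe from here on */
	return my_register_device(intf);	/* hypothetical */
}
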
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 74f3b29d9c60..2fe2b2d335ba 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_CX231XX
depends on VIDEO_DEV && I2C && I2C_MUX
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select VIDEO_CX25840
select VIDEO_CX2341X
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 6d218a036966..1aec4459f50a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -60,10 +60,6 @@
#define MCI_MODE_MEMORY_READ 0x000
#define MCI_MODE_MEMORY_WRITE 0x4000
-static unsigned int mpegbufs = 8;
-module_param(mpegbufs, int, 0644);
-MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
-
static unsigned int mpeglines = 128;
module_param(mpeglines, int, 0644);
MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
@@ -1080,16 +1076,6 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
return 0;
}
-static void cx231xx_417_check_encoder(struct cx231xx *dev)
-{
- u32 status, seq;
-
- status = 0;
- seq = 0;
- cx231xx_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
- dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
-}
-
static void cx231xx_codec_settings(struct cx231xx *dev)
{
dprintk(1, "%s()\n", __func__);
@@ -1227,40 +1213,25 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
/* ------------------------------------------------------------------ */
-static int bb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned int size = mpeglinesize * mpeglines;
- fh->dev->ts1.ts_packet_size = mpeglinesize;
- fh->dev->ts1.ts_packet_count = mpeglines;
+ dev->ts1.ts_packet_size = mpeglinesize;
+ dev->ts1.ts_packet_count = mpeglines;
- *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
- *count = mpegbufs;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
- return 0;
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = mpeglinesize * mpeglines;
- BUG_ON(in_interrupt());
-
- spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
- spin_unlock_irqrestore(&dev->video_mode.slock, flags);
- videobuf_waiton(vq, &buf->vb, 0, 0);
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *urb,
@@ -1276,13 +1247,13 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
return;
buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ struct cx231xx_buffer, list);
dev->video_mode.isoc_ctl.buf = buf;
dma_q->mpeg_buffer_done = 1;
}
/* Fill buffer */
buf = dev->video_mode.isoc_ctl.buf;
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if ((dma_q->mpeg_buffer_completed+len) <
mpeglines*mpeglinesize) {
@@ -1306,11 +1277,10 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
memcpy(vbuf+dma_q->mpeg_buffer_completed,
data, tail_data);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dma_q->mpeg_buffer_completed = 0;
if (len - tail_data > 0) {
@@ -1331,17 +1301,15 @@ static void buffer_filled(char *data, int len, struct urb *urb,
if (list_empty(&dma_q->active))
return;
- buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Fill buffer */
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
memcpy(vbuf, data, len);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
@@ -1394,100 +1362,104 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
-static int bb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = q->priv_data;
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
- int size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
- return -EINVAL;
- buf->vb.width = fh->dev->ts1.ts_packet_size;
- buf->vb.height = fh->dev->ts1.ts_packet_count;
- buf->vb.size = size;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
}
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
+
+ vidq->sequence = 0;
dev->mode_tv = 1;
- if (urb_init) {
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- rc = cx231xx_unmute_audio(dev);
- if (dev->USE_ISO) {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
- rc = cx231xx_init_isoc(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_isoc_copy);
- } else {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- rc = cx231xx_init_bulk(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- if (rc < 0)
- goto fail;
- }
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ cx231xx_set_gpio_value(dev, 2, 0);
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
+ cx231xx_initialize_codec(dev);
+
+ cx231xx_start_TS1(dev);
+
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, 320, 5,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
-fail:
- free_buffer(q, buf);
- return rc;
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void bb_buf_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned long flags;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ call_all(dev, video, s_stream, 0);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ cx231xx_stop_TS1(dev);
-}
+ /* do this before setting alternate! */
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
+ cx231xx_set_mode(dev, CX231XX_SUSPEND);
-static void bb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- /*struct cx231xx_fh *fh = q->priv_data;*/
- /*struct cx231xx *dev = (struct cx231xx *)fh->dev;*/
+ cx231xx_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_BITS_NONE);
- free_buffer(q, buf);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_qops = {
- .buf_setup = bb_buf_setup,
- .buf_prepare = bb_buf_prepare,
- .buf_queue = bb_buf_queue,
- .buf_release = bb_buf_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------ */
@@ -1495,8 +1467,7 @@ static const struct videobuf_queue_ops cx231xx_qops = {
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->encodernorm.id & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1511,8 +1482,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1533,8 +1503,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*norm = dev->encodernorm.id;
return 0;
@@ -1542,8 +1511,7 @@ static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cx231xx_tvnorms); i++)
@@ -1575,8 +1543,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev *sd;
dprintk(3, "enter vidioc_s_ctrl()\n");
@@ -1601,8 +1568,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_g_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1621,8 +1587,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_try_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1636,230 +1601,21 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_reqbufs(&fh->vidq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_querybuf(&fh->vidq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_qbuf(&fh->vidq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
-
- return videobuf_dqbuf(&fh->vidq, b, file->f_flags & O_NONBLOCK);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "enter vidioc_streamon()\n");
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- if (dev->USE_ISO)
- cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else {
- cx231xx_init_bulk(dev, 320,
- 5,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- dprintk(3, "exit vidioc_streamon()\n");
- return videobuf_streamon(&fh->vidq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_streamoff(&fh->vidq);
-}
-
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
call_all(dev, core, log_status);
return v4l2_ctrl_log_status(file, priv);
}
-static int mpeg_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx231xx *dev = video_drvdata(file);
- struct cx231xx_fh *fh;
-
- dprintk(2, "%s()\n", __func__);
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- mutex_unlock(&dev->lock);
- return -ENOMEM;
- }
-
- file->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- fh->dev = dev;
-
-
- videobuf_queue_vmalloc_init(&fh->vidq, &cx231xx_qops,
- NULL, &dev->video_mode.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer), fh, &dev->lock);
-/*
- videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
- dev->dev, &dev->ts1.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
-*/
-
- cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
- cx231xx_set_gpio_value(dev, 2, 0);
-
- cx231xx_initialize_codec(dev);
-
- mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
- cx231xx_start_TS1(dev);
-
- return 0;
-}
-
-static int mpeg_release(struct file *file)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "mpeg_release()! dev=0x%p\n", dev);
-
- mutex_lock(&dev->lock);
-
- cx231xx_stop_TS1(dev);
-
- /* do this before setting alternate! */
- if (dev->USE_ISO)
- cx231xx_uninit_isoc(dev);
- else
- cx231xx_uninit_bulk(dev);
- cx231xx_set_mode(dev, CX231XX_SUSPEND);
-
- cx231xx_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
- CX231xx_RAW_BITS_NONE);
-
- /* FIXME: Review this crap */
- /* Shut device down on last close */
- if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
- if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
- /* stop mpeg capture */
-
- msleep(500);
- cx231xx_417_check_encoder(dev);
-
- }
- }
-
- if (fh->vidq.streaming)
- videobuf_streamoff(&fh->vidq);
- if (fh->vidq.reading)
- videobuf_read_stop(&fh->vidq);
-
- videobuf_mmap_free(&fh->vidq);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static ssize_t mpeg_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- /* Deal w/ A/V decoder * and mpeg encoder sync issues. */
- /* Start mpeg encoder on first read. */
- if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
- if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
- if (cx231xx_initialize_codec(dev) < 0)
- return -EINVAL;
- }
- }
-
- return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
-}
-
-static __poll_t mpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(file, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(file, &fh->vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- dprintk(2, "%s()\n", __func__);
-
- return videobuf_mmap_mapper(&fh->vidq, vma);
-}
-
static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
- .open = mpeg_open,
- .release = mpeg_release,
- .read = mpeg_read,
- .poll = mpeg_poll,
- .mmap = mpeg_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1881,12 +1637,12 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = vidioc_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = cx231xx_g_register,
@@ -1980,6 +1736,7 @@ int cx231xx_417_register(struct cx231xx *dev)
/* FIXME: Port1 hardcoded here */
int err = -ENODEV;
struct cx231xx_tsport *tsport = &dev->ts1;
+ struct vb2_queue *q;
dprintk(1, "%s()\n", __func__);
@@ -2017,6 +1774,21 @@ int cx231xx_417_register(struct cx231xx *dev)
/* Allocate and initialize V4L video device */
cx231xx_video_dev_init(dev, dev->udev,
&dev->v4l_device, &cx231xx_mpeg_template, "mpeg");
+ q = &dev->mpegq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ err = vb2_queue_init(q);
+ if (err)
+ return err;
+ dev->v4l_device.queue = q;
+
err = video_register_device(&dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 9ef362e221df..fd6e2df3d1b7 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -14,7 +14,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index d417b5fe4093..0974965e848f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1240,7 +1240,7 @@ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev)
int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
u8 analog_or_digital)
{
- int status = 0;
+ int status;
/* first set the direction to output */
status = cx231xx_set_gpio_direction(dev,
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e123e74c549e..92efe6c1f47b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1479,13 +1479,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
goto err_dev_init;
}
- /* init video dma queues */
+ /* init video dma queue */
INIT_LIST_HEAD(&dev->video_mode.vidq.active);
- INIT_LIST_HEAD(&dev->video_mode.vidq.queued);
- /* init vbi dma queues */
+ /* init vbi dma queue */
INIT_LIST_HEAD(&dev->vbi_mode.vidq.active);
- INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued);
/* Reset other chips required if they are tied up with GPIO pins */
cx231xx_add_into_devlist(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index fba7ccdf5a25..d2f143a096d1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -153,131 +153,98 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
Vbi buf operations
------------------------------------------------------------------*/
-static int
-vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int vbi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
u32 height = 0;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- *size = (dev->width * height * 2 * 2);
- if (0 == *count)
- *count = CX231XX_DEF_VBI_BUF;
-
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
-
+ *nplanes = 1;
+ sizes[0] = (dev->width * height * 2 * 2);
return 0;
}
/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->vbi_mode.slock, flags);
- if (dev->vbi_mode.bulk_ctl.buf == buf)
- dev->vbi_mode.bulk_ctl.buf = NULL;
- spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int
-vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buf_prepare(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
u32 height = 0;
+ u32 size;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- buf->vb.size = ((dev->width << 1) * height * 2);
+ size = ((dev->width << 1) * height * 2);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size)
return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = height;
- buf->vb.field = field;
- buf->vb.field = V4L2_FIELD_SEQ_TB;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
-
- if (!dev->vbi_mode.bulk_ctl.num_bufs)
- urb_init = 1;
-
- if (urb_init) {
- rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
- CX231XX_NUM_VBI_BUFS,
- dev->vbi_mode.alt_max_pkt_size[0],
- cx231xx_isoc_vbi_copy);
- if (rc < 0)
- goto fail;
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(vb, 0, size);
return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
}
-static void
-vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void vbi_buf_queue(struct vb2_buffer *vb)
{
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ unsigned long flags;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
+}
+
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ dev->vbi_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}
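
The active list is shared with the URB completion path, so both sides take dev->vbi_mode.slock with interrupts disabled. A minimal sketch of the consumer side, using a hypothetical pop_active() helper (illustration only, not driver code):

static struct cx231xx_buffer *pop_active(struct cx231xx_dmaqueue *q,
					 spinlock_t *lock)
{
	struct cx231xx_buffer *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);	/* completion runs in IRQ context */
	if (!list_empty(&q->active)) {
		buf = list_first_entry(&q->active,
				       struct cx231xx_buffer, list);
		list_del(&buf->list);
	}
	spin_unlock_irqrestore(lock, flags);
	return buf;
}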
-static void vbi_buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ int ret;
+
+ vidq->sequence = 0;
+ ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
+ CX231XX_NUM_VBI_BUFS,
+ dev->vbi_mode.alt_max_pkt_size[0],
+ cx231xx_isoc_vbi_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+static void vbi_stop_streaming(struct vb2_queue *vq)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-const struct videobuf_queue_ops cx231xx_vbi_qops = {
- .buf_setup = vbi_buffer_setup,
- .buf_prepare = vbi_buffer_prepare,
- .buf_queue = vbi_buffer_queue,
- .buf_release = vbi_buffer_release,
+struct vb2_ops cx231xx_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buf_prepare,
+ .buf_queue = vbi_buf_queue,
+ .start_streaming = vbi_start_streaming,
+ .stop_streaming = vbi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
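
For reference, vb2 drives these callbacks in a fixed order once the queue is registered; a rough summary of the streaming lifecycle (simplified, not the vb2 source):

/*
 * Rough vb2 call order (summary only):
 *   VIDIOC_REQBUFS   -> queue_setup()
 *   VIDIOC_QBUF      -> buf_prepare(), then buf_queue()
 *   VIDIOC_STREAMON  -> start_streaming(), once at least
 *                       min_buffers_needed buffers are queued
 *   URB completion   -> vb2_buffer_done(..., VB2_BUF_STATE_DONE)
 *   VIDIOC_STREAMOFF -> stop_streaming(); every buffer still owned by
 *                       the driver must be returned with
 *                       vb2_buffer_done(..., VB2_BUF_STATE_ERROR)
 */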
/* ------------------------------------------------------------------
@@ -512,16 +479,15 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
/* Advise that the buffer was filled */
- /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
+ /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
dev->vbi_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
@@ -611,11 +577,11 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));
dev->vbi_mode.bulk_ctl.buf = *buf;
@@ -656,7 +622,7 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -EINVAL;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if (dma_q->bytes_left_in_line != _line_size) {
current_line_bytes_copied =
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 7cddd629fbfc..0b21bee5fa30 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -10,7 +10,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern const struct videobuf_queue_ops cx231xx_vbi_qops;
+extern struct vb2_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 9b51f07a729e..69abafaebbf3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -58,10 +58,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(CX231XX_VERSION);
-static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
module_param_array(video_nr, int, NULL, 0444);
@@ -166,18 +166,19 @@ static inline void buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
/* Advise that the buffer was filled */
- cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.vb2_buf.index);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
else
dev->video_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
@@ -241,11 +242,11 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = *buf;
@@ -653,7 +654,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -1;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;
@@ -672,7 +673,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
bytes_to_copy : dma_q->bytes_left_in_line;
- if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + buf->vb.size))
+ if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + dev->size))
return 0;
/* The below copies the UYVY data straight into video buffer */
@@ -708,149 +709,98 @@ u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q)
Videobuf operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
- if (0 == *count)
- *count = CX231XX_DEF_BUF;
+ dev->size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
-
- cx231xx_enable_analog_tuner(dev);
+ if (*nplanes)
+ return sizes[0] < dev->size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = dev->size;
return 0;
}
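
The *nplanes check follows the usual vb2 contract: non-zero means the call came from VIDIOC_CREATE_BUFS with caller-supplied sizes that only need validating; zero means the driver fills them in. A minimal sketch of that contract, with a hypothetical fixed frame size:

/* Illustration of the queue_setup contract; not driver code */
static int example_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			       unsigned int *nplanes, unsigned int sizes[],
			       struct device *alloc_devs[])
{
	unsigned int size = 720 * 576 * 2;	/* assumed 16 bpp frame */

	if (*nplanes)				/* VIDIOC_CREATE_BUFS path */
		return sizes[0] < size ? -EINVAL : 0;
	*nplanes = 1;				/* VIDIOC_REQBUFS path */
	sizes[0] = size;
	return 0;
}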
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
-
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
+ list_add_tail(&buf->list, &vidq->active);
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
-
- /* The only currently supported format is 16 bits/pixel */
- buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
- + 7) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
- }
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
- if (urb_init) {
- dev->mode_tv = 0;
- if (dev->USE_ISO)
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else
- rc = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_bulk_copy);
- if (rc < 0)
- goto fail;
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
}
-
- buf->vb.state = VIDEOBUF_PREPARED;
-
- return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
}
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ vidq->sequence = 0;
+ dev->mode_tv = 0;
+ cx231xx_enable_analog_tuner(dev);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
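
Note the state chosen on each path; a short summary of the convention:

/*
 * Buffer-state convention used above (summary, not vb2 source):
 *   start_streaming() fails -> VB2_BUF_STATE_QUEUED, so vb2 can hand
 *                              the untouched buffers out again
 *   stop_streaming()        -> VB2_BUF_STATE_ERROR, stream torn down
 *   capture completed       -> VB2_BUF_STATE_DONE
 */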
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = (struct cx231xx *)fh->dev;
-
- cx231xx_isocdbg("cx231xx: called buffer_release\n");
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ call_all(dev, video, s_stream, 0);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/********************* v4l2 interface **************************************/
@@ -872,58 +822,6 @@ void video_mux(struct cx231xx *dev, int index)
cx231xx_do_mode_ctrl_overrides(dev);
}
-/* Usage lock check functions */
-static int res_get(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
- int rc = 0;
-
- /* This instance already has stream_on */
- if (fh->stream_on)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (dev->stream_on)
- return -EBUSY;
- dev->stream_on = 1;
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (dev->vbi_stream_on)
- return -EBUSY;
- dev->vbi_stream_on = 1;
- } else
- return -EINVAL;
-
- fh->stream_on = 1;
-
- return rc;
-}
-
-static int res_check(struct cx231xx_fh *fh)
-{
- return fh->stream_on;
-}
-
-static void res_free(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
-
- fh->stream_on = 0;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- dev->stream_on = 0;
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- dev->vbi_stream_on = 0;
-}
-
-static int check_dev(struct cx231xx *dev)
-{
- if (dev->state & DEV_DISCONNECTED) {
- dev_err(dev->dev, "v4l2 ioctl: device not present\n");
- return -ENODEV;
- }
- return 0;
-}
-
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
@@ -931,8 +829,7 @@ static int check_dev(struct cx231xx *dev)
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
@@ -960,8 +857,7 @@ static struct cx231xx_fmt *format_by_fourcc(unsigned int fourcc)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int width = f->fmt.pix.width;
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
@@ -993,39 +889,25 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
- struct cx231xx_fmt *fmt;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ int rc;
- rc = check_dev(dev);
- if (rc < 0)
+ rc = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (rc)
return rc;
- vidioc_try_fmt_vid_cap(file, priv, f);
-
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt)
- return -EINVAL;
-
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
+ if (vb2_is_busy(&dev->vidq)) {
dev_err(dev->dev, "%s: queue busy\n", __func__);
return -EBUSY;
}
- if (dev->stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s: device in use by another fh\n", __func__);
- return -EBUSY;
- }
-
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- dev->format = fmt;
+ dev->format = format_by_fourcc(f->fmt.pix.pixelformat);
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
call_all(dev, pad, set_fmt, NULL, &format);
@@ -1036,8 +918,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*id = dev->norm;
return 0;
@@ -1045,21 +926,15 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (dev->norm == norm)
return 0;
- if (videobuf_queue_is_busy(&fh->vb_vidq))
+ if (vb2_is_busy(&dev->vidq))
return -EBUSY;
dev->norm = norm;
@@ -1141,8 +1016,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev)
int cx231xx_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
u32 gen_stat;
unsigned int n;
int ret;
@@ -1181,8 +1055,7 @@ int cx231xx_enum_input(struct file *file, void *priv,
int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*i = dev->video_input;
@@ -1191,14 +1064,9 @@ int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
+ struct cx231xx *dev = video_drvdata(file);
dev->mode_tv = 0;
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (i >= MAX_CX231XX_INPUT)
return -EINVAL;
@@ -1220,13 +1088,7 @@ int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
+ struct cx231xx *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -1244,27 +1106,15 @@ int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != t->index)
return -EINVAL;
-#if 0
- call_all(dev, tuner, s_tuner, t);
-#endif
return 0;
}
int cx231xx_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (f->tuner)
return -EINVAL;
@@ -1277,8 +1127,7 @@ int cx231xx_g_frequency(struct file *file, void *priv,
int cx231xx_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_frequency new_freq = *f;
int rc;
u32 if_frequency = 5400000;
@@ -1287,10 +1136,6 @@ int cx231xx_s_frequency(struct file *file, void *priv,
"Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
f->frequency, f->type);
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != f->tuner)
return -EINVAL;
@@ -1365,8 +1210,7 @@ int cx231xx_g_chip_info(struct file *file, void *fh,
int cx231xx_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 value[4] = { 0, 0, 0, 0 };
u32 data = 0;
@@ -1424,8 +1268,7 @@ int cx231xx_g_register(struct file *file, void *priv,
int cx231xx_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 data[4] = { 0, 0, 0, 0 };
@@ -1472,8 +1315,7 @@ int cx231xx_s_register(struct file *file, void *priv,
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->norm & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1488,8 +1330,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1508,54 +1349,10 @@ static int vidioc_g_selection(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (likely(rc >= 0))
- rc = videobuf_streamon(&fh->vb_vidq);
-
- call_all(dev, video, s_stream, 1);
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (type != fh->type)
- return -EINVAL;
-
- cx25840_call(dev, video, s_stream, 0);
-
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh);
-
- return 0;
-}
-
int cx231xx_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
strscpy(cap->driver, "cx231xx", sizeof(cap->driver));
strscpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
@@ -1587,8 +1384,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1610,8 +1406,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1634,77 +1429,16 @@ static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
-
- if (dev->vbi_stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s device in use by another fh\n", __func__);
- return -EBUSY;
- }
return vidioc_try_fmt_vbi_cap(file, priv, f);
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_reqbufs(&fh->vb_vidq, rb);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_querybuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_qbuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
-}
-
/* ----------------------------------------------------------- */
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1717,7 +1451,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
}
static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1733,52 +1467,20 @@ static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner
*/
static int cx231xx_v4l2_open(struct file *filp)
{
- int radio = 0;
struct video_device *vdev = video_devdata(filp);
struct cx231xx *dev = video_drvdata(filp);
- struct cx231xx_fh *fh;
- enum v4l2_buf_type fh_type = 0;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
- default:
- return -EINVAL;
- }
-
- cx231xx_videodbg("open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[fh_type],
- dev->users);
-
-#if 0
- errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
- if (errCode < 0) {
- dev_err(dev->dev,
- "Device locked on digital mode. Can't open analog\n");
- return -EBUSY;
- }
-#endif
+ int ret;
- fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
- if (mutex_lock_interruptible(&dev->lock)) {
- kfree(fh);
+ if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(filp);
+ if (ret) {
+ mutex_unlock(&dev->lock);
+ return ret;
}
- fh->dev = dev;
- fh->type = fh_type;
- filp->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+ if (dev->users++ == 0) {
/* Power up in Analog TV mode */
if (dev->board.external_av)
cx231xx_set_power_mode(dev,
@@ -1786,10 +1488,6 @@ static int cx231xx_v4l2_open(struct file *filp)
else
cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
-#if 0
- cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
-#endif
-
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
@@ -1799,38 +1497,21 @@ static int cx231xx_v4l2_open(struct file *filp)
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
-
}
- if (radio) {
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
cx231xx_videodbg("video_open: setting radio device\n");
/* cx231xx_start_radio(dev); */
call_all(dev, tuner, s_radio);
}
-
- dev->users++;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
- NULL, &dev->video_mode.slock,
- fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
/* Set the required alternate setting; the VBI interface works in
bulk mode only */
cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
- NULL, &dev->vbi_mode.slock,
- fh->type, V4L2_FIELD_SEQ_TB,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
}
mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
-
return 0;
}
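
The first-user/last-user idiom replaces the old per-filehandle bookkeeping; a descriptive summary (comment only):

/*
 * Open/close refcounting used here (summary):
 *   open:  if (dev->users++ == 0)  power up, set alternate settings
 *   close: if (--dev->users == 0)  stop transfers, suspend the chip
 * dev->lock serializes the counter with the rest of the v4l2 paths.
 */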
@@ -1871,68 +1552,12 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
*/
static int cx231xx_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
-
- cx231xx_videodbg("users=%d\n", dev->users);
-
- cx231xx_videodbg("users=%d\n", dev->users);
- if (res_check(fh))
- res_free(fh);
-
- /*
- * To workaround error number=-71 on EP0 for VideoGrabber,
- * need exclude following.
- * FIXME: It is probably safe to remove most of these, as we're
- * now avoiding the alternate setting for INDEX_VANC
- */
- if (!dev->board.no_alt_vanc)
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- if (atomic_read(&dev->devlist_count) > 0) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
- return 0;
- }
-
- /* do this before setting alternate! */
- cx231xx_uninit_vbi_isoc(dev);
-
- /* set alternate 0 */
- if (!dev->vbi_or_sliced_cc_mode)
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
- else
- cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- dev->users--;
- wake_up_interruptible(&dev->open);
- return 0;
- }
+ struct cx231xx *dev = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
- v4l2_fh_del(&fh->fh);
- dev->users--;
- if (!dev->users) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
+ _vb2_fop_release(filp, NULL);
+ if (--dev->users == 0) {
/* Save some power by putting tuner to sleep */
call_all(dev, tuner, standby);
@@ -1942,20 +1567,40 @@ static int cx231xx_close(struct file *filp)
else
cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
+ }
+
+ /*
+ * To work around error number=-71 on EP0 for VideoGrabber,
+ * we need to exclude the following.
+ * FIXME: It is probably safe to remove most of these, as we're
+ * now avoiding the alternate setting for INDEX_VANC
+ */
+ if (!dev->board.no_alt_vanc && vdev->vfl_type == VFL_TYPE_VBI) {
+ /* do this before setting alternate! */
+ cx231xx_uninit_vbi_isoc(dev);
/* set alternate 0 */
+ if (!dev->vbi_or_sliced_cc_mode)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ else
+ cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
+
+ wake_up_interruptible_nr(&dev->open, 1);
+ return 0;
+ }
+
+ if (dev->users == 0) {
+ /* set alternate 0 */
cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
}
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
+
wake_up_interruptible(&dev->open);
return 0;
}
static int cx231xx_v4l2_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(filp);
int rc;
mutex_lock(&dev->lock);
@@ -1964,116 +1609,13 @@ static int cx231xx_v4l2_close(struct file *filp)
return rc;
}
-/*
- * cx231xx_v4l2_read()
- * will allocate buffers when called for the first time
- */
-static ssize_t
-cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
- loff_t *pos)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- mutex_unlock(&dev->lock);
- return rc;
- }
- return 0;
-}
-
-/*
- * cx231xx_v4l2_poll()
- * will allocate buffers when called for the first time
- */
-static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return EPOLLERR;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return EPOLLERR;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(filp, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) ||
- (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type)) {
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
- }
- return res | EPOLLERR;
-}
-
-/*
- * cx231xx_v4l2_mmap()
- */
-static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- mutex_unlock(&dev->lock);
-
- cx231xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end -
- (unsigned long)vma->vm_start, rc);
-
- return rc;
-}
-
static const struct v4l2_file_operations cx231xx_v4l_fops = {
.owner = THIS_MODULE,
.open = cx231xx_v4l2_open,
.release = cx231xx_v4l2_close,
- .read = cx231xx_v4l2_read,
- .poll = cx231xx_v4l2_poll,
- .mmap = cx231xx_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -2088,17 +1630,17 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
.vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_selection = vidioc_g_selection,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = cx231xx_enum_input,
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = cx231xx_g_tuner,
.vidioc_s_tuner = cx231xx_s_tuner,
.vidioc_g_frequency = cx231xx_g_frequency,
@@ -2175,6 +1717,7 @@ static void cx231xx_vdev_init(struct cx231xx *dev,
int cx231xx_register_analog_devices(struct cx231xx *dev)
{
+ struct vb2_queue *q;
int ret;
dev_info(dev->dev, "v4l2 driver version %s\n", CX231XX_VERSION);
@@ -2221,6 +1764,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize video media entity!\n");
#endif
dev->vdev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vdev.queue = q;
dev->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
@@ -2254,6 +1812,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize vbi media entity!\n");
#endif
dev->vbi_dev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vbi_dev.queue = q;
dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VBI_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index b007611abc37..b32eab641793 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -20,7 +20,7 @@
#include <media/drv-intf/cx2341x.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
@@ -223,8 +223,8 @@ struct cx231xx_fmt {
/* buffer for one video frame */
struct cx231xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
struct list_head frame;
int top_field;
int receiving;
@@ -237,7 +237,6 @@ enum ps_package_head {
struct cx231xx_dmaqueue {
struct list_head active;
- struct list_head queued;
wait_queue_head_t wq;
@@ -251,6 +250,7 @@ struct cx231xx_dmaqueue {
u32 lines_completed;
u8 field1_done;
u32 lines_per_field;
+ u32 sequence;
/*Mpeg2 control buffer*/
u8 *p_left_data;
@@ -427,23 +427,6 @@ struct cx231xx_audio {
struct cx231xx;
-struct cx231xx_fh {
- struct v4l2_fh fh;
- struct cx231xx *dev;
- unsigned int stream_on:1; /* Locks streams */
- enum v4l2_buf_type type;
-
- struct videobuf_queue vb_vidq;
-
- /* vbi capture */
- struct videobuf_queue vidq;
- struct videobuf_queue vbiq;
-
- /* MPEG Encoder specifics ONLY */
-
- atomic_t v4l_reading;
-};
-
/*****************************************************************/
/* set/get i2c */
/* 00--1Mb/s, 01-400kb/s, 10--100kb/s, 11--5Mb/s */
@@ -634,6 +617,7 @@ struct cx231xx {
int width; /* current frame width */
int height; /* current frame height */
int interlaced; /* 1=interlace fields, 0=just top fields */
+ unsigned int size;
struct cx231xx_audio adev;
@@ -657,6 +641,9 @@ struct cx231xx {
struct media_pad input_pad[MAX_CX231XX_INPUT];
#endif
+ struct vb2_queue vidq;
+ struct vb2_queue vbiq;
+
unsigned char eedata[256];
struct cx231xx_video_mode video_mode;
@@ -717,6 +704,7 @@ struct cx231xx {
u8 USE_ISO;
struct cx231xx_tvnorm encodernorm;
struct cx231xx_tsport ts1, ts2;
+ struct vb2_queue mpegq;
struct video_device v4l_device;
atomic_t v4l_reader_count;
u32 freq;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 3afd18733614..792667ee5ebc 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1197,6 +1197,15 @@ err:
return ret;
}
+/*
+ * The I2C speed register is calculated with:
+ * I2C speed register = (1000000000 / (24.4 * 16 * I2C_speed))
+ *
+ * The default speed register for it930x is 7, which means a
+ * speed of ~366 kbps
+ */
+#define I2C_SPEED_366K 7
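
Plugging the default register value back into the formula confirms the ~366 kbps figure; a small standalone check (illustrative, outside the driver):

#include <stdio.h>

int main(void)
{
	double reg = 7.0;
	double speed = 1000000000.0 / (24.4 * 16 * reg);

	printf("register %.0f -> %.0f bps\n", reg, speed);	/* ~365925 bps */
	return 0;
}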
+
static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
@@ -1208,13 +1217,13 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
- /* I2C master bus 2 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
+ /* I2C master bus 2 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f6a7, I2C_SPEED_366K);
if (ret < 0)
goto err;
- /* I2C master bus 1,3 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f103, 0x07);
+ /* I2C master bus 1,3 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f103, I2C_SPEED_366K);
if (ret < 0)
goto err;
@@ -1610,6 +1619,24 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = adap->fe[0];
+
+ /*
+ * HACK: The Logilink VG0022A has a bug: when the si2157
+ * firmware that came with the device is replaced by a new
+ * one, the I2C transfers to the tuner will return just 0xff.
+ *
+ * Probably, the vendor firmware has some patch specifically
+ * designed for this device. So, we can't replace by the
+ * generic firmware. The right solution would be to extract
+ * the si2157 firmware from the original driver and ask the
+ * driver to load the specifically designed firmware, but,
+ * while we don't have that, the next best solution is to just
+ * keep the original firmware on the device.
+ */
+ if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
+ le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100)
+ si2157_config.dont_load_firmware = true;
+
si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
ret = af9035_add_i2c_dev(d, "si2157",
it930x_addresses_table[state->it930x_addresses].tuner_i2c_addr,
@@ -2121,6 +2148,8 @@ static const struct usb_device_id af9035_id_table[] = {
&it930x_props, "ITE 9303 Generic", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310,
&it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
+ &it930x_props, "Logilink VG0022A", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index b874a49ececf..52bcc2d2efe5 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -121,6 +121,7 @@ struct dvb_usb_driver_info {
* @interval: time in ms between two queries
* @driver_type: used to point if a device supports raw mode
* @bulk_mode: device supports bulk mode for rc (disable polling mode)
+ * @timeout: set to length of last space before raw IR goes idle
*/
struct dvb_usb_rc {
const char *map_name;
@@ -130,6 +131,7 @@ struct dvb_usb_rc {
unsigned int interval;
enum rc_driver_type driver_type;
bool bulk_mode;
+ int timeout;
};
/**
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index e5e056bf9dfa..f1c79f351ec8 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -150,6 +150,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
dev->map_name = d->rc.map_name;
dev->allowed_protocols = d->rc.allowed_protos;
dev->change_protocol = d->rc.change_protocol;
+ dev->timeout = d->rc.timeout;
dev->priv = d;
ret = rc_register_device(dev);
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 617a306f6815..356fd8e66834 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -22,7 +22,6 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dvbsky_state {
- struct mutex stream_mutex;
u8 ibuf[DVBSKY_BUF_LEN];
u8 obuf[DVBSKY_BUF_LEN];
u8 last_lock;
@@ -60,17 +59,19 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
{
struct dvbsky_state *state = d_to_priv(d);
+ static const u8 obuf_pre[3] = { 0x37, 0, 0 };
+ static const u8 obuf_post[3] = { 0x36, 3, 0 };
int ret;
- u8 obuf_pre[3] = { 0x37, 0, 0 };
- u8 obuf_post[3] = { 0x36, 3, 0 };
- mutex_lock(&state->stream_mutex);
- ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
+ mutex_lock(&d->usb_mutex);
+ memcpy(state->obuf, obuf_pre, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
if (!ret && onoff) {
msleep(20);
- ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
+ memcpy(state->obuf, obuf_post, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
}
- mutex_unlock(&state->stream_mutex);
+ mutex_unlock(&d->usb_mutex);
return ret;
}
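
The private stream_mutex is gone: writes now serialize on d->usb_mutex and stage the command in state->obuf, since USB transfer buffers must be DMA-capable (kmalloc'ed driver state qualifies; on-stack arrays do not). A minimal sketch of the pattern, assuming the dvb-usb-v2 helpers behave as used above:

/* Sketch only; hypothetical helper, error paths trimmed */
static int dvbsky_cmd_locked(struct dvb_usb_device *d,
			     struct dvbsky_state *state,
			     const u8 *cmd, unsigned int len)
{
	int ret;

	mutex_lock(&d->usb_mutex);	/* serialize endpoint access */
	memcpy(state->obuf, cmd, len);	/* DMA-safe staging buffer */
	ret = dvb_usbv2_generic_write_locked(d, state->obuf, len);
	mutex_unlock(&d->usb_mutex);
	return ret;
}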
@@ -591,17 +592,7 @@ static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
static int dvbsky_init(struct dvb_usb_device *d)
{
struct dvbsky_state *state = d_to_priv(d);
-
- /* use default interface */
- /*
- ret = usb_set_interface(d->udev, 0, 0);
- if (ret)
- return ret;
- */
- mutex_init(&state->stream_mutex);
-
state->last_lock = 0;
-
return 0;
}
@@ -792,6 +783,9 @@ static const struct usb_device_id dvbsky_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
+ { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C_LITE,
+ &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C Lite",
+ NULL) },
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C2,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C v2",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index c7197e534c02..19217dcf20f1 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -5,7 +5,7 @@
*/
#include <linux/string.h>
-#include "gl861.h"
+#include "dvb_usb.h"
#include "zl10353.h"
#include "qt1010.h"
@@ -14,93 +14,157 @@
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
- u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
-{
- u16 index;
- u16 value = addr << (8 + 1);
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 req, type;
- u8 *buf;
- int ret;
+struct gl861 {
+ /* USB control message buffer */
+ u8 buf[16];
- if (wo) {
- req = GL861_REQ_I2C_WRITE;
- type = GL861_WRITE;
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- } else { /* rw */
- req = GL861_REQ_I2C_READ;
- type = GL861_READ;
- buf = kmalloc(rlen, GFP_KERNEL);
- }
- if (!buf)
- return -ENOMEM;
+ struct i2c_adapter *demod_sub_i2c;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+};
- switch (wlen) {
- case 1:
- index = wbuf[0];
+#define CMD_WRITE_SHORT 0x01
+#define CMD_READ 0x02
+#define CMD_WRITE 0x03
+
+static int gl861_ctrl_msg(struct dvb_usb_device *d, u8 request, u16 value,
+ u16 index, void *data, u16 size)
+{
+ struct gl861 *ctx = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
+ int ret;
+ unsigned int pipe;
+ u8 requesttype;
+
+ mutex_lock(&d->usb_mutex);
+
+ switch (request) {
+ case CMD_WRITE:
+ memcpy(ctx->buf, data, size);
+ /* Fall through */
+ case CMD_WRITE_SHORT:
+ pipe = usb_sndctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_OUT;
break;
- case 2:
- index = wbuf[0];
- value = value + wbuf[1];
+ case CMD_READ:
+ pipe = usb_rcvctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_IN;
break;
default:
- dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n",
- KBUILD_MODNAME, wlen);
- kfree(buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mutex_unlock;
}
- usleep_range(1000, 2000); /* avoid I2C errors */
+ ret = usb_control_msg(d->udev, pipe, request, requesttype, value,
+ index, ctx->buf, size, 200);
+ dev_dbg(&intf->dev, "%d | %02x %02x %*ph %*ph %*ph %s %*ph\n",
+ ret, requesttype, request, 2, &value, 2, &index, 2, &size,
+ (requesttype & USB_DIR_IN) ? "<<<" : ">>>", size, ctx->buf);
+ if (ret < 0)
+ goto err_mutex_unlock;
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
- value, index, buf, rlen, 2000);
+ if (request == CMD_READ)
+ memcpy(data, ctx->buf, size);
- if (!wo && ret > 0)
- memcpy(rbuf, buf, rlen);
+ usleep_range(1000, 2000); /* Avoid I2C errors */
- kfree(buf);
+ mutex_unlock(&d->usb_mutex);
+
+ return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&d->usb_mutex);
+ dev_dbg(&intf->dev, "failed %d\n", ret);
return ret;
}
-/* I2C */
-static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+static int gl861_short_write(struct dvb_usb_device *d, u8 addr, u8 reg, u8 val)
+{
+ return gl861_ctrl_msg(d, CMD_WRITE_SHORT,
+ (addr << 9) | val, reg, NULL, 0);
+}
+
+static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
+ struct usb_interface *intf = d->intf;
+ struct gl861 *ctx = d_to_priv(d);
+ int ret;
+ u8 request, *data;
+ u16 value, index, size;
+
+ /* XXX: I2C adapter maximum data lengths are not tested */
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ /* I2C write */
+ if (msg[0].len < 2 || msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+
+ if (msg[0].len == 2) {
+ request = CMD_WRITE_SHORT;
+ value |= msg[0].buf[1];
+ size = 0;
+ data = NULL;
+ } else {
+ request = CMD_WRITE;
+ size = msg[0].len - 1;
+ data = &msg[0].buf[1];
+ }
+
+ ret = gl861_ctrl_msg(d, request, value, index, data, size);
+ } else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ /* I2C write + read */
+ if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- /* write/read request */
- if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, msg[i+1].buf, msg[i+1].len) < 0)
- break;
- i++;
- } else
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, NULL, 0) < 0)
- break;
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[1].buf, msg[1].len);
+ } else if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ /* I2C read */
+ if (msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ value = (msg[0].addr << 1) << 8;
+ index = 0x0100;
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[0].buf, msg[0].len);
+ } else {
+ /* Unsupported I2C message */
+ dev_dbg(&intf->dev, "unknown i2c msg, num %u\n", num);
+ ret = -EOPNOTSUPP;
}
+ if (ret)
+ goto err;
- mutex_unlock(&d->i2c_mutex);
- return i;
+ return num;
+err:
+ dev_dbg(&intf->dev, "failed %d\n", ret);
+ return ret;
}
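
The xfer routine accepts exactly three i2c_msg shapes and maps each onto one vendor control transfer, with the 7-bit chip address shifted into wValue as (addr << 1) << 8. Summarized (descriptive comment, not from the driver):

/*
 * i2c_msg pattern         -> control request
 *   1 write, len == 2     -> CMD_WRITE_SHORT: value = addr<<9 | data,
 *                            index = register, no data stage
 *   1 write, len > 2      -> CMD_WRITE: data stage carries the payload
 *   write reg + read      -> CMD_READ: index = register byte
 *   1 plain read          -> CMD_READ: index = 0x0100
 * Everything else returns -EOPNOTSUPP.
 */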
-static u32 gl861_i2c_func(struct i2c_adapter *adapter)
+static u32 gl861_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm gl861_i2c_algo = {
- .master_xfer = gl861_i2c_xfer,
- .functionality = gl861_i2c_func,
+ .master_xfer = gl861_i2c_master_xfer,
+ .functionality = gl861_i2c_functionality,
};
/* Callbacks for DVB USB */
@@ -149,6 +213,8 @@ static struct dvb_usb_device_properties gl861_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct gl861),
+
.i2c_algo = &gl861_i2c_algo,
.frontend_attach = gl861_frontend_attach,
.tuner_attach = gl861_tuner_attach,
@@ -166,14 +232,6 @@ static struct dvb_usb_device_properties gl861_props = {
/*
* For Friio
*/
-
-struct friio_priv {
- struct i2c_adapter *demod_sub_i2c;
- struct i2c_client *i2c_client_demod;
- struct i2c_client *i2c_client_tuner;
- struct i2c_adapter tuner_adap;
-};
-
struct friio_config {
struct i2c_board_info demod_info;
struct tc90522_config demod_cfg;
@@ -184,132 +242,10 @@ struct friio_config {
static const struct friio_config friio_config = {
.demod_info = { I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x18), },
+ .demod_cfg = { .split_tuner_read_i2c = true, },
.tuner_info = { I2C_BOARD_INFO("tua6034_friio", 0x60), },
};
-/* For another type of I2C:
- * message sent by a USB control-read/write transaction with data stage.
- * Used in init/config of Friio.
- */
-static int
-gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- addr << (8 + 1), 0x0100, buf, wlen, 2000);
- kfree(buf);
- return ret;
-}
-
-static int
-gl861_i2c_read_ex(struct dvb_usb_device *d, u8 addr, u8 *rbuf, u16 rlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmalloc(rlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
- GL861_REQ_I2C_READ, GL861_READ,
- addr << (8 + 1), 0x0100, buf, rlen, 2000);
- if (ret > 0 && rlen > 0)
- memcpy(buf, rbuf, rlen);
- kfree(buf);
- return ret;
-}
-
-/* For I2C transactions to the tuner of Friio (dvb_pll).
- *
- * Friio uses irregular USB encapsulation for tuner i2c transactions:
- * write transacions are encapsulated with a different USB 'request' value.
- *
- * Although all transactions are sent via the demod(tc90522)
- * and the demod provides an i2c adapter for them, it cannot be used in Friio
- * since it assumes using the same parent adapter with the demod,
- * which does not use the request value and uses same one for both read/write.
- * So we define a dedicated i2c adapter here.
- */
-
-static int
-friio_i2c_tuner_read(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- struct friio_priv *priv;
- u8 addr;
-
- priv = d_to_priv(d);
- addr = priv->i2c_client_demod->addr;
- return gl861_i2c_read_ex(d, addr, msg->buf, msg->len);
-}
-
-static int
-friio_i2c_tuner_write(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- u8 *buf;
- int ret;
- struct friio_priv *priv;
-
- priv = d_to_priv(d);
-
- if (msg->len < 1)
- return -EINVAL;
-
- buf = kmalloc(msg->len + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf[0] = msg->addr << 1;
- memcpy(buf + 1, msg->buf, msg->len);
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- priv->i2c_client_demod->addr << (8 + 1),
- 0xFE, buf, msg->len + 1, 2000);
- kfree(buf);
- return ret;
-}
-
-static int friio_tuner_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
-{
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
-
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- int ret;
-
- if (msg[i].flags & I2C_M_RD)
- ret = friio_i2c_tuner_read(d, &msg[i]);
- else
- ret = friio_i2c_tuner_write(d, &msg[i]);
-
- if (ret < 0)
- break;
-
- usleep_range(1000, 2000); /* avoid I2C errors */
- }
-
- mutex_unlock(&d->i2c_mutex);
- return i;
-}
-
-static struct i2c_algorithm friio_tuner_i2c_algo = {
- .master_xfer = friio_tuner_i2c_xfer,
- .functionality = gl861_i2c_func,
-};
/* GPIO control in Friio */
@@ -377,9 +313,11 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
/* init/config of gl861 for Friio */
/* NOTE:
* This function cannot be moved to friio_init()/dvb_usbv2_init(),
- * because the init defined here must be done before any activities like I2C,
+ * because the init defined here includes a whole device reset;
+ * it must be run early, before any activities like I2C,
* but friio_init() is called by dvb-usbv2 after {_frontend, _tuner}_attach(),
* where I2C communication is used.
+ * In addition, this reset is required in reset_resume() as well.
* Thus this function is set to be called from _power_ctl().
*
* Since it will be called on the early init stage
@@ -389,7 +327,7 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
static int friio_reset(struct dvb_usb_device *d)
{
int i, ret;
- u8 wbuf[2], rbuf[2];
+ u8 wbuf[1], rbuf[2];
static const u8 friio_init_cmds[][2] = {
{0x33, 0x08}, {0x37, 0x40}, {0x3a, 0x1f}, {0x3b, 0xff},
@@ -401,16 +339,12 @@ static int friio_reset(struct dvb_usb_device *d)
if (ret < 0)
return ret;
- wbuf[0] = 0x11;
- wbuf[1] = 0x02;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x02);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- wbuf[0] = 0x11;
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x00);
if (ret < 0)
return ret;
@@ -420,14 +354,13 @@ static int friio_reset(struct dvb_usb_device *d)
*/
usleep_range(1000, 2000);
- wbuf[0] = 0x03;
- wbuf[1] = 0x80;
- ret = gl861_i2c_write_ex(d, 0x09, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x09 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x09, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x09 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
@@ -435,38 +368,33 @@ static int friio_reset(struct dvb_usb_device *d)
usleep_range(1000, 2000);
- ret = gl861_i2c_write_ex(d, 0x48, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x48 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x48, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x48 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
return -ENODEV;
- wbuf[0] = 0x30;
- wbuf[1] = 0x04;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x30, 0x04);
if (ret < 0)
return ret;
- wbuf[0] = 0x00;
- wbuf[1] = 0x01;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x00, 0x01);
if (ret < 0)
return ret;
- wbuf[0] = 0x06;
- wbuf[1] = 0x0f;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x06, 0x0f);
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(friio_init_cmds); i++) {
- ret = gl861_i2c_msg(d, 0x00, (u8 *)friio_init_cmds[i], 2,
- NULL, 0);
+ ret = gl861_short_write(d, 0x00, friio_init_cmds[i][0],
+ friio_init_cmds[i][1]);
if (ret < 0)
return ret;
}
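All the two-byte register writes in friio_reset() collapse into the new gl861_short_write() helper, which is defined earlier in gl861.c, outside the hunks shown here. A minimal sketch of what such a wrapper presumably looks like, inferred from the gl861_ctrl_msg() calls visible above (value carries the i2c address shifted left by 9 bits, index carries the register; the CMD_WRITE_SHORT name is an assumption):

static int gl861_short_write(struct dvb_usb_device *d, u8 addr, u8 reg, u8 val)
{
	/* Sketch only: value = (addr << 9) | data byte, index = register,
	 * mirroring the CMD_WRITE/CMD_READ encoding used in friio_reset().
	 */
	return gl861_ctrl_msg(d, CMD_WRITE_SHORT,
			      (addr << 9) | val, reg, NULL, 0);
}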
@@ -488,9 +416,10 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d;
struct tc90522_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
info = &friio_config.demod_info;
+ cfg = friio_config.demod_cfg;
d = adap_to_d(adap);
cl = dvb_module_probe("tc90522", info->type,
&d->i2c_adap, info->addr, &cfg);
@@ -498,25 +427,17 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
adap->fe[0] = cfg.fe;
- /* ignore cfg.tuner_i2c and create new one */
priv = adap_to_priv(adap);
priv->i2c_client_demod = cl;
- priv->tuner_adap.algo = &friio_tuner_i2c_algo;
- priv->tuner_adap.dev.parent = &d->udev->dev;
- strscpy(priv->tuner_adap.name, d->name, sizeof(priv->tuner_adap.name));
- strlcat(priv->tuner_adap.name, "-tuner", sizeof(priv->tuner_adap.name));
- priv->demod_sub_i2c = &priv->tuner_adap;
- i2c_set_adapdata(&priv->tuner_adap, d);
-
- return i2c_add_adapter(&priv->tuner_adap);
+ priv->demod_sub_i2c = cfg.tuner_i2c;
+ return 0;
}
static int friio_frontend_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
- i2c_del_adapter(&priv->tuner_adap);
dvb_module_release(priv->i2c_client_demod);
return 0;
}
@@ -526,7 +447,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
const struct i2c_board_info *info;
struct dvb_pll_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
info = &friio_config.tuner_info;
@@ -543,7 +464,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
static int friio_tuner_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
dvb_module_release(priv->i2c_client_tuner);
@@ -554,7 +475,7 @@ static int friio_init(struct dvb_usb_device *d)
{
int i;
int ret;
- struct friio_priv *priv;
+ struct gl861 *priv;
static const u8 demod_init[][2] = {
{0x01, 0x40}, {0x04, 0x38}, {0x05, 0x40}, {0x07, 0x40},
@@ -606,7 +527,7 @@ static struct dvb_usb_device_properties friio_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct friio_priv),
+ .size_of_priv = sizeof(struct gl861),
.i2c_algo = &gl861_i2c_algo,
.power_ctrl = friio_power_ctrl,
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
deleted file mode 100644
index 02c00e10748a..000000000000
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DVB_USB_GL861_H_
-#define _DVB_USB_GL861_H_
-
-#include "dvb_usb.h"
-
-#define GL861_WRITE 0x40
-#define GL861_READ 0xc0
-
-#define GL861_REQ_I2C_WRITE 0x01
-#define GL861_REQ_I2C_READ 0x02
-#define GL861_REQ_I2C_RAW 0x03
-
-#endif
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 1a36bda28542..5016ede7b35f 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1781,7 +1781,6 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
}
/* 'flush' ir_raw_event_store_with_filter() */
- ir_raw_event_set_idle(d->rc_dev, true);
ir_raw_event_handle(d->rc_dev);
exit:
return ret;
@@ -1804,6 +1803,8 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
rc->driver_type = RC_DRIVER_IR_RAW;
rc->query = rtl2832u_rc_query;
rc->interval = 200;
+ /* we program idle len to 0xc0, set timeout to one less */
+ rc->timeout = 0xbf * 50800;
return 0;
}
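For scale: 50800 ns is the sample period this driver uses when converting raw IR samples into durations, so 0xbf * 50800 ns = 9,702,800 ns, roughly a 9.7 ms timeout, one sample period shorter than the 0xc0 idle length programmed into the hardware.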
@@ -1957,7 +1958,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
- &rtl28xxu_props, "Astrometa DVB-T2", NULL) },
+ &rtl28xxu_props, "Astrometa DVB-T2",
+ RC_MAP_ASTROMETA_T2HYBRID) },
{ DVB_USB_DEVICE(0x5654, 0xca42,
&rtl28xxu_props, "GoTView MasterHD 3", NULL) },
{ }
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 02697d86e8c1..ac93e88d7038 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
- return -EIO;
- deb_info("Identify state cold = %d\n", *cold);
+ ret = -EIO;
+ if (!ret)
+ deb_info("Identify state cold = %d\n", *cold);
err:
kfree(buf);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index f02fa0a67aa4..fac19ec46089 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
{
u8 ircode[4];
- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
+ return 0;
if (ircode[2] || ircode[3])
rc_keydown(d->rc_dev, RC_PROTO_NEC,
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 49c9b70b632b..79dfbb25714b 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -31,7 +31,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5983e72a0622..def9cdd931a9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2487,6 +2487,24 @@ const struct em28xx_board em28xx_boards[] = {
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
},
+ /*
+ * 1b80:e349 Magix USB Videowandler-2
+ * (same chips as Honestech VIDBOX NW03)
+ * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner
+ */
+ [EM2861_BOARD_MAGIX_VIDEOWANDLER2] = {
+ .name = "Magix USB Videowandler-2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2696,6 +2714,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PLEX_PX_BCUD },
{ USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */
.driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN },
+ { USB_DEVICE(0x1b80, 0xe349), /* Magix USB Videowandler-2 */
+ .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a73faf12f7e4..0ab6c493bc74 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -471,13 +471,13 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
{EM2874_R80_GPIO_P0_CTRL, 0xaf, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x76},
@@ -493,7 +493,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -537,20 +537,20 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
static void terratec_h5_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_h5_init[] = {
+ static const struct em28xx_reg_seq terratec_h5_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_h5_end[] = {
+ static const struct em28xx_reg_seq terratec_h5_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -594,14 +594,14 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* 0xe6: unknown (does not affect DVB-T).
* 0xb6: unknown (does not affect DVB-T).
*/
- struct em28xx_reg_seq terratec_htc_stick_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_stick_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
{ -1, -1, -1, -1},
@@ -611,7 +611,7 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -642,14 +642,14 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
@@ -660,7 +660,7 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -704,7 +704,7 @@ static void pctv_520e_init(struct em28xx *dev)
* digital demodulator and tuner are routed via AVF4910B.
*/
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -800,7 +800,7 @@ static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
static void px_bcud_init(struct em28xx *dev)
{
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs1[] = {
@@ -818,7 +818,7 @@ static void px_bcud_init(struct em28xx *dev)
{{ 0x85, 0x7a }, 2},
{{ 0x87, 0x04 }, 2},
};
- static struct em28xx_reg_seq gpio[] = {
+ static const struct em28xx_reg_seq gpio[] = {
{EM28XX_R06_I2C_CLK, 0x40, 0xff, 300},
{EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 60},
{EM28XX_R15_RGAIN, 0x20, 0xff, 0},
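These em28xx-dvb.c hunks all make the same change, and the reason is worth stating: a const array that is local to a function but not static is an automatic variable, rebuilt on the stack at every call, whereas static const data is emitted once into .rodata and shared. A standalone illustration of the difference (not driver code):

#include <stdio.h>

static void on_stack(void)
{
	const int tbl[4] = { 1, 2, 3, 4 };	/* copied in at each call */

	printf("automatic: %p\n", (void *)tbl);	/* a stack address */
}

static void in_rodata(void)
{
	static const int tbl[4] = { 1, 2, 3, 4 };	/* one shared copy */

	printf("static:    %p\n", (void *)tbl);	/* a .rodata address */
}

int main(void)
{
	on_stack();
	in_rodata();
	return 0;
}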
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index a3155ec196cc..592b98b3643a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -949,7 +949,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
unsigned char buf;
int i, rc;
- memset(i2c_devicelist, 0, ARRAY_SIZE(i2c_devicelist));
+ memset(i2c_devicelist, 0, sizeof(i2c_devicelist));
for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
dev->i2c_client[bus].addr = i;
@@ -964,7 +964,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
if (bus == dev->def_i2c_bus)
dev->i2c_hash = em28xx_hash_mem(i2c_devicelist,
- ARRAY_SIZE(i2c_devicelist), 32);
+ sizeof(i2c_devicelist), 32);
}
/*
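The memset()/em28xx_hash_mem() fix above deserves a note: both take a size in bytes, so sizeof() is the semantically correct argument. i2c_devicelist appears to be a byte array, in which case ARRAY_SIZE() happens to yield the same number; the two diverge as soon as the element type is wider, as this standalone snippet shows:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned short buf[128];	/* 128 elements, 256 bytes */

	printf("ARRAY_SIZE = %zu, sizeof = %zu\n",
	       ARRAY_SIZE(buf), sizeof(buf));

	/* memset() counts bytes: ARRAY_SIZE() here would clear only
	 * half of the buffer.
	 */
	memset(buf, 0, sizeof(buf));
	return 0;
}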
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index c8bc59059a19..4ecadd57dac7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -149,6 +149,7 @@
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
#define EM2884_BOARD_TERRATEC_H6 101
#define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102
+#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 863c485f4275..97799cfb832e 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -378,6 +378,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index 3d7f6dcdd7a8..6ca947aef298 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -276,6 +276,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index f869eb6065ce..b23988d8c7bc 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -35,7 +35,7 @@ struct sd {
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
int size)
{
- int ret = -1;
+ int ret;
u8 req_type = 0;
unsigned int pipe = 0;
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
index 7104a88b1e43..aac19d449be2 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
@@ -117,7 +117,7 @@ static int st6422_init(struct sd *sd)
{
int err = 0, i;
- const u16 st6422_bridge_init[][2] = {
+ static const u16 st6422_bridge_init[][2] = {
{ STV_ISO_ENABLE, 0x00 }, /* disable capture */
{ 0x1436, 0x00 },
{ 0x1432, 0x03 }, /* 0x00-0x1F brightness */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index a34717eba409..eaa08c7999d4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -898,8 +898,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
- !list_empty(&vp->dev_radio->devbase.fh_list))
+ (vp->dev_radio &&
+ !list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
+ }
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -935,7 +939,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
- list_empty(&vp->dev_radio->devbase.fh_list)) {
+ (!vp->dev_radio ||
+ list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h
index d10424673db9..6a181f2e7ef2 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/media/usb/tm6000/tm6000-regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
index b275dbce3a1b..e3c6933f854d 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index bf396544da9a..c08c95312739 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 6f108996142d..e746c8ddfc49 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -378,8 +378,7 @@ int usbtv_audio_init(struct usbtv *usbtv)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), USBTV_AUDIO_BUFFER,
- USBTV_AUDIO_BUFFER);
+ NULL, USBTV_AUDIO_BUFFER, USBTV_AUDIO_BUFFER);
rv = snd_card_register(card);
if (rv)
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index cdc66adda755..93d36aab824f 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto unlock;
+ }
if (usbvision->user) {
err_code = -EBUSY;
} else {
@@ -377,6 +381,7 @@ unlock:
static int usbvision_v4l2_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "close");
@@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
@@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ if (!usbvision->dev)
+ return -ENODEV;
+
strscpy(vc->driver, "USBVision", sizeof(vc->driver));
strscpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string,
@@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto out;
+ }
err_code = v4l2_fh_open(file);
if (err_code)
goto out;
@@ -1093,21 +1107,24 @@ out:
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "");
mutex_lock(&usbvision->v4l2_lock);
/* Set packet size to 0 */
usbvision->iface_alt = 0;
- usb_set_interface(usbvision->dev, usbvision->iface,
- usbvision->iface_alt);
+ if (usbvision->dev)
+ usb_set_interface(usbvision->dev, usbvision->iface,
+ usbvision->iface_alt);
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file);
usbvision_release(usbvision);
@@ -1539,6 +1556,7 @@ err_usb:
static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
+ int u;
PDEBUG(DBG_PROBE, "");
@@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
+ u = usbvision->user;
usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->user) {
+ if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__);
wake_up_interruptible(&usbvision->wait_frame);
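The usbvision changes above all follow one pattern: any state that disconnect can change (remove_pending, user, dev) is sampled into a local variable while v4l2_lock is still held, and only the local copy is used after the unlock, so the final usbvision_release() cannot race with a concurrent disconnect. A self-contained userspace analogue of the pattern (pthread mutex standing in for the v4l2 lock; all names illustrative; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	pthread_mutex_t lock;
	int users;		/* open handles */
	int remove_pending;	/* set by "disconnect" */
};

static void dev_release(struct dev *d)
{
	printf("final release\n");
	free(d);
}

static void dev_close(struct dev *d)
{
	int do_release;

	pthread_mutex_lock(&d->lock);
	d->users--;
	/* sample shared state while still holding the lock */
	do_release = d->remove_pending && d->users == 0;
	pthread_mutex_unlock(&d->lock);

	/* act on the local copy only: d must not be read again here */
	if (do_release)
		dev_release(d);
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	pthread_mutex_init(&d->lock, NULL);
	d->users = 1;
	d->remove_pending = 1;	/* pretend disconnect already ran */
	dev_close(d);
	return 0;
}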
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index d2b109959d82..2b8af4b54117 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -108,15 +108,7 @@ void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
void uvc_debugfs_init(void)
{
- struct dentry *dir;
-
- dir = debugfs_create_dir("uvcvideo", usb_debug_root);
- if (IS_ERR_OR_NULL(dir)) {
- uvc_printk(KERN_INFO, "Unable to create debugfs directory\n");
- return;
- }
-
- uvc_debugfs_root_dir = dir;
+ uvc_debugfs_root_dir = debugfs_create_dir("uvcvideo", usb_debug_root);
}
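Dropping the check matches current debugfs convention: on failure debugfs_create_dir() returns an ERR_PTR rather than NULL, and the debugfs_create_*() API is written so that an error pointer passed back in as a parent is handled gracefully, so callers are not expected to test the result.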
void uvc_debugfs_cleanup(void)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 66ee168ddc7e..428235ca2635 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->name) - len);
}
+ /* Initialize the media device. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strscpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ media_device_init(&dev->mdev);
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
- /* Initialize the media device and register the V4L2 device. */
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->mdev.dev = &intf->dev;
- strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
- if (udev->serial)
- strscpy(dev->mdev.serial, udev->serial,
- sizeof(dev->mdev.serial));
- usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- media_device_init(&dev->mdev);
-
- dev->vdev.mdev = &dev->mdev;
-#endif
+ /* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 99bb71b47117..b6279ad7ac84 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -51,7 +51,7 @@ static int uvc_meta_v4l2_get_format(struct file *file, void *fh,
memset(fmt, 0, sizeof(*fmt));
fmt->dataformat = stream->meta.format;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
@@ -72,7 +72,7 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
fmt->dataformat = fmeta == dev->info->meta_format
? fmeta : V4L2_META_FMT_UVC;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index da72577c2998..cd60c6c1749e 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -79,7 +79,7 @@ static int uvc_queue_setup(struct vb2_queue *vq,
switch (vq->type) {
case V4L2_BUF_TYPE_META_CAPTURE:
- size = UVC_METATADA_BUF_SIZE;
+ size = UVC_METADATA_BUF_SIZE;
break;
default:
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c7c1baa90dea..f773dc5d802c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -491,7 +491,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METATADA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 1024
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 637962825d7a..57dbcc8083bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -20,7 +20,6 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/highmem.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -556,14 +555,12 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
{
unsigned char *pdest;
unsigned char *psrc;
- s32 idx = -1;
- struct zr364xx_framei *frm;
+ s32 idx = cam->cur_frame;
+ struct zr364xx_framei *frm = &cam->buffer.frame[idx];
int i = 0;
unsigned char *ptr = NULL;
_DBG("buffer to user\n");
- idx = cam->cur_frame;
- frm = &cam->buffer.frame[idx];
/* swap bytes if camera needs it */
if (cam->method == METHOD0) {
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 62f7aa92ac29..d0e5ebc736f9 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -236,77 +236,79 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{
static const struct v4l2_format_info formats[] = {
/* RGB formats */
- { .format = V4L2_PIX_FMT_BGR24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
- { .format = V4L2_PIX_FMT_YUYV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVYU, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_VYUY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats */
- { .format = V4L2_PIX_FMT_NV12, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV24, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV42, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_YUV410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YVU410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YUV411P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats, non contiguous variant */
- { .format = V4L2_PIX_FMT_YUV420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_NV12M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* Bayer RGB formats */
- { .format = V4L2_PIX_FMT_SBGGR8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
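The meaning of the table fields can be checked against one row. For NV12 the luma plane stores one byte per pixel and the interleaved chroma plane stores two bytes per sample at half resolution in each direction; a standalone sketch of the arithmetic, with the constants copied from the NV12 row above:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;
	/* NV12 row: .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 */
	unsigned int bpp0 = 1, bpp1 = 2, hdiv = 2, vdiv = 2;
	unsigned int y_size = width * height * bpp0;
	unsigned int uv_size = (width / hdiv) * (height / vdiv) * bpp1;

	/* prints: Y: 2073600 bytes, CbCr: 1036800 bytes */
	printf("Y: %u bytes, CbCr: %u bytes\n", y_size, uv_size);
	return 0;
}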
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 1d8f38824631..2928c5e0a73d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -29,6 +29,8 @@
#define call_op(master, op) \
(has_op(master, op) ? master->ops->op(master) : 0)
+static const union v4l2_ctrl_ptr ptr_null;
+
/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
@@ -566,6 +568,16 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Disabled at slice boundary",
"NULL",
};
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -687,7 +699,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
-
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ return hevc_start_code;
default:
return NULL;
}
@@ -957,6 +972,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -994,6 +1014,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
case V4L2_CID_PAN_SPEED: return "Pan, Speed";
case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1265,6 +1286,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -1375,6 +1398,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER:
*type = V4L2_CTRL_TYPE_VP8_FRAME_HEADER;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1520,7 +1556,8 @@ static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
if (ctrl->is_int)
return ptr1.p_s32[idx] == ptr2.p_s32[idx];
idx *= ctrl->elem_size;
- return !memcmp(ptr1.p + idx, ptr2.p + idx, ctrl->elem_size);
+ return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
+ ctrl->elem_size);
}
}
@@ -1530,7 +1567,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
void *p = ptr.p + idx * ctrl->elem_size;
- memset(p, 0, ctrl->elem_size);
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
/*
* The cast is needed to get rid of a gcc warning complaining that
@@ -1672,7 +1712,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
{
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *area;
void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
@@ -1748,6 +1793,76 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
zero_padding(p_vp8_frame_header->entropy_header);
zero_padding(p_vp8_frame_header->coder_state);
break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+
+ zero_padding(*p_hevc_pps);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ p_hevc_slice_params = p;
+
+ if (p_hevc_slice_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+
+ zero_padding(p_hevc_slice_params->pred_weight_table);
+
+ for (i = 0; i < p_hevc_slice_params->num_active_dpb_entries;
+ i++) {
+ struct v4l2_hevc_dpb_entry *dpb_entry =
+ &p_hevc_slice_params->dpb[i];
+
+ zero_padding(*dpb_entry);
+ }
+
+ zero_padding(*p_hevc_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
@@ -1840,7 +1955,7 @@ static int ptr_to_user(struct v4l2_ext_control *c,
u32 len;
if (ctrl->is_ptr && !ctrl->is_string)
- return copy_to_user(c->ptr, ptr.p, c->size) ?
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
-EFAULT : 0;
switch (ctrl->type) {
@@ -1955,7 +2070,7 @@ static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
{
if (ctrl == NULL)
return;
- memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
+ memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
}
/* Copy the new value to the current value. */
@@ -2354,7 +2469,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, void *priv)
+ const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra;
@@ -2421,6 +2537,18 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
elem_size = sizeof(struct v4l2_ctrl_vp8_frame_header);
break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2460,6 +2588,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array)
sz_extra += 2 * tot_ctrl_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
handler_set_err(hdl, -ENOMEM);
@@ -2503,6 +2634,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->p_new.p = &ctrl->val;
ctrl->p_cur.p = &ctrl->cur.val;
}
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
for (idx = 0; idx < elems; idx++) {
ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
@@ -2554,7 +2691,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, priv);
+ flags, qmenu, qmenu_int, cfg->p_def, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -2579,7 +2716,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, NULL);
+ flags, NULL, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -2612,7 +2749,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, NULL);
+ flags, qmenu, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -2644,11 +2781,32 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, NULL);
+ flags, qmenu, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
+
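A hedged usage sketch for the new helper: a sensor driver registering the read-only V4L2_CID_UNIT_CELL_SIZE control (added elsewhere in this patch) with a driver-specific default. The struct sensor type, its ctrl_handler member, and the 1120 nm cell pitch are illustrative, not taken from this patch:

static const struct v4l2_area unit_size = {
	.width	= 1120,		/* hypothetical cell pitch, in nm */
	.height	= 1120,
};

static int sensor_init_controls(struct sensor *sensor)
{
	union v4l2_ctrl_ptr p_def;

	p_def.p_const = &unit_size;
	v4l2_ctrl_new_std_compound(&sensor->ctrl_handler, NULL,
				   V4L2_CID_UNIT_CELL_SIZE, p_def);
	return sensor->ctrl_handler.error;
}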
/* Helper function for standard integer menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
@@ -2669,7 +2827,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, NULL);
+ flags, NULL, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
@@ -3144,6 +3302,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
struct v4l2_ctrl_handler *prev_hdl = NULL;
struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+ mutex_lock(main_hdl->lock);
if (list_empty(&main_hdl->requests_queued))
goto queue;
@@ -3175,18 +3334,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
queue:
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
list_del_init(&hdl->requests);
+ mutex_lock(main_hdl->lock);
if (hdl->request_is_queued) {
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
}
+ mutex_unlock(main_hdl->lock);
}
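Together with the matching lock/unlock pair added to v4l2_ctrl_request_queue() above, and to v4l2_ctrl_request_complete() further down, this puts every manipulation of the requests_queued list and of request_is_queued under the main handler's lock, so an unbind can no longer race with queueing or completion of the same request.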
static void v4l2_ctrl_request_release(struct media_request_object *obj)
@@ -4080,6 +4243,18 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->type != V4L2_CTRL_TYPE_AREA);
+ *ctrl->p_new.p_area = *area;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_area);
+
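The double-underscore export assumes the caller already holds the handler lock, hence the lockdep assertion. The usual header-side pairing is a locked convenience wrapper; a sketch of it, assuming the existing v4l2_ctrl_lock()/v4l2_ctrl_unlock() helpers:

	static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
						const struct v4l2_area *area)
	{
		int rval;

		v4l2_ctrl_lock(ctrl);
		rval = __v4l2_ctrl_s_ctrl_area(ctrl, area);
		v4l2_ctrl_unlock(ctrl);

		return rval;
	}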
void v4l2_ctrl_request_complete(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
@@ -4128,9 +4303,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
v4l2_ctrl_unlock(ctrl);
}
+ mutex_lock(main_hdl->lock);
WARN_ON(!hdl->request_is_queued);
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
media_request_object_complete(obj);
media_request_object_put(obj);
}
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4037689a945a..da42d172714a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -533,13 +533,23 @@ static int get_index(struct video_device *vdev)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
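Both video and metadata nodes register as VFL_TYPE_GRABBER, so device_caps is now what selects the ioctl set. A hypothetical metadata-capture node would advertise itself like this (illustrative, not from this patch):

	/* Registered via video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	 * with these caps, is_meta above becomes true and only the
	 * *_meta_cap format ioctls are enabled for this node. */
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;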
@@ -571,8 +581,10 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
- SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ if (!is_tch) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ }
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
@@ -586,40 +598,32 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
- if (is_vid || is_tch) {
- /* video and metadata specific ioctls */
+ if (is_vid) {
+ /* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_meta_cap)) ||
- (is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_meta_out)))
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
- ops->vidioc_g_fmt_vid_overlay ||
- ops->vidioc_g_fmt_meta_cap)) ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
- ops->vidioc_g_fmt_vid_out_overlay ||
- ops->vidioc_g_fmt_meta_out)))
+ ops->vidioc_g_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
- ops->vidioc_s_fmt_vid_overlay ||
- ops->vidioc_s_fmt_meta_cap)) ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
- ops->vidioc_s_fmt_vid_out_overlay ||
- ops->vidioc_s_fmt_meta_out)))
+ ops->vidioc_s_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
- ops->vidioc_try_fmt_vid_overlay ||
- ops->vidioc_try_fmt_meta_cap)) ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
- ops->vidioc_try_fmt_vid_out_overlay ||
- ops->vidioc_try_fmt_meta_out)))
+ ops->vidioc_try_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
@@ -641,7 +645,21 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- } else if (is_vbi) {
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
@@ -659,30 +677,35 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_tch) {
+ /* touch specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
} else if (is_sdr && is_rx) {
/* SDR receiver specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
} else if (is_sdr && is_tx) {
/* SDR transmitter specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
}
- if (is_vid || is_vbi || is_sdr || is_tch) {
- /* ioctls valid for video, metadata, vbi or sdr */
+ if (is_vid || is_vbi || is_sdr || is_tch || is_meta) {
+ /* ioctls valid for video, vbi, sdr, touch and metadata */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -694,8 +717,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
- if (is_vid || is_vbi || is_tch) {
- /* ioctls valid for video or vbi */
+ if (is_vid || is_vbi || is_meta) {
+ /* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
@@ -719,8 +742,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
- if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
- ops->vidioc_g_std))
+ if (ops->vidioc_g_parm || ops->vidioc_g_std)
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
@@ -734,7 +756,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
- if (is_rx) {
+ if (is_rx && !is_tch) {
/* receiver only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4f23e939ead0..230d65a64217 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -293,7 +293,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
if (prefix == NULL)
prefix = "";
- pr_info("%s: %s%ux%u%s%u.%u (%ux%u)\n", dev_prefix, prefix,
+ pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
bt->width, bt->height, bt->interlaced ? "i" : "p",
fps / 100, fps % 100, htot, vtot);
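Worked example of the format fix: fps holds centi-hertz, so the fractional part needs zero padding.

	/* fps = 6005: "%u.%u"   -> "60.5"  (ambiguous)
	 *             "%u.%02u" -> "60.05" (correct)
	 * fps = 5994 prints "59.94" either way. */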
@@ -757,7 +757,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
hsync = (frame_width * 8 + 50) / 100;
- hsync = ((hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN) * GTF_CELL_GRAN;
+ hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
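For non-negative operands DIV_ROUND_CLOSEST(x, d) expands to ((x) + (d) / 2) / (d), so this is a pure readability change. With GTF_CELL_GRAN == 8:

	/* hsync = 52: (52 + 4) / 8 * 8 = 56, same result either way. */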
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3bd1888787eb..192cac076761 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -512,6 +512,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
return;
kfree(vep->link_frequencies);
+ vep->link_frequencies = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
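Clearing the pointer after kfree() makes a second free harmless; the general shape of the pattern (a sketch with a hypothetical struct, not from this file):

	struct foo {
		u32 *buf;
	};

	static void foo_free(struct foo *f)
	{
		kfree(f->buf);	/* kfree(NULL) is already a no-op ... */
		f->buf = NULL;	/* ... so repeated foo_free() calls are safe */
	}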
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 51b912743f0f..4e700583659b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -932,12 +932,22 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -996,11 +1006,11 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
case V4L2_BUF_TYPE_META_OUTPUT:
- if (is_vid && is_tx && ops->vidioc_g_fmt_meta_out)
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
return 0;
break;
default:
@@ -1330,6 +1340,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
+ case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
default:
/* Compressed formats */
@@ -1356,6 +1367,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
@@ -1466,10 +1478,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
return ret;
}
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
if (ret)
@@ -1507,6 +1535,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
@@ -1544,21 +1574,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
-{
- /*
- * The v4l2_pix_format structure contains fields that make no sense for
- * touch. Set them to default values in this case.
- */
-
- p->field = V4L2_FIELD_NONE;
- p->colorspace = V4L2_COLORSPACE_RAW;
- p->flags = 0;
- p->ycbcr_enc = 0;
- p->quantization = 0;
- p->xfer_func = 0;
-}
-
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1602,12 +1617,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
@@ -1633,22 +1648,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
@@ -1704,12 +1719,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
@@ -1735,22 +1750,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
@@ -2637,7 +2652,7 @@ struct v4l2_ioctl_info {
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
- sizeof(((struct v4l2_struct *)0)->field)) << 16)
+ FIELD_SIZEOF(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
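Two macro notes for the hunks above (paraphrased definitions, not quoted from this patch): FIELD_SIZEOF(t, f) in linux/kernel.h is sizeof(((t *)0)->f), i.e. exactly the expression INFO_FL_CLEAR() open-coded before, so that change is cosmetic. The CLEAR_AFTER_FIELD() changes are not:

	/* CLEAR_AFTER_FIELD(p, field) zeroes *p from just past 'field' to
	 * the end of the structure. Anchoring it at fmt.vbi.flags (the
	 * last documented member) instead of fmt.vbi means the vbi
	 * reserved[] tail is now zeroed too rather than reaching drivers
	 * unsanitized; likewise for fmt.sliced.io_size and
	 * fmt.sdr.buffersize. */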
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 19937dd3c6f6..1afd9c6ad908 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -307,20 +308,37 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
- && !m2m_ctx->out_q_ctx.buffered) {
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
- goto out_unlock;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
- && !m2m_ctx->cap_q_ctx.buffered) {
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
- goto cap_unlock;
+ goto job_unlock;
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+
+ m2m_ctx->new_frame = true;
+
+ if (src && dst && dst->is_held &&
+ dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
+ }
+
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +349,6 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
- return;
-
-cap_unlock:
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
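How the held-buffer logic above plays out for a stateless decoder, where all slices of one frame carry the same timestamp (an illustrative timeline, not from the patch):

	/*
	 * src ts=100, slice 0 -> dst buffer held, new_frame = true
	 * src ts=100, slice 1 -> same held dst,   new_frame = false
	 * src ts=101, slice 0 -> held dst completed as DONE, a fresh
	 *                        dst is picked (and held in turn)
	 */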
@@ -412,37 +423,97 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
}
}
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
- struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags;
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Called by an instance not currently running\n");
- return;
+ return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ return true;
+}
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
- /* This instance might have more buffers ready, but since we do not
- * allow more than one job on the job_queue per instance, each has
- * to be scheduled separately after the previous one finishes. */
- __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
- /* We might be running in atomic context,
- * but the job must be run in non-atomic context.
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
*/
- schedule_work(&m2m_dev->job_work);
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ v4l2_m2m_buf_done(src_buf, state);
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
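A hedged sketch of the intended caller: a driver whose output queue sets VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF finishes jobs from its completion path with the new helper instead of v4l2_m2m_job_finish(). The foo_* names are hypothetical.

	static irqreturn_t foo_decode_done_irq(int irq, void *data)
	{
		struct foo_dev *dev = data;

		/* DONEs the source buffer, and the capture buffer too
		 * unless it is (still) held for more slices; the next
		 * job is then scheduled under the same job_spinlock
		 * critical section. */
		v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, dev->curr_ctx,
						 VB2_BUF_STATE_DONE);
		return IRQ_HANDLED;
	}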
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
@@ -1154,6 +1225,59 @@ int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
+ unsigned long flags;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
+
+ /*
+ * If there is an out buffer pending, then clear any HOLD flag.
+ *
+ * By clearing this flag we ensure that when this output
+ * buffer is processed any held capture buffer will be released.
+ */
+ if (out_vb) {
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else if (cap_vb && cap_vb->is_held) {
+ /*
+ * If there were no output buffers, but there is a
+ * capture buffer that is held, then release that
+ * buffer.
+ */
+ cap_vb->is_held = false;
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
+
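Wiring the two new helpers into a stateless decoder is just a pair of ioctl-ops entries (foo_ioctl_ops is hypothetical; the helper names match the exports above):

	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
		/* ... */
		.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_stateless_try_decoder_cmd,
		.vidioc_decoder_cmd	= v4l2_m2m_ioctl_stateless_decoder_cmd,
	};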
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock
 * is used for the output and the capture buffer queue.
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f725cd9b66b9..9e987c0f840e 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -112,7 +112,7 @@ static int subdev_close(struct file *file)
return 0;
}
-static inline int check_which(__u32 which)
+static inline int check_which(u32 which)
{
if (which != V4L2_SUBDEV_FORMAT_TRY &&
which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -121,7 +121,7 @@ static inline int check_which(__u32 which)
return 0;
}
-static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
+static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
if (sd->entity.num_pads) {
@@ -136,7 +136,7 @@ static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
return 0;
}
-static int check_cfg(__u32 which, struct v4l2_subdev_pad_config *cfg)
+static int check_cfg(u32 which, struct v4l2_subdev_pad_config *cfg)
{
if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
return -EINVAL;
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 516f454fde14..08192fd70eb4 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -6,16 +6,16 @@
comment "MemoryStick drivers"
config MEMSTICK_UNSAFE_RESUME
- bool "Allow unsafe resume (DANGEROUS)"
- help
- If you say Y here, the MemoryStick layer will assume that all
- cards stayed in their respective slots during the suspend. The
- normal behaviour is to remove them at suspend and
- redetecting them at resume. Breaking this assumption will
- in most cases result in data corruption.
+ bool "Allow unsafe resume (DANGEROUS)"
+ help
+ If you say Y here, the MemoryStick layer will assume that all
+ cards stayed in their respective slots during the suspend. The
+ normal behaviour is to remove them at suspend and
+	  redetect them at resume. Breaking this assumption will
+ in most cases result in data corruption.
- This option is usually just for embedded systems which use
- a MemoryStick card for rootfs. Most people should say N here.
+ This option is usually just for embedded systems which use
+ a MemoryStick card for rootfs. Most people should say N here.
config MSPRO_BLOCK
tristate "MemoryStick Pro block device driver"
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 446c93ecef8f..4113343da056 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -18,7 +18,7 @@ config MEMSTICK_TIFM_MS
'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
(TIFM_7XX1)'.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called tifm_ms.
config MEMSTICK_JMICRON_38X
@@ -29,7 +29,7 @@ config MEMSTICK_JMICRON_38X
Say Y here if you want to be able to access MemoryStick cards with
the JMicron(R) JMB38X MemoryStick card reader.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called jmb38x_ms.
config MEMSTICK_R592
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 64fff6abe60e..b36142df0295 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -433,13 +433,13 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
writel(((1 << 16) & BLOCK_COUNT_MASK)
| (data_len & BLOCK_SIZE_MASK),
host->addr + BLOCK);
- t_val = readl(host->addr + INT_STATUS_ENABLE);
- t_val |= host->req->data_dir == READ
- ? INT_STATUS_FIFO_RRDY
- : INT_STATUS_FIFO_WRDY;
+ t_val = readl(host->addr + INT_STATUS_ENABLE);
+ t_val |= host->req->data_dir == READ
+ ? INT_STATUS_FIFO_RRDY
+ : INT_STATUS_FIFO_WRDY;
- writel(t_val, host->addr + INT_STATUS_ENABLE);
- writel(t_val, host->addr + INT_SIGNAL_ENABLE);
+ writel(t_val, host->addr + INT_STATUS_ENABLE);
+ writel(t_val, host->addr + INT_SIGNAL_ENABLE);
} else {
cmd &= ~(TPC_DATA_SEL | 0xf);
host->cmd_flags |= REG_DATA;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ae24d3ea68ea..420900852166 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1210,13 +1210,6 @@ config AB8500_DEBUG
Select this option if you want debug information using the debug
filesystem, debugfs.
-config AB8500_GPADC
- bool "ST-Ericsson AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
- default y
- help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
-
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
depends on UX500_SOC_DB8500
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c1067ea46204..aed99f08739f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -177,7 +177,6 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
-obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
# ab8500-core need to come after db8500-prcmu (which provides the channel)
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index f4e26b6e5362..1a9a3414d4fa 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -84,7 +84,6 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/string.h>
@@ -103,11 +102,6 @@ static int num_irqs;
static struct device_attribute **dev_attr;
static char **event_name;
-static u8 avg_sample = SAMPLE_16;
-static u8 trig_edge = RISING_EDGE;
-static u8 conv_type = ADC_SW;
-static u8 trig_timer;
-
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -152,7 +146,6 @@ static struct hwreg_cfg hwreg_cfg = {
};
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_ADC_NAME_STRING "gpadc"
#define AB8500_NUM_BANKS AB8500_DEBUG_FIELD_LAST
#define AB8500_REV_REG 0x80
@@ -1646,633 +1639,6 @@ report_write_failure:
DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
-{
- int bat_ctrl_raw;
- int bat_ctrl_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL,
- avg_sample, trig_edge, trig_timer, conv_type);
- bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- BAT_CTRL, bat_ctrl_raw);
-
- seq_printf(s, "%d,0x%X\n", bat_ctrl_convert, bat_ctrl_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-
-static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
-{
- int btemp_ball_raw;
- int btemp_ball_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL,
- avg_sample, trig_edge, trig_timer, conv_type);
- btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
- btemp_ball_raw);
-
- seq_printf(s, "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-
-static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
-{
- int main_charger_v_raw;
- int main_charger_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- MAIN_CHARGER_V, main_charger_v_raw);
-
- seq_printf(s, "%d,0x%X\n", main_charger_v_convert, main_charger_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-
-static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
-{
- int acc_detect1_raw;
- int acc_detect1_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1,
- avg_sample, trig_edge, trig_timer, conv_type);
- acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
- acc_detect1_raw);
-
- seq_printf(s, "%d,0x%X\n", acc_detect1_convert, acc_detect1_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-
-static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
-{
- int acc_detect2_raw;
- int acc_detect2_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2,
- avg_sample, trig_edge, trig_timer, conv_type);
- acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- ACC_DETECT2, acc_detect2_raw);
-
- seq_printf(s, "%d,0x%X\n", acc_detect2_convert, acc_detect2_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-
-static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
-{
- int aux1_raw;
- int aux1_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1,
- avg_sample, trig_edge, trig_timer, conv_type);
- aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
- aux1_raw);
-
- seq_printf(s, "%d,0x%X\n", aux1_convert, aux1_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-
-static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
-{
- int aux2_raw;
- int aux2_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2,
- avg_sample, trig_edge, trig_timer, conv_type);
- aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
- aux2_raw);
-
- seq_printf(s, "%d,0x%X\n", aux2_convert, aux2_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-
-static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
-{
- int main_bat_v_raw;
- int main_bat_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
- main_bat_v_raw);
-
- seq_printf(s, "%d,0x%X\n", main_bat_v_convert, main_bat_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-
-static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
-{
- int vbus_v_raw;
- int vbus_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
- vbus_v_raw);
-
- seq_printf(s, "%d,0x%X\n", vbus_v_convert, vbus_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-
-static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
-{
- int main_charger_c_raw;
- int main_charger_c_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- MAIN_CHARGER_C, main_charger_c_raw);
-
- seq_printf(s, "%d,0x%X\n", main_charger_c_convert, main_charger_c_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-
-static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
-{
- int usb_charger_c_raw;
- int usb_charger_c_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C,
- avg_sample, trig_edge, trig_timer, conv_type);
- usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- USB_CHARGER_C, usb_charger_c_raw);
-
- seq_printf(s, "%d,0x%X\n", usb_charger_c_convert, usb_charger_c_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-
-static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
-{
- int bk_bat_v_raw;
- int bk_bat_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- BK_BAT_V, bk_bat_v_raw);
-
- seq_printf(s, "%d,0x%X\n", bk_bat_v_convert, bk_bat_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-
-static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
-{
- int die_temp_raw;
- int die_temp_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP,
- avg_sample, trig_edge, trig_timer, conv_type);
- die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
- die_temp_raw);
-
- seq_printf(s, "%d,0x%X\n", die_temp_convert, die_temp_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-
-static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
-{
- int usb_id_raw;
- int usb_id_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- usb_id_raw = ab8500_gpadc_read_raw(gpadc, USB_ID,
- avg_sample, trig_edge, trig_timer, conv_type);
- usb_id_convert = ab8500_gpadc_ad_to_voltage(gpadc, USB_ID,
- usb_id_raw);
-
- seq_printf(s, "%d,0x%X\n", usb_id_convert, usb_id_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-
-static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
-{
- int xtal_temp_raw;
- int xtal_temp_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- xtal_temp_raw = ab8500_gpadc_read_raw(gpadc, XTAL_TEMP,
- avg_sample, trig_edge, trig_timer, conv_type);
- xtal_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, XTAL_TEMP,
- xtal_temp_raw);
-
- seq_printf(s, "%d,0x%X\n", xtal_temp_convert, xtal_temp_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-
-static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
-{
- int vbat_true_meas_raw;
- int vbat_true_meas_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_true_meas_raw = ab8500_gpadc_read_raw(gpadc, VBAT_TRUE_MEAS,
- avg_sample, trig_edge, trig_timer, conv_type);
- vbat_true_meas_convert =
- ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
- vbat_true_meas_raw);
-
- seq_printf(s, "%d,0x%X\n", vbat_true_meas_convert, vbat_true_meas_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
-{
- int bat_ctrl_raw;
- int bat_ctrl_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_ctrl_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_CTRL_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
-
- bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc, BAT_CTRL,
- bat_ctrl_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- bat_ctrl_convert, bat_ctrl_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-
-static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
-{
- int vbat_meas_raw;
- int vbat_meas_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_meas_raw = ab8500_gpadc_double_read_raw(gpadc, VBAT_MEAS_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
- vbat_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
- vbat_meas_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- vbat_meas_convert, vbat_meas_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
-{
- int vbat_true_meas_raw;
- int vbat_true_meas_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_true_meas_raw = ab8500_gpadc_double_read_raw(gpadc,
- VBAT_TRUE_MEAS_AND_IBAT, avg_sample, trig_edge,
- trig_timer, conv_type, &ibat_raw);
- vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- VBAT_TRUE_MEAS, vbat_true_meas_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- vbat_true_meas_convert, vbat_true_meas_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-
-static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
-{
- int bat_temp_raw;
- int bat_temp_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_temp_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_TEMP_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
- bat_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
- bat_temp_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- bat_temp_convert, bat_temp_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-
-static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
-{
- struct ab8500_gpadc *gpadc;
- u16 vmain_l, vmain_h, btemp_l, btemp_h;
- u16 vbat_l, vbat_h, ibat_l, ibat_h;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h,
- &vbat_l, &vbat_h, &ibat_l, &ibat_h);
- seq_printf(s,
- "VMAIN_L:0x%X\n"
- "VMAIN_H:0x%X\n"
- "BTEMP_L:0x%X\n"
- "BTEMP_H:0x%X\n"
- "VBAT_L:0x%X\n"
- "VBAT_H:0x%X\n"
- "IBAT_L:0x%X\n"
- "IBAT_H:0x%X\n",
- vmain_l, vmain_h, btemp_l, btemp_h,
- vbat_l, vbat_h, ibat_l, ibat_h);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
-
-static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", avg_sample);
-
- return 0;
-}
-
-static int ab8500_gpadc_avg_sample_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_avg_sample_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_avg_sample_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_avg_sample;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_avg_sample);
- if (err)
- return err;
-
- if ((user_avg_sample == SAMPLE_1) || (user_avg_sample == SAMPLE_4)
- || (user_avg_sample == SAMPLE_8)
- || (user_avg_sample == SAMPLE_16)) {
- avg_sample = (u8) user_avg_sample;
- } else {
- dev_err(dev,
- "debugfs err input: should be egal to 1, 4, 8 or 16\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_avg_sample_fops = {
- .open = ab8500_gpadc_avg_sample_open,
- .read = seq_read,
- .write = ab8500_gpadc_avg_sample_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_trig_edge_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", trig_edge);
-
- return 0;
-}
-
-static int ab8500_gpadc_trig_edge_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_trig_edge_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_trig_edge_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_trig_edge;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_trig_edge);
- if (err)
- return err;
-
- if ((user_trig_edge == RISING_EDGE)
- || (user_trig_edge == FALLING_EDGE)) {
- trig_edge = (u8) user_trig_edge;
- } else {
- dev_err(dev, "Wrong input:\n"
- "Enter 0. Rising edge\n"
- "Enter 1. Falling edge\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_trig_edge_fops = {
- .open = ab8500_gpadc_trig_edge_open,
- .read = seq_read,
- .write = ab8500_gpadc_trig_edge_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_trig_timer_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", trig_timer);
-
- return 0;
-}
-
-static int ab8500_gpadc_trig_timer_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_trig_timer_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_trig_timer;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_trig_timer);
- if (err)
- return err;
-
- if (user_trig_timer & ~0xFF) {
- dev_err(dev,
- "debugfs error input: should be between 0 to 255\n");
- return -EINVAL;
- }
-
- trig_timer = (u8) user_trig_timer;
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_trig_timer_fops = {
- .open = ab8500_gpadc_trig_timer_open,
- .read = seq_read,
- .write = ab8500_gpadc_trig_timer_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_conv_type_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", conv_type);
-
- return 0;
-}
-
-static int ab8500_gpadc_conv_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_conv_type_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_conv_type_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_conv_type;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_conv_type);
- if (err)
- return err;
-
- if ((user_conv_type == ADC_SW)
- || (user_conv_type == ADC_HW)) {
- conv_type = (u8) user_conv_type;
- } else {
- dev_err(dev, "Wrong input:\n"
- "Enter 0. ADC SW conversion\n"
- "Enter 1. ADC HW conversion\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_conv_type_fops = {
- .open = ab8500_gpadc_conv_type_open,
- .read = seq_read,
- .write = ab8500_gpadc_conv_type_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
/*
 * return the length of an ASCII numerical value, or 0 if the string is
 * not a numerical value.
@@ -2647,7 +2013,6 @@ static const struct file_operations ab8500_hwreg_fops = {
static int ab8500_debug_probe(struct platform_device *plf)
{
struct dentry *ab8500_dir;
- struct dentry *ab8500_gpadc_dir;
struct ab8500 *ab8500;
struct resource *res;
@@ -2689,9 +2054,6 @@ static int ab8500_debug_probe(struct platform_device *plf)
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
- ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
- ab8500_dir);
-
debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
&plf->dev, &ab8500_bank_registers_fops);
debugfs_create_file("all-banks", S_IRUGO, ab8500_dir,
@@ -2727,83 +2089,6 @@ static int ab8500_debug_probe(struct platform_device *plf)
&plf->dev, &ab8500_hwreg_fops);
debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP),
ab8500_dir, &plf->dev, &ab8500_modem_fops);
- debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bat_ctrl_fops);
- debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_btemp_ball_fops);
- debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_v_fops);
- debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect1_fops);
- debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect2_fops);
- debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux1_fops);
- debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux2_fops);
- debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_bat_v_fops);
- debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_vbus_v_fops);
- debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_c_fops);
- debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_charger_c_fops);
- debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bk_bat_v_fops);
- debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_die_temp_fops);
- debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_id_fops);
- if (is_ab8540(ab8500)) {
- debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_xtal_temp_fops);
- debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_fops);
- debugfs_create_file("batctrl_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_bat_ctrl_and_ibat_fops);
- debugfs_create_file("vbatmeas_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_meas_and_ibat_fops);
- debugfs_create_file("vbattruemeas_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
- debugfs_create_file("battemp_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_bat_temp_and_ibat_fops);
- debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_otp_calib_fops);
- }
- debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_avg_sample_fops);
- debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_edge_fops);
- debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_timer_fops);
- debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_conv_type_fops);
return 0;
}
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
deleted file mode 100644
index 005f9ee34cd1..000000000000
--- a/drivers/mfd/ab8500-gpadc.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: Johan Palsson <johan.palsson@stericsson.com>
- * Author: M'boumba Cedric Madianga
- */
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
-
-/*
- * GPADC register offsets
- * Bank : 0x0A
- */
-#define AB8500_GPADC_CTRL1_REG 0x00
-#define AB8500_GPADC_CTRL2_REG 0x01
-#define AB8500_GPADC_CTRL3_REG 0x02
-#define AB8500_GPADC_AUTO_TIMER_REG 0x03
-#define AB8500_GPADC_STAT_REG 0x04
-#define AB8500_GPADC_MANDATAL_REG 0x05
-#define AB8500_GPADC_MANDATAH_REG 0x06
-#define AB8500_GPADC_AUTODATAL_REG 0x07
-#define AB8500_GPADC_AUTODATAH_REG 0x08
-#define AB8500_GPADC_MUX_CTRL_REG 0x09
-#define AB8540_GPADC_MANDATA2L_REG 0x09
-#define AB8540_GPADC_MANDATA2H_REG 0x0A
-#define AB8540_GPADC_APEAAX_REG 0x10
-#define AB8540_GPADC_APEAAT_REG 0x11
-#define AB8540_GPADC_APEAAM_REG 0x12
-#define AB8540_GPADC_APEAAH_REG 0x13
-#define AB8540_GPADC_APEAAL_REG 0x14
-
-/*
- * OTP register offsets
- * Bank : 0x15
- */
-#define AB8500_GPADC_CAL_1 0x0F
-#define AB8500_GPADC_CAL_2 0x10
-#define AB8500_GPADC_CAL_3 0x11
-#define AB8500_GPADC_CAL_4 0x12
-#define AB8500_GPADC_CAL_5 0x13
-#define AB8500_GPADC_CAL_6 0x14
-#define AB8500_GPADC_CAL_7 0x15
-/* New calibration for 8540 */
-#define AB8540_GPADC_OTP4_REG_7 0x38
-#define AB8540_GPADC_OTP4_REG_6 0x39
-#define AB8540_GPADC_OTP4_REG_5 0x3A
-
-/* gpadc constants */
-#define EN_VINTCORE12 0x04
-#define EN_VTVOUT 0x02
-#define EN_GPADC 0x01
-#define DIS_GPADC 0x00
-#define AVG_1 0x00
-#define AVG_4 0x20
-#define AVG_8 0x40
-#define AVG_16 0x60
-#define ADC_SW_CONV 0x04
-#define EN_ICHAR 0x80
-#define BTEMP_PULL_UP 0x08
-#define EN_BUF 0x40
-#define DIS_ZERO 0x00
-#define GPADC_BUSY 0x01
-#define EN_FALLING 0x10
-#define EN_TRIG_EDGE 0x02
-#define EN_VBIAS_XTAL_TEMP 0x02
-
-/* GPADC constants from AB8500 spec, UM0836 */
-#define ADC_RESOLUTION 1024
-#define ADC_CH_BTEMP_MIN 0
-#define ADC_CH_BTEMP_MAX 1350
-#define ADC_CH_DIETEMP_MIN 0
-#define ADC_CH_DIETEMP_MAX 1350
-#define ADC_CH_CHG_V_MIN 0
-#define ADC_CH_CHG_V_MAX 20030
-#define ADC_CH_ACCDET2_MIN 0
-#define ADC_CH_ACCDET2_MAX 2500
-#define ADC_CH_VBAT_MIN 2300
-#define ADC_CH_VBAT_MAX 4800
-#define ADC_CH_CHG_I_MIN 0
-#define ADC_CH_CHG_I_MAX 1500
-#define ADC_CH_BKBAT_MIN 0
-#define ADC_CH_BKBAT_MAX 3200
-
-/* GPADC constants from AB8540 spec */
-#define ADC_CH_IBAT_MIN (-6000) /* mA range measured by ADC for ibat */
-#define ADC_CH_IBAT_MAX 6000
-#define ADC_CH_IBAT_MIN_V (-60) /* mV range measured by ADC for ibat */
-#define ADC_CH_IBAT_MAX_V 60
-#define IBAT_VDROP_L (-56) /* mV */
-#define IBAT_VDROP_H 56
-
-/* This is used to not lose precision when dividing to get gain and offset */
-#define CALIB_SCALE 1000
-/*
- * Number of bits shift used to not lose precision
- * when dividing to get ibat gain.
- */
-#define CALIB_SHIFT_IBAT 20
-
-/* Time in ms before disabling regulator */
-#define GPADC_AUDOSUSPEND_DELAY 1
-
-#define CONVERSION_TIME 500 /* ms */
-
-enum cal_channels {
- ADC_INPUT_VMAIN = 0,
- ADC_INPUT_BTEMP,
- ADC_INPUT_VBAT,
- ADC_INPUT_IBAT,
- NBR_CAL_INPUTS,
-};
-
-/**
- * struct adc_cal_data - Table for storing gain and offset for the calibrated
- * ADC channels
- * @gain: Gain of the ADC channel
- * @offset: Offset of the ADC channel
- * @otp_calib_hi: raw high calibration value read from OTP
- * @otp_calib_lo: raw low calibration value read from OTP
- */
-struct adc_cal_data {
- s64 gain;
- s64 offset;
- u16 otp_calib_hi;
- u16 otp_calib_lo;
-};
-
-/**
- * struct ab8500_gpadc - AB8500 GPADC device information
- * @dev: pointer to the struct device
- * @node: a list of AB8500 GPADCs, hence prepared for
- *	reentrance
- * @parent: pointer to the struct ab8500
- * @ab8500_gpadc_complete: pointer to the struct completion, to indicate
- *	the completion of gpadc conversion
- * @ab8500_gpadc_lock: mutex serializing access to the GPADC
- * @regu: pointer to the struct regulator
- * @irq_sw: interrupt number that is used by gpadc for SW
- *	conversion
- * @irq_hw: interrupt number that is used by gpadc for HW
- *	conversion
- * @cal_data: array of ADC calibration data structs
- */
-struct ab8500_gpadc {
- struct device *dev;
- struct list_head node;
- struct ab8500 *parent;
- struct completion ab8500_gpadc_complete;
- struct mutex ab8500_gpadc_lock;
- struct regulator *regu;
- int irq_sw;
- int irq_hw;
- struct adc_cal_data cal_data[NBR_CAL_INPUTS];
-};
-
-static LIST_HEAD(ab8500_gpadc_list);
-
-/**
- * ab8500_gpadc_get() - look up an AB8500 GPADC instance by device name
- * @name: device name of the GPADC to look up
- *
- * Returns the matching GPADC from the instance list, or an ERR_PTR if no
- * instance with that name has been registered.
- */
-struct ab8500_gpadc *ab8500_gpadc_get(char *name)
-{
- struct ab8500_gpadc *gpadc;
-
- list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
- if (!strcmp(name, dev_name(gpadc->dev)))
- return gpadc;
- }
-
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(ab8500_gpadc_get);
-
-/**
- * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
- * @gpadc: pointer to the struct ab8500_gpadc
- * @channel: analog channel that was converted
- * @ad_value: the raw ADC value to convert
- */
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 channel,
- int ad_value)
-{
- int res;
-
- switch (channel) {
- case MAIN_CHARGER_V:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_VMAIN].gain) {
- res = ADC_CH_CHG_V_MIN + (ADC_CH_CHG_V_MAX -
- ADC_CH_CHG_V_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VMAIN].gain +
- gpadc->cal_data[ADC_INPUT_VMAIN].offset) / CALIB_SCALE;
- break;
-
- case XTAL_TEMP:
- case BAT_CTRL:
- case BTEMP_BALL:
- case ACC_DETECT1:
- case ADC_AUX1:
- case ADC_AUX2:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_BTEMP].gain) {
- res = ADC_CH_BTEMP_MIN + (ADC_CH_BTEMP_MAX -
- ADC_CH_BTEMP_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_BTEMP].gain +
- gpadc->cal_data[ADC_INPUT_BTEMP].offset) / CALIB_SCALE;
- break;
-
- case MAIN_BAT_V:
- case VBAT_TRUE_MEAS:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_VBAT].gain) {
- res = ADC_CH_VBAT_MIN + (ADC_CH_VBAT_MAX -
- ADC_CH_VBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VBAT].gain +
- gpadc->cal_data[ADC_INPUT_VBAT].offset) / CALIB_SCALE;
- break;
-
- case DIE_TEMP:
- res = ADC_CH_DIETEMP_MIN +
- (ADC_CH_DIETEMP_MAX - ADC_CH_DIETEMP_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case ACC_DETECT2:
- res = ADC_CH_ACCDET2_MIN +
- (ADC_CH_ACCDET2_MAX - ADC_CH_ACCDET2_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case VBUS_V:
- res = ADC_CH_CHG_V_MIN +
- (ADC_CH_CHG_V_MAX - ADC_CH_CHG_V_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case MAIN_CHARGER_C:
- case USB_CHARGER_C:
- res = ADC_CH_CHG_I_MIN +
- (ADC_CH_CHG_I_MAX - ADC_CH_CHG_I_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case BK_BAT_V:
- res = ADC_CH_BKBAT_MIN +
- (ADC_CH_BKBAT_MAX - ADC_CH_BKBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case IBAT_VIRTUAL_CHANNEL:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_IBAT].gain) {
- res = ADC_CH_IBAT_MIN + (ADC_CH_IBAT_MAX -
- ADC_CH_IBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_IBAT].gain +
- gpadc->cal_data[ADC_INPUT_IBAT].offset)
- >> CALIB_SHIFT_IBAT;
- break;
-
- default:
- dev_err(gpadc->dev,
- "unknown channel, not possible to convert\n");
- res = -EINVAL;
- break;
-
- }
- return res;
-}
-EXPORT_SYMBOL(ab8500_gpadc_ad_to_voltage);
-
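The uncalibrated fallback above is a plain linear interpolation from the
10-bit ADC code onto the channel's millivolt range. As a standalone trace
(illustration only, not driver code), a mid-scale MAIN_BAT_V reading maps
through the ADC_CH_VBAT_MIN/MAX values defined earlier as follows:

	#include <assert.h>

	int main(void)
	{
		int ad_value = 512;	/* mid-scale 10-bit ADC code */

		/* ADC_CH_VBAT_MIN + (MAX - MIN) * code / ADC_RESOLUTION */
		int mv = 2300 + (4800 - 2300) * ad_value / 1024;

		assert(mv == 3550);
		return 0;
	}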
-/**
- * ab8500_gpadc_sw_hw_convert() - gpadc conversion
- * @gpadc: pointer to the struct ab8500_gpadc
- * @channel: analog channel to be converted to digital data
- * @avg_sample: number of ADC samples to average
- * @trig_edge: selected ADC trig edge
- * @trig_timer: selected ADC trigger delay timer
- * @conv_type: selected conversion type (HW or SW conversion)
- *
- * This function converts the selected analog input to digital
- * data.
- */
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
-{
- int ad_value;
- int voltage;
-
- ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type);
-
- /* On failure retry a second time */
- if (ad_value < 0)
- ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type);
- if (ad_value < 0) {
- dev_err(gpadc->dev, "GPADC raw value failed ch: %d\n",
- channel);
- return ad_value;
- }
-
- voltage = ab8500_gpadc_ad_to_voltage(gpadc, channel, ad_value);
- if (voltage < 0)
- dev_err(gpadc->dev,
- "GPADC to voltage conversion failed ch: %d AD: 0x%x\n",
- channel, ad_value);
-
- return voltage;
-}
-EXPORT_SYMBOL(ab8500_gpadc_sw_hw_convert);
-
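A minimal consumer sketch of this exported API; the channel and trigger
constants are assumed to come from <linux/mfd/abx500/ab8500-gpadc.h>, and
the device name is a hypothetical example, not taken from this patch:

	#include <linux/err.h>
	#include <linux/mfd/abx500/ab8500-gpadc.h>

	/* Hypothetical consumer: read the main battery voltage in mV. */
	static int example_read_vbat_mv(void)
	{
		struct ab8500_gpadc *gpadc;

		/* "ab8500-gpadc.0" is an assumed device name */
		gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
		if (IS_ERR(gpadc))
			return PTR_ERR(gpadc);

		/* 16-sample average, SW-triggered conversion */
		return ab8500_gpadc_sw_hw_convert(gpadc, MAIN_BAT_V,
						  SAMPLE_16, RISING_EDGE,
						  0, ADC_SW);
	}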
-/**
- * ab8500_gpadc_read_raw() - gpadc read
- * @gpadc: pointer to the struct ab8500_gpadc
- * @channel: analog channel to be read
- * @avg_sample: number of ADC samples to average
- * @trig_edge: selected trig edge
- * @trig_timer: selected ADC trigger delay timer
- * @conv_type: selected conversion type (HW or SW conversion)
- *
- * This function obtains the raw ADC value for a hardware conversion;
- * the result then needs to be converted by calling
- * ab8500_gpadc_ad_to_voltage().
- */
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
-{
- return ab8500_gpadc_double_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type,
- NULL);
-}
-
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat)
-{
- int ret;
- int looplimit = 0;
- unsigned long completion_timeout;
- u8 val, low_data, high_data, low_data2, high_data2;
- u8 val_reg1 = 0;
- unsigned int delay_min = 0;
- unsigned int delay_max = 0;
- u8 data_low_addr, data_high_addr;
-
- if (!gpadc)
- return -ENODEV;
-
-	/* check if conversion is supported */
- if ((gpadc->irq_sw < 0) && (conv_type == ADC_SW))
- return -ENOTSUPP;
- if ((gpadc->irq_hw < 0) && (conv_type == ADC_HW))
- return -ENOTSUPP;
-
- mutex_lock(&gpadc->ab8500_gpadc_lock);
- /* Enable VTVout LDO this is required for GPADC */
- pm_runtime_get_sync(gpadc->dev);
-
- /* Check if ADC is not busy, lock and proceed */
- do {
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
- if (ret < 0)
- goto out;
- if (!(val & GPADC_BUSY))
- break;
- msleep(20);
- } while (++looplimit < 10);
- if (looplimit >= 10 && (val & GPADC_BUSY)) {
-		dev_err(gpadc->dev, "gpadc_conversion: GPADC busy\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Enable GPADC */
- val_reg1 |= EN_GPADC;
-
- /* Select the channel source and set average samples */
- switch (avg_sample) {
- case SAMPLE_1:
- val = channel | AVG_1;
- break;
- case SAMPLE_4:
- val = channel | AVG_4;
- break;
- case SAMPLE_8:
- val = channel | AVG_8;
- break;
- default:
- val = channel | AVG_16;
- break;
- }
-
- if (conv_type == ADC_HW) {
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL3_REG, val);
- val_reg1 |= EN_TRIG_EDGE;
- if (trig_edge)
- val_reg1 |= EN_FALLING;
- } else
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL2_REG, val);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: set avg samples failed\n");
- goto out;
- }
-
-	/*
-	 * Enable ADC, buffering, select rising edge and enable the ADC path
-	 * charging current sense if needed; ABB 3.0 needs some special
-	 * treatment too.
-	 */
- switch (channel) {
- case MAIN_CHARGER_C:
- case USB_CHARGER_C:
- val_reg1 |= EN_BUF | EN_ICHAR;
- break;
- case BTEMP_BALL:
- if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
- val_reg1 |= EN_BUF | BTEMP_PULL_UP;
-			/*
-			 * Delay might be needed for ABB8500 cut 3.0; if not,
-			 * remove this when hardware is available.
-			 */
- delay_min = 1000; /* Delay in micro seconds */
- delay_max = 10000; /* large range optimises sleepmode */
- break;
- }
- /* Intentional fallthrough */
- default:
- val_reg1 |= EN_BUF;
- break;
- }
-
- /* Write configuration to register */
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL1_REG, val_reg1);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: set Control register failed\n");
- goto out;
- }
-
- if (delay_min != 0)
- usleep_range(delay_min, delay_max);
-
- if (conv_type == ADC_HW) {
- /* Set trigger delay timer */
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG, trig_timer);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: trig timer failed\n");
- goto out;
- }
- completion_timeout = 2 * HZ;
- data_low_addr = AB8500_GPADC_AUTODATAL_REG;
- data_high_addr = AB8500_GPADC_AUTODATAH_REG;
- } else {
- /* Start SW conversion */
- ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
- ADC_SW_CONV, ADC_SW_CONV);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: start s/w conv failed\n");
- goto out;
- }
- completion_timeout = msecs_to_jiffies(CONVERSION_TIME);
- data_low_addr = AB8500_GPADC_MANDATAL_REG;
- data_high_addr = AB8500_GPADC_MANDATAH_REG;
- }
-
- /* wait for completion of conversion */
- if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
- completion_timeout)) {
- dev_err(gpadc->dev,
-			"timeout: didn't receive GPADC conversion interrupt\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Read the converted RAW data */
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, data_low_addr, &low_data);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: read low data failed\n");
- goto out;
- }
-
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, data_high_addr, &high_data);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: read high data failed\n");
- goto out;
- }
-
-	/* Check if a double conversion is required */
- if ((channel == BAT_CTRL_AND_IBAT) ||
- (channel == VBAT_MEAS_AND_IBAT) ||
- (channel == VBAT_TRUE_MEAS_AND_IBAT) ||
- (channel == BAT_TEMP_AND_IBAT)) {
-
- if (conv_type == ADC_HW) {
- /* not supported */
- ret = -ENOTSUPP;
- dev_err(gpadc->dev,
- "gpadc_conversion: only SW double conversion supported\n");
- goto out;
- } else {
- /* Read the converted RAW data 2 */
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
- &low_data2);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: read sw low data 2 failed\n");
- goto out;
- }
-
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
- &high_data2);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: read sw high data 2 failed\n");
- goto out;
- }
- if (ibat != NULL) {
- *ibat = (high_data2 << 8) | low_data2;
- } else {
- dev_warn(gpadc->dev,
- "gpadc_conversion: ibat not stored\n");
- }
-
- }
- }
-
- /* Disable GPADC */
- ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
- goto out;
- }
-
- /* Disable VTVout LDO this is required for GPADC */
- pm_runtime_mark_last_busy(gpadc->dev);
- pm_runtime_put_autosuspend(gpadc->dev);
-
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
-
- return (high_data << 8) | low_data;
-
-out:
-	/*
-	 * Experience has shown that the GPADC needs to be turned off if an
-	 * error occurs, otherwise we might have problems when waiting for the
-	 * busy bit in the GPADC status register to go low. On V1.1 the
-	 * wait_for_completion seems to time out waiting for an interrupt;
-	 * not seen on V2.0.
-	 */
- (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- pm_runtime_put(gpadc->dev);
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
- dev_err(gpadc->dev,
- "gpadc_conversion: Failed to AD convert channel %d\n", channel);
- return ret;
-}
-EXPORT_SYMBOL(ab8500_gpadc_read_raw);
-
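For the combined *_AND_IBAT channels, a hedged usage sketch of the
double-read path (SW conversion only, as enforced above; the channel and
sample constants are assumed from the GPADC header):

	/* Hypothetical: fetch the VBAT and IBAT raw codes in one conversion. */
	static int example_read_vbat_and_ibat_raw(struct ab8500_gpadc *gpadc,
						  int *ibat_raw)
	{
		/* returns the VBAT raw code; the IBAT code lands in *ibat_raw */
		return ab8500_gpadc_double_read_raw(gpadc, VBAT_MEAS_AND_IBAT,
						    SAMPLE_16, RISING_EDGE,
						    0, ADC_SW, ibat_raw);
	}

Both returned codes still need to be scaled with ab8500_gpadc_ad_to_voltage().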
-/**
- * ab8500_bm_gpadcconvend_handler() - isr for gpadc conversion completion
- * @irq: irq number
- * @_gpadc: pointer to the gpadc data passed when requesting the irq
- *
- * This is an interrupt service routine for gpadc conversion completion.
- * It signals that the conversion is complete and that the converted raw
- * value can be read from the registers.
- * Returns IRQ status (IRQ_HANDLED)
- */
-static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *_gpadc)
-{
- struct ab8500_gpadc *gpadc = _gpadc;
-
- complete(&gpadc->ab8500_gpadc_complete);
-
- return IRQ_HANDLED;
-}
-
-static int otp_cal_regs[] = {
- AB8500_GPADC_CAL_1,
- AB8500_GPADC_CAL_2,
- AB8500_GPADC_CAL_3,
- AB8500_GPADC_CAL_4,
- AB8500_GPADC_CAL_5,
- AB8500_GPADC_CAL_6,
- AB8500_GPADC_CAL_7,
-};
-
-static int otp4_cal_regs[] = {
- AB8540_GPADC_OTP4_REG_7,
- AB8540_GPADC_OTP4_REG_6,
- AB8540_GPADC_OTP4_REG_5,
-};
-
-static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
-{
- int i;
- int ret[ARRAY_SIZE(otp_cal_regs)];
- u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
- int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
- u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
- int vmain_high, vmain_low;
- int btemp_high, btemp_low;
- int vbat_high, vbat_low;
- int ibat_high, ibat_low;
- s64 V_gain, V_offset, V2A_gain, V2A_offset;
- struct ab8500 *ab8500;
-
- ab8500 = gpadc->parent;
-
- /* First we read all OTP registers and store the error code */
- for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
- ret[i] = abx500_get_register_interruptible(gpadc->dev,
- AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
- if (ret[i] < 0)
- dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
- __func__, otp_cal_regs[i]);
- }
-
- /*
- * The ADC calibration data is stored in OTP registers.
- * The layout of the calibration data is outlined below and a more
- * detailed description can be found in UM0836
- *
- * vm_h/l = vmain_high/low
- * bt_h/l = btemp_high/low
- * vb_h/l = vbat_high/low
- *
- * Data bits 8500/9540:
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | vm_h9 | vm_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- *
- * Data bits 8540:
- * OTP2
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- *
- * Data bits 8540:
- * OTP4
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | ib_h9 | ib_h8 | ib_h7
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
- *
- *
- * Ideal output ADC codes corresponding to injected input voltages
-	 * during manufacturing are:
- *
- * vmain_high: Vin = 19500mV / ADC ideal code = 997
- * vmain_low: Vin = 315mV / ADC ideal code = 16
- * btemp_high: Vin = 1300mV / ADC ideal code = 985
- * btemp_low: Vin = 21mV / ADC ideal code = 16
- * vbat_high: Vin = 4700mV / ADC ideal code = 982
- * vbat_low: Vin = 2380mV / ADC ideal code = 33
- */
-
- if (is_ab8540(ab8500)) {
-		/* Calculate gain and offset for VMAIN if all reads succeeded */
- if (!(ret[1] < 0 || ret[2] < 0)) {
- vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
- ((gpadc_cal[2] & 0xC0) >> 6));
- vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
- (u16)vmain_high;
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
- (u16)vmain_low;
-
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
- (19500 - 315) / (vmain_high - vmain_low);
- gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
- 19500 - (CALIB_SCALE * (19500 - 315) /
- (vmain_high - vmain_low)) * vmain_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
- }
-
- /* Read IBAT calibration Data */
- for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
- ret_otp4[i] = abx500_get_register_interruptible(
- gpadc->dev, AB8500_OTP_EMUL,
- otp4_cal_regs[i], &gpadc_otp4[i]);
- if (ret_otp4[i] < 0)
- dev_err(gpadc->dev,
- "%s: read otp4 reg 0x%02x failed\n",
- __func__, otp4_cal_regs[i]);
- }
-
- /* Calculate gain and offset for IBAT if all reads succeeded */
- if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
- ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
- ((gpadc_otp4[1] & 0xFE) >> 1));
- ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
- ((gpadc_otp4[2] & 0xF8) >> 3));
-
- gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi =
- (u16)ibat_high;
- gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo =
- (u16)ibat_low;
-
- V_gain = ((IBAT_VDROP_H - IBAT_VDROP_L)
- << CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
-
- V_offset = (IBAT_VDROP_H << CALIB_SHIFT_IBAT) -
- (((IBAT_VDROP_H - IBAT_VDROP_L) <<
- CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
- * ibat_high;
- /*
- * Result obtained is in mV (at a scale factor),
- * we need to calculate gain and offset to get mA
- */
- V2A_gain = (ADC_CH_IBAT_MAX - ADC_CH_IBAT_MIN)/
- (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
- V2A_offset = ((ADC_CH_IBAT_MAX_V * ADC_CH_IBAT_MIN -
- ADC_CH_IBAT_MAX * ADC_CH_IBAT_MIN_V)
- << CALIB_SHIFT_IBAT)
- / (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
-
- gpadc->cal_data[ADC_INPUT_IBAT].gain =
- V_gain * V2A_gain;
- gpadc->cal_data[ADC_INPUT_IBAT].offset =
- V_offset * V2A_gain + V2A_offset;
- } else {
- gpadc->cal_data[ADC_INPUT_IBAT].gain = 0;
- }
-
- dev_dbg(gpadc->dev, "IBAT gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_IBAT].gain,
- gpadc->cal_data[ADC_INPUT_IBAT].offset);
- } else {
- /* Calculate gain and offset for VMAIN if all reads succeeded */
- if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
- vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
- ((gpadc_cal[1] & 0x3F) << 2) |
- ((gpadc_cal[2] & 0xC0) >> 6));
- vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
- (u16)vmain_high;
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
- (u16)vmain_low;
-
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
- (19500 - 315) / (vmain_high - vmain_low);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
- 19500 - (CALIB_SCALE * (19500 - 315) /
- (vmain_high - vmain_low)) * vmain_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
- }
- }
-
- /* Calculate gain and offset for BTEMP if all reads succeeded */
- if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
- btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
- (gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
- btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
-
- gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi = (u16)btemp_high;
- gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo = (u16)btemp_low;
-
- gpadc->cal_data[ADC_INPUT_BTEMP].gain =
- CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
- gpadc->cal_data[ADC_INPUT_BTEMP].offset = CALIB_SCALE * 1300 -
- (CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
- * btemp_high;
- } else {
- gpadc->cal_data[ADC_INPUT_BTEMP].gain = 0;
- }
-
- /* Calculate gain and offset for VBAT if all reads succeeded */
- if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
- vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
- vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
-
- gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi = (u16)vbat_high;
- gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo = (u16)vbat_low;
-
- gpadc->cal_data[ADC_INPUT_VBAT].gain = CALIB_SCALE *
- (4700 - 2380) / (vbat_high - vbat_low);
- gpadc->cal_data[ADC_INPUT_VBAT].offset = CALIB_SCALE * 4700 -
- (CALIB_SCALE * (4700 - 2380) /
- (vbat_high - vbat_low)) * vbat_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VBAT].gain = 0;
- }
-
- dev_dbg(gpadc->dev, "VMAIN gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_VMAIN].gain,
- gpadc->cal_data[ADC_INPUT_VMAIN].offset);
-
- dev_dbg(gpadc->dev, "BTEMP gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_BTEMP].gain,
- gpadc->cal_data[ADC_INPUT_BTEMP].offset);
-
- dev_dbg(gpadc->dev, "VBAT gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_VBAT].gain,
- gpadc->cal_data[ADC_INPUT_VBAT].offset);
-}
-
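To make the fixed-point scaling concrete, here is the VMAIN computation
traced with the ideal codes from the comment above (997 at 19500 mV, 16 at
315 mV); a standalone illustration, not driver code:

	#include <assert.h>

	int main(void)
	{
		/* CALIB_SCALE * (19500 - 315) / (vmain_high - vmain_low) */
		long long gain = 1000LL * (19500 - 315) / (997 - 16);	/* 19556 */
		long long offset = 1000LL * 19500 - gain * 997;		/* 2668 */

		/* converting the ideal codes back recovers the voltages */
		assert((997 * gain + offset) / 1000 == 19500);
		assert((16 * gain + offset) / 1000 == 315);
		return 0;
	}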
-#ifdef CONFIG_PM
-static int ab8500_gpadc_runtime_suspend(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
-
- regulator_disable(gpadc->regu);
- return 0;
-}
-
-static int ab8500_gpadc_runtime_resume(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_enable(gpadc->regu);
- if (ret)
- dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int ab8500_gpadc_suspend(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
-
- mutex_lock(&gpadc->ab8500_gpadc_lock);
-
- pm_runtime_get_sync(dev);
-
- regulator_disable(gpadc->regu);
- return 0;
-}
-
-static int ab8500_gpadc_resume(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_enable(gpadc->regu);
- if (ret)
- dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
-
- pm_runtime_mark_last_busy(gpadc->dev);
- pm_runtime_put_autosuspend(gpadc->dev);
-
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
- return ret;
-}
-#endif
-
-static int ab8500_gpadc_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct ab8500_gpadc *gpadc;
-
- gpadc = devm_kzalloc(&pdev->dev,
- sizeof(struct ab8500_gpadc), GFP_KERNEL);
- if (!gpadc)
- return -ENOMEM;
-
-	gpadc->dev = &pdev->dev;
-	gpadc->parent = dev_get_drvdata(pdev->dev.parent);
-	mutex_init(&gpadc->ab8500_gpadc_lock);
-
-	gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
-	if (gpadc->irq_sw < 0)
-		dev_err(gpadc->dev, "failed to get platform sw_conv_end irq\n");
-
-	gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
-	if (gpadc->irq_hw < 0)
-		dev_err(gpadc->dev, "failed to get platform hw_conv_end irq\n");
-
- /* Initialize completion used to notify completion of conversion */
- init_completion(&gpadc->ab8500_gpadc_complete);
-
- /* Register interrupts */
- if (gpadc->irq_sw >= 0) {
- ret = request_threaded_irq(gpadc->irq_sw, NULL,
- ab8500_bm_gpadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
- "ab8500-gpadc-sw",
- gpadc);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "Failed to register interrupt irq: %d\n",
- gpadc->irq_sw);
- goto fail;
- }
- }
-
- if (gpadc->irq_hw >= 0) {
- ret = request_threaded_irq(gpadc->irq_hw, NULL,
- ab8500_bm_gpadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
- "ab8500-gpadc-hw",
- gpadc);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "Failed to register interrupt irq: %d\n",
- gpadc->irq_hw);
- goto fail_irq;
- }
- }
-
- /* VTVout LDO used to power up ab8500-GPADC */
- gpadc->regu = devm_regulator_get(&pdev->dev, "vddadc");
- if (IS_ERR(gpadc->regu)) {
- ret = PTR_ERR(gpadc->regu);
- dev_err(gpadc->dev, "failed to get vtvout LDO\n");
- goto fail_irq;
- }
-
- platform_set_drvdata(pdev, gpadc);
-
- ret = regulator_enable(gpadc->regu);
- if (ret) {
- dev_err(gpadc->dev, "Failed to enable vtvout LDO: %d\n", ret);
- goto fail_enable;
- }
-
- pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(gpadc->dev);
- pm_runtime_set_active(gpadc->dev);
- pm_runtime_enable(gpadc->dev);
-
- ab8500_gpadc_read_calibration_data(gpadc);
- list_add_tail(&gpadc->node, &ab8500_gpadc_list);
- dev_dbg(gpadc->dev, "probe success\n");
-
- return 0;
-
-fail_enable:
-fail_irq:
-	if (gpadc->irq_sw >= 0)
-		free_irq(gpadc->irq_sw, gpadc);
-	if (gpadc->irq_hw >= 0)
-		free_irq(gpadc->irq_hw, gpadc);
-fail:
- return ret;
-}
-
-static int ab8500_gpadc_remove(struct platform_device *pdev)
-{
- struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
-
- /* remove this gpadc entry from the list */
- list_del(&gpadc->node);
- /* remove interrupt - completion of Sw ADC conversion */
- if (gpadc->irq_sw >= 0)
- free_irq(gpadc->irq_sw, gpadc);
- if (gpadc->irq_hw >= 0)
- free_irq(gpadc->irq_hw, gpadc);
-
- pm_runtime_get_sync(gpadc->dev);
- pm_runtime_disable(gpadc->dev);
-
- regulator_disable(gpadc->regu);
-
- pm_runtime_set_suspended(gpadc->dev);
-
- pm_runtime_put_noidle(gpadc->dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
- SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
- ab8500_gpadc_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(ab8500_gpadc_suspend,
- ab8500_gpadc_resume)
-
-};
-
-static struct platform_driver ab8500_gpadc_driver = {
- .probe = ab8500_gpadc_probe,
- .remove = ab8500_gpadc_remove,
- .driver = {
- .name = "ab8500-gpadc",
- .pm = &ab8500_gpadc_pm_ops,
- },
-};
-
-static int __init ab8500_gpadc_init(void)
-{
- return platform_driver_register(&ab8500_gpadc_driver);
-}
-subsys_initcall_sync(ab8500_gpadc_init);
-
-/**
- * ab8540_gpadc_get_otp() - return the raw OTP calibration values
- * @gpadc: pointer to the struct ab8500_gpadc
- */
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h)
-{
- *vmain_l = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo;
- *vmain_h = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi;
- *btemp_l = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo;
- *btemp_h = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi;
- *vbat_l = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo;
- *vbat_h = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi;
- *ibat_l = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
- *ibat_h = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
-}
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index b6ab72fa0569..ab09b8225b76 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -75,7 +75,7 @@ static struct mfd_cell crystal_cove_byt_dev[] = {
.resources = gpio_resources,
},
{
- .name = "crystal_cove_pmic",
+ .name = "byt_crystal_cove_pmic",
},
{
.name = "crystal_cove_pwm",
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 6ac3607a79c2..c906324d293e 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -91,6 +91,32 @@ static int tps6105x_add_device(struct tps6105x *tps6105x,
PLATFORM_DEVID_AUTO, cell, 1, NULL, 0, NULL);
}
+static struct tps6105x_platform_data *tps6105x_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct tps6105x_platform_data *pdata;
+ struct device_node *child;
+
+ if (!np)
+ return ERR_PTR(-EINVAL);
+ if (of_get_available_child_count(np) > 1) {
+		dev_err(dev, "cannot support multiple operational modes\n");
+ return ERR_PTR(-EINVAL);
+ }
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ pdata->mode = TPS6105X_MODE_SHUTDOWN;
+ for_each_available_child_of_node(np, child) {
+ if (child->name && !of_node_cmp(child->name, "regulator"))
+ pdata->mode = TPS6105X_MODE_VOLTAGE;
+ else if (child->name && !of_node_cmp(child->name, "led"))
+ pdata->mode = TPS6105X_MODE_TORCH;
+ }
+
+ return pdata;
+}
+
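For instance, a node whose only enabled child is named "led" ends up with
pdata->mode set to TPS6105X_MODE_TORCH, a lone "regulator" child selects
TPS6105X_MODE_VOLTAGE, and with no children the mode stays
TPS6105X_MODE_SHUTDOWN.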
static int tps6105x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -99,9 +125,11 @@ static int tps6105x_probe(struct i2c_client *client,
int ret;
pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_err(&client->dev, "missing platform data\n");
- return -ENODEV;
+ if (!pdata)
+ pdata = tps6105x_parse_dt(&client->dev);
+ if (IS_ERR(pdata)) {
+		dev_err(&client->dev, "No platform data or DT found\n");
+ return PTR_ERR(pdata);
}
tps6105x = devm_kmalloc(&client->dev, sizeof(*tps6105x), GFP_KERNEL);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c55b63750757..7f0d48f406e3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -8,7 +8,6 @@ menu "Misc devices"
config SENSORS_LIS3LV02D
tristate
depends on INPUT
- select INPUT_POLLDEV
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
@@ -326,14 +325,14 @@ config SENSORS_TSL2550
will be called tsl2550.
config SENSORS_BH1770
- tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
- depends on I2C
- ---help---
- Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
SFH7770 (Osram) combined ambient light and proximity sensor chip.
- To compile this driver as a module, choose M here: the
- module will be called bh1770glc. If unsure, say N here.
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
config SENSORS_APDS990X
tristate "APDS990X combined als and proximity sensors"
@@ -438,8 +437,8 @@ config PCI_ENDPOINT_TEST
select CRC32
tristate "PCI Endpoint Test driver"
---help---
- Enable this configuration option to enable the host side test driver
- for PCI Endpoint.
+ Enable this configuration option to enable the host side test driver
+ for PCI Endpoint.
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 08b5b639d77f..7de7840f613c 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -109,7 +109,6 @@ static int __init tc_probe(struct platform_device *pdev)
struct atmel_tc *tc;
struct clk *clk;
int irq;
- struct resource *r;
unsigned int i;
if (of_get_child_count(pdev->dev.of_node))
@@ -133,8 +132,7 @@ static int __init tc_probe(struct platform_device *pdev)
if (IS_ERR(tc->slow_clk))
return PTR_ERR(tc->slow_clk);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tc->regs = devm_ioremap_resource(&pdev->dev, r);
+ tc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tc->regs))
return PTR_ERR(tc->regs);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index d9bff5a2217e..1f56267ed2f4 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 40a6d199f2ea..4214f02a17fd 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -191,7 +191,6 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
{
- int err = 0;
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
@@ -231,7 +230,7 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
- return err;
+ return 0;
}
static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
new file mode 100644
index 000000000000..32dcec2e9dfd
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5261.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5261_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0x3A, 0x3A, 0x3A},
+ {0xE6, 0xE6, 0xE6},
+ {0xB3, 0xB3, 0xB3},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+ /* 0x814~0x817 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ if (!rts5261_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rts5261_reg_to_card_drive_sel(reg);
+
+ if (rts5261_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ /* 0x724~0x727 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ pcr->aspm_en = rts5261_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
+}
+
+static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5261_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5261_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5261_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5261_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5261_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5261_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG1,
+ RTS5261_LDO1_TUNE_MASK, RTS5261_LDO1_33);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO1_POWERON, RTS5261_LDO1_POWERON);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO3318_POWERON, RTS5261_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5261_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5261_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5261_CARD_PWR_CTL,
+ RTS5261_PUPDC, RTS5261_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5261_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5261_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5261_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5261_stop_cmd(pcr);
+ rts5261_switch_output_voltage(pcr, OUTPUT_3V3);
+
+}
+
+static void rts5261_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+
+}
+
+static void rts5261_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+
+}
+
+static int rts5261_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5261_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO_POWERON_MASK, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5261_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_LMT_THD_MASK,
+ RTS5261_LDO1_LMT_THD_2000);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5261_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5261_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+}
+
+static void rts5261_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5261_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rts5261_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ }
+
+}
+
+static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+{
+ int retval;
+ u32 lval, i;
+ u8 valid, efuse_valid, tmp;
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+ REG_EFUSE_POR | REG_EFUSE_POWERON);
+ udelay(1);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_ADDR,
+ RTS5261_EFUSE_ADDR_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_CTL,
+ RTS5261_EFUSE_ENABLE | RTS5261_EFUSE_MODE_MASK,
+ RTS5261_EFUSE_ENABLE);
+
+	/* Wait for the transfer to end */
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_READ_DATA, &tmp);
+ efuse_valid = ((tmp & 0x0C) >> 2);
+ pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+ if (efuse_valid == 0) {
+ retval = rtsx_pci_read_config_dword(pcr,
+ PCR_SETTING_REG2, &lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "read 0x814 DW fail\n");
+ pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
+ /* 0x816 */
+ valid = (u8)((lval >> 16) & 0x03);
+ pcr_dbg(pcr, "0x816: %d\n", valid);
+ }
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR, 0);
+ pcr_dbg(pcr, "Disable efuse por!\n");
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ lval = lval & 0x00FFFFFF;
+ retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "write config fail\n");
+
+ return retval;
+}
+
+static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5261_init_from_cfg(pcr);
+ rts5261_init_from_hw(pcr);
+
+ /* power off efuse */
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_AUX_CLK_16M_EN, 0);
+
+ /* Release PRSNT# */
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_FORCE_PRSNT_LOW, 0);
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5261_fill_driving(pcr, OUTPUT_3V3);
+
+ /*
+	/*
+	 * If force_clkreq_0 is set, the CLKREQ# pin will be forced
+	 * to drive low, and we forcibly request the clock.
+	 */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+	/* Clear Enter RTD3_cold Information */
+ rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
+ RTS5261_INFORM_RTD3_COLD, 0);
+
+ return 0;
+}
+
+static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = FORCE_ASPM_CTL0;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ }
+ pcr->aspm_enabled = enable;
+
+}
+
+static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ udelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5261_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5261_enable_aspm(pcr, true);
+ else
+ rts5261_disable_aspm(pcr, false);
+}
+
+static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5261_pcr_ops = {
+ .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
+ .turn_on_led = rts5261_turn_on_led,
+ .turn_off_led = rts5261_turn_off_led,
+ .extra_init_hw = rts5261_extra_init_hw,
+ .enable_auto_blink = rts5261_enable_auto_blink,
+ .disable_auto_blink = rts5261_disable_auto_blink,
+ .card_power_on = rts5261_card_power_on,
+ .card_power_off = rts5261_card_power_off,
+ .switch_output_voltage = rts5261_switch_output_voltage,
+ .force_power_down = rts5261_force_power_down,
+ .stop_cmd = rts5261_stop_cmd,
+ .set_aspm = rts5261_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5261_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5261_enable_ocp,
+ .disable_ocp = rts5261_disable_ocp,
+ .init_ocp = rts5261_init_ocp,
+ .process_ocp = rts5261_process_ocp,
+ .clear_ocpstat = rts5261_clear_ocpstat,
+};
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u8 n, clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5261_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5261_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+		/* We use around 250 kHz (30 MHz / 128) in the initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = (u8)(clk - 4);
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2);
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+	/* Wait for the SSC clock to become stable */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+
+}
+
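To see how the divider loop behaves, here is a trace for a 25 MHz card
clock with double_clk set; MIN_DIV_N_PCR is assumed to be 80 and CLK_DIV_8
to be 4 (values recalled from rtsx_pcr.h, so treat them as assumptions).
A standalone illustration, not driver code:

	#include <assert.h>

	int main(void)
	{
		int min_n = 80 - 4;	/* MIN_DIV_N_PCR - 4 */
		int clk = 50;		/* 25 MHz card clock, doubled */
		int n = clk - 4;	/* 46 */
		int div = 1;		/* CLK_DIV_1 */

		while (n < min_n && div < 4) {	/* 4 == CLK_DIV_8 */
			n = (n + 4) * 2 - 4;	/* same clock, next divider */
			div++;
		}
		assert(n == 96 && div == 2);	/* one step -> CLK_DIV_2 */
		assert(n / 2 == 48);		/* written to SSC_DIV_N_0 */
		return 0;
	}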
+void rts5261_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5261_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5261_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5261_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5261_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5261_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5261_LDO1_OCP_THD_1040;
+}
diff --git a/drivers/misc/cardreader/rts5261.h b/drivers/misc/cardreader/rts5261.h
new file mode 100644
index 000000000000..ebfdd236a553
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5261_H
+#define RTS5261_H
+
+/* Newly added for the RTS5261 */
+#define rts5261_vendor_setting_valid(reg) ((reg) & 0x010000)
+#define rts5261_reg_to_aspm(reg) (((reg) >> 28) ^ 0x03)
+#define rts5261_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5261_reg_to_card_drive_sel(reg) ((((reg) >> 6) & 0x01) << 6)
+#define rts5261_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) ^ 0x03)
+#define rts5261_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) ^ 0x03)
+
+
+#define RTS5261_AUTOLOAD_CFG0 0xFF7B
+#define RTS5261_AUTOLOAD_CFG1 0xFF7C
+#define RTS5261_AUTOLOAD_CFG2 0xFF7D
+#define RTS5261_AUTOLOAD_CFG3 0xFF7E
+#define RTS5261_AUTOLOAD_CFG4 0xFF7F
+#define RTS5261_FORCE_PRSNT_LOW (1 << 6)
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
+
+#define RTS5261_REG_VREF 0xFE97
+#define RTS5261_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5261_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5261_SSC_DEPTH_MASK 0x07
+#define RTS5261_SSC_DEPTH_DISABLE 0x00
+#define RTS5261_SSC_DEPTH_8M 0x01
+#define RTS5261_SSC_DEPTH_4M 0x02
+#define RTS5261_SSC_DEPTH_2M 0x03
+#define RTS5261_SSC_DEPTH_1M 0x04
+#define RTS5261_SSC_DEPTH_512K 0x05
+#define RTS5261_SSC_DEPTH_256K 0x06
+#define RTS5261_SSC_DEPTH_128K 0x07
+
+/* eFuse control register */
+#define RTS5261_EFUSE_CTL 0xFC30
+#define RTS5261_EFUSE_ENABLE 0x80
+/* EFUSE_MODE: 0=READ 1=PROGRAM */
+#define RTS5261_EFUSE_MODE_MASK 0x40
+#define RTS5261_EFUSE_PROGRAM 0x40
+
+#define RTS5261_EFUSE_ADDR 0xFC31
+#define RTS5261_EFUSE_ADDR_MASK 0x3F
+
+#define RTS5261_EFUSE_WRITE_DATA 0xFC32
+#define RTS5261_EFUSE_READ_DATA 0xFC34
+
+/* DMACTL 0xFE2C */
+#define RTS5261_DMA_PACK_SIZE_MASK 0xF0
+
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01<<0)
+#define RTS5261_FW_EA_MODE_MASK (0x01<<5)
+
+/* FW config register */
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01<<0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01<<7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01<<6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01<<5)
+/* MCU_bus_mode_sel: 0=real 8051, 1=fake MCU */
+#define RTS5261_MCU_BUS_SEL_MASK (0x01<<4)
+/* MCU_clock_sel: VerA 00=aux16M, 01=aux400K, 1x=REFCLK100M */
+/* MCU_clock_sel: VerB 00=aux400K, 01=aux16M, 10=REFCLK100M */
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03<<2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01<<2)
+#define RTS5261_MCU_CLOCK_GATING (0x01<<1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01<<0)
+
+/* FW status register */
+#define RTS5261_FW_STATUS 0xFF56
+#define RTS5261_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5261_FW_CTL 0xFF5F
+#define RTS5261_INFORM_RTD3_COLD (0x01<<5)
+
+#define RTS5261_REG_FPDCTL 0xFF60
+
+#define RTS5261_REG_LDO12_CFG 0xFF6E
+#define RTS5261_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO12_115 (0x03<<1)
+#define RTS5261_LDO12_120 (0x04<<1)
+#define RTS5261_LDO12_125 (0x05<<1)
+#define RTS5261_LDO12_130 (0x06<<1)
+#define RTS5261_LDO12_135 (0x07<<1)
+
+/* LDO control register */
+#define RTS5261_CARD_PWR_CTL 0xFD50
+#define RTS5261_SD_CLK_ISO (0x01<<7)
+#define RTS5261_PAD_SD_DAT_FW_CTRL (0x01<<6)
+#define RTS5261_PUPDC (0x01<<5)
+#define RTS5261_SD_CMD_ISO (0x01<<4)
+#define RTS5261_SD_DAT_ISO_MASK (0x0F<<0)
+
+#define RTS5261_LDO1233318_POW_CTL 0xFF70
+#define RTS5261_LDO3318_POWERON (0x01<<3)
+#define RTS5261_LDO3_POWERON (0x01<<2)
+#define RTS5261_LDO2_POWERON (0x01<<1)
+#define RTS5261_LDO1_POWERON (0x01<<0)
+#define RTS5261_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5261_DV3318_CFG 0xFF71
+#define RTS5261_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5261_DV3318_18 (0x02<<4)
+#define RTS5261_DV3318_19 (0x04<<4)
+#define RTS5261_DV3318_33 (0x07<<4)
+
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO1_OCP_EN (0x01<<4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01<<1)
+
+/* Changed per request CRD6603-433, 190319 */
+#define RTS5261_LDO1_OCP_THD_740 (0x00<<5)
+#define RTS5261_LDO1_OCP_THD_800 (0x01<<5)
+#define RTS5261_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5261_LDO1_OCP_THD_920 (0x03<<5)
+#define RTS5261_LDO1_OCP_THD_980 (0x04<<5)
+#define RTS5261_LDO1_OCP_THD_1040 (0x05<<5)
+#define RTS5261_LDO1_OCP_THD_1100 (0x06<<5)
+#define RTS5261_LDO1_OCP_THD_1160 (0x07<<5)
+
+#define RTS5261_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5261_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5261_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5261_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5261_LDO1_CFG1 0xFF73
+#define RTS5261_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO1_18 (0x05<<1)
+#define RTS5261_LDO1_33 (0x07<<1)
+#define RTS5261_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO2_CFG0 0xFF74
+#define RTS5261_LDO2_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO2_OCP_EN (0x01<<4)
+#define RTS5261_LDO2_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO2_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO2_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO2_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO2_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO2_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO2_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO2_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO2_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO2_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO2_CFG1 0xFF75
+#define RTS5261_LDO2_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO2_18 (0x05<<1)
+#define RTS5261_LDO2_33 (0x07<<1)
+#define RTS5261_LDO2_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO3_CFG0 0xFF76
+#define RTS5261_LDO3_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO3_OCP_EN (0x01<<4)
+#define RTS5261_LDO3_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO3_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO3_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO3_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO3_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO3_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO3_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO3_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO3_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO3_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO3_CFG1 0xFF77
+#define RTS5261_LDO3_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO3_18 (0x05<<1)
+#define RTS5261_LDO3_33 (0x07<<1)
+#define RTS5261_LDO3_PWD_MASK (0x01<<0)
+
+#define RTS5261_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+
+/* Single LUN, supports SD / SD Express */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+#define SD_EXPRESS_LUN 2
+
+/* For the Change_FPGA_SSCClock function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5261_H */
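
Most of the fields above are programmed through the driver's masked register writes: rtsx_pci_write_register(pcr, reg, mask, val) changes only the bits selected by mask. A small standalone sketch of that read-modify-write, using the LDO1 OCP threshold that rts5261_init_params() selects (RTS5261_LDO1_OCP_THD_1040); the starting register value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define RTS5261_LDO1_OCP_THD_MASK	(0x07 << 5)
#define RTS5261_LDO1_OCP_THD_1040	(0x05 << 5)

/* Only the bits selected by mask change; everything else is preserved. */
static uint8_t reg_update(uint8_t old, uint8_t mask, uint8_t val)
{
	return (uint8_t)((old & ~mask) | (val & mask));
}

int main(void)
{
	uint8_t cfg0 = 0x12;	/* arbitrary prior register value */

	cfg0 = reg_update(cfg0, RTS5261_LDO1_OCP_THD_MASK,
			  RTS5261_LDO1_OCP_THD_1040);
	/* bits [7:5] now hold 0x05 (the 1040mA threshold): 0xB2 */
	printf("LDO1_CFG0 = 0x%02X\n", cfg0);
	return 0;
}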
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index b4a66b64f742..fd7b2167103d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -22,6 +22,7 @@
#include <asm/unaligned.h>
#include "rtsx_pcr.h"
+#include "rts5261.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -34,9 +35,6 @@ static struct mfd_cell rtsx_pcr_cells[] = {
[RTSX_SD_CARD] = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
},
- [RTSX_MS_CARD] = {
- .name = DRV_NAME_RTSX_PCI_MS,
- },
};
static const struct pci_device_id rtsx_pci_ids[] = {
@@ -51,6 +49,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -438,8 +437,16 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ if (PCI_PID(pcr) == PID_5261) {
+ if (len > 0xFFFF)
+ val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
+ | (((u64)len >> 16) << 6) | option;
+ else
+ val = ((u64)addr << 32) | ((u64)len << 16) | option;
+ } else {
+ val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ }
put_unaligned_le64(val, ptr);
pcr->sgi++;
}
@@ -684,7 +691,6 @@ int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
else
return -EINVAL;
-
return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
@@ -735,6 +741,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
};
+ if (PCI_PID(pcr) == PID_5261)
+ return rts5261_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
+
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
clk_divider = SD_CLK_DIVIDE_128;
@@ -1253,7 +1263,15 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
- err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ if (PCI_PID(pcr) == PID_5261) {
+ /* Gating real mcu clock */
+ err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
+ RTS5261_MCU_CLOCK_GATING, 0);
+ err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, 0);
+ } else {
+ err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ }
if (err < 0)
return err;
@@ -1283,7 +1301,12 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
/* Enable SSC Clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
0xFF, SSC_8X_EN | SSC_SEL_4M);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+ if (PCI_PID(pcr) == PID_5261)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5261_SSC_DEPTH_2M);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+
/* Disable cd_pwr_save */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
/* Clear Link Ready Interrupt */
@@ -1314,6 +1337,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_524A:
case PID_525A:
case PID_5260:
+ case PID_5261:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1393,9 +1417,14 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+
case 0x5260:
rts5260_init_params(pcr);
break;
+
+ case 0x5261:
+ rts5261_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
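
The RTS5261 scatter-gather descriptor packing added to rtsx_pci_add_sg_tbl() differs from older parts: the length field moves from bit 12 to bit 16, and lengths wider than 16 bits spill their upper bits into the field starting at bit 6, above the option flags. A standalone sketch with a worked value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing as the PID_5261 branch above. */
static uint64_t rts5261_sg_val(uint32_t addr, uint32_t len, uint8_t option)
{
	if (len > 0xFFFF)
		return ((uint64_t)addr << 32) | (((uint64_t)len & 0xFFFF) << 16)
			| (((uint64_t)len >> 16) << 6) | option;

	return ((uint64_t)addr << 32) | ((uint64_t)len << 16) | option;
}

int main(void)
{
	/* len 0x12345: 0x2345 lands at <<16, the leftover 0x1 at <<6 */
	printf("val = 0x%016" PRIx64 "\n",
	       rts5261_sg_val(0x80000000u, 0x12345u, 0x01));
	return 0;
}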
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 98f729263dc1..ed391df52f4f 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
+void rts5261_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2cfe3d4ae144..226b5efa6a77 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -175,6 +175,10 @@ static int eeprom_probe(struct i2c_client *client,
}
}
+	/* Let users know they are using a deprecated driver */
+ dev_notice(&client->dev,
+ "eeprom driver is deprecated, please use at24 instead\n");
+
/* create the sysfs eeprom file */
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 1b1a794d639d..ae4ee27a63c4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -32,8 +32,9 @@
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (64 * 1024 * 1024)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
+#define ADSP_MMAP_ADD_PAGES 0x1000
/* Retrives number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
@@ -66,6 +67,8 @@
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_MMAP 4
+#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
@@ -89,6 +92,23 @@ struct fastrpc_remote_arg {
u64 len;
};
+struct fastrpc_mmap_rsp_msg {
+ u64 vaddr;
+};
+
+struct fastrpc_mmap_req_msg {
+ s32 pgid;
+ u32 flags;
+ u64 vaddr;
+ s32 num;
+};
+
+struct fastrpc_munmap_req_msg {
+ s32 pgid;
+ u64 vaddr;
+ u64 size;
+};
+
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
@@ -123,6 +143,9 @@ struct fastrpc_buf {
/* Lock for dma buf attachments */
struct mutex lock;
struct list_head attachments;
+ /* mmap support */
+ struct list_head node; /* list of user requested mmaps */
+ uintptr_t raddr;
};
struct fastrpc_dma_buf_attachment {
@@ -192,6 +215,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
@@ -269,6 +293,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&buf->attachments);
+ INIT_LIST_HEAD(&buf->node);
mutex_init(&buf->lock);
buf->fl = fl;
@@ -276,6 +301,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->phys = 0;
buf->size = size;
buf->dev = dev;
+ buf->raddr = 0;
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
@@ -934,8 +960,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
- /* Wait for remote dsp to respond or time out */
- err = wait_for_completion_interruptible(&ctx->work);
+ if (kernel) {
+ if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
+ err = -ETIMEDOUT;
+ } else {
+ err = wait_for_completion_interruptible(&ctx->work);
+ }
+
if (err)
goto bail;
@@ -954,12 +985,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
bail:
- /* We are done with this compute context, remove it from pending list */
- spin_lock(&fl->lock);
- list_del(&ctx->node);
- spin_unlock(&fl->lock);
- fastrpc_context_put(ctx);
-
+ if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+ /* We are done with this compute context */
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+ }
if (err)
dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
@@ -1131,6 +1163,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
+ struct fastrpc_buf *buf, *b;
unsigned long flags;
fastrpc_release_current_dsp_process(fl);
@@ -1152,6 +1185,11 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_map_put(map);
}
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ list_del(&buf->node);
+ fastrpc_buf_free(buf);
+ }
+
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
@@ -1180,6 +1218,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->maps);
+ INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
@@ -1285,6 +1324,148 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
+ struct fastrpc_req_munmap *req)
+{
+ struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
+ struct fastrpc_buf *buf, *b;
+ struct fastrpc_munmap_req_msg req_msg;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ break;
+ buf = NULL;
+ }
+ spin_unlock(&fl->lock);
+
+ if (!buf) {
+ dev_err(dev, "mmap not in list\n");
+ return -EINVAL;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.size = buf->size;
+ req_msg.vaddr = buf->raddr;
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (!err) {
+		dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
+ spin_lock(&fl->lock);
+ list_del(&buf->node);
+ spin_unlock(&fl->lock);
+ fastrpc_buf_free(buf);
+ } else {
+		dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
+ }
+
+ return err;
+}
+
+static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_req_munmap req;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ return fastrpc_req_munmap_impl(fl, &req);
+}
+
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
+ struct fastrpc_buf *buf = NULL;
+ struct fastrpc_mmap_req_msg req_msg;
+ struct fastrpc_mmap_rsp_msg rsp_msg;
+ struct fastrpc_req_munmap req_unmap;
+ struct fastrpc_phy_page pages;
+ struct fastrpc_req_mmap req;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ if (req.flags != ADSP_MMAP_ADD_PAGES) {
+ dev_err(dev, "flag not supported 0x%x\n", req.flags);
+ return -EINVAL;
+ }
+
+ if (req.vaddrin) {
+ dev_err(dev, "adding user allocated pages is not supported\n");
+ return -EINVAL;
+ }
+
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+ if (err) {
+ dev_err(dev, "failed to allocate buffer\n");
+ return err;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.flags = req.flags;
+ req_msg.vaddr = req.vaddrin;
+ req_msg.num = sizeof(pages);
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ pages.addr = buf->phys;
+ pages.size = buf->size;
+
+ args[1].ptr = (u64) (uintptr_t) &pages;
+ args[1].length = sizeof(pages);
+
+ args[2].ptr = (u64) (uintptr_t) &rsp_msg;
+ args[2].length = sizeof(rsp_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (err) {
+ dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
+ goto err_invoke;
+ }
+
+	/* save the DSP address so the memory can later be freed on the DSP */
+ buf->raddr = (uintptr_t) rsp_msg.vaddr;
+
+ /* let the client know the address to use */
+ req.vaddrout = rsp_msg.vaddr;
+
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(&fl->lock);
+
+ if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ /* unmap the memory and release the buffer */
+ req_unmap.vaddrout = buf->raddr;
+ req_unmap.size = buf->size;
+ fastrpc_req_munmap_impl(fl, &req_unmap);
+ return -EFAULT;
+ }
+
+ dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
+ buf->raddr, buf->size);
+
+ return 0;
+
+err_invoke:
+ fastrpc_buf_free(buf);
+
+ return err;
+}
+
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1305,6 +1486,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
+ case FASTRPC_IOCTL_MMAP:
+ err = fastrpc_req_mmap(fl, argp);
+ break;
+ case FASTRPC_IOCTL_MUNMAP:
+ err = fastrpc_req_munmap(fl, argp);
+ break;
default:
err = -ENOTTY;
break;
@@ -1430,8 +1617,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
data->miscdev.minor = MISC_DYNAMIC_MINOR;
- data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
- domains[domain_id]);
+ data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
+ domains[domain_id]);
data->miscdev.fops = &fastrpc_fops;
err = misc_register(&data->miscdev);
if (err)
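
From user space, the two new ioctls grow and shrink the DSP address space. A hedged sketch of a caller, assuming the uapi definitions this driver exports (misc/fastrpc.h: struct fastrpc_req_mmap / fastrpc_req_munmap and the FASTRPC_IOCTL_MMAP / FASTRPC_IOCTL_MUNMAP numbers) and an ADSP device node named /dev/fastrpc-adsp; ADSP_MMAP_ADD_PAGES is redefined locally to match the driver-side value above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/fastrpc.h>

#define ADSP_MMAP_ADD_PAGES 0x1000	/* matches the driver-side value */

int main(void)
{
	struct fastrpc_req_mmap mreq;
	struct fastrpc_req_munmap ureq;
	int fd = open("/dev/fastrpc-adsp", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&mreq, 0, sizeof(mreq));
	mreq.flags = ADSP_MMAP_ADD_PAGES;	/* the only supported flag */
	mreq.vaddrin = 0;			/* driver allocates the pages */
	mreq.size = 4096;

	if (ioctl(fd, FASTRPC_IOCTL_MMAP, &mreq) == 0) {
		printf("DSP address 0x%llx\n",
		       (unsigned long long)mreq.vaddrout);

		memset(&ureq, 0, sizeof(ureq));
		ureq.vaddrout = mreq.vaddrout;
		ureq.size = mreq.size;
		ioctl(fd, FASTRPC_IOCTL_MUNMAP, &ureq);
	}

	close(fd);
	return 0;
}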
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index a9ac045dcfde..8850f475a413 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -65,6 +65,18 @@ static void cs_put(struct hl_cs *cs)
kref_put(&cs->refcount, cs_do_release);
}
+static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
+{
+	/*
+	 * A patched CB is created for external queue jobs, and for H/W queue
+	 * jobs if the user CB was allocated by the driver and the MMU is
+	 * disabled.
+	 */
+ return (job->queue_type == QUEUE_TYPE_EXT ||
+ (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb &&
+ !hdev->mmu_enable));
+}
+
/*
* cs_parser - parse the user command submission
*
@@ -91,11 +103,13 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.patched_cb = NULL;
parser.user_cb = job->user_cb;
parser.user_cb_size = job->user_cb_size;
- parser.ext_queue = job->ext_queue;
+ parser.queue_type = job->queue_type;
+ parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
- if (job->ext_queue) {
+
+ if (is_cb_patched(hdev, job)) {
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
@@ -124,7 +138,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job)) {
hl_userptr_delete_list(hdev, &job->userptr_list);
/*
@@ -140,6 +154,19 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
}
}
+	/* For H/W queue jobs, if a user CB was allocated by the driver and
+	 * the MMU is enabled, the user CB isn't released in cs_parser() and
+	 * thus should be released here.
+ */
+ if (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb && hdev->mmu_enable) {
+ spin_lock(&job->user_cb->lock);
+ job->user_cb->cs_cnt--;
+ spin_unlock(&job->user_cb->lock);
+
+ hl_cb_put(job->user_cb);
+ }
+
/*
* This is the only place where there can be multiple threads
* modifying the list at the same time
@@ -150,7 +177,8 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
hl_debugfs_remove_job(hdev, job);
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_put(cs);
kfree(job);
@@ -387,18 +415,13 @@ static void job_wq_completion(struct work_struct *work)
free_job(hdev, job);
}
-static struct hl_cb *validate_queue_index(struct hl_device *hdev,
- struct hl_cb_mgr *cb_mgr,
- struct hl_cs_chunk *chunk,
- bool *ext_queue)
+static int validate_queue_index(struct hl_device *hdev,
+ struct hl_cs_chunk *chunk,
+ enum hl_queue_type *queue_type,
+ bool *is_kernel_allocated_cb)
{
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
- u32 cb_handle;
- struct hl_cb *cb;
-
- /* Assume external queue */
- *ext_queue = true;
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
@@ -406,20 +429,29 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
- return NULL;
+ return -EINVAL;
}
if (hw_queue_prop->driver_only) {
dev_err(hdev->dev,
"Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
- return NULL;
- } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
- *ext_queue = false;
- return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
+ return -EINVAL;
}
- /* Retrieve CB object */
+ *queue_type = hw_queue_prop->type;
+ *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+
+ return 0;
+}
+
+static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
+ struct hl_cb_mgr *cb_mgr,
+ struct hl_cs_chunk *chunk)
+{
+ struct hl_cb *cb;
+ u32 cb_handle;
+
cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
cb = hl_cb_get(hdev, cb_mgr, cb_handle);
@@ -444,7 +476,8 @@ release_cb:
return NULL;
}
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
struct hl_cs_job *job;
@@ -452,12 +485,14 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
if (!job)
return NULL;
- job->ext_queue = ext_queue;
+ job->queue_type = queue_type;
+ job->is_kernel_allocated_cb = is_kernel_allocated_cb;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job))
INIT_LIST_HEAD(&job->userptr_list);
+
+ if (job->queue_type == QUEUE_TYPE_EXT)
INIT_WORK(&job->finish_work, job_wq_completion);
- }
return job;
}
@@ -470,7 +505,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
- bool ext_queue_present = false;
+ bool int_queues_only = true;
u32 size_to_copy;
int rc, i, parse_cnt;
@@ -514,23 +549,33 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
- bool ext_queue;
+ enum hl_queue_type queue_type;
+ bool is_kernel_allocated_cb;
+
+ rc = validate_queue_index(hdev, chunk, &queue_type,
+ &is_kernel_allocated_cb);
+ if (rc)
+ goto free_cs_object;
- cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
- &ext_queue);
- if (ext_queue) {
- ext_queue_present = true;
+ if (is_kernel_allocated_cb) {
+ cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
rc = -EINVAL;
goto free_cs_object;
}
+ } else {
+ cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- job = hl_cs_allocate_job(hdev, ext_queue);
+ if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ int_queues_only = false;
+
+ job = hl_cs_allocate_job(hdev, queue_type,
+ is_kernel_allocated_cb);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
- if (ext_queue)
+ if (is_kernel_allocated_cb)
goto release_cb;
else
goto free_cs_object;
@@ -540,7 +585,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
- if (job->ext_queue)
+ if (is_kernel_allocated_cb)
job->job_cb_size = cb->size;
else
job->job_cb_size = chunk->cb_size;
@@ -553,10 +598,11 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/*
* Increment CS reference. When CS reference is 0, CS is
* done and can be signaled to user and free all its resources
- * Only increment for JOB on external queues, because only
- * for those JOBs we get completion
+	 * Only increment for jobs on external or H/W queues, because only
+	 * for those jobs do we get a completion
*/
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_get(cs);
hl_debugfs_add_job(hdev, job);
@@ -570,9 +616,9 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
}
}
- if (!ext_queue_present) {
+ if (int_queues_only) {
dev_err(hdev->dev,
- "Reject CS %d.%llu because no external queues jobs\n",
+ "Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
@@ -580,9 +626,10 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu to H/W queues, error %d\n",
- cs->ctx->asid, cs->sequence, rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ cs->ctx->asid, cs->sequence, rc);
goto free_cs_object;
}
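
The rules above are easy to lose in the diff, so here is a standalone restatement of the patched-CB predicate (same logic as is_cb_patched(), with the hl_device dependency replaced by a plain mmu_enable flag for illustration):

#include <stdbool.h>
#include <stdio.h>

enum hl_queue_type {
	QUEUE_TYPE_NA, QUEUE_TYPE_EXT, QUEUE_TYPE_INT,
	QUEUE_TYPE_CPU, QUEUE_TYPE_HW
};

/* EXT jobs always use a patched CB; H/W jobs only when the CB came
 * from the kernel CB pool and the MMU is disabled.
 */
static bool cb_is_patched(enum hl_queue_type type,
			  bool is_kernel_allocated_cb, bool mmu_enable)
{
	return type == QUEUE_TYPE_EXT ||
	       (type == QUEUE_TYPE_HW && is_kernel_allocated_cb &&
		!mmu_enable);
}

int main(void)
{
	printf("EXT:                    %d\n",
	       cb_is_patched(QUEUE_TYPE_EXT, true, true));	/* 1 */
	printf("HW, kernel CB, MMU on:  %d\n",
	       cb_is_patched(QUEUE_TYPE_HW, true, true));	/* 0 */
	printf("HW, kernel CB, MMU off: %d\n",
	       cb_is_patched(QUEUE_TYPE_HW, true, false));	/* 1 */
	return 0;
}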
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 87f37ac31ccd..20413e350343 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -307,45 +307,57 @@ static inline u64 get_hop0_addr(struct hl_ctx *ctx)
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
{
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
+ ((virt_addr & mask) >> shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
+ mmu_specs->hop0_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
+ mmu_specs->hop1_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
+ mmu_specs->hop2_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
+ mmu_specs->hop3_shift);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
+ mmu_specs->hop4_shift);
}
static inline u64 get_next_hop_addr(u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
@@ -355,7 +367,10 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
struct hl_ctx *ctx;
+ bool is_dram_addr;
u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
@@ -377,33 +392,39 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* the following lookup is copied from unmap() in mmu.c */
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
hop1_addr = get_next_hop_addr(hop0_pte);
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
hop2_addr = get_next_hop_addr(hop1_pte);
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
hop3_addr = get_next_hop_addr(hop2_pte);
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
if (!(hop3_pte & LAST_MASK)) {
@@ -412,7 +433,8 @@ static int mmu_show(struct seq_file *s, void *data)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
if (!(hop4_pte & PAGE_PRESENT_MASK))
goto not_mapped;
@@ -506,6 +528,12 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't check device idle during reset\n");
+ return 0;
+ }
+
hdev->asic_funcs->is_device_idle(hdev, NULL, s);
return 0;
@@ -534,41 +562,50 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
struct hl_ctx *ctx = hdev->compute_ctx;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop_addr, hop_pte_addr, hop_pte;
- u64 offset_mask = HOP4_MASK | OFFSET_MASK;
+ u64 offset_mask = HOP4_MASK | FLAGS_MASK;
int rc = 0;
+ bool is_dram_addr;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* hop 0 */
hop_addr = get_hop0_addr(ctx);
- hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 1 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 2 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 3 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
if (!(hop_pte & LAST_MASK)) {
@@ -576,10 +613,11 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
+ virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
- offset_mask = OFFSET_MASK;
+ offset_mask = FLAGS_MASK;
}
if (!(hop_pte & PAGE_PRESENT_MASK))
@@ -608,6 +646,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
u32 val;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+ return 0;
+ }
+
if (*ppos)
return 0;
@@ -637,6 +680,11 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+ return 0;
+ }
+
rc = kstrtouint_from_user(buf, count, 16, &value);
if (rc)
return rc;
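
get_hopN_pte_addr() reduces the five per-hop helpers to one formula: pte_addr = hop_addr + pte_size * ((vaddr & mask) >> shift). A standalone worked example with illustrative (non-Goya) mask and shift values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same formula the shared helper applies at every hop. */
static uint64_t hop_pte_addr(uint64_t hop_addr, uint64_t vaddr,
			     uint64_t mask, uint64_t shift, uint32_t pte_size)
{
	return hop_addr + (uint64_t)pte_size * ((vaddr & mask) >> shift);
}

int main(void)
{
	/* 9-bit index taken from bits [20:12], 8-byte PTEs */
	uint64_t addr = hop_pte_addr(0x1000000, 0xDEAD3000,
				     0x1FF000, 12, 8);

	/* index = 0xD3, so addr = 0x1000000 + 0xD3 * 8 = 0x1000698 */
	printf("pte addr = 0x%" PRIx64 "\n", addr);
	return 0;
}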
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 459fee70a597..b155e9549076 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,12 +42,10 @@ static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
struct hl_device *hdev;
- struct hl_ctx *ctx;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
- ctx = hpriv->ctx;
put_pid(hpriv->taskpid);
@@ -889,13 +887,19 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
- /* Kill processes here after CS rollback. This is because the process
- * can't really exit until all its CSs are done, which is what we
- * do in cs rollback
- */
- if (from_hard_reset_thread)
+ if (hard_reset) {
+ /* Kill processes here after CS rollback. This is because the
+ * process can't really exit until all its CSs are done, which
+ * is what we do in cs rollback
+ */
device_kill_open_processes(hdev);
+ /* Flush the Event queue workers to make sure no other thread is
+ * reading or writing to registers during the reset
+ */
+ flush_workqueue(hdev->eq_wq);
+ }
+
/* Release kernel context */
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index ea2ca67fbfbf..f5bd03171dac 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -143,10 +143,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
if (!rc) {
- if (result == ARMCP_PACKET_FENCE_VAL)
- dev_info(hdev->dev,
- "queue test on CPU queue succeeded\n");
- else
+ if (result != ARMCP_PACKET_FENCE_VAL)
dev_err(hdev->dev,
"CPU queue test failed (0x%08lX)\n", result);
} else {
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 6fba14b81f90..c8d16aa4382c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -72,6 +72,9 @@
*
*/
+#define GOYA_UBOOT_FW_FILE "habanalabs/goya/goya-u-boot.bin"
+#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
+
#define GOYA_MMU_REGS_NUM 63
#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
@@ -337,17 +340,20 @@ void goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 1;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < HL_MAX_QUEUES; i++)
@@ -377,6 +383,23 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->dmmu.hop0_shift = HOP0_SHIFT;
+ prop->dmmu.hop1_shift = HOP1_SHIFT;
+ prop->dmmu.hop2_shift = HOP2_SHIFT;
+ prop->dmmu.hop3_shift = HOP3_SHIFT;
+ prop->dmmu.hop4_shift = HOP4_SHIFT;
+ prop->dmmu.hop0_mask = HOP0_MASK;
+ prop->dmmu.hop1_mask = HOP1_MASK;
+ prop->dmmu.hop2_mask = HOP2_MASK;
+ prop->dmmu.hop3_mask = HOP3_MASK;
+ prop->dmmu.hop4_mask = HOP4_MASK;
+ prop->dmmu.huge_page_size = PAGE_SIZE_2MB;
+
+	/* No difference between PMMU and DMMU except for the page size */
+ memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
prop->va_space_host_start_address = VA_HOST_SPACE_START;
prop->va_space_host_end_address = VA_HOST_SPACE_END;
prop->va_space_dram_start_address = VA_DDR_SPACE_START;
@@ -393,6 +416,9 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
}
/*
@@ -1454,6 +1480,9 @@ static void goya_init_golden_registers(struct hl_device *hdev)
1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
+ ICACHE_FETCH_LINE_NUM, 2);
}
WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
@@ -1533,7 +1562,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
- u64 qman_base_addr;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
@@ -1545,9 +1573,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
- qman_base_addr = hdev->asic_prop.sram_base_address +
- MME_QMAN_BASE_OFFSET;
-
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
@@ -2141,13 +2166,11 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
*/
static int goya_push_uboot_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_UBOOT_FW_FILE, dst);
}
/*
@@ -2160,13 +2183,11 @@ static int goya_push_uboot_to_device(struct hl_device *hdev)
*/
static int goya_push_linux_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
}
static int goya_pldm_init_cpu(struct hl_device *hdev)
@@ -2291,6 +2312,10 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
10000,
cpu_timeout);
+	/* Read the U-Boot version now in case we fail later */
+ goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
+ goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
if (rc) {
dev_err(hdev->dev, "Error in ARM u-boot!");
switch (status) {
@@ -2328,6 +2353,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
"ARM status %d - u-boot stopped by user\n",
status);
break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "ARM status %d - Thermal Sensor initialization failed\n",
+ status);
+ break;
default:
dev_err(hdev->dev,
"ARM status %d - Invalid status code\n",
@@ -2337,10 +2367,6 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
- /* Read U-Boot version now in case we will later fail */
- goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
- goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
-
if (!hdev->fw_loading) {
dev_info(hdev->dev, "Skip loading FW\n");
goto out;
@@ -2453,7 +2479,8 @@ int goya_mmu_init(struct hl_device *hdev)
WREG32_AND(mmSTLB_STLB_FEATURE_EN,
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
@@ -2978,9 +3005,6 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EIO;
- } else {
- dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
- hw_queue_id);
}
free_pkt:
@@ -3925,7 +3949,7 @@ static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
return 0;
dev_err(hdev->dev,
- "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
+ "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
@@ -3935,7 +3959,7 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct goya_device *goya = hdev->asic_specific;
- if (!parser->ext_queue)
+ if (parser->queue_type == QUEUE_TYPE_INT)
return goya_parse_cb_no_ext_queue(hdev, parser);
if (goya->hw_cap_initialized & HW_CAP_MMU)
@@ -4606,7 +4630,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkt++;
} while (--lin_dma_pkts_cnt);
- job = hl_cs_allocate_job(hdev, true);
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -4835,13 +4859,15 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
+static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+ u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -4880,7 +4906,8 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
u32 status, timeout_usec, inv_data, pi;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -5137,7 +5164,8 @@ static const struct hl_asic_funcs goya_funcs = {
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
- .halt_coresight = goya_halt_coresight
+ .halt_coresight = goya_halt_coresight,
+ .get_clk_rate = goya_get_clk_rate
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 89b6574f8e4f..c3230cb6e25c 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -233,4 +233,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index b4d406af1bed..c1ee6e2b5dff 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -8,6 +8,7 @@
#include "goyaP.h"
#include "include/goya/goya_coresight.h"
#include "include/goya/asic_reg/goya_regs.h"
+#include "include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -377,33 +378,32 @@ static int goya_config_etr(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
- u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
u32 val;
int rc;
- WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+ WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
- val = RREG32(base_reg + 0x304);
+ val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
- rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- WREG32(base_reg + 0x20, 0);
+ WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
input = params->input;
@@ -423,25 +423,26 @@ static int goya_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- WREG32(base_reg + 0x34, 0x3FFC);
- WREG32(base_reg + 0x4, input->buffer_size);
- WREG32(base_reg + 0x28, input->sink_mode);
- WREG32(base_reg + 0x110, 0x700);
- WREG32(base_reg + 0x118,
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ WREG32(mmPSOC_ETR_AXICTL,
+ 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
- WREG32(base_reg + 0x11C,
+ WREG32(mmPSOC_ETR_DBAHI,
upper_32_bits(input->buffer_address));
- WREG32(base_reg + 0x304, 3);
- WREG32(base_reg + 0x308, 0xA);
- WREG32(base_reg + 0x20, 1);
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0xA);
+ WREG32(mmPSOC_ETR_CTL, 1);
} else {
- WREG32(base_reg + 0x34, 0);
- WREG32(base_reg + 0x4, 0x400);
- WREG32(base_reg + 0x118, 0);
- WREG32(base_reg + 0x11C, 0);
- WREG32(base_reg + 0x308, 0);
- WREG32(base_reg + 0x28, 0);
- WREG32(base_reg + 0x304, 0);
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
@@ -451,8 +452,8 @@ static int goya_config_etr(struct hl_device *hdev,
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
*/
- rwp = RREG32(base_reg + 0x18);
- rwphi = RREG32(base_reg + 0x3c) & 0xff;
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
*(u64 *) params->output = ((u64) rwphi << 32) | rwp;
}
}
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index a2a700c3d597..b2ebc01e27f4 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -32,6 +32,37 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
}
}
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+{
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0) {
+ dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
+ value);
+ return value;
+ }
+
+ *max_clk = (value / 1000 / 1000);
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0) {
+ dev_err(hdev->dev,
+ "Failed to retrieve device current clock %ld\n",
+ value);
+ return value;
+ }
+
+ *cur_clk = (value / 1000 / 1000);
+
+ return 0;
+}
+
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
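
goya_get_clk_rate() is wired into hl_asic_funcs above as get_clk_rate, so common code can query frequencies without ASIC-specific knowledge. A minimal sketch of a hypothetical caller (hl_report_clocks() is not part of this series; the types come from habanalabs.h):

/* Query the current and maximum clock rates through the new hook. */
static int hl_report_clocks(struct hl_device *hdev)
{
	u32 cur_clk, max_clk;
	int rc;

	rc = hdev->asic_funcs->get_clk_rate(hdev, &cur_clk, &max_clk);
	if (rc)
		return rc;

	dev_info(hdev->dev, "clock: %u MHz (max %u MHz)\n",
		 cur_clk, max_clk);
	return 0;
}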
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 75862be53c60..00c949f4ccd1 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -40,8 +40,6 @@
#define HL_MAX_QUEUES 128
-#define HL_MAX_JOBS_PER_CS 64
-
/* MUST BE POWER OF 2 and larger than 1 */
#define HL_MAX_PENDING_CS 64
@@ -85,12 +83,15 @@ struct hl_fpriv;
* @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
* memories and/or operates the compute engines.
* @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
+ * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
+ * notifications are sent by H/W.
*/
enum hl_queue_type {
QUEUE_TYPE_NA,
QUEUE_TYPE_EXT,
QUEUE_TYPE_INT,
- QUEUE_TYPE_CPU
+ QUEUE_TYPE_CPU,
+ QUEUE_TYPE_HW
};
/**
@@ -98,10 +99,13 @@ enum hl_queue_type {
* @type: queue type.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
+ * @requires_kernel_cb: true if a CB handle must be provided for jobs on this
+ * queue, false otherwise (a CB address must be provided).
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
+ u8 requires_kernel_cb;
};
/**
@@ -110,8 +114,8 @@ struct hw_queue_properties {
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
enum vm_type_t {
- VM_TYPE_USERPTR,
- VM_TYPE_PHYS_PACK
+ VM_TYPE_USERPTR = 0x1,
+ VM_TYPE_PHYS_PACK = 0x2
};
/**
@@ -127,12 +131,44 @@ enum hl_device_hw_state {
};
/**
+ * struct hl_mmu_properties - ASIC specific MMU address translation properties.
+ * @hop0_shift: shift of hop 0 mask.
+ * @hop1_shift: shift of hop 1 mask.
+ * @hop2_shift: shift of hop 2 mask.
+ * @hop3_shift: shift of hop 3 mask.
+ * @hop4_shift: shift of hop 4 mask.
+ * @hop0_mask: mask to get the PTE address in hop 0.
+ * @hop1_mask: mask to get the PTE address in hop 1.
+ * @hop2_mask: mask to get the PTE address in hop 2.
+ * @hop3_mask: mask to get the PTE address in hop 3.
+ * @hop4_mask: mask to get the PTE address in hop 4.
+ * @page_size: default page size used to allocate memory.
+ * @huge_page_size: page size used to allocate memory with huge pages.
+ */
+struct hl_mmu_properties {
+ u64 hop0_shift;
+ u64 hop1_shift;
+ u64 hop2_shift;
+ u64 hop3_shift;
+ u64 hop4_shift;
+ u64 hop0_mask;
+ u64 hop1_mask;
+ u64 hop2_mask;
+ u64 hop3_mask;
+ u64 hop4_mask;
+ u32 page_size;
+ u32 huge_page_size;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @armcp_info: received various information from ArmCP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
* @preboot_ver: F/W Preboot version.
+ * @dmmu: DRAM MMU address translation properties.
+ * @pmmu: PCI (host) MMU address translation properties.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -169,53 +205,55 @@ enum hl_device_hw_state {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
- * @completion_queues_count: number of completion queues.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @tpc_enabled_mask: which TPCs are enabled.
+ * @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
- struct armcp_info armcp_info;
- char uboot_ver[VERSION_MAX_LEN];
- char preboot_ver[VERSION_MAX_LEN];
- u64 sram_base_address;
- u64 sram_end_address;
- u64 sram_user_base_address;
- u64 dram_base_address;
- u64 dram_end_address;
- u64 dram_user_base_address;
- u64 dram_size;
- u64 dram_pci_bar_size;
- u64 max_power_default;
- u64 va_space_host_start_address;
- u64 va_space_host_end_address;
- u64 va_space_dram_start_address;
- u64 va_space_dram_end_address;
- u64 dram_size_for_default_page_mapping;
- u64 pcie_dbi_base_address;
- u64 pcie_aux_dbi_reg_addr;
- u64 mmu_pgt_addr;
- u64 mmu_dram_default_page_addr;
- u32 mmu_pgt_size;
- u32 mmu_pte_size;
- u32 mmu_hop_table_size;
- u32 mmu_hop0_tables_total_size;
- u32 dram_page_size;
- u32 cfg_size;
- u32 sram_size;
- u32 max_asid;
- u32 num_of_events;
- u32 psoc_pci_pll_nr;
- u32 psoc_pci_pll_nf;
- u32 psoc_pci_pll_od;
- u32 psoc_pci_pll_div_factor;
- u32 high_pll;
- u32 cb_pool_cb_cnt;
- u32 cb_pool_cb_size;
- u8 completion_queues_count;
- u8 tpc_enabled_mask;
+ struct armcp_info armcp_info;
+ char uboot_ver[VERSION_MAX_LEN];
+ char preboot_ver[VERSION_MAX_LEN];
+ struct hl_mmu_properties dmmu;
+ struct hl_mmu_properties pmmu;
+ u64 sram_base_address;
+ u64 sram_end_address;
+ u64 sram_user_base_address;
+ u64 dram_base_address;
+ u64 dram_end_address;
+ u64 dram_user_base_address;
+ u64 dram_size;
+ u64 dram_pci_bar_size;
+ u64 max_power_default;
+ u64 va_space_host_start_address;
+ u64 va_space_host_end_address;
+ u64 va_space_dram_start_address;
+ u64 va_space_dram_end_address;
+ u64 dram_size_for_default_page_mapping;
+ u64 pcie_dbi_base_address;
+ u64 pcie_aux_dbi_reg_addr;
+ u64 mmu_pgt_addr;
+ u64 mmu_dram_default_page_addr;
+ u32 mmu_pgt_size;
+ u32 mmu_pte_size;
+ u32 mmu_hop_table_size;
+ u32 mmu_hop0_tables_total_size;
+ u32 dram_page_size;
+ u32 cfg_size;
+ u32 sram_size;
+ u32 max_asid;
+ u32 num_of_events;
+ u32 psoc_pci_pll_nr;
+ u32 psoc_pci_pll_nf;
+ u32 psoc_pci_pll_od;
+ u32 psoc_pci_pll_div_factor;
+ u32 high_pll;
+ u32 cb_pool_cb_cnt;
+ u32 cb_pool_cb_size;
+ u8 tpc_enabled_mask;
+ u8 completion_queues_count;
};
/**
@@ -236,8 +274,6 @@ struct hl_dma_fence {
* Command Buffers
*/
-#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
-
/**
* struct hl_cb_mgr - describes a Command Buffer Manager.
* @cb_lock: protects cb_handles.
@@ -481,8 +517,8 @@ enum hl_pll_frequency {
* @get_events_stat: retrieve event queue entries histogram.
* @read_pte: read MMU page table entry from DRAM.
* @write_pte: write MMU page table entry to DRAM.
- * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
- * hard (L0 & L1) flush.
+ * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
+ * (L1 only) or hard (L0 & L1) flush.
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
@@ -502,6 +538,7 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -562,7 +599,8 @@ struct hl_asic_funcs {
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
+ void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ u32 flags);
void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
@@ -584,6 +622,7 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
};
@@ -688,7 +727,7 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
- * @addr: user-space virtual pointer to the start of the memory area.
+ * @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
@@ -752,11 +791,14 @@ struct hl_cs {
* @userptr_list: linked-list of userptr mappings that belong to this job and
* wait for completion.
* @debugfs_list: node in debugfs list of command submission jobs.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @id: the id of this job inside a CS.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
- * @ext_queue: whether the job is for external queue or internal queue.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ * handle to a kernel-allocated CB object, false
+ * otherwise (SRAM/DRAM/host address).
*/
struct hl_cs_job {
struct list_head cs_node;
@@ -766,39 +808,44 @@ struct hl_cs_job {
struct work_struct finish_work;
struct list_head userptr_list;
struct list_head debugfs_list;
+ enum hl_queue_type queue_type;
u32 id;
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
- u8 ext_queue;
+ u8 is_kernel_allocated_cb;
};
/**
- * struct hl_cs_parser - command submission paerser properties.
+ * struct hl_cs_parser - command submission parser properties.
* @user_cb: the CB we got from the user.
* @patched_cb: in case of patching, this is internal CB which is submitted on
* the queue instead of the CB we got from the IOCTL.
* @job_userptr_list: linked-list of userptr mappings that belong to the related
* job and wait for completion.
* @cs_sequence: the sequence number of the related CS.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @ctx_id: the ID of the context the related CS belongs to.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @patched_cb_size: the size of the CB after parsing.
- * @ext_queue: whether the job is for external queue or internal queue.
* @job_id: the id of the related job inside the related CS.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ * handle to a kernel-allocated CB object, false
+ * otherwise (SRAM/DRAM/host address).
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
struct hl_cb *patched_cb;
struct list_head *job_userptr_list;
u64 cs_sequence;
+ enum hl_queue_type queue_type;
u32 ctx_id;
u32 hw_queue_id;
u32 user_cb_size;
u32 patched_cb_size;
- u8 ext_queue;
u8 job_id;
+ u8 is_kernel_allocated_cb;
};
@@ -1048,9 +1095,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
-#define WREG32_FIELD(reg, field, val) \
- WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
- (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
+ ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
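
WREG32_FIELD() keeps its read-modify-write semantics but now takes a per-instance offset: read the register, clear the field's mask bits, OR in the shifted value. A hedged usage sketch (the register/field pair is only illustrative, and DMA_QM_OFFSET is a hypothetical per-instance stride; the macro merely requires that reg##_field##_SHIFT and reg##_field##_MASK exist):

	/* Stop DMA QMAN instance 'i', assuming consecutive instances sit
	 * DMA_QM_OFFSET apart (hypothetical constant); only the DMA_STOP
	 * field changes, all other bits of the register are preserved.
	 */
	WREG32_FIELD(DMA_QM_0_GLBL_CFG1, i * DMA_QM_OFFSET, DMA_STOP, 1);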
/* Timeout should be longer when working with simulator but cap the
* increased timeout to some maximum
@@ -1501,7 +1549,8 @@ int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
void hl_cs_rollback_all(struct hl_device *hdev);
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void goya_set_asic_funcs(struct hl_device *hdev);
@@ -1513,7 +1562,7 @@ void hl_vm_fini(struct hl_device *hdev);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr);
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 66d9c710073c..6474b868ef27 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -60,11 +60,16 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
hw_ip.dram_size = prop->dram_size - dram_kmd_size;
- if (hw_ip.dram_size > 0)
+ if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.num_of_events = prop->num_of_events;
- memcpy(hw_ip.armcp_version,
- prop->armcp_info.armcp_version, VERSION_MAX_LEN);
+
+ memcpy(hw_ip.armcp_version, prop->armcp_info.armcp_version,
+ min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
+
+ memcpy(hw_ip.card_name, prop->armcp_info.card_name,
+ min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
+
hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
@@ -179,17 +184,14 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
goto out;
}
- if (output) {
- if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
- output,
- args->output_size)) {
- dev_err(hdev->dev,
- "copy to user failed in debug ioctl\n");
- rc = -EFAULT;
- goto out;
- }
+ if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+ output, args->output_size)) {
+ dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
+ rc = -EFAULT;
+ goto out;
}
+
out:
kfree(params);
kfree(output);
@@ -221,6 +223,41 @@ static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}
+static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_clk_rate clk_rate = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
+ &clk_rate.max_clk_rate_mhz);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &clk_rate,
+ min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
+}
+
+static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_reset_count reset_count = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+
+ return copy_to_user(out, &reset_count,
+ min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -239,6 +276,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
+ case HL_INFO_RESET_COUNT:
+ return get_reset_count(hdev, args);
+
default:
break;
}
@@ -271,6 +311,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = hw_events_info(hdev, true, args);
break;
+ case HL_INFO_CLK_RATE:
+ rc = get_clk_rate(hdev, args);
+ break;
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
@@ -406,9 +450,8 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = func(hpriv, kdata);
- if (cmd & IOC_OUT)
- if (copy_to_user((void __user *)arg, kdata, usize))
- retcode = -EFAULT;
+ if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
out_err:
if (retcode)
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 55b383b2a116..91579dde9262 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -58,8 +58,8 @@ out:
}
/*
- * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
- *
+ * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
* @ctl: BD's control word
@@ -73,8 +73,8 @@ out:
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
- u32 ctl, u32 len, u64 ptr)
+static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
+ struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -174,6 +174,45 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
+ * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * @hdev: Pointer to hl_device structure.
+ * @q: Pointer to hl_hw_queue structure.
+ * @num_of_entries: How many entries to check for space.
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the completion queue.
+ * This check also ensures that there is enough space in the h/w queue, as
+ * both queues are of the same size.
+ * - Reserve space in the completion queue (needs to be reversed if there
+ * is a failure down the road before the actual submission of work).
+ *
+ * Both operations are done using the "free_slots_cnt" field of the completion
+ * queue. The CI counters of the queue and the completion queue are not
+ * needed/used for the H/W queue type.
+ */
+static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
+ int num_of_entries)
+{
+ atomic_t *free_slots =
+ &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+
+ /*
+ * Check we have enough space in the completion queue.
+ * Subtract num_of_entries from the free-slots counter;
+ * atomic_add_negative() returns true if the result went negative,
+ * meaning the CQ is too full to accept the new CBs, so restore the
+ * counter and fail the submission.
+ */
+ if (atomic_add_negative(num_of_entries * -1, free_slots)) {
+ dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
+ num_of_entries, q->hw_queue_id);
+ atomic_add(num_of_entries, free_slots);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
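
The reservation above is a counting-semaphore idiom built from a plain atomic_t. A self-contained sketch of the same pattern (helper name hypothetical):

	#include <linux/atomic.h>

	/*
	 * Atomically reserve n slots from a free-slots counter, rolling the
	 * subtraction back if the counter went negative -- the same idiom
	 * hw_queue_sanity_checks() applies to cq->free_slots_cnt.
	 */
	static int try_reserve_slots(atomic_t *free_slots, int n)
	{
		if (atomic_add_negative(-n, free_slots)) {
			atomic_add(n, free_slots); /* undo over-subtraction */
			return -EAGAIN;		   /* queue full, retry later */
		}
		return 0;			   /* n slots reserved */
	}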
+
+/*
* hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
*
* @hdev: pointer to hl_device structure
@@ -188,7 +227,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- int rc;
+ int rc = 0;
/*
* The CPU queue is a synchronous queue with an effective depth of
@@ -206,11 +245,18 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- rc = ext_queue_sanity_checks(hdev, q, 1, false);
- if (rc)
- goto out;
+ /*
+ * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
+ * type only on init phase, when the queues are empty and being tested,
+ * so there is no need for sanity checks.
+ */
+ if (q->queue_type != QUEUE_TYPE_HW) {
+ rc = ext_queue_sanity_checks(hdev, q, 1, false);
+ if (rc)
+ goto out;
+ }
- ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
if (q->queue_type != QUEUE_TYPE_CPU)
@@ -220,14 +266,14 @@ out:
}
/*
- * ext_hw_queue_schedule_job - submit an JOB to an external queue
+ * ext_queue_schedule_job - submit a JOB to an external queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
+static void ext_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -260,7 +306,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
* H/W queues is done under the scheduler mutex
*
* No need to check if CQ is full because it was already
- * checked in hl_queue_sanity_checks
+ * checked in ext_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->hw_queue_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
@@ -274,18 +320,18 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
- ext_queue_submit_bd(hdev, q, ctl, len, ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
- * int_hw_queue_schedule_job - submit an JOB to an internal queue
+ * int_queue_schedule_job - submit a JOB to an internal queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+static void int_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -308,6 +354,60 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
}
/*
+ * hw_queue_schedule_job - submit a JOB to a H/W queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_cq *cq;
+ u64 ptr;
+ u32 offset, ctl, len;
+
+ /*
+ * Upon PQE completion, COMP_DATA is used as the write data to the
+ * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
+ * write address offset in the SM block (QMAN LBW message).
+ * The write address offset is calculated as "COMP_OFFSET << 2".
+ */
+ offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
+ ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
+
+ len = job->job_cb_size;
+
+ /*
+ * A patched CB is created only if a user CB was allocated by driver and
+ * MMU is disabled. If MMU is enabled, the user CB should be used
+ * instead. If the user CB wasn't allocated by driver, assume that it
+ * holds an address.
+ */
+ if (job->patched_cb)
+ ptr = job->patched_cb->bus_address;
+ else if (job->is_kernel_allocated_cb)
+ ptr = job->user_cb->bus_address;
+ else
+ ptr = (u64) (uintptr_t) job->user_cb;
+
+ /*
+ * No need to protect pi_offset because scheduling to the
+ * H/W queues is done under the scheduler mutex
+ *
+ * No need to check if CQ is full because it was already
+ * checked in hw_queue_sanity_checks
+ */
+ cq = &hdev->completion_queue[q->hw_queue_id];
+ cq->pi = hl_cq_inc_ptr(cq->pi);
+
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+}
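
The control-word packing above can be checked against the H/W CTL field definitions this patch adds to qman_if.h; for example, for pending-CS slot 5 and a queue PI of 0x1234:

	/*
	 * ctl = ((5 << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
	 *       ((0x1234 << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK)
	 *     = (0x00050000 & 0x00FF0000) | (0x00001234 & 0x0000FFFF)
	 *     = 0x00051234
	 *
	 * so COMP_DATA 0x1234 is written to the completion queue and the SM
	 * write lands at offset 5 << 2 = 0x14.
	 */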
+
+/*
* hl_hw_queue_schedule_cs - schedule a command submission
*
* @cs: pointer to the CS
@@ -330,23 +430,34 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
q = &hdev->kernel_queues[0];
- /* This loop assumes all external queues are consecutive */
for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_EXT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i], true);
- if (rc)
- goto unroll_cq_resv;
- cq_cnt++;
- }
- } else if (q->queue_type == QUEUE_TYPE_INT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ cs->jobs_in_queue_cnt[i], true);
+ break;
+ case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i]);
- if (rc)
- goto unroll_cq_resv;
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ default:
+ dev_err(hdev->dev, "Queue type %d is invalid\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
}
+
+ if (rc)
+ goto unroll_cq_resv;
+
+ if (q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW)
+ cq_cnt++;
}
}
@@ -373,21 +484,30 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- if (job->ext_queue)
- ext_hw_queue_schedule_job(job);
- else
- int_hw_queue_schedule_job(job);
+ switch (job->queue_type) {
+ case QUEUE_TYPE_EXT:
+ ext_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_INT:
+ int_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_HW:
+ hw_queue_schedule_job(job);
+ break;
+ default:
+ break;
+ }
cs->submitted = true;
goto out;
unroll_cq_resv:
- /* This loop assumes all external queues are consecutive */
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT) &&
- (cs->jobs_in_queue_cnt[i])) {
+ if ((q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW) &&
+ cs->jobs_in_queue_cnt[i]) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -414,8 +534,8 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
q->ci = hl_queue_inc_ptr(q->ci);
}
-static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
- struct hl_hw_queue *q, bool is_cpu_queue)
+static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ bool is_cpu_queue)
{
void *p;
int rc;
@@ -465,7 +585,7 @@ free_queue:
return rc;
}
-static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
@@ -485,18 +605,38 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
return 0;
}
-static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_queue_init(hdev, q, true);
+}
+
+static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, true);
+ return ext_and_cpu_queue_init(hdev, q, false);
}
-static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, false);
+ void *p;
+
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->kernel_address = (u64) (uintptr_t) p;
+
+ /* Make sure read/write pointers are initialized to start of queue */
+ q->ci = 0;
+ q->pi = 0;
+
+ return 0;
}
/*
- * hw_queue_init - main initialization function for H/W queue object
+ * queue_init - main initialization function for H/W queue object
*
* @hdev: pointer to hl_device device structure
* @q: pointer to hl_hw_queue queue structure
@@ -505,7 +645,7 @@ static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
* Allocate dma-able memory for the queue and initialize fields
* Returns 0 on success
*/
-static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
u32 hw_queue_id)
{
int rc;
@@ -516,21 +656,20 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
- rc = ext_hw_queue_init(hdev, q);
+ rc = ext_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_INT:
- rc = int_hw_queue_init(hdev, q);
+ rc = int_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_CPU:
- rc = cpu_hw_queue_init(hdev, q);
+ rc = cpu_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_NA:
q->valid = 0;
return 0;
-
default:
dev_crit(hdev->dev, "wrong queue type %d during init\n",
q->queue_type);
@@ -554,7 +693,7 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
*
* Free the queue memory
*/
-static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
if (!q->valid)
return;
@@ -612,7 +751,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
- rc = hw_queue_init(hdev, q, i);
+ rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
@@ -624,7 +763,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
release_queues:
for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -637,7 +776,7 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
int i;
for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
}
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 8618891d5afa..3c44ef3a23ed 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -260,4 +260,6 @@
#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+
#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 19b0f0ef1d0b..fce490e6a231 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -84,6 +84,7 @@
#include "tpc6_rtr_regs.h"
#include "tpc7_nrtr_regs.h"
#include "tpc0_eml_cfg_regs.h"
+#include "psoc_etr_regs.h"
#include "psoc_global_conf_masks.h"
#include "dma_macro_masks.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..b7c33e025db5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 4cd04c090285..2853a2de8cf6 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -20,6 +20,8 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_DRAM_INIT_FAIL,
CPU_BOOT_STATUS_FIT_CORRUPTED,
CPU_BOOT_STATUS_UBOOT_NOT_READY,
+ CPU_BOOT_STATUS_RESERVED,
+ CPU_BOOT_STATUS_TS_INIT_FAIL,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 71ea3c3e8ba3..a6851a9d3f03 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -12,18 +12,16 @@
#define PAGE_SHIFT_2MB 21
#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
-#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
#define LAST_MASK 0x0000000000800ull
-#define PHYS_ADDR_MASK 0xFFFFFFFFFFFFF000ull
#define HOP0_MASK 0x3000000000000ull
#define HOP1_MASK 0x0FF8000000000ull
#define HOP2_MASK 0x0007FC0000000ull
#define HOP3_MASK 0x000003FE00000ull
#define HOP4_MASK 0x00000001FF000ull
-#define OFFSET_MASK 0x0000000000FFFull
+#define FLAGS_MASK 0x0000000000FFFull
#define HOP0_SHIFT 48
#define HOP1_SHIFT 39
@@ -31,8 +29,7 @@
#define HOP3_SHIFT 21
#define HOP4_SHIFT 12
-#define PTE_PHYS_ADDR_SHIFT 12
-#define PTE_PHYS_ADDR_MASK ~OFFSET_MASK
+#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
#define HOP_TABLE_SIZE PAGE_SIZE_4KB
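
With the low 12 bits regrouped under FLAGS_MASK, any PTE splits cleanly into a hop physical address and its flag bits; for instance, a present, last-hop PTE of 0x0000001234567801 decomposes as:

	/*
	 * pte & HOP_PHYS_ADDR_MASK = 0x0000001234567000  (physical page/hop)
	 * pte & FLAGS_MASK         = 0x801               (LAST | PRESENT)
	 */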
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
index bf59bbe27fdc..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -23,6 +23,8 @@ struct hl_bd {
#define HL_BD_SIZE sizeof(struct hl_bd)
/*
+ * S/W CTL FIELDS.
+ *
* BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
* valid. 1 means the repeat field is valid, 0 means not-valid,
* i.e. repeat == 1
@@ -34,6 +36,16 @@ struct hl_bd {
#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
/*
+ * H/W CTL FIELDS.
+ */
+
+#define BD_CTL_COMP_OFFSET_SHIFT 16
+#define BD_CTL_COMP_OFFSET_MASK 0x00FF0000
+
+#define BD_CTL_COMP_DATA_SHIFT 0
+#define BD_CTL_COMP_DATA_MASK 0x0000FFFF
+
+/*
* COMPLETION QUEUE
*/
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 365fb0cb8dff..6c72cb4eff54 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/genalloc.h>
-#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG 0
/*
@@ -159,20 +158,19 @@ pages_pack_err:
}
/*
- * get_userptr_from_host_va - initialize userptr structure from given host
- * virtual address
- *
- * @hdev : habanalabs device structure
- * @args : parameters containing the virtual address and size
- * @p_userptr : pointer to result userptr structure
+ * dma_map_host_va - DMA mapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @p_userptr: pointer to result userptr structure
*
* This function does the following:
* - Allocate userptr structure
* - Pin the given host memory using the userptr structure
* - Perform DMA mapping to have the DMA addresses of the pages
*/
-static int get_userptr_from_host_va(struct hl_device *hdev,
- struct hl_mem_in *args, struct hl_userptr **p_userptr)
+static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr **p_userptr)
{
struct hl_userptr *userptr;
int rc;
@@ -183,8 +181,7 @@ static int get_userptr_from_host_va(struct hl_device *hdev,
goto userptr_err;
}
- rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
- args->map_host.mem_size, userptr);
+ rc = hl_pin_host_memory(hdev, addr, size, userptr);
if (rc) {
dev_err(hdev->dev, "Failed to pin host memory\n");
goto pin_err;
@@ -215,16 +212,16 @@ userptr_err:
}
/*
- * free_userptr - free userptr structure
- *
- * @hdev : habanalabs device structure
- * @userptr : userptr to free
+ * dma_unmap_host_va - DMA unmapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @userptr: userptr to free
*
* This function does the following:
* - Unpins the physical pages
* - Frees the userptr structure
*/
-static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+static void dma_unmap_host_va(struct hl_device *hdev,
+ struct hl_userptr *userptr)
{
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
@@ -253,10 +250,9 @@ static void dram_pg_pool_do_release(struct kref *ref)
}
/*
- * free_phys_pg_pack - free physical page pack
- *
- * @hdev : habanalabs device structure
- * @phys_pg_pack : physical page pack to free
+ * free_phys_pg_pack - free physical page pack
+ * @hdev: habanalabs device structure
+ * @phys_pg_pack: physical page pack to free
*
* This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block
@@ -264,7 +260,7 @@ static void dram_pg_pool_do_release(struct kref *ref)
* - Free the hl_vm_phys_pg_pack structure
*/
static void free_phys_pg_pack(struct hl_device *hdev,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
@@ -519,8 +515,8 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 size, u64 hint_addr,
- bool is_userptr)
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
+ bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
u64 valid_start, valid_size, prev_start, prev_end, page_mask,
@@ -528,18 +524,17 @@ static u64 get_va_block(struct hl_device *hdev,
u32 page_size;
bool add_prev = false;
- if (is_userptr) {
+ if (is_userptr)
/*
* We cannot know if the user allocated memory with huge pages
* or not, hence we continue with the biggest possible
* granularity.
*/
- page_size = PAGE_SIZE_2MB;
- page_mask = PAGE_MASK_2MB;
- } else {
- page_size = hdev->asic_prop.dram_page_size;
- page_mask = ~((u64)page_size - 1);
- }
+ page_size = hdev->asic_prop.pmmu.huge_page_size;
+ else
+ page_size = hdev->asic_prop.dmmu.page_size;
+
+ page_mask = ~((u64)page_size - 1);
mutex_lock(&va_range->lock);
@@ -549,7 +544,6 @@ static u64 get_va_block(struct hl_device *hdev,
/* calc the first possible aligned addr */
valid_start = va_block->start;
-
if (valid_start & (page_size - 1)) {
valid_start &= page_mask;
valid_start += page_size;
@@ -561,7 +555,6 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size >= size &&
(!new_va_block || valid_size < res_valid_size)) {
-
new_va_block = va_block;
res_valid_start = valid_start;
res_valid_size = valid_size;
@@ -631,11 +624,10 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
/*
* init_phys_pg_pack_from_userptr - initialize physical page pack from host
- * memory
- *
- * @ctx : current context
- * @userptr : userptr to initialize from
- * @pphys_pg_pack : res pointer
+ * memory
+ * @ctx: current context
+ * @userptr: userptr to initialize from
+ * @pphys_pg_pack: result pointer
*
* This function does the following:
* - Pin the physical pages related to the given virtual block
@@ -643,16 +635,19 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
* virtual block
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
- struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_userptr *userptr,
+ struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
+ struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE;
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = mmu_prop->huge_page_size;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -675,14 +670,14 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
total_npages += npages;
- if ((npages % PGS_IN_2MB_PAGE) ||
- (dma_addr & (PAGE_SIZE_2MB - 1)))
+ if ((npages % pgs_in_huge_page) ||
+ (dma_addr & (huge_page_size - 1)))
is_huge_page_opt = false;
}
if (is_huge_page_opt) {
- page_size = PAGE_SIZE_2MB;
- total_npages /= PGS_IN_2MB_PAGE;
+ page_size = huge_page_size;
+ do_div(total_npages, pgs_in_huge_page);
}
page_mask = ~(((u64) page_size) - 1);
@@ -714,7 +709,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
dma_addr += page_size;
if (is_huge_page_opt)
- npages -= PGS_IN_2MB_PAGE;
+ npages -= pgs_in_huge_page;
else
npages--;
}
@@ -731,19 +726,18 @@ page_pack_arr_mem_err:
}
/*
- * map_phys_page_pack - maps the physical page pack
- *
- * @ctx : current context
- * @vaddr : start address of the virtual area to map from
- * @phys_pg_pack : the pack of physical pages to map to
+ * map_phys_pg_pack - maps the physical page pack.
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to map from
+ * @phys_pg_pack: the pack of physical pages to map to
*
* This function does the following:
* - Maps each chunk of virtual memory to matching physical chunk
* - Stores number of successful mappings in the given argument
- * - Returns 0 on success, error code otherwise.
+ * - Returns 0 on success, error code otherwise
*/
-static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
@@ -783,6 +777,36 @@ err:
return rc;
}
+/*
+ * unmap_phys_pg_pack - unmaps the physical page pack
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to unmap
+ * @phys_pg_pack: the pack of physical pages to unmap
+ */
+static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 next_vaddr, i;
+ u32 page_size;
+
+ page_size = phys_pg_pack->page_size;
+ next_vaddr = vaddr;
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+ /*
+ * unmapping on Palladium can be really long, so avoid a CPU
+ * soft lockup bug by sleeping a little between unmapping pages
+ */
+ if (hdev->pldm)
+ usleep_range(500, 1000);
+ }
+}
+
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *paddr)
{
@@ -839,7 +863,10 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*device_addr = 0;
if (is_userptr) {
- rc = get_userptr_from_host_va(hdev, args, &userptr);
+ u64 addr = args->map_host.host_virt_addr,
+ size = args->map_host.mem_size;
+
+ rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) {
dev_err(hdev->dev, "failed to get userptr from va\n");
return rc;
@@ -850,7 +877,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
- args->map_host.host_virt_addr);
+ addr);
goto init_page_pack_err;
}
@@ -909,7 +936,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
mutex_lock(&ctx->mmu_lock);
- rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
+ rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
mutex_unlock(&ctx->mmu_lock);
dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
@@ -917,7 +944,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
@@ -955,7 +982,7 @@ shared_err:
free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
if (is_userptr)
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
return rc;
}
@@ -965,20 +992,20 @@ init_page_pack_err:
*
* @ctx : current context
* @vaddr : device virtual address to unmap
+ * @ctx_free : true if in context free flow, false otherwise.
*
* This function does the following:
* - Unmap the physical pages related to the given virtual address
* - return the device virtual block to the virtual block list
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
- u64 next_vaddr, i;
- u32 page_size;
bool is_userptr;
int rc;
@@ -1003,9 +1030,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
+ va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1014,6 +1042,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
}
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
+ va_range = &ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1029,42 +1058,41 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
goto mapping_cnt_err;
}
- page_size = phys_pg_pack->page_size;
- vaddr &= ~(((u64) page_size) - 1);
-
- next_vaddr = vaddr;
+ vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&ctx->mmu_lock);
- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
- dev_warn_ratelimited(hdev->dev,
- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
-
- /* unmapping on Palladium can be really long, so avoid a CPU
- * soft lockup bug by sleeping a little between unmapping pages
- */
- if (hdev->pldm)
- usleep_range(500, 1000);
- }
+ unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ /*
+ * During context free this function is called in a loop to clean all
+ * the context mappings. Hence the cache invalidation can be called once
+ * at the loop end rather than for each iteration.
+ */
+ if (!ctx_free)
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
mutex_unlock(&ctx->mmu_lock);
- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- vaddr,
- vaddr + phys_pg_pack->total_size - 1))
- dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
- vaddr);
+ /*
+ * No point in maintaining the free VA block list if the context is
+ * closing as the list will be freed anyway
+ */
+ if (!ctx_free) {
+ rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (rc)
+ dev_warn(hdev->dev,
+ "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+ }
atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);
if (is_userptr) {
free_phys_pg_pack(hdev, phys_pg_pack);
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
}
return 0;
@@ -1189,8 +1217,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx,
- args->in.unmap.device_virt_addr);
+ rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
+ false);
break;
default:
@@ -1203,20 +1231,72 @@ out:
return rc;
}
+static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
+ u32 npages, u64 start, u32 offset,
+ struct hl_userptr *userptr)
+{
+ int rc;
+
+ if (!access_ok((void __user *) (uintptr_t) addr, size)) {
+ dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
+ return -EFAULT;
+ }
+
+ userptr->vec = frame_vector_create(npages);
+ if (!userptr->vec) {
+ dev_err(hdev->dev, "Failed to create frame vector\n");
+ return -ENOMEM;
+ }
+
+ rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ userptr->vec);
+
+ if (rc != npages) {
+ dev_err(hdev->dev,
+ "Failed to map host memory, user ptr probably wrong\n");
+ if (rc < 0)
+ goto destroy_framevec;
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ if (frame_vector_to_pages(userptr->vec) < 0) {
+ dev_err(hdev->dev,
+ "Failed to translate frame vector to pages\n");
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ rc = sg_alloc_table_from_pages(userptr->sgt,
+ frame_vector_pages(userptr->vec),
+ npages, offset, size, GFP_ATOMIC);
+ if (rc < 0) {
+ dev_err(hdev->dev, "failed to create SG table from pages\n");
+ goto put_framevec;
+ }
+
+ return 0;
+
+put_framevec:
+ put_vaddr_frames(userptr->vec);
+destroy_framevec:
+ frame_vector_destroy(userptr->vec);
+ return rc;
+}
+
/*
- * hl_pin_host_memory - pins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @addr : the user-space virtual address of the memory area
- * @size : the size of the memory area
- * @userptr : pointer to hl_userptr structure
+ * hl_pin_host_memory - pins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Pins the physical pages
- * - Create a SG list from those pages
+ * - Create an SG list from those pages
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
- struct hl_userptr *userptr)
+ struct hl_userptr *userptr)
{
u64 start, end;
u32 npages, offset;
@@ -1227,11 +1307,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
- if (!access_ok((void __user *) (uintptr_t) addr, size)) {
- dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
- return -EFAULT;
- }
-
/*
* If the combination of the address and size requested for this memory
* region causes an integer overflow, return error.
@@ -1244,6 +1319,14 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ /*
+ * This function can also be called from the data path, hence always use
+ * GFP_ATOMIC as this is not a large allocation.
+ */
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ if (!userptr->sgt)
+ return -ENOMEM;
+
start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size);
@@ -1254,42 +1337,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node);
- userptr->vec = frame_vector_create(npages);
- if (!userptr->vec) {
- dev_err(hdev->dev, "Failed to create frame vector\n");
- return -ENOMEM;
- }
-
- rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- userptr->vec);
-
- if (rc != npages) {
- dev_err(hdev->dev,
- "Failed to map host memory, user ptr probably wrong\n");
- if (rc < 0)
- goto destroy_framevec;
- rc = -EFAULT;
- goto put_framevec;
- }
-
- if (frame_vector_to_pages(userptr->vec) < 0) {
+ rc = get_user_memory(hdev, addr, size, npages, start, offset,
+ userptr);
+ if (rc) {
dev_err(hdev->dev,
- "Failed to translate frame vector to pages\n");
- rc = -EFAULT;
- goto put_framevec;
- }
-
- userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
- if (!userptr->sgt) {
- rc = -ENOMEM;
- goto put_framevec;
- }
-
- rc = sg_alloc_table_from_pages(userptr->sgt,
- frame_vector_pages(userptr->vec),
- npages, offset, size, GFP_ATOMIC);
- if (rc < 0) {
- dev_err(hdev->dev, "failed to create SG table from pages\n");
+ "failed to get user memory for address 0x%llx\n",
+ addr);
goto free_sgt;
}
@@ -1299,34 +1352,28 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
free_sgt:
kfree(userptr->sgt);
-put_framevec:
- put_vaddr_frames(userptr->vec);
-destroy_framevec:
- frame_vector_destroy(userptr->vec);
return rc;
}
/*
- * hl_unpin_host_memory - unpins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr : pointer to hl_userptr structure
+ * hl_unpin_host_memory - unpins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Unpins the physical pages related to the host memory
* - Free the SG list
*/
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
struct page **pages;
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sg(hdev,
- userptr->sgt->sgl,
- userptr->sgt->nents,
- userptr->dir);
+ hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents,
+ userptr->dir);
pages = frame_vector_pages(userptr->vec);
if (!IS_ERR(pages)) {
@@ -1342,8 +1389,6 @@ int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
sg_free_table(userptr->sgt);
kfree(userptr->sgt);
-
- return 0;
}
/*
@@ -1542,43 +1587,16 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
* @hdev: pointer to the habanalabs structure
* @va_range: pointer to virtual addresses range
*
- * This function initializes the following:
- * - Checks that the given range contains the whole initial range
+ * This function does the following:
* - Frees the virtual addresses block list and its lock
*/
static void hl_va_range_fini(struct hl_device *hdev,
struct hl_va_range *va_range)
{
- struct hl_vm_va_block *va_block;
-
- if (list_empty(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not be empty on cleanup!\n");
- goto out;
- }
-
- if (!list_is_singular(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not contain multiple blocks on cleanup!\n");
- goto free_va_list;
- }
-
- va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
-
- if (va_block->start != va_range->start_addr ||
- va_block->end != va_range->end_addr) {
- dev_warn(hdev->dev,
- "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
- va_block->start, va_block->end);
- goto free_va_list;
- }
-
-free_va_list:
mutex_lock(&va_range->lock);
clear_va_list_locked(hdev, &va_range->list);
mutex_unlock(&va_range->lock);
-out:
mutex_destroy(&va_range->lock);
}
@@ -1613,21 +1631,31 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
- if (!hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
+ /*
+ * Something clearly went wrong on hard reset, so there is no point in
+ * printing yet another side-effect error.
+ */
+ if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+ dev_notice(hdev->dev,
+ "ctx %d is freed while it has va in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr);
+ unmap_device_va(ctx, hnode->vaddr, true);
}
+ /* invalidate the cache once after the unmapping loop */
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev,
- "page list 0x%p of asid %d is still alive\n",
+ "page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size,
&hdev->dram_used_mem);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 176c315836f1..6262b26e2086 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -25,10 +25,9 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
return pgt_info;
}
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
struct hl_device *hdev = ctx->hdev;
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
hdev->asic_prop.mmu_hop_table_size);
@@ -37,6 +36,13 @@ static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
kfree(pgt_info);
}
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ _free_hop(ctx, pgt_info);
+}
+
static u64 alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -105,8 +111,8 @@ static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
* clear the 12 LSBs and translate the shadow hop to its associated
* physical hop, and add back the original 12 LSBs.
*/
- u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
- (val & OFFSET_MASK);
+ u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
+ (val & FLAGS_MASK);
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
get_phys_addr(ctx, shadow_pte_addr),
@@ -159,7 +165,7 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
- free_hop(ctx, hop_addr);
+ _free_hop(ctx, pgt_info);
return num_of_ptes_left;
}
@@ -171,35 +177,50 @@ static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
((virt_addr & mask) >> shift);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
+ mmu_prop->hop0_shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
+ mmu_prop->hop1_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
+ mmu_prop->hop2_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
+ mmu_prop->hop3_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
+ mmu_prop->hop4_shift);
}
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
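
The hop-PTE address arithmetic (hop_addr + HL_PTE_SIZE * ((virt_addr & mask) >> shift)) is unchanged; only the mask/shift source moved into hl_mmu_properties. A worked example with the hop-3 values above (mask 0x000003FE00000, shift 21) for virt_addr 0x40200000:

	/*
	 * index    = (0x40200000 & 0x3FE00000) >> 21 = 1
	 * pte_addr = hop3_addr + HL_PTE_SIZE * 1     = hop3_addr + 8
	 */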
@@ -288,23 +309,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
}
/* need only pte 0 in hops 0 and 1 */
- pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_addr, pte_val);
- pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_addr, pte_val);
get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
+ pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, pte_val);
get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
- pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
+ pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
LAST_MASK | PAGE_PRESENT_MASK;
for (i = 0 ; i < num_of_hop3 ; i++) {
@@ -400,8 +421,6 @@ int hl_mmu_init(struct hl_device *hdev)
if (!hdev->mmu_enable)
return 0;
- /* MMU H/W init was already done in device hw_init() */
-
hdev->mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
@@ -427,6 +446,8 @@ int hl_mmu_init(struct hl_device *hdev)
goto err_pool_add;
}
+ /* MMU H/W init will be done in device hw_init() */
+
return 0;
err_pool_add:
@@ -450,10 +471,10 @@ void hl_mmu_fini(struct hl_device *hdev)
if (!hdev->mmu_enable)
return;
+ /* MMU H/W fini was already done in device hw_fini() */
+
kvfree(hdev->mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_pgt_pool);
-
- /* MMU H/W fini will be done in device hw_fini() */
}
/**
@@ -501,36 +522,36 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
dram_default_mapping_fini(ctx);
if (!hash_empty(ctx->mmu_shadow_hash))
- dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- free_hop(ctx, pgt_info->shadow_addr);
+ _free_hop(ctx, pgt_info);
}
mutex_destroy(&ctx->mmu_lock);
}
-static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
+static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
hop3_addr = 0, hop3_pte_addr = 0,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte;
- bool is_dram_addr, is_huge, clear_hop3 = true;
+ bool is_huge, clear_hop3 = true;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
@@ -539,7 +560,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
@@ -548,7 +569,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
@@ -557,7 +578,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
@@ -575,7 +596,8 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
@@ -584,7 +606,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
@@ -667,25 +689,36 @@ not_mapped:
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr;
u32 real_page_size, npages;
int i, rc;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and unmap them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it into sub-pages and unmap them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
- page_size);
+ "page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
@@ -694,7 +727,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_unmap(ctx, real_virt_addr);
+ rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
return rc;
@@ -705,10 +738,11 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size)
+ u32 page_size, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
@@ -716,21 +750,19 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte = 0;
bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge, is_dram_addr;
+ hop4_new = false, is_huge;
int rc = -ENOMEM;
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * This mapping function can map a 4KB/2MB page. For 2MB page there are
- * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
- * pages only but user memory could have been allocated with one of the
- * two page sizes. Since this is a common code for all the three cases,
- * we need this hugs page check.
+ * This mapping function can map a page or a huge page. For huge page
+ * there are only 3 hops rather than 4. Currently the DRAM allocation
+ * uses huge pages only but user memory could have been allocated with
+ * one of the two page sizes. Since this is common code for all
+ * three cases, we need this huge page check.
*/
- is_huge = page_size == PAGE_SIZE_2MB;
-
- is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_huge = page_size == mmu_prop->huge_page_size;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
@@ -738,28 +770,28 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
if (hop1_addr == ULLONG_MAX)
goto err;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
if (hop2_addr == ULLONG_MAX)
goto err;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
if (hop3_addr == ULLONG_MAX)
goto err;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
if (!is_huge) {
@@ -767,13 +799,14 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop4_addr == ULLONG_MAX)
goto err;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
@@ -813,7 +846,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
| PAGE_PRESENT_MASK;
if (is_huge)
@@ -823,25 +856,25 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop1_new) {
curr_pte =
- (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_pte_addr, curr_pte);
}
if (hop2_new) {
curr_pte =
- (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_pte_addr, curr_pte);
get_pte(ctx, hop1_addr);
}
if (hop3_new) {
curr_pte =
- (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, curr_pte);
get_pte(ctx, hop2_addr);
}
if (!is_huge) {
if (hop4_new) {
- curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
+ curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop3_pte_addr, curr_pte);
get_pte(ctx, hop3_addr);
@@ -890,25 +923,36 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and map them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it into sub-pages and map them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't map\n",
- page_size);
+ "page size of %u is not %dKB nor %dMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
@@ -923,7 +967,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
- real_page_size);
+ real_page_size, is_dram_addr);
if (rc)
goto err;
@@ -937,7 +981,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (_hl_mmu_unmap(ctx, real_virt_addr))
+ if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 94dfb9e40e29..1aa433a7f66c 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/char/hpilo.h
*
diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h
index e140ada8fe2c..0e1756fffeae 100644
--- a/drivers/misc/ibmvmc.h
+++ b/drivers/misc/ibmvmc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* linux/drivers/misc/ibmvmc.h
*
* IBM Power Systems Virtual Management Channel Support.
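
Both header fixes above follow the kernel licensing convention that C source files carry the SPDX identifier in a C99 // comment while header files must use a /* */ block comment (see Documentation/process/license-rules.rst). A two-line reminder of the expected first lines:

/* First line of a C source (.c) file: */
// SPDX-License-Identifier: GPL-2.0

/* First line of a C header (.h) file: */
/* SPDX-License-Identifier: GPL-2.0 */
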
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 057d7bbde402..dd65cedf3b12 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -16,7 +16,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
@@ -434,23 +434,23 @@ int lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_poll(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
int x, y, z;
mutex_lock(&lis3->mutex);
lis3lv02d_get_xyz(lis3, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_Z, z);
+ input_sync(input);
mutex_unlock(&lis3->mutex);
}
-static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+static int lis3lv02d_joystick_open(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
if (lis3->pm_dev)
pm_runtime_get_sync(lis3->pm_dev);
@@ -461,12 +461,14 @@ static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
* Update coordinates for the case where poll interval is 0 and
 * the chip is running purely under interrupt control
*/
- lis3lv02d_joystick_poll(pidev);
+ lis3lv02d_joystick_poll(input);
+
+ return 0;
}
-static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_close(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
atomic_set(&lis3->wake_thread, 0);
if (lis3->pm_dev)
@@ -497,7 +499,7 @@ out:
static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- struct input_dev *dev = lis3->idev->input;
+ struct input_dev *dev = lis3->idev;
u8 click_src;
mutex_lock(&lis3->mutex);
@@ -677,26 +679,19 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
if (lis3->idev)
return -EINVAL;
- lis3->idev = input_allocate_polled_device();
- if (!lis3->idev)
+ input_dev = input_allocate_device();
+ if (!input_dev)
return -ENOMEM;
- lis3->idev->poll = lis3lv02d_joystick_poll;
- lis3->idev->open = lis3lv02d_joystick_open;
- lis3->idev->close = lis3lv02d_joystick_close;
- lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
- lis3->idev->poll_interval_min = MDPS_POLL_MIN;
- lis3->idev->poll_interval_max = MDPS_POLL_MAX;
- lis3->idev->private = lis3;
- input_dev = lis3->idev->input;
-
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
input_dev->dev.parent = &lis3->pdev->dev;
- set_bit(EV_ABS, input_dev->evbit);
+ input_dev->open = lis3lv02d_joystick_open;
+ input_dev->close = lis3lv02d_joystick_close;
+
max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
if (lis3->whoami == WAI_12B) {
fuzz = LIS3_DEFAULT_FUZZ_12B;
@@ -712,17 +707,32 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ input_set_drvdata(input_dev, lis3);
+ lis3->idev = input_dev;
+
+ err = input_setup_polling(input_dev, lis3lv02d_joystick_poll);
+ if (err)
+ goto err_free_input;
+
+ input_set_poll_interval(input_dev, MDPS_POLL_INTERVAL);
+ input_set_min_poll_interval(input_dev, MDPS_POLL_MIN);
+ input_set_max_poll_interval(input_dev, MDPS_POLL_MAX);
+
lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
- err = input_register_polled_device(lis3->idev);
- if (err) {
- input_free_polled_device(lis3->idev);
- lis3->idev = NULL;
- }
+ err = input_register_device(lis3->idev);
+ if (err)
+ goto err_free_input;
+ return 0;
+
+err_free_input:
+ input_free_device(input_dev);
+ lis3->idev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
@@ -738,8 +748,7 @@ void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
if (lis3->irq)
misc_deregister(&lis3->miscdev);
- input_unregister_polled_device(lis3->idev);
- input_free_polled_device(lis3->idev);
+ input_unregister_device(lis3->idev);
lis3->idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -895,10 +904,9 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
(p->click_thresh_y << 4));
if (lis3->idev) {
- struct input_dev *input_dev = lis3->idev->input;
- input_set_capability(input_dev, EV_KEY, BTN_X);
- input_set_capability(input_dev, EV_KEY, BTN_Y);
- input_set_capability(input_dev, EV_KEY, BTN_Z);
+ input_set_capability(lis3->idev, EV_KEY, BTN_X);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Y);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Z);
}
}
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index 1b0c99883c57..c394c0b08519 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -6,7 +6,7 @@
* Copyright (C) 2008-2009 Eric Piel
*/
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
@@ -281,7 +281,7 @@ struct lis3lv02d {
* (1/1000th of earth gravity)
*/
- struct input_polled_dev *idev; /* input device */
+ struct input_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
struct regulator_bulk_data regulators[2];
atomic_t count; /* interrupt count after last read */
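
The conversion above is the now-standard replacement for the retired input-polldev layer: the driver owns a regular struct input_dev, hands a poll function to input_setup_polling(), and stashes its state with input_set_drvdata(). A minimal sketch of the same pattern for a hypothetical driver (the my_* names are illustrative):

#include <linux/input.h>

static void my_poll(struct input_dev *input)
{
	struct my_priv *priv = input_get_drvdata(input);

	input_report_abs(input, ABS_X, my_read_x(priv));
	input_sync(input);
}

static int my_register_input(struct my_priv *priv)
{
	struct input_dev *input;
	int err;

	input = devm_input_allocate_device(priv->dev);
	if (!input)
		return -ENOMEM;

	input->name = "my accelerometer";
	input_set_drvdata(input, priv);
	input_set_abs_params(input, ABS_X, -512, 512, 4, 8);

	err = input_setup_polling(input, my_poll);
	if (err)
		return err;
	input_set_poll_interval(input, 100);	/* ms */

	return input_register_device(input);
}
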
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index 0a146b32da13..de7c5ab528d9 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -6,14 +6,6 @@
#include "lkdtm.h"
#include <linux/refcount.h>
-#ifdef CONFIG_REFCOUNT_FULL
-#define REFCOUNT_MAX (UINT_MAX - 1)
-#define REFCOUNT_SATURATED UINT_MAX
-#else
-#define REFCOUNT_MAX INT_MAX
-#define REFCOUNT_SATURATED (INT_MIN / 2)
-#endif
-
static void overflow_check(refcount_t *ref)
{
switch (refcount_read(ref)) {
@@ -127,7 +119,7 @@ void lkdtm_REFCOUNT_DEC_ZERO(void)
static void check_negative(refcount_t *ref, int start)
{
/*
- * CONFIG_REFCOUNT_FULL refuses to move a refcount at all on an
+ * refcount_t refuses to move a refcount at all on an
* over-sub, so we have to track our starting position instead of
* looking only at zero-pinning.
*/
@@ -210,7 +202,6 @@ static void check_from_zero(refcount_t *ref)
/*
* A refcount_inc() from zero should pin to zero or saturate and may WARN.
- * Only CONFIG_REFCOUNT_FULL provides this protection currently.
*/
void lkdtm_REFCOUNT_INC_ZERO(void)
{
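
With the local copies gone, REFCOUNT_MAX and REFCOUNT_SATURATED come from <linux/refcount.h> itself, since the saturating behavior is now unconditional rather than gated on CONFIG_REFCOUNT_FULL. A minimal sketch of the invariant these tests probe (illustrative only, not part of lkdtm):

#include <linux/printk.h>
#include <linux/refcount.h>

static void sketch_overflow(void)
{
	refcount_t ref = REFCOUNT_INIT(REFCOUNT_MAX);

	/* Incrementing past REFCOUNT_MAX saturates (and may WARN)
	 * instead of wrapping to a small, exploitable value. */
	refcount_inc(&ref);
	if (refcount_read(&ref) == REFCOUNT_SATURATED)
		pr_info("refcount saturated, as the lkdtm tests expect\n");
}
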
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0a2b99e1af45..9ad9c01ddf41 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -46,8 +46,6 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
}
@@ -59,8 +57,6 @@ static void number_of_connections(struct mei_cl_device *cldev)
*/
static void blacklist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 0;
}
@@ -71,8 +67,6 @@ static void blacklist(struct mei_cl_device *cldev)
*/
static void whitelist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 1;
}
@@ -256,7 +250,6 @@ static void mei_wd(struct mei_cl_device *cldev)
{
struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
if (pdev->device == MEI_DEV_ID_WPT_LP ||
pdev->device == MEI_DEV_ID_SPT ||
pdev->device == MEI_DEV_ID_SPT_H)
@@ -410,8 +403,6 @@ static void mei_nfc(struct mei_cl_device *cldev)
bus = cldev->bus;
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
cl = mei_cl_alloc_linked(bus);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 985bd4fd3328..a0a495c95e3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -791,11 +791,44 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+}
+static DEVICE_ATTR_RO(max_conn);
+
+static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 fixed = mei_me_cl_fixed(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+}
+static DEVICE_ATTR_RO(fixed);
+
+static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+}
+static DEVICE_ATTR_RO(max_len);
+
static struct attribute *mei_cldev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_uuid.attr,
&dev_attr_version.attr,
&dev_attr_modalias.attr,
+ &dev_attr_max_conn.attr,
+ &dev_attr_fixed.attr,
+ &dev_attr_max_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
@@ -873,15 +906,16 @@ static const struct device_type mei_cl_device_type = {
/**
* mei_cl_bus_set_name - set device name for me client device
+ * <controller>-<client device>
+ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
*
* @cldev: me client device
*/
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
- cldev->name,
- mei_me_cl_uuid(cldev->me_cl),
- mei_me_cl_ver(cldev->me_cl));
+ dev_set_name(&cldev->dev, "%s-%pUl",
+ dev_name(cldev->bus->dev),
+ mei_me_cl_uuid(cldev->me_cl));
}
/**
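
Taken together with the rename below, each me client now sits at /sys/bus/mei/devices/<controller>-<uuid>, with max_conn, fixed and max_len exposed next to the existing name/uuid/version/modalias attributes. A hypothetical userspace read, reusing the example device name from the kernel-doc below (the path is illustrative):

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/bus/mei/devices/"
			"0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb"
			"/max_conn", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("max_conn: %s\n", buf);
	fclose(f);
	return 0;
}
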
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c1f9e810cf81..2f8954def591 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -69,6 +69,42 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
return me_cl->props.protocol_version;
}
+/**
+ * mei_me_cl_max_conn - return me client max number of connections
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max number of connections
+ */
+static inline u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_number_of_connections;
+}
+
+/**
+ * mei_me_cl_fixed - return me client fixed address, if any
+ *
+ * @me_cl: me client
+ *
+ * Return: me client fixed address
+ */
+static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.fixed_address;
+}
+
+/**
+ * mei_me_cl_max_len - return me client max msg length
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max msg length
+ */
+static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_msg_length;
+}
+
/*
* MEI IO Functions
*/
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index c681f6fab342..93027fd96c71 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -27,18 +27,6 @@
#include "mei_hdcp.h"
-static inline u8 mei_get_ddi_index(enum port port)
-{
- switch (port) {
- case PORT_A:
- return MEI_DDI_A;
- case PORT_B ... PORT_F:
- return (u8)port;
- default:
- return MEI_DDI_INVALID_PORT;
- }
-}
-
/**
* mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
* @dev: device corresponding to the mei_cl_device
@@ -69,7 +57,8 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
- session_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_init_in.port.physical_port = (u8)data->fw_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->fw_tc;
session_init_in.protocol = data->protocol;
byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
@@ -138,7 +127,8 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
- verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_rxcert_in.port.physical_port = (u8)data->fw_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
@@ -208,7 +198,8 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
- send_hprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ send_hprime_in.port.physical_port = (u8)data->fw_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
@@ -265,7 +256,8 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
- pairing_info_in.port.physical_port = mei_get_ddi_index(data->port);
+ pairing_info_in.port.physical_port = (u8)data->fw_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
@@ -323,7 +315,8 @@ mei_hdcp_initiate_locality_check(struct device *dev,
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
- lc_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ lc_init_in.port.physical_port = (u8)data->fw_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
if (byte < 0) {
@@ -378,7 +371,8 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
- verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_lprime_in.port.physical_port = (u8)data->fw_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
@@ -435,7 +429,8 @@ static int mei_hdcp_get_session_key(struct device *dev,
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
- get_skey_in.port.physical_port = mei_get_ddi_index(data->port);
+ get_skey_in.port.physical_port = (u8)data->fw_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
if (byte < 0) {
@@ -499,7 +494,8 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
- verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_repeater_in.port.physical_port = (u8)data->fw_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
@@ -569,7 +565,8 @@ static int mei_hdcp_verify_mprime(struct device *dev,
WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
verify_mprime_in.port.integrated_port_type = data->port_type;
- verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_mprime_in.port.physical_port = (u8)data->fw_ddi;
+ verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
HDCP_2_2_MPRIME_LEN);
@@ -630,7 +627,8 @@ static int mei_hdcp_enable_authentication(struct device *dev,
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
- enable_auth_in.port.physical_port = mei_get_ddi_index(data->port);
+ enable_auth_in.port.physical_port = (u8)data->fw_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->fw_tc;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
@@ -684,7 +682,8 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
- session_close_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_close_in.port.physical_port = (u8)data->fw_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
sizeof(session_close_in));
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index e4b1cd54c853..18ffc773fa18 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -184,8 +184,11 @@ struct hdcp_cmd_no_data {
/* Uniquely identifies the hdcp port being addressed for a given command. */
struct hdcp_port_id {
u8 integrated_port_type;
+ /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
u8 physical_port;
- u16 reserved;
+ /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
+ u8 attached_transcoder;
+ u8 reserved;
} __packed;
/*
@@ -362,16 +365,4 @@ struct wired_cmd_repeater_auth_stream_req_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
-
-enum mei_fw_ddi {
- MEI_DDI_INVALID_PORT = 0x0,
-
- MEI_DDI_B = 1,
- MEI_DDI_C,
- MEI_DDI_D,
- MEI_DDI_E,
- MEI_DDI_F,
- MEI_DDI_A = 7,
- MEI_DDI_RANGE_END = MEI_DDI_A,
-};
#endif /* __MEI_HDCP_H__ */
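
A condensed sketch of the addressing contract the new field establishes (per the comments in struct hdcp_port_id above; fw_ddi and fw_tc come from struct hdcp_port_data as used in the mei_hdcp.c hunks):

/* Sketch: mei_hdcp copies both fields verbatim; the caller (i915) is
 * expected to pass fw_tc = 0 before Gen11.5 and fw_ddi = 0 on
 * Gen11.5+, so only the applicable field is ever non-zero. */
static void sketch_fill_port_id(struct hdcp_port_id *id,
				const struct hdcp_port_data *data)
{
	id->integrated_port_type = data->port_type;
	id->physical_port = (u8)data->fw_ddi;
	id->attached_transcoder = (u8)data->fw_tc;
}
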
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c09f8bb49495..7cd67fb2365d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,7 @@
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point V */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
@@ -162,7 +163,8 @@ access to ME_CBD */
#define ME_IS_HRA 0x00000002
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
-
+/* TRC control shadow register */
+#define ME_TRC 0x00000030
/* H_HPG_CSR register bits */
#define H_HPG_CSR_PGIHEXR 0x00000001
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index c4f6991d3028..668418d7ea77 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -173,6 +173,27 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
}
/**
+ * mei_me_trc_status - read trc status register
+ *
+ * @dev: mei device
+ * @trc: trc status register value
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (!hw->cfg->hw_trc_supported)
+ return -EOPNOTSUPP;
+
+ *trc = mei_me_reg_read(hw, ME_TRC);
+ trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
+
+ return 0;
+}
+
+/**
* mei_me_fw_status - read fw status register from pci config space
*
* @dev: mei device
@@ -183,20 +204,19 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
static int mei_me_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
int ret;
int i;
- if (!fw_status)
+ if (!fw_status || !hw->read_fws)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev, fw_src->status[i],
- &fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ ret = hw->read_fws(dev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -210,19 +230,26 @@ static int mei_me_fw_status(struct mei_device *dev,
* mei_me_hw_config - configure hw dependent settings
*
* @dev: mei device
+ *
+ * Return:
+ * * -EINVAL when read_fws is not set
+ * * 0 on success
+ *
*/
-static void mei_me_hw_config(struct mei_device *dev)
+static int mei_me_hw_config(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr, reg;
+ if (WARN_ON(!hw->read_fws))
+ return -EINVAL;
+
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -233,6 +260,8 @@ static void mei_me_hw_config(struct mei_device *dev)
if (reg & H_D0I3C_I3)
hw->pg_state = MEI_PG_ON;
}
+
+ return 0;
}
/**
@@ -269,7 +298,7 @@ static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
}
/**
- * mei_me_intr_clear - clear and stop interrupts
+ * me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
* @hcsr: supplied hcsr register value
@@ -323,9 +352,9 @@ static void mei_me_intr_disable(struct mei_device *dev)
*/
static void mei_me_synchronize_irq(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct mei_me_hw *hw = to_me_hw(dev);
- synchronize_irq(pdev->irq);
+ synchronize_irq(hw->irq);
}
/**
@@ -1294,6 +1323,7 @@ end:
static const struct mei_hw_ops mei_me_hw_ops = {
+ .trc_status = mei_me_trc_status,
.fw_status = mei_me_fw_status,
.pg_state = mei_me_pg_state,
@@ -1384,6 +1414,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
+#define MEI_CFG_TRC \
+ .hw_trc_supported = 1
+
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
@@ -1432,6 +1465,14 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
+/* Tiger Lake and newer devices */
+static const struct mei_cfg mei_me_pch15_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+};
+
/*
 * mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1446,6 +1487,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
@@ -1461,19 +1503,19 @@ const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
- * @pdev: The pci device structure
+ * @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
*
* Return: The mei_device pointer on success, NULL on failure.
*/
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg)
{
struct mei_device *dev;
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+ dev = devm_kzalloc(parent, sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -1483,7 +1525,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
+ mei_device_init(dev, parent, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 1d8794828cbc..4a8d4dcd5a91 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -21,12 +21,14 @@
* @quirk_probe: device exclusion quirk
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
+ * @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
+ u32 hw_trc_supported:1;
};
@@ -42,16 +44,20 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
+ * @irq: irq number
* @pg_state: power gating state
 * @d0i3_supported: d0i3 support
* @hbuf_depth: depth of hardware host/write buffer in slots
+ * @read_fws: read FW status register handler
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
+ int irq;
enum mei_pg_state pg_state;
bool d0i3_supported;
u8 hbuf_depth;
+ int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
@@ -74,6 +80,7 @@ struct mei_me_hw {
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -86,12 +93,13 @@ enum mei_cfg_idx {
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH15_CFG,
MEI_ME_NUM_CFG,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg);
int mei_me_pg_enter_sync(struct mei_device *dev);
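
The new read_fws hook, together with the struct device parameter to mei_me_dev_init(), decouples hw-me from PCI config space, so a bus driver other than pci-me can reuse it. A hypothetical platform-device back end might wire it up like this (the my_* names and the MMIO offset scheme are illustrative assumptions, not an existing in-tree driver):

static int my_read_fws(const struct mei_device *dev, int where, u32 *val)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* FW status read from MMIO rather than PCI config space */
	*val = ioread32(hw->mem_addr + where);
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = mei_me_dev_init(&pdev->dev, mei_me_get_cfg(MEI_ME_PCH12_CFG));
	if (!dev)
		return -ENOMEM;

	hw = to_me_hw(dev);
	hw->mem_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hw->mem_addr))
		return PTR_ERR(hw->mem_addr);
	hw->irq = platform_get_irq(pdev, 0);
	if (hw->irq < 0)
		return hw->irq;
	hw->read_fws = my_read_fws;

	/* irq request and mei_start() omitted */
	return 0;
}
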
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 5e58656b8e19..785b260b3ae9 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -660,14 +660,16 @@ static int mei_txe_fw_status(struct mei_device *dev,
}
/**
- * mei_txe_hw_config - configure hardware at the start of the devices
+ * mei_txe_hw_config - configure hardware at the start of the device
*
* @dev: the device structure
*
 * Configuring hardware at the start of the device should be done only
 * once, at device probe time
+ *
+ * Return: always 0
*/
-static void mei_txe_hw_config(struct mei_device *dev)
+static int mei_txe_hw_config(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
@@ -677,6 +679,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
+
+ return 0;
}
/**
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index b9fef773e71b..bcee77768b91 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -190,7 +190,9 @@ int mei_start(struct mei_device *dev)
/* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
- mei_hw_config(dev);
+ ret = mei_hw_config(dev);
+ if (ret)
+ goto err;
dev_dbg(dev->dev, "reset in start the mei device.\n");
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7310b476323c..4ef6e37caafc 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -701,6 +701,29 @@ static int mei_fasync(int fd, struct file *file, int band)
}
/**
+ * trc_show - mei device trc attribute show method
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf, or a negative error code
+ */
+static ssize_t trc_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ u32 trc;
+ int ret;
+
+ ret = mei_trc_status(dev, &trc);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%08X\n", trc);
+}
+static DEVICE_ATTR_RO(trc);
+
+/**
* fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
@@ -887,6 +910,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_tx_queue_limit.attr,
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
+ &dev_attr_trc.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 0f2141178299..76f8ff5ff974 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -260,6 +260,7 @@ struct mei_cl {
* @hw_config : configure hw
*
* @fw_status : get fw status registers
+ * @trc_status : get trc status register
* @pg_state : power gating state of the device
* @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
@@ -287,9 +288,11 @@ struct mei_hw_ops {
bool (*hw_is_ready)(struct mei_device *dev);
int (*hw_reset)(struct mei_device *dev, bool enable);
int (*hw_start)(struct mei_device *dev);
- void (*hw_config)(struct mei_device *dev);
+ int (*hw_config)(struct mei_device *dev);
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
+ int (*trc_status)(struct mei_device *dev, u32 *trc);
+
enum mei_pg_state (*pg_state)(struct mei_device *dev);
bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
@@ -614,9 +617,9 @@ void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
*/
-static inline void mei_hw_config(struct mei_device *dev)
+static inline int mei_hw_config(struct mei_device *dev)
{
- dev->ops->hw_config(dev);
+ return dev->ops->hw_config(dev);
}
static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
@@ -711,6 +714,13 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+static inline int mei_trc_status(struct mei_device *dev, u32 *trc)
+{
+ if (dev->ops->trc_status)
+ return dev->ops->trc_status(dev, trc);
+ return -EOPNOTSUPP;
+}
+
static inline int mei_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3dca63eddaa0..c845b7e40f26 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,12 +98,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
/* required last entry */
@@ -120,6 +121,13 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
+static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pci_read_config_dword(pdev, where, val);
+}
+
/**
 * mei_me_quirk_probe - probe for devices that don't expose a valid ME interface
*
@@ -191,13 +199,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(pdev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg);
if (!dev) {
err = -ENOMEM;
goto end;
}
hw = to_me_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev)[0];
+ hw->irq = pdev->irq;
+ hw->read_fws = mei_me_read_fws;
pci_enable_msi(pdev);
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 948f45bbf135..b6841ba6d922 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Intel MIC & related support"
-comment "Intel MIC Bus Driver"
-
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
depends on 64BIT && PCI && X86
@@ -18,8 +16,6 @@ config INTEL_MIC_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Bus Driver"
-
config SCIF_BUS
tristate "SCIF Bus Driver"
depends on 64BIT && PCI && X86
@@ -35,8 +31,6 @@ config SCIF_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Bus Driver"
-
config VOP_BUS
tristate "VOP Bus Driver"
help
@@ -51,8 +45,6 @@ config VOP_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Host Driver"
-
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
@@ -71,8 +63,6 @@ config INTEL_MIC_HOST
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Card Driver"
-
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
depends on 64BIT && X86
@@ -90,8 +80,6 @@ config INTEL_MIC_CARD
For more information see
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Driver"
-
config SCIF
tristate "SCIF Driver"
depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
@@ -110,8 +98,6 @@ config SCIF
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Coprocessor State Management (COSM) Drivers"
-
config MIC_COSM
tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
depends on 64BIT && PCI && X86 && SCIF
@@ -128,8 +114,6 @@ config MIC_COSM
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Driver"
-
config VOP
tristate "VOP Driver"
depends on VOP_BUS
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 97415afd79f3..345bf843a38e 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#ifndef _OCXL_INTERNAL_H_
#define _OCXL_INTERNAL_H_
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index 024f417e7e01..17e21cb2addd 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocxl
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3a8d76d1ccae..2817f4751306 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -119,7 +119,7 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
- seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ seq_puts(s, "#id count aver-clks max-clks\n");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -165,8 +165,7 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
+ seq_puts(file, "# gid bid ctx# asid pid cbrs dsbytes mode\n");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
@@ -191,10 +190,8 @@ static int gru_seq_show(struct seq_file *file, void *data)
struct gru_state *gru = GID_TO_GRU(gid);
if (gid == 0) {
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
- "ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
- "busy", "busy", "free", "free", "free");
+ seq_puts(file, "# gid nid ctx cbr dsr ctx cbr dsr\n");
+ seq_puts(file, "# busy busy busy free free free\n");
}
if (gru) {
ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f30448bf3a63..6c1a23cb3e8c 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -340,8 +340,6 @@ static const struct of_device_id sram_dt_ids[] = {
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
- struct resource *res;
- size_t size;
int ret;
int (*init_func)(void);
@@ -351,25 +349,14 @@ static int sram_probe(struct platform_device *pdev)
sram->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(sram->dev, "found no memory resource\n");
- return -EINVAL;
- }
-
- size = resource_size(res);
-
- if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
- dev_err(sram->dev, "could not request region for resource\n");
- return -EBUSY;
- }
-
if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
- sram->virt_base = devm_ioremap(sram->dev, res->start, size);
+ sram->virt_base = devm_platform_ioremap_resource(pdev, 0);
else
- sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
- if (!sram->virt_base)
- return -ENOMEM;
+ sram->virt_base = devm_platform_ioremap_resource_wc(pdev, 0);
+ if (IS_ERR(sram->virt_base)) {
+ dev_err(&pdev->dev, "could not map SRAM registers\n");
+ return PTR_ERR(sram->virt_base);
+ }
sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
NUMA_NO_NODE, NULL);
@@ -382,7 +369,8 @@ static int sram_probe(struct platform_device *pdev)
else
clk_prepare_enable(sram->clk);
- ret = sram_reserve_regions(sram, res);
+ ret = sram_reserve_regions(sram,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (ret)
goto err_disable_clk;
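
One semantic detail worth noting in the conversion above: devm_ioremap()/devm_ioremap_wc() report failure with NULL, while the devm_platform_ioremap_resource*() helpers (which also fold in platform_get_resource() and devm_request_mem_region()) return ERR_PTR() values, hence the switch from a NULL test to IS_ERR()/PTR_ERR(). The resulting idiom:

void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

if (IS_ERR(base))		/* ERR_PTR(), not NULL, on failure */
	return PTR_ERR(base);
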
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 7d9e23aa0b92..2ae9948a91e1 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -708,7 +708,6 @@ EXPORT_SYMBOL_GPL(st_unregister);
*/
static int st_tty_open(struct tty_struct *tty)
{
- int err = 0;
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
@@ -731,7 +730,8 @@ static int st_tty_open(struct tty_struct *tty)
*/
st_kim_complete(st_gdata->kim_data);
pr_debug("done %s", __func__);
- return err;
+
+ return 0;
}
static void st_tty_close(struct tty_struct *tty)
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 819e35995d32..cbb706dabede 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -28,6 +28,10 @@ MODULE_PARM_DESC(disable_guest,
static bool vmci_guest_personality_initialized;
static bool vmci_host_personality_initialized;
+static DEFINE_MUTEX(vmci_vsock_mutex); /* protects vmci_vsock_transport_cb */
+static vmci_vsock_cb vmci_vsock_transport_cb;
+static bool vmci_vsock_cb_host_called;
+
/*
* vmci_get_context_id() - Gets the current context ID.
*
@@ -45,6 +49,69 @@ u32 vmci_get_context_id(void)
}
EXPORT_SYMBOL_GPL(vmci_get_context_id);
+/*
+ * vmci_register_vsock_callback() - Register the VSOCK vmci_transport callback.
+ *
+ * The callback will be called when the first host or guest becomes active,
+ * or if they are already active when this function is called.
+ * To unregister the callback, call this function with NULL parameter.
+ *
+ * Returns 0 on success. -EBUSY if a callback is already registered.
+ */
+int vmci_register_vsock_callback(vmci_vsock_cb callback)
+{
+ int err = 0;
+
+ mutex_lock(&vmci_vsock_mutex);
+
+ if (vmci_vsock_transport_cb && callback) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ vmci_vsock_transport_cb = callback;
+
+ if (!vmci_vsock_transport_cb) {
+ vmci_vsock_cb_host_called = false;
+ goto out;
+ }
+
+ if (vmci_guest_code_active())
+ vmci_vsock_transport_cb(false);
+
+ if (vmci_host_users() > 0) {
+ vmci_vsock_cb_host_called = true;
+ vmci_vsock_transport_cb(true);
+ }
+
+out:
+ mutex_unlock(&vmci_vsock_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmci_register_vsock_callback);
+
+void vmci_call_vsock_callback(bool is_host)
+{
+ mutex_lock(&vmci_vsock_mutex);
+
+ if (!vmci_vsock_transport_cb)
+ goto out;
+
+ /* In the host, this function could be called multiple times,
+ * but the callback should be invoked only once.
+ */
+ if (is_host) {
+ if (vmci_vsock_cb_host_called)
+ goto out;
+
+ vmci_vsock_cb_host_called = true;
+ }
+
+ vmci_vsock_transport_cb(is_host);
+out:
+ mutex_unlock(&vmci_vsock_mutex);
+}
+
static int __init vmci_drv_init(void)
{
int vmci_err;
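
A minimal sketch of the intended consumer side (the real user is the vsock/vmci transport; the sketch_* names are illustrative). Note that the callback may fire synchronously from within vmci_register_vsock_callback() if a personality is already active:

static void sketch_vmci_cb(bool is_host)
{
	pr_info("vmci %s active: register vsock transport here\n",
		is_host ? "host" : "guest");
}

static int __init sketch_init(void)
{
	return vmci_register_vsock_callback(sketch_vmci_cb);
}

static void __exit sketch_exit(void)
{
	vmci_register_vsock_callback(NULL);	/* unregister */
}
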
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index aab81b67670c..990682480bf6 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -36,10 +36,12 @@ extern struct pci_dev *vmci_pdev;
u32 vmci_get_context_id(void);
int vmci_send_datagram(struct vmci_datagram *dg);
+void vmci_call_vsock_callback(bool is_host);
int vmci_host_init(void);
void vmci_host_exit(void);
bool vmci_host_code_active(void);
+int vmci_host_users(void);
int vmci_guest_init(void);
void vmci_guest_exit(void);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 7a84a48c75da..cc8eeb361fcd 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -637,6 +637,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
pci_set_drvdata(pdev, vmci_dev);
+
+ vmci_call_vsock_callback(false);
return 0;
err_free_irq:
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 833e2bd248a5..ff3c396146ff 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -108,6 +108,11 @@ bool vmci_host_code_active(void)
atomic_read(&vmci_host_active_users) > 0);
}
+int vmci_host_users(void)
+{
+ return atomic_read(&vmci_host_active_users);
+}
+
/*
* Called on open of /dev/vmci.
*/
@@ -338,6 +343,8 @@ static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
atomic_inc(&vmci_host_active_users);
+ vmci_call_vsock_callback(true);
+
retval = 0;
out:
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2c71a434c915..95b41c0891d0 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -408,38 +408,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
- u32 retries_max)
-{
- int err;
- u32 retry_count = 0;
-
- if (!status || !retries_max)
- return -EINVAL;
-
- do {
- err = __mmc_send_status(card, status, 5);
- if (err)
- break;
-
- if (!R1_STATUS(*status) &&
- (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
- break; /* RPMB programming operation complete */
-
- /*
- * Rechedule to give the MMC device a chance to continue
- * processing the previous command without being polled too
- * frequently.
- */
- usleep_range(1000, 5000);
- } while (++retry_count < retries_max);
-
- if (retry_count == retries_max)
- err = -EPERM;
-
- return err;
-}
-
static int ioctl_do_sanitize(struct mmc_card *card)
{
int err;
@@ -468,6 +436,58 @@ out:
return err;
}
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ u32 *resp_errs)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ err = __mmc_send_status(card, &status, 5);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "error %d requesting status\n", err);
+ return err;
+ }
+
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (done) {
+ dev_err(mmc_dev(card->host),
+ "Card stuck in wrong state! %s status: %#x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!mmc_blk_in_tran_state(status));
+
+ return err;
+}
+
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
@@ -477,7 +497,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct scatterlist sg;
int err;
unsigned int target_part;
- u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
@@ -611,16 +630,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (idata->rpmb) {
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
/*
- * Ensure RPMB command has completed by polling CMD13
+ * Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
*/
- err = ioctl_rpmb_card_status_poll(card, &status, 5);
- if (err)
- dev_err(mmc_dev(card->host),
- "%s: Card Status=0x%08X, error %d\n",
- __func__, status, err);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
}
return err;
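
card_busy_detect() now serves both the ioctl path above and the request-completion paths further down: it polls CMD13 until the card reports transfer state, optionally OR-ing every R1 status word it sees into *resp_errs so the caller can inspect the accumulated error bits afterwards. A hedged caller sketch (ERR_MASK stands in for whatever R1 error mask the caller cares about; it is not a define from this file):

	u32 status = 0;
	int err;

	/* Poll until TRAN state or the block-layer timeout expires */
	err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
	if (!err && (status & ERR_MASK))	/* ERR_MASK: illustrative */
		err = -EIO;
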
@@ -970,58 +985,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
return ms;
}
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
- /*
- * Some cards mishandle the status bits, so make sure to check both the
- * busy indication and the card state.
- */
- return status & R1_READY_FOR_DATA &&
- (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, u32 *resp_errs)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- int err = 0;
- u32 status;
-
- do {
- bool done = time_after(jiffies, timeout);
-
- err = __mmc_send_status(card, &status, 5);
- if (err) {
- pr_err("%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- return err;
- }
-
- /* Accumulate any response error bits seen */
- if (resp_errs)
- *resp_errs |= status;
-
- /*
- * Timeout if the device never becomes ready for data and never
- * leaves the program state.
- */
- if (done) {
- pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
- mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__, status);
- return -ETIMEDOUT;
- }
-
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!mmc_blk_in_tran_state(status));
-
- return err;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1671,7 +1634,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
mmc_blk_send_stop(card, timeout);
- err = card_busy_detect(card, timeout, req, NULL);
+ err = card_busy_detect(card, timeout, NULL);
mmc_retune_release(card->host);
@@ -1895,7 +1858,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
return 0;
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
/*
* Do not assume data transferred correctly if there are any error bits
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 221127324709..abf8f5eb0a1c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1469,8 +1469,7 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
-static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
- bool cd_irq)
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
/*
* If the device is configured as wakeup, we prevent a new sleep for
@@ -2129,7 +2128,7 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->hw_reset(host);
mmc_bus_put(host);
- if (ret)
+ if (ret < 0)
pr_warn("%s: tried to HW reset card, got error %d\n",
mmc_hostname(host), ret);
@@ -2297,11 +2296,8 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_get(host);
- /*
- * if there is a _removable_ card registered, check whether it is
- * still present
- */
- if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
+	/* Verify that a registered card is still functional, else remove it. */
+ if (host->bus_ops && !host->bus_dead)
host->bus_ops->detect(host);
host->detect_change = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 328c78dbee66..575ac0257af2 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -70,6 +70,8 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq);
int _mmc_detect_card_removed(struct mmc_host *host);
int mmc_detect_card_removed(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c8804895595f..f6912ded652d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
-static void mmc_part_add(struct mmc_card *card, unsigned int size,
+static void mmc_part_add(struct mmc_card *card, u64 size,
unsigned int part_cfg, char *name, int idx, bool ro,
int area_type)
{
@@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
u8 hc_erase_grp_sz, hc_wp_grp_sz;
- unsigned int part_size;
+ u64 part_size;
/*
* General purpose partition feature support --
@@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
<< 8) +
ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
- part_size *= (size_t)(hc_erase_grp_sz *
- hc_wp_grp_sz);
+ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
"gp%d", idx, false,
@@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
- unsigned int part_size;
+ u64 part_size;
struct device_node *np;
bool broken_hpi = false;
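
Widening part_size to u64 matters because EXT_CSD describes general purpose partitions in write-protect-group units: size = GP_SIZE_MULT x HC_ERASE_GRP_SIZE x HC_WP_GRP_SIZE x 512 KiB, and the final << 19 (x 512 Ki) overflows 32 bits for perfectly legal cards. A worked example with illustrative EXT_CSD values:

	u64 part_size;
	u8 hc_erase_grp_sz = 4, hc_wp_grp_sz = 16;	/* from EXT_CSD */
	u32 gp_size_mult = 4096;		/* EXT_CSD_GP_SIZE_MULT */

	part_size = gp_size_mult;
	part_size *= hc_erase_grp_sz * hc_wp_grp_sz;	/* 262144 WP groups */
	part_size <<= 19;	/* x 512 KiB: 2^37 bytes = 128 GiB > U32_MAX */
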
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 2d2d9ea8be4f..3dba15bccce2 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
END_FIXUP
};
+
static const struct mmc_fixup sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 26cabd53ddc5..ebb387aa5158 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1048,9 +1048,35 @@ static int mmc_sdio_runtime_resume(struct mmc_host *host)
return ret;
}
+/*
+ * SDIO HW reset
+ *
+ * Returns 0 if the HW reset was executed synchronously, returns 1 if the HW
+ * reset was asynchronously scheduled, else a negative error code.
+ */
static int mmc_sdio_hw_reset(struct mmc_host *host)
{
- mmc_power_cycle(host, host->card->ocr);
+ struct mmc_card *card = host->card;
+
+ /*
+ * In case the card is shared among multiple func drivers, reset the
+ * card through a rescan work. In this way it will be removed and
+	 * re-detected, thus all func drivers become informed about it.
+ */
+ if (atomic_read(&card->sdio_funcs_probed) > 1) {
+ if (mmc_card_removed(card))
+ return 1;
+ host->rescan_entered = 0;
+ mmc_card_set_removed(card);
+ _mmc_detect_change(host, 0, false);
+ return 1;
+ }
+
+ /*
+	 * Only a single func driver has been probed, so let's skip the heavy
+ * hotplug dance above and execute the reset immediately.
+ */
+ mmc_power_cycle(host, card->ocr);
return mmc_sdio_reinit_card(host);
}
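
This is also why the mmc_hw_reset() hunk above now warns only when ret < 0: a return of 1 is the legitimate "reset scheduled asynchronously" case, not an error. The caller's view, sketched:

	ret = host->bus_ops->hw_reset(host);	/* mmc_sdio_hw_reset() here */
	if (ret < 0)
		pr_warn("HW reset failed: %d\n", ret);	/* genuine failure */
	else if (ret == 1)
		/* card marked removed; mmc_rescan() re-detects it */ ;
	else
		/* card was power cycled and re-initialized synchronously */ ;
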
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 2963e6542958..3cc928282af7 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -138,6 +138,8 @@ static int sdio_bus_probe(struct device *dev)
if (ret)
return ret;
+ atomic_inc(&func->card->sdio_funcs_probed);
+
/* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -153,7 +155,10 @@ static int sdio_bus_probe(struct device *dev)
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
- ret = sdio_set_block_size(func, 0);
+ if (mmc_card_removed(func->card))
+ ret = -ENOMEDIUM;
+ else
+ ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
goto disable_runtimepm;
@@ -165,6 +170,7 @@ static int sdio_bus_probe(struct device *dev)
return 0;
disable_runtimepm:
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
dev_pm_domain_detach(dev, false);
@@ -181,6 +187,7 @@ static int sdio_bus_remove(struct device *dev)
pm_runtime_get_sync(dev);
drv->remove(func);
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->irq_handler) {
pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 49ea02c467bf..d06b2dfe3c95 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -159,6 +159,7 @@ config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the ASPEED Secure Digital Host Controller Interface.
@@ -368,6 +369,17 @@ config MMC_SDHCI_F_SDH30
If unsure, say N.
+config MMC_SDHCI_MILBEAUT
+ tristate "SDHCI support for Socionext Milbeaut Serieas using F_SDH30"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+	  This selects the Secure Digital Host Controller Interface (SDHCI)
+	  support needed by the Milbeaut SoC for MMC / SD / SDIO.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
@@ -1011,6 +1023,7 @@ config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's AM654 SOCs. The controller supports
@@ -1019,3 +1032,11 @@ config MMC_SDHCI_AM654
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_OWL
+ tristate "Actions Semi Owl SD/MMC Host Controller support"
+ depends on HAS_DMA
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ help
+ This selects support for the SD/MMC Host Controller on
+ Actions Semi Owl SoCs.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 11c4598e91d9..21d9089e5eda 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
+obj-$(CONFIG_MMC_SDHCI_MILBEAUT) += sdhci-milbeaut.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_SDHCI_AM654) += sdhci_am654.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -73,6 +74,7 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
obj-$(CONFIG_MMC_BCM2835) += bcm2835.o
+obj-$(CONFIG_MMC_OWL) += owl-mmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index c26fbe5f2222..6f065bb5c55a 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -583,11 +583,11 @@ static void atmci_init_debugfs(struct atmel_mci_slot *slot)
debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
- debugfs_create_x32("pending_events", S_IRUSR, root,
- (u32 *)&host->pending_events);
- debugfs_create_x32("completed_events", S_IRUSR, root,
- (u32 *)&host->completed_events);
+ debugfs_create_u32("state", S_IRUSR, root, &host->state);
+ debugfs_create_xul("pending_events", S_IRUSR, root,
+ &host->pending_events);
+ debugfs_create_xul("completed_events", S_IRUSR, root,
+ &host->completed_events);
}
#if defined(CONFIG_OF)
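
pending_events and completed_events are unsigned long bitmaps manipulated with set_bit()/test_bit(), so handing debugfs a (u32 *) cast was wrong on 64-bit kernels (and actively misleading on big-endian ones). debugfs_create_xul() takes the unsigned long directly:

	unsigned long pending_events;	/* bitmap, updated with set_bit() etc. */

	/* Hex view of the full word, correct on 32- and 64-bit alike */
	debugfs_create_xul("pending_events", S_IRUSR, root, &pending_events);
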
@@ -2347,8 +2347,7 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
static int atmci_configure_dma(struct atmel_mci *host)
{
- host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
- "rxtx");
+ host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
if (PTR_ERR(host->dma.chan) == -ENODEV) {
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
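
dma_request_slave_channel_reason() was only ever a #define for dma_request_chan(); spelling out the real API makes the ERR_PTR contract visible. The useful property is that failure reasons stay distinguishable:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rxtx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet */
		chan = NULL;			/* e.g. fall back to PIO */
	}
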
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 148414d7f0c9..99f61fd2a658 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1357,7 +1357,6 @@ static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct clk *clk;
- struct resource *iomem;
struct bcm2835_host *host;
struct mmc_host *mmc;
const __be32 *regaddr_p;
@@ -1373,8 +1372,7 @@ static int bcm2835_probe(struct platform_device *pdev)
host->pdev = pdev;
spin_lock_init(&host->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 22aded1065ae..916746c6c2c7 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -148,7 +148,6 @@ static int octeon_mmc_probe(struct platform_device *pdev)
{
struct device_node *cn, *node = pdev->dev.of_node;
struct cvm_mmc_host *host;
- struct resource *res;
void __iomem *base;
int mmc_irq[9];
int i, ret = 0;
@@ -205,23 +204,13 @@ static int octeon_mmc_probe(struct platform_device *pdev)
host->last_slot = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[0] is missing\n");
- return -ENXIO;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
host->base = (void __iomem *)base;
host->reg_off = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[1] is missing\n");
- return -EINVAL;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
host->dma_base = (void __iomem *)base;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 79c55c7b4afd..fc9d4d000f97 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -176,11 +176,11 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
- debugfs_create_x32("pending_events", S_IRUSR, root,
- (u32 *)&host->pending_events);
- debugfs_create_x32("completed_events", S_IRUSR, root,
- (u32 *)&host->completed_events);
+ debugfs_create_u32("state", S_IRUSR, root, &host->state);
+ debugfs_create_xul("pending_events", S_IRUSR, root,
+ &host->pending_events);
+ debugfs_create_xul("completed_events", S_IRUSR, root,
+ &host->completed_events);
}
#endif /* defined(CONFIG_DEBUG_FS) */
@@ -3441,8 +3441,8 @@ int dw_mci_runtime_resume(struct device *dev)
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index f816c06ef916..78383f60a3dc 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -41,6 +41,7 @@
#define JZ_REG_MMC_RESP_FIFO 0x34
#define JZ_REG_MMC_RXFIFO 0x38
#define JZ_REG_MMC_TXFIFO 0x3C
+#define JZ_REG_MMC_LPM 0x40
#define JZ_REG_MMC_DMAC 0x44
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
@@ -77,6 +78,8 @@
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
+#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
+#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
@@ -98,12 +101,20 @@
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)
+#define JZ_MMC_LPM_DRV_RISING BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
+#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
+#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
+
#define JZ_MMC_CLK_RATE 24000000
enum jz4740_mmc_version {
JZ_MMC_JZ4740,
JZ_MMC_JZ4725B,
+ JZ_MMC_JZ4760,
JZ_MMC_JZ4780,
+ JZ_MMC_X1000,
};
enum jz4740_mmc_state {
@@ -852,6 +863,22 @@ static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
}
writew(div, host->base + JZ_REG_MMC_CLKRT);
+
+ if (real_rate > 25000000) {
+ if (host->version >= JZ_MMC_X1000) {
+ writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
+ JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4760) {
+ writel(JZ_MMC_LPM_DRV_RISING |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4725B)
+ writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ }
+
return real_rate;
}
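
The LPM programming above leans on the version enum being declared oldest-to-newest, so each "host->version >=" test enables a feature for that SoC and everything after it; the new JZ_MMC_JZ4760 and JZ_MMC_X1000 entries slot into that ordering. The idiom, isolated:

	/* Ordered oldest to newest, so >= reads as "this SoC or later" */
	enum jz4740_mmc_version {
		JZ_MMC_JZ4740, JZ_MMC_JZ4725B, JZ_MMC_JZ4760,
		JZ_MMC_JZ4780, JZ_MMC_X1000,
	};

	if (host->version >= JZ_MMC_JZ4760)
		/* rising-edge drive delay exists from the JZ4760 on */ ;
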
@@ -895,11 +922,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
break;
case MMC_BUS_WIDTH_4:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
+ host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
+ break;
default:
break;
}
@@ -924,7 +956,9 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
+ { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
+ { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
@@ -1025,11 +1059,12 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
goto err_release_dma;
}
- dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+ dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
host->use_dma ? "DMA" : "PIO",
- (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+ (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
+ ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
return 0;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 66e354d51ee9..74c6cfbf9172 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1421,7 +1421,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
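
The debounce argument of mmc_gpiod_request_cd() is in microseconds, so the 1 ms the old boardfiles asked for has to be spelled 1000; passing 1 silently requested a 1 us debounce:

	/* debounce is in microseconds: 1 ms => 1000 */
	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
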
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c37e70dbe250..40e72c30ea84 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -44,6 +44,7 @@
#define DRIVER_NAME "mmci-pl18x"
static void mmci_variant_init(struct mmci_host *host);
+static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);
static unsigned int fmax = 515633;
@@ -184,7 +185,7 @@ static struct variant_data variant_ux500 = {
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
- .init = mmci_variant_init,
+ .init = ux500_variant_init,
};
static struct variant_data variant_ux500v2 = {
@@ -261,6 +262,10 @@ static struct variant_data variant_stm32_sdmmc = {
.datalength_bits = 25,
.datactrl_blocksz = 14,
.stm32_idmabsize_mask = GENMASK(12, 5),
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
@@ -419,7 +424,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
-void mmci_dma_release(struct mmci_host *host)
+static void mmci_dma_release(struct mmci_host *host)
{
if (host->ops && host->ops->dma_release)
host->ops->dma_release(host);
@@ -427,7 +432,7 @@ void mmci_dma_release(struct mmci_host *host)
host->use_dma = false;
}
-void mmci_dma_setup(struct mmci_host *host)
+static void mmci_dma_setup(struct mmci_host *host)
{
if (!host->ops || !host->ops->dma_setup)
return;
@@ -462,7 +467,7 @@ static int mmci_validate_data(struct mmci_host *host,
return 0;
}
-int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
int err;
@@ -478,7 +483,7 @@ int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
return err;
}
-void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
int err)
{
if (host->ops && host->ops->unprep_data)
@@ -487,7 +492,7 @@ void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
data->host_cookie = 0;
}
-void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
@@ -495,7 +500,7 @@ void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
host->ops->get_next_data(host, data);
}
-int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
struct mmc_data *data = host->data;
int ret;
@@ -530,7 +535,7 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
return 0;
}
-void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!host->use_dma)
return;
@@ -539,7 +544,7 @@ void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
host->ops->dma_finalize(host, data);
}
-void mmci_dma_error(struct mmci_host *host)
+static void mmci_dma_error(struct mmci_host *host)
{
if (!host->use_dma)
return;
@@ -610,6 +615,67 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+	 * Note that the card may need a couple of clock cycles before
+	 * it starts signaling busy on DAT0, hence re-read the
+	 * MMCISTATUS register here, to allow the busy bit to be set.
+	 * Potentially we may even need to poll the register for a
+	 * while, to allow it to be set, but tests indicate that it
+	 * isn't needed.
+ */
+ if (!host->busy_status && !(status & err_msk) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+	 * Note that the HW triggers an IRQ on both edges while
+	 * monitoring DAT0 for busy completion, but there is only one
+	 * status bit in MMCISTATUS for the busy state. Therefore
+	 * both the start and the end interrupts need to be cleared,
+	 * one after the other. So, clear the busy start IRQ here.
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
+ */
+ if (host->busy_status) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
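
The refactor moves the entire DAT0 busy state machine behind the new ->busy_complete() host op, so the IRQ path further down shrinks to a single contract: false means "busy handling still in flight, keep host->cmd and wait for the next IRQ", true means "busy phase over (or not applicable), finish the command". The stm32 sdmmc variant later in this patch implements the same contract with its own status bits. The caller, condensed to that contract:

	/* mmci_cmd_irq(), condensed */
	if (busy_resp && host->variant->busy_detect)
		if (!host->ops->busy_complete(host, status, err_msk))
			return;		/* wait for the busy end IRQ */
	/* fall through: command done, read response or set the error */
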
@@ -948,14 +1014,21 @@ static struct mmci_host_ops mmci_variant_ops = {
};
#endif
-void mmci_variant_init(struct mmci_host *host)
+static void mmci_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+}
+
+static void ux500_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
}
-void ux500v2_variant_init(struct mmci_host *host)
+static void ux500v2_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
@@ -1075,6 +1148,7 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
@@ -1097,6 +1171,16 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
else
c |= host->variant->cmdreg_srsp;
}
+
+ if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
+ if (!cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+ do_div(clks, MSEC_PER_SEC);
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
+ }
+
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
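
MMCIDATATIMER counts card-clock (cclk) cycles, hence the ms-to-cycles scaling above. A quick sanity check with illustrative numbers:

	/* busy_timeout = 10 s (the fallback), cclk = 100 MHz */
	unsigned long long clks = 10000ULL * 100000000;	/* ms x Hz = 1e12 */

	do_div(clks, MSEC_PER_SEC);	/* / 1000 -> 1e9 cycles = 10 s */
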
@@ -1201,6 +1285,7 @@ static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
void __iomem *base = host->base;
bool sbc, busy_resp;
@@ -1215,74 +1300,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
* handling. Note that we tag on any latent IRQs postponed
* due to waiting for busy status.
*/
- if (!((status|host->busy_status) &
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
+ if (host->variant->busy_timeout && busy_resp)
+ err_msk |= MCI_DATATIMEOUT;
+
+ if (!((status | host->busy_status) &
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
return;
/* Handle busy detection on DAT0 if the variant supports it. */
- if (busy_resp && host->variant->busy_detect) {
-
- /*
- * Before unmasking for the busy end IRQ, confirm that the
- * command was sent successfully. To keep track of having a
- * command in-progress, waiting for busy signaling to end,
- * store the status in host->busy_status.
- *
- * Note that, the card may need a couple of clock cycles before
- * it starts signaling busy on DAT0, hence re-read the
- * MMCISTATUS register here, to allow the busy bit to be set.
- * Potentially we may even need to poll the register for a
- * while, to allow it to be set, but tests indicates that it
- * isn't needed.
- */
- if (!host->busy_status &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
-
- host->busy_status =
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent, then bail out if busy status is set and wait for the
- * busy end IRQ.
- *
- * Note that, the HW triggers an IRQ on both edges while
- * monitoring DAT0 for busy completion, but there is only one
- * status bit in MMCISTATUS for the busy state. Therefore
- * both the start and the end interrupts needs to be cleared,
- * one after the other. So, clear the busy start IRQ here.
- */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag)) {
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
+ if (busy_resp && host->variant->busy_detect)
+ if (!host->ops->busy_complete(host, status, err_msk))
return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent and the busy bit isn't set, it means we have received
- * the busy end IRQ. Clear and mask the IRQ, then continue to
- * process the command.
- */
- if (host->busy_status) {
-
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask,
- base + MMCIMASK0);
- host->busy_status = 0;
- }
- }
host->cmd = NULL;
@@ -1290,6 +1318,9 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
+ } else if (host->variant->busy_timeout && busy_resp &&
+ status & MCI_DATATIMEOUT) {
+ cmd->error = -ETIMEDOUT;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1583,6 +1614,20 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 max_busy_timeout = 0;
+
+ if (!host->variant->busy_detect)
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+ max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
+}
+
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
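
mmci_set_max_busy_timeout() below is the inverse bound: the largest millisecond value whose cycle count still fits the timer word. Assuming a 32-bit build (where ~0UL == 0xFFFFFFFF) and a 52 MHz card clock, that works out to roughly 82.6 s:

	/* assuming a 32-bit timer word and actual_clock = 52 MHz */
	u32 max_ms = 0xFFFFFFFFUL / (52000000 / 1000);	/* = 82595 ms */
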
@@ -1687,6 +1732,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
mmci_set_clkreg(host, ios->clock);
+ mmci_set_max_busy_timeout(mmc);
+
if (host->ops && host->ops->set_pwrreg)
host->ops->set_pwrreg(host, pwr);
else
@@ -1957,7 +2004,6 @@ static int mmci_probe(struct amba_device *dev,
mmci_write_datactrlreg(host,
host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- mmc->max_busy_timeout = 0;
}
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 833236ecb31e..158e1231aa23 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -164,6 +164,7 @@
#define MCI_ST_CARDBUSY (1 << 24)
/* Extended status bits for the STM32 variants */
#define MCI_STM32_BUSYD0 BIT(20)
+#define MCI_STM32_BUSYD0END BIT(21)
#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -287,6 +288,8 @@ struct mmci_host;
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
* @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_timeout: true if the variant starts the data timer when the DPSM
+ *		  enters the Wait_R or Busy state.
* @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
* @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
* indicating that the card is busy
@@ -333,6 +336,7 @@ struct variant_data {
u8 signal_direction:1;
u8 pwrreg_clkgate:1;
u8 busy_detect:1;
+ u8 busy_timeout:1;
u32 busy_dpsm_flag;
u32 busy_detect_flag;
u32 busy_detect_mask;
@@ -366,6 +370,7 @@ struct mmci_host_ops {
void (*dma_error)(struct mmci_host *host);
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
+ bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
};
struct mmci_host {
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 8e83ae6920ae..a4f7e8e689d3 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -25,8 +25,8 @@ struct sdmmc_priv {
void *sg_cpu;
};
-int sdmmc_idma_validate_data(struct mmci_host *host,
- struct mmc_data *data)
+static int sdmmc_idma_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
{
struct scatterlist *sg;
int i;
@@ -282,6 +282,47 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
return datactrl;
}
+static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ u32 busy_d0, busy_d0end, mask, sdmmc_status;
+
+ mask = readl_relaxed(base + MMCIMASK0);
+ sdmmc_status = readl_relaxed(base + MMCISTATUS);
+ busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
+ busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;
+
+ /* complete if there is an error or busy_d0end */
+ if ((status & err_msk) || busy_d0end)
+ goto complete;
+
+ /*
+	 * On a response, busy signaling is reflected in the BUSYD0 flag.
+	 * If busy_d0 is in progress, we must enable the busyd0end
+	 * interrupt to wait for its completion. Otherwise this request
+	 * has no busy step.
+ */
+ if (busy_d0) {
+ if (!host->busy_status) {
+ writel_relaxed(mask | host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_status = status &
+ (MCI_CMDSENT | MCI_CMDRESPEND);
+ }
+ return false;
+ }
+
+complete:
+ if (host->busy_status) {
+ writel_relaxed(mask & ~host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ writel_relaxed(host->variant->busy_detect_mask,
+ base + MMCICLEAR);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
static struct mmci_host_ops sdmmc_variant_ops = {
.validate_data = sdmmc_idma_validate_data,
.prep_data = sdmmc_idma_prep_data,
@@ -292,6 +333,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_finalize = sdmmc_idma_finalize,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
+ .busy_complete = sdmmc_busy_complete,
};
void sdmmc_variant_init(struct mmci_host *host)
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index a0670e9cd012..fc6b9cf27d0b 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -608,8 +608,8 @@ static int moxart_probe(struct platform_device *pdev)
host->timeout = msecs_to_jiffies(1000);
host->sysclk = clk_get_rate(clk);
host->fifo_width = readl(host->base + REG_FEATURE) << 2;
- host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
- host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 952fa4063ff8..767e964ca5a2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1510,8 +1510,35 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- if (mmc_pdata(host)->init_card)
- mmc_pdata(host)->init_card(card);
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ struct device_node *np = mmc_dev(mmc)->of_node;
+
+ /*
+ * REVISIT: should be moved to sdio core and made more
+ * general e.g. by expanding the DT bindings of child nodes
+		 * to provide a mechanism for passing this information:
+ * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ */
+
+ np = of_get_compatible_child(np, "ti,wl1251");
+ if (np) {
+ /*
+ * We have TI wl1251 attached to MMC3. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(host->dev, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+ card->ocr = 0x80;
+ of_node_put(np);
+ }
+ }
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
new file mode 100644
index 000000000000..771e3d00f1bb
--- /dev/null
+++ b/drivers/mmc/host/owl-mmc.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semi Owl SoCs SD/MMC driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: SDIO support
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+/*
+ * SDC registers
+ */
+#define OWL_REG_SD_EN 0x0000
+#define OWL_REG_SD_CTL 0x0004
+#define OWL_REG_SD_STATE 0x0008
+#define OWL_REG_SD_CMD 0x000c
+#define OWL_REG_SD_ARG 0x0010
+#define OWL_REG_SD_RSPBUF0 0x0014
+#define OWL_REG_SD_RSPBUF1 0x0018
+#define OWL_REG_SD_RSPBUF2 0x001c
+#define OWL_REG_SD_RSPBUF3 0x0020
+#define OWL_REG_SD_RSPBUF4 0x0024
+#define OWL_REG_SD_DAT 0x0028
+#define OWL_REG_SD_BLK_SIZE 0x002c
+#define OWL_REG_SD_BLK_NUM 0x0030
+#define OWL_REG_SD_BUF_SIZE 0x0034
+
+/* SD_EN Bits */
+#define OWL_SD_EN_RANE BIT(31)
+#define OWL_SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
+#define OWL_SD_EN_S18EN BIT(12)
+#define OWL_SD_EN_RESE BIT(10)
+#define OWL_SD_EN_DAT1_S BIT(9)
+#define OWL_SD_EN_CLK_S BIT(8)
+#define OWL_SD_ENABLE BIT(7)
+#define OWL_SD_EN_BSEL BIT(6)
+#define OWL_SD_EN_SDIOEN BIT(3)
+#define OWL_SD_EN_DDREN BIT(2)
+#define OWL_SD_EN_DATAWID(x) (((x) & 0x3) << 0)
+
+/* SD_CTL Bits */
+#define OWL_SD_CTL_TOUTEN BIT(31)
+#define OWL_SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
+#define OWL_SD_CTL_DELAY_MSK GENMASK(23, 16)
+#define OWL_SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
+#define OWL_SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
+#define OWL_SD_CTL_CMDLEN BIT(13)
+#define OWL_SD_CTL_SCC BIT(12)
+#define OWL_SD_CTL_TCN(x) (((x) & 0xf) << 8)
+#define OWL_SD_CTL_TS BIT(7)
+#define OWL_SD_CTL_LBE BIT(6)
+#define OWL_SD_CTL_C7EN BIT(5)
+#define OWL_SD_CTL_TM(x) (((x) & 0xf) << 0)
+
+#define OWL_SD_DELAY_LOW_CLK 0x0f
+#define OWL_SD_DELAY_MID_CLK 0x0a
+#define OWL_SD_DELAY_HIGH_CLK 0x09
+#define OWL_SD_RDELAY_DDR50 0x0a
+#define OWL_SD_WDELAY_DDR50 0x08
+
+/* SD_STATE Bits */
+#define OWL_SD_STATE_DAT1BS BIT(18)
+#define OWL_SD_STATE_SDIOB_P BIT(17)
+#define OWL_SD_STATE_SDIOB_EN BIT(16)
+#define OWL_SD_STATE_TOUTE BIT(15)
+#define OWL_SD_STATE_BAEP BIT(14)
+#define OWL_SD_STATE_MEMRDY BIT(12)
+#define OWL_SD_STATE_CMDS BIT(11)
+#define OWL_SD_STATE_DAT1AS BIT(10)
+#define OWL_SD_STATE_SDIOA_P BIT(9)
+#define OWL_SD_STATE_SDIOA_EN BIT(8)
+#define OWL_SD_STATE_DAT0S BIT(7)
+#define OWL_SD_STATE_TEIE BIT(6)
+#define OWL_SD_STATE_TEI BIT(5)
+#define OWL_SD_STATE_CLNR BIT(4)
+#define OWL_SD_STATE_CLC BIT(3)
+#define OWL_SD_STATE_WC16ER BIT(2)
+#define OWL_SD_STATE_RC16ER BIT(1)
+#define OWL_SD_STATE_CRC7ER BIT(0)
+
+struct owl_mmc_host {
+ struct device *dev;
+ struct reset_control *reset;
+ void __iomem *base;
+ struct clk *clk;
+ struct completion sdc_complete;
+ spinlock_t lock;
+ int irq;
+ u32 clock;
+ bool ddr_50;
+
+ enum dma_data_direction dma_dir;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config dma_cfg;
+ struct completion dma_complete;
+
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
+static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+ unsigned int regval;
+
+ regval = readl(reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, reg);
+}
+
+static irqreturn_t owl_irq_handler(int irq, void *devid)
+{
+ struct owl_mmc_host *owl_host = devid;
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&owl_host->lock, flags);
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (state & OWL_SD_STATE_TEI) {
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ state |= OWL_SD_STATE_TEI;
+ writel(state, owl_host->base + OWL_REG_SD_STATE);
+ complete(&owl_host->sdc_complete);
+ }
+
+ spin_unlock_irqrestore(&owl_host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
+{
+	struct mmc_request *mrq = owl_host->mrq;
+	struct mmc_data *data;
+
+	/* Should never be NULL; check before dereferencing */
+	WARN_ON(!mrq);
+	data = mrq->data;
+
+ owl_host->mrq = NULL;
+
+ if (data)
+ dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
+ owl_host->dma_dir);
+
+ /* Finally finish request */
+ mmc_request_done(owl_host->mmc, mrq);
+}
+
+static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
+ struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ u32 mode, state, resp[2];
+ u32 cmd_rsp_mask = 0;
+
+ init_completion(&owl_host->sdc_complete);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ mode = OWL_SD_CTL_TM(0);
+ break;
+
+ case MMC_RSP_R1:
+ if (data) {
+ if (data->flags & MMC_DATA_READ)
+ mode = OWL_SD_CTL_TM(4);
+ else
+ mode = OWL_SD_CTL_TM(5);
+ } else {
+ mode = OWL_SD_CTL_TM(1);
+ }
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+
+ break;
+
+ case MMC_RSP_R1B:
+ mode = OWL_SD_CTL_TM(3);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R2:
+ mode = OWL_SD_CTL_TM(2);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R3:
+ mode = OWL_SD_CTL_TM(1);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR;
+ break;
+
+ default:
+ dev_warn(owl_host->dev, "Unknown MMC command\n");
+ cmd->error = -EINVAL;
+ return;
+ }
+
+ /* Keep current WDELAY and RDELAY */
+ mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+
+ /* Start to send corresponding command type */
+ writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
+ writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
+
+ /* Set LBE to send clk at the end of last read block */
+ if (data) {
+ mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
+ } else {
+ mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
+ mode |= OWL_SD_CTL_TS;
+ }
+
+ owl_host->cmd = cmd;
+
+ /* Start transfer */
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (data)
+ return;
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ cmd->error = -ETIMEDOUT;
+ return;
+ }
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (cmd_rsp_mask & state) {
+ if (state & OWL_SD_STATE_CLNR) {
+ dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+
+ if (state & OWL_SD_STATE_CRC7ER) {
+ dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+ }
+
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
+ cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
+ } else {
+ resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
+ cmd->resp[1] = resp[1] >> 8;
+ }
+ }
+}
+
+static void owl_mmc_dma_complete(void *param)
+{
+ struct owl_mmc_host *owl_host = param;
+ struct mmc_data *data = owl_host->data;
+
+ if (data)
+ complete(&owl_host->dma_complete);
+}
+
+static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
+ struct mmc_data *data)
+{
+ u32 total;
+
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
+ true);
+ writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
+ writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
+ total = data->blksz * data->blocks;
+
+ if (total < 512)
+ writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
+ else
+ writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
+
+ if (data->flags & MMC_DATA_WRITE) {
+ owl_host->dma_dir = DMA_TO_DEVICE;
+ owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
+ } else {
+ owl_host->dma_dir = DMA_FROM_DEVICE;
+ owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
+ }
+
+ dma_map_sg(owl_host->dma->device->dev, data->sg,
+ data->sg_len, owl_host->dma_dir);
+
+ dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
+ owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
+ data->sg_len,
+ owl_host->dma_cfg.direction,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!owl_host->desc) {
+ dev_err(owl_host->dev, "Can't prepare slave sg\n");
+ return -EBUSY;
+ }
+
+ owl_host->data = data;
+
+ owl_host->desc->callback = owl_mmc_dma_complete;
+ owl_host->desc->callback_param = (void *)owl_host;
+ data->error = 0;
+
+ return 0;
+}
+
+static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ int ret;
+
+ owl_host->mrq = mrq;
+ if (mrq->data) {
+ ret = owl_mmc_prepare_data(owl_host, data);
+ if (ret < 0) {
+ data->error = ret;
+ goto err_out;
+ }
+
+ init_completion(&owl_host->dma_complete);
+ dmaengine_submit(owl_host->desc);
+ dma_async_issue_pending(owl_host->dma);
+ }
+
+ owl_mmc_send_cmd(owl_host, mrq->cmd, data);
+
+ if (data) {
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete,
+ 10 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (!wait_for_completion_timeout(&owl_host->dma_complete,
+ 5 * HZ)) {
+ dev_err(owl_host->dev, "DMA interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (data->stop)
+ owl_mmc_send_cmd(owl_host, data->stop, NULL);
+
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+err_out:
+ owl_mmc_finish_request(owl_host);
+}
+
+static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
+ unsigned int rate)
+{
+ unsigned long clk_rate;
+ int ret;
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_CTL);
+ reg &= ~OWL_SD_CTL_DELAY_MSK;
+
+ /* Set RDELAY and WDELAY based on the clock */
+ if (rate <= 1000000) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 1000000) && (rate <= 26000000)) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ /* DDR50 mode has special delay chain */
+ } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
+ OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else {
+ dev_err(owl_host->dev, "SD clock rate not supported\n");
+ return -EINVAL;
+ }
+
+ clk_rate = clk_round_rate(owl_host->clk, rate << 1);
+ ret = clk_set_rate(owl_host->clk, clk_rate);
+
+ return ret;
+}
+
+static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
+{
+ if (!ios->clock)
+ return;
+
+ owl_host->clock = ios->clock;
+ owl_mmc_set_clk_rate(owl_host, ios->clock);
+}
+
+static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_EN);
+ reg &= ~0x03;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ break;
+ case MMC_BUS_WIDTH_4:
+ reg |= OWL_SD_EN_DATAWID(1);
+ break;
+ case MMC_BUS_WIDTH_8:
+ reg |= OWL_SD_EN_DATAWID(2);
+ break;
+ }
+
+ writel(reg, owl_host->base + OWL_REG_SD_EN);
+}
+
+static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
+{
+ reset_control_assert(owl_host->reset);
+ udelay(20);
+ reset_control_deassert(owl_host->reset);
+}
+
+static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
+{
+ u32 mode;
+
+ init_completion(&owl_host->sdc_complete);
+
+ /* Enable transfer end IRQ */
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
+ OWL_SD_STATE_TEIE, true);
+
+ /* Send init clk */
+ mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+ mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ return;
+ }
+}
+
+static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ dev_dbg(owl_host->dev, "Powering card up\n");
+
+ /* Reset the SDC controller to clear all previous states */
+ owl_mmc_ctr_reset(owl_host);
+ clk_prepare_enable(owl_host->clk);
+ writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
+ owl_host->base + OWL_REG_SD_EN);
+
+ break;
+
+ case MMC_POWER_ON:
+ dev_dbg(owl_host->dev, "Powering card on\n");
+ owl_mmc_power_on(owl_host);
+
+ break;
+
+ case MMC_POWER_OFF:
+ dev_dbg(owl_host->dev, "Powering card off\n");
+ clk_disable_unprepare(owl_host->clk);
+
+ return;
+
+ default:
+ dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
+ break;
+ }
+
+ if (ios->clock != owl_host->clock)
+ owl_mmc_set_clk(owl_host, ios);
+
+ owl_mmc_set_bus_width(owl_host, ios);
+
+ /* Enable DDR mode if requested */
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
+ owl_host->ddr_50 = 1;
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_DDREN, true);
+ } else {
+ owl_host->ddr_50 = 0;
+ }
+}
+
+static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+	/* It is enough to change the pad ctrl bit for the voltage switch */
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, false);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, true);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct mmc_host_ops owl_mmc_ops = {
+ .request = owl_mmc_request,
+ .set_ios = owl_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
+};
+
+static int owl_mmc_probe(struct platform_device *pdev)
+{
+ struct owl_mmc_host *owl_host;
+ struct mmc_host *mmc;
+ struct resource *res;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "mmc alloc host failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, mmc);
+
+ owl_host = mmc_priv(mmc);
+ owl_host->dev = &pdev->dev;
+ owl_host->mmc = mmc;
+ spin_lock_init(&owl_host->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ owl_host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(owl_host->base)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(owl_host->base);
+ goto err_free_host;
+ }
+
+ owl_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->clk)) {
+ dev_err(&pdev->dev, "No clock defined\n");
+ ret = PTR_ERR(owl_host->clk);
+ goto err_free_host;
+ }
+
+ owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->reset)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+ ret = PTR_ERR(owl_host->reset);
+ goto err_free_host;
+ }
+
+ mmc->ops = &owl_mmc_ops;
+ mmc->max_blk_count = 512;
+ mmc->max_blk_size = 512;
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 262144;
+ mmc->max_req_size = 262144;
+ /* 100kHz ~ 52MHz */
+ mmc->f_min = 100000;
+ mmc->f_max = 52000000;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_4_BIT_DATA;
+ mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_165_195;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_free_host;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
+ if (!owl_host->dma) {
+ dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
+ ret = -ENXIO;
+ goto err_free_host;
+ }
+
+ dev_info(&pdev->dev, "Using %s for DMA transfers\n",
+ dma_chan_name(owl_host->dma));
+
+ owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.device_fc = false;
+
+ owl_host->irq = platform_get_irq(pdev, 0);
+ if (owl_host->irq < 0) {
+ ret = -EINVAL;
+ goto err_free_host;
+ }
+
+ ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
+ 0, dev_name(&pdev->dev), owl_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n",
+ owl_host->irq);
+ goto err_free_host;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add host\n");
+ goto err_free_host;
+ }
+
+ dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
+
+ return 0;
+
+err_free_host:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int owl_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ disable_irq(owl_host->irq);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static const struct of_device_id owl_mmc_of_match[] = {
+ {.compatible = "actions,owl-mmc",},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
+
+static struct platform_driver owl_mmc_driver = {
+ .driver = {
+ .name = "owl_mmc",
+ .of_match_table = of_match_ptr(owl_mmc_of_match),
+ },
+ .probe = owl_mmc_probe,
+ .remove = owl_mmc_remove,
+};
+module_platform_driver(owl_mmc_driver);
+
+MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
+MODULE_AUTHOR("Actions Semi");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index a66f8d6d61d1..18839a10594c 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -308,6 +308,7 @@ static const struct soc_device_attribute soc_whitelist[] = {
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
/* generic ones */
{ .soc_id = "r8a774a1" },
+ { .soc_id = "r8a774b1" },
{ .soc_id = "r8a774c0" },
{ .soc_id = "r8a77470" },
{ .soc_id = "r8a7795" },
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 1604f512c7bd..105e73d4a3b9 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -61,7 +61,7 @@ struct sdhci_acpi_slot {
mmc_pm_flag_t pm_caps;
unsigned int flags;
size_t priv_size;
- int (*probe_slot)(struct platform_device *, const char *, const char *);
+ int (*probe_slot)(struct platform_device *, struct acpi_device *);
int (*remove_slot)(struct platform_device *);
int (*free_slot)(struct platform_device *pdev);
int (*setup_host)(struct platform_device *pdev);
@@ -325,12 +325,10 @@ static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
* wifi card in the expected slot with an ACPI companion node, is used to
* indicate that acpi_device_fix_up_power() should be avoided.
*/
-static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
- const char *uid)
+static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
return sdhci_acpi_cht() &&
- !strcmp(hid, "80860F14") &&
- !strcmp(uid, "2") &&
+ acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
}
@@ -345,8 +343,7 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
- const char *uid)
+static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
return false;
}
@@ -375,19 +372,18 @@ out:
return ret;
}
-static int intel_probe_slot(struct platform_device *pdev, const char *hid,
- const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct intel_host *intel_host = sdhci_acpi_priv(c);
struct sdhci_host *host = c->host;
- if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
+ if (acpi_dev_hid_uid_match(adev, "80860F14", "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- if (hid && !strcmp(hid, "80865ACA"))
+ if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL))
host->mmc_host_ops.get_cd = bxt_get_cd;
intel_dsm_init(intel_host, &pdev->dev, host->mmc);
@@ -473,8 +469,7 @@ static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
return IRQ_HANDLED;
}
-static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
- const char *uid)
+static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
@@ -482,7 +477,7 @@ static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
*irq = -EINVAL;
- if (strcmp(hid, "QCOM8051"))
+ if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
*irq = platform_get_irq(pdev, 1);
@@ -501,14 +496,12 @@ static int qcom_free_slot(struct platform_device *pdev)
struct sdhci_host *host = c->host;
struct acpi_device *adev;
int *irq = sdhci_acpi_priv(c);
- const char *hid;
adev = ACPI_COMPANION(dev);
if (!adev)
return -ENODEV;
- hid = acpi_device_hid(adev);
- if (strcmp(hid, "QCOM8051"))
+ if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
if (*irq < 0)
@@ -583,7 +576,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
};
static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+ struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
@@ -654,17 +647,12 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
- const char *uid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
{
const struct sdhci_acpi_uid_slot *u;
for (u = sdhci_acpi_uids; u->hid; u++) {
- if (strcmp(u->hid, hid))
- continue;
- if (!u->uid)
- return u->slot;
- if (uid && !strcmp(u->uid, uid))
+ if (acpi_dev_hid_uid_match(adev, u->hid, u->uid))
return u->slot;
}
return NULL;
@@ -680,22 +668,17 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct resource *iomem;
resource_size_t len;
size_t priv_size;
- const char *hid;
- const char *uid;
int err;
device = ACPI_COMPANION(dev);
if (!device)
return -ENODEV;
- hid = acpi_device_hid(device);
- uid = acpi_device_uid(device);
-
- slot = sdhci_acpi_get_slot(hid, uid);
+ slot = sdhci_acpi_get_slot(device);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
- if (!sdhci_acpi_no_fixup_child_power(hid, uid)) {
+ if (!sdhci_acpi_no_fixup_child_power(device)) {
list_for_each_entry(child, &device->children, node)
if (child->status.present && child->status.enabled)
acpi_device_fix_up_power(child);
@@ -745,7 +728,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (c->slot) {
if (c->slot->probe_slot) {
- err = c->slot->probe_slot(pdev, hid, uid);
+ err = c->slot->probe_slot(pdev, device);
if (err)
goto err_free;
}
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 57b582bf73d9..9289bb4d633e 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -51,6 +51,11 @@
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001
+/* System Control 2 Register */
+#define ESDHC_SYSTEM_CONTROL_2 0x3c
+#define ESDHC_SMPCLKSEL 0x00800000
+#define ESDHC_EXTN 0x00400000
+
/* Host Controller Capabilities Register 2 */
#define ESDHC_CAPABILITIES_1 0x114
@@ -59,7 +64,16 @@
#define ESDHC_HS400_WNDW_ADJUST 0x00000040
#define ESDHC_HS400_MODE 0x00000010
#define ESDHC_TB_EN 0x00000004
+#define ESDHC_TB_MODE_MASK 0x00000003
+#define ESDHC_TB_MODE_SW 0x00000003
+#define ESDHC_TB_MODE_3 0x00000002
+
+#define ESDHC_TBSTAT 0x124
+
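+	/*
+	 * The SDHCI core assumes an active-low card-detect line; when the
+	 * slot is wired active-high, flip SDHCI_CARD_PRESENT so the core
+	 * sees the correct presence state.
+	 */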
#define ESDHC_TBPTR 0x128
+#define ESDHC_WNDW_STRT_PTR_SHIFT 8
+#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
+#define ESDHC_WNDW_END_PTR_MASK 0x7f
/* SD Clock Control Register */
#define ESDHC_SDCLKCTL 0x144
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
new file mode 100644
index 000000000000..a1aa21b9ae1c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ * Takao Orito <orito.takao@socionext.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci_f_sdh30.h"
+
+/* milbeaut bridge controller register */
+#define MLB_SOFT_RESET 0x0200
+#define MLB_SOFT_RESET_RSTX BIT(0)
+
+#define MLB_WP_CD_LED_SET 0x0210
+#define MLB_WP_CD_LED_SET_LED_INV BIT(2)
+
+#define MLB_CR_SET 0x0220
+#define MLB_CR_SET_CR_TOCLKUNIT BIT(24)
+#define MLB_CR_SET_CR_TOCLKFREQ_SFT (16)
+#define MLB_CR_SET_CR_TOCLKFREQ_MASK (0x3F << MLB_CR_SET_CR_TOCLKFREQ_SFT)
+#define MLB_CR_SET_CR_BCLKFREQ_SFT (8)
+#define MLB_CR_SET_CR_BCLKFREQ_MASK (0xFF << MLB_CR_SET_CR_BCLKFREQ_SFT)
+#define MLB_CR_SET_CR_RTUNTIMER_SFT (4)
+#define MLB_CR_SET_CR_RTUNTIMER_MASK (0xF << MLB_CR_SET_CR_RTUNTIMER_SFT)
+
+#define MLB_SD_TOCLK_I_DIV 16
+#define MLB_TOCLKFREQ_UNIT_THRES 16000000
+#define MLB_CAL_TOCLKFREQ_MHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000000)
+#define MLB_CAL_TOCLKFREQ_KHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000)
+#define MLB_TOCLKFREQ_MAX 63
+#define MLB_TOCLKFREQ_MIN 1
+
+#define MLB_SD_BCLK_I_DIV 4
+#define MLB_CAL_BCLKFREQ(rate) (rate / MLB_SD_BCLK_I_DIV / 1000000)
+#define MLB_BCLKFREQ_MAX 255
+#define MLB_BCLKFREQ_MIN 1
+
+#define MLB_CDR_SET 0x0230
+#define MLB_CDR_SET_CLK2POW16 3
+
+struct f_sdhost_priv {
+ struct clk *clk_iface;
+ struct clk *clk;
+ struct device *dev;
+ bool enable_cmd_dat_delay;
+};
+
+static void sdhci_milbeaut_soft_voltage_switch(struct sdhci_host *host)
+{
+ u32 ctrl = 0;
+
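+	/*
+	 * 1.8V switch sequence: assert CRES_O_DN, select the 1.8V I/O
+	 * macro (MSEL_O_1_8), release CRES_O_DN, then disable command
+	 * completion checking for the subsequent tuning procedure.
+	 */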
+ usleep_range(2500, 3000);
+ ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+
+ ctrl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ usleep_range(2500, 3000);
+
+ ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
+ ctrl |= F_SDH30_CMD_CHK_DIS;
+ sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
+}
+
+static unsigned int sdhci_milbeaut_get_min_clock(struct sdhci_host *host)
+{
+ return F_SDH30_MIN_CLOCK;
+}
+
+static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u16 clk;
+ u32 ctl;
+ ktime_t timeout;
+
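+	/*
+	 * Gate the card clock (keeping the internal clock enabled) across
+	 * the reset, then re-enable it and poll until the internal clock
+	 * reports stable again.
+	 */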
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk = (clk & ~SDHCI_CLOCK_CARD_EN) | SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ sdhci_reset(host, mask);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static void sdhci_milbeaut_set_power(struct sdhci_host *host,
+ unsigned char mode, unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
+static const struct sdhci_ops sdhci_milbeaut_ops = {
+ .voltage_switch = sdhci_milbeaut_soft_voltage_switch,
+ .get_min_clock = sdhci_milbeaut_get_min_clock,
+ .reset = sdhci_milbeaut_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_milbeaut_set_power,
+};
+
+static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,
+ int reset_flag)
+{
+ if (reset_flag)
+ sdhci_writel(host, 0, MLB_SOFT_RESET);
+ else
+ sdhci_writel(host, MLB_SOFT_RESET_RSTX, MLB_SOFT_RESET);
+}
+
+static void sdhci_milbeaut_bridge_init(struct sdhci_host *host,
+ int rate)
+{
+ u32 val, clk;
+
+	/* IO_SDIO_CR_SET should be set while the bridge is held in reset */
+ val = sdhci_readl(host, MLB_CR_SET);
+ val &= ~(MLB_CR_SET_CR_TOCLKFREQ_MASK | MLB_CR_SET_CR_TOCLKUNIT |
+ MLB_CR_SET_CR_BCLKFREQ_MASK);
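+	/*
+	 * The timeout clock field is programmed in MHz units at or above
+	 * the 16 MHz threshold (TOCLKUNIT set) and in kHz units below it,
+	 * clamped to the 6-bit field in either case.
+	 */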
+ if (rate >= MLB_TOCLKFREQ_UNIT_THRES) {
+ clk = MLB_CAL_TOCLKFREQ_MHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ val |= MLB_CR_SET_CR_TOCLKUNIT |
+ (clk << MLB_CR_SET_CR_TOCLKFREQ_SFT);
+ } else {
+ clk = MLB_CAL_TOCLKFREQ_KHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_TOCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_TOCLKFREQ_SFT;
+ }
+
+ clk = MLB_CAL_BCLKFREQ(rate);
+ clk = min_t(u32, MLB_BCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_BCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_BCLKFREQ_SFT;
+ val &= ~MLB_CR_SET_CR_RTUNTIMER_MASK;
+ sdhci_writel(host, val, MLB_CR_SET);
+
+ sdhci_writel(host, MLB_CDR_SET_CLK2POW16, MLB_CDR_SET);
+
+ sdhci_writel(host, MLB_WP_CD_LED_SET_LED_INV, MLB_WP_CD_LED_SET);
+}
+
+static void sdhci_milbeaut_vendor_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
+ ctl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+
+ ctl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
+ ctl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
+ F_SDH30_AHB_INCR_4;
+ ctl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
+ sdhci_writew(host, ctl, F_SDH30_AHB_CONFIG);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static const struct of_device_id mlb_dt_ids[] = {
+ {
+ .compatible = "socionext,milbeaut-m10v-sdhci-3.0",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mlb_dt_ids);
+
+static void sdhci_milbeaut_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ int rate = clk_get_rate(priv->clk);
+ u16 ctl;
+
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ ctl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ ctl &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, ctl, SDHCI_CLOCK_CONTROL);
+
+ sdhci_milbeaut_bridge_reset(host, 1);
+
+ sdhci_milbeaut_bridge_init(host, rate);
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ sdhci_milbeaut_vendor_init(host);
+}
+
+static int sdhci_milbeaut_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ret = 0;
+ struct f_sdhost_priv *priv;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "%s: no irq specified\n", __func__);
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ priv = sdhci_priv(host);
+ priv->dev = dev;
+
+ host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET |
+ SDHCI_QUIRK_DELAY_AFTER_POWER;
+ host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+ SDHCI_QUIRK2_TUNING_WORK_AROUND |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ host->hw_name = "f_sdh30";
+ host->ops = &sdhci_milbeaut_ops;
+ host->irq = irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err;
+ }
+
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
+
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
+
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
+
+ sdhci_milbeaut_init(host);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(priv->clk);
+err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+err:
+ sdhci_free_host(host);
+ return ret;
+}
+
+static int sdhci_milbeaut_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+
+ sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ clk_disable_unprepare(priv->clk_iface);
+ clk_disable_unprepare(priv->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_milbeaut_driver = {
+ .driver = {
+ .name = "sdhci-milbeaut",
+ .of_match_table = of_match_ptr(mlb_dt_ids),
+ },
+ .probe = sdhci_milbeaut_probe,
+ .remove = sdhci_milbeaut_remove,
+};
+
+module_platform_driver(sdhci_milbeaut_driver);
+
+MODULE_DESCRIPTION("MILBEAUT SD Card Controller driver");
+MODULE_AUTHOR("Takao Orito <orito.takao@socionext.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci-milbeaut");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7023cbec4017..e49b44b4d82e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -22,6 +22,7 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
#include "sdhci-pltfm.h"
@@ -32,6 +33,10 @@
#define PHY_CLK_TOO_SLOW_HZ 400000
+/* Default settings for ZynqMP Clock Phases */
+#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
+#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
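+/*
+ * Each array entry is indexed by the MMC_TIMING_* value of the speed mode
+ * it applies to (MMC_TIMING_LEGACY ... MMC_TIMING_MMC_HS400).
+ */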
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -72,13 +77,38 @@ struct sdhci_arasan_soc_ctl_map {
};
/**
+ * struct sdhci_arasan_clk_data
+ * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
+ * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @sampleclk_hw: Struct for the clock we might provide to a PHY.
+ * @sampleclk: Pointer to normal 'struct clock' for sampleclk_hw.
+ * @clk_phase_in: Array of Input Clock Phase Delays for all speed modes
+ * @clk_phase_out: Array of Output Clock Phase Delays for all speed modes
+ * @set_clk_delays: Function pointer for setting Clock Delays
+ * @clk_of_data: Platform specific runtime clock data storage pointer
+ */
+struct sdhci_arasan_clk_data {
+ struct clk_hw sdcardclk_hw;
+ struct clk *sdcardclk;
+ struct clk_hw sampleclk_hw;
+ struct clk *sampleclk;
+ int clk_phase_in[MMC_TIMING_MMC_HS400 + 1];
+ int clk_phase_out[MMC_TIMING_MMC_HS400 + 1];
+ void (*set_clk_delays)(struct sdhci_host *host);
+ void *clk_of_data;
+};
+
+struct sdhci_arasan_zynqmp_clk_data {
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+/**
* struct sdhci_arasan_data
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
* @is_phy_on: True if the PHY is on; false if not.
- * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
- * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @clk_data: Struct for the Arasan Controller Clock Data.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
*/
@@ -89,8 +119,7 @@ struct sdhci_arasan_data {
bool is_phy_on;
bool has_cqe;
- struct clk_hw sdcardclk_hw;
- struct clk *sdcardclk;
+ struct sdhci_arasan_clk_data clk_data;
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
@@ -120,6 +149,12 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x80, .width = 8, .shift = 2 },
+ .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 },
+ .hiword_update = false,
+};
+
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
@@ -174,6 +209,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
bool ctrl_phy = false;
if (!IS_ERR(sdhci_arasan->phy)) {
@@ -215,6 +251,10 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
+ /* Set the Input and Output Clock Phase Delays */
+ if (clk_data->set_clk_delays)
+ clk_data->set_clk_delays(host);
+
sdhci_set_clock(host, clock);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE)
@@ -384,6 +424,11 @@ static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+static struct sdhci_arasan_of_data intel_lgm_sdxc_data = {
+ .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -489,6 +534,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,lgm-sdhci-5.1-emmc",
.data = &intel_lgm_emmc_data,
},
+ {
+ .compatible = "intel,lgm-sdhci-5.1-sdxc",
+ .data = &intel_lgm_sdxc_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -502,6 +551,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "arasan,sdhci-4.9a",
.data = &sdhci_arasan_data,
},
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -520,8 +573,10 @@ static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
struct sdhci_arasan_data *sdhci_arasan =
- container_of(hw, struct sdhci_arasan_data, sdcardclk_hw);
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
return host->mmc->actual_clock;
@@ -532,6 +587,177 @@ static const struct clk_ops arasan_sdcardclk_ops = {
};
/**
+ * sdhci_arasan_sampleclk_recalc_rate - Return the sampling clock rate
+ *
+ * Return the current actual rate of the sampling clock. This can be used
+ * to communicate with our PHY.
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ * Returns the sample clock rate.
+ */
+static unsigned long sdhci_arasan_sampleclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+
+ return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+};
+
+/**
+ * sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 and 359 degrees.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+	 * This is applicable for SDHCI_SPEC_300 and above.
+	 * ZynqMP does not set a phase for clocks <= 25 MHz.
+	 * If degrees is zero, there is no need to do anything.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 15 Taps are available */
+ tap_max = 15;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 8 Taps are available */
+		tap_max = 8;
+		break;
+	default:
+ break;
+ }
+
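+	/*
+	 * Map the requested phase onto the taps available for this speed
+	 * mode, e.g. 90 degrees with 30 taps gives (90 * 30) / 360 = 7.
+	 */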
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Output Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sdcardclk_set_phase,
+};
+
+/**
+ * sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 and 359 degrees.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+	 * This is applicable for SDHCI_SPEC_300 and above.
+	 * ZynqMP does not set a phase for clocks <= 25 MHz.
+	 * If degrees is zero, there is no need to do anything.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 120 Taps are available */
+ tap_max = 120;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 60 Taps are available */
+ tap_max = 60;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	default:
+ break;
+ }
+
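+	/* Same degrees-to-taps mapping as on the output path. */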
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_INPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Input Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sampleclk_set_phase,
+};
+
+/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
* The corecfg_clockmultiplier is supposed to contain clock multiplier
@@ -609,39 +835,128 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz);
}
+static void sdhci_arasan_set_clk_delays(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+
+ clk_set_phase(clk_data->sampleclk,
+ clk_data->clk_phase_in[host->timing]);
+ clk_set_phase(clk_data->sdcardclk,
+ clk_data->clk_phase_out[host->timing]);
+}
+
+static void arasan_dt_read_clk_phase(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data,
+ unsigned int timing, const char *prop)
+{
+ struct device_node *np = dev->of_node;
+
+ int clk_phase[2] = {0};
+
+ /*
+	 * Read Tap Delay values from DT; if the DT does not contain the
+	 * Tap Values, use the pre-defined values.
+ */
+ if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
+ 2, 0)) {
+ dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
+ prop, clk_data->clk_phase_in[timing],
+ clk_data->clk_phase_out[timing]);
+ return;
+ }
+
+ /* The values read are Input and Output Clock Delays in order */
+ clk_data->clk_phase_in[timing] = clk_phase[0];
+ clk_data->clk_phase_out[timing] = clk_phase[1];
+}
+
/**
- * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
+ * arasan_dt_parse_clk_phases - Read Clock Delay values from DT
+ *
+ * Called at initialization to parse the values of Clock Delays.
+ *
+ * @dev: Pointer to our struct device.
+ * @clk_data: Pointer to the Clock Data structure
+ */
+static void arasan_dt_parse_clk_phases(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data)
+{
+ int *iclk_phase, *oclk_phase;
+ u32 mio_bank = 0;
+ int i;
+
+ /*
+	 * This has been kept as a pointer and is assigned a function here
+	 * so that different controller variants can assign their own handling
+	 * function.
+ */
+ clk_data->set_clk_delays = sdhci_arasan_set_clk_delays;
+
+ if (of_device_is_compatible(dev->of_node, "xlnx,zynqmp-8.9a")) {
+ iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_ICLK_PHASE;
+ oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_OCLK_PHASE;
+
+ of_property_read_u32(dev->of_node, "xlnx,mio-bank", &mio_bank);
+ if (mio_bank == 2) {
+ oclk_phase[MMC_TIMING_UHS_SDR104] = 90;
+ oclk_phase[MMC_TIMING_MMC_HS200] = 90;
+ }
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = iclk_phase[i];
+ clk_data->clk_phase_out[i] = oclk_phase[i];
+ }
+ }
+
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
+ "clk-phase-legacy");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
+ "clk-phase-mmc-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_SD_HS,
+ "clk-phase-sd-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR12,
+ "clk-phase-uhs-sdr12");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR25,
+ "clk-phase-uhs-sdr25");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR50,
+ "clk-phase-uhs-sdr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR104,
+ "clk-phase-uhs-sdr104");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_DDR50,
+ "clk-phase-uhs-ddr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_DDR52,
+ "clk-phase-mmc-ddr52");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS200,
+ "clk-phase-mmc-hs200");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS400,
+ "clk-phase-mmc-hs400");
+}
+
+/**
+ * sdhci_arasan_register_sdcardclk - Register the sdcardclk for a PHY to use
*
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
*
- * Note: without seriously re-architecting SDHCI's clock code and testing on
- * all platforms, there's no way to create a totally beautiful clock here
- * with all clock ops implemented. Instead, we'll just create a clock that can
- * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
- * framework that we're doing things behind its back. This should be sufficient
- * to create nice clean device tree bindings and later (if needed) we can try
- * re-architecting SDHCI if we see some benefit to it.
- *
* @sdhci_arasan: Our private data structure.
* @clk_xin: Pointer to the functional clock
* @dev: Pointer to our struct device.
* Returns 0 on success and error value on error
*/
-static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
- struct clk *clk_xin,
- struct device *dev)
+static int
+sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
struct device_node *np = dev->of_node;
struct clk_init_data sdcardclk_init;
const char *parent_clk_name;
int ret;
- /* Providing a clock to the PHY is optional; no error if missing */
- if (!of_find_property(np, "#clock-cells", NULL))
- return 0;
-
ret = of_property_read_string_index(np, "clock-output-names", 0,
&sdcardclk_init.name);
if (ret) {
@@ -653,17 +968,72 @@ static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
sdcardclk_init.parent_names = &parent_clk_name;
sdcardclk_init.num_parents = 1;
sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
- sdcardclk_init.ops = &arasan_sdcardclk_ops;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sdcardclk_init.ops = &zynqmp_sdcardclk_ops;
+ else
+ sdcardclk_init.ops = &arasan_sdcardclk_ops;
+
+ clk_data->sdcardclk_hw.init = &sdcardclk_init;
+ clk_data->sdcardclk =
+ devm_clk_register(dev, &clk_data->sdcardclk_hw);
+ clk_data->sdcardclk_hw.init = NULL;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get,
+ clk_data->sdcardclk);
+ if (ret)
+ dev_err(dev, "Failed to add sdcard clock provider\n");
+
+ return ret;
+}
+
+/**
+ * sdhci_arasan_register_sampleclk - Register the sampleclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int
+sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data sampleclk_init;
+ const char *parent_clk_name;
+ int ret;
- sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init;
- sdhci_arasan->sdcardclk =
- devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw);
- sdhci_arasan->sdcardclk_hw.init = NULL;
+ ret = of_property_read_string_index(np, "clock-output-names", 1,
+ &sampleclk_init.name);
+ if (ret) {
+ dev_err(dev, "DT has #clock-cells but no clock-output-names\n");
+ return ret;
+ }
+
+ parent_clk_name = __clk_get_name(clk_xin);
+ sampleclk_init.parent_names = &parent_clk_name;
+ sampleclk_init.num_parents = 1;
+ sampleclk_init.flags = CLK_GET_RATE_NOCACHE;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sampleclk_init.ops = &zynqmp_sampleclk_ops;
+ else
+ sampleclk_init.ops = &arasan_sampleclk_ops;
+
+ clk_data->sampleclk_hw.init = &sampleclk_init;
+ clk_data->sampleclk =
+ devm_clk_register(dev, &clk_data->sampleclk_hw);
+ clk_data->sampleclk_hw.init = NULL;
ret = of_clk_add_provider(np, of_clk_src_simple_get,
- sdhci_arasan->sdcardclk);
+ clk_data->sampleclk);
if (ret)
- dev_err(dev, "Failed to add clock provider\n");
+ dev_err(dev, "Failed to add sample clock provider\n");
return ret;
}
@@ -686,6 +1056,54 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+/**
+ * sdhci_arasan_register_sdclk - Register the sdcardclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * Note: without seriously re-architecting SDHCI's clock code and testing on
+ * all platforms, there's no way to create a totally beautiful clock here
+ * with all clock ops implemented. Instead, we'll just create a clock that can
+ * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
+ * framework that we're doing things behind its back. This should be sufficient
+ * to create nice clean device tree bindings and later (if needed) we can try
+ * re-architecting SDHCI if we see some benefit to it.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 num_clks = 0;
+ int ret;
+
+ /* Providing a clock to the PHY is optional; no error if missing */
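+	/*
+	 * A "#clock-cells" value of 0 exposes only sdcardclk; a non-zero
+	 * value means the consumer also expects the sample clock to be
+	 * registered.
+	 */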
+ if (of_property_read_u32(np, "#clock-cells", &num_clks) < 0)
+ return 0;
+
+ ret = sdhci_arasan_register_sdcardclk(sdhci_arasan, clk_xin, dev);
+ if (ret)
+ return ret;
+
+ if (num_clks) {
+ ret = sdhci_arasan_register_sampleclk(sdhci_arasan, clk_xin,
+ dev);
+ if (ret) {
+ sdhci_arasan_unregister_sdclk(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
{
struct sdhci_host *host = sdhci_arasan->host;
@@ -814,6 +1232,25 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (ret)
goto clk_disable_all;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
+ const struct zynqmp_eemi_ops *eemi_ops;
+
+ zynqmp_clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*zynqmp_clk_data),
+ GFP_KERNEL);
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops)) {
+ ret = PTR_ERR(eemi_ops);
+ goto unreg_clk;
+ }
+
+ zynqmp_clk_data->eemi_ops = eemi_ops;
+ sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
+ }
+
+ arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
+
ret = mmc_of_parse(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 8962f6664381..56912e30c47e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -111,7 +111,19 @@ static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE) &&
+ (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH))
+ val ^= SDHCI_CARD_PRESENT;
+
+ return val;
+}
+
static const struct sdhci_ops aspeed_sdhci_ops = {
+ .read_l = aspeed_sdhci_readl,
.set_clock = aspeed_sdhci_set_clock,
.get_max_clock = aspeed_sdhci_get_max_clock,
.set_bus_width = aspeed_sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e7d1920729fb..5959e394b416 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -27,6 +27,9 @@
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
+#define SDMMC_CALCR 0x240
+#define SDMMC_CALCR_EN BIT(0)
+#define SDMMC_CALCR_ALWYSON BIT(4)
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
@@ -35,6 +38,7 @@ struct sdhci_at91_priv {
struct clk *gck;
struct clk *mainck;
bool restore_needed;
+ bool cal_always_on;
};
static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
@@ -116,10 +120,17 @@ static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
sdhci_reset(host, mask);
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
sdhci_at91_set_force_card_detect(host);
+
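+	/*
+	 * Re-enable the always-on analog calibration cell after a full
+	 * reset when the board requires it (inverted SDCAL pin).
+	 */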
+ if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
+ sdhci_writel(host, SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
+ SDMMC_CALCR);
}
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
@@ -345,6 +356,14 @@ static int sdhci_at91_probe(struct platform_device *pdev)
priv->restore_needed = false;
+ /*
+	 * If the SDCAL pin is wrongly connected, we must enable
+ * the analog calibration cell permanently.
+ */
+ priv->cal_always_on =
+ device_property_read_bool(&pdev->dev,
+ "microchip,sdcal-inverted");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
@@ -358,7 +377,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
/* HS200 is broken at this moment */
- host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1d1953dfc54b..5cca3fa4610b 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -77,8 +77,10 @@ struct sdhci_esdhc {
bool quirk_incorrect_hostver;
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
- bool quirk_fixup_tuning;
+ bool quirk_tuning_erratum_type1;
+ bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
+ bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;
@@ -408,6 +410,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -416,10 +420,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32be(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+	 * 1us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32be(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -428,6 +446,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+	 * 1us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
@@ -560,6 +590,32 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
}
}
+static void esdhc_flush_async_fifo(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ val |= ESDHC_FLUSH_ASYNC_FIFO;
+ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
+ ESDHC_FLUSH_ASYNC_FIFO))
+ break;
+ if (timedout) {
+ pr_err("%s: flushing asynchronous FIFO timeout.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -652,9 +708,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
esdhc_clock_enable(host, false);
- temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- temp |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
}
/* Wait max 20 ms */
@@ -796,16 +850,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
}
-static struct soc_device_attribute soc_fixup_tuning[] = {
+static struct soc_device_attribute soc_tuning_erratum_type1[] = {
+ { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ T1040", .revision = "1.0", },
{ .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ LS1021A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { },
+};
+
+static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1012A", .revision = "1.0", },
{ .family = "QorIQ LS1043A", .revision = "1.*", },
{ .family = "QorIQ LS1046A", .revision = "1.0", },
+ { .family = "QorIQ LS1080A", .revision = "1.0", },
+ { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { .family = "QorIQ LA1575A", .revision = "1.0", },
{ },
};
@@ -814,10 +873,7 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
u32 val;
esdhc_clock_enable(host, false);
-
- val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- val |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
val = sdhci_readl(host, ESDHC_TBCTL);
if (enable)
@@ -829,15 +885,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 tbstat_15_8, tbstat_7_0;
+ u32 val;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ /* Write TBCTL[11:8]=4'h8 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~(0xf << 8);
+ val |= 8 << 8;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read TBCTL[31:0] register and rewrite again */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read the TBSTAT[31:0] register twice */
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+
+ /* Reset data lines by setting ESDHCCTL[RSTD] */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+ /* Write 32'hFFFF_FFFF to IRQSTAT register */
+ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
+
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
+ * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
+ * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
+ */
+ tbstat_7_0 = val & 0xff;
+ tbstat_15_8 = (val >> 8) & 0xff;
+
+ if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ *window_start = 8 * esdhc->div_ratio;
+ *window_end = 4 * esdhc->div_ratio;
+ } else {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ }
+}
+
+static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
+ u8 window_start, u8 window_end)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+ int ret;
+
+ /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
+ val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
+ ESDHC_WNDW_STRT_PTR_MASK;
+ val |= window_end & ESDHC_WNDW_END_PTR_MASK;
+ sdhci_writel(host, val, ESDHC_TBPTR);
+
+ /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_SW;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ esdhc->in_sw_tuning = true;
+ ret = sdhci_execute_tuning(mmc, opcode);
+ esdhc->in_sw_tuning = false;
+ return ret;
+}
+
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 window_start, window_end;
+ int ret, retries = 1;
bool hs400_tuning;
unsigned int clk;
u32 val;
- int ret;
/* For tuning mode, the sd clock divisor value
* must be larger than 3 according to reference manual.
@@ -846,39 +984,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->clock > clk)
esdhc_of_set_clock(host, clk);
- if (esdhc->quirk_limited_clk_division &&
- host->flags & SDHCI_HS400_TUNING)
- esdhc_of_set_clock(host, host->clock);
-
esdhc_tuning_block_enable(host, true);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
- ret = sdhci_execute_tuning(mmc, opcode);
- if (hs400_tuning) {
- val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
- val |= ESDHC_FLW_CTL_BG;
- sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
- }
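+	/*
+	 * Tuning strategy: run HW tuning first; if it trips the tuning
+	 * erratum, fall back to SW tuning, and as a last resort retry the
+	 * whole sequence once at a reduced clock.
+	 */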
+ do {
+ if (esdhc->quirk_limited_clk_division &&
+ hs400_tuning)
+ esdhc_of_set_clock(host, host->clock);
- if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
+ /* Do HW tuning */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_3;
+ sdhci_writel(host, val, ESDHC_TBCTL);
- /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
- * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
- */
- val = sdhci_readl(host, ESDHC_TBPTR);
- val = (val & ~((0x7f << 8) | 0x7f)) |
- (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
- sdhci_writel(host, val, ESDHC_TBPTR);
+ ret = sdhci_execute_tuning(mmc, opcode);
+ if (ret)
+ break;
- /* program the software tuning mode by setting
- * TBCTL[TB_MODE]=2'h3
+		/* If HW tuning fails and triggers the erratum,
+		 * try the workaround.
*/
- val = sdhci_readl(host, ESDHC_TBCTL);
- val |= 0x3;
- sdhci_writel(host, val, ESDHC_TBCTL);
- sdhci_execute_tuning(mmc, opcode);
+ ret = host->tuning_err;
+ if (ret == -EAGAIN &&
+ (esdhc->quirk_tuning_erratum_type1 ||
+ esdhc->quirk_tuning_erratum_type2)) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+ pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
+ mmc_hostname(mmc));
+ /* Do SW tuning */
+ esdhc_prepare_sw_tuning(host, &window_start,
+ &window_end);
+ ret = esdhc_execute_sw_tuning(mmc, opcode,
+ window_start,
+ window_end);
+ if (ret)
+ break;
+
+ /* Retry both HW/SW tuning with reduced clock. */
+ ret = host->tuning_err;
+ if (ret == -EAGAIN && retries) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+
+ clk = host->max_clk / (esdhc->div_ratio + 1);
+ esdhc_of_set_clock(host, clk);
+ pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
+ mmc_hostname(mmc));
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ } while (retries--);
+
+ if (ret) {
+ esdhc_tuning_block_enable(host, false);
+ } else if (hs400_tuning) {
+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
+ val |= ESDHC_FLW_CTL_BG;
+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
}
+
return ret;
}
@@ -1114,10 +1286,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
- if (soc_device_match(soc_fixup_tuning))
- esdhc->quirk_fixup_tuning = true;
+ if (soc_device_match(soc_tuning_erratum_type1))
+ esdhc->quirk_tuning_erratum_type1 = true;
+ else
+ esdhc->quirk_tuning_erratum_type1 = false;
+
+ if (soc_device_match(soc_tuning_erratum_type2))
+ esdhc->quirk_tuning_erratum_type2 = true;
else
- esdhc->quirk_fixup_tuning = false;
+ esdhc->quirk_tuning_erratum_type2 = false;
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index eaffa85bc728..acefb76b4e15 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -21,6 +21,7 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
@@ -1590,11 +1591,59 @@ static int amd_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static u32 sdhci_read_present_state(struct sdhci_host *host)
+{
+ return sdhci_readl(host, SDHCI_PRESENT_STATE);
+}
+
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 present_state;
+
+ /*
+ * SDHC 0x7906 requires a hard reset to clear all internal state.
+ * Otherwise it can get into a bad state where the DATA lines are always
+ * read as zeros.
+ */
+ if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
+ pci_clear_master(pdev);
+
+ pci_save_state(pdev);
+
+ pci_set_power_state(pdev, PCI_D3cold);
+ pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
+ pdev->current_state);
+ pci_set_power_state(pdev, PCI_D0);
+
+ pci_restore_state(pdev);
+
+ /*
+ * SDHCI_RESET_ALL says the card detect logic should not be
+ * reset, but since we need to reset the entire controller
+ * we should wait until the card detect logic has stabilized.
+ *
+ * This normally takes about 40ms.
+ */
+ readx_poll_timeout(
+ sdhci_read_present_state,
+ host,
+ present_state,
+ present_state & SDHCI_CD_STABLE,
+ 10000,
+ 100000
+ );
+ }
+
+	sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -1673,6 +1722,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 558202fe64c6..981bbbe63aff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,8 @@
#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
+#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
+#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b056400e34b1..3140fe2e5dba 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,8 +337,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
+ u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
sdhci_init(host, 0);
sdhci_enable_card_detection(host);
+
+ /*
+	 * A change to the card detect bits indicates a change in present state;
+	 * see sdhci_set_card_detection(). A card detect interrupt might have
+ * been missed while the host controller was being reset, so trigger a
+ * rescan to check.
+ */
+ if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
static void __sdhci_led_activate(struct sdhci_host *host)
@@ -2202,7 +2213,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
- pr_warn("%s: 3.3V regulator output did not became stable\n",
+ pr_warn("%s: 3.3V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
@@ -2234,7 +2245,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (ctrl & SDHCI_CTRL_VDD_180)
return 0;
- pr_warn("%s: 1.8V regulator output did not became stable\n",
+ pr_warn("%s: 1.8V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index bb90757ecace..b8e897e31e2e 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -12,6 +12,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
+#include "cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -68,6 +69,9 @@
#define CLOCK_TOO_SLOW_HZ 400000
+/* Command Queue Host Controller Interface Base address */
+#define SDHCI_AM654_CQE_BASE_ADDR 0x200
+
static struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -259,6 +263,19 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -267,6 +284,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -290,6 +308,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -304,6 +323,40 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.pdata = &sdhci_j721e_4bit_pdata,
.flags = IOMUX_PRESENT,
};
+
+static void sdhci_am654_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_am654_dumpregs,
+};
+
+static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
+{
+ struct cqhci_host *cq_host;
+ int ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host),
+ GFP_KERNEL);
+ if (!cq_host)
+ return -ENOMEM;
+
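+	/* The CQE register window sits at a fixed 0x200 offset from the SDHCI base. */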
+ cq_host->mmio = host->ioaddr + SDHCI_AM654_CQE_BASE_ADDR;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->ops = &sdhci_am654_cqhci_ops;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE;
+
+ ret = cqhci_init(cq_host, host->mmc, 1);
+
+ return ret;
+}
+
static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -344,7 +397,23 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
ctl_cfg_2);
- return sdhci_add_host(host);
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ ret = sdhci_am654_cqe_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ return 0;
+
+err_cleanup_host:
+ sdhci_cleanup_host(host);
+ return ret;
}
static int sdhci_am654_get_of_property(struct platform_device *pdev,
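The probe path above replaces the single sdhci_add_host() call with the sdhci_setup_host()/__sdhci_add_host() pair so the CQE host can be wired up in between, unwinding with sdhci_cleanup_host() if either later step fails. A generic standalone sketch of that goto-based unwind shape (the function names below are placeholders, not the driver's):

#include <stdio.h>

/* Placeholder steps standing in for sdhci_setup_host(), the CQE hookup
 * and __sdhci_add_host(); only the unwind shape matters here. */
static int setup(void)    { puts("setup");   return 0; }
static int add_cqe(void)  { puts("cqe");     return 0; }
static int add_host(void) { puts("add");     return 0; }
static void cleanup(void) { puts("cleanup"); }

static int init(void)
{
	int ret;

	ret = setup();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = add_cqe();
	if (ret)
		goto err_cleanup;	/* undo setup() only */

	ret = add_host();
	if (ret)
		goto err_cleanup;

	return 0;

err_cleanup:
	cleanup();
	return ret;
}

int main(void)
{
	return init();
}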
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index f8b939e63e02..fa0dfc657c22 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -16,31 +16,7 @@
#include <linux/clk.h>
#include "sdhci-pltfm.h"
-
-/* F_SDH30 extended Controller registers */
-#define F_SDH30_AHB_CONFIG 0x100
-#define F_SDH30_AHB_BIGED 0x00000040
-#define F_SDH30_BUSLOCK_DMA 0x00000020
-#define F_SDH30_BUSLOCK_EN 0x00000010
-#define F_SDH30_SIN 0x00000008
-#define F_SDH30_AHB_INCR_16 0x00000004
-#define F_SDH30_AHB_INCR_8 0x00000002
-#define F_SDH30_AHB_INCR_4 0x00000001
-
-#define F_SDH30_TUNING_SETTING 0x108
-#define F_SDH30_CMD_CHK_DIS 0x00010000
-
-#define F_SDH30_IO_CONTROL2 0x114
-#define F_SDH30_CRES_O_DN 0x00080000
-#define F_SDH30_MSEL_O_1_8 0x00040000
-
-#define F_SDH30_ESD_CONTROL 0x124
-#define F_SDH30_EMMC_RST 0x00000002
-#define F_SDH30_EMMC_HS200 0x01000000
-
-#define F_SDH30_CMD_DAT_DELAY 0x200
-
-#define F_SDH30_MIN_CLOCK 400000
+#include "sdhci_f_sdh30.h"
struct f_sdhost_priv {
struct clk *clk_iface;
diff --git a/drivers/mmc/host/sdhci_f_sdh30.h b/drivers/mmc/host/sdhci_f_sdh30.h
new file mode 100644
index 000000000000..fc1ad28f7ca9
--- /dev/null
+++ b/drivers/mmc/host/sdhci_f_sdh30.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ *
+ */
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED BIT(6)
+#define F_SDH30_BUSLOCK_DMA BIT(5)
+#define F_SDH30_BUSLOCK_EN BIT(4)
+#define F_SDH30_SIN BIT(3)
+#define F_SDH30_AHB_INCR_16 BIT(2)
+#define F_SDH30_AHB_INCR_8 BIT(1)
+#define F_SDH30_AHB_INCR_4 BIT(0)
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS BIT(16)
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN BIT(19)
+#define F_SDH30_MSEL_O_1_8 BIT(18)
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST BIT(1)
+#define F_SDH30_CMD_DAT_DELAY BIT(9)
+#define F_SDH30_EMMC_HS200 BIT(24)
+
+#define F_SDH30_MIN_CLOCK 400000
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 2f0b092d6dcc..c5ba13fae399 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -163,7 +163,6 @@ struct tmio_mmc_host {
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
- bool runtime_synced;
bool sdio_irq_enabled;
/* Mandatory callback */
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9b6e1001e77c..c4a1d49fbea4 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -1184,7 +1185,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (ret == -EPROBE_DEFER)
return ret;
- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
@@ -1248,10 +1249,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
+ dev_pm_domain_start(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
ret = mmc_add_host(mmc);
if (ret)
@@ -1333,11 +1336,6 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
- if (!host->runtime_synced) {
- host->runtime_synced = true;
- return 0;
- }
-
tmio_mmc_clk_enable(host);
tmio_mmc_hw_reset(host->mmc);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index a3680c900689..6ced1b7f642f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2070,18 +2070,11 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{ /* NOT irq */
- struct vub300_mmc_host *vub300 = mmc_priv(mmc);
- dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
-}
-
static const struct mmc_host_ops vub300_mmc_ops = {
.request = vub300_mmc_request,
.set_ios = vub300_mmc_set_ios,
.get_ro = vub300_mmc_get_ro,
.enable_sdio_irq = vub300_enable_sdio_irq,
- .init_card = vub300_init_card,
};
static int vub300_probe(struct usb_interface *interface,
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 79a53cb8507b..00a79489067c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1353,7 +1353,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
adr += chip->start;
@@ -1383,7 +1383,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, last_end = 0;
int chipnum;
- int ret = 0;
+ int ret;
if (!map->virt)
return -EINVAL;
@@ -1550,7 +1550,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd;
- int ret=0;
+ int ret;
adr += chip->start;
@@ -1624,7 +1624,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -1871,7 +1871,7 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs, vec_seek, i;
size_t len = 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index cf8c8be40a9c..04b383bc3947 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -123,19 +123,23 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
-static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
- unsigned long adr)
+static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status;
if (!cfi_use_status_reg(cfi))
- return;
+ return 0;
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
status = map_read(map, adr);
+ /* The error bits are invalid while the chip is busy */
+ if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
+ return 0;
+
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned long chipstatus = MERGESTATUS(status);
@@ -151,7 +155,12 @@ static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
if (chipstatus & CFI_SR_SLSB)
pr_err("%s sector write protected, status %lx\n",
map->name, chipstatus);
+
+ /* The Erase/Program status bits are set on operation failure */
+ if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
+ return 1;
}
+ return 0;
}
/* #define DEBUG_CFI_FEATURES */
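The reworked helper above only trusts the error bits once the device-ready bit is set, because those bits are undefined while the chip is busy. A minimal userspace sketch of that decision; the bit assignments follow the usual CFI status-register layout and are illustrative rather than copied from this patch:

#include <stdio.h>

/* Illustrative CFI status-register bits */
#define CFI_SR_DRB (1 << 7)	/* device ready */
#define CFI_SR_ESB (1 << 5)	/* erase status */
#define CFI_SR_PSB (1 << 4)	/* program status */

/* Mirrors the reworked flow: poll for ready first, then look at the
 * error bits, which are undefined while the device is still busy. */
static int op_result(unsigned int status)
{
	if (!(status & CFI_SR_DRB))
		return -1;	/* still busy: keep polling, ignore error bits */
	return (status & (CFI_SR_ESB | CFI_SR_PSB)) ? 1 : 0;	/* 1 = failed */
}

int main(void)
{
	printf("%d\n", op_result(CFI_SR_ESB));			/* busy: -1 */
	printf("%d\n", op_result(CFI_SR_DRB));			/* ready, ok: 0 */
	printf("%d\n", op_result(CFI_SR_DRB | CFI_SR_PSB));	/* ready, failed: 1 */
	return 0;
}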
@@ -785,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
- kfree(cfi->cfiq);
return NULL;
}
@@ -848,20 +856,16 @@ static int __xipram chip_good(struct map_info *map, struct flchip *chip,
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB);
- map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
+
/*
* For chips that support status register, check device
- * ready bit and Erase/Program status bit to know if
- * operation succeeded.
+ * ready bit
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
curd = map_read(map, addr);
- if (map_word_andequal(map, curd, ready, ready))
- return !map_word_bitsset(map, curd, err);
-
- return 0;
+ return map_word_andequal(map, curd, ready, ready);
}
oldd = map_read(map, addr);
@@ -1699,8 +1703,11 @@ static int __xipram do_write_oneword_once(struct map_info *map,
break;
}
- if (chip_good(map, chip, adr, datum))
+ if (chip_good(map, chip, adr, datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
@@ -1713,7 +1720,7 @@ static int __xipram do_write_oneword_start(struct map_info *map,
struct flchip *chip,
unsigned long adr, int mode)
{
- int ret = 0;
+ int ret;
mutex_lock(&chip->mutex);
@@ -1773,7 +1780,6 @@ static int __xipram do_write_oneword_retry(struct map_info *map,
ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -1791,7 +1797,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode)
{
- int ret = 0;
+ int ret;
adr += chip->start;
@@ -1815,7 +1821,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
@@ -1970,12 +1976,17 @@ static int __xipram do_write_buffer_wait(struct map_info *map,
*/
if (time_after(jiffies, timeo) &&
!chip_good(map, chip, adr, datum)) {
+ pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
+ __func__, adr);
ret = -EIO;
break;
}
- if (chip_good(map, chip, adr, datum))
+ if (chip_good(map, chip, adr, datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
@@ -2014,7 +2025,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
int len)
{
struct cfi_private *cfi = map->fldrv_priv;
- int ret = -EIO;
+ int ret;
unsigned long cmd_adr;
int z, words;
map_word datum;
@@ -2071,12 +2082,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
chip->word_write_time);
ret = do_write_buffer_wait(map, chip, adr, datum);
- if (ret) {
- cfi_check_err_status(map, chip, adr);
+ if (ret)
do_write_buffer_reset(map, chip, cfi);
- pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
- __func__, adr);
- }
xip_enable(map, chip, adr);
@@ -2095,7 +2102,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -2232,7 +2239,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int retry_cnt = 0;
map_word oldd;
- int ret = 0;
+ int ret;
int i;
adr += chip->start;
@@ -2271,9 +2278,9 @@ retry:
udelay(1);
}
- if (!chip_good(map, chip, adr, datum)) {
+ if (!chip_good(map, chip, adr, datum) ||
+ cfi_check_err_status(map, chip, adr)) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -2307,7 +2314,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, chipstart;
- int ret = 0;
+ int ret;
int chipnum;
chipnum = to >> cfi->chipshift;
@@ -2411,7 +2418,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
unsigned long timeo = jiffies + HZ;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
int retry_cnt = 0;
adr = cfi->addr_unlock1;
@@ -2467,8 +2474,11 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map)))
+ if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
@@ -2483,7 +2493,6 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -2508,7 +2517,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
int retry_cnt = 0;
adr += chip->start;
@@ -2564,8 +2573,11 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map)))
+ if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
@@ -2580,7 +2592,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index e752067526a5..54edae63b92d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -611,7 +611,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -895,7 +895,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
{ struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr, len;
- int chipnum, ret = 0;
+ int chipnum, ret;
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
@@ -1132,7 +1132,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
- int chipnum, ret = 0;
+ int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
@@ -1279,7 +1279,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
- int chipnum, ret = 0;
+ int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e3b266ee06af..e2d4db05aeb3 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -26,7 +26,7 @@
void cfi_udelay(int us)
{
if (us >= 1000) {
- msleep((us+999)/1000);
+ msleep(DIV_ROUND_UP(us, 1000));
} else {
udelay(us);
cond_resched();
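DIV_ROUND_UP(us, 1000) performs the same ceiling division as the open-coded (us + 999) / 1000, just more readably. A tiny standalone sketch, with the macro body mirroring the kernel's definition:

#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_UP() from linux/kernel.h */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d\n", DIV_ROUND_UP(1500, 1000)); /* 2, same as (1500 + 999) / 1000 */
	printf("%d\n", DIV_ROUND_UP(1000, 1000)); /* 1: exact multiples don't round up */
	return 0;
}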
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index b20d02b4f830..77c872fd3d83 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -64,15 +64,17 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
- int ret;
+ int ret, cmd_len;
spi_message_init(&message);
+ cmd_len = mchp23k256_cmdsz(flash);
+
command[0] = MCHP23K256_CMD_WRITE;
mchp23k256_addr2cmd(flash, to, command);
transfer[0].tx_buf = command;
- transfer[0].len = mchp23k256_cmdsz(flash);
+ transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].tx_buf = buf;
@@ -88,8 +90,8 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- if (retlen && message.actual_length > sizeof(command))
- *retlen += message.actual_length - sizeof(command);
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
return 0;
}
@@ -101,16 +103,18 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
- int ret;
+ int ret, cmd_len;
spi_message_init(&message);
+ cmd_len = mchp23k256_cmdsz(flash);
+
memset(&transfer, 0, sizeof(transfer));
command[0] = MCHP23K256_CMD_READ;
mchp23k256_addr2cmd(flash, from, command);
transfer[0].tx_buf = command;
- transfer[0].len = mchp23k256_cmdsz(flash);
+ transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].rx_buf = buf;
@@ -126,8 +130,8 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
if (ret)
return ret;
- if (retlen && message.actual_length > sizeof(command))
- *retlen += message.actual_length - sizeof(command);
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
return 0;
}
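Both hunks above fix the residual-length accounting: command[] is always MAX_CMD_SIZE bytes, but the command actually placed on the wire is mchp23k256_cmdsz() bytes, which can be shorter, so subtracting sizeof(command) undercounted *retlen. A standalone sketch of the arithmetic, using hypothetical sizes:

#include <stdio.h>

#define MAX_CMD_SIZE 4	/* size of the on-stack command[] buffer */

int main(void)
{
	int cmd_len = 3;		/* hypothetical: 1 opcode + 2 address bytes */
	int actual_length = 3 + 16;	/* command plus 16 payload bytes on the wire */

	/* old accounting: subtracts the buffer size, losing a byte per transfer */
	printf("old retlen: %d\n", actual_length - MAX_CMD_SIZE);	/* 15 */
	/* new accounting: subtracts the real command length */
	printf("new retlen: %d\n", actual_length - cmd_len);		/* 16 */
	return 0;
}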
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 986f81d2f93e..79dcca16481d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -592,6 +592,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
+/*
+ * The purpose of this function is to ensure a memcpy_toio() with byte writes
+ * only. Its structure is inspired by the ARM implementation of _memcpy_toio(),
+ * which also does single-byte writes but cannot be used here, as it is an
+ * implementation detail rather than part of the API; not to mention the
+ * comment stating that _memcpy_toio() should be optimized.
+ */
+static void spear_smi_memcpy_toio_b(volatile void __iomem *dest,
+ const void *src, size_t len)
+{
+ const unsigned char *from = src;
+
+ while (len) {
+ len--;
+ writeb(*from, dest);
+ from++;
+ dest++;
+ }
+}
+
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
void __iomem *dest, const void *src, size_t len)
{
@@ -614,7 +634,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
ctrlreg1 = readl(dev->io_base + SMI_CR1);
writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
- memcpy_toio(dest, src, len);
+ /*
+ * In Write Burst mode (WB_MODE), the spec states that writes must be:
+ * - incremental
+ * - of the same size
+ * The ARM implementation of memcpy_toio() will optimize the number of
+ * I/O accesses by using as many 4-byte writes as possible, surrounded by
+ * 2-byte/1-byte accesses if:
+ * - the destination is not 4-byte aligned
+ * - the length is not a multiple of 4 bytes.
+ * Avoid this alternation of write access sizes by using our own 'byte
+ * access' helper if at least one of the two conditions above is true.
+ */
+ if (IS_ALIGNED(len, sizeof(u32)) &&
+ IS_ALIGNED((uintptr_t)dest, sizeof(u32)))
+ memcpy_toio(dest, src, len);
+ else
+ spear_smi_memcpy_toio_b(dest, src, len);
writel(ctrlreg1, dev->io_base + SMI_CR1);
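The gate above routes a transfer through memcpy_toio() only when both the destination and the length are 4-byte aligned; anything else takes the byte-at-a-time helper so that WB_MODE only ever sees same-size writes. A userspace sketch of the same predicate, with made-up offsets and lengths:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's IS_ALIGNED(); 'a' must be a power of two. */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	/* Hypothetical destination offsets and lengths inside the SMI window */
	struct { uintptr_t dest; size_t len; } xfers[] = {
		{ 0x1000, 16 },	/* aligned dest, multiple of 4: memcpy_toio() */
		{ 0x1002, 16 },	/* misaligned dest: byte-at-a-time helper */
		{ 0x1000, 10 },	/* odd length: byte-at-a-time helper */
	};
	int i;

	for (i = 0; i < 3; i++) {
		int fast = IS_ALIGNED(xfers[i].len, sizeof(uint32_t)) &&
			   IS_ALIGNED(xfers[i].dest, sizeof(uint32_t));

		printf("dest=%#lx len=%zu -> %s\n",
		       (unsigned long)xfers[i].dest, xfers[i].len,
		       fast ? "memcpy_toio" : "spear_smi_memcpy_toio_b");
	}
	return 0;
}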
@@ -777,9 +813,6 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev,
/* Fill structs for each subnode (flash device) */
while ((pp = of_get_next_child(np, pp))) {
- struct spear_smi_flash_info *flash_info;
-
- flash_info = &pdata->board_flash_info[i];
pdata->np[i] = pp;
/* Read base-addr and size from DT */
@@ -933,7 +966,6 @@ static int spear_smi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
- dev_err(&pdev->dev, "invalid smi irq\n");
goto err;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index f4d1667daaf9..1888523d9745 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -255,7 +255,6 @@ struct stfsm_seq {
struct stfsm {
struct device *dev;
void __iomem *base;
- struct resource *region;
struct mtd_info mtd;
struct mutex lock;
struct flash_info *info;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bc82305ebb4c..b28225a7c4f3 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -96,6 +96,17 @@ config MTD_PHYSMAP_GEMINI
platforms, some detection and setting up parallel mode on the
external interface.
+config MTD_PHYSMAP_IXP4XX
+ bool "Intel IXP4xx OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on ARM
+ select MTD_COMPLEX_MAPPINGS
+ select MTD_CFI_BE_BYTE_SWAP if CPU_BIG_ENDIAN
+ default ARCH_IXP4XX
+ help
+ This provides some extra DT physmap parsing for the Intel IXP4xx
+ platforms, in particular some elaborate endianness handling.
+
config MTD_PHYSMAP_GPIO_ADDR
bool "GPIO-assisted Flash Chip Support"
depends on MTD_PHYSMAP
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 1146009f41df..c0da86a5d26f 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
physmap-objs-y += physmap-core.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 876f12f40018..0eeadfeb620d 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -86,7 +86,7 @@ static int __init init_l440gx(void)
return -ENOMEM;
}
simple_map_init(&l440gx_map);
- printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
+ pr_debug("window_addr = %p\n", l440gx_map.virt);
/* Setup the pm iobase resource
* This code should move into some kind of generic bridge
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 21b556afc305..a9f7964e2edb 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -41,6 +41,7 @@
#include <linux/gpio/consumer.h>
#include "physmap-gemini.h"
+#include "physmap-ixp4xx.h"
#include "physmap-versatile.h"
struct physmap_flash_info {
@@ -370,6 +371,10 @@ static int physmap_flash_of_init(struct platform_device *dev)
if (err)
return err;
+ err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
if (err)
return err;
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
new file mode 100644
index 000000000000..6a054229a8a0
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IXP4xx OF physmap add-on
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the ixp4xx.c map driver, originally written by:
+ * Intel Corporation
+ * Deepak Saxena <dsaxena@mvista.com>
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include "physmap-ixp4xx.h"
+
+/*
+ * Read/write a 16-bit word from flash address 'addr'.
+ *
+ * When the cpu is in little-endian mode it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ * D15 D0
+ * +---+---+
+ * | A | B | 0
+ * +---+---+
+ * | C | D | 2
+ * +---+---+
+ * This means that on LE systems each 16-bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define BYTE0(h) ((h) & 0xFF)
+#define BYTE1(h) (((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(d, addr);
+}
+
+#define BYTE0(h) (((h) >> 8) & 0xFF)
+#define BYTE1(h) ((h) & 0xFF)
+#endif
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+
+ val.x[0] = flash_read16(map->virt + ofs);
+ return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 *dest = (u8 *) to;
+ void __iomem *src = map->virt + from;
+
+ if (len <= 0)
+ return;
+
+ if (from & 1) {
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
+ --len;
+ }
+
+ while (len >= 2) {
+ u16 data = flash_read16(src);
+ *dest++ = BYTE0(data);
+ *dest++ = BYTE1(data);
+ src += 2;
+ len -= 2;
+ }
+
+ if (len > 0)
+ *dest++ = BYTE0(flash_read16(src));
+}
+
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device *dev = &pdev->dev;
+
+ /* Multiplatform guard */
+ if (!of_device_is_compatible(np, "intel,ixp4xx-flash"))
+ return 0;
+
+ map->read = ixp4xx_read16;
+ map->write = ixp4xx_write16;
+ map->copy_from = ixp4xx_copy_from;
+ map->copy_to = NULL;
+
+ dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n");
+
+ return 0;
+}
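ixp4xx_copy_from() above deals with a misaligned head byte, a 16-bit-wide body, and a trailing byte. A userspace model of that loop, with flash_read16() stubbed over a plain byte array; the swizzling and byte-swapping are abstracted away, read16() simply returns byte 0 in bits 7..0 and byte 1 in bits 15..8, which is the view the LE BYTE0()/BYTE1() macros produce:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in flash contents */
static const uint8_t flash[] = "ABCDEFGH";

static uint16_t read16(unsigned long ofs)
{
	return (uint16_t)(flash[ofs] | (flash[ofs + 1] << 8));
}

#define BYTE0(h) ((h) & 0xFF)
#define BYTE1(h) (((h) >> 8) & 0xFF)

/* Same shape as ixp4xx_copy_from(): head byte, 16-bit body, tail byte. */
static void copy_from(uint8_t *dest, unsigned long from, long len)
{
	if (from & 1) {
		*dest++ = BYTE1(read16(from - 1));
		from++;
		len--;
	}
	while (len >= 2) {
		uint16_t d = read16(from);

		*dest++ = BYTE0(d);
		*dest++ = BYTE1(d);
		from += 2;
		len -= 2;
	}
	if (len > 0)
		*dest = BYTE0(read16(from));
}

int main(void)
{
	uint8_t buf[6] = { 0 };

	copy_from(buf, 1, 5);		/* odd start offset, odd length */
	printf("%s\n", (char *)buf);	/* prints "BCDEF" */
	return 0;
}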
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
new file mode 100644
index 000000000000..b0fc49b7f3ed
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 975aed94f06c..b841008a9eb7 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -174,7 +174,7 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
break;
case MTD_FILE_MODE_RAW:
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
@@ -268,7 +268,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
case MTD_FILE_MODE_RAW:
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
@@ -350,7 +350,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
uint32_t retlen;
int ret = 0;
@@ -394,7 +394,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
int ret = 0;
if (length > 4096)
@@ -587,7 +587,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
int ret;
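The repeated change above swaps an uninitialized on-stack struct mtd_oob_ops for the `= {}` empty initializer, which zeroes every member, so fields a given path never assigns (oobbuf, ooblen and friends) read as NULL/0 instead of stack garbage. A reduced standalone illustration; the struct below is a stand-in, not the real mtd_oob_ops layout:

#include <stdio.h>
#include <stddef.h>

/* Reduced stand-in for struct mtd_oob_ops; fields are illustrative. */
struct oob_ops {
	unsigned int mode;
	size_t len, ooblen;
	void *datbuf, *oobbuf;
};

int main(void)
{
	char kbuf[16];
	/* The empty initializer (a GNU C idiom the kernel relies on) zeroes
	 * every member, including the ones this caller never touches. */
	struct oob_ops ops = {};

	ops.mode = 1;
	ops.datbuf = kbuf;
	ops.len = sizeof(kbuf);

	/* ooblen and oobbuf read back as 0/NULL instead of stack garbage */
	printf("ooblen=%zu oobbuf=%p\n", ops.ooblen, ops.oobbuf);
	return 0;
}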
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6cc7ecb0c788..5fac4355b9c2 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -382,33 +382,21 @@ static struct dentry *dfs_dir_mtd;
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
struct device *dev = &mtd->dev;
- struct dentry *root, *dent;
+ struct dentry *root;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
- if (IS_ERR_OR_NULL(root)) {
- dev_dbg(dev, "won't show data in debugfs\n");
- return;
- }
-
mtd->dbg.dfs_dir = root;
- if (mtd->dbg.partid) {
- dent = debugfs_create_file("partid", 0400, root, mtd,
- &mtd_partid_debug_fops);
- if (IS_ERR_OR_NULL(dent))
- dev_err(dev, "can't create debugfs entry for partid\n");
- }
+ if (mtd->dbg.partid)
+ debugfs_create_file("partid", 0400, root, mtd,
+ &mtd_partid_debug_fops);
- if (mtd->dbg.partname) {
- dent = debugfs_create_file("partname", 0400, root, mtd,
- &mtd_partname_debug_fops);
- if (IS_ERR_OR_NULL(dent))
- dev_err(dev,
- "can't create debugfs entry for partname\n");
- }
+ if (mtd->dbg.partname)
+ debugfs_create_file("partname", 0400, root, mtd,
+ &mtd_partname_debug_fops);
}
#ifndef CONFIG_MMU
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f92414eb4c86..58eefa43af14 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1257,7 +1257,6 @@ DEFINE_SHOW_ATTRIBUTE(mtdswap);
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
struct dentry *root = d->mtd->dbg.dfs_dir;
- struct dentry *dent;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -1265,12 +1264,7 @@ static int mtdswap_add_debugfs(struct mtdswap_dev *d)
if (IS_ERR_OR_NULL(root))
return -1;
- dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d,
- &mtdswap_fops);
- if (!dent) {
- dev_err(d->dev, "debugfs_create_file failed\n");
- return -1;
- }
+ debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
return 0;
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e59de3f60cf6..74fb91adeb46 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -450,6 +450,13 @@ config MTD_NAND_PLATFORM
devices. You will need to provide platform-specific functions
via platform_data.
+config MTD_NAND_CADENCE
+ tristate "Support Cadence NAND (HPNFC) controller"
+ depends on OF || COMPILE_TEST
+ help
+ Enable the driver for NAND flash on platforms using a Cadence NAND
+ controller.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index a98721988e61..2d136b158fb7 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 15ef30b368a5..1a66b1cd51c0 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -117,6 +117,18 @@ enum flash_dma_reg {
FLASH_DMA_CURRENT_DESC_EXT,
};
+/* flash_dma registers v0*/
+static const u16 flash_dma_regs_v0[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_CTRL] = 0x08,
+ [FLASH_DMA_MODE] = 0x0c,
+ [FLASH_DMA_STATUS] = 0x10,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x14,
+ [FLASH_DMA_ERROR_STATUS] = 0x18,
+ [FLASH_DMA_CURRENT_DESC] = 0x1c,
+};
+
/* flash_dma registers v1*/
static const u16 flash_dma_regs_v1[] = {
[FLASH_DMA_REVISION] = 0x00,
@@ -597,6 +609,8 @@ static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
/* flash_dma register offsets */
if (ctrl->nand_version >= 0x0703)
ctrl->flash_dma_offsets = flash_dma_regs_v4;
+ else if (ctrl->nand_version == 0x0602)
+ ctrl->flash_dma_offsets = flash_dma_regs_v0;
else
ctrl->flash_dma_offsets = flash_dma_regs_v1;
}
@@ -918,7 +932,7 @@ static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
return;
if (has_flash_dma(ctrl)) {
- ctrl->flash_dma_base = 0;
+ ctrl->flash_dma_base = NULL;
disable_irq(ctrl->dma_irq);
}
@@ -1673,8 +1687,11 @@ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
- flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
- (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ if (ctrl->nand_version > 0x0602) {
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+ upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ }
/* Start FLASH_DMA engine */
ctrl->dma_pending = true;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
new file mode 100644
index 000000000000..3a36285a8d8a
--- /dev/null
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -0,0 +1,3030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cadence NAND flash controller driver
+ *
+ * Copyright (C) 2019 Cadence
+ *
+ * Author: Piotr Sroka <piotrs@cadence.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+
+/*
+ * HPNFC can work in 3 modes:
+ * - PIO - can work in master or slave DMA
+ * - CDMA - needs Master DMA for accessing command descriptors.
+ * - Generic mode - can use only slave DMA.
+ * CDMA and PIO modes can be used to execute only base commands.
+ * Generic mode can be used to execute any command
+ * on the NAND flash memory. The driver uses CDMA mode for
+ * block erasing, page reading and page programming.
+ * Generic mode is used for executing the rest of the commands.
+ */
+
+#define MAX_OOB_SIZE_PER_SECTOR 32
+#define MAX_ADDRESS_CYC 6
+#define MAX_ERASE_ADDRESS_CYC 3
+#define MAX_DATA_SIZE 0xFFFC
+#define DMA_DATA_SIZE_ALIGN 8
+
+/* Register definition. */
+/*
+ * Command register 0.
+ * Writing data to this register will initiate a new transaction
+ * of the NF controller.
+ */
+#define CMD_REG0 0x0000
+/* Command type field mask. */
+#define CMD_REG0_CT GENMASK(31, 30)
+/* Command type CDMA. */
+#define CMD_REG0_CT_CDMA 0uL
+/* Command type generic. */
+#define CMD_REG0_CT_GEN 3uL
+/* Command thread number field mask. */
+#define CMD_REG0_TN GENMASK(27, 24)
+
+/* Command register 2. */
+#define CMD_REG2 0x0008
+/* Command register 3. */
+#define CMD_REG3 0x000C
+/* Pointer register to select which thread's status will be reported. */
+#define CMD_STATUS_PTR 0x0010
+/* Command status register for selected thread. */
+#define CMD_STATUS 0x0014
+
+/* Interrupt status register. */
+#define INTR_STATUS 0x0110
+#define INTR_STATUS_SDMA_ERR BIT(22)
+#define INTR_STATUS_SDMA_TRIGG BIT(21)
+#define INTR_STATUS_UNSUPP_CMD BIT(19)
+#define INTR_STATUS_DDMA_TERR BIT(18)
+#define INTR_STATUS_CDMA_TERR BIT(17)
+#define INTR_STATUS_CDMA_IDL BIT(16)
+
+/* Interrupt enable register. */
+#define INTR_ENABLE 0x0114
+#define INTR_ENABLE_INTR_EN BIT(31)
+#define INTR_ENABLE_SDMA_ERR_EN BIT(22)
+#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
+#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
+#define INTR_ENABLE_DDMA_TERR_EN BIT(18)
+#define INTR_ENABLE_CDMA_TERR_EN BIT(17)
+#define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
+
+/* Controller internal state. */
+#define CTRL_STATUS 0x0118
+#define CTRL_STATUS_INIT_COMP BIT(9)
+#define CTRL_STATUS_CTRL_BUSY BIT(8)
+
+/* Command Engine threads state. */
+#define TRD_STATUS 0x0120
+
+/* Command Engine interrupt thread error status. */
+#define TRD_ERR_INT_STATUS 0x0128
+/* Command Engine interrupt thread error enable. */
+#define TRD_ERR_INT_STATUS_EN 0x0130
+/* Command Engine interrupt thread complete status. */
+#define TRD_COMP_INT_STATUS 0x0138
+
+/*
+ * Transfer config 0 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_0 0x0400
+/* Offset value from the beginning of the page. */
+#define TRAN_CFG_0_OFFSET GENMASK(31, 16)
+/* Number of sectors to transfer within a single NF device's page. */
+#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
+
+/*
+ * Transfer config 1 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_1 0x0404
+/* Size of last data sector. */
+#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
+/* Size of a data sector other than the last one. */
+#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
+
+/* ECC engine configuration register 0. */
+#define ECC_CONFIG_0 0x0428
+/* Correction strength. */
+#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
+/* Enable erased pages detection mechanism. */
+#define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
+/* Enable controller ECC check bits generation and correction. */
+#define ECC_CONFIG_0_ECC_EN BIT(0)
+
+/* ECC engine configuration register 1. */
+#define ECC_CONFIG_1 0x042C
+
+/* Multiplane settings register. */
+#define MULTIPLANE_CFG 0x0434
+/* Cache operation settings. */
+#define CACHE_CFG 0x0438
+
+/* DMA settings register. */
+#define DMA_SETINGS 0x043C
+/* Enable SDMA error reporting on accesses to an unprepared slave DMA interface. */
+#define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
+
+/* Transferred data block size for the slave DMA module. */
+#define SDMA_SIZE 0x0440
+
+/*
+ * Thread number associated with transferred data block
+ * for the slave DMA module.
+ */
+#define SDMA_TRD_NUM 0x0444
+/* Thread number mask. */
+#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
+
+#define CONTROL_DATA_CTRL 0x0494
+/* Control data size mask. */
+#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
+
+#define CTRL_VERSION 0x800
+#define CTRL_VERSION_REV GENMASK(7, 0)
+
+/* Available hardware features of the controller. */
+#define CTRL_FEATURES 0x804
+/* Support for NV-DDR2/3 work mode. */
+#define CTRL_FEATURES_NVDDR_2_3 BIT(28)
+/* Support for NV-DDR work mode. */
+#define CTRL_FEATURES_NVDDR BIT(27)
+/* Support for asynchronous work mode. */
+#define CTRL_FEATURES_ASYNC BIT(26)
+/* Number of banks supported by the hardware. */
+#define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
+/* Slave and Master DMA data width. */
+#define CTRL_FEATURES_DMA_DWITH64 BIT(21)
+/* Availability of the Control Data feature. */
+#define CTRL_FEATURES_CONTROL_DATA BIT(10)
+
+/* BCH Engine identification register 0 - correction strengths. */
+#define BCH_CFG_0 0x838
+#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
+#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
+#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
+#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
+
+/* BCH Engine identification register 1 - correction strengths. */
+#define BCH_CFG_1 0x83C
+#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
+#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
+#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
+#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
+
+/* BCH Engine identification register 2 - sector sizes. */
+#define BCH_CFG_2 0x840
+#define BCH_CFG_2_SECT_0 GENMASK(15, 0)
+#define BCH_CFG_2_SECT_1 GENMASK(31, 16)
+
+/* BCH Engine identification register 3. */
+#define BCH_CFG_3 0x844
+
+/* Ready/Busy# line status. */
+#define RBN_SETINGS 0x1004
+
+/* Common settings. */
+#define COMMON_SET 0x1008
+/* 16 bit device connected to the NAND Flash interface. */
+#define COMMON_SET_DEVICE_16BIT BIT(8)
+
+/* Skip_bytes registers. */
+#define SKIP_BYTES_CONF 0x100C
+#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
+#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
+
+#define SKIP_BYTES_OFFSET 0x1010
+#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
+
+/* Timings configuration. */
+#define ASYNC_TOGGLE_TIMINGS 0x101c
+#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
+#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
+#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
+#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
+
+#define TIMINGS0 0x1024
+#define TIMINGS0_TADL GENMASK(31, 24)
+#define TIMINGS0_TCCS GENMASK(23, 16)
+#define TIMINGS0_TWHR GENMASK(15, 8)
+#define TIMINGS0_TRHW GENMASK(7, 0)
+
+#define TIMINGS1 0x1028
+#define TIMINGS1_TRHZ GENMASK(31, 24)
+#define TIMINGS1_TWB GENMASK(23, 16)
+#define TIMINGS1_TVDLY GENMASK(7, 0)
+
+#define TIMINGS2 0x102c
+#define TIMINGS2_TFEAT GENMASK(25, 16)
+#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
+#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
+
+/* Configuration of the resynchronization of slave DLL of PHY. */
+#define DLL_PHY_CTRL 0x1034
+#define DLL_PHY_CTRL_DLL_RST_N BIT(24)
+#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
+#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
+#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
+#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
+
+/* Register controlling DQ related timing. */
+#define PHY_DQ_TIMING 0x2000
+/* Register controlling DQS related timing. */
+#define PHY_DQS_TIMING 0x2004
+#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
+#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
+#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
+
+/* Register controlling the gate and loopback control related timing. */
+#define PHY_GATE_LPBK_CTRL 0x2008
+#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
+
+/* Register holds the control for the master DLL logic. */
+#define PHY_DLL_MASTER_CTRL 0x200C
+#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
+
+/* Register holds the control for the slave DLL logic. */
+#define PHY_DLL_SLAVE_CTRL 0x2010
+
+/* This register handles the global control settings for the PHY. */
+#define PHY_CTRL 0x2080
+#define PHY_CTRL_SDR_DQS BIT(14)
+#define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
+
+/*
+ * This register handles the global control settings
+ * for the termination selects for reads.
+ */
+#define PHY_TSEL 0x2084
+
+/* Generic command layout. */
+#define GCMD_LAY_CS GENMASK_ULL(11, 8)
+/*
+ * This bit informs the minicontroller if it has to wait for tWB
+ * after sending the last CMD/ADDR/DATA in the sequence.
+ */
+#define GCMD_LAY_TWB BIT_ULL(6)
+/* Type of generic instruction. */
+#define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
+
+/* Generic CMD sequence type. */
+#define GCMD_LAY_INSTR_CMD 0
+/* Generic ADDR sequence type. */
+#define GCMD_LAY_INSTR_ADDR 1
+/* Generic data transfer sequence type. */
+#define GCMD_LAY_INSTR_DATA 2
+
+/* Input part of a generic command when the type of input is a command. */
+#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
+
+/* Generic command address sequence - address fields. */
+#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
+/* Generic command address sequence - address size. */
+#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
+
+/* Transfer direction field of generic command data sequence. */
+#define GCMD_DIR BIT_ULL(11)
+/* Read transfer direction of generic command data sequence. */
+#define GCMD_DIR_READ 0
+/* Write transfer direction of generic command data sequence. */
+#define GCMD_DIR_WRITE 1
+
+/* ECC enabled flag of generic command data sequence - ECC enabled. */
+#define GCMD_ECC_EN BIT_ULL(12)
+/* Generic command data sequence - sector size. */
+#define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
+/* Generic command data sequence - sector count. */
+#define GCMD_SECT_CNT GENMASK_ULL(39, 32)
+/* Generic command data sequence - last sector size. */
+#define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
+
+/* CDMA descriptor fields. */
+/* Erase command type of CDMA descriptor. */
+#define CDMA_CT_ERASE 0x1000
+/* Program page command type of CDMA descriptor. */
+#define CDMA_CT_WR 0x2100
+/* Read page command type of CDMA descriptor. */
+#define CDMA_CT_RD 0x2200
+
+/* Flash pointer memory shift. */
+#define CDMA_CFPTR_MEM_SHIFT 24
+/* Flash pointer memory mask. */
+#define CDMA_CFPTR_MEM GENMASK(26, 24)
+
+/*
+ * Command DMA descriptor flags. If set causes issue interrupt after
+ * the completion of descriptor processing.
+ */
+#define CDMA_CF_INT BIT(8)
+/*
+ * Command DMA descriptor flags - the next descriptor
+ * address field is valid and descriptor processing should continue.
+ */
+#define CDMA_CF_CONT BIT(9)
+/* DMA master flag of command DMA descriptor. */
+#define CDMA_CF_DMA_MASTER BIT(10)
+
+/* Operation complete status of command descriptor. */
+#define CDMA_CS_COMP BIT(15)
+/* Command descriptor status - operation failed. */
+#define CDMA_CS_FAIL BIT(14)
+/* Command descriptor status - page erased. */
+#define CDMA_CS_ERP BIT(11)
+/* Command descriptor status - timeout occurred. */
+#define CDMA_CS_TOUT BIT(10)
+/*
+ * Maximum amount of correction applied to one ECC sector.
+ * It is part of command descriptor status.
+ */
+#define CDMA_CS_MAXERR GENMASK(9, 2)
+/* Command descriptor status - uncorrectable ECC error. */
+#define CDMA_CS_UNCE BIT(1)
+/* Command descriptor status - descriptor error. */
+#define CDMA_CS_ERR BIT(0)
+
+/* Status of operation - OK. */
+#define STAT_OK 0
+/* Status of operation - FAIL. */
+#define STAT_FAIL 2
+/* Status of operation - uncorrectable ECC error. */
+#define STAT_ECC_UNCORR 3
+/* Status of operation - page erased. */
+#define STAT_ERASED 5
+/* Status of operation - correctable ECC error. */
+#define STAT_ECC_CORR 6
+/* Status of operation - unexpected state. */
+#define STAT_UNKNOWN 7
+/* Status of operation - operation is not completed yet. */
+#define STAT_BUSY 0xFF
+
+#define BCH_MAX_NUM_CORR_CAPS 8
+#define BCH_MAX_NUM_SECTOR_SIZES 2
+
+struct cadence_nand_timings {
+ u32 async_toggle_timings;
+ u32 timings0;
+ u32 timings1;
+ u32 timings2;
+ u32 dll_phy_ctrl;
+ u32 phy_ctrl;
+ u32 phy_dqs_timing;
+ u32 phy_gate_lpbk_ctrl;
+};
+
+/* Command DMA descriptor. */
+struct cadence_nand_cdma_desc {
+ /* Next descriptor address. */
+ u64 next_pointer;
+
+ /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
+ u32 flash_pointer;
+ /* Field appears in HPNFC version 13. */
+ u16 bank;
+ u16 rsvd0;
+
+ /* Operation the controller needs to perform. */
+ u16 command_type;
+ u16 rsvd1;
+ /* Flags for operation of this command. */
+ u16 command_flags;
+ u16 rsvd2;
+
+ /* System/host memory address required for data DMA commands. */
+ u64 memory_pointer;
+
+ /* Status of operation. */
+ u32 status;
+ u32 rsvd3;
+
+ /* Address pointer to sync buffer location. */
+ u64 sync_flag_pointer;
+
+ /* Controls the buffer sync mechanism. */
+ u32 sync_arguments;
+ u32 rsvd4;
+
+ /* Control data pointer. */
+ u64 ctrl_data_ptr;
+};
+
+/* Interrupt status. */
+struct cadence_nand_irq_status {
+ /* Thread operation complete status. */
+ u32 trd_status;
+ /* Thread operation error. */
+ u32 trd_error;
+ /* Controller status. */
+ u32 status;
+};
+
+/* Cadence NAND flash controller capabilities obtained from the driver data. */
+struct cadence_nand_dt_devdata {
+ /* Skew value of the output signals of the NAND Flash interface. */
+ u32 if_skew;
+ /* Indicates whether the slave DMA interface is connected to a DMA engine. */
+ unsigned int has_dma:1;
+};
+
+/* Cadence NAND flash controller capabilities read from registers. */
+struct cdns_nand_caps {
+ /* Maximum number of banks supported by hardware. */
+ u8 max_banks;
+ /* Slave and Master DMA data width in bytes (4 or 8). */
+ u8 data_dma_width;
+ /* Control Data feature supported. */
+ bool data_control_supp;
+ /* Is PHY type DLL. */
+ bool is_phy_type_dll;
+};
+
+struct cdns_nand_ctrl {
+ struct device *dev;
+ struct nand_controller controller;
+ struct cadence_nand_cdma_desc *cdma_desc;
+ /* IP capability. */
+ const struct cadence_nand_dt_devdata *caps1;
+ struct cdns_nand_caps caps2;
+ u8 ctrl_rev;
+ dma_addr_t dma_cdma_desc;
+ u8 *buf;
+ u32 buf_size;
+ u8 curr_corr_str_idx;
+
+ /* Register interface. */
+ void __iomem *reg;
+
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ int irq;
+ /* Interrupts that have happened. */
+ struct cadence_nand_irq_status irq_status;
+ /* Interrupts we are waiting for. */
+ struct cadence_nand_irq_status irq_mask;
+ struct completion complete;
+ /* Protect irq_mask and irq_status. */
+ spinlock_t irq_lock;
+
+ int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
+ struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
+ struct nand_ecc_caps ecc_caps;
+
+ int curr_trans_type;
+
+ struct dma_chan *dmac;
+
+ u32 nf_clk_rate;
+ /*
+ * Estimated board delay. The value includes the total
+ * round-trip delay for the signals and is used for deciding on values
+ * associated with data read capture.
+ */
+ u32 board_delay;
+
+ struct nand_chip *selected_chip;
+
+ unsigned long assigned_cs;
+ struct list_head chips;
+};
+
+struct cdns_nand_chip {
+ struct cadence_nand_timings timings;
+ struct nand_chip chip;
+ u8 nsels;
+ struct list_head node;
+
+ /*
+ * Part of the OOB area of the NAND flash memory page.
+ * This part is available for the user to read or write.
+ */
+ u32 avail_oob_size;
+
+ /* Sector size. There are a few sectors per mtd->writesize. */
+ u32 sector_size;
+ u32 sector_count;
+
+ /* Offset of BBM. */
+ u8 bbm_offs;
+ /* Number of bytes reserved for BBM. */
+ u8 bbm_len;
+ /* ECC strength index. */
+ u8 corr_str_idx;
+
+ u8 cs[];
+};
+
+struct ecc_info {
+ int (*calc_ecc_bytes)(int step_size, int strength);
+ int max_step_size;
+};
+
+static inline struct
+cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct cdns_nand_chip, chip);
+}
+
+static inline struct
+cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
+{
+ return container_of(controller, struct cdns_nand_ctrl, controller);
+}
+
+static bool
+cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
+ u32 buf_len)
+{
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ return buf && virt_addr_valid(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
+ likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
+}
+
+static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 reg_offset, u32 timeout_us,
+ u32 mask, bool is_clear)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
+ val, !(val & mask) == is_clear,
+ 10, timeout_us);
+
+ if (ret < 0) {
+ dev_err(cdns_ctrl->dev,
+ "Timeout while waiting for reg %x with mask %x is clear %d\n",
+ reg_offset, mask, is_clear);
+ }
+
+ return ret;
+}
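The poll condition `!(val & mask) == is_clear` reads awkwardly but is just a two-way wait: with is_clear true it completes once the masked bits read back as zero, with is_clear false once they are set. A small standalone truth-table sketch:

#include <stdio.h>
#include <stdbool.h>

/* Models the poll exit condition used above: with is_clear true the wait
 * ends once the masked bits read back as zero, with false once they are set. */
static bool poll_done(unsigned int val, unsigned int mask, bool is_clear)
{
	return !(val & mask) == is_clear;
}

int main(void)
{
	printf("%d\n", poll_done(0x100, 0x100, true));	/* busy bit still set: 0 */
	printf("%d\n", poll_done(0x000, 0x100, true));	/* busy bit cleared: 1 */
	printf("%d\n", poll_done(0x200, 0x200, false));	/* waiting for set: 1 */
	return 0;
}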
+
+static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ECC_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ECC_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ return 0;
+}
+
+static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 corr_str_idx)
+{
+ u32 reg;
+
+ if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
+ return;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+ reg &= ~ECC_CONFIG_0_CORR_STR;
+ reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ cdns_ctrl->curr_corr_str_idx = corr_str_idx;
+}
+
+static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 strength)
+{
+ int i, corr_str_idx = -1;
+
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] == strength) {
+ corr_str_idx = i;
+ break;
+ }
+ }
+
+ return corr_str_idx;
+}
+
+static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
+ u16 marker_value)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_MARKER_VALUE;
+ reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
+ marker_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+
+ return 0;
+}
+
+static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 num_of_bytes,
+ u32 offset_value,
+ int enable)
+{
+ u32 reg, skip_bytes_offset;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ if (!enable) {
+ num_of_bytes = 0;
+ offset_value = 0;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_NUM_OF_BYTES;
+ reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
+ num_of_bytes);
+ skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
+ offset_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+ writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
+
+ return 0;
+}
+
+/* Enables/disables hardware detection of erased data. */
+static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable,
+ u8 bitflips_threshold)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ERASE_DET_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+ writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
+}
+
+static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
+ bool bit_bus16)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
+
+ if (!bit_bus16)
+ reg &= ~COMMON_SET_DEVICE_16BIT;
+ else
+ reg |= COMMON_SET_DEVICE_16BIT;
+ writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
+
+ return 0;
+}
+
+static void
+cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
+ writel_relaxed(irq_status->trd_status,
+ cdns_ctrl->reg + TRD_COMP_INT_STATUS);
+ writel_relaxed(irq_status->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS);
+}
+
+static void
+cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
+ irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+ + TRD_COMP_INT_STATUS);
+ irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+ + TRD_ERR_INT_STATUS);
+}
+
+static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ cadence_nand_read_int_status(cdns_ctrl, irq_status);
+
+ return irq_status->status || irq_status->trd_status ||
+ irq_status->trd_error;
+}
+
+static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
+ memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
+ memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
+ spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device.
+ */
+static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = dev_id;
+ struct cadence_nand_irq_status irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&cdns_ctrl->irq_lock);
+
+ if (irq_detected(cdns_ctrl, &irq_status)) {
+ /* Handle interrupt. */
+ /* First acknowledge it. */
+ cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
+ /* Status in the device context for someone to read. */
+ cdns_ctrl->irq_status.status |= irq_status.status;
+ cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
+ cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
+ /* Notify anyone who cares that it happened. */
+ complete(&cdns_ctrl->complete);
+ /* Tell the OS that we've handled this. */
+ result = IRQ_HANDLED;
+ }
+ spin_unlock(&cdns_ctrl->irq_lock);
+
+ return result;
+}
+
+static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask)
+{
+ writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
+ cdns_ctrl->reg + INTR_ENABLE);
+
+ writel_relaxed(irq_mask->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
+}
+
+static void
+cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask,
+ struct cadence_nand_irq_status *irq_status)
+{
+ unsigned long timeout = msecs_to_jiffies(10000);
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
+ timeout);
+
+ *irq_status = cdns_ctrl->irq_status;
+ if (time_left == 0) {
+ /* Timeout error. */
+ dev_err(cdns_ctrl->dev, "timeout occurred:\n");
+ dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
+ irq_status->status, irq_mask->status);
+ dev_err(cdns_ctrl->dev,
+ "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
+ irq_status->trd_status, irq_mask->trd_status);
+ dev_err(cdns_ctrl->dev,
+ "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
+ irq_status->trd_error, irq_mask->trd_error);
+ }
+}
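+
+/*
+ * Typical usage of the two helpers above (see cadence_nand_wait_on_sdma()
+ * and cadence_nand_cdma_send_and_wait() below): fill a
+ * struct cadence_nand_irq_status with the bits of interest, program it via
+ * cadence_nand_set_irq_mask(), start the operation, then call
+ * cadence_nand_wait_for_irq() and inspect the returned status bits.
+ */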
+
+/* Execute generic command on NAND controller. */
+static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 chip_nr,
+ u64 mini_ctrl_cmd)
+{
+ u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
+ mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
+ mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select generic command. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, 0);
+
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
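+
+/*
+ * For illustration, a command-only mini-controller descriptor (as built by
+ * cadence_nand_cmd_opcode() below) for the 0xFF RESET opcode would be:
+ *   mini_ctrl_cmd = FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_CMD) |
+ *		     FIELD_PREP(GCMD_LAY_INPUT_CMD, 0xFF);
+ * cadence_nand_generic_cmd_send() then ORs in the chip select, splits the
+ * 64-bit value into CMD_REG2/CMD_REG3 and triggers it through CMD_REG0.
+ */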
+
+/* Wait for data on slave DMA interface. */
+static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *out_sdma_trd,
+ u32 *out_sdma_size)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status;
+
+ irq_mask.trd_status = 0;
+ irq_mask.trd_error = 0;
+	irq_mask.status = INTR_STATUS_SDMA_TRIGG |
+			  INTR_STATUS_SDMA_ERR |
+			  INTR_STATUS_UNSUPP_CMD;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+ if (irq_status.status == 0) {
+ dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
+ return -ETIMEDOUT;
+ }
+
+ if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
+ *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
+ *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
+ *out_sdma_trd =
+ FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
+ } else {
+ dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
+ irq_status.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
+
+ cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
+
+ if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
+ cdns_ctrl->caps2.data_dma_width = 8;
+ else
+ cdns_ctrl->caps2.data_dma_width = 4;
+
+ if (reg & CTRL_FEATURES_CONTROL_DATA)
+ cdns_ctrl->caps2.data_control_supp = true;
+
+	if (reg & (CTRL_FEATURES_NVDDR_2_3 |
+		   CTRL_FEATURES_NVDDR))
+ cdns_ctrl->caps2.is_phy_type_dll = true;
+}
+
+/* Prepare CDMA descriptor. */
+static void
+cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
+ char nf_mem, u32 flash_ptr, char *mem_ptr,
+ char *ctrl_data_ptr, u16 ctype)
+{
+ struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
+
+ memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
+
+ /* Set fields for one descriptor. */
+ cdma_desc->flash_pointer = flash_ptr;
+ if (cdns_ctrl->ctrl_rev >= 13)
+ cdma_desc->bank = nf_mem;
+ else
+ cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
+
+ cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
+ cdma_desc->command_flags |= CDMA_CF_INT;
+
+ cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+ cdma_desc->status = 0;
+ cdma_desc->sync_flag_pointer = 0;
+ cdma_desc->sync_arguments = 0;
+
+ cdma_desc->command_type = ctype;
+ cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+}
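+
+/*
+ * For example, a single-page read on bank 0 of a rev >= 13 controller
+ * yields a descriptor with bank = 0, flash_pointer = page,
+ * command_type = CDMA_CT_RD, memory_pointer set to the DMA address of the
+ * data buffer, and the DMA_MASTER and INT flags set so the controller
+ * masters the transfer and raises an interrupt on completion.
+ */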
+
+static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 desc_status)
+{
+ if (desc_status & CDMA_CS_ERP)
+ return STAT_ERASED;
+
+ if (desc_status & CDMA_CS_UNCE)
+ return STAT_ECC_UNCORR;
+
+ if (desc_status & CDMA_CS_ERR) {
+		dev_err(cdns_ctrl->dev, "CDMA desc error flag detected.\n");
+ return STAT_FAIL;
+ }
+
+ if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
+ return STAT_ECC_CORR;
+
+ return STAT_FAIL;
+}
+
+static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
+ u8 status = STAT_BUSY;
+
+ if (desc_ptr->status & CDMA_CS_FAIL) {
+ status = cadence_nand_check_desc_error(cdns_ctrl,
+ desc_ptr->status);
+		dev_err(cdns_ctrl->dev, "CDMA error %x\n", desc_ptr->status);
+ } else if (desc_ptr->status & CDMA_CS_COMP) {
+ /* Descriptor finished with no errors. */
+ if (desc_ptr->command_flags & CDMA_CF_CONT) {
+			dev_info(cdns_ctrl->dev, "DMA unsupported flag is set\n");
+ status = STAT_UNKNOWN;
+ } else {
+ /* Last descriptor. */
+ status = STAT_OK;
+ }
+ }
+
+ return status;
+}
+
+static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ u32 reg;
+ int status;
+
+ /* Wait for thread ready. */
+ status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
+ 1000000,
+ BIT(thread), true);
+ if (status)
+ return status;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
+ cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select CDMA mode. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, thread);
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Send a CDMA command and wait for it to finish. */
+static u32
+cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status = {0};
+ int status;
+
+ irq_mask.trd_status = BIT(thread);
+ irq_mask.trd_error = BIT(thread);
+ irq_mask.status = INTR_STATUS_CDMA_TERR;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+
+ status = cadence_nand_cdma_send(cdns_ctrl, thread);
+ if (status)
+ return status;
+
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+
+ if (irq_status.status == 0 && irq_status.trd_status == 0 &&
+ irq_status.trd_error == 0) {
+ dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
+ return -ETIMEDOUT;
+ }
+ if (irq_status.status & irq_mask.status) {
+ dev_err(cdns_ctrl->dev, "CDMA command failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ECC size depends on configured ECC strength and on maximum supported
+ * ECC step size.
+ */
+static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
+{
+ int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
+
+ return ALIGN(nbytes, 2);
+}
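+
+/*
+ * Worked example: for a hypothetical 1024-byte step and strength 8,
+ * fls(8 * 1024) = 14 bits per syndrome, hence 14 * 8 = 112 parity bits,
+ * i.e. DIV_ROUND_UP(112, 8) = 14 bytes, already 2-byte aligned, so
+ * 14 ECC bytes are used per step.
+ */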
+
+#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
+ static int \
+ cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
+ int strength)\
+ {\
+ return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
+ }
+
+CADENCE_NAND_CALC_ECC_BYTES(256)
+CADENCE_NAND_CALC_ECC_BYTES(512)
+CADENCE_NAND_CALC_ECC_BYTES(1024)
+CADENCE_NAND_CALC_ECC_BYTES(2048)
+CADENCE_NAND_CALC_ECC_BYTES(4096)
+
+/* Read the BCH capabilities from the controller. */
+static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
+ int max_step_size = 0, nstrengths, i;
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
+ cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
+ cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
+ cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
+ cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
+ cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
+ cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
+ cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
+ cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
+ cdns_ctrl->ecc_stepinfos[0].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_0, reg);
+
+ cdns_ctrl->ecc_stepinfos[1].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_1, reg);
+
+ nstrengths = 0;
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] != 0)
+ nstrengths++;
+ }
+
+ ecc_caps->nstepinfos = 0;
+ for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
+ /* ECC strengths are common for all step infos. */
+ cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
+ cdns_ctrl->ecc_stepinfos[i].strengths =
+ cdns_ctrl->ecc_strengths;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
+ ecc_caps->nstepinfos++;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
+ max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
+ }
+ ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
+
+ switch (max_step_size) {
+ case 256:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
+ break;
+ case 512:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
+ break;
+ case 1024:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
+ break;
+ case 2048:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
+ break;
+ case 4096:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
+ break;
+ default:
+ dev_err(cdns_ctrl->dev,
+			"Unsupported sector size (ECC step size) %d\n",
+ max_step_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization. */
+static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ int status;
+ u32 reg;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_INIT_COMP, false);
+ if (status)
+ return status;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
+ cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
+
+ dev_info(cdns_ctrl->dev,
+ "%s: cadence nand controller version reg %x\n",
+ __func__, reg);
+
+ /* Disable cache and multiplane. */
+ writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
+ writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
+
+ /* Clear all interrupts. */
+ writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
+
+ cadence_nand_get_caps(cdns_ctrl);
+ cadence_nand_read_bch_caps(cdns_ctrl);
+
+	/*
+	 * Set the I/O width access to 8 bits, because software device
+	 * discovery expects an 8-bit bus width.
+	 */
+ status = cadence_nand_set_access_width16(cdns_ctrl, false);
+
+ return status;
+}
+
+#define TT_MAIN_OOB_AREAS 2
+#define TT_RAW_PAGE 3
+#define TT_BBM 4
+#define TT_MAIN_OOB_AREA_EXT 5
+
+/* Prepare size of data to transfer. */
+static void
+cadence_nand_prepare_data_size(struct nand_chip *chip,
+ int transfer_type)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 sec_size = 0, offset = 0, sec_cnt = 1;
+ u32 last_sec_size = cdns_chip->sector_size;
+ u32 data_ctrl_size = 0;
+ u32 reg = 0;
+
+ if (cdns_ctrl->curr_trans_type == transfer_type)
+ return;
+
+ switch (transfer_type) {
+ case TT_MAIN_OOB_AREA_EXT:
+ sec_cnt = cdns_chip->sector_count;
+ sec_size = cdns_chip->sector_size;
+ data_ctrl_size = cdns_chip->avail_oob_size;
+ break;
+ case TT_MAIN_OOB_AREAS:
+ sec_cnt = cdns_chip->sector_count;
+ last_sec_size = cdns_chip->sector_size
+ + cdns_chip->avail_oob_size;
+ sec_size = cdns_chip->sector_size;
+ break;
+ case TT_RAW_PAGE:
+ last_sec_size = mtd->writesize + mtd->oobsize;
+ break;
+ case TT_BBM:
+ offset = mtd->writesize + cdns_chip->bbm_offs;
+ last_sec_size = 8;
+ break;
+ }
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
+ reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
+ reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
+
+ if (cdns_ctrl->caps2.data_control_supp) {
+ reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ reg &= ~CONTROL_DATA_CTRL_SIZE;
+ reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
+ writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ }
+
+ cdns_ctrl->curr_trans_type = transfer_type;
+}
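+
+/*
+ * For instance, for TT_BBM on a hypothetical 4096-byte page with
+ * bbm_offs = 0, the registers are programmed with offset = 4096,
+ * sec_cnt = 1 and last_sec_size = 8, so the controller transfers only
+ * the 8 bytes covering the bad block marker area.
+ */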
+
+static int
+cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
+ int page, void *buf, void *ctrl_dat, u32 buf_size,
+ u32 ctrl_dat_size, enum dma_data_direction dir,
+ bool with_ecc)
+{
+ dma_addr_t dma_buf, dma_ctrl_dat = 0;
+ u8 thread_nr = chip_nr;
+ int status;
+ u16 ctype;
+
+ if (dir == DMA_FROM_DEVICE)
+ ctype = CDMA_CT_RD;
+ else
+ ctype = CDMA_CT_WR;
+
+ cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
+
+ dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+
+ if (ctrl_dat && ctrl_dat_size) {
+ dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
+ ctrl_dat_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+ }
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
+ (void *)dma_buf, (void *)dma_ctrl_dat,
+ ctype);
+
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+
+ if (ctrl_dat && ctrl_dat_size)
+ dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
+ ctrl_dat_size, dir);
+ if (status)
+ return status;
+
+ return cadence_nand_cdma_finish(cdns_ctrl);
+}
+
+static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_timings *t)
+{
+ writel_relaxed(t->async_toggle_timings,
+ cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+ writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+ writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+ writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+
+ writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
+ writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
+ writel_relaxed(t->phy_dqs_timing,
+ cdns_ctrl->reg + PHY_DQS_TIMING);
+ writel_relaxed(t->phy_gate_lpbk_ctrl,
+ cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+ writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
+ cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+ writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+ }
+}
+
+static int cadence_nand_select_target(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (chip == cdns_ctrl->selected_chip)
+ return 0;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ cdns_ctrl->curr_trans_type = -1;
+ cdns_ctrl->selected_chip = chip;
+
+ return 0;
+}
+
+static int cadence_nand_erase(struct nand_chip *chip, u32 page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ int status;
+ u8 thread_nr = cdns_chip->cs[chip->cur_cs];
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, NULL, NULL,
+ CDMA_CT_ERASE);
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "erase operation failed\n");
+ return -EIO;
+ }
+
+ status = cadence_nand_cdma_finish(cdns_ctrl);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
+{
+ int status;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ cadence_nand_prepare_data_size(chip, TT_BBM);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+	/*
+	 * Read only the bad block marker, from the offset
+	 * defined by the memory manufacturer.
+	 */
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "read BBM failed\n");
+ return -EIO;
+ }
+
+ memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
+
+ return 0;
+}
+
+static int cadence_nand_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+ u16 marker_val = 0xFFFF;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs,
+ 1);
+
+ if (oob_required) {
+ marker_val = *(u16 *)(chip->oob_poi
+ + cdns_chip->bbm_offs);
+ } else {
+ /* Set oob data to 0xFF. */
+ memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
+ cdns_chip->avail_oob_size);
+ }
+
+ cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, (void *)buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_TO_DEVICE, true);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "write page failed\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (oob_required) {
+ /* Transfer the data to the oob area. */
+ memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
+ cdns_chip->avail_oob_size);
+ }
+
+ memcpy(cdns_ctrl->buf, buf, mtd->writesize);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_TO_DEVICE, true);
+}
+
+static int cadence_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
+
+ return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int oob_skip = cdns_chip->bbm_len;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+	/*
+	 * Fill the buffer with 0xff first, except for a full page transfer.
+	 * This simplifies the logic.
+	 */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, size);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(tmp_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ const u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(tmp_buf + writesize, oob, oob_skip);
+
+ /* OOB free. */
+ memcpy(tmp_buf + oob_data_offset, oob,
+ cdns_chip->avail_oob_size);
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC. */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(tmp_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize +
+ mtd->oobsize,
+ 0, DMA_TO_DEVICE, false);
+}
+
+static int cadence_nand_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_write_page_raw(chip, NULL, true, page);
+}
+
+static int cadence_nand_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status = 0;
+ int ecc_err_count = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs, 1);
+
+	/*
+	 * If the data buffer can be accessed by DMA and the data_control
+	 * feature is supported, then transfer the data and OOB directly.
+	 */
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_FROM_DEVICE, true);
+ /* Otherwise use bounce buffer. */
+ } else {
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf,
+ NULL, mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_FROM_DEVICE, true);
+
+ memcpy(buf, cdns_ctrl->buf, mtd->writesize);
+ if (oob_required)
+ memcpy(chip->oob_poi,
+ cdns_ctrl->buf + mtd->writesize,
+ mtd->oobsize);
+ }
+
+ switch (status) {
+ case STAT_ECC_UNCORR:
+ mtd->ecc_stats.failed++;
+ ecc_err_count++;
+ break;
+ case STAT_ECC_CORR:
+ ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
+ cdns_ctrl->cdma_desc->status);
+ mtd->ecc_stats.corrected += ecc_err_count;
+ break;
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read page failed\n");
+ return -EIO;
+ }
+
+ if (oob_required)
+ if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
+ return -EIO;
+
+ return ecc_err_count;
+}
+
+/* Reads OOB data from the device. */
+static int cadence_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+ return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int oob_skip = cdns_chip->bbm_len;
+ int writesize = mtd->writesize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+
+ switch (status) {
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read raw page failed\n");
+ return -EIO;
+ }
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, tmp_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, tmp_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* OOB free. */
+ memcpy(oob, tmp_buf + oob_data_offset,
+ cdns_chip->avail_oob_size);
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(oob, tmp_buf + writesize, oob_skip);
+
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, tmp_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, tmp_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ void *buf,
+ dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ chan = cdns_ctrl->dmac;
+ dma_dev = chan->device;
+
+ buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+ if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = cdns_ctrl->io.dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = cdns_ctrl->io.dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = cadence_nand_slave_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cdns_ctrl->dmac);
+ wait_for_completion(&finished);
+
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+		/* Read the word-aligned part of the data. */
+ ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+			/* Read the remaining data from the slave DMA, if any. */
+ ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+			/* Copy the rest of the data. */
+ memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
+ len - (len_in_words << 2));
+ }
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+ cdns_ctrl->io.dma,
+ len, DMA_FROM_DEVICE);
+ if (status == 0)
+ return 0;
+
+		dev_warn(cdns_ctrl->dev,
+			 "Slave DMA transfer failed. Try again using bounce buffer.\n");
+ }
+
+	/* If the DMA transfer is not possible or failed, use the bounce buffer. */
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_FROM_DEVICE);
+
+ if (status) {
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+ return status;
+ }
+
+ memcpy(buf, cdns_ctrl->buf, len);
+
+ return 0;
+}
+
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ const u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+ iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+			/* Copy the rest of the data. */
+ memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
+ len - (len_in_words << 2));
+			/* Write all data expected by the NAND controller. */
+ iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+ }
+
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+ cdns_ctrl->io.dma,
+ len, DMA_TO_DEVICE);
+ if (status == 0)
+ return 0;
+
+		dev_warn(cdns_ctrl->dev,
+			 "Slave DMA transfer failed. Try again using bounce buffer.\n");
+ }
+
+	/* If the DMA transfer is not possible or failed, use the bounce buffer. */
+ memcpy(cdns_ctrl->buf, buf, len);
+
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_TO_DEVICE);
+
+ if (status)
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+ return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ int status;
+
+	/*
+	 * Callers of this function do not verify if the NAND is using a
+	 * 16-bit or an 8-bit bus for normal operations, so we need to take
+	 * care of that here by leaving the configuration unchanged if the
+	 * NAND does not have the NAND_BUSWIDTH_16 flag set.
+	 */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return 0;
+
+ status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
+
+ return status;
+}
+
+static int cadence_nand_cmd_opcode(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_CMD);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
+ instr->ctx.cmd.opcode);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
+ instr->ctx.cmd.opcode);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_address(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ unsigned int offset, naddrs;
+ u64 address = 0;
+ const u8 *addrs;
+ int ret;
+ int i;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_ADDR);
+
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ address |= (u64)addrs[i] << (8 * i);
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
+ address);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
+ naddrs - 1);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
+
+ return ret;
+}
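+
+/*
+ * The address cycles above are packed little-endian into one 64-bit value;
+ * e.g. hypothetical cycles {0x05, 0x34, 0x12} become address = 0x123405,
+ * sent with GCMD_LAY_INPUT_ADDR_SIZE = 2 (the number of cycles minus one).
+ */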
+
+static int cadence_nand_cmd_erase(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ unsigned int op_id;
+
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
+ int i;
+ const struct nand_op_instr *instr = NULL;
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ u32 page = 0;
+
+ instr = &subop->instrs[1];
+ offset = nand_subop_get_addr_start_off(subop, 1);
+ naddrs = nand_subop_get_num_addr_cyc(subop, 1);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ page |= (u32)addrs[i] << (8 * i);
+
+ return cadence_nand_erase(chip, page);
+ }
+
+	/*
+	 * If it is not an erase operation, handle each instruction
+	 * by calling the exec_op function.
+	 */
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ int ret;
+ const struct nand_operation nand_op = {
+ .cs = chip->cur_cs,
+ .instrs = &subop->instrs[op_id],
+ .ninstrs = 1};
+ ret = chip->controller->ops->exec_op(chip, &nand_op, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cadence_nand_cmd_data(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int offset, op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int len = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_DATA);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
+ GCMD_DIR_WRITE);
+
+ len = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, true);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ return ret;
+ }
+ }
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
+ return ret;
+ }
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ void *buf = instr->ctx.data.buf.in + offset;
+
+ ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
+ } else {
+ const void *buf = instr->ctx.data.buf.out + offset;
+
+ ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
+ }
+
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
+ return ret;
+ }
+
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, false);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ }
+ }
+
+ return ret;
+}
+
+static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ int status;
+ unsigned int op_id = 0;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
+ timeout_us,
+ BIT(cdns_chip->cs[chip->cur_cs]),
+ false);
+ return status;
+}
+
+static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_erase,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_opcode,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_address,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
+ );
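+
+/*
+ * As an example of how the parser above dispatches operations: a full
+ * erase sequence (ERASE1 opcode, address cycles, ERASE2 opcode, wait for
+ * ready) matches the first pattern and is handled by
+ * cadence_nand_cmd_erase(), which turns it into a CDMA erase descriptor,
+ * while a lone command opcode falls through to cadence_nand_cmd_opcode().
+ */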
+
+static int cadence_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int status = cadence_nand_select_target(chip);
+
+ if (status)
+ return status;
+
+ return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
+ check_only);
+}
+
+static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->bbm_len;
+ oobregion->length = cdns_chip->avail_oob_size
+ - cdns_chip->bbm_len;
+
+ return 0;
+}
+
+static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->avail_oob_size;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
+ .free = cadence_nand_ooblayout_free,
+ .ecc = cadence_nand_ooblayout_ecc,
+};
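+
+/*
+ * The resulting OOB layout is [BBM | free | ECC]; e.g. with a hypothetical
+ * bbm_len = 2 and avail_oob_size = 30, the free region spans OOB bytes
+ * 2..29 and the ECC bytes start at OOB offset 30.
+ */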
+
+static int calc_cycl(u32 timing, u32 clock)
+{
+ if (timing == 0 || clock == 0)
+ return 0;
+
+ if ((timing % clock) > 0)
+ return timing / clock;
+ else
+ return timing / clock - 1;
+}
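+
+/*
+ * calc_cycl() returns a cycle count that the hardware extends by one, so
+ * e.g. calc_cycl(25000, 20000) = 1 makes the controller wait
+ * (1 + 1) * 20000 ps = 40 ns for a 25 ns requirement, while an exact
+ * multiple such as calc_cycl(40000, 20000) = 1 also yields exactly 40 ns.
+ */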
+
+/* Calculate max data valid window. */
+static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 board_delay_skew_min, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min +
+ board_delay_skew_min;
+}
+
+/* Calculate data valid window. */
+static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 trea_max, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
+}
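+
+/*
+ * Hypothetical numbers to illustrate the two helpers above: with
+ * ext_mode = 1, trp_cnt = 1, clk_period = 20000 ps, tRHOH_min = 15000 ps
+ * and tREA_max = 25000 ps, calc_tdvw() = 2 * 20000 + 15000 - 25000 =
+ * 30000 ps; with board_delay_skew_min = 4000 ps, calc_tdvw_max() =
+ * 2 * 20000 + 15000 + 4000 = 59000 ps.
+ */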
+
+static int
+cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ const struct nand_sdr_timings *sdr;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct cadence_nand_timings *t = &cdns_chip->timings;
+ u32 reg;
+ u32 board_delay = cdns_ctrl->board_delay;
+ u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+ cdns_ctrl->nf_clk_rate);
+ u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+ u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
+ u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
+ u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
+ u32 if_skew = cdns_ctrl->caps1->if_skew;
+ u32 board_delay_skew_min = board_delay - if_skew;
+ u32 board_delay_skew_max = board_delay + if_skew;
+ u32 dqs_sampl_res, phony_dqs_mod;
+ u32 tdvw, tdvw_min, tdvw_max;
+ u32 ext_rd_mode, ext_wr_mode;
+ u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
+ u32 sampling_point;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ memset(t, 0, sizeof(*t));
+ /* Sampling point calculation. */
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_mod = 2;
+ else
+ phony_dqs_mod = 1;
+
+ dqs_sampl_res = clk_period / phony_dqs_mod;
+
+ tdvw_min = sdr->tREA_max + board_delay_skew_max;
+	/*
+	 * The idea of these calculations is to get the optimum values
+	 * for the tRP and tRH timings. If it is NOT possible to sample data
+	 * with the optimal tRP/tRH settings, the parameters will be extended.
+	 * If clk_period is 50 ns (the lowest value), this condition is met
+	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
+	 * If clk_period is 20 ns, the condition is met only
+	 * for asynchronous timing mode 5.
+	 */
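+	/*
+	 * For example, with a hypothetical clk_period of 50000 ps and
+	 * timings of tRC_min = 50000 ps, tRP_min = 25000 ps and
+	 * tREH_min = 25000 ps, all three conditions below hold and the
+	 * performance (non-extended) read mode is used.
+	 */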
+ if (sdr->tRC_min <= clk_period &&
+ sdr->tRP_min <= (clk_period / 2) &&
+ sdr->tREH_min <= (clk_period / 2)) {
+ /* Performance mode. */
+ ext_rd_mode = 0;
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+		/*
+		 * Check if the data valid window and sampling point can be
+		 * found and the point is not on the edge (i.e. we have hold
+		 * margin). If not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ if (tdvw_max <= tdvw_min ||
+ (tdvw_max % dqs_sampl_res) == 0) {
+				/*
+				 * No valid sampling point, so the RE pulse
+				 * needs to be widened by half a clock cycle.
+				 */
+ ext_rd_mode = 1;
+ }
+ } else {
+			/*
+			 * There is no valid window in which to sample data,
+			 * so tRP needs to be widened.
+			 * Very conservative calculations are performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ ext_rd_mode = 1;
+ }
+
+ } else {
+ /* Extended read mode. */
+ u32 trh;
+
+ ext_rd_mode = 1;
+ trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+ trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+ if (sdr->tREH_min >= trh)
+ trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+ else
+ trh_cnt = calc_cycl(trh, clk_period);
+
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+		/*
+		 * Check if the data valid window and sampling point can be
+		 * found, or, if the point is at the edge, check whether the
+		 * previous one is valid. If not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+
+ if ((((tdvw_max / dqs_sampl_res)
+ * dqs_sampl_res) <= tdvw_min) ||
+ (((tdvw_max % dqs_sampl_res) == 0) &&
+ (((tdvw_max / dqs_sampl_res - 1)
+ * dqs_sampl_res) <= tdvw_min))) {
+				/*
+				 * The data valid window is narrower than the
+				 * sampling resolution and does not hit any
+				 * sampling point. To make sure a sampling
+				 * point is found, the RE low pulse width is
+				 * extended by one clock cycle.
+				 */
+ trp_cnt = trp_cnt + 1;
+ }
+ } else {
+			/*
+			 * There is no valid window in which to sample data.
+			 * tRP needs to be widened.
+			 * Very conservative calculations are performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ }
+ }
+
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min, ext_rd_mode);
+
+ if (sdr->tWC_min <= clk_period &&
+ (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+ (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+ ext_wr_mode = 0;
+ } else {
+ u32 twh;
+
+ ext_wr_mode = 1;
+ twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+ if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+ twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+ clk_period);
+
+ twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+ if (sdr->tWH_min >= twh)
+ twh = sdr->tWH_min;
+
+ twh_cnt = calc_cycl(twh + if_skew, clk_period);
+ }
+
+ reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+ t->async_toggle_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+ tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+ tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+ twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+ trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+ reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+	/*
+	 * If the timing exceeds the delay field in the timing register,
+	 * use the maximum value.
+	 */
+ if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+ reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+ else
+ reg |= TIMINGS0_TCCS;
+
+ reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+ t->timings0 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+	/* The following are related to a single signal, so skew is not needed. */
+ trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+ trhz_cnt = trhz_cnt + 1;
+ twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+	/*
+	 * Because of the two-stage syncflop the value must be increased
+	 * by 3 + 5: the first addend is related to the synchronization,
+	 * the second to the output interface delay.
+	 */
+ twb_cnt = twb_cnt + 3 + 5;
+	/*
+	 * The following is related to the WE edge of the random data input
+	 * sequence, so skew is not needed.
+	 */
+ tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+ reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+ t->timings1 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+ tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+ if (tfeat_cnt < twb_cnt)
+ tfeat_cnt = twb_cnt;
+
+ tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+ tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+ reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+ t->timings2 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ reg = DLL_PHY_CTRL_DLL_RST_N;
+ if (ext_wr_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+ if (ext_rd_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+ t->dll_phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+ }
+
+ /* Sampling point calculation. */
+ if ((tdvw_max % dqs_sampl_res) > 0)
+ sampling_point = tdvw_max / dqs_sampl_res;
+ else
+ sampling_point = (tdvw_max / dqs_sampl_res - 1);
+
+ if (sampling_point * dqs_sampl_res > tdvw_min) {
+ dll_phy_dqs_timing =
+ FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
+ phony_dqs_timing = sampling_point / phony_dqs_mod;
+
+ if ((sampling_point % 2) > 0) {
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
+ if ((tdvw_max % dqs_sampl_res) == 0)
+				/*
+				 * Calculation for a sampling point at the
+				 * edge of the data, with an odd sampling
+				 * point number.
+				 */
+ phony_dqs_timing = (tdvw_max / dqs_sampl_res)
+ / phony_dqs_mod - 1;
+
+ if (!cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_timing--;
+
+ } else {
+ phony_dqs_timing--;
+ }
+ rd_del_sel = phony_dqs_timing + 3;
+ } else {
+		dev_warn(cdns_ctrl->dev,
+			 "ERROR: cannot find a valid sampling point\n");
+ }
+
+ reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ reg |= PHY_CTRL_SDR_DQS;
+ t->phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
+ dll_phy_dqs_timing);
+ t->phy_dqs_timing = dll_phy_dqs_timing;
+
+ reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+ dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
+ reg);
+ t->phy_gate_lpbk_ctrl = reg;
+
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
+ PHY_DLL_MASTER_CTRL_BYPASS_MODE);
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
+ }
+
+ return 0;
+}
+
+int cadence_nand_attach_chip(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 max_oob_data_size;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ret = cadence_nand_set_access_width16(cdns_ctrl, true);
+ if (ret)
+ return ret;
+ }
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->ecc.mode = NAND_ECC_HW;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ cdns_chip->bbm_offs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ cdns_chip->bbm_offs &= ~0x01;
+ cdns_chip->bbm_len = 2;
+ } else {
+ cdns_chip->bbm_len = 1;
+ }
+
+ ret = nand_ecc_choose_conf(chip,
+ &cdns_ctrl->ecc_caps,
+ mtd->oobsize - cdns_chip->bbm_len);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
+ return ret;
+ }
+
+ dev_dbg(cdns_ctrl->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ /* Error correction configuration. */
+ cdns_chip->sector_size = chip->ecc.size;
+ cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+
+ cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
+
+ max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
+
+ if (cdns_chip->avail_oob_size > max_oob_data_size)
+ cdns_chip->avail_oob_size = max_oob_data_size;
+
+ if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
+ > mtd->oobsize)
+ cdns_chip->avail_oob_size -= 4;
+
+ ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
+ if (ret < 0)
+ return -EINVAL;
+
+ cdns_chip->corr_str_idx = (u8)ret;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ /* Override the default read operations. */
+ chip->ecc.read_page = cadence_nand_read_page;
+ chip->ecc.read_page_raw = cadence_nand_read_page_raw;
+ chip->ecc.write_page = cadence_nand_write_page;
+ chip->ecc.write_page_raw = cadence_nand_write_page_raw;
+ chip->ecc.read_oob = cadence_nand_read_oob;
+ chip->ecc.write_oob = cadence_nand_write_oob;
+ chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
+ chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
+
+ if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
+ cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
+
+ return 0;
+}
+
+static const struct nand_controller_ops cadence_nand_controller_ops = {
+ .attach_chip = cadence_nand_attach_chip,
+ .exec_op = cadence_nand_exec_op,
+ .setup_data_interface = cadence_nand_setup_data_interface,
+};
+
+static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
+ struct device_node *np)
+{
+ struct cdns_nand_chip *cdns_chip;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the nand chip structure. */
+ cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
+ (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!cdns_chip) {
+ dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ cdns_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ /* Retrieve CS id. */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= cdns_ctrl->caps2.max_banks) {
+ dev_err(cdns_ctrl->dev,
+ "invalid reg value: %u (max CS = %d)\n",
+ cs, cdns_ctrl->caps2.max_banks);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
+ dev_err(cdns_ctrl->dev,
+ "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ cdns_chip->cs[i] = cs;
+ }
+
+ chip = &cdns_chip->chip;
+ chip->controller = &cdns_ctrl->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = cdns_ctrl->dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ ret = nand_scan(chip, cdns_chip->nsels);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
+
+ return 0;
+}
+
+static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cdns_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
+ nand_release(&entry->chip);
+ list_del(&entry->node);
+ }
+}
+
+static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct device_node *np = cdns_ctrl->dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = cdns_ctrl->caps2.max_banks;
+ int nchips, ret;
+
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(cdns_ctrl->dev,
+ "too many NAND chips: %d (max = %d CS)\n",
+ nchips, max_cs);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+{
+ /* Disable interrupts. */
+ writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
+}
+
+static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+ sizeof(*cdns_ctrl->cdma_desc),
+ &cdns_ctrl->dma_cdma_desc,
+ GFP_KERNEL);
+	if (!cdns_ctrl->cdma_desc)
+ return -ENOMEM;
+
+ cdns_ctrl->buf_size = SZ_16K;
+ cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto free_buf_desc;
+ }
+
+ if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
+ IRQF_SHARED, "cadence-nand-controller",
+ cdns_ctrl)) {
+ dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto free_buf;
+ }
+
+ spin_lock_init(&cdns_ctrl->irq_lock);
+ init_completion(&cdns_ctrl->complete);
+
+ ret = cadence_nand_hw_init(cdns_ctrl);
+ if (ret)
+ goto disable_irq;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ if (cdns_ctrl->caps1->has_dma) {
+ cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!cdns_ctrl->dmac) {
+ dev_err(cdns_ctrl->dev,
+ "Unable to get a DMA channel\n");
+ ret = -EBUSY;
+ goto disable_irq;
+ }
+ }
+
+ nand_controller_init(&cdns_ctrl->controller);
+ INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+ cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
+ cdns_ctrl->curr_corr_str_idx = 0xFF;
+
+ ret = cadence_nand_chips_init(cdns_ctrl);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ ret);
+ goto dma_release_chnl;
+ }
+
+ kfree(cdns_ctrl->buf);
+ cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto dma_release_chnl;
+ }
+
+ return 0;
+
+dma_release_chnl:
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+
+disable_irq:
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+
+free_buf:
+ kfree(cdns_ctrl->buf);
+
+free_buf_desc:
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ return ret;
+}
+
+/* Controller tear-down, called from the platform driver's remove path. */
+static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ kfree(cdns_ctrl->buf);
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+}
+
+struct cadence_nand_dt {
+ struct cdns_nand_ctrl cdns_ctrl;
+ struct clk *clk;
+};
+
+static const struct cadence_nand_dt_devdata cadence_nand_default = {
+ .if_skew = 0,
+ .has_dma = 1,
+};
+
+static const struct of_device_id cadence_nand_dt_ids[] = {
+ {
+ .compatible = "cdns,hp-nfc",
+ .data = &cadence_nand_default
+ }, {}
+};
+
+MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
+
+static int cadence_nand_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *res;
+ struct cadence_nand_dt *dt;
+ struct cdns_nand_ctrl *cdns_ctrl;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct cadence_nand_dt_devdata *devdata;
+ u32 val;
+
+ of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
+ if (!of_id) {
+ dev_err(&ofdev->dev, "Failed to find a matching device id\n");
+ return -ENODEV;
+ }
+ devdata = of_id->data;
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+
+ cdns_ctrl = &dt->cdns_ctrl;
+ cdns_ctrl->caps1 = devdata;
+
+ cdns_ctrl->dev = &ofdev->dev;
+ cdns_ctrl->irq = platform_get_irq(ofdev, 0);
+ if (cdns_ctrl->irq < 0)
+ return cdns_ctrl->irq;
+
+ dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
+
+ cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(cdns_ctrl->reg)) {
+ dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n");
+ return PTR_ERR(cdns_ctrl->reg);
+ }
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -EINVAL;
+
+ cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
+ if (IS_ERR(cdns_ctrl->io.virt)) {
+ dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n");
+ return PTR_ERR(cdns_ctrl->io.virt);
+ }
+
+ dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
+
+ ret = of_property_read_u32(ofdev->dev.of_node,
+ "cdns,board-delay-ps", &val);
+ if (ret) {
+ val = 4830;
+ dev_info(cdns_ctrl->dev,
+ "missing cdns,board-delay-ps property, %d was set\n",
+ val);
+ }
+ cdns_ctrl->board_delay = val;
+
+ ret = cadence_nand_init(cdns_ctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+}
+
+static int cadence_nand_dt_remove(struct platform_device *ofdev)
+{
+ struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
+
+ cadence_nand_remove(&dt->cdns_ctrl);
+
+ return 0;
+}
+
+static struct platform_driver cadence_nand_dt_driver = {
+ .probe = cadence_nand_dt_probe,
+ .remove = cadence_nand_dt_remove,
+ .driver = {
+ .name = "cadence-nand-controller",
+ .of_match_table = cadence_nand_dt_ids,
+ },
+};
+
+module_platform_driver(cadence_nand_dt_driver);
+
+MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
+
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 5e14836f6bd5..8b779a899dcf 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -102,47 +102,6 @@ static int denali_dt_chip_init(struct denali_controller *denali,
return denali_chip_init(denali, dchip);
}
-/* Backward compatibility for old platforms */
-static int denali_dt_legacy_chip_init(struct denali_controller *denali)
-{
- struct denali_chip *dchip;
- int nsels, i;
-
- nsels = denali->nbanks;
-
- dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
- GFP_KERNEL);
- if (!dchip)
- return -ENOMEM;
-
- dchip->nsels = nsels;
-
- for (i = 0; i < nsels; i++)
- dchip->sels[i].bank = i;
-
- nand_set_flash_node(&dchip->chip, denali->dev->of_node);
-
- return denali_chip_init(denali, dchip);
-}
-
-/*
- * Check the DT binding.
- * The new binding expects chip subnodes in the controller node.
- * So, #address-cells = <1>; #size-cells = <0>; are required.
- * Check the #size-cells to distinguish the binding.
- */
-static bool denali_dt_is_legacy_binding(struct device_node *np)
-{
- u32 cells;
- int ret;
-
- ret = of_property_read_u32(np, "#size-cells", &cells);
- if (ret)
- return true;
-
- return cells != 0;
-}
-
static int denali_dt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -167,10 +126,8 @@ static int denali_dt_probe(struct platform_device *pdev)
denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
- if (denali->irq < 0) {
- dev_err(dev, "no irq defined\n");
+ if (denali->irq < 0)
return denali->irq;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
denali->reg = devm_ioremap_resource(dev, res);
@@ -213,17 +170,11 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_ecc;
- if (denali_dt_is_legacy_binding(dev->of_node)) {
- ret = denali_dt_legacy_chip_init(denali);
- if (ret)
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
goto out_remove_denali;
- } else {
- for_each_child_of_node(dev->of_node, np) {
- ret = denali_dt_chip_init(denali, np);
- if (ret) {
- of_node_put(np);
- goto out_remove_denali;
- }
}
}
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 6a4626a8bf95..0b48be54ba6f 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -751,10 +751,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
mtd = nand_to_mtd(chip);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no IRQ resource defined\n");
+ if (irq < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->iobase = devm_ioremap_resource(dev, res);
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 78b31f845c50..241b58b83240 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -773,7 +773,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto release_dma_chan;
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index fc49e13d81ec..fb5abdcfb007 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2862,10 +2862,8 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
nfc->core_clk = devm_clk_get(&pdev->dev, "core");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 1b82b687e5a5..9f17b5b8efbf 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1399,10 +1399,8 @@ static int meson_nfc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no NFC IRQ resource\n");
+ if (irq < 0)
return -EINVAL;
- }
ret = meson_nfc_clk_init(nfc);
if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 74595b644b7c..75f1fa3d4d35 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -527,10 +527,8 @@ static int mtk_ecc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 373d47d1ba4c..b8305e39ab51 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1540,7 +1540,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no nfi irq resource\n");
ret = -EINVAL;
goto clk_disable;
}
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index 9d49e6c845e1..ed7a4e021bf5 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -524,10 +524,8 @@ static int mxic_nfc_probe(struct platform_device *pdev)
nand_chip->controller = &nfc->controller;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
mxic_nfc_hw_init(nfc);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 5c2c30a7dffa..f64e3b6605c6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -292,12 +292,16 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page)
struct mtd_info *mtd = nand_to_mtd(chip);
int last_page = ((mtd->erasesize - mtd->writesize) >>
chip->page_shift) & chip->pagemask;
+ unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
+ | NAND_BBM_LASTPAGE;
+ if (page == 0 && !(chip->options & bbm_flags))
+ return 0;
if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
return 0;
- else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
return 1;
- else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
return last_page;
return -EINVAL;
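
With this hunk, chips that declare none of the NAND_BBM_* options fall back to page 0 as the marker location, and the if/else-if chain becomes a sequence of early returns that a caller can walk by passing page + 1. A minimal user-space sketch of that walk, with illustrative flag values and geometry (128KiB blocks, 2KiB pages) rather than the kernel's definitions:

/*
 * User-space sketch of the marker-page walk above; flag values and
 * geometry are illustrative, not the kernel's NAND_BBM_* definitions.
 */
#include <stdio.h>

#define BBM_FIRSTPAGE	0x1
#define BBM_SECONDPAGE	0x2
#define BBM_LASTPAGE	0x4

static int bbm_get_next_page(unsigned int options, int page,
			     int pages_per_block)
{
	int last_page = pages_per_block - 1;
	unsigned int bbm_flags = BBM_FIRSTPAGE | BBM_SECONDPAGE |
				 BBM_LASTPAGE;

	if (page == 0 && !(options & bbm_flags))
		return 0;
	if (page == 0 && options & BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && options & BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && options & BBM_LASTPAGE)
		return last_page;

	return -1;	/* no further marker page */
}

int main(void)
{
	int pages_per_block = (128 * 1024) / (2 * 1024);	/* 64 */
	unsigned int opts = BBM_FIRSTPAGE | BBM_SECONDPAGE;
	int page = 0;

	/* Walk every marker page: prints 0, then 1, then stops. */
	while ((page = bbm_get_next_page(opts, page, pages_per_block)) >= 0) {
		printf("marker page %d\n", page);
		page++;
	}
	return 0;
}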
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 8ca9fad6e6ad..56654030ec7f 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -446,8 +446,10 @@ static int micron_nand_init(struct nand_chip *chip)
if (ret)
goto err_free_manuf_data;
+ chip->options |= NAND_BBM_FIRSTPAGE;
+
if (mtd->writesize == 2048)
- chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+ chip->options |= NAND_BBM_SECONDPAGE;
ondie = micron_supports_on_die_ecc(chip);
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 6ec65f48501c..ad77c112a78a 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1967,10 +1967,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_IRQ:
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
- if (info->gpmc_irq_fifo <= 0) {
- dev_err(dev, "Error getting fifo IRQ\n");
+ if (info->gpmc_irq_fifo <= 0)
return -ENODEV;
- }
err = devm_request_irq(dev, info->gpmc_irq_fifo,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-fifo", info);
@@ -1982,10 +1980,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
}
info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
- if (info->gpmc_irq_count <= 0) {
- dev_err(dev, "Error getting IRQ count\n");
+ if (info->gpmc_irq_count <= 0)
return -ENODEV;
- }
err = devm_request_irq(dev, info->gpmc_irq_count,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-count", info);
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index e509c93737c4..058e99d0cbcf 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1129,10 +1129,8 @@ static int flctl_probe(struct platform_device *pdev)
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
"flste", flctl);
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 8cc852dc7d54..9e63800f768a 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1880,11 +1880,8 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "IRQ error missing or invalid\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
dev_name(dev), fmc2);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 89773293c64d..37a4ac0dd85b 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -2071,10 +2071,8 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
nfc->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 009c1da8574c..2b7cabbb680c 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -320,7 +320,8 @@ static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_unlock(&chip->controller->mutex);
}
-static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -331,8 +332,8 @@ static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return 0;
}
-static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -746,6 +747,15 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
return 0;
}
+static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
+ .prepare = aspeed_smc_prep,
+ .unprepare = aspeed_smc_unprep,
+ .read_reg = aspeed_smc_read_reg,
+ .write_reg = aspeed_smc_write_reg,
+ .read = aspeed_smc_read_user,
+ .write = aspeed_smc_write_user,
+};
+
static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
struct device_node *np, struct resource *r)
{
@@ -805,12 +815,7 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
nor->dev = dev;
nor->priv = chip;
spi_nor_set_flash_node(nor, child);
- nor->read = aspeed_smc_read_user;
- nor->write = aspeed_smc_write_user;
- nor->read_reg = aspeed_smc_read_reg;
- nor->write_reg = aspeed_smc_write_reg;
- nor->prepare = aspeed_smc_prep;
- nor->unprepare = aspeed_smc_unprep;
+ nor->controller_ops = &aspeed_smc_controller_ops;
ret = aspeed_smc_chip_setup_init(chip, r);
if (ret)
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 7bef63947b29..06f997247d0f 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -285,7 +285,7 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
return IRQ_HANDLED;
}
-static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
u32 rdreg = 0;
@@ -354,27 +354,27 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
-static int cqspi_command_read(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
+ u8 *rxbuf, size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int rdreg;
unsigned int reg;
- unsigned int read_len;
+ size_t read_len;
int status;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
- dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+ dev_err(nor->dev,
+ "Invalid input argument, len %zu rxbuf 0x%p\n",
n_rx, rxbuf);
return -EINVAL;
}
- reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
- rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+ rdreg = cqspi_calc_rdreg(nor);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
@@ -404,19 +404,19 @@ static int cqspi_command_read(struct spi_nor *nor,
}
static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
- const u8 *txbuf, const unsigned n_tx)
+ const u8 *txbuf, size_t n_tx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
unsigned int data;
- u32 write_len;
+ size_t write_len;
int ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(nor->dev,
- "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ "Invalid input argument, cmdlen %zu txbuf 0x%p\n",
n_tx, txbuf);
return -EINVAL;
}
@@ -470,7 +470,7 @@ static int cqspi_read_setup(struct spi_nor *nor)
unsigned int reg;
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
- reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+ reg |= cqspi_calc_rdreg(nor);
/* Setup dummy clock cycles */
dummy_clk = nor->read_dummy;
@@ -603,7 +603,7 @@ static int cqspi_write_setup(struct spi_nor *nor)
/* Set opcode. */
reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
- reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ reg = cqspi_calc_rdreg(nor);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
reg = readl(reg_base + CQSPI_REG_SIZE);
@@ -1050,7 +1050,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
return ret;
/* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
if (ret)
return ret;
@@ -1080,18 +1080,19 @@ static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_unlock(&cqspi->bus_mutex);
}
-static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
int ret;
ret = cqspi_set_protocol(nor, 0);
if (!ret)
- ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+ ret = cqspi_command_read(nor, opcode, buf, len);
return ret;
}
-static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
int ret;
@@ -1216,6 +1217,16 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
init_completion(&cqspi->rx_dma_complete);
}
+static const struct spi_nor_controller_ops cqspi_controller_ops = {
+ .prepare = cqspi_prep,
+ .unprepare = cqspi_unprep,
+ .read_reg = cqspi_read_reg,
+ .write_reg = cqspi_write_reg,
+ .read = cqspi_read,
+ .write = cqspi_write,
+ .erase = cqspi_erase,
+};
+
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
struct platform_device *pdev = cqspi->pdev;
@@ -1265,14 +1276,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
nor->dev = dev;
spi_nor_set_flash_node(nor, np);
nor->priv = f_pdata;
-
- nor->read_reg = cqspi_read_reg;
- nor->write_reg = cqspi_write_reg;
- nor->read = cqspi_read;
- nor->write = cqspi_write;
- nor->erase = cqspi_erase;
- nor->prepare = cqspi_prep;
- nor->unprepare = cqspi_unprep;
+ nor->controller_ops = &cqspi_controller_ops;
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
dev_name(dev), cs);
@@ -1366,10 +1370,8 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Cannot obtain IRQ.\n");
+ if (irq < 0)
return -ENXIO;
- }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index 6dac9dd8bf42..a1258216f89d 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -177,7 +177,7 @@ static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
}
static int hisi_spi_nor_op_reg(struct spi_nor *nor,
- u8 opcode, int len, u8 optype)
+ u8 opcode, size_t len, u8 optype)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -200,7 +200,7 @@ static int hisi_spi_nor_op_reg(struct spi_nor *nor,
}
static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+ size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -215,7 +215,7 @@ static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
}
static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
- u8 *buf, int len)
+ const u8 *buf, size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -311,6 +311,15 @@ static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
return len;
}
+static const struct spi_nor_controller_ops hisi_controller_ops = {
+ .prepare = hisi_spi_nor_prep,
+ .unprepare = hisi_spi_nor_unprep,
+ .read_reg = hisi_spi_nor_read_reg,
+ .write_reg = hisi_spi_nor_write_reg,
+ .read = hisi_spi_nor_read,
+ .write = hisi_spi_nor_write,
+};
+
/**
* Get spi flash device information and register it as a mtd device.
*/
@@ -357,14 +366,8 @@ static int hisi_spi_nor_register(struct device_node *np,
}
priv->host = host;
nor->priv = priv;
+ nor->controller_ops = &hisi_controller_ops;
- nor->prepare = hisi_spi_nor_prep;
- nor->unprepare = hisi_spi_nor_unprep;
- nor->read_reg = hisi_spi_nor_read_reg;
- nor->write_reg = hisi_spi_nor_write_reg;
- nor->read = hisi_spi_nor_read;
- nor->write = hisi_spi_nor_write;
- nor->erase = NULL;
ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
return ret;
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 3cda8e7a68f8..3d8987baea2a 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -20,6 +20,10 @@ static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
};
+static const struct intel_spi_boardinfo cnl_info = {
+ .type = INTEL_SPI_CNL,
+};
+
static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -61,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
@@ -68,6 +73,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 43e55a2e9b27..61d2a0ad2131 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -108,6 +108,10 @@
#define BXT_FREG_NUM 12
#define BXT_PR_NUM 6
+#define CNL_PR 0x84
+#define CNL_FREG_NUM 6
+#define CNL_PR_NUM 5
+
#define LVSCC 0xc4
#define UVSCC 0xc8
#define ERASE_OPCODE_SHIFT 8
@@ -187,12 +191,16 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
- value = readl(ispi->sregs + SSFSTS_CTL);
- dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
- dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
- readl(ispi->sregs + PREOP_OPTYPE));
- dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
- dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+ if (ispi->sregs) {
+ value = readl(ispi->sregs + SSFSTS_CTL);
+ dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+ dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+ readl(ispi->sregs + PREOP_OPTYPE));
+ dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
+ readl(ispi->sregs + OPMENU0));
+ dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
+ readl(ispi->sregs + OPMENU1));
+ }
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
@@ -340,6 +348,13 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->erase_64k = true;
break;
+ case INTEL_SPI_CNL:
+ ispi->sregs = NULL;
+ ispi->pregs = ispi->base + CNL_PR;
+ ispi->nregions = CNL_FREG_NUM;
+ ispi->pr_num = CNL_PR_NUM;
+ break;
+
default:
return -EINVAL;
}
@@ -367,6 +382,11 @@ static int intel_spi_init(struct intel_spi *ispi)
!(uvscc & ERASE_64K_OPCODE_MASK))
ispi->erase_64k = false;
+ if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
+ dev_err(ispi->dev, "software sequencer not supported, but required\n");
+ return -EINVAL;
+ }
+
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
@@ -383,7 +403,7 @@ static int intel_spi_init(struct intel_spi *ispi)
val = readl(ispi->base + HSFSTS_CTL);
ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
- if (ispi->locked) {
+ if (ispi->locked && ispi->sregs) {
/*
* BIOS programs allowed opcodes and then locks down the
* register. So read back what opcodes it decided to support.
@@ -426,7 +446,7 @@ static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
{
u32 val, status;
int ret;
@@ -469,7 +489,7 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
int optype)
{
u32 val = 0, status;
@@ -535,7 +555,8 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
return 0;
}
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct intel_spi *ispi = nor->priv;
int ret;
@@ -555,7 +576,8 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return intel_spi_read_block(ispi, buf, len);
}
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct intel_spi *ispi = nor->priv;
int ret;
@@ -864,6 +886,14 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
+static const struct spi_nor_controller_ops intel_spi_controller_ops = {
+ .read_reg = intel_spi_read_reg,
+ .write_reg = intel_spi_write_reg,
+ .read = intel_spi_read,
+ .write = intel_spi_write,
+ .erase = intel_spi_erase,
+};
+
struct intel_spi *intel_spi_probe(struct device *dev,
struct resource *mem, const struct intel_spi_boardinfo *info)
{
@@ -897,11 +927,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
ispi->nor.dev = ispi->dev;
ispi->nor.priv = ispi;
- ispi->nor.read_reg = intel_spi_read_reg;
- ispi->nor.write_reg = intel_spi_write_reg;
- ispi->nor.read = intel_spi_read;
- ispi->nor.write = intel_spi_write;
- ispi->nor.erase = intel_spi_erase;
+ ispi->nor.controller_ops = &intel_spi_controller_ops;
ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
if (ret) {
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 34db01ab6cab..b1691680d174 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -151,9 +151,9 @@ static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
}
static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+ const u8 *tx, size_t txlen, u8 *rx, size_t rxlen)
{
- int len = 1 + txlen + rxlen;
+ size_t len = 1 + txlen + rxlen;
int i, ret, idx;
if (len > MTK_NOR_MAX_SHIFT)
@@ -193,7 +193,7 @@ static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
}
/* Do a WRSR (Write Status Register) command */
-static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr)
{
writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
@@ -354,7 +354,7 @@ static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
int ret;
struct mtk_nor *mtk_nor = nor->priv;
@@ -376,8 +376,8 @@ static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return ret;
}
-static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
int ret;
struct mtk_nor *mtk_nor = nor->priv;
@@ -419,6 +419,13 @@ static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
return 0;
}
+static const struct spi_nor_controller_ops mtk_controller_ops = {
+ .read_reg = mtk_nor_read_reg,
+ .write_reg = mtk_nor_write_reg,
+ .read = mtk_nor_read,
+ .write = mtk_nor_write,
+};
+
static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
@@ -438,12 +445,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor,
nor->dev = mtk_nor->dev;
nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
+ nor->controller_ops = &mtk_controller_ops;
- /* fill the hooks to spi nor */
- nor->read = mtk_nor_read;
- nor->read_reg = mtk_nor_read_reg;
- nor->write = mtk_nor_write;
- nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index 4a871587392b..9a5b1a7c636a 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -123,7 +123,8 @@ static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
return ret;
}
-static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
@@ -145,7 +146,8 @@ static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return nxp_spifi_wait_for_cmd(spifi);
}
-static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
@@ -263,9 +265,18 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
{
u8 id[SPI_NOR_MAX_ID_LEN];
- nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
}
+static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
+ .read_reg = nxp_spifi_read_reg,
+ .write_reg = nxp_spifi_write_reg,
+ .read = nxp_spifi_read,
+ .write = nxp_spifi_write,
+ .erase = nxp_spifi_erase,
+};
+
static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
struct device_node *np)
{
@@ -332,11 +343,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
spifi->nor.dev = spifi->dev;
spi_nor_set_flash_node(&spifi->nor, np);
spifi->nor.priv = spifi;
- spifi->nor.read = nxp_spifi_read;
- spifi->nor.write = nxp_spifi_write;
- spifi->nor.erase = nxp_spifi_erase;
- spifi->nor.read_reg = nxp_spifi_read_reg;
- spifi->nor.write_reg = nxp_spifi_write_reg;
+ spifi->nor.controller_ops = &nxp_spifi_controller_ops;
/*
* The first read on a hard reset isn't reliable so do a
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 7acf4a93b592..f4afe123e9dc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -338,7 +338,7 @@ static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
if (nor->spimem)
return spi_nor_spimem_read_data(nor, from, len, buf);
- return nor->read(nor, from, len, buf);
+ return nor->controller_ops->read(nor, from, len, buf);
}
/**
@@ -385,239 +385,172 @@ static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
if (nor->spimem)
return spi_nor_spimem_write_data(nor, to, len, buf);
- return nor->write(nor, to, len, buf);
+ return nor->controller_ops->write(nor, to, len, buf);
}
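
These two wrappers are the whole dispatch for the new API: the spi-mem path when nor->spimem is set, otherwise the controller_ops table that the controller drivers earlier in the patch now populate instead of the per-nor function pointers. A reduced stand-alone model of the pattern, with invented type and field names (the real ones live in include/linux/mtd/spi-nor.h):

/*
 * Reduced model of the spimem-vs-controller_ops dispatch; all names
 * here are invented for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct nor;

struct nor_controller_ops {
	int (*read_reg)(struct nor *nor, unsigned char opcode,
			unsigned char *buf, size_t len);
};

struct nor {
	void *spimem;				/* non-NULL: spi-mem path */
	const struct nor_controller_ops *ops;	/* legacy controller path */
};

static int nor_read_reg(struct nor *nor, unsigned char opcode,
			unsigned char *buf, size_t len)
{
	if (nor->spimem)
		return -1;	/* would build a spi_mem_op and exec it */

	return nor->ops->read_reg(nor, opcode, buf, len);
}

static int fake_read_reg(struct nor *nor, unsigned char opcode,
			 unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		buf[i] = 0xa5;	/* pretend the controller answered */

	return 0;
}

int main(void)
{
	static const struct nor_controller_ops ops = {
		.read_reg = fake_read_reg,
	};
	struct nor nor = { .spimem = NULL, .ops = &ops };
	unsigned char sr;

	nor_read_reg(&nor, 0x05 /* RDSR */, &sr, 1);
	printf("SR=0x%02x\n", sr);

	return 0;
}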
-/*
- * Read the status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_sr(struct spi_nor *nor)
+static int spi_nor_write_enable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_NO_DATA);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
}
- if (ret < 0) {
- pr_err("error %d reading SR\n", (int) ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Read the flag status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_fsr(struct spi_nor *nor)
+static int spi_nor_write_disable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_NO_DATA);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
}
- if (ret < 0) {
- pr_err("error %d reading FSR\n", ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Read configuration register, returning its value in the
- * location. Return the configuration register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_cr(struct spi_nor *nor)
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+ sr, 1);
}
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading CR\n", ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Write status register 1 byte
- * Returns negative if error occurred.
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int write_sr(struct spi_nor *nor, u8 val)
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
- nor->bouncebuf[0] = val;
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
-
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
-}
+ int ret;
-/*
- * Set write enable latch with Write Enable command.
- * Returns negative if error occurred.
- */
-static int write_enable(struct spi_nor *nor)
-{
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
+ SPI_MEM_OP_DATA_IN(1, fsr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+ fsr, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
}
-/*
- * Send write disable instruction to the chip.
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor: pointer to 'struct spi_nor'
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int write_disable(struct spi_nor *nor)
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
+ SPI_MEM_OP_DATA_IN(1, cr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
- return mtd->priv;
-}
-
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == opcode)
- return table[i][1];
-
- /* No conversion found, keep input op code. */
- return opcode;
-}
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
- static const u8 spi_nor_3to4_read[][2] = {
- { SPINOR_OP_READ, SPINOR_OP_READ_4B },
- { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
- { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
- { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
- { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
- { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
- { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
- { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
-
- { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
- { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
- { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
- ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
- static const u8 spi_nor_3to4_program[][2] = {
- { SPINOR_OP_PP, SPINOR_OP_PP_4B },
- { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
- { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
- { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
- { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
- ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
- static const u8 spi_nor_3to4_erase[][2] = {
- { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
- { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
- { SPINOR_OP_SE, SPINOR_OP_SE_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
- ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
- nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
- nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
- nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
- if (!spi_nor_has_uniform_erase(nor)) {
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- struct spi_nor_erase_type *erase;
- int i;
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- erase = &map->erase_type[i];
- erase->opcode =
- spi_nor_convert_3to4_erase(erase->opcode);
- }
- }
+ return ret;
}
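
All of the register helpers above build the same four-phase spi-mem operation: command, optional address, optional dummy cycles, optional data. A toy model of that descriptor, with invented field names (the real one is struct spi_mem_op in include/linux/spi/spi-mem.h):

/*
 * Toy model of the four-phase operation the SPI_MEM_OP() initializers
 * above describe; field names are invented.
 */
#include <stdint.h>
#include <stdio.h>

struct mem_op {
	uint8_t cmd;	/* opcode byte */
	int naddr;	/* address bytes, 0 = no address phase */
	int ndummy;	/* dummy bytes, 0 = none */
	int nin;	/* data-in bytes, 0 = no data phase */
};

int main(void)
{
	/* RDSR: command byte, no address, no dummy, one byte of data in. */
	struct mem_op rdsr = { .cmd = 0x05, .nin = 1 };
	/* WREN: command byte only, all other phases absent. */
	struct mem_op wren = { .cmd = 0x06 };

	printf("RDSR: cmd=%02x in=%d\n", rdsr.cmd, rdsr.nin);
	printf("WREN: cmd=%02x in=%d\n", wren.cmd, wren.nin);
	return 0;
}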
+/**
+ * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int macronix_set_4byte(struct spi_nor *nor, bool enable)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
@@ -628,26 +561,55 @@ static int macronix_set_4byte(struct spi_nor *nor, bool enable)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
}
- return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
- NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
}
+/**
+ * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
{
int ret;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
ret = macronix_set_4byte(nor, enable);
- write_disable(nor);
+ if (ret)
+ return ret;
- return ret;
+ return spi_nor_write_disable(nor);
}
+/**
+ * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spansion_set_4byte(struct spi_nor *nor, bool enable)
{
+ int ret;
+
nor->bouncebuf[0] = enable << 7;
if (nor->spimem) {
@@ -657,14 +619,29 @@ static int spansion_set_4byte(struct spi_nor *nor, bool enable)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
}
- return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
}
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
+ int ret;
+
nor->bouncebuf[0] = ear;
if (nor->spimem) {
@@ -674,12 +651,26 @@ static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
}
+/**
+ * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int winbond_set_4byte(struct spi_nor *nor, bool enable)
{
int ret;
@@ -693,15 +684,29 @@ static int winbond_set_4byte(struct spi_nor *nor, bool enable)
* Register to be set to 1, so all 3-byte-address reads come from the
* second 16M. We must clear the register to enable normal behavior.
*/
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
ret = spi_nor_write_ear(nor, 0);
- write_disable(nor);
+ if (ret)
+ return ret;
- return ret;
+ return spi_nor_write_disable(nor);
}
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
@@ -709,27 +714,44 @@ static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_IN(1, sr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+ sr, 1);
}
- return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+ return ret;
}
+/**
+ * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the
+ * flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int s3an_sr_ready(struct spi_nor *nor)
{
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ if (ret)
return ret;
- }
return !!(nor->bouncebuf[0] & XSR_RDY);
}
-static int spi_nor_clear_sr(struct spi_nor *nor)
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
@@ -737,20 +759,33 @@ static int spi_nor_clear_sr(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
- int sr = read_sr(nor);
- if (sr < 0)
- return sr;
+ int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
- if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
- if (sr & SR_E_ERR)
+ if (nor->flags & SNOR_F_USE_CLSR &&
+ nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
dev_err(nor->dev, "Erase Error occurred\n");
else
dev_err(nor->dev, "Programming Error occurred\n");
@@ -759,11 +794,17 @@ static int spi_nor_sr_ready(struct spi_nor *nor)
return -EIO;
}
- return !(sr & SR_WIP);
+ return !(nor->bouncebuf[0] & SR_WIP);
}
-static int spi_nor_clear_fsr(struct spi_nor *nor)
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
@@ -771,25 +812,37 @@ static int spi_nor_clear_fsr(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: nonzero if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
- int fsr = read_fsr(nor);
- if (fsr < 0)
- return fsr;
+ int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
- if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
- if (fsr & FSR_E_ERR)
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
dev_err(nor->dev, "Erase operation failed.\n");
else
dev_err(nor->dev, "Program operation failed.\n");
- if (fsr & FSR_PT_ERR)
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
dev_err(nor->dev,
"Attempted to modify a protected sector.\n");
@@ -797,9 +850,15 @@ static int spi_nor_fsr_ready(struct spi_nor *nor)
return -EIO;
}
- return fsr & FSR_READY;
+ return nor->bouncebuf[0] & FSR_READY;
}
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_ready(struct spi_nor *nor)
{
int sr, fsr;
@@ -816,9 +875,13 @@ static int spi_nor_ready(struct spi_nor *nor)
return sr && fsr;
}
-/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until the flash is ready, or the timeout expires.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
unsigned long timeout_jiffies)
@@ -841,24 +904,305 @@ static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
cond_resched();
}
- dev_err(nor->dev, "flash operation timed out\n");
+ dev_dbg(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
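
The helper above is a poll-until-ready loop with a deadline check that deliberately samples the clock before reading the status, so the flash is consulted one last time even after the deadline passes. A user-space analogue of the same shape, where flash_ready() is a stub standing in for spi_nor_ready():

/*
 * User-space analogue of the deadline loop above; flash_ready() is a
 * stub standing in for spi_nor_ready().
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static bool flash_ready(void)
{
	return true;	/* a real check would read the Status Register */
}

static int wait_till_ready(long timeout_ms)
{
	struct timespec now, deadline;
	bool timed_out;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	for (;;) {
		/* Sample the clock first, then look at the status, so the
		 * flash gets one final check after the deadline passes. */
		clock_gettime(CLOCK_MONOTONIC, &now);
		timed_out = now.tv_sec > deadline.tv_sec ||
			    (now.tv_sec == deadline.tv_sec &&
			     now.tv_nsec >= deadline.tv_nsec);

		if (flash_ready())
			return 0;
		if (timed_out)
			return -ETIMEDOUT;

		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	return wait_till_ready(100) ? 1 : 0;
}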
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
-/*
- * Erase the whole flash memory
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+ sr, len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the value read back.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 matches the value read back, and that the 16-bit write did not
+ * affect what was already in the Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (nor->params.quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * We can safely assume that when the Quad Enable method is
+ * set, the value of the QE bit is one, as a consequence of the
+ * nor->params.quad_enable() call.
+ *
+ * We can safely assume that the Quad Enable bit is present in
+ * the Status Register 2 at BIT(1). According to the JESD216
+ * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+ * Write Status (01h) command is available just for the cases
+ * in which the QE bit is described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
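
Stripped of the error handling, the 16-bit write above is buffer composition: the new SR1 in byte 0, the preserved (or synthesized) CR in byte 1, per the JESD216B placement of the Quad Enable bit at BIT(1) of SR2. A compressed sketch with illustrative register values:

/*
 * Composing the 16-bit WRSR payload so the Quad Enable bit in the
 * second byte survives; the register values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SR2_QUAD_EN_BIT1 (1u << 1)

int main(void)
{
	uint8_t sr_cr[2];
	uint8_t new_sr1 = 0x00;			/* e.g. clear the protection bits */
	uint8_t current_cr = SR2_QUAD_EN_BIT1;	/* pretend RDCR returned QE=1 */

	sr_cr[0] = new_sr1;
	sr_cr[1] = current_cr;	/* or SR2_QUAD_EN_BIT1 when RDCR is unsupported */

	printf("WRSR payload: %02x %02x\n", sr_cr[0], sr_cr[1]);
	return 0;
}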
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register matches the value read back, and that the 16-bit write
+ * did not affect what was already in the Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the value read back, without affecting other bits in
+ * Status Register 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+ sr2, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
*
- * Returns 0 if successful, non-zero otherwise.
+ * Return: 0 on success, -errno otherwise.
*/
-static int erase_chip(struct spi_nor *nor)
+static int spi_nor_erase_chip(struct spi_nor *nor)
{
+ int ret;
+
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
if (nor->spimem) {
@@ -868,10 +1212,99 @@ static int erase_chip(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params.erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
}
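
The 3-byte to 4-byte conversion is a plain table lookup that falls back to the input opcode when no entry matches. A self-contained sketch of the technique; the two opcode pairs are the usual JEDEC values written as literals, not the SPINOR_OP_* constants:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t convert_opcode(uint8_t opcode, const uint8_t table[][2],
                                  size_t size)
    {
            for (size_t i = 0; i < size; i++)
                    if (table[i][0] == opcode)
                            return table[i][1];
            return opcode;  /* no conversion found, keep the input opcode */
    }

    int main(void)
    {
            /* Illustrative 3-byte -> 4-byte pairs. */
            static const uint8_t read_3to4[][2] = {
                    { 0x03, 0x13 }, /* READ      -> READ_4B */
                    { 0x0b, 0x0c }, /* READ_FAST -> READ_FAST_4B */
            };

            printf("0x%02x\n", convert_opcode(0x0b, read_3to4, 2)); /* 0x0c */
            return 0;
    }
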
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -880,10 +1313,9 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_lock(&nor->lock);
- if (nor->prepare) {
- ret = nor->prepare(nor, ops);
+ if (nor->controller_ops && nor->controller_ops->prepare) {
+ ret = nor->controller_ops->prepare(nor, ops);
if (ret) {
- dev_err(nor->dev, "failed in the preparation.\n");
mutex_unlock(&nor->lock);
return ret;
}
@@ -893,8 +1325,8 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
- if (nor->unprepare)
- nor->unprepare(nor, ops);
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor, ops);
mutex_unlock(&nor->lock);
}
@@ -935,9 +1367,6 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr = spi_nor_convert_addr(nor, addr);
- if (nor->erase)
- return nor->erase(nor, addr);
-
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
@@ -946,6 +1375,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
SPI_MEM_OP_NO_DATA);
return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return nor->controller_ops->erase(nor, addr);
}
/*
@@ -957,8 +1388,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr >>= 8;
}
- return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
- nor->addr_width);
+ return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
}
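
The tail of spi_nor_erase_sector() packs the sector address most-significant byte first into the bounce buffer before handing it to write_reg(). The same byte packing, as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack addr big-endian into buf[0..width-1], as the erase path does. */
    static void pack_addr_be(uint8_t *buf, uint32_t addr, int width)
    {
            for (int i = width - 1; i >= 0; i--) {
                    buf[i] = addr & 0xff;
                    addr >>= 8;
            }
    }

    int main(void)
    {
            uint8_t buf[4];

            pack_addr_be(buf, 0x123456, 3);
            printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]); /* 12 34 56 */
            return 0;
    }
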
/**
@@ -1208,7 +1639,9 @@ static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
list_for_each_entry_safe(cmd, next, &erase_list, list) {
nor->erase_opcode = cmd->opcode;
while (cmd->count) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
ret = spi_nor_erase_sector(nor, addr);
if (ret)
@@ -1263,12 +1696,13 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
unsigned long timeout;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
- if (erase_chip(nor)) {
- ret = -EIO;
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
goto erase_err;
- }
/*
* Scale the timeout linearly with the size of the flash, with
@@ -1291,7 +1725,9 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
ret = spi_nor_erase_sector(nor, addr);
if (ret)
@@ -1312,7 +1748,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
goto erase_err;
}
- write_disable(nor);
+ ret = spi_nor_write_disable(nor);
erase_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
@@ -1320,27 +1756,6 @@ erase_err:
return ret;
}
-/* Write status register and ensure bits in mask match written values */
-static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
-{
- int ret;
-
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (ret < 0)
- return ret;
-
- return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
-}
-
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -1433,16 +1848,18 @@ static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
- int status_old, status_new;
+ int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- status_old = read_sr(nor);
- if (status_old < 0)
- return status_old;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
/* If nothing in our range is unlocked, we don't need to do anything */
if (stm_is_locked_sr(nor, ofs, len, status_old))
@@ -1502,7 +1919,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- return write_sr_and_check(nor, status_new, mask);
+ return spi_nor_write_sr_and_check(nor, status_new);
}
/*
@@ -1513,16 +1930,18 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
- int status_old, status_new;
+ int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- status_old = read_sr(nor);
- if (status_old < 0)
- return status_old;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
/* If nothing in our range is locked, we don't need to do anything */
if (stm_is_unlocked_sr(nor, ofs, len, status_old))
@@ -1585,7 +2004,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- return write_sr_and_check(nor, status_new, mask);
+ return spi_nor_write_sr_and_check(nor, status_new);
}
/*
@@ -1597,13 +2016,13 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*/
static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- int status;
+ int ret;
- status = read_sr(nor);
- if (status < 0)
- return status;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
- return stm_is_locked_sr(nor, ofs, len, status);
+ return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
}
static const struct spi_nor_locking_ops stm_locking_ops = {
@@ -1657,242 +2076,59 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-/*
- * Write status Register and configuration register with 2 bytes
- * The first byte will be written to the status register, while the
- * second byte will be written to the configuration register.
- * Return negative if error occurred.
- */
-static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
-{
- int ret;
-
- write_enable(nor);
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
- }
-
- if (ret < 0) {
- dev_err(nor->dev,
- "error while writing configuration register\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret) {
- dev_err(nor->dev,
- "timeout while writing configuration register\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * macronix_quad_enable() - set QE bit in Status Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register.
- *
- * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_quad_enable(struct spi_nor *nor)
-{
- int ret, val;
-
- val = read_sr(nor);
- if (val < 0)
- return val;
- if (val & SR_QUAD_EN_MX)
- return 0;
-
- write_enable(nor);
-
- write_sr(nor, val | SR_QUAD_EN_MX);
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
- dev_err(nor->dev, "Macronix Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
- * spansion_quad_enable() - set QE bit in Configuraiton Register.
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
* @nor: pointer to a 'struct spi_nor'
*
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function is kept for legacy purpose because it has been used for a
- * long time without anybody complaining but it should be considered as
- * deprecated and maybe buggy.
- * First, this function doesn't care about the previous values of the Status
- * and Configuration Registers when it sets the QE bit (bit 1) in the
- * Configuration Register: all other bits are cleared, which may have unwanted
- * side effects like removing some block protections.
- * Secondly, it uses the Read Configuration Register (35h) instruction though
- * some very old and few memories don't support this instruction. If a pull-up
- * resistor is present on the MISO/IO1 line, we might still be able to pass the
- * "read back" test because the QSPI memory doesn't recognize the command,
- * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spansion_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
- u8 *sr_cr = nor->bouncebuf;
int ret;
- sr_cr[0] = 0;
- sr_cr[1] = CR_QUAD_EN_SPAN;
- ret = write_sr_cr(nor, sr_cr);
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
- /* read back and check it */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories not supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
-{
- u8 *sr_cr = nor->bouncebuf;
- int ret;
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
- sr_cr[1] = CR_QUAD_EN_SPAN;
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
- return write_sr_cr(nor, sr_cr);
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}
/**
- * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories supporting the Read
- * Configuration Register (35h) instruction.
+ * spi_nor_sr2_bit1_quad_enable() - Set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
*
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
- struct device *dev = nor->dev;
- u8 *sr_cr = nor->bouncebuf;
int ret;
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading configuration register\n");
- return -EINVAL;
- }
-
- if (ret & CR_QUAD_EN_SPAN)
- return 0;
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
- sr_cr[1] = ret | CR_QUAD_EN_SPAN;
-
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
-
- ret = write_sr_cr(nor, sr_cr);
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
if (ret)
return ret;
- /* Read back and check it. */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
-{
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
-
- return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
-}
-
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
- return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
/**
- * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
* @nor: pointer to a 'struct spi_nor'
*
* Set the Quad Enable (QE) bit in the Status Register 2.
@@ -1903,10 +2139,11 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
*
* Return: 0 on success, -errno otherwise.
*/
-static int sr2_bit7_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
u8 *sr2 = nor->bouncebuf;
int ret;
+ u8 sr2_written;
/* Check current Quad Enable bit value. */
ret = spi_nor_read_sr2(nor, sr2);
@@ -1918,117 +2155,23 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor)
/* Update the Quad Enable bit. */
*sr2 |= SR2_QUAD_EN_BIT7;
- write_enable(nor);
-
ret = spi_nor_write_sr2(nor, sr2);
- if (ret < 0) {
- dev_err(nor->dev, "error while writing status register 2\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret < 0) {
- dev_err(nor->dev, "timeout while writing status register 2\n");
+ if (ret)
return ret;
- }
+
+ sr2_written = *sr2;
/* Read back and check it. */
ret = spi_nor_read_sr2(nor, sr2);
- if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
- dev_err(nor->dev, "SR2 Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_clear_sr_bp(struct spi_nor *nor)
-{
- int ret;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return ret;
- }
-
- write_enable(nor);
-
- ret = write_sr(nor, ret & ~mask);
- if (ret) {
- dev_err(nor->dev, "write to status register failed\n");
- return ret;
- }
-
- ret = spi_nor_wait_till_ready(nor);
if (ret)
- dev_err(nor->dev, "timeout while writing status register\n");
- return ret;
-}
-
-/**
- * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
- * bits on spansion flashes.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits. The function is tightly
- * coupled with the spansion_quad_enable() function. Both assume that the Write
- * Register with 16 bits, together with the Read Configuration Register (35h)
- * instructions are supported.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
-{
- int ret;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 *sr_cr = nor->bouncebuf;
-
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while reading configuration register\n");
return ret;
- }
-
- /*
- * When the configuration register Quad Enable bit is one, only the
- * Write Status (01h) command with two data bytes may be used.
- */
- if (ret & CR_QUAD_EN_SPAN) {
- sr_cr[1] = ret;
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while reading status register\n");
- return ret;
- }
- sr_cr[0] = ret & ~mask;
-
- ret = write_sr_cr(nor, sr_cr);
- if (ret)
- dev_err(nor->dev, "16-bit write register failed\n");
- return ret;
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
}
- /*
- * If the Quad Enable bit is zero, use the Write Status (01h) command
- * with one data byte.
- */
- return spi_nor_clear_sr_bp(nor);
+ return 0;
}
/* Used when the "_ext_id" is two bytes at most */
@@ -2136,7 +2279,7 @@ static void gd25q256_default_init(struct spi_nor *nor)
* indicate the quad_enable method for this case, we need
* to set it in the default_init fixup hook.
*/
- nor->params.quad_enable = macronix_quad_enable;
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static struct spi_nor_fixups gd25q256_fixups = {
@@ -2179,6 +2322,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ) },
@@ -2267,6 +2412,10 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -2482,6 +2631,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
@@ -2520,11 +2671,11 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
tmp = spi_mem_exec_op(nor->spimem, &op);
} else {
- tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
- SPI_NOR_MAX_ID_LEN);
+ tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
}
- if (tmp < 0) {
- dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ if (tmp) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
return ERR_PTR(tmp);
}
@@ -2544,7 +2695,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
+ ssize_t ret;
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
@@ -2583,7 +2734,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t actual;
+ size_t actual = 0;
int ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -2592,26 +2743,28 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
nor->sst_write_second = false;
- actual = to % 2;
/* Start write from odd address. */
- if (actual) {
+ if (to % 2) {
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
ret = spi_nor_write_data(nor, to, 1, buf);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
+
+ to++;
+ actual++;
}
- to += actual;
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
@@ -2620,39 +2773,44 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write two bytes. */
ret = spi_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
to += 2;
nor->sst_write_second = true;
}
nor->sst_write_second = false;
- write_disable(nor);
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
/* Write out trailing byte if it exists. */
if (actual != len) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
nor->program_opcode = SPINOR_OP_BP;
ret = spi_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
- write_disable(nor);
+ goto out;
+
actual += 1;
+
+ ret = spi_nor_write_disable(nor);
}
-sst_write_err:
+out:
*retlen += actual;
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
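
The control flow above programs an optional leading byte to reach an even address, then 2-byte words, then an optional trailing byte. A sketch of just that address/length bookkeeping, with the flash I/O replaced by printf():

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Show how an (addr, len) SST write splits into byte/word/byte phases. */
    static void sst_split(uint32_t to, size_t len)
    {
            size_t actual = 0;

            if (to % 2) {           /* leading byte to reach an even address */
                    printf("byte-program at 0x%x\n", (unsigned)to);
                    to++;
                    actual++;
            }
            for (; actual + 1 < len; actual += 2, to += 2)  /* word phase */
                    printf("word-program at 0x%x\n", (unsigned)to);
            if (actual != len)      /* trailing odd byte, if any */
                    printf("byte-program at 0x%x\n", (unsigned)to);
    }

    int main(void)
    {
            sst_split(0x1001, 6);   /* one byte, two words, one trailing byte */
            return 0;
    }
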
@@ -2701,7 +2859,10 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
addr = spi_nor_convert_addr(nor, addr);
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto write_err;
+
ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
if (ret < 0)
goto write_err;
@@ -2722,13 +2883,21 @@ write_err:
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev ||
- (!nor->spimem &&
- (!nor->read || !nor->write || !nor->read_reg ||
- !nor->write_reg))) {
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
pr_err("spi-nor: please fill all the necessary fields!\n");
return -EINVAL;
}
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
return 0;
}
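
spi_nor_check() now enforces that nor->spimem and nor->controller_ops are mutually exclusive front-ends for the same operations. A rough sketch of that shape, using hypothetical stub types rather than the kernel ones:

    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the two transports; not the real kernel types. */
    struct mem_backend;                     /* plays the role of nor->spimem */
    struct ops_backend {                    /* plays the role of nor->controller_ops */
            int (*read_reg)(struct ops_backend *ops, unsigned char opcode);
    };

    struct device_stub {
            struct mem_backend *mem;        /* exactly one of these two */
            struct ops_backend *ops;        /* may be non-NULL */
    };

    static int dev_check(const struct device_stub *dev)
    {
            if (!dev->mem && !dev->ops)
                    return -EINVAL;         /* no transport at all */
            if (dev->mem && dev->ops)
                    return -EINVAL;         /* mutually exclusive */
            return 0;
    }

    static int mem_exec(struct mem_backend *mem, unsigned char opcode)
    {
            (void)mem;
            (void)opcode;
            return 0;                       /* stub for spi_mem_exec_op() */
    }

    static int dev_read_reg(struct device_stub *dev, unsigned char opcode)
    {
            if (dev->mem)                   /* preferred spi-mem path */
                    return mem_exec(dev->mem, opcode);
            return dev->ops->read_reg(dev->ops, opcode);    /* legacy path */
    }
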
@@ -2738,10 +2907,8 @@ static int s3an_nor_setup(struct spi_nor *nor,
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ if (ret)
return ret;
- }
nor->erase_opcode = SPINOR_OP_XSE;
nor->program_opcode = SPINOR_OP_XPP;
@@ -2865,7 +3032,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
*/
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
- int ret;
+ ssize_t ret;
while (len) {
ret = spi_nor_read_data(nor, addr, len, buf);
@@ -3489,20 +3656,39 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
break;
case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
- params->quad_enable = spansion_no_read_cr_quad_enable;
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
case BFPT_DWORD15_QER_SR1_BIT6:
- params->quad_enable = macronix_quad_enable;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT7:
- params->quad_enable = sr2_bit7_quad_enable;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT1:
- params->quad_enable = spansion_read_cr_quad_enable;
+ /*
+		 * JESD216 rev B or later does not specify whether writing
+		 * only one byte to the Status Register clears Status
+		 * Register 2, so let's be cautious and keep the default
+ * assumption of a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
default:
@@ -4101,7 +4287,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
err = spi_nor_read_sfdp(nor, sizeof(header),
psize, param_headers);
if (err < 0) {
- dev_err(dev, "failed to read SFDP parameter headers\n");
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
goto exit;
}
}
@@ -4348,7 +4534,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the (Fast) Read command. */
err = spi_nor_select_read(nor, shared_mask);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select read settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4356,7 +4542,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the Page Program command. */
err = spi_nor_select_pp(nor, shared_mask);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select write settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4364,7 +4550,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the Sector Erase command. */
err = spi_nor_select_erase(nor);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select erase settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4381,12 +4567,32 @@ static int spi_nor_setup(struct spi_nor *nor,
return nor->params.setup(nor, hwcaps);
}
+static void atmel_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void intel_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void issi_set_default_init(struct spi_nor *nor)
+{
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
static void macronix_set_default_init(struct spi_nor *nor)
{
- nor->params.quad_enable = macronix_quad_enable;
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
nor->params.set_4byte = macronix_set_4byte;
}
+static void sst_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
static void st_micron_set_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
@@ -4408,6 +4614,18 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
/* Init flash parameters based on MFR */
switch (JEDEC_MFR(nor->info)) {
+ case SNOR_MFR_ATMEL:
+ atmel_set_default_init(nor);
+ break;
+
+ case SNOR_MFR_INTEL:
+ intel_set_default_init(nor);
+ break;
+
+ case SNOR_MFR_ISSI:
+ issi_set_default_init(nor);
+ break;
+
case SNOR_MFR_MACRONIX:
macronix_set_default_init(nor);
break;
@@ -4417,6 +4635,10 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
st_micron_set_default_init(nor);
break;
+ case SNOR_MFR_SST:
+ sst_set_default_init(nor);
+ break;
+
case SNOR_MFR_WINBOND:
winbond_set_default_init(nor);
break;
@@ -4465,9 +4687,11 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
u8 i, erase_mask;
/* Initialize legacy flash parameters and settings. */
- params->quad_enable = spansion_quad_enable;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
params->set_4byte = spansion_set_4byte;
params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->size = (u64)info->sector_size * info->n_sectors;
@@ -4675,25 +4899,36 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
return nor->params.quad_enable(nor);
}
+/**
+ * spi_nor_unlock_all() - Unlocks the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. Backward
+ * compatibility requires unlocking the entire flash memory array at power-up
+ * by default.
+ */
+static int spi_nor_unlock_all(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_LOCK)
+ return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
+
+ return 0;
+}
+
static int spi_nor_init(struct spi_nor *nor)
{
int err;
- if (nor->clear_sr_bp) {
- if (nor->params.quad_enable == spansion_quad_enable)
- nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
-
- err = nor->clear_sr_bp(nor);
- if (err) {
- dev_err(nor->dev,
- "fail to clear block protection bits\n");
- return err;
- }
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
}
- err = spi_nor_quad_enable(nor);
+ err = spi_nor_unlock_all(nor);
if (err) {
- dev_err(nor->dev, "quad mode not supported\n");
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
return err;
}
@@ -4761,7 +4996,7 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
}
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_err(nor->dev, "address width is too large: %u\n",
+ dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
return -EINVAL;
}
@@ -4879,16 +5114,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set.
- */
- if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
- nor->info->flags & SPI_NOR_HAS_LOCK)
- nor->clear_sr_bp = spi_nor_clear_sr_bp;
-
/* Init flash parameters based on flash_info struct and SFDP */
spi_nor_init_params(nor);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index a1dff92ceedf..0f847d510950 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -509,11 +509,9 @@ static const struct file_operations eraseblk_count_fops = {
*/
int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
- int err, n;
unsigned long ubi_num = ubi->ubi_num;
- const char *fname;
- struct dentry *dent;
struct ubi_debug_info *d = &ubi->dbg;
+ int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -522,95 +520,52 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
ubi->ubi_num);
if (n == UBI_DFS_DIR_LEN) {
/* The array size is too small */
- fname = UBI_DFS_DIR_NAME;
- dent = ERR_PTR(-EINVAL);
- goto out;
+ return -EINVAL;
}
- fname = d->dfs_dir_name;
- dent = debugfs_create_dir(fname, dfs_rootdir);
- if (IS_ERR_OR_NULL(dent))
- goto out;
- d->dfs_dir = dent;
-
- fname = "chk_gen";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_gen = dent;
-
- fname = "chk_io";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_io = dent;
-
- fname = "chk_fastmap";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_fastmap = dent;
-
- fname = "tst_disable_bgt";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_disable_bgt = dent;
-
- fname = "tst_emulate_bitflips";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_bitflips = dent;
-
- fname = "tst_emulate_io_failures";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_io_failures = dent;
-
- fname = "tst_emulate_power_cut";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_power_cut = dent;
-
- fname = "tst_emulate_power_cut_min";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_power_cut_min = dent;
-
- fname = "tst_emulate_power_cut_max";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_power_cut_max = dent;
-
- fname = "detailed_erase_block_info";
- dent = debugfs_create_file(fname, S_IRUSR, d->dfs_dir, (void *)ubi_num,
- &eraseblk_count_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- return 0;
+ d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
-out_remove:
- debugfs_remove_recursive(d->dfs_dir);
-out:
- err = dent ? PTR_ERR(dent) : -ENODEV;
- ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
- fname, err);
- return err;
+ d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+ (void *)ubi_num, &eraseblk_count_fops);
+
+ return 0;
}
/**
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index df1c7989e13d..d02f12a5254e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -153,22 +153,22 @@ config IPVLAN_L3S
select NET_L3_MASTER_DEV
config IPVLAN
- tristate "IP-VLAN support"
- depends on INET
- depends on IPV6 || !IPV6
- ---help---
- This allows one to create virtual devices off of a main interface
- and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
- on packets. All interfaces (including the main interface) share L2
- making it transparent to the connected L2 switch.
+ tristate "IP-VLAN support"
+ depends on INET
+ depends on IPV6 || !IPV6
+ ---help---
+ This allows one to create virtual devices off of a main interface
+ and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
+ on packets. All interfaces (including the main interface) share L2
+ making it transparent to the connected L2 switch.
- Ipvlan devices can be added using the "ip" command from the
- iproute2 package starting with the iproute2-3.19 release:
+ Ipvlan devices can be added using the "ip" command from the
+ iproute2 package starting with the iproute2-3.19 release:
- "ip link add link <main-dev> [ NAME ] type ipvlan"
+ "ip link add link <main-dev> [ NAME ] type ipvlan"
- To compile this driver as a module, choose M here: the module
- will be called ipvlan.
+ To compile this driver as a module, choose M here: the module
+ will be called ipvlan.
config IPVTAP
tristate "IP-VLAN based tap driver"
@@ -185,11 +185,11 @@ config IPVTAP
will be called ipvtap.
config VXLAN
- tristate "Virtual eXtensible Local Area Network (VXLAN)"
- depends on INET
- select NET_UDP_TUNNEL
- select GRO_CELLS
- ---help---
+ tristate "Virtual eXtensible Local Area Network (VXLAN)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ ---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
to tunnel virtual network infrastructure in virtualized environments.
@@ -200,12 +200,12 @@ config VXLAN
will be called vxlan.
config GENEVE
- tristate "Generic Network Virtualization Encapsulation"
- depends on INET
- depends on IPV6 || !IPV6
- select NET_UDP_TUNNEL
- select GRO_CELLS
- ---help---
+ tristate "Generic Network Virtualization Encapsulation"
+ depends on INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ ---help---
This allows one to create geneve virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. GENEVE is often used
to tunnel virtual network infrastructure in virtualized environments.
@@ -244,8 +244,8 @@ config MACSEC
config NETCONSOLE
tristate "Network console logging support"
---help---
- If you want to log kernel messages over the network, enable this.
- See <file:Documentation/networking/netconsole.txt> for details.
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
@@ -362,12 +362,12 @@ config NET_VRF
support enables VRF devices.
config VSOCKMON
- tristate "Virtual vsock monitoring device"
- depends on VHOST_VSOCK
- ---help---
- This option enables a monitoring net device for vsock sockets. It is
- mostly intended for developers or support to debug vsock issues. If
- unsure, say N.
+ tristate "Virtual vsock monitoring device"
+ depends on VHOST_VSOCK
+ ---help---
+ This option enables a monitoring net device for vsock sockets. It is
+ mostly intended for developers or support to debug vsock issues. If
+ unsure, say N.
endif # NET_CORE
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62f65573eb04..fcb7c2f7f001 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -41,6 +41,8 @@
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
@@ -200,6 +202,51 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+static const struct flow_dissector_key flow_keys_bonding_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_TIPC,
+ .offset = offsetof(struct flow_keys, addrs.tipckey),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_ICMP,
+ .offset = offsetof(struct flow_keys, icmp),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_VLAN,
+ .offset = offsetof(struct flow_keys, vlan),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+ .offset = offsetof(struct flow_keys, tags),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+ .offset = offsetof(struct flow_keys, keyid),
+ },
+};
+
+static struct flow_dissector flow_keys_bonding __read_mostly;
+
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -3252,39 +3299,78 @@ static inline u32 bond_eth_hash(struct sk_buff *skb)
return 0;
}
-/* Extract the appropriate headers based on bond's xmit policy */
-static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
- struct flow_keys *fk)
+static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
+ int *noff, int *proto, bool l34)
{
const struct ipv6hdr *iph6;
const struct iphdr *iph;
- int noff, proto = -1;
-
- if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
- return skb_flow_dissect_flow_keys(skb, fk, 0);
- fk->ports.ports = 0;
- noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
- if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph))))
return false;
- iph = ip_hdr(skb);
+ iph = (const struct iphdr *)(skb->data + *noff);
iph_to_flow_copy_v4addrs(fk, iph);
- noff += iph->ihl << 2;
+ *noff += iph->ihl << 2;
if (!ip_is_fragment(iph))
- proto = iph->protocol;
+ *proto = iph->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
+ if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6))))
return false;
- iph6 = ipv6_hdr(skb);
+ iph6 = (const struct ipv6hdr *)(skb->data + *noff);
iph_to_flow_copy_v6addrs(fk, iph6);
- noff += sizeof(*iph6);
- proto = iph6->nexthdr;
+ *noff += sizeof(*iph6);
+ *proto = iph6->nexthdr;
} else {
return false;
}
- if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
- fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+
+ if (l34 && *proto >= 0)
+ fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);
+
+ return true;
+}
+
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+ struct flow_keys *fk)
+{
+ bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
+ int noff, proto = -1;
+
+ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+ memset(fk, 0, sizeof(*fk));
+ return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
+ fk, NULL, 0, 0, 0, 0);
+ }
+
+ fk->ports.ports = 0;
+ memset(&fk->icmp, 0, sizeof(fk->icmp));
+ noff = skb_network_offset(skb);
+ if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
+ return false;
+
+	/* ICMP error packets contain at least 8 bytes of the header
+	 * of the packet which generated the error. Use this information
+	 * to correlate an ICMP error packet with the flow that
+	 * generated the error.
+ */
+ if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
+ skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
+ skb_transport_offset(skb),
+ skb_headlen(skb));
+ if (proto == IPPROTO_ICMP) {
+ if (!icmp_is_err(fk->icmp.type))
+ return true;
+
+ noff += sizeof(struct icmphdr);
+ } else if (proto == IPPROTO_ICMPV6) {
+ if (!icmpv6_is_err(fk->icmp.type))
+ return true;
+
+ noff += sizeof(struct icmp6hdr);
+ }
+ return bond_flow_ip(skb, fk, &noff, &proto, l34);
+ }
return true;
}
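
An ICMP error message carries at least the first 8 bytes of the offending packet right after its own 8-byte header, which is why the code above can bump noff past the ICMP header and call bond_flow_ip() a second time. A simplified, offset-only sketch of that walk for IPv4, with field offsets hard-coded for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    #define PROTO_ICMP      1       /* the IPv4 protocol number for ICMP */
    #define ICMP_HDR_LEN    8       /* fixed ICMP header preceding the payload */

    /* Offset walk for an IPv4 ICMP error: outer IP -> ICMP -> embedded IP. */
    static bool inner_ip_offset(const uint8_t *pkt, int len, int *off)
    {
            int ihl;

            if (*off + 20 > len)                    /* minimal IPv4 header */
                    return false;
            ihl = (pkt[*off] & 0x0f) * 4;           /* outer IHL, in bytes */
            if (pkt[*off + 9] != PROTO_ICMP)        /* outer protocol field */
                    return false;
            *off += ihl + ICMP_HDR_LEN;             /* skip to the embedded header */
            return *off + 20 <= len;                /* embedded IPv4 header present? */
    }
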
@@ -3311,10 +3397,14 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
- bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
hash = bond_eth_hash(skb);
- else
- hash = (__force u32)flow.ports.ports;
+ } else {
+ if (flow.icmp.id)
+ memcpy(&hash, &flow.icmp, sizeof(hash));
+ else
+ memcpy(&hash, &flow.ports.ports, sizeof(hash));
+ }
hash ^= (__force u32)flow_get_u32_dst(&flow) ^
(__force u32)flow_get_u32_src(&flow);
hash ^= (hash >> 16);
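
The resulting hash mixes the L4 word (the ports, or the ICMP header when correlating errors) with the L3 addresses, then folds the upper bits down. A minimal sketch, assuming the function's remaining shift-XOR steps match this shape:

    #include <stdint.h>

    /* Fold the L4 word with the L3 addresses, then mix the halves down,
     * mirroring the tail of bond_xmit_hash(). */
    static uint32_t fold_hash(uint32_t l4_word, uint32_t src, uint32_t dst)
    {
            uint32_t hash = l4_word;

            hash ^= dst ^ src;
            hash ^= hash >> 16;
            hash ^= hash >> 8;

            return hash;
    }
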
@@ -4891,6 +4981,10 @@ static int __init bonding_init(void)
goto err;
}
+ skb_flow_dissector_init(&flow_keys_bonding,
+ flow_keys_bonding_keys,
+ ARRAY_SIZE(flow_keys_bonding_keys));
+
register_netdevice_notifier(&bond_netdev_notifier);
out:
return res;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 2b9a2f117113..e74e2bb61236 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -3,44 +3,50 @@
# CAIF physical drivers
#
-comment "CAIF transport drivers"
+menuconfig CAIF_DRIVERS
+ bool "CAIF transport drivers"
+ depends on CAIF
+ help
+ Enable this to see CAIF physical drivers.
+
+if CAIF_DRIVERS
config CAIF_TTY
tristate "CAIF TTY transport driver"
depends on CAIF && TTY
default n
---help---
- The CAIF TTY transport driver is a Line Discipline (ldisc)
- identified as N_CAIF. When this ldisc is opened from user space
- it will redirect the TTY's traffic into the CAIF stack.
+ The CAIF TTY transport driver is a Line Discipline (ldisc)
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
depends on CAIF && HAS_DMA
default n
---help---
- The CAIF Link layer SPI Protocol driver for Slave SPI interface.
- This driver implements a platform driver to accommodate for a
- platform specific SPI device. A sample CAIF SPI Platform device is
- provided in Documentation/networking/caif/spi_porting.txt
+ The CAIF Link layer SPI Protocol driver for Slave SPI interface.
+ This driver implements a platform driver to accommodate for a
+ platform specific SPI device. A sample CAIF SPI Platform device is
+ provided in <file:Documentation/networking/caif/spi_porting.txt>.
config CAIF_SPI_SYNC
bool "Next command and length in start of frame"
depends on CAIF_SPI_SLAVE
default n
---help---
- Putting the next command and length in the start of the frame can
- help to synchronize to the next transfer in case of over or under-runs.
- This option also needs to be enabled on the modem.
+ Putting the next command and length in the start of the frame can
+ help to synchronize to the next transfer in case of over or under-runs.
+ This option also needs to be enabled on the modem.
config CAIF_HSI
- tristate "CAIF HSI transport driver"
- depends on CAIF
- default n
- ---help---
- The caif low level driver for CAIF over HSI.
- Be aware that if you enable this then you also need to
- enable a low-level HSI driver.
+ tristate "CAIF HSI transport driver"
+ depends on CAIF
+ default n
+ ---help---
+ The CAIF low level driver for CAIF over HSI.
+ Be aware that if you enable this then you also need to
+ enable a low-level HSI driver.
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
@@ -50,8 +56,10 @@ config CAIF_VIRTIO
select GENERIC_ALLOCATOR
default n
---help---
- The caif driver for CAIF over Virtio.
+ The CAIF driver for CAIF over Virtio.
if CAIF_VIRTIO
source "drivers/vhost/Kconfig.vringh"
endif
+
+endif # CAIF_DRIVERS
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 40b079162804..bd40b114d6cd 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -102,8 +102,8 @@ static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
&ser->rx_blob);
- debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir,
- (u32 *)&ser->state);
+ debugfs_create_xul("ser_state", 0400, ser->debugfs_tty_dir,
+ &ser->state);
debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
&ser->tty_status);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index b5145a7f874c..05f425ceb53a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -39,10 +39,11 @@
#include "c_can.h"
-#define DCAN_RAM_INIT_BIT (1 << 3)
+#define DCAN_RAM_INIT_BIT BIT(3)
+
static DEFINE_SPINLOCK(raminit_lock);
-/*
- * 16-bit c_can registers can be arranged differently in the memory
+
+/* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
@@ -54,7 +55,7 @@ static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
}
static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
@@ -66,7 +67,7 @@ static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
}
static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
@@ -144,13 +145,13 @@ static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
u32 val;
val = priv->read_reg(priv, index);
- val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+ val |= ((u32)priv->read_reg(priv, index + 1)) << 16;
return val;
}
-static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+static void c_can_plat_write_reg32(const struct c_can_priv *priv,
+ enum reg index, u32 val)
{
priv->write_reg(priv, index + 1, val >> 16);
priv->write_reg(priv, index, val);
@@ -161,8 +162,8 @@ static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
return readl(priv->base + priv->regs[index]);
}
-static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+static void d_can_plat_write_reg32(const struct c_can_priv *priv,
+ enum reg index, u32 val)
{
writel(val, priv->base + priv->regs[index]);
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 1c88c361938c..6ee06a49fb4c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -553,10 +553,9 @@ static void can_restart(struct net_device *dev)
/* send restart message upstream */
skb = alloc_can_err_skb(dev, &cf);
- if (!skb) {
- err = -ENOMEM;
+ if (!skb)
goto restart;
- }
+
cf->can_id |= CAN_ERR_RESTARTED;
netif_rx(skb);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 57f9a2f51085..a929cdda9ab2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -142,7 +142,7 @@
#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
-#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
+#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -277,9 +277,9 @@ struct flexcan_priv {
u8 mb_size;
u8 clk_src; /* clock source of CAN Protocol Engine */
+ u64 rx_mask;
+ u64 tx_mask;
u32 reg_ctrl_default;
- u32 reg_imask1_default;
- u32 reg_imask2_default;
struct clk *clk_ipg;
struct clk *clk_per;
@@ -743,8 +743,6 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
u32 timestamp;
int err;
- timestamp = priv->read(&regs->timer) << 16;
-
flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
@@ -764,6 +762,8 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (likely(new_state == priv->can.state))
return;
+ timestamp = priv->read(&regs->timer) << 16;
+
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return;
@@ -778,21 +778,58 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
dev->stats.rx_fifo_errors++;
}
+static inline u64 flexcan_read64_mask(struct flexcan_priv *priv, void __iomem *addr, u64 mask)
+{
+ u64 reg = 0;
+
+ if (upper_32_bits(mask))
+ reg = (u64)priv->read(addr - 4) << 32;
+ if (lower_32_bits(mask))
+ reg |= priv->read(addr);
+
+ return reg & mask;
+}
+
+static inline void flexcan_write64(struct flexcan_priv *priv, u64 val, void __iomem *addr)
+{
+ if (upper_32_bits(val))
+ priv->write(upper_32_bits(val), addr - 4);
+ if (lower_32_bits(val))
+ priv->write(lower_32_bits(val), addr);
+}
+
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
+{
+ return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->rx_mask);
+}
+
+static inline u64 flexcan_read_reg_iflag_tx(struct flexcan_priv *priv)
+{
+ return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->tx_mask);
+}
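
flexcan_read64_mask() and flexcan_write64() model IFLAG2/IFLAG1 as a single 64-bit word and touch each 32-bit half only when the mask or value requires it. A plain-memory sketch of the same split; the register layout is inferred from the driver's addr - 4 arithmetic:

    #include <stdint.h>

    #define upper_32(v)     ((uint32_t)((v) >> 32))
    #define lower_32(v)     ((uint32_t)(v))

    /* Two adjacent 32-bit registers viewed as one 64-bit flag word; IFLAG2
     * sits 4 bytes before IFLAG1, hence the driver's "addr - 4". */
    struct iflag_regs {
            uint32_t iflag2;        /* upper 32 bits */
            uint32_t iflag1;        /* lower 32 bits */
    };

    static uint64_t read64_mask(const struct iflag_regs *r, uint64_t mask)
    {
            uint64_t v = 0;

            if (upper_32(mask))
                    v = (uint64_t)r->iflag2 << 32;
            if (lower_32(mask))
                    v |= r->iflag1;

            return v & mask;
    }

    static void write64(struct iflag_regs *r, uint64_t val)
    {
            if (upper_32(val))
                    r->iflag2 = upper_32(val);      /* W1C in real hardware */
            if (lower_32(val))
                    r->iflag1 = lower_32(val);
    }
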
+
static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
return container_of(offload, struct flexcan_priv, offload);
}
-static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int n)
+static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
+ unsigned int n, u32 *timestamp,
+ bool drop)
{
struct flexcan_priv *priv = rx_offload_to_priv(offload);
struct flexcan_regs __iomem *regs = priv->regs;
struct flexcan_mb __iomem *mb;
+ struct sk_buff *skb;
+ struct can_frame *cf;
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -806,7 +843,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
(code != FLEXCAN_MB_CODE_RX_OVERRUN))
- return 0;
+ return NULL;
if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
/* This MB was overrun, we lost data */
@@ -816,11 +853,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
} else {
reg_iflag1 = priv->read(&regs->iflag1);
if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
- return 0;
+ return NULL;
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (!skb) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
/* increase timestamp to full 32 bit */
*timestamp = reg_ctrl << 16;
@@ -839,16 +882,11 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
*(__be32 *)(cf->data + i) = data;
}
- /* mark as read */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- /* Clear IRQ */
- if (n < 32)
- priv->write(BIT(n), &regs->iflag1);
- else
- priv->write(BIT(n - 32), &regs->iflag2);
- } else {
+ mark_as_read:
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1);
+ else
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- }
/* Read the Free Running Timer. It is optional but recommended
* to unlock Mailbox as soon as possible and make it available
@@ -856,20 +894,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
*/
priv->read(&regs->timer);
- return 1;
-}
-
-
-static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
-{
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 iflag1, iflag2;
-
- iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
- ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
- iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
-
- return (u64)iflag2 << 32 | iflag1;
+ return skb;
}
static irqreturn_t flexcan_irq(int irq, void *dev_id)
@@ -879,18 +904,19 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
irqreturn_t handled = IRQ_NONE;
- u32 reg_iflag2, reg_esr;
+ u64 reg_iflag_tx;
+ u32 reg_esr;
enum can_state last_state = priv->can.state;
/* reception interrupt */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 reg_iflag;
+ u64 reg_iflag_rx;
int ret;
- while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) {
+ while ((reg_iflag_rx = flexcan_read_reg_iflag_rx(priv))) {
handled = IRQ_HANDLED;
ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
- reg_iflag);
+ reg_iflag_rx);
if (!ret)
break;
}
@@ -913,10 +939,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
}
}
- reg_iflag2 = priv->read(&regs->iflag2);
+ reg_iflag_tx = flexcan_read_reg_iflag_tx(priv);
/* transmission complete interrupt */
- if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+ if (reg_iflag_tx & priv->tx_mask) {
u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
@@ -928,7 +954,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb->can_ctrl);
- priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag2);
+ flexcan_write64(priv, priv->tx_mask, &regs->iflag1);
netif_wake_queue(dev);
}
@@ -1040,6 +1066,7 @@ static int flexcan_chip_start(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
+ u64 reg_imask;
int err, i;
struct flexcan_mb __iomem *mb;
@@ -1214,8 +1241,9 @@ static int flexcan_chip_start(struct net_device *dev)
/* enable interrupts atomically */
disable_irq(dev->irq);
priv->write(priv->reg_ctrl_default, &regs->ctrl);
- priv->write(priv->reg_imask1_default, &regs->imask1);
- priv->write(priv->reg_imask2_default, &regs->imask2);
+ reg_imask = priv->rx_mask | priv->tx_mask;
+ priv->write(upper_32_bits(reg_imask), &regs->imask2);
+ priv->write(lower_32_bits(reg_imask), &regs->imask1);
enable_irq(dev->irq);
/* print chip status */
@@ -1283,26 +1311,19 @@ static int flexcan_open(struct net_device *dev)
flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
priv->tx_mb_idx = priv->mb_count - 1;
priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
-
- priv->reg_imask1_default = 0;
- priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+ priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
priv->offload.mailbox_read = flexcan_mailbox_read;
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 imask;
-
priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
priv->offload.mb_last = priv->mb_count - 2;
- imask = GENMASK_ULL(priv->offload.mb_last,
- priv->offload.mb_first);
- priv->reg_imask1_default |= imask;
- priv->reg_imask2_default |= imask >> 32;
-
+ priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
+ priv->offload.mb_first);
err = can_rx_offload_add_timestamp(dev, &priv->offload);
} else {
- priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+ priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
err = can_rx_offload_add_fifo(dev, &priv->offload,
FLEXCAN_NAPI_WEIGHT);
@@ -1534,7 +1555,6 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
- struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
int err, irq;
@@ -1569,12 +1589,11 @@ static int flexcan_probe(struct platform_device *pdev)
clock_freq = clk_get_rate(clk_per);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -ENODEV;
- regs = devm_ioremap_resource(&pdev->dev, mem);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
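
The flexcan_read64_mask()/flexcan_write64() helpers added above depend on the FlexCAN register layout, where IFLAG2/IMASK2 sit one 32-bit word below IFLAG1/IMASK1, so "addr - 4" reaches the upper half of the combined 64-bit flag value. A minimal standalone sketch of the same split, with the MMIO accessors stubbed out by a plain array (the real driver goes through priv->read()/priv->write(), and the layout assumption should be checked against the chip's memory map):

#include <stdint.h>
#include <stdio.h>

/* regs[0] stands in for IFLAG2 (lower address), regs[1] for IFLAG1 */
static uint32_t regs[2];

static uint64_t read64_mask(uint64_t mask)
{
	uint64_t val = 0;

	if (mask >> 32)			/* caller cares about the upper word */
		val = (uint64_t)regs[0] << 32;
	if ((uint32_t)mask)		/* caller cares about the lower word */
		val |= regs[1];

	return val & mask;
}

static void write64(uint64_t val)
{
	if (val >> 32)
		regs[0] = (uint32_t)(val >> 32);
	if ((uint32_t)val)
		regs[1] = (uint32_t)val;
}

int main(void)
{
	regs[0] = 0x1;			/* mailbox 32 pending */
	regs[1] = 0x100;		/* mailbox 8 pending */

	printf("pending mask: 0x%llx\n",
	       (unsigned long long)read64_mask(~0ULL));
	write64(1ULL << 32);		/* ack mailbox 32 only */
	return 0;
}
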
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index b8f1f2b69dd3..378200b682fa 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1652,7 +1652,6 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
- struct resource *res;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1672,8 +1671,7 @@ static int grcan_probe(struct platform_device *ofdev)
goto exit_error;
}
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&ofdev->dev, res);
+ base = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(base)) {
err = PTR_ERR(base);
goto exit_error;
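
This series converts many probe() functions from the platform_get_resource() + devm_ioremap_resource() pair to devm_platform_ioremap_resource(). The helper is essentially that pair folded together, so the conversions are behavior-preserving:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	/* look up the index-th MEM resource, then map it via devres */
	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
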
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index fedd927ba6ed..04d59bede5ea 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -942,13 +942,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct ifi_canfd_priv *priv;
- struct resource *res;
void __iomem *addr;
int irq, ret;
u32 id, rev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
irq = platform_get_irq(pdev, 0);
if (IS_ERR(addr) || irq < 0)
return -EINVAL;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 562c8317e3aa..02c5795b7393 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -123,6 +123,7 @@ enum m_can_reg {
#define CCCR_CME_CANFD_BRS 0x2
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
+#define CCCR_DAR BIT(6)
#define CCCR_MON BIT(5)
#define CCCR_CSR BIT(4)
#define CCCR_CSA BIT(3)
@@ -777,6 +778,43 @@ static inline bool is_lec_err(u32 psr)
return psr && (psr != LEC_UNUSED);
}
+static inline bool m_can_is_protocol_err(u32 irqstatus)
+{
+ return irqstatus & IR_ERR_LEC_31X;
+}
+
+static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+
+ /* update tx error stats since there is a protocol error */
+ stats->tx_errors++;
+
+ /* update arbitration lost status */
+ if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
+ netdev_dbg(dev, "Protocol error in arbitration phase\n");
+ cdev->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ if (unlikely(!skb)) {
+ netdev_dbg(dev, "allocation of skb failed\n");
+ return 0;
+ }
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
u32 psr)
{
@@ -791,6 +829,11 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
is_lec_err(psr))
work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+ /* handle protocol errors in arbitration phase */
+ if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ m_can_is_protocol_err(irqstatus))
+ work_done += m_can_handle_protocol_error(dev, irqstatus);
+
/* other unprocessed error interrupts */
m_can_handle_other_err(dev, irqstatus);
@@ -1135,7 +1178,7 @@ static void m_can_chip_config(struct net_device *dev)
if (cdev->version == 30) {
/* Version 3.0.x */
- cccr &= ~(CCCR_TEST | CCCR_MON |
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
(CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
(CCCR_CME_MASK << CCCR_CME_SHIFT));
@@ -1145,7 +1188,7 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
- CCCR_NISO);
+ CCCR_NISO | CCCR_DAR);
/* Only 3.2.x has NISO Bit implemented */
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1165,6 +1208,10 @@ static void m_can_chip_config(struct net_device *dev)
if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cccr |= CCCR_MON;
+ /* Disable Auto Retransmission (all versions) */
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ cccr |= CCCR_DAR;
+
/* Write config */
m_can_write(cdev, M_CAN_CCCR, cccr);
m_can_write(cdev, M_CAN_TEST, test);
@@ -1310,7 +1357,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
- CAN_CTRLMODE_FD;
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_ONE_SHOT;
/* Set properties depending on M_CAN version */
switch (m_can_dev->version) {
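
The m_can hunks wire CAN_CTRLMODE_ONE_SHOT to the core's Disable Automatic Retransmission (DAR) bit: the chip-config path now clears CCCR_DAR by default for both the 3.0.x and 3.1.x/3.2.x register layouts and sets it only on request. An isolated sketch of that mapping, using the CCCR_DAR define from the hunk above:

static u32 m_can_apply_one_shot(u32 cccr, u32 ctrlmode)
{
	cccr &= ~CCCR_DAR;		/* default: automatic retransmission on */
	if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		cccr |= CCCR_DAR;	/* one-shot: no automatic retries */
	return cccr;
}
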
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 6ac4c35f247a..38ea5e600fb8 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -107,7 +107,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->is_peripheral = false;
- platform_set_drvdata(pdev, mcan_class->dev);
+ platform_set_drvdata(pdev, mcan_class->net);
m_can_init_ram(mcan_class);
@@ -166,8 +166,6 @@ static int __maybe_unused m_can_runtime_resume(struct device *dev)
if (err)
clk_disable_unprepare(mcan_class->hclk);
- m_can_class_resume(dev);
-
return err;
}
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 6b0c6a99fc8d..10aa3e457c33 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Copyright (C) 2016 PEAK System-Technik GmbH
@@ -122,7 +121,8 @@ static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
- priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+ priv->can.ctrlmode &
+ CAN_CTRLMODE_3_SAMPLES);
cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
@@ -232,6 +232,20 @@ static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
return pucan_write_cmd(priv);
}
+static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
+{
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+ u64 ts_us;
+
+ ts_us = (u64)le32_to_cpu(ts_high) << 32;
+ ts_us |= le32_to_cpu(ts_low);
+
+ /* IP core timestamps are in µs. */
+ hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
+
+ return netif_rx(skb);
+}
+
/* handle the reception of one CAN frame */
static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
struct pucan_rx_msg *msg)
@@ -299,7 +313,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
stats->rx_bytes += cf->len;
stats->rx_packets++;
- netif_rx(skb);
+ pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
}
@@ -325,7 +339,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
if (pucan_status_is_rx_barrier(msg)) {
-
if (priv->enable_tx_path) {
int err = priv->enable_tx_path(priv);
@@ -393,7 +406,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
}
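
Stripped of the skb plumbing, the timestamp math in pucan_netif_rx() recombines the two little-endian 32-bit halves of the IP core's microsecond counter and rescales it to the nanosecond ktime that skb_shared_hwtstamps expects:

	u64 ts_us = ((u64)le32_to_cpu(ts_high) << 32) | le32_to_cpu(ts_low);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
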
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index 95b23caa7dd6..a72719dc3b74 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * CAN driver for PEAK System micro-CAN based adapters
+/* CAN driver for PEAK System micro-CAN based adapters
*
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 13b10cbf236a..d08a3d559114 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
@@ -841,7 +840,8 @@ err_disable_pci:
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
- * (err is unchanged if negative) */
+ * (err is unchanged if negative)
+ */
return pcibios_err_to_errno(err);
}
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index bf5adea9c0a3..48575900adb7 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -744,7 +744,6 @@ static int rcar_can_probe(struct platform_device *pdev)
{
struct rcar_can_priv *priv;
struct net_device *ndev;
- struct resource *mem;
void __iomem *addr;
u32 clock_select = CLKR_CLKP1;
int err = -ENODEV;
@@ -759,8 +758,7 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index edaa1ca972c1..de59dd6aad29 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1630,7 +1630,6 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
- struct resource *mem;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1704,8 +1703,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail_dev;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index 84cae167e42f..e8328910a234 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+/* Copyright (c) 2014 Protonic Holland,
+ * David Jander
+ * Copyright (C) 2014-2017 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
*/
#include <linux/can/dev.h>
@@ -11,14 +12,17 @@ struct can_rx_offload_cb {
u32 timestamp;
};
-static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+static inline struct can_rx_offload_cb *
+can_rx_offload_get_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
return (struct can_rx_offload_cb *)skb->cb;
}
-static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+static inline bool
+can_rx_offload_le(struct can_rx_offload *offload,
+ unsigned int a, unsigned int b)
{
if (offload->inc)
return a <= b;
@@ -26,7 +30,8 @@ static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned in
return a >= b;
}
-static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+static inline unsigned int
+can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
if (offload->inc)
return (*val)++;
@@ -36,7 +41,9 @@ static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, un
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
- struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+ struct can_rx_offload *offload = container_of(napi,
+ struct can_rx_offload,
+ napi);
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
@@ -65,8 +72,9 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
return work_done;
}
-static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
- int (*compare)(struct sk_buff *a, struct sk_buff *b))
+static inline void
+__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+ int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
struct sk_buff *pos, *insert = NULL;
@@ -101,7 +109,7 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
cb_a = can_rx_offload_get_cb(a);
cb_b = can_rx_offload_get_cb(b);
- /* Substract two u32 and return result as int, to keep
+ /* Subtract two u32 and return result as int, to keep
* difference steady around the u32 overflow.
*/
return cb_b->timestamp - cb_a->timestamp;
@@ -131,75 +139,40 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
- struct sk_buff *skb = NULL, *skb_error = NULL;
+ struct sk_buff *skb;
struct can_rx_offload_cb *cb;
- struct can_frame *cf;
- int ret;
-
- if (likely(skb_queue_len(&offload->skb_queue) <
- offload->skb_queue_len_max)) {
- skb = alloc_can_skb(offload->dev, &cf);
- if (unlikely(!skb))
- skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
- } else {
- skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
- }
-
- /* If queue is full or skb not available, drop by reading into
- * overflow buffer.
- */
- if (unlikely(skb_error)) {
- struct can_frame cf_overflow;
- u32 timestamp;
-
- ret = offload->mailbox_read(offload, &cf_overflow,
- &timestamp, n);
-
- /* Mailbox was empty. */
- if (unlikely(!ret))
- return NULL;
-
- /* Mailbox has been read and we're dropping it or
- * there was a problem reading the mailbox.
- *
- * Increment error counters in any case.
- */
- offload->dev->stats.rx_dropped++;
- offload->dev->stats.rx_fifo_errors++;
-
- /* There was a problem reading the mailbox, propagate
- * error value.
- */
- if (unlikely(ret < 0))
- return ERR_PTR(ret);
-
- return skb_error;
- }
+ bool drop = false;
+ u32 timestamp;
- cb = can_rx_offload_get_cb(skb);
- ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
+ /* If the queue is full, drop the frame */
+ if (unlikely(skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max))
+ drop = true;
+ skb = offload->mailbox_read(offload, n, &timestamp, drop);
/* Mailbox was empty. */
- if (unlikely(!ret)) {
- kfree_skb(skb);
+ if (unlikely(!skb))
return NULL;
- }
-
- /* There was a problem reading the mailbox, propagate error value. */
- if (unlikely(ret < 0)) {
- kfree_skb(skb);
+ /* There was a problem reading the mailbox; propagate the
+ * error value.
+ */
+ if (unlikely(IS_ERR(skb))) {
offload->dev->stats.rx_dropped++;
offload->dev->stats.rx_fifo_errors++;
- return ERR_PTR(ret);
+ return skb;
}
/* Mailbox was read. */
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
return skb;
}
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+ u64 pending)
{
struct sk_buff_head skb_queue;
unsigned int i;
@@ -229,8 +202,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
- if ((queue_len = skb_queue_len(&offload->skb_queue)) >
- (offload->skb_queue_len_max / 8))
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
netdev_dbg(offload->dev, "%s: queue_len=%d\n",
__func__, queue_len);
@@ -328,7 +301,9 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
-static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+static int can_rx_offload_init_queue(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
{
offload->dev = dev;
@@ -337,7 +312,6 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
- can_rx_offload_reset(offload);
netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
@@ -346,7 +320,8 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
return 0;
}
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+int can_rx_offload_add_timestamp(struct net_device *dev,
+ struct can_rx_offload *offload)
{
unsigned int weight;
@@ -366,7 +341,8 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+int can_rx_offload_add_fifo(struct net_device *dev,
+ struct can_rx_offload *offload, unsigned int weight)
{
if (!offload->mailbox_read)
return -EINVAL;
@@ -377,7 +353,6 @@ EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
void can_rx_offload_enable(struct can_rx_offload *offload)
{
- can_rx_offload_reset(offload);
napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);
@@ -388,8 +363,3 @@ void can_rx_offload_del(struct can_rx_offload *offload)
skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
-
-void can_rx_offload_reset(struct can_rx_offload *offload)
-{
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_reset);
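
After this rework the mailbox_read() callback owns the skb allocation and reports three outcomes through its return value: NULL for an empty mailbox, an ERR_PTR for a mailbox that was consumed but whose frame is dropped (-ENOBUFS when the offload queue is full, -ENOMEM when allocation fails), and a normal skb otherwise. A skeleton of a compliant implementation, where the foo_*() helpers are hypothetical placeholders for driver-specific register access:

static struct sk_buff *foo_mailbox_read(struct can_rx_offload *offload,
					unsigned int n, u32 *timestamp,
					bool drop)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	if (!foo_mailbox_pending(n))	/* hypothetical: anything to read? */
		return NULL;		/* mailbox empty */

	if (unlikely(drop)) {		/* queue full: consume and discard */
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	foo_read_frame(cf, timestamp, n); /* hypothetical: fill cf + timestamp */

mark_as_read:
	foo_ack_mailbox(n);		/* always release the mailbox */
	return skb;
}
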
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index bb6032211043..0a9f42e5fedf 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -617,6 +617,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ free_netdev(sl->dev);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bb20a9b75cc6..5009ff294941 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,7 +22,6 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/led.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -321,6 +320,18 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
mcp251x_spi_trans(spi, 3);
}
+static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
+ priv->spi_tx_buf[1] = reg;
+ priv->spi_tx_buf[2] = v1;
+ priv->spi_tx_buf[3] = v2;
+
+ mcp251x_spi_trans(spi, 4);
+}
+
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
u8 mask, u8 val)
{
@@ -457,6 +468,39 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
+/* May only be called when device is sleeping! */
+static int mcp251x_hw_wake(struct spi_device *spi)
+{
+ unsigned long timeout;
+
+ /* Force wakeup interrupt to wake device, but don't execute IST */
+ disable_irq(spi->irq);
+ mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+
+ /* Wait for oscillator startup timer after wake up */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ /* Put device into config mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF);
+
+ /* Wait for the device to enter config mode */
+ timeout = jiffies + HZ;
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+ CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't enter config mode\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Disable and clear pending interrupts */
+ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
+ enable_irq(spi->irq);
+
+ return 0;
+}
+
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -646,8 +690,7 @@ static int mcp251x_stop(struct net_device *net)
mutex_lock(&priv->mcp_lock);
/* Disable and clear pending interrupts */
- mcp251x_write_reg(spi, CANINTE, 0x00);
- mcp251x_write_reg(spi, CANINTF, 0x00);
+ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
mcp251x_write_reg(spi, TXBCTRL(0), 0);
mcp251x_clean(net);
@@ -715,8 +758,12 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mcp251x_hw_reset(spi);
- mcp251x_setup(net, spi);
+ if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+ mcp251x_hw_reset(spi);
+ mcp251x_setup(net, spi);
+ } else {
+ mcp251x_hw_wake(spi);
+ }
priv->force_quit = 0;
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
mcp251x_set_normal_mode(spi);
@@ -913,7 +960,7 @@ static int mcp251x_open(struct net_device *net)
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
- ret = mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_wake(spi);
if (ret)
goto out_free_wq;
ret = mcp251x_setup(net, spi);
@@ -986,19 +1033,19 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
const void *match = device_get_match_data(&spi->dev);
- struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
- int freq, ret;
+ u32 freq;
+ int ret;
clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
freq = clk_get_rate(clk);
- if (freq == 0 && pdata)
- freq = pdata->oscillator_frequency;
+ if (freq == 0)
+ device_property_read_u32(&spi->dev, "clock-frequency", &freq);
/* Sanity check */
if (freq < 1000000 || freq > 25000000)
@@ -1155,13 +1202,13 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
if (priv->after_suspend & AFTER_SUSPEND_POWER)
mcp251x_power_enable(priv->power, 1);
-
- if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ if (priv->after_suspend & AFTER_SUSPEND_UP)
mcp251x_power_enable(priv->transceiver, 1);
+
+ if (priv->after_suspend & (AFTER_SUSPEND_POWER | AFTER_SUSPEND_UP))
queue_work(priv->wq, &priv->restart_work);
- } else {
+ else
priv->after_suspend = 0;
- }
priv->force_quit = 0;
enable_irq(spi->irq);
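
mcp251x_write_2regs() relies on the MCP251x SPI WRITE instruction auto-incrementing the register address, so two adjacent registers can be written in one 4-byte transfer instead of two 3-byte ones. CANINTE (0x2b) and CANINTF (0x2c) are adjacent in the register map, which is what lets the interrupt disable-and-clear collapse into:

	/* one SPI transfer: CANINTE = 0x00, then CANINTF = 0x00 */
	mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
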
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index f4cd88196404..e3ba8ab0cbf4 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -771,7 +771,6 @@ static int sun4ican_remove(struct platform_device *pdev)
static int sun4ican_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *mem;
struct clk *clk;
void __iomem *addr;
int err, irq;
@@ -791,8 +790,7 @@ static int sun4ican_probe(struct platform_device *pdev)
goto exit;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = -EBUSY;
goto exit;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 31ad364a89bb..94b1491b569f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -535,15 +535,28 @@ struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
return container_of(offload, struct ti_hecc_priv, offload);
}
-static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mbxno)
+static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mbxno, u32 *timestamp,
+ bool drop)
{
struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+ struct sk_buff *skb;
+ struct can_frame *cf;
u32 data, mbx_mask;
- int ret = 1;
mbx_mask = BIT(mbxno);
+
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
if (data & HECC_CANMID_IDE)
cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -578,11 +591,12 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
*/
if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
hecc_read(priv, HECC_CANRML) & mbx_mask))
- ret = -ENOBUFS;
+ skb = ERR_PTR(-ENOBUFS);
+ mark_as_read:
hecc_write(priv, HECC_CANRMP, mbx_mask);
- return ret;
+ return skb;
}
static int ti_hecc_error(struct net_device *ndev, int int_status,
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 7c482b2d78d2..4a96e2dd7d77 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -194,7 +194,7 @@ struct xcan_devtype_data {
*/
struct xcan_priv {
struct can_priv can;
- spinlock_t tx_lock;
+ spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -400,7 +400,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
XCAN_SR_CONFIG_MASK;
if (!is_config_mode) {
netdev_alert(ndev,
- "BUG! Cannot set bittiming - CAN is not in config mode\n");
+ "BUG! Cannot set bittiming - CAN is not in config mode\n");
return -EPERM;
}
@@ -470,7 +470,13 @@ static int xcan_chip_start(struct net_device *ndev)
if (err < 0)
return err;
- /* Enable interrupts */
+ /* Enable interrupts
+ *
+ * We enable the ERROR interrupt even with
+ * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
+ * dedicated interrupt for a state change to
+ * ERROR_WARNING/ERROR_PASSIVE.
+ */
ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
@@ -482,11 +488,10 @@ static int xcan_chip_start(struct net_device *ndev)
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
/* Check whether it is loopback mode or normal mode */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
reg_msr = XCAN_MSR_LBACK_MASK;
- } else {
+ else
reg_msr = 0x0;
- }
/* enable the first extended filter, if any, as cores with extended
* filtering default to non-receipt if all filters are disabled
@@ -981,12 +986,9 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
+ struct can_frame cf = { };
u32 err_status;
- skb = alloc_can_err_skb(ndev, &cf);
-
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
@@ -996,32 +998,27 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
/* Leave device in Config Mode in bus-off state */
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
can_bus_off(ndev);
- if (skb)
- cf->can_id |= CAN_ERR_BUSOFF;
+ cf.can_id |= CAN_ERR_BUSOFF;
} else {
enum can_state new_state = xcan_current_error_state(ndev);
if (new_state != priv->can.state)
- xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+ xcan_set_error_state(ndev, new_state, &cf);
}
/* Check for Arbitration lost interrupt */
if (isr & XCAN_IXR_ARBLST_MASK) {
priv->can.can_stats.arbitration_lost++;
- if (skb) {
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
- }
+ cf.can_id |= CAN_ERR_LOSTARB;
+ cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
}
/* Check for RX FIFO Overflow interrupt */
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- }
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
}
/* Check for RX Match Not Finished interrupt */
@@ -1029,68 +1026,77 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
stats->rx_dropped++;
stats->rx_errors++;
netdev_err(ndev, "RX match not finished, frame discarded\n");
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
- }
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
}
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
- if (skb)
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ bool berr_reporting = false;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ berr_reporting = true;
+ cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ }
/* Check for Ack error interrupt */
if (err_status & XCAN_ESR_ACKER_MASK) {
stats->tx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_ACK;
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_ACK;
+ cf.data[3] = CAN_ERR_PROT_LOC_ACK;
}
}
/* Check for Bit error interrupt */
if (err_status & XCAN_ESR_BERR_MASK) {
stats->tx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_BIT;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_BIT;
}
}
/* Check for Stuff error interrupt */
if (err_status & XCAN_ESR_STER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_STUFF;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_STUFF;
}
}
/* Check for Form error interrupt */
if (err_status & XCAN_ESR_FMER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_FORM;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_FORM;
}
}
/* Check for CRC error interrupt */
if (err_status & XCAN_ESR_CRCER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
}
priv->can.can_stats.bus_error++;
}
- if (skb) {
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ if (cf.can_id) {
+ struct can_frame *skb_cf;
+ struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
+
+ if (skb) {
+ skb_cf->can_id |= cf.can_id;
+ memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
+ stats->rx_packets++;
+ stats->rx_bytes += CAN_ERR_DLC;
+ netif_rx(skb);
+ }
}
netdev_dbg(ndev, "%s: error status register:0x%x\n",
@@ -1651,7 +1657,6 @@ MODULE_DEVICE_TABLE(of, xcan_of_match);
*/
static int xcan_probe(struct platform_device *pdev)
{
- struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
const struct of_device_id *of_id;
@@ -1663,8 +1668,7 @@ static int xcan_probe(struct platform_device *pdev)
const char *hw_tx_max_property;
/* Get the virtual base address for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err;
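
The xcan_err_interrupt() rework accumulates error bits in an on-stack struct can_frame and allocates the error skb only at the end, and only if something was actually recorded; that removes the per-site "if (skb)" checks and skips the allocation entirely when berr reporting is off and nothing changed. The pattern, reduced to its core:

	struct can_frame cf = { };	/* accumulate error bits on the stack */

	/* ... decode errors, OR bits into cf.can_id / cf.data[] ... */

	if (cf.can_id) {		/* allocate only if something was set */
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			netif_rx(skb);
		}
	}
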
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f6232ce8481f..c7667645f04a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -52,6 +52,8 @@ source "drivers/net/dsa/microchip/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+source "drivers/net/dsa/ocelot/Kconfig"
+
source "drivers/net/dsa/sja1105/Kconfig"
config NET_DSA_QCA8K
@@ -77,6 +79,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
+ select REGMAP
---help---
This enables support for the SMSC/Microchip LAN9303 3 port ethernet
switch chips.
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index ae70b79628d6..9d384a32b3a2 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
+obj-y += ocelot/
obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index cc3536315eff..36828f210030 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -524,7 +524,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
if (!dsa_is_user_port(ds, port))
return 0;
- cpu_port = ds->ports[port].cpu_dp->index;
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -1503,11 +1503,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
idx = 1;
}
- memset(&ent, 0, sizeof(ent));
- ent.port = port;
+ /* For a multicast address, the port field is a bitmask and the
+ * entry stays valid as long as at least one port is still active
+ */
+ if (!is_multicast_ether_addr(addr)) {
+ ent.port = port;
+ ent.is_valid = is_valid;
+ } else {
+ if (is_valid)
+ ent.port |= BIT(port);
+ else
+ ent.port &= ~BIT(port);
+
+ ent.is_valid = !!(ent.port);
+ }
+
ent.is_valid = is_valid;
ent.vid = vid;
ent.is_static = true;
+ ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
@@ -1626,10 +1640,51 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_mdb_prepare);
+
+void b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ if (ret)
+ dev_err(ds->dev, "failed to add MDB entry\n");
+}
+EXPORT_SYMBOL(b53_mdb_add);
+
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+ if (ret)
+ dev_err(ds->dev, "failed to delete MDB entry\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_mdb_del);
+
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
@@ -1675,7 +1730,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1994,6 +2049,9 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
+ .port_mdb_prepare = b53_mdb_prepare,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
};
struct b53_chip_data {
@@ -2341,10 +2399,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
struct dsa_switch *ds;
struct b53_device *dev;
- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+
dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
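
b53 and the other DSA drivers in this series stop calling dsa_switch_alloc() and allocate struct dsa_switch themselves, filling in .dev and .num_ports by hand. The equivalent boilerplate, with N_PORTS standing in for the driver-specific port count:

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = dev;
	ds->num_ports = N_PORTS;	/* e.g. DSA_MAX_PORTS */
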
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a7dd8acc281b..1877acf05081 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -250,7 +250,7 @@ b53_build_op(write48, u64);
b53_build_op(write64, u64);
struct b53_arl_entry {
- u8 port;
+ u16 port;
u8 mac[ETH_ALEN];
u16 vid;
u8 is_valid:1;
@@ -351,6 +351,12 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+void b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 69fc13046ac7..e43040c9f9ee 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -350,6 +350,18 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
unsigned int timeout = 1000;
u32 reg;
+ int ret;
+
+ /* The watchdog reset does not work on 7278; we need to hit the
+ * "external" reset line through the reset controller.
+ */
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) {
+ ret = reset_control_assert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ return reset_control_deassert(priv->rcdev);
+ }
reg = core_readl(priv, CORE_WATCHDOG_CTRL);
reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
@@ -381,8 +393,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
struct device_node *port;
- int mode;
unsigned int port_num;
+ phy_interface_t mode;
+ int err;
priv->moca_port = -1;
@@ -395,8 +408,8 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
* has completed, since they might be turned off at that
* time
*/
- mode = of_get_phy_mode(port);
- if (mode < 0)
+ err = of_get_phy_mode(port, &mode);
+ if (err)
continue;
if (mode == PHY_INTERFACE_MODE_INTERNAL)
@@ -668,7 +681,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(ds->ports[port].slave);
+ netif_carrier_off(dsa_to_port(ds, port)->slave);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
@@ -734,7 +747,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -758,9 +771,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
if (p->ethtool_ops->get_wol)
@@ -974,6 +987,9 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
+ .port_mdb_prepare = b53_mdb_prepare,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
};
struct bcm_sf2_of_data {
@@ -1088,6 +1104,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
+ priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "switch");
+ if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
+ return PTR_ERR(priv->rcdev);
+
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
@@ -1220,6 +1241,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
+ reset_control_assert(priv->rcdev);
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 1df30ccec42d..de386dd96d66 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/reset.h>
#include <net/dsa.h>
@@ -64,6 +65,8 @@ struct bcm_sf2_priv {
void __iomem *fcb;
void __iomem *acb;
+ struct reset_control *rcdev;
+
/* Register offsets indirection tables */
u32 type;
const u16 *reg_offsets;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d264776a95a3..f3f0c3f07391 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -821,7 +821,7 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
int ret;
@@ -1049,7 +1049,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1092,7 +1092,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 925ed135a4d9..c8d7ef27fd72 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -286,10 +286,13 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
dev_info(&mdiodev->dev, "%s: 0x%0x\n",
pdata->name, pdata->enabled_ports);
- ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = DSA_MAX_PORTS;
+
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index bbec86b9418e..e3c333a8f45d 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1283,10 +1283,12 @@ static int lan9303_register_switch(struct lan9303 *chip)
{
int base;
- chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
+ chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
+ chip->ds->dev = chip->dev;
+ chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
base = chip->phy_addr_base;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index a69c9b9878b7..955324968b74 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1854,10 +1854,12 @@ static int gswip_probe(struct platform_device *pdev)
if (!priv->hw_info)
return -EINVAL;
- priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = &gswip_switch_ops;
priv->dev = dev;
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index fdffd9e0c518..7d050fab0889 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -87,7 +87,6 @@ MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
.probe = ksz9477_i2c_probe,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index fe47180c908b..d8fda4a02640 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -398,10 +398,13 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
struct dsa_switch *ds;
struct ksz_device *swdev;
- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
return NULL;
@@ -419,6 +422,7 @@ EXPORT_SYMBOL(ksz_switch_alloc);
int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops)
{
+ phy_interface_t interface;
int ret;
if (dev->pdata)
@@ -453,9 +457,9 @@ int ksz_switch_register(struct ksz_device *dev,
* device tree.
*/
if (dev->dev->of_node) {
- ret = of_get_phy_mode(dev->dev->of_node);
- if (ret >= 0)
- dev->interface = ret;
+ ret = of_get_phy_mode(dev->dev->of_node, &interface);
+ if (ret == 0)
+ dev->interface = interface;
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
}
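
The of_get_phy_mode() conversions here follow the API change that made the function return an errno and hand the mode back through a phy_interface_t out-parameter, rather than encoding the mode (or a negative error) in the return value. Old versus new, sketched:

	/* old: mode or negative errno in the return value */
	int mode = of_get_phy_mode(np);
	if (mode >= 0)
		dev->interface = mode;

	/* new: 0 or negative errno; mode via out-parameter */
	phy_interface_t interface;
	int err = of_get_phy_mode(np, &interface);
	if (!err)
		dev->interface = interface;
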
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1d8d36de4d20..ed1ec10ec62b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -862,7 +862,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
- dsa_port_is_vlan_filtering(&ds->ports[i])) {
+ dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
break;
}
@@ -922,7 +922,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* other port is still a VLAN-aware port.
*/
if (dsa_is_user_port(ds, i) && i != port &&
- !dsa_port_is_vlan_filtering(&ds->ports[i])) {
+ !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -1165,7 +1165,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
/* The port is kept VLAN-unaware if the bridge's vlan_filtering is
* not set.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return;
mutex_lock(&priv->reg_mutex);
@@ -1196,7 +1196,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* The port is kept VLAN-unaware if the bridge's vlan_filtering is
* not set.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return 0;
mutex_lock(&priv->reg_mutex);
@@ -1252,7 +1252,7 @@ mt7530_setup(struct dsa_switch *ds)
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
- dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
+ dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
if (priv->id == ID_MT7530) {
priv->ethernet = syscon_node_to_regmap(dn);
@@ -1340,7 +1340,9 @@ mt7530_setup(struct dsa_switch *ds)
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- interface = of_get_phy_mode(ds->ports[5].dn);
+ ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
} else {
/* Scan the ethernet nodes. look for GMAC1, lookup used phy */
for_each_child_of_node(dn, mac_np) {
@@ -1354,7 +1356,9 @@ mt7530_setup(struct dsa_switch *ds)
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
if (phy_node->parent == priv->dev->of_node->parent) {
- interface = of_get_phy_mode(mac_np);
+ ret = of_get_phy_mode(mac_np, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
@@ -1632,10 +1636,13 @@ mt7530_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = DSA_MAX_PORTS;
+
/* Use the mediatek,mcm property to distinguish hardware types that
* cause slight differences in the power-on sequence.
*/
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 2a2489b5196d..a5a37f47b320 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -270,10 +270,12 @@ static int mv88e6060_probe(struct mdio_device *mdiodev)
dev_info(dev, "switch %s detected\n", name);
- ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = MV88E6060_PORTS;
ds->priv = priv;
ds->dev = dev;
ds->ops = &mv88e6060_switch_ops;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 6787d560e9e3..3bd988529178 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1057,35 +1057,43 @@ static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
+/* Mask of the local ports allowed to receive frames from a given fabric port */
static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
- struct dsa_switch *ds = NULL;
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
struct net_device *br;
+ struct dsa_port *dp;
+ bool found = false;
u16 pvlan;
- int i;
- if (dev < DSA_MAX_SWITCHES)
- ds = chip->ds->dst->ds[dev];
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index == dev && dp->index == port) {
+ found = true;
+ break;
+ }
+ }
/* Prevent frames from unknown switch or port */
- if (!ds || port >= ds->num_ports)
+ if (!found)
return 0;
/* Frames from DSA links and CPU ports can egress any local port */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
- br = ds->ports[port].bridge_dev;
+ br = dp->bridge_dev;
pvlan = 0;
/* Frames from user ports can egress any local DSA links and CPU ports,
* as well as any local member of their bridge group.
*/
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- if (dsa_is_cpu_port(chip->ds, i) ||
- dsa_is_dsa_port(chip->ds, i) ||
- (br && dsa_to_port(chip->ds, i)->bridge_dev == br))
- pvlan |= BIT(i);
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds &&
+ (dp->type == DSA_PORT_TYPE_CPU ||
+ dp->type == DSA_PORT_TYPE_DSA ||
+ (br && dp->bridge_dev == br)))
+ pvlan |= BIT(dp->index);
return pvlan;
}
@@ -1135,6 +1143,7 @@ static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
{
+ struct dsa_switch *ds = chip->ds;
int target, port;
int err;
@@ -1143,10 +1152,9 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
/* Initialize the routing port to the 32 possible target devices */
for (target = 0; target < 32; target++) {
- port = 0x1f;
- if (target < DSA_MAX_SWITCHES)
- if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
- port = chip->ds->rtable[target];
+ port = dsa_routing_port(ds, target);
+ if (port == ds->num_ports)
+ port = 0x1f;
err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
if (err)
@@ -1253,7 +1261,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
- return -EOPNOTSUPP;
+ return 0;
/* Skip the local source device, which uses in-chip port VLAN */
if (dev != chip->ds->index)
@@ -1370,6 +1378,22 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -1402,7 +1426,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
- if (!ds->ports[i].slave)
+ if (!dsa_to_port(ds, i)->slave)
continue;
if (vlan.member[i] ==
@@ -1410,7 +1434,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
continue;
if (dsa_to_port(ds, i)->bridge_dev ==
- ds->ports[port].bridge_dev)
+ dsa_to_port(ds, port)->bridge_dev)
break; /* same bridge, check next VLAN */
if (!dsa_to_port(ds, i)->bridge_dev)
@@ -2035,32 +2059,26 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
struct net_device *br)
{
- struct dsa_switch *ds;
- int port;
- int dev;
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
int err;
- /* Remap the Port VLAN of each local bridge group member */
- for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
- if (chip->ds->ports[port].bridge_dev == br) {
- err = mv88e6xxx_port_vlan_map(chip, port);
- if (err)
- return err;
- }
- }
-
- if (!mv88e6xxx_has_pvt(chip))
- return 0;
-
- /* Remap the Port VLAN of each cross-chip bridge group member */
- for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) {
- ds = chip->ds->dst->ds[dev];
- if (!ds)
- break;
-
- for (port = 0; port < ds->num_ports; ++port) {
- if (ds->ports[port].bridge_dev == br) {
- err = mv88e6xxx_pvt_map(chip, dev, port);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_dev == br) {
+ if (dp->ds == ds) {
+ /* This is a local bridge group member,
+ * remap its Port VLAN Map.
+ */
+ err = mv88e6xxx_port_vlan_map(chip, dp->index);
+ if (err)
+ return err;
+ } else {
+ /* This is an external bridge group member,
+ * remap its cross-chip Port VLAN Table entry.
+ */
+ err = mv88e6xxx_pvt_map(chip, dp->ds->index,
+ dp->index);
if (err)
return err;
}
@@ -2101,9 +2119,6 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_has_pvt(chip))
- return 0;
-
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, dev, port);
mv88e6xxx_reg_unlock(chip);
@@ -2116,9 +2131,6 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
{
struct mv88e6xxx_chip *chip = ds->priv;
- if (!mv88e6xxx_has_pvt(chip))
- return;
-
mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, dev, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
@@ -2378,7 +2390,14 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
if (chip->info->ops->set_egress_port) {
err = chip->info->ops->set_egress_port(chip,
- upstream_port);
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ upstream_port);
+ if (err)
+ return err;
+
+ err = chip->info->ops->set_egress_port(chip,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+ upstream_port);
if (err)
return err;
}
@@ -2641,6 +2660,248 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
+static void mv88e6xxx_teardown(struct dsa_switch *ds)
+{
+ mv88e6xxx_teardown_devlink_params(ds);
+ dsa_devlink_resources_unregister(ds);
+}
+
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2757,6 +3018,22 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
unlock:
mv88e6xxx_reg_unlock(chip);
+ if (err)
+ return err;
+
+ /* These must be called without holding the register lock, since
+ * they take the devlink lock, and we later take the two locks in
+ * the reverse order when getting/setting parameters or reading
+ * resource occupancy.
+ */
+ err = mv88e6xxx_setup_devlink_resources(ds);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_params(ds);
+ if (err)
+ dsa_devlink_resources_unregister(ds);
+
return err;
}
@@ -3117,6 +3394,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3246,6 +3525,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
@@ -3280,6 +3561,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
@@ -3322,6 +3605,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3366,6 +3651,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3409,6 +3696,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3453,6 +3742,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3538,6 +3829,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3587,6 +3880,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3635,6 +3930,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3686,6 +3983,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3777,6 +4076,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3963,6 +4264,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -4003,6 +4306,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
@@ -4049,6 +4354,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -4105,6 +4412,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4158,6 +4467,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4177,6 +4488,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6085",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4199,6 +4511,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6095,
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4219,6 +4532,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
@@ -4241,6 +4555,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6123",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4263,6 +4578,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6131",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 8,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4283,6 +4599,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
.num_databases = 4096,
+ .num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
.num_gpio = 11,
@@ -4306,6 +4623,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4329,6 +4647,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6165",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4352,6 +4671,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6171",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4374,6 +4694,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6172",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4397,6 +4718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6175",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4419,6 +4741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6176",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4442,6 +4765,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6185",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4462,6 +4786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4485,6 +4810,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4508,6 +4834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6191",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
@@ -4558,6 +4885,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6240",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4628,6 +4956,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6320",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4652,6 +4981,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6321",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4675,6 +5005,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
+ .num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
.num_gpio = 11,
@@ -4699,6 +5030,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6350",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4721,6 +5053,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6351",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4743,6 +5076,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6352",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4766,6 +5100,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4789,6 +5124,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4914,6 +5250,80 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
return err;
}
+static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ enum mv88e6xxx_egress_direction direction = ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+ int err;
+
+ if (!chip->info->ops->set_egress_port)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) !=
+ mirror->to_local_port) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Can't change the egress port while another mirror is active */
+ if (other_mirrors) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = chip->info->ops->set_egress_port(chip,
+ direction,
+ mirror->to_local_port);
+ if (err)
+ goto out;
+ }
+
+ err = mv88e6xxx_port_set_mirror(chip, port, direction, true);
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ enum mv88e6xxx_egress_direction direction = mirror->ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+
+ mv88e6xxx_reg_lock(chip);
+ if (mv88e6xxx_port_set_mirror(chip, port, direction, false))
+ dev_err(ds->dev, "p%d: failed to disable mirroring\n", port);
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= mirror->ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Reset egress port when no other mirror is active */
+ if (!other_mirrors) {
+ if (chip->info->ops->set_egress_port(chip,
+ direction,
+ dsa_upstream_port(ds,
+ port)))
+ dev_err(ds->dev, "failed to set egress port\n");
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+}
+
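
Both paths above scan every port to decide whether the shared monitor destination may be (re)programmed; a standalone sketch of that scan, assuming the per-port mirror_ingress/mirror_egress flags (the any_mirror_active() name is hypothetical):

	static bool any_mirror_active(struct mv88e6xxx_chip *chip, bool ingress)
	{
		int i;

		for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
			if (ingress ? chip->ports[i].mirror_ingress
				    : chip->ports[i].mirror_egress)
				return true;

		return false;
	}
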
static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast)
{
@@ -4933,6 +5343,7 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
+ .teardown = mv88e6xxx_teardown,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_link_state,
.phylink_mac_config = mv88e6xxx_mac_config,
@@ -4968,6 +5379,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
+ .port_mirror_add = mv88e6xxx_port_mirror_add,
+ .port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
.crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
.port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set,
@@ -4975,6 +5388,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_txtstamp = mv88e6xxx_port_txtstamp,
.port_rxtstamp = mv88e6xxx_port_rxtstamp,
.get_ts_info = mv88e6xxx_get_ts_info,
+ .devlink_param_get = mv88e6xxx_devlink_param_get,
+ .devlink_param_set = mv88e6xxx_devlink_param_set,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -4982,10 +5397,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
struct device *dev = chip->dev;
struct dsa_switch *ds;
- ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->num_ports = mv88e6xxx_num_ports(chip);
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index e9b1a1ac9a8e..8a8e38bfb161 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -33,6 +33,11 @@ enum mv88e6xxx_egress_mode {
MV88E6XXX_EGRESS_MODE_ETHERTYPE,
};
+enum mv88e6xxx_egress_direction {
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+};
+
enum mv88e6xxx_frame_mode {
MV88E6XXX_FRAME_MODE_NORMAL,
MV88E6XXX_FRAME_MODE_DSA,
@@ -94,6 +99,7 @@ struct mv88e6xxx_info {
u16 prod_num;
const char *name;
unsigned int num_databases;
+ unsigned int num_macs;
unsigned int num_ports;
unsigned int num_internal_phys;
unsigned int num_gpio;
@@ -227,6 +233,8 @@ struct mv88e6xxx_port {
u64 vtu_member_violation;
u64 vtu_miss_violation;
u8 cmode;
+ bool mirror_ingress;
+ bool mirror_egress;
unsigned int serdes_irq;
};
@@ -310,6 +318,10 @@ struct mv88e6xxx_chip {
u16 evcap_config;
u16 enable_count;
+ /* Current ingress and egress monitor ports */
+ int egress_dest_port;
+ int ingress_dest_port;
+
/* Per-port timestamping resources. */
struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
@@ -464,7 +476,9 @@ struct mv88e6xxx_ops {
int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
- int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+ int (*set_egress_port)(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
#define MV88E6XXX_CASCADE_PORT_NONE 0xe
#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf
@@ -497,6 +511,10 @@ struct mv88e6xxx_ops {
int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+ /* Address Translation Unit operations */
+ int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
+ int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
+
/* VLAN Translation Unit operations */
int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -609,6 +627,11 @@ static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
return chip->info->num_databases;
}
+static inline unsigned int mv88e6xxx_num_macs(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_macs;
+}
+
static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
{
return chip->info->num_ports;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 25ec4c0ac589..120a65d3e3ef 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -263,8 +263,11 @@ int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
-int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
{
+ int *dest_port_chip;
u16 reg;
int err;
@@ -272,13 +275,28 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg &= ~(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK |
- MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ dest_port_chip = &chip->ingress_dest_port;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ dest_port_chip = &chip->egress_dest_port;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+ if (!err)
+ *dest_port_chip = port;
- reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK) |
- port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
-
- return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+ return err;
}
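
The per-direction update follows the usual read-modify-write idiom, with __bf_shf() (from linux/bitfield.h) deriving the field's shift from its mask; a minimal sketch, where FIELD_MASK stands for either destination mask:

	reg &= ~FIELD_MASK;                  /* clear the old destination */
	reg |= port << __bf_shf(FIELD_MASK); /* insert the new one */
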
/* Older generations also call this the ARP destination. It has been
@@ -310,22 +328,32 @@ static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg);
}
-int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
{
+ int *dest_port_chip;
u16 ptr;
int err;
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
- err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (err)
- return err;
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ dest_port_chip = &chip->ingress_dest_port;
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ dest_port_chip = &chip->egress_dest_port;
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
+ break;
+ default:
+ return -EINVAL;
+ }
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (err)
- return err;
+ if (!err)
+ *dest_port_chip = port;
- return 0;
+ return err;
}
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 0870fcc8bfc8..bc5a6b2bb1e4 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -109,6 +109,7 @@
/* Offset 0x0A: ATU Control Register */
#define MV88E6XXX_G1_ATU_CTL 0x0a
#define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008
+#define MV88E6161_G1_ATU_CTL_HASH_MASK 0x0003
/* Offset 0x0B: ATU Operation Register */
#define MV88E6XXX_G1_ATU_OP 0x0b
@@ -287,8 +288,12 @@ int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val);
int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip);
-int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
@@ -318,6 +323,8 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
bool all);
int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -338,5 +345,6 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 792a96ef418f..bdcd25560dd2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -73,6 +73,38 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
return 0;
}
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ int err;
+ u16 val;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ *hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK;
+
+ return 0;
+}
+
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ int err;
+ u16 val;
+
+ if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK)
+ return -EINVAL;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK;
+ val |= hash;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+}
+
/* Offset 0x0B: ATU Operation Register */
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
@@ -122,6 +154,11 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
return mv88e6xxx_g1_atu_op_wait(chip);
}
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+}
+
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index bdbb72fc20ed..87bfe7c8c9cd 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -280,6 +280,19 @@ int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
return err;
}
+/* Offset 0x0E: ATU Statistics */
+
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
+ kind | bin);
+}
+
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
+{
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
+}
+
/* Offset 0x0F: Priority Override Table */
static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 42da4bca73e8..1f42ee656816 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -113,7 +113,16 @@
#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
/* Offset 0x0E: ATU Stats Register */
-#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS_BIN_0 (0x0 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_1 (0x1 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_2 (0x2 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_3 (0x3 << 14)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL (0x0 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC (0x1 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL (0x2 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC (0x3 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MASK 0x0fff
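
A worked example of composing the control word from these masks: the bin select occupies bits 15:14 and the mode bits 13:12, so selecting bin 2 while counting all entries yields (0x2 << 14) | (0x0 << 12) == 0x8000:

	u16 ctl = MV88E6XXX_G2_ATU_STATS_MODE_ALL |  /* 0x0 << 12 */
		  MV88E6XXX_G2_ATU_STATS_BIN_2;      /* 0x2 << 14 */
	/* ctl == 0x8000 */
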
/* Offset 0x0F: Priority Override Table */
#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
@@ -353,6 +362,8 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
@@ -515,6 +526,18 @@ static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
+ u16 kind, u16 bin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
+ u16 *stats)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 15ef81654b67..7fe256c5739d 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1181,6 +1181,43 @@ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror)
+{
+ bool *mirror_port;
+ u16 reg;
+ u16 bit;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ bit = MV88E6XXX_PORT_CTL2_INGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_ingress;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ bit = MV88E6XXX_PORT_CTL2_EGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_egress;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg &= ~bit;
+ if (mirror)
+ reg |= bit;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+ if (!err)
+ *mirror_port = mirror;
+
+ return err;
+}
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03a480cd71b9..0ec4327c2b42 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -368,6 +368,9 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror);
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 073cbd0bb91b..d838c174dc0d 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
new file mode 100644
index 000000000000..0031ca814346
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX
+ tristate "Ocelot / Felix Ethernet switch support"
+ depends on NET_DSA && PCI
+ select MSCC_OCELOT_SWITCH
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC9959 network switch, which is a member of
+ the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
+ It is embedded as a PCIe function of the NXP LS1028A ENETC integrated
+ endpoint.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
new file mode 100644
index 000000000000..37ad403e0b2a
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+
+mscc_felix-objs := \
+ felix.o \
+ felix_vsc9959.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
new file mode 100644
index 000000000000..b7f92464815d
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 NXP Semiconductors
+ */
+#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/packing.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <net/dsa.h>
+#include "felix.h"
+
+static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_OCELOT;
+}
+
+static int felix_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_set_ageing_time(ocelot, ageing_time);
+
+ return 0;
+}
+
+static void felix_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_adjust_link(ocelot, port, phydev);
+}
+
+static int felix_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_dump(ocelot, port, cb, data);
+}
+
+static int felix_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+ bool vlan_aware;
+
+ vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware);
+}
+
+static int felix_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
+
+static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_bridge_stp_state_set(ocelot, port, state);
+}
+
+static int felix_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_bridge_join(ocelot, port, br);
+}
+
+static void felix_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_bridge_leave(ocelot, port, br);
+}
+
+/* This callback needs to be present even as a no-op: the DSA core
+ * rejects VLAN operations altogether when it is missing.
+ */
+static int felix_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return 0;
+}
+
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_vlan_filtering(ocelot, port, enabled);
+
+ return 0;
+}
+
+static void felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_add(ocelot, port, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err) {
+ dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
+ vid, port, err);
+ return;
+ }
+ }
+}
+
+static int felix_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_del(ocelot, port, vid);
+ if (err) {
+ dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
+ vid, port, err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_enable(ocelot, port, phy);
+
+ return 0;
+}
+
+static void felix_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_disable(ocelot, port);
+}
+
+static void felix_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_strings(ocelot, port, stringset, data);
+}
+
+static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+
+static int felix_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
+
+static int felix_init_structs(struct felix *felix, int num_phys_ports)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ resource_size_t base;
+ int port, i, err;
+
+ ocelot->num_phys_ports = num_phys_ports;
+ ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ ocelot->map = felix->info->map;
+ ocelot->stats_layout = felix->info->stats_layout;
+ ocelot->num_stats = felix->info->num_stats;
+ ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+ ocelot->ops = felix->info->ops;
+
+ base = pci_resource_start(felix->pdev, felix->info->pci_bar);
+
+ for (i = 0; i < TARGET_MAX; i++) {
+ struct regmap *target;
+ struct resource *res;
+
+ if (!felix->info->target_io_res[i].name)
+ continue;
+
+ res = &felix->info->target_io_res[i];
+ res->flags = IORESOURCE_MEM;
+ res->start += base;
+ res->end += base;
+
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target)) {
+ dev_err(ocelot->dev,
+ "Failed to map device memory space\n");
+ return PTR_ERR(target);
+ }
+
+ ocelot->targets[i] = target;
+ }
+
+ err = ocelot_regfields_init(ocelot, felix->info->regfields);
+ if (err) {
+ dev_err(ocelot->dev, "failed to init reg fields map\n");
+ return err;
+ }
+
+ for (port = 0; port < num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port;
+ void __iomem *port_regs;
+ struct resource *res;
+
+ ocelot_port = devm_kzalloc(ocelot->dev,
+ sizeof(struct ocelot_port),
+ GFP_KERNEL);
+ if (!ocelot_port) {
+ dev_err(ocelot->dev,
+ "failed to allocate port memory\n");
+ return -ENOMEM;
+ }
+
+ res = &felix->info->port_io_res[port];
+ res->flags = IORESOURCE_MEM;
+ res->start += base;
+ res->end += base;
+
+ port_regs = devm_ioremap_resource(ocelot->dev, res);
+ if (IS_ERR(port_regs)) {
+ dev_err(ocelot->dev,
+ "failed to map registers for port %d\n", port);
+ return PTR_ERR(port_regs);
+ }
+
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->regs = port_regs;
+ ocelot->ports[port] = ocelot_port;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization is done here so that we can allocate structures
+ * with devm without fear of dsa_register_switch() returning -EPROBE_DEFER,
+ * which would cause us to allocate structures twice (leaking memory) and to
+ * map PCI memory twice (which will not work).
+ */
+static int felix_setup(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port, err;
+
+ err = felix_init_structs(felix, ds->num_ports);
+ if (err)
+ return err;
+
+ ocelot_init(ocelot);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ ocelot_init_port(ocelot, port);
+
+ if (dsa_is_cpu_port(ds, port))
+ ocelot_set_cpu_port(ocelot, port,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_LONG);
+ }
+
+ return 0;
+}
+
+static void felix_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ /* stop workqueue thread */
+ ocelot_deinit(ocelot);
+}
+
+static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_hwstamp_get(ocelot, port, ifr);
+}
+
+static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_hwstamp_set(ocelot, port, ifr);
+}
+
+static bool felix_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot *ocelot = ds->priv;
+ u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
+ u32 tstamp_lo, tstamp_hi;
+ struct timespec64 ts;
+ u64 tstamp, val;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ packing(extraction, &val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+ tstamp_lo = (u32)val;
+
+ tstamp_hi = tstamp >> 32;
+ if ((tstamp & 0xffffffff) < tstamp_lo)
+ tstamp_hi--;
+
+ tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = tstamp;
+ return false;
+}
+
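
The reconstruction above extends a 32-bit hardware capture into a full 64-bit timestamp by borrowing the upper half from the current PHC time and correcting for a rollover between capture and readout; a self-contained sketch (extend_tstamp() is a hypothetical helper):

	#include <linux/types.h>

	static u64 extend_tstamp(u64 phc_now, u32 tstamp_lo)
	{
		u32 tstamp_hi = phc_now >> 32;

		/* If the low half of "now" has already wrapped past the
		 * captured value, the capture happened before the rollover.
		 */
		if ((u32)phc_now < tstamp_lo)
			tstamp_hi--;

		return ((u64)tstamp_hi << 32) | tstamp_lo;
	}
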
+static bool felix_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone))
+ return true;
+
+ return false;
+}
+
+static const struct dsa_switch_ops felix_switch_ops = {
+ .get_tag_protocol = felix_get_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .adjust_link = felix_adjust_link,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_prepare = felix_vlan_prepare,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+ .port_hwtstamp_get = felix_hwtstamp_get,
+ .port_hwtstamp_set = felix_hwtstamp_set,
+ .port_rxtstamp = felix_rxtstamp,
+ .port_txtstamp = felix_txtstamp,
+};
+
+static struct felix_info *felix_instance_tbl[] = {
+ [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959,
+};
+
+static irqreturn_t felix_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = (struct ocelot *)data;
+
+ /* The INTB interrupt is shared between the PTP TX timestamp
+ * interrupt and the preemption status change interrupt on each
+ * port.
+ *
+ * - Retrieve TX timestamps if any are pending.
+ * - TODO: handle preemption. Until it is handled, the driver may
+ * be hit by an interrupt storm.
+ */
+
+ ocelot_get_txtstamp(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int felix_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum felix_instance instance = id->driver_data;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ goto err_pci_enable;
+ }
+
+ /* Try 64-bit DMA first, fall back to 32-bit if unavailable */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+ if (!felix) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+ goto err_alloc_felix;
+ }
+
+ pci_set_drvdata(pdev, felix);
+ ocelot = &felix->ocelot;
+ ocelot->dev = &pdev->dev;
+ felix->pdev = pdev;
+ felix->info = felix_instance_tbl[instance];
+
+ pci_set_master(pdev);
+
+ err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
+ &felix_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq\n");
+ goto err_alloc_irq;
+ }
+
+ ocelot->ptp = 1;
+
+ ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+ goto err_alloc_ds;
+ }
+
+ ds->dev = &pdev->dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ goto err_register_ds;
+ }
+
+ return 0;
+
+err_register_ds:
+ kfree(ds);
+err_alloc_ds:
+err_alloc_irq:
+err_alloc_felix:
+ kfree(felix);
+err_dma:
+ pci_disable_device(pdev);
+err_pci_enable:
+ return err;
+}
+
+static void felix_pci_remove(struct pci_dev *pdev)
+{
+ struct felix *felix;
+
+ felix = pci_get_drvdata(pdev);
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id felix_ids[] = {
+ {
+ /* NXP LS1028A */
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
+ .driver_data = FELIX_INSTANCE_VSC9959,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, felix_ids);
+
+static struct pci_driver felix_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = felix_ids,
+ .probe = felix_pci_probe,
+ .remove = felix_pci_remove,
+};
+
+module_pci_driver(felix_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
new file mode 100644
index 000000000000..204296e51d0c
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP Semiconductors
+ */
+#ifndef _MSCC_FELIX_H
+#define _MSCC_FELIX_H
+
+#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+
+/* Platform-specific information */
+struct felix_info {
+ struct resource *target_io_res;
+ struct resource *port_io_res;
+ const struct reg_field *regfields;
+ const u32 *const *map;
+ const struct ocelot_ops *ops;
+ int shared_queue_sz;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+ int num_ports;
+ int pci_bar;
+};
+
+extern struct felix_info felix_info_vsc9959;
+
+enum felix_instance {
+ FELIX_INSTANCE_VSC9959 = 0,
+};
+
+/* DSA glue / front-end for struct ocelot */
+struct felix {
+ struct dsa_switch *ds;
+ struct pci_dev *pdev;
+ struct felix_info *info;
+ struct ocelot ocelot;
+};
+
+#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
new file mode 100644
index 000000000000..b9758b0d18c7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2017 Microsemi Corporation
+ * Copyright 2018-2019 NXP Semiconductors
+ */
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include "felix.h"
+
+static const u32 vsc9959_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x0089a0),
+ REG(ANA_VLANMASK, 0x0089a4),
+ REG_RESERVED(ANA_PORT_B_DOMAIN),
+ REG(ANA_ANAGEFIL, 0x0089ac),
+ REG(ANA_ANEVENTS, 0x0089b0),
+ REG(ANA_STORMLIMIT_BURST, 0x0089b4),
+ REG(ANA_STORMLIMIT_CFG, 0x0089b8),
+ REG(ANA_ISOLATED_PORTS, 0x0089c8),
+ REG(ANA_COMMUNITY_PORTS, 0x0089cc),
+ REG(ANA_AUTOAGE, 0x0089d0),
+ REG(ANA_MACTOPTIONS, 0x0089d4),
+ REG(ANA_LEARNDISC, 0x0089d8),
+ REG(ANA_AGENCTRL, 0x0089dc),
+ REG(ANA_MIRRORPORTS, 0x0089e0),
+ REG(ANA_EMIRRORPORTS, 0x0089e4),
+ REG(ANA_FLOODING, 0x0089e8),
+ REG(ANA_FLOODING_IPMC, 0x008a08),
+ REG(ANA_SFLOW_CFG, 0x008a0c),
+ REG(ANA_PORT_MODE, 0x008a28),
+ REG(ANA_CUT_THRU_CFG, 0x008a48),
+ REG(ANA_PGID_PGID, 0x008400),
+ REG(ANA_TABLES_ANMOVED, 0x007f1c),
+ REG(ANA_TABLES_MACHDATA, 0x007f20),
+ REG(ANA_TABLES_MACLDATA, 0x007f24),
+ REG(ANA_TABLES_STREAMDATA, 0x007f28),
+ REG(ANA_TABLES_MACACCESS, 0x007f2c),
+ REG(ANA_TABLES_MACTINDX, 0x007f30),
+ REG(ANA_TABLES_VLANACCESS, 0x007f34),
+ REG(ANA_TABLES_VLANTIDX, 0x007f38),
+ REG(ANA_TABLES_ISDXACCESS, 0x007f3c),
+ REG(ANA_TABLES_ISDXTIDX, 0x007f40),
+ REG(ANA_TABLES_ENTRYLIM, 0x007f00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x007f48),
+ REG(ANA_TABLES_STREAMACCESS, 0x007f4c),
+ REG(ANA_TABLES_STREAMTIDX, 0x007f50),
+ REG(ANA_TABLES_SEQ_HISTORY, 0x007f54),
+ REG(ANA_TABLES_SEQ_MASK, 0x007f58),
+ REG(ANA_TABLES_SFID_MASK, 0x007f5c),
+ REG(ANA_TABLES_SFIDACCESS, 0x007f60),
+ REG(ANA_TABLES_SFIDTIDX, 0x007f64),
+ REG(ANA_MSTI_STATE, 0x008600),
+ REG(ANA_OAM_UPM_LM_CNT, 0x008000),
+ REG(ANA_SG_ACCESS_CTRL, 0x008a64),
+ REG(ANA_SG_CONFIG_REG_1, 0x007fb0),
+ REG(ANA_SG_CONFIG_REG_2, 0x007fb4),
+ REG(ANA_SG_CONFIG_REG_3, 0x007fb8),
+ REG(ANA_SG_CONFIG_REG_4, 0x007fbc),
+ REG(ANA_SG_CONFIG_REG_5, 0x007fc0),
+ REG(ANA_SG_GCL_GS_CONFIG, 0x007f80),
+ REG(ANA_SG_GCL_TI_CONFIG, 0x007f90),
+ REG(ANA_SG_STATUS_REG_1, 0x008980),
+ REG(ANA_SG_STATUS_REG_2, 0x008984),
+ REG(ANA_SG_STATUS_REG_3, 0x008988),
+ REG(ANA_PORT_VLAN_CFG, 0x007800),
+ REG(ANA_PORT_DROP_CFG, 0x007804),
+ REG(ANA_PORT_QOS_CFG, 0x007808),
+ REG(ANA_PORT_VCAP_CFG, 0x00780c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00781c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007820),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007860),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c),
+ REG(ANA_PORT_PORT_CFG, 0x007870),
+ REG(ANA_PORT_POL_CFG, 0x007874),
+ REG(ANA_PORT_PTP_CFG, 0x007878),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007880),
+ REG(ANA_PORT_SFID_CFG, 0x007884),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG_RESERVED(ANA_PFC_PFC_TIMER),
+ REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
+ REG_RESERVED(ANA_IPT_IPT),
+ REG_RESERVED(ANA_PPT_PPT),
+ REG_RESERVED(ANA_FID_MAP_FID_MAP),
+ REG(ANA_AGGR_CFG, 0x008a68),
+ REG(ANA_CPUQ_CFG, 0x008a6c),
+ REG_RESERVED(ANA_CPUQ_CFG2),
+ REG(ANA_CPUQ_8021_CFG, 0x008a74),
+ REG(ANA_DSCP_CFG, 0x008ab4),
+ REG(ANA_DSCP_REWR_CFG, 0x008bb4),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14),
+ REG_RESERVED(ANA_VRAP_CFG),
+ REG_RESERVED(ANA_VRAP_HDR_DATA),
+ REG_RESERVED(ANA_VRAP_HDR_MASK),
+ REG(ANA_DISCARD_CFG, 0x008c40),
+ REG(ANA_FID_CFG, 0x008c44),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG_RESERVED(ANA_POL_STATE),
+ REG(ANA_POL_FLOWC, 0x008c48),
+ REG(ANA_POL_HYST, 0x008cb4),
+ REG_RESERVED(ANA_POL_MISC_CFG),
+};
+
+static const u32 vsc9959_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG_RESERVED(QS_INH_DBG),
+};
+
+static const u32 vsc9959_s2_regmap[] = {
+ REG(S2_CORE_UPDATE_CTRL, 0x000000),
+ REG(S2_CORE_MV_CFG, 0x000004),
+ REG(S2_CACHE_ENTRY_DAT, 0x000008),
+ REG(S2_CACHE_MASK_DAT, 0x000108),
+ REG(S2_CACHE_ACTION_DAT, 0x000208),
+ REG(S2_CACHE_CNT_DAT, 0x000308),
+ REG(S2_CACHE_TG_DAT, 0x000388),
+};
+
+static const u32 vsc9959_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x00f460),
+ REG(QSYS_SWITCH_PORT_MODE, 0x00f480),
+ REG(QSYS_STAT_CNT_CFG, 0x00f49c),
+ REG(QSYS_EEE_CFG, 0x00f4a0),
+ REG(QSYS_EEE_THRES, 0x00f4b8),
+ REG(QSYS_IGR_NO_SHARING, 0x00f4bc),
+ REG(QSYS_EGR_NO_SHARING, 0x00f4c0),
+ REG(QSYS_SW_STATUS, 0x00f4c4),
+ REG(QSYS_EXT_CPU_CFG, 0x00f4e0),
+ REG_RESERVED(QSYS_PAD_CFG),
+ REG(QSYS_CPU_GROUP_MAP, 0x00f4e8),
+ REG_RESERVED(QSYS_QMAP),
+ REG_RESERVED(QSYS_ISDX_SGRP),
+ REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
+ REG(QSYS_TFRM_MISC, 0x00f50c),
+ REG(QSYS_TFRM_PORT_DLY, 0x00f510),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530),
+ REG(QSYS_RED_PROFILE, 0x00f534),
+ REG(QSYS_RES_QOS_MODE, 0x00f574),
+ REG(QSYS_RES_CFG, 0x00c000),
+ REG(QSYS_RES_STAT, 0x00c004),
+ REG(QSYS_EGR_DROP_MODE, 0x00f578),
+ REG(QSYS_EQ_CTRL, 0x00f57c),
+ REG_RESERVED(QSYS_EVENTS_CORE),
+ REG(QSYS_QMAXSDU_CFG_0, 0x00f584),
+ REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0),
+ REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc),
+ REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8),
+ REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4),
+ REG(QSYS_QMAXSDU_CFG_5, 0x00f610),
+ REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
+ REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
+ REG(QSYS_PREEMPTION_CFG, 0x00f664),
+ REG_RESERVED(QSYS_CIR_CFG),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG_RESERVED(QSYS_SE_CONNECT),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG_RESERVED(QSYS_SE_STATE),
+ REG(QSYS_HSCH_MISC_CFG, 0x00f67c),
+ REG(QSYS_TAG_CONFIG, 0x00f680),
+ REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698),
+ REG(QSYS_PORT_MAX_SDU, 0x00f69c),
+ REG(QSYS_PARAM_CFG_REG_1, 0x00f440),
+ REG(QSYS_PARAM_CFG_REG_2, 0x00f444),
+ REG(QSYS_PARAM_CFG_REG_3, 0x00f448),
+ REG(QSYS_PARAM_CFG_REG_4, 0x00f44c),
+ REG(QSYS_PARAM_CFG_REG_5, 0x00f450),
+ REG(QSYS_GCL_CFG_REG_1, 0x00f454),
+ REG(QSYS_GCL_CFG_REG_2, 0x00f458),
+ REG(QSYS_PARAM_STATUS_REG_1, 0x00f400),
+ REG(QSYS_PARAM_STATUS_REG_2, 0x00f404),
+ REG(QSYS_PARAM_STATUS_REG_3, 0x00f408),
+ REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c),
+ REG(QSYS_PARAM_STATUS_REG_5, 0x00f410),
+ REG(QSYS_PARAM_STATUS_REG_6, 0x00f414),
+ REG(QSYS_PARAM_STATUS_REG_7, 0x00f418),
+ REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c),
+ REG(QSYS_PARAM_STATUS_REG_9, 0x00f420),
+ REG(QSYS_GCL_STATUS_REG_1, 0x00f424),
+ REG(QSYS_GCL_STATUS_REG_2, 0x00f428),
+};
+
+static const u32 vsc9959_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_RED_TAG_CFG, 0x000058),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000410),
+ REG(REW_DSCP_REMAP_CFG, 0x000510),
+ REG_RESERVED(REW_STAT_CFG),
+ REG_RESERVED(REW_REW_STICKY),
+ REG_RESERVED(REW_PPT),
+};
+
+static const u32 vsc9959_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_COLLISION, 0x000210),
+ REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_64, 0x00021c),
+ REG(SYS_COUNT_TX_65_127, 0x000220),
+ REG(SYS_COUNT_TX_128_511, 0x000224),
+ REG(SYS_COUNT_TX_512_1023, 0x000228),
+ REG(SYS_COUNT_TX_1024_1526, 0x00022c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_RESET_CFG, 0x000e00),
+ REG(SYS_SR_ETYPE_CFG, 0x000e04),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
+ REG(SYS_PORT_MODE, 0x000e0c),
+ REG(SYS_FRONT_PORT_MODE, 0x000e2c),
+ REG(SYS_FRM_AGING, 0x000e44),
+ REG(SYS_STAT_CFG, 0x000e48),
+ REG(SYS_SW_STATUS, 0x000e4c),
+ REG_RESERVED(SYS_MISC_CFG),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c),
+ REG(SYS_REW_MAC_LOW_CFG, 0x000e84),
+ REG(SYS_TIMESTAMP_OFFSET, 0x000e9c),
+ REG(SYS_PAUSE_CFG, 0x000ea0),
+ REG(SYS_PAUSE_TOT_CFG, 0x000ebc),
+ REG(SYS_ATOP, 0x000ec0),
+ REG(SYS_ATOP_TOT_CFG, 0x000edc),
+ REG(SYS_MAC_FC_CFG, 0x000ee0),
+ REG(SYS_MMGT, 0x000ef8),
+ REG_RESERVED(SYS_MMGT_FAST),
+ REG_RESERVED(SYS_EVENTS_DIF),
+ REG_RESERVED(SYS_EVENTS_CORE),
+ REG_RESERVED(SYS_CNT),
+ REG(SYS_PTP_STATUS, 0x000f14),
+ REG(SYS_PTP_TXSTAMP, 0x000f18),
+ REG(SYS_PTP_NXT, 0x000f1c),
+ REG(SYS_PTP_CFG, 0x000f20),
+ REG(SYS_RAM_INIT, 0x000f24),
+ REG_RESERVED(SYS_CM_ADDR),
+ REG_RESERVED(SYS_CM_DATA_WR),
+ REG_RESERVED(SYS_CM_DATA_RD),
+ REG_RESERVED(SYS_CM_OP),
+ REG_RESERVED(SYS_CM_DATA),
+};
+
+static const u32 vsc9959_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
+static const u32 vsc9959_gcb_regmap[] = {
+ REG(GCB_SOFT_RST, 0x000004),
+};
+
+static const u32 *vsc9959_regmap[] = {
+ [ANA] = vsc9959_ana_regmap,
+ [QS] = vsc9959_qs_regmap,
+ [QSYS] = vsc9959_qsys_regmap,
+ [REW] = vsc9959_rew_regmap,
+ [SYS] = vsc9959_sys_regmap,
+ [S2] = vsc9959_s2_regmap,
+ [PTP] = vsc9959_ptp_regmap,
+ [GCB] = vsc9959_gcb_regmap,
+};
+
+/* Addresses are relative to the PCI device's base address and
+ * will be fixed up at ioremap time.
+ */
+static struct resource vsc9959_target_io_res[] = {
+ [ANA] = {
+ .start = 0x0280000,
+ .end = 0x028ffff,
+ .name = "ana",
+ },
+ [QS] = {
+ .start = 0x0080000,
+ .end = 0x00800ff,
+ .name = "qs",
+ },
+ [QSYS] = {
+ .start = 0x0200000,
+ .end = 0x021ffff,
+ .name = "qsys",
+ },
+ [REW] = {
+ .start = 0x0030000,
+ .end = 0x003ffff,
+ .name = "rew",
+ },
+ [SYS] = {
+ .start = 0x0010000,
+ .end = 0x001ffff,
+ .name = "sys",
+ },
+ [S2] = {
+ .start = 0x0060000,
+ .end = 0x00603ff,
+ .name = "s2",
+ },
+ [PTP] = {
+ .start = 0x0090000,
+ .end = 0x00900cb,
+ .name = "ptp",
+ },
+ [GCB] = {
+ .start = 0x0070000,
+ .end = 0x00701ff,
+ .name = "devcpu_gcb",
+ },
+};
+
+static struct resource vsc9959_port_io_res[] = {
+ {
+ .start = 0x0100000,
+ .end = 0x010ffff,
+ .name = "port0",
+ },
+ {
+ .start = 0x0110000,
+ .end = 0x011ffff,
+ .name = "port1",
+ },
+ {
+ .start = 0x0120000,
+ .end = 0x012ffff,
+ .name = "port2",
+ },
+ {
+ .start = 0x0130000,
+ .end = 0x013ffff,
+ .name = "port3",
+ },
+ {
+ .start = 0x0140000,
+ .end = 0x014ffff,
+ .name = "port4",
+ },
+ {
+ .start = 0x0150000,
+ .end = 0x015ffff,
+ .name = "port5",
+ },
+};
+
+static const struct reg_field vsc9959_regfields[] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
+ [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+};
+
+static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
+ { .offset = 0x00, .name = "rx_octets", },
+ { .offset = 0x01, .name = "rx_unicast", },
+ { .offset = 0x02, .name = "rx_multicast", },
+ { .offset = 0x03, .name = "rx_broadcast", },
+ { .offset = 0x04, .name = "rx_shorts", },
+ { .offset = 0x05, .name = "rx_fragments", },
+ { .offset = 0x06, .name = "rx_jabbers", },
+ { .offset = 0x07, .name = "rx_crc_align_errs", },
+ { .offset = 0x08, .name = "rx_sym_errs", },
+ { .offset = 0x09, .name = "rx_frames_below_65_octets", },
+ { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
+ { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
+ { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
+ { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
+ { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
+ { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
+ { .offset = 0x10, .name = "rx_pause", },
+ { .offset = 0x11, .name = "rx_control", },
+ { .offset = 0x12, .name = "rx_longs", },
+ { .offset = 0x13, .name = "rx_classified_drops", },
+ { .offset = 0x14, .name = "rx_red_prio_0", },
+ { .offset = 0x15, .name = "rx_red_prio_1", },
+ { .offset = 0x16, .name = "rx_red_prio_2", },
+ { .offset = 0x17, .name = "rx_red_prio_3", },
+ { .offset = 0x18, .name = "rx_red_prio_4", },
+ { .offset = 0x19, .name = "rx_red_prio_5", },
+ { .offset = 0x1A, .name = "rx_red_prio_6", },
+ { .offset = 0x1B, .name = "rx_red_prio_7", },
+ { .offset = 0x1C, .name = "rx_yellow_prio_0", },
+ { .offset = 0x1D, .name = "rx_yellow_prio_1", },
+ { .offset = 0x1E, .name = "rx_yellow_prio_2", },
+ { .offset = 0x1F, .name = "rx_yellow_prio_3", },
+ { .offset = 0x20, .name = "rx_yellow_prio_4", },
+ { .offset = 0x21, .name = "rx_yellow_prio_5", },
+ { .offset = 0x22, .name = "rx_yellow_prio_6", },
+ { .offset = 0x23, .name = "rx_yellow_prio_7", },
+ { .offset = 0x24, .name = "rx_green_prio_0", },
+ { .offset = 0x25, .name = "rx_green_prio_1", },
+ { .offset = 0x26, .name = "rx_green_prio_2", },
+ { .offset = 0x27, .name = "rx_green_prio_3", },
+ { .offset = 0x28, .name = "rx_green_prio_4", },
+ { .offset = 0x29, .name = "rx_green_prio_5", },
+ { .offset = 0x2A, .name = "rx_green_prio_6", },
+ { .offset = 0x2B, .name = "rx_green_prio_7", },
+ { .offset = 0x80, .name = "tx_octets", },
+ { .offset = 0x81, .name = "tx_unicast", },
+ { .offset = 0x82, .name = "tx_multicast", },
+ { .offset = 0x83, .name = "tx_broadcast", },
+ { .offset = 0x84, .name = "tx_collision", },
+ { .offset = 0x85, .name = "tx_drops", },
+ { .offset = 0x86, .name = "tx_pause", },
+ { .offset = 0x87, .name = "tx_frames_below_65_octets", },
+ { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
+ { .offset = 0x89, .name = "tx_frames_128_255_octets", },
+ { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
+ { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
+ { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
+ { .offset = 0x8E, .name = "tx_yellow_prio_0", },
+ { .offset = 0x8F, .name = "tx_yellow_prio_1", },
+ { .offset = 0x90, .name = "tx_yellow_prio_2", },
+ { .offset = 0x91, .name = "tx_yellow_prio_3", },
+ { .offset = 0x92, .name = "tx_yellow_prio_4", },
+ { .offset = 0x93, .name = "tx_yellow_prio_5", },
+ { .offset = 0x94, .name = "tx_yellow_prio_6", },
+ { .offset = 0x95, .name = "tx_yellow_prio_7", },
+ { .offset = 0x96, .name = "tx_green_prio_0", },
+ { .offset = 0x97, .name = "tx_green_prio_1", },
+ { .offset = 0x98, .name = "tx_green_prio_2", },
+ { .offset = 0x99, .name = "tx_green_prio_3", },
+ { .offset = 0x9A, .name = "tx_green_prio_4", },
+ { .offset = 0x9B, .name = "tx_green_prio_5", },
+ { .offset = 0x9C, .name = "tx_green_prio_6", },
+ { .offset = 0x9D, .name = "tx_green_prio_7", },
+ { .offset = 0x9E, .name = "tx_aged", },
+ { .offset = 0x100, .name = "drop_local", },
+ { .offset = 0x101, .name = "drop_tail", },
+ { .offset = 0x102, .name = "drop_yellow_prio_0", },
+ { .offset = 0x103, .name = "drop_yellow_prio_1", },
+ { .offset = 0x104, .name = "drop_yellow_prio_2", },
+ { .offset = 0x105, .name = "drop_yellow_prio_3", },
+ { .offset = 0x106, .name = "drop_yellow_prio_4", },
+ { .offset = 0x107, .name = "drop_yellow_prio_5", },
+ { .offset = 0x108, .name = "drop_yellow_prio_6", },
+ { .offset = 0x109, .name = "drop_yellow_prio_7", },
+ { .offset = 0x10A, .name = "drop_green_prio_0", },
+ { .offset = 0x10B, .name = "drop_green_prio_1", },
+ { .offset = 0x10C, .name = "drop_green_prio_2", },
+ { .offset = 0x10D, .name = "drop_green_prio_3", },
+ { .offset = 0x10E, .name = "drop_green_prio_4", },
+ { .offset = 0x10F, .name = "drop_green_prio_5", },
+ { .offset = 0x110, .name = "drop_green_prio_6", },
+ { .offset = 0x111, .name = "drop_green_prio_7", },
+};
+
+#define VSC9959_INIT_TIMEOUT 50000
+#define VSC9959_GCB_RST_SLEEP 100
+#define VSC9959_SYS_RAMINIT_SLEEP 80
+
+static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
+{
+ int val;
+
+ regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val);
+
+ return val;
+}
+
+static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, SYS_RAM_INIT);
+}
+
+static int vsc9959_reset(struct ocelot *ocelot)
+{
+ int val, err;
+
+ /* soft-reset the switch core */
+ regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1);
+
+ err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
+ VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch core reset\n");
+ return err;
+ }
+
+ /* Initialize the switch memories (takes ~40 us) */
+ ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT);
+ err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val,
+ VSC9959_SYS_RAMINIT_SLEEP,
+ VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch sram init\n");
+ return err;
+ }
+
+ /* enable switch core */
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ return 0;
+}
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+};
+
+struct felix_info felix_info_vsc9959 = {
+ .target_io_res = vsc9959_target_io_res,
+ .port_io_res = vsc9959_port_io_res,
+ .regfields = vsc9959_regfields,
+ .map = vsc9959_regmap,
+ .ops = &vsc9959_ops,
+ .stats_layout = vsc9959_stats_layout,
+ .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .shared_queue_sz = 128 * 1024,
+ .num_ports = 6,
+ .pci_bar = 4,
+};
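
vsc9959_reset() above relies on readx_poll_timeout() from <linux/iopoll.h>, which stores op(addr) into val until cond holds or timeout_us expires, sleeping sleep_us between reads; this is why the two status callbacks take a struct ocelot * and return int. A condensed sketch of the same pattern (vsc9959_example_wait_ram_init() is hypothetical):

static int vsc9959_example_wait_ram_init(struct ocelot *ocelot)
{
	int val;

	return readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot,
				  val, !val, VSC9959_SYS_RAMINIT_SLEEP,
				  VSC9959_INIT_TIMEOUT);
}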
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index b00274caae4f..e548289df31e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -639,7 +639,8 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i, phy_mode = -1;
+ phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA;
+ int ret, i;
u32 mask;
/* Make sure that port 0 is the cpu port */
@@ -661,10 +662,10 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode);
+ if (ret) {
pr_err("Can't find phy-mode for master device\n");
- return phy_mode;
+ return ret;
}
ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
if (ret < 0)
@@ -1077,10 +1078,13 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
- priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
+ GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
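
This hunk, like the realtek-smi and sja1105 ones below, replaces dsa_switch_alloc() with a plain devm allocation plus explicit initialization of ds->dev and ds->num_ports. The shared pattern, as a sketch (example_switch_alloc() is hypothetical; note that the third devm_kzalloc() argument is a gfp_t):

static struct dsa_switch *example_switch_alloc(struct device *dev,
					       size_t num_ports, void *priv)
{
	struct dsa_switch *ds;

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = dev;
	ds->num_ports = num_ports;
	ds->priv = priv;

	return ds;
}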
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index dc0509c02d29..fae188c60191 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -444,9 +444,12 @@ static int realtek_smi_probe(struct platform_device *pdev)
return ret;
}
- smi->ds = dsa_switch_alloc(dev, smi->num_ports);
+ smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
if (!smi->ds)
return -ENOMEM;
+
+ smi->ds->dev = dev;
+ smi->ds->num_ports = smi->num_ports;
smi->ds->priv = smi;
smi->ds->ops = var->ds_ops;
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index ffac0ea4e8d5..0fe1ae173aa1 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -28,6 +28,7 @@ config NET_DSA_SJA1105_TAS
bool "Support for the Time-Aware Scheduler on NXP SJA1105"
depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
+ depends on NET_DSA_SJA1105_PTP
help
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index fbb564c3beb8..d801fc204d19 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,7 +20,13 @@
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
#include "sja1105_tas.h"
+#include "sja1105_ptp.h"
/* Keeps the different addresses between E/T and P/Q/R/S */
struct sja1105_regs {
@@ -32,9 +38,10 @@ struct sja1105_regs {
u64 config;
u64 rmii_pll1;
u64 ptp_control;
- u64 ptpclk;
+ u64 ptpclkval;
u64 ptpclkrate;
- u64 ptptsclk;
+ u64 ptpclkcorp;
+ u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
u64 pad_mii_id[SJA1105_NUM_PORTS];
@@ -71,14 +78,15 @@ struct sja1105_info {
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
- int (*ptp_cmd)(const void *ctx, const void *data);
- int (*reset_cmd)(const void *ctx, const void *data);
+ int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
+ void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
const char *name;
};
@@ -91,26 +99,16 @@ struct sja1105_private {
struct spi_device *spidev;
struct dsa_switch *ds;
struct sja1105_port ports[SJA1105_NUM_PORTS];
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *clock;
- /* The cycle counter translates the PTP timestamps (based on
- * a free-running counter) into a software time domain.
- */
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
- struct delayed_work refresh_work;
- /* Serializes all operations on the cycle counter */
- struct mutex ptp_lock;
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
struct sja1105_tagger_data tagger_data;
+ struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
#include "sja1105_dynamic_config.h"
-#include "sja1105_ptp.h"
struct sja1105_spi_message {
u64 access;
@@ -118,24 +116,27 @@ struct sja1105_spi_message {
u64 address;
};
-typedef enum {
- SPI_READ = 0,
- SPI_WRITE = 1,
-} sja1105_spi_rw_mode_t;
-
/* From sja1105_main.c */
-int sja1105_static_config_reload(struct sja1105_private *priv);
+enum sja1105_reset_reason {
+ SJA1105_VLAN_FILTERING = 0,
+ SJA1105_RX_HWTSTAMPING,
+ SJA1105_AGEING_TIME,
+ SJA1105_SCHEDULING,
+};
+
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason);
/* From sja1105_spi.c */
-int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- void *packed_buf, size_t size_bytes);
-int sja1105_spi_send_int(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u64 *value, u64 size_bytes);
-int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 base_addr,
- void *packed_buf, u64 buf_len);
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len);
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts);
int sja1105_static_config_upload(struct sja1105_private *priv);
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited);
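
Callers of the reworked SPI helpers pass a struct ptp_system_timestamp * when they need the transfer bracketed by system-clock readings (see the PTP changes below), or NULL when they don't. A hypothetical user, as a sketch:

static int example_read_device_id(struct sja1105_private *priv, u32 *id)
{
	const struct sja1105_regs *regs = priv->info->regs;

	/* NULL: no system timestamp of this transfer is needed */
	return sja1105_xfer_u32(priv, SPI_READ, regs->device_id, id, NULL);
}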
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 608126a15d72..9082e52b55e9 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -118,9 +118,8 @@ static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
idiv.pd = enabled ? 0 : 1; /* Power down? */
sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->cgu_idiv[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
@@ -167,9 +166,8 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
mii_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_tx_clk[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -192,9 +190,8 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
mii_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_rx_clk[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -217,9 +214,8 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_ext_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -242,9 +238,8 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_ext_rx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
@@ -337,9 +332,8 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
txc.pd = 0;
sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rgmii_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
/* AGU */
@@ -383,9 +377,8 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_tx[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
@@ -405,7 +398,7 @@ sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
}
/* Valid delay range is 73.8 to 101.7 degrees, in steps of 0.9 */
-static inline u64 sja1105_rgmii_delay(u64 phase)
+static u64 sja1105_rgmii_delay(u64 phase)
{
/* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
* To avoid floating point operations we'll multiply by 10
@@ -442,9 +435,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
pad_mii_id.txc_pd = 1;
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_id[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
if (rc < 0)
return rc;
@@ -459,9 +451,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
}
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_id[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
@@ -547,9 +538,8 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
ref_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rmii_ref_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -565,9 +555,8 @@ sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rmii_ext_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
@@ -595,8 +584,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
pll.pd = 0x1;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to configure PLL1 for 50MHz\n");
return rc;
@@ -606,8 +595,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
pll.pd = 0x0;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to enable PLL1\n");
return rc;
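
For reference, the conversion described in the comment above sja1105_rgmii_delay(): with delay = 73.8 + delay_tune * 0.9 degrees, and everything scaled by 10 to stay in integer arithmetic, delay_tune = (phase * 10 - 738) / 9. A sketch consistent with that comment (example_rgmii_delay_tune() is hypothetical):

static u64 example_rgmii_delay_tune(u64 phase_deg)
{
	/* e.g. phase_deg = 90 gives (900 - 738) / 9 = 18 */
	return (phase_deg * 10 - 738) / 9;
}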
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 91da430045ff..25381bd65ed7 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -686,8 +686,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
@@ -698,8 +698,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
memset(packed_buf, 0, ops->packed_size);
/* Retrieve the read operation's result */
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
@@ -771,8 +771,8 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
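
The hunks above preserve the dynamic-config handshake: an SPI write transmits a packed "read config table entry" command, then an SPI read of the same address retrieves the result. Condensed into a sketch (this omits the unpacking and any polling the real functions do between the two transfers):

static int example_dynamic_read(struct sja1105_private *priv,
				const struct sja1105_dynamic_table_ops *ops,
				u8 *packed_buf)
{
	int rc;

	/* SPI write carrying the "read config table entry" command */
	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
			      ops->packed_size);
	if (rc < 0)
		return rc;

	memset(packed_buf, 0, ops->packed_size);

	/* Retrieve the read operation's result */
	return sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
				ops->packed_size);
}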
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index ab581a28cd41..064301cc7d5b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -167,8 +167,8 @@ static int sja1105_port_status_get_mac(struct sja1105_private *priv,
int rc;
/* MAC area */
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port],
- packed_buf, SJA1105_SIZE_MAC_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac[port], packed_buf,
+ SJA1105_SIZE_MAC_AREA);
if (rc < 0)
return rc;
@@ -185,8 +185,8 @@ static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port],
- packed_buf, SJA1105_SIZE_HL1_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl1[port], packed_buf,
+ SJA1105_SIZE_HL1_AREA);
if (rc < 0)
return rc;
@@ -203,8 +203,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port],
- packed_buf, SJA1105_SIZE_HL2_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl2[port], packed_buf,
+ SJA1105_SIZE_HL2_AREA);
if (rc < 0)
return rc;
@@ -215,8 +215,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
priv->info->device_id == SJA1105T_DEVICE_ID)
return 0;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port],
- packed_buf, SJA1105_SIZE_QLEVEL_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->qlevel[port], packed_buf,
+ SJA1105_SIZE_QLEVEL_AREA);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 7687ddcae159..a51ac088c0bc 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -382,8 +382,8 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
- /* Disallow dynamic changing of the mirror port */
- .mirr_ptacu = 0,
+ /* Allow dynamic changing of the mirror port */
+ .mirr_ptacu = true,
.switchid = priv->ds->index,
/* Priority queue for link-local management frames
* (both ingress to and egress from CPU - PTP, STP etc)
@@ -403,8 +403,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
* by installing a temporary 'management route'
*/
.host_port = dsa_upstream_port(priv->ds, 0),
- /* Same as host port */
- .mirr_port = dsa_upstream_port(priv->ds, 0),
+ /* Default to an invalid value */
+ .mirr_port = SJA1105_NUM_PORTS,
/* Link-local traffic received on casc_port will be forwarded
* to host_port without embedding the source port and device ID
* info in the destination MAC address (presumably because it
@@ -458,9 +458,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
-static inline void
-sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
- int index)
+static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
+ int index)
{
policing[index].sharindx = index;
policing[index].smax = 65535; /* Burst size in bytes */
@@ -507,39 +506,6 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
-static int sja1105_init_avb_params(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_avb_params_entry *avb;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
-
- /* Discard previous AVB Parameters Table */
- if (table->entry_count) {
- kfree(table->entries);
- table->entry_count = 0;
- }
-
- /* Configure the reception of meta frames only if requested */
- if (!on)
- return 0;
-
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
-
- avb = table->entries;
-
- avb->destmeta = SJA1105_META_DMAC;
- avb->srcmeta = SJA1105_META_SMAC;
-
- return 0;
-}
-
static int sja1105_static_config_load(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -580,9 +546,6 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
- rc = sja1105_init_avb_params(priv, false);
- if (rc < 0)
- return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
@@ -594,15 +557,15 @@ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
int i;
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (ports->role == XMII_MAC)
+ if (ports[i].role == XMII_MAC)
continue;
- if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
- ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
priv->rgmii_rx_delay[i] = true;
- if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
- ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
priv->rgmii_tx_delay[i] = true;
if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
@@ -621,8 +584,9 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
for_each_child_of_node(ports_node, child) {
struct device_node *phy_node;
- int phy_mode;
+ phy_interface_t phy_mode;
u32 index;
+ int err;
/* Get switch port number from DT */
if (of_property_read_u32(child, "reg", &index) < 0) {
@@ -633,8 +597,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
/* Get PHY mode from DT */
- phy_mode = of_get_phy_mode(child);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
@@ -951,7 +915,7 @@ sja1105_static_fdb_change(struct sja1105_private *priv, int port,
* For the placement of a newly learnt FDB entry, the switch selects the bin
* based on a hash function, and the way within that bin incrementally.
*/
-static inline int sja1105et_fdb_index(int bin, int way)
+static int sja1105et_fdb_index(int bin, int way)
{
return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
@@ -1095,7 +1059,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1158,7 +1122,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1204,7 +1168,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
* for what gets printed in 'bridge fdb show'. In the case of zero,
* no VID gets printed at all.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_add_cmd(ds, port, addr, vid);
@@ -1215,7 +1179,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
@@ -1254,7 +1218,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
@@ -1377,17 +1341,33 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
sja1105_bridge_member(ds, port, br, false);
}
+static const char * const sja1105_reset_reasons[] = {
+ [SJA1105_VLAN_FILTERING] = "VLAN filtering",
+ [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
+ [SJA1105_AGEING_TIME] = "Ageing time",
+ [SJA1105_SCHEDULING] = "Time-aware scheduling",
+};
+
/* For situations where we need to change a setting at runtime that is only
* available through the static configuration, resetting the switch in order
* to upload the new static config is unavoidable. Back up the settings we
* modify at runtime (currently only MAC) and restore them after uploading,
* such that this operation is relatively seamless.
*/
-int sja1105_static_config_reload(struct sja1105_private *priv)
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason)
{
+ struct ptp_system_timestamp ptp_sts_before;
+ struct ptp_system_timestamp ptp_sts_after;
struct sja1105_mac_config_entry *mac;
int speed_mbps[SJA1105_NUM_PORTS];
+ struct dsa_switch *ds = priv->ds;
+ s64 t1, t2, t3, t4;
+ s64 t12, t34;
int rc, i;
+ s64 now;
+
+ mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -1401,10 +1381,41 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
mac[i].speed = SJA1105_SPEED_AUTO;
}
+ /* No PTP operations can run right now */
+ mutex_lock(&priv->ptp_data.lock);
+
+ rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
/* Reset switch and send updated static configuration */
rc = sja1105_static_config_upload(priv);
if (rc < 0)
- goto out;
+ goto out_unlock_ptp;
+
+ rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
+ t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
+ t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
+ t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
+ t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
+ /* Mid point, corresponds to pre-reset PTPCLKVAL */
+ t12 = t1 + (t2 - t1) / 2;
+ /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
+ t34 = t3 + (t4 - t3) / 2;
+ /* Advance PTPCLKVAL by the time it took since its readout */
+ now += (t34 - t12);
+
+ __sja1105_ptp_adjtime(ds, now);
+
+out_unlock_ptp:
+ mutex_unlock(&priv->ptp_data.lock);
+
+ dev_info(priv->ds->dev,
+ "Reset switch and programmed static config. Reason: %s\n",
+ sja1105_reset_reasons[reason]);
/* Configure the CGU (PLLs) for MII and RMII PHYs.
* For these interfaces there is no dynamic configuration
@@ -1420,6 +1431,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
goto out;
}
out:
+ mutex_unlock(&priv->mgmt_lock);
+
return rc;
}
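
To make the compensation above concrete with hypothetical numbers: if the pre-reset gettimex is bracketed by t1 = 1000 ns and t2 = 1200 ns of system time, the saved PHC value corresponds to t12 = 1100 ns; if the post-reset settime(0) is bracketed by t3 = 61000 ns and t4 = 61400 ns, then t34 = 61200 ns, and the PHC is advanced by t34 - t12 = 60100 ns to cover the time it spent frozen across the reset. As a sketch:

static s64 example_reset_compensation(s64 t1, s64 t2, s64 t3, s64 t4)
{
	s64 t12 = t1 + (t2 - t1) / 2;	/* system time of pre-reset readout */
	s64 t34 = t3 + (t4 - t3) / 2;	/* system time of post-reset settime(0) */

	return t34 - t12;	/* ns to add back to the saved PHC value */
}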
@@ -1598,7 +1611,7 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
l2_lookup_params = table->entries;
l2_lookup_params->shared_learn = !enabled;
- rc = sja1105_static_config_reload(priv);
+ rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1687,7 +1700,7 @@ static int sja1105_setup(struct dsa_switch *ds)
return rc;
}
- rc = sja1105_ptp_clock_register(priv);
+ rc = sja1105_ptp_clock_register(ds);
if (rc < 0) {
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
return rc;
@@ -1729,9 +1742,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
struct sja1105_private *priv = ds->priv;
sja1105_tas_teardown(ds);
- cancel_work_sync(&priv->tagger_data.rxtstamp_work);
- skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
- sja1105_ptp_clock_unregister(priv);
+ sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
}
@@ -1743,7 +1754,7 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
- slave = ds->ports[port].slave;
+ slave = dsa_to_port(ds, port)->slave;
slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1775,7 +1786,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
}
/* Transfer skb to the host port. */
- dsa_enqueue_skb(skb, ds->ports[port].slave);
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
/* Wait until the switch has processed the frame */
do {
@@ -1817,11 +1828,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
- struct skb_shared_hwtstamps shwt = {0};
int slot = sp->mgmt_slot;
struct sk_buff *clone;
- u64 now, ts;
- int rc;
/* The tragic fact about the switch having 4x2 slots for installing
* management routes is that all of them except one are actually
@@ -1847,27 +1855,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
if (!clone)
goto out;
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
-
- mutex_lock(&priv->ptp_lock);
+ sja1105_ptp_txtstamp_skb(ds, slot, clone);
- now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
- rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
- if (rc < 0) {
- dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
- kfree_skb(clone);
- goto out_unlock_ptp;
- }
-
- ts = sja1105_tstamp_reconstruct(priv, now, ts);
- ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
-
- shwt.hwtstamp = ns_to_ktime(ts);
- skb_complete_tx_timestamp(clone, &shwt);
-
-out_unlock_ptp:
- mutex_unlock(&priv->ptp_lock);
out:
mutex_unlock(&priv->mgmt_lock);
return NETDEV_TX_OK;
@@ -1894,183 +1883,97 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
l2_lookup_params->maxage = maxage;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}
-/* Must be called only with priv->tagger_data.state bit
- * SJA1105_HWTS_RX_EN cleared
+static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return sja1105_setup_tc_taprio(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* We have a single mirror (@to) port, but can configure ingress and egress
+ * mirroring on all other (@from) ports.
+ * We need to allow mirroring rules only as long as the @to port is always the
+ * same, and we need to unset the @to port from mirr_port only when there is no
+ * mirroring rule that references it.
*/
-static int sja1105_change_rxtstamping(struct sja1105_private *priv,
- bool on)
+static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ bool ingress, bool enabled)
{
struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
- general_params->send_meta1 = on;
- general_params->send_meta0 = on;
- rc = sja1105_init_avb_params(priv, on);
- if (rc < 0)
- return rc;
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- /* Initialize the meta state machine to a known state */
- if (priv->tagger_data.stampable_skb) {
- kfree_skb(priv->tagger_data.stampable_skb);
- priv->tagger_data.stampable_skb = NULL;
+ already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+ general_params->mirr_port);
+ return -EBUSY;
}
- return sja1105_static_config_reload(priv);
-}
+ new_mirr_port = to;
+ if (!enabled) {
+ bool keep = false;
+ int port;
-static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
-{
- struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- bool rx_on;
- int rc;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- priv->ports[port].hwts_tx_en = false;
- break;
- case HWTSTAMP_TX_ON:
- priv->ports[port].hwts_tx_en = true;
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- rx_on = false;
- break;
- default:
- rx_on = true;
- break;
+ /* Anybody still referencing mirr_port? */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+ }
+ }
+ /* Unset already_enabled for next time */
+ if (!keep)
+ new_mirr_port = SJA1105_NUM_PORTS;
}
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
- if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
- clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
-
- rc = sja1105_change_rxtstamping(priv, rx_on);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to change RX timestamping: %d\n", rc);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
+ 0, general_params, true);
+ if (rc < 0)
return rc;
- }
- if (rx_on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
- return 0;
-}
-
-static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
-{
- struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
-
- config.flags = 0;
- if (priv->ports[port].hwts_tx_en)
- config.tx_type = HWTSTAMP_TX_ON;
+ if (ingress)
+ mac[from].ing_mirr = enabled;
else
- config.tx_type = HWTSTAMP_TX_OFF;
- if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-#define to_tagger(d) \
- container_of((d), struct sja1105_tagger_data, rxtstamp_work)
-#define to_sja1105(d) \
- container_of((d), struct sja1105_private, tagger_data)
-
-static void sja1105_rxtstamp_work(struct work_struct *work)
-{
- struct sja1105_tagger_data *data = to_tagger(work);
- struct sja1105_private *priv = to_sja1105(data);
- struct sk_buff *skb;
- u64 now;
-
- mutex_lock(&priv->ptp_lock);
-
- while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
- struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
- u64 ts;
-
- now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
- *shwt = (struct skb_shared_hwtstamps) {0};
+ mac[from].egr_mirr = enabled;
- ts = SJA1105_SKB_CB(skb)->meta_tstamp;
- ts = sja1105_tstamp_reconstruct(priv, now, ts);
- ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
-
- shwt->hwtstamp = ns_to_ktime(ts);
- netif_rx_ni(skb);
- }
-
- mutex_unlock(&priv->ptp_lock);
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
+ &mac[from], true);
}
-/* Called from dsa_skb_defer_rx_timestamp */
-static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+static int sja1105_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *data = &priv->tagger_data;
-
- if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
- return false;
-
- /* We need to read the full PTP clock to reconstruct the Rx
- * timestamp. For that we need a sleepable context.
- */
- skb_queue_tail(&data->skb_rxtstamp_queue, skb);
- schedule_work(&data->rxtstamp_work);
- return true;
+ return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ ingress, true);
}
-/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
- * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
- * callback, where we will timestamp it synchronously.
- */
-static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+static void sja1105_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!sp->hwts_tx_en)
- return false;
-
- return true;
-}
-
-static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
- enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_TAPRIO:
- return sja1105_setup_tc_taprio(ds, port, type_data);
- default:
- return -EOPNOTSUPP;
- }
+ sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ mirror->ingress, false);
}
static const struct dsa_switch_ops sja1105_switch_ops = {
@@ -2106,6 +2009,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_rxtstamp = sja1105_port_rxtstamp,
.port_txtstamp = sja1105_port_txtstamp,
.port_setup_tc = sja1105_port_setup_tc,
+ .port_mirror_add = sja1105_mirror_add,
+ .port_mirror_del = sja1105_mirror_del,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2113,23 +2018,23 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
const struct sja1105_regs *regs = priv->info->regs;
u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
struct device *dev = &priv->spidev->dev;
- u64 device_id;
+ u32 device_id;
u64 part_no;
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
- &device_id, SJA1105_SIZE_DEVICE_ID);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
+ NULL);
if (rc < 0)
return rc;
if (device_id != priv->info->device_id) {
- dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
+ dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n",
priv->info->device_id, device_id);
return -ENODEV;
}
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
- prod_id, SJA1105_SIZE_DEVICE_ID);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
+ SJA1105_SIZE_DEVICE_ID);
if (rc < 0)
return rc;
@@ -2193,32 +2098,37 @@ static int sja1105_probe(struct spi_device *spi)
dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
- ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = SJA1105_NUM_PORTS;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
tagger_data = &priv->tagger_data;
- skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
- INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
- spin_lock_init(&tagger_data->meta_lock);
+
+ mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->mgmt_lock);
+
+ sja1105_tas_setup(ds);
+
+ rc = dsa_register_switch(priv->ds);
+ if (rc)
+ return rc;
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
struct sja1105_port *sp = &priv->ports[i];
- ds->ports[i].priv = sp;
- sp->dp = &ds->ports[i];
+ dsa_to_port(ds, i)->priv = sp;
+ sp->dp = dsa_to_port(ds, i);
sp->data = tagger_data;
}
- mutex_init(&priv->mgmt_lock);
-
- sja1105_tas_setup(ds);
- return dsa_register_switch(priv->ds);
+ return 0;
}
static int sja1105_remove(struct spi_device *spi)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index d8e8dd59f3d1..54258a25031d 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
+#include <linux/spi/spi.h>
#include "sja1105.h"
/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
@@ -13,24 +14,6 @@
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
-/* Timestamps are in units of 8 ns clock ticks (equivalent to a fixed
- * 125 MHz clock) so the scale factor (MULT / SHIFT) needs to be 8.
- * Furthermore, wisely pick SHIFT as 28 bits, which translates
- * MULT into 2^31 (0x80000000). This is the same value around which
- * the hardware PTPCLKRATE is centered, so the same ppb conversion
- * arithmetic can be reused.
- */
-#define SJA1105_CC_SHIFT 28
-#define SJA1105_CC_MULT (8 << SJA1105_CC_SHIFT)
-
-/* Having 33 bits of cycle counter left until a 64-bit overflow during delta
- * conversion, we multiply this by the 8 ns counter resolution and arrive at
- * a comfortable 68.71 second refresh interval until the delta would cause
- * an integer overflow, in absence of any other readout.
- * Approximate to 1 minute.
- */
-#define SJA1105_REFRESH_INTERVAL (HZ * 60)
-
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
@@ -41,7 +24,7 @@
*
* This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
* and defines the scaling factor between scaled_ppm and the actual
- * frequency adjustments (both cycle counter and hardware).
+ * frequency adjustments of the PHC.
*
* ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
* simplifies to
@@ -49,22 +32,154 @@
*/
#define SJA1105_CC_MULT_NUM (1 << 9)
#define SJA1105_CC_MULT_DEM 15625
+#define SJA1105_CC_MULT 0x80000000
-#define ptp_to_sja1105(d) container_of((d), struct sja1105_private, ptp_caps)
-#define cc_to_sja1105(d) container_of((d), struct sja1105_private, tstamp_cc)
-#define dw_to_sja1105(d) container_of((d), struct sja1105_private, refresh_work)
-
-struct sja1105_ptp_cmd {
- u64 resptp; /* reset */
+enum sja1105_ptp_clk_mode {
+ PTP_ADD_MODE = 1,
+ PTP_SET_MODE = 0,
};
+#define ptp_caps_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, caps)
+#define ptp_data_to_sja1105(d) \
+ container_of((d), struct sja1105_private, ptp_data)
+
+static int sja1105_init_avb_params(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Configure the reception of meta frames only if requested */
+ if (!on)
+ return 0;
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+
+ return 0;
+}
+
+/* Must be called only with priv->tagger_data.state bit
+ * SJA1105_HWTS_RX_EN cleared
+ */
+static int sja1105_change_rxtstamping(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->send_meta1 = on;
+ general_params->send_meta0 = on;
+
+ rc = sja1105_init_avb_params(priv, on);
+ if (rc < 0)
+ return rc;
+
+ /* Initialize the meta state machine to a known state */
+ if (priv->tagger_data.stampable_skb) {
+ kfree_skb(priv->tagger_data.stampable_skb);
+ priv->tagger_data.stampable_skb = NULL;
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
+}
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+ bool rx_on;
+ int rc;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->ports[port].hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->ports[port].hwts_tx_en = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ rx_on = false;
+ break;
+ default:
+ rx_on = true;
+ break;
+ }
+
+ if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
+ clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+
+ rc = sja1105_change_rxtstamping(priv, rx_on);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to change RX timestamping: %d\n", rc);
+ return rc;
+ }
+ if (rx_on)
+ set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
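
(Editorial aside, not part of this patch: the new sja1105_hwtstamp_set() above is reached through the standard SIOCSHWTSTAMP ioctl. A minimal userspace sketch, assuming an open socket fd and a hypothetical helper name, could look like this:)

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hwtstamp(int fd, const char *ifname)
	{
		struct hwtstamp_config config = {
			.tx_type = HWTSTAMP_TX_ON,
			/* Any filter other than HWTSTAMP_FILTER_NONE turns on
			 * RX timestamping of PTP event frames, per the switch
			 * statement in sja1105_hwtstamp_set() above.
			 */
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&config;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}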
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->ports[port].hwts_tx_en)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
/* Called during cleanup */
- if (!priv->clock)
+ if (!ptp_data->clock)
return -ENODEV;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
@@ -74,42 +189,58 @@ int sja1105_get_ts_info(struct dsa_switch *ds, int port,
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
- info->phc_index = ptp_clock_index(priv->clock);
+ info->phc_index = ptp_clock_index(ptp_data->clock);
return 0;
}
-int sja1105et_ptp_cmd(const void *ctx, const void *data)
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
{
- const struct sja1105_ptp_cmd *cmd = data;
- const struct sja1105_private *priv = ctx;
- const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
- u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
- sja1105_pack(buf, &valid, 31, 31, size);
- sja1105_pack(buf, &cmd->resptp, 2, 2, size);
-
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
- buf, SJA1105_SIZE_PTP_CMD);
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
}
-int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
{
- const struct sja1105_ptp_cmd *cmd = data;
- const struct sja1105_private *priv = ctx;
- const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
- u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
- sja1105_pack(buf, &valid, 31, 31, size);
- sja1105_pack(buf, &cmd->resptp, 3, 3, size);
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ const struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ int rc;
+
+ if (rw == SPI_WRITE)
+ priv->info->ptp_cmd_packing(buf, cmd, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
- buf, SJA1105_SIZE_PTP_CMD);
+ if (rw == SPI_READ)
+ priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
+
+ return rc;
}
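
As a quick worked check of the packing above: a reset command (a zeroed cmd with only resptp = 1) packs to the 32-bit word 0x80000004 on E/T (valid at bit 31, resptp at bit 2) and to 0x80000008 on P/Q/R/S (where resptp sits at bit 3).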
/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
@@ -126,9 +257,10 @@ int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
* Must be called within one wraparound period of the partial timestamp since
* it was generated by the MAC.
*/
-u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
- u64 ts_partial)
+static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
+ u64 ts_partial)
{
+ struct sja1105_private *priv = ds->priv;
u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
u64 ts_reconstructed;
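
(The rest of the function body is elided by the hunk above. As a hedged sketch of the splice-and-correct logic the comment describes, using illustrative variable names:)

	ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;

	/* If splicing produced a value in the future of "now", the
	 * partial counter must have wrapped since the stamp was taken,
	 * so move back by one wraparound period.
	 */
	if (ts_reconstructed > now)
		ts_reconstructed -= (partial_tstamp_mask + 1);

For example, with a 24-bit mask: now = 0x5012345 and ts_partial = 0xFFFFFF splice to 0x5FFFFFF, which is ahead of now, so the reconstructed value becomes 0x4FFFFFF.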
@@ -170,8 +302,9 @@ u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
* To have common code for E/T and P/Q/R/S for reading the timestamp,
* we need to juggle with the offset and the bit indices.
*/
-int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
{
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
int tstamp_bit_start, tstamp_bit_end;
int timeout = 10;
@@ -180,10 +313,8 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
int rc;
do {
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
- regs->ptpegr_ts[port],
- packed_buf,
- priv->info->ptpegr_ts_bytes);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
+ packed_buf, priv->info->ptpegr_ts_bytes);
if (rc < 0)
return rc;
@@ -216,177 +347,350 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
return 0;
}
-int sja1105_ptp_reset(struct sja1105_private *priv)
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
+ ptp_sts);
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
+ struct ptp_system_timestamp *ptp_sts)
{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
+ ptp_sts);
+}
+
+#define rxtstamp_to_tagger(d) \
+ container_of((d), struct sja1105_tagger_data, rxtstamp_work)
+#define tagger_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tagger_data)
+
+static void sja1105_rxtstamp_work(struct work_struct *work)
+{
+ struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work);
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct dsa_switch *ds = priv->ds;
- struct sja1105_ptp_cmd cmd = {0};
+ struct sk_buff *skb;
+
+ mutex_lock(&ptp_data->lock);
+
+ while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ticks, ts;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ continue;
+ }
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ netif_rx_ni(skb);
+ }
+
+ mutex_unlock(&ptp_data->lock);
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+
+ if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state))
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb);
+ schedule_work(&tagger_data->rxtstamp_work);
+ return true;
+}
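
(Design note, editorial: the queue-and-work pattern above exists because .port_rxtstamp is called from the softirq receive path, where the sleeping SPI read of PTPCLKVAL is not allowed. The worker runs in process context, performs the read under ptp_data->lock and reinjects the skb with netif_rx_ni().)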
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
+ * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!sp->hwts_tx_en)
+ return false;
+
+ return true;
+}
+
+static int sja1105_ptp_reset(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
int rc;
- mutex_lock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
cmd.resptp = 1;
+
dev_dbg(ds->dev, "Resetting PTP clock\n");
- rc = priv->info->ptp_cmd(priv, &cmd);
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
- timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
+ sja1105_tas_clockstep(priv->ds);
- mutex_unlock(&priv->ptp_lock);
+ mutex_unlock(&ptp_data->lock);
return rc;
}
-static int sja1105_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+/* Caller must hold ptp_data->lock */
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *ptp_sts)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
- u64 ns;
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks;
+ int rc;
- mutex_lock(&priv->ptp_lock);
- ns = timecounter_read(&priv->tstamp_tc);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ return rc;
+ }
- *ts = ns_to_timespec64(ns);
+ *ns = sja1105_ticks_to_ns(ticks);
return 0;
}
+static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 now = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
+ *ts = ns_to_timespec64(now);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptp_mode_set(struct sja1105_private *priv,
+ enum sja1105_ptp_clk_mode mode)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (ptp_data->cmd.ptpclkadd == mode)
+ return 0;
+
+ ptp_data->cmd.ptpclkadd = mode;
+
+ return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 0 */
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks = ns_to_sja1105_ticks(ns);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
u64 ns = timespec64_to_ns(ts);
+ int rc;
- mutex_lock(&priv->ptp_lock);
- timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, ns);
- mutex_unlock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
- return 0;
+ rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 clkrate32;
s64 clkrate;
+ int rc;
clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
- mutex_lock(&priv->ptp_lock);
-
- /* Force a readout to update the timer *before* changing its frequency.
- *
- * This way, its corrected time curve can at all times be modeled
- * as a linear "A * x + B" function, where:
- *
- * - B are past frequency adjustments and offset shifts, all
- * accumulated into the cycle_last variable.
- *
- * - A is the new frequency adjustments we're just about to set.
- *
- * Reading now makes B accumulate the correct amount of time,
- * corrected at the old rate, before changing it.
- *
- * Hardware timestamps then become simple points on the curve and
- * are approximated using the above function. This is still better
- * than letting the switch take the timestamps using the hardware
- * rate-corrected clock (PTPCLKVAL) - the comparison in this case would
- * be that we're shifting the ruler at the same time as we're taking
- * measurements with it.
- *
- * The disadvantage is that it's possible to receive timestamps when
- * a frequency adjustment took place in the near past.
- * In this case they will be approximated using the new ppb value
- * instead of a compound function made of two segments (one at the old
- * and the other at the new rate) - introducing some inaccuracy.
- */
- timecounter_read(&priv->tstamp_tc);
+ /* Take a +/- value and re-center it around 2^31. */
+ clkrate = SJA1105_CC_MULT + clkrate;
+ WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
+ clkrate32 = clkrate;
- priv->tstamp_cc.mult = SJA1105_CC_MULT + clkrate;
+ mutex_lock(&ptp_data->lock);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
+ NULL);
- return 0;
+ sja1105_tas_adjfreq(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
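
A worked example of the conversion above, with illustrative numbers: scaled_ppm carries a 16-bit fractional part, so +1 ppm is 65536. Then clkrate = 65536 * 512 / 15625 = 2147 (matching 2^31 / 10^6 ≈ 2147.48), and the re-centered value written to PTPCLKRATE is 0x80000000 + 2147 = 0x80000863, i.e. the PHC runs 1 ppm faster than nominal.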
-static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+/* Write to PTPCLKVAL while PTPCLKADD is 1 */
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_private *priv = ds->priv;
+ s64 ticks = ns_to_sja1105_ticks(delta);
+ int rc;
- mutex_lock(&priv->ptp_lock);
- timecounter_adjtime(&priv->tstamp_tc, delta);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
+ return rc;
+ }
- return 0;
+ rc = sja1105_ptpclkval_write(priv, ticks, NULL);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
}
-static u64 sja1105_ptptsclk_read(const struct cyclecounter *cc)
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct sja1105_private *priv = cc_to_sja1105(cc);
- const struct sja1105_regs *regs = priv->info->regs;
- u64 ptptsclk = 0;
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->ptptsclk,
- &ptptsclk, 8);
- if (rc < 0)
- dev_err_ratelimited(priv->ds->dev,
- "failed to read ptp cycle counter: %d\n",
- rc);
- return ptptsclk;
-}
+ mutex_lock(&ptp_data->lock);
-static void sja1105_ptp_overflow_check(struct work_struct *work)
-{
- struct delayed_work *dw = to_delayed_work(work);
- struct sja1105_private *priv = dw_to_sja1105(dw);
- struct timespec64 ts;
+ rc = __sja1105_ptp_adjtime(priv->ds, delta);
- sja1105_ptp_gettime(&priv->ptp_caps, &ts);
+ mutex_unlock(&ptp_data->lock);
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+ return rc;
}
-static const struct ptp_clock_info sja1105_ptp_caps = {
- .owner = THIS_MODULE,
- .name = "SJA1105 PHC",
- .adjfine = sja1105_ptp_adjfine,
- .adjtime = sja1105_ptp_adjtime,
- .gettime64 = sja1105_ptp_gettime,
- .settime64 = sja1105_ptp_settime,
- .max_adj = SJA1105_MAX_ADJ_PPB,
-};
-
-int sja1105_ptp_clock_register(struct sja1105_private *priv)
+int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
- struct dsa_switch *ds = priv->ds;
-
- /* Set up the cycle counter */
- priv->tstamp_cc = (struct cyclecounter) {
- .read = sja1105_ptptsclk_read,
- .mask = CYCLECOUNTER_MASK(64),
- .shift = SJA1105_CC_SHIFT,
- .mult = SJA1105_CC_MULT,
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ ptp_data->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettimex64 = sja1105_ptp_gettimex,
+ .settime64 = sja1105_ptp_settime,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
};
- mutex_init(&priv->ptp_lock);
- priv->ptp_caps = sja1105_ptp_caps;
- priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
- if (IS_ERR_OR_NULL(priv->clock))
- return PTR_ERR(priv->clock);
+ skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
+ INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+ spin_lock_init(&tagger_data->meta_lock);
- INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
- return sja1105_ptp_reset(priv);
+ ptp_data->cmd.corrclk4ts = true;
+ ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+
+ return sja1105_ptp_reset(ds);
}
-void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
- if (IS_ERR_OR_NULL(priv->clock))
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- cancel_delayed_work_sync(&priv->refresh_work);
- ptp_clock_unregister(priv->clock);
- priv->clock = NULL;
+ cancel_work_sync(&priv->tagger_data.rxtstamp_work);
+ skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ ptp_clock_unregister(ptp_data->clock);
+ ptp_data->clock = NULL;
+}
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct skb_shared_hwtstamps shwt = {0};
+ u64 ticks, ts;
+ int rc;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ goto out;
+ }
+
+ rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "timed out polling for tstamp\n");
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ skb_complete_tx_timestamp(skb, &shwt);
+
+out:
+ mutex_unlock(&ptp_data->lock);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 394e12a6ad59..470f44b76318 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -6,59 +6,136 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-int sja1105_ptp_clock_register(struct sja1105_private *priv);
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
-void sja1105_ptp_clock_unregister(struct sja1105_private *priv);
+struct sja1105_ptp_cmd {
+ u64 ptpstrtsch; /* start schedule */
+ u64 ptpstopsch; /* stop schedule */
+ u64 resptp; /* reset */
+ u64 corrclk4ts; /* use the corrected clock for timestamps */
+ u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
+};
-int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts);
+struct sja1105_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct sja1105_ptp_cmd cmd;
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
-int sja1105et_ptp_cmd(const void *ctx, const void *data);
+int sja1105_ptp_clock_register(struct dsa_switch *ds);
-int sja1105pqrs_ptp_cmd(const void *ctx, const void *data);
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *ts);
-u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
- u64 ts_partial);
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone);
+
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts);
-int sja1105_ptp_reset(struct sja1105_private *priv);
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts);
+
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw);
#else
-static inline int sja1105_ptp_clock_register(struct sja1105_private *priv)
+struct sja1105_ptp_cmd;
+
+/* Structures cannot be empty in C. Bah!
+ * Keep the mutex as the only element, which is a bit more difficult to
+ * refactor out of sja1105_main.c anyway.
+ */
+struct sja1105_ptp_data {
+ struct mutex lock;
+};
+
+static inline int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
return 0;
}
-static inline void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
+static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone)
+{
+}
+
+static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts)
{
- return;
+ return 0;
}
-static inline int
-sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
{
return 0;
}
-static inline u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv,
- u64 now, u64 ts_partial)
+static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
{
return 0;
}
-static inline int sja1105_ptp_reset(struct sja1105_private *priv)
+static inline int sja1105_ptp_commit(struct dsa_switch *ds,
+ struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
{
return 0;
}
-#define sja1105et_ptp_cmd NULL
+#define sja1105et_ptp_cmd_packing NULL
-#define sja1105pqrs_ptp_cmd NULL
+#define sja1105pqrs_ptp_cmd_packing NULL
#define sja1105_get_ts_info NULL
+#define sja1105_port_rxtstamp NULL
+
+#define sja1105_port_txtstamp NULL
+
+#define sja1105_hwtstamp_get NULL
+
+#define sja1105_hwtstamp_set NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 58dd37ecde17..29b127f3bf9c 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -7,42 +7,15 @@
#include <linux/packing.h>
#include "sja1105.h"
-#define SJA1105_SIZE_PORT_CTRL 4
#define SJA1105_SIZE_RESET_CMD 4
#define SJA1105_SIZE_SPI_MSG_HEADER 4
#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
-#define SJA1105_SIZE_SPI_TRANSFER_MAX \
- (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN)
-static int sja1105_spi_transfer(const struct sja1105_private *priv,
- const void *tx, void *rx, int size)
-{
- struct spi_device *spi = priv->spidev;
- struct spi_transfer transfer = {
- .tx_buf = tx,
- .rx_buf = rx,
- .len = size,
- };
- struct spi_message msg;
- int rc;
-
- if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) {
- dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n",
- size, SJA1105_SIZE_SPI_TRANSFER_MAX);
- return -EMSGSIZE;
- }
-
- spi_message_init(&msg);
- spi_message_add_tail(&transfer, &msg);
-
- rc = spi_sync(spi, &msg);
- if (rc < 0) {
- dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
- return rc;
- }
-
- return rc;
-}
+struct sja1105_chunk {
+ u8 *buf;
+ size_t len;
+ u64 reg_addr;
+};
static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
@@ -56,242 +29,219 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
sja1105_pack(buf, &msg->address, 24, 4, size);
}
+#define sja1105_hdr_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk))
+#define sja1105_chunk_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk) + 1)
+#define sja1105_hdr_buf(hdr_bufs, chunk) \
+ ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
+
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
- * address reg_addr, taking size_bytes from *packed_buf
+ * address reg_addr, taking @len bytes from *buf
* - SPI_READ: creates and sends an SPI read message from absolute
- * address reg_addr, writing size_bytes into *packed_buf
- *
- * This function should only be called if it is priorly known that
- * @size_bytes is smaller than SIZE_SPI_MSG_MAXLEN. Larger packed buffers
- * are chunked in smaller pieces by sja1105_spi_send_long_packed_buf below.
+ * address reg_addr, writing @len bytes into *buf
*/
-int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- void *packed_buf, size_t size_bytes)
+static int sja1105_xfer(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
+ size_t len, struct ptp_system_timestamp *ptp_sts)
{
- u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
- u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
- const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER;
- struct sja1105_spi_message msg = {0};
- int rc;
+ struct sja1105_chunk chunk = {
+ .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
+ .reg_addr = reg_addr,
+ .buf = buf,
+ };
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer *xfers;
+ int num_chunks;
+ int rc, i = 0;
+ u8 *hdr_bufs;
- if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX)
- return -ERANGE;
+ num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);
- msg.access = rw;
- msg.address = reg_addr;
- if (rw == SPI_READ)
- msg.read_count = size_bytes / 4;
+ /* One transfer for each message header, one for each message
+ * payload (chunk).
+ */
+ xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
+ GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
- sja1105_spi_message_pack(tx_buf, &msg);
+ /* Packed buffers for the num_chunks SPI message headers,
+ * stored as a contiguous array
+ */
+ hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
+ GFP_KERNEL);
+ if (!hdr_bufs) {
+ kfree(xfers);
+ return -ENOMEM;
+ }
- if (rw == SPI_WRITE)
- memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
- packed_buf, size_bytes);
+ for (i = 0; i < num_chunks; i++) {
+ struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
+ struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
+ u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
+ struct spi_transfer *ptp_sts_xfer;
+ struct sja1105_spi_message msg;
+
+ /* Populate the transfer's header buffer */
+ msg.address = chunk.reg_addr;
+ msg.access = rw;
+ if (rw == SPI_READ)
+ msg.read_count = chunk.len / 4;
+ else
+ /* Ignored */
+ msg.read_count = 0;
+ sja1105_spi_message_pack(hdr_buf, &msg);
+ hdr_xfer->tx_buf = hdr_buf;
+ hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ /* Populate the transfer's data buffer */
+ if (rw == SPI_READ)
+ chunk_xfer->rx_buf = chunk.buf;
+ else
+ chunk_xfer->tx_buf = chunk.buf;
+ chunk_xfer->len = chunk.len;
+
+ /* Request timestamping for the transfer. Instead of letting
+ * callers specify which byte they want to timestamp, we can
+ * make certain assumptions:
+ * - A read operation will request a software timestamp when
+ * what's being read is the PTP time. That is snapshotted by
+ * the switch hardware at the end of the command portion
+ * (hdr_xfer).
+ * - A write operation will request a software timestamp on
+ * actions that modify the PTP time. Taking clock stepping as
+ * an example, the switch writes the PTP time at the end of
+ * the data portion (chunk_xfer).
+ */
+ if (rw == SPI_READ)
+ ptp_sts_xfer = hdr_xfer;
+ else
+ ptp_sts_xfer = chunk_xfer;
+ ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts = ptp_sts;
+
+ /* Calculate next chunk */
+ chunk.buf += chunk.len;
+ chunk.reg_addr += chunk.len / 4;
+ chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
+ SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ /* De-assert the chip select after each chunk. */
+ if (chunk.len)
+ chunk_xfer->cs_change = 1;
+ }
- rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len);
+ rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
if (rc < 0)
- return rc;
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
- if (rw == SPI_READ)
- memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
- size_bytes);
+ kfree(hdr_bufs);
+ kfree(xfers);
- return 0;
+ return rc;
+}
+
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
+{
+ return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}
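
To make the chunking concrete, with illustrative numbers: a 300-byte transfer with SJA1105_SIZE_SPI_MSG_MAXLEN = 256 yields num_chunks = 2, hence 4 SPI transfers (one header plus one payload per chunk). The first chunk carries 256 bytes at reg_addr, the second the remaining 44 bytes at reg_addr + 64, since the register address advances in 4-byte words (chunk.len / 4). cs_change is set only on the first chunk's payload transfer, because the recomputed chunk.len is 0 after the last iteration.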
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
- * address reg_addr, taking size_bytes from *packed_buf
+ * address reg_addr
* - SPI_READ: creates and sends an SPI read message from absolute
- * address reg_addr, writing size_bytes into *packed_buf
+ * address reg_addr
*
* The u64 *value is unpacked, meaning that it's stored in the native
* CPU endianness and directly usable by software running on the core.
- *
- * This is a wrapper around sja1105_spi_send_packed_buf().
*/
-int sja1105_spi_send_int(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u64 *value, u64 size_bytes)
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts)
{
- u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN];
+ u8 packed_buf[8];
int rc;
- if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN)
- return -ERANGE;
-
if (rw == SPI_WRITE)
- sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0,
- size_bytes);
+ sja1105_pack(packed_buf, value, 63, 0, 8);
- rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf,
- size_bytes);
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
if (rw == SPI_READ)
- sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0,
- size_bytes);
+ sja1105_unpack(packed_buf, value, 63, 0, 8);
return rc;
}
-/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
- * must be sent/received. Splitting the buffer into chunks and assembling
- * those into SPI messages is done automatically by this function.
- */
-int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 base_addr,
- void *packed_buf, u64 buf_len)
+/* Same as above, but transfers only a 4 byte word */
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts)
{
- struct chunk {
- void *buf_ptr;
- int len;
- u64 spi_address;
- } chunk;
- int distance_to_end;
+ u8 packed_buf[4];
+ u64 tmp;
int rc;
- /* Initialize chunk */
- chunk.buf_ptr = packed_buf;
- chunk.spi_address = base_addr;
- chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN);
-
- while (chunk.len) {
- rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address,
- chunk.buf_ptr, chunk.len);
- if (rc < 0)
- return rc;
-
- chunk.buf_ptr += chunk.len;
- chunk.spi_address += chunk.len / 4;
- distance_to_end = (uintptr_t)(packed_buf + buf_len -
- chunk.buf_ptr);
- chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ if (rw == SPI_WRITE) {
+ /* The packing API only supports u64 as CPU word size,
+ * so we need to convert.
+ */
+ tmp = *value;
+ sja1105_pack(packed_buf, &tmp, 31, 0, 4);
}
- return 0;
-}
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
-/* Back-ported structure from UM11040 Table 112.
- * Reset control register (addr. 100440h)
- * In the SJA1105 E/T, only warm_rst and cold_rst are
- * supported (exposed in UM10944 as rst_ctrl), but the bit
- * offsets of warm_rst and cold_rst are actually reversed.
- */
-struct sja1105_reset_cmd {
- u64 switch_rst;
- u64 cfg_rst;
- u64 car_rst;
- u64 otp_rst;
- u64 warm_rst;
- u64 cold_rst;
- u64 por_rst;
-};
-
-static void
-sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
-{
- const int size = SJA1105_SIZE_RESET_CMD;
-
- memset(buf, 0, size);
-
- sja1105_pack(buf, &reset->cold_rst, 3, 3, size);
- sja1105_pack(buf, &reset->warm_rst, 2, 2, size);
-}
-
-static void
-sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
-{
- const int size = SJA1105_SIZE_RESET_CMD;
-
- memset(buf, 0, size);
+ if (rw == SPI_READ) {
+ sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
+ *value = tmp;
+ }
- sja1105_pack(buf, &reset->switch_rst, 8, 8, size);
- sja1105_pack(buf, &reset->cfg_rst, 7, 7, size);
- sja1105_pack(buf, &reset->car_rst, 5, 5, size);
- sja1105_pack(buf, &reset->otp_rst, 4, 4, size);
- sja1105_pack(buf, &reset->warm_rst, 3, 3, size);
- sja1105_pack(buf, &reset->cold_rst, 2, 2, size);
- sja1105_pack(buf, &reset->por_rst, 1, 1, size);
+ return rc;
}
-static int sja1105et_reset_cmd(const void *ctx, const void *data)
+static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
- const struct sja1105_private *priv = ctx;
- const struct sja1105_reset_cmd *reset = data;
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- struct device *dev = priv->ds->dev;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD];
-
- if (reset->switch_rst ||
- reset->cfg_rst ||
- reset->car_rst ||
- reset->otp_rst ||
- reset->por_rst) {
- dev_err(dev, "Only warm and cold reset is supported "
- "for SJA1105 E/T!\n");
- return -EINVAL;
- }
-
- if (reset->warm_rst)
- dev_dbg(dev, "Warm reset requested\n");
- if (reset->cold_rst)
- dev_dbg(dev, "Cold reset requested\n");
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
- sja1105et_reset_cmd_pack(packed_buf, reset);
+ sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
- packed_buf, SJA1105_SIZE_RESET_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
}
-static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
+static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
- const struct sja1105_private *priv = ctx;
- const struct sja1105_reset_cmd *reset = data;
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- struct device *dev = priv->ds->dev;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD];
-
- if (reset->switch_rst)
- dev_dbg(dev, "Main reset for all functional modules requested\n");
- if (reset->cfg_rst)
- dev_dbg(dev, "Chip configuration reset requested\n");
- if (reset->car_rst)
- dev_dbg(dev, "Clock and reset control logic reset requested\n");
- if (reset->otp_rst)
- dev_dbg(dev, "OTP read cycle for reading product "
- "config settings requested\n");
- if (reset->warm_rst)
- dev_dbg(dev, "Warm reset requested\n");
- if (reset->cold_rst)
- dev_dbg(dev, "Cold reset requested\n");
- if (reset->por_rst)
- dev_dbg(dev, "Power-on reset requested\n");
-
- sja1105pqrs_reset_cmd_pack(packed_buf, reset);
-
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
- packed_buf, SJA1105_SIZE_RESET_CMD);
-}
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
-static int sja1105_cold_reset(const struct sja1105_private *priv)
-{
- struct sja1105_reset_cmd reset = {0};
+ sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
- reset.cold_rst = 1;
- return priv->info->reset_cmd(priv, &reset);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
}
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited)
{
const struct sja1105_regs *regs = priv->info->regs;
- u64 inhibit_cmd;
+ u32 inhibit_cmd;
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control,
- &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, NULL);
if (rc < 0)
return rc;
@@ -300,8 +250,8 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv,
else
inhibit_cmd &= ~port_bitmap;
- return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control,
- &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, NULL);
}
struct sja1105_status {
@@ -339,9 +289,7 @@ static int sja1105_status_get(struct sja1105_private *priv,
u8 packed_buf[4];
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
- regs->status,
- packed_buf, 4);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
if (rc < 0)
return rc;
@@ -429,7 +377,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
usleep_range(500, 1000);
do {
/* Put the SJA1105 in programming mode */
- rc = sja1105_cold_reset(priv);
+ rc = priv->info->reset_cmd(priv->ds);
if (rc < 0) {
dev_err(dev, "Failed to reset switch, retrying...\n");
continue;
@@ -437,9 +385,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
/* Wait for the switch to come out of reset */
usleep_range(1000, 5000);
/* Upload the static config to the device */
- rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE,
- regs->config,
- config_buf, buf_len);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
+ config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Failed to upload config, retrying...\n");
continue;
@@ -482,12 +429,6 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
}
- rc = sja1105_ptp_reset(priv);
- if (rc < 0)
- dev_err(dev, "Failed to reset PTP clock: %d\n", rc);
-
- dev_info(dev, "Reset switch and programmed static config\n");
-
out:
kfree(config_buf);
return rc;
@@ -516,10 +457,11 @@ static struct sja1105_regs sja1105et_regs = {
.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
.ptp_control = 0x17,
- .ptpclk = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
- .ptptsclk = 0x1B, /* Spans 0x1B to 0x1C */
+ .ptpclkcorp = 0x1D,
};
static struct sja1105_regs sja1105pqrs_regs = {
@@ -547,10 +489,11 @@ static struct sja1105_regs sja1105pqrs_regs = {
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
.ptp_control = 0x18,
- .ptpclk = 0x19,
+ .ptpclkval = 0x19,
.ptpclkrate = 0x1B,
- .ptptsclk = 0x1C,
+ .ptpclkcorp = 0x1E,
};
struct sja1105_info sja1105e_info = {
@@ -563,7 +506,7 @@ struct sja1105_info sja1105e_info = {
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
- .ptp_cmd = sja1105et_ptp_cmd,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.regs = &sja1105et_regs,
.name = "SJA1105E",
};
@@ -577,7 +520,7 @@ struct sja1105_info sja1105t_info = {
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
- .ptp_cmd = sja1105et_ptp_cmd,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.regs = &sja1105et_regs,
.name = "SJA1105T",
};
@@ -592,7 +535,7 @@ struct sja1105_info sja1105p_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105P",
};
@@ -607,7 +550,7 @@ struct sja1105_info sja1105q_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105Q",
};
@@ -622,7 +565,7 @@ struct sja1105_info sja1105r_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105R",
};
@@ -638,6 +581,6 @@ struct sja1105_info sja1105s_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.name = "SJA1105S",
};
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 33eca6a82ec5..26b925b5dace 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -10,6 +10,11 @@
#define SJA1105_TAS_MAX_DELTA BIT(19)
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
+#define work_to_sja1105_tas(d) \
+ container_of((d), struct sja1105_tas_data, tas_work)
+#define tas_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tas_data)
+
/* This is not a preprocessor macro because the "ns" argument may or may not be
* s64 at caller side. This ensures it is properly type-cast before div_s64.
*/
@@ -18,6 +23,100 @@ static s64 ns_to_sja1105_delta(s64 ns)
return div_s64(ns, 200);
}
+static s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
+
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ * now - base_time
+ * N >= ---------------
+ * cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate when only integer division tools are available.
+ */
+static s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
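+
For example, with illustrative numbers: base_time = 1000, cycle_time = 300 and now = 1900 give a = 900 and n = div_s64(900 + 299, 300) = 3, so the function returns 1000 + 3 * 300 = 1900, the first base time that is not in the past.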
+
+static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ s64 earliest_base_time = S64_MAX;
+ s64 latest_base_time = 0;
+ s64 its_cycle_time = 0;
+ s64 max_cycle_time = 0;
+ int port;
+
+ tas_data->enabled = false;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ tas_data->enabled = true;
+
+ if (max_cycle_time < offload->cycle_time)
+ max_cycle_time = offload->cycle_time;
+ if (latest_base_time < offload->base_time)
+ latest_base_time = offload->base_time;
+ if (earliest_base_time > offload->base_time) {
+ earliest_base_time = offload->base_time;
+ its_cycle_time = offload->cycle_time;
+ }
+ }
+
+ if (!tas_data->enabled)
+ return 0;
+
+ /* Roll the earliest base time over until it is in a comparable
+ * time base with the latest, then compare their deltas.
+ * We want to enforce that all ports' base times are within
+	 * SJA1105_TAS_MAX_DELTA deltas (of 200 ns each) of one another.
+ */
+ earliest_base_time = future_base_time(earliest_base_time,
+ its_cycle_time,
+ latest_base_time);
+ while (earliest_base_time > latest_base_time)
+ earliest_base_time -= its_cycle_time;
+ if (latest_base_time - earliest_base_time >
+ sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
+ dev_err(ds->dev,
+			"Base times too far apart: min %lld max %lld\n",
+ earliest_base_time, latest_base_time);
+ return -ERANGE;
+ }
+
+ tas_data->earliest_base_time = earliest_base_time;
+ tas_data->max_cycle_time = max_cycle_time;
+
+ dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
+ dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
+ dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
+
+ return 0;
+}
+
/* Lo and behold: the egress scheduler from hell.
*
 * At the hardware level, the Time-Aware Shaper holds a global linear array of
@@ -99,7 +198,11 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
int num_cycles = 0;
int cycle = 0;
int i, k = 0;
- int port;
+ int port, rc;
+
+ rc = sja1105_tas_set_runtime_params(priv);
+ if (rc < 0)
+ return rc;
/* Discard previous Schedule Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
@@ -184,11 +287,13 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_entry_points = table->entries;
/* Finally start populating the static config tables */
- schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE;
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
schedule_entry_points_params->actsubsch = num_cycles - 1;
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
offload = tas_data->offload[port];
if (!offload)
@@ -196,15 +301,21 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_start_idx = k;
schedule_end_idx = k + offload->num_entries - 1;
- /* TODO this is the base time for the port's subschedule,
- * relative to PTPSCHTM. But as we're using the standalone
- * clock source and not PTP clock as time reference, there's
- * little point in even trying to put more logic into this,
- * like preserving the phases between the subschedules of
- * different ports. We'll get all of that when switching to the
- * PTP clock source.
+ /* This is the base time expressed as a number of TAS ticks
+ * relative to PTPSCHTM, which we'll (perhaps improperly) call
+ * the operational base time.
+ */
+ rbt = future_base_time(offload->base_time,
+ offload->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ /* UM10944.pdf 4.2.2. Schedule Entry Points table says that
+	 * delta cannot be zero, which is unfortunate. Advance all relative
+ * base times by 1 TAS delta, so that even the earliest base
+ * time becomes 1 in relative terms. Then start the operational
+ * base time (PTPSCHTM) one TAS delta earlier than planned.
*/
- entry_point_delta = 1;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
schedule_entry_points[cycle].subschindx = cycle;
schedule_entry_points[cycle].delta = entry_point_delta;
@@ -352,7 +463,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}
/* The cycle time extension is the amount of time the last cycle from
@@ -400,11 +511,306 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+}
+
+static int sja1105_tas_check_running(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
+ if (rc < 0)
+ return rc;
+
+ if (cmd.ptpstrtsch == 1)
+ /* Schedule successfully started */
+ tas_data->state = SJA1105_TAS_STATE_RUNNING;
+ else if (cmd.ptpstopsch == 1)
+ /* Schedule is stopped */
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ else
+ /* Schedule is probably not configured with PTP clock source */
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/* Write to PTPCLKCORP */
+static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
+ u64 correction)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
+ &ptpclkcorp, NULL);
+}
+
+/* Write to PTPSCHTM */
+static int sja1105_tas_set_base_time(struct sja1105_private *priv,
+ u64 base_time)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptpschtm = ns_to_sja1105_ticks(base_time);
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
+ &ptpschtm, NULL);
+}
+
+static int sja1105_tas_start(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Starting the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
+ tas_data->state == SJA1105_TAS_STATE_RUNNING) {
+ dev_err(ds->dev, "TAS already started\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstrtsch = 1;
+ cmd->ptpstopsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
+
+ return 0;
+}
+
+static int sja1105_tas_stop(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Stopping the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
+ dev_err(ds->dev, "TAS already disabled\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstopsch = 1;
+ cmd->ptpstrtsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+
+ return 0;
+}
+
+/* The schedule engine and the PTP clock are driven by the same oscillator, and
+ * they run in parallel. But whilst the PTP clock can keep an absolute
+ * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
+ * up a delta, which is 200ns), and wrapping around at the end of each cycle.
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
+ * (in PTP domain).
+ * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
+ * a software servo, and the schedule engine clock runs in parallel to the PTP
+ * clock, there is logic internal to the switch that periodically keeps the
+ * schedule engine from drifting away. The interval at which this internal
+ * syntonization happens is the PTP clock correction period (PTPCLKCORP). It
+ * is a value also in the PTP clock domain, and is also rate-corrected.
+ * To be precise, during a correction period, there is logic to determine by
+ * how many scheduler clock ticks the PTP clock has drifted. At the end of
+ * each correction period/beginning of the new one, the length of a delta is
+ * shrunk or expanded by an integer number of ticks, compared with the
+ * typical 25. So a delta lasts for 200ns (or 25 ticks) only on average.
+ * Sometimes it is longer, sometimes it is shorter. The internal
+ * syntonization logic can adjust by at most 5 ticks in every 20 ticks.
+ *
+ * The first implication is that you should choose your schedule correction
+ * period to be an integer multiple of the schedule length. Preferably one.
+ * If there are active schedules on multiple ports, then the correction
+ * period needs to be a multiple of them all. Given the restriction that
+ * the cycle times have to be multiples of one another anyway, this means
+ * the correction period can simply be the largest cycle time, hence the
+ * current choice. This way, the updates are always synchronous to the
+ * transmission cycle, and therefore predictable.
+ *
+ * The second implication is that at the beginning of a correction period, the
+ * first few deltas will be modulated in time, until the schedule engine is
+ * properly phase-aligned with the PTP clock. For this reason, you should place
+ * your best-effort traffic at the beginning of a cycle, and your
+ * time-triggered traffic afterwards.
+ *
+ * The third implication is that once the schedule engine is started, it can
+ * only adjust for so much drift within a correction period. In the servo you
+ * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
+ * want to do the latter, you need to stop and restart the schedule engine,
+ * which is what the state machine handles.
+ */
+static void sja1105_tas_state_machine(struct work_struct *work)
+{
+ struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
+ struct sja1105_private *priv = tas_to_sja1105(tas_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct timespec64 base_time_ts, now_ts;
+ struct dsa_switch *ds = priv->ds;
+ struct timespec64 diff;
+ s64 base_time, now;
+ int rc = 0;
+
+ mutex_lock(&ptp_data->lock);
+
+ switch (tas_data->state) {
+ case SJA1105_TAS_STATE_DISABLED:
+ /* Can't do anything at all if clock is still being stepped */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
+ break;
+
+ rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
+ if (rc < 0)
+ break;
+
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ /* Plan to start the earliest schedule first. The others
+ * will be started in hardware, by way of their respective
+ * entry-point deltas.
+ * Try our best to avoid fringe cases (a race condition
+ * between ptpschtm and ptpstrtsch) by pushing the
+ * oper_base_time at least one second into the future from
+ * now. This is not ideal, but it only needs to buy us time
+ * until the sja1105_tas_start command below gets executed.
+ */
+ base_time = future_base_time(tas_data->earliest_base_time,
+ tas_data->max_cycle_time,
+ now + 1ull * NSEC_PER_SEC);
+ base_time -= sja1105_delta_to_ns(1);
+
+ rc = sja1105_tas_set_base_time(priv, base_time);
+ if (rc < 0)
+ break;
+
+ tas_data->oper_base_time = base_time;
+
+ rc = sja1105_tas_start(priv);
+ if (rc < 0)
+ break;
+
+ base_time_ts = ns_to_timespec64(base_time);
+ now_ts = ns_to_timespec64(now);
+
+ dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
+ base_time_ts.tv_sec, base_time_ts.tv_nsec,
+ now_ts.tv_sec, now_ts.tv_nsec);
+
+ break;
+
+ case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ /* Clock was stepped... bad news for TAS */
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ /* Check if TAS has actually started, by comparing the
+ * scheduled start time with the SJA1105 PTP clock
+ */
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ if (now < tas_data->oper_base_time) {
+ /* TAS has not started yet */
+ diff = ns_to_timespec64(tas_data->oper_base_time - now);
+ dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
+ diff.tv_sec, diff.tv_nsec);
+ break;
+ }
+
+ /* Time elapsed, what happened? */
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ /* TAS should have started by now, but didn't */
+ dev_err(ds->dev,
+ "TAS not started despite time elapsed\n");
+
+ break;
+
+ case SJA1105_TAS_STATE_RUNNING:
+ /* Clock was stepped... bad news for TAS */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ dev_err(ds->dev, "TAS surprisingly stopped\n");
+
+ break;
+
+ default:
+ if (net_ratelimit())
+ dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
+ }
+
+ if (rc && net_ratelimit())
+ dev_err(ds->dev, "An operation returned %d\n", rc);
+
+ mutex_unlock(&ptp_data->lock);
+}
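
The DISABLED branch above relies on future_base_time() to push the base time forward by whole cycles. Below is a minimal sketch of the assumed semantics (the in-tree helper may differ; div64_s64() is from linux/math64.h):

/* Hedged sketch: advance base_time by an integer number of cycles so the
 * result is not in the past relative to min_ts. With a max cycle time of
 * e.g. 1 ms, PTPCLKCORP would then hold 125000 ticks (1 ms / 8 ns).
 */
static s64 future_base_time_sketch(s64 base_time, s64 cycle_time, s64 min_ts)
{
	s64 n;

	if (base_time >= min_ts)
		return base_time;

	/* Whole cycles needed to move base_time past min_ts */
	n = div64_s64(min_ts - base_time + cycle_time - 1, cycle_time);

	return base_time + n * cycle_time;
}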
+
+void sja1105_tas_clockstep(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ /* No reason to schedule the workqueue, nothing changed */
+ if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
+ schedule_work(&tas_data->tas_work);
}
void sja1105_tas_setup(struct dsa_switch *ds)
{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ tas_data->last_op = SJA1105_PTP_NONE;
}
void sja1105_tas_teardown(struct dsa_switch *ds)
@@ -413,6 +819,8 @@ void sja1105_tas_teardown(struct dsa_switch *ds)
struct tc_taprio_qopt_offload *offload;
int port;
+ cancel_work_sync(&priv->tas_data.tas_work);
+
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
offload = priv->tas_data.offload[port];
if (!offload)
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0aad212d88b2..b226c3dfd5b1 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -8,8 +8,27 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
+enum sja1105_tas_state {
+ SJA1105_TAS_STATE_DISABLED,
+ SJA1105_TAS_STATE_ENABLED_NOT_RUNNING,
+ SJA1105_TAS_STATE_RUNNING,
+};
+
+enum sja1105_ptp_op {
+ SJA1105_PTP_NONE,
+ SJA1105_PTP_CLOCKSTEP,
+ SJA1105_PTP_ADJUSTFREQ,
+};
+
struct sja1105_tas_data {
struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ enum sja1105_tas_state state;
+ enum sja1105_ptp_op last_op;
+ struct work_struct tas_work;
+ s64 earliest_base_time;
+ s64 oper_base_time;
+ u64 max_cycle_time;
+ bool enabled;
};
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
@@ -19,6 +38,10 @@ void sja1105_tas_setup(struct dsa_switch *ds);
void sja1105_tas_teardown(struct dsa_switch *ds);
+void sja1105_tas_clockstep(struct dsa_switch *ds);
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds);
+
#else
/* C doesn't allow empty structures, bah! */
@@ -36,6 +59,10 @@ static inline void sja1105_tas_setup(struct dsa_switch *ds) { }
static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }
+static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 614377ef7956..42c1574d45f2 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1178,9 +1178,12 @@ int vsc73xx_probe(struct vsc73xx *vsc)
* We allocate 8 ports and avoid access to the nonexistent
* ports.
*/
- vsc->ds = dsa_switch_alloc(dev, 8);
+ vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
+
+ vsc->ds->dev = dev;
+ vsc->ds->num_ports = 8;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 54e4d8b07f0e..3031a5fc5427 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -51,41 +51,15 @@ static void set_multicast_list(struct net_device *dev)
{
}
-struct pcpu_dstats {
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static void dummy_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_dstats *dstats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- dstats = per_cpu_ptr(dev->dstats, i);
- do {
- start = u64_stats_fetch_begin_irq(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpackets;
- }
+ dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
}
static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_packets++;
- dstats->tx_bytes += skb->len;
- u64_stats_update_end(&dstats->syncp);
+ dev_lstats_add(dev, skb->len);
skb_tx_timestamp(skb);
dev_kfree_skb(skb);
@@ -94,8 +68,8 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
- if (!dev->dstats)
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+ if (!dev->lstats)
return -ENOMEM;
return 0;
@@ -103,7 +77,7 @@ static int dummy_dev_init(struct net_device *dev)
static void dummy_dev_uninit(struct net_device *dev)
{
- free_percpu(dev->dstats);
+ free_percpu(dev->lstats);
}
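
The conversion above assumes dev_lstats_add()/dev_lstats_read() wrap the same per-CPU u64_stats pattern that the removed open-coded version used. Roughly (a sketch, not a verbatim copy of the netdevice.h helpers):

/* Hedged sketch of the assumed per-CPU lstats update on the xmit path */
static inline void dev_lstats_add_sketch(struct net_device *dev,
					 unsigned int len)
{
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

	u64_stats_update_begin(&lstats->syncp);
	lstats->bytes += len;
	lstats->packets++;
	u64_stats_update_end(&lstats->syncp);
}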
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e8e9c166185d..4ded81b27d0a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
-source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 05abebc17804..f8f38dcb5f8a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
-obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bb032be7fe31..4cd53fc338b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -730,12 +730,12 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
struct device_node *np = priv->device->of_node;
- int ret = 0;
+ int ret;
- priv->phy_iface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np, &priv->phy_iface);
/* Avoid getting the PHY address and creating the MDIO bus if no PHY is present */
- if (!priv->phy_iface)
+ if (ret)
return 0;
/* try to get PHY address from device tree, use PHY autodetection if
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 16553d92fad2..a3250dcf7d53 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -133,7 +133,7 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
u64 *ptr;
int i, j;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
/* Tx stats */
ring = &adapter->tx_ring[i];
@@ -205,7 +205,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
@@ -214,7 +214,7 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
const struct ena_stats *ena_stats;
int i, j;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
/* Tx stats */
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
@@ -333,7 +333,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->tx_ring[i].smoothed_interval = val;
}
@@ -344,7 +344,7 @@ static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->rx_ring[i].smoothed_interval = val;
}
@@ -612,7 +612,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = adapter->num_queues;
+ info->data = adapter->num_io_queues;
rc = 0;
break;
case ETHTOOL_GRXFH:
@@ -734,14 +734,20 @@ static void ena_get_channels(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = adapter->num_queues;
- channels->max_tx = adapter->num_queues;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = adapter->num_queues;
- channels->tx_count = adapter->num_queues;
- channels->other_count = 0;
- channels->combined_count = 0;
+ channels->max_combined = adapter->max_num_io_queues;
+ channels->combined_count = adapter->num_io_queues;
+}
+
+static int ena_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ u32 count = channels->combined_count;
+ /* The check for max value is already done in ethtool */
+ if (count < ENA_MIN_NUM_IO_QUEUES)
+ return -EINVAL;
+
+ return ena_update_queue_count(adapter, count);
}
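
From userspace, the new handler is expected to be reached via the standard channels interface, e.g. `ethtool -L <iface> combined 4`; the ethtool core validates the request against the max_combined value reported by ena_get_channels() before ena_set_channels() runs, which is why only the lower bound is checked here.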
static int ena_get_tunable(struct net_device *netdev,
@@ -807,6 +813,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
.get_channels = ena_get_channels,
+ .set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c487d2a7d6dd..d46a912002ff 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -101,7 +101,7 @@ static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->rx_ring[i].mtu = mtu;
}
@@ -129,10 +129,10 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
u32 i;
int rc;
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
+ adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
if (!adapter->netdev->rx_cpu_rmap)
return -ENOMEM;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
int irq_idx = ENA_IO_IRQ_IDX(i);
rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
@@ -172,7 +172,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_dev = adapter->ena_dev;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
txr = &adapter->tx_ring[i];
rxr = &adapter->rx_ring[i];
@@ -294,7 +294,7 @@ static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_setup_tx_resources(adapter, i);
if (rc)
goto err_setup_tx;
@@ -322,7 +322,7 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_tx_resources(adapter, i);
}
@@ -428,7 +428,7 @@ static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_setup_rx_resources(adapter, i);
if (rc)
goto err_setup_rx;
@@ -456,7 +456,7 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_rx_resources(adapter, i);
}
@@ -600,7 +600,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
struct ena_ring *rx_ring;
int i, rc, bufs_num;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
bufs_num = rx_ring->ring_size - 1;
rc = ena_refill_rx_bufs(rx_ring, bufs_num);
@@ -616,7 +616,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_rx_bufs(adapter, i);
}
@@ -688,7 +688,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
tx_ring = &adapter->tx_ring[i];
ena_free_tx_bufs(tx_ring);
}
@@ -699,7 +699,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
@@ -710,7 +710,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
@@ -1331,7 +1331,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
* the number of potential io queues is the minimum of what the device
* supports and the number of vCPUs.
*/
-static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+static int ena_enable_msix(struct ena_adapter *adapter)
{
int msix_vecs, irq_cnt;
@@ -1342,7 +1342,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
}
/* Reserve the max MSI-X vectors we might need */
- msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+ msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1359,7 +1359,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_notice(adapter, probe, adapter->netdev,
"enable only %d MSI-X (out of %d), reduce the number of queues\n",
irq_cnt, msix_vecs);
- adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
+ adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
if (ena_init_rx_cpu_rmap(adapter))
@@ -1397,7 +1397,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
netdev = adapter->netdev;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
cpu = i % num_online_cpus();
@@ -1529,7 +1529,7 @@ static void ena_del_napi(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
netif_napi_del(&adapter->ena_napi[i].napi);
}
@@ -1538,7 +1538,7 @@ static void ena_init_napi(struct ena_adapter *adapter)
struct ena_napi *napi;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
napi = &adapter->ena_napi[i];
netif_napi_add(adapter->netdev,
@@ -1555,7 +1555,7 @@ static void ena_napi_disable_all(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_disable(&adapter->ena_napi[i].napi);
}
@@ -1563,7 +1563,7 @@ static void ena_napi_enable_all(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_enable(&adapter->ena_napi[i].napi);
}
@@ -1673,7 +1673,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_create_io_tx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1741,7 +1741,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_create_io_rx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1764,7 +1764,7 @@ static void set_io_rings_size(struct ena_adapter *adapter,
{
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
adapter->tx_ring[i].ring_size = new_tx_size;
adapter->rx_ring[i].ring_size = new_rx_size;
}
@@ -1902,14 +1902,14 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
/* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);
/* schedule napi in case we had pending packets
* from the last time we disabled napi
*/
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
@@ -1984,13 +1984,13 @@ static int ena_open(struct net_device *netdev)
int rc;
/* Notify the stack of the actual queue counts. */
- rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
+ rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
if (rc) {
netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
return rc;
}
- rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
+ rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
if (rc) {
netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
return rc;
@@ -2043,14 +2043,30 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size)
{
- bool dev_up;
+ bool dev_was_up;
- dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
adapter->requested_tx_ring_size = new_tx_size;
adapter->requested_rx_ring_size = new_rx_size;
ena_init_io_rings(adapter);
- return dev_up ? ena_up(adapter) : 0;
+ return dev_was_up ? ena_up(adapter) : 0;
+}
+
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ bool dev_was_up;
+
+ dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ ena_close(adapter->netdev);
+ adapter->num_io_queues = new_channel_count;
+ /* We need to destroy the rss table so that the indirection
+ * table will be reinitialized by ena_up()
+ */
+ ena_com_rss_destroy(ena_dev);
+ ena_init_io_rings(adapter);
+ return dev_was_up ? ena_open(adapter->netdev) : 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
@@ -2495,7 +2511,7 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
u64 bytes, packets;
tx_ring = &adapter->tx_ring[i];
@@ -2682,14 +2698,13 @@ err_mmio_read_less:
return rc;
}
-static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
- int io_vectors)
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
struct device *dev = &adapter->pdev->dev;
int rc;
- rc = ena_enable_msix(adapter, io_vectors);
+ rc = ena_enable_msix(adapter);
if (rc) {
dev_err(dev, "Can not reserve msix vectors\n");
return rc;
@@ -2782,8 +2797,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
- rc = ena_enable_msix_and_set_admin_interrupts(adapter,
- adapter->num_queues);
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter);
if (rc) {
dev_err(&pdev->dev, "Enable MSI-X failed\n");
goto err_device_destroy;
@@ -2948,7 +2962,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
budget = ENA_MONITORED_TX_QUEUES;
- for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
+ for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
@@ -2965,7 +2979,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
break;
}
- adapter->last_monitored_tx_qid = i % adapter->num_queues;
+ adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
}
/* trigger napi schedule after 2 consecutive detections */
@@ -2995,7 +3009,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
refill_required =
@@ -3137,16 +3151,16 @@ static void ena_timer_service(struct timer_list *t)
mod_timer(&adapter->timer_service, jiffies + HZ);
}
-static int ena_calc_io_queue_num(struct pci_dev *pdev,
- struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
+ int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&get_feat_ctx->max_queue_ext.max_queue_ext;
- io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+ io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
max_queue_ext->max_rx_cq_num);
io_tx_sq_num = max_queue_ext->max_tx_sq_num;
@@ -3156,25 +3170,25 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
&get_feat_ctx->max_queues;
io_tx_sq_num = max_queues->max_sq_num;
io_tx_cq_num = max_queues->max_cq_num;
- io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+ io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
}
/* In case of LLQ use the llq fields for the tx SQ/CQ */
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
- io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
- io_queue_num = min_t(int, io_queue_num, io_rx_num);
- io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
- io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
+ max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmt and 1 IRQ for each IO direction */
- io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!io_queue_num)) {
+ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
+ if (unlikely(!max_num_io_queues)) {
dev_err(&pdev->dev, "The device doesn't have io queues\n");
return -EFAULT;
}
- return io_queue_num;
+ return max_num_io_queues;
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3302,7 +3316,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
- val = ethtool_rxfh_indir_default(i, adapter->num_queues);
+ val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
@@ -3349,7 +3363,7 @@ static void set_default_llq_configurations(struct ena_llq_configurations *llq_co
llq_config->llq_ring_entry_size_value = 128;
}
-static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -3358,7 +3372,7 @@ static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
u32 max_tx_queue_size;
u32 max_rx_queue_size;
- if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
@@ -3432,11 +3446,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
- int io_queue_num, bars, rc;
struct net_device *netdev;
static int adapters_found;
+ u32 max_num_io_queues;
char *queue_type_str;
bool wd_state;
+ int bars, rc;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -3497,27 +3512,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
calc_queue_ctx.pdev = pdev;
/* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
- * Updated during device initialization with the real granularity
- */
+ * Updated during device initialization with the real granularity
+ */
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
- io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_queue_size(&calc_queue_ctx);
- if (rc || io_queue_num <= 0) {
+ max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
+ rc = ena_calc_io_queue_size(&calc_queue_ctx);
+ if (rc || !max_num_io_queues) {
rc = -EFAULT;
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
- io_queue_num,
- calc_queue_ctx.rx_queue_size,
- calc_queue_ctx.tx_queue_size,
- (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
- "ENABLED" : "DISABLED");
-
/* dev zeroed in init_etherdev */
- netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
if (!netdev) {
dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
rc = -ENOMEM;
@@ -3545,7 +3553,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
- adapter->num_queues = io_queue_num;
+ adapter->num_io_queues = max_num_io_queues;
+ adapter->max_num_io_queues = max_num_io_queues;
+
adapter->last_monitored_tx_qid = 0;
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3569,7 +3579,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&adapter->syncp);
- rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter);
if (rc) {
dev_err(&pdev->dev,
"Failed to enable and set the admin interrupts\n");
@@ -3611,9 +3621,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_type_str = "Low Latency";
dev_info(&pdev->dev,
- "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
+ "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, io_queue_num, queue_type_str);
+ netdev->dev_addr, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 72ee51a82ec7..bffd778f2ce3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -82,6 +82,8 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_MIN_RING_SIZE (256)
+#define ENA_MIN_NUM_IO_QUEUES (1)
+
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
@@ -161,10 +163,10 @@ struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
struct pci_dev *pdev;
- u16 tx_queue_size;
- u16 rx_queue_size;
- u16 max_tx_queue_size;
- u16 max_rx_queue_size;
+ u32 tx_queue_size;
+ u32 rx_queue_size;
+ u32 max_tx_queue_size;
+ u32 max_rx_queue_size;
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
};
@@ -324,7 +326,8 @@ struct ena_adapter {
u32 rx_copybreak;
u32 max_mtu;
- int num_queues;
+ u32 num_io_queues;
+ u32 max_num_io_queues;
int msix_vecs;
@@ -387,6 +390,7 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 131cab855be7..6e0a6e234483 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -4,15 +4,8 @@
# aQuantia Ethernet Controller AQtion Linux Driver
# Copyright(c) 2014-2017 aQuantia Corporation.
#
-# Contact Information: <rdc-drv@aquantia.com>
-# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
-#
################################################################################
-#
-# Makefile for the AQtion(tm) Ethernet driver
-#
-
obj-$(CONFIG_AQTION) += atlantic.o
atlantic-objs := aq_main.o \
@@ -24,8 +17,11 @@ atlantic-objs := aq_main.o \
aq_ethtool.o \
aq_drvinfo.o \
aq_filters.o \
+ aq_phy.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
+
+atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 02f1b70c4e25..f0c41f7408e5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_cfg.h: Definition of configuration parameters and constants. */
@@ -27,7 +27,7 @@
#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
-#define AQ_CFG_IRQ_MASK 0x1FFU
+#define AQ_CFG_IRQ_MASK 0x3FFU
#define AQ_CFG_VECS_MAX 8U
#define AQ_CFG_TCS_MAX 8U
@@ -70,14 +70,11 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
-#define AQ_NIC_FC_OFF 0U
-#define AQ_NIC_FC_TX 1U
-#define AQ_NIC_FC_RX 2U
-#define AQ_NIC_FC_FULL 3U
-#define AQ_NIC_FC_AUTO 4U
-
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
+/* Default WOL modes used on initialization */
+#define AQ_CFG_WOL_MODES WAKE_MAGIC
+
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
#define AQ_CFG_IS_AUTONEG_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 24df132384fb..a1f99bef4a68 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ethtool.c: Definition of ethtool related functions. */
@@ -9,13 +9,18 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_ptp.h"
#include "aq_filters.h"
+#include <linux/ptp_clock_kernel.h>
+
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
memset(p, 0, regs_count * sizeof(u32));
aq_nic_get_regs(aq_nic, regs, p);
@@ -24,7 +29,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev,
static int aq_ethtool_get_regs_len(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
return regs_count * sizeof(u32);
}
@@ -89,11 +96,21 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
+static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "DMASystemLoopback",
+ "PKTSystemLoopback",
+ "DMANetworkLoopback",
+ "PHYInternalLoopback",
+ "PHYExternalLoopback",
+};
+
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) *
@@ -104,11 +121,15 @@ static void aq_ethtool_stats(struct net_device *ndev,
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
- u32 firmware_version = aq_nic_get_fw_version(aq_nic);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 firmware_version;
+ u32 regs_count;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ firmware_version = aq_nic_get_fw_version(aq_nic);
+ regs_count = aq_nic_get_regs_count(aq_nic);
strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
@@ -129,12 +150,15 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
- int i, si;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
u8 *p = data;
+ int i, si;
- if (stringset == ETH_SS_STATS) {
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
@@ -147,23 +171,63 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(p, aq_ethtool_priv_flag_names,
+ sizeof(aq_ethtool_priv_flag_names));
+ break;
}
}
-static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+static int aq_ethtool_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_hw_s *hw = aq_nic->aq_hw;
int ret = 0;
+
+ if (!aq_nic->aq_fw_ops->led_control)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_BLINK |
+ AQ_HW_LED_BLINK << 2 | AQ_HW_LED_BLINK << 4);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_DEFAULT);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return ret;
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+ int ret = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
switch (stringset) {
case ETH_SS_STATS:
ret = ARRAY_SIZE(aq_ethtool_stat_names) +
cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
break;
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
+ break;
default:
ret = -EOPNOTSUPP;
}
+
return ret;
}
@@ -175,7 +239,9 @@ static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
return sizeof(cfg->aq_rss.hash_secret_key);
}
@@ -184,9 +250,11 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
+ cfg = aq_nic_get_cfg(aq_nic);
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
@@ -196,6 +264,7 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
if (key)
memcpy(key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
+
return 0;
}
@@ -239,9 +308,11 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
u32 *rule_locs)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
int err = 0;
+ cfg = aq_nic_get_cfg(aq_nic);
+
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
@@ -266,8 +337,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
static int aq_ethtool_set_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -288,7 +359,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
@@ -302,6 +375,7 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
coal->rx_max_coalesced_frames = 1;
coal->tx_max_coalesced_frames = 1;
}
+
return 0;
}
@@ -309,7 +383,9 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
/* This is not yet supported
*/
@@ -351,13 +427,12 @@ static void aq_ethtool_get_wol(struct net_device *ndev,
struct ethtool_wolinfo *wol)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
+ cfg = aq_nic_get_cfg(aq_nic);
- if (cfg->wol)
- wol->wolopts |= WAKE_MAGIC;
+ wol->supported = AQ_NIC_WOL_MODES;
+ wol->wolopts = cfg->wol;
}
static int aq_ethtool_set_wol(struct net_device *ndev,
@@ -365,18 +440,50 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
int err = 0;
- if (wol->wolopts & WAKE_MAGIC)
- cfg->wol |= AQ_NIC_WOL_ENABLED;
- else
- cfg->wol &= ~AQ_NIC_WOL_ENABLED;
- err = device_set_wakeup_enable(&pdev->dev, wol->wolopts);
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (wol->wolopts & ~AQ_NIC_WOL_MODES)
+ return -EOPNOTSUPP;
+
+ cfg->wol = wol->wolopts;
+
+ err = device_set_wakeup_enable(&pdev->dev, !!cfg->wol);
return err;
}
+static int aq_ethtool_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ ethtool_op_get_ts_info(ndev, info);
+
+ if (!aq_nic->aq_ptp)
+ return 0;
+
+ info->so_timestamping |=
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp));
+
+ return 0;
+}
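
Once registered, the PHC index reported here should be visible to userspace via `ethtool -T <iface>`, which is what PTP daemons such as ptp4l typically use to discover the clock device.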
+
static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
{
u32 rate = 0;
@@ -481,7 +588,7 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 fc = aq_nic->aq_nic_cfg.flow_control;
+ u32 fc = aq_nic->aq_nic_cfg.fc.req;
pause->autoneg = 0;
@@ -503,14 +610,14 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
return -EOPNOTSUPP;
if (pause->rx_pause)
- aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_RX;
else
- aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_RX;
if (pause->tx_pause)
- aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_TX;
else
- aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_TX;
mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
@@ -523,23 +630,28 @@ static void aq_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
- ring->rx_pending = aq_nic_cfg->rxds;
- ring->tx_pending = aq_nic_cfg->txds;
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = cfg->rxds;
+ ring->tx_pending = cfg->txds;
- ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
- ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+ ring->rx_max_pending = cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = cfg->aq_hw_caps->txds_max;
}
static int aq_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
- int err = 0;
- bool ndev_running = false;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
- const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+ const struct aq_hw_caps_s *hw_caps;
+ bool ndev_running = false;
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ hw_caps = cfg->aq_hw_caps;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
err = -EOPNOTSUPP;
@@ -553,18 +665,18 @@ static int aq_set_ringparam(struct net_device *ndev,
aq_nic_free_vectors(aq_nic);
- aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
- aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
- aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+ cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
+ cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
- aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
- aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
- aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+ cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ cfg->txds = min(cfg->txds, hw_caps->txds_max);
+ cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
- for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
aq_nic->aq_vecs++) {
aq_nic->aq_vec[aq_nic->aq_vecs] =
- aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
err = -ENOMEM;
goto err_exit;
@@ -577,12 +689,61 @@ err_exit:
return err;
}
+static u32 aq_get_msg_level(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->msg_enable;
+}
+
+static void aq_set_msg_level(struct net_device *ndev, u32 data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic->msg_enable = data;
+}
+
+static u32 aq_ethtool_get_priv_flags(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->aq_nic_cfg.priv_flags;
+}
+
+static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 priv_flags;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ priv_flags = cfg->priv_flags;
+
+ if (flags & ~AQ_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ cfg->priv_flags = flags;
+
+ if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+
+ dev_open(ndev, NULL);
+ }
+ } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) {
+ aq_nic_set_loopback(aq_nic);
+ }
+
+ return 0;
+}
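
These handlers back the standard private-flags interface, e.g. `ethtool --set-priv-flags <iface> DMANetworkLoopback on`; toggling the DMA network loopback bit forces the close/open cycle above so the datapath is rebuilt, while the other loopback bits only require aq_nic_set_loopback().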
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
+ .set_phys_id = aq_ethtool_set_phys_id,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
.get_wol = aq_ethtool_get_wol,
.set_wol = aq_ethtool_set_wol,
@@ -598,10 +759,15 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
+ .get_msglevel = aq_get_msg_level,
+ .set_msglevel = aq_set_msg_level,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
+ .get_priv_flags = aq_ethtool_get_priv_flags,
+ .set_priv_flags = aq_ethtool_set_priv_flags,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
.set_link_ksettings = aq_ethtool_set_link_ksettings,
.get_coalesce = aq_ethtool_get_coalesce,
.set_coalesce = aq_ethtool_set_coalesce,
+ .get_ts_info = aq_ethtool_get_ts_info,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
index 632b5531db4a..6d5be5ebeb13 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -12,5 +12,6 @@
#include "aq_common.h"
extern const struct ethtool_ops aq_ethtool_ops;
+#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index aee827f07c16..6102251bb909 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2014-2017 aQuantia Corporation. */
+/* Copyright (C) 2014-2019 aQuantia Corporation. */
/* File aq_filters.c: RX filters related functions. */
@@ -89,12 +89,14 @@ static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
+ aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
- fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
- AQ_RX_FIRST_LOC_FL3L4,
- AQ_RX_LAST_LOC_FL3L4);
+ AQ_RX_FIRST_LOC_FL3L4, last_location);
return -EINVAL;
}
if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
@@ -124,12 +126,15 @@ aq_check_approve_fl2(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ u32 last_location = AQ_RX_LAST_LOC_FETHERT -
+ aq_nic->aq_hw_rx_fltrs.fet_reserved_count;
+
if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
- fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FETHERT,
- AQ_RX_LAST_LOC_FETHERT);
+ last_location);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 53d7478689a0..cc70c606b6ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
@@ -15,6 +15,9 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_HW_MAC_COUNTER_HZ 312500000ll
+#define AQ_HW_PHY_COUNTER_HZ 160000000ll
+
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
@@ -94,6 +97,7 @@ struct aq_stats_s {
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_PTP_AVAILABLE 0x01000000U
#define AQ_HW_LINK_DOWN 0x04000000U
#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_HW_FLAG_ERR_HW 0x80000000U
@@ -115,6 +119,23 @@ struct aq_stats_s {
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+#define AQ_HW_LED_BLINK 0x2U
+#define AQ_HW_LED_DEFAULT 0x0U
+
+enum aq_priv_flags {
+ AQ_HW_LOOPBACK_DMA_SYS,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ AQ_HW_LOOPBACK_DMA_NET,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+};
+
+#define AQ_HW_LOOPBACK_MASK (BIT(AQ_HW_LOOPBACK_DMA_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PKT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_DMA_NET) |\
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -133,8 +154,11 @@ struct aq_hw_s {
atomic_t dpc;
u32 mbox_addr;
u32 rpc_addr;
+ u32 settings_addr;
u32 rpc_tid;
struct hw_atl_utils_fw_rpc rpc;
+ s64 ptp_clk_offset;
+ u16 phy_id;
};
struct aq_ring_s;
@@ -235,7 +259,43 @@ struct aq_hw_ops {
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
+ int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
+
+ int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
+
+ int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_hwts_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+ void (*hw_get_ptp_ts)(struct aq_hw_s *self, u64 *stamp);
+
+ int (*hw_adj_clock_freq)(struct aq_hw_s *self, s32 delta);
+
+ int (*hw_adj_sys_clock)(struct aq_hw_s *self, s64 delta);
+
+ int (*hw_set_sys_clock)(struct aq_hw_s *self, u64 time, u64 ts);
+
+ int (*hw_ts_to_sys_clock)(struct aq_hw_s *self, u64 ts, u64 *time);
+
+ int (*hw_gpio_pulse)(struct aq_hw_s *self, u32 index, u64 start,
+ u32 period);
+
+ int (*hw_extts_gpio_enable)(struct aq_hw_s *self, u32 index,
+ u32 enable);
+
+ int (*hw_get_sync_ts)(struct aq_hw_s *self, u64 *ts);
+
+ u16 (*rx_extract_ts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
+ int (*extract_hwts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
+
+ int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
};
struct aq_fw_ops {
@@ -264,9 +324,19 @@ struct aq_fw_ops {
int (*set_flow_control)(struct aq_hw_s *self);
+ int (*led_control)(struct aq_hw_s *self, u32 mode);
+
+ int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
+
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
u8 *mac);
+ int (*send_fw_request)(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size);
+
+ void (*enable_ptp)(struct aq_hw_s *self, int enable);
+
int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 9c7a226d81b6..7dbf49adcea6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -59,6 +59,7 @@ u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
u64 value = aq_hw_read_reg(hw, reg);
value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
+
return value;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index bb65dd39f847..538f460a3da7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_main.c: Main file for aQuantia Linux driver. */
@@ -10,10 +10,13 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_ptp.h"
#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
@@ -50,8 +53,8 @@ struct net_device *aq_ndev_alloc(void)
static int aq_ndev_open(struct net_device *ndev)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
err = aq_nic_init(aq_nic);
if (err < 0)
@@ -71,19 +74,20 @@ static int aq_ndev_open(struct net_device *ndev)
err_exit:
if (err < 0)
- aq_nic_deinit(aq_nic);
+ aq_nic_deinit(aq_nic, true);
+
return err;
}
static int aq_ndev_close(struct net_device *ndev)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
err = aq_nic_stop(aq_nic);
if (err < 0)
goto err_exit;
- aq_nic_deinit(aq_nic);
+ aq_nic_deinit(aq_nic, true);
err_exit:
return err;
@@ -93,13 +97,33 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
+		/* Hardware adds the timestamp for PTPv2 802.1AS
+		 * and PTPv2 IPv4 UDP frames.
+		 * We have to push even general messages (port 320) to the
+		 * PTP queue explicitly. This is a limitation of the current
+		 * firmware and hardware PTP design of the chip. Otherwise
+		 * the PTP stream will fail to sync.
+		 */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ unlikely((ip_hdr(skb)->version == 4) &&
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ ((udp_hdr(skb)->dest == htons(319)) ||
+ (udp_hdr(skb)->dest == htons(320)))) ||
+ unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
+ return aq_ptp_xmit(aq_nic, skb);
+ }
+
+ skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
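For orientation, here is a minimal standalone sketch (plain C; the helper name and constants are hypothetical) of the classification rule the xmit path above applies: L2 PTP frames carry ethertype 0x88F7 (ETH_P_1588), while L4 PTP uses UDP destination port 319 for event messages and 320 for general messages.

#include <stdbool.h>
#include <stdint.h>

#define PTP_UDP_EV_PORT   319     /* PTP event messages */
#define PTP_UDP_GEN_PORT  320     /* PTP general messages */
#define PTP_ETHERTYPE     0x88F7  /* ETH_P_1588: L2 PTP */
#define PROTO_UDP         17

/* Hypothetical helper mirroring the test in aq_ndev_start_xmit(). */
static bool frame_needs_ptp_queue(uint16_t ethertype, uint8_t ip_proto,
				  uint16_t udp_dst)
{
	if (ethertype == PTP_ETHERTYPE)
		return true;
	return ip_proto == PROTO_UDP &&
	       (udp_dst == PTP_UDP_EV_PORT || udp_dst == PTP_UDP_GEN_PORT);
}

int main(void)
{
	/* An IPv4 UDP frame to port 319 must go to the PTP queue. */
	return frame_needs_ptp_queue(0x0800, PROTO_UDP, 319) ? 0 : 1;
}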
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+ int err;
+
+ err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
goto err_exit;
@@ -112,8 +136,8 @@ err_exit:
static int aq_ndev_set_features(struct net_device *ndev,
netdev_features_t features)
{
- bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
+ bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
bool need_ndev_restart = false;
struct aq_nic_cfg_s *aq_cfg;
@@ -197,6 +221,87 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
+ struct hwtstamp_config *config)
+{
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
+}
+
+static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
+ if (ret_val)
+ return ret_val;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return aq_ndev_hwtstamp_set(aq_nic, ifr);
+
+ case SIOCGHWTSTAMP:
+ return aq_ndev_hwtstamp_get(aq_nic, ifr);
+ }
+
+ return -EOPNOTSUPP;
+}
+
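The ioctl pair above follows the standard kernel hardware-timestamping ABI, so a stock userspace consumer works unchanged. A hedged sketch using only standard Linux UAPI headers ("eth0" is a placeholder interface name):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Exercises aq_ndev_hwtstamp_set() via the new ndo_do_ioctl hook. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}

Note that, as in aq_ndev_config_hwtstamp() above, the narrower V2 filters are coerced to HWTSTAMP_FILTER_PTP_V2_EVENT, which userspace learns from the config copied back.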
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
u16 vid)
{
@@ -234,6 +339,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 137c1de4c6ec..a17a4da7bc15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -12,6 +12,9 @@
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"
+#include "aq_phy.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -38,10 +41,6 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- struct aq_rss_parameters *rss_params = &cfg->aq_rss;
- int i = 0;
-
static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
@@ -49,6 +48,11 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
};
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params;
+ int i = 0;
+
+ rss_params = &cfg->aq_rss;
rss_params->hash_secret_key_size = sizeof(rss_key);
memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
@@ -75,7 +79,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
- cfg->flow_control = AQ_CFG_FC_MODE;
+ cfg->fc.req = AQ_CFG_FC_MODE;
+ cfg->wol = AQ_CFG_WOL_MODES;
cfg->mtu = AQ_CFG_MTU_DEF;
cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
@@ -139,18 +144,27 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (err)
return err;
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ self->aq_nic_cfg.fc.cur = fc;
+
if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
- pr_info("%s: link change old %d new %d\n",
- AQ_CFG_DRV_NAME, self->link_status.mbps,
- self->aq_hw->aq_link_status.mbps);
+ netdev_info(self->ndev, "%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
+ if (self->aq_ptp) {
+ aq_ptp_clock_init(self);
+ aq_ptp_tm_offset_set(self,
+ self->aq_hw->aq_link_status.mbps);
+ aq_ptp_link_change(self);
+ }
+
/* Driver has to update flow control settings on RX block
* on any link event.
* We should query FW whether it negotiated FC.
*/
- if (self->aq_fw_ops->get_flow_control)
- self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
if (self->aq_hw_ops->hw_set_fc)
self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
@@ -169,6 +183,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
+
return 0;
}
@@ -183,6 +198,7 @@ static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
BIT(self->aq_nic_cfg.link_irq_vec));
+
return IRQ_HANDLED;
}
@@ -192,6 +208,8 @@ static void aq_nic_service_task(struct work_struct *work)
service_task);
int err;
+ aq_ptp_service_task(self);
+
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
return;
@@ -211,7 +229,8 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, service_timer);
- mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
aq_ndev_schedule_work(&self->service_task);
}
@@ -290,9 +309,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_RXHASH | NETIF_F_SG |
NETIF_F_LRO | NETIF_F_TSO;
+ self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
@@ -312,8 +333,8 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
int aq_nic_init(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
- int err = 0;
unsigned int i = 0U;
+ int err = 0;
self->power_state = AQ_HW_POWER_STATE_D0;
mutex_lock(&self->fwreq_mutex);
@@ -327,10 +348,27 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ err = aq_phy_init(self->aq_hw);
+ }
+
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+
netif_carrier_off(self->ndev);
err_exit:
@@ -340,8 +378,8 @@ err_exit:
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
- int err = 0;
unsigned int i = 0U;
+ int err = 0;
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
@@ -361,6 +399,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
+ err = aq_ptp_ring_start(self);
+ if (err < 0)
+ goto err_exit;
+
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -371,6 +413,8 @@ int aq_nic_start(struct aq_nic_s *self)
INIT_WORK(&self->service_task, aq_nic_service_task);
+ aq_nic_set_loopback(self);
+
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
@@ -388,6 +432,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
+ err = aq_ptp_irq_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
if (self->aq_nic_cfg.link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
self->aq_nic_cfg.link_irq_vec);
@@ -420,30 +468,48 @@ err_exit:
return err;
}
-static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
- struct sk_buff *skb,
- struct aq_ring_s *ring)
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring)
{
- unsigned int ret = 0U;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int frag_count = 0U;
- unsigned int dx = ring->sw_tail;
struct aq_ring_buff_s *first = NULL;
- struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+ u8 ipver = ip_hdr(skb)->version;
+ struct aq_ring_buff_s *dx_buff;
bool need_context_tag = false;
+ unsigned int frag_count = 0U;
+ unsigned int ret = 0U;
+ unsigned int dx;
+ u8 l4proto = 0;
+
+ if (ipver == 4)
+ l4proto = ip_hdr(skb)->protocol;
+ else if (ipver == 6)
+ l4proto = ipv6_hdr(skb)->nexthdr;
+ dx = ring->sw_tail;
+ dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
if (unlikely(skb_is_gso(skb))) {
dx_buff->mss = skb_shinfo(skb)->gso_size;
- dx_buff->is_gso = 1U;
+ if (l4proto == IPPROTO_TCP) {
+ dx_buff->is_gso_tcp = 1U;
+ dx_buff->len_l4 = tcp_hdrlen(skb);
+ } else if (l4proto == IPPROTO_UDP) {
+ dx_buff->is_gso_udp = 1U;
+ dx_buff->len_l4 = sizeof(struct udphdr);
+			/* For UDP GSO, hardware does not replace the
+			 * packet length.
+			 */
+ udp_hdr(skb)->len = htons(dx_buff->mss +
+ dx_buff->len_l4);
+ } else {
+ WARN_ONCE(true, "Bad GSO mode");
+ goto exit;
+ }
dx_buff->len_pkt = skb->len;
dx_buff->len_l2 = ETH_HLEN;
- dx_buff->len_l3 = ip_hdrlen(skb);
- dx_buff->len_l4 = tcp_hdrlen(skb);
+ dx_buff->len_l3 = skb_network_header_len(skb);
dx_buff->eop_index = 0xffffU;
- dx_buff->is_ipv6 =
- (ip_hdr(skb)->version == 6) ? 1U : 0U;
+ dx_buff->is_ipv6 = (ipver == 6);
need_context_tag = true;
}
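A worked sketch of the UDP length fix-up above (plain C; the mss value is hypothetical): hardware replicates the template header for each segment but does not rewrite the UDP length field, so the driver pre-writes the per-segment value mss + sizeof(struct udphdr) into the template header.

#include <arpa/inet.h>	/* htons() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mss = 1400;	/* hypothetical gso_size */
	uint16_t len_l4 = 8;	/* sizeof(struct udphdr) */

	/* Every emitted segment carries this UDP length on the wire. */
	uint16_t udp_len = htons(mss + len_l4);

	printf("udp_hdr->len = 0x%04x (%u bytes)\n",
	       udp_len, (unsigned int)(mss + len_l4));
	return 0;
}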
@@ -477,24 +543,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
++ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
- 1U : 0U;
-
- if (ip_hdr(skb)->version == 4) {
- dx_buff->is_tcp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
- 1U : 0U;
- } else if (ip_hdr(skb)->version == 6) {
- dx_buff->is_tcp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
- 1U : 0U;
- }
+ dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
+ dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
+ dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
}
for (; nr_frags--; ++frag_count) {
@@ -549,7 +600,8 @@ mapping_error:
--ret, dx = aq_ring_next_dx(ring, dx)) {
dx_buff = &ring->buff_ring[dx];
- if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
+ if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
+ !dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dx_buff->pa,
@@ -570,11 +622,11 @@ exit:
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
+ unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
- unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
- unsigned int tc = 0U;
int err = NETDEV_TX_OK;
+ unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
@@ -587,6 +639,11 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
+ if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
/* Above status update may stop the queue. Check this. */
if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY;
@@ -667,6 +724,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
if (err < 0)
return err;
}
+
return aq_nic_set_packet_filter(self, packet_filter);
}
@@ -711,10 +769,10 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- unsigned int i = 0U;
- unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
+ unsigned int count = 0U;
+ unsigned int i = 0U;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
@@ -764,8 +822,8 @@ err_exit:;
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
- struct net_device *ndev = self->ndev;
struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct net_device *ndev = self->ndev;
ndev->stats.rx_packets = stats->dma_pkt_rc;
ndev->stats.rx_bytes = stats->dma_oct_rc;
@@ -810,9 +868,12 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Asym_Pause);
+ }
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
@@ -846,13 +907,13 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
+ if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
/* Asym is when either RX or TX, but not both */
- if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
- !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
+ if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
@@ -935,6 +996,44 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
return fw_version;
}
+int aq_nic_set_loopback(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_hw_ops->hw_set_loopback ||
+ !self->aq_fw_ops->set_phyloopback)
+ return -ENOTSUPP;
+
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PKT_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_NET,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_NET)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
+ mutex_unlock(&self->fwreq_mutex);
+
+ return 0;
+}
+
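The five loopback modes are presumably exposed as ethtool private flags in this order (aq_ethtool.c is not part of this excerpt). A hedged userspace sketch using the legacy priv-flags ioctl, assuming bit 0 corresponds to AQ_HW_LOOPBACK_DMA_SYS as in the enum above; "eth0" is a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev = {
		.cmd = ETHTOOL_SPFLAGS,
		.data = 1u << 0,	/* assumed: AQ_HW_LOOPBACK_DMA_SYS */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ev;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SPFLAGS");
	close(fd);
	return 0;
}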
int aq_nic_stop(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
@@ -953,14 +1052,31 @@ int aq_nic_stop(struct aq_nic_s *self)
else
aq_pci_func_free_irqs(self);
+ aq_ptp_irq_free(self);
+
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
+ aq_ptp_ring_stop(self);
+
return self->aq_hw_ops->hw_stop(self->aq_hw);
}
-void aq_nic_deinit(struct aq_nic_s *self)
+void aq_nic_set_power(struct aq_nic_s *self)
+{
+ if (self->power_state != AQ_HW_POWER_STATE_D0 ||
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+}
+
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@@ -972,23 +1088,17 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
- if (likely(self->aq_fw_ops->deinit)) {
+ aq_ptp_unregister(self);
+ aq_ptp_ring_deinit(self);
+ aq_ptp_ring_free(self);
+ aq_ptp_free(self);
+
+ if (likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
}
- if (self->power_state != AQ_HW_POWER_STATE_D0 ||
- self->aq_hw->aq_nic_cfg->wol)
- if (likely(self->aq_fw_ops->set_power)) {
- mutex_lock(&self->fwreq_mutex);
- self->aq_fw_ops->set_power(self->aq_hw,
- self->power_state,
- self->ndev->dev_addr);
- mutex_unlock(&self->fwreq_mutex);
- }
-
-
err_exit:;
}
@@ -1009,44 +1119,6 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
-{
- int err = 0;
-
- if (!netif_running(self->ndev)) {
- err = 0;
- goto out;
- }
- rtnl_lock();
- if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
- self->power_state = AQ_HW_POWER_STATE_D3;
- netif_device_detach(self->ndev);
- netif_tx_stop_all_queues(self->ndev);
-
- err = aq_nic_stop(self);
- if (err < 0)
- goto err_exit;
-
- aq_nic_deinit(self);
- } else {
- err = aq_nic_init(self);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_start(self);
- if (err < 0)
- goto err_exit;
-
- netif_device_attach(self->ndev);
- netif_tx_start_all_queues(self->ndev);
- }
-
-err_exit:
- rtnl_unlock();
-out:
- return err;
-}
-
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
@@ -1063,8 +1135,52 @@ void aq_nic_shutdown(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
}
- aq_nic_deinit(self);
+ aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(self);
err_exit:
rtnl_unlock();
}
+
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
+{
+ u8 location = 0xFF;
+ u32 fltr_cnt;
+ u32 n_bit;
+
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
+ self->aq_hw_rx_fltrs.fet_reserved_count;
+ self->aq_hw_rx_fltrs.fet_reserved_count++;
+ break;
+ case aq_rx_filter_l3l4:
+ fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
+ n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
+ location = n_bit;
+ break;
+ default:
+ break;
+ }
+
+ return location;
+}
+
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location)
+{
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ self->aq_hw_rx_fltrs.fet_reserved_count--;
+ break;
+ case aq_rx_filter_l3l4:
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 255b54a6ae07..a752f8bb4b08 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.h: Declaration of common code for NIC. */
@@ -17,6 +17,20 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
+struct aq_ptp_s;
+enum aq_rx_filter_type;
+
+enum aq_fc_mode {
+ AQ_NIC_FC_OFF = 0,
+ AQ_NIC_FC_TX,
+ AQ_NIC_FC_RX,
+ AQ_NIC_FC_FULL,
+};
+
+struct aq_fc_info {
+ enum aq_fc_mode req;
+ enum aq_fc_mode cur;
+};
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
@@ -32,7 +46,7 @@ struct aq_nic_cfg_s {
u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
- u32 flow_control;
+ struct aq_fc_info fc;
u32 link_speed_msk;
u32 wol;
u8 is_vlan_rx_strip;
@@ -44,6 +58,7 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
+ u32 priv_flags;
u8 tcs;
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
@@ -53,11 +68,13 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_STOPPING 0x00000008U
#define AQ_NIC_FLAG_RESETTING 0x00000010U
#define AQ_NIC_FLAG_CLOSING 0x00000020U
+#define AQ_NIC_PTP_DPATH_UP 0x02000000U
#define AQ_NIC_LINK_DOWN 0x04000000U
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
-#define AQ_NIC_WOL_ENABLED BIT(0)
+#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
+ WAKE_PHY)
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
@@ -67,9 +84,10 @@ struct aq_hw_rx_fl2 {
};
struct aq_hw_rx_fl3l4 {
- u8 active_ipv4;
- u8 active_ipv6:2;
+ u8 active_ipv4;
+ u8 active_ipv6:2;
u8 is_ipv6;
+ u8 reserved_count;
};
struct aq_hw_rx_fltrs_s {
@@ -77,10 +95,13 @@ struct aq_hw_rx_fltrs_s {
u16 active_filters;
struct aq_hw_rx_fl2 fl2;
struct aq_hw_rx_fl3l4 fl3l4;
+	/* filter ethertype */
+ u8 fet_reserved_count;
};
struct aq_nic_s {
atomic_t flags;
+ u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_hw_s *aq_hw;
@@ -108,6 +129,8 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
+ /* PTP support */
+ struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
@@ -126,12 +149,15 @@ void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
-void aq_nic_deinit(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
+void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
@@ -145,8 +171,10 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
const struct ethtool_link_ksettings *cmd);
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_set_loopback(struct aq_nic_s *self);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
-
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 74b9f3f1da81..2bb329606794 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_pci_func.c: Definition of PCI functions. */
@@ -185,6 +185,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
return AQ_HW_IRQ_MSIX;
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
+
return AQ_HW_IRQ_LEGACY;
}
@@ -196,12 +197,12 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
- struct aq_nic_s *self;
- int err;
struct net_device *ndev;
resource_size_t mmio_pa;
- u32 bar;
+ struct aq_nic_s *self;
u32 numvecs;
+ u32 bar;
+ int err;
err = pci_enable_device(pdev);
if (err)
@@ -269,6 +270,9 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min((u8)AQ_CFG_VECS_DEF,
aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
numvecs = min(numvecs, num_online_cpus());
+ /* Request IRQ vector for PTP */
+ numvecs += 1;
+
numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
@@ -308,6 +312,7 @@ err_ndev:
pci_release_regions(pdev);
err_pci_func:
pci_disable_device(pdev);
+
return err;
}
@@ -344,29 +349,98 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+static int aq_suspend_common(struct device *dev, bool deep)
{
- struct aq_nic_s *self = pci_get_drvdata(pdev);
+ struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
- return aq_nic_change_pm_state(self, &pm_msg);
+ nic->power_state = AQ_HW_POWER_STATE_D3;
+ netif_device_detach(nic->ndev);
+ netif_tx_stop_all_queues(nic->ndev);
+
+ aq_nic_stop(nic);
+
+ if (deep) {
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
+ }
+
+ rtnl_unlock();
+
+ return 0;
}
-static int aq_pci_resume(struct pci_dev *pdev)
+static int atl_resume_common(struct device *dev, bool deep)
{
- struct aq_nic_s *self = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct aq_nic_s *nic;
+ int ret;
+
+ nic = pci_get_drvdata(pdev);
+
+ rtnl_lock();
- return aq_nic_change_pm_state(self, &pm_msg);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (deep) {
+ ret = aq_nic_init(nic);
+ if (ret)
+ goto err_exit;
+ }
+
+ ret = aq_nic_start(nic);
+ if (ret)
+ goto err_exit;
+
+ netif_device_attach(nic->ndev);
+ netif_tx_start_all_queues(nic->ndev);
+
+err_exit:
+ rtnl_unlock();
+
+ return ret;
}
+static int aq_pm_freeze(struct device *dev)
+{
+ return aq_suspend_common(dev, false);
+}
+
+static int aq_pm_suspend_poweroff(struct device *dev)
+{
+ return aq_suspend_common(dev, true);
+}
+
+static int aq_pm_thaw(struct device *dev)
+{
+ return atl_resume_common(dev, false);
+}
+
+static int aq_pm_resume_restore(struct device *dev)
+{
+ return atl_resume_common(dev, true);
+}
+
+static const struct dev_pm_ops aq_pm_ops = {
+ .suspend = aq_pm_suspend_poweroff,
+ .poweroff = aq_pm_suspend_poweroff,
+ .freeze = aq_pm_freeze,
+ .resume = aq_pm_resume_restore,
+ .restore = aq_pm_resume_restore,
+ .thaw = aq_pm_thaw,
+};
+
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
.shutdown = aq_pci_shutdown,
+#ifdef CONFIG_PM
+ .driver.pm = &aq_pm_ops,
+#endif
};
int aq_pci_func_register_driver(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
new file mode 100644
index 000000000000..51ae921e3e1f
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* aQuantia Corporation Network Driver
+ * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+ */
+
+#include "aq_phy.h"
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
+ val, val == 0U, 10U, 100000U);
+
+ if (err < 0)
+ return false;
+
+ return true;
+}
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ /* Send Read command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+ /* Read result. */
+ aq_mdio_busy_wait(aq_hw);
+
+ return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
+}
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
+ HW_ATL_MDIO_WRITE_DATA_SHIFT);
+ /* Send Write command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+}
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+
+ if (err < 0) {
+ err = 0xffff;
+ goto err_exit;
+ }
+
+ err = aq_mdio_read_word(aq_hw, mmd, address);
+
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+
+err_exit:
+ return err;
+}
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+ if (err < 0)
+ return;
+
+ aq_mdio_write_word(aq_hw, mmd, address, data);
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
+{
+ u16 val;
+
+ for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
+ ++aq_hw->phy_id) {
+ /* PMA Standard Device Identifier 2: Address 1.3 */
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (val != 0xffff)
+ return true;
+ }
+
+ return false;
+}
+
+bool aq_phy_init(struct aq_hw_s *aq_hw)
+{
+ u32 dev_id;
+
+ if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
+ if (!aq_phy_init_phy_id(aq_hw))
+ return false;
+
+ /* PMA Standard Device Identifier:
+ * Address 1.2 = MSW,
+ * Address 1.3 = LSW
+ */
+ dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
+ dev_id <<= 16;
+ dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (dev_id == 0xffffffff) {
+ aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
new file mode 100644
index 000000000000..84b72ad04a4a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* aQuantia Corporation Network Driver
+ * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+ */
+
+#ifndef AQ_PHY_H
+#define AQ_PHY_H
+
+#include <linux/mdio.h>
+
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
+#define HW_ATL_PHY_ID_MAX 32U
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw);
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr);
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data);
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address);
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data);
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
+
+bool aq_phy_init(struct aq_hw_s *aq_hw);
+
+#endif /* AQ_PHY_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
new file mode 100644
index 000000000000..58e8c641e8b3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Aquantia Corporation Network Driver
+ * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+ */
+
+/* File aq_ptp.c:
+ * Definition of functions for Linux PTP support.
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+
+#include "aq_nic.h"
+#include "aq_ptp.h"
+#include "aq_ring.h"
+#include "aq_phy.h"
+#include "aq_filters.h"
+
+#define AQ_PTP_TX_TIMEOUT (HZ * 10)
+
+#define POLL_SYNC_TIMER_MS 15
+
+enum ptp_speed_offsets {
+ ptp_offset_idx_10 = 0,
+ ptp_offset_idx_100,
+ ptp_offset_idx_1000,
+ ptp_offset_idx_2500,
+ ptp_offset_idx_5000,
+ ptp_offset_idx_10000,
+};
+
+struct ptp_skb_ring {
+ struct sk_buff **buff;
+ spinlock_t lock;
+ unsigned int size;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct ptp_tx_timeout {
+ spinlock_t lock;
+ bool active;
+ unsigned long tx_start;
+};
+
+struct aq_ptp_s {
+ struct aq_nic_s *aq_nic;
+ struct hwtstamp_config hwtstamp_config;
+ spinlock_t ptp_lock;
+ spinlock_t ptp_ring_lock;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+
+ atomic_t offset_egress;
+ atomic_t offset_ingress;
+
+ struct aq_ring_param_s ptp_ring_param;
+
+ struct ptp_tx_timeout ptp_tx_timeout;
+
+ unsigned int idx_vector;
+ struct napi_struct napi;
+
+ struct aq_ring_s ptp_tx;
+ struct aq_ring_s ptp_rx;
+ struct aq_ring_s hwts_rx;
+
+ struct ptp_skb_ring skb_ring;
+
+ struct aq_rx_filter_l3l4 udp_filter;
+ struct aq_rx_filter_l2 eth_type_filter;
+
+ struct delayed_work poll_sync;
+ u32 poll_timeout_ms;
+
+ bool extts_pin_enabled;
+ u64 last_sync1588_ts;
+};
+
+struct ptp_tm_offset {
+ unsigned int mbps;
+ int egress;
+ int ingress;
+};
+
+static struct ptp_tm_offset ptp_offset[6];
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int i, egress, ingress;
+
+ if (!aq_ptp)
+ return;
+
+ egress = 0;
+ ingress = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ if (mbps == ptp_offset[i].mbps) {
+ egress = ptp_offset[i].egress;
+ ingress = ptp_offset[i].ingress;
+ break;
+ }
+ }
+
+ atomic_set(&aq_ptp->offset_egress, egress);
+ atomic_set(&aq_ptp->offset_ingress, ingress);
+}
+
+static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned int next_head = (ring->head + 1) % ring->size;
+
+ if (next_head == ring->tail)
+ return -ENOMEM;
+
+ ring->buff[ring->head] = skb_get(skb);
+ ring->head = next_head;
+
+ return 0;
+}
+
+static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ ret = __aq_ptp_skb_put(ring, skb);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
+
+static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ if (ring->tail == ring->head)
+ return NULL;
+
+ skb = ring->buff[ring->tail];
+ ring->tail = (ring->tail + 1) % ring->size;
+
+ return skb;
+}
+
+static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ skb = __aq_ptp_skb_get(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return skb;
+}
+
+static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ len = (ring->head >= ring->tail) ?
+ ring->head - ring->tail :
+ ring->size - ring->tail + ring->head;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return len;
+}
+
+static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
+{
+ struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_init(&ring->lock);
+
+ ring->buff = buff;
+ ring->size = size;
+ ring->head = 0;
+ ring->tail = 0;
+
+ return 0;
+}
+
+static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ while ((skb = aq_ptp_skb_get(ring)) != NULL)
+ dev_kfree_skb_any(skb);
+}
+
+static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
+{
+ if (ring->buff) {
+ aq_ptp_skb_ring_clean(ring);
+ kfree(ring->buff);
+ ring->buff = NULL;
+ }
+}
+
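The skb ring above is a classic single-slot-reserved circular buffer: head == tail means empty and (head + 1) % size == tail means full, so a ring sized N holds at most N - 1 skbs. A tiny standalone sketch of that invariant:

#include <assert.h>

int main(void)
{
	unsigned int size = 4, head = 0, tail = 0, stored = 0;

	/* put until full, mirroring __aq_ptp_skb_put()'s check */
	while ((head + 1) % size != tail) {
		head = (head + 1) % size;
		stored++;
	}
	assert(stored == size - 1);
	return 0;
}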
+static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
+{
+ spin_lock_init(&timeout->lock);
+ timeout->active = false;
+}
+
+static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = true;
+ timeout->tx_start = jiffies;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+}
+
+static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
+{
+ if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = false;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+ }
+}
+
+static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+ bool timeout_flag;
+
+ timeout_flag = false;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ if (timeout->active) {
+ timeout_flag = time_is_before_jiffies(timeout->tx_start +
+ AQ_PTP_TX_TIMEOUT);
+ /* reset active flag if timeout detected */
+ if (timeout_flag)
+ timeout->active = false;
+ }
+ spin_unlock_irqrestore(&timeout->lock, flags);
+
+ if (timeout_flag) {
+ aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
+ netdev_err(aq_ptp->aq_nic->ndev,
+ "PTP Timeout. Clearing Tx Timestamp SKBs\n");
+ }
+}
+
+/* aq_ptp_adjfine
+ * @ptp: the ptp clock structure
+ * @scaled_ppm: scaled parts per million adjustment from base
+ *
+ * adjust the frequency of the ptp cycle counter by the
+ * indicated amount from the base frequency.
+ */
+static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
+ scaled_ppm_to_ppb(scaled_ppm));
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
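For reference, scaled_ppm carries a 16-bit binary fraction of one part per million, so the ppb value handed to hw_adj_clock_freq() is roughly scaled_ppm * 1000 / 2^16. A sketch mirroring the kernel's scaled_ppm_to_ppb() helper (the * 125 >> 13 form is the same arithmetic):

#include <stdint.h>
#include <stdio.h>

static int64_t sketch_scaled_ppm_to_ppb(long scaled_ppm)
{
	int64_t ppb = 1 + (int64_t)scaled_ppm;

	ppb *= 125;
	ppb >>= 13;	/* == * 1000 / 65536 */
	return ppb;
}

int main(void)
{
	/* 65536 scaled ppm == 1 ppm == ~1000 ppb */
	printf("%lld\n", (long long)sketch_scaled_ppm_to_ppb(65536));
	return 0;
}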
+/* aq_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+/* aq_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/* aq_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int aq_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns = timespec64_to_ns(ts);
+ u64 now;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
+
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
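Once registered, this clock is exercised from userspace through the dynamic POSIX clock interface, the same way the kernel's testptp tool does it. A hedged sketch (/dev/ptp0 is a placeholder device node):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);
	struct timespec ts;

	if (fd < 0)
		return 1;
	/* Ends up in aq_ptp_gettime() above. */
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("phc: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}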
+static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = ns_to_ktime(timestamp);
+}
+
+static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
+ u64 period)
+{
+ if (period)
+ netdev_dbg(aq_nic->ndev,
+ "Enable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+ else
+ netdev_dbg(aq_nic->ndev,
+ "Disable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+
+	/* Notify hardware of the request to begin sending pulses.
+	 * If period is zero, pulsing is disabled.
+	 */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
+ start, (u32)period);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct ptp_clock_time *t = &rq->perout.period;
+ struct ptp_clock_time *s = &rq->perout.start;
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = rq->perout.index;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ /* we cannot support periods greater
+ * than 4 seconds due to reg limit
+ */
+ if (t->sec > 4 || t->sec < 0)
+ return -ERANGE;
+
+ /* convert to unsigned 64b ns,
+ * verify we can put it in a 32b register
+ */
+ period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
+
+ /* verify the value is in range supported by hardware */
+ if (period > U32_MAX)
+ return -ERANGE;
+ /* convert to unsigned 64b ns */
+ /* TODO convert to AQ time */
+ start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = 0;
+ u32 rest = 0;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
+ div_u64_rem(start, NSEC_PER_SEC, &rest);
+ period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
+ start = on ? start - rest + NSEC_PER_SEC *
+ (rest > 990000000LL ? 2 : 1) : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
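A worked sketch of the start-time rounding above: the first pulse is aligned to a whole-second boundary, and when the current time is within 10 ms of the next boundary an extra second is skipped so the programmed start is safely in the future.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same rounding as aq_ptp_pps_pin_configure() for the "on" case. */
static uint64_t pps_start(uint64_t now_ns)
{
	uint64_t rest = now_ns % NSEC_PER_SEC;

	return now_ns - rest + NSEC_PER_SEC * (rest > 990000000ULL ? 2 : 1);
}

int main(void)
{
	/* 12.300 s -> pulse at 13 s; 12.997 s -> pulse at 14 s */
	printf("%llu\n", (unsigned long long)pps_start(12300000000ULL));
	printf("%llu\n", (unsigned long long)pps_start(12997000000ULL));
	return 0;
}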
+static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u32 enable = aq_ptp->extts_pin_enabled;
+
+ if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
+ aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
+ enable);
+}
+
+static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+
+ u32 pin_index = rq->extts.index;
+
+ if (pin_index >= ptp->n_ext_ts)
+ return -EINVAL;
+
+ aq_ptp->extts_pin_enabled = !!on;
+ if (on) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+
+ aq_ptp_extts_pin_ctrl(aq_ptp);
+ return 0;
+}
+
+/* aq_ptp_gpio_feature_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ */
+static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return aq_ptp_extts_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return aq_ptp_perout_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return aq_ptp_pps_pin_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* aq_ptp_verify
+ * @ptp: the ptp clock structure
+ * @pin: index of the pin in question
+ * @func: the desired function to use
+ * @chan: the function channel index to use
+ */
+static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* verify the requested pin is there */
+ if (!ptp->pin_config || pin >= ptp->n_pins)
+ return -EINVAL;
+
+ /* enforce locked channels, no changing them */
+ if (chan != ptp->pin_config[pin].chan)
+ return -EINVAL;
+
+ /* we want to keep the functions locked as well */
+ if (func != ptp->pin_config[pin].func)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @aq_nic: the private NIC structure
+ * @timestamp: the hardware TX timestamp to attach
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
+ struct skb_shared_hwtstamps hwtstamp;
+
+ if (!skb) {
+ netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
+ return;
+ }
+
+ timestamp += atomic_read(&aq_ptp->offset_egress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
+ skb_tstamp_tx(skb, &hwtstamp);
+ dev_kfree_skb_any(skb);
+
+ aq_ptp_tx_timeout_update(aq_ptp);
+}
+
+/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @aq_ptp: pointer to the driver's PTP structure
+ * @skb: particular skb to attach the timestamp to
+ * @timestamp: the hardware RX timestamp
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
+ u64 timestamp)
+{
+ timestamp -= atomic_read(&aq_ptp->offset_ingress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
+}
+
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ *config = aq_ptp->hwtstamp_config;
+}
+
+static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
+{
+ aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 |
+ HW_ATL_RX_UDP |
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 |
+ aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ aq_ptp->udp_filter.p_dst = PTP_EV_PORT;
+
+ aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
+ aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
+}
+
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ const struct aq_hw_ops *hw_ops;
+ int err = 0;
+
+ hw_ops = aq_nic->aq_hw_ops;
+ if (config->tx_type == HWTSTAMP_TX_ON ||
+ config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
+ aq_ptp_prepare_filters(aq_ptp);
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_set) {
+ err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ } else {
+ aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_clear) {
+ err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ }
+
+ if (err)
+ return -EREMOTEIO;
+
+ aq_ptp->hwtstamp_config = *config;
+
+ return 0;
+}
+
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return false;
+
+ return &aq_ptp->ptp_tx == ring ||
+ &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+}
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ unsigned int len)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ u64 timestamp = 0;
+ u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
+ p, len, &timestamp);
+
+ if (ret > 0)
+ aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
+
+ return ret;
+}
+
+static int aq_ptp_poll(struct napi_struct *napi, int budget)
+{
+ struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ bool was_cleaned = false;
+ int work_done = 0;
+ int err;
+
+ /* Processing PTP TX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+
+ was_cleaned = true;
+ }
+
+ /* Processing HW_TIMESTAMP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
+ aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);
+
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ was_cleaned = true;
+ }
+
+ /* Processing PTP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
+ unsigned int sw_tail_old;
+
+ err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = aq_ptp->ptp_rx.sw_tail;
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (was_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
+ BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
+ }
+
+err_exit:
+ return work_done;
+}
+
+static irqreturn_t aq_ptp_isr(int irq, void *private)
+{
+ struct aq_ptp_s *aq_ptp = private;
+ int err = 0;
+
+ if (!aq_ptp) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&aq_ptp->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct aq_ring_s *ring = &aq_ptp->ptp_tx;
+ unsigned long irq_flags;
+ int err = NETDEV_TX_OK;
+ unsigned int frags;
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+	/* Frags cannot be bigger than 16KB,
+	 * because PTP usually works without
+	 * jumbo frames, even in the background.
+	 */
+ if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
+		/* Drop the packet because it doesn't make sense to delay it */
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
+ if (err) {
+ netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
+ ring->size);
+ return NETDEV_TX_BUSY;
+ }
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ aq_ptp_tx_timeout_start(aq_ptp);
+ skb_tx_timestamp(skb);
+
+ spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+ frags = aq_nic_map_skb(aq_nic, skb, ring);
+
+ if (likely(frags)) {
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
+ ring, frags);
+ if (err >= 0) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ptp_tx_timeout_check(aq_ptp);
+}
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ struct pci_dev *pdev = aq_nic->pdev;
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (pdev->msix_enabled || pdev->msi_enabled) {
+ err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
+ aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
+ } else {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct pci_dev *pdev = aq_nic->pdev;
+
+ if (!aq_ptp)
+ return;
+
+ free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
+}
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_ring_init(&aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ 0U);
+ if (err < 0)
+ goto err_rx_free;
+
+ err = aq_ring_init(&aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ return err;
+
+err_rx_free:
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+err_exit:
+ return err;
+}
+
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ napi_enable(&aq_ptp->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);
+
+ napi_disable(&aq_ptp->napi);
+}
+
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
+ return;
+
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+}
+
+#define PTP_8TC_RING_IDX 8
+#define PTP_4TC_RING_IDX 16
+#define PTP_HWST_RING_IDX 31
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ unsigned int tx_ring_idx, rx_ring_idx;
+ struct aq_ring_s *hwts;
+ u32 tx_tc_mode, rx_tc_mode;
+ struct aq_ring_s *ring;
+ int err;
+
+ if (!aq_ptp)
+ return 0;
+
+	/* The index must be 8 (8 TCs) or 16 (4 TCs),
+	 * depending on the Traffic Class mode.
+	 */
+ aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
+ if (tx_tc_mode == 0)
+ tx_ring_idx = PTP_8TC_RING_IDX;
+ else
+ tx_ring_idx = PTP_4TC_RING_IDX;
+
+ ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+ tx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
+ if (rx_tc_mode == 0)
+ rx_ring_idx = PTP_8TC_RING_IDX;
+ else
+ rx_ring_idx = PTP_4TC_RING_IDX;
+
+ ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+ rx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit_ptp_tx;
+ }
+
+ hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+ aq_nic->aq_nic_cfg.rxds,
+ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+ if (!hwts) {
+ err = -ENOMEM;
+ goto err_exit_ptp_rx;
+ }
+
+ err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
+ if (err != 0) {
+ err = -ENOMEM;
+ goto err_exit_hwts_rx;
+ }
+
+ aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
+ aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
+ aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
+ cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
+ &aq_ptp->ptp_ring_param.affinity_mask);
+
+ return 0;
+
+err_exit_hwts_rx:
+ aq_ring_free(&aq_ptp->hwts_rx);
+err_exit_ptp_rx:
+ aq_ring_free(&aq_ptp->ptp_rx);
+err_exit_ptp_tx:
+ aq_ring_free(&aq_ptp->ptp_tx);
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ring_free(&aq_ptp->ptp_tx);
+ aq_ring_free(&aq_ptp->ptp_rx);
+ aq_ring_free(&aq_ptp->hwts_rx);
+
+ aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+}
+
+#define MAX_PTP_GPIO_COUNT 4
+
+static struct ptp_clock_info aq_ptp_clock = {
+ .owner = THIS_MODULE,
+ .name = "atlantic ptp",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfine = aq_ptp_adjfine,
+ .adjtime = aq_ptp_adjtime,
+ .gettime64 = aq_ptp_gettime,
+ .settime64 = aq_ptp_settime,
+ .n_per_out = 0,
+ .enable = aq_ptp_gpio_feature_enable,
+ .n_pins = 0,
+ .verify = aq_ptp_verify,
+ .pin_config = NULL,
+};
+
+#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
+ ptp_offset[__idx].mbps = (__mbps); \
+ ptp_offset[__idx].egress = (__egress); \
+ ptp_offset[__idx].ingress = (__ingress); } \
+ while (0)
+
+static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
+{
+ int i;
+
+ /* Load offsets for PTP */
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ switch (i) {
+ /* 100M */
+ case ptp_offset_idx_100:
+ ptp_offset_init(i, 100,
+ offsets->egress_100,
+ offsets->ingress_100);
+ break;
+ /* 1G */
+ case ptp_offset_idx_1000:
+ ptp_offset_init(i, 1000,
+ offsets->egress_1000,
+ offsets->ingress_1000);
+ break;
+ /* 2.5G */
+ case ptp_offset_idx_2500:
+ ptp_offset_init(i, 2500,
+ offsets->egress_2500,
+ offsets->ingress_2500);
+ break;
+ /* 5G */
+ case ptp_offset_idx_5000:
+ ptp_offset_init(i, 5000,
+ offsets->egress_5000,
+ offsets->ingress_5000);
+ break;
+ /* 10G */
+ case ptp_offset_idx_10000:
+ ptp_offset_init(i, 10000,
+ offsets->egress_10000,
+ offsets->ingress_10000);
+ break;
+ }
+ }
+}
+
+static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
+{
+ memset(ptp_offset, 0, sizeof(ptp_offset));
+
+ aq_ptp_offset_init_from_fw(offsets);
+}
+
+static void aq_ptp_gpio_init(struct ptp_clock_info *info,
+ struct hw_atl_info *hw_info)
+{
+ struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
+ u32 extts_pin_cnt = 0;
+ u32 out_pin_cnt = 0;
+ u32 i;
+
+ memset(pin_desc, 0, sizeof(pin_desc));
+
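+	/* Expose each firmware-reported PTP GPIO as a periodic output pin */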
+ for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
+ if (hw_info->gpio_pin[i] ==
+ (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", i);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = out_pin_cnt;
+ pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
+ }
+ }
+
+ info->n_per_out = out_pin_cnt;
+
+ if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
+ extts_pin_cnt += 1;
+
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", out_pin_cnt);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = 0;
+ pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
+ }
+
+ info->n_pins = out_pin_cnt + extts_pin_cnt;
+ info->n_ext_ts = extts_pin_cnt;
+
+ if (!info->n_pins)
+ return;
+
+ info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
+ GFP_KERNEL);
+
+ if (!info->pin_config)
+ return;
+
+ memcpy(info->pin_config, &pin_desc,
+ sizeof(struct ptp_pin_desc) * info->n_pins);
+}
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ aq_ptp_settime(&aq_ptp->ptp_info, &ts);
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
+
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ struct hw_atl_utils_mbox mbox;
+ struct ptp_clock *clock;
+ struct aq_ptp_s *aq_ptp;
+ int err = 0;
+
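+	/* PTP is only usable when both the HW ops and the firmware
+	 * expose it and the PHY reports the PTP capability.
+	 */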
+ if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ if (!aq_nic->aq_fw_ops->enable_ptp) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);
+
+ if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ aq_ptp_offset_init(&mbox.info.ptp_offset);
+
+ aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
+ if (!aq_ptp) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_ptp->aq_nic = aq_nic;
+
+ spin_lock_init(&aq_ptp->ptp_lock);
+ spin_lock_init(&aq_ptp->ptp_ring_lock);
+
+ aq_ptp->ptp_info = aq_ptp_clock;
+ aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
+ clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
+ if (IS_ERR(clock)) {
+ netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
+ err = PTR_ERR(clock);
+ goto err_exit;
+ }
+ aq_ptp->ptp_clock = clock;
+ aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);
+
+ atomic_set(&aq_ptp->offset_egress, 0);
+ atomic_set(&aq_ptp->offset_ingress, 0);
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
+ aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+
+ aq_ptp->idx_vector = idx_vec;
+
+ aq_nic->aq_ptp = aq_ptp;
+
+ /* enable ptp counter */
+ aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
+ aq_ptp_clock_init(aq_nic);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
+ aq_ptp->eth_type_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
+ aq_ptp->udp_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);
+
+ return 0;
+
+err_exit:
+ if (aq_ptp)
+ kfree(aq_ptp->ptp_info.pin_config);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+ return err;
+}
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ ptp_clock_unregister(aq_ptp->ptp_clock);
+}
+
+void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
+ aq_ptp->eth_type_filter.location);
+ aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
+ aq_ptp->udp_filter.location);
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ /* disable ptp */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ kfree(aq_ptp->ptp_info.pin_config);
+
+ netif_napi_del(&aq_ptp->napi);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+}
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return aq_ptp->ptp_clock;
+}
+
+/* Read the PTP external GPIO (Sync1588) timestamp in nanoseconds */
+static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
+{
+ u64 ts = 0;
+
+ if (aq_nic->aq_hw_ops->hw_get_sync_ts)
+ aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
+
+ return ts;
+}
+
+static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
+{
+ if (aq_ptp->extts_pin_enabled) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ aq_ptp->last_sync1588_ts =
+ aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+}
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (aq_nic->aq_hw->aq_link_status.mbps)
+ aq_ptp_start_work(aq_ptp);
+ else
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+
+ return 0;
+}
+
+static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts2;
+ u64 sync_ts;
+
+ sync_ts = aq_ptp_get_sync1588_ts(aq_nic);
+
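+	/* The counter may be caught mid-update, so require two
+	 * consecutive identical reads before trusting the value.
+	 */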
+ if (sync_ts != aq_ptp->last_sync1588_ts) {
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ sync_ts = sync_ts2;
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ netdev_err(aq_nic->ndev,
+ "%s: Unable to get correct GPIO TS",
+ __func__);
+ sync_ts = 0;
+ }
+ }
+
+ *new_ts = sync_ts;
+ return true;
+ }
+ return false;
+}
+
+static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts;
+
+ /* Sync1588 pin was triggered */
+ if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
+ if (aq_ptp->extts_pin_enabled) {
+ struct ptp_clock_event ptp_event;
+ u64 time = 0;
+
+ aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
+ sync_ts, &time);
+ ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
+ ptp_event.timestamp = time;
+
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
+ }
+
+ aq_ptp->last_sync1588_ts = sync_ts;
+ }
+
+ return 0;
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
+{
+ struct delayed_work *dw = to_delayed_work(w);
+ struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
+
+ aq_ptp_check_sync1588(aq_ptp);
+
+ if (aq_ptp->extts_pin_enabled) {
+ unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
+
+ schedule_delayed_work(&aq_ptp->poll_sync, timeout);
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
new file mode 100644
index 000000000000..231906431a48
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Aquantia Corporation Network Driver
+ * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+ */
+
+/* File aq_ptp.h: Declaration of PTP functions.
+ */
+#ifndef AQ_PTP_H
+#define AQ_PTP_H
+
+#include <linux/net_tstamp.h>
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+/* Common functions */
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec);
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic);
+void aq_ptp_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic);
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic);
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic);
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps);
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic);
+
+/* Traffic processing functions */
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb);
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
+
+/* The caller must check that PTP is available before calling these */
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+
+/* Return whether the ring belongs to PTP */
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ unsigned int len);
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic);
+
+#else
+
+static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ return 0;
+}
+
+static inline void aq_ptp_unregister(struct aq_nic_s *aq_nic) {}
+
+static inline void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_free(struct aq_nic_s *aq_nic) {}
+
+static inline int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_stop(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_service_task(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic,
+ unsigned int mbps) {}
+static inline void aq_ptp_clock_init(struct aq_nic_s *aq_nic) {}
+static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
+static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config) {}
+static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ return 0;
+}
+
+static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ return false;
+}
+
+static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+ struct sk_buff *skb, u8 *p,
+ unsigned int len)
+{
+ return 0;
+}
+
+static inline struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return NULL;
+}
+
+static inline int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+#endif
+
+#endif /* AQ_PTP_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 76bdbe1596d6..951d86f8b66e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
@@ -10,6 +10,7 @@
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
+#include "aq_ptp.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -29,8 +30,8 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
struct device *dev)
{
struct page *page;
- dma_addr_t daddr;
int ret = -ENOMEM;
+ dma_addr_t daddr;
page = dev_alloc_pages(order);
if (unlikely(!page))
@@ -117,6 +118,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -143,6 +145,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -174,6 +177,31 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
+ return self;
+}
+
+struct aq_ring_s *
+aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, unsigned int size, unsigned int dx_size)
+{
+ struct device *dev = aq_nic_get_dev(aq_nic);
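+	/* The extra AQ_CFG_RXDS_DEF bytes past the descriptors serve as a
+	 * shared data area that the HWTS descriptors point into.
+	 */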
+ size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
+
+ memset(self, 0, sizeof(*self));
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = size;
+ self->dx_size = dx_size;
+
+ self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
+ GFP_KERNEL);
+ if (!self->dx_ring) {
+ aq_ring_free(self);
+ return NULL;
+ }
+
return self;
}
@@ -182,6 +210,7 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
+
return 0;
}
@@ -290,6 +319,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
@@ -354,6 +384,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
skb_put(skb, buff->len);
page_ref_inc(buff->rxdata.page);
} else {
@@ -362,6 +397,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
hdr_len = buff->len;
if (hdr_len > AQ_CFG_RX_HDR_SIZE)
@@ -421,8 +461,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
-
- skb_record_rx_queue(skb, self->idx);
+	/* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
@@ -434,6 +474,21 @@ err_exit:
return err;
}
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
+{
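+	/* Walk the completed descriptors, extract each hardware
+	 * timestamp and report it to the PTP layer as a Tx timestamp.
+	 */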
+ while (self->sw_head != self->hw_head) {
+ u64 ns;
+
+ aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
+ self->dx_ring +
+ (self->sw_head * self->dx_size),
+ self->dx_size, &ns);
+ aq_ptp_tx_hwtstamp(aq_nic, ns);
+
+ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+ }
+}
+
int aq_ring_rx_fill(struct aq_ring_s *self)
{
unsigned int page_order = self->page_order;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 47abd09d06c2..991e4d31b094 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
@@ -65,19 +65,20 @@ struct __packed aq_ring_buff_s {
};
union {
struct {
- u16 len;
+ u32 len:16;
u32 is_ip_cso:1;
u32 is_udp_cso:1;
u32 is_tcp_cso:1;
u32 is_cso_err:1;
u32 is_sop:1;
u32 is_eop:1;
- u32 is_gso:1;
+ u32 is_gso_tcp:1;
+ u32 is_gso_udp:1;
u32 is_mapped:1;
u32 is_cleaned:1;
u32 is_error:1;
u32 is_vlan:1;
- u32 rsvd3:5;
+ u32 rsvd3:4;
u16 eop_index;
u16 rsvd4;
};
@@ -174,4 +175,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
+struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+
#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index a95c263a45aa..f40a427970dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -103,8 +103,8 @@ err_exit:
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
- struct aq_vec_s *self = NULL;
struct aq_ring_s *ring = NULL;
+ struct aq_vec_s *self = NULL;
unsigned int i = 0U;
int err = 0;
@@ -159,6 +159,7 @@ err_exit:
aq_vec_free(self);
self = NULL;
}
+
return self;
}
@@ -263,6 +264,7 @@ void aq_vec_deinit(struct aq_vec_s *self)
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
+
err_exit:;
}
@@ -361,9 +363,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
- unsigned int count = 0U;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
+ unsigned int count = 0U;
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 359a4d387185..9b1062b8af64 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -119,10 +119,10 @@ err_exit:
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
- unsigned int i_priority = 0U;
bool is_rx_flow_control = false;
+ unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -155,7 +155,7 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
buff_size = HW_ATL_A0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -180,9 +180,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
- int err = 0;
- unsigned int i = 0U;
unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@@ -207,12 +207,12 @@ err_exit:
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- u8 *indirection_table = rss_params->indirection_table;
- u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
- int err = 0;
+ u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -321,9 +321,9 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
- int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
+ int err = 0;
if (!mac_addr) {
err = -EINVAL;
@@ -352,10 +352,9 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
-
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
- struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
@@ -404,6 +403,7 @@ static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -411,6 +411,7 @@ static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -418,6 +419,7 @@ static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -425,6 +427,7 @@ static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
return 0;
}
@@ -435,8 +438,8 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
- unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
bool is_gso = false;
buff = &ring->buff_ring[ring->sw_tail];
@@ -451,7 +454,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_gso) {
+ if (buff->is_gso_tcp) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_A0_TXD_CTL_CMD_TCP |
@@ -500,6 +503,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+
return aq_hw_err_from_flags(self);
}
@@ -507,8 +511,8 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -549,8 +553,8 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@@ -599,8 +603,8 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- int err = 0;
unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+ int err = 0;
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@@ -720,6 +724,7 @@ static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
+
return aq_hw_err_from_flags(self);
}
@@ -737,6 +742,7 @@ static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
+
return aq_hw_err_from_flags(self);
}
@@ -859,6 +865,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+
return aq_hw_err_from_flags(self);
}
@@ -866,6 +873,7 @@ static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -873,6 +881,7 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 2ad3fa6316ce..58e891af6e09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -10,6 +10,7 @@
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
+#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
@@ -42,13 +43,17 @@
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
- NETIF_F_HW_VLAN_CTAG_TX, \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
.mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
+#define FRAC_PER_NS 0x100000000LL
+
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
@@ -104,14 +109,15 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+
return 0;
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -124,13 +130,16 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ tc = 0;
+
+ /* TX Packet Scheduler Data TC0 */
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
- /* Tx buf size */
- buff_size = HW_ATL_B0_TXBUF_MAX;
+ /* Tx buf size TC0 */
+ buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
@@ -141,10 +150,15 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
+ /* Init TC2 for PTP_TX */
+ tc = 2;
+
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
+ tc);
/* QoS Rx buf size per TC */
tc = 0;
- buff_size = HW_ATL_B0_RXBUF_MAX;
+ buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
@@ -156,7 +170,15 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+
+ /* Init TC2 for PTP_RX */
+ tc = 2;
+
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
+ tc);
+ /* No flow control for PTP */
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -169,9 +191,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
- int err = 0;
- unsigned int i = 0U;
unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@@ -196,12 +218,12 @@ err_exit:
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- u8 *indirection_table = rss_params->indirection_table;
- u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
- int err = 0;
+ u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -285,6 +307,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_itr_rsc_delay_set(self, 1U);
}
+
return aq_hw_err_from_flags(self);
}
@@ -363,9 +386,9 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
- int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
+ int err = 0;
if (!mac_addr) {
err = -EINVAL;
@@ -394,11 +417,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
-
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
u32 val;
- struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
@@ -441,8 +463,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
/* Interrupts */
hw_atl_reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+ ((HW_ATL_B0_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL_B0_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
/* Enable link interrupt */
if (aq_nic_cfg->link_irq_vec)
@@ -459,6 +483,7 @@ static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -466,6 +491,7 @@ static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -473,6 +499,7 @@ static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -480,6 +507,7 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
return 0;
}
@@ -490,8 +518,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
- unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
bool is_vlan = false;
bool is_gso = false;
@@ -507,8 +535,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_gso) {
- txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
+ if (buff->is_gso_tcp || buff->is_gso_udp) {
+ if (buff->is_gso_tcp)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24);
@@ -528,7 +557,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
txd->ctl |= buff->vlan_tx_tag << 4;
is_vlan = true;
}
- if (!buff->is_gso && !buff->is_vlan) {
+ if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
buff_pa_len = buff->len;
txd->buf_addr = buff->pa;
@@ -567,6 +596,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+
return aq_hw_err_from_flags(self);
}
@@ -574,9 +604,9 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -617,8 +647,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@@ -664,11 +694,53 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ unsigned int i;
+
+ for (i = aq_ring_avail_dx(ring); i--;
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)
+ &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];
+
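+		/* Every descriptor shares the scratch buffer placed right
+		 * after the descriptor ring itself.
+		 */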
+ rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
+ rxd->hdr_addr = 0U;
+ }
+	/* Make sure descriptors are updated before bumping the tail */
+ wmb();
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ while (ring->hw_head != ring->sw_tail) {
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb =
+ (struct hw_atl_rxd_hwts_wb_s *)
+ (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));
+
+ /* RxD is not done */
+ if (!(hwts_wb->sec_lw0 & 0x1U))
+ break;
+
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
+ unsigned int hw_head_;
int err = 0;
- unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@@ -784,6 +856,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
+
return aq_hw_err_from_flags(self);
}
@@ -793,12 +866,14 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
atomic_inc(&self->dpc);
+
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
+
return aq_hw_err_from_flags(self);
}
@@ -807,8 +882,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
- unsigned int i = 0U;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int i = 0U;
hw_atl_rpfl2promiscuous_mode_en_set(self,
IS_FILTER_ENABLED(IFF_PROMISC));
@@ -846,29 +921,30 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 count)
{
int err = 0;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
err = -EBADRQC;
goto err_exit;
}
- for (self->aq_nic_cfg->mc_list_count = 0U;
- self->aq_nic_cfg->mc_list_count < count;
- ++self->aq_nic_cfg->mc_list_count) {
- u32 i = self->aq_nic_cfg->mc_list_count;
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- hw_atl_rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL_B0_MAC_MIN + i);
- hw_atl_rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ (cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@@ -995,6 +1071,7 @@ static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -1002,9 +1079,231 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
+{
+ *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
+{
+ *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define get_ptp_ts_val_u64(self, indx) \
+ ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
+
+static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
+{
+ u64 ns;
+
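+	/* Toggle the read-enable bit to latch the PTP counter, then
+	 * assemble seconds (regs 0/1) and nanoseconds (regs 3/4) from
+	 * their 16-bit halves.
+	 */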
+ hw_atl_pcs_ptp_clock_read_enable(self, 1);
+ hw_atl_pcs_ptp_clock_read_enable(self, 0);
+ ns = (get_ptp_ts_val_u64(self, 0) +
+ (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
+ (get_ptp_ts_val_u64(self, 3) +
+ (get_ptp_ts_val_u64(self, 4) << 16));
+
+ *stamp = ns + self->ptp_clk_offset;
+}
+
+static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
+{
+	/* Scale up before dividing to preserve precision: the per-tick
+	 * period ((1s + adj) / freq) is split into whole nanoseconds and
+	 * a 2^32-denominator fractional part.
+	 */
+ s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
+ u64 nsi_frac = 0;
+ u64 nsi;
+
+ base_ns = div64_s64(base_ns, freq);
+ nsi = div64_u64(base_ns, NSEC_PER_SEC);
+
+ if (base_ns != nsi * NSEC_PER_SEC) {
+ s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
+ base_ns - nsi * NSEC_PER_SEC);
+ nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
+ }
+
+ *ns = (u32)nsi;
+ *fns = (u32)nsi_frac;
+}
+
+static void
+hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
+ u64 phyfreq, u64 macfreq)
+{
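+	/* Compensate the MAC adjustment for the residual drift between
+	 * the PHY and MAC counters so both advance at the same
+	 * effective rate.
+	 */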
+ s64 adj_fns_val;
+ s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
+ FRAC_PER_NS * ptp_adj_freq->ns_phy);
+ s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
+ FRAC_PER_NS * ptp_adj_freq->ns_mac);
+ s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
+ s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
+ /* MAC MCP counter freq is macfreq / 4 */
+ s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
+ 4 * FRAC_PER_NS;
+
+ diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
+ AQ_HW_MAC_COUNTER_HZ);
+ adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
+ ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
+
+ ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
+ ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
+ FRAC_PER_NS;
+}
+
+static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
+{
+ self->ptp_clk_offset += delta;
+
+ return 0;
+}
+
+static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
+{
+ s64 delta = time - (self->ptp_clk_offset + ts);
+
+ return hw_atl_b0_adj_sys_clock(self, delta);
+}
+
+static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
+{
+ *time = self->ptp_clk_offset + ts;
+ return 0;
+}
+
+static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
+ hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_mac,
+ &fwreq.ptp_adj_freq.fns_mac);
+ hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_phy,
+ &fwreq.ptp_adj_freq.fns_phy);
+ hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
+ AQ_HW_PHY_COUNTER_HZ,
+ AQ_HW_MAC_COUNTER_HZ);
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
+ u64 start, u32 period)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
+ fwreq.ptp_gpio_ctrl.index = index;
+ fwreq.ptp_gpio_ctrl.period = period;
+ /* Apply time offset */
+ fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
+ u32 enable)
+{
+ /* Enable/disable Sync1588 GPIO Timestamping */
+ aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
+
+ return 0;
+}
+
+static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
+{
+ u64 sec_l;
+ u64 sec_h;
+ u64 nsec_l;
+ u64 nsec_h;
+
+ if (!ts)
+ return -1;
+
+ /* PTP external GPIO clock seconds count 15:0 */
+ sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
+ /* PTP external GPIO clock seconds count 31:16 */
+ sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
+ /* PTP external GPIO clock nanoseconds count 15:0 */
+ nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
+ /* PTP external GPIO clock nanoseconds count 31:16 */
+ nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
+
+ *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
+
+ return 0;
+}
+
+static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
+ unsigned int len, u64 *timestamp)
+{
+ unsigned int offset = 14;
+ struct ethhdr *eth;
+ __be64 sec;
+ __be32 ns;
+ u8 *ptr;
+
+ if (len <= offset || !timestamp)
+ return 0;
+
+	/* The TIMESTAMP at the end of the packet has the following format:
+ * (big-endian)
+ * struct {
+ * uint64_t sec;
+ * uint32_t ns;
+ * uint16_t stream_id;
+ * };
+ */
+ ptr = p + (len - offset);
+ memcpy(&sec, ptr, sizeof(sec));
+ ptr += sizeof(sec);
+ memcpy(&ns, ptr, sizeof(ns));
+
+ *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
+ be32_to_cpu(ns) + self->ptp_clk_offset;
+
+ eth = (struct ethhdr *)p;
+
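+	/* Strip the whole 14-byte footer, except for PTP-over-Ethernet
+	 * frames, where the 2-byte stream_id is kept.
+	 */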
+ return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
+}
+
+static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp)
+{
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
+ u64 tmp, sec, ns;
+
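+	/* The 48-bit seconds value is scattered across the writeback
+	 * descriptor: 10 bits in sec_lw0[11:2], 16 bits in
+	 * sec_lw1[31:16], then 12 and 10 bits in sec_hw.
+	 */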
+ sec = 0;
+ tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
+ sec += tmp;
+ tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
+ sec += tmp;
+ ns = sec * NSEC_PER_SEC + hwts_wb->ns;
+ if (timestamp)
+ *timestamp = ns + self->ptp_clk_offset;
+ return 0;
+}
+
static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
@@ -1038,7 +1337,8 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
hw_atl_b0_hw_fl3l4_clear(self, data);
- if (data->cmd) {
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
if (!data->is_ipv6) {
hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
location,
@@ -1055,8 +1355,13 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
data->ip_src);
}
}
- hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
- hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ }
+
hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
return aq_hw_err_from_flags(self);
@@ -1141,6 +1446,31 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ switch (mode) {
+ case AQ_HW_LOOPBACK_DMA_SYS:
+ hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_PKT_SYS:
+ hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
+ hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_DMA_NET:
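+		/* Network-level DMA loopback also needs promiscuous filtering
+		 * and the Tx clock gate disabled.
+		 */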
+ hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
+ hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
+ hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_net_lbk_set(self, enable);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -1177,6 +1507,27 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_set_offload = hw_atl_b0_hw_offload_set,
- .hw_set_fc = hw_atl_b0_set_fc,
+
+ .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
+ .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
+
+ .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
+ .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
+
+ .hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
+ .hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
+ .hw_set_sys_clock = hw_atl_b0_set_sys_clock,
+ .hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
+ .hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
+ .hw_gpio_pulse = hw_atl_b0_gpio_pulse,
+ .hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
+ .hw_get_sync_ts = hw_atl_b0_get_sync_ts,
+ .rx_extract_ts = hw_atl_b0_rx_extract_ts,
+ .extract_hwts = hw_atl_b0_extract_hwts,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_loopback = hw_atl_b0_set_loopback,
+ .hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 808d8cd4252a..7ab23a1751d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
@@ -64,8 +64,11 @@
#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
-#define HW_ATL_B0_TXBUF_MAX 160U
-#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_PTP_TXBUF_SIZE 8U
+
+#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_PTP_RXBUF_SIZE 16U
#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 6f340695e6bd..d1f68fc16291 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
@@ -563,6 +563,13 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_NET_LBK_ADR,
+ HW_ATL_RPB_DMA_NET_LBK_MSK,
+ HW_ATL_RPB_DMA_NET_LBK_SHIFT, dma_net_lbk);
+}
+
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode)
{
@@ -572,6 +579,13 @@ void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
rx_traf_class_mode);
}
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
+}
+
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
@@ -636,8 +650,8 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
rx_pkt_buff_size_per_tc);
}
-void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
HW_ATL_RPB_RXBXOFF_EN_MSK,
@@ -1290,6 +1304,13 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT);
+}
+
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
@@ -1327,7 +1348,26 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_
tx_dma_sys_lbk_en);
}
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_NET_LBK_ADR,
+ HW_ATL_TPB_DMA_NET_LBK_MSK,
+ HW_ATL_TPB_DMA_NET_LBK_SHIFT,
+ tx_dma_net_lbk_en);
+}
+
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_CLK_GATE_EN_ADR,
+ HW_ATL_TPB_TX_CLK_GATE_EN_MSK,
+ HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT,
+ tx_clk_gate_en);
+}
+
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
@@ -1526,6 +1566,20 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
glb_cpu_scratch_scp);
}
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT,
+ ptp_clock_read_enable);
+}
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(index));
+}
+
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
@@ -1616,6 +1670,11 @@ u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
}
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO);
+}
+
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
{
return aq_hw_read_reg(aq_hw,
@@ -1631,3 +1690,60 @@ u32 hw_atl_scrpad25_get(struct aq_hw_s *self)
{
return hw_atl_scrpad_get(self, 0x18);
}
+
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1), value);
+}
+
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1));
+}
+
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2), value);
+}
+
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2));
+}
+
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3), value);
+}
+
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3));
+}
+
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4), value);
+}
+
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4));
+}
+
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5), value);
+}
+
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5));
+}
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MDIO_BUSY_ADR,
+ HW_ATL_MDIO_BUSY_MSK,
+ HW_ATL_MDIO_BUSY_SHIFT);
+}
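A hypothetical caller of the new busy-bit accessor; readx_poll_timeout_atomic() comes from <linux/iopoll.h> and is already used elsewhere in this driver. A real MDIO transaction would also take the MDIO semaphore (hw_atl_sem_mdio_get()) first:

#include <linux/iopoll.h>

/* Sketch: wait up to 10ms, polling every 10us, for the MDIO engine. */
static int sketch_mdio_wait_idle(struct aq_hw_s *aq_hw)
{
	u32 val;

	return readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
					 val, val == 0U, 10U, 10000U);
}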
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index c3ee278c3747..62992b23c0e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
@@ -288,10 +288,16 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
/* set dma system loopback */
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+/* set dma network loopback */
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);
+
/* set rx traffic class mode */
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
+/* get rx traffic class mode */
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
+
/* set rx buffer enable */
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
@@ -306,7 +312,8 @@ void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 buffer);
/* set rx flow control mode */
-void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
@@ -320,7 +327,8 @@ void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
/* set rx xoff enable (per tc) */
-void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
u32 buffer);
/* rpf */
@@ -605,6 +613,9 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
+/* get tx traffic class mode */
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
@@ -621,9 +632,18 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
/* set tx dma system loopback enable */
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+/* set tx dma network loopback enable */
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en);
+
+/* set tx clock gating enable */
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en);
+
/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+ u32 tx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set tx path pad insert enable */
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
@@ -715,6 +735,12 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* set pci register reset disable */
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+/* pcs */
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable);
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);
+
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
@@ -752,9 +778,44 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest);
+/* set Global MDIO Interface 1 */
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 1 */
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 2 */
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 2 */
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 3 */
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 3 */
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 4 */
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 4 */
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 5 */
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 5 */
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);
+
/* get global microprocessor ram semaphore */
u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
+/* get global microprocessor mdio semaphore */
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
+
/* get global microprocessor scratch pad register */
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 35887ad89025..18de2f7b8959 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh_internal.h: Preprocessor definitions
@@ -554,6 +554,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
+/* rx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_rpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_ADR 0x00005000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_DEFAULT 0x0
+
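Every bitfield block in this header follows the same convention: *_MSK selects the field, *_MSKN is its bitwise complement, *_SHIFT is the field's lowest bit and *_WIDTH its size in bits. A hypothetical compile-time sanity check (static_assert via <linux/build_bug.h> in recent kernels; not part of this patch) makes the invariants explicit:

#include <linux/bits.h>
#include <linux/build_bug.h>

/* dma_net_loopback is a 1-bit field at bit 4 of register 0x5000. */
static_assert(HW_ATL_RPB_DMA_NET_LBK_MSK ==
	      GENMASK(HW_ATL_RPB_DMA_NET_LBK_SHIFT +
		      HW_ATL_RPB_DMA_NET_LBK_WIDTH - 1,
		      HW_ATL_RPB_DMA_NET_LBK_SHIFT));
static_assert(HW_ATL_RPB_DMA_NET_LBK_MSKN ==
	      (u32)~HW_ATL_RPB_DMA_NET_LBK_MSK);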
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
* port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
@@ -1308,6 +1326,52 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l3_l4_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_l4_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_l4_en_i[0]"
+ */
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(filter) (0x00005380 + (filter) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
/* RX l4_sp{D}[F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
* Parameter: srcport {D} | stride size 0x4 | range [0, 7]
@@ -2061,6 +2125,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
+/* tx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_tpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_ADR 0x00007000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_DEFAULT 0x0
+
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@@ -2098,6 +2180,24 @@
/* default value of bitfield tx_scp_ins_en */
#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
+/* tx tx_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_clk_gate_en".
+ * port="pif_tpb_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSK 0x00000010
+/* inverted bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSKN 0xffffffef
+/* lower bit position of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT 4
+/* width of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_DEFAULT 0x1
+
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_tpo_ipv4_chk_en_i"
@@ -2440,6 +2540,22 @@
/* default value of bitfield register write strobe */
#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
+/* register address for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR 0x00004628
+/* bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK 0x00000010
+/* inverted bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSKN 0xFFFFFFEF
+/* lower bit position of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT 4
+/* width of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH 1
+/* default value of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_DEFAULT 0x0
+
+/* register address for ptp counter reading */
+#define HW_ATL_PCS_PTP_TS_VAL_ADDR(index) (0x00004900 + (index) * 0x4)
+
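How the new PCS definitions fit together, as a sketch; which indexed TS_VAL word carries seconds versus nanoseconds is an assumption here, not something this patch states:

/* Sketch: latch and read the PTP digital clock. */
static u64 sketch_ptp_clock_read(struct aq_hw_s *aq_hw)
{
	u32 ns, sec;

	/* Pulse the read-enable bit so the TS_VAL words are captured
	 * coherently before they are read back one at a time.
	 */
	hw_atl_pcs_ptp_clock_read_enable(aq_hw, 1);
	hw_atl_pcs_ptp_clock_read_enable(aq_hw, 0);

	/* Assumed word order: index 0 = nanoseconds, index 1 = seconds. */
	ns = hw_atl_pcs_ptp_clock_get(aq_hw, 0);
	sec = hw_atl_pcs_ptp_clock_get(aq_hw, 1);

	return (u64)sec * NSEC_PER_SEC + ns;
}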
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
* port="pif_glb_res_i"
@@ -2532,50 +2648,121 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
-#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
-#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
-#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
-
-#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
-
-/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
- * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
- * Parameter: location {D} | stride size 0x4 | range [0, 7]
- * PORT="pif_rpf_l3_sa0_i[31:0]"
- */
-
-/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
-#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
-/* Bitmask for bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
-/* Inverted bitmask for bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
-/* Lower bit position of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_SHIFT 0
-/* Width of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_WIDTH 32
-/* Default value of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
-
-/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
- * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
- * Parameter: location {D} | stride size 0x4 | range [0, 7]
- * PORT="pif_rpf_l3_da0_i[31:0]"
- */
-
- /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4)
-/* Bitmask for bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
-/* Inverted bitmask for bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
-/* Lower bit position of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_SHIFT 0
-/* Width of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_WIDTH 32
-/* Default value of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
-
+/* Preprocessor definitions for Global MDIO Interfaces
+ * Address: 0x00000280 + 0x4 * Number of interface
+ */
+#define HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN 0x00000280u
+
+#define HW_ATL_GLB_MDIO_IFACE_N_ADR(number) \
+ (HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN + (((number) - 1) * 0x4))
+
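Worked expansion of the macro, for reference; the roles of interfaces 1 and 5 are not defined by this patch:

/* HW_ATL_GLB_MDIO_IFACE_N_ADR(n):
 *   n = 1 -> 0x280
 *   n = 2 -> 0x284  (MDIO busy / execute / op mode / PHY address, below)
 *   n = 3 -> 0x288  (MDIO write data, below)
 *   n = 4 -> 0x28C  (MDIO address, below)
 *   n = 5 -> 0x290
 */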
+/* MIF MDIO Busy Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Busy".
+ * PORT="mdio_pif_busy_o"
+ */
+
+/* Register address for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_ADR 0x00000284
+/* Bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSK 0x80000000
+/* Inverted bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_SHIFT 31
+/* Width of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_WIDTH 1
+
+/* MIF MDIO Execute Operation Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Execute Operation".
+ * PORT="pif_mdio_op_start_i"
+ */
+
+/* Register address for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_ADR 0x00000284
+/* Bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSK 0x00008000
+/* Inverted bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSKN 0xFFFF7FFF
+/* Lower bit position of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_SHIFT 15
+/* Width of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_WIDTH 1
+/* Default value of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_DEFAULT 0x0
+
+/* MIF Op Mode [1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "Op Mode [1:0]".
+ * PORT="pif_mdio_mode_i[1:0]"
+ */
+
+/* Register address for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_ADR 0x00000284
+/* Bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSK 0x00003000
+/* Inverted bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSKN 0xFFFFCFFF
+/* Lower bit position of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_SHIFT 12
+/* Width of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_WIDTH 2
+/* Default value of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_DEFAULT 0x0
+
+/* MIF PHY address Bitfield Definitions
+ * Preprocessor definitions for the bitfield "PHY address".
+ * PORT="pif_mdio_phy_addr_i[9:0]"
+ */
+
+/* Register address for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_ADR 0x00000284
+/* Bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSK 0x000003FF
+/* Inverted bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSKN 0xFFFFFC00
+/* Lower bit position of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_SHIFT 0
+/* Width of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_WIDTH 10
+/* Default value of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_DEFAULT 0x0
+
+/* MIF MDIO WriteData [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO WriteData [F:0]".
+ * PORT="pif_mdio_wdata_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_ADR 0x00000288
+/* Bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_SHIFT 0
+/* Width of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_WIDTH 16
+/* Default value of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_DEFAULT 0x0
+
+/* MIF MDIO Address [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Address [F:0]".
+ * PORT="pif_mdio_addr_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_ADR 0x0000028C
+/* Bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_SHIFT 0
+/* Width of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_WIDTH 16
+/* Default value of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0
+
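Putting the MDIO bitfields together, a hypothetical raw read transaction could look as follows. The opcode value (3 for a read) and the use of interface 5 as the read-data register are assumptions, and a real caller would first claim the MDIO semaphore via hw_atl_sem_mdio_get():

/* Sketch: one raw MDIO read using the register map above. */
static int sketch_mdio_read(struct aq_hw_s *aq_hw, u32 phy, u32 reg_addr,
			    u32 *data)
{
	u32 cmd;
	int err;

	/* Load the target register address (0x28C). */
	aq_hw_write_reg(aq_hw, HW_ATL_MDIO_ADDRESS_ADR,
			reg_addr & HW_ATL_MDIO_ADDRESS_MSK);

	/* Compose PHY address and op mode, then strobe "execute" (0x284). */
	cmd = (phy << HW_ATL_MDIO_PHY_ADDRESS_SHIFT) &
	      HW_ATL_MDIO_PHY_ADDRESS_MSK;
	cmd |= (3U << HW_ATL_MDIO_OP_MODE_SHIFT) &
	       HW_ATL_MDIO_OP_MODE_MSK;	/* assumed read opcode */
	cmd |= HW_ATL_MDIO_EXECUTE_OPERATION_MSK;
	aq_hw_write_reg(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_ADR, cmd);

	/* Wait for the busy bit (0x284, bit 31) to clear. */
	err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
					cmd, cmd == 0U, 10U, 10000U);
	if (err < 0)
		return err;

	/* Assumed: interface 5 (0x290) holds the returned data. */
	*data = hw_atl_glb_mdio_iface5_get(aq_hw);

	return 0;
}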
+#define HW_ATL_FW_SM_MDIO 0x0U
#define HW_ATL_FW_SM_RAM 0x2U
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 52646855495e..8910b62e67ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -47,6 +47,11 @@
#define FORCE_FLASHLESS 0
+enum mcp_area {
+ MCP_AREA_CONFIG = 0x80000000,
+ MCP_AREA_SETTINGS = 0x20000000,
+};
+
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
@@ -87,6 +92,7 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
}
self->aq_fw_ops = *fw_ops;
err = self->aq_fw_ops->init(self);
+
return err;
}
@@ -237,9 +243,9 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
- int k;
u32 boot_exit_code = 0;
u32 val;
+ int k;
for (k = 0; k < 1000; ++k) {
u32 flb_status = aq_hw_read_reg(self,
@@ -327,11 +333,75 @@ err_exit:
return err;
}
-static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
- u32 cnt)
+static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt, enum mcp_area area)
{
+ u32 data_offset = 0;
+ u32 offset = addr;
+ int err = 0;
u32 val;
+
+ switch (area) {
+ case MCP_AREA_CONFIG:
+ offset -= self->rpc_addr;
+ break;
+
+ case MCP_AREA_SETTINGS:
+ offset -= self->settings_addr;
+ break;
+ }
+
+ offset = offset / sizeof(u32);
+
+ for (; data_offset < cnt; ++data_offset, ++offset) {
+ aq_hw_write_reg(self, 0x328, p[data_offset]);
+ aq_hw_write_reg(self, 0x32C,
+ (area | (0xFFFF & (offset * 4))));
+ hw_atl_mcp_up_force_intr_set(self, 1);
+ /* 1000 times by 10us = 10ms */
+ err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
+ self, val,
+ (val & 0xF0000000) !=
+ area,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
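The B1 mailbox handshake above packs both the destination area and the byte offset into the single word written at 0x32C, then waits until firmware clears the area tag out of the top nibble of scratchpad 12. A worked example with hypothetical addresses:

/* Hypothetical numbers: settings_addr = 0x20000, addr = 0x20010.
 *   offset = (0x20010 - 0x20000) / 4 = 4 dwords
 *   word written to 0x32C = MCP_AREA_SETTINGS | (4 * 4) = 0x20000010
 * Firmware acks each dword by replacing the 0x2 top nibble in
 * scratchpad 12, which satisfies the readx_poll_timeout_atomic() wait.
 */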
+static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt)
+{
+ u32 offset = 0;
int err = 0;
+ u32 val;
+
+ aq_hw_write_reg(self, 0x208, addr);
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x20C, p[offset]);
+ aq_hw_write_reg(self, 0x200, 0xC000);
+
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ (val & 0x100) == 0U,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
+ u32 cnt, enum mcp_area area)
+{
+ int err = 0;
+ u32 val;
err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
val, val == 1U,
@@ -339,54 +409,47 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_B1)) {
- u32 offset = 0;
-
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x328, p[offset]);
- aq_hw_write_reg(self, 0x32C,
- (0x80000000 | (0xFFFF & (offset * 4))));
- hw_atl_mcp_up_force_intr_set(self, 1);
- /* 1000 times by 10us = 10ms */
- err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
- self, val,
- (val & 0xF0000000) !=
- 0x80000000,
- 10U, 10000U);
- }
- } else {
- u32 offset = 0;
-
- aq_hw_write_reg(self, 0x208, a);
+ if (IS_CHIP_FEATURE(REVISION_B1))
+ err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
+ else
+ err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x20C, p[offset]);
- aq_hw_write_reg(self, 0x200, 0xC000);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
- self, val,
- (val & 0x100) == 0,
- 1000U, 10000U);
- }
- }
+ if (err < 0)
+ goto err_exit;
- hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
+ cnt, MCP_AREA_CONFIG);
+}
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
+ p, cnt, MCP_AREA_SETTINGS);
+}
+
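A hypothetical user of the new settings writer, pushing a single field of struct hw_atl_utils_settings (declared later in this patch in hw_atl_utils.h). Whether firmware additionally needs a CAPS_EX_UPDATE_SETTINGS toggle to pick the value up is not shown here:

/* Sketch: write the media_detect settings dword into firmware RAM. */
static int sketch_push_media_detect(struct aq_hw_s *self, u32 enable)
{
	u32 offset = offsetof(struct hw_atl_utils_settings, media_detect);

	return hw_atl_write_fwsettings_dwords(self, offset, &enable, 1);
}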
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
- int err = 0;
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
+ int err = 0;
err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
if (err < 0)
goto err_exit;
err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
-EOPNOTSUPP : 0;
+
err_exit:
return err;
}
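The reordered hw_atl_utils_ver_match() keeps its original semantics: the major byte must match exactly, and the actual minor part must be at least the expected one. Illustrated with hypothetical version words:

/* expected 0x0301001AU, actual 0x0301005AU -> 0 (same major, newer minor)
 * expected 0x0301005AU, actual 0x0301001AU -> -EOPNOTSUPP (older minor)
 * expected 0x0301005AU, actual 0x0201005AU -> -EOPNOTSUPP (major differs)
 */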
@@ -431,17 +494,16 @@ struct aq_hw_atl_utils_fw_rpc_tid_s {
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
- int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ int err = 0;
if (!IS_CHIP_FEATURE(MIPS)) {
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
- (u32 *)(void *)&self->rpc,
- (rpc_size + sizeof(u32) -
- sizeof(u8)) / sizeof(u32));
+ err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
@@ -456,9 +518,9 @@ err_exit:
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc)
{
- int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+ int err = 0;
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
@@ -562,10 +624,10 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- int err = 0;
- u32 transaction_id = 0;
- struct hw_atl_utils_mbox_header mbox;
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ struct hw_atl_utils_mbox_header mbox;
+ u32 transaction_id = 0;
+ int err = 0;
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -593,20 +655,26 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
val |= state & HW_ATL_MPI_STATE_MSK;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+
err_exit:
return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
- u32 cp0x036C = hw_atl_utils_mpi_get_state(self);
- u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
- if (!link_speed_mask) {
+ mpi_state = hw_atl_utils_mpi_get_state(self);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
+
+ if (!speed) {
link_status->mbps = 0U;
} else {
- switch (link_speed_mask) {
+ switch (speed) {
case HAL_ATLANTIC_RATE_10G:
link_status->mbps = 10000U;
break;
@@ -639,14 +707,15 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u8 *mac)
{
+ u32 mac_addr[2];
+ u32 efuse_addr;
int err = 0;
u32 h = 0U;
u32 l = 0U;
- u32 mac_addr[2];
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
- unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
+ unsigned int rnd = 0;
get_random_bytes(&rnd, sizeof(unsigned int));
@@ -654,11 +723,10 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- err = hw_atl_utils_fw_downld_dwords(self,
- aq_hw_read_reg(self, 0x00000374U) +
- (40U * 4U),
- mac_addr,
- ARRAY_SIZE(mac_addr));
+ efuse_addr = aq_hw_read_reg(self, 0x00000374U);
+
+ err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
+ mac_addr, ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -720,14 +788,15 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
default:
break;
}
+
return ret;
}
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
- u32 chip_features = 0U;
u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
+ u32 chip_features = 0U;
if ((0xFU & mif_rev) == 1U) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
@@ -754,13 +823,14 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
hw_atl_utils_mpi_set_speed(self, 0);
hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+
return 0;
}
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_utils_mbox mbox;
struct aq_stats_s *cs = &self->curr_stats;
+ struct hw_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
@@ -837,16 +907,19 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
hw_atl_utils_hw_mac_regs[i]);
+
return 0;
}
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
*fw_version = aq_hw_read_reg(self, 0x18U);
+
return 0;
}
-static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
+static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
+ u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -859,22 +932,26 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
memset(prpc, 0, sizeof(*prpc));
if (wol_enabled) {
- rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol);
+ rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
+ sizeof(prpc->msg_wol_add);
+
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
- prpc->msg_wol.priority =
+ prpc->msg_wol_add.priority =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
- prpc->msg_wol.pattern_id =
+ prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
- prpc->msg_wol.wol_packet_type =
+ prpc->msg_wol_add.packet_type =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;
- ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac);
+ ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
+ mac);
} else {
- rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id);
+ rpc_size = sizeof(prpc->msg_wol_remove) +
+ offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
- prpc->msg_wol.pattern_id =
+ prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
}
@@ -891,8 +968,8 @@ static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
unsigned int rpc_size = 0U;
int err = 0;
- if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
- err = aq_fw1x_set_wol(self, 1, mac);
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ err = aq_fw1x_set_wake_magic(self, 1, mac);
if (err < 0)
goto err_exit;
@@ -964,4 +1041,7 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_eee_rate = NULL,
.get_eee_rate = NULL,
.set_flow_control = NULL,
+ .send_fw_request = NULL,
+ .enable_ptp = NULL,
+ .led_control = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 692bed70e104..42f0c5c6ec2d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -41,7 +41,15 @@ struct __packed hw_atl_rxd_wb_s {
u16 status;
u16 pkt_len;
u16 next_desc_ptr;
- u16 vlan;
+ __le16 vlan;
+};
+
+/* Hardware rx HW TIMESTAMP writeback */
+struct __packed hw_atl_rxd_hwts_wb_s {
+ u32 sec_hw;
+ u32 ns;
+ u32 sec_lw0;
+ u32 sec_lw1;
};
struct __packed hw_atl_stats_s {
@@ -62,104 +70,41 @@ struct __packed hw_atl_stats_s {
u32 dpc;
};
-union __packed ip_addr {
- struct {
- u8 addr[16];
- } v6;
- struct {
- u8 padding[12];
- u8 addr[4];
- } v4;
-};
-
-struct __packed hw_atl_utils_fw_rpc {
- u32 msg_id;
-
+struct __packed drv_msg_enable_wakeup {
union {
- struct {
- u32 pong;
- } msg_ping;
+ u32 pattern_mask;
struct {
- u8 mac_addr[6];
- u32 ip_addr_cnt;
+ u32 reason_arp_v4_pkt : 1;
+ u32 reason_ipv4_ping_pkt : 1;
+ u32 reason_ipv6_ns_pkt : 1;
+ u32 reason_ipv6_ping_pkt : 1;
+ u32 reason_link_up : 1;
+ u32 reason_link_down : 1;
+ u32 reason_maximum : 1;
+ };
+ };
- struct {
- union ip_addr addr;
- union ip_addr mask;
- } ip[1];
- } msg_arp;
+ union {
+ u32 offload_mask;
+ };
+};
- struct {
- u32 len;
- u8 packet[1514U];
- } msg_inject;
+struct __packed magic_packet_pattern_s {
+ u8 mac_addr[ETH_ALEN];
+};
- struct {
- u32 priority;
- u32 wol_packet_type;
- u32 pattern_id;
- u32 next_wol_pattern_offset;
-
- union {
- struct {
- u32 flags;
- u8 ipv4_source_address[4];
- u8 ipv4_dest_address[4];
- u16 tcp_source_port_number;
- u16 tcp_dest_port_number;
- } ipv4_tcp_syn_parameters;
-
- struct {
- u32 flags;
- u8 ipv6_source_address[16];
- u8 ipv6_dest_address[16];
- u16 tcp_source_port_number;
- u16 tcp_dest_port_number;
- } ipv6_tcp_syn_parameters;
-
- struct {
- u32 flags;
- } eapol_request_id_message_parameters;
-
- struct {
- u32 flags;
- u32 mask_offset;
- u32 mask_size;
- u32 pattern_offset;
- u32 pattern_size;
- } wol_bit_map_pattern;
-
- struct {
- u8 mac_addr[ETH_ALEN];
- } wol_magic_packet_patter;
- } wol_pattern;
- } msg_wol;
+struct __packed drv_msg_wol_add {
+ u32 priority;
+ u32 packet_type;
+ u32 pattern_id;
+ u32 next_pattern_offset;
- struct {
- union {
- u32 pattern_mask;
-
- struct {
- u32 reason_arp_v4_pkt : 1;
- u32 reason_ipv4_ping_pkt : 1;
- u32 reason_ipv6_ns_pkt : 1;
- u32 reason_ipv6_ping_pkt : 1;
- u32 reason_link_up : 1;
- u32 reason_link_down : 1;
- u32 reason_maximum : 1;
- };
- };
-
- union {
- u32 offload_mask;
- };
- } msg_enable_wakeup;
+ struct magic_packet_pattern_s magic_packet_pattern;
+};
- struct {
- u32 id;
- } msg_del_id;
- };
+struct __packed drv_msg_wol_remove {
+ u32 id;
};
struct __packed hw_atl_utils_mbox_header {
@@ -168,43 +113,89 @@ struct __packed hw_atl_utils_mbox_header {
u32 error;
};
-struct __packed hw_aq_info {
+struct __packed hw_atl_ptp_offset {
+ u16 ingress_100;
+ u16 egress_100;
+ u16 ingress_1000;
+ u16 egress_1000;
+ u16 ingress_2500;
+ u16 egress_2500;
+ u16 ingress_5000;
+ u16 egress_5000;
+ u16 ingress_10000;
+ u16 egress_10000;
+};
+
+struct __packed hw_atl_cable_diag {
+ u8 fault;
+ u8 distance;
+ u8 far_distance;
+ u8 reserved;
+};
+
+enum gpio_pin_function {
+ GPIO_PIN_FUNCTION_NC,
+ GPIO_PIN_FUNCTION_VAUX_ENABLE,
+ GPIO_PIN_FUNCTION_EFUSE_BURN_ENABLE,
+ GPIO_PIN_FUNCTION_SFP_PLUS_DETECT,
+ GPIO_PIN_FUNCTION_TX_DISABLE,
+ GPIO_PIN_FUNCTION_RATE_SEL_0,
+ GPIO_PIN_FUNCTION_RATE_SEL_1,
+ GPIO_PIN_FUNCTION_TX_FAULT,
+ GPIO_PIN_FUNCTION_PTP0,
+ GPIO_PIN_FUNCTION_PTP1,
+ GPIO_PIN_FUNCTION_PTP2,
+ GPIO_PIN_FUNCTION_SIZE
+};
+
+struct __packed hw_atl_info {
u8 reserved[6];
u16 phy_fault_code;
u16 phy_temperature;
u8 cable_len;
u8 reserved1;
- u32 cable_diag_data[4];
- u8 reserved2[32];
+ struct hw_atl_cable_diag cable_diag_data[4];
+ struct hw_atl_ptp_offset ptp_offset;
+ u8 reserved2[12];
u32 caps_lo;
u32 caps_hi;
+ u32 reserved_datapath;
+ u32 reserved3[7];
+ u32 reserved_simpleresp[3];
+ u32 reserved_linkstat[7];
+ u32 reserved_wakes_count;
+ u32 reserved_eee_stat[12];
+ u32 tx_stuck_cnt;
+ u32 setting_address;
+ u32 setting_length;
+ u32 caps_ex;
+ enum gpio_pin_function gpio_pin[3];
+ u32 pcie_aer_dump[18];
+ u16 snr_margin[4];
};
struct __packed hw_atl_utils_mbox {
struct hw_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
- struct hw_aq_info info;
+ struct hw_atl_info info;
};
-/* fw2x */
-typedef u32 fw_offset_t;
-
struct __packed offload_ip_info {
u8 v4_local_addr_count;
u8 v4_addr_count;
u8 v6_local_addr_count;
u8 v6_addr_count;
- fw_offset_t v4_addr;
- fw_offset_t v4_prefix;
- fw_offset_t v6_addr;
- fw_offset_t v6_prefix;
+ u32 v4_addr;
+ u32 v4_prefix;
+ u32 v6_addr;
+ u32 v6_prefix;
};
struct __packed offload_port_info {
u16 udp_port_count;
u16 tcp_port_count;
- fw_offset_t udp_port;
- fw_offset_t tcp_port;
+ u32 udp_port;
+ u32 tcp_port;
};
struct __packed offload_ka_info {
@@ -212,15 +203,15 @@ struct __packed offload_ka_info {
u16 v6_ka_count;
u32 retry_count;
u32 retry_interval;
- fw_offset_t v4_ka;
- fw_offset_t v6_ka;
+ u32 v4_ka;
+ u32 v6_ka;
};
struct __packed offload_rr_info {
u32 rr_count;
u32 rr_buf_len;
- fw_offset_t rr_id_x;
- fw_offset_t rr_buf;
+ u32 rr_id_x;
+ u32 rr_buf;
};
struct __packed offload_info {
@@ -237,9 +228,103 @@ struct __packed offload_info {
u8 buf[0];
};
+struct __packed hw_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ /* fw1x structures */
+ struct drv_msg_wol_add msg_wol_add;
+ struct drv_msg_wol_remove msg_wol_remove;
+ struct drv_msg_enable_wakeup msg_enable_wakeup;
+ /* fw2x structures */
+ struct offload_info fw2x_offloads;
+ };
+};
+
+/* Mailbox FW Request interface */
+struct __packed hw_fw_request_ptp_gpio_ctrl {
+ u32 index;
+ u32 period;
+ u64 start;
+};
+
+struct __packed hw_fw_request_ptp_adj_freq {
+ u32 ns_mac;
+ u32 fns_mac;
+ u32 ns_phy;
+ u32 fns_phy;
+ u32 mac_ns_adj;
+ u32 mac_fns_adj;
+};
+
+struct __packed hw_fw_request_ptp_adj_clock {
+ u32 ns;
+ u32 sec;
+ int sign;
+};
+
+#define HW_AQ_FW_REQUEST_PTP_GPIO_CTRL 0x11
+#define HW_AQ_FW_REQUEST_PTP_ADJ_FREQ 0x12
+#define HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK 0x13
+
+struct __packed hw_fw_request_iface {
+ u32 msg_id;
+ union {
+ /* PTP FW Request */
+ struct hw_fw_request_ptp_gpio_ctrl ptp_gpio_ctrl;
+ struct hw_fw_request_ptp_adj_freq ptp_adj_freq;
+ struct hw_fw_request_ptp_adj_clock ptp_adj_clock;
+ };
+};
+
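A hypothetical sender for the new mailbox FW request interface, stepping the PTP clock forward by 1.5s; it assumes 2.x firmware so that aq_fw_ops->send_fw_request points at aq_fw2x_send_fw_request() (defined later in this patch), and the meaning of .sign = 1 as a positive adjustment is likewise assumed:

/* Sketch: build and submit a PTP clock-adjust request. */
static int sketch_ptp_adj_clock(struct aq_hw_s *self)
{
	struct hw_fw_request_iface req = {
		.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK,
		.ptp_adj_clock = {
			.ns   = 500000000,
			.sec  = 1,
			.sign = 1,	/* assumed: positive step */
		},
	};

	return self->aq_fw_ops->send_fw_request(self, &req, sizeof(req));
}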
+struct __packed hw_atl_utils_settings {
+ u32 mtu;
+ u32 downshift_retry_count;
+ u32 link_pause_frame_quanta_100m;
+ u32 link_pause_frame_threshold_100m;
+ u32 link_pause_frame_quanta_1g;
+ u32 link_pause_frame_threshold_1g;
+ u32 link_pause_frame_quanta_2p5g;
+ u32 link_pause_frame_threshold_2p5g;
+ u32 link_pause_frame_quanta_5g;
+ u32 link_pause_frame_threshold_5g;
+ u32 link_pause_frame_quanta_10g;
+ u32 link_pause_frame_threshold_10g;
+ u32 pfc_quanta_class_0;
+ u32 pfc_threshold_class_0;
+ u32 pfc_quanta_class_1;
+ u32 pfc_threshold_class_1;
+ u32 pfc_quanta_class_2;
+ u32 pfc_threshold_class_2;
+ u32 pfc_quanta_class_3;
+ u32 pfc_threshold_class_3;
+ u32 pfc_quanta_class_4;
+ u32 pfc_threshold_class_4;
+ u32 pfc_quanta_class_5;
+ u32 pfc_threshold_class_5;
+ u32 pfc_quanta_class_6;
+ u32 pfc_threshold_class_6;
+ u32 pfc_quanta_class_7;
+ u32 pfc_threshold_class_7;
+ u32 eee_link_down_timeout;
+ u32 eee_link_up_timeout;
+ u32 eee_max_link_drops;
+ u32 eee_rates_mask;
+ u32 wake_timer;
+ u32 thermal_shutdown_off_temp;
+ u32 thermal_shutdown_warning_temp;
+ u32 thermal_shutdown_cold_temp;
+ u32 msm_options;
+ u32 dac_cable_serdes_modes;
+ u32 media_detect;
+};
+
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
+ HW_ATL_RX_MNGMNT,
+ HW_ATL_RX_HOST_AND_MNGMNT,
+ HW_ATL_RX_WOL
};
struct aq_rx_filter_vlan {
@@ -321,20 +406,12 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
-#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U
-#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U
-#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U
#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U
-#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U
-#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U
-#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U
-#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU
-#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU
enum hw_atl_fw2x_rate {
FW2X_RATE_100M = 0x20,
@@ -344,91 +421,135 @@ enum hw_atl_fw2x_rate {
FW2X_RATE_10G = 0x800,
};
+/* 0x370
+ * Link capabilities resolution register
+ */
enum hw_atl_fw2x_caps_lo {
- CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_HD = 0,
CAPS_LO_10BASET_FD,
CAPS_LO_100BASETX_HD,
CAPS_LO_100BASET4_HD,
CAPS_LO_100BASET2_HD,
- CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASETX_FD = 5,
CAPS_LO_100BASET2_FD,
CAPS_LO_1000BASET_HD,
CAPS_LO_1000BASET_FD,
CAPS_LO_2P5GBASET_FD,
- CAPS_LO_5GBASET_FD,
+ CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
};
+/* 0x374
+ * Status register
+ */
enum hw_atl_fw2x_caps_hi {
- CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_RESERVED1 = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
- CAPS_HI_100BASETX_EEE,
+ CAPS_HI_100BASETX_EEE = 5,
CAPS_HI_RESERVED3,
CAPS_HI_RESERVED4,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
- CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
- CAPS_HI_RESERVED5,
+ CAPS_HI_FW_REQUEST,
CAPS_HI_RESERVED6,
CAPS_HI_RESERVED7,
- CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED8 = 15,
CAPS_HI_RESERVED9,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
- CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_PTP_AVB_EN_FW2X = 20,
CAPS_HI_MEDIA_DETECT,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
- CAPS_HI_MAC_STOP,
+ CAPS_HI_MAC_STOP = 25,
CAPS_HI_EXT_LOOPBACK,
CAPS_HI_INT_LOOPBACK,
CAPS_HI_EFUSE_AGENT,
CAPS_HI_WOL_TIMER,
- CAPS_HI_STATISTICS,
+ CAPS_HI_STATISTICS = 30,
CAPS_HI_TRANSACTION_ID,
};
+/* 0x36C
+ * Control register
+ */
enum hw_atl_fw2x_ctrl {
- CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED1 = 0,
CTRL_RESERVED2,
CTRL_RESERVED3,
CTRL_PAUSE,
CTRL_ASYMMETRIC_PAUSE,
- CTRL_RESERVED4,
+ CTRL_RESERVED4 = 5,
CTRL_RESERVED5,
CTRL_RESERVED6,
CTRL_1GBASET_FD_EEE,
CTRL_2P5GBASET_FD_EEE,
- CTRL_5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE = 10,
CTRL_10GBASET_FD_EEE,
CTRL_THERMAL_SHUTDOWN,
CTRL_PHY_LOGS,
CTRL_EEE_AUTO_DISABLE,
- CTRL_PFC,
+ CTRL_PFC = 15,
CTRL_WAKE_ON_LINK,
CTRL_CABLE_DIAG,
CTRL_TEMPERATURE,
CTRL_DOWNSHIFT,
- CTRL_PTP_AVB,
+ CTRL_PTP_AVB = 20,
CTRL_RESERVED7,
CTRL_LINK_DROP,
CTRL_SLEEP_PROXY,
CTRL_WOL,
- CTRL_MAC_STOP,
+ CTRL_MAC_STOP = 25,
CTRL_EXT_LOOPBACK,
CTRL_INT_LOOPBACK,
CTRL_RESERVED8,
CTRL_WOL_TIMER,
- CTRL_STATISTICS,
+ CTRL_STATISTICS = 30,
CTRL_FORCE_RECONNECT,
};
+enum hw_atl_caps_ex {
+ CAPS_EX_LED_CONTROL = 0,
+ CAPS_EX_LED0_MODE_LO,
+ CAPS_EX_LED0_MODE_HI,
+ CAPS_EX_LED1_MODE_LO,
+ CAPS_EX_LED1_MODE_HI,
+ CAPS_EX_LED2_MODE_LO = 5,
+ CAPS_EX_LED2_MODE_HI,
+ CAPS_EX_RESERVED07,
+ CAPS_EX_RESERVED08,
+ CAPS_EX_RESERVED09,
+ CAPS_EX_RESERVED10 = 10,
+ CAPS_EX_RESERVED11,
+ CAPS_EX_RESERVED12,
+ CAPS_EX_RESERVED13,
+ CAPS_EX_RESERVED14,
+ CAPS_EX_RESERVED15 = 15,
+ CAPS_EX_PHY_PTP_EN,
+ CAPS_EX_MAC_PTP_EN,
+ CAPS_EX_EXT_CLK_EN,
+ CAPS_EX_SCHED_DMA_EN,
+ CAPS_EX_PTP_GPIO_EN = 20,
+ CAPS_EX_UPDATE_SETTINGS,
+ CAPS_EX_PHY_CTRL_TS_PIN,
+ CAPS_EX_SNR_OPERATING_MARGIN,
+ CAPS_EX_RESERVED24,
+ CAPS_EX_RESERVED25 = 25,
+ CAPS_EX_RESERVED26,
+ CAPS_EX_RESERVED27,
+ CAPS_EX_RESERVED28,
+ CAPS_EX_RESERVED29,
+ CAPS_EX_RESERVED30 = 30,
+ CAPS_EX_RESERVED31
+};
+
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
@@ -475,6 +596,11 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt);
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt);
+
int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 7bc51f8d6f2f..97ebf849695f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -17,26 +17,34 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
-#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
+#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c
+#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
-#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
-#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
-#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
-#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
-#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
-#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+#define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
+#define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
+#define HW_ATL_FW2X_CTRL_WAKE_ON_LINK BIT(CTRL_WAKE_ON_LINK)
#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY)
#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
+#define HW_ATL_FW2X_CTRL_INT_LOOPBACK BIT(CTRL_INT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_EXT_LOOPBACK BIT(CTRL_EXT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_DOWNSHIFT BIT(CTRL_DOWNSHIFT)
#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
@@ -47,6 +55,9 @@
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
+#define HW_ATL_FW_VER_LED 0x03010026U
+#define HW_ATL_FW_VER_MEDIA_CONTROL 0x0301005aU
+
struct __packed fw2x_msg_wol_pattern {
u8 mask[16];
u32 crc;
@@ -71,6 +82,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
@@ -88,6 +100,8 @@ static int aq_fw2x_init(struct aq_hw_s *self)
self->rpc_addr != 0U,
1000U, 100000U);
+ err = aq_fw2x_settings_get(self, &self->settings_addr);
+
return err;
}
@@ -167,17 +181,26 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
-static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+static void aq_fw2x_upd_flow_control_bits(struct aq_hw_s *self,
+ u32 *mpi_state, u32 fc)
{
- if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
- *mpi_state |= BIT(CAPS_HI_PAUSE);
- else
- *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+ *mpi_state &= ~(HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE);
- if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
- *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
- else
- *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ switch (fc) {
+ /* There is no explicit mode for RX-only pause frames,
+ * so that mode is joined with FC full here.
+ * FC full covers Rx, Tx, or both directions.
+ */
+ case AQ_NIC_FC_FULL:
+ case AQ_NIC_FC_RX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ case AQ_NIC_FC_TX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ }
}
static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts,
@@ -201,7 +224,8 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
case MPI_INIT:
mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds);
- aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
break;
case MPI_DEINIT:
mpi_state |= BIT(CAPS_HI_LINK_DROP);
@@ -212,15 +236,20 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
break;
}
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
return 0;
}
static int aq_fw2x_update_link_status(struct aq_hw_s *self)
{
- u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
- u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
+
+ mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
if (speed) {
if (speed & FW2X_RATE_10G)
@@ -244,11 +273,11 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+ u32 mac_addr[2] = { 0 };
int err = 0;
u32 h = 0U;
u32 l = 0U;
- u32 mac_addr[2] = { 0 };
- u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -282,15 +311,16 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
h >>= 8;
mac[0] = (u8)(0xFFU & h);
}
+
return err;
}
static int aq_fw2x_update_stats(struct aq_hw_s *self)
{
- int err = 0;
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
u32 stats_val;
+ int err = 0;
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@@ -317,9 +347,9 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
int err = 0;
u32 val;
- phy_temp_offset = self->mbox_addr +
- offsetof(struct hw_atl_utils_mbox, info) +
- offsetof(struct hw_aq_info, phy_temperature);
+ phy_temp_offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.phy_temperature);
+
/* Toggle statistics bit for FW to 0x36C.18 (CTRL_TEMPERATURE) */
mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
@@ -342,106 +372,117 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
- struct offload_info *cfg = NULL;
- unsigned int rpc_size = 0U;
- u32 mpi_opts;
+ struct offload_info *info = NULL;
+ u32 wol_bits = 0;
+ u32 rpc_size;
int err = 0;
u32 val;
- rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
-
- err = hw_atl_utils_fw_rpc_wait(self, &rpc);
- if (err < 0)
- goto err_exit;
-
- memset(rpc, 0, rpc_size);
- cfg = (struct offload_info *)(&rpc->msg_id + 1);
+ if (self->aq_nic_cfg->wol & WAKE_PHY) {
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR,
+ HW_ATL_FW2X_CTRL_LINK_DROP);
+ readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ (val &
+ HW_ATL_FW2X_CTRL_LINK_DROP) != 0,
+ 1000, 100000);
+ wol_bits |= HW_ATL_FW2X_CTRL_WAKE_ON_LINK;
+ }
- memcpy(cfg->mac_addr, mac, ETH_ALEN);
- cfg->len = sizeof(*cfg);
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ wol_bits |= HW_ATL_FW2X_CTRL_SLEEP_PROXY |
+ HW_ATL_FW2X_CTRL_WOL;
- /* Clear bit 0x36C.23 and 0x36C.22 */
- mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY;
- mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP;
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ rpc_size = sizeof(*info) +
+ offsetof(struct hw_atl_utils_fw_rpc, fw2x_offloads);
+ memset(rpc, 0, rpc_size);
+ info = &rpc->fw2x_offloads;
+ memcpy(info->mac_addr, mac, ETH_ALEN);
+ info->len = sizeof(*info);
- err = hw_atl_utils_fw_rpc_call(self, rpc_size);
- if (err < 0)
- goto err_exit;
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
- /* Set bit 0x36C.23 */
- mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
-
- err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
- self, val,
- val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
- 1U, 100000U);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, wol_bits);
err_exit:
return err;
}
-static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
+ u8 *mac)
{
- struct hw_atl_utils_fw_rpc *rpc = NULL;
- struct fw2x_msg_wol *msg = NULL;
- u32 mpi_opts;
int err = 0;
- u32 val;
-
- err = hw_atl_utils_fw_rpc_wait(self, &rpc);
- if (err < 0)
- goto err_exit;
-
- msg = (struct fw2x_msg_wol *)rpc;
-
- memset(msg, 0, sizeof(*msg));
- msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
- msg->magic_packet_enabled = true;
- memcpy(msg->hw_addr, mac, ETH_ALEN);
+ if (self->aq_nic_cfg->wol)
+ err = aq_fw2x_set_wol(self, mac);
- mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL);
+ return err;
+}
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size)
+{
+ u32 ctrl2, orig_ctrl2;
+ u32 dword_cnt;
+ int err = 0;
+ u32 val;
- err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg));
+ /* Write data to drvIface Mailbox */
+ dword_cnt = size / sizeof(u32);
+ if (size % sizeof(u32))
+ dword_cnt++;
+ err = hw_atl_write_fwcfg_dwords(self, (void *)fw_req, dword_cnt);
if (err < 0)
goto err_exit;
- /* Set bit 0x36C.24 */
- mpi_opts |= HW_ATL_FW2X_CTRL_WOL;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ /* Toggle the FW request bit for firmware to process the message */
+ ctrl2 = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ orig_ctrl2 = ctrl2 & BIT(CAPS_HI_FW_REQUEST);
+ ctrl2 = ctrl2 ^ BIT(CAPS_HI_FW_REQUEST);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, ctrl2);
- err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
- self, val, val & HW_ATL_FW2X_CTRL_WOL,
+ /* Wait FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ orig_ctrl2 != (val &
+ BIT(CAPS_HI_FW_REQUEST)),
1U, 10000U);
err_exit:
return err;
}
-static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
{
- int err = 0;
+ u32 ptp_opts = aq_hw_read_reg(self, HW_ATL_FW3X_EXT_STATE_ADDR);
+ u32 all_ptp_features = BIT(CAPS_EX_PHY_PTP_EN) |
+ BIT(CAPS_EX_PTP_GPIO_EN);
- if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
- err = aq_fw2x_set_sleep_proxy(self, mac);
- if (err < 0)
- goto err_exit;
- err = aq_fw2x_set_wol_params(self, mac);
- }
+ if (enable)
+ ptp_opts |= all_ptp_features;
+ else
+ ptp_opts &= ~all_ptp_features;
-err_exit:
- return err;
+ aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
+}
+
+static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
+{
+ if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
+ return -EOPNOTSUPP;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode);
+
+ return 0;
}
static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
@@ -461,11 +502,12 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
u32 mpi_state;
u32 caps_hi;
int err = 0;
- u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) +
- offsetof(struct hw_aq_info, caps_hi);
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.caps_hi);
- err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi,
- sizeof(caps_hi) / sizeof(u32));
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &caps_hi, 1);
if (err)
return err;
@@ -493,7 +535,8 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
@@ -503,17 +546,41 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
u32 mpi_state = aq_fw2x_state2_get(self);
+ *fcmode = 0;
if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
- if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
- *fcmode = AQ_NIC_FC_RX;
+ *fcmode |= AQ_NIC_FC_RX;
+
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode |= AQ_NIC_FC_TX;
+
+ return 0;
+}
+
+static int aq_fw2x_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ u32 mpi_opts;
+
+ switch (mode) {
+ case AQ_HW_LOOPBACK_PHYINT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_INT_LOOPBACK;
else
- *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
- else
- if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
- *fcmode = AQ_NIC_FC_TX;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_INT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ case AQ_HW_LOOPBACK_PHYEXT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
else
- *fcmode = 0;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -528,25 +595,42 @@ static u32 aq_fw2x_rpc_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR);
}
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
+{
+ int err = 0;
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.setting_address);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, addr, 1);
+
+ return err;
+}
+
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
const struct aq_fw_ops aq_fw_2x_ops = {
- .init = aq_fw2x_init,
- .deinit = aq_fw2x_deinit,
- .reset = NULL,
- .renegotiate = aq_fw2x_renegotiate,
- .get_mac_permanent = aq_fw2x_get_mac_permanent,
- .set_link_speed = aq_fw2x_set_link_speed,
- .set_state = aq_fw2x_set_state,
+ .init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
+ .reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
- .update_stats = aq_fw2x_update_stats,
- .get_phy_temp = aq_fw2x_get_phy_temp,
- .set_power = aq_fw2x_set_power,
- .set_eee_rate = aq_fw2x_set_eee_rate,
- .get_eee_rate = aq_fw2x_get_eee_rate,
- .set_flow_control = aq_fw2x_set_flow_control,
- .get_flow_control = aq_fw2x_get_flow_control
+ .update_stats = aq_fw2x_update_stats,
+ .get_phy_temp = aq_fw2x_get_phy_temp,
+ .set_power = aq_fw2x_set_power,
+ .set_eee_rate = aq_fw2x_set_eee_rate,
+ .get_eee_rate = aq_fw2x_get_eee_rate,
+ .set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control,
+ .send_fw_request = aq_fw2x_send_fw_request,
+ .enable_ptp = aq_fw3x_enable_ptp,
+ .led_control = aq_fw2x_led_control,
+ .set_phyloopback = aq_fw2x_set_phyloopback,
};
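Note on the table rework above: the four new entries are optional hooks, and callers are expected to check for NULL before invoking them (led_control additionally self-gates on firmware version). A minimal kernel-style sketch of that calling convention; the wrapper name and call site below are illustrative, not the driver's actual code:

/* Hedged sketch: invoking an optional hook from aq_fw_2x_ops. */
static int example_led_set(struct aq_hw_s *self,
                           const struct aq_fw_ops *fw_ops, u32 mode)
{
        if (!fw_ops->led_control)               /* hook not provided */
                return -EOPNOTSUPP;

        /* May still return -EOPNOTSUPP if FW predates HW_ATL_FW_VER_LED. */
        return fw_ops->led_control(self, mode);
}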
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 78e52d217e56..539166112993 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -20,9 +20,10 @@
static int emac_arc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct net_device *ndev;
struct arc_emac_priv *priv;
- int interface, err;
+ phy_interface_t interface;
+ struct net_device *ndev;
+ int err;
if (!dev->of_node)
return -ENODEV;
@@ -37,9 +38,13 @@ static int emac_arc_probe(struct platform_device *pdev)
priv->drv_name = DRV_NAME;
priv->drv_version = DRV_VERSION;
- interface = of_get_phy_mode(dev->of_node);
- if (interface < 0)
- interface = PHY_INTERFACE_MODE_MII;
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err) {
+ if (err == -ENODEV)
+ interface = PHY_INTERFACE_MODE_MII;
+ else
+ goto out_netdev;
+ }
priv->clk = devm_clk_get(dev, "hclk");
if (IS_ERR(priv->clk)) {
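The probe changes in this file and in the drivers below all track the reworked of_get_phy_mode(), which now returns 0 or a negative errno and delivers the mode through a phy_interface_t pointer (so passing the field itself rather than its address will not compile). A sketch of the new contract, assuming the usual convention that -ENODEV means the property is absent:

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>

/* Sketch of the reworked of_get_phy_mode() contract: the return value
 * is 0 or -errno, the mode arrives through the pointer, and -ENODEV
 * means the phy-mode/phy-connection-type property is simply absent,
 * which a driver may translate into a board default as emac_arc does.
 */
static int example_get_mode(struct device_node *np, phy_interface_t *iface)
{
        int err = of_get_phy_mode(np, iface);

        if (err == -ENODEV) {           /* property missing: fall back */
                *iface = PHY_INTERFACE_MODE_MII;
                return 0;
        }
        return err;                     /* 0, or e.g. -EINVAL for bad values */
}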
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 664d664e0925..aae231c5224f 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -97,8 +97,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
struct net_device *ndev;
struct rockchip_priv_data *priv;
const struct of_device_id *match;
+ phy_interface_t interface;
u32 data;
- int err, interface;
+ int err;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -114,7 +115,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv->emac.drv_version = DRV_VERSION;
priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
- interface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err)
+ goto out_netdev;
/* RK3036/RK3066/RK3188 SoCs only support RMII */
if (interface != PHY_INTERFACE_MODE_RMII) {
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 1b1a09095c0d..8f5021091eee 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1744,10 +1744,9 @@ static int ag71xx_probe(struct platform_device *pdev)
eth_random_addr(ndev->dev_addr);
}
- ag->phy_if_mode = of_get_phy_mode(np);
- if (ag->phy_if_mode < 0) {
+ err = of_get_phy_mode(np, &ag->phy_if_mode);
+ if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- err = ag->phy_if_mode;
goto err_free;
}
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 37752d9514e7..30b455013bf3 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1371,8 +1371,8 @@ static int nb8800_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->base = base;
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if (priv->phy_mode < 0)
+ ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (ret)
priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
priv->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index aacc3cce2cc0..40941fb6065b 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -287,7 +287,7 @@ struct nb8800_priv {
struct device_node *phy_node;
/* PHY connection type from DT */
- int phy_mode;
+ phy_interface_t phy_mode;
/* Current link status */
int speed;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 97ab0dd25552..035dbb1b2c98 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -511,9 +511,6 @@ static void b44_stats_update(struct b44 *bp)
*val++ += br32(bp, reg);
}
- /* Pad */
- reg += 8*4UL;
-
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a977a459bd20..825af709708e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2479,9 +2479,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->netdev = dev;
priv->pdev = pdev;
- priv->phy_interface = of_get_phy_mode(dn);
+ ret = of_get_phy_mode(dn, &priv->phy_interface);
/* Default to GMII interface mode */
- if ((int)priv->phy_interface < 0)
+ if (ret)
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
/* In the case of a fixed PHY, the DT node associated
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d10b421ed1f1..5e037a305b83 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1934,7 +1934,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
+ return netdev_pick_tx(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
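For context on the hunk above: the device registers BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos transmit queues, so reducing the picked index modulo the queue count alone would pin all traffic to the CoS-0 block. A toy sketch of how the widened modulo decomposes into a (ring, CoS) pair; names are placeholders:

/* Illustrative only: flat tx index -> (ring, cos) with the wide modulo. */
static unsigned int pick_tx_queue(unsigned int hash,
                                  unsigned int num_eth_queues,
                                  unsigned int max_cos)
{
        unsigned int txq = hash % (num_eth_queues * max_cos);

        /* ring within a CoS block, and the CoS block itself */
        unsigned int ring = txq % num_eth_queues;
        unsigned int cos  = txq / num_eth_queues;

        return cos * num_eth_queues + ring;     /* == txq by construction */
}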
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 226ab29f4cb6..3f8435208bf4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -32,31 +32,31 @@
* IRO[142].m2) + ((sbId) * IRO[142].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[323].base + ((pfId) * IRO[323].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[324].base + ((pfId) * IRO[324].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[325].base + ((pfId) * IRO[325].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
+ (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
+ (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
+ (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
+ (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+ (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
+ (IRO[322].base + ((pfId) * IRO[322].m1) + ((iscsiEqId) * IRO[322].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
+ (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[322].base + ((pfId) * IRO[322].m1))
+ (IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[314].base + ((pfId) * IRO[314].m1))
+ (IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[313].base + ((pfId) * IRO[313].m1))
+ (IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[312].base + ((pfId) * IRO[312].m1))
+ (IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -99,81 +99,81 @@
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[276].base + ((pfId) * IRO[276].m1))
+ (IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[275].base + ((pfId) * IRO[275].m1))
+ (IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[274].base + ((pfId) * IRO[274].m1))
+ (IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[270].base + ((pfId) * IRO[270].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
- (IRO[223].base + ((pfId) * IRO[223].m1))
+ (IRO[224].base + ((pfId) * IRO[224].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[108].base + ((funcId) * IRO[108].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
-#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_AGG_DATA_OFFSET (IRO[213].base)
+#define USTORM_AGG_DATA_SIZE (IRO[213].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[325].base + ((pfId) * IRO[325].m1))
+ (IRO[326].base + ((pfId) * IRO[326].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[286].base + ((pfId) * IRO[286].m1))
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
+ (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
- (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
- IRO[215].m2))
+ (IRO[216].base + ((portId) * IRO[216].m1) + ((clientId) * \
+ IRO[216].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
- (IRO[216].base + ((qzoneId) * IRO[216].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
-#define USTORM_TPA_BTR_SIZE (IRO[213].size)
+ (IRO[217].base + ((qzoneId) * IRO[217].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[214].base)
+#define USTORM_TPA_BTR_SIZE (IRO[214].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
@@ -188,39 +188,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[302].base + ((pfId) * IRO[302].m1))
+ (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[309].base + ((pfId) * IRO[309].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[301].base + ((pfId) * IRO[301].m1))
+ (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[300].base + ((pfId) * IRO[300].m1))
+ (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[299].base + ((pfId) * IRO[299].m1))
+ (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[304].base + ((pfId) * IRO[304].m1))
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[303].base + ((pfId) * IRO[303].m1))
+ (IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
+ (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -233,12 +233,12 @@
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
- (IRO[217].base + ((portId) * IRO[217].m1))
-#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[218].base + ((portId) * IRO[218].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[219].base + ((portId) * IRO[219].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
- (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
- IRO[220].m2))
+ (IRO[221].base + (((pfId)>>1) * IRO[221].m1) + (((pfId)&1) * \
+ IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
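The renumbering above is mechanical: each *_OFFSET macro reads one record of a firmware-supplied IRO table and combines a base offset with per-index strides, and the 7.13.15 firmware (bumped in bnx2x_hsi.h below) shifts the higher indices by one. A self-contained sketch of the addressing scheme; the struct layout mirrors what the macros assume, but the table index and sample values are invented for illustration:

#include <linux/types.h>

struct iro {
        u32 base;               /* anchor offset in storm memory */
        u16 m1, m2, m3;         /* strides for up to three indices */
        u16 size;
};

static const struct iro iro_tbl[] = {
        [316] = { .base = 0x9000, .m1 = 0x80, .m2 = 0x8 },      /* invented */
};

/* Equivalent of CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) above. */
static u32 cstorm_iscsi_eq_prod_offset(u32 pf_id, u32 eq_id)
{
        return iro_tbl[316].base + pf_id * iro_tbl[316].m1 +
               eq_id * iro_tbl[316].m2;
}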
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 78326a6c0aba..622fadc50316 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 11
+#define BCM_5710_FW_REVISION_VERSION 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d581d0ae6584..9638d65d8261 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -5611,9 +5611,9 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -5685,7 +5685,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
return rc;
}
-static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
@@ -7364,9 +7364,9 @@ static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
}
}
-static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val = 0, tmp1;
@@ -7427,7 +7427,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
- return 0;
+ return;
} else {
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
@@ -7509,7 +7509,6 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
- return 0;
}
static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
@@ -7676,9 +7675,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8705 PHY SECTION */
/******************************************************************/
-static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "init 8705\n");
@@ -7700,7 +7699,6 @@ static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
/* BCM8705 doesn't have microcode, hence the 0 */
bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
- return 0;
}
static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
@@ -8887,9 +8885,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8706 PHY SECTION */
/******************************************************************/
-static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 cnt, val, tmp1;
@@ -8989,13 +8987,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
}
-
- return 0;
}
-static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
return bnx2x_8706_8726_read_status(phy, params, vars);
}
@@ -9070,9 +9066,9 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
}
-static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
@@ -9150,9 +9146,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_PMA_REG_8726_TX_CTRL2,
phy->tx_preemphasis[1]);
}
-
- return 0;
-
}
static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
@@ -9288,9 +9281,9 @@ static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
}
}
-static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 tmp1, mod_abs, tmp2;
@@ -9370,8 +9363,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
(tmp2 & 0x7fff));
}
-
- return 0;
}
static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
@@ -9946,9 +9937,9 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
@@ -9960,7 +9951,7 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- return bnx2x_848xx_cmn_config_init(phy, params, vars);
+ bnx2x_848xx_cmn_config_init(phy, params, vars);
}
#define PHY848xx_CMDHDLR_WAIT 300
@@ -10210,8 +10201,8 @@ static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
return reset_gpios;
}
-static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
- struct link_params *params)
+static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u8 reset_gpios;
@@ -10239,8 +10230,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
udelay(10);
DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
reset_gpios);
-
- return 0;
}
static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
@@ -10283,9 +10272,9 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
}
#define PHY84833_CONSTANT_LATENCY 1193
-static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
@@ -10430,7 +10419,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
if (rc) {
DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
bnx2x_8483x_disable_eee(phy, params, vars);
- return rc;
+ return;
}
if ((phy->req_duplex == DUPLEX_FULL) &&
@@ -10442,7 +10431,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_8483x_disable_eee(phy, params, vars);
if (rc) {
DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
- return rc;
+ return;
}
} else {
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
@@ -10481,7 +10470,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_84833_TOP_CFG_XGPHY_STRAP1,
(u16)~MDIO_84833_SUPER_ISOLATE);
}
- return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
@@ -11038,9 +11026,9 @@ static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
}
}
-static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port;
@@ -11240,8 +11228,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
bnx2x_cl22_write(bp, phy,
MDIO_PMA_REG_CTRL, autoneg_val);
-
- return 0;
}
@@ -11465,9 +11451,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
}
-static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u16 fw_ver1, fw_ver2, val;
struct bnx2x *bp = params->bp;
@@ -11502,7 +11488,6 @@ static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port,
(u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
- return 0;
}
static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
@@ -11636,14 +11621,14 @@ static const struct bnx2x_phy phy_null = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)NULL,
- .read_status = (read_status_t)NULL,
- .link_reset = (link_reset_t)NULL,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_serdes = {
@@ -11671,14 +11656,14 @@ static const struct bnx2x_phy phy_serdes = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_xgxs_config_init,
- .read_status = (read_status_t)bnx2x_link_settings_status,
- .link_reset = (link_reset_t)bnx2x_int_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_xgxs = {
@@ -11707,14 +11692,14 @@ static const struct bnx2x_phy phy_xgxs = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_xgxs_config_init,
- .read_status = (read_status_t)bnx2x_link_settings_status,
- .link_reset = (link_reset_t)bnx2x_int_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = bnx2x_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_xgxs_specific_func
};
static const struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11745,14 +11730,14 @@ static const struct bnx2x_phy phy_warpcore = {
.speed_cap_mask = 0,
/* req_duplex = */0,
/* rsrv = */0,
- .config_init = (config_init_t)bnx2x_warpcore_config_init,
- .read_status = (read_status_t)bnx2x_warpcore_read_status,
- .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_warpcore_config_init,
+ .read_status = bnx2x_warpcore_read_status,
+ .link_reset = bnx2x_warpcore_link_reset,
+ .config_loopback = bnx2x_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = bnx2x_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
@@ -11776,14 +11761,14 @@ static const struct bnx2x_phy phy_7101 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_7101_config_init,
- .read_status = (read_status_t)bnx2x_7101_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
- .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_7101_config_init,
+ .read_status = bnx2x_7101_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = bnx2x_7101_config_loopback,
+ .format_fw_ver = bnx2x_7101_format_ver,
+ .hw_reset = bnx2x_7101_hw_reset,
+ .set_link_led = bnx2x_7101_set_link_led,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8073 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
@@ -11807,14 +11792,14 @@ static const struct bnx2x_phy phy_8073 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8073_config_init,
- .read_status = (read_status_t)bnx2x_8073_read_status,
- .link_reset = (link_reset_t)bnx2x_8073_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
+ .config_init = bnx2x_8073_config_init,
+ .read_status = bnx2x_8073_read_status,
+ .link_reset = bnx2x_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_8073_specific_func
};
static const struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11835,14 +11820,14 @@ static const struct bnx2x_phy phy_8705 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8705_config_init,
- .read_status = (read_status_t)bnx2x_8705_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8705_config_init,
+ .read_status = bnx2x_8705_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
@@ -11864,14 +11849,14 @@ static const struct bnx2x_phy phy_8706 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8706_config_init,
- .read_status = (read_status_t)bnx2x_8706_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8706_config_init,
+ .read_status = bnx2x_8706_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8726 = {
@@ -11896,14 +11881,14 @@ static const struct bnx2x_phy phy_8726 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8726_config_init,
- .read_status = (read_status_t)bnx2x_8726_read_status,
- .link_reset = (link_reset_t)bnx2x_8726_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8726_config_init,
+ .read_status = bnx2x_8726_read_status,
+ .link_reset = bnx2x_8726_link_reset,
+ .config_loopback = bnx2x_8726_config_loopback,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8727 = {
@@ -11927,14 +11912,14 @@ static const struct bnx2x_phy phy_8727 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8727_config_init,
- .read_status = (read_status_t)bnx2x_8727_read_status,
- .link_reset = (link_reset_t)bnx2x_8727_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+ .config_init = bnx2x_8727_config_init,
+ .read_status = bnx2x_8727_read_status,
+ .link_reset = bnx2x_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = bnx2x_8727_hw_reset,
+ .set_link_led = bnx2x_8727_set_link_led,
+ .phy_specific_func = bnx2x_8727_specific_func
};
static const struct bnx2x_phy phy_8481 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
@@ -11962,14 +11947,14 @@ static const struct bnx2x_phy phy_8481 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8481_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_8481_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8481_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_8481_hw_reset,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_84823 = {
@@ -11999,14 +11984,14 @@ static const struct bnx2x_phy phy_84823 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84833 = {
@@ -12034,14 +12019,14 @@ static const struct bnx2x_phy phy_84833 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84834 = {
@@ -12068,14 +12053,14 @@ static const struct bnx2x_phy phy_84834 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84858 = {
@@ -12102,14 +12087,14 @@ static const struct bnx2x_phy phy_84858 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_8485x_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_54618se = {
@@ -12136,14 +12121,14 @@ static const struct bnx2x_phy phy_54618se = {
.speed_cap_mask = 0,
/* req_duplex = */0,
/* rsrv = */0,
- .config_init = (config_init_t)bnx2x_54618se_config_init,
- .read_status = (read_status_t)bnx2x_54618se_read_status,
- .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
+ .config_init = bnx2x_54618se_config_init,
+ .read_status = bnx2x_54618se_read_status,
+ .link_reset = bnx2x_54618se_link_reset,
+ .config_loopback = bnx2x_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_5461x_set_link_led,
+ .phy_specific_func = bnx2x_54618se_specific_func
};
/*****************************************************************/
/* */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 7115f5025664..cae03c89dc73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -127,15 +127,15 @@ struct link_vars;
struct link_params;
struct bnx2x_phy;
-typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
- struct link_vars *vars);
+typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef void (*link_reset_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
struct link_params *params);
-typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef int (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
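The signature churn across bnx2x_link.c exists to satisfy these typedefs exactly: calling a function through a pointer cast to an incompatible type is undefined behavior in C and breaks control-flow-integrity builds, so the (config_init_t)-style casts in the phy tables are dropped and every callback is given the precise prototype instead. A minimal illustration with placeholder names:

/* Hedged sketch, placeholder names: why the casts had to go. */
typedef void (*config_init_t)(int phy);

static int old_init(int phy) { return 0; }      /* wrong return type */
static void new_init(int phy) { }               /* matches the typedef */

struct phy_ops {
        config_init_t config_init;
};

static const struct phy_ops bad  = { .config_init = (config_init_t)old_init };
/* ^ compiles, but calling bad.config_init() is undefined behavior */
static const struct phy_ops good = { .config_init = new_init };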
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0edbb0a76847..5097a44686b3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure Tx switching\n");
- return rc;
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
}
}
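On the sriov hunk above: with multi-CoS, each fastpath owns one connection per traffic class, so the tx-switching update ramrod has to be replayed for every cid_index rather than only the default one. A condensed sketch of that loop, assuming the driver's queue-state types:

/* Illustrative sketch (driver types assumed): one update ramrod per
 * CoS connection of a queue, aborting on the first failure.
 */
static int update_all_cos(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                          struct bnx2x_queue_state_params *q_params)
{
        int tx_idx, rc;

        for (tx_idx = FIRST_TX_COS_INDEX; tx_idx < fp->max_cos; tx_idx++) {
                q_params->params.update.cid_index = tx_idx;
                rc = bnx2x_queue_state_change(bp, q_params);
                if (rc)
                        return rc;      /* leave remaining CoS untouched */
        }
        return 0;
}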
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04ec909e06df..85983f0e3134 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -250,10 +250,12 @@ static const u16 bnxt_vf_req_snif[] = {
static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};
@@ -1767,8 +1769,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
- bnxt_sched_reset(bp, rxr);
+ bnapi->cp_ring.rx_buf_errors++;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ netdev_warn(bp->dev, "RX buffer error %x\n",
+ rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
}
goto next_rx_no_len;
}
@@ -1964,6 +1970,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
}
/* fall through */
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
+ set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
+ /* fall through */
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -2684,6 +2694,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
+ if (rmem->init_val)
+ memset(rmem->pg_arr[i], rmem->init_val,
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -3171,13 +3184,8 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
bnxt_init_rxbd_pages(ring, type);
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
- if (IS_ERR(rxr->xdp_prog)) {
- int rc = PTR_ERR(rxr->xdp_prog);
-
- rxr->xdp_prog = NULL;
- return rc;
- }
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
}
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
@@ -4274,6 +4282,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
i++ < tmo_count) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
@@ -4297,6 +4310,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Check if response len is updated */
for (i = 0; i < tmo_count; i++) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
@@ -4385,59 +4403,29 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
return rc;
}
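The two BNXT_STATE_FW_FATAL_COND checks added to the wait loops above implement a fail-fast escape from otherwise long timeouts once firmware has been declared dead. A distilled, driver-internal sketch of the pattern; the helper shape and sleep interval are illustrative assumptions:

/* Hedged sketch: bounded poll with a fatal-condition early exit. */
static int wait_seq_done(struct bnxt *bp, u16 *seq_done, u16 want,
                         int tmo_count)
{
        int i;

        for (i = 0; i < tmo_count; i++) {
                if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
                        return -EBUSY;  /* FW dead: don't burn the timeout */
                if (READ_ONCE(*seq_done) == want)
                        return 0;
                usleep_range(25, 40);   /* illustrative backoff */
        }
        return -ETIMEDOUT;
}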
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
- int bmap_size)
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
+ bool async_only)
{
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
DECLARE_BITMAP(async_events_bmap, 256);
u32 *events = (u32 *)async_events_bmap;
- int i;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
-
- req.enables =
- cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
-
- memset(async_events_bmap, 0, sizeof(async_events_bmap));
- for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
- u16 event_id = bnxt_async_events_arr[i];
-
- if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
- !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- continue;
- __set_bit(bnxt_async_events_arr[i], async_events_bmap);
- }
- if (bmap && bmap_size) {
- for (i = 0; i < bmap_size; i++) {
- if (test_bit(i, bmap))
- __set_bit(i, async_events_bmap);
- }
- }
-
- for (i = 0; i < 8; i++)
- req.async_event_fwd[i] |= cpu_to_le32(events[i]);
-
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
-{
- struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_drv_rgtr_input req = {0};
u32 flags;
- int rc;
+ int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req.enables =
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
- FUNC_DRV_RGTR_REQ_ENABLES_VER);
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
- flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
+ FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
req.flags = cpu_to_le32(flags);
req.ver_maj_8b = DRV_VER_MAJ;
req.ver_min_8b = DRV_VER_MIN;
@@ -4471,11 +4459,36 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
req.flags |= cpu_to_le32(
FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
+ memset(async_events_bmap, 0, sizeof(async_events_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+ u16 event_id = bnxt_async_events_arr[i];
+
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ continue;
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ }
+ if (bmap && bmap_size) {
+ for (i = 0; i < bmap_size; i++) {
+ if (test_bit(i, bmap))
+ __set_bit(i, async_events_bmap);
+ }
+ }
+ for (i = 0; i < 8; i++)
+ req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+ if (async_only)
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc && (resp->flags &
- cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
- bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ if (!rc) {
+ set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
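The registration request above forwards the async-event filter as eight little-endian 32-bit words built from a 256-bit bitmap. A sketch of just that packing step; the destination array stands in for req.async_event_fwd[]:

/* Sketch: pack a 256-bit async-event bitmap into 8 le32 words, the
 * wire format of async_event_fwd[] in the registration request.
 */
static void pack_async_events(__le32 *dst /* async_event_fwd[8] */)
{
        DECLARE_BITMAP(bmap, 256);
        u32 *events = (u32 *)bmap;
        int i;

        bitmap_zero(bmap, 256);
        __set_bit(ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, bmap);

        for (i = 0; i < 8; i++)         /* 8 x 32 = 256 event ids */
                dst[i] |= cpu_to_le32(events[i]);
}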
@@ -4484,6 +4497,9 @@ static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
struct hwrm_func_drv_unrgtr_input req = {0};
+ if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
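The new BNXT_STATE_DRV_REGISTERED bit pairs a set_bit on successful registration with the test_and_clear_bit guard above, which makes unregister idempotent and safe to call from any error path. Shape of the pairing, with the HWRM calls stubbed out as placeholders:

/* Sketch of the register/unregister pairing introduced above. */
static int drv_register(struct bnxt *bp)
{
        int rc = send_register_cmd(bp); /* placeholder for the HWRM call */

        if (!rc)
                set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
        return rc;
}

static int drv_unregister(struct bnxt *bp)
{
        /* Only one caller wins; repeat calls become no-ops. */
        if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
                return 0;
        return send_unregister_cmd(bp); /* placeholder */
}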
@@ -4601,21 +4617,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_vnic_info *vnic;
- u32 dst_ena = 0;
+ u32 flags = 0;
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
- dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
- req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
- vnic = &bp->vnic_info[0];
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req.dst_id = cpu_to_le16(fltr->rxq);
} else {
vnic = &bp->vnic_info[fltr->rxq + 1];
+ req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
@@ -6480,6 +6496,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
} else {
rc = 0;
}
@@ -6634,7 +6651,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth)
+ u8 depth, bool use_init_val)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -6672,6 +6689,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
+ if (use_init_val)
+ rmem->init_val = bp->ctx->ctx_kind_initializer;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -6686,6 +6705,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
+ if (use_init_val)
+ rmem->init_val = bp->ctx->ctx_kind_initializer;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -6776,21 +6797,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
if (rc)
return rc;
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
if (rc)
return rc;
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
if (rc)
return rc;
@@ -6798,14 +6819,14 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
if (rc)
return rc;
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
if (rc)
return rc;
@@ -6821,7 +6842,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
if (rc)
return rc;
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
@@ -6833,7 +6854,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
@@ -6847,7 +6868,7 @@ skip_rdma:
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
@@ -6943,6 +6964,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
@@ -7042,8 +7065,8 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
flags = le32_to_cpu(resp->flags);
if (flags &
- CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
- bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
hwrm_cfa_adv_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -8402,7 +8425,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_EEE_CAP;
if (bp->test_info)
- bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
+ bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
+ BNXT_TEST_FL_AN_PHY_LPBK);
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -8428,6 +8452,14 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (bp->test_info)
bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
+ if (bp->test_info)
+ bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
+ }
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
+ if (BNXT_PF(bp))
+ bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -8542,7 +8574,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
diff = link_info->support_auto_speeds ^ link_info->advertising;
@@ -8762,6 +8794,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_ulp_stop(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9224,13 +9258,16 @@ static int bnxt_open(struct net_device *dev)
if (rc) {
bnxt_hwrm_if_change(bp, false);
} else {
- if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
- BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
- int n = pf->active_vfs;
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
- if (n)
- bnxt_cfg_hw_sriov(bp, &n, true);
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_ulp_start(bp, 0);
}
bnxt_hwmon_open(bp);
}
@@ -9688,7 +9725,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
static bool bnxt_rfs_supported(struct bnxt *bp)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
}
@@ -9927,12 +9964,15 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (netif_running(bp->dev)) {
int rc;
- if (!silent)
+ if (silent) {
+ bnxt_close_nic(bp, false, false);
+ bnxt_open_nic(bp, false, false);
+ } else {
bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, false, false);
- rc = bnxt_open_nic(bp, false, false);
- if (!silent && !rc)
- bnxt_ulp_start(bp);
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ bnxt_ulp_start(bp, rc);
+ }
}
}
@@ -10004,7 +10044,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->link_info.phy_retry) {
if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
- bp->link_info.phy_retry = 0;
+ bp->link_info.phy_retry = false;
netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
} else {
set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
@@ -10048,8 +10088,8 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
+ bnxt_ulp_stop(bp);
__bnxt_close_nic(bp, true, false);
- bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_ctx_mem(bp);
@@ -10107,6 +10147,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
void bnxt_fw_exception(struct bnxt *bp)
{
+ netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_rtnl_lock_sp(bp);
bnxt_force_fw_reset(bp);
@@ -10218,6 +10259,31 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
static void bnxt_cfg_ntp_filters(struct bnxt *);
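+/* Initialize the ethtool copy of the link settings from the current
+ * NVM/PHY configuration; factored out of bnxt_probe_phy() so that it
+ * can also run from bnxt_sp_task() on a BNXT_LINK_CFG_CHANGE_SP_EVENT.
+ */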
+static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ link_info->autoneg = BNXT_AUTONEG_SPEED;
+ if (bp->hwrm_spec_code >= 0x10201) {
+ if (link_info->auto_pause_setting &
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ } else {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ }
+ link_info->advertising = link_info->auto_link_speeds;
+ } else {
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_duplex = link_info->duplex_setting;
+ }
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->req_flow_ctrl =
+ link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+ else
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -10268,6 +10334,10 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
+ if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
+ &bp->sp_event))
+ bnxt_init_ethtool_link_settings(bp);
+
rc = bnxt_update_link(bp, true);
mutex_unlock(&bp->link_lock);
if (rc)
@@ -10463,11 +10533,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
rc);
- rc = bnxt_hwrm_func_drv_rgtr(bp);
- if (rc)
- return -ENODEV;
-
- rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
if (rc)
return -ENODEV;
@@ -10582,14 +10648,23 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
- int i;
+ int i, rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
+#ifdef CONFIG_TEE_BNXT_FW
+ rc = tee_bnxt_fw_load();
+ if (rc)
+ netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ bp->fw_reset_timestamp = jiffies;
+#endif
+ return;
+ }
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
bnxt_fw_reset_writel(bp, i);
} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
struct hwrm_fw_reset_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
@@ -10720,19 +10795,22 @@ static void bnxt_fw_reset_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
dev_close(bp->dev);
}
- bnxt_ulp_irq_restart(bp, rc);
- rtnl_unlock();
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_status_update(bp, true);
+ rtnl_unlock();
break;
}
return;
fw_reset_abort:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
+ bnxt_dl_health_status_update(bp, false);
bp->fw_reset_state = 0;
rtnl_lock();
dev_close(bp->dev);
@@ -10934,7 +11012,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static LIST_HEAD(bnxt_block_cb_list);
+LIST_HEAD(bnxt_block_cb_list);
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
@@ -11372,26 +11450,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
if (!fw_dflt)
return 0;
- /*initialize the ethool setting copy with NVM settings */
- if (BNXT_AUTO_MODE(link_info->auto_mode)) {
- link_info->autoneg = BNXT_AUTONEG_SPEED;
- if (bp->hwrm_spec_code >= 0x10201) {
- if (link_info->auto_pause_setting &
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
- link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- } else {
- link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- }
- link_info->advertising = link_info->auto_link_speeds;
- } else {
- link_info->req_link_speed = link_info->force_link_speed;
- link_info->req_duplex = link_info->duplex_setting;
- }
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
- link_info->req_flow_ctrl =
- link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
- else
- link_info->req_flow_ctrl = link_info->force_pause_setting;
+ bnxt_init_ethtool_link_settings(bp);
return 0;
}
@@ -11831,6 +11890,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_free_ctx_mem(bp);
@@ -11882,11 +11942,16 @@ static int bnxt_suspend(struct device *device)
int rc = 0;
rtnl_lock();
+ bnxt_ulp_stop(bp);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
}
bnxt_hwrm_func_drv_unrgtr(bp);
+ pci_disable_device(bp->pdev);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
rtnl_unlock();
return rc;
}
@@ -11898,7 +11963,14 @@ static int bnxt_resume(struct device *device)
int rc = 0;
rtnl_lock();
- if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+ rc = pci_enable_device(bp->pdev);
+ if (rc) {
+ netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
+ rc);
+ goto resume_exit;
+ }
+ pci_set_master(bp->pdev);
+ if (bnxt_hwrm_ver_get(bp)) {
rc = -ENODEV;
goto resume_exit;
}
@@ -11907,6 +11979,26 @@ static int bnxt_resume(struct device *device)
rc = -EBUSY;
goto resume_exit;
}
+
+ if (bnxt_hwrm_queue_qportcfg(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (bnxt_alloc_ctx_mem(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+ }
+ if (BNXT_NEW_RM(bp))
+ bnxt_hwrm_func_resc_qcaps(bp, false);
+
+ if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
rc = bnxt_open(dev);
@@ -11915,6 +12007,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ bnxt_ulp_start(bp, rc);
rtnl_unlock();
return rc;
}
@@ -11994,10 +12087,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- if (!err) {
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- bnxt_ulp_start(bp);
- }
+ bnxt_ulp_start(bp, err);
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d333589811a5..505af5cfb1bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.0"
+#define DRV_MODULE_VERSION "1.10.1"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 0
+#define DRV_VER_UPD 1
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -25,6 +25,11 @@
#include <net/dst_metadata.h>
#include <net/xdp.h>
#include <linux/dim.h>
+#ifdef CONFIG_TEE_BNXT_FW
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+#endif
+
+extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -716,6 +721,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
+ u8 init_val;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -927,6 +933,7 @@ struct bnxt_cp_ring_info {
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
u64 missed_irqs;
struct bnxt_ring_struct cp_ring_struct;
@@ -1219,7 +1226,8 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
u8 flags;
-#define BNXT_TEST_FL_EXT_LPBK 0x1
+#define BNXT_TEST_FL_EXT_LPBK 0x1
+#define BNXT_TEST_FL_AN_PHY_LPBK 0x2
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1241,6 +1249,14 @@ struct bnxt_tc_flow_stats {
u64 bytes;
};
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+struct bnxt_flower_indr_block_cb_priv {
+ struct net_device *tunnel_netdev;
+ struct bnxt *bp;
+ struct list_head list;
+};
+#endif
+
struct bnxt_tc_info {
bool enabled;
@@ -1338,6 +1354,7 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1370,6 +1387,7 @@ struct bnxt_fw_health {
u32 last_fw_reset_cnt;
u8 enabled:1;
u8 master:1;
+ u8 fatal:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1428,6 +1446,8 @@ struct bnxt {
#define CHIP_NUM_57414L 0x16db
#define CHIP_NUM_5745X 0xd730
+#define CHIP_NUM_57452 0xc452
+#define CHIP_NUM_57454 0xc454
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
@@ -1460,7 +1480,10 @@ struct bnxt {
((chip_num) == CHIP_NUM_58700)
#define BNXT_CHIP_NUM_5745X(chip_num) \
- ((chip_num) == CHIP_NUM_5745X)
+ ((chip_num) == CHIP_NUM_5745X || \
+ (chip_num) == CHIP_NUM_57452 || \
+ (chip_num) == CHIP_NUM_57454)
+
#define BNXT_CHIP_NUM_57X0X(chip_num) \
(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
@@ -1525,6 +1548,8 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_PHY_CFG_ABLE(bp) (BNXT_SINGLE_PF(bp) || \
+ ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
@@ -1626,6 +1651,7 @@ struct bnxt {
#define BNXT_STATE_IN_FW_RESET 4
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
+#define BNXT_STATE_DRV_REGISTERED 7
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1653,10 +1679,12 @@ struct bnxt {
#define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
- #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
+ #define BNXT_FW_CAP_HOT_RESET 0x00200000
+ #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1738,6 +1766,7 @@ struct bnxt {
#define BNXT_RING_COAL_NOW_SP_EVENT 17
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
+#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -1804,6 +1833,9 @@ struct bnxt {
u8 num_leds;
struct bnxt_led_info leds[BNXT_MAX_LED];
+ u16 dump_flag;
+#define BNXT_DUMP_LIVE 0
+#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
@@ -1815,6 +1847,8 @@ struct bnxt {
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
+ struct list_head tc_indr_block_list;
+ struct notifier_block tc_netdev_nb;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
};
@@ -1969,8 +2003,8 @@ int _hwrm_send_message(struct bnxt *, void *, u32, int);
int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
- int bmap_size);
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size, bool async_only);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 7151244f8c7d..acb2dd64c023 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -14,9 +14,29 @@
#include "bnxt.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
+#include "bnxt_ethtool.h"
+
+static int
+bnxt_dl_flash_update(struct devlink *dl, const char *filename,
+ const char *region, struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (region)
+ return -EOPNOTSUPP;
+
+ if (!BNXT_PF(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flash update not supported from a VF");
+ return -EPERM;
+ }
+
+ return bnxt_flash_package_from_file(bp->dev, filename, 0);
+}
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *health = bp->fw_health;
@@ -61,7 +81,8 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
};
static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
@@ -79,7 +100,8 @@ struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
};
static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
@@ -88,6 +110,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
if (!priv_ctx)
return -EOPNOTSUPP;
+ bp->fw_health->fatal = true;
event = fw_reporter_ctx->sp_event;
if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
bnxt_fw_reset(bp);
@@ -196,11 +219,32 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
}
}
+void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+ u8 state;
+
+ if (healthy)
+ state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ else
+ state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ if (health->fatal)
+ devlink_health_reporter_state_update(health->fw_fatal_reporter,
+ state);
+ else
+ devlink_health_reporter_state_update(health->fw_reset_reporter,
+ state);
+
+ health->fatal = false;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
+ .flash_update = bnxt_dl_flash_update,
};
enum bnxt_dl_param_id {
@@ -311,10 +355,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
bnxt_copy_from_nvm_data(val, data,
nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
+ } else {
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (resp->cmd_err ==
+ NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
+ rc = -EOPNOTSUPP;
+ }
}
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
if (rc == -EACCES)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 2f4fd0a7d04b..665d4bdcd8c0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -57,6 +57,7 @@ struct bnxt_dl_nvm_param {
};
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
+void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 51c140476717..2ccf79cdcb1e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
static const char * const bnxt_ring_sw_stats_str[] = {
"rx_l4_csum_errors",
+ "rx_buf_errors",
"missed_irqs",
};
@@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
+ buf[j++] = cpr->rx_buf_errors;
buf[j++] = cpr->missed_irqs;
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
@@ -1588,7 +1590,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
u32 speed;
int rc = 0;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -1660,7 +1662,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (epause->autoneg) {
@@ -1785,6 +1787,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNXT_FW_RESET_CHIP:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
break;
case BNXT_FW_RESET_AP:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
@@ -1996,8 +2000,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
-static int bnxt_flash_package_from_file(struct net_device *dev,
- char *filename, u32 install_type)
+int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+ u32 install_type)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2395,7 +2399,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
int rc = 0;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
@@ -2582,7 +2586,7 @@ static int bnxt_nway_reset(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -2694,7 +2698,8 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
u16 fw_speed;
int rc;
- if (!link_info->autoneg)
+ if (!link_info->autoneg ||
+ (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
return 0;
rc = bnxt_query_force_speeds(bp, &fw_advertising);
@@ -2981,7 +2986,8 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EOPNOTSUPP;
}
- if (pci_vfs_assigned(bp->pdev)) {
+ if (pci_vfs_assigned(bp->pdev) &&
+ !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
netdev_err(dev,
"Reset not allowed when VFs are assigned to VMs\n");
return -EBUSY;
@@ -2994,7 +3000,9 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc) {
- netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ netdev_info(dev, "Reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ netdev_info(dev, "Reload driver to complete reset\n");
*flags = 0;
}
} else if (*flags == ETH_RESET_AP) {
@@ -3038,7 +3046,8 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
mutex_lock(&bp->hwrm_cmd_lock);
while (1) {
*seq_ptr = cpu_to_le16(seq);
- rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message(bp, msg, msg_len,
+ HWRM_COREDUMP_TIMEOUT);
if (rc)
break;
@@ -3311,6 +3320,24 @@ err:
return rc;
}
+static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (dump->flag > BNXT_DUMP_CRASH) {
+ netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
+ netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ }
+
+ bp->dump_flag = dump->flag;
+ return 0;
+}
+
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3323,7 +3350,12 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_bld_8b << 8 |
bp->ver_resp.hwrm_fw_rsvd_8b;
- return bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->flag = bp->dump_flag;
+ if (bp->dump_flag == BNXT_DUMP_CRASH)
+ dump->len = BNXT_CRASH_DUMP_LEN;
+ else
+ bnxt_get_coredump(bp, NULL, &dump->len);
+ return 0;
}
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
@@ -3336,7 +3368,16 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
- return bnxt_get_coredump(bp, buf, &dump->len);
+ dump->flag = bp->dump_flag;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+ return tee_bnxt_copy_coredump(buf, 0, dump->len);
+#endif
+ } else {
+ return bnxt_get_coredump(bp, buf, &dump->len);
+ }
+
+ return 0;
}
void bnxt_ethtool_init(struct bnxt *bp)
@@ -3446,6 +3487,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .set_dump = bnxt_set_dump,
.get_dump_flag = bnxt_get_dump_flag,
.get_dump_data = bnxt_get_dump_data,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index b5b65b3f8534..4428d0abcbc1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -59,6 +59,8 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
@@ -79,6 +81,8 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+ u32 install_type);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 03b197eb793b..7cf27dffadb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -176,6 +176,9 @@ struct cmd_nums {
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -208,7 +211,7 @@ struct cmd_nums {
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
#define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
#define HWRM_FW_STATE_QUIESCE 0xc5UL
#define HWRM_FW_STATE_BACKUP 0xc6UL
#define HWRM_FW_STATE_RESTORE 0xc7UL
@@ -225,8 +228,11 @@ struct cmd_nums {
#define HWRM_PORT_PRBS_TEST 0xd5UL
#define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
#define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -308,6 +314,7 @@ struct cmd_nums {
#define HWRM_ENGINE_STATS_CONFIG 0x155UL
#define HWRM_ENGINE_STATS_CLEAR 0x156UL
#define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
#define HWRM_ENGINE_RQ_ALLOC 0x15eUL
#define HWRM_ENGINE_RQ_FREE 0x15fUL
#define HWRM_ENGINE_CQ_ALLOC 0x160UL
@@ -390,6 +397,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -420,9 +428,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 100
-#define HWRM_VERSION_STR "1.10.0.100"
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 12
+#define HWRM_VERSION_STR "1.10.1.12"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -637,6 +645,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1115,6 +1125,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1255,7 +1266,8 @@ struct hwrm_func_qcfg_output {
u8 unused_1;
u8 always_1;
__le32 reset_addr_poll;
- u8 unused_2[3];
+ __le16 legacy_l2_db_size_kb;
+ u8 unused_2[1];
u8 valid;
};
@@ -1500,6 +1512,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1762,7 +1775,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1792,6 +1805,10 @@ struct hwrm_func_backing_store_qcaps_output {
__le32 tim_max_entries;
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le32 rsvd;
+ __le16 rsvd1;
+ u8 rsvd2;
u8 valid;
};
@@ -2524,6 +2541,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -2761,8 +2779,8 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le16 resp_len;
u8 flags;
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -3177,10 +3195,12 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4980,6 +5000,15 @@ struct hwrm_vnic_rss_cfg_output {
u8 valid;
};
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
__le16 req_type;
@@ -5807,7 +5836,7 @@ struct hwrm_cfa_encap_record_free_output {
u8 valid;
};
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5815,10 +5844,12 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -5887,8 +5918,6 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__be16 dst_port;
__be16 dst_port_mask;
__le64 ntuple_filter_id_hint;
- __le16 rfs_ring_tbl_idx;
- u8 unused_0[6];
};
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
@@ -5954,7 +5983,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
__le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -6534,18 +6564,21 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
u8 unused_0[3];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f6f3454d6059..2aba1e02a8f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -515,6 +515,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
struct bnxt_pf_info *pf = &bp->pf;
int i, rc = 0, min = 1;
u16 vf_msix = 0;
+ u16 vf_rss;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -533,9 +534,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+ vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
- req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
min = 0;
req.min_rsscos_ctx = cpu_to_le16(min);
@@ -557,6 +558,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_vnics /= num_vfs;
vf_stat_ctx /= num_vfs;
vf_ring_grps /= num_vfs;
+ vf_rss /= num_vfs;
req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.min_tx_rings = cpu_to_le16(vf_tx_rings);
@@ -565,6 +567,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req.min_vnics = cpu_to_le16(vf_vnics);
req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req.min_rsscos_ctx = cpu_to_le16(vf_rss);
}
req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.max_tx_rings = cpu_to_le16(vf_tx_rings);
@@ -573,6 +576,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req.max_rsscos_ctx = cpu_to_le16(vf_rss);
if (bp->flags & BNXT_FLAG_CHIP_P5)
req.max_msix = cpu_to_le16(vf_msix / num_vfs);
@@ -598,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
n;
hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
- hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+ hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index c8062d020d1e..0cc6ec51f45f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -16,7 +16,9 @@
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -36,6 +38,8 @@
#define is_vid_exactmatch(vlan_tci_mask) \
((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+static bool is_wildcard(void *mask, int len);
+static bool is_exactmatch(void *mask, int len);
/* Return the dst fid of the func for flow forwarding
* For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
@@ -111,10 +115,182 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
return 0;
}
+/* Key & Mask from the stack come unaligned in multiple iterations of 4 bytes
+ * each (u32).
+ * This routine consolidates such multiple unaligned values into one
+ * field each for Key & Mask (for src and dst macs separately)
+ * For example,
+ * Mask/Key Offset Iteration
+ * ========== ====== =========
+ * dst mac 0xffffffff 0 1
+ * dst mac 0x0000ffff 4 2
+ *
+ * src mac 0xffff0000 4 1
+ * src mac 0xffffffff 8 2
+ *
+ * The above combination coming from the stack will be consolidated as
+ * Mask/Key
+ * ==============
+ * src mac: 0xffffffffffff
+ * dst mac: 0xffffffffffff
+ */
+static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
+ u8 *actual_key, u8 *actual_mask)
+{
+ u32 key = get_unaligned((u32 *)actual_key);
+ u32 mask = get_unaligned((u32 *)actual_mask);
+
+ part_key &= part_mask;
+ part_key |= key & ~part_mask;
+
+ put_unaligned(mask | part_mask, (u32 *)actual_mask);
+ put_unaligned(part_key, (u32 *)actual_key);
+}
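+
+/* Worked example, using the dst mac rows from the table above: the two
+ * pedit iterations arrive as
+ * bnxt_set_l2_key_mask(val, 0xffffffff, &eth_addr[0], &eth_addr_mask[0])
+ * and
+ * bnxt_set_l2_key_mask(val, 0x0000ffff, &eth_addr[4], &eth_addr_mask[4]);
+ * after the second call, bytes 0..5 of eth_addr_mask are all 0xff (on a
+ * little-endian host) and bytes 0..5 of eth_addr hold the consolidated dmac.
+ */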
+
+static int
+bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
+ u16 *eth_addr, u16 *eth_addr_mask)
+{
+ u16 *p;
+ int j;
+
+ if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)))
+ return -EINVAL;
+
+ if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN))
+ return -EINVAL;
+ /* FW expects dmac to be in u16 array format */
+ p = eth_addr;
+ for (j = 0; j < 3; j++)
+ actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
+ }
+
+ if (!is_wildcard(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN))
+ return -EINVAL;
+ /* FW expects smac to be in u16 array format */
+ p = &eth_addr[ETH_ALEN / 2];
+ for (j = 0; j < 3; j++)
+ actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
+ }
+
+ return 0;
+}
+
+static int
+bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
+ struct flow_action_entry *act, int act_idx, u8 *eth_addr,
+ u8 *eth_addr_mask)
+{
+ size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr);
+ size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr);
+ u32 mask, val, offset, idx;
+ u8 htype;
+
+ offset = act->mangle.offset;
+ htype = act->mangle.htype;
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) {
+ netdev_err(bp->dev,
+ "%s: eth_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+ actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE;
+
+ bnxt_set_l2_key_mask(val, mask, &eth_addr[offset],
+ &eth_addr_mask[offset]);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+ actions->nat.l3_is_ipv4 = true;
+ if (offset == offsetof(struct iphdr, saddr)) {
+ actions->nat.src_xlate = true;
+ actions->nat.l3.ipv4.saddr.s_addr = htonl(val);
+ } else if (offset == offsetof(struct iphdr, daddr)) {
+ actions->nat.src_xlate = false;
+ actions->nat.l3.ipv4.daddr.s_addr = htonl(val);
+ } else {
+ netdev_err(bp->dev,
+ "%s: IPv4_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n",
+ actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr,
+ &actions->nat.l3.ipv4.daddr);
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+ actions->nat.l3_is_ipv4 = false;
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offset_of_ip6_daddr) {
+ /* A 16-byte IPv6 address comes in 4 iterations of
+ * 4-byte chunks each
+ */
+ actions->nat.src_xlate = true;
+ idx = (offset - offset_of_ip6_saddr) / 4;
+ /* First 4 bytes will be copied to idx 0 and so on */
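+ /* e.g. with offsetof(struct ipv6hdr, saddr) == 8, a
+ * mangle at offset 12 writes s6_addr32[(12 - 8) / 4],
+ * the second 32-bit word of the source address
+ */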
+ actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ } else if (offset >= offset_of_ip6_daddr &&
+ offset < offset_of_ip6_daddr + 16) {
+ actions->nat.src_xlate = false;
+ idx = (offset - offset_of_ip6_daddr) / 4;
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
+ } else {
+ netdev_err(bp->dev,
+ "%s: IPv6_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* HW does not support L4 rewrite alone without L3
+ * rewrite
+ */
+ if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
+ netdev_err(bp->dev,
+ "Need to specify L3 rewrite as well\n");
+ return -EINVAL;
+ }
+ if (actions->nat.src_xlate)
+ actions->nat.l4.ports.sport = htons(val);
+ else
+ actions->nat.l4.ports.dport = htons(val);
+ netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
+ actions->nat.l4.ports.sport,
+ actions->nat.l4.ports.dport);
+ break;
+ default:
+ netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
struct flow_action *flow_action)
{
+ /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
+ * smac (6 bytes) if rewrite of both is specified, otherwise either
+ * dmac or smac
+ */
+ u16 eth_addr_mask[ETH_ALEN] = { 0 };
+ /* Used to store the L2 rewrite key for dmac (6 bytes) followed by
+ * smac (6 bytes) if rewrite of both is specified, otherwise either
+ * dmac or smac
+ */
+ u16 eth_addr[ETH_ALEN] = { 0 };
struct flow_action_entry *act;
int i, rc;
@@ -148,11 +324,26 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
case FLOW_ACTION_TUNNEL_DECAP:
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
break;
+ /* Packet edit: L2 rewrite, NAT, NAPT */
+ case FLOW_ACTION_MANGLE:
+ rc = bnxt_tc_parse_pedit(bp, actions, act, i,
+ (u8 *)eth_addr,
+ (u8 *)eth_addr_mask);
+ if (rc)
+ return rc;
+ break;
default:
break;
}
}
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+ rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
+ eth_addr_mask);
+ if (rc)
+ return rc;
+ }
+
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
@@ -401,6 +592,76 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle;
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+ memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
+ ETH_ALEN);
+ memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
+ ETH_ALEN);
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
+ }
+
+ if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) {
+ if (actions->nat.l3_is_ipv4) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;
+
+ if (actions->nat.src_xlate) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+ /* L3 source rewrite */
+ req.nat_ip_address[0] =
+ actions->nat.l3.ipv4.saddr.s_addr;
+ /* L4 source port */
+ if (actions->nat.l4.ports.sport)
+ req.nat_port =
+ actions->nat.l4.ports.sport;
+ } else {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+ /* L3 destination rewrite */
+ req.nat_ip_address[0] =
+ actions->nat.l3.ipv4.daddr.s_addr;
+ /* L4 destination port */
+ if (actions->nat.l4.ports.dport)
+ req.nat_port =
+ actions->nat.l4.ports.dport;
+ }
+ netdev_dbg(bp->dev,
+ "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
+ req.nat_ip_address, actions->nat.src_xlate,
+ req.nat_port);
+ } else {
+ if (actions->nat.src_xlate) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+ /* L3 source rewrite */
+ memcpy(req.nat_ip_address,
+ actions->nat.l3.ipv6.saddr.s6_addr32,
+ sizeof(req.nat_ip_address));
+ /* L4 source port */
+ if (actions->nat.l4.ports.sport)
+ req.nat_port =
+ actions->nat.l4.ports.sport;
+ } else {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+ /* L3 destination rewrite */
+ memcpy(req.nat_ip_address,
+ actions->nat.l3.ipv6.daddr.s6_addr32,
+ sizeof(req.nat_ip_address));
+ /* L4 destination port */
+ if (actions->nat.l4.ports.dport)
+ req.nat_port =
+ actions->nat.l4.ports.dport;
+ }
+ netdev_dbg(bp->dev,
+ "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
+ req.nat_ip_address, actions->nat.src_xlate,
+ req.nat_port);
+ }
+ }
+
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
req.tunnel_handle = tunnel_handle;
@@ -1274,7 +1535,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -EOPNOTSUPP;
- goto free_node;
+ kfree_rcu(new_node, rcu);
+ return rc;
}
/* If a flow exists with the same cookie, delete it */
@@ -1580,6 +1842,147 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
}
}
+static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+ struct flow_cls_offload *flower = type_data;
+ struct bnxt *bp = priv->bp;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct bnxt_flower_indr_block_cb_priv *
+bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+{
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+ if (cb_priv->tunnel_netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static void bnxt_tc_setup_indr_rel(void *cb_priv)
+{
+ struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
+ struct flow_block_offload *f)
+{
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->tunnel_netdev = netdev;
+ cb_priv->bp = bp;
+ list_add(&cb_priv->list, &bp->tc_indr_block_list);
+
+ block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ bnxt_tc_setup_indr_rel);
+ if (IS_ERR(block_cb)) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev);
+ if (!cb_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ bnxt_tc_setup_indr_block_cb,
+ cb_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
+{
+ return netif_is_vxlan(netdev);
+}
+
+static int bnxt_tc_indr_block_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct bnxt *bp;
+ int rc;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ if (!bnxt_is_netdev_indr_offload(netdev))
+ return NOTIFY_OK;
+
+ bp = container_of(nb, struct bnxt, tc_netdev_nb);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ rc = __flow_indr_block_cb_register(netdev, bp,
+ bnxt_tc_setup_indr_cb,
+ bp);
+ if (rc)
+ netdev_info(bp->dev,
+ "Failed to register indirect blk: dev: %s",
+ netdev->name);
+ break;
+ case NETDEV_UNREGISTER:
+ __flow_indr_block_cb_unregister(netdev,
+ bnxt_tc_setup_indr_cb,
+ bp);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
.head_offset = offsetof(struct bnxt_tc_flow_node, node),
.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
@@ -1663,7 +2066,15 @@ int bnxt_init_tc(struct bnxt *bp)
bp->dev->hw_features |= NETIF_F_HW_TC;
bp->dev->features |= NETIF_F_HW_TC;
bp->tc_info = tc_info;
- return 0;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&bp->tc_indr_block_list);
+ bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
+ rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+ if (!rc)
+ return 0;
+
+ rhashtable_destroy(&tc_info->encap_table);
destroy_decap_table:
rhashtable_destroy(&tc_info->decap_table);
@@ -1685,6 +2096,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
if (!bnxt_tc_flower_enabled(bp))
return;
+ unregister_netdevice_notifier(&bp->tc_netdev_nb);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 4f05305052f2..10c62b094914 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -62,6 +62,12 @@ struct bnxt_tc_tunnel_key {
__be32 id;
};
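+
+/* An L2 rewrite request is invalid when the whole key, or the whole
+ * mask, is wildcarded (both the dmac and smac halves all zero), i.e.
+ * neither a dmac nor an smac rewrite was actually specified.
+ */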
+#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \
+ ((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \
+ is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \
+ (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \
+ is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN)))
+
struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
@@ -71,6 +77,8 @@ struct bnxt_tc_actions {
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6)
#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7)
+#define BNXT_TC_ACTION_FLAG_L2_REWRITE BIT(8)
+#define BNXT_TC_ACTION_FLAG_NAT_XLATE BIT(9)
u16 dst_fid;
struct net_device *dst_dev;
@@ -79,6 +87,18 @@ struct bnxt_tc_actions {
/* tunnel encap */
struct ip_tunnel_key tun_encap_key;
+#define PEDIT_OFFSET_SMAC_LAST_4_BYTES 0x8
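+/* pedit mangles the 12-byte dmac+smac region in 32-bit words at byte
+ * offsets 0, 4 and 8; 0x8 therefore covers the last four bytes of the
+ * smac and is the highest valid offset.
+ */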
+ __be16 l2_rewrite_dmac[3];
+ __be16 l2_rewrite_smac[3];
+ struct {
+ bool src_xlate; /* true => translate src,
+ * false => translate dst
+ * Mutually exclusive, i.e. cannot set both
+ */
+ bool l3_is_ipv4; /* false means L3 is ipv6 */
+ struct bnxt_tc_l3_key l3;
+ struct bnxt_tc_l4_key l4;
+ } nat;
};
struct bnxt_tc_flow {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b2c160947fc8..c601ff7b8f61 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -81,7 +81,7 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
edev->en_ops->bnxt_free_msix(edev, ulp_id);
if (ulp->max_async_event_id)
- bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
RCU_INIT_POINTER(ulp->ulp_ops, NULL);
synchronize_rcu();
@@ -182,7 +182,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev)) {
+ if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
bnxt_close_nic(bp, true, false);
bnxt_open_nic(bp, true, false);
}
@@ -266,6 +266,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@@ -276,7 +277,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
}
}
-void bnxt_ulp_start(struct bnxt *bp)
+void bnxt_ulp_start(struct bnxt *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
@@ -285,6 +286,11 @@ void bnxt_ulp_start(struct bnxt *bp)
if (!edev)
return;
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+
+ if (err)
+ return;
+
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@@ -435,7 +441,7 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
ulp->max_async_event_id = max_id;
- bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+ bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index cd78453d0bf0..9895406b9830 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -64,6 +64,7 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
+ #define BNXT_EN_FLAG_ULP_STOPPED 0x8
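+ /* set by bnxt_ulp_stop() and cleared by
+ * bnxt_ulp_start(); while set,
+ * bnxt_free_msix_vecs() skips the NIC
+ * close/open cycle
+ */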
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
@@ -92,7 +93,7 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
-void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 155599dcee76..61ab7d21f6bd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5208,6 +5208,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
+ data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
+
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1de51811fcb4..120fa05a39ff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2576,7 +2576,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
}
/* Init rDma */
- bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
+ DMA_SCB_BURST_SIZE);
/* Initialize Rx queues */
ret = bcmgenet_init_rx_queues(priv->dev);
@@ -2589,7 +2590,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
}
/* Init tDma */
- bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
+ DMA_SCB_BURST_SIZE);
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
@@ -3420,12 +3422,48 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
params->words_per_bd);
}
+struct bcmgenet_plat_data {
+ enum bcmgenet_version version;
+ u32 dma_max_burst_length;
+};
+
+static const struct bcmgenet_plat_data v1_plat_data = {
+ .version = GENET_V1,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v2_plat_data = {
+ .version = GENET_V2,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v3_plat_data = {
+ .version = GENET_V3,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v4_plat_data = {
+ .version = GENET_V4,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v5_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data bcm2711_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = 0x08,
+};
+
static const struct of_device_id bcmgenet_match[] = {
- { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
- { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
- { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
- { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
- { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
+ { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
+ { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
+ { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
+ { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
+ { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
+ { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3435,6 +3473,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
+ const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
@@ -3458,24 +3497,21 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
+ if (priv->irq0 < 0) {
+ err = priv->irq0;
+ goto err;
+ }
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
- if (!priv->irq0 || !priv->irq1) {
- dev_err(&pdev->dev, "can't find IRQs\n");
- err = -EINVAL;
+ if (priv->irq1 < 0) {
+ err = priv->irq1;
goto err;
}
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
- if (dn) {
+ if (dn)
macaddr = of_get_mac_address(dn);
- if (IS_ERR(macaddr)) {
- dev_err(&pdev->dev, "can't find MAC address\n");
- err = -EINVAL;
- goto err;
- }
- } else {
+ else
macaddr = pd->mac_address;
- }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -3487,7 +3523,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
- ether_addr_copy(dev->dev_addr, macaddr);
+ if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(dev);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
@@ -3514,10 +3555,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- if (of_id)
- priv->version = (enum bcmgenet_version)of_id->data;
- else
+ if (of_id) {
+ pdata = of_id->data;
+ priv->version = pdata->version;
+ priv->dma_max_burst_length = pdata->dma_max_burst_length;
+ } else {
priv->version = pd->genet_version;
+ priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
+ }
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
@@ -3602,6 +3647,11 @@ static int bcmgenet_remove(struct platform_device *pdev)
return 0;
}
+static void bcmgenet_shutdown(struct platform_device *pdev)
+{
+ bcmgenet_remove(pdev);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume(struct device *d)
{
@@ -3721,6 +3771,7 @@ static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
.remove = bcmgenet_remove,
+ .shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
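
The bcmgenet changes above replace the bare enum stashed in of_device_id->data with a per-compatible struct bcmgenet_plat_data, which is what lets the BCM2711 entry carry a smaller DMA burst length alongside its GENET_V5 core version. When a driver is DT-only, the generic of_device_get_match_data() helper fetches the same pointer without touching the match table by hand; a sketch under that assumption, with hypothetical foo_* names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_plat_data {
	int version;
	u32 dma_max_burst_length;
};

static const struct foo_plat_data foo_bcm2711_data = {
	.version = 5,
	.dma_max_burst_length = 0x08,	/* SoC-specific quirk */
};

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-v5", .data = &foo_bcm2711_data },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_plat_data *pdata;

	pdata = of_device_get_match_data(&pdev->dev);
	if (!pdata)	/* non-DT platforms need a pdata fallback instead */
		return -EINVAL;

	/* ... program pdata->dma_max_burst_length into the DMA block ... */
	return 0;
}
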
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbc69d8fa05f..a5659197598f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -664,6 +664,7 @@ struct bcmgenet_priv {
bool crc_fwd_en;
unsigned int dma_rx_chk_bit;
+ u32 dma_max_burst_length;
u32 msg_enable;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index dbe18cdf6c1b..6392a2530183 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -213,11 +213,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
udelay(2);
}
- priv->ext_phy = !priv->internal_phy &&
- (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
-
switch (priv->phy_interface) {
case PHY_INTERFACE_MODE_INTERNAL:
+ phy_name = "internal PHY";
+ /* fall through */
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
@@ -229,11 +228,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
else
port_ctrl = PORT_MODE_INT_EPHY;
- bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
-
- if (priv->internal_phy) {
- phy_name = "internal PHY";
- } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ if (!phy_name) {
phy_name = "MoCA";
bcmgenet_moca_phy_setup(priv);
}
@@ -242,11 +237,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
case PHY_INTERFACE_MODE_MII:
phy_name = "external MII";
phy_set_max_speed(phydev, SPEED_100);
- bcmgenet_sys_writel(priv,
- PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
- /* Restore the MII PHY after isolation */
- if (bmcr >= 0)
- phy_write(phydev, MII_BMCR, bmcr);
+ port_ctrl = PORT_MODE_EXT_EPHY;
break;
case PHY_INTERFACE_MODE_REVMII:
@@ -261,31 +252,43 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
- bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
break;
case PHY_INTERFACE_MODE_RGMII:
/* RGMII_NO_ID: TXC transitions at the same time as TXD
* (requires PCB or receiver-side delay)
- * RGMII: Add 2ns delay on TXC (90 degree shift)
*
* ID is implicitly disabled for 100Mbps (RG)MII operation.
*/
+ phy_name = "external RGMII (no delay)";
id_mode_dis = BIT(16);
- /* fall through */
+ port_ctrl = PORT_MODE_EXT_GPHY;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (id_mode_dis)
- phy_name = "external RGMII (no delay)";
- else
- phy_name = "external RGMII (TX delay)";
- bcmgenet_sys_writel(priv,
- PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */
+ phy_name = "external RGMII (TX delay)";
+ port_ctrl = PORT_MODE_EXT_GPHY;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ phy_name = "external RGMII (RX delay)";
+ port_ctrl = PORT_MODE_EXT_GPHY;
break;
default:
dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
return -EINVAL;
}
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ /* Restore the MII PHY after isolation */
+ if (bmcr >= 0)
+ phy_write(phydev, MII_BMCR, bmcr);
+
+ priv->ext_phy = !priv->internal_phy &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
/* This is an external PHY (xMII), so we need to enable the RGMII
* block for the interface to work
*/
@@ -479,7 +482,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
struct phy_device *phydev;
- int phy_mode;
+ phy_interface_t phy_mode;
int ret;
/* Fetch the PHY phandle */
@@ -497,10 +500,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
}
/* Get the link mode */
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dn, &phy_mode);
+ if (ret) {
dev_err(kdev, "invalid PHY mode property\n");
- return phy_mode;
+ return ret;
}
priv->phy_interface = phy_mode;
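
Both this hunk and the macb change later in the series adapt to the reworked of_get_phy_mode(), which now returns an error code and writes the mode through a phy_interface_t out-parameter instead of overloading one int for both. The call pattern, sketched with an assumed MII fallback for a missing property (bcmmii errors out instead; macb defaults):

#include <linux/of_net.h>
#include <linux/phy.h>

static phy_interface_t foo_get_phy_mode(struct device_node *np)
{
	phy_interface_t mode;

	if (of_get_phy_mode(np, &mode))	/* property absent or unknown */
		mode = PHY_INTERFACE_MODE_MII;	/* driver-chosen default */
	return mode;
}
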
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 77f3511b97de..ca3aa1250dd1 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (rq->perout.index != 0)
return -EINVAL;
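
The tg3 hunk hardens the PTP periodic-output path: the flags field of struct ptp_perout_request was previously unchecked, so userspace could pass flag bits and silently get plain periodic output. Drivers are expected to reject any flag they do not implement; a minimal sketch of the shape:

#include <linux/ptp_clock_kernel.h>

static int foo_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.flags)	/* no perout flags supported here */
			return -EOPNOTSUPP;
		if (rq->perout.index != 0)
			return -EINVAL;
		/* ... program the periodic output ... */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
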
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f4b3bd85dfe3..53b50c24d9c9 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
- select PHYLIB
+ select PHYLINK
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 03983bd46eef..19fe4f4867c7 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -7,7 +7,7 @@
#ifndef _MACB_H
#define _MACB_H
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
@@ -1185,15 +1185,14 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
- struct device_node *phy_node;
- int link;
- int speed;
- int duplex;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u32 caps;
unsigned int dma_burst_length;
phy_interface_t phy_interface;
+ int speed;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1e1b774e1953..d5ae2e1e0b0e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -25,7 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -388,6 +388,27 @@ mdio_pm_exit:
return status;
}
+static void macb_init_buffers(struct macb *bp)
+{
+ struct macb_queue *queue;
+ unsigned int q;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+ }
+}
+
/**
* macb_set_tx_clk() - Set a clock to a new frequency
* @clk Pointer to the clock to change
@@ -432,114 +453,178 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
netdev_err(dev, "adjusting tx_clk failed.\n");
}
-static void macb_handle_link_change(struct net_device *dev)
+static void macb_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct macb *bp = netdev_priv(ndev);
+
+ /* We only support MII, RMII, GMII, RGMII & SGMII. */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_RMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ if (!macb_is_gem(bp) &&
+ (state->interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(state->interface))) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
+ (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_GMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(state->interface))) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void macb_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ state->link = 0;
+}
+
+static void macb_mac_an_restart(struct phylink_config *config)
+{
+ /* Not supported */
+}
+
+static void macb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
unsigned long flags;
- int status_change = 0;
+ u32 old_ctrl, ctrl;
spin_lock_irqsave(&bp->lock, flags);
- if (phydev->link) {
- if ((bp->speed != phydev->speed) ||
- (bp->duplex != phydev->duplex)) {
- u32 reg;
+ old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
- reg = macb_readl(bp, NCFGR);
- reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
- if (macb_is_gem(bp))
- reg &= ~GEM_BIT(GBE);
+ /* Clear all the bits we might set later */
+ ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) |
+ GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
- if (phydev->duplex)
- reg |= MACB_BIT(FD);
- if (phydev->speed == SPEED_100)
- reg |= MACB_BIT(SPD);
- if (phydev->speed == SPEED_1000 &&
- bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- reg |= GEM_BIT(GBE);
+ if (state->speed == SPEED_1000)
+ ctrl |= GEM_BIT(GBE);
+ else if (state->speed == SPEED_100)
+ ctrl |= MACB_BIT(SPD);
- macb_or_gem_writel(bp, NCFGR, reg);
+ if (state->duplex)
+ ctrl |= MACB_BIT(FD);
- bp->speed = phydev->speed;
- bp->duplex = phydev->duplex;
- status_change = 1;
- }
- }
+ /* We do not support MLO_PAUSE_RX yet */
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl |= MACB_BIT(PAE);
- if (phydev->link != bp->link) {
- if (!phydev->link) {
- bp->speed = 0;
- bp->duplex = -1;
- }
- bp->link = phydev->link;
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
- status_change = 1;
- }
+ /* Apply the new configuration, if any */
+ if (old_ctrl ^ ctrl)
+ macb_or_gem_writel(bp, NCFGR, ctrl);
+
+ bp->speed = state->speed;
spin_unlock_irqrestore(&bp->lock, flags);
+}
- if (status_change) {
- if (phydev->link) {
- /* Update the TX clock rate if and only if the link is
- * up and there has been a link change.
- */
- macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ unsigned int q;
+ u32 ctrl;
- netif_carrier_on(dev);
- netdev_info(dev, "link up (%d/%s)\n",
- phydev->speed,
- phydev->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- } else {
- netif_carrier_off(dev);
- netdev_info(dev, "link down\n");
- }
- }
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IDR,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+ /* Disable Rx and Tx */
+ ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(ndev);
}
-/* based on au1000_eth. c*/
-static int macb_mii_probe(struct net_device *dev)
+static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *np;
- int ret, i;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ unsigned int q;
- np = bp->pdev->dev.of_node;
- ret = 0;
+ macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
- if (np) {
- if (of_phy_is_fixed_link(np)) {
- bp->phy_node = of_node_get(np);
- } else {
- bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
- /* fallback to standard phy registration if no
- * phy-handle was found nor any phy found during
- * dt phy registration
- */
- if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev) &&
- PTR_ERR(phydev) != -ENODEV) {
- ret = PTR_ERR(phydev);
- break;
- }
- }
+ /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+ * cleared the pipeline and control registers.
+ */
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_buffers(bp);
- if (ret)
- return -ENODEV;
- }
- }
- }
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IER,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+ /* Enable Rx and Tx */
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops macb_phylink_ops = {
+ .validate = macb_validate,
+ .mac_pcs_get_state = macb_mac_pcs_get_state,
+ .mac_an_restart = macb_mac_an_restart,
+ .mac_config = macb_mac_config,
+ .mac_link_down = macb_mac_link_down,
+ .mac_link_up = macb_mac_link_up,
+};
- if (bp->phy_node) {
- phydev = of_phy_connect(dev, bp->phy_node,
- &macb_handle_link_change, 0,
- bp->phy_interface);
- if (!phydev)
- return -ENODEV;
+static int macb_phylink_connect(struct macb *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (bp->pdev->dev.of_node &&
+ of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
+ ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
+ 0);
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
} else {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
@@ -548,27 +633,33 @@ static int macb_mii_probe(struct net_device *dev)
}
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
- bp->phy_interface);
+ ret = phylink_connect_phy(bp->phylink, phydev);
if (ret) {
- netdev_err(dev, "Could not attach to PHY\n");
+ netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
return ret;
}
}
- /* mask with MAC supported features */
- if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- phy_set_max_speed(phydev, SPEED_1000);
- else
- phy_set_max_speed(phydev, SPEED_100);
+ phylink_start(bp->phylink);
- if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ return 0;
+}
- bp->link = 0;
- bp->speed = 0;
- bp->duplex = -1;
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ bp->phylink_config.dev = &dev->dev;
+ bp->phylink_config.type = PHYLINK_NETDEV;
+
+ bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
+ bp->phy_interface, &macb_phylink_ops);
+ if (IS_ERR(bp->phylink)) {
+ netdev_err(dev, "Could not create a phylink instance (%ld)\n",
+ PTR_ERR(bp->phylink));
+ return PTR_ERR(bp->phylink);
+ }
return 0;
}
@@ -598,20 +689,10 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (np && of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification %pOF\n", np);
- goto err_out_free_mdiobus;
- }
-
- err = mdiobus_register(bp->mii_bus);
- } else {
- err = of_mdiobus_register(bp->mii_bus, np);
- }
+ err = of_mdiobus_register(bp->mii_bus, np);
if (err)
- goto err_out_free_fixed_link;
+ goto err_out_free_mdiobus;
err = macb_mii_probe(bp->dev);
if (err)
@@ -621,11 +702,7 @@ static int macb_mii_init(struct macb *bp)
err_out_unregister_bus:
mdiobus_unregister(bp->mii_bus);
-err_out_free_fixed_link:
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
- of_node_put(bp->phy_node);
mdiobus_free(bp->mii_bus);
err_out:
return err;
@@ -1314,26 +1391,14 @@ static void macb_hresp_error_task(unsigned long data)
bp->macbgem_ops.mog_init_rings(bp);
/* Initialize TX and RX buffers */
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
-#endif
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
+ macb_init_buffers(bp);
- /* Enable interrupts */
+ /* Enable interrupts */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue_writel(queue, IER,
bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
- }
ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
macb_writel(bp, NCR, ctrl);
@@ -2221,19 +2286,13 @@ static void macb_configure_dma(struct macb *bp)
static void macb_init_hw(struct macb *bp)
{
- struct macb_queue *queue;
- unsigned int q;
-
u32 config;
macb_reset_hw(bp);
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
- config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
- config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2249,36 +2308,11 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCFGR, config);
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
gem_writel(bp, JML, bp->jumbo_max_len);
- bp->speed = SPEED_10;
- bp->duplex = DUPLEX_HALF;
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
-
- /* Initialize TX and RX buffers */
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
-#endif
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
-
- /* Enable interrupts */
- queue_writel(queue, IER,
- bp->rx_intr_mask |
- MACB_TX_INT_FLAGS |
- MACB_BIT(HRESP));
- }
-
- /* Enable TX and RX */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
@@ -2402,8 +2436,8 @@ static void macb_set_rx_mode(struct net_device *dev)
static int macb_open(struct net_device *dev)
{
- struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
unsigned int q;
int err;
@@ -2414,15 +2448,6 @@ static int macb_open(struct net_device *dev)
if (err < 0)
goto pm_exit;
- /* carrier starts down */
- netif_carrier_off(dev);
-
- /* if the phy is not yet register, retry later*/
- if (!dev->phydev) {
- err = -EAGAIN;
- goto pm_exit;
- }
-
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2436,11 +2461,11 @@ static int macb_open(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_enable(&queue->napi);
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
- /* schedule a link state check */
- phy_start(dev->phydev);
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto pm_exit;
netif_tx_start_all_queues(dev);
@@ -2467,8 +2492,8 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_disable(&queue->napi);
- if (dev->phydev)
- phy_stop(dev->phydev);
+ phylink_stop(bp->phylink);
+ phylink_disconnect_phy(bp->phylink);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -2702,17 +2727,18 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->supported = 0;
wol->wolopts = 0;
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ phylink_ethtool_get_wol(bp->phylink, wol);
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ ret = phylink_ethtool_set_wol(bp->phylink, wol);
+ if (!ret)
+ return 0;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
@@ -2728,6 +2754,22 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int macb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *kset)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(bp->phylink, kset);
+}
+
+static int macb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *kset)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(bp->phylink, kset);
+}
+
static void macb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -3164,8 +3206,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = macb_get_link_ksettings,
+ .set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
};
@@ -3178,8 +3220,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = macb_get_link_ksettings,
+ .set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
.get_rxnfc = gem_get_rxnfc,
@@ -3188,26 +3230,21 @@ static const struct ethtool_ops gem_ethtool_ops = {
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct phy_device *phydev = dev->phydev;
struct macb *bp = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
- if (!phydev)
- return -ENODEV;
-
- if (!bp->ptp_info)
- return phy_mii_ioctl(phydev, rq, cmd);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
- case SIOCGHWTSTAMP:
- return bp->ptp_info->get_hwtst(dev, rq);
- default:
- return phy_mii_ioctl(phydev, rq, cmd);
+ if (bp->ptp_info) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bp->ptp_info->set_hwtst(dev, rq, cmd);
+ case SIOCGHWTSTAMP:
+ return bp->ptp_info->get_hwtst(dev, rq);
+ }
}
+
+ return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
static inline void macb_set_txcsum_feature(struct macb *bp,
@@ -3330,7 +3367,8 @@ static void macb_configure_caps(struct macb *bp,
#ifdef CONFIG_MACB_USE_HWSTAMP
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
- pr_err("GEM doesn't support hardware ptp.\n");
+ dev_err(&bp->pdev->dev,
+ "GEM doesn't support hardware ptp.\n");
else {
bp->hw_dma_cap |= HW_DMA_CAP_PTP;
bp->ptp_info = &gem_ptp_info;
@@ -3707,8 +3745,9 @@ static int at91ether_open(struct net_device *dev)
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
- /* schedule a link state check */
- phy_start(dev->phydev);
+ ret = macb_phylink_connect(lp);
+ if (ret)
+ return ret;
netif_start_queue(dev);
@@ -3737,6 +3776,9 @@ static int at91ether_close(struct net_device *dev)
netif_stop_queue(dev);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
@@ -4181,7 +4223,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
bool native_io;
- struct phy_device *phydev;
+ phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
@@ -4308,12 +4350,14 @@ static int macb_probe(struct platform_device *pdev)
macb_get_hwaddr(bp);
}
- err = of_get_phy_mode(np);
- if (err < 0)
+ err = of_get_phy_mode(np, &interface);
+ if (err)
/* not found in DT, MII by default */
bp->phy_interface = PHY_INTERFACE_MODE_MII;
else
- bp->phy_interface = err;
+ bp->phy_interface = interface;
+
+ bp->speed = SPEED_UNKNOWN;
/* IP specific init */
err = init(pdev);
@@ -4324,8 +4368,6 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
- phydev = dev->phydev;
-
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -4337,8 +4379,6 @@ static int macb_probe(struct platform_device *pdev)
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
(unsigned long)bp);
- phy_attached_info(phydev);
-
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
@@ -4349,11 +4389,7 @@ static int macb_probe(struct platform_device *pdev)
return 0;
err_out_unregister_mdio:
- phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
- of_node_put(bp->phy_node);
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
err_out_free_netdev:
@@ -4377,18 +4413,12 @@ static int macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
- struct device_node *np = pdev->dev.of_node;
dev = platform_get_drvdata(pdev);
if (dev) {
bp = netdev_priv(dev);
- if (dev->phydev)
- phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
@@ -4403,7 +4433,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->tsu_clk);
pm_runtime_set_suspended(&pdev->dev);
}
- of_node_put(bp->phy_node);
+ phylink_destroy(bp->phylink);
free_netdev(dev);
}
@@ -4421,7 +4451,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!netif_running(netdev))
return 0;
-
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
@@ -4432,8 +4461,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_disable(&queue->napi);
- phy_stop(netdev->phydev);
- phy_suspend(netdev->phydev);
+ rtnl_lock();
+ phylink_stop(bp->phylink);
+ rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -4481,12 +4511,11 @@ static int __maybe_unused macb_resume(struct device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_enable(&queue->napi);
- phy_resume(netdev->phydev);
- phy_init_hw(netdev->phydev);
- phy_start(netdev->phydev);
+ rtnl_lock();
+ phylink_start(bp->phylink);
+ rtnl_unlock();
}
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
macb_set_rx_mode(netdev);
macb_restore_features(bp);
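
The macb conversion above swaps the driver's hand-rolled PHY state tracking (link/speed/duplex fields plus macb_handle_link_change()) for phylink, which owns link state and calls back into the MAC. The skeleton every such conversion shares, sketched with hypothetical foo_* callbacks against the 5.5-era phylink API:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/phylink.h>

struct foo_priv {
	struct net_device *ndev;
	struct device_node *np;
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct phylink_config phylink_config;
};

static void foo_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	/* clear unsupported modes in *supported; leaving it empty
	 * rejects the interface mode outright */
}

static void foo_pcs_get_state(struct phylink_config *config,
			      struct phylink_link_state *state)
{
	state->link = 0;	/* no in-band link status on this MAC */
}

static void foo_an_restart(struct phylink_config *config)
{
	/* in-band autoneg not supported */
}

static void foo_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	/* program speed/duplex/pause into the MAC, under the MAC lock */
}

static void foo_mac_link_down(struct phylink_config *config,
			      unsigned int mode, phy_interface_t interface)
{
	/* mask IRQs, disable RX/TX, netif_tx_stop_all_queues() */
}

static void foo_mac_link_up(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface, struct phy_device *phy)
{
	/* re-init rings, unmask IRQs, netif_tx_wake_all_queues() */
}

static const struct phylink_mac_ops foo_phylink_ops = {
	.validate	= foo_validate,
	.mac_pcs_get_state = foo_pcs_get_state,
	.mac_an_restart	= foo_an_restart,
	.mac_config	= foo_mac_config,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
};

static int foo_phylink_create(struct foo_priv *priv)
{
	priv->phylink_config.dev = &priv->ndev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;

	priv->phylink = phylink_create(&priv->phylink_config,
				       of_fwnode_handle(priv->np),
				       priv->phy_mode, &foo_phylink_ops);
	return PTR_ERR_OR_ZERO(priv->phylink);
}

At ndo_open time the driver attaches and starts the instance (phylink_of_phy_connect() or phylink_connect_phy(), then phylink_start()); ndo_stop reverses it with phylink_stop() plus phylink_disconnect_phy(), exactly as macb_open()/macb_close() do above.
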
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f96a42af1014..af04a2c81adb 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1914,10 +1914,10 @@ static struct platform_driver xgmac_driver = {
.driver = {
.name = "calxedaxgmac",
.of_match_table = xgmac_of_match,
+ .pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
.remove = xgmac_remove,
- .driver.pm = &xgmac_pm_ops,
};
module_platform_driver(xgmac_driver);
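
The xgmac fixup is initializer hygiene: the struct listed .driver = { ... } and a separate .driver.pm designator, initializing the same subobject twice, which GCC flags under -Woverride-init and which is easy to misread. Keeping every sub-field inside the single .driver block avoids it; a sketch with illustrative foo_* names:

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name		= "foo",
		.of_match_table	= foo_of_match,
		.pm		= &foo_pm_ops,	/* lives with its siblings */
	},
};
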
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 40a44dcb3d9b..f28409279ea4 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1876,13 +1876,8 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
if (nic->xdp_prog) {
/* Attach BPF program */
- nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
- if (!IS_ERR(nic->xdp_prog)) {
- bpf_attached = true;
- } else {
- ret = PTR_ERR(nic->xdp_prog);
- nic->xdp_prog = NULL;
- }
+ bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
+ bpf_attached = true;
}
/* Calculate Tx queues needed for XDP and network stack */
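
The nicvf simplification follows a core BPF change: prog reference counts moved to atomic64_t, so bpf_prog_add() can no longer overflow and now returns void, making the old IS_ERR() dance around it dead code. Call sites reduce to taking the references unconditionally; a sketch with a hypothetical helper name:

#include <linux/bpf.h>

/* one extra reference per additional RX queue sharing the program */
static void foo_take_xdp_refs(struct bpf_prog *prog, int nr_extra_queues)
{
	bpf_prog_add(prog, nr_extra_queues);
}
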
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index acb016834f04..1e09fdb63c4f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1007,14 +1007,14 @@ static void bgx_poll_for_link(struct work_struct *work)
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
- lmac->link_up = 1;
+ lmac->link_up = true;
if (lmac->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = SPEED_40000;
else
lmac->last_speed = SPEED_10000;
lmac->last_duplex = DUPLEX_FULL;
} else {
- lmac->link_up = 0;
+ lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
@@ -1023,7 +1023,7 @@ static void bgx_poll_for_link(struct work_struct *work)
if (lmac->link_up) {
if (bgx_xaui_check_link(lmac)) {
/* Errors, clear link_up state */
- lmac->link_up = 0;
+ lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
@@ -1055,11 +1055,11 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
- lmac->is_sgmii = 1;
+ lmac->is_sgmii = true;
if (bgx_lmac_sgmii_init(bgx, lmac))
return -1;
} else {
- lmac->is_sgmii = 0;
+ lmac->is_sgmii = false;
if (bgx_lmac_xaui_init(bgx, lmac))
return -1;
}
@@ -1304,7 +1304,7 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
(lmac->lmac_type != BGX_MODE_40G_KR)) {
- lmac->use_training = 0;
+ lmac->use_training = false;
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 20390f6afbb4..a4b4d475abf8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
- cudbg_common.o cudbg_lib.o cudbg_zlib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \
+ cxgb4_tc_matchall.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 69746696a929..f5be3ee1bdb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype {
CUDBG_QTYPE_CRYPTO_FLQ,
CUDBG_QTYPE_TLS_RXQ,
CUDBG_QTYPE_TLS_FLQ,
+ CUDBG_QTYPE_ETHOFLD_TXQ,
+ CUDBG_QTYPE_ETHOFLD_RXQ,
+ CUDBG_QTYPE_ETHOFLD_FLQ,
CUDBG_QTYPE_MAX,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index c2e92786608b..19c11568113a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -4,6 +4,7 @@
*/
#include <linux/sort.h>
+#include <linux/string.h>
#include "t4_regs.h"
#include "cxgb4.h"
@@ -776,24 +777,18 @@ static int cudbg_get_mem_region(struct adapter *padap,
struct cudbg_mem_desc *mem_desc)
{
u8 mc, found = 0;
- u32 i, idx = 0;
- int rc;
+ u32 idx = 0;
+ int rc, i;
rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
if (rc)
return rc;
- for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
- if (!strcmp(cudbg_region[i], region_name)) {
- found = 1;
- idx = i;
- break;
- }
- }
- if (!found)
+ i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
+ if (i < 0)
return -EINVAL;
- found = 0;
+ idx = i;
for (i = 0; i < meminfo->mem_c; i++) {
if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* Skip holes */
@@ -2930,6 +2925,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
MAX_RXQ_DESC_SIZE;
+ /* ETHOFLD TXQ, RXQ, and FLQ */
+ tot_entries += MAX_OFLD_QSETS * 3;
+ tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+
tot_size += sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_qdesc_info) +
sizeof(struct cudbg_qdesc_entry) * tot_entries;
@@ -3087,6 +3086,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
}
}
+ /* ETHOFLD TXQ */
+ if (s->eohw_txq)
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_TXQ(&s->eohw_txq[i].q,
+ CUDBG_QTYPE_ETHOFLD_TXQ, out);
+
+ /* ETHOFLD RXQ and FLQ */
+ if (s->eohw_rxq) {
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
+ CUDBG_QTYPE_ETHOFLD_RXQ, out);
+
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
+ CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ }
+
out_unlock:
mutex_unlock(&uld_mutex);
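
cudbg_get_mem_region()'s open-coded strcmp loop becomes a match_string() call, which returns the index of the matching array entry or -EINVAL. The helper's shape, sketched with illustrative names:

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const foo_regions[] = { "region-a", "region-b" };

static int foo_region_index(const char *name)
{
	/* match_string() returns the array index, or -EINVAL if absent */
	return match_string(foo_regions, ARRAY_SIZE(foo_regions), name);
}
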
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1fbb640e896a..a70ac2097892 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -392,6 +392,7 @@ struct adapter_params {
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char crypto; /* HW capability for crypto */
+ unsigned char ethofld; /* QoS support */
unsigned char bypass;
unsigned char hash_filter;
@@ -602,6 +603,8 @@ struct port_info {
u8 vivld;
u8 smt_idx;
u8 rx_cchan;
+
+ bool tc_block_shared;
};
struct dentry;
@@ -711,6 +714,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_eth_stats stats;
+ struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */
@@ -724,13 +728,19 @@ struct sge_ofld_rxq { /* SW offload Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_ofld_stats stats;
+ struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct tx_desc {
__be64 flit[8];
};
-struct tx_sw_desc;
+struct ulptx_sgl;
+
+struct tx_sw_desc {
+ struct sk_buff *skb; /* SKB to free after getting completion */
+ dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
+};
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
@@ -762,6 +772,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
u8 dbqt; /* SGE Doorbell Queue Timer in use */
unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */
unsigned long tso; /* # of TSO requests */
+ unsigned long uso; /* # of USO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
@@ -788,7 +799,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_uld_rxq_info {
char name[IFNAMSIZ]; /* name of ULD driver */
struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
- u16 *msix_tbl; /* msix_tbl for uld */
u16 *rspq_id; /* response queue id's of rxq */
u16 nrxq; /* # of ingress uld queues */
u16 nciq; /* # of completion queues */
@@ -801,6 +811,51 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
+enum sge_eosw_state {
+ CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
+ CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
+ CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
+ CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
+};
+
+struct sge_eosw_txq {
+ spinlock_t lock; /* Per queue lock to synchronize completions */
+ enum sge_eosw_state state; /* Current ETHOFLD State */
+ struct tx_sw_desc *desc; /* Descriptor ring to hold packets */
+ u32 ndesc; /* Number of descriptors */
+ u32 pidx; /* Current Producer Index */
+ u32 last_pidx; /* Last successfully transmitted Producer Index */
+ u32 cidx; /* Current Consumer Index */
+ u32 last_cidx; /* Last successfully reclaimed Consumer Index */
+ u32 flowc_idx; /* Descriptor containing a FLOWC request */
+ u32 inuse; /* Number of packets held in ring */
+
+ u32 cred; /* Current available credits */
+ u32 ncompl; /* # of completions posted */
+ u32 last_compl; /* # of credits consumed since last completion req */
+
+ u32 eotid; /* Index into EOTID table in software */
+ u32 hwtid; /* Hardware EOTID index */
+
+ u32 hwqid; /* Underlying hardware queue index */
+ struct net_device *netdev; /* Pointer to netdevice */
+ struct tasklet_struct qresume_tsk; /* Restarts the queue */
+ struct completion completion; /* completion for FLOWC rendezvous */
+};
+
+struct sge_eohw_txq {
+ spinlock_t lock; /* Per queue lock */
+ struct sge_txq q; /* HW Txq */
+ struct adapter *adap; /* Backpointer to adapter */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long uso; /* # of USO requests */
+ unsigned long tx_cso; /* # of Tx checksum offloads */
+ unsigned long vlan_ins; /* # of Tx VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+};
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_txq ptptxq;
@@ -814,11 +869,16 @@ struct sge {
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
+ struct sge_eohw_txq *eohw_txq;
+ struct sge_ofld_rxq *eohw_rxq;
+
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
+ u16 eoqsets; /* # of ETHOFLD queues */
+
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u16 dbqtimer_tick;
@@ -841,6 +901,9 @@ struct sge {
unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
+
+ int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
+ int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
@@ -870,13 +933,13 @@ struct hash_mac_addr {
unsigned int iface_mac;
};
-struct uld_msix_bmap {
+struct msix_bmap {
unsigned long *msix_bmap;
unsigned int mapsize;
spinlock_t lock; /* lock for acquiring bitmap */
};
-struct uld_msix_info {
+struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
unsigned int idx;
@@ -945,14 +1008,9 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- struct msix_info {
- unsigned short vec;
- char desc[IFNAMSIZ + 10];
- cpumask_var_t aff_mask;
- } msix_info[MAX_INGQ + 1];
- struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
- struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
- int msi_idx;
+ /* MSI-X Info for NIC and OFLD queues */
+ struct msix_info *msix_info;
+ struct msix_bmap msix_bmap;
struct doorbell_stats db_stats;
struct sge sge;
@@ -1044,6 +1102,12 @@ struct adapter {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal ch_thermal;
#endif
+
+ /* TC MQPRIO offload */
+ struct cxgb4_tc_mqprio *tc_mqprio;
+
+ /* TC MATCHALL classifier offload */
+ struct cxgb4_tc_matchall *tc_matchall;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1073,10 +1137,12 @@ enum {
enum {
SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */
+ SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */
};
enum {
SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
+ SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */
};
enum {
@@ -1087,11 +1153,6 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1100,6 +1161,14 @@ struct ch_sched_queue {
s8 class; /* class index */
};
+/* Support for "sched_flowc" command to allow one or more FLOWC
+ * to be bound to a TX Scheduling Class.
+ */
+struct ch_sched_flowc {
+ s32 tid; /* TID to bind */
+ s8 class; /* class index */
+};
+
/* Defined bit width of user definable filter tuples
*/
#define ETHTYPE_BITWIDTH 16
@@ -1214,8 +1283,11 @@ struct ch_filter_specification {
u16 nat_lport; /* local port to use after NAT'ing */
u16 nat_fport; /* foreign port to use after NAT'ing */
+ u32 tc_prio; /* TC's filter priority index */
+ u64 tc_cookie; /* Unique cookie identifying TC rules */
+
/* reservation for future additions */
- u8 rsvd[24];
+ u8 rsvd[12];
/* Filter rule value/mask pairs.
*/
@@ -1293,6 +1365,11 @@ static inline int is_uld(const struct adapter *adap)
return (adap->params.offload || adap->params.crypto);
}
+static inline int is_ethofld(const struct adapter *adap)
+{
+ return adap->params.ethofld;
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1426,6 +1503,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type);
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid);
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
@@ -1890,6 +1970,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
+void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
+ u32 ndesc);
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
+void cxgb4_ethofld_restart(unsigned long data);
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
void cxgb4_reclaim_completed_tx(struct adapter *adap,
struct sge_txq *q, bool unmap);
@@ -1948,5 +2034,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
-
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
+int cxgb_open(struct net_device *dev);
+int cxgb_close(struct net_device *dev);
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
+void cxgb4_quiesce_rx(struct sge_rspq *q);
#endif /* __CXGB4_H__ */
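
The header rework above folds the old per-ULD MSI-X bookkeeping into one adapter-wide msix_info table plus a struct msix_bmap, so NIC and offload queues draw vector indices from the same pool (via cxgb4_get_msix_idx_from_bmap() and cxgb4_free_msix_idx_in_bmap()). The bitmap-under-spinlock pattern the struct implies, sketched generically rather than as the driver's actual implementation:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct foo_bmap {
	unsigned long *map;
	unsigned int mapsize;
	spinlock_t lock;	/* serializes allocate/free */
};

static int foo_get_idx(struct foo_bmap *b)
{
	unsigned int idx;

	spin_lock(&b->lock);
	idx = find_first_zero_bit(b->map, b->mapsize);
	if (idx < b->mapsize)
		__set_bit(idx, b->map);	/* claim it while locked */
	spin_unlock(&b->lock);

	return idx < b->mapsize ? (int)idx : -ENOSPC;
}

static void foo_put_idx(struct foo_bmap *b, unsigned int idx)
{
	spin_lock(&b->lock);
	__clear_bit(idx, b->map);
	spin_unlock(&b->lock);
}
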
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index ae6a47dd7dc9..93868dca186a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int eth_entries, ctrl_entries, eo_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
@@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
int i, n, r = (uintptr_t)v - 1;
- int eth_entries, ctrl_entries;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+ if (adap->sge.eohw_txq)
+ eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
mutex_lock(&uld_mutex);
if (s->uld_txq_info)
@@ -2746,6 +2748,7 @@ do { \
RL("RxDrops:", stats.rx_drops);
RL("RxBadPkts:", stats.bad_rx_pkts);
TL("TSO:", tso);
+ TL("USO:", uso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);
@@ -2761,6 +2764,55 @@ do { \
}
r -= eth_entries;
+ if (r < eo_entries) {
+ int base_qset = r * 4;
+ const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
+ const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
+
+ n = min(4, s->eoqsets - 4 * r);
+
+ S("QType:", "ETHOFLD");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImm:", stats.imm);
+ RL("RxAN", stats.an);
+ RL("RxNoMem", stats.nomem);
+ TL("TSO:", tso);
+ TL("USO:", uso);
+ TL("TxCSO:", tx_cso);
+ TL("VLANins:", vlan_ins);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
+ TL("TxMapErr:", mapping_err);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
+ RL("FLStarving:", fl.starving);
+
+ goto unlock;
+ }
+
+ r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
@@ -3007,6 +3059,7 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&uld_mutex);
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+ (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 76538f4cd595..20ab3b6285a2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -91,6 +91,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"rx_bg3_frames_trunc ",
"tso ",
+ "uso ",
"tx_csum_offload ",
"rx_csum_good ",
"vlan_extractions ",
@@ -220,6 +221,7 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
*/
struct queue_port_stats {
u64 tso;
+ u64 uso;
u64 tx_csum;
u64 rx_csum;
u64 vlan_ex;
@@ -240,13 +242,15 @@ static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
- int i;
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+ struct sge_eohw_txq *eohw_tx;
+ unsigned int i;
memset(s, 0, sizeof(*s));
for (i = 0; i < p->nqsets; i++, rx++, tx++) {
s->tso += tx->tso;
+ s->uso += tx->uso;
s->tx_csum += tx->tx_cso;
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
@@ -254,6 +258,16 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->gro_pkts += rx->stats.lro_pkts;
s->gro_merged += rx->stats.lro_merged;
}
+
+ if (adap->sge.eohw_txq) {
+ eohw_tx = &adap->sge.eohw_txq[p->first_qset];
+ for (i = 0; i < p->nqsets; i++, eohw_tx++) {
+ s->tso += eohw_tx->tso;
+ s->uso += eohw_tx->uso;
+ s->tx_csum += eohw_tx->tx_cso;
+ s->vlan_ins += eohw_tx->vlan_ins;
+ }
+ }
}
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 43b0f8c57da7..1d39fca11810 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -440,36 +440,48 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
struct adapter *adap = netdev2adap(dev);
struct tid_info *t = &adap->tids;
+ bool found = false;
+ u8 i, n, cnt;
int ftid;
- spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET) {
- ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
- if (ftid >= t->nftids)
- ftid = -1;
- } else {
- if (is_t6(adap->params.chip)) {
- ftid = bitmap_find_free_region(t->ftid_bmap,
- t->nftids, 1);
- if (ftid < 0)
- goto out_unlock;
-
- /* this is only a lookup, keep the found region
- * unallocated
- */
- bitmap_release_region(t->ftid_bmap, ftid, 1);
- } else {
- ftid = bitmap_find_free_region(t->ftid_bmap,
- t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ /* IPv4 occupies 1 slot. IPv6 occupies 2 slots on T6 and 4 slots
+ * on T5.
+ */
+ n = 1;
+ if (family == PF_INET6) {
+ n++;
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
+ n += 2;
+ }
+
+ if (n > t->nftids)
+ return -ENOMEM;
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ /* Find free filter slots from the end of TCAM. Appropriate
+ * checks must be done by caller later to ensure the prio
+ * passed by TC doesn't conflict with prio saved by existing
+ * rules in the TCAM.
+ */
+ spin_lock_bh(&t->ftid_lock);
+ ftid = t->nftids - 1;
+ while (ftid >= n - 1) {
+ cnt = 0;
+ for (i = 0; i < n; i++) {
+ if (test_bit(ftid - i, t->ftid_bmap))
+ break;
+ cnt++;
}
+ if (cnt == n) {
+ ftid &= ~(n - 1);
+ found = true;
+ break;
+ }
+
+ ftid -= n;
}
-out_unlock:
spin_unlock_bh(&t->ftid_lock);
- return ftid;
+
+ return found ? ftid : -ENOMEM;
}
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
@@ -510,6 +522,60 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
+bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct filter_entry *prev_fe, *next_fe;
+ struct tid_info *t = &adap->tids;
+ u32 prev_ftid, next_ftid;
+ bool valid = true;
+
+ /* Only insert the rule if both of the following conditions
+ * are met:
+ * 1. The immediate previous rule has priority <= @prio.
+ * 2. The immediate next rule has priority >= @prio.
+ */
+ spin_lock_bh(&t->ftid_lock);
+ /* Don't insert if there's a rule already present at @idx. */
+ if (test_bit(idx, t->ftid_bmap)) {
+ valid = false;
+ goto out_unlock;
+ }
+
+ next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+
+ next_fe = &adap->tids.ftid_tab[next_ftid];
+
+ prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ if (prev_ftid >= idx)
+ prev_ftid = idx;
+
+ /* See if the filter entry belongs to an IPv6 rule, which
+ * occupies 4 slots on T5 and 2 slots on T6. Adjust the
+ * reference to the previously inserted filter entry
+ * accordingly.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
+ prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
+ if (!prev_fe->fs.type)
+ prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ } else {
+ prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
+ if (!prev_fe->fs.type)
+ prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ }
+
+ if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
+ (next_fe->valid && prio > next_fe->fs.tc_prio))
+ valid = false;
+
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
+ return valid;
+}
+
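Callers are expected to pair the check above with the free-index lookup: derive a candidate LETCAM index from the TC prio when it fits, fall back to cxgb4_get_free_ftid() otherwise, then verify the prio ordering. A condensed sketch of the pattern used by the TC Flower and MATCHALL paths later in this patch (pick_filter_idx() is a hypothetical helper; error handling is trimmed):

/* Sketch: pick a candidate filter index and validate its priority. */
static int pick_filter_idx(struct net_device *dev, struct adapter *adap,
			   struct flow_cls_offload *cls)
{
	int fidx;

	if (cls->common.prio <= adap->tids.nftids)
		fidx = cls->common.prio - 1;	/* TC prio is 1-based */
	else
		fidx = cxgb4_get_free_ftid(dev, PF_INET);

	if (fidx < 0 ||
	    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio))
		return -ENOMEM;

	return fidx;
}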
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
@@ -806,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.tos |= ~0;
if (fs->val.proto && !fs->mask.proto)
fs->mask.proto |= ~0;
+ if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
+ fs->mask.pfvf_vld |= ~0;
+ if (fs->val.pf && !fs->mask.pf)
+ fs->mask.pf |= ~0;
+ if (fs->val.vf && !fs->mask.vf)
+ fs->mask.vf |= ~0;
for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
lip |= fs->val.lip[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index b0751c0611ec..b3e4a645043d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -53,4 +53,5 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
+bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38024877751c..12ff69b3ba91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
+#include <net/xfrm.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -82,6 +83,8 @@
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
+#include "cxgb4_tc_mqprio.h"
+#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
@@ -184,6 +187,8 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+static int cfg_queues(struct adapter *adap);
+
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
@@ -683,31 +688,6 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
return IRQ_HANDLED;
}
-/*
- * Name the MSI-X interrupts.
- */
-static void name_msix_vecs(struct adapter *adap)
-{
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
-
- /* non-data interrupts */
- snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
-
- /* FW events */
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
- adap->port[0]->name);
-
- /* Ethernet queues */
- for_each_port(adap, j) {
- struct net_device *d = adap->port[j];
- const struct port_info *pi = netdev_priv(d);
-
- for (i = 0; i < pi->nqsets; i++, msi_idx++)
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
- d->name, i);
- }
-}
-
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
cpumask_var_t *aff_mask, int idx)
{
@@ -741,15 +721,19 @@ static int request_msix_queue_irqs(struct adapter *adap)
struct sge *s = &adap->sge;
struct msix_info *minfo;
int err, ethqidx;
- int msi_index = 2;
- err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
- adap->msix_info[1].desc, &s->fw_evtq);
+ if (s->fwevtq_msix_idx < 0)
+ return -ENOMEM;
+
+ err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[s->fwevtq_msix_idx].desc,
+ &s->fw_evtq);
if (err)
return err;
for_each_ethrxq(s, ethqidx) {
- minfo = &adap->msix_info[msi_index];
+ minfo = s->ethrxq[ethqidx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
@@ -759,18 +743,16 @@ static int request_msix_queue_irqs(struct adapter *adap)
cxgb4_set_msix_aff(adap, minfo->vec,
&minfo->aff_mask, ethqidx);
- msi_index++;
}
return 0;
unwind:
while (--ethqidx >= 0) {
- msi_index--;
- minfo = &adap->msix_info[msi_index];
+ minfo = s->ethrxq[ethqidx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
}
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
return err;
}
@@ -778,11 +760,11 @@ static void free_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
struct msix_info *minfo;
- int i, msi_index = 2;
+ int i;
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
for_each_ethrxq(s, i) {
- minfo = &adap->msix_info[msi_index++];
+ minfo = s->ethrxq[i].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[i].rspq);
}
@@ -899,6 +881,12 @@ static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
+void cxgb4_quiesce_rx(struct sge_rspq *q)
+{
+ if (q->handler)
+ napi_disable(&q->napi);
+}
+
/*
* Wait until all NAPI handlers are descheduled.
*/
@@ -909,19 +897,24 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler)
- napi_disable(&q->napi);
+ if (!q)
+ continue;
+
+ cxgb4_quiesce_rx(q);
}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
+ struct sge *s = &adap->sge;
+
if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
- free_irq(adap->msix_info[0].vec, adap);
+ free_irq(adap->msix_info[s->nd_msix_idx].vec,
+ adap);
} else {
free_irq(adap->pdev->irq, adap);
}
@@ -929,6 +922,17 @@ static void disable_interrupts(struct adapter *adap)
}
}
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (q->handler)
+ napi_enable(&q->napi);
+
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
+}
+
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@@ -941,37 +945,63 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
- if (q->handler)
- napi_enable(&q->napi);
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
+ cxgb4_enable_rx(adap, q);
}
}
+static int setup_non_data_intr(struct adapter *adap)
+{
+ int msix;
+
+ adap->sge.nd_msix_idx = -1;
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ return 0;
+
+ /* Request MSI-X vector for non-data interrupt */
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0)
+ return -ENOMEM;
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s", adap->port[0]->name);
+
+ adap->sge.nd_msix_idx = msix;
+ return 0;
+}
static int setup_fw_sge_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err = 0;
+ int msix, err = 0;
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & CXGB4_USING_MSIX)
- adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
- else {
+ if (adap->flags & CXGB4_USING_MSIX) {
+ s->fwevtq_msix_idx = -1;
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0)
+ return -ENOMEM;
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s-FWeventq", adap->port[0]->name);
+ } else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
NULL, NULL, NULL, -1);
if (err)
return err;
- adap->msi_idx = -((int)s->intrq.abs_id + 1);
+ msix = -((int)s->intrq.abs_id + 1);
}
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+ msix, NULL, fwevtq_handler, NULL, -1);
+ if (err && msix >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, msix);
+
+ s->fwevtq_msix_idx = msix;
return err;
}
@@ -985,14 +1015,17 @@ static int setup_fw_sge_queues(struct adapter *adap)
*/
static int setup_sge_queues(struct adapter *adap)
{
- int err, i, j;
- struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info = NULL;
+ struct sge *s = &adap->sge;
unsigned int cmplqid = 0;
+ int err, i, j, msix = 0;
if (is_uld(adap))
rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)s->intrq.abs_id + 1);
+
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
@@ -1000,10 +1033,21 @@ static int setup_sge_queues(struct adapter *adap)
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
- if (adap->msi_idx > 0)
- adap->msi_idx++;
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ err = msix;
+ goto freeout;
+ }
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s-Rx%d", dev->name, j);
+ q->msix = &adap->msix_info[msix];
+ }
+
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
- adap->msi_idx, &q->fl,
+ msix, &q->fl,
t4_ethrx_handler,
NULL,
t4_get_tp_ch_map(adap,
@@ -1090,6 +1134,24 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif /* CONFIG_CHELSIO_T4_DCB */
+ if (dev->num_tc) {
+ struct port_info *pi = netdev2pinfo(dev);
+ u8 ver, proto;
+
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
+ ip_hdr(skb)->protocol;
+
+ /* Send unsupported traffic pattern to normal NIC queues. */
+ txq = netdev_pick_tx(dev, skb, sb_dev);
+ if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
+ skb->encapsulation ||
+ (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
+ txq = txq % pi->nqsets;
+
+ return txq;
+ }
+
if (select_queue) {
txq = (skb_rx_queue_recorded(skb)
? skb_get_rx_queue(skb)
@@ -1456,19 +1518,23 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
+ unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+ eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
- ftid_bmap_size * sizeof(long);
+ ftid_bmap_size * sizeof(long) +
+ t->neotids * sizeof(*t->eotid_tab) +
+ eotid_bmap_size * sizeof(long);
t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
@@ -1479,6 +1545,8 @@ static int tid_init(struct tid_info *t)
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
+ t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
+ t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
spin_lock_init(&t->ftid_lock);
@@ -1505,6 +1573,9 @@ static int tid_init(struct tid_info *t)
if (!t->stid_base &&
CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
__set_bit(0, t->stid_bmap);
+
+ if (t->neotids)
+ bitmap_zero(t->eotid_bmap, t->neotids);
}
bitmap_zero(t->ftid_bmap, t->nftids);
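tid_init() above sizes a single kvzalloc() to hold every table and bitmap, then carves the regions out with pointer arithmetic, so the new eotid_tab and eotid_bmap simply extend the existing layout. A reduced sketch of the idiom with two hypothetical tables (struct names and the na/nb counts are illustrative):

/* Sketch: carve two tables and one bitmap out of one allocation. */
size_t size = na * sizeof(struct a_entry) +
	      nb * sizeof(struct b_entry) +
	      BITS_TO_LONGS(nb) * sizeof(long);
struct a_entry *a_tab = kvzalloc(size, GFP_KERNEL);
struct b_entry *b_tab;
unsigned long *b_bmap;

if (!a_tab)
	return -ENOMEM;

b_tab = (struct b_entry *)&a_tab[na];	/* starts right after a_tab */
b_bmap = (unsigned long *)&b_tab[nb];	/* bitmap trails the tables */
bitmap_zero(b_bmap, nb);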
@@ -2361,6 +2432,7 @@ static void update_clip(const struct adapter *adap)
*/
static int cxgb_up(struct adapter *adap)
{
+ struct sge *s = &adap->sge;
int err;
mutex_lock(&uld_mutex);
@@ -2372,16 +2444,20 @@ static int cxgb_up(struct adapter *adap)
goto freeq;
if (adap->flags & CXGB4_USING_MSIX) {
- name_msix_vecs(adap);
- err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
- adap->msix_info[0].desc, adap);
+ if (s->nd_msix_idx < 0) {
+ err = -ENOMEM;
+ goto irq_err;
+ }
+
+ err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
+ t4_nondata_intr, 0,
+ adap->msix_info[s->nd_msix_idx].desc, adap);
if (err)
goto irq_err;
+
err = request_msix_queue_irqs(adap);
- if (err) {
- free_irq(adap->msix_info[0].vec, adap);
- goto irq_err;
- }
+ if (err)
+ goto irq_err_free_nd_msix;
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & CXGB4_USING_MSI) ? 0
@@ -2403,11 +2479,13 @@ static int cxgb_up(struct adapter *adap)
#endif
return err;
- irq_err:
+irq_err_free_nd_msix:
+ free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
+irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
- freeq:
+freeq:
t4_free_sge_resources(adap);
- rel_lock:
+rel_lock:
mutex_unlock(&uld_mutex);
return err;
}
@@ -2429,11 +2507,11 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-static int cxgb_open(struct net_device *dev)
+int cxgb_open(struct net_device *dev)
{
- int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ int err;
netif_carrier_off(dev);
@@ -2456,7 +2534,7 @@ static int cxgb_open(struct net_device *dev)
return err;
}
-static int cxgb_close(struct net_device *dev)
+int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3163,8 +3241,33 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev,
}
}
-static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+static int cxgb_setup_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!adap->tc_matchall)
+ return -ENOMEM;
+
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
+ case TC_CLSMATCHALL_STATS:
+ if (ingress)
+ return cxgb4_tc_matchall_stats(dev, cls_matchall);
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
struct port_info *pi = netdev2pinfo(dev);
@@ -3185,24 +3288,81 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return cxgb_setup_tc_cls_u32(dev, type_data);
case TC_SETUP_CLSFLOWER:
return cxgb_setup_tc_flower(dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return cxgb_setup_tc_matchall(dev, type_data, true);
default:
return -EOPNOTSUPP;
}
}
+static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return cxgb_setup_tc_matchall(dev, type_data, false);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cxgb_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!is_ethofld(adap) || !adap->tc_mqprio)
+ return -ENOMEM;
+
+ return cxgb4_setup_tc_mqprio(dev, mqprio);
+}
+
static LIST_HEAD(cxgb_block_cb_list);
+static int cxgb_setup_tc_block(struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct port_info *pi = netdev_priv(dev);
+ flow_setup_cb_t *cb;
+ bool ingress_only;
+
+ pi->tc_block_shared = f->block_shared;
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = cxgb_setup_tc_block_egress_cb;
+ ingress_only = false;
+ } else {
+ cb = cxgb_setup_tc_block_ingress_cb;
+ ingress_only = true;
+ }
+
+ return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
+ cb, pi, dev, ingress_only);
+}
+
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct port_info *pi = netdev2pinfo(dev);
-
switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return cxgb_setup_tc_mqprio(dev, type_data);
case TC_SETUP_BLOCK:
- return flow_block_cb_setup_simple(type_data,
- &cxgb_block_cb_list,
- cxgb_setup_tc_block_cb,
- pi, dev, true);
+ return cxgb_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4286,14 +4446,14 @@ static struct fw_info *find_fw_info(int chip)
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
-static int adap_init0(struct adapter *adap)
+static int adap_init0(struct adapter *adap, int vpd_skip)
{
- int ret;
- u32 v, port_vec;
- enum dev_state state;
- u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
+ u32 params[7], val[7];
+ enum dev_state state;
+ u32 v, port_vec;
int reset = 1;
+ int ret;
/* Grab Firmware Device Log parameters as early as possible so we have
* access to it for debugging, etc.
@@ -4448,9 +4608,11 @@ static int adap_init0(struct adapter *adap)
* could have FLASHed a new VPD which won't be read by the firmware
* until we do the RESET ...
*/
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
+ if (!vpd_skip) {
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+ }
/* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -4600,11 +4762,18 @@ static int adap_init0(struct adapter *adap)
adap->clipt_start = val[0];
adap->clipt_end = val[1];
- /* We don't yet have a PARAMs calls to retrieve the number of Traffic
- * Classes supported by the hardware/firmware so we hard code it here
- * for now.
- */
- adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+ /* Get the supported number of traffic classes */
+ params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+ if (ret < 0) {
+ /* We couldn't retrieve the number of Traffic Classes
+ * supported by the hardware/firmware, so we hard-code
+ * it here.
+ */
+ adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+ } else {
+ adap->params.nsched_cls = val[0];
+ }
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
@@ -4689,7 +4858,8 @@ static int adap_init0(struct adapter *adap)
adap->params.offload = 1;
if (caps_cmd.ofldcaps ||
- (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -4731,6 +4901,19 @@ static int adap_init0(struct adapter *adap)
} else {
adap->num_ofld_uld += 1;
}
+
+ if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
+ params[0] = FW_PARAM_PFVF(ETHOFLD_START);
+ params[1] = FW_PARAM_PFVF(ETHOFLD_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (!ret) {
+ adap->tids.eotid_base = val[0];
+ adap->tids.neotids = min_t(u32, MAX_ATIDS,
+ val[1] - val[0] + 1);
+ adap->params.ethofld = 1;
+ }
+ }
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -5050,10 +5233,93 @@ static void eeh_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void eeh_reset_prepare(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ if (adapter->pf != 4)
+ return;
+
+ adapter->flags &= ~CXGB4_FW_OK;
+
+ notify_ulds(adapter, CXGB4_STATE_DOWN);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ disable_interrupts(adapter);
+ cxgb4_free_mps_ref_entries(adapter);
+
+ adap_free_hma_mem(adapter);
+
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adapter);
+}
+
+static void eeh_reset_done(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int err, i;
+
+ if (adapter->pf != 4)
+ return;
+
+ err = t4_wait_dev_ready(adapter->regs);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev,
+ "Device not ready, err %d", err);
+ return;
+ }
+
+ setup_memwin(adapter);
+
+ err = adap_init0(adapter, 1);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Adapter init failed, err %d", err);
+ return;
+ }
+
+ setup_memwin_rdma(adapter);
+
+ if (adapter->flags & CXGB4_FW_OK) {
+ err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Port init failed, err %d", err);
+ return;
+ }
+ }
+
+ err = cfg_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Config queues failed, err %d", err);
+ return;
+ }
+
+ cxgb4_init_mps_ref_entries(adapter);
+
+ err = setup_fw_sge_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "FW sge queue allocation failed, err %d", err);
+ return;
+ }
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_open(adapter->port[i]);
+}
+
static const struct pci_error_handlers cxgb4_eeh = {
.error_detected = eeh_err_detected,
.slot_reset = eeh_slot_reset,
.resume = eeh_resume,
+ .reset_prepare = eeh_reset_prepare,
+ .reset_done = eeh_reset_done,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
@@ -5070,26 +5336,25 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-/*
- * Perform default configuration of DMA queues depending on the number and type
+/* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
static int cfg_queues(struct adapter *adap)
{
+ u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+ u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
- int i, n10g = 0, qidx = 0;
- int niqflint, neq, avail_eth_qsets;
- int max_eth_qsets = 32;
+ u32 i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- /* Reduce memory usage in kdump environment, disable all offload.
- */
+ /* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
+ adap->params.ethofld = 0;
}
/* Calculate the number of Ethernet Queue Sets available based on
@@ -5108,14 +5373,11 @@ static int cfg_queues(struct adapter *adap)
if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
- avail_eth_qsets = min(niqflint, neq);
+ avail_qsets = min(niqflint, neq);
- if (avail_eth_qsets > max_eth_qsets)
- avail_eth_qsets = max_eth_qsets;
-
- if (avail_eth_qsets < adap->params.nports) {
+ if (avail_qsets < adap->params.nports) {
dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
- avail_eth_qsets, adap->params.nports);
+ avail_qsets, adap->params.nports);
return -ENOMEM;
}
@@ -5123,6 +5385,7 @@ static int cfg_queues(struct adapter *adap)
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -5142,8 +5405,7 @@ static int cfg_queues(struct adapter *adap)
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
- /*
- * We default to 1 queue per non-10G port and up to # of cores queues
+ /* We default to 1 queue per non-10G port and up to # of cores queues
* per 10G port.
*/
if (n10g)
@@ -5165,19 +5427,40 @@ static int cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
+ avail_qsets -= qidx;
if (is_uld(adap)) {
- /*
- * For offload we use 1 queue/channel if all ports are up to 1G,
+ /* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
- if (n10g) {
- i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
- } else {
+ num_ulds = adap->num_uld + adap->num_ofld_uld;
+ i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+ avail_uld_qsets = roundup(i, adap->params.nports);
+ if (avail_qsets < num_ulds * adap->params.nports) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ s->ofldqsets = 0;
+ } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
s->ofldqsets = adap->params.nports;
+ } else {
+ s->ofldqsets = avail_uld_qsets;
+ }
+
+ avail_qsets -= num_ulds * s->ofldqsets;
+ }
+
+ /* ETHOFLD Queues used for QoS offload should follow the same
+ * allocation scheme as normal Ethernet Queues.
+ */
+ if (is_ethofld(adap)) {
+ if (avail_qsets < s->max_ethqsets) {
+ adap->params.ethofld = 0;
+ s->eoqsets = 0;
+ } else {
+ s->eoqsets = s->max_ethqsets;
}
+ avail_qsets -= s->eoqsets;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5230,42 +5513,62 @@ static void reduce_ethqs(struct adapter *adap, int n)
}
}
-static int get_msix_info(struct adapter *adap)
+static int alloc_msix_info(struct adapter *adap, u32 num_vec)
{
- struct uld_msix_info *msix_info;
- unsigned int max_ingq = 0;
-
- if (is_offload(adap))
- max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
- if (is_pci_uld(adap))
- max_ingq += MAX_OFLD_QSETS * adap->num_uld;
-
- if (!max_ingq)
- goto out;
+ struct msix_info *msix_info;
- msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
+ msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
- sizeof(long), GFP_KERNEL);
- if (!adap->msix_bmap_ulds.msix_bmap) {
+ adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
}
- spin_lock_init(&adap->msix_bmap_ulds.lock);
- adap->msix_info_ulds = msix_info;
-out:
+
+ spin_lock_init(&adap->msix_bmap.lock);
+ adap->msix_bmap.mapsize = num_vec;
+
+ adap->msix_info = msix_info;
return 0;
}
static void free_msix_info(struct adapter *adap)
{
- if (!(adap->num_uld && adap->num_ofld_uld))
- return;
+ kfree(adap->msix_bmap.msix_bmap);
+ kfree(adap->msix_info);
+}
- kfree(adap->msix_info_ulds);
- kfree(adap->msix_bmap_ulds.msix_bmap);
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
+{
+ struct msix_bmap *bmap = &adap->msix_bmap;
+ unsigned int msix_idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
+ if (msix_idx < bmap->mapsize) {
+ __set_bit(msix_idx, bmap->msix_bmap);
+ } else {
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return -ENOSPC;
+ }
+
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return msix_idx;
+}
+
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
+ unsigned int msix_idx)
+{
+ struct msix_bmap *bmap = &adap->msix_bmap;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ __clear_bit(msix_idx, bmap->msix_bmap);
+ spin_unlock_irqrestore(&bmap->lock, flags);
}
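The two helpers above implement a small spinlock-protected allocator over the shared MSI-X vector pool, replacing the old static vector numbering. Typical usage, condensed from setup_fw_sge_queues() above (reserve_fwevtq_vector() and do_setup() are hypothetical placeholders):

/* Sketch: reserve a vector index, label it, release it on failure. */
static int reserve_fwevtq_vector(struct adapter *adap)
{
	int err, msix = cxgb4_get_msix_idx_from_bmap(adap);

	if (msix < 0)
		return -ENOMEM;

	snprintf(adap->msix_info[msix].desc,
		 sizeof(adap->msix_info[msix].desc),
		 "%s-FWeventq", adap->port[0]->name);

	err = do_setup(adap, msix);	/* hypothetical setup step */
	if (err) {
		cxgb4_free_msix_idx_in_bmap(adap, msix);
		return err;
	}
	return msix;
}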
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
@@ -5273,88 +5576,161 @@ static void free_msix_info(struct adapter *adap)
static int enable_msix(struct adapter *adap)
{
- int ofld_need = 0, uld_need = 0;
- int i, j, want, need, allocated;
+ u32 eth_need, uld_need = 0, ethofld_need = 0;
+ u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
+ u8 num_uld = 0, nchan = adap->params.nports;
+ u32 i, want, need, num_vec;
struct sge *s = &adap->sge;
- unsigned int nchan = adap->params.nports;
struct msix_entry *entries;
- int max_ingq = MAX_INGQ;
-
- if (is_pci_uld(adap))
- max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
- if (is_offload(adap))
- max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
- entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- /* map for msix */
- if (get_msix_info(adap)) {
- adap->params.offload = 0;
- adap->params.crypto = 0;
- }
-
- for (i = 0; i < max_ingq + 1; ++i)
- entries[i].entry = i;
+ struct port_info *pi;
+ int allocated, ret;
- want = s->max_ethqsets + EXTRA_VECS;
- if (is_offload(adap)) {
- want += adap->num_ofld_uld * s->ofldqsets;
- ofld_need = adap->num_ofld_uld * nchan;
- }
- if (is_pci_uld(adap)) {
- want += adap->num_uld * s->ofldqsets;
- uld_need = adap->num_uld * nchan;
- }
+ want = s->max_ethqsets;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port.
*/
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
+ need = 8 * nchan;
#else
- need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
+ need = nchan;
#endif
+ eth_need = need;
+ if (is_uld(adap)) {
+ num_uld = adap->num_ofld_uld + adap->num_uld;
+ want += num_uld * s->ofldqsets;
+ uld_need = num_uld * nchan;
+ need += uld_need;
+ }
+
+ if (is_ethofld(adap)) {
+ want += s->eoqsets;
+ ethofld_need = eth_need;
+ need += ethofld_need;
+ }
+
+ want += EXTRA_VECS;
+ need += EXTRA_VECS;
+
+ entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < want; i++)
+ entries[i].entry = i;
+
allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) {
- dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
- " not using MSI-X\n");
- kfree(entries);
- return allocated;
+ /* Disable offload and attempt to get vectors for
+ * NIC-only mode.
+ */
+ want = s->max_ethqsets + EXTRA_VECS;
+ need = eth_need + EXTRA_VECS;
+ allocated = pci_enable_msix_range(adap->pdev, entries,
+ need, want);
+ if (allocated < 0) {
+ dev_info(adap->pdev_dev,
+ "Disabling MSI-X due to insufficient MSI-X vectors\n");
+ ret = allocated;
+ goto out_free;
+ }
+
+ dev_info(adap->pdev_dev,
+ "Disabling offload due to insufficient MSI-X vectors\n");
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ adap->params.ethofld = 0;
+ s->ofldqsets = 0;
+ s->eoqsets = 0;
+ uld_need = 0;
+ ethofld_need = 0;
+ }
+
+ num_vec = allocated;
+ if (num_vec < want) {
+ /* Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ ethqsets = eth_need;
+ if (is_uld(adap))
+ ofldqsets = nchan;
+ if (is_ethofld(adap))
+ eoqsets = ethofld_need;
+
+ num_vec -= need;
+ while (num_vec) {
+ if (num_vec < eth_need + ethofld_need ||
+ ethqsets > s->max_ethqsets)
+ break;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->nqsets < 2)
+ continue;
+
+ ethqsets++;
+ num_vec--;
+ if (ethofld_need) {
+ eoqsets++;
+ num_vec--;
+ }
+ }
+ }
+
+ if (is_uld(adap)) {
+ while (num_vec) {
+ if (num_vec < uld_need ||
+ ofldqsets > s->ofldqsets)
+ break;
+
+ ofldqsets++;
+ num_vec -= uld_need;
+ }
+ }
+ } else {
+ ethqsets = s->max_ethqsets;
+ if (is_uld(adap))
+ ofldqsets = s->ofldqsets;
+ if (is_ethofld(adap))
+ eoqsets = s->eoqsets;
}
- /* Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = allocated - EXTRA_VECS - ofld_need - uld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
+ if (ethqsets < s->max_ethqsets) {
+ s->max_ethqsets = ethqsets;
+ reduce_ethqs(adap, ethqsets);
}
+
if (is_uld(adap)) {
- if (allocated < want)
- s->nqs_per_uld = nchan;
- else
- s->nqs_per_uld = s->ofldqsets;
+ s->ofldqsets = ofldqsets;
+ s->nqs_per_uld = s->ofldqsets;
}
- for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
+ if (is_ethofld(adap))
+ s->eoqsets = eoqsets;
+
+ /* map for msix */
+ ret = alloc_msix_info(adap, allocated);
+ if (ret)
+ goto out_disable_msix;
+
+ for (i = 0; i < allocated; i++) {
adap->msix_info[i].vec = entries[i].vector;
- if (is_uld(adap)) {
- for (j = 0 ; i < allocated; ++i, j++) {
- adap->msix_info_ulds[j].vec = entries[i].vector;
- adap->msix_info_ulds[j].idx = i;
- }
- adap->msix_bmap_ulds.mapsize = j;
+ adap->msix_info[i].idx = i;
}
- dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d per uld %d\n",
- allocated, s->max_ethqsets, s->nqs_per_uld);
+
+ dev_info(adap->pdev_dev,
+ "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
+ allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
kfree(entries);
return 0;
+
+out_disable_msix:
+ pci_disable_msix(adap->pdev);
+
+out_free:
+ kfree(entries);
+ return ret;
}
#undef EXTRA_VECS
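To make the want/need arithmetic above concrete, here is an illustrative (not measured) accounting for a hypothetical 2-port adapter built without CONFIG_CHELSIO_T4_DCB, with 8 Ethernet queue sets, one ULD, and ETHOFLD enabled:

/* Illustrative vector accounting for enable_msix():
 *   want = max_ethqsets(8)
 *        + num_uld(1) * ofldqsets(2)	ULD Rx queues
 *        + eoqsets(8)			ETHOFLD completion queues
 *        + EXTRA_VECS(2)		FW event + non-data interrupts
 *        = 20
 *   need = nchan(2) + uld_need(2) + ethofld_need(2) + EXTRA_VECS(2)
 *        = 8
 * pci_enable_msix_range() may return anything in [need, want]; any
 * shortfall is then distributed with NIC queues taking top priority.
 */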
@@ -5441,6 +5817,8 @@ static void free_some_resources(struct adapter *adapter)
kvfree(adapter->srq);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_matchall(adapter);
+ cxgb4_cleanup_tc_mqprio(adapter);
cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
@@ -5466,7 +5844,8 @@ static void free_some_resources(struct adapter *adapter)
t4_fw_bye(adapter, adapter->pf);
}
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
@@ -5837,7 +6216,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
setup_memwin(adapter);
- err = adap_init0(adapter);
+ err = adap_init0(adapter, 0);
#ifdef CONFIG_DEBUG_FS
bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
@@ -5855,8 +6234,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&adapter->mac_hlist);
for_each_port(adapter, i) {
+ /* To support MQPRIO Offload, we need some extra
+ * queues for the ETHOFLD TIDs. Keep it equal to
+ * MAX_ATIDS for now. Once we connect to the firmware
+ * later and query the EOTID params, we'll know the
+ * actual number of EOTIDs supported.
+ */
netdev = alloc_etherdev_mq(sizeof(struct port_info),
- MAX_ETH_QSETS);
+ MAX_ETH_QSETS + MAX_ATIDS);
if (!netdev) {
err = -ENOMEM;
goto out_free_dev;
@@ -6004,6 +6389,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cxgb4_init_tc_flower(adapter))
dev_warn(&pdev->dev,
"could not offload tc flower, continuing\n");
+
+ if (cxgb4_init_tc_mqprio(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc mqprio, continuing\n");
+
+ if (cxgb4_init_tc_matchall(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc matchall, continuing\n");
}
if (is_offload(adapter) || is_hashfilter(adapter)) {
@@ -6040,6 +6433,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_dev;
+ err = setup_non_data_intr(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Non Data interrupt allocation failed, err: %d\n", err);
+ goto out_free_dev;
+ }
+
err = setup_fw_sge_queues(adapter);
if (err) {
dev_err(adapter->pdev_dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index e447976bdd3e..0fa80bef575d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
}
-static void cxgb4_process_flow_actions(struct net_device *in,
- struct flow_cls_offload *cls,
- struct ch_filter_specification *fs)
+void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
int i;
- flow_action_for_each(i, act, &rule->action) {
+ flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
@@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev,
return true;
}
-static int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_cls_offload *cls)
+int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_action *actions)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
- flow_action_for_each(i, act, &rule->action) {
+ flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_DROP:
@@ -636,14 +634,15 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
struct ch_filter_specification *fs;
struct filter_ctx ctx;
- int fidx;
- int ret;
+ int fidx, ret;
- if (cxgb4_validate_flow_actions(dev, cls))
+ if (cxgb4_validate_flow_actions(dev, &rule->action))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
@@ -658,20 +657,41 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
fs = &ch_flower->fs;
fs->hitcnts = 1;
cxgb4_process_flow_match(dev, cls, fs);
- cxgb4_process_flow_actions(dev, cls, fs);
+ cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
if (fs->hash) {
fidx = 0;
} else {
- fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
- if (fidx < 0) {
- netdev_err(dev, "%s: No fidx for offload.\n", __func__);
+ u8 inet_family;
+
+ inet_family = fs->type ? PF_INET6 : PF_INET;
+
+ /* Note that TC uses prio 0 to indicate that the stack
+ * should generate an automatic prio, and hence never
+ * passes prio 0 to the driver. However, the hardware
+ * TCAM index starts from 0. Hence, the -1 here.
+ */
+ if (cls->common.prio <= adap->tids.nftids)
+ fidx = cls->common.prio - 1;
+ else
+ fidx = cxgb4_get_free_ftid(dev, inet_family);
+
+ /* Only insert FLOWER rule if its priority doesn't
+ * conflict with existing rules in the LETCAM.
+ */
+ if (fidx < 0 ||
+ !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
ret = -ENOMEM;
goto free_entry;
}
}
+ fs->tc_prio = cls->common.prio;
+ fs->tc_cookie = cls->cookie;
+
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index eb4c95248baf..e132516e9868 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,12 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
+void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs);
+int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_action *actions);
+
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
new file mode 100644
index 000000000000..102b370fbd3e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#include "cxgb4.h"
+#include "cxgb4_tc_matchall.h"
+#include "sched.h"
+#include "cxgb4_uld.h"
+#include "cxgb4_filter.h"
+#include "cxgb4_tc_flower.h"
+
+static int cxgb4_matchall_egress_validate(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct flow_action_entry *entry;
+ u64 max_link_rate;
+ u32 i, speed;
+ int ret;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload needs at least 1 policing action");
+ return -EINVAL;
+ } else if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload only supports 1 policing action");
+ return -EINVAL;
+ } else if (pi->tc_block_shared) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload not supported with shared blocks");
+ return -EINVAL;
+ }
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to get max speed supported by the link");
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to bps */
+ max_link_rate = (u64)speed * 1000 * 1000;
+
+ flow_action_for_each(i, entry, actions) {
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Convert bytes per second to bits per second */
+ if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified policing max rate is larger than underlying link speed");
+ return -ERANGE;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only policing action supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
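Worked numbers for the policing bound above (all figures illustrative):

/* A 10 Gbps link gives max_link_rate = 10000 * 1000 * 1000 = 1e10 bps.
 * A police rate of 1.5e9 bytes/s is 1.2e10 bits/s > 1e10, so it is
 * rejected with -ERANGE; 1e9 bytes/s is 8e9 bits/s <= 1e10 and passes.
 */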
+static int cxgb4_matchall_alloc_tc(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct ch_sched_params p = {
+ .type = SCHED_CLASS_TYPE_PACKET,
+ .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
+ .u.params.mode = SCHED_CLASS_MODE_CLASS,
+ .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ .u.params.class = SCHED_CLS_NONE,
+ .u.params.minrate = 0,
+ .u.params.weight = 0,
+ .u.params.pktsize = dev->mtu,
+ };
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct flow_action_entry *entry;
+ struct sched_class *e;
+ u32 i;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ flow_action_for_each(i, entry, &cls->rule->action)
+ if (entry->id == FLOW_ACTION_POLICE)
+ break;
+
+ /* Convert from bytes per second to Kbps */
+ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
+ p.u.params.channel = pi->tx_chan;
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free traffic class available for policing action");
+ return -ENOMEM;
+ }
+
+ tc_port_matchall->egress.hwtc = e->idx;
+ tc_port_matchall->egress.cookie = cls->cookie;
+ tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
+ return 0;
+}
+
+static void cxgb4_matchall_free_tc(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
+
+ tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
+ tc_port_matchall->egress.cookie = 0;
+ tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
+}
+
+static int cxgb4_matchall_alloc_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_filter_specification *fs;
+ int ret, fidx;
+
+ /* Note that TC uses prio 0 to indicate that the stack should
+ * generate an automatic prio and hence never passes prio 0 to
+ * the driver. However, the hardware TCAM index starts from 0.
+ * Hence, the -1 here. One slot is enough to create a wildcard
+ * matchall VIID rule.
+ */
+ if (cls->common.prio <= adap->tids.nftids)
+ fidx = cls->common.prio - 1;
+ else
+ fidx = cxgb4_get_free_ftid(dev, PF_INET);
+
+ /* Only insert MATCHALL rule if its priority doesn't conflict
+ * with existing rules in the LETCAM.
+ */
+ if (fidx < 0 ||
+ !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ return -ENOMEM;
+ }
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ fs = &tc_port_matchall->ingress.fs;
+ memset(fs, 0, sizeof(*fs));
+
+ fs->tc_prio = cls->common.prio;
+ fs->tc_cookie = cls->cookie;
+ fs->hitcnts = 1;
+
+ fs->val.pfvf_vld = 1;
+ fs->val.pf = adap->pf;
+ fs->val.vf = pi->vin;
+
+ cxgb4_process_flow_actions(dev, &cls->rule->action, fs);
+
+ ret = cxgb4_set_filter(dev, fidx, fs);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.tid = fidx;
+ tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
+ return 0;
+}
+
+static int cxgb4_matchall_free_filter(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
+ &tc_port_matchall->ingress.fs);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.packets = 0;
+ tc_port_matchall->ingress.bytes = 0;
+ tc_port_matchall->ingress.last_used = 0;
+ tc_port_matchall->ingress.tid = 0;
+ tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
+ return 0;
+}
+
+int cxgb4_tc_matchall_replace(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = cls_matchall->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (ingress) {
+ if (tc_port_matchall->ingress.state ==
+ CXGB4_MATCHALL_STATE_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 Ingress MATCHALL can be offloaded");
+ return -ENOMEM;
+ }
+
+ ret = cxgb4_validate_flow_actions(dev,
+ &cls_matchall->rule->action);
+ if (ret)
+ return ret;
+
+ return cxgb4_matchall_alloc_filter(dev, cls_matchall);
+ }
+
+ if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 Egress MATCHALL can be offloaded");
+ return -ENOMEM;
+ }
+
+ ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
+ if (ret)
+ return ret;
+
+ return cxgb4_matchall_alloc_tc(dev, cls_matchall);
+}
+
+int cxgb4_tc_matchall_destroy(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (ingress) {
+ if (cls_matchall->cookie !=
+ tc_port_matchall->ingress.fs.tc_cookie)
+ return -ENOENT;
+
+ return cxgb4_matchall_free_filter(dev);
+ }
+
+ if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
+ return -ENOENT;
+
+ cxgb4_matchall_free_tc(dev);
+ return 0;
+}
+
+int cxgb4_tc_matchall_stats(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u64 packets, bytes;
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
+ return -ENOENT;
+
+ ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
+ &packets, &bytes,
+ tc_port_matchall->ingress.fs.hash);
+ if (ret)
+ return ret;
+
+ if (tc_port_matchall->ingress.packets != packets) {
+ flow_stats_update(&cls_matchall->stats,
+ bytes - tc_port_matchall->ingress.bytes,
+ packets - tc_port_matchall->ingress.packets,
+ tc_port_matchall->ingress.last_used);
+
+ tc_port_matchall->ingress.packets = packets;
+ tc_port_matchall->ingress.bytes = bytes;
+ tc_port_matchall->ingress.last_used = jiffies;
+ }
+
+ return 0;
+}
+
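The stats path above follows the usual TC offload convention: hardware counters are cumulative, while flow_stats_update() expects increments, so the driver caches the last snapshot and reports only the delta. A generic sketch of that pattern (the hw_*/cached_* names are illustrative):

/* Sketch: convert cumulative hardware counters into deltas. */
if (hw_packets != cached_packets) {
	flow_stats_update(stats, hw_bytes - cached_bytes,
			  hw_packets - cached_packets, last_used);
	cached_packets = hw_packets;
	cached_bytes = hw_bytes;
	last_used = jiffies;
}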
+static void cxgb4_matchall_disable_offload(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
+ cxgb4_matchall_free_tc(dev);
+
+ if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
+ cxgb4_matchall_free_filter(dev);
+}
+
+int cxgb4_init_tc_matchall(struct adapter *adap)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct cxgb4_tc_matchall *tc_matchall;
+ int ret;
+
+ tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
+ if (!tc_matchall)
+ return -ENOMEM;
+
+ tc_port_matchall = kcalloc(adap->params.nports,
+ sizeof(*tc_port_matchall),
+ GFP_KERNEL);
+ if (!tc_port_matchall) {
+ ret = -ENOMEM;
+ goto out_free_matchall;
+ }
+
+ tc_matchall->port_matchall = tc_port_matchall;
+ adap->tc_matchall = tc_matchall;
+ return 0;
+
+out_free_matchall:
+ kfree(tc_matchall);
+ return ret;
+}
+
+void cxgb4_cleanup_tc_matchall(struct adapter *adap)
+{
+ u8 i;
+
+ if (adap->tc_matchall) {
+ if (adap->tc_matchall->port_matchall) {
+ for (i = 0; i < adap->params.nports; i++) {
+ struct net_device *dev = adap->port[i];
+
+ if (dev)
+ cxgb4_matchall_disable_offload(dev);
+ }
+ kfree(adap->tc_matchall->port_matchall);
+ }
+ kfree(adap->tc_matchall);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
new file mode 100644
index 000000000000..ab6b5683dfd3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#ifndef __CXGB4_TC_MATCHALL_H__
+#define __CXGB4_TC_MATCHALL_H__
+
+#include <net/pkt_cls.h>
+
+enum cxgb4_matchall_state {
+ CXGB4_MATCHALL_STATE_DISABLED = 0,
+ CXGB4_MATCHALL_STATE_ENABLED,
+};
+
+struct cxgb4_matchall_egress_entry {
+ enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
+ u8 hwtc; /* Traffic class bound to port */
+ u64 cookie; /* Used to identify the MATCHALL rule offloaded */
+};
+
+struct cxgb4_matchall_ingress_entry {
+ enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
+ u32 tid; /* Index to hardware filter entry */
+ struct ch_filter_specification fs; /* Filter entry */
+ u64 bytes; /* # of bytes hitting the filter */
+ u64 packets; /* # of packets hitting the filter */
+ u64 last_used; /* Last updated jiffies time */
+};
+
+struct cxgb4_tc_port_matchall {
+ struct cxgb4_matchall_egress_entry egress; /* Egress offload info */
+ struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */
+};
+
+struct cxgb4_tc_matchall {
+ struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */
+};
+
+int cxgb4_tc_matchall_replace(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress);
+int cxgb4_tc_matchall_destroy(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress);
+int cxgb4_tc_matchall_stats(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall);
+
+int cxgb4_init_tc_matchall(struct adapter *adap);
+void cxgb4_cleanup_tc_matchall(struct adapter *adap);
+#endif /* __CXGB4_TC_MATCHALL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
new file mode 100644
index 000000000000..477973d2e341
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#include "cxgb4.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
+
+static int cxgb4_mqprio_validate(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u64 min_rate = 0, max_rate = 0, max_link_rate;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 speed, qcount = 0, qoffset = 0;
+ int ret;
+ u8 i;
+
+ if (!mqprio->qopt.num_tc)
+ return 0;
+
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
+ netdev_err(dev, "Only full TC hardware offload is supported\n");
+ return -EINVAL;
+ } else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
+ netdev_err(dev, "Only channel mode offload is supported\n");
+ return -EINVAL;
+ } else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ netdev_err(dev, "Only bandwidth rate shaper supported\n");
+ return -EINVAL;
+ } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
+ netdev_err(dev,
+ "Only %u traffic classes supported by hardware\n",
+ adap->params.nsched_cls);
+ return -ERANGE;
+ }
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (ret) {
+ netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to bps */
+ max_link_rate = (u64)speed * 1000 * 1000;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
+ qcount += mqprio->qopt.count[i];
+
+ /* Convert bytes per second to bits per second */
+ min_rate += (mqprio->min_rate[i] * 8);
+ max_rate += (mqprio->max_rate[i] * 8);
+ }
+
+ if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
+ return -ENOMEM;
+
+ if (min_rate > max_link_rate || max_rate > max_link_rate) {
+ netdev_err(dev,
+ "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
+ min_rate, max_rate, max_link_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
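An illustrative pass through the checks above (all numbers made up):

/* num_tc = 2 with count = {4, 4} and offset = {0, 4} gives
 * qcount = 8 and qoffset = 4. min_rate = {100 MB/s, 200 MB/s} and
 * max_rate = {300 MB/s, 500 MB/s} sum to 2.4e9 and 6.4e9 bits/s
 * respectively, both under a 10 Gbps (1e10 bps) link, so validation
 * succeeds provided neotids covers the 8 requested queues.
 */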
+static int cxgb4_init_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u32 eotid, u32 hwqid)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tx_sw_desc *ring;
+
+ memset(eosw_txq, 0, sizeof(*eosw_txq));
+
+ ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
+ sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return -ENOMEM;
+
+ eosw_txq->desc = ring;
+ eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
+ spin_lock_init(&eosw_txq->lock);
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ eosw_txq->eotid = eotid;
+ eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
+ eosw_txq->cred = adap->params.ofldq_wr_cred;
+ eosw_txq->hwqid = hwqid;
+ eosw_txq->netdev = dev;
+ tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
+ (unsigned long)eosw_txq);
+ return 0;
+}
+
+static void cxgb4_clean_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
+ eosw_txq->pidx = 0;
+ eosw_txq->last_pidx = 0;
+ eosw_txq->cidx = 0;
+ eosw_txq->last_cidx = 0;
+ eosw_txq->flowc_idx = 0;
+ eosw_txq->inuse = 0;
+ eosw_txq->cred = adap->params.ofldq_wr_cred;
+ eosw_txq->ncompl = 0;
+ eosw_txq->last_compl = 0;
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+}
+
+static void cxgb4_free_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ spin_lock_bh(&eosw_txq->lock);
+ cxgb4_clean_eosw_txq(dev, eosw_txq);
+ kfree(eosw_txq->desc);
+ spin_unlock_bh(&eosw_txq->lock);
+ tasklet_kill(&eosw_txq->qresume_tsk);
+}
+
+static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_ofld_rxq *eorxq;
+ struct sge_eohw_txq *eotxq;
+ int ret, msix = 0;
+ u32 i;
+
+ /* Allocate ETHOFLD hardware queue structures if not done already */
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
+ sizeof(struct sge_ofld_rxq),
+ GFP_KERNEL);
+ if (!adap->sge.eohw_rxq)
+ return -ENOMEM;
+
+ adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
+ sizeof(struct sge_eohw_txq),
+ GFP_KERNEL);
+ if (!adap->sge.eohw_txq) {
+ kfree(adap->sge.eohw_rxq);
+ return -ENOMEM;
+ }
+ }
+
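+ /* Without MSI-X, fall back to forwarded interrupts via the
+ * common interrupt queue
+ */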
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)adap->sge.intrq.abs_id + 1);
+
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ /* Allocate Rxqs for receiving ETHOFLD Tx completions */
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ ret = msix;
+ goto out_free_queues;
+ }
+
+ eorxq->msix = &adap->msix_info[msix];
+ snprintf(eorxq->msix->desc,
+ sizeof(eorxq->msix->desc),
+ "%s-eorxq%d", dev->name, i);
+ }
+
+ init_rspq(adap, &eorxq->rspq,
+ CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
+ CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
+ CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
+ CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);
+
+ eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;
+
+ ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
+ dev, msix, &eorxq->fl,
+ cxgb4_ethofld_rx_handler,
+ NULL, 0);
+ if (ret)
+ goto out_free_queues;
+
+ /* Allocate ETHOFLD hardware Txqs */
+ eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
+ ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
+ eorxq->rspq.cntxt_id);
+ if (ret)
+ goto out_free_queues;
+
+ /* Allocate IRQs, set IRQ affinity, and start Rx */
+ if (adap->flags & CXGB4_USING_MSIX) {
+ ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
+ eorxq->msix->desc, &eorxq->rspq);
+ if (ret)
+ goto out_free_msix;
+
+ cxgb4_set_msix_aff(adap, eorxq->msix->vec,
+ &eorxq->msix->aff_mask, i);
+ }
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb4_enable_rx(adap, &eorxq->rspq);
+ }
+
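+ /* Count this port as a user of the adapter-wide ETHOFLD queues */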
+ refcount_inc(&adap->tc_mqprio->refcnt);
+ return 0;
+
+out_free_msix:
+ while (i-- > 0) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb4_quiesce_rx(&eorxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
+ }
+ }
+
+out_free_queues:
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ if (eorxq->rspq.desc)
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+ if (eorxq->msix)
+ cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
+ t4_sge_free_ethofld_txq(adap, eotxq);
+ }
+
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+
+ return ret;
+}
+
+static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_ofld_rxq *eorxq;
+ struct sge_eohw_txq *eotxq;
+ u32 i;
+
+ /* Return if no ETHOFLD structures have been allocated yet */
+ if (!refcount_read(&adap->tc_mqprio->refcnt))
+ return;
+
+ /* Return if no hardware queues have been allocated */
+ if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
+ return;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ /* The device removal path will already disable NAPI
+ * before unregistering the netdevice. So, only disable
+ * NAPI if we're not in the device removal path.
+ */
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ cxgb4_quiesce_rx(&eorxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
+ }
+
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+ t4_sge_free_ethofld_txq(adap, eotxq);
+ }
+
+ /* Free up ETHOFLD structures if there are no users */
+ if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+ }
+}
+
+static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct ch_sched_params p = {
+ .type = SCHED_CLASS_TYPE_PACKET,
+ .u.params.level = SCHED_CLASS_LEVEL_CL_RL,
+ .u.params.mode = SCHED_CLASS_MODE_FLOW,
+ .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ .u.params.class = SCHED_CLS_NONE,
+ .u.params.weight = 0,
+ .u.params.pktsize = dev->mtu,
+ };
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sched_class *e;
+ int ret;
+ u8 i;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ p.u.params.channel = pi->tx_chan;
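+ /* Allocate one scheduling class per TC and configure its rate
+ * parameters from the mqprio request
+ */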
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ /* Convert from bytes per second to Kbps */
+ p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
+ p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
+
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ tc_port_mqprio->tc_hwtc_map[i] = e->idx;
+ }
+
+ return 0;
+
+out_err:
+ while (i--)
+ cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
+
+ return ret;
+}
+
+static void cxgb4_mqprio_free_tc(struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u8 i;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
+ cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
+}
+
+static int cxgb4_mqprio_class_bind(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u8 tc)
+{
+ struct ch_sched_flowc fe;
+ int ret;
+
+ init_completion(&eosw_txq->completion);
+
+ fe.tid = eosw_txq->eotid;
+ fe.class = tc;
+
+ ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
+ if (ret)
+ return ret;
+
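+ /* Wait for the FLOWC reply before declaring the bind complete */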
+ ret = wait_for_completion_timeout(&eosw_txq->completion,
+ CXGB4_FLOWC_WAIT_TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void cxgb4_mqprio_class_unbind(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u8 tc)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_sched_flowc fe;
+
+ /* If we're shutting down, interrupts are disabled and no completions
+ * come back. So, skip waiting for completions in this scenario.
+ */
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ init_completion(&eosw_txq->completion);
+
+ fe.tid = eosw_txq->eotid;
+ fe.class = tc;
+ cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);
+
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ wait_for_completion_timeout(&eosw_txq->completion,
+ CXGB4_FLOWC_WAIT_TIMEOUT);
+}
+
+static int cxgb4_mqprio_enable_offload(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ u32 qoffset, qcount, tot_qcount, qid, hwqid;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ int eotid, ret;
+ u16 i, j;
+ u8 hwtc;
+
+ ret = cxgb4_mqprio_alloc_hw_resources(dev);
+ if (ret)
+ return ret;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qoffset = mqprio->qopt.offset[i];
+ qcount = mqprio->qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eotid = cxgb4_get_free_eotid(&adap->tids);
+ if (eotid < 0) {
+ ret = -ENOMEM;
+ goto out_free_eotids;
+ }
+
+ qid = qoffset + j;
+ hwqid = pi->first_qset + (eotid % pi->nqsets);
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ ret = cxgb4_init_eosw_txq(dev, eosw_txq,
+ eotid, hwqid);
+ if (ret)
+ goto out_free_eotids;
+
+ cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
+ if (ret)
+ goto out_free_eotids;
+ }
+ }
+
+ memcpy(&tc_port_mqprio->mqprio, mqprio,
+ sizeof(struct tc_mqprio_qopt_offload));
+
+ /* Inform the stack about the configured tc params.
+ *
+ * Set the correct queue map. If no queue count has been
+ * specified, send the traffic through the default NIC
+ * queues instead of the ETHOFLD queues.
+ */
+ ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+ if (ret)
+ goto out_free_eotids;
+
+ tot_qcount = pi->nqsets;
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qcount = mqprio->qopt.count[i];
+ if (qcount) {
+ qoffset = mqprio->qopt.offset[i] + pi->nqsets;
+ } else {
+ qcount = pi->nqsets;
+ qoffset = 0;
+ }
+
+ ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
+ if (ret)
+ goto out_reset_tc;
+
+ tot_qcount += mqprio->qopt.count[i];
+ }
+
+ ret = netif_set_real_num_tx_queues(dev, tot_qcount);
+ if (ret)
+ goto out_reset_tc;
+
+ tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
+ return 0;
+
+out_reset_tc:
+ netdev_reset_tc(dev);
+ i = mqprio->qopt.num_tc;
+
+out_free_eotids:
+ while (i-- > 0) {
+ qoffset = mqprio->qopt.offset[i];
+ qcount = mqprio->qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
+
+ cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
+ cxgb4_free_eosw_txq(dev, eosw_txq);
+ }
+ }
+
+ cxgb4_mqprio_free_hw_resources(dev);
+ return ret;
+}
+
+static void cxgb4_mqprio_disable_offload(struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qoffset, qcount;
+ u16 i, j;
+ u8 hwtc;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
+ return;
+
+ netdev_reset_tc(dev);
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+
+ for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
+ qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
+ qcount = tc_port_mqprio->mqprio.qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
+
+ cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
+ cxgb4_free_eosw_txq(dev, eosw_txq);
+ }
+ }
+
+ cxgb4_mqprio_free_hw_resources(dev);
+
+ /* Free up the traffic classes */
+ cxgb4_mqprio_free_tc(dev);
+
+ memset(&tc_port_mqprio->mqprio, 0,
+ sizeof(struct tc_mqprio_qopt_offload));
+
+ tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
+}
+
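+/* Entry point for TC-MQPRIO offload. An iproute2 invocation that would
+ * exercise this path looks roughly like the following (illustrative
+ * example only; the interface name and rates are placeholders):
+ *
+ *   tc qdisc replace dev eth0 root mqprio num_tc 2 map 0 1 \
+ *      queues 2@0 2@2 hw 1 mode channel \
+ *      shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 2Gbit 4Gbit
+ */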
+int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ bool needs_bring_up = false;
+ int ret;
+
+ ret = cxgb4_mqprio_validate(dev, mqprio);
+ if (ret)
+ return ret;
+
+ /* To configure tc params, the currently allocated EOTIDs must
+ * be freed up. However, they can't be freed up if there's
+ * traffic running on the interface. So, ensure interface is
+ * down before configuring tc params.
+ */
+ if (netif_running(dev)) {
+ cxgb_close(dev);
+ needs_bring_up = true;
+ }
+
+ cxgb4_mqprio_disable_offload(dev);
+
+ /* If a clear was requested, just return since the resources
+ * have already been freed up by now.
+ */
+ if (!mqprio->qopt.num_tc)
+ goto out;
+
+ /* Allocate free available traffic classes and configure
+ * their rate parameters.
+ */
+ ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
+ if (ret)
+ goto out;
+
+ ret = cxgb4_mqprio_enable_offload(dev, mqprio);
+ if (ret) {
+ cxgb4_mqprio_free_tc(dev);
+ goto out;
+ }
+
+out:
+ if (needs_bring_up)
+ cxgb_open(dev);
+
+ return ret;
+}
+
+int cxgb4_init_tc_mqprio(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
+ struct cxgb4_tc_mqprio *tc_mqprio;
+ struct sge_eosw_txq *eosw_txq;
+ int ret = 0;
+ u8 i;
+
+ tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
+ if (!tc_mqprio)
+ return -ENOMEM;
+
+ tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
+ GFP_KERNEL);
+ if (!tc_port_mqprio) {
+ ret = -ENOMEM;
+ goto out_free_mqprio;
+ }
+
+ tc_mqprio->port_mqprio = tc_port_mqprio;
+ for (i = 0; i < adap->params.nports; i++) {
+ port_mqprio = &tc_mqprio->port_mqprio[i];
+ eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
+ GFP_KERNEL);
+ if (!eosw_txq) {
+ ret = -ENOMEM;
+ goto out_free_ports;
+ }
+ port_mqprio->eosw_txq = eosw_txq;
+ }
+
+ adap->tc_mqprio = tc_mqprio;
+ refcount_set(&adap->tc_mqprio->refcnt, 0);
+ return 0;
+
+out_free_ports:
+ for (i = 0; i < adap->params.nports; i++) {
+ port_mqprio = &tc_mqprio->port_mqprio[i];
+ kfree(port_mqprio->eosw_txq);
+ }
+ kfree(tc_port_mqprio);
+
+out_free_mqprio:
+ kfree(tc_mqprio);
+ return ret;
+}
+
+void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 i;
+
+ if (adap->tc_mqprio) {
+ if (adap->tc_mqprio->port_mqprio) {
+ for (i = 0; i < adap->params.nports; i++) {
+ struct net_device *dev = adap->port[i];
+
+ if (dev)
+ cxgb4_mqprio_disable_offload(dev);
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ kfree(port_mqprio->eosw_txq);
+ }
+ kfree(adap->tc_mqprio->port_mqprio);
+ }
+ kfree(adap->tc_mqprio);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
new file mode 100644
index 000000000000..c532f1ef8451
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#ifndef __CXGB4_TC_MQPRIO_H__
+#define __CXGB4_TC_MQPRIO_H__
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
+
+#define CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM 1024
+
+#define CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM 1024
+#define CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE 64
+#define CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC 5
+#define CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT 8
+
+#define CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM 72
+
+#define CXGB4_FLOWC_WAIT_TIMEOUT (5 * HZ)
+
+enum cxgb4_mqprio_state {
+ CXGB4_MQPRIO_STATE_DISABLED = 0,
+ CXGB4_MQPRIO_STATE_ACTIVE,
+};
+
+struct cxgb4_tc_port_mqprio {
+ enum cxgb4_mqprio_state state; /* Current MQPRIO offload state */
+ struct tc_mqprio_qopt_offload mqprio; /* MQPRIO offload params */
+ struct sge_eosw_txq *eosw_txq; /* Netdev SW Tx queue array */
+ u8 tc_hwtc_map[TC_QOPT_MAX_QUEUE]; /* MQPRIO tc to hardware tc map */
+};
+
+struct cxgb4_tc_mqprio {
+ refcount_t refcnt; /* Refcount for adapter-wide resources */
+ struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
+};
+
+int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio);
+int cxgb4_init_tc_mqprio(struct adapter *adap);
+void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
+#endif /* __CXGB4_TC_MQPRIO_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 02fc63fa7f25..133f8623ba86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -36,6 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
@@ -148,6 +149,7 @@ static int fill_action_fields(struct adapter *adap,
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
const struct cxgb4_match_field *start, *link_start = NULL;
+ struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adapter = netdev2adap(dev);
__be16 protocol = cls->common.protocol;
struct ch_filter_specification fs;
@@ -164,14 +166,21 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
- /* Fetch the location to insert the filter. */
- filter_id = cls->knode.handle & 0xFFFFF;
+ /* Note that TC uses prio 0 to indicate that the stack should
+ * generate an automatic prio; hence, prio 0 is never passed to
+ * the driver. However, the hardware TCAM index starts from 0,
+ * hence the -1 here.
+ */
+ filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id > adapter->tids.nftids) {
- dev_err(adapter->pdev_dev,
- "Location %d out of range for insertion. Max: %d\n",
- filter_id, adapter->tids.nftids);
- return -ERANGE;
+ /* Only insert U32 rule if its priority doesn't conflict with
+ * existing rules in the LETCAM.
+ */
+ if (filter_id >= adapter->tids.nftids ||
+ !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ return -ENOMEM;
}
t = adapter->tc_u32;
@@ -190,6 +199,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
+ fs.tc_prio = cls->common.prio;
+ fs.tc_cookie = cls->knode.handle;
+
if (protocol == htons(ETH_P_IPV6)) {
start = cxgb4_ipv6_fields;
is_ipv6 = true;
@@ -350,14 +362,10 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
- filter_id = cls->knode.handle & 0xFFFFF;
-
- if (filter_id > adapter->tids.nftids) {
- dev_err(adapter->pdev_dev,
- "Location %d out of range for deletion. Max: %d\n",
- filter_id, adapter->tids.nftids);
+ filter_id = TC_U32_NODE(cls->knode.handle) - 1;
+ if (filter_id >= adapter->tids.nftids ||
+ cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
return -ERANGE;
- }
t = adapter->tc_u32;
handle = cls->knode.handle;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 86b528d8364c..cce33d279094 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -53,35 +53,6 @@
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
-static int get_msix_idx_from_bmap(struct adapter *adap)
-{
- struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
- unsigned long flags;
- unsigned int msix_idx;
-
- spin_lock_irqsave(&bmap->lock, flags);
- msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
- if (msix_idx < bmap->mapsize) {
- __set_bit(msix_idx, bmap->msix_bmap);
- } else {
- spin_unlock_irqrestore(&bmap->lock, flags);
- return -ENOSPC;
- }
-
- spin_unlock_irqrestore(&bmap->lock, flags);
- return msix_idx;
-}
-
-static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
-{
- struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
- unsigned long flags;
-
- spin_lock_irqsave(&bmap->lock, flags);
- __clear_bit(msix_idx, bmap->msix_bmap);
- spin_unlock_irqrestore(&bmap->lock, flags);
-}
-
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
@@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
- int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
+ int i, err, msi_idx, que_idx = 0;
struct sge *s = &adap->sge;
unsigned int per_chan;
@@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
}
if (msi_idx >= 0) {
- bmap_idx = get_msix_idx_from_bmap(adap);
- if (bmap_idx < 0) {
+ msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msi_idx < 0) {
err = -ENOSPC;
goto freeout;
}
- msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+
+ snprintf(adap->msix_info[msi_idx].desc,
+ sizeof(adap->msix_info[msi_idx].desc),
+ "%s-%s%d",
+ adap->port[0]->name, rxq_info->name, i);
+
+ q->msix = &adap->msix_info[msi_idx];
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[que_idx++ / per_chan],
@@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
0);
if (err)
goto freeout;
- if (msi_idx >= 0)
- rxq_info->msix_tbl[i] = bmap_idx;
+
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
@@ -188,6 +164,8 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
+ if (q->msix)
+ cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
}
return err;
}
@@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
- if (adap->flags & CXGB4_USING_MSIX) {
- rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
- sizeof(unsigned short),
- GFP_KERNEL);
- if (!rxq_info->msix_tbl)
- return -ENOMEM;
- }
-
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
@@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
- if (adap->flags & CXGB4_USING_MSIX)
- kfree(rxq_info->msix_tbl);
}
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
@@ -355,13 +323,12 @@ static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- struct uld_msix_info *minfo;
+ struct msix_info *minfo;
+ unsigned int idx;
int err = 0;
- unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
+ minfo = rxq_info->uldrxq[idx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
@@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
unwind:
while (idx-- > 0) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
+ minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
- free_msix_idx_in_bmap(adap, bmap_idx);
+ cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
return err;
@@ -389,69 +355,45 @@ static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- struct uld_msix_info *minfo;
- unsigned int idx, bmap_idx;
+ struct msix_info *minfo;
+ unsigned int idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
-
+ minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
- free_msix_idx_in_bmap(adap, bmap_idx);
+ cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
}
-static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int n = sizeof(adap->msix_info_ulds[0].desc);
- unsigned int idx, bmap_idx;
+ int idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
-
- snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
- adap->port[0]->name, rxq_info->name, idx);
- }
-}
-
-static void enable_rx(struct adapter *adap, struct sge_rspq *q)
-{
- if (!q)
- return;
+ struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
- if (q->handler)
- napi_enable(&q->napi);
-
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
-}
+ if (!q)
+ continue;
-static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
-{
- if (q && q->handler)
- napi_disable(&q->napi);
+ cxgb4_enable_rx(adap, q);
+ }
}
-static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
- for_each_uldrxq(rxq_info, idx)
- enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
-}
+ for_each_uldrxq(rxq_info, idx) {
+ struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
-{
- struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx;
+ if (!q)
+ continue;
- for_each_uldrxq(rxq_info, idx)
- quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+ cxgb4_quiesce_rx(q);
+ }
}
static void
@@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type,
if (ret)
goto free_queues;
if (adap->flags & CXGB4_USING_MSIX) {
- name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cee582e36134..861b25d28ed6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -89,6 +89,10 @@ union aopen_entry {
union aopen_entry *next;
};
+struct eotid_entry {
+ void *data;
+};
+
/*
* Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically.
@@ -126,6 +130,12 @@ struct tid_info {
unsigned int v6_stids_in_use;
unsigned int sftids_in_use;
+ /* ETHOFLD range */
+ struct eotid_entry *eotid_tab;
+ unsigned long *eotid_bmap;
+ unsigned int eotid_base;
+ unsigned int neotids;
+
/* TIDs in the TCAM */
atomic_t tids_in_use;
/* TIDs in the HASH */
@@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
atomic_inc(&t->conns_in_use);
}
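+/* ETHOFLD TID helpers: eotid_bmap tracks which TIDs are allocated and
+ * eotid_tab stores the per-TID driver data.
+ */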
+static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
+ u32 eotid)
+{
+ return eotid < t->neotids ? &t->eotid_tab[eotid] : NULL;
+}
+
+static inline int cxgb4_get_free_eotid(struct tid_info *t)
+{
+ int eotid;
+
+ eotid = find_first_zero_bit(t->eotid_bmap, t->neotids);
+ if (eotid >= t->neotids)
+ eotid = -1;
+
+ return eotid;
+}
+
+static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
+{
+ set_bit(eotid, t->eotid_bmap);
+ t->eotid_tab[eotid].data = data;
+}
+
+static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
+{
+ clear_bit(eotid, t->eotid_bmap);
+ t->eotid_tab[eotid].data = NULL;
+}
+
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1a407d3c1d67..e9e45006632d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -351,15 +351,13 @@ exists:
static void _t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
- struct sk_buff *skb;
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
- while ((skb = __skb_dequeue(&e->arpq)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&e->arpq);
}
d = container_of(e, struct l2t_data, l2tab[e->idx]);
@@ -370,7 +368,6 @@ static void _t4_l2e_free(struct l2t_entry *e)
static void t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
- struct sk_buff *skb;
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
@@ -378,8 +375,7 @@ static void t4_l2e_free(struct l2t_entry *e)
neigh_release(e->neigh);
e->neigh = NULL;
}
- while ((skb = __skb_dequeue(&e->arpq)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&e->arpq);
}
spin_unlock_bh(&e->lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 60218dc676a8..3e61bd5d0c29 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
e = &s->tab[p->u.params.class];
switch (op) {
case SCHED_FW_OP_ADD:
+ case SCHED_FW_OP_DEL:
err = t4_sched_params(adap, p->type,
p->u.params.level, p->u.params.mode,
p->u.params.rateunit,
@@ -92,45 +93,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
pf = adap->pf;
vf = 0;
+
+ err = t4_set_params(adap, adap->mbox, pf, vf, 1,
+ &fw_param, &fw_class);
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ fe = (struct sched_flowc_entry *)arg;
+
+ fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
+ err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
+ fe->param.tid, fw_class);
break;
}
default:
err = -ENOTSUPP;
- goto out;
+ break;
}
- err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
-
-out:
return err;
}
-static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
- const unsigned int qid,
- int *index)
+static void *t4_sched_entry_lookup(struct port_info *pi,
+ enum sched_bind_type type,
+ const u32 val)
{
struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
- struct sched_class *found = NULL;
- int i;
+ void *found = NULL;
- /* Look for a class with matching bound queue parameters */
+ /* Look for an entry with matching @val */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
- struct sched_queue_entry *qe;
-
- i = 0;
- if (e->state == SCHED_STATE_UNUSED)
+ if (e->state == SCHED_STATE_UNUSED ||
+ e->bind_type != type)
continue;
- list_for_each_entry(qe, &e->queue_list, list) {
- if (qe->cntxt_id == qid) {
- found = e;
- if (index)
- *index = i;
- break;
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ list_for_each_entry(qe, &e->entry_list, list) {
+ if (qe->cntxt_id == val) {
+ found = qe;
+ break;
+ }
+ }
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list) {
+ if (fe->param.tid == val) {
+ found = fe;
+ break;
+ }
}
- i++;
+ break;
+ }
+ default:
+ return NULL;
}
if (found)
@@ -142,52 +167,41 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- unsigned int qid;
- int index = -1;
+ struct sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
- qid = txq->q.cntxt_id;
- /* Find the existing class that the queue is bound to */
- e = t4_sched_queue_lookup(pi, qid, &index);
- if (e && index >= 0) {
- int i = 0;
-
- list_for_each_entry(qe, &e->queue_list, list) {
- if (i == index)
- break;
- i++;
- }
+ /* Find the existing entry that the queue is bound to */
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ if (qe) {
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
if (err)
return err;
+ e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
- if (atomic_dec_and_test(&e->refcnt)) {
- e->state = SCHED_STATE_UNUSED;
- memset(&e->info, 0, sizeof(e->info));
- }
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
+ struct sched_class *e;
unsigned int qid;
int err = 0;
@@ -215,7 +229,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (err)
goto out_err;
- list_add_tail(&qe->list, &e->queue_list);
+ list_add_tail(&qe->list, &e->entry_list);
+ e->bind_type = SCHED_QUEUE;
atomic_inc(&e->refcnt);
return err;
@@ -224,6 +239,71 @@ out_err:
return err;
}
+static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ /* Find the existing entry that the flowc is bound to */
+ fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
+ if (fe) {
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
+ false);
+ if (err)
+ return err;
+
+ e = &pi->sched_tbl->tab[fe->param.class];
+ list_del(&fe->list);
+ kvfree(fe);
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
+ }
+ return err;
+}
+
+static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
+ if (!fe)
+ return -ENOMEM;
+
+ /* Unbind flowc from any existing class */
+ err = t4_sched_flowc_unbind(pi, p);
+ if (err)
+ goto out_err;
+
+ /* Bind flowc to specified class */
+ memcpy(&fe->param, p, sizeof(fe->param));
+
+ e = &s->tab[fe->param.class];
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
+ if (err)
+ goto out_err;
+
+ list_add_tail(&fe->list, &e->entry_list);
+ e->bind_type = SCHED_FLOWC;
+ atomic_inc(&e->refcnt);
+ return err;
+
+out_err:
+ kvfree(fe);
+ return err;
+}
+
static void t4_sched_class_unbind_all(struct port_info *pi,
struct sched_class *e,
enum sched_bind_type type)
@@ -235,10 +315,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi,
case SCHED_QUEUE: {
struct sched_queue_entry *qe;
- list_for_each_entry(qe, &e->queue_list, list)
+ list_for_each_entry(qe, &e->entry_list, list)
t4_sched_queue_unbind(pi, &qe->param);
break;
}
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list)
+ t4_sched_flowc_unbind(pi, &fe->param);
+ break;
+ }
default:
break;
}
@@ -262,6 +349,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
err = t4_sched_queue_unbind(pi, qe);
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ if (bind)
+ err = t4_sched_flowc_bind(pi, fe);
+ else
+ err = t4_sched_flowc_unbind(pi, fe);
+ break;
+ }
default:
err = -ENOTSUPP;
break;
@@ -299,6 +395,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -340,6 +442,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -355,8 +463,8 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
struct sched_class *found = NULL;
+ struct sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -400,7 +508,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_class *e;
+ struct sched_class *e = NULL;
u8 class_id;
int err;
@@ -415,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- /* See if there's an exisiting class with same
- * requested sched params
+ /* See if there's an existing class with the same requested sched
+ * params. Classes can only be shared among FLOWC types. For
+ * other types, always request a new class.
*/
- e = t4_sched_class_lookup(pi, p);
+ if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
+ e = t4_sched_class_lookup(pi, p);
+
if (!e) {
struct ch_sched_params np;
@@ -467,9 +578,57 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
return t4_sched_class_alloc(pi, p);
}
-static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+/**
+ * cxgb4_sched_class_free - free a scheduling class
+ * @dev: net_device pointer
+ * @classid: scheduling class id to free
+ *
+ * Frees a scheduling class if there are no users.
+ */
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
- t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s = pi->sched_tbl;
+ struct ch_sched_params p;
+ struct sched_class *e;
+ u32 speed;
+ int ret;
+
+ e = &s->tab[classid];
+ if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
+ /* Port based rate limiting needs explicit reset back
+ * to max rate. But we'll do an explicit reset for all
+ * types, not just the port based type, to be on the
+ * safe side.
+ */
+ memcpy(&p, &e->info, sizeof(p));
+ /* Always reset mode to 0. Otherwise, FLOWC mode will
+ * still be enabled even after resetting the traffic
+ * class.
+ */
+ p.u.params.mode = 0;
+ p.u.params.minrate = 0;
+ p.u.params.pktsize = 0;
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (!ret)
+ p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
+ else
+ p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
+
+ t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
+
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+}
+
+static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ t4_sched_class_unbind_all(pi, e, e->bind_type);
+ cxgb4_sched_class_free(dev, e->idx);
}
struct sched_table *t4_init_sched(unsigned int sched_size)
@@ -487,7 +646,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
- INIT_LIST_HEAD(&s->tab[i].queue_list);
+ INIT_LIST_HEAD(&s->tab[i].entry_list);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -510,7 +669,7 @@ void t4_cleanup_sched(struct adapter *adap)
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
- t4_sched_class_free(pi, e);
+ t4_sched_class_free(adap->port[j], e);
}
kvfree(s);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 168fb4ce3759..e92ff68bdd0a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -52,10 +52,12 @@ enum {
enum sched_fw_ops {
SCHED_FW_OP_ADD,
+ SCHED_FW_OP_DEL,
};
enum sched_bind_type {
SCHED_QUEUE,
+ SCHED_FLOWC,
};
struct sched_queue_entry {
@@ -64,11 +66,17 @@ struct sched_queue_entry {
struct ch_sched_queue param;
};
+struct sched_flowc_entry {
+ struct list_head list;
+ struct ch_sched_flowc param;
+};
+
struct sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
- struct list_head queue_list;
+ enum sched_bind_type bind_type;
+ struct list_head entry_list;
atomic_t refcnt;
};
@@ -102,6 +110,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
struct ch_sched_params *p);
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
void t4_cleanup_sched(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 928bfea5457b..97cda501e7e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -55,6 +55,8 @@
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -269,7 +271,6 @@ out_err:
}
EXPORT_SYMBOL(cxgb4_map_skb);
-#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
const dma_addr_t *addr)
{
@@ -284,6 +285,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
+#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
* deferred_unmap_destructor - unmap a packet when it is freed
* @skb: the packet
@@ -298,65 +300,6 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
}
#endif
-static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
- const struct ulptx_sgl *sgl, const struct sge_txq *q)
-{
- const struct ulptx_sge_pair *p;
- unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-
- if (likely(skb_headlen(skb)))
- dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- else {
- dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- nfrags--;
- }
-
- /*
- * the complexity below is because of the possibility of a wrap-around
- * in the middle of an SGL
- */
- for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
- if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
-unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p++;
- } else if ((u8 *)p == (u8 *)q->stat) {
- p = (const struct ulptx_sge_pair *)q->desc;
- goto unmap;
- } else if ((u8 *)p + 8 == (u8 *)q->stat) {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[2];
- } else {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[1];
- }
- }
- if (nfrags) {
- __be64 addr;
-
- if ((u8 *)p == (u8 *)q->stat)
- p = (const struct ulptx_sge_pair *)q->desc;
- addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
- *(const __be64 *)q->desc;
- dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
- DMA_TO_DEVICE);
- }
-}
-
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
* @adapter: the adapter
@@ -370,15 +313,16 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap)
{
- struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
- struct device *dev = adap->pdev_dev;
+ struct tx_sw_desc *d;
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
- if (unmap)
- unmap_sgl(dev, d->skb, d->sgl, q);
+ if (unmap && d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
@@ -790,6 +734,8 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct cpl_tx_tnl_lso);
hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ return 0;
} else {
hdrlen = skb_shinfo(skb)->gso_size ?
sizeof(struct cpl_tx_pkt_lso_core) : 0;
@@ -831,12 +777,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size) {
- if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ if (skb->encapsulation && chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_tnl_lso);
- else
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ u32 pkt_hdrlen;
+
+ pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
+ skb_headlen(skb));
+ hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
+ round_up(pkt_hdrlen, 16);
+ } else {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core);
+ }
hdrlen += sizeof(struct cpl_tx_pkt_core);
flits += (hdrlen / sizeof(__be64));
@@ -1309,6 +1263,35 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
+static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
+ struct cpl_tx_pkt_lso_core *lso)
+{
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ int l3hdr_len = skb_network_header_len(skb);
+ const struct skb_shared_info *ssi;
+ bool ipv6 = false;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_type & SKB_GSO_TCPV6)
+ ipv6 = true;
+
+ lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(ipv6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(skb->len);
+ else
+ lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
+
+ return (void *)(lso + 1);
+}
+
/**
* t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
* @adap: the adapter
@@ -1347,6 +1330,50 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
return reclaimed;
}
+static inline int cxgb4_validate_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 min_pkt_len)
+{
+ u32 max_pkt_len;
+
+ /* The chip min packet length is 10 octets but some firmware
+ * commands have a minimum packet length requirement. So, play
+ * it safe and reject anything shorter than @min_pkt_len.
+ */
+ if (unlikely(skb->len < min_pkt_len))
+ return -EINVAL;
+
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+
+ if (skb_vlan_tagged(skb))
+ max_pkt_len += VLAN_HLEN;
+
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len)
+{
+ wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
+ wr->u.udpseg.ethlen = skb_network_offset(skb);
+ wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.udpseg.udplen = sizeof(struct udphdr);
+ wr->u.udpseg.rtplen = 0;
+ wr->u.udpseg.r4 = 0;
+ if (skb_shinfo(skb)->gso_size)
+ wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ else
+ wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
+ wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
+ wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ return (void *)(wr + 1);
+}
+
/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1356,41 +1383,25 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
*/
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid, ctrl0, op;
- u64 cntrl, *end, *sgl;
- int qidx, credits;
- unsigned int flits, ndesc;
- struct adapter *adap;
- struct sge_eth_txq *q;
- const struct port_info *pi;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int last_desc, flits, ndesc;
+ u32 wr_mid, ctrl0, op, sgl_off = 0;
+ const struct skb_shared_info *ssi;
+ int len, qidx, credits, ret, left;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_eo_wr *eowr;
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
- const struct skb_shared_info *ssi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct port_info *pi;
bool immediate = false;
- int len, max_pkt_len;
- bool ptp_enabled = is_ptp_enabled(skb, dev);
+ u64 cntrl, *end, *sgl;
+ struct sge_eth_txq *q;
unsigned int chip_ver;
- enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
-
-#ifdef CONFIG_CHELSIO_T4_FCOE
- int err;
-#endif /* CONFIG_CHELSIO_T4_FCOE */
-
- /*
- * The chip min packet length is 10 octets but play safe and reject
- * anything shorter than an Ethernet header.
- */
- if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ struct adapter *adap;
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tagged(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
goto out_free;
pi = netdev_priv(dev);
@@ -1421,8 +1432,8 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
- err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(err == -ENOTSUPP)) {
+ ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+ if (unlikely(ret == -ENOTSUPP)) {
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
goto out_free;
@@ -1450,8 +1461,14 @@ out_free: dev_kfree_skb_any(skb);
if (skb->encapsulation && chip_ver > CHELSIO_T5)
tnl_type = cxgb_encap_offload_supported(skb);
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
if (!immediate &&
- unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1482,17 +1499,18 @@ out_free: dev_kfree_skb_any(skb);
}
wr = (void *)&q->q.desc[q->q.pidx];
+ eowr = (void *)&q->q.desc[q->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
wr->r3 = cpu_to_be64(0);
- end = (u64 *)wr + flits;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ end = (u64 *)eowr + flits;
+ else
+ end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
len += sizeof(*cpl);
- if (ssi->gso_size) {
+ if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
- bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
- int l3hdr_len = skb_network_header_len(skb);
- int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
if (tnl_type)
@@ -1519,46 +1537,33 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
- lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->ipid_ofst = htons(0);
- lso->mss = htons(ssi->gso_size);
- lso->seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->len = htonl(skb->len);
- else
- lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
-
- if (CHELSIO_CHIP_VERSION(adap->params.chip)
- <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
-
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ cpl = write_tso_wr(adap, skb, lso);
+ cntrl = hwcsum(adap->params.chip, skb);
}
sgl = (u64 *)(cpl + 1); /* sgl start here */
- if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
- /* If current position is already at the end of the
- * txq, reset the current to point to start of the queue
- * and update the end ptr as well.
- */
- if (sgl == (u64 *)q->q.stat) {
- int left = (u8 *)end - (u8 *)q->q.stat;
-
- end = (void *)q->q.desc + left;
- sgl = (void *)q->q.desc;
- }
- }
q->tso++;
q->tx_cso += ssi->gso_segs;
+ } else if (ssi->gso_size) {
+ u64 *start;
+ u32 hdrlen;
+
+ hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ len += hdrlen;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(len));
+ cpl = write_eo_udp_wr(skb, eowr, hdrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+
+ start = (u64 *)(cpl + 1);
+ sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
+ hdrlen);
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ sgl_off = hdrlen;
+ q->uso++;
+ q->tx_cso += ssi->gso_segs;
} else {
if (ptp_enabled)
op = FW_PTP_TX_PKT_WR;
@@ -1575,6 +1580,16 @@ out_free: dev_kfree_skb_any(skb);
}
}
+ if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
+ /* If the current position is already at the end of the
+ * txq, reset it to point to the start of the queue and
+ * update the end ptr as well.
+ */
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ sgl = (void *)q->q.desc;
+ }
+
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
@@ -1604,16 +1619,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
- int last_desc;
-
- cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
+ sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
@@ -1622,6 +1631,10 @@ out_free: dev_kfree_skb_any(skb);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
+
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Constants ... */
@@ -1707,35 +1720,28 @@ static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ unsigned int last_desc, flits, ndesc;
const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_vm_wr *wr;
- int qidx, credits, max_pkt_len;
+ struct tx_sw_desc *sgl_sdesc;
struct cpl_tx_pkt_core *cpl;
const struct port_info *pi;
- unsigned int flits, ndesc;
struct sge_eth_txq *txq;
struct adapter *adapter;
+ int qidx, credits, ret;
+ size_t fw_hdr_copy_len;
u64 cntrl, *end;
u32 wr_mid;
- const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci);
/* The chip minimum packet length is 10 octets but the firmware
* command that we are using requires that we copy the Ethernet header
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- if (unlikely(skb->len < fw_hdr_copy_len))
- goto out_free;
-
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
+ if (ret)
goto out_free;
/* Figure out which TX Queue we're going to use. */
@@ -1771,12 +1777,19 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ last_desc = txq->q.pidx + ndesc - 1;
+ if (last_desc >= txq->q.size)
+ last_desc -= txq->q.size;
+ sgl_sdesc = &txq->q.sdesc[last_desc];
+
if (!t4vf_is_eth_imm(skb) &&
- unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
/* We need to map the skb into PCI DMA space (because it can't
* be in-lined directly into the Work Request) and the mapping
* operation failed. Record the error and drop the packet.
*/
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
txq->mapping_err++;
goto out_free;
}
@@ -1951,7 +1964,6 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
struct sge_txq *tq = &txq->q;
- int last_desc;
/* If the Work Request header was an exact multiple of our TX
* Descriptor length, then it's possible that the starting SGL
@@ -1965,14 +1977,9 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
((void *)end - (void *)tq->stat));
}
- cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = tq->pidx + ndesc - 1;
- if (last_desc >= tq->size)
- last_desc -= tq->size;
- tq->sdesc[last_desc].skb = skb;
- tq->sdesc[last_desc].sgl = sgl;
+ sgl_sdesc->skb = skb;
}
/* Advance our internal TX Queue state, tell the hardware about
@@ -1991,34 +1998,473 @@ out_free:
return NETDEV_TX_OK;
}
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
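+/* Advance a circular queue index by @n, wrapping around at @max */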
+static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
+{
+ u32 val = *idx + n;
+
+ if (val >= max)
+ val -= max;
+
+ *idx = val;
+}
+
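+/* Unmap and free @ndesc software Tx descriptors, starting at
+ * @eosw_txq->last_cidx
+ */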
+void cxgb4_eosw_txq_free_desc(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq, u32 ndesc)
+{
+ struct tx_sw_desc *d;
+
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ while (ndesc--) {
+ if (d->skb) {
+ if (d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
+ dev_consume_skb_any(d->skb);
+ d->skb = NULL;
+ }
+ eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
+ eosw_txq->ndesc);
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ }
+}
+
+static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
+{
+ eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
+ eosw_txq->inuse += n;
+}
+
+static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb)
+{
+ if (eosw_txq->inuse == eosw_txq->ndesc)
+ return -ENOMEM;
+
+ eosw_txq->desc[eosw_txq->pidx].skb = skb;
+ return 0;
+}
+
+static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
+{
+ return eosw_txq->desc[eosw_txq->last_pidx].skb;
+}
+
+static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
+ struct sk_buff *skb, u32 hdr_len)
+{
+ u8 flits, nsgl = 0;
+ u32 wrlen;
+
+ wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+ wrlen += sizeof(struct cpl_tx_pkt_lso_core);
+
+ wrlen += roundup(hdr_len, 16);
+
+ /* Packet headers + WR + CPLs */
+ flits = DIV_ROUND_UP(wrlen, 8);
+
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ if (skb_headlen(skb) - hdr_len)
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ else
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
+ } else if (skb->len - hdr_len) {
+ nsgl = sgl_len(1);
+ }
+
+ return flits + nsgl;
+}
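
The flit counts above are in the SGE's 8-byte units. For reference, sgl_len() -- the existing cxgb4.h helper used here to turn a scatter-gather entry count into flits -- looks like this (the comment is editorial):

static inline unsigned int sgl_len(unsigned int n)
{
	/* The first address/length pair occupies the 2-flit ULPTX SGL
	 * header; every later pair of entries shares 3 flits, and a
	 * leftover odd entry needs 2 more.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}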
+
+static inline void *write_eo_wr(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
+{
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cpl_tx_pkt_core *cpl;
+ u32 immd_len, wrlen16;
+ bool compl = false;
+ u8 ver, proto;
+
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
+
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+ immd_len = sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+ immd_len += sizeof(struct cpl_tx_pkt_lso_core);
+ immd_len += hdr_len;
+
+ if (!eosw_txq->ncompl ||
+ eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ compl = true;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+ }
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
+ FW_WR_COMPL_V(compl));
+ wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ wr->r3 = 0;
+ if (proto == IPPROTO_UDP) {
+ cpl = write_eo_udp_wr(skb, wr, hdr_len);
+ } else {
+ wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
+ wr->u.tcpseg.ethlen = skb_network_offset(skb);
+ wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
+ wr->u.tcpseg.tsclk_tsoff = 0;
+ wr->u.tcpseg.r4 = 0;
+ wr->u.tcpseg.r5 = 0;
+ wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+
+ wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
+ cpl = write_tso_wr(adap, skb, lso);
+ } else {
+ wr->u.tcpseg.mss = cpu_to_be16(0xffff);
+ cpl = (void *)(wr + 1);
+ }
+ }
+
+ eosw_txq->cred -= wrlen16;
+ eosw_txq->last_compl += wrlen16;
+ return cpl;
+}
+
+static void ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 wrlen, wrlen16, hdr_len, data_len;
+ enum sge_eosw_state next_state;
+ u64 cntrl, *start, *end, *sgl;
+ struct sge_eohw_txq *eohw_txq;
+ struct cpl_tx_pkt_core *cpl;
+ struct fw_eth_tx_eo_wr *wr;
+ bool skip_eotx_wr = false;
+ struct tx_sw_desc *d;
+ struct sk_buff *skb;
+ u8 flits, ndesc;
+ int left;
+
+ eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
+ spin_lock(&eohw_txq->lock);
+ reclaim_completed_tx_imm(&eohw_txq->q);
+
+ d = &eosw_txq->desc[eosw_txq->last_pidx];
+ skb = d->skb;
+ skb_tx_timestamp(skb);
+
+ wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
+ if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
+ eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
+ hdr_len = skb->len;
+ data_len = 0;
+ flits = DIV_ROUND_UP(hdr_len, 8);
+ if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
+ else
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
+ skip_eotx_wr = true;
+ } else {
+ hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ data_len = skb->len - hdr_len;
+ flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
+ }
+ ndesc = flits_to_desc(flits);
+ wrlen = flits * 8;
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+
+ /* If there are not enough CPL credits, then wait for credits
+ * to come back and retry
+ */
+ if (unlikely(wrlen16 > eosw_txq->cred))
+ goto out_unlock;
+
+ if (unlikely(skip_eotx_wr)) {
+ start = (u64 *)wr;
+ eosw_txq->state = next_state;
+ goto write_wr_headers;
+ }
+
+ cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+ if (skb_vlan_tag_present(skb))
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
+ cpl->pack = 0;
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ start = (u64 *)(cpl + 1);
+
+write_wr_headers:
+ sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
+ hdr_len);
+ if (data_len) {
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ memset(d->addr, 0, sizeof(d->addr));
+ eohw_txq->mapping_err++;
+ goto out_unlock;
+ }
+
+ end = (u64 *)wr + flits;
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+ end = (void *)eohw_txq->q.desc + left;
+ }
+
+ if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
+ /* If the current position is already at the end of the
+ * txq, wrap it back to the start of the queue and update
+ * the end pointer as well.
+ */
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+
+ end = (void *)eohw_txq->q.desc + left;
+ sgl = (void *)eohw_txq->q.desc;
+ }
+
+ cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
+ d->addr);
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ eohw_txq->uso++;
+ else
+ eohw_txq->tso++;
+ eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ eohw_txq->tx_cso++;
+ }
+
+ if (skb_vlan_tag_present(skb))
+ eohw_txq->vlan_ins++;
+
+ txq_advance(&eohw_txq->q, ndesc);
+ cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
+
+out_unlock:
+ spin_unlock(&eohw_txq->lock);
+}
+
+static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
+{
+ struct sk_buff *skb;
+ int pktcount;
+
+ switch (eosw_txq->state) {
+ case CXGB4_EO_STATE_ACTIVE:
+ case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
+ pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+ break;
+ case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
+ case CXGB4_EO_STATE_CLOSED:
+ default:
+ return;
+ }
+
+ while (pktcount--) {
+ skb = eosw_txq_peek(eosw_txq);
+ if (!skb) {
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
+ eosw_txq->ndesc);
+ continue;
+ }
+
+ ethofld_hard_xmit(dev, eosw_txq);
+ }
+}
+
+static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qid;
+ int ret;
+
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
+ goto out_free;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ qid = skb_get_queue_mapping(skb) - pi->nqsets;
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ spin_lock_bh(&eosw_txq->lock);
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret)
+ goto out_unlock;
+
+ /* The SKB is queued for processing until credits are available,
+ * so call its destructor now; the skb itself is freed later,
+ * once it has been successfully transmitted.
+ */
+ skb_orphan(skb);
+
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+ spin_unlock_bh(&eosw_txq->lock);
+ return NETDEV_TX_OK;
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
+ u16 qid = skb_get_queue_mapping(skb);
if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
return cxgb4_vf_eth_xmit(skb, dev);
+ if (unlikely(qid >= pi->nqsets))
+ return cxgb4_ethofld_xmit(skb, dev);
+
return cxgb4_eth_xmit(skb, dev);
}
/**
- * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- * @q: the SGE control Tx queue
+ * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
*
- * This is a variant of cxgb4_reclaim_completed_tx() that is used
- * for Tx queues that send only immediate data (presently just
- * the control queues) and thus do not have any sk_buffs to release.
+ * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
+ * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
+ * a traffic class.
*/
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
- int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
- int reclaim = hw_cidx - q->cidx;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ enum sge_eosw_state next_state;
+ struct sge_eosw_txq *eosw_txq;
+ u32 len, len16, nparams = 6;
+ struct fw_flowc_wr *flowc;
+ struct eotid_entry *entry;
+ struct sge_ofld_rxq *rxq;
+ struct sk_buff *skb;
+ int ret = 0;
- if (reclaim < 0)
- reclaim += q->size;
+ len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
+ len16 = DIV_ROUND_UP(len, 16);
- q->in_use -= reclaim;
- q->cidx = hw_cidx;
+ entry = cxgb4_lookup_eotid(&adap->tids, eotid);
+ if (!entry)
+ return -ENOMEM;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ return -ENOMEM;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ spin_lock_bh(&eosw_txq->lock);
+ if (tc != FW_SCHED_CLS_NONE) {
+ if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
+ } else {
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
+ }
+
+ flowc = __skb_put(skb, len);
+ memset(flowc, 0, len);
+
+ rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams) |
+ FW_WR_COMPL_V(1));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[4].val = cpu_to_be32(tc);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
+ flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
+ FW_FLOWC_MNEM_EOSTATE_CLOSING :
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
+
+ eosw_txq->cred -= len16;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret) {
+ dev_consume_skb_any(skb);
+ goto out_unlock;
+ }
+
+ eosw_txq->state = next_state;
+ eosw_txq->flowc_idx = eosw_txq->pidx;
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+ return ret;
}
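
A hedged usage sketch of the new helper: bind an eotid to a scheduler class, let traffic flow, then unbind. The eotid value and my_tc are hypothetical, and most error handling is elided:

static int example_tc_bind_cycle(struct net_device *dev, u32 my_tc)
{
	int ret;

	ret = cxgb4_ethofld_send_flowc(dev, 5, my_tc);	/* bind eotid 5 */
	if (ret)
		return ret;

	/* ... packets now flow through cxgb4_ethofld_xmit() ... */

	return cxgb4_ethofld_send_flowc(dev, 5, FW_SCHED_CLS_NONE); /* unbind */
}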
/**
@@ -3311,6 +3757,112 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
+void cxgb4_ethofld_restart(unsigned long data)
+{
+ struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+ int pktcount;
+
+ spin_lock(&eosw_txq->lock);
+ pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+
+ if (pktcount) {
+ cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
+ eosw_txq, pktcount);
+ eosw_txq->inuse -= pktcount;
+ }
+
+ /* There may be some packets waiting for completions,
+ * so attempt to send them now.
+ */
+ ethofld_xmit(eosw_txq->netdev, eosw_txq);
+ spin_unlock(&eosw_txq->lock);
+}
+
+/**
+ * cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the CPL message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ETHOFLD Tx completion. Increment the cidx here, but
+ * free up the descriptors in a tasklet later.
+ */
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ /* skip RSS header */
+ rsp++;
+
+ if (opcode == CPL_FW4_ACK) {
+ const struct cpl_fw4_ack *cpl;
+ struct sge_eosw_txq *eosw_txq;
+ struct eotid_entry *entry;
+ struct sk_buff *skb;
+ u32 hdr_len, eotid;
+ u8 flits, wrlen16;
+ int credits;
+
+ cpl = (const struct cpl_fw4_ack *)rsp;
+ eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
+ q->adap->tids.eotid_base;
+ entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
+ if (!entry)
+ goto out_done;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ goto out_done;
+
+ spin_lock(&eosw_txq->lock);
+ credits = cpl->credits;
+ while (credits > 0) {
+ skb = eosw_txq->desc[eosw_txq->cidx].skb;
+ if (!skb)
+ break;
+
+ if (unlikely((eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
+ eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
+ eosw_txq->cidx == eosw_txq->flowc_idx)) {
+ flits = DIV_ROUND_UP(skb->len, 8);
+ if (eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
+ eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
+ else
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ complete(&eosw_txq->completion);
+ } else {
+ hdr_len = eth_get_headlen(eosw_txq->netdev,
+ skb->data,
+ skb_headlen(skb));
+ flits = ethofld_calc_tx_flits(q->adap, skb,
+ hdr_len);
+ }
+ eosw_txq_advance_index(&eosw_txq->cidx, 1,
+ eosw_txq->ndesc);
+ wrlen16 = DIV_ROUND_UP(flits * 8, 16);
+ credits -= wrlen16;
+ }
+
+ eosw_txq->cred += cpl->credits;
+ eosw_txq->ncompl--;
+
+ spin_unlock(&eosw_txq->lock);
+
+ /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx
+ * if there were packets waiting for completion.
+ */
+ tasklet_schedule(&eosw_txq->qresume_tsk);
+ }
+
+out_done:
+ return 0;
+}
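
Firmware credits in the handler above are 16-byte units, while flits are 8 bytes, hence the wrlen16 = DIV_ROUND_UP(flits * 8, 16) conversion. An illustrative worked example (editorial, not part of the patch):

/* A work request of 11 flits is 88 bytes, i.e. DIV_ROUND_UP(88, 16) = 6
 * credits; an ACK carrying cpl->credits = 12 therefore retires two such
 * packets before the loop runs out of credits.
 */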
+
/*
* The MSI-X interrupt handler for an SGE response queue.
*/
@@ -3835,7 +4387,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->q.q_type = CXGB4_TXQ_ETH;
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
- txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->tso = 0;
+ txq->uso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
txq->mapping_err = 0;
txq->dbqt = dbqt;
@@ -3912,30 +4467,30 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
-int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
- struct net_device *dev, unsigned int iqid,
- unsigned int uld_type)
+static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
+ struct net_device *dev, u32 cmd, u32 iqid)
{
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
- int ret, nentries;
- struct fw_eq_ofld_cmd c;
- struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int cmd = FW_EQ_OFLD_CMD;
+ struct sge *s = &adap->sge;
+ struct fw_eq_ofld_cmd c;
+ u32 fb_min, nentries;
+ int ret;
/* Add status entries */
- nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
- sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
- NUMA_NO_NODE);
- if (!txq->q.desc)
+ nentries = q->size + s->stat_len / sizeof(struct tx_desc);
+ q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &q->phys_addr,
+ &q->sdesc, s->stat_len, NUMA_NO_NODE);
+ if (!q->desc)
return -ENOMEM;
+ if (chip_ver <= CHELSIO_T5)
+ fb_min = FETCHBURSTMIN_64B_X;
+ else
+ fb_min = FETCHBURSTMIN_64B_T6_X;
+
memset(&c, 0, sizeof(c));
- if (unlikely(uld_type == CXGB4_TX_CRYPTO))
- cmd = FW_EQ_CTRL_CMD;
c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
@@ -3947,27 +4502,42 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
- ? FETCHBURSTMIN_64B_X
- : FETCHBURSTMIN_64B_T6_X) |
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+ c.eqaddr = cpu_to_be64(q->phys_addr);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
+ kfree(q->sdesc);
+ q->sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
+ q->desc, q->phys_addr);
+ q->desc = NULL;
return ret;
}
+ init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
+ return 0;
+}
+
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
+{
+ u32 cmd = FW_EQ_OFLD_CMD;
+ int ret;
+
+ if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+ cmd = FW_EQ_CTRL_CMD;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
+ if (ret)
+ return ret;
+
txq->q.q_type = CXGB4_TXQ_ULD;
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
@@ -3976,6 +4546,26 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
return 0;
}
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid)
+{
+ int ret;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
+ if (ret)
+ return ret;
+
+ txq->q.q_type = CXGB4_TXQ_ULD;
+ spin_lock_init(&txq->lock);
+ txq->adap = adap;
+ txq->tso = 0;
+ txq->uso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
@@ -4031,6 +4621,17 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
q->fl.size ? &q->fl : NULL);
}
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+ txq->q.cntxt_id);
+ free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+ kfree(txq->q.sdesc);
+ free_txq(adap, &txq->q);
+ }
+}
+
/**
* t4_free_sge_resources - free SGE resources
* @adap: the adapter
@@ -4060,6 +4661,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+ if (eq->msix) {
+ cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
+ eq->msix = NULL;
+ }
etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
@@ -4086,8 +4691,15 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- if (adap->sge.fw_evtq.desc)
+ if (adap->sge.fw_evtq.desc) {
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+ if (adap->sge.fwevtq_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap,
+ adap->sge.fwevtq_msix_idx);
+ }
+
+ if (adap->sge.nd_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f2a7824da42b..19d18acfc9a6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8777,8 +8777,8 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
unsigned int *speedp, unsigned int *mtup)
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
- struct fw_port_cmd port_cmd;
unsigned int action, link_ok, mtu;
+ struct fw_port_cmd port_cmd;
fw_port_cap32_t linkattr;
int ret;
@@ -8813,9 +8813,12 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- *link_okp = link_ok;
- *speedp = fwcap_to_speed(linkattr);
- *mtup = mtu;
+ if (link_okp)
+ *link_okp = link_ok;
+ if (speedp)
+ *speedp = fwcap_to_speed(linkattr);
+ if (mtup)
+ *mtup = mtu;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 38dd41eb959e..575c6abcdae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1421,6 +1421,11 @@ enum {
CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
};
+#define CPL_FW4_ACK_FLOWID_S 0
+#define CPL_FW4_ACK_FLOWID_M 0xffffff
+#define CPL_FW4_ACK_FLOWID_G(x) \
+ (((x) >> CPL_FW4_ACK_FLOWID_S) & CPL_FW4_ACK_FLOWID_M)
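
The new accessors follow the driver's usual _S/_M/_G shift/mask/getter convention; the flow ID sits in the low 24 bits of the OPCODE_TID word, below the opcode byte. A worked example:

/* For x = 0x12345678, CPL_FW4_ACK_FLOWID_G(x) drops the opcode byte in
 * bits 31:24 and yields 0x345678.
 */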
+
struct cpl_fw6_msg {
u8 opcode;
u8 type;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 65313f6b5704..ac4fb43bdec6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -87,6 +87,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_EO_WR = 0x1c,
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
@@ -534,6 +535,47 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+enum fw_eth_tx_eo_type {
+ FW_ETH_TX_EO_TYPE_UDPSEG = 0,
+ FW_ETH_TX_EO_TYPE_TCPSEG,
+};
+
+struct fw_eth_tx_eo_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+ union fw_eth_tx_eo {
+ struct fw_eth_tx_eo_udpseg {
+ __u8 type;
+ __u8 ethlen;
+ __be16 iplen;
+ __u8 udplen;
+ __u8 rtplen;
+ __be16 r4;
+ __be16 mss;
+ __be16 schedpktsize;
+ __be32 plen;
+ } udpseg;
+ struct fw_eth_tx_eo_tcpseg {
+ __u8 type;
+ __u8 ethlen;
+ __be16 iplen;
+ __u8 tcplen;
+ __u8 tsclk_tsoff;
+ __be16 r4;
+ __be16 mss;
+ __be16 r5;
+ __be32 plen;
+ } tcpseg;
+ } u;
+};
+
+#define FW_ETH_TX_EO_WR_IMMDLEN_S 0
+#define FW_ETH_TX_EO_WR_IMMDLEN_M 0x1ff
+#define FW_ETH_TX_EO_WR_IMMDLEN_V(x) ((x) << FW_ETH_TX_EO_WR_IMMDLEN_S)
+#define FW_ETH_TX_EO_WR_IMMDLEN_G(x) \
+ (((x) >> FW_ETH_TX_EO_WR_IMMDLEN_S) & FW_ETH_TX_EO_WR_IMMDLEN_M)
+
struct fw_ofld_connection_wr {
__be32 op_compl;
__be32 len16_pkd;
@@ -660,6 +702,12 @@ enum fw_flowc_mnem_tcpstate {
FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */
};
+enum fw_flowc_mnem_eostate {
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
+ /* graceful close, after sending outstanding payload */
+ FW_FLOWC_MNEM_EOSTATE_CLOSING = 2,
+};
+
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -1134,6 +1182,7 @@ enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002,
FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
+ FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
};
enum fw_caps_config_ofld {
@@ -1276,6 +1325,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
+ FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
};
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index f1a0c4dceda0..f37c9a08c4cf 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
dev = platform_get_drvdata(pdev);
if (dev == NULL)
@@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
iounmap(ep->base_addr);
if (ep->res != NULL) {
- release_resource(ep->res);
- kfree(ep->res);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index e736ce2c58ca..a8f4c69252ff 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
+ free_netdev(port->netdev);
return 0;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 96e9565f1e08..a6f2063f1475 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -90,6 +90,9 @@ struct ftgmac100 {
struct mii_bus *mii_bus;
struct clk *clk;
+ /* AST2500/AST2600 RMII ref clock gate */
+ struct clk *rclk;
+
/* Link management */
int cur_speed;
int cur_duplex;
@@ -1609,7 +1612,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
- int phy_intf = PHY_INTERFACE_MODE_RGMII;
+ phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
struct device_node *np = pdev->dev.of_node;
int i, err = 0;
u32 reg;
@@ -1634,8 +1637,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
/* Get PHY mode from device-tree */
if (np) {
/* Default to RGMII. It's a gigabit part after all */
- phy_intf = of_get_phy_mode(np);
- if (phy_intf < 0)
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
phy_intf = PHY_INTERFACE_MODE_RGMII;
/* Aspeed only supports these. I don't know about other IP
@@ -1717,20 +1720,41 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd->link_up ? "up" : "down");
}
-static void ftgmac100_setup_clk(struct ftgmac100 *priv)
+static int ftgmac100_setup_clk(struct ftgmac100 *priv)
{
- priv->clk = devm_clk_get(priv->dev, NULL);
- if (IS_ERR(priv->clk))
- return;
+ struct clk *clk;
+ int rc;
- clk_prepare_enable(priv->clk);
+ clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ priv->clk = clk;
+ rc = clk_prepare_enable(priv->clk);
+ if (rc)
+ return rc;
/* Aspeed specifies a 100MHz clock is required for up to
* 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
* is sufficient
*/
- clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
- FTGMAC_100MHZ);
+ rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
+ FTGMAC_100MHZ);
+ if (rc)
+ goto cleanup_clk;
+
+ /* RCLK is for RMII, typically used for NCSI. Optional because it's
+ * not necessary if it's the AST2400 MAC, the MAC is configured for
+ * RGMII, or the controller is not an ASPEED-based controller.
+ */
+ priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
+ rc = clk_prepare_enable(priv->rclk);
+ if (!rc)
+ return 0;
+
+cleanup_clk:
+ clk_disable_unprepare(priv->clk);
+
+ return rc;
}
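
Worth noting for the error handling above: devm_clk_get_optional() returns NULL, not an error pointer, when "RCLK" is simply absent, and the common clk API treats a NULL clk as a no-op, so clk_prepare_enable(priv->rclk) succeeds trivially on platforms without the gate. A minimal sketch of the pattern with explicit error checking:

struct clk *rclk;

rclk = devm_clk_get_optional(dev, "RCLK");
if (IS_ERR(rclk))
	return PTR_ERR(rclk);		/* a real error, e.g. probe deferral */

return clk_prepare_enable(rclk);	/* returns 0 when rclk == NULL */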
static int ftgmac100_probe(struct platform_device *pdev)
@@ -1852,8 +1876,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_setup_mdio;
}
- if (priv->is_aspeed)
- ftgmac100_setup_clk(priv);
+ if (priv->is_aspeed) {
+ err = ftgmac100_setup_clk(priv);
+ if (err)
+ goto err_ncsi_dev;
+ }
/* Default ring sizes */
priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
@@ -1885,8 +1912,10 @@ static int ftgmac100_probe(struct platform_device *pdev)
return 0;
-err_ncsi_dev:
err_register_netdev:
+ clk_disable_unprepare(priv->rclk);
+ clk_disable_unprepare(priv->clk);
+err_ncsi_dev:
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1908,6 +1937,7 @@ static int ftgmac100_remove(struct platform_device *pdev)
unregister_netdev(netdev);
+ clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
/* There's a small chance the reset task will have been re-queued,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index b4b82b9c5cd6..6a9d12dad5d9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -178,31 +178,9 @@ struct fm_port_fqs {
/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
-/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
- size_t res = DPAA_BP_RAW_SIZE / 4;
- u8 i;
-
- for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
- res *= 2;
- return res;
-}
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
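
With the move to one buffer pool, every Rx buffer is a full page and the usable size is simply the page minus the skb_shared_info overhead. A hedged arithmetic sketch (exact sizes are config- and arch-dependent):

/* SKB_WITH_OVERHEAD(x) = (x) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 * on a typical 64-bit build with a 320-byte, cacheline-aligned
 * skb_shared_info this gives dpaa_bp_size(4096) = 4096 - 320 = 3776.
 */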
static int dpaa_max_frm;
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
/* Allow the Fman (Tx) port to process in-flight frames before we
* try switching it off.
*/
- usleep_range(5000, 10000);
+ msleep(200);
err = mac_dev->stop(mac_dev);
if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
phy_disconnect(net_dev->phydev);
net_dev->phydev = NULL;
+ msleep(200);
+
return err;
}
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
static void dpaa_bps_free(struct dpaa_priv *priv)
{
- int i;
-
- for (i = 0; i < DPAA_BPS_NUM; i++)
- dpaa_bp_free(priv->dpaa_bps[i]);
+ dpaa_bp_free(priv->dpaa_bp);
}
/* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
qman_release_pool(rx_pool_channel);
}
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{
u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
for_each_cpu_and(cpu, cpus, cpu_online_mask) {
portal = qman_get_affine_portal(cpu);
qman_p_static_dequeue_add(portal, pool);
+ qman_start_using_portal(portal, dev);
}
}
@@ -901,7 +879,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
if (num_portals == 0)
dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
+ "No Qman software (affine) channels found\n");
/* Initialize each FQ in the list */
list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
return err;
}
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+ struct dpaa_fq *errq,
struct dpaa_fq *defq, struct dpaa_fq *pcdq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
struct fman_port_params params;
- int i, err;
+ int err;
memset(&params, 0, sizeof(params));
memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
}
- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
- }
+ rx_p->ext_buf_pools.num_of_pools_used = 1;
+ rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
err = fman_port_config(port, &params);
if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}
static int dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
+ struct dpaa_bp *bp,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
if (err)
return err;
- err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
port_fqs->rx_defq, port_fqs->rx_pcdq,
&buf_layout[RX]);
@@ -1335,15 +1310,16 @@ static void dpaa_fd_release(const struct net_device *net_dev,
vaddr = phys_to_virt(qm_fd_addr(fd));
sgt = vaddr + qm_fd_get_offset(fd);
- dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
dpaa_release_sgt_members(sgt);
- addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dpaa_bp->dev, addr)) {
- dev_err(dpaa_bp->dev, "DMA mapping failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+ virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+ netdev_err(net_dev, "DMA mapping failed\n");
return;
}
bm_buffer_set64(&bmb, addr);
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
struct sk_buff *skb,
struct qm_fd *fd,
- char *parse_results)
+ void *parse_results)
{
struct fman_prs_result *parse_result;
u16 ethertype = ntohs(skb->protocol);
@@ -1488,25 +1464,24 @@ return_error:
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
- struct device *dev = dpaa_bp->dev;
+ struct net_device *net_dev = dpaa_bp->priv->net_dev;
struct bm_buffer bmb[8];
dma_addr_t addr;
- void *new_buf;
+ struct page *p;
u8 i;
for (i = 0; i < 8; i++) {
- new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
- if (unlikely(!new_buf)) {
- dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
- dpaa_bp->raw_size);
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
goto release_previous_buffs;
}
- new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
- addr = dma_map_single(dev, new_buf,
- dpaa_bp->size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dpaa_bp->dev, "DMA map failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+ addr))) {
+ netdev_err(net_dev, "DMA map failed\n");
goto release_previous_buffs;
}
@@ -1581,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
struct dpaa_bp *dpaa_bp;
int *countptr;
- int res, i;
+ int res;
+
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bp = priv->dpaa_bps[i];
- if (!dpaa_bp)
- return -EINVAL;
- countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- }
return 0;
}
@@ -1600,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
* Skb freeing is not handled here.
*
* This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading an invalid transmission timestamp, callers on the error paths
+ * must set @ts to false.
*
* Return the skb backpointer, since for S/G frames the buffer containing it
* gets freed here.
*/
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
- const struct qm_fd *fd)
+ const struct qm_fd *fd, bool ts)
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr = phys_to_virt(addr);
const struct qm_sg_entry *sgt;
- struct sk_buff **skbh, *skb;
- int nr_frags, i;
+ struct sk_buff *skb;
u64 ns;
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- skb = *skbh;
-
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
- &ns)) {
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else {
- dev_warn(dev, "fman_port_get_tstamp failed!\n");
- }
- }
+ int i;
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr,
- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
- dma_dir);
+ dma_unmap_page(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);
- /* The sgt buffer has been allocated with netdev_alloc_frag(),
- * it's from lowmem.
- */
+ /* The SGT buffer is now allocated with dev_alloc_pages() (see
+ * skb_to_sg_fd() below), so it's from lowmem.
+ */
- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
/* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i <= nr_frags; i++) {
+ for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
- dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
qm_sg_entry_get_len(&sgt[i]), dma_dir);
}
-
- /* Free the page frag that we allocated on Tx */
- skb_free_frag(phys_to_virt(addr));
} else {
- dma_unmap_single(dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ priv->tx_headroom + qm_fd_get_length(fd),
+ dma_dir);
+ }
+
+ skb = *(struct sk_buff **)vaddr;
+
+ /* DMA unmapping is required before accessing the HW provided info */
+ if (ts && priv->tx_tstamp &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
}
+ if (qm_fd_get_format(fd) == qm_fd_sg)
+ /* Free the page that we allocated on Tx for the SGT */
+ free_pages((unsigned long)vaddr, 0);
+
return skb;
}
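
Throughout these Tx paths the driver stashes the skb pointer in the first bytes of the DMA-mapped buffer, which is how the cleanup above recovers it from nothing but the frame descriptor address. In sketch form:

/* On transmit (see skb_to_contig_fd()/skb_to_sg_fd() below): */
*(struct sk_buff **)buff_start = skb;

/* On confirmation, with only the FD in hand: */
skb = *(struct sk_buff **)phys_to_virt(qm_fd_addr(fd));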
@@ -1715,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
return skb;
free_buffer:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1762,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
goto free_buffers;
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1815,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
/* free the SG table buffer */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return skb;
@@ -1832,7 +1812,7 @@ free_buffers:
for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (dpaa_bp) {
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1843,7 +1823,7 @@ free_buffers:
break;
}
/* free the SGT fragment */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1853,9 +1833,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
int *offset)
{
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
+ unsigned char *buff_start;
struct sk_buff **skbh;
dma_addr_t addr;
int err;
@@ -1864,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
* available, so just use that for offset.
*/
fd->bpid = FSL_DPAA_BPID_INV;
- buffer_start = skb->data - priv->tx_headroom;
+ buff_start = skb->data - priv->tx_headroom;
dma_dir = DMA_TO_DEVICE;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
/* Enable L3/L4 hardware checksum computation.
@@ -1876,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1889,9 +1868,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ priv->tx_headroom + skb->len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
return -EINVAL;
@@ -1907,24 +1886,22 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
const int nr_frags = skb_shinfo(skb)->nr_frags;
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
struct qm_sg_entry *sgt;
struct sk_buff **skbh;
- int i, j, err, sz;
- void *buffer_start;
+ void *buff_start;
skb_frag_t *frag;
dma_addr_t addr;
size_t frag_len;
- void *sgt_buf;
-
- /* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
- sgt_buf = netdev_alloc_frag(sz);
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
- sz);
+ struct page *p;
+ int i, j, err;
+
+ /* get a page to store the SGTable */
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
return -ENOMEM;
}
+ buff_start = page_address(p);
/* Enable L3/L4 hardware checksum computation.
*
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,15 +1918,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
}
/* SGT[0] is used by the linear part */
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
frag_len = skb_headlen(skb);
qm_sg_entry_set_len(&sgt[0], frag_len);
sgt[0].bpid = FSL_DPAA_BPID_INV;
sgt[0].offset = 0;
- addr = dma_map_single(dev, skb->data,
+ addr = dma_map_single(priv->tx_dma_dev, skb->data,
skb_headlen(skb), dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg0_map_failed;
}
@@ -1960,10 +1937,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dev, frag, 0,
+ addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
frag_len, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg_map_failed;
}
@@ -1979,17 +1956,17 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* Set the final bit in the last used entry of the SGT */
qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
+ /* set fd offset to priv->tx_headroom */
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
/* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
- addr = dma_map_single(dev, buffer_start,
- priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ addr = dma_map_page(priv->tx_dma_dev, p, 0,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sgt_map_failed;
}
@@ -2003,11 +1980,11 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
sgt_map_failed:
sg_map_failed:
for (j = 0; j < i; j++)
- dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
- skb_free_frag(sgt_buf);
+ free_pages((unsigned long)buff_start, 0);
return err;
}
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
- dpaa_cleanup_tx_fd(priv, &fd);
+ dpaa_cleanup_tx_fd(priv, &fd, false);
skb_to_fd_failed:
enomem:
percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
percpu_priv->stats.tx_errors++;
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb(skb);
}
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
percpu_priv->tx_confirm++;
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, true);
consume_skb(skb);
}
@@ -2304,11 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
return qman_cb_dqrr_consume;
}
- dpaa_bp = dpaa_bpid2pool(fd->bpid);
- if (!dpaa_bp)
- return qman_cb_dqrr_consume;
-
- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
/* prefetch the first 64 bytes of the frame or the SGT start */
vaddr = phys_to_virt(addr);
@@ -2430,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
percpu_priv->stats.tx_fifo_errors++;
count_ern(percpu_priv, msg);
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb_any(skb);
}
@@ -2663,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
{
dma_addr_t addr = bm_buf_addr(bmb);
- dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
skb_free_frag(phys_to_virt(addr));
}
@@ -2764,21 +2739,46 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
static int dpaa_eth_probe(struct platform_device *pdev)
{
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
struct net_device *net_dev = NULL;
+ struct dpaa_bp *dpaa_bp = NULL;
struct dpaa_fq *dpaa_fq, *tmp;
struct dpaa_priv *priv = NULL;
struct fm_port_fqs port_fqs;
struct mac_device *mac_dev;
- int err = 0, i, channel;
+ int err = 0, channel;
struct device *dev;
- /* device used for DMA mapping */
- dev = pdev->dev.parent;
- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (err) {
- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ dev = &pdev->dev;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+ err = bman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to bman portals probe error\n");
+ return -ENODEV;
+ }
+ err = qman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to qman portals probe error\n");
+ return -ENODEV;
}
/* Allocate this early, so we can store relevant information in
@@ -2801,11 +2801,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
- dev_err(dev, "dpaa_mac_dev_get() failed\n");
+ netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
goto free_netdev;
}
+ /* Devices used for DMA mapping */
+ priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+ priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+ err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+ if (!err)
+ err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+ DMA_BIT_MASK(40));
+ if (err) {
+ netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+ return err;
+ }
+
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
* we choose conservatively and let the user explicitly set a higher
* MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2822,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* bp init */
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bps[i] = dpaa_bp_alloc(dev);
- if (IS_ERR(dpaa_bps[i])) {
- err = PTR_ERR(dpaa_bps[i]);
- goto free_dpaa_bps;
- }
- /* the raw size of the buffers used for reception */
- dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
- /* avoid runtime computations by keeping the usable size here */
- dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
- dpaa_bps[i]->dev = dev;
-
- err = dpaa_bp_alloc_pool(dpaa_bps[i]);
- if (err < 0)
- goto free_dpaa_bps;
- priv->dpaa_bps[i] = dpaa_bps[i];
+ dpaa_bp = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bp)) {
+ err = PTR_ERR(dpaa_bp);
+ goto free_dpaa_bps;
}
+ /* the raw size of the buffers used for reception */
+ dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+ dpaa_bp->priv = priv;
+
+ err = dpaa_bp_alloc_pool(dpaa_bp);
+ if (err < 0)
+ goto free_dpaa_bps;
+ priv->dpaa_bp = dpaa_bp;
INIT_LIST_HEAD(&priv->dpaa_fq_list);
@@ -2864,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Walk the CPUs with affine portals
* and add this pool channel to each's dequeue mask.
*/
- dpaa_eth_add_channel(priv->channel);
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
@@ -2896,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
goto free_dpaa_fqs;
@@ -2955,7 +2965,7 @@ static int dpaa_remove(struct platform_device *pdev)
struct device *dev;
int err;
- dev = pdev->dev.parent;
+ dev = &pdev->dev;
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index f7e59e8db075..fc2cc4c48e06 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -47,8 +47,6 @@
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -80,9 +78,11 @@ struct dpaa_fq_cbs {
struct qman_fq egress_ern;
};
+struct dpaa_priv;
+
struct dpaa_bp {
- /* device used in the DMA mapping operations */
- struct device *dev;
+ /* used in the DMA mapping operations */
+ struct dpaa_priv *priv;
/* current number of buffers in the buffer pool alloted to each CPU */
int __percpu *percpu_count;
/* all buffers allocated for this pool have this raw size */
@@ -146,13 +146,15 @@ struct dpaa_buffer_layout {
struct dpaa_priv {
struct dpaa_percpu_priv __percpu *percpu_priv;
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+ struct dpaa_bp *dpaa_bp;
/* Store here the needed Tx headroom for convenience and speed
* (even though it can be computed based on the fields of buf_layout)
*/
u16 tx_headroom;
struct net_device *net_dev;
struct mac_device *mac_dev;
+ struct device *rx_dma_dev;
+ struct device *tx_dma_dev;
struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 0d9b185e317f..ee62d25cac81 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
{
struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
ssize_t bytes = 0;
- int i = 0;
- for (i = 0; i < DPAA_BPS_NUM; i++)
- bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
- priv->dpaa_bps[i]->bpid);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+ priv->dpaa_bp->bpid);
return bytes;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 7ce2e99b594d..66d150872d48 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
"tx S/G",
"tx error",
"rx error",
+ "rx dropped",
+ "tx dropped",
};
static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -78,10 +80,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- if (!net_dev->phydev) {
- netdev_dbg(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return 0;
- }
phy_ethtool_ksettings_get(net_dev->phydev, cmd);
@@ -93,10 +93,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
{
int err;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return -ENODEV;
- }
err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if (err < 0)
@@ -140,10 +138,8 @@ static int dpaa_nway_reset(struct net_device *net_dev)
{
int err;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return -ENODEV;
- }
err = 0;
if (net_dev->phydev->autoneg) {
@@ -165,10 +161,8 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return;
- }
epause->autoneg = mac_dev->autoneg_pause;
epause->rx_pause = mac_dev->rx_pause_active;
@@ -223,7 +217,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
unsigned int total_stats, num_stats;
num_stats = num_online_cpus() + 1;
- total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+ total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
DPAA_STATS_GLOBAL_LEN;
switch (type) {
@@ -235,10 +229,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
}
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
- int crr_cpu, u64 *bp_count, u64 *data)
+ int crr_cpu, u64 bp_count, u64 *data)
{
int num_values = num_cpus + 1;
- int crr = 0, j;
+ int crr = 0;
/* update current CPU's stats and also add them to the total values */
data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +256,27 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
- for (j = 0; j < DPAA_BPS_NUM; j++) {
- data[crr * num_values + crr_cpu] = bp_count[j];
- data[crr++ * num_values + num_cpus] += bp_count[j];
- }
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
}
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats, u64 *data)
{
- u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
struct dpaa_percpu_priv *percpu_priv;
struct dpaa_rx_errors rx_errors;
unsigned int num_cpus, offset;
+ u64 bp_count, cg_time, cg_num;
struct dpaa_ern_cnt ern_cnt;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
- int total_stats, i, j;
+ int total_stats, i;
bool cg_status;
total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < DPAA_BPS_NUM; j++) {
- dpaa_bp = priv->dpaa_bps[j];
- if (!dpaa_bp->percpu_count)
- continue;
- bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
- }
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp->percpu_count)
+ continue;
+ bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
rx_errors.dme += percpu_priv->rx_errors.dme;
rx_errors.fpe += percpu_priv->rx_errors.fpe;
rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
copy_stats(percpu_priv, num_cpus, i, bp_count, data);
}
- offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+ offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
memcpy(strings, string_cpu, ETH_GSTRING_LEN);
strings += ETH_GSTRING_LEN;
}
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN,
- "bpool %c [CPU %d]", 'a' + i, j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
- 'a' + i);
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN,
+ "bpool [CPU %d]", j);
memcpy(strings, string_cpu, ETH_GSTRING_LEN);
strings += ETH_GSTRING_LEN;
}
+ snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+
memcpy(strings, dpaa_stats_global, size);
}
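
The reworked copy_stats() above keeps the driver's row-major stats layout: each counter occupies a row of num_cpus + 1 slots, one column per online CPU plus a trailing TOTAL column. A minimal sketch of that indexing, with an illustrative helper name that is not part of the patch:

	/* One stats row: per-CPU columns [0..num_cpus-1], TOTAL at [num_cpus].
	 * Assumes 'data' starts zeroed, so the TOTAL column can accumulate.
	 */
	static void stat_row_fill(u64 *data, int num_cpus, int row, int cpu, u64 val)
	{
		int num_values = num_cpus + 1;

		data[row * num_values + cpu] = val;       /* this CPU's column */
		data[row * num_values + num_cpus] += val; /* running TOTAL */
	}
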
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index fbef2829f3de..c6fb8e4021ac 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -2,6 +2,7 @@
config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
+ select PHYLINK
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index d1e78cdd512f..69184ca3b7b9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 19379bae0144..7ff147e89426 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2019 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -221,6 +221,7 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
dma_addr_t addr)
{
+ int retries = 0;
int err;
ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
@@ -229,8 +230,11 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
ch->xdp.drop_bufs,
- ch->xdp.drop_cnt)) == -EBUSY)
+ ch->xdp.drop_cnt)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
cpu_relax();
+ }
if (err) {
free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
@@ -458,7 +462,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
struct dpaa2_eth_fq *fq = NULL;
struct dpaa2_dq *dq;
const struct dpaa2_fd *fd;
- int cleaned = 0;
+ int cleaned = 0, retries = 0;
int is_last;
do {
@@ -469,6 +473,11 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
* the store until we get some sort of valid response
* token (either a valid frame or an "empty dequeue")
*/
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
+ netdev_err_once(priv->net_dev,
+ "Unable to read a valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
continue;
}
@@ -477,6 +486,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fq->consume(priv, ch, fd, fq);
cleaned++;
+ retries = 0;
} while (!is_last);
if (!cleaned)
@@ -949,6 +959,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
struct page *page;
dma_addr_t addr;
+ int retries = 0;
int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
@@ -980,8 +991,11 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
release_bufs:
/* In case the portal is busy, retry until successful */
while ((err = dpaa2_io_service_release(ch->dpio, bpid,
- buf_array, i)) == -EBUSY)
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
cpu_relax();
+ }
/* If release command failed, clean up and bail out;
* not much else we can do about it
@@ -1032,16 +1046,21 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ int retries = 0;
int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
buf_array, count);
if (ret < 0) {
+ if (ret == -EBUSY &&
+ retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ continue;
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
free_bufs(priv, buf_array, ret);
+ retries = 0;
} while (ret);
}
@@ -1094,7 +1113,7 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
ch->store);
dequeues++;
cpu_relax();
- } while (err == -EBUSY);
+ } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
ch->stats.dequeue_portal_busy += dequeues;
if (unlikely(err))
@@ -1118,6 +1137,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct netdev_queue *nq;
int store_cleaned, work_done;
struct list_head rx_list;
+ int retries = 0;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -1136,7 +1156,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
refill_pool(priv, ch, priv->bpid);
store_cleaned = consume_frames(ch, &fq);
- if (!store_cleaned)
+ if (store_cleaned <= 0)
break;
if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
@@ -1163,7 +1183,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
do {
err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
cpu_relax();
- } while (err == -EBUSY);
+ } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
@@ -1235,8 +1255,6 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
priv->rx_td_enabled = enable;
}
-static void update_tx_fqids(struct dpaa2_eth_priv *priv);
-
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
@@ -1258,12 +1276,17 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
!!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (priv->mac)
+ goto out;
+
	/* Check link state; speed / duplex changes are not treated yet */
if (priv->link_state.up == state.up)
goto out;
if (state.up) {
- update_tx_fqids(priv);
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
} else {
@@ -1295,17 +1318,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- /* We'll only start the txqs when the link is actually ready; make sure
- * we don't race against the link up notification, which may come
- * immediately after dpni_enable();
- */
- netif_tx_stop_all_queues(net_dev);
+ if (!priv->mac) {
+ /* We'll only start the txqs when the link is actually ready;
+ * make sure we don't race against the link up notification,
+ * which may come immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+
+ /* Also, explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+ }
enable_ch_napi(priv);
- /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
- * return true and cause 'ip link show' to report the LOWER_UP flag,
- * even though the link notification wasn't even received.
- */
- netif_carrier_off(net_dev);
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
@@ -1313,13 +1340,17 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- /* If the DPMAC object has already processed the link up interrupt,
- * we have to learn the link state ourselves.
- */
- err = link_state_update(priv);
- if (err < 0) {
- netdev_err(net_dev, "Can't update link state\n");
- goto link_state_err;
+ if (!priv->mac) {
+ /* If the DPMAC object has already processed the link up
+ * interrupt, we have to learn the link state ourselves.
+ */
+ err = link_state_update(priv);
+ if (err < 0) {
+ netdev_err(net_dev, "Can't update link state\n");
+ goto link_state_err;
+ }
+ } else {
+ phylink_start(priv->mac->phylink);
}
return 0;
@@ -1394,8 +1425,12 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- netif_tx_stop_all_queues(net_dev);
- netif_carrier_off(net_dev);
+ if (!priv->mac) {
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+ } else {
+ phylink_stop(priv->mac->phylink);
+ }
/* On dpni_disable(), the MC firmware will:
* - stop MAC Rx and wait for all Rx frames to be enqueued to software
@@ -1772,11 +1807,8 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
if (prog && !xdp_mtu_valid(priv, dev->mtu))
return -EINVAL;
- if (prog) {
- prog = bpf_prog_add(prog, priv->num_channels);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
- }
+ if (prog)
+ bpf_prog_add(prog, priv->num_channels);
up = netif_running(dev);
need_update = (!!priv->xdp_prog != !!prog);
@@ -2046,7 +2078,6 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpcon;
struct device *dev = priv->net_dev->dev.parent;
- struct dpcon_attr attrs;
int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
@@ -2071,12 +2102,6 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
goto close;
}
- err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
- if (err) {
- dev_err(dev, "dpcon_get_attributes() failed\n");
- goto close;
- }
-
err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_enable() failed\n");
@@ -2232,8 +2257,16 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
- if (err == -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER) {
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+ nctx = &channel->nctx;
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+ free_channel(priv, channel);
+ }
+ priv->num_channels = 0;
return err;
+ }
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
@@ -3332,12 +3365,56 @@ static int poll_link_state(void *arg)
return 0;
}
+static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ return 0;
+
+ mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = priv->mc_io;
+ mac->net_dev = priv->net_dev;
+
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ kfree(mac);
+ return err;
+ }
+ priv->mac = mac;
+
+ return 0;
+}
+
+static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
+{
+ if (!priv->mac)
+ return;
+
+ dpaa2_mac_disconnect(priv->mac);
+ kfree(priv->mac);
+ priv->mac = NULL;
+}
+
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
u32 status = ~0;
struct device *dev = (struct device *)arg;
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -3350,8 +3427,17 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
- if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
set_mac_addr(netdev_priv(net_dev));
+ update_tx_fqids(priv);
+
+ rtnl_lock();
+ if (priv->mac)
+ dpaa2_eth_disconnect_mac(priv);
+ else
+ dpaa2_eth_connect_mac(priv);
+ rtnl_unlock();
+ }
return IRQ_HANDLED;
}
@@ -3527,6 +3613,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->do_link_poll = true;
}
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
@@ -3541,6 +3631,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
return 0;
err_netdev_reg:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
@@ -3583,6 +3675,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+ rtnl_lock();
+ dpaa2_eth_disconnect_mac(priv);
+ rtnl_unlock();
+
unregister_netdev(net_dev);
if (priv->do_link_poll)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 8a0e65b3267f..7635db3ef903 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -17,6 +17,7 @@
#include "dpaa2-eth-trace.h"
#include "dpaa2-eth-debugfs.h"
+#include "dpaa2-mac.h"
#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -245,6 +246,14 @@ static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
*/
#define DPAA2_ETH_ENQUEUE_RETRIES 10
+/* Number of times to retry DPIO portal operations while waiting
+ * for the portal to finish executing the current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_ETH_SWP_BUSY_RETRIES 1000
+
/* Driver statistics, other than those in struct rtnl_link_stats64.
* These are usually collected per-CPU and aggregated by ethtool.
*/
@@ -407,6 +416,8 @@ struct dpaa2_eth_priv {
#ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg;
#endif
+
+ struct dpaa2_mac *mac;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
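
All of the -EBUSY loops touched in this patch now share the bound defined above. Reduced to its core, the pattern is the following sketch; dpaa2_io_service_release() is the real DPIO API, the surrounding variables are illustrative:

	int retries = 0;
	int err;

	/* Retry the portal operation on -EBUSY, but only a bounded number
	 * of times, so unresponsive hardware cannot wedge us in the loop.
	 */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, count)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;	/* err stays -EBUSY for the caller to handle */
		cpu_relax();
	}
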
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 0aa1c34019bb..96676abcebd5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,6 +85,10 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ if (priv->mac)
+ return phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+
link_settings->base.autoneg = AUTONEG_DISABLE;
if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
link_settings->base.duplex = DUPLEX_FULL;
@@ -93,12 +97,29 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
return 0;
}
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->mac)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+}
+
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
+ if (priv->mac) {
+ phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ return;
+ }
+
pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
pause->tx_pause = pause->rx_pause ^
!!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
@@ -118,6 +139,9 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
+ if (priv->mac)
+ return phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -149,6 +173,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -162,15 +187,22 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ if (priv->mac)
+ dpaa2_mac_get_strings(p);
break;
}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
+ int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ if (priv->mac)
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
default:
return -EOPNOTSUPP;
}
@@ -216,7 +248,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
if (err == -EINVAL)
/* Older firmware versions don't support all pages */
memset(&dpni_stats, 0, sizeof(dpni_stats));
- else
+ else if (err)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
num_cnt = dpni_stats_page_size[j] / sizeof(u64);
@@ -269,6 +301,9 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
return;
}
*(data + i++) = buf_cnt;
+
+ if (priv->mac)
+ dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
@@ -728,6 +763,7 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
.get_pauseparam = dpaa2_eth_get_pauseparam,
.set_pauseparam = dpaa2_eth_set_pauseparam,
.get_sset_count = dpaa2_eth_get_sset_count,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
new file mode 100644
index 000000000000..84233e467ed1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "dpaa2-eth.h"
+#include "dpaa2-mac.h"
+
+#define phylink_to_dpaa2_mac(config) \
+ container_of((config), struct dpaa2_mac, phylink_config)
+
+static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
+{
+ *if_mode = PHY_INTERFACE_MODE_NA;
+
+ switch (eth_if) {
+ case DPMAC_ETH_IF_RGMII:
+ *if_mode = PHY_INTERFACE_MODE_RGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Caller must call of_node_put on the returned value */
+static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
+{
+ struct device_node *dpmacs, *dpmac = NULL;
+ u32 id;
+ int err;
+
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+
+ while ((dpmac = of_get_next_child(dpmacs, dpmac)) != NULL) {
+ err = of_property_read_u32(dpmac, "reg", &id);
+ if (err)
+ continue;
+ if (id == dpmac_id)
+ break;
+ }
+
+ of_node_put(dpmacs);
+
+ return dpmac;
+}
+
+static int dpaa2_mac_get_if_mode(struct device_node *node,
+ struct dpmac_attr attr)
+{
+ phy_interface_t if_mode;
+ int err;
+
+ err = of_get_phy_mode(node, &if_mode);
+ if (!err)
+ return if_mode;
+
+ err = phy_mode(attr.eth_if, &if_mode);
+ if (!err)
+ return if_mode;
+
+ return err;
+}
+
+static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return (interface != mac->if_mode);
+ default:
+ return true;
+ }
+}
+
+static void dpaa2_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
+ goto empty_set;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ default:
+ goto empty_set;
+ }
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+
+ return;
+
+empty_set:
+ linkmode_zero(supported);
+}
+
+static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ if (state->speed != SPEED_UNKNOWN)
+ dpmac_state->rate = state->speed;
+
+ if (state->duplex != DUPLEX_UNKNOWN) {
+ if (!state->duplex)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+ }
+
+ if (state->an_enabled)
+ dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
+
+ if (state->pause & MLO_PAUSE_RX)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (!!(state->pause & MLO_PAUSE_RX) ^ !!(state->pause & MLO_PAUSE_TX))
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 1;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 0;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
+ .validate = dpaa2_mac_validate,
+ .mac_config = dpaa2_mac_config,
+ .mac_link_up = dpaa2_mac_link_up,
+ .mac_link_down = dpaa2_mac_link_down,
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io)
+{
+ struct dpmac_attr attr;
+ bool fixed = false;
+ u16 mc_handle = 0;
+ int err;
+
+ err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
+ &mc_handle);
+ if (err || !mc_handle)
+ return false;
+
+ err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
+ if (err)
+ goto out;
+
+ if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
+ fixed = true;
+
+out:
+ dpmac_close(mc_io, 0, mc_handle);
+
+ return fixed;
+}
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ struct device_node *dpmac_node;
+ struct phylink *phylink;
+ struct dpmac_attr attr;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpmac_node = dpaa2_mac_get_node(attr.id);
+ if (!dpmac_node) {
+ netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
+ err = -ENODEV;
+ goto err_close_dpmac;
+ }
+
+ err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ if (err < 0) {
+ err = -EINVAL;
+ goto err_put_node;
+ }
+ mac->if_mode = err;
+
+	/* The MAC does not have the capability to add RGMII delays, so
+ * error out if the interface mode requests them and there is no PHY
+ * to act upon them
+ */
+ if (of_phy_is_fixed_link(dpmac_node) &&
+ (mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_err(net_dev, "RGMII delay not supported\n");
+ err = -EINVAL;
+ goto err_put_node;
+ }
+
+ mac->phylink_config.dev = &net_dev->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&mac->phylink_config,
+ of_fwnode_handle(dpmac_node), mac->if_mode,
+ &dpaa2_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_node;
+ }
+ mac->phylink = phylink;
+
+ err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
+ if (err) {
+ netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
+ goto err_phylink_destroy;
+ }
+
+ of_node_put(dpmac_node);
+
+ return 0;
+
+err_phylink_destroy:
+ phylink_destroy(mac->phylink);
+err_put_node:
+ of_node_put(dpmac_node);
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
+{
+ if (!mac->phylink)
+ return;
+
+ phylink_disconnect_phy(mac->phylink);
+ phylink_destroy(mac->phylink);
+ dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+}
+
+static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
+ [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames",
+ [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok",
+ [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors",
+ [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards",
+ [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast",
+ [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast",
+ [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast",
+ [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes",
+ [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes",
+ [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes",
+ [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes",
+ [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes",
+ [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes",
+ [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes",
+ [DPMAC_CNT_ING_FRAG] = "[mac] rx frags",
+ [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber",
+ [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors",
+ [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized",
+ [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause",
+ [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes",
+ [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok",
+ [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast",
+ [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast",
+ [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast",
+ [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors",
+ [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized",
+ [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause",
+ [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes",
+};
+
+#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
+
+int dpaa2_mac_get_sset_count(void)
+{
+ return DPAA2_MAC_NUM_STATS;
+}
+
+void dpaa2_mac_get_strings(u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ int i, err;
+ u64 value;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle,
+ i, &value);
+ if (err) {
+ netdev_err_once(mac->net_dev,
+ "dpmac_get_counter error %d\n", err);
+ *(data + i) = U64_MAX;
+ continue;
+ }
+ *(data + i) = value;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
new file mode 100644
index 000000000000..4da8079b9155
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+#ifndef DPAA2_MAC_H
+#define DPAA2_MAC_H
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac {
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_link_state state;
+ struct net_device *net_dev;
+ struct fsl_mc_io *mc_io;
+
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ phy_interface_t if_mode;
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io);
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac);
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+int dpaa2_mac_get_sset_count(void);
+
+void dpaa2_mac_get_strings(u8 *data);
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+
+#endif /* DPAA2_MAC_H */
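
Taken together, the API above implies a short consumer lifecycle, visible in dpaa2_eth_connect_mac() and dpaa2_eth_disconnect_mac() earlier in this patch. A condensed sketch with error handling trimmed; it restates the patch rather than adding new driver code:

	struct dpaa2_mac *mac = kzalloc(sizeof(*mac), GFP_KERNEL);

	mac->mc_dev = dpmac_dev;	/* DPMAC peer from fsl_mc_get_endpoint() */
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	if (!dpaa2_mac_connect(mac))	/* creates phylink, attaches the PHY */
		priv->mac = mac;

	/* ndo_open/ndo_stop then drive phylink_start()/phylink_stop() */

	dpaa2_mac_disconnect(priv->mac);	/* on remove or endpoint change */
	kfree(priv->mac);
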
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
new file mode 100644
index 000000000000..3ea51dd9374b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 4
+#define DPMAC_CMD_BASE_VERSION 1
+#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+
+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
+
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPMAC_MASK(field) \
+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+ DPMAC_##field##_SHIFT)
+
+#define dpmac_set_field(var, field, val) \
+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field) \
+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
+
+struct dpmac_cmd_open {
+ __le32 dpmac_id;
+};
+
+struct dpmac_rsp_get_attributes {
+ u8 eth_if;
+ u8 link_type;
+ __le16 id;
+ __le32 max_rate;
+};
+
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ __le64 options;
+ __le32 rate;
+ __le32 pad0;
+ /* from lsb: up:1, state_valid:1 */
+ u8 state;
+ u8 pad1[7];
+ __le64 supported;
+ __le64 advertising;
+};
+
+struct dpmac_cmd_get_counter {
+ u8 id;
+};
+
+struct dpmac_rsp_get_counter {
+ u64 pad;
+ u64 counter;
+};
+
+#endif /* _FSL_DPMAC_CMD_H */
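
The accessor macros above expand purely from the *_SHIFT/*_SIZE pairs. Worked through for the state byte of struct dpmac_cmd_set_link_state:

	u8 state = 0;

	/* DPMAC_MASK(STATE) == GENMASK(0, 0) == 0x1
	 * DPMAC_MASK(STATE_VALID) == GENMASK(1, 1) == 0x2
	 */
	dpmac_set_field(state, STATE, 1);	/* sets bit 0: link up */
	dpmac_set_field(state, STATE_VALID, 1);	/* sets bit 1: state is valid */

	/* reading back */
	int up = dpmac_get_field(state, STATE);	/* (state & 0x1) >> 0 == 1 */
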
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
new file mode 100644
index 000000000000..d5997b654562
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id: DPMAC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+ attr->eth_if = rsp_params->eth_if;
+ attr->link_type = rsp_params->link_type;
+ attr->id = le16_to_cpu(rsp_params->id);
+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
+ dpmac_set_field(cmd_params->state, STATE_VALID,
+ link_state->state_valid);
+ cmd_params->supported = cpu_to_le64(link_state->supported);
+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @id: The requested counter ID
+ * @value: Returned counter value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value)
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
+ dpmac_cmd->id = id;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
+ *value = le64_to_cpu(dpmac_rsp->counter);
+
+ return 0;
+}
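
Each helper above is the same MC command round-trip: encode a header, overlay the command struct on cmd.params, mc_send_command(), then decode the response in place. A hedged usage sketch chaining them the way dpaa2-mac does; the surrounding context is illustrative:

	u16 token;
	struct dpmac_attr attr;
	u64 frames;

	if (dpmac_open(mc_io, 0, dpmac_id, &token))
		return;		/* no control session, nothing to clean up */

	if (!dpmac_get_attributes(mc_io, 0, token, &attr))
		pr_info("dpmac@%u: max rate %u Mbps\n", attr.id, attr.max_rate);

	/* counters are fetched one ID at a time */
	if (!dpmac_get_counter(mc_io, 0, token,
			       DPMAC_CNT_ING_GOOD_FRAME, &frames))
		pr_info("rx frames ok: %llu\n", frames);

	dpmac_close(mc_io, 0, token);
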
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
new file mode 100644
index 000000000000..135f143097a5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+ DPMAC_LINK_TYPE_NONE,
+ DPMAC_LINK_TYPE_FIXED,
+ DPMAC_LINK_TYPE_PHY,
+ DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ * @DPMAC_ETH_IF_CAUI: CAUI interface
+ * @DPMAC_ETH_IF_1000BASEX: 1000BASEX interface
+ * @DPMAC_ETH_IF_USXGMII: USXGMII interface
+ */
+enum dpmac_eth_if {
+ DPMAC_ETH_IF_MII,
+ DPMAC_ETH_IF_RMII,
+ DPMAC_ETH_IF_SMII,
+ DPMAC_ETH_IF_GMII,
+ DPMAC_ETH_IF_RGMII,
+ DPMAC_ETH_IF_SGMII,
+ DPMAC_ETH_IF_QSGMII,
+ DPMAC_ETH_IF_XAUI,
+ DPMAC_ETH_IF_XFI,
+ DPMAC_ETH_IF_CAUI,
+ DPMAC_ETH_IF_1000BASEX,
+ DPMAC_ETH_IF_USXGMII,
+};
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id: DPMAC object ID
+ * @max_rate: Maximum supported rate - in Mbps
+ * @eth_if: Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+ u16 id;
+ u32 max_rate;
+ enum dpmac_eth_if eth_if;
+ enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr);
+
+/**
+ * DPMAC link configuration/state options
+ */
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
+/**
+ * Enable half-duplex mode
+ */
+#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
+/**
+ * Enable pause frames
+ */
+#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
+
+/**
+ * Advertised link speeds
+ */
+#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
+#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
+#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
+#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
+#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
+
+/**
+ * Advertise auto-negotiation enable
+ */
+#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+ int state_valid;
+ u64 supported;
+ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
+/**
+ * enum dpmac_counter_id - DPMAC counter types
+ *
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
+ * (up to max frame length specified),
+ * good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes
+ *			with a wrong CRC
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ * specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
+ * Occurs when a receive FIFO overflows.
+ *				 Also includes frames truncated as a result of
+ * the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ *			     (optionally used for wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
+ *			      bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ * (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
+ * frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ * (except for undersized/fragment frame).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
+ * frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ * pause frames.
+ * @DPMAC_CNT_EGR_GOOD_FRAME: counts frames transmitted without error, including
+ * pause frames.
+ */
+enum dpmac_counter_id {
+ DPMAC_CNT_ING_FRAME_64,
+ DPMAC_CNT_ING_FRAME_127,
+ DPMAC_CNT_ING_FRAME_255,
+ DPMAC_CNT_ING_FRAME_511,
+ DPMAC_CNT_ING_FRAME_1023,
+ DPMAC_CNT_ING_FRAME_1518,
+ DPMAC_CNT_ING_FRAME_1519_MAX,
+ DPMAC_CNT_ING_FRAG,
+ DPMAC_CNT_ING_JABBER,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ DPMAC_CNT_ING_ALIGN_ERR,
+ DPMAC_CNT_EGR_UNDERSIZED,
+ DPMAC_CNT_ING_OVERSIZED,
+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+ DPMAC_CNT_ING_BYTE,
+ DPMAC_CNT_ING_MCAST_FRAME,
+ DPMAC_CNT_ING_BCAST_FRAME,
+ DPMAC_CNT_ING_ALL_FRAME,
+ DPMAC_CNT_ING_UCAST_FRAME,
+ DPMAC_CNT_ING_ERR_FRAME,
+ DPMAC_CNT_EGR_BYTE,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ DPMAC_CNT_EGR_BCAST_FRAME,
+ DPMAC_CNT_EGR_UCAST_FRAME,
+ DPMAC_CNT_EGR_ERR_FRAME,
+ DPMAC_CNT_ING_GOOD_FRAME,
+ DPMAC_CNT_EGR_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value);
+
+#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index c219587bd334..edad4ca46327 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -50,3 +50,13 @@ config FSL_ENETC_HW_TIMESTAMPING
allocation has not been supported and it is too expensive to use
extended RX BDs if timestamping is not used, this option enables
extended RX BDs in order to support hardware timestamping.
+
+config FSL_ENETC_QOS
+ bool "ENETC hardware Time-sensitive Network support"
+ depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
+ help
+	  ENETC supports Time-Sensitive Networking (TSN) capabilities such as
+	  802.1Qbv, 802.1Qci and 802.1Qbu. These can be enabled or disabled
+	  from user space via QoS commands (tc) and are handled in the kernel
+	  by the QoS driver. Currently only taprio (802.1Qbv) and the Credit
+	  Based Shaper (802.1Qav) are supported.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index d200c27c3bf6..d0db33e5b6b7 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -5,9 +5,11 @@ common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index b6ff89307409..9db1b96ed9b9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -742,9 +742,14 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
if (val & ENETC_SIPCAPR0_RSS) {
- val = enetc_rd(hw, ENETC_SIRSSCAPR);
- si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
+ u32 rss;
+
+ rss = enetc_rd(hw, ENETC_SIRSSCAPR);
+ si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
+
+ if (val & ENETC_SIPCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
@@ -1314,8 +1319,12 @@ static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
static void adjust_link(struct net_device *ndev)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
+ if (priv->active_offloads & ENETC_F_QBV)
+ enetc_sched_speed_set(ndev);
+
phy_print_status(phydev);
}
@@ -1427,8 +1436,7 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
+static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
@@ -1436,9 +1444,6 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
u8 num_tc;
int i;
- if (type != TC_SETUP_QDISC_MQPRIO)
- return -EOPNOTSUPP;
-
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = mqprio->num_tc;
@@ -1483,6 +1488,21 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return 0;
}
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1599,7 +1619,10 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (cmd == SIOCGHWTSTAMP)
return enetc_hwtstamp_get(ndev, rq);
#endif
- return -EINVAL;
+
+ if (!ndev->phydev)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(ndev->phydev, rq, cmd);
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
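
For reference, each case in the new enetc_setup_tc() dispatch receives a different payload through type_data. The offload struct names below come from net/pkt_sched.h and annotate the contract; this is a reading aid, not additional code:

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:	/* type_data: struct tc_mqprio_qopt */
		return enetc_setup_tc_mqprio(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:	/* type_data: struct tc_taprio_qopt_offload */
		return enetc_setup_tc_taprio(ndev, type_data);
	case TC_SETUP_QDISC_CBS:	/* type_data: struct tc_cbs_qopt_offload */
		return enetc_setup_tc_cbs(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
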
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 541b4e2073fe..7ee0da6d0015 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -118,6 +118,8 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(2),
};
+#define ENETC_SI_F_QBV BIT(0)
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -133,6 +135,7 @@ struct enetc_si {
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ int hw_features;
};
#define ENETC_SI_ALIGN 32
@@ -173,6 +176,7 @@ struct enetc_cls_rule {
enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(0),
ENETC_F_TX_TSTAMP = BIT(1),
+ ENETC_F_QBV = BIT(2),
};
struct enetc_ndev_priv {
@@ -188,6 +192,8 @@ struct enetc_ndev_priv {
u16 msg_enable;
int active_offloads;
+	u32 speed; /* store speed to detect changes and update pspeed */
+
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -244,3 +250,14 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+
+#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
+void enetc_sched_speed_set(struct net_device *ndev);
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+#else
+#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
+#define enetc_sched_speed_set(ndev) (void)0
+#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index de466b71bf8f..201cbc362e33 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -32,7 +32,7 @@ static int enetc_cbd_unused(struct enetc_cbdr *r)
r->bd_count;
}
-static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
struct enetc_cbdr *ring = &si->cbd_ring;
int timeout = ENETC_CBDR_TIMEOUT;
@@ -66,6 +66,9 @@ static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
if (!timeout)
return -EBUSY;
+	/* The CBD may write back data; copy it back so the caller sees it */
+ *cbd = *dest_cbd;
+
enetc_clean_cbdr(si);
return 0;
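
Exporting enetc_send_cmd() together with the copy-back above changes the calling convention: callers may now read response fields straight out of the descriptor they submitted. A hedged sketch, with the command class and consumed fields chosen for illustration:

	struct enetc_cbd cbd = {};
	int err;

	cbd.cls = BDCR_CMD_PORT_GCL;	/* class 5: port gate control list */
	/* ... fill addr/length/cmd for the request ... */

	err = enetc_send_cmd(si, &cbd);
	if (!err)
		/* the hardware write-back is now visible in 'cbd' itself */
		err = cbd.status_flags & ENETC_CBD_STATUS_MASK;
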
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index fcb52efec075..880a8ed8bb47 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -584,6 +584,31 @@ static int enetc_get_ts_info(struct net_device *ndev,
return 0;
}
+static void enetc_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int enetc_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, wol->wolopts);
+
+ return ret;
+}
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
@@ -601,6 +626,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_ts_info = enetc_get_ts_info,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 88276299f447..51f543ef37a8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -18,6 +18,7 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
@@ -148,6 +149,11 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PORT_BASE 0x10000
#define ENETC_PMR 0x0000
#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8)
+#define ENETC_PMR_PSPEED_10M 0
+#define ENETC_PMR_PSPEED_100M BIT(8)
+#define ENETC_PMR_PSPEED_1000M BIT(9)
+#define ENETC_PMR_PSPEED_2500M BIT(10)
#define ENETC_PSR 0x0004 /* RO */
#define ENETC_PSIPMR 0x0018
#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
@@ -179,6 +185,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_CBSE BIT(31)
+#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
@@ -440,22 +448,6 @@ union enetc_rx_bd {
#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
#define ENETC_MAX_NUM_VFS 2
-struct enetc_cbd {
- union {
- struct {
- __le32 addr[2];
- __le32 opt[4];
- };
- __le32 data[6];
- };
- __le16 index;
- __le16 length;
- u8 cmd;
- u8 cls;
- u8 _res;
- u8 status_flags;
-};
-
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
@@ -554,3 +546,72 @@ static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
val |= ENETC_TBMR_SET_PRIO(prio);
enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
}
+
+enum bdcr_cmd_class {
+ BDCR_CMD_UNSPEC = 0,
+ BDCR_CMD_MAC_FILTER,
+ BDCR_CMD_VLAN_FILTER,
+ BDCR_CMD_RSS,
+ BDCR_CMD_RFS,
+ BDCR_CMD_PORT_GCL,
+ BDCR_CMD_RECV_CLASSIFIER,
+ __BDCR_CMD_MAX_LEN,
+ BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
+};
+
+/* class 5, command 0 */
+struct tgs_gcl_conf {
+ u8 atc; /* init gate value */
+ u8 res[7];
+ struct {
+ u8 res1[4];
+ __le16 acl_len;
+ u8 res2[2];
+ };
+};
+
+/* gate control list entry */
+struct gce {
+ __le32 period;
+ u8 gate;
+ u8 res[3];
+};
+
+/* the address in tgs_gcl_conf points to this data area */
+struct tgs_gcl_data {
+ __le32 btl;
+ __le32 bth;
+ __le32 ct;
+ __le32 cte;
+ struct gce entry[0];
+};
+
+struct enetc_cbd {
+ union {
+ struct {
+ __le32 addr[2];
+ union {
+ __le32 opt[4];
+ struct tgs_gcl_conf gcl_conf;
+ };
+ }; /* Long format */
+ __le32 data[6];
+ };
+ __le16 index;
+ __le16 length;
+ u8 cmd;
+ u8 cls;
+ u8 _res;
+ u8 status_flags;
+};
+
+#define ENETC_CLK 400000000ULL
+
+/* port time gating control register */
+#define ENETC_QBV_PTGCR_OFFSET 0x11a00
+#define ENETC_QBV_TGE BIT(31)
+#define ENETC_QBV_TGPE BIT(30)
+
+/* Port time gating capability register */
+#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
+#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
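The control BD now overlays opt[4] with a tgs_gcl_conf in its long format. A sketch of how a class-5 (BDCR_CMD_PORT_GCL) command is assembled, mirroring the usage in enetc_qos.c below; 'dma' and 'gcl_len' are assumed to be prepared by the caller:

    struct enetc_cbd cbd = {};

    cbd.cls = BDCR_CMD_PORT_GCL;
    cbd.gcl_conf.atc = 0xff;                    /* all gates open initially */
    cbd.gcl_conf.acl_len = cpu_to_le16(gcl_len);
    cbd.addr[0] = lower_32_bits(dma);           /* DMA address of tgs_gcl_data */
    cbd.addr[1] = upper_32_bits(dma);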
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index b73421c3e25b..e7482d483b28 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -742,6 +742,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->priv_flags |= IFF_UNICAST_FLT;
+ if (si->hw_features & ENETC_SI_F_QBV)
+ priv->active_offloads |= ENETC_F_QBV;
+
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
@@ -784,8 +787,8 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
}
}
- priv->if_mode = of_get_phy_mode(np);
- if ((int)priv->if_mode < 0) {
+ err = of_get_phy_mode(np, &priv->if_mode);
+ if (err) {
dev_err(priv->dev, "missing phy type\n");
of_node_put(priv->phy_node);
if (of_phy_is_fixed_link(np))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
new file mode 100644
index 000000000000..2e99438cb1bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "enetc.h"
+
+#include <net/pkt_sched.h>
+#include <linux/math64.h>
+
+static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
+{
+ return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
+ & ENETC_QBV_MAX_GCL_LEN_MASK;
+}
+
+void enetc_sched_speed_set(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 old_speed = priv->speed;
+ u32 speed, pspeed;
+
+ if (phydev->speed == old_speed)
+ return;
+
+ speed = phydev->speed;
+ switch (speed) {
+ case SPEED_1000:
+ pspeed = ENETC_PMR_PSPEED_1000M;
+ break;
+ case SPEED_2500:
+ pspeed = ENETC_PMR_PSPEED_2500M;
+ break;
+ case SPEED_100:
+ pspeed = ENETC_PMR_PSPEED_100M;
+ break;
+ case SPEED_10:
+ default:
+ pspeed = ENETC_PMR_PSPEED_10M;
+ netdev_err(ndev, "Qbv PSPEED set speed link down.\n");
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC_PMR,
+ (enetc_port_rd(&priv->si->hw, ENETC_PMR)
+ & (~ENETC_PMR_PSPEED_MASK))
+ | pspeed);
+}
+
+static int enetc_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *admin_conf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct tgs_gcl_conf *gcl_config;
+ struct tgs_gcl_data *gcl_data;
+ struct gce *gce;
+ dma_addr_t dma;
+ u16 data_size;
+ u16 gcl_len;
+ u32 tge;
+ int err;
+ int i;
+
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ return -EINVAL;
+ gcl_len = admin_conf->num_entries;
+
+ tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ if (!admin_conf->enable) {
+ enetc_wr(&priv->si->hw,
+ ENETC_QBV_PTGCR_OFFSET,
+ tge & (~ENETC_QBV_TGE));
+ return 0;
+ }
+
+ if (admin_conf->cycle_time > U32_MAX ||
+ admin_conf->cycle_time_extension > U32_MAX)
+ return -EINVAL;
+
+ /* Configure the (administrative) gate control list using the
+ * control BD descriptor.
+ */
+ gcl_config = &cbd.gcl_conf;
+
+ data_size = struct_size(gcl_data, entry, gcl_len);
+ gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!gcl_data)
+ return -ENOMEM;
+
+ gce = (struct gce *)(gcl_data + 1);
+
+ /* Set all gates open by default */
+ gcl_config->atc = 0xff;
+ gcl_config->acl_len = cpu_to_le16(gcl_len);
+
+ if (!admin_conf->base_time) {
+ gcl_data->btl =
+ cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
+ gcl_data->bth =
+ cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
+ } else {
+ gcl_data->btl =
+ cpu_to_le32(lower_32_bits(admin_conf->base_time));
+ gcl_data->bth =
+ cpu_to_le32(upper_32_bits(admin_conf->base_time));
+ }
+
+ gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
+ gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
+
+ for (i = 0; i < gcl_len; i++) {
+ struct tc_taprio_sched_entry *temp_entry;
+ struct gce *temp_gce = gce + i;
+
+ temp_entry = &admin_conf->entries[i];
+
+ temp_gce->gate = (u8)temp_entry->gate_mask;
+ temp_gce->period = cpu_to_le32(temp_entry->interval);
+ }
+
+ cbd.length = cpu_to_le16(data_size);
+ cbd.status_flags = 0;
+
+ dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
+ data_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(gcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ cbd.cls = BDCR_CMD_PORT_GCL;
+ cbd.status_flags = 0;
+
+ enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
+ tge | ENETC_QBV_TGE);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ enetc_wr(&priv->si->hw,
+ ENETC_QBV_PTGCR_OFFSET,
+ tge & (~ENETC_QBV_TGE));
+
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
+ kfree(gcl_data);
+
+ return err;
+}
+
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_set_bdr_prio(&priv->si->hw,
+ priv->tx_ring[i]->index,
+ taprio->enable ? i : 0);
+
+ err = enetc_setup_taprio(ndev, taprio);
+
+ if (err)
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_set_bdr_prio(&priv->si->hw,
+ priv->tx_ring[i]->index,
+ taprio->enable ? 0 : i);
+
+ return err;
+}
+
+static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
+}
+
+static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
+}
+
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_cbs_qopt_offload *cbs = type_data;
+ u32 port_transmit_rate = priv->speed;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_si *si = priv->si;
+ u32 hi_credit_bit, hi_credit_reg;
+ u32 max_interference_size;
+ u32 port_frame_max_size;
+ u32 tc_max_sized_frame;
+ u8 tc = cbs->queue;
+ u8 prio_top, prio_next;
+ int bw_sum = 0;
+ u8 bw;
+
+ prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
+ prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
+
+ /* Only the highest-prio and second highest-prio TCs support CBS mode */
+ if (tc != prio_top && tc != prio_next)
+ return -EOPNOTSUPP;
+
+ if (!cbs->enable) {
+ /* Make sure the other TCs that are numerically
+ * lower than this TC have been disabled.
+ */
+ if (tc == prio_top &&
+ enetc_get_cbs_enable(&si->hw, prio_next)) {
+ dev_err(&ndev->dev,
+ "Disable TC%d before disable TC%d\n",
+ prio_next, tc);
+ return -EINVAL;
+ }
+
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+
+ return 0;
+ }
+
+ if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
+ cbs->idleslope < 0 || cbs->sendslope > 0)
+ return -EOPNOTSUPP;
+
+ port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ bw = cbs->idleslope / (port_transmit_rate * 10UL);
+
+ /* Make sure the other TCs that are numerically
+ * higher than this TC have been enabled.
+ */
+ if (tc == prio_next) {
+ if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ dev_err(&ndev->dev,
+ "Enable TC%d first before enable TC%d\n",
+ prio_top, prio_next);
+ return -EINVAL;
+ }
+ bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ }
+
+ if (bw_sum + bw >= 100) {
+ dev_err(&ndev->dev,
+ "The sum of all CBS Bandwidth can't exceed 100\n");
+ return -EINVAL;
+ }
+
+ tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+
+ /* For the top-prio TC, max_interference_size is maxSizedFrame.
+ *
+ * For the next-prio TC, max_interference_size is calculated as below:
+ *
+ * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
+ *
+ * - Ra: idleSlope for AVB Class A
+ * - R0: port transmit rate
+ * - M0: maximum sized frame for the port
+ * - Ma: maximum sized frame for AVB Class A
+ */
+
+ if (tc == prio_top) {
+ max_interference_size = port_frame_max_size * 8;
+ } else {
+ u32 m0, ma, r0, ra;
+
+ m0 = port_frame_max_size * 8;
+ ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ port_transmit_rate * 10000ULL;
+ r0 = port_transmit_rate * 1000000ULL;
+ max_interference_size = m0 + ma +
+ (u32)div_u64((u64)ra * m0, r0 - ra);
+ }
+
+ /* hiCredit, in bits, is calculated as:
+ *
+ * max_interference_size * (idleSlope / portTxRate)
+ */
+ hi_credit_bit = max_interference_size * bw / 100;
+
+ /* hiCredit in bits is converted to the hiCredit register unit by
+ * scaling it with:
+ * (enetClockFrequency / portTransmitRate) * 100
+ */
+ hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ port_transmit_rate * 1000000ULL);
+
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+
+ /* Set bw register and enable this traffic class */
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+
+ return 0;
+}
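For reference, the credit arithmetic above worked through with assumed values (a 1000 Mb/s link, MTU 1500, idleslope 300000 kb/s, sendslope -700000 kb/s; none of these come from the patch):

    bw            = 300000 / (1000 * 10)                        = 30 (% of line rate)
    m0            = (1500 + 18 + 4) * 8                         = 12176 bits
    top-prio TC:    max_interference_size                       = 12176
    hi_credit_bit = 12176 * 30 / 100                            = 3652
    hi_credit_reg = 400000000 * 100 * 3652 / (1000 * 1000000)   = 146080

Both offloads are driven from user space through tc; illustrative commands, with the queue layout, interface name, and timings as placeholders (flags 0x2 requests full taprio offload):

    tc qdisc replace dev eno0 parent root handle 100 taprio \
        num_tc 8 map 0 1 2 3 4 5 6 7 \
        queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
        base-time 0 sched-entry S 0xff 1000000 flags 0x2

    tc qdisc replace dev eno0 parent 100:8 cbs \
        idleslope 300000 sendslope -700000 hicredit 30 locredit -1470 offload 1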
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a9c386b63581..05c1899f6628 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2706,7 +2706,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_tx_queues; q++) {
txq = fep->tx_queue[q];
- bdp = txq->bd.base;
for (i = 0; i < txq->bd.ring_size; i++) {
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
@@ -3394,6 +3393,7 @@ fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
struct fec_platform_data *pdata;
+ phy_interface_t interface;
struct net_device *ndev;
int i, irq, ret = 0;
const struct of_device_id *of_id;
@@ -3466,15 +3466,15 @@ fec_probe(struct platform_device *pdev)
}
fep->phy_node = phy_node;
- ret = of_get_phy_mode(pdev->dev.of_node);
- if (ret < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &interface);
+ if (ret) {
pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
fep->phy_interface = PHY_INTERFACE_MODE_MII;
} else {
- fep->phy_interface = ret;
+ fep->phy_interface = interface;
}
fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -3636,6 +3636,11 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -3643,15 +3648,17 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
free_netdev(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
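This is the recurring of_get_phy_mode() conversion in this diff (it repeats in fman, gianfar, hip04 and hix5hd2 below): the mode now comes back through a phy_interface_t pointer and the return value is 0 or a negative errno. The pattern, sketched with fec's MII fallback:

    phy_interface_t interface;
    int err;

    err = of_get_phy_mode(np, &interface);
    if (err)
        interface = PHY_INTERFACE_MODE_MII;   /* driver-specific fallback */
    else
        priv->phy_interface = interface;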
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 210749bf1eac..934111def0be 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -634,6 +634,9 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
{
u32 tmp;
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ return;
/* set LIODN base for this port */
tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
if (port_id % 2) {
@@ -644,7 +647,6 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
tmp |= liodn_base << DMA_LIODN_SHIFT;
}
iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -1942,6 +1944,8 @@ static int fman_init(struct fman *fman)
fman->liodn_offset[i] =
ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ continue;
liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
if (i % 2) {
/* FMDM_PLR LSB holds LIODN base for odd ports */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ee82ee1384eb..87b26f063cc8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -435,7 +435,6 @@ struct fman_port_cfg {
struct fman_port_rx_pools_params {
u8 num_of_pools;
- u16 second_largest_buf_size;
u16 largest_buf_size;
};
@@ -946,8 +945,6 @@ static int set_ext_buffer_pools(struct fman_port *port)
port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
port->rx_pools_params.largest_buf_size =
sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
- port->rx_pools_params.second_largest_buf_size =
- sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
/* FMBM_RMPD reg. - pool depletion */
if (buf_pool_depletion->pools_grp_mode_enable) {
@@ -1728,6 +1725,20 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+/**
+ * fman_port_get_device() - get the device behind an FMan port
+ * @port: Pointer to the FMan port device
+ *
+ * Get the 'struct device' associated with the specified FMan port device.
+ *
+ * Return: pointer to the associated 'struct device'
+ */
+struct device *fman_port_get_device(struct fman_port *port)
+{
+ return port->dev;
+}
+EXPORT_SYMBOL(fman_port_get_device);
+
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
{
if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 9dbb69f40121..82f12661a46d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -157,4 +157,6 @@ int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
struct fman_port *fman_port_bind(struct device *dev);
+struct device *fman_port_get_device(struct fman_port *port);
+
#endif /* __FMAN_PORT_H */
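A hypothetical consumer of the new accessor, mapping a buffer against the port's own device rather than the MAC's, presumably so the mapping uses the port's IOMMU context; 'vaddr' and 'len' are placeholders:

    struct device *dev = fman_port_get_device(port);
    dma_addr_t addr;

    addr = dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr))
        return -ENOMEM;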
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7ab8095db192..f0806ace1ae2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -608,7 +608,7 @@ static int mac_probe(struct platform_device *_of_dev)
const u8 *mac_addr;
u32 val;
u8 fman_id;
- int phy_if;
+ phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -776,8 +776,8 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the PHY connection type */
- phy_if = of_get_phy_mode(mac_node);
- if (phy_if < 0) {
+ err = of_get_phy_mode(mac_node, &phy_if);
+ if (err) {
dev_warn(dev,
"of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
mac_node);
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index 245d9a68a71f..7f20840fde07 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config FS_ENET
- tristate "Freescale Ethernet Driver"
- depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
- select MII
- select PHYLIB
+ tristate "Freescale Ethernet Driver"
+ depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+ select MII
+ select PHYLIB
config FS_ENET_MPC5121_FEC
def_bool y if (FS_ENET && PPC_MPC512x)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 51ad86417cb1..72868a28b621 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -641,6 +641,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
const char *model;
const void *mac_addr;
int err = 0, i;
+ phy_interface_t interface;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
struct device_node *np = ofdev->dev.of_node;
@@ -805,9 +806,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
* rgmii-id really needs to be specified. Other types can be
* detected by hardware
*/
- err = of_get_phy_mode(np);
- if (err >= 0)
- priv->interface = err;
+ err = of_get_phy_mode(np, &interface);
+ if (!err)
+ priv->interface = interface;
else
priv->interface = gfar_get_interface(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f472a6dbbe6f..432c6a818ae5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -90,11 +90,11 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-/* prevent fragmenation by HW in DSA environments */
-#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
-#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
- + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
+#define GFAR_SKBFRAG_OVR (RXBUF_ALIGNMENT \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_SIZE rounddown(GFAR_RXB_TRUESIZE - GFAR_SKBFRAG_OVR, 64)
+#define GFAR_SKBFRAG_SIZE (GFAR_RXB_SIZE + GFAR_SKBFRAG_OVR)
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
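With the reworked macros the buffer now fits its truesize. Assuming RXBUF_ALIGNMENT is 64 and sizeof(struct skb_shared_info) is 320 (both vary by architecture and config):

    GFAR_SKBFRAG_OVR  = 64 + SKB_DATA_ALIGN(320)  = 384
    GFAR_RXB_SIZE     = rounddown(2048 - 384, 64) = 1664
    GFAR_SKBFRAG_SIZE = 1664 + 384                = 2048  (== GFAR_RXB_TRUESIZE)

so buffer plus skb overhead no longer exceeds the 2048-byte truesize.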
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index aca95f64bde8..9b7a8db9860f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
}
qpl->id = id;
- qpl->num_entries = pages;
+ qpl->num_entries = 0;
qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
/* caller handles clean up */
if (err)
return -ENOMEM;
+ qpl->num_entries++;
}
priv->num_registered_pages += pages;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 0a9a7ee2a866..f4889431f9b7 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
u64 iov_offset, u64 iov_len)
{
+ u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+ u64 first_page = iov_offset / PAGE_SIZE;
dma_addr_t dma;
- u64 addr;
+ u64 page;
- for (addr = iov_offset; addr < iov_offset + iov_len;
- addr += PAGE_SIZE) {
- dma = page_buses[addr / PAGE_SIZE];
+ for (page = first_page; page <= last_page; page++) {
+ dma = page_buses[page];
dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
}
}
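The rewritten loop syncs by page index instead of by byte offset, which also covers a region that ends partway into its last page. With PAGE_SIZE = 4096 and, say, iov_offset = 4000, iov_len = 200 (bytes 4000..4199 span pages 0 and 1):

    old loop: addr = 4000 syncs page 0; addr = 8096 is not < 4200,
              so it stops and page 1 is never synced
    new loop: first_page = 4000 / 4096 = 0, last_page = 4199 / 4096 = 1,
              so both pages are synced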
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 4606a7e4a6d1..3e9b6d543c77 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -211,7 +211,7 @@ struct hip04_priv {
#if defined(CONFIG_HI13X1_GMAC)
void __iomem *sysctrl_base;
#endif
- int phy_mode;
+ phy_interface_t phy_mode;
int chan;
unsigned int port;
unsigned int group;
@@ -961,10 +961,9 @@ static int hip04_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->phy_mode = of_get_phy_mode(node);
- if (priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
dev_warn(d, "not find phy-mode\n");
- ret = -EINVAL;
goto init_fail;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c41b19c760f8..247de9105d10 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1193,10 +1193,9 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto err_free_mdio;
- priv->phy_mode = of_get_phy_mode(node);
- if ((int)priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
netdev_err(ndev, "not find phy-mode\n");
- ret = -EINVAL;
goto err_mdiobus;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3a14bbc26ea2..1c5243cc1dc6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3049,7 +3049,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
u32 sl;
u32 credit;
int i;
- const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
{DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
{DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
{DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
@@ -3059,7 +3059,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
};
- const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index f8a87f8ca983..1b0313900f98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -45,8 +45,9 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
- HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
};
@@ -71,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 03ca7d925e8e..eef1b2764d34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
return;
mutex_lock(&hnae3_common_lock);
-
+ /* a system may have at most one client of each type */
list_for_each_entry(client_tmp, &hnae3_client_list, node) {
if (client_tmp->type == client->type) {
existed = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a0998937727d..3b5e2d7251e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -130,7 +130,6 @@ enum hnae3_module_type {
HNAE3_MODULE_TYPE_CR = 0x04,
HNAE3_MODULE_TYPE_KR = 0x05,
HNAE3_MODULE_TYPE_TP = 0x06,
-
};
enum hnae3_fec_mode {
@@ -366,6 +365,19 @@ struct hnae3_ae_dev {
* Enable/disable HW GRO
* add_arfs_entry
* Check the 5-tuples of flow, and create flow director rule
+ * get_vf_config
+ * Get the VF configuration set by the host
+ * set_vf_link_state
+ * Set the VF link state
+ * set_vf_spoofchk
+ * Enable/disable spoof check for the specified VF
+ * set_vf_trust
+ * Enable/disable trust for the specified VF; a trusted VF is
+ * allowed to enable promiscuous mode
+ * set_vf_rate
+ * Set the max tx rate of the specified VF
+ * set_vf_mac
+ * Configure the default MAC for the specified VF
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -531,6 +543,16 @@ struct hnae3_ae_ops {
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
void (*restore_vlan_table)(struct hnae3_handle *handle);
+ int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf);
+ int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+ int link_state);
+ int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+ bool enable);
+ int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+ int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force);
+ int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
};
struct hnae3_dcb_ops {
@@ -553,7 +575,8 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table;
};
-#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16)
+#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
+#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
#define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 28961a68e333..6b328a259efc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -16,15 +16,14 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
u32 value, i = 0;
int cnt;
- if (!priv->ring_data) {
- dev_err(&h->pdev->dev, "ring_data is NULL\n");
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
return -EFAULT;
}
@@ -44,7 +43,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
return -EINVAL;
}
- ring_data = priv->ring_data;
for (i = queue_num; i < queue_max; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -54,73 +52,73 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
- ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+ dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
- ring = ring_data[i].ring;
+ ring = &priv->ring[i];
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+ dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+ dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
value);
}
@@ -130,7 +128,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
static int hns3_dbg_queue_map(struct hnae3_handle *h)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
int i;
if (!h->ae_algo->ops->get_global_queue_id)
@@ -143,15 +140,12 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
u16 global_qid;
global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
- ring_data = &priv->ring_data[i];
- if (!ring_data || !ring_data->ring ||
- !ring_data->ring->tqp_vector)
+ if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
dev_info(&h->pdev->dev,
" %4d %4d %4d\n",
- i, global_qid,
- ring_data->ring->tqp_vector->vector_irq);
+ i, global_qid, priv->ring[i].tqp_vector->vector_irq);
}
return 0;
@@ -160,7 +154,6 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
struct hns3_desc *rx_desc, *tx_desc;
struct device *dev = &h->pdev->dev;
struct hns3_enet_ring *ring;
@@ -183,8 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
return -EINVAL;
}
- ring_data = priv->ring_data;
- ring = ring_data[q_num].ring;
+ ring = &priv->ring[q_num];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
@@ -198,23 +190,26 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(tx_desc->addr);
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX)addr: %pad\n", &addr);
- dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
- dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
+ dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
+ dev_info(dev, "(TX)send_size: %u\n",
+ le16_to_cpu(tx_desc->tx.send_size));
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
- dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
- dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
+ dev_info(dev, "(TX)vlan_tag: %u\n",
+ le16_to_cpu(tx_desc->tx.outer_vlan_tag));
+ dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
- dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
- dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
- dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
+ dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+ dev_info(dev, "(TX)vld_ra_ri: %u\n",
+ le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
+ dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
- ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
rx_desc = &ring->desc[rx_index];
@@ -222,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: %pad\n", &addr);
- dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
- dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
- dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
- dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
- dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
- dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
- dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
- dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
- dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
+ dev_info(dev, "(RX)l234_info: %u\n",
+ le32_to_cpu(rx_desc->rx.l234_info));
+ dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
+ dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
+ dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
+ dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
+ dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
+ dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
+ le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
+ dev_info(dev, "(RX)ot_vlan_tag: %u\n",
+ le16_to_cpu(rx_desc->rx.ot_vlan_tag));
+ dev_info(dev, "(RX)bd_base_info: %u\n",
+ le32_to_cpu(rx_desc->rx.bd_base_info));
return 0;
}
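The descriptor fields are __le16/__le32 in host memory, so they must be converted before being printed; otherwise the debugfs output is wrong on big-endian hosts. The pattern now used throughout this file, sketched:

    u16 vlan_tag = le16_to_cpu(tx_desc->tx.vlan_tag);
    u32 paylen   = le32_to_cpu(tx_desc->tx.paylen);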
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 616cad0faa21..ba0536802b13 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -483,7 +483,7 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
for (i = 0; i < h->kinfo.num_tqps; i++) {
dev_queue = netdev_get_tx_queue(ndev,
- priv->ring_data[i].queue_index);
+ priv->ring[i].queue_index);
netdev_tx_reset_queue(dev_queue);
}
}
@@ -681,7 +681,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
return 0;
ret = skb_cow_head(skb, 0);
- if (unlikely(ret))
+ if (unlikely(ret < 0))
return ret;
l3.hdr = skb_network_header(skb);
@@ -962,14 +962,6 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
-static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
-{
- /* Config bd buffer end */
- if (!!frag_end)
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
-}
-
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -1062,7 +1054,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
skb_reset_mac_len(skb);
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_l4_proto_err++;
u64_stats_update_end(&ring->syncp);
@@ -1072,7 +1064,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
&ol_type_vlan_len_msec);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_l2l3l4_err++;
u64_stats_update_end(&ring->syncp);
@@ -1081,7 +1073,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ret = hns3_set_tso(skb, &paylen, &mss,
&type_cs_vlan_tso);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_tso_err++;
u64_stats_update_end(&ring->syncp);
@@ -1102,9 +1094,10 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- unsigned int size, int frag_end,
- enum hns_desc_type type)
+ unsigned int size, enum hns_desc_type type)
{
+#define HNS3_LIKELY_BD_NUM 1
+
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
@@ -1118,7 +1111,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int ret;
ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret))
+ if (unlikely(ret < 0))
return ret;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1137,19 +1130,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
- u16 bdtp_fe_sc_vld_ra_ri = 0;
-
desc_cb->priv = priv;
desc_cb->dma = dma;
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16(size);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
desc->tx.bdtp_fe_sc_vld_ra_ri =
- cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
ring_ptr_move_fw(ring, next_to_use);
- return 0;
+ return HNS3_LIKELY_BD_NUM;
}
frag_buf_num = hns3_tx_bd_count(size);
@@ -1158,8 +1148,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- u16 bdtp_fe_sc_vld_ra_ri = 0;
-
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1170,11 +1158,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
- frag_end && (k == frag_buf_num - 1) ?
- 1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri =
- cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
@@ -1183,23 +1168,78 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc = &ring->desc[ring->next_to_use];
}
- return 0;
+ return frag_buf_num;
}
-static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
{
- unsigned int bd_num;
+ unsigned int size;
int i;
- /* if the total len is within the max bd limit */
- if (likely(skb->len <= HNS3_MAX_BD_SIZE))
- return skb_shinfo(skb)->nr_frags + 1;
+ size = skb_headlen(skb);
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
- bd_num = hns3_tx_bd_count(skb_headlen(skb));
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ if (size) {
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- bd_num += hns3_tx_bd_count(skb_frag_size(frag));
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+{
+ struct sk_buff *frag_skb;
+ unsigned int bd_num = 0;
+
+ /* If the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+ return skb_shinfo(skb)->nr_frags + 1U;
+
+ /* The cases below will always be linearized; return
+ * HNS3_MAX_TSO_BD_NUM + 1U to force linearization.
+ */
+ if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+ return HNS3_MAX_TSO_BD_NUM + 1U;
+
+ bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+
+ if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+
+ skb_walk_frags(skb, frag_skb) {
+ bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
}
return bd_num;
@@ -1218,26 +1258,26 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
* 7 frags to be larger than gso header len + mss, and the remaining
* continuous 7 frags to be larger than MSS except the last 7 frags.
*/
-static bool hns3_skb_need_linearized(struct sk_buff *skb)
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
{
- int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
- for (i = 0; i < bd_limit; i++)
- tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+ tot_len += bd_size[i];
- /* ensure headlen + the first 7 frags is greater than mss + header
- * and the first 7 frags is greater than mss.
- */
- if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
- hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ /* ensure the sum of the first 8 frags is greater than mss + header */
+ if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+ skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
return true;
- /* ensure the remaining continuous 7 buffer is greater than mss */
- for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
- tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
- tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+ /* ensure every run of 7 continuous buffers is greater than mss,
+ * except the last one.
+ */
+ for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+ tot_len -= bd_size[i];
+ tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
if (tot_len < skb_shinfo(skb)->gso_size)
return true;
@@ -1249,15 +1289,16 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb)
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
+ unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
struct sk_buff *skb = *out_skb;
unsigned int bd_num;
- bd_num = hns3_nic_bd_num(skb);
- if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
+ bd_num = hns3_tx_bd_num(skb, bd_size);
+ if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
- !hns3_skb_need_linearized(skb))
+ if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ !hns3_skb_need_linearized(skb, bd_size, bd_num))
goto out;
/* manual split the send packet */
@@ -1267,9 +1308,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
- bd_num = hns3_nic_bd_num(new_skb);
- if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
- (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ bd_num = hns3_tx_bd_count(new_skb->len);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+ (!skb_is_gso(new_skb) &&
+ bd_num > HNS3_MAX_NON_TSO_BD_NUM))
return -ENOMEM;
u64_stats_update_begin(&ring->syncp);
@@ -1314,73 +1356,98 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
}
}
+static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, enum hns_desc_type type)
+{
+ unsigned int size = skb_headlen(skb);
+ int i, ret, bd_num = 0;
+
+ if (size) {
+ ret = hns3_fill_desc(ring, skb, size, type);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ return bd_num;
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_nic_ring_data *ring_data =
- &tx_ring_data(priv, skb->queue_mapping);
- struct hns3_enet_ring *ring = ring_data->ring;
+ struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct netdev_queue *dev_queue;
- skb_frag_t *frag;
- int next_to_use_head;
- int buf_num;
- int seg_num;
- int size;
+ int pre_ntu, next_to_use_head;
+ struct sk_buff *frag_skb;
+ int bd_num = 0;
int ret;
- int i;
/* Prefetch the data used later */
prefetch(skb->data);
- buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
- if (unlikely(buf_num <= 0)) {
- if (buf_num == -EBUSY) {
+ ret = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(ret <= 0)) {
+ if (ret == -EBUSY) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
goto out_net_tx_busy;
- } else if (buf_num == -ENOMEM) {
+ } else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
}
- hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
+ hns3_rl_err(netdev, "xmit error: %d!\n", ret);
goto out_err_tx_ok;
}
- /* No. of segments (plus a header) */
- seg_num = skb_shinfo(skb)->nr_frags + 1;
- /* Fill the first part */
- size = skb_headlen(skb);
-
next_to_use_head = ring->next_to_use;
- ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
- DESC_TYPE_SKB);
- if (unlikely(ret))
+ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ if (unlikely(ret < 0))
goto fill_err;
- /* Fill the fragments */
- for (i = 1; i < seg_num; i++) {
- frag = &skb_shinfo(skb)->frags[i - 1];
- size = skb_frag_size(frag);
+ bd_num += ret;
- ret = hns3_fill_desc(ring, frag, size,
- seg_num - 1 == i ? 1 : 0,
- DESC_TYPE_PAGE);
+ if (!skb_has_frag_list(skb))
+ goto out;
- if (unlikely(ret))
+ skb_walk_frags(skb, frag_skb) {
+ ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
goto fill_err;
+
+ bd_num += ret;
}
+out:
+ pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ (ring->desc_num - 1);
+ ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+ cpu_to_le16(BIT(HNS3_TXD_FE_B));
/* Complete translate all packets */
- dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
+ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
netdev_tx_sent_queue(dev_queue, skb->len);
wmb(); /* Commit all data before submit */
- hnae3_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, bd_num);
return NETDEV_TX_OK;
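hns3_fill_desc() now returns the number of BDs it consumed and only sets the VLD bit; the FE (frame end) bit is OR'ed into the last BD of the whole packet here, after every fragment (including fraglist skbs) has been filled. Since next_to_use already points one past the last filled slot, the last BD is found with a wrap-around step; for example, with desc_num = 1024 and next_to_use wrapped back to 0, pre_ntu = 1024 - 1 = 1023.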
@@ -1392,7 +1459,7 @@ out_err_tx_ok:
return NETDEV_TX_OK;
out_net_tx_busy:
- netif_stop_subqueue(netdev, ring_data->queue_index);
+ netif_stop_subqueue(netdev, ring->queue_index);
smp_mb(); /* Commit all data before submit */
return NETDEV_TX_BUSY;
@@ -1413,6 +1480,16 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+ /* For a VF device, if there is a perm_addr, the user is not
+ * allowed to change the address.
+ */
+ if (!hns3_is_phys_func(h->pdev) &&
+ !is_zero_ether_addr(netdev->perm_addr)) {
+ netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
+ netdev->perm_addr, mac_addr->sa_data);
+ return -EPERM;
+ }
+
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
if (ret) {
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1505,7 +1582,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
- ring = priv->ring_data[idx].ring;
+ ring = &priv->ring[idx];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
@@ -1523,7 +1600,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
- ring = priv->ring_data[idx + queue_num].ring;
+ ring = &priv->ring[idx + queue_num];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
@@ -1633,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
int ret = -EIO;
netif_dbg(h, drv, netdev,
- "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
- vf, vlan, qos, vlan_proto);
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
+ vf, vlan, qos, ntohs(vlan_proto));
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
@@ -1643,6 +1720,29 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
return ret;
}
+static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!handle->ae_algo->ops->set_vf_spoofchk)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
+}
+
+static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (!handle->ae_algo->ops->set_vf_trust)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
+}
+
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -1671,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hns3_enet_ring *tx_ring = NULL;
+ struct hns3_enet_ring *tx_ring;
struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
@@ -1692,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
timeout_queue = i;
+ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
+ q->state,
+ jiffies_to_msecs(jiffies - trans_start));
break;
}
}
@@ -1705,7 +1808,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
priv->tx_timeout_count++;
- tx_ring = priv->ring_data[timeout_queue].ring;
+ tx_ring = &priv->ring[timeout_queue];
napi = &tx_ring->tqp_vector->napi;
netdev_info(ndev,
@@ -1805,6 +1908,57 @@ static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
#endif
+static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->get_vf_config)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_vf_config(h, vf, ivf);
+}
+
+static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
+ int link_state)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_link_state)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
+}
+
+static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_rate)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
+ false);
+}
+
+static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->set_vf_mac)
+ return -EOPNOTSUPP;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev,
+ "Invalid MAC:%pM specified. Could not set MAC\n",
+ mac);
+ return -EINVAL;
+ }
+
+ return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1820,10 +1974,15 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+ .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
+ .ndo_set_vf_trust = hns3_set_vf_trust,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = hns3_rx_flow_steer,
#endif
-
+ .ndo_get_vf_config = hns3_nic_get_vf_config,
+ .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
+ .ndo_set_vf_rate = hns3_nic_set_vf_rate,
+ .ndo_set_vf_mac = hns3_nic_set_vf_mac,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
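The new hooks are driven from the standard iproute2 interface; illustrative commands, with the device name and VF index as placeholders:

    ip link set dev eth0 vf 0 mac 02:11:22:33:44:55
    ip link set dev eth0 vf 0 spoofchk on
    ip link set dev eth0 vf 0 trust on
    ip link set dev eth0 vf 0 max_tx_rate 100
    ip link set dev eth0 vf 0 state enable
    ip link show dev eth0        # reads back via ndo_get_vf_config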
@@ -1843,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
return false;
default:
- dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+ dev_warn(&pdev->dev, "un-recognized pci device-id %u",
dev_id);
}
@@ -2069,9 +2228,8 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
-
- netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
@@ -2081,21 +2239,24 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
netdev->vlan_features |=
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
if (pdev->revision >= 0x21) {
netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2320,18 +2481,19 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
int head;
head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean)
return; /* no data to poll */
+ rmb(); /* Make sure head is ready before touch any data */
+
if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
@@ -2358,7 +2520,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
netdev_tx_completed_queue(dev_queue, pkts, bytes);
if (unlikely(pkts && netif_carrier_ok(netdev) &&
- (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+ ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
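
The hunk above moves the read barrier after the cheap emptiness test, so an idle TX ring returns without paying for the rmb(); the barrier still executes before any descriptor state guarded by the head index is read. A sketch of the same ordering with C11 atomics standing in for readl_relaxed()/rmb() — a toy model, not the driver's code:

#include <stdatomic.h>

/* Toy ring: the producer writes the data slots, then advances `head`;
 * the consumer must not read those slots before seeing the new head. */
struct ring {
	_Atomic int head;
	int next_to_clean;
	int data[64];
};

static int ring_poll(struct ring *r)
{
	/* relaxed load mirrors readl_relaxed(): no ordering implied yet */
	int head = atomic_load_explicit(&r->head, memory_order_relaxed);

	if (head == r->next_to_clean)
		return 0;	/* empty: return without paying for a barrier */

	/* acquire fence plays the role of rmb(): data written before the
	 * producer advanced `head` is guaranteed visible from here on */
	atomic_thread_fence(memory_order_acquire);

	return r->data[r->next_to_clean];
}

int main(void)
{
	struct ring r = { .head = 0, .next_to_clean = 0 };
	return ring_poll(&r);
}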
@@ -2401,7 +2563,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- hns3_rl_err(ring->tqp_vector->napi.dev,
+ hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
break;
@@ -2510,7 +2672,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
int l3_type, l4_type;
int ol4_type;
@@ -2626,7 +2788,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
{
#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
struct sk_buff *skb;
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
@@ -2672,10 +2834,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
}
static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
- struct sk_buff **out_skb, bool pending)
+ bool pending)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *skb = ring->skb;
+ struct sk_buff *head_skb = skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *pre_desc;
@@ -2704,10 +2866,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return -ENXIO;
if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
- new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
if (unlikely(!new_skb)) {
- hns3_rl_err(ring->tqp_vector->napi.dev,
+ hns3_rl_err(ring_to_netdev(ring),
"alloc rx fraglist skb fail\n");
return -ENXIO;
}
@@ -2783,7 +2944,7 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
enum hns3_pkt_l2t_type l2_frame_type;
u32 bd_base_info, l234info, ol_info;
struct hns3_desc *desc;
@@ -2858,8 +3019,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
return 0;
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
@@ -2897,12 +3057,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
- *out_skb = skb = ring->skb;
+ skb = ring->skb;
if (ret < 0) /* alloc buffer fail */
return ret;
if (ret > 0) { /* need add frag */
- ret = hns3_add_frag(ring, desc, &skb, false);
+ ret = hns3_add_frag(ring, desc, false);
if (ret)
return ret;
@@ -2913,7 +3073,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
} else {
- ret = hns3_add_frag(ring, desc, &skb, true);
+ ret = hns3_add_frag(ring, desc, true);
if (ret)
return ret;
@@ -2931,8 +3091,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
skb_record_rx_queue(skb, ring->tqp->tqp_index);
- *out_skb = skb;
-
return 0;
}
@@ -2941,17 +3099,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = ring->skb;
int recv_pkts = 0;
int recv_bds = 0;
int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- rmb(); /* Make sure num taken effect before the other data is touched */
-
num -= unused_count;
unused_count -= ring->pending_buf;
+ if (num <= 0)
+ goto out;
+
+ rmb(); /* Make sure num taken effect before the other data is touched */
+
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
@@ -2961,27 +3121,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb);
- if (unlikely(!skb)) /* This fault cannot be repaired */
- goto out;
-
- if (err == -ENXIO) { /* Do not get FE for the packet */
+ err = hns3_handle_rx_bd(ring);
+ /* Do not get FE for the packet or failed to alloc skb */
+ if (unlikely(!ring->skb || err == -ENXIO)) {
goto out;
- } else if (unlikely(err)) { /* Do jump the err */
- recv_bds += ring->pending_buf;
- unused_count += ring->pending_buf;
- ring->skb = NULL;
- ring->pending_buf = 0;
- continue;
+ } else if (likely(!err)) {
+ rx_fn(ring, ring->skb);
+ recv_pkts++;
}
- rx_fn(ring, skb);
recv_bds += ring->pending_buf;
unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
-
- recv_pkts++;
}
out:
@@ -3324,13 +3476,13 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[vector_i];
hns3_add_ring_to_group(&tqp_vector->tx_group,
- priv->ring_data[i].ring);
+ &priv->ring[i]);
hns3_add_ring_to_group(&tqp_vector->rx_group,
- priv->ring_data[i + tqp_num].ring);
+ &priv->ring[i + tqp_num]);
- priv->ring_data[i].ring->tqp_vector = tqp_vector;
- priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+ priv->ring[i].tqp_vector = tqp_vector;
+ priv->ring[i + tqp_num].tqp_vector = tqp_vector;
tqp_vector->num_tqps++;
}
@@ -3474,28 +3626,22 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
return 0;
}
-static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
- unsigned int ring_type)
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+ unsigned int ring_type)
{
- struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
- struct pci_dev *pdev = priv->ae_handle->pdev;
struct hns3_enet_ring *ring;
int desc_num;
- ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return -ENOMEM;
-
if (ring_type == HNAE3_RING_TYPE_TX) {
+ ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
- ring_data[q->tqp_index].ring = ring;
- ring_data[q->tqp_index].queue_index = q->tqp_index;
+ ring->queue_index = q->tqp_index;
ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
+ ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
- ring_data[q->tqp_index + queue_num].ring = ring;
- ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
+ ring->queue_index = q->tqp_index;
ring->io_base = q->io_base;
}
@@ -3510,76 +3656,41 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
-
- return 0;
}
-static int hns3_queue_to_ring(struct hnae3_queue *tqp,
- struct hns3_nic_priv *priv)
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
+ struct hns3_nic_priv *priv)
{
- int ret;
-
- ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
- if (ret)
- return ret;
-
- ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
- if (ret) {
- devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
- return ret;
- }
-
- return 0;
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
struct pci_dev *pdev = h->pdev;
- int i, ret;
+ int i;
- priv->ring_data = devm_kzalloc(&pdev->dev,
- array3_size(h->kinfo.num_tqps,
- sizeof(*priv->ring_data),
- 2),
- GFP_KERNEL);
- if (!priv->ring_data)
+ priv->ring = devm_kzalloc(&pdev->dev,
+ array3_size(h->kinfo.num_tqps,
+ sizeof(*priv->ring), 2),
+ GFP_KERNEL);
+ if (!priv->ring)
return -ENOMEM;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
- if (ret)
- goto err;
- }
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_queue_to_ring(h->kinfo.tqp[i], priv);
return 0;
-err:
- while (i--) {
- devm_kfree(priv->dev, priv->ring_data[i].ring);
- devm_kfree(priv->dev,
- priv->ring_data[i + h->kinfo.num_tqps].ring);
- }
-
- devm_kfree(&pdev->dev, priv->ring_data);
- priv->ring_data = NULL;
- return ret;
}
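
With the hns3_nic_ring_data indirection gone, all rings live in one contiguous array sized by the overflow-checked array3_size(num_tqps, sizeof(ring), 2): TX ring i sits at ring[i] and its RX partner at ring[i + num_tqps]. A userspace sketch of that layout, with calloc playing the role of devm_kzalloc:

#include <stdio.h>
#include <stdlib.h>

struct toy_ring { unsigned int queue_index; int is_tx; };

int main(void)
{
	unsigned int num_tqps = 4;

	/* array3_size(a, b, c) in the kernel is an overflow-checked a*b*c;
	 * calloc supplies the checked two-factor version here. */
	struct toy_ring *ring = calloc(2 * (size_t)num_tqps, sizeof(*ring));
	if (!ring)
		return 1;

	for (unsigned int i = 0; i < num_tqps; i++) {
		ring[i].queue_index = i;		/* TX half */
		ring[i].is_tx = 1;
		ring[i + num_tqps].queue_index = i;	/* RX half */
		ring[i + num_tqps].is_tx = 0;
	}

	printf("rx ring for queue 2 sits at index %u\n", 2 + num_tqps);
	free(ring);
	return 0;
}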
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
- struct hnae3_handle *h = priv->ae_handle;
- int i;
-
- if (!priv->ring_data)
+ if (!priv->ring)
return;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- devm_kfree(priv->dev, priv->ring_data[i].ring);
- devm_kfree(priv->dev,
- priv->ring_data[i + h->kinfo.num_tqps].ring);
- }
- devm_kfree(priv->dev, priv->ring_data);
- priv->ring_data = NULL;
+ devm_kfree(priv->dev, priv->ring);
+ priv->ring = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
@@ -3696,7 +3807,7 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
for (j = 0; j < tc_info->tqp_count; j++) {
struct hnae3_queue *q;
- q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ q = priv->ring[tc_info->tqp_offset + j].tqp;
hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
tc_info->tc);
}
@@ -3711,21 +3822,21 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int ret;
for (i = 0; i < ring_num; i++) {
- ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
+ ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
dev_err(priv->dev,
"Alloc ring memory fail! ret=%d\n", ret);
goto out_when_alloc_ring_memory;
}
- u64_stats_init(&priv->ring_data[i].ring->syncp);
+ u64_stats_init(&priv->ring[i].syncp);
}
return 0;
out_when_alloc_ring_memory:
for (j = i - 1; j >= 0; j--)
- hns3_fini_ring(priv->ring_data[j].ring);
+ hns3_fini_ring(&priv->ring[j]);
return -ENOMEM;
}
@@ -3736,30 +3847,31 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
int i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- hns3_fini_ring(priv->ring_data[i].ring);
- hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ hns3_fini_ring(&priv->ring[i]);
+ hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
}
return 0;
}
/* Set mac addr if it is configured, or leave it to the AE driver */
-static int hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
- if (h->ae_algo->ops->get_mac_addr && init) {
+ if (h->ae_algo->ops->get_mac_addr)
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
- }
/* Check if the MAC address is valid; if not, get a random one */
- if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
+ } else {
+ ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ ether_addr_copy(netdev->perm_addr, mac_addr_temp);
}
if (h->ae_algo->ops->set_mac_addr)
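
hns3_init_mac_addr() now validates the firmware-supplied address before touching the netdev, mirrors it into perm_addr, and only falls back to a random address when the query yields nothing usable. A hedged sketch of that fallback in plain C (the kernel's eth_hw_addr_random() likewise clears the multicast bit and sets the locally-administered bit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the test behind is_valid_ether_addr(): a usable unicast
 * address is neither all-zero nor multicast. */
static bool mac_valid(const uint8_t mac[6])
{
	static const uint8_t zero[6];

	return memcmp(mac, zero, 6) != 0 && !(mac[0] & 0x01);
}

static void init_mac(uint8_t dev_addr[6], const uint8_t queried[6])
{
	if (mac_valid(queried)) {
		memcpy(dev_addr, queried, 6);	/* also mirrored to perm_addr */
		return;
	}
	for (int i = 0; i < 6; i++)
		dev_addr[i] = rand() & 0xff;
	dev_addr[0] &= 0xfe;	/* clear multicast (I/G) bit */
	dev_addr[0] |= 0x02;	/* set locally-administered bit */
}

int main(void)
{
	uint8_t dev_addr[6];
	const uint8_t queried[6] = { 0 };	/* firmware returned nothing */

	init_mac(dev_addr, queried);
	for (int i = 0; i < 6; i++)
		printf("%02x%c", dev_addr[i], i < 5 ? ':' : '\n');
	return 0;
}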
@@ -3827,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
- dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
- dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
- dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
- dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
- dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
- dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
- dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
- dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}
static int hns3_client_init(struct hnae3_handle *handle)
@@ -3863,7 +3975,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
- hns3_init_mac_addr(netdev, true);
+ hns3_init_mac_addr(netdev);
hns3_set_default_feature(netdev);
@@ -3897,7 +4009,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
ret = hns3_init_all_ring(priv);
if (ret) {
ret = -ENOMEM;
- goto out_init_ring_data;
+ goto out_init_ring;
}
ret = hns3_init_phy(netdev);
@@ -3936,12 +4048,12 @@ out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
-out_init_ring_data:
+out_init_ring:
hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
- priv->ring_data = NULL;
+ priv->ring = NULL;
out_get_ring_cfg:
priv->ae_handle = NULL;
free_netdev(netdev);
@@ -4102,7 +4214,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
/* if allocating a new buffer fails, exit directly
 * and re-clear in the up flow.
 */
- netdev_warn(ring->tqp->handle->kinfo.netdev,
+ netdev_warn(ring_to_netdev(ring),
"reserve buffer map failed, ret = %d\n",
ret);
return ret;
@@ -4148,10 +4260,10 @@ static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
for (i = 0; i < h->kinfo.num_tqps; i++) {
struct hns3_enet_ring *ring;
- ring = priv->ring_data[i].ring;
+ ring = &priv->ring[i];
hns3_clear_tx_ring(ring);
- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ ring = &priv->ring[i + h->kinfo.num_tqps];
/* Continue to clear other rings even if clearing some
* rings failed.
*/
@@ -4175,16 +4287,16 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
if (ret)
return ret;
- hns3_init_ring_hw(priv->ring_data[i].ring);
+ hns3_init_ring_hw(&priv->ring[i]);
/* We need to clear tx ring here because self test will
* use the ring and will not run down before up
*/
- hns3_clear_tx_ring(priv->ring_data[i].ring);
- priv->ring_data[i].ring->next_to_clean = 0;
- priv->ring_data[i].ring->next_to_use = 0;
+ hns3_clear_tx_ring(&priv->ring[i]);
+ priv->ring[i].next_to_clean = 0;
+ priv->ring[i].next_to_use = 0;
- rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ rx_ring = &priv->ring[i + h->kinfo.num_tqps];
hns3_init_ring_hw(rx_ring);
ret = hns3_clear_rx_ring(rx_ring);
if (ret)
@@ -4331,7 +4443,7 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
bool vlan_filter_enable;
int ret;
- ret = hns3_init_mac_addr(netdev, false);
+ ret = hns3_init_mac_addr(netdev);
if (ret)
return ret;
@@ -4454,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev,
if (new_tqp_num > hns3_get_max_available_channels(h) ||
new_tqp_num < 1) {
dev_err(&netdev->dev,
- "Change tqps fail, the tqp range is from 1 to %d",
+ "Change tqps fail, the tqp range is from 1 to %u",
hns3_get_max_available_channels(h));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 5d468ed404a6..9d47abd5c37c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -76,7 +76,7 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32760
-#define HNS3_RING_MIN_PENDING 24
+#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
@@ -186,7 +186,7 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
-#define HNS3_TX_LAST_SIZE_M 0xffff
+#define HNS3_TX_LAST_SIZE_M 0xffff
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
@@ -195,9 +195,13 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_NUM_NORMAL 8
-#define HNS3_MAX_BD_NUM_TSO 63
-#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
+#define HNS3_MAX_NON_TSO_BD_NUM 8U
+#define HNS3_MAX_TSO_BD_NUM 63U
+#define HNS3_MAX_TSO_SIZE \
+ (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+
+#define HNS3_MAX_NON_TSO_SIZE \
+ (HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
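
Each buffer descriptor carries at most 65535 bytes (a 16-bit length field), so the new macros bound a TSO frame at 63 BDs and a non-TSO frame at 8 BDs worth of data. A quick arithmetic check:

#include <stdio.h>

#define MAX_BD_SIZE		65535U	/* 16-bit length field per BD */
#define MAX_TSO_BD_NUM		63U
#define MAX_NON_TSO_BD_NUM	8U

int main(void)
{
	/* roughly 3.9 MB ceiling for a TSO frame, 512 KB otherwise */
	printf("TSO max:     %u bytes\n", MAX_BD_SIZE * MAX_TSO_BD_NUM);
	printf("non-TSO max: %u bytes\n", MAX_BD_SIZE * MAX_NON_TSO_BD_NUM);
	return 0;
}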
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -309,7 +313,7 @@ struct hns3_desc_cb {
u16 reuse_flag;
- /* desc type, used by the ring user to mark the type of the priv data */
+ /* desc type, used by the ring user to mark the type of the priv data */
u16 type;
};
@@ -405,6 +409,7 @@ struct hns3_enet_ring {
struct hns3_enet_ring *next;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_queue *tqp;
+ int queue_index;
struct device *dev; /* will be used for DMA mapping of descriptors */
/* statistic */
@@ -430,18 +435,7 @@ struct hns3_enet_ring {
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
-};
-
-struct hns_queue;
-
-struct hns3_nic_ring_data {
- struct hns3_enet_ring *ring;
- struct napi_struct napi;
- int queue_index;
- int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
- void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
- void (*fini_process)(struct hns3_nic_ring_data *);
-};
+} ____cacheline_internodealigned_in_smp;
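
Aligning each ring to an internode cacheline keeps rings polled by different CPUs from false-sharing a line. A userspace analogue using C11 alignas — 64 bytes is an assumption here; ____cacheline_internodealigned_in_smp uses the architecture's internode cacheline size, which may be 128 or more:

#include <stdalign.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed; arch-dependent in the kernel */

struct counter {
	alignas(CACHELINE) unsigned long value;
};

int main(void)
{
	/* adjacent slots now start on distinct cachelines, so two CPUs
	 * updating neighbouring entries stop bouncing a shared line */
	struct counter per_cpu[4] = {{0}};

	per_cpu[0].value++;
	printf("stride: %zu bytes\n", sizeof(per_cpu[0]));
	return 0;
}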
enum hns3_flow_level_range {
HNS3_FLOW_LOW = 0,
@@ -518,7 +512,7 @@ struct hns3_nic_priv {
* the cb for nic to manage the ring buffer, the first half of the
* array is for tx_ring and vice versa for the second half
*/
- struct hns3_nic_ring_data *ring_data;
+ struct hns3_enet_ring *ring;
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
@@ -613,11 +607,11 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define ring_to_dev(ring) ((ring)->dev)
+#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
+
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
DMA_TO_DEVICE : DMA_FROM_DEVICE)
-#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-
#define hns3_buf_size(_ring) ((_ring)->buf_size)
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 680c3508876d..6e0212b79438 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
-struct hns3_link_mode_mapping {
- u32 hns3_link_mode;
- u32 ethtool_link_mode;
-};
-
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -203,7 +198,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
kinfo = &h->kinfo;
for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
struct hns3_enet_ring_group *rx_group;
u64 pre_rx_pkt;
@@ -226,7 +221,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
u32 i;
for (i = start_ringid; i <= end_ringid; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
hns3_clean_tx_ring(ring);
}
@@ -491,7 +486,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i].ring;
+ ring = &nic_priv->ring[i];
for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
@@ -500,7 +495,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
@@ -603,8 +598,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
- param->tx_pending = priv->ring_data[0].ring->desc_num;
- param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+ param->tx_pending = priv->ring[0].desc_num;
+ param->rx_pending = priv->ring[queue_num].desc_num;
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -906,9 +901,8 @@ static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
h->kinfo.num_rx_desc = rx_desc_num;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- priv->ring_data[i].ring->desc_num = tx_desc_num;
- priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
- rx_desc_num;
+ priv->ring[i].desc_num = tx_desc_num;
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
}
}
@@ -924,7 +918,7 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
return NULL;
for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
- memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ memcpy(&tmp_rings[i], &priv->ring[i],
sizeof(struct hns3_enet_ring));
tmp_rings[i].skb = NULL;
}
@@ -972,8 +966,8 @@ static int hns3_set_ringparam(struct net_device *ndev,
/* Hardware requires that its descriptors must be multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring_data[0].ring->desc_num;
- old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+ old_tx_desc_num = priv->ring[0].desc_num;
+ old_rx_desc_num = priv->ring[queue_num].desc_num;
if (old_tx_desc_num == new_tx_desc_num &&
old_rx_desc_num == new_rx_desc_num)
return 0;
@@ -986,7 +980,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
old_tx_desc_num, old_rx_desc_num,
new_tx_desc_num, new_rx_desc_num);
@@ -1002,7 +996,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
- memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
} else {
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1098,13 +1092,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
if (queue >= queue_num) {
netdev_err(netdev,
- "Invalid queue value %d! Queue max id=%d\n",
+ "Invalid queue value %u! Queue max id=%u\n",
queue, queue_num - 1);
return -EINVAL;
}
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
cmd->use_adaptive_tx_coalesce =
tx_vector->tx_group.coal.gl_adapt_enable;
@@ -1148,14 +1142,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev,
- "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->rx_coalesce_usecs, rx_gl);
}
tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
if (tx_gl != cmd->tx_coalesce_usecs) {
netdev_info(netdev,
- "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->tx_coalesce_usecs, tx_gl);
}
@@ -1183,7 +1177,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
if (rl != cmd->rx_coalesce_usecs_high) {
netdev_info(netdev,
- "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
cmd->rx_coalesce_usecs_high, rl);
}
@@ -1212,7 +1206,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev,
- "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
cmd->use_adaptive_tx_coalesce,
cmd->use_adaptive_rx_coalesce);
}
@@ -1229,8 +1223,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
tx_vector->tx_group.coal.gl_adapt_enable =
cmd->use_adaptive_tx_coalesce;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ecf58cfd253d..940ead3970d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
rmb(); /* Make sure head is ready before touch any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
@@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
} while (timeout < hw->cmq.tx_timeout);
}
- if (!complete) {
+ if (!complete)
retval = -EBADE;
- } else {
+ else
retval = hclge_cmd_check_retval(hw, desc, num, ntc);
- }
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 1426eb5ddf3d..d97da67f07a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -5,8 +5,10 @@
#define __HCLGE_CMD_H
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/etherdevice.h>
#define HCLGE_CMDQ_TX_TIMEOUT 30000
+#define HCLGE_DESC_DATA_LEN 6
struct hclge_dev;
struct hclge_desc {
@@ -18,7 +20,7 @@ struct hclge_desc {
__le16 flag;
__le16 retval;
__le16 rsv;
- __le32 data[6];
+ __le32 data[HCLGE_DESC_DATA_LEN];
};
struct hclge_cmq_ring {
@@ -244,7 +246,7 @@ enum hclge_opcode_type {
/* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
@@ -259,6 +261,7 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
@@ -428,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd {
#define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD 4
+
struct hclge_func_status_cmd {
- __le32 vf_rst_state[4];
+ __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
u8 pf_state;
u8 mac_id;
u8 rsv1;
@@ -485,10 +490,12 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+#define HCLGE_CFG_CMD_CNT 4
+
struct hclge_cfg_param_cmd {
__le32 offset;
__le32 rsv;
- __le32 param[4];
+ __le32 param[HCLGE_CFG_CMD_CNT];
};
#define HCLGE_MAC_MODE 0x0
@@ -712,8 +719,7 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- __le32 mac_addr_hi32;
- __le16 mac_addr_lo16;
+ u8 mac_addr[ETH_ALEN];
__le16 rsv1;
__le16 ethter_type;
__le16 egress_port;
@@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd {
u8 rsv2[19];
};
+#define HCLGE_VLAN_ID_OFFSET_STEP 160
+#define HCLGE_VLAN_BYTE_SIZE 8
+#define HCLGE_VLAN_OFFSET_BITMAP \
+ (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset;
u8 vlan_cfg;
u8 rsv[2];
- u8 vlan_offset_bitmap[20];
+ u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
};
+#define HCLGE_MAX_VF_BYTES 16
+
struct hclge_vlan_filter_vf_cfg_cmd {
__le16 vlan_id;
u8 resp_code;
u8 rsv;
u8 vlan_cfg;
u8 rsv1[3];
- u8 vf_bitmap[16];
+ u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
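
The new VLAN macros encode how one PF filter command addresses 160 VLAN ids through a 20-byte bitmap: an id splits into a 160-wide block (vlan_offset), a byte within the bitmap, and a bit within that byte. A sketch of that decomposition — the split mirrors how the driver is expected to index vlan_offset and vlan_offset_bitmap, so treat it as illustrative:

#include <stdio.h>

#define VLAN_ID_OFFSET_STEP	160
#define VLAN_BYTE_SIZE		8

int main(void)
{
	int vlan_id = 1234;

	int block = vlan_id / VLAN_ID_OFFSET_STEP;	/* -> vlan_offset */
	int rem   = vlan_id % VLAN_ID_OFFSET_STEP;
	int byte  = rem / VLAN_BYTE_SIZE;		/* -> bitmap index */
	int bit   = rem % VLAN_BYTE_SIZE;		/* -> bit in that byte */

	printf("vlan %d -> block %d, byte %d, bit %d\n",
	       vlan_id, block, byte, bit);
	return 0;
}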
@@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
@@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd {
u8 rsv1[2];
__le16 def_vlan_tag1;
__le16 def_vlan_tag2;
- u8 vf_bitmap[8];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
@@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
u8 rsv1[6];
- u8 vf_bitmap[8];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
@@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -1090,9 +1104,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read);
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param);
-
enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
struct hclge_desc *desc);
enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index c063301d6060..d6c3952aba04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (prio_tc[i] >= num_tc) {
dev_err(&hdev->pdev->dev,
- "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+ "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
i, prio_tc[i], num_tc);
return -EINVAL;
}
@@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (ret)
return ret;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
+ int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hclge_tm_pfc_info_update(hdev);
- return hclge_pause_setup_hw(hdev, false);
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret) {
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
+ }
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
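
hclge_ieee_setpfc() now quiesces the client, reallocates packet buffers, and brings the client back up, restoring it even when the reallocation fails. A minimal sketch of that unwind, with hypothetical stand-ins for hclge_notify_client() and hclge_buffer_alloc():

#include <stdio.h>

/* Hypothetical stand-ins; only the control flow is the point here. */
static int client_down(void)      { puts("client down"); return 0; }
static int client_up(void)        { puts("client up");   return 0; }
static int buffer_alloc(int fail) { return fail ? -12 : 0; } /* -ENOMEM */

static int setpfc(int make_alloc_fail)
{
	int ret = client_down();

	if (ret)
		return ret;

	ret = buffer_alloc(make_alloc_fail);
	if (ret) {
		client_up();	/* bring the client back up even on failure */
		return ret;
	}
	return client_up();
}

int main(void)
{
	printf("ret = %d\n", setpfc(1));
	return 0;
}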
/* DCBX configuration */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index d0128d792717..112df34b3869 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -145,7 +145,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
+ buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src) {
dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
@@ -153,7 +153,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
if (ret) {
kfree(desc_src);
return;
@@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
- desc->data[i % entries_per_desc]);
+ le32_to_cpu(desc->data[i % entries_per_desc]));
dfx_message++;
}
@@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
if (ret)
return;
- dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return;
- dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return;
- dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
- dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
- dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
- dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
- dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
- dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);
+ dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
+ le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "tx_private_waterline: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
+ dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_CNT);
if (ret)
return;
- dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_STS_1);
if (ret)
return;
- dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
- dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
- dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
- dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
+ dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[5]));
}
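
These debugfs fixes wrap every __le16/__le32 descriptor field in le16_to_cpu()/le32_to_cpu() before printing, since the command-queue firmware fills descriptors little-endian regardless of host byte order. A portable sketch of what le32_to_cpu() must guarantee:

#include <stdint.h>
#include <stdio.h>

/* Sketch of le32_to_cpu(): assembling from bytes yields the right
 * value on either host endianness. */
static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	const uint8_t wire[4] = { 0x0e, 0x00, 0xc2, 0x80 };

	printf("0x%08x\n", le32_to_host(wire));	/* 0x80c2000e */
	return 0;
}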
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
- pg_shap_cfg_cmd->pg_shapping_para);
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
- pg_shap_cfg_cmd->pg_shapping_para);
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PORT_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
- port_shap_cfg_cmd->port_shapping_para);
+ le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
+ le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
+ le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
+ le32_to_cpu(desc.data[0]));
if (!hnae3_dev_dcb_supported(hdev)) {
dev_info(&hdev->pdev->dev,
@@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
- bp_to_qs_map_cmd->qs_bit_map);
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
return;
err_tm_pg_cmd_send:
@@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
- qs_to_pri_map->qs_id);
+ le16_to_cpu(qs_to_pri_map->qs_id));
dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
qs_to_pri_map->priority);
dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
@@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send;
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
+ le16_to_cpu(nq_to_qs_map->nq_id));
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
- nq_to_qs_map->qset_id);
+ le16_to_cpu(nq_to_qs_map->qset_id));
cmd = HCLGE_OPC_TM_PG_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send;
qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
+ le16_to_cpu(qs_weight->qs_id));
dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
cmd = HCLGE_OPC_TM_PRI_WEIGHT;
@@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
- shap_cfg_cmd->pri_shapping_para);
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
- shap_cfg_cmd->pri_shapping_para);
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
hclge_dbg_dump_tm_pg(hdev);
@@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
pause_param->pause_trans_gap);
dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
- pause_param->pause_trans_time);
+ le16_to_cpu(pause_param->pause_trans_time));
}
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
@@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
- tx_buf_cmd->tx_pkt_buff[i]);
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
- rx_buf_cmd->buf_num[i]);
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
- rx_buf_cmd->shared_buf);
+ le16_to_cpu(rx_buf_cmd->shared_buf));
cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
dev_info(&hdev->pdev->dev, "\n");
if (!hnae3_dev_dcb_supported(hdev)) {
@@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
- rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
@@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- rx_com_thrd->com_thrd[i].high,
- rx_com_thrd->com_thrd[i].low);
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
- rx_com_thrd->com_thrd[i].high,
- rx_com_thrd->com_thrd[i].low);
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
return;
err_qos_cmd_send:
@@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ le16_to_cpu(req0->index),
+ req0->mac_addr[0], req0->mac_addr[1],
req0->mac_addr[2], req0->mac_addr[3],
req0->mac_addr[4], req0->mac_addr[5]);
@@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
}
}
-static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
hdev->rst_stats.pf_rst_cnt);
@@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.hw_reset_done_cnt);
dev_info(&hdev->pdev->dev, "reset count: %u\n",
hdev->rst_stats.reset_cnt);
- dev_info(&hdev->pdev->dev, "reset count: %u\n",
- hdev->rst_stats.reset_cnt);
dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
@@ -961,6 +973,7 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
@@ -1110,6 +1123,82 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
}
}
+static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "qs%u failed to get tx_rate, ret=%d\n",
+ qsid, ret);
+ return;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ bs_s = hclge_tm_get_field(shapping_para, BS_S);
+
+ dev_info(&hdev->pdev->dev,
+ "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
+ qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+}
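
hclge_tm_get_field() (the driver's mask/shift helper) unpacks the IR/BS sub-fields from the 32-bit shaping word. A generic sketch of that kind of extraction — the masks and shifts below are illustrative only, not the real HCLGE_TM_SHAP_*_MSK/_LSH layout from hclge_tm.h:

#include <stdint.h>
#include <stdio.h>

#define FIELD_GET32(reg, mask, shift)	(((reg) & (mask)) >> (shift))

int main(void)
{
	uint32_t shapping_para = 0x00f0a3c1;	/* made-up register value */

	printf("ir_b: %u\n", FIELD_GET32(shapping_para, 0x000000ffU, 0));
	printf("ir_u: %u\n", FIELD_GET32(shapping_para, 0x00000f00U, 8));
	printf("ir_s: %u\n", FIELD_GET32(shapping_para, 0x0000f000U, 12));
	return 0;
}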
+
+static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+{
+ struct hnae3_knic_private_info *kinfo;
+ struct hclge_vport *vport;
+ int vport_id, i;
+
+ for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
+ vport = &hdev->vport[vport_id];
+ kinfo = &vport->nic.kinfo;
+
+ dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ u16 qsid = vport->qs_offset + i;
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+ }
+ }
+}
+
+static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+#define HCLGE_MAX_QSET_NUM 1024
+
+ u16 qsid;
+ int ret;
+
+ ret = kstrtou16(cmd_buf, 0, &qsid);
+ if (ret) {
+ hclge_dbg_dump_qs_shaper_all(hdev);
+ return;
+ }
+
+ if (qsid >= HCLGE_MAX_QSET_NUM) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
+ qsid);
+ return;
+ }
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+}
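
The new debugfs command treats a failed kstrtou16() as "no argument" and dumps every qset, while a successfully parsed id is range-checked against the 1024-qset ceiling. A userspace analogue of that parse-or-fallback:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_QSET_NUM 1024

static void dump_qs_shaper(const char *arg)
{
	char *end;
	unsigned long qsid;

	errno = 0;
	qsid = strtoul(arg, &end, 0);
	if (errno || end == arg || *end != '\0') {
		puts("no/bad qset id: dumping all qsets");
		return;
	}
	if (qsid >= MAX_QSET_NUM) {
		fprintf(stderr, "qsid(%lu) out of range [0-%d]\n",
			qsid, MAX_QSET_NUM - 1);
		return;
	}
	printf("dumping qset %lu\n", qsid);
}

int main(void)
{
	dump_qs_shaper("42");
	dump_qs_shaper("4242");
	dump_qs_shaper("all");
	return 0;
}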
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
@@ -1145,6 +1234,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
+ hclge_dbg_dump_qs_shaper(hdev,
+ &cmd_buf[sizeof("dump qs shaper")]);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 87dece0e745d..dc66b4e13377 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%d)\n", vf_id);
+ dev_err(dev, "invalid vf id(%u)\n", vf_id);
return;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 16f7d0e15b4f..7c7038676d6d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -55,6 +55,8 @@
#define HCLGE_LINK_STATUS_MS 10
+#define HCLGE_VF_VPORT_START_NUM 1
+
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -323,8 +325,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
.ethter_type = cpu_to_le16(ETH_P_LLDP),
- .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
- .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
.i_port_bitmap = 0x1,
},
};
@@ -1194,6 +1195,35 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+static u32 hclge_get_max_speed(u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ return HCLGE_MAC_SPEED_100G;
+
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ return HCLGE_MAC_SPEED_50G;
+
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ return HCLGE_MAC_SPEED_40G;
+
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ return HCLGE_MAC_SPEED_25G;
+
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ return HCLGE_MAC_SPEED_10G;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ return HCLGE_MAC_SPEED_1G;
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+ return HCLGE_MAC_SPEED_100M;
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+ return HCLGE_MAC_SPEED_10M;
+
+ return HCLGE_MAC_SPEED_1G;
+}
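
hclge_get_max_speed() returns the fastest advertised speed by testing capability bits in descending order, defaulting to 1G. The same priority scan, table-driven — bit positions here are illustrative, not the real HCLGE_SUPPORT_*_BIT values from hclge_main.h:

#include <stdint.h>
#include <stdio.h>

static const struct { uint32_t bit; uint32_t mbps; } speeds[] = {
	{ 1u << 7, 100000 }, { 1u << 6, 50000 }, { 1u << 5, 40000 },
	{ 1u << 4, 25000 },  { 1u << 3, 10000 }, { 1u << 2, 1000 },
	{ 1u << 1, 100 },    { 1u << 0, 10 },
};

static uint32_t max_speed(uint32_t ability)
{
	/* fastest capability wins */
	for (size_t i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		if (ability & speeds[i].bit)
			return speeds[i].mbps;
	return 1000;	/* same 1G fallback as the driver */
}

int main(void)
{
	printf("%u Mb/s\n", max_speed((1u << 4) | (1u << 2)));	/* 25000 */
	return 0;
}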
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1364,9 +1394,11 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_parse_link_mode(hdev, cfg.speed_ability);
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
if ((hdev->tc_max > HNAE3_MAX_TC) ||
(hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
hdev->tc_max);
hdev->tc_max = 1;
}
@@ -1626,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
- dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
hdev->num_tqps, num_vport);
return -EINVAL;
}
@@ -1649,6 +1681,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
@@ -2312,7 +2345,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
@@ -2744,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
mac->module_type = HNAE3_MODULE_TYPE_TP;
- if (mac->support_autoneg == true) {
+ if (mac->support_autoneg) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
linkmode_copy(mac->advertising, mac->supported);
} else {
@@ -2871,6 +2904,62 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+{
+ if (pci_num_vf(hdev->pdev) == 0) {
+ dev_err(&hdev->pdev->dev,
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
+ return NULL;
+ }
+
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
+ vf, pci_num_vf(hdev->pdev));
+ return NULL;
+ }
+
+ /* VF start from 1 in vport */
+ vf += HCLGE_VF_VPORT_START_NUM;
+ return &hdev->vport[vf];
+}
+
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ ivf->vf = vf;
+ ivf->linkstate = vport->vf_info.link_state;
+ ivf->spoofchk = vport->vf_info.spoofchk;
+ ivf->trusted = vport->vf_info.trusted;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+ return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ int link_state)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ vport->vf_info.link_state = link_state;
+
+ return 0;
+}
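
hclge_get_vf_vport() maps VF n to vport n + 1 because vport 0 is the PF itself, after checking that SR-IOV is enabled and the id is in range. A sketch of that mapping:

#include <stdio.h>

#define VF_VPORT_START_NUM 1	/* vport 0 belongs to the PF */

/* Hypothetical lookup mirroring hclge_get_vf_vport() */
static int vf_to_vport(int vf, int num_vfs)
{
	if (num_vfs == 0)
		return -1;	/* SR-IOV disabled */
	if (vf < 0 || vf >= num_vfs)
		return -1;	/* out of range */
	return vf + VF_VPORT_START_NUM;
}

int main(void)
{
	printf("vf 0 -> vport %d\n", vf_to_vport(0, 4));
	printf("vf 5 -> vport %d\n", vf_to_vport(5, 4));
	return 0;
}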
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -3191,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
dev_err(&hdev->pdev->dev,
- "flr wait timeout: %d\n", cnt);
+ "flr wait timeout: %u\n", cnt);
return -EBUSY;
}
@@ -3241,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
if (ret) {
dev_err(&hdev->pdev->dev,
- "set vf(%d) rst failed %d!\n",
+ "set vf(%u) rst failed %d!\n",
vport->vport_id, ret);
return ret;
}
@@ -3256,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_inform_reset_assert_to_vf(vport);
if (ret)
dev_warn(&hdev->pdev->dev,
- "inform reset to vf(%d) failed %d!\n",
+ "inform reset to vf(%u) failed %d!\n",
vport->vport_id, ret);
}
@@ -3569,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt++;
set_bit(hdev->reset_type, &hdev->reset_pending);
dev_info(&hdev->pdev->dev,
- "re-schedule reset task(%d)\n",
+ "re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
return true;
}
@@ -3580,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_reset_handshake(hdev, true);
dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+ hclge_dbg_dump_rst_info(hdev);
+
return false;
}
@@ -3779,12 +3871,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- } else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
- else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
hdev->reset_level = HNAE3_FUNC_RESET;
+ }
dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
hdev->reset_level);
@@ -3909,6 +4002,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
hclge_sync_vlan_filter(hdev);
+
if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
@@ -4415,7 +4509,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
*/
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
dev_err(&hdev->pdev->dev,
- "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
rss_size);
return -EINVAL;
}
@@ -4593,8 +4687,8 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
return ret;
}
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param)
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
+ struct hclge_promisc_param *param)
{
struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
@@ -4621,8 +4715,9 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
return ret;
}
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id)
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
+ bool en_uc, bool en_mc, bool en_bc,
+ int vport_id)
{
if (!param)
return;
@@ -4637,12 +4732,21 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_promisc_param param;
+
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
+ vport->vport_id);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
+}
+
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hclge_promisc_param param;
bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc enabled, vlan filter is
@@ -4652,9 +4756,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
if (handle->pdev->revision == 0x20)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
- vport->vport_id);
- return hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -4756,7 +4859,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "Unsupported flow director mode %d\n",
+ "Unsupported flow director mode %u\n",
hdev->fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
@@ -5086,7 +5189,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret) {
dev_err(&hdev->pdev->dev,
- "fd key_y config fail, loc=%d, ret=%d\n",
+ "fd key_y config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5095,7 +5198,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret)
dev_err(&hdev->pdev->dev,
- "fd key_x config fail, loc=%d, ret=%d\n",
+ "fd key_x config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5344,7 +5447,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
}
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
- "delete fail, rule %d is inexistent\n",
+ "delete fail, rule %u is inexistent\n",
location);
return -EINVAL;
}
@@ -5584,7 +5687,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%d) > max vf num (%d)\n",
+ "Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs);
return -EINVAL;
}
@@ -5594,7 +5697,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
- "Error: queue id (%d) > max tqp num (%d)\n",
+ "Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1);
return -EINVAL;
}
@@ -5653,7 +5756,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n", fs->location);
+ "Delete fail, rule %u is inexistent\n", fs->location);
return -ENOENT;
}
@@ -5730,7 +5833,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (ret) {
dev_warn(&hdev->pdev->dev,
- "Restore rule %d failed, remove it\n",
+ "Restore rule %u failed, remove it\n",
rule->location);
clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
@@ -6263,11 +6366,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+ /* read current config parameter */
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
- false);
+ true);
req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
req->func_id = cpu_to_le32(func_id);
- req->switch_param = switch_param;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
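The hunk above converts the switch-parameter update into a read-modify-write so the bits already programmed in hardware are preserved. The general pattern, sketched with the hclge command-queue helpers used here (opcode and req stand for whichever command is being updated):

    hclge_cmd_setup_basic_desc(&desc, opcode, true);        /* 1. read current */
    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret)
            return ret;

    hclge_cmd_reuse_desc(&desc, false);                     /* 2. reuse desc as a write */
    req->switch_param = (req->switch_param & param_mask) | switch_param;
    ret = hclge_cmd_send(&hdev->hw, &desc, 1);              /* 3. write back */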
@@ -6723,7 +6838,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -6975,7 +7090,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
if (allocated_size < hdev->wanted_umv_size)
dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %d, get %d\n",
+ "Alloc umv space failed, want %u, get %u\n",
hdev->wanted_umv_size, allocated_size);
mutex_init(&hdev->umv_mutex);
@@ -7143,7 +7258,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
/* check if we just hit the duplicate */
if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
vport->vport_id, addr);
return 0;
}
@@ -7324,7 +7439,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
if (uc_flag && mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport, mac_addr);
@@ -7396,7 +7511,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -7418,7 +7533,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
break;
default:
dev_err(&hdev->pdev->dev,
- "add mac ethertype failed for undefined, code=%d.\n",
+ "add mac ethertype failed for undefined, code=%u.\n",
resp_code);
return_status = -EIO;
}
@@ -7426,6 +7541,67 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
return return_status;
}
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+ u8 *mac_addr)
+{
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int i;
+
+ if (is_zero_ether_addr(mac_addr))
+ return false;
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+ req.egress_port = cpu_to_le16(egress_port);
+ hclge_prepare_mac_addr(&req, mac_addr, false);
+
+ if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+ return true;
+
+ vf_idx += HCLGE_VF_VPORT_START_NUM;
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+ if (i != vf_idx &&
+ ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+ return true;
+
+ return false;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%pM) is same as before, no change committed!\n",
+ mac_addr);
+ return 0;
+ }
+
+ if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+ dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+ mac_addr);
+ return -EEXIST;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+
+ return hclge_inform_reset_assert_to_vf(vport);
+}
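In practice this handler backs a host-side command such as ip link set <pf> vf <n> mac <addr>; note that, as the code above shows, a successful change finishes with hclge_inform_reset_assert_to_vf(), so the VF is reset to pick up its new address.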
+
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
@@ -7598,7 +7774,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan,
__be16 proto)
{
-#define HCLGE_MAX_VF_BYTES 16
+ struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
@@ -7607,10 +7783,18 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
int ret;
/* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter
+ * is unable and unnecessary to add new vlan id to vf vlan filter.
+ * If spoof check is enabled and the vf vlan table is full, it
+ * shouldn't add a new vlan, because tx packets with these vlan ids
+ * will be dropped.
*/
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
return 0;
+ }
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
@@ -7654,7 +7838,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
}
dev_err(&hdev->pdev->dev,
- "Add vf vlan filter fail, ret =%d.\n",
+ "Add vf vlan filter fail, ret =%u.\n",
req0->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
@@ -7670,7 +7854,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return 0;
dev_err(&hdev->pdev->dev,
- "Kill vf vlan filter fail, ret =%d.\n",
+ "Kill vf vlan filter fail, ret =%u.\n",
req0->resp_code);
}
@@ -7689,9 +7873,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
- vlan_offset_160 = vlan_id / 160;
- vlan_offset_byte = (vlan_id % 160) / 8;
- vlan_offset_byte_val = 1 << (vlan_id % 8);
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+ HCLGE_VLAN_BYTE_SIZE;
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160;
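A worked example of the offset math above, assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and HCLGE_VLAN_BYTE_SIZE is 8 (matching the literals they replace):

    /* vlan_id = 1234:
     *   vlan_offset_160      = 1234 / 160       = 7
     *   vlan_offset_byte     = (1234 % 160) / 8 = 14
     *   vlan_offset_byte_val = 1 << (1234 % 8)  = 1 << 2 = 0x04
     */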
@@ -7719,7 +7904,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set %d vport vlan filter config fail, ret =%d.\n",
+ "Set %u vport vlan filter config fail, ret =%d.\n",
vport_id, ret);
return ret;
}
@@ -7731,7 +7916,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %d is already in vlan %d\n",
+ "Add port vlan failed, vport %u is already in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -7739,7 +7924,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill &&
!test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %d is not in vlan %d\n",
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -8107,12 +8292,15 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
}
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- if (vlan->hd_tbl_status)
- hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id,
- false);
+ int ret;
+
+ if (!vlan->hd_tbl_status)
+ continue;
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
}
}
@@ -8392,6 +8580,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret;
+ /* HW supports 2 layers of vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME)
@@ -8807,16 +8996,16 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
- dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
- dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
- dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
@@ -8832,10 +9021,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt;
+ int rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
- rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic);
if (ret)
return ret;
@@ -8935,7 +9123,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
-
hdev->nic_client = client;
vport->nic.client = client;
ret = hclge_init_nic_client_instance(ae_dev, vport);
@@ -9134,7 +9321,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%d) rst failed %d!\n",
+ "clear vf(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
@@ -9156,6 +9343,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->reset_type = HNAE3_NONE_RESET;
hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+
+ /* HW supports 2 layers of vlan */
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
@@ -9354,6 +9543,219 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_config_switch_param(hdev, vf, enable,
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
+ enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+ int ret;
+
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+ return ret;
+ }
+
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+
+ return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+ bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_spoofchk = enable ? 1 : 0;
+ int ret;
+
+ if (hdev->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.spoofchk == new_spoofchk)
+ return 0;
+
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+ else if (enable && hclge_is_umv_space_full(vport))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+ if (ret)
+ return ret;
+
+ vport->vf_info.spoofchk = new_spoofchk;
+ return 0;
+}
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ int i;
+
+ if (hdev->pdev->revision == 0x20)
+ return 0;
+
+ /* resume the vf spoof check state after reset */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+ vport->vf_info.spoofchk);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_trusted = enable ? 1 : 0;
+ bool en_bc_pmc;
+ int ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.trusted == new_trusted)
+ return 0;
+
+ /* Disable promisc mode for VF if it is not trusted any more. */
+ if (!enable && vport->vf_info.promisc_enable) {
+ en_bc_pmc = hdev->pdev->revision != 0x20;
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
+ en_bc_pmc);
+ if (ret)
+ return ret;
+ vport->vf_info.promisc_enable = 0;
+ hclge_inform_vf_promisc_info(vport);
+ }
+
+ vport->vf_info.trusted = new_trusted;
+
+ return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+ int ret;
+ int vf;
+
+ /* reset vf rate to default value */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ struct hclge_vport *vport = &hdev->vport[vf];
+
+ vport->vf_info.max_tx_rate = 0;
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to reset to default, ret=%d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ if (min_tx_rate != 0 ||
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ dev_err(&hdev->pdev->dev,
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ return 0;
+
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport->vf_info.max_tx_rate = max_tx_rate;
+
+ return 0;
+}
+
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* resume the vf max_tx_rate after reset */
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ /* zero means max rate; after reset, the firmware has already set
+ * it to max rate, so just continue.
+ */
+ if (!vport->vf_info.max_tx_rate)
+ continue;
+
+ ret = hclge_set_vf_rate(handle, vf, 0,
+ vport->vf_info.max_tx_rate, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
+ vf, vport->vf_info.max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
@@ -9431,6 +9833,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ /* Log and clear the hw errors those already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
*/
@@ -9453,6 +9858,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
}
hclge_reset_vport_state(hdev);
+ ret = hclge_reset_vport_spoofchk(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_resume_vf_rate(hdev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -9465,6 +9877,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_reset_vf_rate(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
@@ -9529,8 +9942,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
- int cur_rss_size = kinfo->rss_size;
- int cur_tqps = kinfo->num_tqps;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
@@ -9584,7 +9997,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
out:
if (!ret)
dev_info(&hdev->pdev->dev,
- "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
cur_tqps, kinfo->rss_size * kinfo->num_tc);
@@ -10187,6 +10600,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
.restore_vlan_table = hclge_restore_vlan_table,
+ .get_vf_config = hclge_get_vf_config,
+ .set_vf_link_state = hclge_set_vf_link_state,
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
+ .set_vf_trust = hclge_set_vf_trust,
+ .set_vf_rate = hclge_set_vf_rate,
+ .set_vf_mac = hclge_set_vf_mac,
};
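With get_vf_config, set_vf_link_state, set_vf_spoofchk, set_vf_trust, set_vf_rate and set_vf_mac registered here, the enet layer can expose the usual host-side VF controls, e.g. ip link set <pf> vf 0 spoofchk on, trust on, state auto or max_tx_rate 1000.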
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 59b824347ba4..ebb4c6e9aed3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -141,7 +141,6 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
-#define HCLGE_VF_NUM_PER_BYTE 8
enum HLCGE_PORT_TYPE {
HOST_PORT,
@@ -166,7 +165,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
-#define HCLGE_RESET_INT_M GENMASK(2, 0)
+#define HCLGE_RESET_INT_M GENMASK(7, 5)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -226,8 +225,6 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_OTHER,
};
-#define HCLGE_MPF_ENBALE 1
-
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
@@ -258,6 +255,7 @@ struct hclge_mac {
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u32 speed;
+ u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
u32 fec_mode; /* active fec mode */
@@ -655,7 +653,6 @@ struct hclge_rst_stats {
u32 hw_reset_done_cnt; /* the number of HW reset has completed */
u32 pf_rst_cnt; /* the number of PF reset */
u32 flr_rst_cnt; /* the number of FLR */
- u32 core_rst_cnt; /* the number of CORE reset */
u32 global_rst_cnt; /* the number of GLOBAL */
u32 imp_rst_cnt; /* the number of IMP reset */
u32 reset_cnt; /* the number of reset */
@@ -886,6 +883,15 @@ struct hclge_port_base_vlan_config {
struct hclge_vlan_info vlan_info;
};
+struct hclge_vf_info {
+ int link_state;
+ u8 mac[ETH_ALEN];
+ u32 spoofchk;
+ u32 max_tx_rate;
+ u32 trusted;
+ u16 promisc_enable;
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -917,15 +923,15 @@ struct hclge_vport {
unsigned long state;
unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
+ struct hclge_vf_info vf_info;
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
};
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id);
-
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -994,4 +1000,6 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f5da28a60d00..0b433ebe6a2d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
- "PF fail to gen resp to VF len %d exceeds max len %d\n",
+ "PF fail to gen resp to VF len %u exceeds max len %u\n",
resp_data_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
/* If resp_data_len is too long, set the value to max length
@@ -205,12 +205,38 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_bc = req->msg[1] ? true : false;
- struct hclge_promisc_param param;
+#define HCLGE_MBX_BC_INDEX 1
+#define HCLGE_MBX_UC_INDEX 2
+#define HCLGE_MBX_MC_INDEX 3
- /* vf is not allowed to enable unicast/multicast broadcast */
- hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
- return hclge_cmd_set_promisc_mode(vport->back, &param);
+ bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
+ bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
+ bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+ int ret;
+
+ if (!vport->vf_info.trusted) {
+ en_uc = false;
+ en_mc = false;
+ }
+
+ ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
+ if (req->mbx_need_resp)
+ hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
+
+ vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
+
+ return ret;
+}
+
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
+{
+ u8 dest_vfid = (u8)vport->vport_id;
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
+
+ hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
}
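The VF-side consumer of this HCLGE_MBX_PUSH_PROMISC_INFO push is hclgevf_parse_promisc_info(), added to hclgevf_mbx.c later in this patch.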
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
@@ -223,6 +249,20 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+ /* If VF MAC has been configured by the host then it
+ * cannot be overridden by the MAC specified by the VM.
+ */
+ if (!is_zero_ether_addr(vport->vf_info.mac) &&
+ !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ status = -EPERM;
+ goto out;
+ }
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ status = -EINVAL;
+ goto out;
+ }
+
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
if (status) {
@@ -245,11 +285,12 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_UC);
} else {
dev_err(&hdev->pdev->dev,
- "failed to set unicast mac addr, unknown subcode %d\n",
+ "failed to set unicast mac addr, unknown subcode %u\n",
mbx_req->msg[1]);
return -EIO;
}
+out:
if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
@@ -278,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_MC);
} else {
dev_err(&hdev->pdev->dev,
- "failed to set mcast mac addr, unknown subcode %d\n",
+ "failed to set mcast mac addr, unknown subcode %u\n",
mbx_req->msg[1]);
return -EIO;
}
@@ -324,6 +365,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
+ if (mbx_req->mbx_need_resp)
+ return hclge_gen_resp_to_vf(vport, mbx_req, status,
+ NULL, 0);
} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = msg_cmd->is_kill ? true : false;
@@ -398,6 +442,13 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport,
HCLGE_TQPS_RSS_INFO_LEN);
}
+static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
+ ETH_ALEN);
+}
+
static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -428,6 +479,9 @@ static int hclge_get_vf_media_type(struct hclge_vport *vport,
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+#define HCLGE_VF_LINK_STATE_UP 1U
+#define HCLGE_VF_LINK_STATE_DOWN 0U
+
struct hclge_dev *hdev = vport->back;
u16 link_status;
u8 msg_data[8];
@@ -435,7 +489,19 @@ static int hclge_get_link_info(struct hclge_vport *vport,
u16 duplex;
/* mac.link can only be 0 or 1 */
- link_status = (u16)hdev->hw.mac.link;
+ switch (vport->vf_info.link_state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link_status = HCLGE_VF_LINK_STATE_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link_status = HCLGE_VF_LINK_STATE_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ default:
+ link_status = (u16)hdev->hw.mac.link;
+ break;
+ }
+
duplex = hdev->hw.mac.duplex;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
@@ -489,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
int ret;
- dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
+ dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
vport->vport_id);
ret = hclge_func_reset_cmd(hdev, vport->vport_id);
@@ -524,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ sizeof(resp_data));
}
static int hclge_get_rss_key(struct hclge_vport *vport,
@@ -614,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %d\n",
+ "dropped invalid mailbox message, code = %u\n",
req->msg[0]);
/* dropping/not processing this invalid message */
@@ -749,12 +816,19 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_PUSH_LINK_STATUS:
hclge_handle_link_change_event(hdev, req);
break;
+ case HCLGE_MBX_GET_MAC_ADDR:
+ ret = hclge_get_vf_mac_addr(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get MAC for VF\n",
+ ret);
+ break;
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
break;
default:
dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %d\n",
+ "un-supported mailbox message, code = %u\n",
req->msg[0]);
break;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index dc4dfd4602ab..696c5ae922e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
"no phy device is connected to mdio bus\n");
return 0;
} else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
- dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
hdev->hw.mac.phy_addr);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 62399cc1c5a6..fbc39a2480d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -46,7 +46,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
- const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
+ static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
6 * 32, /* Priority group level */
6 * 8, /* Port level */
@@ -511,6 +511,49 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u8 ir_b, ir_u, ir_s;
+ u32 shaper_para;
+ int ret, i;
+
+ if (!max_tx_rate)
+ max_tx_rate = HCLGE_ETHER_MAX_RATE;
+
+ ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+ &ir_b, &ir_u, &ir_s);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+ false);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+ shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ vport->vport_id, shap_cfg_cmd->qs_id,
+ max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
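A minimal usage sketch for the new qset shaper helper; rates are in Mbit/s, as implied by the comparison against hdev->hw.mac.max_speed in hclge_vf_rate_param_check(), and the vport indexing follows hclge_get_vf_vport() above:

    int vf = 0; /* first VF */
    struct hclge_vport *vport = &hdev->vport[vf + HCLGE_VF_VPORT_START_NUM];
    int ret;

    /* cap the VF's TX rate at 1000 Mbit/s */
    ret = hclge_tm_qs_shaper_cfg(vport, 1000);

    /* a max_tx_rate of 0 restores the unshaped default,
     * HCLGE_ETHER_MAX_RATE
     */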
+
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -532,7 +575,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
kinfo->rss_size, kinfo->req_rss_size);
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 260f22d19d81..45bcb67f90fd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -96,6 +96,12 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para;
};
+struct hclge_qs_shapping_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ __le32 qs_shapping_para;
+};
+
#define HCLGE_BP_GRP_NUM 32
#define HCLGE_BP_SUB_GRP_ID_S 0
#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
@@ -154,4 +160,6 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5d1cc5d1b6e..af2245e3bb95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
rmb(); /* Make sure head is ready before touch any data */
if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
@@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
u32 reg_val;
if (ring->flag == HCLGEVF_TYPE_CSQ) {
- reg_val = (u32)ring->desc_dma_addr;
+ reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
@@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
} else {
- reg_val = (u32)ring->desc_dma_addr;
+ reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7d7e712691b9..25d78a5aaa34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1113,6 +1113,7 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc,
bool en_bc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
@@ -1120,10 +1121,11 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
int ret;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
-
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
req->msg[1] = en_bc_pmc ? 1 : 0;
+ req->msg[2] = en_uc_pmc ? 1 : 0;
+ req->msg[3] = en_mc_pmc ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -1133,9 +1135,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
return ret;
}
-static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
- return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct pci_dev *pdev = hdev->pdev;
+ bool en_bc_pmc;
+
+ en_bc_pmc = pdev->revision != 0x20;
+
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
@@ -1174,11 +1184,37 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
+{
+ u8 host_mac[ETH_ALEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
+ true, host_mac, ETH_ALEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "fail to get VF MAC from host %d", status);
+ return status;
+ }
+
+ ether_addr_copy(p, host_mac);
+
+ return 0;
+}
+
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 host_mac_addr[ETH_ALEN];
- ether_addr_copy(p, hdev->hw.mac.mac_addr);
+ if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+ return;
+
+ hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+ if (hdev->has_pf_mac)
+ ether_addr_copy(p, host_mac_addr);
+ else
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
@@ -1275,7 +1311,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
memcpy(&msg_data[3], &proto, sizeof(proto));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+ HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
/* when removing the hw vlan filter fails, record the vlan id,
* and try to remove it from hw later, to be consistent
@@ -1513,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
return ret;
}
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
+ hdev->rst_stats.vf_func_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
+ hdev->rst_stats.vf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+ hdev->rst_stats.rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
+ hdev->rst_stats.rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+ hdev->rst_stats.rst_fail_cnt);
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
+ dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
/* recover handshake status with IMP when reset fail */
hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
- dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
@@ -1527,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
+ } else {
+ hclgevf_dump_rst_info(hdev);
}
}
@@ -1748,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t)
static void hclgevf_reset_service_task(struct work_struct *work)
{
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
+
struct hclgevf_dev *hdev =
container_of(work, struct hclgevf_dev, rst_service_task);
int ret;
@@ -1800,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* We cannot do much for 2. but to check first we can try reset
* our PCIe + stack and see if it alleviates the problem.
*/
- if (hdev->reset_attempts > 3) {
+ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
@@ -2103,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
if (ret)
return ret;
-
}
/* Initialize RSS indirect table */
@@ -2272,7 +2338,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
@@ -2348,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "PF media type of this VF: %d\n",
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %u\n",
hdev->hw.mac.media_type);
dev_info(dev, "VF info end.\n");
@@ -2648,12 +2714,6 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
- if (pdev->revision >= 0x21) {
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- return ret;
- }
-
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2728,17 +2788,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_config;
- /* vf is not allowed to enable unicast/multicast promisc mode.
- * For revision 0x20, default to disable broadcast promisc mode,
- * firmware makes sure broadcast packets can be accepted.
- * For revision 0x21, default to enable broadcast promisc mode.
- */
- if (pdev->revision >= 0x21) {
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- goto err_config;
- }
-
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -3152,6 +3201,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_global_queue_id = hclgevf_get_qid_global,
.set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2b8d6bc6d224..2f4c81bf4169 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -150,8 +150,6 @@ enum hclgevf_states {
HCLGEVF_STATE_CMD_DISABLE,
};
-#define HCLGEVF_MPF_ENBALE 1
-
struct hclgevf_mac {
u8 media_type;
u8 module_type;
@@ -266,6 +264,7 @@ struct hclgevf_dev {
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
u8 hw_tc_map;
+ u8 has_pf_mac;
u16 num_msi;
u16 num_msi_left;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a108191c9e50..7cbd715d5e7a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
- "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ "VF mbx response len(=%u) exceeds maximum(=%u)\n",
resp_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
return -EINVAL;
@@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
- "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
@@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
- "VF could not match resp code(code0=%d,code1=%d), %d\n",
+ "VF could not match resp code(code0=%u,code1=%u), %d\n",
code0, code1, mbx_resp->resp_status);
dev_err(&hdev->pdev->dev,
- "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+ "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
r_code0, r_code1);
return -EIO;
}
@@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %d\n",
+ "dropped invalid mailbox message, code = %u\n",
req->msg[0]);
/* dropping/not processing this invalid message */
@@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_PF_VF_RESP:
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
- "VF mbx resp flag not clear(%d)\n",
+ "VF mbx resp flag not clear(%u)\n",
req->msg[1]);
resp->received_resp = true;
@@ -205,6 +205,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HCLGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
/* set this mbx event as pending. This is required as we
* might lose an interrupt event when the mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -218,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
if (atomic_read(&hdev->arq.count) >=
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
- "Async Q full, dropping msg(%d)\n",
+ "Async Q full, dropping msg(%u)\n",
req->msg[1]);
break;
}
@@ -235,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "VF received unsupported(%d) mbx msg from PF\n",
+ "VF received unsupported(%u) mbx msg from PF\n",
req->msg[0]);
break;
}
@@ -248,6 +249,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq->next_to_use);
}
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+ u16 promisc_info)
+{
+ if (!promisc_info)
+ dev_info(&hdev->pdev->dev,
+ "Promisc mode is closed by host for being untrusted.\n");
+}
+
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
@@ -313,9 +322,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
break;
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+ hclgevf_parse_promisc_info(hdev, msg_q[1]);
+ break;
default:
dev_err(&hdev->pdev->dev,
- "fetched unsupported(%d) message from arq\n",
+ "fetched unsupported(%u) message from arq\n",
msg_q[0]);
break;
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 6e70658d50c4..db45373ea31c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -670,13 +670,10 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
static int ehea_is_hugepage(unsigned long pfn)
{
- int page_order;
-
if (pfn & EHEA_HUGEPAGE_PFN_MASK)
return 0;
- page_order = compound_order(pfn_to_page(pfn));
- if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+ if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
return 0;
return 1;
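page_shift() is the mm helper equal to PAGE_SHIFT + compound_order(page), so the comparison keeps its old meaning while dropping the local variable.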
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9e43c9ace9c2..2e40425d8a34 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2849,6 +2849,7 @@ static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
+ int err;
/* Read config from device-tree */
if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2897,8 +2898,8 @@ static int emac_init_config(struct emac_instance *dev)
dev->mal_burst_size = 256;
/* PHY mode needs some decoding */
- dev->phy_mode = of_get_phy_mode(np);
- if (dev->phy_mode < 0)
+ err = of_get_phy_mode(np, &dev->phy_mode);
+ if (err)
dev->phy_mode = PHY_INTERFACE_MODE_NA;
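This follows the of_get_phy_mode() API change in which the mode is returned through a pointer and the return value becomes an error code; for reference, the new shape is:

    int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);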
/* Check EMAC version */
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index e9cda024cbf6..89a1b0fea158 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -171,7 +171,7 @@ struct emac_instance {
struct mal_commac commac;
/* PHY infos */
- int phy_mode;
+ phy_interface_t phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index b9e821de2ac6..57a25c7a9e70 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -78,7 +78,8 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int zmii_attach(struct platform_device *ofdev, int input, int *mode)
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode)
{
struct zmii_instance *dev = platform_get_drvdata(ofdev);
struct zmii_regs __iomem *p = dev->base;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 41d46e9b87ba..65daedc78594 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -50,7 +50,8 @@ struct zmii_instance {
int zmii_init(void);
void zmii_exit(void);
-int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode);
void zmii_detach(struct platform_device *ofdev, int input);
void zmii_get_mdio(struct platform_device *ofdev, int input);
void zmii_put_mdio(struct platform_device *ofdev, int input);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5be4ebd8437..84121aab7ff1 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1011,6 +1011,29 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
return 0;
}
+static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ethhdr *ether_header;
+ int ret = 0;
+
+ ether_header = eth_hdr(skb);
+
+ if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
+ netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
+ netdev->stats.tx_dropped++;
+ ret = -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
+ netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
+ netdev->stats.tx_dropped++;
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -1022,6 +1045,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
dma_addr_t dma_addr;
unsigned long mss = 0;
+ if (ibmveth_is_packet_unsupported(skb, netdev))
+ goto out;
+
/* veth doesn't handle frag_list, so linearize the skb.
* When GRO is enabled SKB's can have frag_list.
*/
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f59d9a8e35e2..c90080781924 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
+/**
+ * ibmvnic_wait_for_completion - Check device state and wait for completion
+ * @adapter: private device data
+ * @comp_done: completion structure to wait for
+ * @timeout: time to wait in milliseconds
+ *
+ * Wait for a completion signal or until the timeout limit is reached
+ * while checking that the device is still active.
+ */
+static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
+ struct completion *comp_done,
+ unsigned long timeout)
+{
+ struct net_device *netdev;
+ unsigned long div_timeout;
+ u8 retry;
+
+ netdev = adapter->netdev;
+ retry = 5;
+ div_timeout = msecs_to_jiffies(timeout / retry);
+ while (true) {
+ if (!adapter->crq.active) {
+ netdev_err(netdev, "Device down!\n");
+ return -ENODEV;
+ }
+ if (!retry--)
+ break;
+ if (wait_for_completion_timeout(comp_done, div_timeout))
+ return 0;
+ }
+ netdev_err(netdev, "Operation timed out.\n");
+ return -ETIMEDOUT;
+}
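For reference, a minimal sketch of the calling pattern this helper enables, assembled from the call sites updated below (CRQ setup elided; 10000 ms is the timeout those callers pass):

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	mutex_unlock(&adapter->fw_lock);
	if (rc)
		return rc;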
+
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = adapter->map_id;
adapter->map_id++;
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
rc = send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
if (rc) {
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev,
+ "Long term map request aborted or timed out,rc = %d\n",
+ rc);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
return rc;
}
- wait_for_completion(&adapter->fw_done);
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
return -1;
}
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb)
{
+ struct device *dev = &adapter->vdev->dev;
int rc;
memset(ltb->buff, 0, ltb->size);
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+
+ reinit_completion(&adapter->fw_done);
rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_info(dev,
+ "Reset failed, long term map request timed out or aborted\n");
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
if (adapter->fw_done_rc) {
- dev_info(&adapter->vdev->dev,
+ dev_info(dev,
"Reset failed, attempting to free and reallocate buffer\n");
free_long_term_buff(adapter, ltb);
+ mutex_unlock(&adapter->fw_lock);
return alloc_long_term_buff(adapter, ltb, ltb->size);
}
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (adapter->vpd->buff)
len = adapter->vpd->len;
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+ mutex_unlock(&adapter->fw_lock);
if (!adapter->vpd->len)
return -ENODATA;
@@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
return -ENOMEM;
}
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
reinit_completion(&adapter->fw_done);
+
crq.get_vpd.first = IBMVNIC_CRQ_CMD;
crq.get_vpd.cmd = GET_VPD;
crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
@@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (rc) {
kfree(adapter->vpd->buff);
adapter->vpd->buff = NULL;
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
+ kfree(adapter->vpd->buff);
+ adapter->vpd->buff = NULL;
+ mutex_unlock(&adapter->fw_lock);
return rc;
}
- wait_for_completion(&adapter->fw_done);
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
rc = ibmvnic_send_crq(adapter, &crq);
if (rc) {
rc = -EIO;
+ mutex_unlock(&adapter->fw_lock);
goto err;
}
- wait_for_completion(&adapter->fw_done);
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- if (adapter->fw_done_rc) {
+ if (rc || adapter->fw_done_rc) {
rc = -EIO;
+ mutex_unlock(&adapter->fw_lock);
goto err;
}
-
+ mutex_unlock(&adapter->fw_lock);
return 0;
err:
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
@@ -2316,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
- init_completion(&adapter->reset_done);
+ reinit_completion(&adapter->reset_done);
adapter->wait_for_reset = true;
rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
- if (rc)
- return rc;
- wait_for_completion(&adapter->reset_done);
+
+ if (rc) {
+ ret = rc;
+ goto out;
+ }
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
+ if (rc) {
+ ret = -ENODEV;
+ goto out;
+ }
ret = 0;
if (adapter->reset_done_rc) {
@@ -2332,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->desired.rx_entries = adapter->fallback.rx_entries;
adapter->desired.tx_entries = adapter->fallback.tx_entries;
- init_completion(&adapter->reset_done);
+ reinit_completion(&adapter->reset_done);
adapter->wait_for_reset = true;
rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
- if (rc)
- return ret;
- wait_for_completion(&adapter->reset_done);
+ if (rc) {
+ ret = rc;
+ goto out;
+ }
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
+ 60000);
+ if (rc) {
+ ret = -ENODEV;
+ goto out;
+ }
}
+out:
adapter->wait_for_reset = false;
return ret;
@@ -2603,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
cpu_to_be32(sizeof(struct ibmvnic_statistics));
/* Wait for data to be written */
- init_completion(&adapter->stats_done);
+ reinit_completion(&adapter->stats_done);
rc = ibmvnic_send_crq(adapter, &crq);
if (rc)
return;
- wait_for_completion(&adapter->stats_done);
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
+ if (rc)
+ return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
@@ -2878,10 +2988,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- struct irq_desc *desc = irq_to_desc(scrq->irq);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ u64 val = (0xff000000) | scrq->hw_irq;
- chip->irq_eoi(&desc->irq_data);
+ rc = plpar_hcall_norets(H_EOI, val);
+ /* H_EOI would fail with rc = H_FUNCTION when running
+ * in XIVE mode, which is expected and not an error.
+ */
+ if (rc && (rc != H_FUNCTION))
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -4403,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
memset(&crq, 0, sizeof(crq));
crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
- init_completion(&adapter->fw_done);
+
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ mutex_unlock(&adapter->fw_lock);
return adapter->fw_done_rc ? -EIO : 0;
}
@@ -4500,6 +4628,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_XPORT_EVENT:
netif_carrier_off(netdev);
adapter->crq.active = false;
+ /* terminate any thread waiting for a response
+ * from the device
+ */
+ if (!completion_done(&adapter->fw_done)) {
+ adapter->fw_done_rc = -EIO;
+ complete(&adapter->fw_done);
+ }
+ if (!completion_done(&adapter->stats_done))
+ complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
adapter->force_reset_recovery = true;
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
@@ -4954,7 +5091,11 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
__ibmvnic_delayed_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ mutex_init(&adapter->fw_lock);
init_completion(&adapter->init_done);
+ init_completion(&adapter->fw_done);
+ init_completion(&adapter->reset_done);
+ init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
do {
@@ -5012,6 +5153,7 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+ mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
return rc;
@@ -5036,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
rtnl_unlock();
+ mutex_destroy(&adapter->fw_lock);
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index ebc39248b334..60eccaf91b12 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1026,6 +1026,8 @@ struct ibmvnic_adapter {
int init_done_rc;
struct completion fw_done;
+ /* Used for serialization of device commands */
+ struct mutex fw_lock;
int fw_done_rc;
struct completion reset_done;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 86493fea56e4..416da9619928 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3565,8 +3565,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- pr_info("%s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index de8c5818a305..adce7e319b9e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -894,8 +894,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- /* fall through */
case e1000_pch_cnp:
+ /* fall through */
+ case e1000_pch_tgp:
mask |= BIT(18);
break;
default:
@@ -1559,6 +1560,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index eff75bd8a8f0..f556163481cb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -86,6 +86,17 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0
#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1
#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2
+#define E1000_DEV_ID_PCH_CMP_I219_LM10 0x0D4E
+#define E1000_DEV_ID_PCH_CMP_I219_V10 0x0D4F
+#define E1000_DEV_ID_PCH_CMP_I219_LM11 0x0D4C
+#define E1000_DEV_ID_PCH_CMP_I219_V11 0x0D4D
+#define E1000_DEV_ID_PCH_CMP_I219_LM12 0x0D53
+#define E1000_DEV_ID_PCH_CMP_I219_V12 0x0D55
+#define E1000_DEV_ID_PCH_TGP_I219_LM13 0x15FB
+#define E1000_DEV_ID_PCH_TGP_I219_V13 0x15FC
+#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
+#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
+#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_REVISION_4 4
@@ -109,6 +120,7 @@ enum e1000_mac_type {
e1000_pch_lpt,
e1000_pch_spt,
e1000_pch_cnp,
+ e1000_pch_tgp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a1fab77b2096..b4135c50e905 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -316,6 +316,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -458,6 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -700,6 +702,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1638,6 +1641,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2090,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3127,6 +3132,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
switch (hw->mac.type) {
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4070,6 +4076,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d7d56e42a6aa..fe7997c18a10 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3538,6 +3538,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
break;
case e1000_pch_cnp:
+ case e1000_pch_tgp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4049,6 +4050,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ /* fall-through */
+ case e1000_pch_tgp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4715,12 +4718,12 @@ int e1000e_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (netif_device_present(netdev)) {
e1000e_down(adapter, true);
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", netdev->name);
}
napi_disable(&adapter->napi);
@@ -6028,7 +6031,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
usleep_range(1000, 1100);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
adapter->max_frame_size = max_frame;
- e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
pm_runtime_get_sync(netdev->dev.parent);
@@ -6294,14 +6298,188 @@ fl_out:
pm_runtime_put_sync(netdev->dev.parent);
}
+#ifdef CONFIG_PM_SLEEP
+/* S0ix implementation */
+static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mac_data;
+ u16 phy_data;
+
+ /* Disable the periodic inband message,
+ * don't request PCIe clock in K1 page770_17[10:9] = 10b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
+ phy_data |= BIT(10);
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
+
+ /* Make sure we don't exit K1 every time a new packet arrives
+ * 772_29[5] = 1 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data |= BIT(5);
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to SMBus
+ * Force the SMBus in PHY page769_23[0] = 1
+ * Force the SMBus in MAC CTRL_EXT[11] = 1
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+
+ /* DFT control: PHY bit: page769_20[0] = 1
+ * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
+ */
+ e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
+ phy_data |= BIT(0);
+ e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+
+ mac_data = er32(EXTCNF_CTRL);
+ mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, mac_data);
+
+ /* Check MAC Tx/Rx packet buffer pointers.
+ * Reset MAC Tx/Rx packet buffer pointers to suppress any
+ * pending traffic indication that would prevent power gating.
+ */
+ mac_data = er32(TDFH);
+ if (mac_data)
+ ew32(TDFH, 0);
+ mac_data = er32(TDFT);
+ if (mac_data)
+ ew32(TDFT, 0);
+ mac_data = er32(TDFHS);
+ if (mac_data)
+ ew32(TDFHS, 0);
+ mac_data = er32(TDFTS);
+ if (mac_data)
+ ew32(TDFTS, 0);
+ mac_data = er32(TDFPC);
+ if (mac_data)
+ ew32(TDFPC, 0);
+ mac_data = er32(RDFH);
+ if (mac_data)
+ ew32(RDFH, 0);
+ mac_data = er32(RDFT);
+ if (mac_data)
+ ew32(RDFT, 0);
+ mac_data = er32(RDFHS);
+ if (mac_data)
+ ew32(RDFHS, 0);
+ mac_data = er32(RDFTS);
+ if (mac_data)
+ ew32(RDFTS, 0);
+ mac_data = er32(RDFPC);
+ if (mac_data)
+ ew32(RDFPC, 0);
+
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(31);
+ mac_data &= ~BIT(0);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Dynamic Power Gating Enable */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= BIT(3);
+ ew32(CTRL_EXT, mac_data);
+
+ /* Enable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+}
+
+static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mac_data;
+ u16 phy_data;
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Enable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(0);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable Dynamic Power Gating */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFFFFFF7;
+ ew32(CTRL_EXT, mac_data);
+
+ /* Disable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFF7FFFF;
+ ew32(CTRL_EXT, mac_data);
+
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
+
+ /* Enable the periodic inband message,
+ * Request PCIe clock in K1 page770_17[10:9] = 01b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= 0xFBFF;
+ phy_data |= HV_PM_CTRL_K1_CLK_REQ;
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
+
+ /* Restore the configuration
+ * 772_29[5] = 0 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data &= 0xFFDF;
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to Kumeran
+ * Unforce the SMBus in PHY page769_23[0] = 0
+ * Unforce the SMBus in MAC CTRL_EXT[11] = 0
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+}
+#endif /* CONFIG_PM_SLEEP */
+
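These flows are consumed by the PM hooks later in this patch; condensed, the gating looks like this (adapter and hw as in those hunks):

	/* in e1000e_pm_suspend(), after the device has been quiesced */
	if (hw->mac.type >= e1000_pch_cnp)
		e1000e_s0ix_entry_flow(adapter);

	/* in e1000e_pm_resume(), before __e1000_resume() */
	if (hw->mac.type >= e1000_pch_cnp)
		e1000e_s0ix_exit_flow(adapter);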
static int e1000e_pm_freeze(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool present;
+
+ rtnl_lock();
+ present = netif_device_present(netdev);
netif_device_detach(netdev);
- if (netif_running(netdev)) {
+ if (present && netif_running(netdev)) {
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
@@ -6313,6 +6491,8 @@ static int e1000e_pm_freeze(struct device *dev)
e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
+ rtnl_unlock();
+
e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
@@ -6560,6 +6740,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state, 1);
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ e1000e_set_interrupt_capability(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ rc = e1000_request_irq(adapter);
+ if (rc)
+ goto err_irq;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+err_irq:
+ rtnl_unlock();
+
+ return rc;
+}
+
#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
@@ -6627,29 +6831,12 @@ static int __e1000_resume(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- u32 err = e1000_request_irq(adapter);
-
- if (err)
- return err;
-
- e1000e_up(adapter);
- }
-
- netif_device_attach(netdev);
-
- return 0;
-}
-
static int e1000e_pm_suspend(struct device *dev)
{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6660,14 +6847,25 @@ static int e1000e_pm_suspend(struct device *dev)
if (rc)
e1000e_pm_thaw(dev);
+ /* Enter the S0ix flow on supported MACs */
+ if (hw->mac.type >= e1000_pch_cnp)
+ e1000e_s0ix_entry_flow(adapter);
+
return rc;
}
static int e1000e_pm_resume(struct device *dev)
{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
int rc;
+ /* Exit the S0ix flow on supported MACs */
+ if (hw->mac.type >= e1000_pch_cnp)
+ e1000e_s0ix_exit_flow(adapter);
+
rc = __e1000_resume(pdev);
if (rc)
return rc;
@@ -6818,16 +7016,11 @@ static void e1000_netpoll(struct net_device *netdev)
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
+ e1000e_pm_freeze(&pdev->dev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (netif_running(netdev))
- e1000e_down(adapter, true);
pci_disable_device(pdev);
/* Request a slot slot reset. */
@@ -6893,10 +7086,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
+ e1000e_pm_thaw(&pdev->dev);
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
@@ -7407,15 +7597,13 @@ static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- bool down = test_bit(__E1000_DOWN, &adapter->state);
e1000e_ptp_remove(adapter);
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
- if (!down)
- set_bit(__E1000_DOWN, &adapter->state);
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
@@ -7435,9 +7623,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
- /* Don't lie to e1000_close() down the road. */
- if (!down)
- clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
if (pci_dev_run_wake(pdev))
@@ -7567,6 +7752,17 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 1a4c65d9feb4..eaa5a0fb99f0 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,6 +295,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ /* fall-through */
+ case e1000_pch_tgp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 47f5ca793970..df59fd1d660c 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -18,6 +18,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
@@ -234,4 +235,7 @@
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+/* PHY registers */
+#define I82579_DFT_CTRL PHY_REG(769, 20)
+
#endif
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b14441944b4b..f306084ca12c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -534,6 +534,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
+void fm10k_iov_update_stats(struct fm10k_intfc *interface);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
@@ -542,6 +543,8 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
int __always_unused min_rate, int max_rate);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi);
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats);
/* DebugFS */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index afe1fafd2447..8c50a128df29 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -520,6 +520,27 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
+/**
+ * fm10k_iov_update_stats - Update stats for all VFs
+ * @interface: device private structure
+ *
+ * Updates the VF statistics for all enabled VFs. Expects to be called by
+ * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
+ * bit is already handled.
+ */
+void fm10k_iov_update_stats(struct fm10k_intfc *interface)
+{
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ int i;
+
+ if (!iov_data)
+ return;
+
+ for (i = 0; i < iov_data->num_vfs; i++)
+ hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
+}
+
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
struct fm10k_vf_info *vf_info)
{
@@ -650,3 +671,30 @@ int fm10k_ndo_get_vf_config(struct net_device *netdev,
return 0;
}
+
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ struct fm10k_hw_stats_q *hw_stats;
+ u32 idx, qpp;
+
+ /* verify SR-IOV is active and that vf idx is valid */
+ if (!iov_data || vf_idx >= iov_data->num_vfs)
+ return -EINVAL;
+
+ qpp = fm10k_queues_per_pool(hw);
+ hw_stats = iov_data->vf_info[vf_idx].stats;
+
+ for (idx = 0; idx < qpp; idx++) {
+ stats->rx_packets += hw_stats[idx].rx_packets.count;
+ stats->tx_packets += hw_stats[idx].tx_packets.count;
+ stats->rx_bytes += hw_stats[idx].rx_bytes.count;
+ stats->tx_bytes += hw_stats[idx].tx_bytes.count;
+ stats->rx_dropped += hw_stats[idx].rx_drops.count;
+ }
+
+ return 0;
+}
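A hedged usage sketch; the direct caller here is hypothetical, since the kernel normally reaches this function through the .ndo_get_vf_stats hook wired up in the fm10k_netdev.c hunk below:

	struct ifla_vf_stats vf_stats = {};
	int err;

	/* hypothetical direct call for VF index 0 */
	err = fm10k_ndo_get_vf_stats(netdev, 0, &vf_stats);
	if (!err)
		netdev_info(netdev, "VF0: rx %llu pkts, tx %llu pkts\n",
			    (unsigned long long)vf_stats.rx_packets,
			    (unsigned long long)vf_stats.tx_packets);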
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2be9222510e7..17738b0a9873 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,7 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.26.1-k"
+#define DRV_VERSION "0.27.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 09f7a246e134..68baee04dc58 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1643,6 +1643,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
+ .ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
.ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index bb236fa44048..d122d0087191 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -630,6 +630,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+ /* Update VF statistics */
+ fm10k_iov_update_stats(interface);
+
clear_bit(__FM10K_UPDATING_STATS, interface->state);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 160bc5b78f99..ceb9b791f799 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
@@ -76,8 +76,8 @@ struct fm10k_tlv_attr {
#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
-#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
-#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED, 0 }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR, 0, 0 }
struct fm10k_msg_data {
unsigned int id;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 15ac1c7885bc..63968c5d7c5d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -581,6 +581,7 @@ struct fm10k_vf_info {
* at the same offset as the mailbox
*/
struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ struct fm10k_hw_stats_q stats[FM10K_MAX_QUEUES_POOL];
int rate; /* Tx BW cap as defined by OS */
u16 glort; /* resource tag for this VF */
u16 sw_vid; /* Switch API assigned VLAN */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2af9f6308f84..cb6367334ca7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1118,6 +1118,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+int i40e_count_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 72c04881d290..9f0a4e92a231 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -508,6 +508,59 @@ shutdown_arq_out:
}
/**
+ * i40e_set_hw_flags - set HW flags
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ hw->flags = 0;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* The ability to RX (not drop) 802.1ad frames */
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ }
+ break;
+ case I40E_MAC_X722:
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* fall through */
+ default:
+ break;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 8)) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 9))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
+
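The version test repeated above reads as "AQ API at least 1.N"; a hypothetical helper (not part of this patch) expressing the same predicate:

	/* hypothetical: true when the FW AQ API version is >= 1.min */
	static bool i40e_aq_api_at_least(const struct i40e_adminq_info *aq,
					 u16 min)
	{
		return aq->api_maj_ver > 1 ||
		       (aq->api_maj_ver == 1 && aq->api_min_ver >= min);
	}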
+/**
* i40e_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
@@ -571,6 +624,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
+ /* Some features were introduced in different FW API versions
+ * for different MAC types.
+ */
+ i40e_set_hw_flags(hw);
+
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
&hw->nvm.version);
@@ -596,25 +654,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
- /* Newer versions of firmware require lock when reading the NVM */
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 5))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
hw->aq.api_min_ver >= 7))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8)) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
- hw->flags |= I40E_HW_FLAG_DROP_MODE;
- }
-
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 69a2daaca5c5..aa5f1c0aa721 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2251,7 +2251,13 @@ struct i40e_aqc_phy_register_access {
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_address;
- u8 reserved1[2];
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+ u8 reserved1;
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 7560f06768e0..d4055037af89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -29,6 +29,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_B:
case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
@@ -933,10 +934,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
- if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
status = i40e_init_nvm(hw);
return status;
}
@@ -1441,9 +1438,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
u32 gpio_val = 0;
u32 port;
- if (!hw->func_caps.led[idx])
+ if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ !hw->func_caps.led[idx])
return 0;
-
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
@@ -1462,8 +1459,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
#define I40E_LED0 22
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -1508,8 +1512,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
int i;
- if (mode & 0xfffffff0)
+ if (mode & ~I40E_LED_MODE_VALID) {
hw_dbg(hw, "invalid mode passed in %X\n", mode);
+ return;
+ }
/* as per the documentation GPIO 22-29 are the LED
* GPIO pins named LED0..LED7
@@ -1519,6 +1525,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
+
+ if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
+ u32 pin_func = 0;
+
+ if (mode & I40E_FW_LED)
+ pin_func = I40E_PIN_FUNC_SDP;
+ else
+ pin_func = I40E_PIN_FUNC_LED;
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
+ gpio_val |= ((pin_func <<
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ }
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -2571,9 +2591,16 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- hw->phy.link_info.req_fec_info =
- abilities.fec_cfg_curr_mod_ext_info &
- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+ if (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_ENABLE_FEC_AUTO)
+ hw->phy.link_info.req_fec_info =
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+ else
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type));
@@ -4885,6 +4912,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -5044,7 +5072,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -5077,7 +5105,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
status =
i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -5116,7 +5144,7 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
@@ -5321,20 +5349,49 @@ do_retry:
}
/**
- * i40e_aq_set_phy_register
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+ u8 mdio_num,
+ struct i40e_aqc_phy_register_access *cmd)
+{
+ if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ ((mdio_num <<
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+ else
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "MDIO I/F number selection not supported by current FW version.\n");
+ }
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
*
* Write the external PHY register.
+ * NOTE: In most cases the MDIO I/F number should not be changed; that's why
+ * you may use the simple wrapper i40e_aq_set_phy_register.
**/
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -5349,26 +5406,36 @@ i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
cmd->reg_address = cpu_to_le32(reg_addr);
cmd->reg_value = cpu_to_le32(reg_val);
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
- * i40e_aq_get_phy_register
+ * i40e_aq_get_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
*
* Read the external PHY register.
+ * NOTE: In most cases the MDIO I/F number should not be changed; that's why
+ * you may use the simple wrapper i40e_aq_get_phy_register.
**/
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -5382,6 +5449,11 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = le32_to_cpu(cmd->reg_value);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 200a1cb3b536..9de503c5f99b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -889,7 +889,9 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
ret = i40e_read_nvm_module_data(hw,
I40E_SR_EMP_SR_SETTINGS_PTR,
- offset, 1,
+ offset,
+ I40E_LLDP_CURRENT_STATUS_OFFSET,
+ I40E_LLDP_CURRENT_STATUS_SIZE,
&lldp_cfg.adminstatus);
} else {
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2a80c5daa376..ba86ad833bee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -32,6 +32,9 @@
#define I40E_CEE_MAX_FEAT_TYPE 3
#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
+
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index bac4da031f9b..bf15a868292f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -23,6 +23,8 @@
#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
+#define I40E_IS_X710TL_DEVICE(d) \
+ ((d) == I40E_DEV_ID_10G_BASE_T_BC)
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 41e1240acaea..d24d8731bef0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -722,7 +722,14 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
} else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -730,12 +737,6 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
} else {
ethtool_link_ksettings_add_link_mode(ks, advertising,
FEC_NONE);
- if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
- }
}
}
@@ -1437,6 +1438,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_hw *hw = &pf->hw;
i40e_status status = 0;
int err = 0;
+ u8 fec_cfg;
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
@@ -1448,18 +1450,16 @@ static int i40e_get_fec_param(struct net_device *netdev,
}
fecparam->fec = 0;
- if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
fecparam->fec |= ETHTOOL_FEC_AUTO;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_RS) ||
- (abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_ABILITY_RS))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS))
fecparam->fec |= ETHTOOL_FEC_RS;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_KR) ||
- (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR))
fecparam->fec |= ETHTOOL_FEC_BASER;
- if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ if (fec_cfg == 0)
fecparam->fec |= ETHTOOL_FEC_OFF;
if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
@@ -5112,7 +5112,7 @@ static int i40e_get_module_info(struct net_device *netdev,
case I40E_MODULE_TYPE_SFP:
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
I40E_MODULE_SFF_8472_COMP,
&sff8472_comp, NULL);
if (status)
@@ -5120,7 +5120,7 @@ static int i40e_get_module_info(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
I40E_MODULE_SFF_8472_SWAP,
&sff8472_swap, NULL);
if (status)
@@ -5152,7 +5152,7 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Read from memory page 0. */
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- 0,
+ 0, true,
I40E_MODULE_REVISION_ADDR,
&sff8636_rev, NULL);
if (status)
@@ -5223,7 +5223,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- addr, offset, &value, NULL);
+ addr, true, offset, &value, NULL);
if (status)
return -EIO;
data[i] = value;
@@ -5242,6 +5242,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .get_drvinfo = i40e_get_drvinfo,
.set_eeprom = i40e_set_eeprom,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6031223eafab..1ccabeafa44c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1110,6 +1110,25 @@ void i40e_update_stats(struct i40e_vsi *vsi)
}
/**
+ * i40e_count_filters - counts VSI mac filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns the number of MAC filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
+ int cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ ++cnt;
+
+ return cnt;
+}
+
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -2645,8 +2664,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev_info(netdev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
@@ -3534,14 +3553,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->rx.target_itr =
ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
- q_vector->rx.target_itr);
+ q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr =
ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
wr32(hw, I40E_PFINT_RATEN(vector - 1),
@@ -3646,11 +3665,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
i40e_enable_misc_int_causes(pf);
@@ -7168,6 +7187,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ch->num_queue_pairs = qcnt;
if (!i40e_setup_channel(pf, vsi, ch)) {
ret = -EINVAL;
+ kfree(ch);
goto err_free;
}
ch->parent_vsi = vsi;
@@ -11396,7 +11416,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
i40e_flush(hw);
@@ -12850,6 +12870,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_get_vf_stats = i40e_get_vf_stats,
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
@@ -12911,6 +12932,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_SCTP_CRC |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e4d8d20baf3b..7164f4ad8120 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -323,20 +323,24 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
/**
* i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
- * @hw: pointer to the HW structure
+ * @hw: Pointer to the HW structure
* @module_ptr: Pointer to module in words with respect to NVM beginning
- * @offset: offset in words from module start
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from the start of the data area
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr)
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
i40e_status status;
+ u16 specific_ptr = 0;
u16 ptr_value = 0;
- u32 flat_offset;
+ u32 offset = 0;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -352,36 +356,35 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
/* Pointer not initialized */
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
- ptr_value == I40E_NVM_INVALID_VAL)
+ ptr_value == I40E_NVM_INVALID_VAL) {
+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
return I40E_ERR_BAD_PTR;
+ }
/* Check whether the module is in SR mapped area or outside */
if (ptr_value & I40E_PTR_TYPE) {
/* Pointer points outside of the Shared RAM mapped area */
- ptr_value &= ~I40E_PTR_TYPE;
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
- /* PtrValue in 4kB units, need to convert to words */
- ptr_value /= 2;
- flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
- status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!status) {
- status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
- 2 * words_data_size,
- data_ptr, true, NULL);
- i40e_release_nvm(hw);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Reading nvm aq failed.Error code: %d.\n",
- status);
- return I40E_ERR_NVM;
- }
- } else {
- return I40E_ERR_NVM;
- }
+ return I40E_ERR_PARAM;
} else {
/* Read from the Shadow RAM */
- status = i40e_read_nvm_buffer(hw, ptr_value + offset,
- &words_data_size, data_ptr);
+
+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+ &specific_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+
+ offset = ptr_value + module_offset + specific_ptr +
+ data_offset;
+
+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+ data_ptr);
if (status) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm buffer failed.Error code: %d.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5250441bf75b..bbb478f09093 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,10 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr);
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
@@ -409,14 +411,24 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for the most common use case */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
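
The two macros above give callers a short signature for the common case, routing everything through the extended MDIO-aware functions with set_mdio pinned to false and mdio_num to 0. A userspace mock of the same wrapper-macro pattern (names hypothetical, not the driver's API):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* The _ext function carries the new MDIO parameters; the macro pins
 * them to their defaults so common call sites stay short.
 */
static int set_phy_register_ext(uint8_t phy_select, uint8_t dev_addr,
				bool page_change, bool set_mdio,
				uint8_t mdio_num, uint32_t reg, uint32_t val)
{
	printf("phy=%u dev=%u page_change=%d mdio=%d/%u reg=0x%x val=0x%x\n",
	       phy_select, dev_addr, page_change, set_mdio, mdio_num,
	       reg, val);
	return 0;
}

#define set_phy_register(ps, da, pc, reg, val) \
	set_phy_register_ext(ps, da, pc, false, 0, reg, val)

int main(void)
{
	return set_phy_register(1, 0, true, 0x1000, 0xdead);
}
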
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index e3f29dc8b290..b8496037ef7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2960,10 +2960,16 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ }
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
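
With UDP GSO the segmentation header is the fixed 8-byte UDP header rather than TCP's variable doff * 4, which is the only difference between the two branches above. A minimal sketch of the hdr_len computation (the 34-byte MAC+IPv4 l4_offset is just an example value):

#include <stdio.h>

static unsigned int seg_hdr_len(int is_udp, unsigned int l4_offset,
				unsigned int tcp_doff)
{
	/* UDP header is always 8 bytes; TCP header is doff 32-bit words */
	unsigned int l4_hdr = is_udp ? 8 : tcp_doff * 4;

	return l4_hdr + l4_offset;	/* MAC + IP headers + L4 header */
}

int main(void)
{
	/* 14-byte Ethernet + 20-byte IPv4 = l4_offset of 34 */
	printf("UDP GSO hdr_len: %u\n", seg_hdr_len(1, 34, 0));	/* 42 */
	printf("TCP TSO hdr_len: %u\n", seg_hdr_len(0, 34, 5));	/* 54 */
	return 0;
}
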
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index b43ec94a0f29..6ea2867ff60f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -624,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3d2440838822..6a3f0fc56c3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -955,7 +955,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
- vf->num_mac = 0;
}
/* do the accounting and remove additional ADq VSI's */
@@ -2548,20 +2547,12 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ int mac2add_cnt = 0;
int i;
- /* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
- */
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
- }
-
for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
if (is_broadcast_ether_addr(addr) ||
@@ -2585,8 +2576,24 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
+
+ /* count filters that will actually be added */
+ f = i40e_find_mac(vsi, addr);
+ if (!f)
+ ++mac2add_cnt;
}
+ /* If this VF is not privileged, then we can't add more than a limited
+ * number of addresses. Check to make sure that the additions do not
+ * push us over the limit.
+ */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
return 0;
}
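
The check now counts only addresses that are not already installed (i40e_find_mac() returns NULL) and compares the VSI-wide filter count plus that delta against the cap, so re-sending an address the VF already owns no longer trips the untrusted-VF limit. A standalone model of the counting logic (types and limit illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_MACS_PER_VF 4  /* stand-in for I40E_VC_MAX_MAC_ADDR_PER_VF */

/* returns 1 if mac is already installed, mimicking i40e_find_mac() */
static int mac_installed(const char *const *tbl, int n, const char *mac)
{
	for (int i = 0; i < n; i++)
		if (!strcmp(tbl[i], mac))
			return 1;
	return 0;
}

int main(void)
{
	const char *installed[] = { "02:00:00:00:00:01", "02:00:00:00:00:02" };
	const char *request[] = { "02:00:00:00:00:02", "02:00:00:00:00:03" };
	int ninst = 2, nreq = 2, mac2add_cnt = 0;

	for (int i = 0; i < nreq; i++)
		if (!mac_installed(installed, ninst, request[i]))
			mac2add_cnt++;	/* only genuinely new addresses */

	/* old check would have used nreq (2); new check uses the delta (1) */
	if (ninst + mac2add_cnt > MAX_MACS_PER_VF)
		printf("rejected: over the untrusted-VF limit\n");
	else
		printf("accepted: %d existing + %d new\n", ninst, mac2add_cnt);
	return 0;
}
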
@@ -2640,8 +2647,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac++;
}
}
}
@@ -2689,16 +2694,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
-
- if (vf->pf_set_mac &&
- ether_addr_equal(al->list[i].addr,
- vf->default_lan_addr.addr)) {
- dev_err(&pf->pdev->dev,
- "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
- vf->default_lan_addr.addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- goto error_param;
- }
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2709,8 +2704,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -4531,3 +4524,51 @@ out:
clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
+
+/**
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_eth_stats *stats;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+
+ /* validate the request */
+ if (i40e_validate_vf(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ i40e_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
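
With the ndo wired into i40e_netdev_ops above, the rtnetlink core can report per-VF counters (visible from userspace via ip -s link show on the PF). A hypothetical sketch of a direct caller, for illustration only; the real plumbing lives in the rtnetlink core:

/* Illustrative fragment, assuming the caller holds the PF netdev. */
struct ifla_vf_stats st;

if (netdev->netdev_ops->ndo_get_vf_stats &&
    !netdev->netdev_ops->ndo_get_vf_stats(netdev, 0, &st))
	netdev_info(netdev, "VF 0: rx %llu pkts / tx %llu pkts\n",
		    st.rx_packets, st.tx_packets);
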
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 7164b9bb294f..631248c0981a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -101,7 +101,6 @@ struct i40e_vf {
bool link_up; /* only valid if VF link is forced */
bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
- u16 num_mac;
u16 num_vlan;
/* ADq related variables */
@@ -139,5 +138,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9edde960b4f2..7cb829132d28 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -13,9 +13,12 @@ ice-y := ice_main.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
+ ice_base.o \
ice_lib.o \
+ ice_txrx_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
-ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
+ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 45e100666049..f972dce8aebb 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -29,10 +29,13 @@
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
+#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
+#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
+#include <net/xdp_sock.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -42,6 +45,7 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_xsk.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -78,8 +82,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
- (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
+#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -127,6 +130,16 @@ extern const char ice_drv_ver[];
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
+#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+
+struct ice_txq_meta {
+ u32 q_teid; /* Tx-scheduler element identifier */
+ u16 q_id; /* Entry in VSI's txq_map bitmap */
+ u16 q_handle; /* Relative index of Tx queue within TC */
+ u16 vsi_idx; /* VSI index that Tx queue belongs to */
+ u8 tc; /* TC number that Tx queue belongs to */
+};
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -169,6 +182,7 @@ enum ice_state {
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
+ __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -271,9 +285,18 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
+ u16 req_txq; /* User requested Tx queues */
+ u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring **xdp_rings; /* XDP ring array */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -313,6 +336,7 @@ enum ice_pf_flags {
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
+ ICE_FLAG_LEGACY_RX,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -346,6 +370,7 @@ struct ice_pf {
struct work_struct serv_task;
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
+ struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
@@ -417,6 +442,37 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
return np->vsi->back;
}
+static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
+
+static inline void ice_set_ring_xdp(struct ice_ring *ring)
+{
+ ring->flags |= ICE_TX_FLAGS_RING_XDP;
+}
+
+/**
+ * ice_xsk_umem - get XDP UMEM bound to a ring
+ * @ring: ring to use
+ *
+ * Returns a pointer to the xdp_umem structure if there is a UMEM present,
+ * NULL otherwise.
+ */
+static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+{
+ struct xdp_umem **umems = ring->vsi->xsk_umems;
+ int qid = ring->q_index;
+
+ if (ice_ring_is_xdp(ring))
+ qid -= ring->vsi->num_xdp_txq;
+
+ if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ return NULL;
+
+ return umems[qid];
+}
+
/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
@@ -437,20 +493,23 @@ void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
+int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-#ifdef CONFIG_DCB
-int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
-void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
-#endif /* CONFIG_DCB */
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 023e3d2fee5f..5421fc413f94 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -742,6 +742,10 @@ struct ice_aqc_add_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
+struct ice_aqc_conf_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
struct ice_aqc_get_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
@@ -783,6 +787,44 @@ struct ice_aqc_port_ets_elem {
__le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
+/* Rate limiting profile for
+ * Add RL profile (indirect 0x0410)
+ * Query RL profile (indirect 0x0411)
+ * Remove RL profile (indirect 0x0415)
+ * These indirect commands act on one or more
+ * RL profiles with the specified data.
+ */
+struct ice_aqc_rl_profile {
+ __le16 num_profiles;
+ __le16 num_processed; /* Only for response. Reserved in Command. */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_rl_profile_elem {
+ u8 level;
+ u8 flags;
+#define ICE_AQC_RL_PROFILE_TYPE_S 0x0
+#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S)
+#define ICE_AQC_RL_PROFILE_TYPE_CIR 0
+#define ICE_AQC_RL_PROFILE_TYPE_EIR 1
+#define ICE_AQC_RL_PROFILE_TYPE_SRL 2
+/* The following flag is used for Query RL Profile Data */
+#define ICE_AQC_RL_PROFILE_INVAL_S 0x7
+#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S)
+
+ __le16 profile_id;
+ __le16 max_burst_size;
+ __le16 rl_multiply;
+ __le16 wake_up_calc;
+ __le16 rl_encode;
+};
+
+struct ice_aqc_rl_profile_generic_elem {
+ struct ice_aqc_rl_profile_elem generic[1];
+};
+
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -1044,6 +1086,10 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1)
#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2)
+#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 reserved1;
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
@@ -1147,6 +1193,33 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ice_aqc_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F
+#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF
+#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10)
+#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0
+#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S)
+#define ICE_AQC_SFF_NO_PAGE_CHANGE 0
+#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1
+#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2
+#define ICE_AQC_SFF_IS_WRITE BIT(15)
+ __le16 i2c_mem_addr;
+ __le16 eeprom_page;
+#define ICE_AQC_SFF_EEPROM_BANK_S 0
+#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S)
+#define ICE_AQC_SFF_EEPROM_PAGE_S 8
+#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S)
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1618,6 +1691,7 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
@@ -1625,6 +1699,7 @@ struct ice_aq_desc {
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
+ struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
struct ice_aqc_pf_vf_msg virt;
@@ -1726,12 +1801,15 @@ enum ice_adminq_opc {
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_cfg_sched_elems = 0x0403,
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
+ ice_aqc_opc_add_rl_profiles = 0x0410,
ice_aqc_opc_query_sched_res = 0x0412,
+ ice_aqc_opc_remove_rl_profiles = 0x0415,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
@@ -1741,6 +1819,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
new file mode 100644
index 000000000000..77d6a0291e97
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_base.h"
+#include "ice_dcb_lib.h"
+
+/**
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
+{
+ int offset, i;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+ 0, qs_cfg->q_count, 0);
+ if (offset >= qs_cfg->pf_map_size) {
+ mutex_unlock(qs_cfg->qs_mutex);
+ return -ENOMEM;
+ }
+
+ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+ for (i = 0; i < qs_cfg->q_count; i++)
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+}
+
+/**
+ * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
+{
+ int i, index = 0;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ for (i = 0; i < qs_cfg->q_count; i++) {
+ index = find_next_zero_bit(qs_cfg->pf_map,
+ qs_cfg->pf_map_size, index);
+ if (index >= qs_cfg->pf_map_size)
+ goto err_scatter;
+ set_bit(index, qs_cfg->pf_map);
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+err_scatter:
+ for (index = 0; index < i; index++) {
+ clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+ qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ usleep_range(20, 40);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
+ GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+ /* This will not be called in the driver load path because the netdev
+ * will not be created yet. All other cases will register the NAPI
+ * handler here (i.e. resume, reset/rebuild, etc.)
+ */
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+out:
+ /* tie q_vector and VSI together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_ring *ring;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI with an associated netdev is set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
+/**
+ * ice_calc_q_handle - calculate the queue handle
+ * @vsi: VSI that ring belongs to
+ * @ring: ring to get the absolute queue index
+ * @tc: traffic class number
+ */
+static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+{
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc,
+ "XDP ring can't belong to TC other than 0");
+
+ /* The idea here is to subtract the queue offset of the TC that the
+ * ring belongs to from the ring's absolute queue index; the result
+ * is the queue's index within that TC.
+ */
+ return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
+}
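
Worked example of the subtraction above, assuming TC 1 starts at queue offset 8: the ring with absolute q_index 10 gets handle 2, i.e. it is the third queue within TC 1.

#include <stdio.h>

int main(void)
{
	unsigned int q_index = 10;	/* ring's absolute queue index */
	unsigned int qoffset = 8;	/* TC 1's first absolute queue */

	printf("q_handle within TC: %u\n", q_index - qoffset);	/* 2 */
	return 0;
}
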
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+ * for vmvf_type = VF, it is VF number between 0-256
+ * for vmvf_type = VM, it is VM number between 0-767
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF ID */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ struct ice_vsi *vsi = ring->vsi;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ struct ice_hw *hw;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+ hw = &vsi->back->hw;
+
+ /* the Rx queue number in the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index);
+
+ ring->xsk_umem = ice_xsk_umem(ring);
+ if (ring->xsk_umem) {
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+ /* For AF_XDP ZC, we disallow packets from spanning
+ * multiple buffers, which lets us skip that
+ * handling in the fast path.
+ */
+ chain_len = 1;
+ ring->zca.free = ice_zca_free;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca);
+ if (err)
+ return err;
+
+ dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index);
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
+ }
+ }
+ /* Receive Queue Base Address.
+ * Indicates the starting address of the descriptor queue defined in
+ * 128 Byte units.
+ */
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+ rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* This controls whether VLAN is stripped from inner headers
+ * The VLAN in the inner L2 header is stripped to the receive
+ * descriptor if enabled by this flag.
+ */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ chain_len * ring->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increasing context priority to pick up profile ID;
+ * default is 0x01; setting to 0x03 ensures the profile
+ * is programmed even if the previous context had the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ ice_clear_ring_build_skb_ena(ring);
+ else
+ ice_set_ring_build_skb_ena(ring);
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ err = ring->xsk_umem ?
+ ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+ if (err)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_umem ? "UMEM enabled " : "",
+ ring->q_index, pf_q);
+
+ return 0;
+}
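
The rxmax clamp above caps the per-queue max frame at chain_len buffers: with the default 5-buffer chain and 2048-byte buffers a 9K jumbo max_frame passes through, while the AF_XDP zero-copy path (chain_len = 1) is limited to a single buffer. A sketch of that arithmetic (values illustrative):

#include <stdio.h>

static unsigned int rxmax(unsigned int max_frame, unsigned int chain_len,
			  unsigned int rx_buf_len)
{
	unsigned int cap = chain_len * rx_buf_len;

	return max_frame < cap ? max_frame : cap;	/* min_t(u16, ...) */
}

int main(void)
{
	printf("default chain: %u\n", rxmax(9216, 5, 2048));	/* 9216 */
	printf("AF_XDP ZC: %u\n", rxmax(9216, 1, 2048));	/* 2048 */
	return 0;
}
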
+
+/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
+ *
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+ int ret = 0;
+
+ ret = __ice_vsi_get_qs_contig(qs_cfg);
+ if (ret) {
+ /* contig failed, so try with scatter approach */
+ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+ qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->scatter_count);
+ ret = __ice_vsi_get_qs_sc(qs_cfg);
+ }
+ return ret;
+}
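
A hedged example of driving this helper the way the VSI setup code does. The field values are illustrative; the pf/vsi member names are taken from the driver's existing queue tracking (avail_q_mutex appears elsewhere in this patch) and should be treated as assumptions here:

/* Request 8 contiguous Tx queues, falling back to at most 4 scattered
 * ones (illustrative values; the real caller lives in ice_lib.c).
 */
struct ice_qs_cfg tx_qs_cfg = {
	.qs_mutex = &pf->avail_q_mutex,
	.pf_map = pf->avail_txqs,
	.pf_map_size = pf->max_pf_txqs,
	.q_count = 8,
	.scatter_count = 4,
	.vsi_map = vsi->txq_map,
	.vsi_map_offset = 0,
	.mapping_mode = ICE_VSI_MAP_CONTIG,
};
int err = __ice_vsi_get_qs(&tx_qs_cfg);
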
+
+/**
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
+ */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+ u32 rx_reg;
+
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf),
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+
+ return ret;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->q_vectors[0]) {
+ dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ num_q_vectors = vsi->num_q_vectors;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
+ */
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+ /* initialize the remaining ring counts to the VSI's queue counts */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
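
The DIV_ROUND_UP over the remaining vectors spreads rings as evenly as possible when queues do not divide evenly across vectors. A standalone worked example of the loop's arithmetic: 10 rings over 4 vectors come out as 3, 3, 2, 2.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int rings_rem = 10, q_vectors = 4;

	for (int v = 0; v < q_vectors; v++) {
		/* remaining rings divided by remaining vectors each step */
		int per_v = DIV_ROUND_UP(rings_rem, q_vectors - v);

		printf("vector %d gets %d rings\n", v, per_v);
		rings_rem -= per_v;
	}
	return 0;
}
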
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_cfg_txq - Configure single Tx queue
+ * @vsi: the VSI that queue belongs to
+ * @ring: Tx ring to be configured
+ * @qg_buf: queue group buffer
+ */
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf)
+{
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ u8 buf_len = sizeof(*qg_buf);
+ enum ice_status status;
+ u16 pf_q;
+ u8 tc;
+
+ pf_q = ring->reg_idx;
+ ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ /* Add the Tx queue's unique per-TC software queue handle
+ * to the VSI Tx ring
+ */
+ ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ ice_cfg_itr_gran(hw);
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_RX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_TX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+}
+
+/**
+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * @vsi: the VSI being configured
+ * @txq: Tx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
+ val);
+ }
+ ice_flush(hw);
+}
+
+/**
+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
+ * @vsi: the VSI being configured
+ * @rxq: Rx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_trigger_sw_intr - trigger a software interrupt
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector to trigger the software interrupt for
+ */
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_M);
+}
+
+/**
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Meta data of Tx ring to be stopped
+ */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 val;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(ring->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ q_vector = ring->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
+ status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+ txq_meta->tc, 1, &txq_meta->q_handle,
+ &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+ rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an
+ * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * This is not an error as the reset operation disables
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps Tx queue's information
+ *
+ * Set up a helper struct that contains all the fields needed to
+ * stop the Tx queue
+ */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ u8 tc;
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ txq_meta->q_id = ring->reg_idx;
+ txq_meta->q_teid = ring->txq_teid;
+ txq_meta->q_handle = ring->q_handle;
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
new file mode 100644
index 000000000000..407995e8e944
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_BASE_H_
+#define _ICE_BASE_H_
+
+#include "ice.h"
+
+int ice_setup_rx_ctx(struct ice_ring *ring);
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf);
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3a6b3950eb0e..fb1d930470c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -855,6 +855,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
+ /* Initialize max burst size */
+ if (!hw->max_burst_size)
+ ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -1067,6 +1070,72 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
+ * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ */
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+{
+ enum ice_status status;
+ u16 pfa_len, pfa_ptr;
+ u16 next_tlv;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ while (next_tlv < pfa_ptr + pfa_len) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+ /* Read TLV type */
+ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ break;
+ }
+ /* Read TLV length */
+ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return ICE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
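
The walk above treats the PFA as a flat run of word-addressed TLVs: each entry is a type word, a length word, then length value words, so the next entry starts at current + len + 2. A standalone model over a toy PFA (types and values arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* toy PFA area: two TLVs, [type][len][len value words] each */
	uint16_t pfa[] = { 0x48, 2, 0xaaaa, 0xbbbb, 0x2b, 1, 0xcccc };
	unsigned int next = 0, n = sizeof(pfa) / sizeof(pfa[0]);

	while (next + 1 < n) {
		uint16_t type = pfa[next], len = pfa[next + 1];

		printf("TLV type 0x%02x, %u value word(s)\n", type, len);
		next += len + 2;	/* skip type, length and value */
	}
	return 0;
}
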
+
+/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
@@ -1182,56 +1251,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
-/**
- * ice_debug_cq
- * @hw: pointer to the hardware structure
- * @mask: debug mask
- * @desc: pointer to control queue descriptor
- * @buf: pointer to command buffer
- * @buf_len: max length of buf
- *
- * Dumps debug log about control command with descriptor contents.
- */
-void
-ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
- u16 buf_len)
-{
- struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
- u16 len;
-
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (!(mask & hw->debug_mask))
- return;
-#endif
-
- if (!desc)
- return;
-
- len = le16_to_cpu(cq_desc->datalen);
-
- ice_debug(hw, mask,
- "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->params.generic.addr_high),
- le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, mask, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
- }
-}
-
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
@@ -1654,6 +1673,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
ice_debug(hw, ICE_DBG_INIT,
"%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
+
+ /* store func count for resource management purposes */
+ if (dev_p)
+ dev_p->num_funcs = hweight32(number);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
@@ -1760,6 +1783,18 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
break;
}
}
+
+ /* Re-calculate capabilities that are dependent on the number of
+ * physical ports; i.e. some features are not supported or function
+ * differently on devices with more than 4 ports.
+ */
+ if (hw->dev_caps.num_funcs > 4) {
+ /* Max 4 TCs per port */
+ caps->maxtc = 4;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d (based on #ports)\n", prefix,
+ caps->maxtc);
+ }
}
/**
@@ -1856,8 +1891,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
u32 valid_func, rxq_first_id, txq_first_id;
u32 msix_vector_first_id, max_mtu;
- u32 num_func = 0;
- u8 i;
+ u32 num_funcs;
/* cache some func_caps values that should be restored after memset */
valid_func = func_caps->common_cap.valid_functions;
@@ -1890,6 +1924,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
rxq_first_id = dev_caps->common_cap.rxq_first_id;
msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
max_mtu = dev_caps->common_cap.max_mtu;
+ num_funcs = dev_caps->num_funcs;
/* unset dev capabilities */
memset(dev_caps, 0, sizeof(*dev_caps));
@@ -1900,19 +1935,14 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
dev_caps->common_cap.rxq_first_id = rxq_first_id;
dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
dev_caps->common_cap.max_mtu = max_mtu;
-
- /* valid_func is a bitmap. get number of functions */
-#define ICE_MAX_FUNCS 8
- for (i = 0; i < ICE_MAX_FUNCS; i++)
- if (valid_func & BIT(i))
- num_func++;
+ dev_caps->num_funcs = num_funcs;
/* one Tx and one Rx queue per function in safe mode */
- dev_caps->common_cap.num_rxq = num_func;
- dev_caps->common_cap.num_txq = num_func;
+ dev_caps->common_cap.num_rxq = num_funcs;
+ dev_caps->common_cap.num_txq = num_funcs;
/* two MSIX vectors per function */
- dev_caps->common_cap.num_msix_vectors = 2 * num_func;
+ dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}
/**
@@ -2556,6 +2586,52 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
/**
+ * ice_aq_sff_eeprom
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset; lower 8 bits hold the address, upper 8 bits are zero padding.
+ * @page: QSFP page
+ * @set_page: set or ignore the page
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: false for read, true for write.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read/Write SFF EEPROM (0x06EE)
+ */
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_sff_eeprom *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || (mem_addr & 0xff00))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
+ ICE_AQC_SFF_I2CBUS_7BIT_M) |
+ ((set_page <<
+ ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
+ ICE_AQC_SFF_SET_EEPROM_PAGE_M));
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
+ cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ if (write)
+ cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
+
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+ return status;
+}
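
The i2c_bus_addr packing above folds three things into one 16-bit field: the 8-bit I2C bus address (e.g. 0xA0, the usual SFP EEPROM address) stored as its 7-bit form in bits [6:0], the set_page policy in bits [12:11], and the write flag in bit 15. A standalone sketch of the arithmetic, using the shift/mask values from the ICE_AQC_SFF_* defines earlier in this patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t bus_addr = 0xA0;	/* 8-bit form; 7-bit form is 0x50 */
	uint16_t set_page = 1;		/* ICE_AQC_SFF_SET_23_ON_MISMATCH */
	uint16_t field;

	field = ((bus_addr >> 1) & 0x7F) |	/* 7-bit address, bits [6:0] */
		((set_page << 11) & (0x3 << 11));	/* page policy */
	field |= 1u << 15;			/* ICE_AQC_SFF_IS_WRITE */

	printf("i2c_bus_addr field: 0x%04x\n", field);	/* 0x8850 */
	return 0;
}
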
+
+/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
@@ -3148,7 +3224,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @tc: TC number
* @q_handle: software queue handle
*/
-static struct ice_q_ctx *
+struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
struct ice_vsi_ctx *vsi;
@@ -3245,9 +3321,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
q_ctx->q_handle = q_handle;
+ q_ctx->q_teid = le32_to_cpu(node.node_teid);
- /* add a leaf node into schduler tree queue layer */
+ /* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ if (!status)
+ status = ice_sched_replay_q_bw(pi, q_ctx);
ena_txq_exit:
mutex_unlock(&pi->sched_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index c3df92f57777..b22aa561e253 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,16 +6,18 @@
#include "ice.h"
#include "ice_type.h"
+#include "ice_nvm.h"
#include "ice_flex_pipe.h"
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-void
-ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
@@ -117,6 +119,10 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
@@ -133,6 +139,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2353166c654e..dd946866d7b8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -810,6 +810,52 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_debug_cq
+ * @hw: pointer to the hardware structure
+ * @desc: pointer to control queue descriptor
+ * @buf: pointer to command buffer
+ * @buf_len: max length of buf
+ *
+ * Dumps debug log about control command with descriptor contents.
+ */
+static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+{
+ struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ u16 len;
+
+ if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
+ !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
+ return;
+
+ if (!desc)
+ return;
+
+ len = le16_to_cpu(cq_desc->datalen);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ le16_to_cpu(cq_desc->opcode),
+ le16_to_cpu(cq_desc->flags),
+ le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_high),
+ le32_to_cpu(cq_desc->cookie_low));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param0),
+ le32_to_cpu(cq_desc->params.generic.param1));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ if (buf && cq_desc->datalen != 0) {
+ ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
+ }
+}
+
+/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -934,10 +980,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/* Debug desc and buffer */
- ice_debug(hw, ICE_DBG_AQ_MSG,
+ ice_debug(hw, ICE_DBG_AQ_DESC,
"ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -948,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ice_sq_done(hw, cq))
break;
- mdelay(1);
+ udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
@@ -971,7 +1017,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval = le16_to_cpu(desc->retval);
if (retval) {
ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue command completed with error 0x%x\n",
+ "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ le16_to_cpu(desc->opcode),
retval);
/* strip off FW internal code */
@@ -986,7 +1033,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG,
"ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, (void *)desc, buf, buf_size);
/* save writeback AQ if requested */
if (details->wb_desc)
@@ -1075,7 +1122,8 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive Queue Event received with error 0x%x\n",
+ "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ le16_to_cpu(desc->opcode),
cq->rq_last_status);
}
memcpy(&e->desc, desc, sizeof(e->desc));
@@ -1084,10 +1132,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
- ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
- cq->rq_buf_size);
+ ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 44945c2165d8..bf0ebe6149e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -22,7 +22,7 @@
*/
#define EXP_FW_API_VER_BRANCH 0x00
#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x03
+#define EXP_FW_API_VER_MINOR 0x05
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
@@ -31,8 +31,9 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue default settings */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
+/* Control Queue timeout settings - max delay 250ms */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
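The replacement constants preserve the original 250 ms budget while polling ten times more often: 2500 checks x 100 us = 250,000 us = 250 ms, versus the old 250 checks x mdelay(1). Shape of the resulting poll loop, per the ice_sq_send_cmd() hunk above:

    do {
            if (ice_sq_done(hw, cq))
                    break;
            udelay(ICE_CTL_Q_SQ_CMD_USEC);          /* 100 us per check */
            total_delay++;
    } while (total_delay < cq->sq_cmd_timeout);     /* up to 2500 checks */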
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index dd7efff121bd..713e8a892e14 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -965,9 +965,9 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
- pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
if (ret)
return ret;
+ pi->is_sw_lldp = false;
} else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@@ -975,8 +975,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
/* Configure the LLDP MIB change event */
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
- if (!ret)
- pi->is_sw_lldp = false;
+ if (ret)
+ pi->is_sw_lldp = true;
}
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index dd47869c4ad4..d3d3ec29def9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
@@ -100,6 +101,16 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_dcb_get_tc - Get the TC associated with the queue
+ * @vsi: ptr to the VSI
+ * @queue_index: queue number associated with VSI
+ */
+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
+{
+ return vsi->tx_rings[queue_index]->dcb_tc;
+}
+
+/**
* ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
* @vsi: VSI owner of rings being updated
*/
@@ -138,83 +149,62 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
/**
- * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
- * @pf: pointer to the PF struct
- *
- * Assumed caller has already disabled all VSIs before
- * calling this function. Reconfiguring DCB based on
- * local_dcbx_cfg.
- */
-static void ice_pf_dcb_recfg(struct ice_pf *pf)
-{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
- u8 tc_map = 0;
- int v, ret;
-
- /* Update each VSI */
- ice_for_each_vsi(pf, v) {
- if (!pf->vsi[v])
- continue;
-
- if (pf->vsi[v]->type == ICE_VSI_PF)
- tc_map = ice_dcb_get_ena_tc(dcbcfg);
- else
- tc_map = ICE_DFLT_TRAFFIC_CLASS;
-
- ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to config TC for VSI index: %d\n",
- pf->vsi[v]->idx);
- continue;
- }
-
- ice_vsi_map_rings_to_vectors(pf->vsi[v]);
- }
-}
-
-/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
* @locked: is the RTNL held
*/
-static
int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
- struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
- int ret = 0;
+ struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret = ICE_DCB_NO_HW_CHG;
+ struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ /* FW does not care if change happened */
+ if (!pf->hw.port_info->is_sw_lldp)
+ ret = ICE_DCB_HW_CHG_RST;
+
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
- dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
- dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
- dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+ dev_dbg(dev, "No change in DCB config required\n");
return ret;
}
/* Store old config in case FW config fails */
- old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
- memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+ old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg)
+ return -ENOMEM;
+
+ dev_info(dev, "Commit DCB Configuration to the hardware\n");
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_dbg(dev, "PF VSI doesn't exist\n");
+ ret = -EINVAL;
+ goto free_cfg;
+ }
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
if (!locked)
rtnl_lock();
- ice_pf_dis_all_vsi(pf, true);
+ ice_dis_vsi(pf_vsi, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+ memcpy(&new_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
@@ -222,7 +212,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
if (pf->hw.port_info->is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+ dev_err(dev, "Set DCB Config failed\n");
/* Restore previous settings to local config */
memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
goto out;
@@ -231,17 +221,18 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
goto out;
}
ice_pf_dcb_recfg(pf);
out:
- ice_pf_ena_all_vsi(pf, true);
+ ice_ena_vsi(pf_vsi, true);
if (!locked)
rtnl_unlock();
- devm_kfree(&pf->pdev->dev, old_cfg);
+free_cfg:
+ kfree(old_cfg);
return ret;
}
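The devm_kzalloc() plus memcpy() pair collapses to kmemdup() throughout this series; the equivalent pattern, for reference:

    /* Before: two steps, tied to device-managed lifetime */
    old_cfg = devm_kzalloc(dev, sizeof(*old_cfg), GFP_KERNEL);
    if (old_cfg)
            memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));

    /* After: one step, freed explicitly with kfree() */
    old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
    if (!old_cfg)
            return -ENOMEM;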
@@ -277,6 +268,7 @@ static bool
ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
+ struct device *dev = ice_pf_to_dev(pf);
bool need_reconfig = false;
/* Check if ETS configuration has changed */
@@ -287,33 +279,33 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
&old_cfg->etscfg.prio_table,
sizeof(new_cfg->etscfg.prio_table))) {
need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ dev_dbg(dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
- dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+ dev_dbg(dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
- dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ dev_dbg(dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ dev_dbg(dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ dev_dbg(dev, "APP Table change detected.\n");
}
- dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ dev_dbg(dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
@@ -325,11 +317,12 @@ void ice_dcb_rebuild(struct ice_pf *pf)
{
struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
+ struct device *dev = ice_pf_to_dev(pf);
enum ice_status ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
goto dcb_error;
}
@@ -348,17 +341,14 @@ void ice_dcb_rebuild(struct ice_pf *pf)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+ dev_err(dev, "Failed to set DCB to unwilling\n");
goto dcb_error;
}
/* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
- sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg) {
- dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+ prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg)
goto dcb_error;
- }
ice_init_dcb(&pf->hw, true);
if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
@@ -368,12 +358,13 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
/* difference in cfg detected - disable DCB till next MIB */
- dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+ dev_err(dev, "Set local MIB not accurate\n");
+ kfree(prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
- devm_kfree(&pf->pdev->dev, prev_cfg);
+ kfree(prev_cfg);
/* Set the local desired config */
if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
@@ -383,27 +374,30 @@ void ice_dcb_rebuild(struct ice_pf *pf)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to set desired config\n");
+ dev_err(dev, "Failed to set desired config\n");
goto dcb_error;
}
- dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+ dev_info(dev, "DCB restored after reset\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
goto dcb_error;
}
return;
dcb_error:
- dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
- prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
+ dev_err(dev, "Disabling DCB until new settings occur\n");
+ prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg)
+ return;
+
prev_cfg->etscfg.willing = true;
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
ice_pf_dcb_cfg(pf, prev_cfg, false);
- devm_kfree(&pf->pdev->dev, prev_cfg);
+ kfree(prev_cfg);
}
/**
@@ -418,18 +412,17 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
int ret = 0;
pi = pf->hw.port_info;
- newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
+ newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
if (!newcfg)
return -ENOMEM;
- memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
- dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+ dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
if (ice_pf_dcb_cfg(pf, newcfg, locked))
ret = -EINVAL;
- devm_kfree(&pf->pdev->dev, newcfg);
+ kfree(newcfg);
return ret;
}
@@ -437,9 +430,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
/**
* ice_dcb_sw_default_config - Apply a default DCB config
* @pf: PF to apply config to
+ * @ets_willing: configure ets willing
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -449,12 +443,13 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
hw = &pf->hw;
pi = hw->port_info;
- dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
+ dcbcfg = kzalloc(sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return -ENOMEM;
- memset(dcbcfg, 0, sizeof(*dcbcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
- dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
dcbcfg->etscfg.tcbwtable[0] = 100;
dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
@@ -472,7 +467,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
- devm_kfree(&pf->pdev->dev, dcbcfg);
+ kfree(dcbcfg);
if (ret)
return ret;
@@ -480,13 +475,112 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
}
/**
+ * ice_dcb_tc_contig - Check that TCs are contiguous
+ * @prio_table: pointer to priority table
+ *
+ * Check if TCs begin with TC0 and are contiguous
+ */
+static bool ice_dcb_tc_contig(u8 *prio_table)
+{
+ u8 max_tc = 0;
+ int i;
+
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+ u8 cur_tc = prio_table[i];
+
+ if (cur_tc > max_tc)
+ return false;
+ else if (cur_tc == max_tc)
+ max_tc++;
+ }
+
+ return true;
+}
+
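A standalone illustration of the invariant this check enforces; the tables are made-up UP-to-TC maps, assuming CEE_DCBX_MAX_PRIO of 8:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors ice_dcb_tc_contig(): TCs must start at TC0, and a new TC
     * index may only appear once all lower indices have been seen.
     */
    static bool tc_contig(const unsigned char *prio_table, int n)
    {
            unsigned char max_tc = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (prio_table[i] > max_tc)
                            return false;   /* gap before this TC */
                    if (prio_table[i] == max_tc)
                            max_tc++;       /* next contiguous TC seen */
            }
            return true;
    }

    int main(void)
    {
            unsigned char ok[8]  = { 0, 0, 1, 1, 2, 2, 2, 2 };
            unsigned char bad[8] = { 0, 0, 2, 2, 3, 3, 3, 3 };

            printf("%d %d\n", tc_contig(ok, 8), tc_contig(bad, 8)); /* 1 0 */
            return 0;
    }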
+/**
+ * ice_dcb_noncontig_cfg - Configure DCB for non-contiguous TCs
+ * @pf: pointer to the PF struct
+ *
+ * If the TCs are non-contiguous, configure SW DCB with only TC0 and ETS non-willing
+ */
+static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret;
+
+ /* Configure SW DCB default with ETS non-willing */
+ ret = ice_dcb_sw_dflt_cfg(pf, false, true);
+ if (ret) {
+ dev_err(dev, "Failed to set local DCB config %d\n", ret);
+ return ret;
+ }
+
+ /* Reconfigure with ETS willing so that FW will send LLDP MIB event */
+ dcbcfg->etscfg.willing = 1;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret)
+ dev_err(dev, "Failed to set DCB to willing\n");
+
+ return ret;
+}
+
+/**
+ * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
+ * @pf: pointer to the PF struct
+ *
+ * Assumed caller has already disabled all VSIs before
+ * calling this function. Reconfiguring DCB based on
+ * local_dcbx_cfg.
+ */
+void ice_pf_dcb_recfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ u8 tc_map = 0;
+ int v, ret;
+
+ /* Update each VSI */
+ ice_for_each_vsi(pf, v) {
+ struct ice_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type == ICE_VSI_PF) {
+ tc_map = ice_dcb_get_ena_tc(dcbcfg);
+
+ /* If DCBX requests non-contiguous TCs, then configure
+ * the default TC
+ */
+ if (!ice_dcb_tc_contig(dcbcfg->etscfg.prio_table)) {
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+ ice_dcb_noncontig_cfg(pf);
+ }
+ } else {
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+ }
+
+ ret = ice_vsi_cfg_tc(vsi, tc_map);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n",
+ vsi->idx);
+ continue;
+ }
+
+ ice_vsi_map_rings_to_vectors(vsi);
+ if (vsi->type == ICE_VSI_PF)
+ ice_dcbnl_set_all(vsi);
+ }
+}
+
+/**
* ice_init_pf_dcb - initialize DCB for a PF
* @pf: PF to initialize DCB for
* @locked: Was function called with RTNL held
*/
int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
{
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
int err;
@@ -495,26 +589,39 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
err = ice_init_dcb(hw, false);
if (err && !port_info->is_sw_lldp) {
- dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err);
+ dev_err(dev, "Error initializing DCB %d\n", err);
goto dcb_init_err;
}
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
+ struct ice_vsi *pf_vsi;
+
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
- dev_info(&pf->pdev->dev,
- "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
+ dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
- err = ice_dcb_sw_dflt_cfg(pf, locked);
+ err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Failed to set local DCB config %d\n", err);
err = -EIO;
goto dcb_init_err;
}
+ /* If the FW DCBX engine is not running then Rx LLDP packets
+ * need to be redirected up the stack.
+ */
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_err(dev, "Failed to set local DCB config\n");
+ err = -EIO;
+ goto dcb_init_err;
+ }
+
+ ice_cfg_sw_lldp(pf_vsi, false, true);
+
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
return 0;
}
@@ -623,10 +730,12 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event)
{
struct ice_aqc_port_ets_elem buf = { 0 };
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_aqc_lldp_get_mib *mib;
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
+ struct ice_vsi *pf_vsi;
u8 type;
int ret;
@@ -635,8 +744,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
return;
if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
- dev_dbg(&pf->pdev->dev,
- "MIB Change Event in HOST mode\n");
+ dev_dbg(dev, "MIB Change Event in HOST mode\n");
return;
}
@@ -645,21 +753,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
ICE_AQ_LLDP_BRID_TYPE_M);
- dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
+ dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
- dev_dbg(&pf->pdev->dev,
- "LLDP event mib type %s\n", type ? "remote" : "local");
+ dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
if (type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
&pi->remote_dcbx_cfg);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
+ dev_err(dev, "Failed to get remote DCB config\n");
return;
}
}
@@ -673,37 +780,43 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* Get updated DCBX data from firmware */
ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
+ dev_err(dev, "Failed to get DCB config\n");
return;
}
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
- dev_dbg(&pf->pdev->dev,
- "No change detected in DCBX configuration.\n");
+ dev_dbg(dev, "No change detected in DCBX configuration.\n");
return;
}
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
+ ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
return;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
- dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
- dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_dbg(dev, "PF VSI doesn't exist\n");
+ return;
+ }
+
rtnl_lock();
- ice_pf_dis_all_vsi(pf, true);
+ ice_dis_vsi(pf_vsi, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
rtnl_unlock();
return;
}
@@ -711,6 +824,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_pf_ena_all_vsi(pf, true);
+ ice_ena_vsi(pf_vsi, true);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 661a6f7bca64..f15e5776f287 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -5,14 +5,22 @@
#define _ICE_DCB_LIB_H_
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#ifdef CONFIG_DCB
-#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+#define ICE_DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
+#define ICE_DCB_NO_HW_CHG 1 /* DCB configuration did not change */
+#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
+int
+ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -41,10 +49,25 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
return 1;
}
+static inline u8
+ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,
+ int __always_unused queue_index)
+{
+ return 0;
+}
+
static inline int
ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
{
- dev_dbg(&pf->pdev->dev, "DCB not supported\n");
+ dev_dbg(ice_pf_to_dev(pf), "DCB not supported\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
+ struct ice_dcbx_cfg __always_unused *new_cfg,
+ bool __always_unused locked)
+{
return -EOPNOTSUPP;
}
@@ -56,6 +79,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
}
#define ice_update_dcb_stats(pf) do {} while (0)
+#define ice_pf_dcb_recfg(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
new file mode 100644
index 000000000000..d870c1aedc17
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_dcb.h"
+#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
+#include <net/dcbnl.h>
+
+#define ICE_APP_PROT_ID_ROCE 0x8915
+
+/**
+ * ice_dcbnl_devreset - perform enough of an ifdown/ifup to sync DCBNL info
+ * @netdev: device associated with interface that needs reset
+ */
+static void ice_dcbnl_devreset(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ while (ice_is_reset_in_progress(pf->state))
+ usleep_range(1000, 2000);
+
+ set_bit(__ICE_DCBNL_DEVRESET, pf->state);
+ dev_close(netdev);
+ netdev_state_change(netdev);
+ dev_open(netdev, NULL);
+ netdev_state_change(netdev);
+ clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
+}
+
+/**
+ * ice_dcbnl_getets - retrieve local ETS configuration
+ * @netdev: the relevant netdev
+ * @ets: struct to hold ETS configuration
+ */
+static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
+{
+ struct ice_dcbx_cfg *dcbxcfg;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+
+ pf = ice_netdev_to_pf(netdev);
+ pi = pf->hw.port_info;
+ dcbxcfg = &pi->local_dcbx_cfg;
+
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_setets - set IEEE ETS configuration
+ * @netdev: pointer to relevant netdev
+ * @ets: struct to hold ETS configuration
+ */
+static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int bwcfg = 0, bwrec = 0;
+ int err, i, max_tc = 0;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg->etscfg.willing = ets->willing;
+ new_cfg->etscfg.cbs = ets->cbs;
+ ice_for_each_traffic_class(i) {
+ new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
+ bwcfg += ets->tc_tx_bw[i];
+ new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+ new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
+ bwrec += ets->tc_reco_bw[i];
+ new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
+
+ /* max_tc is a 1-8 count of TCs, not a 0-7 TC index. Add one to
+ * the highest index seen if nonzero; if zero, fall back to the
+ * FW's default value
+ */
+ if (max_tc)
+ max_tc++;
+ else
+ max_tc = IEEE_8021QAZ_MAX_TCS;
+
+ new_cfg->etscfg.maxtcs = max_tc;
+
+ if (!bwcfg)
+ new_cfg->etscfg.tcbwtable[0] = 100;
+
+ if (!bwrec)
+ new_cfg->etsrec.tcbwtable[0] = 100;
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (err == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (err == ICE_DCB_NO_HW_CHG)
+ err = ICE_DCB_HW_CHG_RST;
+
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+}
+
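A worked instance of the rule in the comment above, with illustrative values:

    /* Illustrative maxtcs derivation from an example prio_tc map */
    u8 prio_tc[8] = { 0, 0, 0, 1, 1, 2, 0, 0 };
    int i, max_tc = 0;

    for (i = 0; i < 8; i++)
            if (prio_tc[i] > max_tc)
                    max_tc = prio_tc[i];
    /* count, not index: 2 + 1 = 3; all-zero -> IEEE_8021QAZ_MAX_TCS (8) */
    max_tc = max_tc ? max_tc + 1 : IEEE_8021QAZ_MAX_TCS;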
+/**
+ * ice_dcbnl_getnumtcs - Get max number of traffic classes supported
+ * @dev: pointer to netdev struct
+ * @tcid: TC ID
+ * @num: total number of TCs supported by the adapter
+ *
+ * Return the total number of TCs supported
+ */
+static int
+ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return -EINVAL;
+
+ *num = IEEE_8021QAZ_MAX_TCS;
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getdcbx - retrieve current DCBX capability
+ * @netdev: pointer to the netdev struct
+ */
+static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * ice_dcbnl_setdcbx - set required DCBX capability
+ * @netdev: the corresponding netdev
+ * @mode: required mode
+ */
+static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return ICE_DCB_NO_HW_CHG;
+
+ /* Already set to the given mode no change */
+ if (mode == pf->dcbx_cap)
+ return ICE_DCB_NO_HW_CHG;
+
+ pf->dcbx_cap = mode;
+ if (mode & DCB_CAP_DCBX_VER_CEE)
+ pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+ else
+ pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+
+ dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
+ return ICE_DCB_HW_CHG_RST;
+}
+
+/**
+ * ice_dcbnl_get_perm_hw_addr - MAC address used by DCBX
+ * @netdev: pointer to netdev struct
+ * @perm_addr: buffer to return permanent MAC address
+ */
+static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < netdev->addr_len; i++)
+ perm_addr[i] = pi->mac.perm_addr[i];
+
+ for (j = 0; j < netdev->addr_len; j++, i++)
+ perm_addr[i] = pi->mac.perm_addr[j];
+}
+
+/**
+ * ice_get_pfc_delay - Retrieve PFC Link Delay
+ * @hw: pointer to HW struct
+ * @delay: holds the PFC Link Delay value
+ */
+static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, PRTDCB_GENC);
+ *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
+}
+
+/**
+ * ice_dcbnl_getpfc - retrieve local IEEE PFC config
+ * @netdev: pointer to netdev struct
+ * @pfc: struct to hold PFC info
+ */
+static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+ struct ice_dcbx_cfg *dcbxcfg;
+ int i;
+
+ dcbxcfg = &pi->local_dcbx_cfg;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcena;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ ice_get_pfc_delay(&pf->hw, &pfc->delay);
+
+ ice_for_each_traffic_class(i) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_setpfc - set local IEEE PFC config
+ * @netdev: pointer to relevant netdev
+ * @pfc: pointer to struct holding PFC config
+ */
+static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int err;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ if (pfc->pfc_cap)
+ new_cfg->pfc.pfccap = pfc->pfc_cap;
+ else
+ new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+
+ new_cfg->pfc.pfcena = pfc->pfc_en;
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+ if (err == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (err == ICE_DCB_NO_HW_CHG)
+ err = ICE_DCB_HW_CHG_RST;
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+}
+
+/**
+ * ice_dcbnl_get_pfc_cfg - Get CEE PFC config
+ * @netdev: pointer to netdev struct
+ * @prio: corresponding user priority
+ * @setting: the PFC setting for given priority
+ */
+static void
+ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+ dev_dbg(ice_pf_to_dev(pf),
+ "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+ prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
+}
+
+/**
+ * ice_dcbnl_set_pfc_cfg - Set CEE PFC config
+ * @netdev: the corresponding netdev
+ * @prio: User Priority
+ * @set: PFC setting to apply
+ */
+static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+ if (set)
+ new_cfg->pfc.pfcena |= BIT(prio);
+ else
+ new_cfg->pfc.pfcena &= ~BIT(prio);
+
+ dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
+ prio, set, new_cfg->pfc.pfcena);
+}
+
+/**
+ * ice_dcbnl_getpfcstate - get CEE PFC mode
+ * @netdev: pointer to netdev struct
+ */
+static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ /* Return enabled if any UP is enabled for PFC */
+ if (pi->local_dcbx_cfg.pfc.pfcena)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getstate - get DCB enabled state
+ * @netdev: pointer to netdev struct
+ */
+static u8 ice_dcbnl_getstate(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u8 state = 0;
+
+ state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+
+ dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
+ return state;
+}
+
+/**
+ * ice_dcbnl_setstate - Set CEE DCB state
+ * @netdev: pointer to relevant netdev
+ * @state: state value to set
+ */
+static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return ICE_DCB_NO_HW_CHG;
+
+ /* Nothing to do */
+ if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return ICE_DCB_NO_HW_CHG;
+
+ if (state) {
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ memcpy(&pf->hw.port_info->desired_dcbx_cfg,
+ &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(struct ice_dcbx_cfg));
+ } else {
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ }
+
+ return ICE_DCB_HW_CHG;
+}
+
+/**
+ * ice_dcbnl_get_pg_tc_cfg_tx - get CEE PG Tx config
+ * @netdev: pointer to netdev struct
+ * @prio: the corresponding user priority
+ * @prio_type: traffic priority type
+ * @pgid: the BW group ID the traffic class belongs to
+ * @bw_pct: BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type, u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ dev_dbg(ice_pf_to_dev(pf),
+ "Get PG config prio=%d tc=%d\n", prio, *pgid);
+}
+
+/**
+ * ice_dcbnl_set_pg_tc_cfg_tx - set CEE PG Tx config
+ * @netdev: pointer to relevant netdev
+ * @tc: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @bwg_id: the BW group ID the TC belongs to
+ * @bw_pct: the BW percentage for the BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 __always_unused prio_type,
+ u8 __always_unused bwg_id,
+ u8 __always_unused bw_pct, u8 up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int i;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ /* prio_type, bwg_id and bw_pct per UP are not supported */
+
+ ice_for_each_traffic_class(i) {
+ if (up_map & BIT(i))
+ new_cfg->etscfg.prio_table[i] = tc;
+ }
+ new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS;
+}
+
+/**
+ * ice_dcbnl_get_pg_bwg_cfg_tx - Get CEE PGBW config
+ * @netdev: pointer to the netdev struct
+ * @pgid: corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
+ dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
+ pgid, *bw_pct);
+}
+
+/**
+ * ice_dcbnl_set_pg_bwg_cfg_tx - set CEE PG Tx BW config
+ * @netdev: the corresponding netdev
+ * @pgid: Corresponding traffic class
+ * @bw_pct: the BW percentage for the specified TC
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
+}
+
+/**
+ * ice_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
+ * @netdev: pointer to netdev struct
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type, u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+}
+
+/**
+ * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
+ * @netdev: pointer to netdev struct
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+ u8 *bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ *bw_pct = 0;
+}
+
+/**
+ * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
+ * @netdev: pointer to netdev struct
+ * @capid: the capability type
+ * @cap: the capability value
+ */
+static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
+ return ICE_DCB_NO_HW_CHG;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = pf->dcbx_cap;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
+ capid, *cap);
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getapp - get CEE APP
+ * @netdev: pointer to netdev struct
+ * @idtype: the App selector
+ * @id: the App ethtype or port number
+ */
+static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+/**
+ * ice_dcbnl_find_app - Search for APP in given DCB config
+ * @cfg: struct to hold DCBX config
+ * @app: struct to hold app data to look for
+ */
+static bool
+ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
+ struct ice_dcb_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->prot_id == cfg->app[i].prot_id &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_dcbnl_setapp - set local IEEE App config
+ * @netdev: relevant netdev struct
+ * @app: struct to hold app config info
+ */
+static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcb_app_priority_table new_app;
+ struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ int ret;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ goto setapp_out;
+
+ new_app.selector = app->selector;
+ new_app.prot_id = app->protocol;
+ new_app.priority = app->priority;
+ if (ice_dcbnl_find_app(old_cfg, &new_app)) {
+ ret = 0;
+ goto setapp_out;
+ }
+
+ new_cfg->app[new_cfg->numapps++] = new_app;
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (ret == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (ret == ICE_DCB_NO_HW_CHG)
+ ret = ICE_DCB_HW_CHG_RST;
+
+setapp_out:
+ mutex_unlock(&pf->tc_mutex);
+ return ret;
+}
+
+/**
+ * ice_dcbnl_delapp - Delete local IEEE App config
+ * @netdev: relevant netdev
+ * @app: struct to hold the app to delete
+ *
+ * Will not delete the first application, which is required by the FW
+ */
+static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ int i, j, ret = 0;
+
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ goto delapp_out;
+
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ if (old_cfg->numapps == 1)
+ goto delapp_out;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ for (i = 1; i < new_cfg->numapps; i++) {
+ if (app->selector == new_cfg->app[i].selector &&
+ app->protocol == new_cfg->app[i].prot_id &&
+ app->priority == new_cfg->app[i].priority) {
+ new_cfg->app[i].selector = 0;
+ new_cfg->app[i].prot_id = 0;
+ new_cfg->app[i].priority = 0;
+ break;
+ }
+ }
+
+ /* Did not find DCB App */
+ if (i == new_cfg->numapps) {
+ ret = -EINVAL;
+ goto delapp_out;
+ }
+
+ new_cfg->numapps--;
+
+ for (j = i; j < new_cfg->numapps; j++) {
+ new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
+ }
+
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (ret == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (ret == ICE_DCB_NO_HW_CHG)
+ ret = ICE_DCB_HW_CHG_RST;
+
+delapp_out:
+ mutex_unlock(&pf->tc_mutex);
+ return ret;
+}
+
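For clarity, the compaction above is the classic remove-and-shift on a small array: every entry past the deleted index moves down one slot. A hedged sketch with illustrative names:

    #include <net/dcbnl.h>

    /* Remove entry idx from an n-entry app table; returns new count */
    static int del_app_entry(struct dcb_app *apps, int n, int idx)
    {
            int j;

            for (j = idx; j < n - 1; j++)
                    apps[j] = apps[j + 1];
            return n - 1;
    }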
+/**
+ * ice_dcbnl_cee_set_all - Commit CEE DCB settings to HW
+ * @netdev: the corresponding netdev
+ */
+static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int err;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return ICE_DCB_NO_HW_CHG;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ mutex_lock(&pf->tc_mutex);
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+
+ mutex_unlock(&pf->tc_mutex);
+ return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err;
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = ice_dcbnl_getets,
+ .ieee_setets = ice_dcbnl_setets,
+ .ieee_getpfc = ice_dcbnl_getpfc,
+ .ieee_setpfc = ice_dcbnl_setpfc,
+ .ieee_setapp = ice_dcbnl_setapp,
+ .ieee_delapp = ice_dcbnl_delapp,
+
+ /* CEE std */
+ .getstate = ice_dcbnl_getstate,
+ .setstate = ice_dcbnl_setstate,
+ .getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+ .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = ice_dcbnl_set_pfc_cfg,
+ .getpfccfg = ice_dcbnl_get_pfc_cfg,
+ .setall = ice_dcbnl_cee_set_all,
+ .getcap = ice_dcbnl_get_cap,
+ .getnumtcs = ice_dcbnl_getnumtcs,
+ .getpfcstate = ice_dcbnl_getpfcstate,
+ .getapp = ice_dcbnl_getapp,
+
+ /* DCBX configuration */
+ .getdcbx = ice_dcbnl_getdcbx,
+ .setdcbx = ice_dcbnl_setdcbx,
+};
+
+/**
+ * ice_dcbnl_set_all - set all the apps and ieee data from DCBX config
+ * @vsi: pointer to VSI struct
+ */
+void ice_dcbnl_set_all(struct ice_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_dcbx_cfg *dcbxcfg;
+ struct ice_port_info *pi;
+ struct dcb_app sapp;
+ struct ice_pf *pf;
+ int i;
+
+ if (!netdev)
+ return;
+
+ pf = ice_netdev_to_pf(netdev);
+ pi = pf->hw.port_info;
+
+ /* SW DCB taken care of by SW Default Config */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return;
+
+ /* DCB not enabled */
+ if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return;
+
+ dcbxcfg = &pi->local_dcbx_cfg;
+
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ u8 prio, tc_map;
+
+ prio = dcbxcfg->app[i].priority;
+ tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_cfg.ena_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].prot_id;
+ sapp.priority = prio;
+ dcb_ieee_setapp(netdev, &sapp);
+ }
+ }
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * ice_dcbnl_vsi_del_app - Delete APP on all VSIs
+ * @vsi: pointer to the main VSI
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ */
+static void
+ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
+ struct ice_dcb_app_priority_table *app)
+{
+ struct dcb_app sapp;
+ int err;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->prot_id;
+ sapp.priority = app->priority;
+ err = ice_dcbnl_delapp(vsi->netdev, &sapp);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ vsi->idx, err, app->selector, app->prot_id, app->priority);
+}
+
+/**
+ * ice_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPS that are not present in the passed
+ * DCB configuration
+ */
+void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
+ int i;
+
+ if (!main_vsi)
+ return;
+
+ for (i = 0; i < old_cfg->numapps; i++) {
+ struct ice_dcb_app_priority_table app = old_cfg->app[i];
+
+ /* The APP is not available anymore, delete it */
+ if (!ice_dcbnl_find_app(new_cfg, &app))
+ ice_dcbnl_vsi_del_app(main_vsi, &app);
+ }
+}
+
+/**
+ * ice_dcbnl_setup - setup DCBNL
+ * @vsi: VSI to get associated netdev from
+ */
+void ice_dcbnl_setup(struct ice_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf;
+
+ pf = ice_netdev_to_pf(netdev);
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return;
+
+ netdev->dcbnl_ops = &dcbnl_ops;
+ ice_dcbnl_set_all(vsi);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
new file mode 100644
index 000000000000..6c630a362293
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_NL_H_
+#define _ICE_DCB_NL_H_
+
+#ifdef CONFIG_DCB
+void ice_dcbnl_setup(struct ice_vsi *vsi);
+void ice_dcbnl_set_all(struct ice_vsi *vsi);
+void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg);
+#else
+#define ice_dcbnl_setup(vsi) do {} while (0)
+#define ice_dcbnl_set_all(vsi) do {} while (0)
+#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0)
+#endif /* CONFIG_DCB */
+
+#endif /* _ICE_DCB_NL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7e23034df955..aec3c6c379df 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -156,6 +156,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -247,7 +248,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
int ret = 0;
u16 *buf;
- dev = &pf->pdev->dev;
+ dev = ice_pf_to_dev(pf);
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -342,6 +343,7 @@ static u64 ice_eeprom_test(struct net_device *netdev)
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
struct ice_pf *pf = (struct ice_pf *)hw->back;
+ struct device *dev = ice_pf_to_dev(pf);
static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5,
0x00000000, 0xFFFFFFFF
@@ -357,7 +359,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
val = rd32(hw, reg);
if (val == pattern)
continue;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
, __func__, reg, pattern, val);
return 1;
@@ -366,7 +368,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
wr32(hw, reg, orig_val);
val = rd32(hw, reg);
if (val != orig_val) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
, __func__, reg, orig_val, val);
return 1;
@@ -506,7 +508,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -623,7 +625,7 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
continue;
rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page);
+ received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -648,9 +650,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
LIST_HEAD(tmp_list);
+ struct device *dev;
u8 *tx_frame;
int i;
+ dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -711,12 +715,12 @@ static u64 ice_loopback_test(struct net_device *netdev)
ret = 10;
lbtest_free_frame:
- devm_kfree(&pf->pdev->dev, tx_frame);
+ devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
netdev_err(netdev, "Could not remove MAC filter for the test VSI");
free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_list);
+ ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
/* Disable MAC loopback after the test is completed. */
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -773,6 +777,9 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
struct ice_netdev_priv *np = netdev_priv(netdev);
bool if_running = netif_running(netdev);
struct ice_pf *pf = np->vsi->back;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
netdev_info(netdev, "offline testing starting\n");
@@ -780,7 +787,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
set_bit(__ICE_TESTING, pf->state);
if (ice_active_vfs(pf)) {
- dev_warn(&pf->pdev->dev,
+ dev_warn(dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[ICE_ETH_TEST_REG] = 1;
data[ICE_ETH_TEST_EEPROM] = 1;
@@ -815,8 +822,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev);
if (status) {
- dev_err(&pf->pdev->dev,
- "Could not open device %s, err %d",
+ dev_err(dev, "Could not open device %s, err %d",
pf->int_name, status);
}
}
@@ -961,7 +967,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
}
/* Get last SW configuration */
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
@@ -1006,7 +1012,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
}
done:
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
return err;
}
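
The conversions just above (and repeated throughout this patch) replace
devm_kzalloc()/devm_kfree() with plain kzalloc()/kfree() for scratch buffers
that live only for the duration of one call; device-managed memory is
otherwise held until the device is unbound, so short-lived buffers would pin
memory needlessly. A minimal sketch of the preferred shape, with a
hypothetical function name:

    /* Sketch: plain kzalloc()/kfree() for a function-local scratch buffer. */
    static int example_query_caps(struct ice_hw *hw)
    {
    	struct ice_aqc_get_phy_caps_data *caps;
    	int err = 0;

    	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
    	if (!caps)
    		return -ENOMEM;

    	/* ... single firmware query using caps ... */

    	kfree(caps);		/* freed now, not at device unbind */
    	return err;
    }
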
@@ -1082,7 +1088,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
break;
}
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
@@ -1109,7 +1115,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fecparam->fec |= ETHTOOL_FEC_OFF;
done:
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
return err;
}
@@ -1154,12 +1160,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct device *dev;
int ret = 0;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
+ dev = ice_pf_to_dev(pf);
set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
@@ -1188,7 +1196,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* events to respond to.
*/
if (status)
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"Failed to unreg for LLDP events\n");
/* The AQ call to stop the FW LLDP agent will generate
@@ -1196,20 +1204,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
*/
status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
if (status)
- dev_warn(&pf->pdev->dev,
- "Fail to stop LLDP agent\n");
+ dev_warn(dev, "Fail to stop LLDP agent\n");
/* Use case for having the FW LLDP agent stopped
* will likely not need DCB, so failure to init is
* not a concern of ethtool
*/
status = ice_init_pf_dcb(pf, true);
if (status)
- dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
-
- /* Forward LLDP packets to default VSI so that they
- * are passed up the stack
- */
- ice_cfg_sw_lldp(vsi, false, true);
+ dev_warn(dev, "Fail to init DCB\n");
} else {
enum ice_status status;
bool dcbx_agent_status;
@@ -1219,8 +1221,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
*/
status = ice_aq_start_lldp(&pf->hw, true, NULL);
if (status)
- dev_warn(&pf->pdev->dev,
- "Fail to start LLDP Agent\n");
+ dev_warn(dev, "Fail to start LLDP Agent\n");
/* AQ command to start FW DCBX agent will fail if
* the agent is already started
@@ -1229,10 +1230,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
&dcbx_agent_status,
NULL);
if (status)
- dev_dbg(&pf->pdev->dev,
- "Failed to start FW DCBX\n");
+ dev_dbg(dev, "Failed to start FW DCBX\n");
- dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
+ dev_info(dev, "FW DCBX agent is %s\n",
dcbx_agent_status ? "ACTIVE" : "DISABLED");
/* Failure to configure MIB change or init DCB is not
@@ -1242,7 +1242,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
*/
status = ice_init_pf_dcb(pf, true);
if (status)
- dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+ dev_dbg(dev, "Fail to init DCB\n");
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
@@ -1252,10 +1252,15 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Fail to enable MIB change events\n");
}
}
+ if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
+ /* bring the VSI down and back up so the Rx cfg change takes effect. */
+ ice_down(vsi);
+ ice_up(vsi);
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
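
The legacy-rx branch above follows the standard pattern for Rx datapath
reconfiguration: quiesce the interface so the rings are torn down, then bring
it back up so ring setup re-reads the flag. A hedged sketch of that shape
(ice_down()/ice_up() are the driver's own helpers; the wrapper is
hypothetical):

    /* Sketch: restart the datapath so ring setup picks up the new flag. */
    static void example_apply_rx_cfg(struct ice_vsi *vsi)
    {
    	ice_down(vsi);	/* stop queues, NAPI and interrupts */
    	ice_up(vsi);	/* rebuild rings with the updated configuration */
    }
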
@@ -2140,7 +2145,7 @@ ice_get_link_ksettings(struct net_device *netdev,
/* flow control is symmetric and always supported */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
@@ -2198,7 +2203,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
done:
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
return err;
}
@@ -2427,8 +2432,7 @@ ice_set_link_ksettings(struct net_device *netdev,
usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
}
- abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities),
- GFP_KERNEL);
+ abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
if (!abilities)
return -ENOMEM;
@@ -2520,7 +2524,7 @@ ice_set_link_ksettings(struct net_device *netdev,
}
done:
- devm_kfree(&pf->pdev->dev, abilities);
+ kfree(abilities);
clear_bit(__ICE_CFG_BUSY, pf->state);
return err;
@@ -2577,6 +2581,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *xdp_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2611,6 +2616,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
return 0;
}
+ /* If there is an AF_XDP UMEM attached to any of the Rx rings,
+ * disallow changing the number of descriptors -- regardless of
+ * whether the netdev is running or not.
+ */
+ if (ice_xsk_any_rx_ring_ena(vsi))
+ return -EBUSY;
+
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
@@ -2624,6 +2636,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[i]->count = new_tx_cnt;
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rx_rings[i]->count = new_rx_cnt;
+ if (ice_is_xdp_ena_vsi(vsi))
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->count = new_tx_cnt;
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2635,14 +2652,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
vsi->tx_rings[0]->count, new_tx_cnt);
- tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
- sizeof(*tx_rings), GFP_KERNEL);
+ tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
@@ -2650,15 +2666,42 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
tx_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
- while (i) {
- i--;
+ while (i--)
ice_clean_tx_ring(&tx_rings[i]);
- }
- devm_kfree(&pf->pdev->dev, tx_rings);
+ kfree(tx_rings);
goto done;
}
}
+ if (!ice_is_xdp_ena_vsi(vsi))
+ goto process_rx;
+
+ /* alloc updated XDP resources */
+ netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
+ vsi->xdp_rings[0]->count, new_tx_cnt);
+
+ xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL);
+ if (!xdp_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ /* clone ring and setup updated count */
+ xdp_rings[i] = *vsi->xdp_rings[i];
+ xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].desc = NULL;
+ xdp_rings[i].tx_buf = NULL;
+ err = ice_setup_tx_ring(&xdp_rings[i]);
+ if (err) {
+ while (i--)
+ ice_clean_tx_ring(&xdp_rings[i]);
+ kfree(xdp_rings);
+ goto free_tx;
+ }
+ ice_set_ring_xdp(&xdp_rings[i]);
+ }
+
process_rx:
if (new_rx_cnt == vsi->rx_rings[0]->count)
goto process_link;
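
Both the Tx and XDP loops above use the same clone-then-swap idiom: fully
initialized replacement rings are built off to the side, the swap happens
only after every allocation has succeeded (and only while the VSI is down),
and on failure while (i--) unwinds exactly the rings set up so far. A
compressed sketch of the idiom with generic names (not driver API):

    new_rings = kcalloc(n, sizeof(*new_rings), GFP_KERNEL);
    if (!new_rings)
    	return -ENOMEM;

    for (i = 0; i < n; i++) {
    	new_rings[i] = *old_rings[i];	/* clone existing config */
    	new_rings[i].count = new_cnt;	/* apply the new descriptor count */
    	err = setup_ring(&new_rings[i]);
    	if (err) {
    		while (i--)		/* unwind only what succeeded */
    			clean_ring(&new_rings[i]);
    		kfree(new_rings);
    		return err;
    	}
    }
    /* only now stop traffic and swap the clones in */
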
@@ -2667,14 +2710,13 @@ process_rx:
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0]->count, new_rx_cnt);
- rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
- sizeof(*rx_rings), GFP_KERNEL);
+ rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
@@ -2698,7 +2740,7 @@ rx_unwind:
i--;
ice_free_rx_ring(&rx_rings[i]);
}
- devm_kfree(&pf->pdev->dev, rx_rings);
+ kfree(rx_rings);
err = -ENOMEM;
goto free_tx;
}
@@ -2712,15 +2754,15 @@ process_link:
ice_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
ice_free_tx_ring(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
- devm_kfree(&pf->pdev->dev, tx_rings);
+ kfree(tx_rings);
}
if (rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ice_free_rx_ring(vsi->rx_rings[i]);
/* copy the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
@@ -2734,9 +2776,19 @@ process_link:
rx_rings[i].next_to_alloc = 0;
*vsi->rx_rings[i] = rx_rings[i];
}
- devm_kfree(&pf->pdev->dev, rx_rings);
+ kfree(rx_rings);
+ }
+
+ if (xdp_rings) {
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ *vsi->xdp_rings[i] = xdp_rings[i];
+ }
+ kfree(xdp_rings);
}
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
ice_up(vsi);
}
goto done;
@@ -2744,9 +2796,9 @@ process_link:
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_txq(vsi, i)
ice_free_tx_ring(&tx_rings[i]);
- devm_kfree(&pf->pdev->dev, tx_rings);
+ kfree(tx_rings);
}
done:
@@ -2794,7 +2846,6 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_vsi *vsi = np->vsi;
struct ice_dcbx_cfg *dcbx_cfg;
enum ice_status status;
@@ -2804,8 +2855,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
dcbx_cfg = &pi->local_dcbx_cfg;
- pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return;
@@ -2828,7 +2878,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = 1;
out:
- devm_kfree(&vsi->back->pdev->dev, pcaps);
+ kfree(pcaps);
}
/**
@@ -3009,7 +3059,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
return -EIO;
}
- lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -3022,7 +3072,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
indir[i] = (u32)(lut[i]);
out:
- devm_kfree(&pf->pdev->dev, lut);
+ kfree(lut);
return ret;
}
@@ -3043,8 +3093,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct device *dev;
u8 *seed = NULL;
+ dev = ice_pf_to_dev(pf);
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -3057,8 +3109,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
- devm_kzalloc(&pf->pdev->dev,
- ICE_VSIQF_HKEY_ARRAY_SIZE,
+ devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
@@ -3068,8 +3119,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
- vsi->rss_table_size,
+ vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
GFP_KERNEL);
if (!vsi->rss_lut_user)
return -ENOMEM;
@@ -3092,6 +3142,188 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return 0;
}
+/**
+ * ice_get_max_txq - return the maximum number of Tx queues in a PF
+ * @pf: PF structure
+ */
+static int ice_get_max_txq(struct ice_pf *pf)
+{
+ return min_t(int, num_online_cpus(),
+ pf->hw.func_caps.common_cap.num_txq);
+}
+
+/**
+ * ice_get_max_rxq - return the maximum number of Rx queues in a PF
+ * @pf: PF structure
+ */
+static int ice_get_max_rxq(struct ice_pf *pf)
+{
+ return min_t(int, num_online_cpus(),
+ pf->hw.func_caps.common_cap.num_rxq);
+}
+
+/**
+ * ice_get_combined_cnt - return the current number of combined channels
+ * @vsi: PF VSI pointer
+ *
+ * Go through all queue vectors and count the ones that have both an Rx
+ * and a Tx ring attached
+ */
+static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
+{
+ u32 combined = 0;
+ int q_idx;
+
+ ice_for_each_q_vector(vsi, q_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+ combined++;
+ }
+
+ return combined;
+}
+
+/**
+ * ice_get_channels - get the current and max supported channels
+ * @dev: network interface device structure
+ * @ch: ethtool channel data structure
+ */
+static void
+ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ /* check to see if VSI is active */
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ /* report maximum channels */
+ ch->max_rx = ice_get_max_rxq(pf);
+ ch->max_tx = ice_get_max_txq(pf);
+ ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
+
+ /* report current channels */
+ ch->combined_count = ice_get_combined_cnt(vsi);
+ ch->rx_count = vsi->num_rxq - ch->combined_count;
+ ch->tx_count = vsi->num_txq - ch->combined_count;
+}
+
+/**
+ * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
+ * @vsi: VSI to reconfigure RSS LUT on
+ * @req_rss_size: requested range of queue numbers for hashing
+ *
+ * Set the VSI's RSS parameters and configure the RSS LUT based on them.
+ */
+static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_hw *hw;
+ int err = 0;
+ u8 *lut;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+
+ if (!req_rss_size)
+ return -EINVAL;
+
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* set RSS LUT parameters */
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ vsi->rss_size = 1;
+ } else {
+ struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
+
+ vsi->rss_size = min_t(int, req_rss_size,
+ BIT(caps->rss_table_entry_width));
+ }
+
+ /* create/set RSS LUT */
+ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+ status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
+ vsi->rss_table_size);
+ if (status) {
+ dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ err = -EIO;
+ }
+
+ kfree(lut);
+ return err;
+}
+
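
ice_fill_rss_lut() populates the lookup table round-robin over the first
rss_size queues, so every LUT entry points at a valid queue after the channel
count changes. Assuming that behavior, the fill reduces to:

    /* Sketch of the round-robin LUT fill assumed above. */
    static void example_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
    {
    	u16 i;

    	for (i = 0; i < rss_table_size; i++)
    		lut[i] = i % rss_size;	/* queues 0..rss_size-1, repeating */
    }
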
+/**
+ * ice_set_channels - set the number of channels
+ * @dev: network interface device structure
+ * @ch: ethtool channel data structure
+ */
+static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ int new_rx = 0, new_tx = 0;
+ u32 curr_combined;
+
+ /* do not support changing channels in Safe Mode */
+ if (ice_is_safe_mode(pf)) {
+ netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ /* do not support changing other_count */
+ if (ch->other_count)
+ return -EINVAL;
+
+ curr_combined = ice_get_combined_cnt(vsi);
+
+ /* These checks cover the case where the user didn't specify a
+ * particular value on the command line but we still receive a
+ * non-zero value via get_channels(); see the do_schannels() routine
+ * in ethtool.c of the user-space ethtool repository.
+ */
+ if (ch->rx_count == vsi->num_rxq - curr_combined)
+ ch->rx_count = 0;
+ if (ch->tx_count == vsi->num_txq - curr_combined)
+ ch->tx_count = 0;
+ if (ch->combined_count == curr_combined)
+ ch->combined_count = 0;
+
+ if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
+ netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
+ return -EINVAL;
+ }
+
+ new_rx = ch->combined_count + ch->rx_count;
+ new_tx = ch->combined_count + ch->tx_count;
+
+ if (new_rx > ice_get_max_rxq(pf)) {
+ netdev_err(dev, "Maximum allowed Rx channels is %d\n",
+ ice_get_max_rxq(pf));
+ return -EINVAL;
+ }
+ if (new_tx > ice_get_max_txq(pf)) {
+ netdev_err(dev, "Maximum allowed Tx channels is %d\n",
+ ice_get_max_txq(pf));
+ return -EINVAL;
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+
+ if (new_rx && !netif_is_rxfh_configured(dev))
+ return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+
+ return 0;
+}
+
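
The zeroing above mirrors ethtool's do_schannels(): user space echoes back
the current counts for any field the user left unset, so a value equal to
the current configuration is treated as "unchanged", and the requested
totals come out as the combined count plus the dedicated counts. Worked
example with hypothetical values: with 8 combined channels configured,
ethtool -L eth0 combined 4 arrives as combined_count = 4 and
rx_count = tx_count = 0, so

    new_rx = ch->combined_count + ch->rx_count;	/* 4 + 0 = 4 */
    new_tx = ch->combined_count + ch->tx_count;	/* 4 + 0 = 4 */
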
enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
@@ -3131,7 +3363,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
break;
default:
- dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
return -EINVAL;
}
@@ -3271,7 +3503,8 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
- dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
+ c_type);
return -EINVAL;
}
@@ -3368,10 +3601,17 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
- int i;
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ /* In some cases, if DCB is configured, num_[rx|tx]q
+ * can be less than vsi->num_q_vectors. This check
+ * accounts for that so we don't report a false failure.
+ */
+ if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
+ goto set_complete;
- ice_for_each_q_vector(vsi, i) {
- if (ice_set_q_coalesce(vsi, ec, i))
+ if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
}
goto set_complete;
@@ -3398,6 +3638,151 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
+#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
+#define ICE_MODULE_TYPE_SFP 0x03
+#define ICE_MODULE_TYPE_QSFP_PLUS 0x0D
+#define ICE_MODULE_TYPE_QSFP28 0x11
+#define ICE_MODULE_SFF_ADDR_MODE 0x04
+#define ICE_MODULE_SFF_DIAG_CAPAB 0x40
+#define ICE_MODULE_REVISION_ADDR 0x01
+#define ICE_MODULE_SFF_8472_COMP 0x5E
+#define ICE_MODULE_SFF_8472_SWAP 0x5C
+#define ICE_MODULE_QSFP_MAX_LEN 640
+
+/**
+ * ice_get_module_info - get SFF module type and revision information
+ * @netdev: network interface device structure
+ * @modinfo: module EEPROM size and layout information structure
+ */
+static int
+ice_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 sff8472_comp = 0;
+ u8 sff8472_swap = 0;
+ u8 sff8636_rev = 0;
+ u8 value = 0;
+
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
+ 0, &value, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ switch (value) {
+ case ICE_MODULE_TYPE_SFP:
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_SFF_8472_COMP, 0x00, 0,
+ &sff8472_comp, 1, 0, NULL);
+ if (status)
+ return -EIO;
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
+ &sff8472_swap, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp &&
+ (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ }
+ break;
+ case ICE_MODULE_TYPE_QSFP_PLUS:
+ case ICE_MODULE_TYPE_QSFP28:
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_REVISION_ADDR, 0x00, 0,
+ &sff8636_rev, 1, 0, NULL);
+ if (status)
+ return -EIO;
+ /* Check revision compliance */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ default:
+ netdev_warn(netdev,
+ "SFF Module Type not recognized.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
+ * @netdev: network interface device structure
+ * @ee: EEPROM dump request structure
+ * @data: buffer to be filled with EEPROM contents
+ */
+static int
+ice_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ bool is_sfp = false;
+ u16 offset = 0;
+ u8 value = 0;
+ u8 page = 0;
+ int i;
+
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
+ &value, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ if (!ee || !ee->len || !data)
+ return -EINVAL;
+
+ if (value == ICE_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < ee->len; i++) {
+ offset = i + ee->offset;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= ETH_MODULE_SFF_8079_LEN) {
+ offset -= ETH_MODULE_SFF_8079_LEN;
+ addr = ICE_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= ETH_MODULE_SFF_8436_LEN / 2;
+ page++;
+ }
+ }
+
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp,
+ &value, 1, 0, NULL);
+ if (status)
+ value = 0;
+ data[i] = value;
+ }
+ return 0;
+}
+
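
The address math above reflects the two module layouts: SFP (SFF-8472)
exposes bytes past the first 256-byte page at a second I2C device address
(0xA2), while QSFP parts keep one device address and bank the upper 128
bytes of the 256-byte map into pages selected via the set_page argument.
Worked example of the QSFP translation for a flat offset of 300:

    u16 offset = 300;
    u8 page = 0;

    while (offset >= ETH_MODULE_SFF_8436_LEN) {	/* 256 */
    	offset -= ETH_MODULE_SFF_8436_LEN / 2;	/* only upper 128B are paged */
    	page++;
    }
    /* result: page = 1, offset = 172, device address still 0xA0 */
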
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
@@ -3428,11 +3813,15 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_channels = ice_get_channels,
+ .set_channels = ice_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
+ .get_module_info = ice_get_module_info,
+ .get_module_eeprom = ice_get_module_eeprom,
};
static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
@@ -3451,6 +3840,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
+ .get_channels = ice_get_channels,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 152fbd556e9b..e8f32350fed2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -52,6 +52,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PRTDCB_GENC 0x00083000
+#define PRTDCB_GENC_PFCLDA_S 16
+#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
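
The new PRTDCB_GENC definitions follow the driver's usual _S/_M convention:
ICE_M(mask, shift) builds the field mask already shifted into place, so a
field is read by masking and then shifting down. A sketch of extracting the
PFCLDA field (assuming the driver's rd32() register accessor):

    u32 regval = rd32(hw, PRTDCB_GENC);
    u16 pfc_lda = (regval & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S;
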
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 2aac8f13daeb..ad34f22d44ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -211,7 +211,7 @@ enum ice_flex_rx_mdid {
/* Rx/Tx Flag64 packet flag bits */
enum ice_flg64_bits {
ICE_FLG_PKT_DSI = 0,
- ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x8100 = 14,
ICE_FLG_EVLAN_x9100,
ICE_FLG_VLAN_x8100,
ICE_FLG_TNL_MAC = 22,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index cc755382df25..e7449248fab4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2,232 +2,26 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
- *
- * Configure the Rx descriptor ring in RLAN context.
+ * ice_vsi_type_str - maps VSI type enum to string equivalents
+ * @type: VSI type enum
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+const char *ice_vsi_type_str(enum ice_vsi_type type)
{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
- u32 rxdid = ICE_RXDID_FLEX_NIC;
- struct ice_rlan_ctx rlan_ctx;
- u32 regval;
- u16 pf_q;
- int err;
-
- /* what is Rx queue number in global space of 2K Rx queues */
- pf_q = vsi->rxq_map[ring->q_index];
-
- /* clear the context structure first */
- memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
- rlan_ctx.base = ring->dma >> 7;
-
- rlan_ctx.qlen = ring->count;
-
- /* Receive Packet Data Buffer Size.
- * The Packet Data Buffer Size is defined in 128 byte units.
- */
- rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
-
- /* use 32 byte descriptors */
- rlan_ctx.dsize = 1;
-
- /* Strip the Ethernet CRC bytes before the packet is posted to host
- * memory.
- */
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
-
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
- rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
-
- /* This controls whether VLAN is stripped from inner headers
- * The VLAN in the inner L2 header is stripped to the receive
- * descriptor if enabled by this flag.
- */
- rlan_ctx.showiv = 0;
-
- /* Max packet size for this queue - must not be set to a larger value
- * than 5 x DBUF
- */
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
- /* Rx queue threshold in units of 64 */
- rlan_ctx.lrxqthresh = 1;
-
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
- }
-
- /* Absolute queue number out of 2K needs to be passed */
- err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
- if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
- pf_q, err);
- return -EIO;
- }
-
- if (vsi->type == ICE_VSI_VF)
- return 0;
-
- /* init queue specific tail register */
- ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
- writel(0, ring->tail);
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-
- return 0;
-}
-
-/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
- *
- * Configure the Tx descriptor ring in TLAN context.
- */
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring);
-
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
-
- /* queue belongs to a specific VSI type
- * VF / VM index should be programmed per vmvf_type setting:
- * for vmvf_type = VF, it is VF number between 0-256
- * for vmvf_type = VM, it is VM number between 0-767
- * for PF or EMP this field should be set to zero
- */
- switch (vsi->type) {
- case ICE_VSI_LB:
- /* fall through */
+ switch (type) {
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
- break;
+ return "ICE_VSI_PF";
case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
- break;
+ return "ICE_VSI_VF";
+ case ICE_VSI_LB:
+ return "ICE_VSI_LB";
default:
- return;
- }
-
- /* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
-
- tlan_ctx->tso_ena = ICE_TX_LEGACY;
- tlan_ctx->tso_qnum = pf_q;
-
- /* Legacy or Advanced Host Interface:
- * 0: Advanced Host Interface
- * 1: Legacy Host Interface
- */
- tlan_ctx->legacy_int = ICE_TX_LEGACY;
-}
-
-/**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine will wait for the given Rx queue of the PF to reach the
- * enabled or disabled state.
- * Returns -ETIMEDOUT in case of failing to reach the requested state after
- * multiple retries; else will return 0 in case of success.
- */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
-{
- int i;
-
- for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
- QRX_CTRL_QENA_STAT_M))
- return 0;
-
- usleep_range(20, 40);
+ return "unknown";
}
-
- return -ETIMEDOUT;
-}
-
-/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
- * @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
-{
- int pf_q = vsi->rxq_map[rxq_idx];
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int ret = 0;
- u32 rx_reg;
-
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
-
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- return 0;
-
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
-
- return ret;
}
/**
@@ -258,36 +52,39 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
/* allocate memory for both Tx and Rx ring pointers */
- vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
- vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings)
goto err_rings;
- vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
+ vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
goto err_txq_map;
- vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
sizeof(*vsi->rxq_map), GFP_KERNEL);
if (!vsi->rxq_map)
goto err_rxq_map;
-
/* There is no need to allocate q_vectors for a loopback VSI. */
if (vsi->type == ICE_VSI_LB)
return 0;
/* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
+ vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
sizeof(*vsi->q_vectors), GFP_KERNEL);
if (!vsi->q_vectors)
goto err_vectors;
@@ -295,13 +92,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
return 0;
err_vectors:
- devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+ devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
- devm_kfree(&pf->pdev->dev, vsi->txq_map);
+ devm_kfree(dev, vsi->txq_map);
err_txq_map:
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ devm_kfree(dev, vsi->rx_rings);
err_rings:
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ devm_kfree(dev, vsi->tx_rings);
return -ENOMEM;
}
@@ -345,15 +142,24 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
case ICE_VSI_PF:
vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
num_online_cpus());
+ if (vsi->req_txq) {
+ vsi->alloc_txq = vsi->req_txq;
+ vsi->num_txq = vsi->req_txq;
+ }
pf->num_lan_tx = vsi->alloc_txq;
/* only 1 Rx queue unless RSS is enabled */
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
- else
+ } else {
vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
num_online_cpus());
+ if (vsi->req_rxq) {
+ vsi->alloc_rxq = vsi->req_rxq;
+ vsi->num_rxq = vsi->req_rxq;
+ }
+ }
pf->num_lan_rx = vsi->alloc_rxq;
@@ -375,7 +181,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
break;
}
@@ -421,7 +227,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
struct ice_vsi_ctx *ctxt;
enum ice_status status;
- ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
@@ -433,10 +239,10 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
- vsi->vsi_num);
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+ vsi->vsi_num, status);
- devm_kfree(&pf->pdev->dev, ctxt);
+ kfree(ctxt);
}
/**
@@ -446,26 +252,29 @@ void ice_vsi_delete(struct ice_vsi *vsi)
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
/* free the ring and vector containers */
if (vsi->q_vectors) {
- devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+ devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
}
if (vsi->tx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ devm_kfree(dev, vsi->tx_rings);
vsi->tx_rings = NULL;
}
if (vsi->rx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ devm_kfree(dev, vsi->rx_rings);
vsi->rx_rings = NULL;
}
if (vsi->txq_map) {
- devm_kfree(&pf->pdev->dev, vsi->txq_map);
+ devm_kfree(dev, vsi->txq_map);
vsi->txq_map = NULL;
}
if (vsi->rxq_map) {
- devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+ devm_kfree(dev, vsi->rxq_map);
vsi->rxq_map = NULL;
}
}
@@ -482,6 +291,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
+ struct device *dev;
if (!vsi)
return 0;
@@ -490,10 +300,10 @@ int ice_vsi_clear(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
+ dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
- dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
- vsi->idx);
+ dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
return -EINVAL;
}
@@ -506,7 +316,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
- devm_kfree(&pf->pdev->dev, vsi);
+ devm_kfree(dev, vsi);
return 0;
}
@@ -539,6 +349,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
/* Need to protect the allocation of the VSIs at the PF level */
@@ -549,11 +360,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
* is available to be populated
*/
if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+ dev_dbg(dev, "out of VSI slots!\n");
goto unlock_pf;
}
- vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+ vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
if (!vsi)
goto unlock_pf;
@@ -585,7 +396,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
goto err_rings;
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
}
@@ -598,7 +409,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
goto unlock_pf;
err_rings:
- devm_kfree(&pf->pdev->dev, vsi);
+ devm_kfree(dev, vsi);
vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
@@ -606,88 +417,6 @@ unlock_pf:
}
/**
- * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
-{
- int offset, i;
-
- mutex_lock(qs_cfg->qs_mutex);
- offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
- 0, qs_cfg->q_count, 0);
- if (offset >= qs_cfg->pf_map_size) {
- mutex_unlock(qs_cfg->qs_mutex);
- return -ENOMEM;
- }
-
- bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
- for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-}
-
-/**
- * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
-{
- int i, index = 0;
-
- mutex_lock(qs_cfg->qs_mutex);
- for (i = 0; i < qs_cfg->q_count; i++) {
- index = find_next_zero_bit(qs_cfg->pf_map,
- qs_cfg->pf_map_size, index);
- if (index >= qs_cfg->pf_map_size)
- goto err_scatter;
- set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-err_scatter:
- for (index = 0; index < i; index++) {
- clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
- qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return -ENOMEM;
-}
-
-/**
- * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * This function first tries to find contiguous space. If it is not successful,
- * it tries with the scatter approach.
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
-{
- int ret = 0;
-
- ret = __ice_vsi_get_qs_contig(qs_cfg);
- if (ret) {
- /* contig failed, so try with scatter approach */
- qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
- qs_cfg->scatter_count);
- ret = __ice_vsi_get_qs_sc(qs_cfg);
- }
- return ret;
-}
-
-/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -769,14 +498,15 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
static void ice_rss_clean(struct ice_vsi *vsi)
{
- struct ice_pf *pf;
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
- pf = vsi->back;
+ dev = ice_pf_to_dev(pf);
if (vsi->rss_hkey_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+ devm_kfree(dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+ devm_kfree(dev, vsi->rss_lut_user);
}
/**
@@ -814,7 +544,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
case ICE_VSI_LB:
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
vsi->type);
break;
}
@@ -918,7 +648,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
else
max_rss = ICE_MAX_SMALL_RSS_QS;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
- qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
+ if (!vsi->req_rxq)
+ qcount_rx = min_t(int, qcount_rx,
+ vsi->rss_size);
}
}
@@ -990,9 +722,11 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type;
+ struct device *dev;
struct ice_pf *pf;
pf = vsi->back;
+ dev = ice_pf_to_dev(pf);
switch (vsi->type) {
case ICE_VSI_PF:
@@ -1006,10 +740,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_LB:
- dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
+ dev_dbg(dev, "Unsupported VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
return;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
return;
}
@@ -1022,18 +757,21 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
+ * @init_vsi: true if this call is creating the VSI, false if updating it
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_init(struct ice_vsi *vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct ice_vsi_ctx *ctxt;
+ struct device *dev;
int ret = 0;
- ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -1050,7 +788,8 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
ice_set_dflt_vsi_ctx(ctxt);
@@ -1059,11 +798,24 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_set_rss_vsi_ctx(ctxt, vsi);
+ /* if updating the VSI context, set valid_sections to
+ * indicate which section of the VSI context is being updated
+ */
+ if (!init_vsi)
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ }
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
+ if (!init_vsi) /* means the VSI is being updated */
+ /* must indicate which section of the VSI context is
+ * being modified
+ */
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
/* Enable MAC Antispoof with new VSI being initialized or updated */
if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
@@ -1080,11 +832,20 @@ static int ice_vsi_init(struct ice_vsi *vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
- ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Add VSI failed, err %d\n", ret);
- return -EIO;
+ if (init_vsi) {
+ ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(dev, "Add VSI failed, err %d\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+ } else {
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(dev, "Update VSI failed, err %d\n", ret);
+ ret = -EIO;
+ goto out;
+ }
}
/* keep context for update VSI operations */
@@ -1093,131 +854,9 @@ static int ice_vsi_init(struct ice_vsi *vsi)
/* record VSI number returned */
vsi->vsi_num = ctxt->vsi_num;
- devm_kfree(&pf->pdev->dev, ctxt);
- return ret;
-}
-
-/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
- */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_q_vector *q_vector;
- struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
-
- if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
- v_idx);
- return;
- }
- q_vector = vsi->q_vectors[v_idx];
-
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
-
- /* only VSI with an associated netdev is set up with NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
-
- devm_kfree(&pf->pdev->dev, q_vector);
- vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
- int v_idx;
-
- ice_for_each_q_vector(vsi, v_idx)
- ice_free_q_vector(vsi, v_idx);
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the VSI struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
-
- /* allocate q_vector */
- q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
- q_vector->vsi = vsi;
- q_vector->v_idx = v_idx;
- if (vsi->type == ICE_VSI_VF)
- goto out;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
- /* This will not be called in the driver load path because the netdev
- * will not be created yet. All other cases with register the NAPI
- * handler here (i.e. resume, reset/rebuild, etc.)
- */
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
-
out:
- /* tie q_vector and VSI together */
- vsi->q_vectors[v_idx] = q_vector;
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- int err;
-
- if (vsi->q_vectors[0]) {
- dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
- vsi->vsi_num);
- return -EEXIST;
- }
-
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = ice_vsi_alloc_q_vector(vsi, v_idx);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
-
- dev_err(&pf->pdev->dev,
- "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ kfree(ctxt);
+ return ret;
}
/**
@@ -1233,14 +872,16 @@ err_out:
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
u16 num_q_vectors;
+ dev = ice_pf_to_dev(pf);
/* SRIOV doesn't grab irq_tracker entries for each VSI */
if (vsi->type == ICE_VSI_VF)
return 0;
if (vsi->base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+ dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
vsi->vsi_num, vsi->base_vector);
return -EEXIST;
}
@@ -1250,7 +891,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
@@ -1293,8 +934,10 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
int i;
+ dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
@@ -1309,7 +952,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
- ring->dev = &pf->pdev->dev;
+ ring->dev = dev;
ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1328,7 +971,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
- ring->dev = &pf->pdev->dev;
+ ring->dev = dev;
ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1341,66 +984,6 @@ err_out:
}
/**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
- */
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#else
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#endif /* CONFIG_DCB */
-{
- int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
- int v_id;
-
- /* initially assigning remaining rings count to VSIs num queue value */
- tx_rings_rem = vsi->num_txq;
- rx_rings_rem = vsi->num_rxq;
-
- for (v_id = 0; v_id < q_vectors; v_id++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
- /* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
- q_vector->tx.itr_idx = ICE_TX_ITR;
- q_base = vsi->num_txq - tx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- }
- tx_rings_rem -= tx_rings_per_v;
-
- /* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
- q_vector->rx.itr_idx = ICE_RX_ITR;
- q_base = vsi->num_rxq - rx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- }
- rx_rings_rem -= rx_rings_per_v;
- }
-}
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1414,8 +997,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
int err = 0;
u8 *lut;
- lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
- GFP_KERNEL);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -1428,7 +1010,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
}
err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
- devm_kfree(&vsi->back->pdev->dev, lut);
+ kfree(lut);
return err;
}
@@ -1441,12 +1023,14 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
enum ice_status status;
+ struct device *dev;
int err = 0;
u8 *lut;
+ dev = ice_pf_to_dev(pf);
vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
- lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -1459,13 +1043,12 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(&pf->pdev->dev,
- "set_rss_lut failed, error %d\n", status);
+ dev_err(dev, "set_rss_lut failed, error %d\n", status);
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
- key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
@@ -1482,14 +1065,13 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
- status);
+ dev_err(dev, "set_rss_key failed, error %d\n", status);
err = -EIO;
}
- devm_kfree(&pf->pdev->dev, key);
+ kfree(key);
ice_vsi_cfg_rss_exit:
- devm_kfree(&pf->pdev->dev, lut);
+ kfree(lut);
return err;
}
@@ -1509,7 +1091,7 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+ tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
@@ -1601,9 +1183,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
int err = 0;
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -1620,11 +1204,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_add_vlan(&pf->hw, &tmp_add_list);
if (status) {
err = -ENODEV;
- dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
- vid, vsi->vsi_num);
+ dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
+ vsi->vsi_num);
}
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1641,9 +1225,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
int err = 0;
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return -ENOMEM;
@@ -1659,21 +1245,46 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
}
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+#if (PAGE_SIZE < 8192)
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+#else
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#endif
+ }
+}
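The new helper above picks the Rx buffer length from the page size, the MTU, and the legacy-Rx flag. A standalone sketch of the same decision tree, with the ICE_RXBUF_* and NET_IP_ALIGN values hard-coded as assumptions (the sketch also ignores the ICE_2K_TOO_SMALL_WITH_PADDING special case):

    #include <stdio.h>

    /* assumed values, for illustration only; see the ice headers for
     * the real definitions
     */
    #define DEMO_RXBUF_1536   1536
    #define DEMO_RXBUF_2048   2048
    #define DEMO_RXBUF_3072   3072
    #define DEMO_NET_IP_ALIGN 2     /* 0 on x86, 2 on most other arches */
    #define DEMO_PAGE_SIZE    4096
    #define DEMO_ETH_DATA_LEN 1500

    int main(void)
    {
    	int mtu = 1500, legacy_rx = 0, rx_buf_len;

    	if (legacy_rx)
    		rx_buf_len = DEMO_RXBUF_2048;
    	else if (DEMO_PAGE_SIZE < 8192 && mtu <= DEMO_ETH_DATA_LEN)
    		rx_buf_len = DEMO_RXBUF_1536 - DEMO_NET_IP_ALIGN; /* 1534 */
    	else if (DEMO_PAGE_SIZE < 8192)
    		rx_buf_len = DEMO_RXBUF_3072; /* jumbo MTU, 4K pages */
    	else
    		rx_buf_len = DEMO_RXBUF_2048; /* jumbo MTU, >=8K pages */

    	printf("rx_buf_len = %d\n", rx_buf_len); /* prints 1534 */
    	return 0;
    }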
+
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1687,13 +1298,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF)
goto setup_rings;
- if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
- vsi->max_frame = vsi->netdev->mtu +
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = ICE_RXBUF_2048;
-
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq; i++) {
@@ -1712,101 +1317,34 @@ setup_rings:
}
/**
- * ice_vsi_cfg_txq - Configure single Tx queue
- * @vsi: the VSI that queue belongs to
- * @ring: Tx ring to be configured
- * @tc_q_idx: queue index within given TC
- * @qg_buf: queue group buffer
- * @tc: TC that Tx ring belongs to
- */
-static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
- struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
-{
- struct ice_tlan_ctx tlan_ctx = { 0 };
- struct ice_aqc_add_txqs_perq *txq;
- struct ice_pf *pf = vsi->back;
- u8 buf_len = sizeof(*qg_buf);
- enum ice_status status;
- u16 pf_q;
-
- pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as
- * transmit comm scheduler queue doorbell.
- */
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-
- /* Add unique software queue handle of the Tx queue per
- * TC into the VSI Tx ring
- */
- ring->q_handle = tc_q_idx;
-
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- return -ENODEV;
- }
-
- /* Add Tx Queue TEID into the VSI Tx ring from the
- * response. This will complete configuring and
- * enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- ring->txq_teid = le32_to_cpu(txq->q_teid);
-
- return 0;
-}
-
-/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
- * @offset: offset within vsi->txq_map
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_pf *pf = vsi->back;
- u16 q_idx = 0, i;
+ u16 q_idx = 0;
int err = 0;
- u8 tc;
- qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
- /* set up and configure the Tx queues for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
- qg_buf, tc);
- if (err)
- goto err_cfg_txqs;
-
- q_idx++;
- }
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ goto err_cfg_txqs;
}
+
err_cfg_txqs:
- devm_kfree(&pf->pdev->dev, qg_buf);
+ kfree(qg_buf);
return err;
}
@@ -1819,159 +1357,46 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
-}
-
-/**
- * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
- * @intrl: interrupt rate limit in usecs
- * @gran: interrupt rate limit granularity in usecs
- *
- * This function converts a decimal interrupt rate limit in usecs to the format
- * expected by firmware.
- */
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
-{
- u32 val = intrl / gran;
-
- if (val)
- return val | GLINT_RATE_INTRL_ENA_M;
- return 0;
-}
-
-/**
- * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
- * @hw: board specific structure
- */
-static void ice_cfg_itr_gran(struct ice_hw *hw)
-{
- u32 regval = rd32(hw, GLINT_CTL);
-
- /* no need to update global register if ITR gran is already set */
- if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
- return;
-
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
- wr32(hw, GLINT_CTL, regval);
-}
-
-/**
- * ice_cfg_itr - configure the initial interrupt throttle values
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector that's being configured
- *
- * Configure interrupt throttling values for the ring containers that are
- * associated with the interrupt vector passed in.
- */
-static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- ice_cfg_itr_gran(hw);
-
- if (q_vector->num_ring_rx) {
- struct ice_ring_container *rc = &q_vector->rx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
-
- if (q_vector->num_ring_tx) {
- struct ice_ring_container *rc = &q_vector->tx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}
/**
- * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in the given VSI
* @vsi: the VSI being configured
- * @txq: Tx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
*
- * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
- * within the function space.
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in the given VSI for operation.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
+ int ret;
+ int i;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ if (ret)
+ return ret;
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ return ret;
}
/**
- * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
- * @vsi: the VSI being configured
- * @rxq: Rx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
*
- * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
- * within the function space.
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
-
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
-
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
-
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ u32 val = intrl / gran;
- ice_flush(hw);
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
}
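A worked example of the conversion above: with intrl = 50 us and a 2 us granularity, val is 25 and the enable bit is OR-ed in. A standalone sketch, where DEMO_INTRL_ENA_M is an assumed stand-in for the real GLINT_RATE_INTRL_ENA_M mask:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_INTRL_ENA_M 0x40 /* assumed enable bit, illustration only */

    static uint32_t demo_intrl_usec_to_reg(uint8_t intrl, uint8_t gran)
    {
    	uint32_t val = intrl / gran;

    	return val ? (val | DEMO_INTRL_ENA_M) : 0;
    }

    int main(void)
    {
    	/* 50 us / 2 us gran -> 25 (0x19), plus the enable bit -> 0x59 */
    	printf("0x%x\n", demo_intrl_usec_to_reg(50, 2));
    	return 0;
    }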
/**
@@ -2028,13 +1453,12 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
*/
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2052,7 +1476,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -2060,7 +1484,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -2071,13 +1495,12 @@ out:
*/
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2099,7 +1522,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -2107,7 +1530,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -2134,109 +1557,6 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_trigger_sw_intr - trigger a software interrupt
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector to trigger the software interrupt for
- */
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
- (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_M);
-}
-
-/**
- * ice_vsi_stop_tx_ring - Disable single Tx ring
- * @vsi: the VSI being configured
- * @rst_src: reset source
- * @rel_vmvf_num: Relative ID of VF/VM
- * @ring: Tx ring to be stopped
- * @txq_meta: Meta data of Tx ring to be stopped
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
- struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u32 val;
-
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(ring->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(ring->reg_idx), val);
-
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector
- * associated to the queue to schedule NAPI handler
- */
- q_vector = ring->q_vector;
- if (q_vector)
- ice_trigger_sw_intr(hw, q_vector);
-
- status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
- txq_meta->tc, 1, &txq_meta->q_handle,
- &txq_meta->q_id, &txq_meta->q_teid, rst_src,
- rel_vmvf_num, NULL);
-
- /* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
- * This is not an error as the reset operation disables
- * queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
- } else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * ice_fill_txq_meta - Prepare the Tx queue's meta data
- * @vsi: VSI that ring belongs to
- * @ring: ring that txq_meta will be based on
- * @txq_meta: a helper struct that wraps Tx queue's information
- *
- * Set up a helper struct that will contain all the necessary fields that
- * are needed for stopping Tx queue
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- u8 tc = 0;
-
-#ifdef CONFIG_DCB
- tc = ring->dcb_tc;
-#endif /* CONFIG_DCB */
- txq_meta->q_id = ring->reg_idx;
- txq_meta->q_teid = ring->txq_teid;
- txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
-}
-
-/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
@@ -2247,34 +1567,24 @@ static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_ring **rings)
{
- u16 i, q_idx = 0;
- int status;
- u8 tc;
+ u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- /* set up the Tx queue list to be disabled for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_txq_meta txq_meta = { };
-
- if (!rings || !rings[q_idx])
- return -EINVAL;
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ struct ice_txq_meta txq_meta = { };
+ int status;
- ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
- status = ice_vsi_stop_tx_ring(vsi, rst_src,
- rel_vmvf_num,
- rings[q_idx], &txq_meta);
+ if (!rings || !rings[q_idx])
+ return -EINVAL;
- if (status)
- return status;
+ ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+ status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
+ rings[q_idx], &txq_meta);
- q_idx++;
- }
+ if (status)
+ return status;
}
return 0;
@@ -2294,6 +1604,15 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
}
/**
+ * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
+ * @vsi: the VSI being configured
+ */
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2304,7 +1623,6 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
- struct device *dev;
struct ice_pf *pf;
int status;
@@ -2312,8 +1630,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
return -EINVAL;
pf = vsi->back;
- dev = &pf->pdev->dev;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2347,11 +1664,11 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return 0;
err_out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return -EIO;
}
@@ -2420,8 +1737,10 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
@@ -2441,11 +1760,11 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
}
/**
@@ -2460,8 +1779,10 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
@@ -2488,12 +1809,11 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev,
- "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, status);
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
}
/**
@@ -2515,7 +1835,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2551,7 +1871,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi);
+ ret = ice_vsi_init(vsi, true);
if (ret)
goto unroll_get_qs;
@@ -2624,8 +1944,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(&pf->pdev->dev,
- "VSI %d failed lan queue config, error %d\n",
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
goto unroll_vector_base;
}
@@ -2635,23 +1954,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action...
- * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
- * need to be dropped so that VFs cannot send LLDP packets to reconfig
- * DCB settings in the HW. Also, if the FW DCBX engine is not running
- * then Rx LLDP packets need to be redirected up the stack.
+ * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
+ * be dropped so that VFs cannot send LLDP packets to reconfig DCB
+ * settings in the HW.
*/
- if (!ice_is_safe_mode(pf)) {
+ if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
/* Tx LLDP packets */
ice_cfg_sw_lldp(vsi, true, true);
-
- /* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, true);
}
- }
return vsi;
@@ -2690,6 +2003,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
+ }
txq++;
}
@@ -2738,8 +2056,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
+ devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
}
@@ -2790,6 +2107,62 @@ void ice_vsi_close(struct ice_vsi *vsi)
}
/**
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
+ * @locked: is the rtnl_lock already held
+ */
+int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
+{
+ int err = 0;
+
+ if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return 0;
+
+ clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+
+ err = ice_open(vsi->netdev);
+
+ if (!locked)
+ rtnl_unlock();
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+
+ ice_stop(vsi->netdev);
+
+ if (!locked)
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
+}
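A minimal caller sketch for the pause/resume pair just added, assuming a caller that does not already hold rtnl_lock (so locked = false); the helpers then take and drop the lock themselves:

    /* sketch only: pause, reconfigure, resume */
    ice_dis_vsi(vsi, false); /* sets __ICE_NEEDS_RESTART, stops the netdev */

    /* ... reconfigure the VSI while traffic is stopped ... */

    err = ice_ena_vsi(vsi, false); /* reopens only if restart was flagged */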
+
+/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
@@ -2869,7 +2242,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
@@ -3031,10 +2404,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuilt
+ * @init_vsi: is this an initialization or a reconfigure of the VSI
*
* Returns 0 on success and a negative value on failure
*/
-int ice_vsi_rebuild(struct ice_vsi *vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vf *vf = NULL;
@@ -3064,6 +2438,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* the return value check can be skipped here, as it always
+ * returns 0 while a reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -3081,11 +2460,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_init(vsi);
+ ret = ice_vsi_init(vsi, init_vsi);
if (ret < 0)
goto err_vsi;
-
switch (vsi->type) {
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3105,6 +2483,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto err_vectors;
+ }
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -3131,16 +2515,25 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ for (i = 0; i < vsi->tc_cfg.numtc; i++) {
max_txqs[i] = vsi->alloc_txq;
+ if (ice_is_xdp_ena_vsi(vsi))
+ max_txqs[i] += vsi->num_xdp_txq;
+ }
+
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
- goto err_vectors;
+ if (init_vsi) {
+ ret = -EIO;
+ goto err_vectors;
+ } else {
+ return ice_schedule_reset(pf, ICE_RESET_PFR);
+ }
}
return 0;
@@ -3166,6 +2559,7 @@ err_vsi:
bool ice_is_reset_in_progress(unsigned long *state)
{
return test_bit(__ICE_RESET_OICR_RECV, state) ||
+ test_bit(__ICE_DCBNL_DEVRESET, state) ||
test_bit(__ICE_PFR_REQ, state) ||
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
@@ -3199,9 +2593,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
enum ice_status status;
+ struct device *dev;
int i, ret = 0;
u8 num_tc = 0;
+ dev = ice_pf_to_dev(pf);
+
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
if (ena_tc & BIT(i))
@@ -3213,7 +2610,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
- ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -3226,7 +2623,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
- dev_info(&pf->pdev->dev, "Failed VSI Update\n");
+ dev_info(dev, "Failed VSI Update\n");
ret = -EIO;
goto out;
}
@@ -3235,8 +2632,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs);
if (status) {
- dev_err(&pf->pdev->dev,
- "VSI %d failed TC config, error %d\n",
+ dev_err(dev, "VSI %d failed TC config, error %d\n",
vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -3246,7 +2642,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
- devm_kfree(&pf->pdev->dev, ctx);
+ kfree(ctx);
return ret;
}
#endif /* CONFIG_DCB */
@@ -3271,6 +2667,51 @@ char *ice_nvm_version_str(struct ice_hw *hw)
}
/**
+ * ice_update_ring_stats - Update ring statistics
+ * @ring: ring to update
+ * @cont: used to increment per-vector counters
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ *
+ * This function assumes that caller has acquired a u64_stats_sync lock.
+ */
+static void
+ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
+ u64 pkts, u64 bytes)
+{
+ ring->stats.bytes += bytes;
+ ring->stats.pkts += pkts;
+ cont->total_bytes += bytes;
+ cont->total_pkts += pkts;
+}
+
+/**
+ * ice_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
+ u64_stats_update_end(&tx_ring->syncp);
+}
+
+/**
+ * ice_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
+ u64_stats_update_end(&rx_ring->syncp);
+}
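The comment on ice_update_ring_stats notes that callers must hold the u64_stats_sync writer side, which the two wrappers above provide. The matching reader side follows the usual retry loop; a sketch against the same ring fields, assuming a surrounding stats-fetch function:

    unsigned int start;
    u64 pkts, bytes;

    do {
    	start = u64_stats_fetch_begin(&ring->syncp);
    	pkts = ring->stats.pkts;
    	bytes = ring->stats.bytes;
    } while (u64_stats_fetch_retry(&ring->syncp, start));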
+
+/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
* @macaddr: the MAC address to be added.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 47bc033fff20..6e31e30aba39 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,18 +6,7 @@
#include "ice.h"
-struct ice_txq_meta {
- /* Tx-scheduler element identifier */
- u32 q_teid;
- /* Entry in VSI's txq_map bitmap */
- u16 q_id;
- /* Relative index of Tx queue within TC */
- u16 q_handle;
- /* VSI index that Tx queue belongs to */
- u16 vsi_idx;
- /* TC number that Tx queue belongs to */
- u8 tc;
-};
+const char *ice_vsi_type_str(enum ice_vsi_type type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -33,24 +22,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
-
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
-#endif /* CONFIG_PCI_IOV */
-
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -67,6 +38,10 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
+
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -89,25 +64,21 @@ int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
+int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+
+void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
+
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-int ice_vsi_rebuild(struct ice_vsi *vsi);
+int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
-
void ice_vsi_put_qs(struct ice_vsi *vsi);
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
-#endif /* CONFIG_DCB */
-
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -118,6 +89,12 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
char *ice_nvm_version_str(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 214cd6eca405..69bff085acf7 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,8 +6,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -42,6 +44,7 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
+static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
@@ -159,7 +162,7 @@ unregister:
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"Could not add MAC filters error %d. Unregistering device\n",
status);
unregister_netdev(vsi->netdev);
@@ -435,42 +438,11 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
- * @locked: is the rtnl_lock already held
- */
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
-{
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
-
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
-
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
- if (netif_running(vsi->netdev)) {
- if (!locked)
- rtnl_lock();
-
- ice_stop(vsi->netdev);
-
- if (!locked)
- rtnl_unlock();
- } else {
- ice_vsi_close(vsi);
- }
- }
-}
-
-/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
-#ifdef CONFIG_DCB
-void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
-#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
-#endif /* CONFIG_DCB */
{
int v;
@@ -524,7 +496,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
@@ -636,8 +608,14 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
switch (vsi->port_info->phy.link_info.topo_media_conflict) {
case ICE_AQ_LINK_TOPO_CONFLICT:
case ICE_AQ_LINK_MEDIA_CONFLICT:
+ case ICE_AQ_LINK_TOPO_UNREACH_PRT:
+ case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
+ case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
break;
+ case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
+ netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ break;
default:
break;
}
@@ -747,7 +725,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
an = "False";
/* Get FEC mode requested based on PHY caps last SW configuration */
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps) {
fec_req = "Unknown";
goto done;
@@ -767,7 +745,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
else
fec_req = "NONE";
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
done:
netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
@@ -815,6 +793,7 @@ static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
u16 link_speed)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
struct ice_vsi *vsi;
u16 old_link_speed;
@@ -832,7 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
result = ice_update_link_info(pi);
if (result)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Failed to update link status and re-enable link events for port %d\n",
pi->lport);
@@ -851,7 +830,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
@@ -947,7 +926,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
!!(link_data->link_info & ICE_AQ_LINK_UP),
le16_to_cpu(link_data->link_speed));
if (status)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(ice_pf_to_dev(pf),
"Could not process link event, error %d\n", status);
return status;
@@ -960,6 +939,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*/
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct ice_ctl_q_info *cq;
@@ -981,8 +961,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
qtype = "Mailbox";
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
- q_type);
+ dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
return 0;
}
@@ -994,15 +973,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ARQLEN_ARQCRIT_M)) {
oldval = val;
if (val & PF_FW_ARQLEN_ARQVFE_M)
- dev_dbg(&pf->pdev->dev,
- "%s Receive Queue VF Error detected\n", qtype);
+ dev_dbg(dev, "%s Receive Queue VF Error detected\n",
+ qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
@@ -1016,16 +995,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Send Queue VF Error detected\n", qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
- dev_dbg(&pf->pdev->dev,
- "%s Send Queue Overflow Error detected\n",
+ dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ATQLEN_ATQCRIT_M)
- dev_dbg(&pf->pdev->dev,
- "%s Send Queue Critical Error detected\n",
+ dev_dbg(dev, "%s Send Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M);
@@ -1034,8 +1011,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
event.buf_len = cq->rq_buf_size;
- event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
- GFP_KERNEL);
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
return 0;
@@ -1047,8 +1023,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
- dev_err(&pf->pdev->dev,
- "%s Receive Queue event error %d\n", qtype,
+ dev_err(dev, "%s Receive Queue event error %d\n", qtype,
ret);
break;
}
@@ -1058,8 +1033,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf, &event))
- dev_err(&pf->pdev->dev,
- "Could not handle link event\n");
+ dev_err(dev, "Could not handle link event\n");
break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
@@ -1071,14 +1045,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
- devm_kfree(&pf->pdev->dev, event.msg_buf);
+ kfree(event.msg_buf);
return pending && (i == ICE_DFLT_IRQ_WORK);
}
@@ -1222,6 +1196,7 @@ static void ice_service_timer(struct timer_list *t)
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg;
@@ -1243,7 +1218,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
GL_MDET_TX_PQM_QNUM_S);
if (netif_msg_tx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
mdd_detected = true;
@@ -1261,7 +1236,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
GL_MDET_TX_TCLAN_QNUM_S);
if (netif_msg_rx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
mdd_detected = true;
@@ -1279,7 +1254,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
GL_MDET_RX_QNUM_S);
if (netif_msg_rx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
+ dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
mdd_detected = true;
@@ -1291,21 +1266,21 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, PF_MDET_TX_PQM);
if (reg & PF_MDET_TX_PQM_VALID_M) {
wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ dev_info(dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_TX_TCLAN);
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ dev_info(dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_RX);
if (reg & PF_MDET_RX_VALID_M) {
wr32(hw, PF_MDET_RX, 0xFFFF);
- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+ dev_info(dev, "RX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
/* Queue belongs to the PF initiate a reset */
@@ -1325,7 +1300,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ dev_info(dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1333,7 +1308,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ dev_info(dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1341,7 +1316,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ dev_info(dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1349,7 +1324,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ dev_info(dev, "RX driver issue detected on VF %d\n",
i);
}
@@ -1357,7 +1332,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
@@ -1393,7 +1368,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
pi = vsi->port_info;
- pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -1412,7 +1387,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
- cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
@@ -1437,9 +1412,9 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
retcode = -EIO;
}
- devm_kfree(dev, cfg);
+ kfree(cfg);
out:
- devm_kfree(dev, pcaps);
+ kfree(pcaps);
return retcode;
}
@@ -1551,6 +1526,44 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
}
/**
+ * ice_schedule_reset - schedule a reset
+ * @pf: board private structure
+ * @reset: reset being requested
+ */
+int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ /* bail out if an earlier reset has failed */
+ if (test_bit(__ICE_RESET_FAILED, pf->state)) {
+ dev_dbg(dev, "earlier reset has failed\n");
+ return -EIO;
+ }
+ /* bail if reset/recovery already in progress */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_dbg(dev, "Reset already in progress\n");
+ return -EBUSY;
+ }
+
+ switch (reset) {
+ case ICE_RESET_PFR:
+ set_bit(__ICE_PFR_REQ, pf->state);
+ break;
+ case ICE_RESET_CORER:
+ set_bit(__ICE_CORER_REQ, pf->state);
+ break;
+ case ICE_RESET_GLOBR:
+ set_bit(__ICE_GLOBR_REQ, pf->state);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ice_service_task_schedule(pf);
+ return 0;
+}
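ice_vsi_rebuild's failure path earlier in this patch calls ice_schedule_reset(pf, ICE_RESET_PFR) to fall back to a full PF reset. A hedged caller sketch showing how the return codes line up with the checks above:

    int err = ice_schedule_reset(pf, ICE_RESET_PFR);

    /* -EBUSY: a reset is already pending, nothing more to do;
     * -EIO: an earlier reset failed and the device needs recovery
     */
    if (err == -EBUSY)
    	err = 0;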
+
+/**
* ice_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
* @mask: the new affinity mask
@@ -1604,11 +1617,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
+ struct device *dev;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
int irq_num;
+ dev = ice_pf_to_dev(pf);
for (vector = 0; vector < q_vectors; vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[vector];
@@ -1628,8 +1643,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(&pf->pdev->dev, irq_num,
- vsi->irq_handler, 0,
+ err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
@@ -1655,12 +1669,331 @@ free_q_irqs:
irq_num = pf->msix_entries[base + vector].vector,
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
- devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]);
+ devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
}
/**
+ * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
+ * @vsi: VSI to setup Tx rings used by XDP
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ int i;
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring *xdp_ring;
+
+ xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
+
+ if (!xdp_ring)
+ goto free_xdp_rings;
+
+ xdp_ring->q_index = xdp_q_idx;
+ xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
+ xdp_ring->ring_active = false;
+ xdp_ring->vsi = vsi;
+ xdp_ring->netdev = NULL;
+ xdp_ring->dev = dev;
+ xdp_ring->count = vsi->num_tx_desc;
+ vsi->xdp_rings[i] = xdp_ring;
+ if (ice_setup_tx_ring(xdp_ring))
+ goto free_xdp_rings;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ return 0;
+
+free_xdp_rings:
+ for (; i >= 0; i--)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
+ * @vsi: VSI to set the bpf prog on
+ * @prog: the bpf prog pointer
+ */
+static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+ int i;
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ ice_for_each_rxq(vsi, i)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+}
+
+/**
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
+ *
+ * Return 0 on success and a negative value on error
+ */
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ struct ice_pf *pf = vsi->back;
+ struct ice_qs_cfg xdp_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_txqs,
+ .pf_map_size = pf->max_pf_txqs,
+ .q_count = vsi->num_xdp_txq,
+ .scatter_count = ICE_MAX_SCATTER_TXQS,
+ .vsi_map = vsi->txq_map,
+ .vsi_map_offset = vsi->alloc_txq,
+ .mapping_mode = ICE_VSI_MAP_CONTIG
+ };
+ enum ice_status status;
+ struct device *dev;
+ int i, v_idx;
+
+ dev = ice_pf_to_dev(pf);
+ vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
+ sizeof(*vsi->xdp_rings), GFP_KERNEL);
+ if (!vsi->xdp_rings)
+ return -ENOMEM;
+
+ vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
+ if (__ice_vsi_get_qs(&xdp_qs_cfg))
+ goto err_map_xdp;
+
+ if (ice_xdp_alloc_setup_rings(vsi))
+ goto clear_xdp_rings;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ /* omit the scheduler update if in the reset path; XDP queues will be
+ * taken into account at the end of ice_vsi_rebuild, where
+ * ice_cfg_vsi_lan is called
+ */
+ if (ice_is_reset_in_progress(pf->state))
+ return 0;
+
+ /* tell the Tx scheduler that right now we have
+ * additional queues
+ */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
+ status);
+ goto clear_xdp_rings;
+ }
+ ice_vsi_assign_bpf_prog(vsi, prog);
+
+ return 0;
+clear_xdp_rings:
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+err_map_xdp:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ devm_kfree(dev, vsi->xdp_rings);
+ return -ENOMEM;
+}
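The mapping loop above hands XDP rings to vectors the same way ice_vsi_map_rings_to_vectors does, front-loading the remainder via DIV_ROUND_UP. A standalone demonstration of the arithmetic for 8 rings over 3 vectors:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	int rings_rem = 8, num_vectors = 3, v_idx;

    	for (v_idx = 0; v_idx < num_vectors; v_idx++) {
    		int per_v = DIV_ROUND_UP(rings_rem, num_vectors - v_idx);

    		printf("vector %d gets %d rings\n", v_idx, per_v);
    		rings_rem -= per_v;
    	}
    	return 0; /* prints 3, 3, 2 */
    }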
+
+/**
+ * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+ * @vsi: VSI to remove XDP rings
+ *
+ * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+ * resources
+ */
+int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf = vsi->back;
+ int i, v_idx;
+
+ /* q_vectors are freed in the reset path, so there's no point in
+ * detaching rings; if the rebuild was not triggered by a reset, the
+ * reset bits in pf->state won't be set, so additionally check the
+ * first q_vector against NULL
+ */
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ goto free_qmap;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_ring *ring;
+
+ ice_for_each_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of the last node prior to XDP setup */
+ q_vector->tx.ring = ring;
+ }
+
+free_qmap:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ if (vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+ devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ return 0;
+
+ ice_vsi_assign_bpf_prog(vsi, NULL);
+
+ /* notify Tx scheduler that we destroyed XDP queues and bring
+ * back the old number of child nodes
+ */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+}
+
+/**
+ * ice_xdp_setup_prog - Add or remove XDP eBPF program
+ * @vsi: VSI to setup XDP for
+ * @prog: XDP program
+ * @extack: netlink extended ack
+ */
+static int
+ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ bool if_running = netif_running(vsi->netdev);
+ int ret = 0, xdp_ring_err = 0;
+
+ if (frame_size > vsi->rx_buf_len) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
+ return -EOPNOTSUPP;
+ }
+
+ /* need to stop netdev while setting up the program for Rx rings */
+ if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Preparing device for XDP attach failed");
+ return ret;
+ }
+ }
+
+ if (!ice_is_xdp_ena_vsi(vsi) && prog) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting up XDP Tx resources failed");
+ } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Freeing XDP Tx resources failed");
+ } else {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ }
+
+ if (if_running)
+ ret = ice_up(vsi);
+
+ if (!ret && prog && vsi->xsk_umems) {
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+ if (rx_ring->xsk_umem)
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+ }
+
+ return (ret || xdp_ring_err) ? -ENOMEM : 0;
+}
+
+/**
+ * ice_xdp - implements XDP handler
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (vsi->type != ICE_VSI_PF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "XDP can be loaded only on PF VSI");
+ return -EINVAL;
+ }
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
+ return 0;
+ case XDP_SETUP_XSK_UMEM:
+ return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
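This dispatcher has the int (*ndo_bpf)(struct net_device *, struct netdev_bpf *) signature; the hunks shown here do not include the hookup, but presumably it is wired into ice_netdev_ops along the lines of this sketch:

    static const struct net_device_ops demo_netdev_ops = {
    	/* ... existing handlers ... */
    	.ndo_bpf = ice_xdp, /* assumed wiring, not shown in this diff */
    };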
+
+/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
@@ -1698,8 +2031,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
struct ice_pf *pf = (struct ice_pf *)data;
struct ice_hw *hw = &pf->hw;
irqreturn_t ret = IRQ_NONE;
+ struct device *dev;
u32 oicr, ena_mask;
+ dev = ice_pf_to_dev(pf);
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
@@ -1735,8 +2070,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
else if (reset == ICE_RESET_EMPR)
pf->empr_count++;
else
- dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
- reset);
+ dev_dbg(dev, "Invalid reset type %d\n", reset);
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
@@ -1770,8 +2104,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_HMC_ERR_M) {
ena_mask &= ~PFINT_OICR_HMC_ERR_M;
- dev_dbg(&pf->pdev->dev,
- "HMC Error interrupt - info 0x%x, data 0x%x\n",
+ dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
rd32(hw, PFHMC_ERRORINFO),
rd32(hw, PFHMC_ERRORDATA));
}
@@ -1779,8 +2112,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* Report any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
- dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
- oicr);
+ dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
/* If a critical error is pending there is no choice but to
* reset the device.
*/
@@ -1838,7 +2170,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
- devm_free_irq(&pf->pdev->dev,
+ devm_free_irq(ice_pf_to_dev(pf),
pf->msix_entries[pf->oicr_idx].vector, pf);
}
@@ -1882,13 +2214,13 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
- dev_driver_string(&pf->pdev->dev),
- dev_name(&pf->pdev->dev));
+ dev_driver_string(dev), dev_name(dev));
/* Do not request IRQ but do enable OICR interrupt since settings are
* lost during reset. Note that this function is called only during
@@ -1905,12 +2237,10 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = oicr_idx;
- err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector,
+ err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
- dev_err(&pf->pdev->dev,
- "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
@@ -2043,7 +2373,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
ice_set_ops(netdev);
if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -2219,6 +2549,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
status = -ENODEV;
goto unroll_vsi_setup;
}
+ /* netdev has to be configured before setting frame size */
+ ice_vsi_cfg_frame_size(vsi);
+
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
/* registering the NAPI handler requires both the queues and
* netdev to be created, which are done in ice_pf_vsi_setup()
@@ -2300,6 +2635,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
+ mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
if (pf->avail_txqs) {
@@ -2349,6 +2685,7 @@ static int ice_init_pf(struct ice_pf *pf)
ice_set_pf_caps(pf);
mutex_init(&pf->sw_mutex);
+ mutex_init(&pf->tc_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
@@ -2363,7 +2700,7 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(&pf->pdev->dev, pf->avail_txqs);
+ devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
@@ -2380,6 +2717,7 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
int v_left, v_actual, v_budget = 0;
int needed, err, i;
@@ -2400,7 +2738,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_budget += needed;
v_left -= needed;
- pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
+ pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
@@ -2416,13 +2754,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
ICE_MIN_MSIX, v_budget);
if (v_actual < 0) {
- dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
+ dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
if (v_actual < v_budget) {
- dev_warn(&pf->pdev->dev,
+ dev_warn(dev,
"not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors for LAN (traffic + OICR) */
@@ -2441,11 +2779,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
return v_actual;
msix_err:
- devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ devm_kfree(dev, pf->msix_entries);
goto exit_err;
no_hw_vecs_left_err:
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
@@ -2461,7 +2799,7 @@ exit_err:
static void ice_dis_msix(struct ice_pf *pf)
{
pci_disable_msix(pf->pdev);
- devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
pf->msix_entries = NULL;
}
@@ -2474,7 +2812,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
ice_dis_msix(pf);
if (pf->irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
pf->irq_tracker = NULL;
}
}
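
A recurring change in these hunks is replacing the open-coded &pf->pdev->dev with the ice_pf_to_dev() accessor, which leaves one place to update if the device pointer ever moves. A minimal user-space sketch of such an accessor; the structure layout here is illustrative, not the driver's actual definitions:

#include <stdio.h>

struct device { const char *name; };
struct pci_dev { struct device dev; };
struct ice_pf_sketch { struct pci_dev *pdev; };

/* single accessor instead of open-coding &pf->pdev->dev at every call site */
static inline struct device *pf_to_dev(struct ice_pf_sketch *pf)
{
	return &pf->pdev->dev;
}

int main(void)
{
	struct pci_dev pdev = { .dev = { .name = "0000:3b:00.0" } };
	struct ice_pf_sketch pf = { .pdev = &pdev };

	printf("%s\n", pf_to_dev(&pf)->name);
	return 0;
}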
@@ -2494,7 +2832,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up vector assignment tracking */
pf->irq_tracker =
- devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
+ devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
(sizeof(u16) * vectors), GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
@@ -2510,6 +2848,52 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_vsi_recfg_qs - Change the number of queues on a VSI
+ * @vsi: VSI being changed
+ * @new_rx: new number of Rx queues
+ * @new_tx: new number of Tx queues
+ *
+ * Only change the number of queues if new_tx or new_rx is non-zero.
+ *
+ * Returns 0 on success.
+ */
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+{
+ struct ice_pf *pf = vsi->back;
+ int err = 0, timeout = 50;
+
+ if (!new_rx && !new_tx)
+ return -EINVAL;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ if (new_tx)
+ vsi->req_txq = new_tx;
+ if (new_rx)
+ vsi->req_rxq = new_rx;
+
+ /* set for the next time the netdev is started */
+ if (!netif_running(vsi->netdev)) {
+ ice_vsi_rebuild(vsi, false);
+ dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
+ goto done;
+ }
+
+ ice_vsi_close(vsi);
+ ice_vsi_rebuild(vsi, false);
+ ice_pf_dcb_recfg(pf);
+ ice_vsi_open(vsi);
+done:
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+ return err;
+}
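
The loop at the top of ice_vsi_recfg_qs() is a bounded spin on a configuration-busy bit. A user-space model of the same pattern, substituting C11 atomic_flag for the kernel's test_and_set_bit(); all names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

static int try_reconfig(void)
{
	int timeout = 50;

	/* atomic_flag_test_and_set() returns the previous value, just like
	 * test_and_set_bit(): nonzero means another path holds the bit
	 */
	while (atomic_flag_test_and_set(&cfg_busy)) {
		if (!--timeout)
			return -1;	/* -EBUSY in the driver */
		usleep(1000);
	}

	/* ... reconfigure queues here ... */

	atomic_flag_clear(&cfg_busy);	/* clear_bit(__ICE_CFG_BUSY, ...) */
	return 0;
}

int main(void)
{
	return try_reconfig();
}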
+
+/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
@@ -2518,7 +2902,7 @@ static void
ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
{
struct ice_pf *pf = (struct ice_pf *)hw->back;
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
switch (*status) {
case ICE_SUCCESS:
@@ -2598,7 +2982,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
- switch (hw->adminq.sq_last_status) {
+ switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
dev_err(dev,
@@ -2637,7 +3021,7 @@ static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
enum ice_status status = ICE_ERR_PARAM;
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
@@ -2677,7 +3061,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
- dev_warn(&pf->pdev->dev,
+ dev_warn(ice_pf_to_dev(pf),
"%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}
@@ -2747,7 +3131,7 @@ static void ice_request_fw(struct ice_pf *pf)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
const struct firmware *firmware = NULL;
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
int err = 0;
/* optional device-specific DDP (if present) overrides the default DDP
@@ -2831,6 +3215,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
hw->back = pf;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
@@ -2936,7 +3322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2976,12 +3362,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_cfg_lldp_mib_change(&pf->hw, true);
}
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
+
return 0;
err_alloc_sw_unroll:
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
- devm_kfree(&pf->pdev->dev, pf->first_sw);
+ devm_kfree(dev, pf->first_sw);
err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
@@ -3027,12 +3416,13 @@ static void ice_remove(struct pci_dev *pdev)
}
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
- ice_clear_interrupt_scheme(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
+ pci_wait_for_pending_transaction(pdev);
+ ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
}
@@ -3347,6 +3737,48 @@ static void ice_set_rx_mode(struct net_device *netdev)
}
/**
+ * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Queue ID
+ * @maxrate: maximum bandwidth in Mbps
+ */
+static int
+ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
+ u16 q_handle;
+ u8 tc;
+
+ /* Validate maxrate requested is within permitted range */
+ if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
+ netdev_err(netdev,
+ "Invalid max rate %d specified for the queue %d\n",
+ maxrate, queue_index);
+ return -EINVAL;
+ }
+
+ q_handle = vsi->tx_rings[queue_index]->q_handle;
+ tc = ice_dcb_get_tc(vsi, queue_index);
+
+ /* Set BW back to default, when user set maxrate to 0 */
+ if (!maxrate)
+ status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
+ q_handle, ICE_MAX_BW);
+ else
+ status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
+ q_handle, ICE_MAX_BW, maxrate * 1000);
+ if (status) {
+ netdev_err(netdev,
+ "Unable to set Tx max rate, error %d\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
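
For context on the unit handling above: the stack hands ndo_set_tx_maxrate a rate in Mbps while the scheduler API works in Kbps, hence the maxrate * 1000 going in and the ICE_SCHED_MAX_BW / 1000 bound on the check. A worked sketch, with the value of ICE_SCHED_MAX_BW assumed here to be 100 Gbps expressed in Kbps:

#include <stdio.h>

#define ICE_SCHED_MAX_BW 100000000U	/* assumed: 100 Gbps in Kbps */

int main(void)
{
	unsigned int maxrate_mbps = 25000;	/* requested 25 Gbps */

	/* same bound as the driver: compare Mbps against the Kbps max */
	if (maxrate_mbps > ICE_SCHED_MAX_BW / 1000) {
		puts("rejected: above scheduler maximum");
		return 1;
	}
	/* the scheduler call takes Kbps */
	printf("ice_cfg_q_bw_lmt(..., %u Kbps)\n", maxrate_mbps * 1000);
	return 0;
}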
+
+/**
* ice_fdb_add - add an entry to the hardware database
* @ndm: the input from the stack
* @tb: pointer to array of nladdr (unused)
@@ -3426,6 +3858,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
@@ -3435,6 +3868,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return ret;
}
+ /* Do not change setting during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ return -EBUSY;
+ }
+
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
@@ -3505,6 +3945,8 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
+ if (!err && ice_is_xdp_ena_vsi(vsi))
+ err = ice_vsi_cfg_xdp_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3920,6 +4362,13 @@ int ice_down(struct ice_vsi *vsi)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
+ if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
+ if (tx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop XDP rings, VSI %d error %d\n",
+ vsi->vsi_num, tx_err);
+ }
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
@@ -3970,8 +4419,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- vsi->tx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_tx_ring(vsi->tx_rings[i]);
+ struct ice_ring *ring = vsi->tx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_tx_ring(ring);
if (err)
break;
}
@@ -3996,8 +4450,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- vsi->rx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_rx_ring(vsi->rx_rings[i]);
+ struct ice_ring *ring = vsi->rx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_rx_ring(ring);
if (err)
break;
}
@@ -4033,7 +4492,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
err = ice_vsi_req_irq_msix(vsi, int_name);
if (err)
goto err_setup_rx;
@@ -4082,61 +4541,13 @@ static void ice_vsi_release_all(struct ice_pf *pf)
err = ice_vsi_release(pf->vsi[i]);
if (err)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(ice_pf_to_dev(pf),
"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
i, err, pf->vsi[i]->vsi_num);
}
}
/**
- * ice_ena_vsi - resume a VSI
- * @vsi: the VSI being resume
- * @locked: is the rtnl_lock already held
- */
-static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
-{
- int err = 0;
-
- if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
- return 0;
-
- clear_bit(__ICE_NEEDS_RESTART, vsi->state);
-
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
- if (netif_running(vsi->netdev)) {
- if (!locked)
- rtnl_lock();
-
- err = ice_open(vsi->netdev);
-
- if (!locked)
- rtnl_unlock();
- }
- }
-
- return err;
-}
-
-/**
- * ice_pf_ena_all_vsi - Resume all VSIs on a PF
- * @pf: the PF
- * @locked: is the rtnl_lock already held
- */
-#ifdef CONFIG_DCB
-int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
-{
- int v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- if (ice_ena_vsi(pf->vsi[v], locked))
- return -EIO;
-
- return 0;
-}
-#endif /* CONFIG_DCB */
-
-/**
* ice_vsi_rebuild_by_type - Rebuild VSI of a given type
* @pf: pointer to the PF instance
* @type: VSI type to rebuild
@@ -4145,6 +4556,7 @@ int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
*/
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
+ struct device *dev = ice_pf_to_dev(pf);
enum ice_status status;
int i, err;
@@ -4155,20 +4567,20 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
continue;
/* rebuild the VSI */
- err = ice_vsi_rebuild(vsi);
+ err = ice_vsi_rebuild(vsi, true);
if (err) {
- dev_err(&pf->pdev->dev,
- "rebuild VSI failed, err %d, VSI index %d, type %d\n",
- err, vsi->idx, type);
+ dev_err(dev,
+ "rebuild VSI failed, err %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
return err;
}
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(&pf->pdev->dev,
- "replay VSI failed, status %d, VSI index %d, type %d\n",
- status, vsi->idx, type);
+ dev_err(dev,
+ "replay VSI failed, status %d, VSI index %d, type %s\n",
+ status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4180,14 +4592,14 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
- dev_err(&pf->pdev->dev,
- "enable VSI failed, err %d, VSI index %d, type %d\n",
- err, vsi->idx, type);
+ dev_err(dev,
+ "enable VSI failed, err %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
return err;
}
- dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
- vsi->idx, type);
+ dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
+ ice_vsi_type_str(type));
}
return 0;
@@ -4226,7 +4638,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
int err;
@@ -4272,7 +4684,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
err = ice_update_link_info(hw->port_info);
if (err)
- dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
+ dev_err(dev, "Get link status error %d\n", err);
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
@@ -4329,6 +4741,18 @@ clear_recovery:
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
+ else
+ return ICE_RXBUF_3072;
+}
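
A quick worked instance of the bound this helper feeds into ice_change_mtu() below: with 4 KiB pages and legacy-rx off, the 3072-byte buffer minus the L2 overhead pad is the largest MTU XDP will accept. The constant values below are assumptions for illustration:

#include <stdio.h>

#define ICE_RXBUF_3072		3072
/* assumed: ETH header + two VLAN tags + FCS */
#define ICE_ETH_PKT_HDR_PAD	(14 + 4 + 4 + 4)

int main(void)
{
	int frame_size = ICE_RXBUF_3072;	/* 4K pages, legacy-rx off */
	int max_mtu = frame_size - ICE_ETH_PKT_HDR_PAD;

	printf("max MTU for XDP: %d\n", max_mtu);	/* 3046 with these values */
	return 0;
}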
+
+/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -4347,6 +4771,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ int frame_size = ice_max_xdp_frame_size(vsi);
+
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
+ netdev_err(netdev, "max MTU for XDP usage is %d\n",
+ frame_size - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
+ }
+
if (new_mtu < netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
@@ -4409,7 +4843,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
+ struct device *dev;
+ dev = ice_pf_to_dev(pf);
if (seed) {
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
@@ -4417,8 +4853,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot set RSS key, err %d aq_err %d\n",
+ dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4428,8 +4863,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot set RSS lut, err %d aq_err %d\n",
+ dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4452,15 +4886,16 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
+ struct device *dev;
+ dev = ice_pf_to_dev(pf);
if (seed) {
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot get RSS key, err %d aq_err %d\n",
+ dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4470,8 +4905,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot get RSS lut, err %d aq_err %d\n",
+ dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4515,7 +4949,6 @@ ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
*/
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
@@ -4524,7 +4957,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
vsi_props = &vsi->info;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -4540,7 +4973,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -4549,7 +4982,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
vsi_props->sw_flags = ctxt->info.sw_flags;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -4864,12 +5297,14 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
+ .ndo_set_tx_maxrate = ice_set_tx_maxrate,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
.ndo_set_vf_trust = ice_set_vf_trust,
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
+ .ndo_get_vf_stats = ice_get_vf_stats,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
@@ -4878,4 +5313,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index bcb431f1bd92..57c73f613f32 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -219,8 +219,7 @@ static void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-static enum ice_status
-ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -242,9 +241,10 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
+ u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_nvm_info *nvm = &hw->nvm;
u16 eetrack_lo, eetrack_hi;
- enum ice_status status = 0;
+ enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -261,15 +261,15 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
nvm->blank_nvm_mode = false;
- } else { /* Blank programming mode */
+ } else {
+ /* Blank programming mode */
nvm->blank_nvm_mode = true;
- status = ICE_ERR_NVM_BLANK_MODE;
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: unsupported blank mode.\n");
- return status;
+ return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver);
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
@@ -287,9 +287,42 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
- hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
- return status;
+ status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
+ ICE_SR_BOOT_CFG_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read Boot Configuration Block TLV.\n");
+ return status;
+ }
+
+ /* The Boot Configuration Block must be at least 2 words long
+ * (Combo Image Version High and Combo Image Version Low)
+ */
+ if (boot_cfg_tlv_len < 2) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Invalid Boot Configuration Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF),
+ &oem_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1),
+ &oem_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n");
+ return status;
+ }
+
+ nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ return 0;
}
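
The OEM version above is assembled from two adjacent 16-bit Shadow RAM words with a shift-and-or; a minimal demonstration of the same combination, with made-up word values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t oem_hi = 0x0102, oem_lo = 0x0304;	/* illustrative values */
	/* high word in bits 31:16, low word in bits 15:0 */
	uint32_t oem_ver = ((uint32_t)oem_hi << 16) | oem_lo;

	printf("oem_ver = 0x%08x\n", (unsigned int)oem_ver);	/* 0x01020304 */
	return 0;
}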
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
new file mode 100644
index 000000000000..a9fa011c22c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_NVM_H_
+#define _ICE_NVM_H_
+
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2fde9653a608..eae707ddf8e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -411,6 +411,27 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
+ * ice_aq_cfg_sched_elems - configures scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_cfgd: returns total number of elements configured
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure scheduling elements (0x0403)
+ */
+static enum ice_status
+ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_conf_elem *buf, u16 buf_size,
+ u16 *elems_cfgd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_cfgd, cd);
+}
+
+/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
* @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
@@ -557,6 +578,149 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
}
/**
+ * ice_aq_rl_profile - performs a rate limiting task
+ * @hw: pointer to the HW struct
+ * @opcode: opcode for add, query, or remove profile(s)
+ * @num_profiles: the number of profiles
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_processed: number of processed add or remove profile(s) to return
+ * @cd: pointer to command details structure
+ *
+ * RL profile function to add, query, or remove profile(s)
+ */
+static enum ice_status
+ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
+ u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_rl_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.rl_profile;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->num_profiles = cpu_to_le16(num_profiles);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && num_processed)
+ *num_processed = le16_to_cpu(cmd->num_processed);
+ return status;
+}
+
+/**
+ * ice_aq_add_rl_profile - adds rate limiting profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to be added
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_added: total number of profiles added to return
+ * @cd: pointer to command details structure
+ *
+ * Add RL profile (0x0410)
+ */
+static enum ice_status
+ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_added,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_added, cd);
+}
+
+/**
+ * ice_aq_remove_rl_profile - removes RL profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to remove
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_removed: total number of profiles removed to return
+ * @cd: pointer to command details structure or NULL
+ *
+ * Remove RL profile (0x0415)
+ */
+static enum ice_status
+ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_removed,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_removed, cd);
+}
+
+/**
+ * ice_sched_del_rl_profile - remove RL profile
+ * @hw: pointer to the HW struct
+ * @rl_info: rate limit profile information
+ *
+ * If the profile ID is no longer referenced, this function removes the
+ * profile ID and its associated parameters from the HW DB and locally.
+ * The caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_del_rl_profile(struct ice_hw *hw,
+ struct ice_aqc_rl_profile_info *rl_info)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ u16 num_profiles_removed;
+ enum ice_status status;
+ u16 num_profiles = 1;
+
+ if (rl_info->prof_id_ref != 0)
+ return ICE_ERR_IN_USE;
+
+ /* Safe to remove profile ID */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_info->profile;
+ status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &num_profiles_removed, NULL);
+ if (status || num_profiles_removed != num_profiles)
+ return ICE_ERR_CFG;
+
+ /* Delete stale entry now */
+ list_del(&rl_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), rl_info);
+ return status;
+}
+
+/**
+ * ice_sched_clear_rl_prof - clears RL prof entries
+ * @pi: port information structure
+ *
+ * This function removes all RL profiles from the HW as well as from the SW DB.
+ */
+static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln], list_entry) {
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ rl_prof_elem->prof_id_ref = 0;
+ status = ice_sched_del_rl_profile(hw, rl_prof_elem);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ /* On error, free mem required */
+ list_del(&rl_prof_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
+ }
+ }
+ }
+}
+
+/**
* ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
@@ -592,6 +756,8 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
if (!pi)
return;
+ /* remove RL profiles related lists */
+ ice_sched_clear_rl_prof(pi);
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -632,8 +798,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
hw->layer_info = NULL;
}
- if (hw->port_info)
- ice_sched_clear_port(hw->port_info);
+ ice_sched_clear_port(hw->port_info);
hw->num_tx_sched_layers = 0;
hw->num_tx_sched_phys_layers = 0;
@@ -1014,6 +1179,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
/* initialize the port for handling the scheduler tree */
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
+ for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
+ INIT_LIST_HEAD(&pi->rl_prof_list[i]);
err_init_port:
if (status && pi->root) {
@@ -1062,8 +1229,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
* and so on. This array will be populated from root (index 0) to
* qgroup layer 7. Leaf node has no children.
*/
- for (i = 0; i < hw->num_tx_sched_layers; i++) {
- max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+ for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
+ max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
hw->max_children[i] = le16_to_cpu(max_sibl);
}
@@ -1670,3 +1837,1095 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
+
+/**
+ * ice_sched_rm_unused_rl_prof - remove unused RL profile
+ * @pi: port information structure
+ *
+ * This function removes unused rate limit profiles from the HW and
+ * SW DB. The caller needs to hold scheduler lock.
+ */
+static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln], list_entry) {
+ if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Removed rl profile\n");
+ }
+ }
+}
+
+/**
+ * ice_sched_update_elem - update element
+ * @hw: pointer to the HW struct
+ * @node: pointer to node
+ * @info: node info to update
+ *
+ * It updates the HW DB and the local SW DB of the node with the scheduling
+ * parameters from the argument info data buffer (info->data buf), and
+ * returns an error if the configure scheduler element command fails. The
+ * caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_aqc_conf_elem buf;
+ enum ice_status status;
+ u16 elem_cfgd = 0;
+ u16 num_elems = 1;
+
+ buf.generic[0] = *info;
+ /* Parent TEID is a reserved field in this AQ call */
+ buf.generic[0].parent_teid = 0;
+ /* Element type is a reserved field in this AQ call */
+ buf.generic[0].data.elem_type = 0;
+ /* Flags is a reserved field in this AQ call */
+ buf.generic[0].data.flags = 0;
+
+ /* Update HW DB */
+ /* Configure element node */
+ status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
+ &elem_cfgd, NULL);
+ if (status || elem_cfgd != num_elems) {
+ ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Config success case */
+ /* Now update local SW DB */
+ /* Only copy the data portion of info buffer */
+ node->info.data = info->data;
+ return status;
+}
+
+/**
+ * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @bw_alloc: BW weight/allocation
+ *
+ * This function configures node element's BW allocation.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 bw_alloc)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ if (rl_type == ICE_MIN_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
+ } else if (rl_type == ICE_MAX_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
+ } else {
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_set_clear_cir_bw - set or clear CIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = 0;
+ } else {
+ /* Save type of BW information */
+ set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_eir_bw - set or clear EIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved shared BW information.
+ */
+ clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ /* save EIR BW information */
+ set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_shared_bw - set or clear shared BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved EIR BW information.
+ */
+ clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ /* save shared BW information */
+ set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = bw;
+ }
+}
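
The two helpers above encode an invariant: EIR BW and shared BW are mutually exclusive, so saving one clears the other first. A stand-alone model of that bookkeeping; the bit values and struct layout are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BW_TYPE_CIR	0x1
#define BW_TYPE_EIR	0x2
#define BW_TYPE_SHARED	0x4

struct bw_info {
	uint32_t types;		/* stands in for bw_t_bitmap */
	uint32_t eir_bw, shared_bw;
};

static void set_eir(struct bw_info *bi, uint32_t bw)
{
	/* exclusive pair: drop any saved shared BW before saving EIR */
	bi->types &= ~BW_TYPE_SHARED;
	bi->shared_bw = 0;
	bi->types |= BW_TYPE_EIR;
	bi->eir_bw = bw;
}

int main(void)
{
	struct bw_info bi = { .types = BW_TYPE_SHARED, .shared_bw = 5000 };

	set_eir(&bi, 10000);
	printf("types=0x%x eir=%u shared=%u\n", bi.types, bi.eir_bw,
	       bi.shared_bw);	/* shared is gone, only EIR remains set */
	return 0;
}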
+
+/**
+ * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
+ * @bw: bandwidth in Kbps
+ *
+ * This function calculates the wakeup parameter of RL profile.
+ */
+static u16 ice_sched_calc_wakeup(s32 bw)
+{
+ s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
+ s32 wakeup_f_int;
+ u16 wakeup = 0;
+
+ /* Get the wakeup integer value */
+ bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
+ wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
+ if (wakeup_int > 63) {
+ wakeup = (u16)((1 << 15) | wakeup_int);
+ } else {
+ /* Calculate the fraction value up to 4 decimal places and
+ * convert the integer value to a constant multiplier
+ */
+ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
+ wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
+ ICE_RL_PROF_FREQUENCY,
+ bytes_per_sec);
+
+ /* Get Fraction value */
+ wakeup_f = wakeup_a - wakeup_b;
+
+ /* Round up the Fractional value via Ceil(Fractional value) */
+ if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
+ wakeup_f += 1;
+
+ wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
+ ICE_RL_PROF_MULTIPLIER);
+ wakeup |= (u16)(wakeup_int << 9);
+ wakeup |= (u16)(0x1ff & wakeup_f_int);
+ }
+
+ return wakeup;
+}
+
+/**
+ * ice_sched_bw_to_rl_profile - convert BW to profile parameters
+ * @bw: bandwidth in Kbps
+ * @profile: profile parameters to return
+ *
+ * This function converts the BW to profile structure format.
+ */
+static enum ice_status
+ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ s64 bytes_per_sec, ts_rate, mv_tmp;
+ bool found = false;
+ s32 encode = 0;
+ s64 mv = 0;
+ s32 i;
+
+ /* BW settings range from 0.5 Mb/sec to 100 Gb/sec */
+ if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
+ return status;
+
+ /* Bytes per second from Kbps */
+ bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
+
+ /* encode is 6 bits, but only 5 bits are really useful */
+ for (i = 0; i < 64; i++) {
+ u64 pow_result = BIT_ULL(i);
+
+ ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
+ pow_result * ICE_RL_PROF_TS_MULTIPLIER);
+ if (ts_rate <= 0)
+ continue;
+
+ /* Multiplier value */
+ mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
+ ts_rate);
+
+ /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
+ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
+
+ /* Stop at the first multiplier value greater than the
+ * given accuracy (in bytes)
+ */
+ if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
+ encode = i;
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ u16 wm;
+
+ wm = ice_sched_calc_wakeup(bw);
+ profile->rl_multiply = cpu_to_le16(mv);
+ profile->wake_up_calc = cpu_to_le16(wm);
+ profile->rl_encode = cpu_to_le16(encode);
+ status = 0;
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ return status;
+}
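
To make the search above concrete, here is the same arithmetic run for bw = 1,000,000 Kbps (1 Gbps). The constants mirror the driver's but are restated here as assumptions, and round_up_64bit() is modeled with plain integer arithmetic; with these values the loop settles on encode = 4:

#include <stdint.h>
#include <stdio.h>

#define RL_PROF_FREQUENCY	446000000LL	/* assumed scheduler clock */
#define RL_PROF_MULTIPLIER	10000LL
#define RL_PROF_TS_MULTIPLIER	32LL
#define RL_PROF_ACCURACY_BYTES	128LL

int main(void)
{
	int64_t bw_kbps = 1000000;	/* 1 Gbps */
	int64_t bytes_per_sec = bw_kbps * 1000 / 8;
	int i;

	/* widen the timeslot (2^i) until the multiplier clears the
	 * accuracy threshold, exactly as in the loop above
	 */
	for (i = 0; i < 64; i++) {
		int64_t ts_rate = RL_PROF_FREQUENCY /
				  ((1LL << i) * RL_PROF_TS_MULTIPLIER);
		int64_t mv_tmp, mv;

		if (ts_rate <= 0)
			continue;
		mv_tmp = bytes_per_sec * RL_PROF_MULTIPLIER / ts_rate;
		/* round half up to the nearest multiple of the multiplier */
		mv = (mv_tmp + RL_PROF_MULTIPLIER / 2) / RL_PROF_MULTIPLIER;
		if (mv > RL_PROF_ACCURACY_BYTES) {
			printf("encode=%d multiplier=%lld\n", i,
			       (long long)mv);	/* encode=4 multiplier=143 */
			return 0;
		}
	}
	return 1;
}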
+
+/**
+ * ice_sched_add_rl_profile - add RL profile
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: specifies in which layer to create profile
+ *
+ * This function first checks the existing list for corresponding BW
+ * parameter. If it exists, it returns the associated profile otherwise
+ * it creates a new rate limit profile for requested BW, and adds it to
+ * the HW DB and local list. It returns the new profile or null on error.
+ * The caller needs to hold the scheduler lock.
+ */
+static struct ice_aqc_rl_profile_info *
+ice_sched_add_rl_profile(struct ice_port_info *pi,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ u16 profiles_added = 0, num_profiles = 1;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return NULL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!pi)
+ return NULL;
+ hw = pi->hw;
+ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ rl_prof_elem->bw == bw)
+ /* Return existing profile ID info */
+ return rl_prof_elem;
+
+ /* Create new profile ID */
+ rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
+ GFP_KERNEL);
+
+ if (!rl_prof_elem)
+ return NULL;
+
+ status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
+ if (status)
+ goto exit_add_rl_prof;
+
+ rl_prof_elem->bw = bw;
+ /* layer_num is zero-based, while FW expects levels from 1 to 9 */
+ rl_prof_elem->profile.level = layer_num + 1;
+ rl_prof_elem->profile.flags = profile_type;
+ rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
+
+ /* Create new entry in HW DB */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_prof_elem->profile;
+ status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &profiles_added, NULL);
+ if (status || profiles_added != num_profiles)
+ goto exit_add_rl_prof;
+
+ /* Good entry - add in the list */
+ rl_prof_elem->prof_id_ref = 0;
+ list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
+ return rl_prof_elem;
+
+exit_add_rl_prof:
+ devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
+ return NULL;
+}
+
+/**
+ * ice_sched_cfg_node_bw_lmt - configure node sched params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @rl_prof_id: rate limit profile ID
+ *
+ * This function configures node element's BW limit.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u16 rl_prof_id)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
+ break;
+ case ICE_MAX_BW:
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ return ICE_ERR_CFG;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
+ break;
+ case ICE_SHARED_BW:
+ /* Check for removing shared BW */
+ if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
+ /* remove shared profile */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = 0; /* clear SRL field */
+
+ /* enable back EIR to default profile */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ break;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
+ (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
+ ICE_SCHED_DFLT_RL_PROF_ID))
+ return ICE_ERR_CFG;
+ /* EIR BW is set to default, disable it */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
+ /* Okay to enable shared BW now */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = cpu_to_le16(rl_prof_id);
+ break;
+ default:
+ /* Unknown rate limit type */
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
+ * @node: sched node
+ * @rl_type: rate limit type
+ *
+ * If an existing profile matches, this function returns the corresponding
+ * rate limit profile ID; otherwise it returns an invalid ID as an error.
+ */
+static u16
+ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
+ struct ice_aqc_txsched_elem *data;
+
+ data = &node->info.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
+ rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
+ break;
+ case ICE_MAX_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
+ rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
+ break;
+ case ICE_SHARED_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ rl_prof_id = le16_to_cpu(data->srl_id);
+ break;
+ default:
+ break;
+ }
+
+ return rl_prof_id;
+}
+
+/**
+ * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @layer_index: layer index
+ *
+ * This function returns requested profile creation layer.
+ */
+static u8
+ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
+ u8 layer_index)
+{
+ struct ice_hw *hw = pi->hw;
+
+ if (layer_index >= hw->num_tx_sched_layers)
+ return ICE_SCHED_INVAL_LAYER_NUM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (hw->layer_info[layer_index].max_cir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_MAX_BW:
+ if (hw->layer_info[layer_index].max_eir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_SHARED_BW:
+ /* if current layer doesn't support SRL profile creation
+ * then try a layer up or down.
+ */
+ if (hw->layer_info[layer_index].max_srl_profiles)
+ return layer_index;
+ else if (layer_index < hw->num_tx_sched_layers - 1 &&
+ hw->layer_info[layer_index + 1].max_srl_profiles)
+ return layer_index + 1;
+ else if (layer_index > 0 &&
+ hw->layer_info[layer_index - 1].max_srl_profiles)
+ return layer_index - 1;
+ break;
+ default:
+ break;
+ }
+ return ICE_SCHED_INVAL_LAYER_NUM;
+}
+
+/**
+ * ice_sched_get_srl_node - get shared rate limit node
+ * @node: tree node
+ * @srl_layer: shared rate limit layer
+ *
+ * This function returns SRL node to be used for shared rate limit purpose.
+ * The caller needs to hold scheduler lock.
+ */
+static struct ice_sched_node *
+ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
+{
+ if (srl_layer > node->tx_sched_layer)
+ return node->children[0];
+ else if (srl_layer < node->tx_sched_layer)
+ /* A node can't be created without a parent, so every node
+ * except the root has a valid parent.
+ */
+ return node->parent;
+ else
+ return node;
+}
+
+/**
+ * ice_sched_rm_rl_profile - remove RL profile ID
+ * @pi: port information structure
+ * @layer_num: layer number where profiles are saved
+ * @profile_type: profile type like EIR, CIR, or SRL
+ * @profile_id: profile ID to remove
+ *
+ * This function removes rate limit profile from layer 'layer_num' of type
+ * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
+ * scheduler lock.
+ */
+static enum ice_status
+ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
+ u16 profile_id)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ enum ice_status status = 0;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return ICE_ERR_PARAM;
+ /* Check the existing list for RL profile */
+ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ le16_to_cpu(rl_prof_elem->profile.profile_id) ==
+ profile_id) {
+ if (rl_prof_elem->prof_id_ref)
+ rl_prof_elem->prof_id_ref--;
+
+ /* Remove old profile ID from database */
+ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
+ if (status && status != ICE_ERR_IN_USE)
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ break;
+ }
+ if (status == ICE_ERR_IN_USE)
+ status = 0;
+ return status;
+}
+
+/**
+ * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ * @layer_num: layer number where RL profiles are saved
+ *
+ * This function configures node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 layer_num)
+{
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+ u16 rl_prof_id;
+ u16 old_id;
+
+ hw = pi->hw;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ /* No SRL is configured for default case */
+ rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* Remove stale RL profile ID */
+ if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
+ old_id == ICE_SCHED_INVAL_PROF_ID)
+ return 0;
+
+ return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
+}
+
+/**
+ * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @layer_num: layer number where rate limit profiles are saved
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth value
+ *
+ * This function prepares node element's bandwidth to SRL or EIR exclusively.
+ * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
+ * them may be set for any given element. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ u8 layer_num, enum ice_rl_type rl_type, u32 bw)
+{
+ if (rl_type == ICE_SHARED_BW) {
+ /* An SRL node is passed in this case; it may be a different node */
+ if (bw == ICE_SCHED_DFLT_BW)
+ /* SRL being removed, ice_sched_cfg_node_bw_lmt()
+ * enables EIR to default. EIR is not set in this
+ * case, so no additional action is required.
+ */
+ return 0;
+
+ /* SRL being configured, set EIR to default here.
+ * ice_sched_cfg_node_bw_lmt() disables EIR when it
+ * configures SRL
+ */
+ return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
+ layer_num);
+ } else if (rl_type == ICE_MAX_BW &&
+ node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
+ /* Remove Shared profile. Set default shared BW call
+ * removes shared profile for a node.
+ */
+ return ice_sched_set_node_bw_dflt(pi, node,
+ ICE_SHARED_BW,
+ layer_num);
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_set_node_bw - set node's bandwidth
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: layer number
+ *
+ * This function adds new profile corresponding to requested BW, configures
+ * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
+ * ID from local database. The caller needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_hw *hw = pi->hw;
+ u16 old_id, rl_prof_id;
+
+ rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
+ if (!rl_prof_info)
+ return status;
+
+ rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
+
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* New changes have been applied */
+ /* Increment the profile ID reference count */
+ rl_prof_info->prof_id_ref++;
+
+ /* Check for old ID removal */
+ if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
+ old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
+ return 0;
+
+ return ice_sched_rm_rl_profile(pi, layer_num,
+ rl_prof_info->profile.flags,
+ old_id);
+}
+
+/**
+ * ice_sched_set_node_bw_lmt - set node's BW limit
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * It updates node's BW limit parameters like BW RL profile ID of type CIR,
+ * EIR, or SRL. The caller needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_sched_node *cfg_node = node;
+ enum ice_status status;
+
+ struct ice_hw *hw;
+ u8 layer_num;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+ /* Remove unused RL profile IDs from HW and SW DB */
+ ice_sched_rm_unused_rl_prof(pi);
+ layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (layer_num >= hw->num_tx_sched_layers)
+ return ICE_ERR_PARAM;
+
+ if (rl_type == ICE_SHARED_BW) {
+ /* SRL node may be different */
+ cfg_node = ice_sched_get_srl_node(node, layer_num);
+ if (!cfg_node)
+ return ICE_ERR_CFG;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
+ bw);
+ if (status)
+ return status;
+ if (bw == ICE_SCHED_DFLT_BW)
+ return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
+ layer_num);
+ return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
+}
+
+/**
+ * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ *
+ * This function configures node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ return ice_sched_set_node_bw_lmt(pi, node, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_sched_validate_srl_node - Check node for SRL applicability
+ * @node: sched node to configure
+ * @sel_layer: selected SRL layer
+ *
+ * This function checks if the SRL can be applied to a selected layer node on
+ * behalf of the requested node (first argument). This function needs to be
+ * called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
+{
+ /* SRL profiles are not available on all layers. Check if the
+ * SRL profile can be applied to a node above or below the
+ * requested node. SRL configuration is possible only if the
+ * selected layer's node has a single child.
+ */
+ if (sel_layer == node->tx_sched_layer ||
+ ((sel_layer == node->tx_sched_layer + 1) &&
+ node->num_children == 1) ||
+ ((sel_layer == node->tx_sched_layer - 1) &&
+ (node->parent && node->parent->num_children == 1)))
+ return 0;
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_sched_save_q_bw - save queue node's BW information
+ * @q_ctx: queue context structure
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps (kilobits per second)
+ *
+ * Save the BW information of a queue type node for post-replay use.
+ */
+static enum ice_status
+ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
+{
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_set_q_bw_lmt - sets queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a queue scheduling node.
+ */
+static enum ice_status
+ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+ struct ice_q_ctx *q_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ mutex_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
+ if (!q_ctx)
+ goto exit_q_bw_lmt;
+ node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
+ goto exit_q_bw_lmt;
+ }
+
+ /* Return error if it is not a leaf node */
+ if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
+ goto exit_q_bw_lmt;
+
+ /* SRL bandwidth layer selection */
+ if (rl_type == ICE_SHARED_BW) {
+ u8 sel_layer; /* selected layer */
+
+ sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (sel_layer >= pi->hw->num_tx_sched_layers) {
+ status = ICE_ERR_PARAM;
+ goto exit_q_bw_lmt;
+ }
+ status = ice_sched_validate_srl_node(node, sel_layer);
+ if (status)
+ goto exit_q_bw_lmt;
+ }
+
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+ if (!status)
+ status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
+
+exit_q_bw_lmt:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_q_bw_lmt - configure queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the BW limit of a queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ bw);
+}
+
+/**
+ * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ *
+ * This function configures the default BW limit of a queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
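As a usage sketch of the two exported helpers above (the VSI handle, TC, queue handle, and rate below are hypothetical illustration values, not taken from this patch):

        enum ice_status status;

        /* Cap Tx queue 0 of TC 0 to 100 Mbps (value in Kbps), then
         * restore the default (unlimited) rate limit profile.
         */
        status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW, 100000);
        if (!status)
                status = ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, 0,
                                               ICE_MAX_BW);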
+
+/**
+ * ice_cfg_rl_burst_size - Set burst size value
+ * @hw: pointer to the HW struct
+ * @bytes: burst size in bytes
+ *
+ * This function configures/sets the burst size to the requested new value.
+ * The new burst size value is used for future rate limit calls. It doesn't
+ * change existing or previously created RL profiles.
+ */
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+{
+ u16 burst_size_to_prog;
+
+ if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
+ bytes > ICE_MAX_BURST_SIZE_ALLOWED)
+ return ICE_ERR_PARAM;
+ if (ice_round_to_num(bytes, 64) <=
+ ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
+ /* 64 byte granularity case */
+ /* Disable MSB granularity bit */
+ burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
+ /* round number to nearest 64 byte granularity */
+ bytes = ice_round_to_num(bytes, 64);
+ /* The value is in 64 byte chunks */
+ burst_size_to_prog |= (u16)(bytes / 64);
+ } else {
+ /* k bytes granularity case */
+ /* Enable MSB granularity bit */
+ burst_size_to_prog = ICE_KBYTE_GRANULARITY;
+ /* round number to nearest 1024 granularity */
+ bytes = ice_round_to_num(bytes, 1024);
+ /* check rounding doesn't go beyond allowed */
+ if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
+ bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
+ /* The value is in k bytes */
+ burst_size_to_prog |= (u16)(bytes / 1024);
+ }
+ hw->max_burst_size = burst_size_to_prog;
+ return 0;
+}
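A worked example of the encoding above: bytes = 4096 rounds to 4096, which fits ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY (2047 * 64 = 131008 bytes), so the programmed value is ICE_64_BYTE_GRANULARITY | (4096 / 64) = 0x040. For bytes = 1048576 (1 MB), the 64-byte range is exceeded, so the kilobyte path is taken: ICE_KBYTE_GRANULARITY | (1048576 / 1024) = BIT(11) | 0x400 = 0xC00.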
+
+/**
+ * ice_sched_replay_node_prio - re-configure node priority
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @priority: priority value
+ *
+ * This function configures the node element's priority value. It
+ * needs to be called with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
+ u8 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic = priority;
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_sched_replay_node_bw - replay node(s) BW
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @bw_t_info: BW type information
+ *
+ * This function restores the node's BW from bw_t_info. The caller needs
+ * to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_bw_type_info *bw_t_info)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ u16 bw_alloc;
+
+ if (!node)
+ return status;
+ if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
+ return 0;
+ if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_replay_node_prio(hw, node,
+ bw_t_info->generic);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
+ bw_t_info->cir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
+ bw_alloc = bw_t_info->cir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
+ bw_t_info->eir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
+ bw_alloc = bw_t_info->eir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
+ bw_t_info->shared_bw);
+ return status;
+}
+
+/**
+ * ice_sched_replay_q_bw - replay queue type node BW
+ * @pi: port information structure
+ * @q_ctx: queue context structure
+ *
+ * This function replays a queue type node's bandwidth. It needs to be
+ * called with the scheduler lock held.
+ */
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+{
+ struct ice_sched_node *q_node;
+
+ /* Following also checks the presence of node in tree */
+ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!q_node)
+ return ICE_ERR_PARAM;
+ return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 3902a8ad3025..f0593cfb6521 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -8,6 +8,36 @@
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
+#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
+/* Burst size is a 12-bit register field that is configured while creating
+ * the RL profile(s). The MSB is a granularity bit that selects the
+ * granularity type:
+ * 0 - LSB bits are in 64-byte granularity
+ * 1 - LSB bits are in 1 KB granularity
+ */
+#define ICE_64_BYTE_GRANULARITY 0
+#define ICE_KBYTE_GRANULARITY BIT(11)
+#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
+#define ICE_MAX_BURST_SIZE_ALLOWED \
+ ((BIT(11) - 1) * 1024) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
+ ((BIT(11) - 1) * 64) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
+
+#define ICE_RL_PROF_FREQUENCY 446000000
+#define ICE_RL_PROF_ACCURACY_BYTES 128
+#define ICE_RL_PROF_MULTIPLIER 10000
+#define ICE_RL_PROF_TS_MULTIPLIER 32
+#define ICE_RL_PROF_FRACTION 512
+
+/* BW rate limit profile parameters list entry, along with the
+ * bandwidth maintained per layer in the port info
+ */
+struct ice_aqc_rl_profile_info {
+ struct ice_aqc_rl_profile_elem profile;
+ struct list_head list_entry;
+ u32 bw; /* requested */
+ u16 prof_id_ref; /* profile ID to node association ref count */
+};
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
@@ -48,4 +78,13 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 1acdd43a2edd..b5a53f862a83 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -416,8 +416,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
} else {
/* update with new HW VSI num */
- if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
- tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
return 0;
@@ -2429,7 +2428,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
- if (vid)
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
else
recipe_id = ICE_SW_LKUP_PROMISC;
@@ -2441,13 +2440,18 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
mutex_lock(rule_lock);
list_for_each_entry(itr, rule_head, list_entry) {
+ struct ice_fltr_info *fltr_info;
u8 fltr_promisc_mask = 0;
if (!ice_vsi_uses_fltr(itr, vsi_handle))
continue;
+ fltr_info = &itr->fltr_info;
+
+ if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
+ vid != fltr_info->l_data.mac_vlan.vlan_id)
+ continue;
- fltr_promisc_mask |=
- ice_determine_promisc_mask(&itr->fltr_info);
+ fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
/* Skip if filter is not completely specified by given mask */
if (fltr_promisc_mask & ~promisc_mask)
@@ -2455,7 +2459,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
&remove_list_head,
- &itr->fltr_info);
+ fltr_info);
if (status) {
mutex_unlock(rule_lock);
goto free_fltr_list;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index cb123fbe30be..fa14b9545dab 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,11 +14,6 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-/* VSI queue context structure */
-struct ice_q_ctx {
- u16 q_handle;
-};
-
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 33dd103035dc..2c212f64d99f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -5,8 +5,13 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
+#include "ice_xsk.h"
#define ICE_RX_HDR_SIZE 256
@@ -19,7 +24,10 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- dev_kfree_skb_any(tx_buf->skb);
+ if (ice_ring_is_xdp(ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buf->skb);
if (dma_unmap_len(tx_buf, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buf, dma),
@@ -51,6 +59,11 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ ice_xsk_clean_xdp_ring(tx_ring);
+ goto tx_skip_free;
+ }
+
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buf)
return;
@@ -59,6 +72,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
+tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
/* Zero out the descriptor ring */
@@ -136,8 +150,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ if (ice_ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -188,12 +205,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.pkts += total_pkts;
- u64_stats_update_end(&tx_ring->syncp);
- tx_ring->q_vector->tx.total_bytes += total_bytes;
- tx_ring->q_vector->tx.total_pkts += total_pkts;
+
+ ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
+
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -273,6 +289,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
+ if (rx_ring->xsk_umem) {
+ ice_xsk_clean_rx_ring(rx_ring);
+ goto rx_skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -289,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
*/
dma_sync_single_range_for_cpu(dev, rx_buf->dma,
rx_buf->page_offset,
- ICE_RXBUF_2048, DMA_FROM_DEVICE);
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
/* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
@@ -300,6 +322,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
rx_buf->page_offset = 0;
}
+rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
@@ -319,6 +342,10 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
ice_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == ICE_VSI_PF)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
devm_kfree(rx_ring->dev, rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
@@ -363,6 +390,15 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+
+ if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->q_index))
+ goto err;
return 0;
err:
@@ -372,34 +408,110 @@ err:
}
/**
- * ice_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the expected offset into the data buffer for this ring.
*/
-static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
- rx_ring->next_to_use = val;
+ return 0;
+}
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
+/**
+ * ice_run_xdp - Executes an XDP program on initialized xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = ICE_XDP_PASS;
+ struct ice_ring *xdp_ring;
+ u32 act;
- /* QRX_TAIL will be updated with any tail value, but hardware ignores
- * the lower 3 bits. This makes it so we only bump tail on meaningful
- * boundaries. Also, this allows us to bump tail on intervals of 8 up to
- * the budget depending on the current traffic load.
- */
- val &= ~0x7;
- if (prev_ntu != val) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
}
+
+ return result;
+}
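For context, the verdict values handled above come from an attached BPF program. A minimal sketch of such a program (illustrative only, not part of this patch; assumes clang's BPF target and libbpf's bpf_helpers.h):

        // SPDX-License-Identifier: GPL-2.0
        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        /* Drop every frame; ice_run_xdp() maps this XDP_DROP verdict
         * to ICE_XDP_CONSUMED and the driver recycles the Rx buffer.
         */
        SEC("xdp")
        int xdp_drop_all(struct xdp_md *ctx)
        {
                return XDP_DROP;
        }

        char _license[] SEC("license") = "GPL";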
+
+/**
+ * ice_xdp_xmit - submit packets to XDP ring for transmission
+ * @dev: netdev
+ * @n: number of XDP frames to be transmitted
+ * @frames: XDP frames to be transmitted
+ * @flags: transmit flags
+ *
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ */
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *xdp_ring;
+ int drops = 0, i;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdp_ring = vsi->xdp_rings[queue_index];
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ if (err != ICE_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ ice_xdp_ring_update_tail(xdp_ring);
+
+ return n - drops;
}
/**
@@ -423,28 +535,28 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
}
/* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, ice_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ice_rx_offset(rx_ring);
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -486,7 +598,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- ICE_RXBUF_2048,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -557,9 +669,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
*/
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
-#endif
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -572,7 +681,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
#else
- if (rx_buf->page_offset > last_offset)
+#define ICE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+ if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
#endif /* PAGE_SIZE < 8192) */
@@ -590,6 +701,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
/**
* ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: buffer containing page to add
* @skb: sk_buff to place the data into
* @size: packet length from rx_desc
@@ -599,13 +711,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
if (!size)
@@ -679,10 +791,64 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
}
/**
+ * ice_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *
+ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ struct sk_buff *skb;
+
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly at xdp->data; otherwise we
+ * likely have a consumer accessing the first few bytes of
+ * metadata, and then the actual data.
+ */
+ prefetch(xdp->data_meta);
+#if L1_CACHE_BYTES < 128
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
+#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(xdp->data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* must record the Rx queue, otherwise OS features such as
+ * symmetric queues won't work
+ */
+ skb_record_rx_queue(skb, rx_ring->q_index);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+
+ return skb;
+}
+
+/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -690,16 +856,16 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
*/
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch((u8 *)va + L1_CACHE_BYTES);
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */
/* allocate a skb to store the frags */
@@ -712,10 +878,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* Determine available headroom for copy */
headlen = size;
if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -723,7 +890,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE >= 8192)
unsigned int truesize = SKB_DATA_ALIGN(size);
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
skb_add_rx_frag(skb, 0, rx_buf->page,
rx_buf->page_offset + headlen, size, truesize);
@@ -745,11 +912,18 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
*
- * This function will clean up the contents of the rx_buf. It will
- * either recycle the buffer or unmap it and free the associated resources.
+ * This function will update next_to_clean and then clean up the contents
+ * of the rx_buf. It will either recycle the buffer or unmap it and free
+ * the associated resources.
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
if (!rx_buf)
return;
@@ -759,8 +933,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
+ ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
}
@@ -770,227 +945,31 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
- * ice_cleanup_headers - Correct empty headers
- * @skb: pointer to current skb being fixed
- *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- */
-static bool ice_cleanup_headers(struct sk_buff *skb)
-{
- /* if eth_skb_pad returns an error the skb was freed */
- if (eth_skb_pad(skb))
- return true;
-
- return false;
-}
-
-/**
- * ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
-{
- return !!(rx_desc->wb.status_error0 &
- cpu_to_le16(stat_err_bits));
-}
-
-/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
+ * If the buffer is an EOP buffer, this function exits returning false;
+ * otherwise it returns true, indicating that this is in fact a non-EOP
+ * buffer.
*/
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb)
{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false;
/* place skb in next buffer to be received */
- rx_ring->rx_buf[ntc].skb = skb;
+ rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
-{
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
- * ice_rx_hash - set the hash value in the skb
- * @rx_ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: pointer to current skb
- * @rx_ptype: the ptype value from the descriptor
- */
-static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 rx_ptype)
-{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
- u32 hash;
-
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
- return;
-
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
-}
-
-/**
- * ice_rx_csum - Indicate in skb if checksum is good
- * @ring: the ring we care about
- * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
- * @ptype: the packet type decoded by hardware
- *
- * skb->protocol must be set before this function is called
- */
-static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
-{
- struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
- bool ipv4, ipv6;
-
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
- /* Start with CHECKSUM_NONE and by default csum_level = 0 */
- skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
-
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
- return;
-
- /* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
- return;
-
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
-
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
- goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
- goto checksum_fail;
-
- /* check for L4 errors and handle packets that were not able to be
- * checksummed due to arrival speed
- */
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
- goto checksum_fail;
-
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- default:
- break;
- }
- return;
-
-checksum_fail:
- ring->vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: Rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- */
-static void
-ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
-{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
-
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
- ice_rx_csum(rx_ring, skb, rx_desc, ptype);
-}
-
-/**
- * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: Rx ring in play
- * @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
- *
- * This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without VLAN tag)
- */
-static void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
-{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1006,8 +985,13 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog = NULL;
+ struct xdp_buff xdp;
bool failure;
+ xdp.rxq = &rx_ring->xdp_rxq;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1042,10 +1026,57 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ if (!size) {
+ xdp.data = NULL;
+ xdp.data_end = NULL;
+ xdp.data_hard_start = NULL;
+ xdp.data_meta = NULL;
+ goto construct_skb;
+ }
+
+ xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
+ xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
+ xdp.data_meta = xdp.data;
+ xdp.data_end = xdp.data + size;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ goto construct_skb;
+ }
+
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ rcu_read_unlock();
+ if (!xdp_res)
+ goto construct_skb;
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
+ size);
+#endif
+ xdp_xmit |= xdp_res;
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ } else {
+ rx_buf->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_pkts++;
+
+ cleaned_count++;
+ ice_put_rx_buf(rx_ring, rx_buf);
+ continue;
+construct_skb:
if (skb)
- ice_add_rx_frag(rx_buf, skb, size);
+ ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+ else if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
- skb = ice_construct_skb(rx_ring, rx_buf, size);
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1072,10 +1103,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (ice_test_staterr(rx_desc, stat_err_bits))
vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
- /* correct empty headers and pad skb if needed (to make valid
- * ethernet frame
- */
- if (ice_cleanup_headers(skb)) {
+ /* pad the skb if needed, to make a valid Ethernet frame */
+ if (eth_skb_pad(skb)) {
skb = NULL;
continue;
}
@@ -1099,13 +1128,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
- /* update queue and vector specific stats */
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.pkts += total_rx_pkts;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ if (xdp_prog)
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
@@ -1483,9 +1509,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx)
- if (!ice_clean_tx_irq(ring, budget))
+ ice_for_each_ring(ring, q_vector->tx) {
+ bool wd = ring->xsk_umem ?
+ ice_clean_tx_irq_zc(ring, budget) :
+ ice_clean_tx_irq(ring, budget);
+
+ if (!wd)
clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (unlikely(budget <= 0))
@@ -1505,7 +1536,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
- cleaned = ice_clean_rx_irq(ring, budget_per_ring);
+ /* A dedicated path for zero-copy allows us to make a single
+ * comparison in the IRQ context instead of many inside the
+ * ice_clean_rx_irq function, and keeps the codebase cleaner.
+ */
+ cleaned = ring->xsk_umem ?
+ ice_clean_rx_irq_zc(ring, budget_per_ring) :
+ ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1527,17 +1564,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
return min_t(int, work_done, budget - 1);
}
-/* helper function for building cmd/type/offset */
-static __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
-{
- return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
- (td_cmd << ICE_TXD_QW1_CMD_S) |
- (td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
- (td_tag << ICE_TXD_QW1_L2TAG1_S));
-}
-
/**
* __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
@@ -1689,9 +1715,9 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
i = 0;
/* write last descriptor with RS and EOP bits */
- td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag);
+ td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
+ td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 94a9280193e2..a84cc0e6dd27 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,8 +4,12 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include "ice_type.h"
+
#define ICE_DFLT_IRQ_WORK 256
+#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
@@ -22,6 +26,71 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
+/* Attempt to maximize the headroom available for incoming frames. We use a 2K
+ * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
+ * This leaves us with 512 bytes of room. From that we need to deduct the
+ * space needed for the shared info and the padding needed to IP align the
+ * frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define ICE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+
+/**
+ * ice_compute_pad - compute the padding
+ * @rx_buf_len: buffer length
+ *
+ * Figure out the size of a half page based on the given buffer length,
+ * then subtract the skb_shared_info overhead followed by the actual
+ * buffer length; the result is the space that is left over for padding.
+ */
+static inline int ice_compute_pad(int rx_buf_len)
+{
+ int half_page_size;
+
+ half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
+}
+
+/**
+ * ice_skb_pad - determine the padding that we can supply
+ *
+ * Figure out the right Rx buffer size and, based on that, calculate the
+ * padding.
+ */
+static inline int ice_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (ICE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = ICE_RXBUF_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ice_compute_pad(rx_buf_len);
+}
+
+#define ICE_SKB_PAD ice_skb_pad()
+#else
+#define ICE_2K_TOO_SMALL_WITH_PADDING false
+#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
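A concrete illustration, assuming 4 KB pages, NET_IP_ALIGN == 0, and a 64-byte cache line (typical x86_64 defaults): NET_SKB_PAD + ICE_RXBUF_1536 fits within SKB_WITH_OVERHEAD(ICE_RXBUF_2048), so ICE_2K_TOO_SMALL_WITH_PADDING is false, ice_skb_pad() picks rx_buf_len = 1536, and ice_compute_pad() returns SKB_WITH_OVERHEAD(2048) - 1536, roughly 192 bytes of headroom given a 320-byte skb_shared_info.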
+
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -49,12 +118,24 @@
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_XDP_PASS 0
+#define ICE_XDP_CONSUMED BIT(0)
+#define ICE_XDP_TX BIT(1)
+#define ICE_XDP_REDIR BIT(2)
+
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
+#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *raw_buf; /* used for XDP */
+ };
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
@@ -76,9 +157,17 @@ struct ice_tx_offload_params {
struct ice_rx_buf {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ice_q_stats {
@@ -198,18 +287,44 @@ struct ice_ring {
};
struct rcu_head rcu; /* to avoid race on free */
+ struct bpf_prog *xdp_prog;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca;
+ /* CL3 - 3rd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible
*/
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u8 flags;
dma_addr_t dma; /* physical address of ring */
unsigned int size; /* length of descriptor ring in bytes */
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
-#ifdef CONFIG_DCB
u8 dcb_tc; /* Traffic class of ring */
-#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;
+static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
+}
+
+static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
+}
+
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
@@ -230,6 +345,19 @@ struct ice_ring_container {
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
+static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
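For example, assuming 4 KB pages: a ring with rx_buf_len == ICE_RXBUF_3072 needs more than half a page per buffer, so ice_rx_pg_order() returns 1 and ice_rx_pg_size() yields an order-1 (8 KB) page split into two halves, while ICE_RXBUF_2048 keeps order 0 and splits a single 4 KB page in two.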
+
+union ice_32b_rx_flex_desc;
+
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
new file mode 100644
index 000000000000..35bbc4ff603c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_txrx_lib.h"
+
+/**
+ * ice_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ */
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+{
+ u16 prev_ntu = rx_ring->next_to_use;
+
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
+ /* QRX_TAIL will be updated with any tail value, but hardware ignores
+ * the lower 3 bits. This makes it so we only bump tail on meaningful
+ * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+ * the budget depending on the current traffic load.
+ */
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
+}
+
+/**
+ * ice_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ */
+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+{
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * ice_rx_hash - set the hash value in the skb
+ * @rx_ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: pointer to current skb
+ * @rx_ptype: the ptype value from the descriptor
+ */
+static void
+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 rx_ptype)
+{
+ struct ice_32b_rx_flex_desc_nic *nic_mdid;
+ u32 hash;
+
+ if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
+ return;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ hash = le32_to_cpu(nic_mdid->rss_hash);
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+}
+
+/**
+ * ice_rx_csum - Indicate in skb if checksum is good
+ * @ring: the ring we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void
+ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+
+ rx_status = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_error = rx_status;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ /* Start with CHECKSUM_NONE and by default csum_level = 0 */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
+ /* check if Rx checksum is enabled */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ return;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ goto checksum_fail;
+ else if (ipv6 && (rx_status &
+ (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+ goto checksum_fail;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ default:
+ break;
+ }
+ return;
+
+checksum_fail:
+ ring->vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: Rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
+{
+ ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+}
+
+/**
+ * ice_receive_skb - Send a completed packet up the stack
+ * @rx_ring: Rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: VLAN tag for packet
+ *
+ * This function sends the completed packet (via skb) up the stack using
+ * GRO receive functions (with/without VLAN tag)
+ */
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+{
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+}
+
+/**
+ * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
+ * @data: packet data pointer
+ * @size: packet data size
+ * @xdp_ring: XDP ring for transmission
+ */
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+{
+ u16 i = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return ICE_XDP_CONSUMED;
+
+ tx_buf = &xdp_ring->tx_buf[i];
+ tx_buf->bytecount = size;
+ tx_buf->gso_segs = 1;
+ tx_buf->raw_buf = data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, i);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_buf->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return ICE_XDP_TX;
+}
+
+/**
+ * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
+ * @xdp: XDP buffer
+ * @xdp_ring: XDP Tx ring
+ *
+ * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED on failure.
+ */
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+{
+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return ICE_XDP_CONSUMED;
+
+ return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+}
+
+/**
+ * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps the XDP Tx tail and/or flushes the redirect map; it
+ * should be called when a batch of packets has been processed in the
+ * NAPI loop.
+ */
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+{
+ if (xdp_res & ICE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & ICE_XDP_TX) {
+ struct ice_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->q_index];
+
+ ice_xdp_ring_update_tail(xdp_ring);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
new file mode 100644
index 000000000000..ba9164dad9ae
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_TXRX_LIB_H_
+#define _ICE_TXRX_LIB_H_
+#include "ice.h"
+
+/**
+ * ice_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+{
+ return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+}
+
+static inline __le64
+build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ (td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << ICE_TXD_QW1_L2TAG1_S));
+}
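For instance, ice_xmit_xdp_ring() above stamps the sole descriptor of an XDP frame with this helper (the same pattern the reworked ice_tx_map() uses for the last descriptor of a regular frame):

        tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
                                                  size, 0);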
+
+/**
+ * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ */
+static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+{
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype);
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6667d17a4206..c4854a987130 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -19,6 +19,17 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
return test_bit(tc, &bitmap);
}
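+/* divide a 64-bit value by b, rounding the result to the nearest integer */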
+static inline u64 round_up_64bit(u64 a, u32 b)
+{
+ return div64_long(((a) + (b) / 2), (b));
+}
+
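+/* round N down or up to the nearest multiple of R (down when N % R < R / 2) */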
+static inline u32 ice_round_to_num(u32 N, u32 R)
+{
+ return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) :
+ ((((N) + (R) - 1) / (R)) * (R)));
+}
+
/* Driver always calls main vsi_handle first */
#define ICE_MAIN_VSI_HANDLE 0
@@ -35,6 +46,8 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
+#define ICE_DBG_AQ_DESC BIT_ULL(25)
+#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
#define ICE_DBG_USER BIT_ULL(31)
@@ -189,6 +202,7 @@ struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_funcs;
};
/* MAC info */
@@ -272,10 +286,56 @@ enum ice_agg_type {
ICE_AGG_TYPE_QG
};
+/* Rate limit types */
+enum ice_rl_type {
+ ICE_UNKNOWN_BW = 0,
+ ICE_MIN_BW, /* for CIR profile */
+ ICE_MAX_BW, /* for EIR profile */
+ ICE_SHARED_BW /* for shared profile */
+};
+
+#define ICE_SCHED_MIN_BW 500 /* in Kbps */
+#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */
+#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
+#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
-/* VSI type list entry to locate corresponding VSI/ag nodes */
+/* Data structure for saving BW information */
+enum ice_bw_type {
+ ICE_BW_TYPE_PRIO,
+ ICE_BW_TYPE_CIR,
+ ICE_BW_TYPE_CIR_WT,
+ ICE_BW_TYPE_EIR,
+ ICE_BW_TYPE_EIR_WT,
+ ICE_BW_TYPE_SHARED,
+ ICE_BW_TYPE_CNT /* This must be last */
+};
+
+struct ice_bw {
+ u32 bw;
+ u16 bw_alloc;
+};
+
+struct ice_bw_type_info {
+ DECLARE_BITMAP(bw_t_bitmap, ICE_BW_TYPE_CNT);
+ u8 generic;
+ struct ice_bw cir_bw;
+ struct ice_bw eir_bw;
+ u32 shared_bw;
+};
+
+/* VSI queue context structure for a given TC */
+struct ice_q_ctx {
+ u16 q_handle;
+ u32 q_teid;
+ /* bw_t_info saves queue BW information */
+ struct ice_bw_type_info bw_t_info;
+};
+
+/* VSI type list entry to locate corresponding VSI/aggregator nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
@@ -364,6 +424,8 @@ struct ice_port_info {
struct mutex sched_lock; /* protect access to TXSched tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ /* List containing profile ID(s) and other params per layer */
+ struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
@@ -415,6 +477,8 @@ struct ice_hw {
u8 pf_id; /* device profile info */
+ u16 max_burst_size; /* driver sets this value */
+
/* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
@@ -555,6 +619,8 @@ struct ice_hw_port_stats {
};
/* Checksum and Shadow RAM pointers */
+#define ICE_SR_BOOT_CFG_PTR 0x132
+#define ICE_NVM_OEM_VER_OFF 0x02
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
@@ -568,6 +634,7 @@ struct ice_hw_port_stats {
#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
#define ICE_OEM_VER_SHIFT 24
#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index b45797f39b2f..edb374296d1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -2,9 +2,39 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
/**
+ * ice_validate_vf_id - helper to check if VF ID is valid
+ * @pf: pointer to the PF structure
+ * @vf_id: the ID of the VF to check
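+ *
+ * Returns 0 if the VF ID is valid, -EINVAL otherwise.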
+ */
+static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
+{
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ice_check_vf_init - helper to check if VF init is complete
+ * @pf: pointer to the PF structure
+ * @vf: the pointer to the VF to check
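+ *
+ * Returns 0 if the VF init has completed, -EBUSY while it is in reset.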
+ */
+static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
+{
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
* ice_err_to_virt_err - translate errors for VF return code
* @ice_err: error return code
*/
@@ -184,12 +214,14 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ struct device *dev;
int first, last, v;
struct ice_hw *hw;
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
+ dev = ice_pf_to_dev(pf);
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
@@ -208,13 +240,12 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
else
- dev_err(&pf->pdev->dev,
- "Scattered mode for VF Tx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
else
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Scattered mode for VF Rx queues is not yet implemented\n");
}
@@ -289,6 +320,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf)
*/
void ice_free_vfs(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int tmp, i;
@@ -310,24 +342,24 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+ dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
- /* disable VF qp mappings */
+ /* disable VF qp mappings and set VF disable state */
ice_dis_vf_mappings(&pf->vf[i]);
+ set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
}
if (ice_sriov_free_msix_res(pf))
- dev_err(&pf->pdev->dev,
- "Failed to free MSIX resources used by SR-IOV\n");
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
- devm_kfree(&pf->pdev->dev, pf->vf);
+ devm_kfree(dev, pf->vf);
pf->vf = NULL;
/* This check is for when the driver is unloaded while VFs are
@@ -366,9 +398,11 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
struct ice_pf *pf = vf->pf;
u32 reg, reg_idx, bit_idx;
+ struct device *dev;
struct ice_hw *hw;
int vf_abs_id, i;
+ dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
@@ -389,7 +423,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
* by the time we get here.
*/
if (!is_pfr)
- wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
+ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -414,7 +448,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
@@ -457,13 +491,12 @@ static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
*/
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -475,7 +508,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -483,7 +516,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
vsi->info = ctxt->info;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -531,14 +564,16 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
LIST_HEAD(tmp_add_list);
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
+ struct device *dev;
int status = 0;
+ dev = ice_pf_to_dev(pf);
/* first vector index is the VFs OICR index */
vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
if (!vsi) {
- dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+ dev_err(dev, "Failed to create VF VSI\n");
return -ENOMEM;
}
@@ -566,8 +601,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev,
- "could not add mac filters error %d\n", status);
+ dev_err(dev, "could not add mac filters error %d\n", status);
else
vf->num_mac = 1;
@@ -578,7 +612,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* more vectors.
*/
ice_alloc_vsi_res_exit:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
return status;
}
@@ -634,10 +668,12 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
int abs_vf_id, abs_first, abs_last;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ struct device *dev;
int first, last, v;
struct ice_hw *hw;
u32 reg;
+ dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
@@ -685,8 +721,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
VPLAN_TX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
- dev_err(&pf->pdev->dev,
- "Scattered mode for VF Tx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
}
/* set regardless of mapping mode */
@@ -704,8 +739,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
VPLAN_RX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
- dev_err(&pf->pdev->dev,
- "Scattered mode for VF Rx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
}
@@ -851,6 +885,7 @@ static int ice_check_avail_res(struct ice_pf *pf)
{
int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
u16 num_msix, num_txq, num_rxq, num_avail_msix;
+ struct device *dev = ice_pf_to_dev(pf);
if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
@@ -883,8 +918,7 @@ static int ice_check_avail_res(struct ice_pf *pf)
ICE_DFLT_INTR_PER_VF,
ICE_MIN_INTR_PER_VF);
} else {
- dev_err(&pf->pdev->dev,
- "Number of VFs %d exceeds max VF count %d\n",
+ dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
return -EIO;
}
@@ -1022,12 +1056,12 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
*/
static bool ice_config_res_vfs(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int v;
if (ice_check_avail_res(pf)) {
- dev_err(&pf->pdev->dev,
- "Cannot allocate VF resources, try with fewer number of VFs\n");
+ dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
return false;
}
@@ -1040,9 +1074,8 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
- dev_dbg(&pf->pdev->dev,
- "VF-id %d has %d queues configured\n",
- vf->vf_id, vf->num_vf_qs);
+ dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
+ vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
}
@@ -1066,6 +1099,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
*/
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
int v, i;
@@ -1124,7 +1158,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* time, but continue on with the operation.
*/
if (v < pf->num_alloc_vfs)
- dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+ dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
@@ -1141,8 +1175,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
if (ice_sriov_free_msix_res(pf))
- dev_err(&pf->pdev->dev,
- "Failed to free MSIX resources used by SR-IOV\n");
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
if (!ice_config_res_vfs(pf))
return false;
@@ -1151,6 +1184,25 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/**
+ * ice_is_vf_disabled
+ * @vf: pointer to the VF info
+ *
+ * Returns true if the PF or VF is disabled, false otherwise.
+ */
+static bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ /* If the PF has been disabled, there is no need to reset the VF until
+ * the PF is active again. Similarly, if the VF has been disabled, this
+ * means something else is resetting the VF, so we shouldn't continue.
+ * Otherwise the caller sets the VF disable state bit and proceeds with
+ * the actual reset.
+ */
+ return (test_bit(__ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
* ice_reset_vf - Reset a particular VF
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -1161,25 +1213,23 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ struct device *dev;
struct ice_hw *hw;
bool rsd = false;
u8 promisc_m;
u32 reg;
int i;
- /* If the PF has been disabled, there is no need resetting VF until
- * PF is active again.
- */
- if (test_bit(__ICE_VF_DIS, pf->state))
- return false;
+ dev = ice_pf_to_dev(pf);
- /* If the VF has been disabled, this means something else is
- * resetting the VF, so we shouldn't continue. Otherwise, set
- * disable VF state bit for actual reset, and continue.
- */
- if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
+ if (ice_is_vf_disabled(vf)) {
+ dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+ vf->vf_id);
+ return true;
+ }
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
ice_trigger_vf_reset(vf, is_vflr, false);
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -1216,8 +1266,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
* continue on with the operation.
*/
if (!rsd)
- dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- vf->vf_id);
+ dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
@@ -1231,7 +1280,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
- dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
+ dev_err(dev, "disabling promiscuous mode failed\n");
}
/* free VF resources to begin resetting the VSI state */
@@ -1282,19 +1331,26 @@ void ice_vc_notify_reset(struct ice_pf *pf)
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe;
+ struct ice_pf *pf;
+
+ if (!vf)
+ return;
- /* validate the request */
- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ pf = vf->pf;
+ if (ice_validate_vf_id(pf, vf->vf_id))
return;
- /* verify if the VF is in either init or active before proceeding */
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ /* Bail out if the VF is in disabled state, or is neither initialized
+ * nor active - otherwise proceed with notifications
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
return;
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
NULL);
}
@@ -1306,6 +1362,7 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
*/
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_vf *vfs;
int i, ret;
@@ -1322,8 +1379,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_unroll_intr;
}
/* allocate memory */
- vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
- GFP_KERNEL);
+ vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
goto err_pci_disable_sriov;
@@ -1352,7 +1408,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
err_unroll_sriov:
pf->vf = NULL;
- devm_kfree(&pf->pdev->dev, vfs);
+ devm_kfree(dev, vfs);
vfs = NULL;
pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
@@ -1397,7 +1453,7 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf)
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
int pre_existing_vfs = pci_num_vf(pf->pdev);
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
int err;
if (!ice_pf_state_is_nominal(pf)) {
@@ -1407,7 +1463,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
dev_err(dev, "This device is not capable of SR-IOV\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
@@ -1442,10 +1498,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
+ struct device *dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
- dev_err(&pf->pdev->dev,
- "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
return -EOPNOTSUPP;
}
@@ -1455,8 +1511,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pdev)) {
ice_free_vfs(pf);
} else {
- dev_err(&pf->pdev->dev,
- "can't free VFs because some are assigned to VMs.\n");
+ dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
}
@@ -1495,12 +1550,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
}
/**
- * ice_vc_dis_vf - Disable a given VF via SW reset
+ * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
* @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset
*/
-static void ice_vc_dis_vf(struct ice_vf *vf)
+static void ice_vc_reset_vf(struct ice_vf *vf)
{
ice_vc_notify_vf_reset(vf);
ice_reset_vf(vf, false);
@@ -1521,24 +1574,28 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
+ struct device *dev;
struct ice_pf *pf;
- /* validate the request */
- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ if (!vf)
return -EINVAL;
pf = vf->pf;
+ if (ice_validate_vf_id(pf, vf->vf_id))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_inval_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
+ dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
+ v_opcode, v_retval);
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ dev_err(dev, "Use PF Control I/F to enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
return -EIO;
}
@@ -1551,7 +1608,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
@@ -1599,14 +1656,14 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
int len = 0;
int ret;
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ if (ice_check_vf_init(pf, vf)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
len = sizeof(struct virtchnl_vf_resource);
- vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+ vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
len = 0;
@@ -1672,6 +1729,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->dflt_lan_addr.addr);
+ /* match guest capabilities */
+ vf->driver_caps = vfres->vf_cap_flags;
+
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
err:
@@ -1679,7 +1739,7 @@ err:
ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
(u8 *)vfres, len);
- devm_kfree(&pf->pdev->dev, vfres);
+ kfree(vfres);
return ret;
}
@@ -1775,7 +1835,7 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi = NULL;
+ struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1822,7 +1882,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi = NULL;
+ struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1869,8 +1929,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
+ struct ice_eth_stats stats = { 0 };
struct ice_pf *pf = vf->pf;
- struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -1889,7 +1949,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- memset(&stats, 0, sizeof(struct ice_eth_stats));
ice_update_eth_stats(vsi);
stats = vsi->eth_stats;
@@ -2159,9 +2218,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vector_id = map->vector_id;
vsi_id = map->vsi_id;
- /* validate msg params */
- if (!(vector_id < pf->hw.func_caps.common_cap
- .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2252,7 +2313,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2365,9 +2426,12 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
enum virtchnl_ops vc_op;
enum ice_status status;
struct ice_vsi *vsi;
+ struct device *dev;
int mac_count = 0;
int i;
+ dev = ice_pf_to_dev(pf);
+
if (set)
vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
else
@@ -2381,7 +2445,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
/* There is no need to let VF know about not being trusted
@@ -2406,13 +2470,13 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* VF is trying to add filters that the PF
* already added. Just continue.
*/
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
/* VF can't remove dflt_lan_addr/bcast MAC */
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
continue;
@@ -2421,7 +2485,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2430,7 +2494,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2441,12 +2505,12 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
if (status == ICE_ERR_DOES_NOT_EXIST ||
status == ICE_ERR_ALREADY_EXISTS) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"can't %s MAC filters %pM for VF %d, error %d\n",
set ? "add" : "remove", maddr, vf->vf_id,
status);
} else if (status) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"can't %s MAC filters for VF %d, error %d\n",
set ? "add" : "remove", vf->vf_id, status);
v_ret = ice_err_to_virt_err(status);
@@ -2511,7 +2575,9 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
u16 cur_queues;
+ struct device *dev;
+ dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2522,17 +2588,15 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
ice_get_avail_rxq_count(pf));
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (!req_queues) {
- dev_err(&pf->pdev->dev,
- "VF %d tried to request 0 queues. Ignoring.\n",
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
vf->vf_id);
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
- dev_err(&pf->pdev->dev,
- "VF %d tried to request more than %d queues.\n",
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(&pf->pdev->dev,
+ dev_warn(dev,
"VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
@@ -2540,9 +2604,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
- ice_vc_dis_vf(vf);
- dev_info(&pf->pdev->dev,
- "VF %d granted request of %u queues.\n",
+ ice_vc_reset_vf(vf);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
}
@@ -2568,39 +2631,34 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vsi;
+ struct device *dev;
struct ice_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
if (vlan_id > ICE_MAX_VLANID || qos > 7) {
- dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ dev_err(dev, "Invalid VF Parameters\n");
return -EINVAL;
}
if (vlan_proto != htons(ETH_P_8021Q)) {
- dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ dev_err(dev, "VF VLAN protocol is not supported\n");
return -EPROTONOSUPPORT;
}
vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
/* duplicate request, so just return success */
- dev_info(&pf->pdev->dev,
- "Duplicate pvid %d request\n", vlanprio);
+ dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
return ret;
}
@@ -2619,7 +2677,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
}
if (vlan_id) {
- dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
/* add new VLAN filter for each MAC */
@@ -2638,6 +2696,17 @@ error_set_pvid:
}
/**
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ */
+static bool ice_vf_vlan_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
+
+/**
* ice_vc_process_vlan_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2653,16 +2722,23 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
struct ice_pf *pf = vf->pf;
bool vlan_promisc = false;
struct ice_vsi *vsi;
+ struct device *dev;
struct ice_hw *hw;
int status = 0;
u8 promisc_m;
int i;
+ dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2670,7 +2746,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
@@ -2682,7 +2758,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
}
@@ -2700,14 +2776,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
- dev_err(&pf->pdev->dev,
- "%sable VLAN stripping failed for VSI %i\n",
- add_v ? "en" : "dis", vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
vlan_promisc = true;
@@ -2718,7 +2786,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
@@ -2739,7 +2807,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
@@ -2753,7 +2821,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
promisc_m, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
@@ -2848,6 +2916,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_manage_vlan_stripping(vsi, true))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2874,6 +2947,11 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2889,6 +2967,33 @@ error_param:
}
/**
+ * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
+ * @vf: VF to enable/disable VLAN stripping for on initialization
+ *
+ * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping;
+ * if the flag is cleared, disable stripping. For example, the flag
+ * will be cleared when port VLANs are configured by the administrator before
+ * passing the VF to the guest or if the AVF driver doesn't support VLAN
+ * offloads.
+ */
+static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* don't modify stripping if port VLAN is configured */
+ if (vsi->info.pvid)
+ return 0;
+
+ if (ice_vf_vlan_offload_ena(vf->driver_caps))
+ return ice_vsi_manage_vlan_stripping(vsi, true);
+ else
+ return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
* @event: pointer to the AQ event
@@ -2903,9 +3008,11 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
u16 msglen = event->msg_len;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
+ struct device *dev;
int err = 0;
- if (vf_id >= pf->num_alloc_vfs) {
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
goto error_handler;
}
@@ -2931,7 +3038,7 @@ error_handler:
if (err) {
ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
NULL, 0);
- dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
return;
}
@@ -2942,6 +3049,10 @@ error_handler:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ice_vc_get_vf_res_msg(vf, msg);
+ if (ice_vf_init_vlan_stripping(vf))
+ dev_err(dev,
+ "Failed to initialize VLAN stripping for VF %d\n",
+ vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
@@ -2992,8 +3103,8 @@ error_handler:
break;
case VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
- v_opcode, vf_id);
+ dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
+ vf_id);
err = ice_vc_send_msg_to_vf(vf, v_opcode,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
NULL, 0);
@@ -3003,8 +3114,7 @@ error_handler:
/* Helper function cares less about error return values here
* as it is busy with pending work.
*/
- dev_info(&pf->pdev->dev,
- "PF failed to honor VF %d, opcode %d, error %d\n",
+ dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
}
@@ -3020,24 +3130,18 @@ error_handler:
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
struct ice_vf *vf;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
ivi->vf = vf_id;
ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
@@ -3070,33 +3174,29 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi = pf->vsi[0];
struct ice_vsi_ctx *ctx;
enum ice_status status;
+ struct device *dev;
struct ice_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
if (ena == vf->spoofchk) {
- dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ dev_dbg(dev, "VF spoofchk already %s\n",
ena ? "ON" : "OFF");
return 0;
}
- ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -3109,7 +3209,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Error %d, failed to update VSI* parameters\n", status);
ret = -EIO;
goto out;
@@ -3119,11 +3219,28 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
vsi->info.sec_flags = ctx->info.sec_flags;
vsi->info.sw_flags2 = ctx->info.sw_flags2;
out:
- devm_kfree(&pf->pdev->dev, ctx);
+ kfree(ctx);
return ret;
}
/**
+ * ice_wait_on_vf_reset
+ * @vf: The VF being reset
+ *
+ * Poll to make sure a given VF is ready after reset
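+ * (up to ICE_MAX_VF_RESET_WAIT iterations, sleeping 20 ms between polls)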
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(20);
+ }
+}
+
+/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3133,23 +3250,25 @@ out:
*/
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ /* Don't set MAC on disabled VF */
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ /* In case the VF is in reset mode, wait until it is completed.
+ * Depending on factors like the queue disabling routine, this could
+ * take ~250 ms.
+ */
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
netdev_err(netdev, "%pM not a valid unicast address\n", mac);
@@ -3167,7 +3286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
"MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
- ice_vc_dis_vf(vf);
+ ice_vc_reset_vf(vf);
return ret;
}
@@ -3181,30 +3300,34 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
*/
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct device *dev;
struct ice_vf *vf;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ /* Don't set Trusted Mode on disabled VF */
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ /* In case the VF is in reset mode, wait until it is completed.
+ * Depending on factors like the queue disabling routine, this could
+ * take ~250 ms.
+ */
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
/* Check if already trusted */
if (trusted == vf->trusted)
return 0;
vf->trusted = trusted;
- ice_vc_dis_vf(vf);
- dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ ice_vc_reset_vf(vf);
+ dev_info(dev, "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
return 0;
@@ -3220,26 +3343,21 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
*/
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct virtchnl_pf_event pfe = { 0 };
struct ice_link_status *ls;
struct ice_vf *vf;
struct ice_hw *hw;
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
hw = &pf->hw;
ls = &pf->hw.port_info->phy.link_info;
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
@@ -3273,3 +3391,48 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
return 0;
}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
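+ *
+ * Returns 0 on success, -EINVAL for an invalid VF ID or missing VSI, and
+ * -EBUSY while the VF is still initializing.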
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_eth_stats *stats;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+
+ if (ice_check_vf_init(pf, vf))
+ return -EBUSY;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ ice_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 0d9880c8bba3..88aa65d5cb31 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -38,6 +38,7 @@
#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_WAIT 15
/* Specific VF states */
enum ice_vf_states {
@@ -121,6 +122,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+int
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -193,5 +197,13 @@ ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
{
return 0;
}
+
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_stats __always_unused *vf_stats)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
new file mode 100644
index 000000000000..cf9b8b22d24f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_type.h"
+#include "ice_xsk.h"
+#include "ice_txrx.h"
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
+ sizeof(vsi->rx_rings[q_idx]->rx_stats));
+ memset(&vsi->tx_rings[q_idx]->stats, 0,
+ sizeof(vsi->tx_rings[q_idx]->stats));
+ if (ice_is_xdp_ena_vsi(vsi))
+ memset(&vsi->xdp_rings[q_idx]->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
+ * @vsi: VSI that has netdev
+ * @q_vector: q_vector that has NAPI context
+ * @enable: true for enable, false for disable
+ */
+static void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable)
+{
+ if (!vsi->netdev || !q_vector)
+ return;
+
+ if (enable)
+ napi_enable(&q_vector->napi);
+ else
+ napi_disable(&q_vector->napi);
+}
+
+/**
+ * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
+ * @vsi: the VSI that contains queue vector being un-configured
+ * @rx_ring: Rx ring that will have its IRQ disabled
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u16 reg;
+ u32 val;
+
+ /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
+ * QINT_RQCTL needs to be handled here
+ */
+ reg = rx_ring->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+
+ if (q_vector) {
+ u16 v_idx = q_vector->v_idx;
+
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
+ ice_flush(hw);
+ synchronize_irq(pf->msix_entries[v_idx + base].vector);
+ }
+}
+
+/**
+ * ice_qvec_cfg_msix - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ u16 reg_idx = q_vector->reg_idx;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_ring *ring;
+
+ ice_cfg_itr(hw, q_vector);
+
+ wr32(hw, GLINT_RATE(reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->tx.itr_idx);
+
+ ice_for_each_ring(ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->rx.itr_idx);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qvec_ena_irq - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ ice_irq_dynamic_ena(hw, vsi, q_vector);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int timeout = 50;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
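+ /* wait for any in-flight VSI reconfiguration to finish; with a 50
+ * iteration limit and 1-2 ms sleeps this gives up after ~50-100 ms
+ */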
+ while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+ return err;
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (err)
+ return err;
+ }
+ err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ if (err)
+ return err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return 0;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ qg_buf->num_txqs = 1;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ if (err)
+ goto free_buf;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(qg_buf, 0, sizeof(*qg_buf));
+ qg_buf->num_txqs = 1;
+ err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ if (err)
+ goto free_buf;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ err = ice_setup_rx_ctx(rx_ring);
+ if (err)
+ goto free_buf;
+
+ ice_qvec_cfg_msix(vsi, q_vector);
+
+ err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ if (err)
+ goto free_buf;
+
+ clear_bit(__ICE_CFG_BUSY, vsi->state);
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+free_buf:
+ kfree(qg_buf);
+ return err;
+}
+
+/**
+ * ice_xsk_alloc_umems - allocate the array of UMEM pointers for a VSI
+ * @vsi: VSI to allocate the UMEM pointer array on
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+{
+ if (vsi->xsk_umems)
+ return 0;
+
+ vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ GFP_KERNEL);
+
+ if (!vsi->xsk_umems) {
+ vsi->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_xsk_add_umem - add a UMEM region for XDP sockets
+ * @vsi: VSI to which the UMEM will be added
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ int err;
+
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * @vsi: VSI from which the UMEM will be removed
+ * @qid: Ring/qid associated with the UMEM
+ */
+static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+{
+ vsi->xsk_umems[qid] = NULL;
+ vsi->num_xsk_umems_used--;
+
+ if (vsi->num_xsk_umems_used == 0) {
+ kfree(vsi->xsk_umems);
+ vsi->xsk_umems = NULL;
+ vsi->num_xsk_umems = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
+ * @vsi: VSI to map the UMEM region
+ * @umem: UMEM to map
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = ice_pf_to_dev(pf);
+ for (i = 0; i < umem->npgs; i++) {
+ dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ ICE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma)) {
+ dev_dbg(dev,
+ "XSK UMEM DMA mapping error on page num %d", i);
+ goto out_unmap;
+ }
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ for (; i > 0; i--) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+ umem->pages[i].dma = 0;
+ }
+
+ return -EFAULT;
+}
+
+/**
+ * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
+ * @vsi: VSI from which the UMEM will be unmapped
+ * @umem: UMEM to unmap
+ */
+static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = ice_pf_to_dev(pf);
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_disable - disable a UMEM region
+ * @vsi: Current VSI
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+{
+ if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
+ !vsi->xsk_umems[qid])
+ return -EINVAL;
+
+ ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ ice_xsk_remove_umem(vsi, qid);
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_enable - enable a UMEM region
+ * @vsi: Current VSI
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ int err;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_umems)
+ return -EINVAL;
+
+ if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ return -EBUSY;
+
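+ /* size the fill queue reuse cache to the length of the Rx ring */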
+ reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ice_xsk_umem_dma_map(vsi, umem);
+ if (err)
+ return err;
+
+ err = ice_xsk_add_umem(vsi, umem, qid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * @vsi: Current VSI
+ * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ bool if_running, umem_present = !!umem;
+ int ret = 0, umem_failure = 0;
+
+ if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+
+ if (if_running) {
+ ret = ice_qp_dis(vsi, qid);
+ if (ret) {
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ goto xsk_umem_if_up;
+ }
+ }
+
+ umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
+ ice_xsk_umem_disable(vsi, qid);
+
+xsk_umem_if_up:
+ if (if_running) {
+ ret = ice_qp_ena(vsi, qid);
+ if (!ret && umem_present)
+ napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ else if (ret)
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ }
+
+ if (umem_failure) {
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ umem_present ? "en" : "dis", umem_failure);
+ return umem_failure;
+ }
+
+ return ret;
+}
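For context, ice_xsk_umem_setup() is the kind of helper a driver calls from its XDP_SETUP_XSK_UMEM handler. A hedged sketch of that dispatch follows; the handler shape is an assumption for illustration, and the real ndo_bpf hook is added elsewhere in this series:

/* Sketch only: dispatching the XSK UMEM command to ice_xsk_umem_setup(). */
static int ice_xdp_dispatch_sketch(struct net_device *dev,
				   struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_XSK_UMEM:
		/* umem == NULL means "disable the region on that queue" */
		return ice_xsk_umem_setup(np->vsi, xdp->xsk.umem,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}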
+
+/**
+ * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
+ * @zca: zero-copy allocator
+ * @handle: Buffer handle
+ */
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
+{
+ struct ice_rx_buf *rx_buf;
+ struct ice_ring *rx_ring;
+ struct xdp_umem *umem;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(zca, struct ice_ring, zca);
+ umem = rx_ring->xsk_umem;
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ mask = umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ rx_buf = &rx_ring->rx_buf[nta];
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = (u64)handle + umem->headroom;
+}
+
+/**
+ * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the hot path.
+ * The buffer can come from the fill queue or the recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = rx_buf->addr;
+ u64 handle, hr;
+
+ if (addr) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+/**
+ * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the slow path.
+ * The buffer can come from the fill queue or the recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, headroom;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ handle &= umem->chunk_mask;
+ headroom = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += headroom;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += headroom;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ * @alloc: the function pointer to call for allocation
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns false if all allocations were successful, true if any fail.
+ */
+static bool
+ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
+ bool alloc(struct ice_ring *, struct ice_rx_buf *))
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ struct ice_rx_buf *rx_buf;
+ bool ret = false;
+
+ if (!count)
+ return false;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ rx_buf = &rx_ring->rx_buf[ntu];
+
+ do {
+ if (!alloc(rx_ring, rx_buf)) {
+ ret = true;
+ break;
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ rx_desc->wb.status_error0 = 0;
+
+ rx_desc++;
+ rx_buf++;
+ ntu++;
+
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ rx_buf = rx_ring->rx_buf;
+ ntu = 0;
+ }
+ } while (--count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return ret;
+}
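The loop above is the usual descriptor-ring producer pattern: one index walks the descriptor and buffer arrays in lockstep, wraps to zero at ring->count, and the hardware tail is bumped only if the producer index actually moved. A reduced model of just that control flow, using toy types rather than driver code:

#include <stdbool.h>
#include <stddef.h>

struct toy_ring { size_t count; size_t next_to_use; };

/* Fill up to n slots; report whether the producer index moved
 * (only then would the caller bump the hardware tail register). */
static bool toy_fill(struct toy_ring *r, size_t n,
		     bool (*alloc_one)(size_t idx))
{
	size_t ntu = r->next_to_use;
	bool moved;

	while (n--) {
		if (!alloc_one(ntu))
			break;
		if (++ntu == r->count)
			ntu = 0;	/* wrap back to descriptor 0 */
	}

	moved = (ntu != r->next_to_use);
	r->next_to_use = ntu;
	return moved;
}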
+
+/**
+ * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_fast_zc);
+}
+
+/**
+ * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_slow_zc);
+}
+
+/**
+ * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
+ * @rx_ring: Rx ring
+ */
+static void ice_bump_ntc(struct ice_ring *rx_ring)
+{
+ int ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(ICE_RX_DESC(rx_ring, ntc));
+}
+
+/**
+ * ice_get_rx_buf_zc - Fetch the current Rx buffer
+ * @rx_ring: Rx ring
+ * @size: size of a buffer
+ *
+ * This function returns the currently received Rx buffer and does
+ * DMA synchronization.
+ *
+ * Returns a pointer to the received Rx buffer.
+ */
+static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
+{
+ struct ice_rx_buf *rx_buf;
+
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
+ size, DMA_BIDIRECTIONAL);
+
+ return rx_buf;
+}
+
+/**
+ * ice_reuse_rx_buf_zc - reuse an Rx buffer
+ * @rx_ring: Rx ring
+ * @old_buf: The buffer to recycle
+ *
+ * This function recycles a finished Rx buffer, and places it on the recycle
+ * queue (next_to_alloc).
+ */
+static void
+ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ice_rx_buf *new_buf;
+
+ new_buf = &rx_ring->rx_buf[nta++];
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ new_buf->dma = old_buf->dma & mask;
+ new_buf->dma += hr;
+
+ new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
+ new_buf->addr += hr;
+
+ new_buf->handle = old_buf->handle & mask;
+ new_buf->handle += rx_ring->xsk_umem->headroom;
+
+ old_buf->addr = NULL;
+}
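The mask arithmetic above works because chunk_mask clears the low bits of an address within a UMEM chunk, recovering the chunk base, after which the headroom offset is re-added. A small worked example with illustrative values (note ice re-adds the bare umem->headroom to the handle but the larger hr to dma/addr; this sketch collapses both into a single headroom):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t chunk_size = 2048;			/* illustrative chunk size */
	uint64_t chunk_mask = ~(chunk_size - 1);	/* clears the low 11 bits */
	uint64_t headroom = 256;			/* illustrative headroom */
	uint64_t old_handle = 3 * chunk_size + headroom;

	uint64_t base = old_handle & chunk_mask;	/* back to chunk 3's base */
	uint64_t new_handle = base + headroom;

	/* prints base=0x1800 new_handle=0x1900 */
	printf("base=0x%llx new_handle=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)new_handle);
	return 0;
}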
+
+/**
+ * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
+ * @rx_ring: Rx ring
+ * @rx_buf: zero-copy Rx buffer
+ * @xdp: XDP buffer
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb on success, NULL on failure.
+ */
+static struct sk_buff *
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end -
+ xdp->data_hard_start;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+
+ return skb;
+}
+
+/**
+ * ice_run_xdp_zc - Executes an XDP program in zero-copy path
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+{
+ int err, result = ICE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring *xdp_ring;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return ICE_XDP_PASS;
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
+ }
+
+ rcu_read_unlock();
+ return result;
+}
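The verdicts dispatched above come from the attached BPF program. A minimal XDP program sketch that would exercise the PASS, DROP and ABORTED arms (illustrative only; compiled with clang -target bpf against libbpf headers):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_verdicts_sketch(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if ((void *)(eth + 1) > data_end)
		return XDP_ABORTED;	/* malformed frame */
	if (eth->h_proto == bpf_htons(ETH_P_ARP))
		return XDP_DROP;	/* exercises the DROP arm above */
	return XDP_PASS;		/* hand the frame to the stack */
}

char _license[] SEC("license") = "GPL";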
+
+/**
+ * ice_clean_rx_irq_zc - consumes packets from the hardware ring
+ * @rx_ring: AF_XDP Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns number of processed packets on success, remaining budget on failure.
+ */
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+ bool failure = false;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ union ice_32b_rx_flex_desc *rx_desc;
+ unsigned int size, xdp_res = 0;
+ struct ice_rx_buf *rx_buf;
+ struct sk_buff *skb;
+ u16 stat_err_bits;
+ u16 vlan_tag = 0;
+ u8 rx_ptype;
+
+ if (cleaned_count >= ICE_RX_BUF_WRITE) {
+ failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc, stat_err_bits))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (!size)
+ break;
+
+ rx_buf = ice_get_rx_buf_zc(rx_ring, size);
+ if (!rx_buf->addr)
+ break;
+
+ xdp.data = rx_buf->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = rx_buf->handle;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ if (xdp_res) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ rx_buf->addr = NULL;
+ } else {
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+ }
+
+ total_rx_bytes += size;
+ total_rx_packets++;
+ cleaned_count++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ice_bump_ntc(rx_ring);
+
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc, stat_err_bits))
+ vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+
+ rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+
+ ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_receive_skb(rx_ring, skb, vlan_tag);
+ }
+
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+
+ return failure ? budget : (int)total_rx_packets;
+}
+
+/**
+ * ice_xmit_zc - Transmit AF_XDP frames queued on the XDP Tx ring
+ * @xdp_ring: XDP Tx ring
+ * @budget: max number of frames to xmit
+ *
+ * Returns true if transmission is done, false if the budget was exhausted.
+ */
+static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+{
+ struct ice_tx_desc *tx_desc = NULL;
+ bool work_done = true;
+ struct xdp_desc desc;
+ dma_addr_t dma;
+
+ while (likely(budget-- > 0)) {
+ struct ice_tx_buf *tx_buf;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ work_done = false;
+ break;
+ }
+
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ break;
+
+ dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
+ DMA_BIDIRECTIONAL);
+
+ tx_buf->bytecount = desc.len;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
+ 0, desc.len, 0);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ice_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return budget > 0 && work_done;
+}
+
+/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if cleanup/transmission is done.
+ */
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+{
+ int total_packets = 0, total_bytes = 0;
+ s16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ bool xmit_done = true;
+ u32 xsk_frames = 0;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntc);
+ tx_buf = &xdp_ring->tx_buf[ntc];
+ ntc -= xdp_ring->count;
+
+ do {
+ if (!(tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets++;
+
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
+ }
+
+ tx_desc->cmd_type_offset_bsz = 0;
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+
+ if (unlikely(!ntc)) {
+ ntc -= xdp_ring->count;
+ tx_buf = xdp_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(xdp_ring, 0);
+ }
+
+ prefetch(tx_desc);
+
+ } while (likely(--budget));
+
+ ntc += xdp_ring->count;
+ xdp_ring->next_to_clean = ntc;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+
+ ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
+ xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
+
+ return budget > 0 && xmit_done;
+}
+
+/**
+ * ice_xsk_wakeup - Implements ndo_xsk_wakeup
+ * @netdev: net_device
+ * @queue_id: queue to wake up
+ * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
+ *
+ * Returns negative on error, zero otherwise.
+ */
+int
+ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ u32 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_q_vector *q_vector;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *ring;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return -ENXIO;
+
+ if (queue_id >= vsi->num_txq)
+ return -ENXIO;
+
+ if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ return -ENXIO;
+
+ ring = vsi->xdp_rings[queue_id];
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+ * schedule the NAPI from interrupt context. If NAPI would be
+ * scheduled here, the interrupt affinity would not be
+ * honored.
+ */
+ q_vector = ring->q_vector;
+ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ ice_trigger_sw_intr(&vsi->back->hw, q_vector);
+
+ return 0;
+}
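From userspace, this hook is reached when an AF_XDP application kicks the kernel on the socket, conventionally with an empty non-blocking sendto(). A sketch of that counterpart, not part of this patch:

#include <errno.h>
#include <sys/socket.h>

/* xsk_fd is an already-bound AF_XDP socket; the empty sendto() is
 * the Tx kick that ends up in ndo_xsk_wakeup / ice_xsk_wakeup(). */
static int xsk_kick_tx(int xsk_fd)
{
	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY && errno != ENOBUFS)
		return -errno;
	return 0;
}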
+
+/**
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: VSI to be checked
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ */
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->xsk_umems)
+ return false;
+
+ for (i = 0; i < vsi->num_xsk_umems; i++) {
+ if (vsi->xsk_umems[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * @rx_ring: ring to be cleaned
+ */
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+{
+ u16 i;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+
+ if (!rx_buf->addr)
+ continue;
+
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
+ rx_buf->addr = NULL;
+ }
+}
+
+/**
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * @xdp_ring: XDP_Tx ring
+ */
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->raw_buf)
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ else
+ xsk_frames++;
+
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
new file mode 100644
index 000000000000..3479e1de98fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_XSK_H_
+#define _ICE_XSK_H_
+#include "ice_txrx.h"
+#include "ice.h"
+
+struct ice_vsi;
+
+#ifdef CONFIG_XDP_SOCKETS
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+#else
+static inline int
+ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
+ struct xdp_umem __always_unused *umem,
+ u16 __always_unused qid)
+{
+ return -ENOTSUPP;
+}
+
+static inline void
+ice_zca_free(struct zero_copy_allocator __always_unused *zca,
+ unsigned long __always_unused handle)
+{
+}
+
+static inline int
+ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ int __always_unused budget)
+{
+ return 0;
+}
+
+static inline bool
+ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ int __always_unused budget)
+{
+ return false;
+}
+
+static inline bool
+ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
+{
+ return false;
+}
+
+static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
+{
+ return false;
+}
+
+static inline int
+ice_xsk_wakeup(struct net_device __always_unused *netdev,
+ u32 __always_unused queue_id, u32 __always_unused flags)
+{
+ return -ENOTSUPP;
+}
+
+#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
+#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
+#endif /* CONFIG_XDP_SOCKETS */
+#endif /* !_ICE_XSK_H_ */
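The header above uses the standard compile-out idiom: real prototypes under CONFIG_XDP_SOCKETS, no-op static inlines otherwise, so callers never carry their own #ifdefs. The idiom in miniature, with placeholder FOO names:

#ifdef CONFIG_FOO
int foo_do_thing(int arg);
#else
static inline int foo_do_thing(int __always_unused arg)
{
	return -ENOTSUPP;	/* mirrors the stubs above */
}
#endif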
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 6ad775b1a4c5..63ec253ac788 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -127,6 +127,7 @@ struct e1000_adv_tx_context_desc {
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed7e667d7eb2..98346eb064d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2518,6 +2518,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -2526,6 +2527,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -3122,7 +3124,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
if (hw->mac.type >= e1000_i350)
netdev->features |= NETIF_F_HW_TC;
@@ -5696,6 +5698,7 @@ static int igb_tso(struct igb_ring *tx_ring,
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
@@ -5715,7 +5718,8 @@ static int igb_tso(struct igb_ring *tx_ring,
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -5743,12 +5747,19 @@ static int igb_tso(struct igb_ring *tx_ring,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -6225,7 +6236,6 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* adjust max frame to be at least the size of a standard frame */
@@ -6241,8 +6251,8 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igb_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index fd3071f55bd3..c39e921757ba 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests failing to enable both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
@@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0f2b68f4bb0f..6003dc3ff5fd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2437,8 +2437,8 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
ETH_FCS_LEN;
- dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 7e16345d836e..0868677d43ed 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -411,7 +411,6 @@ struct igc_adapter {
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
- u32 *shadow_vfta;
u32 rss_queues;
u32 rss_indir_tbl_init;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f3f2325fe567..f3788f0b95b4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -282,7 +282,10 @@
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
/* Receive Descriptor bit definitions */
-#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
@@ -402,4 +405,7 @@
#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */
#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+/* Maximum size of the MTA register table in all supported adapters */
+#define MAX_MTA_REG 128
+
#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index abb2d72911ff..20f710645746 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -91,6 +91,7 @@ struct igc_mac_info {
u16 mta_reg_count;
u16 uta_reg_count;
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 5eeb4c8caf4a..12aa6b5fcb5d 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -784,3 +784,107 @@ bool igc_enable_mng_pass_thru(struct igc_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igc_hash_mc_addr - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value. See
+ * igc_mta_set()
+ **/
+static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16)mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
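To make the comment block above concrete, here is the case 0 computation as a self-contained program; it reproduces the documented hash 0x563 for 01:AA:00:12:34:56 and derives the register/bit pair consumed by igc_update_mc_addr_list() below:

#include <stdio.h>

int main(void)
{
	const unsigned char mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	unsigned int hash_mask = (128 * 32) - 1;	/* 128 regs -> 0xFFF */
	unsigned int bit_shift = 0;
	unsigned int hash_value, hash_reg, hash_bit;

	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;				/* 4 for a 0xFFF mask */

	hash_value = hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
				  ((unsigned int)mc_addr[5] << bit_shift));
	hash_reg = (hash_value >> 5) & (128 - 1);	/* which MTA register */
	hash_bit = hash_value & 0x1F;			/* which bit within it */

	/* prints hash=0x563 -> MTA[43], bit 3 */
	printf("hash=0x%X -> MTA[%u], bit %u\n", hash_value, hash_reg, hash_bit);
	return 0;
}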
+
+/**
+ * igc_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igc_update_mc_addr_list(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32)i < mc_addr_count; i++) {
+ hash_value = igc_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
+ mc_addr_list += ETH_ALEN;
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
index 782bc995badc..832cccec87cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.h
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -29,6 +29,8 @@ s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex);
bool igc_enable_mng_pass_thru(struct igc_hw *hw);
+void igc_update_mc_addr_list(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
enum igc_mng_mode {
igc_mng_mode_none = 0,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 24888676f69b..9700527dd797 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -795,6 +795,44 @@ static int igc_set_mac(struct net_device *netdev, void *p)
return 0;
}
+/**
+ * igc_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int igc_write_mc_addr_list(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ igc_update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ igc_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
u32 vlan_macip_lens, u32 type_tucmd,
@@ -1163,6 +1201,46 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
+static void igc_rx_checksum(struct igc_ring *ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
+ return;
+
+ /* Rx checksum disabled via ethtool */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (igc_test_staterr(rx_desc,
+ IGC_RXDEXT_STATERR_TCPE |
+ IGC_RXDEXT_STATERR_IPE)) {
+ /* work around errata with sctp packets where the TCPE aka
+ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+ * packets (aka let the stack check the crc32c)
+ */
+ if (!(skb->len == 60 &&
+ test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.csum_err++;
+ u64_stats_update_end(&ring->rx_syncp);
+ }
+ /* let the stack verify checksum errors */
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
+ IGC_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ dev_dbg(ring->dev, "cksum success: bits %08X\n",
+ le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
static inline void igc_rx_hash(struct igc_ring *ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -1189,6 +1267,8 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
{
igc_rx_hash(rx_ring, rx_desc, skb);
+ igc_rx_checksum(rx_ring, rx_desc, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -2192,7 +2272,6 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct igc_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
@@ -2207,8 +2286,8 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igc_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -2518,6 +2597,110 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
IGC_MAC_STATE_QUEUE_STEERING | flags);
}
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
+ * 'flags' is used to indicate what kind of match is made, match is by
+ * default for the destination address, if matching by source address
+ * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_add_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, 0))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue', 'flags' is used to indicate what kind of match need to be
+ * removed, match is by default for the destination address, if
+ * matching by source address is to be removed the flag
+ * IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_del_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue = 0;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return min_t(int, ret, 0);
+}
+
+static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return 0;
+}
+
/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2529,6 +2712,44 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
*/
static void igc_set_rx_mode(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IGC_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = igc_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= IGC_RCTL_MPE;
+ }
+ }
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
+ rctl |= IGC_RCTL_UPE;
+
+ /* update state of unicast and multicast */
+ rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+ wr32(IGC_RCTL, rctl);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
+ rlpml = IGC_MAX_FRAME_BUILD_SKB;
+#endif
+ wr32(IGC_RLPML, rlpml);
}
/**
@@ -3982,6 +4203,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
.ndo_start_xmit = igc_xmit_frame,
+ .ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
@@ -4211,7 +4433,9 @@ static int igc_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Add supported features to the features list*/
+ netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SCTP_CRC;
/* setup the private structure */
err = igc_sw_init(adapter);
@@ -4349,7 +4573,6 @@ static void igc_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
kfree(adapter->mac_table);
- kfree(adapter->shadow_vfta);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cc3196ae5aea..fd9f5d41b594 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -832,9 +832,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int xdp_count, int xdp_idx,
int rxr_count, int rxr_idx)
{
+ int node = dev_to_node(&adapter->pdev->dev);
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count;
u8 tcs = adapter->hw_tcs;
@@ -845,10 +845,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
if (rss_i > 1 && adapter->atr_sample_rate) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
- }
+ cpu = cpumask_local_spread(v_idx, node);
+ node = cpu_to_node(cpu);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 91b3780ddb04..25c097cd8100 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6725,7 +6725,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
(new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
- e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
@@ -7945,6 +7946,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
@@ -7968,7 +7970,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -7998,12 +8001,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+ if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -8639,7 +8650,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -10189,6 +10201,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -10197,6 +10210,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -10906,7 +10920,7 @@ skip_sriov:
IXGBE_GSO_PARTIAL_FEATURES;
if (hw->mac.type >= ixgbe_mac_82599EB)
- netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
#ifdef CONFIG_IXGBE_IPSEC
#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fb942167ee54..3d5caea096fb 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -61,6 +61,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
select MVMDIO
select PHYLINK
+ select PAGE_POOL
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 82ea55ae5053..d5b644131cff 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2959,15 +2959,16 @@ static void set_params(struct mv643xx_eth_private *mp,
static int get_phy_mode(struct mv643xx_eth_private *mp)
{
struct device *dev = mp->dev->dev.parent;
- int iface = -1;
+ phy_interface_t iface;
+ int err;
if (dev->of_node)
- iface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &iface);
/* Historical default if unspecified. We could also read/write
* the interface state in the PSC1
*/
- if (iface < 0)
+ if (!dev->of_node || err)
iface = PHY_INTERFACE_MODE_GMII;
return iface;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e49820675c8c..71a872d46bc4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -37,6 +37,8 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -322,6 +324,13 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
+#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
+#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
+ MVNETA_SKB_HEADROOM))
+#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
+#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
+
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_phys) && \
(addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
@@ -346,6 +355,11 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
+#define MVNETA_XDP_PASS BIT(0)
+#define MVNETA_XDP_DROPPED BIT(1)
+#define MVNETA_XDP_TX BIT(2)
+#define MVNETA_XDP_REDIR BIT(3)
+
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
{ 0x3010, T_REG_32, "good_frames_received", },
@@ -425,6 +439,8 @@ struct mvneta_port {
u32 cause_rx_tx;
struct napi_struct napi;
+ struct bpf_prog *xdp_prog;
+
/* Core clock */
struct clk *clk;
/* AXI clock */
@@ -545,6 +561,20 @@ struct mvneta_rx_desc {
};
#endif
+enum mvneta_tx_buf_type {
+ MVNETA_TYPE_SKB,
+ MVNETA_TYPE_XDP_TX,
+ MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+ enum mvneta_tx_buf_type type;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+};
+
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
u8 id;
@@ -560,8 +590,8 @@ struct mvneta_tx_queue {
int tx_stop_threshold;
int tx_wake_threshold;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
+ /* Array of transmitted buffers */
+ struct mvneta_tx_buf *buf;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -603,6 +633,10 @@ struct mvneta_rx_queue {
u32 pkts_coal;
u32 time_coal;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
/* Virtual address of the RX buffer */
void **buf_virt_addr;
@@ -641,7 +675,6 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
-static int rx_header_size __read_mostly = 128;
/* HW BM need that each port be identify by a unique ID */
static int global_port_id;
@@ -1761,24 +1794,25 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
int i;
for (i = 0; i < num; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
txq->txq_get_index;
- struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
- if (skb) {
- bytes_compl += skb->len;
- pkts_compl++;
- }
mvneta_txq_inc_get(txq);
- if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+ buf->type != MVNETA_TYPE_XDP_TX)
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE);
- if (!skb)
- continue;
- dev_kfree_skb_any(skb);
+ if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+ bytes_compl += buf->skb->len;
+ pkts_compl++;
+ dev_kfree_skb_any(buf->skb);
+ } else if (buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) {
+ xdp_return_frame(buf->xdpf);
+ }
}
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1815,20 +1849,14 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
dma_addr_t phys_addr;
struct page *page;
- page = __dev_alloc_page(gfp_mask);
+ page = page_pool_alloc_pages(rxq->page_pool,
+ gfp_mask | __GFP_NOWARN);
if (!page)
return -ENOMEM;
- /* map page for use */
- phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- __free_page(page);
- return -ENOMEM;
- }
-
- phys_addr += pp->rx_offset_correction;
+ phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
return 0;
}
@@ -1894,10 +1922,29 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(data);
+ page_pool_put_page(rxq->page_pool, data, false);
+ }
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+}
+
+static void
+mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
+ u32 len, bool tx)
+{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ if (tx) {
+ stats->tx_packets += pkts;
+ stats->tx_bytes += len;
+ } else {
+ stats->rx_packets += pkts;
+ stats->rx_bytes += len;
}
+ u64_stats_update_end(&stats->syncp);
}
static inline
@@ -1925,43 +1972,300 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
return i;
}
+static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct mvneta_tx_desc *tx_desc;
+ struct mvneta_tx_buf *buf;
+ dma_addr_t dma_addr;
+
+ if (txq->count >= txq->tx_stop_threshold)
+ return MVNETA_XDP_DROPPED;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ buf = &txq->buf[txq->txq_put_index];
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ return MVNETA_XDP_DROPPED;
+ }
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) +
+ sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = xdpf;
+
+ tx_desc->command = MVNETA_TXD_FLZ_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = xdpf->len;
+
+ mvneta_update_stats(pp, 1, xdpf->len, true);
+ mvneta_txq_inc_put(txq);
+ txq->pending++;
+ txq->count++;
+
+ return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int cpu;
+ u32 ret;
+
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ return MVNETA_XDP_DROPPED;
+
+ cpu = smp_processor_id();
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ if (ret == MVNETA_XDP_TX)
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ int i, drops = 0;
+ u32 ret;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ for (i = 0; i < num_frame; i++) {
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ if (ret != MVNETA_XDP_TX) {
+ xdp_return_frame_rx_napi(frames[i]);
+ drops++;
+ }
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ return num_frame - drops;
+}
+
+static int
+mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ struct bpf_prog *prog, struct xdp_buff *xdp)
+{
+ u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ ret = MVNETA_XDP_PASS;
+ break;
+ case XDP_REDIRECT: {
+ int err;
+
+ err = xdp_do_redirect(pp->dev, xdp, prog);
+ if (err) {
+ ret = MVNETA_XDP_DROPPED;
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
+ } else {
+ ret = MVNETA_XDP_REDIR;
+ }
+ break;
+ }
+ case XDP_TX:
+ ret = mvneta_xdp_xmit_back(pp, xdp);
+ if (ret != MVNETA_XDP_TX)
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(pp->dev, prog, act);
+ /* fall through */
+ case XDP_DROP:
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
+ ret = MVNETA_XDP_DROPPED;
+ break;
+ }
+
+ return ret;
+}
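The recycling calls above (page_pool_get_dma_addr(), __page_pool_put_page()) assume each Rx queue owns a page_pool that maps its pages up front. A sketch of the pool such a queue would create; the parameter values are assumptions, and mvneta flips dma_dir to DMA_BIDIRECTIONAL when an XDP program may write or XDP_TX the frame:

#include <net/page_pool.h>

static struct page_pool *rxq_pool_create_sketch(struct device *dev, int size)
{
	struct page_pool_params pp_params = {
		.order = 0,			/* order-0 pages */
		.flags = PP_FLAG_DMA_MAP,	/* pool maps pages for us */
		.pool_size = size,		/* one entry per Rx descriptor */
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* pool or ERR_PTR() */
}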
+
+static int
+mvneta_swbm_rx_frame(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog,
+ struct page *page, u32 *xdp_ret)
+{
+ unsigned char *data = page_address(page);
+ int data_len = -MVNETA_MH_SIZE, len;
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+
+ if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len += len;
+ } else {
+ len = rx_desc->data_size;
+ data_len += len - ETH_FCS_LEN;
+ }
+
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+
+ /* Prefetch header */
+ prefetch(data);
+
+ xdp->data_hard_start = data;
+ xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
+ xdp->data_end = xdp->data + data_len;
+ xdp_set_data_meta_invalid(xdp);
+
+ if (xdp_prog) {
+ u32 ret;
+
+ ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
+ if (ret != MVNETA_XDP_PASS) {
+ mvneta_update_stats(pp, 1,
+ xdp->data_end - xdp->data,
+ false);
+ rx_desc->buf_phys_addr = 0;
+ *xdp_ret |= ret;
+ return ret;
+ }
+ }
+
+ rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ return -ENOMEM;
+ }
+ page_pool_release_page(rxq->page_pool, page);
+
+ skb_reserve(rxq->skb,
+ xdp->data - xdp->data_hard_start);
+ skb_put(rxq->skb, xdp->data_end - xdp->data);
+ mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
+
+ rxq->left_size = rx_desc->data_size - len;
+ rx_desc->buf_phys_addr = 0;
+
+ return 0;
+}
+
+static void
+mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct page *page)
+{
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+ int data_len, len;
+
+ if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len = len;
+ } else {
+ len = rxq->left_size;
+ data_len = len - ETH_FCS_LEN;
+ }
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+ if (data_len > 0) {
+ /* refill descriptor with new buffer later */
+ skb_add_rx_frag(rxq->skb,
+ skb_shinfo(rxq->skb)->nr_frags,
+ page, pp->rx_offset_correction, data_len,
+ PAGE_SIZE);
+ }
+ page_pool_release_page(rxq->page_pool, page);
+ rx_desc->buf_phys_addr = 0;
+ rxq->left_size -= len;
+}
+
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
+ int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
struct net_device *dev = pp->dev;
- int rx_todo, rx_proc;
- int refill = 0;
- u32 rcvd_pkts = 0;
- u32 rcvd_bytes = 0;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp_buf;
+ int rx_todo, refill;
+ u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
- rx_proc = 0;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(pp->xdp_prog);
+ xdp_buf.rxq = &rxq->xdp_rxq;
/* Fairness NAPI loop */
- while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
+ while (rx_proc < budget && rx_proc < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- unsigned char *data;
- struct page *page;
- dma_addr_t phys_addr;
u32 rx_status, index;
- int rx_bytes, skb_size, copy_size;
- int frag_num, frag_size, frag_offset;
+ struct page *page;
index = rx_desc - rxq->descs;
page = (struct page *)rxq->buf_virt_addr[index];
- data = page_address(page);
- /* Prefetch header */
- prefetch(data);
- phys_addr = rx_desc->buf_phys_addr;
rx_status = rx_desc->status;
rx_proc++;
rxq->refill_num++;
if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ int err;
+
/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
@@ -1969,85 +2273,18 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* leave the descriptor untouched */
continue;
}
- rx_bytes = rx_desc->data_size -
- (ETH_FCS_LEN + MVNETA_MH_SIZE);
- /* Allocate small skb for each new packet */
- skb_size = max(rx_copybreak, rx_header_size);
- rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
- if (unlikely(!rxq->skb)) {
- netdev_err(dev,
- "Can't allocate skb on queue %d\n",
- rxq->id);
- dev->stats.rx_dropped++;
- rxq->skb_alloc_err++;
+ err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
+ xdp_prog, page, &xdp_ret);
+ if (err)
continue;
- }
- copy_size = min(skb_size, rx_bytes);
-
- /* Copy data from buffer to SKB, skip Marvell header */
- memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
- copy_size);
- skb_put(rxq->skb, copy_size);
- rxq->left_size = rx_bytes - copy_size;
-
- mvneta_rx_csum(pp, rx_status, rxq->skb);
- if (rxq->left_size == 0) {
- int size = copy_size + MVNETA_MH_SIZE;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr, 0,
- size,
- DMA_FROM_DEVICE);
-
- /* leave the descriptor and buffer untouched */
- } else {
- /* refill descriptor with new buffer later */
- rx_desc->buf_phys_addr = 0;
-
- frag_num = 0;
- frag_offset = copy_size + MVNETA_MH_SIZE;
- frag_size = min(rxq->left_size,
- (int)(PAGE_SIZE - frag_offset));
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- rxq->left_size -= frag_size;
- }
} else {
- /* Middle or Last descriptor */
if (unlikely(!rxq->skb)) {
pr_debug("no skb for rx_status 0x%x\n",
rx_status);
continue;
}
- if (!rxq->left_size) {
- /* last descriptor has only FCS */
- /* and can be discarded */
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr, 0,
- ETH_FCS_LEN,
- DMA_FROM_DEVICE);
- /* leave the descriptor and buffer untouched */
- } else {
- /* refill descriptor with new buffer later */
- rx_desc->buf_phys_addr = 0;
-
- frag_num = skb_shinfo(rxq->skb)->nr_frags;
- frag_offset = 0;
- frag_size = min(rxq->left_size,
- (int)(PAGE_SIZE - frag_offset));
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
-
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- rxq->left_size -= frag_size;
- }
+ mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2072,17 +2309,14 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* clean incomplete skb pointer in queue */
rxq->skb = NULL;
- rxq->left_size = 0;
}
+ rcu_read_unlock();
- if (rcvd_pkts) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ if (xdp_ret & MVNETA_XDP_REDIR)
+ xdp_do_flush_map();
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
+ if (rcvd_pkts)
+ mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2206,14 +2440,8 @@ err_drop_frame:
napi_gro_receive(napi, skb);
}
- if (rcvd_pkts) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
+ if (rcvd_pkts)
+ mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2225,16 +2453,19 @@ static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
- struct mvneta_tx_desc *tx_desc;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
- txq->tx_skb[txq->txq_put_index] = NULL;
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
tx_desc->command = mvneta_skb_tx_csum(pp, skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
+
mvneta_txq_inc_put(txq);
}
@@ -2243,6 +2474,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
struct sk_buff *skb, char *data, int size,
bool last_tcp, bool is_last)
{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2256,7 +2488,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
}
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
if (last_tcp) {
/* last descriptor in the TCP packet */
@@ -2264,7 +2497,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
/* last descriptor in SKB */
if (is_last)
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
}
mvneta_txq_inc_put(txq);
return 0;
@@ -2349,6 +2582,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
int i, nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nr_frags; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);
@@ -2368,12 +2602,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
}
+ buf->type = MVNETA_TYPE_SKB;
mvneta_txq_inc_put(txq);
}
@@ -2401,6 +2636,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
int len = skb->len;
int frags = 0;
@@ -2433,16 +2669,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ buf->type = MVNETA_TYPE_SKB;
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVNETA_TXD_FLZ_DESC;
tx_desc->command = tx_cmd;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
mvneta_txq_inc_put(txq);
} else {
/* First but not Last */
tx_cmd |= MVNETA_TXD_F_DESC;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
mvneta_txq_inc_put(txq);
tx_desc->command = tx_cmd;
/* Continue with other skb fragments */
@@ -2459,7 +2696,6 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
netdev_tx_sent_queue(nq, len);
@@ -2474,10 +2710,7 @@ out:
else
txq->pending += frags;
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
- u64_stats_update_end(&stats->syncp);
+ mvneta_update_stats(pp, 1, len, true);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -2830,11 +3063,57 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, int size)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = cpu_to_node(0),
+ .dev = pp->dev->dev.parent,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = pp->rx_offset_correction,
+ .max_len = MVNETA_MAX_RX_BUF_SIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
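As an illustration (not mvneta's actual refill path), a pool created this way hands out pages that are already DMA-mapped thanks to PP_FLAG_DMA_MAP; a hedged sketch using the generic page_pool helpers:

#include <net/page_pool.h>

/* Hypothetical helper: allocate one rx buffer from the pool and report its
 * bus address via page_pool_get_dma_addr(). */
static struct page *example_rx_buf_alloc(struct page_pool *pool,
					 dma_addr_t *dma)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;
	*dma = page_pool_get_dma_addr(page);
	return page;
}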
+
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- int i;
+ int i, err;
+
+ err = mvneta_create_page_pool(pp, rxq, num);
+ if (err < 0)
+ return err;
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
@@ -2908,7 +3187,7 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
- PAGE_SIZE :
+ MVNETA_MAX_RX_BUF_SIZE :
MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
@@ -2989,9 +3268,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;
- txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
- GFP_KERNEL);
- if (!txq->tx_skb) {
+ txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+ if (!txq->buf) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3003,7 +3281,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
- kfree(txq->tx_skb);
+ kfree(txq->buf);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3056,7 +3334,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
- kfree(txq->tx_skb);
+ kfree(txq->buf);
if (txq->tso_hdrs)
dma_free_coherent(pp->dev->dev.parent,
@@ -3263,6 +3541,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
+ if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ return -EINVAL;
+ }
+
dev->mtu = mtu;
if (!netif_running(dev)) {
@@ -3411,8 +3694,8 @@ static void mvneta_validate(struct phylink_config *config,
phylink_helper_basex_speed(state);
}
-static int mvneta_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvneta_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
@@ -3438,8 +3721,6 @@ static int mvneta_mac_link_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_RX;
if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static void mvneta_mac_an_restart(struct phylink_config *config)
@@ -3632,7 +3913,7 @@ static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
static const struct phylink_mac_ops mvneta_phylink_ops = {
.validate = mvneta_validate,
- .mac_link_state = mvneta_mac_link_state,
+ .mac_pcs_get_state = mvneta_mac_pcs_get_state,
.mac_an_restart = mvneta_mac_an_restart,
.mac_config = mvneta_mac_config,
.mac_link_down = mvneta_mac_link_down,
@@ -3932,6 +4213,47 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
+static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(dev);
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!pp->xdp_prog != !!prog;
+ if (running && need_update)
+ mvneta_stop(dev);
+
+ old_prog = xchg(&pp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running && need_update)
+ return mvneta_open(dev);
+
+ return 0;
+}
+
+static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
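From userspace, .ndo_bpf is reached through the netlink XDP attach path. A hedged caller-side sketch using the libbpf API of this era (bpf_prog_load() and bpf_set_link_xdp_fd(); the interface name and object file are hypothetical):

#include <bpf/libbpf.h>
#include <net/if.h>

/* Sketch: load an object file and attach its first program to eth0.
 * Error handling trimmed for brevity. */
int attach_xdp_example(void)
{
	struct bpf_object *obj;
	int ifindex = if_nametoindex("eth0");	/* hypothetical interface */
	int prog_fd;

	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
		return -1;
	/* Triggers XDP_SETUP_PROG -> mvneta_xdp_setup() in the driver. */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, 0);
}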
+
/* Ethtool methods */
/* Set link ksettings (phy address, speed) for ethtools */
@@ -4328,6 +4650,8 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_fix_features = mvneta_fix_features,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
+ .ndo_bpf = mvneta_xdp,
+ .ndo_xdp_xmit = mvneta_xdp_xmit,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
@@ -4477,9 +4801,9 @@ static int mvneta_probe(struct platform_device *pdev)
struct phy *comphy;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
const char *mac_from;
int tx_csum_limit;
- int phy_mode;
int err;
int cpu;
@@ -4492,10 +4816,9 @@ static int mvneta_probe(struct platform_device *pdev)
if (dev->irq == 0)
return -EINVAL;
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(dn, &phy_mode);
+ if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto err_free_irq;
}
@@ -4618,7 +4941,7 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
- pp->rx_offset_correction = 0; /* not relevant for SW BM */
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 111b3b8239e1..62dc2f362a16 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2863,7 +2863,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int pool)
{
@@ -2871,7 +2871,6 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
phys_addr_t phys_addr;
void *buf;
- /* No recycle or too many buffers are in use, so allocate a new skb */
buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
GFP_ATOMIC);
if (!buf)
@@ -2957,14 +2956,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
* by the hardware, and the information about the buffer is
* comprised by the RX descriptor.
*/
- if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-err_drop_frame:
- dev->stats.rx_errors++;
- mvpp2_rx_error(port, rx_desc);
- /* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
- continue;
- }
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ rx_bytes + MVPP2_MH_SIZE,
+ DMA_FROM_DEVICE);
+ prefetch(data);
if (bm_pool->frag_size > PAGE_SIZE)
frag_size = 0;
@@ -2983,8 +2981,9 @@ err_drop_frame:
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2995,6 +2994,13 @@ err_drop_frame:
mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(napi, skb);
+ continue;
+
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
if (rcvd_pkts) {
@@ -4817,8 +4823,8 @@ empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_link_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
+static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
{
u32 val;
@@ -4837,8 +4843,8 @@ static void mvpp22_xlg_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_RX;
}
-static void mvpp2_gmac_link_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
+static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
{
u32 val;
@@ -4871,8 +4877,8 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_TX;
}
-static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct mvpp2_port *port = container_of(config, struct mvpp2_port,
phylink_config);
@@ -4882,13 +4888,12 @@ static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_link_state(port, state);
- return 1;
+ mvpp22_xlg_pcs_get_state(port, state);
+ return;
}
}
- mvpp2_gmac_link_state(port, state);
- return 1;
+ mvpp2_gmac_pcs_get_state(port, state);
}
static void mvpp2_mac_an_restart(struct phylink_config *config)
@@ -5180,7 +5185,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_link_state = mvpp2_phylink_mac_link_state,
+ .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
.mac_an_restart = mvpp2_mac_an_restart,
.mac_config = mvpp2_mac_config,
.mac_link_up = mvpp2_mac_link_up,
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 711ada7139d3..fb34fbd62088 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -16,3 +16,12 @@ config OCTEONTX2_AF
Unit's admin function manager which manages all RVU HW resources
and provides a medium to other PF/VFs to configure HW. Should be
enabled for other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+ bool "Disable caching of dynamic entries in NDC"
+ depends on OCTEONTX2_AF
+ default n
+ ---help---
+	  This config option disables caching of dynamic entries, such as NIX
+	  SQEs and NPA stack pages, in the NDC. It also locks down the NIX
+	  SQ/CQ/RQ/RSS and NPA Aura/Pool contexts.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 06329acf9c2c..1b25948c662b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 6d55e3d0b7ea..5ca788691911 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -138,6 +138,16 @@ void *cgx_get_pdata(int cgx_id)
}
EXPORT_SYMBOL(cgx_get_pdata);
+int cgx_get_cgxid(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -EINVAL;
+
+ return cgx->cgx_id;
+}
+
/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous event
 * (with the latest link status) can reach the destination before this function returns
@@ -281,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
+/* Enable or disable forwarding received pause frames to Tx block */
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
+
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
@@ -321,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg, last;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ last = cfg;
+ if (enable)
+ cfg |= DATA_PKT_TX_EN;
+ else
+ cfg &= ~DATA_PKT_TX_EN;
+
+ if (cfg != last)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return !!(last & DATA_PKT_TX_EN);
+}
+EXPORT_SYMBOL(cgx_lmac_tx_enable);
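Since cgx_lmac_tx_enable() returns the previous enable state, a caller can save and restore it around a temporary quiesce. A hedged caller-side sketch (the surrounding function is hypothetical):

/* Sketch: pause LMAC Tx, do some reconfiguration, then restore whatever
 * state was in effect before. 'cgxd' and 'lmac_id' come from the caller. */
static void example_txschq_update(void *cgxd, int lmac_id)
{
	int restore = cgx_lmac_tx_enable(cgxd, lmac_id, false);

	/* ... reconfigure scheduling queues while Tx is quiet ... */

	if (restore)
		cgx_lmac_tx_enable(cgxd, lmac_id, true);
}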
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 206dc5dc1df8..9343bf39cfac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -56,6 +56,11 @@
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+#define CGXX_SMUX_RX_FRM_CTL 0x20020
+#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
+#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
@@ -63,6 +68,11 @@
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
+enum cgx_nix_stat_type {
+ NIX_STATS_RX,
+ NIX_STATS_TX,
+};
+
enum LMAC_TYPE {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
@@ -96,6 +106,7 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
int cgx_get_cgxcnt_max(void);
+int cgx_get_cgxid(void *cgxd);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
@@ -104,9 +115,11 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fb3ba4968a9b..473d9751601f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index e332e82fc066..784207bae5f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -196,4 +196,20 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+/* NDC info */
+enum ndc_idx_e {
+ NIX0_RX = 0x0,
+ NIX0_TX = 0x1,
+ NPA0_U = 0x2,
+};
+
+enum ndc_ctype_e {
+ CACHING = 0x0,
+ BYPASS = 0x1,
+};
+
+#define NDC_MAX_PORT 6
+#define NDC_READ_TRANS 0
+#define NDC_WRITE_TRANS 1
+
#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index d6f9ed8ea966..387e33fa417a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -19,17 +19,20 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
- tx_hdr = mdev->mbase + mbox->tx_start;
- rx_hdr = mdev->mbase + mbox->rx_start;
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
spin_lock(&mdev->mbox_lock);
mdev->msg_size = 0;
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
+ tx_hdr->msg_size = 0;
rx_hdr->num_msgs = 0;
+ rx_hdr->msg_size = 0;
spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);
@@ -133,16 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- int timeout = 0, sleep = 1;
+ struct device *sender = &mbox->pdev->dev;
- while (mdev->num_msgs != mdev->msgs_acked) {
- msleep(sleep);
- timeout += sleep;
- if (timeout >= MBOX_RSP_TIMEOUT)
- return -EIO;
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ usleep_range(800, 1000);
}
- return 0;
+ dev_dbg(sender, "timed out while waiting for rsp\n");
+ return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
@@ -162,13 +166,25 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
- tx_hdr = mdev->mbase + mbox->tx_start;
- rx_hdr = mdev->mbase + mbox->rx_start;
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+	/* If a bounce buffer is implemented, copy mbox messages from the
+	 * bounce buffer to hw mbox memory.
+	 */
+ if (mdev->mbase != hw_mbase)
+ memcpy(hw_mbase + mbox->tx_start + msgs_offset,
+ mdev->mbase + mbox->tx_start + msgs_offset,
+ mdev->msg_size);
spin_lock(&mdev->mbox_lock);
+
+ tx_hdr->msg_size = mdev->msg_size;
+
/* Reset header for next messages */
mdev->msg_size = 0;
mdev->rsp_size = 0;
@@ -215,7 +231,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
/* Clear the whole msg region */
- memset(msghdr, 0, sizeof(*msghdr) + size);
+ memset(msghdr, 0, size);
/* Init message header with reset values */
msghdr->ver = OTX2_MBOX_VERSION;
mdev->msg_size += size;
@@ -236,8 +252,10 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
u16 msgs;
+ spin_lock(&mdev->mbox_lock);
+
if (mdev->num_msgs != mdev->msgs_acked)
- return ERR_PTR(-ENODEV);
+ goto error;
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
struct mbox_msghdr *pmsg = mdev->mbase + imsg;
@@ -245,18 +263,55 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
if (msg == pmsg) {
if (pmsg->id != prsp->id)
- return ERR_PTR(-ENODEV);
+ goto error;
+ spin_unlock(&mdev->mbox_lock);
return prsp;
}
- imsg = pmsg->next_msgoff;
- irsp = prsp->next_msgoff;
+ imsg = mbox->tx_start + pmsg->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
}
+error:
+ spin_unlock(&mdev->mbox_lock);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long ireq = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = -ENODEV;
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto exit;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *preq = mdev->mbase + ireq;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (preq->id != prsp->id)
+ goto exit;
+ if (prsp->rc) {
+ rc = prsp->rc;
+ goto exit;
+ }
+
+ ireq = mbox->tx_start + preq->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+ rc = 0;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
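Taken together with otx2_mbox_msg_send() and otx2_mbox_wait_for_rsp(), the new helper gives callers a complete send/wait/verify sequence; a hedged caller-side sketch (message allocation elided):

/* Sketch of the expected calling sequence for one mbox transaction on
 * device 0. Request setup via otx2_mbox_alloc_msg_rsp() is elided. */
static int example_mbox_roundtrip(struct otx2_mbox *mbox)
{
	int err;

	/* ... allocate and fill in the request message ... */
	otx2_mbox_msg_send(mbox, 0);

	err = otx2_mbox_wait_for_rsp(mbox, 0);	/* -EIO on timeout */
	if (err)
		return err;

	/* Walk every req/rsp pair and surface the first error code. */
	return otx2_mbox_check_rsp_msgs(mbox, 0);
}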
+
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 76a4575d18ff..a589748f1240 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 2000 /* Time (ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -75,6 +75,7 @@ struct otx2_mbox {
/* Header which precedes all mbox messages */
struct mbox_hdr {
+ u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
};
@@ -103,6 +104,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp);
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *msg);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
u16 pcifunc, u16 id);
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
@@ -125,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -300,6 +303,12 @@ struct msix_offset_rsp {
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
+struct get_hw_cap_rsp {
+ struct mbox_msghdr hdr;
+ u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ u8 nix_shaping; /* Is shaping and coloring supported */
+};
+
/* CGX mbox message formats */
struct cgx_stats_rsp {
@@ -352,6 +361,7 @@ struct npa_lf_alloc_req {
int node;
int aura_sz; /* No of auras */
u32 nr_pools; /* No of pools */
+ u64 way_mask;
};
struct npa_lf_alloc_rsp {
@@ -442,6 +452,7 @@ struct nix_lf_alloc_req {
u16 npa_func;
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ u64 way_mask;
};
struct nix_lf_alloc_rsp {
@@ -512,6 +523,9 @@ struct nix_txsch_alloc_rsp {
/* Scheduler queue list allocated at each level */
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 aggr_level; /* Traffic aggregation scheduler level */
+ u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
+ u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index? */
};
struct nix_txsch_free_req {
@@ -578,6 +592,18 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
+#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
+#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
+#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
+#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
+#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
+#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
+#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
+#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
+#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
+#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 8d6d90fdfb73..3803af9231c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -27,26 +27,45 @@ enum NPC_LID_E {
enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_IH_8_ETHER,
+ NPC_LT_LA_IH_4_ETHER,
+ NPC_LT_LA_IH_2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM0 = 0xE,
+ NPC_LT_LA_CUSTOM1 = 0xF,
};
enum npc_kpu_lb_ltype {
NPC_LT_LB_ETAG = 1,
NPC_LT_LB_CTAG,
- NPC_LT_LB_STAG,
+ NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_QINQ,
NPC_LT_LB_ITAG,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_CUSTOM0 = 0xE,
+ NPC_LT_LB_CUSTOM1 = 0xF,
};
enum npc_kpu_lc_ltype {
NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
NPC_LT_LC_ARP,
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_CUSTOM0 = 0xE,
+ NPC_LT_LC_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes up to SCTP, otherwise it will
@@ -57,49 +76,67 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_UDP,
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
- NPC_LT_LD_IGMP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_IGMP = 8,
NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
- NPC_LT_LD_GRE_MPLS,
- NPC_LT_LD_GRE_NSH,
- NPC_LT_LD_TU_MPLS,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_CUSTOM0 = 0xE,
+ NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
- NPC_LT_LE_TU_ETHER = 1,
- NPC_LT_LE_TU_PPP,
- NPC_LT_LE_TU_MPLS_IN_NSH,
- NPC_LT_LE_TU_3RD_NSH,
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+ NPC_LT_LE_CUSTOM0 = 0xE,
+ NPC_LT_LE_CUSTOM1 = 0xF,
};
enum npc_kpu_lf_ltype {
- NPC_LT_LF_TU_IP = 1,
- NPC_LT_LF_TU_IP6,
- NPC_LT_LF_TU_ARP,
- NPC_LT_LF_TU_MPLS_IP,
- NPC_LT_LF_TU_MPLS_IP6,
- NPC_LT_LF_TU_MPLS_ETHER,
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+ NPC_LT_LF_CUSTOM0 = 0xE,
+ NPC_LT_LF_CUSTOM1 = 0xF,
};
enum npc_kpu_lg_ltype {
- NPC_LT_LG_TU_TCP = 1,
- NPC_LT_LG_TU_UDP,
- NPC_LT_LG_TU_SCTP,
- NPC_LT_LG_TU_ICMP,
- NPC_LT_LG_TU_IGMP,
- NPC_LT_LG_TU_ICMP6,
- NPC_LT_LG_TU_ESP,
- NPC_LT_LG_TU_AH,
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+ NPC_LT_LG_CUSTOM0 = 0xE,
+ NPC_LT_LG_CUSTOM1 = 0xF,
};
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
enum npc_kpu_lh_ltype {
- NPC_LT_LH_TCP_DATA = 1,
- NPC_LT_LH_HTTP_DATA,
- NPC_LT_LH_HTTPS_DATA,
- NPC_LT_LH_PPTP_DATA,
- NPC_LT_LH_UDP_DATA,
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+ NPC_LT_LH_CUSTOM0 = 0xE,
+ NPC_LT_LH_CUSTOM1 = 0xF,
};
struct npc_kpu_profile_cam {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index b2ce957605bb..aa2727e6211a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -11,6 +11,11 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
+#define NPC_KPU_PROFILE_VER 0x0000000100050000
+
+#define NPC_IH_W 0x8000
+#define NPC_IH_UTAG 0x2000
+
#define NPC_ETYPE_IP 0x0800
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
@@ -27,6 +32,7 @@
#define NPC_ETYPE_TRANS_ETH_BR 0x6558
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
+#define NPC_ETYPE_DSA 0xdada
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -44,13 +50,19 @@
#define NPC_IPNH_NONH 59
#define NPC_IPNH_DEST 60
#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MOBILITY 135
#define NPC_IPNH_MPLS 137
+#define NPC_IPNH_HOSTID 139
+#define NPC_IPNH_SHIM6 140
+#define NPC_UDP_PORT_PTP_E 319
+#define NPC_UDP_PORT_PTP_G 320
#define NPC_UDP_PORT_GTPC 2123
#define NPC_UDP_PORT_GTPU 2152
#define NPC_UDP_PORT_VXLAN 4789
#define NPC_UDP_PORT_VXLANGPE 4790
#define NPC_UDP_PORT_GENEVE 6081
+#define NPC_UDP_PORT_MPLS 6635
#define NPC_VXLANGPE_NP_IP 0x1
#define NPC_VXLANGPE_NP_IP6 0x2
@@ -72,11 +84,17 @@
#define NPC_MPLS_S 0x0100
+#define NPC_IP_TTL_MASK 0xff00
#define NPC_IP_VER_4 0x4000
#define NPC_IP_VER_6 0x6000
#define NPC_IP_VER_MASK 0xf000
#define NPC_IP_HDR_LEN_5 0x0500
#define NPC_IP_HDR_LEN_MASK 0x0f00
+#define NPC_IP_HDR_MF 0x2000
+#define NPC_IP_HDR_FRAGOFF 0x1fff
+
+#define NPC_IP6_HOP_MASK 0x00ff
+#define NPC_IP6_FRAG_FRAGOFF 0xfff8
#define NPC_GRE_F_CSUM (0x1 << 15)
#define NPC_GRE_F_ROUTE (0x1 << 14)
@@ -108,22 +126,44 @@
#define NPC_GTP_MT_G_PDU 0xff
#define NPC_GTP_MT_MASK 0xff
+#define NPC_TCP_FLAGS_FIN 0x0001
+#define NPC_TCP_FLAGS_SYN 0x0002
+#define NPC_TCP_FLAGS_RST 0x0004
+#define NPC_TCP_FLAGS_PSH 0x0008
+#define NPC_TCP_FLAGS_ACK 0x0010
+#define NPC_TCP_FLAGS_URG 0x0020
+#define NPC_TCP_FLAGS_MASK 0x003f
+
#define NPC_TCP_DATA_OFFSET_5 0x5000
#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+#define NPC_DSA_EXTEND 0x1000
+#define NPC_DSA_EDSA 0x8000
+
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
- NPC_S_KPU1_PKI,
+ NPC_S_KPU1_IH_NIX,
+ NPC_S_KPU1_IH,
+ NPC_S_KPU1_EXDSA,
+ NPC_S_KPU1_HIGIG2,
+ NPC_S_KPU1_IH_NIX_HIGIG2,
NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_ITAG,
+ NPC_S_KPU2_PREHEADER,
+ NPC_S_KPU2_EXDSA,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
NPC_S_KPU3_ITAG,
+ NPC_S_KPU3_CTAG_C,
+ NPC_S_KPU3_STAG_C,
+ NPC_S_KPU3_QINQ_C,
+ NPC_S_KPU3_DSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU5_IP,
@@ -136,7 +176,12 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU6_IP6_HOP_DEST,
+ NPC_S_KPU6_IP6_ROUT,
+ NPC_S_KPU6_IP6_FRAG,
NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU7_IP6_ROUT,
+ NPC_S_KPU7_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -146,16 +191,26 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_GRE,
NPC_S_KPU8_ESP,
NPC_S_KPU8_AH,
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
- NPC_S_KPU9_TU_MPLS,
- NPC_S_KPU9_TU_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE,
+ NPC_S_KPU9_TU_MPLS_IN_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_IP,
+ NPC_S_KPU9_TU_MPLS_IN_UDP,
+ NPC_S_KPU9_TU_NSH_IN_GRE,
+ NPC_S_KPU9_VXLAN,
+ NPC_S_KPU9_VXLANGPE,
+ NPC_S_KPU9_GENEVE,
+ NPC_S_KPU9_GTPC,
+ NPC_S_KPU9_GTPU,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
- NPC_S_KPU10_TU_NSH,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE,
NPC_S_KPU11_TU_ETHER,
NPC_S_KPU11_TU_PPP,
NPC_S_KPU11_TU_MPLS_IN_NSH,
- NPC_S_KPU11_TU_3RD_NSH,
+ NPC_S_KPU11_TU_MPLS_PL,
+ NPC_S_KPU11_TU_MPLS,
+ NPC_S_KPU11_TU_ETHER_IN_NSH,
NPC_S_KPU12_TU_IP,
NPC_S_KPU12_TU_IP6,
NPC_S_KPU12_TU_ARP,
@@ -174,135 +229,172 @@ enum npc_kpu_parser_state {
NPC_S_KPU16_PPTP_DATA,
NPC_S_KPU16_TCP_DATA,
NPC_S_KPU16_UDP_DATA,
+ NPC_S_KPU16_UDP_PTP,
NPC_S_LAST /* has to be the last item */
};
-enum npc_kpu_parser_flag {
- NPC_F_NA = 0,
- NPC_F_PKI,
- NPC_F_PKI_VLAN,
- NPC_F_PKI_ETAG,
- NPC_F_PKI_ITAG,
- NPC_F_PKI_MPLS,
- NPC_F_PKI_NSH,
- NPC_F_ETYPE_UNK,
- NPC_F_ETHER_VLAN,
- NPC_F_ETHER_ETAG,
- NPC_F_ETHER_ITAG,
- NPC_F_ETHER_MPLS,
- NPC_F_ETHER_NSH,
- NPC_F_STAG_CTAG,
- NPC_F_STAG_CTAG_UNK,
- NPC_F_STAG_STAG_CTAG,
- NPC_F_STAG_STAG_STAG,
- NPC_F_QINQ_CTAG,
- NPC_F_QINQ_CTAG_UNK,
- NPC_F_QINQ_QINQ_CTAG,
- NPC_F_QINQ_QINQ_QINQ,
- NPC_F_BTAG_ITAG,
- NPC_F_BTAG_ITAG_STAG,
- NPC_F_BTAG_ITAG_CTAG,
- NPC_F_BTAG_ITAG_UNK,
- NPC_F_ETAG_CTAG,
- NPC_F_ETAG_BTAG_ITAG,
- NPC_F_ETAG_STAG,
- NPC_F_ETAG_QINQ,
- NPC_F_ETAG_ITAG,
- NPC_F_ETAG_ITAG_STAG,
- NPC_F_ETAG_ITAG_CTAG,
- NPC_F_ETAG_ITAG_UNK,
- NPC_F_ITAG_STAG_CTAG,
- NPC_F_ITAG_STAG,
- NPC_F_ITAG_CTAG,
- NPC_F_MPLS_4_LABELS,
- NPC_F_MPLS_3_LABELS,
- NPC_F_MPLS_2_LABELS,
- NPC_F_IP_HAS_OPTIONS,
- NPC_F_IP_IP_IN_IP,
- NPC_F_IP_6TO4,
- NPC_F_IP_MPLS_IN_IP,
- NPC_F_IP_UNK_PROTO,
- NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
- NPC_F_IP_6TO4_HAS_OPTIONS,
- NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
- NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
- NPC_F_IP6_HAS_EXT,
- NPC_F_IP6_TUN_IP6,
- NPC_F_IP6_MPLS_IN_IP,
- NPC_F_TCP_HAS_OPTIONS,
- NPC_F_TCP_HTTP,
- NPC_F_TCP_HTTPS,
- NPC_F_TCP_PPTP,
- NPC_F_TCP_UNK_PORT,
- NPC_F_TCP_HTTP_HAS_OPTIONS,
- NPC_F_TCP_HTTPS_HAS_OPTIONS,
- NPC_F_TCP_PPTP_HAS_OPTIONS,
- NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
- NPC_F_UDP_VXLAN,
- NPC_F_UDP_VXLAN_NOVNI,
- NPC_F_UDP_VXLAN_NOVNI_NSH,
- NPC_F_UDP_VXLANGPE,
- NPC_F_UDP_VXLANGPE_NSH,
- NPC_F_UDP_VXLANGPE_MPLS,
- NPC_F_UDP_VXLANGPE_NOVNI,
- NPC_F_UDP_VXLANGPE_NOVNI_NSH,
- NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
- NPC_F_UDP_VXLANGPE_UNK,
- NPC_F_UDP_VXLANGPE_NONP,
- NPC_F_UDP_GTP_GTPC,
- NPC_F_UDP_GTP_GTPU_G_PDU,
- NPC_F_UDP_GTP_GTPU_UNK,
- NPC_F_UDP_UNK_PORT,
- NPC_F_UDP_GENEVE,
- NPC_F_UDP_GENEVE_OAM,
- NPC_F_UDP_GENEVE_CRI_OPT,
- NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- NPC_F_GRE_NVGRE,
- NPC_F_GRE_HAS_SRE,
- NPC_F_GRE_HAS_CSUM,
- NPC_F_GRE_HAS_KEY,
- NPC_F_GRE_HAS_SEQ,
- NPC_F_GRE_HAS_CSUM_KEY,
- NPC_F_GRE_HAS_CSUM_SEQ,
- NPC_F_GRE_HAS_KEY_SEQ,
- NPC_F_GRE_HAS_CSUM_KEY_SEQ,
- NPC_F_GRE_HAS_ROUTE,
- NPC_F_GRE_UNK_PROTO,
- NPC_F_GRE_VER1,
- NPC_F_GRE_VER1_HAS_SEQ,
- NPC_F_GRE_VER1_HAS_ACK,
- NPC_F_GRE_VER1_HAS_SEQ_ACK,
- NPC_F_GRE_VER1_UNK_PROTO,
- NPC_F_TU_ETHER_UNK,
- NPC_F_TU_ETHER_CTAG,
- NPC_F_TU_ETHER_CTAG_UNK,
- NPC_F_TU_ETHER_STAG_CTAG,
- NPC_F_TU_ETHER_STAG_CTAG_UNK,
- NPC_F_TU_ETHER_STAG,
- NPC_F_TU_ETHER_STAG_UNK,
- NPC_F_TU_ETHER_QINQ_CTAG,
- NPC_F_TU_ETHER_QINQ_CTAG_UNK,
- NPC_F_TU_ETHER_QINQ,
- NPC_F_TU_ETHER_QINQ_UNK,
- NPC_F_LAST /* has to be the last item */
+enum npc_kpu_la_uflag {
+ NPC_F_LA_U_HAS_TAG = 0x10,
+ NPC_F_LA_U_HAS_IH_NIX = 0x20,
+ NPC_F_LA_U_HAS_HIGIG2 = 0x40,
+};
+enum npc_kpu_la_lflag {
+ NPC_F_LA_L_UNK_ETYPE = 1,
+ NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_L_WITH_NSH,
+};
+
+enum npc_kpu_lb_uflag {
+ NPC_F_LB_U_UNK_ETYPE = 0x80,
+ NPC_F_LB_U_MORE_TAG = 0x40,
+};
+enum npc_kpu_lb_lflag {
+ NPC_F_LB_L_WITH_CTAG = 1,
+ NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_L_WITH_QINQ_CTAG,
+ NPC_F_LB_L_WITH_QINQ_QINQ,
+ NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_L_DSA,
+ NPC_F_LB_L_DSA_VLAN,
+ NPC_F_LB_L_EDSA,
+ NPC_F_LB_L_EDSA_VLAN,
+ NPC_F_LB_L_EXDSA,
+ NPC_F_LB_L_EXDSA_VLAN,
+};
+
+enum npc_kpu_lc_uflag {
+ NPC_F_LC_U_UNK_PROTO = 0x10,
+ NPC_F_LC_U_IP_FRAG = 0x20,
+ NPC_F_LC_U_IP6_FRAG = 0x40,
+};
+enum npc_kpu_lc_lflag {
+ NPC_F_LC_L_IP_IN_IP = 1,
+ NPC_F_LC_L_6TO4,
+ NPC_F_LC_L_MPLS_IN_IP,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ NPC_F_LC_L_EXT_HOP,
+ NPC_F_LC_L_EXT_DEST,
+ NPC_F_LC_L_EXT_ROUT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ NPC_F_LC_L_EXT_HOSTID,
+ NPC_F_LC_L_EXT_SHIM6,
+};
+
+enum npc_kpu_ld_lflag {
+ NPC_F_LD_L_TCP_UNK_PORT = 1,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_LD_L_UDP_UNK_PORT,
+ NPC_F_LD_L_GRE_NVGRE,
+ NPC_F_LD_L_GRE_HAS_SRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ NPC_F_LD_L_GRE_VER1,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ NPC_F_LD_L_MPLS_2_LABELS,
+};
+
+enum npc_kpu_le_lflag {
+ NPC_F_LE_L_VXLAN_NOVNI,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ NPC_F_LE_L_GENEVE_OAM,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ NPC_F_LE_L_GTPU_G_PDU,
+ NPC_F_LE_L_GTPU_UNK,
+};
+
+enum npc_kpu_lf_uflag {
+ NPC_F_LF_U_UNK_ETYPE = 0x10,
+ NPC_F_LF_U_HAS_TAG = 0x20,
+};
+
+enum npc_kpu_lf_lflag {
+ NPC_F_LF_L_WITH_CTAG = 1,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ NPC_F_LF_L_WITH_STAG,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ NPC_F_LF_L_WITH_QINQ,
+};
+
+enum npc_kpu_lg_uflag {
+ NPC_F_LG_U_UNK_IP_PROTO = 0x10,
+ NPC_F_LG_U_IP_HAS_OPTIONS = 0x20,
+ NPC_F_LG_U_IP6_HAS_EXT = 0x40,
+};
+
+enum npc_kpu_lh_uflag {
+ NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80,
+};
+
+enum npc_kpu_lh_lflag {
+ NPC_F_LH_L_TCP_HTTP = 1,
+ NPC_F_LH_L_TCP_HTTPS,
+ NPC_F_LH_L_TCP_PPTP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ NPC_F_LH_L_UDP_UNK_PORT,
};
enum npc_kpu_err_code {
NPC_EC_NOERR = 0, /* has to be zero */
NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
NPC_EC_L2_K1,
NPC_EC_L2_K2,
NPC_EC_L2_K3,
NPC_EC_L2_K3_ETYPE_UNK,
- NPC_EC_L2_MPLS_2MANY,
NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
NPC_EC_VXLAN,
NPC_EC_NVGRE,
NPC_EC_GRE,
NPC_EC_GRE_VER1,
NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
NPC_EC_LAST /* has to be the last item */
};
@@ -328,5282 +420,12598 @@ enum NPC_ERRLEV_E {
static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 28, 32, 36, 0, 0,
+ NPC_S_KPU1_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 20, 24, 28, 0, 0,
+ NPC_S_KPU1_IH_NIX, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
};
static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0010, 0x0010, 0x0000, 0xffff,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0010, 0x0010, 0x0000, 0xffff,
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_DSA,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0xfc00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0400,
+ 0xfe00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ NPC_IH_W,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_EXTEND,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
{
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
- NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU4_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_MPLS,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
};
-static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU6_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU7_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
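+/*
+ * KPU8 classifies the L4 header. Note the GRE rows: because each row
+ * exact-matches the GRE flags/version word (mask 0xffff), every
+ * supported CSUM/KEY/SEQ combination gets its own entry per payload
+ * protocol rather than a single wildcarded row.
+ */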
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLAN,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLANGPE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GENEVE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPC,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_E,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_G,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_MPLS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_GRE_F_ROUTE,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x2001,
+ 0xef7f,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0001,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
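+/*
+ * KPU9 parses the tunnel headers reached from KPU8: MPLS in
+ * GRE/NSH/IP/UDP, NSH in GRE, VXLAN, VXLAN-GPE, GENEVE and GTP-C/U.
+ */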
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLAN_I,
+ NPC_VXLAN_I,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_S_KPU9_GTPC, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
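+/*
+ * From KPU11 on, the "TU_*" states re-run header classification on
+ * the tunneled (inner) packet: inner Ethernet/PPP/MPLS here, inner
+ * IP/IPv6/ARP in KPU12, IPv6 extension headers in KPU13-14, and the
+ * inner L4 header in KPU15.
+ */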
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU13_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU14_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
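+/*
+ * KPU16 states are terminal payload classifiers: every row is an
+ * all-zero catch-all that only records the payload type.
+ */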
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU16_TCP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
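+/*
+ * Action rows (struct npc_kpu_profile_action) apply to the CAM row of
+ * the same index in the corresponding kpuN_cam_entries table.
+ * Roughly: error level/code; byte offsets of the three data words the
+ * next KPU will compare, plus a bypass count and parse-done flag;
+ * next state, pointer advance and capture enable; layer id and layer
+ * type; flags; and a variable-length field descriptor.
+ */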
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 1, 0,
+ NPC_S_KPU3_DSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 8, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 4, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 2, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 0, 0,
+ NPC_S_KPU2_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_L2_K1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
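+/*
+ * KPU2 actions resolve the tag states (CTAG/CTAG2/SBTAG/QINQ/ETAG/
+ * ITAG/preheader/EXDSA) that KPU1 dispatched to, setting the LB layer
+ * type before handing off to KPU4/KPU5.
+ */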
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 1,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
- NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 16, 20, 24, 0, 0,
+ NPC_S_KPU3_ITAG, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 20, 0, 0,
+ NPC_S_KPU3_STAG_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 16, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA_VLAN,
+ NPC_F_LB_L_EDSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN,
+ NPC_F_LB_L_EXDSA_VLAN,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
- NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
- NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
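(The kpuN_cam_entries[] rows being removed and rewritten throughout this change use the same positional scheme; a minimal sketch, assuming the field order of struct npc_kpu_profile_cam in npc.h. Each CAM row pairs index-for-index with the action row of the same KPU.)

	/* Field order assumed from struct npc_kpu_profile_cam in npc.h. */
	typedef unsigned char u8;
	typedef unsigned short u16;

	struct npc_kpu_profile_cam {
		u8 state;       /* NPC_S_* state this entry matches in */
		u8 state_mask;  /* 0xff: exact state match */
		u16 dp0;        /* three extracted data words, each matched */
		u16 dp0_mask;   /*   under its mask; value/mask 0x0000 pairs */
		u16 dp1;        /*   are don't-care */
		u16 dp1_mask;
		u16 dp2;
		u16 dp2_mask;
	};

	/* The first kpu8_cam_entries[] row removed just below, read against
	 * that order: in state KPU8_TCP, dp0 must equal NPC_TCP_PORT_HTTP
	 * exactly, dp1 must show TCP data offset 5 under
	 * NPC_TCP_DATA_OFFSET_MASK, and dp2 is don't-care.
	 */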
-static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
- {
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
- },
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- 0x0000, 0xffff, 0x0000, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0000, 0x4fff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0003, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x2001, 0xef7f, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0001, 0x0003, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
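
Each CAM row in these tables packs eight values: a parser state, a state mask, and three data-pointer compare value/mask pairs. A minimal sketch of the layout, assuming the struct npc_kpu_profile_cam definition in the driver's npc.h (the field names and comments are my reading of that header, not part of this patch):

	struct npc_kpu_profile_cam {
		u8 state;	/* KPU state to match, e.g. NPC_S_KPU5_IP (assumed, from npc.h) */
		u8 state_mask;	/* 0xff in real rules; zero only in the catch-all NPC_S_NA rows */
		u16 dp0;	/* compare value for data pointer 0 */
		u16 dp0_mask;	/* mask applied before the compare */
		u16 dp1;	/* second value/mask pair */
		u16 dp1_mask;
		u16 dp2;	/* third value/mask pair */
		u16 dp2_mask;
	};

Read this way, a removed row such as "NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, 0x0000, 0x0000" matches the tunneled-MPLS state exactly and tests only the bottom-of-stack bit at data pointer 0.
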
-static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
- },
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 7, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 7, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 6, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 4, 0,
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
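
The rewritten action rows group seventeen values onto six lines. A sketch of how the initializers line up with the fields, again assuming the struct npc_kpu_profile_action layout from the driver's npc.h (field names and comments are an assumption based on that header, not text from this hunk):

	struct npc_kpu_profile_action {
		u8 errlev;		/* e.g. NPC_ERRLEV_RE */
		u8 errcode;		/* e.g. NPC_EC_NOERR */
		u8 dp0_offset;		/* byte offsets loaded into the three */
		u8 dp1_offset;		/*   data pointers that the next KPU's */
		u8 dp2_offset;		/*   CAM rows compare against */
		u8 bypass_count;	/* number of following KPUs to skip */
		u8 parse_done;		/* 1: stop parsing after this action */
		u8 next_state;		/* e.g. NPC_S_KPU5_IP, or NPC_S_NA when done */
		u8 ptr_advance;		/* bytes to advance past the parsed header */
		u8 cap_ena;		/* 1: record lid/ltype/flags for this layer */
		u8 lid;			/* e.g. NPC_LID_LB */
		u8 ltype;		/* e.g. NPC_LT_LB_STAG_QINQ */
		u8 flags;		/* e.g. NPC_F_LB_L_DSA */
		u8 offset;		/* variable-length adjustment: take hdr[offset], */
		u8 mask;		/*   AND with mask, then shift right (right = 1) */
		u8 right;		/*   or left (right = 0) by 'shift' bits */
		u8 shift;
	};

On that reading, the IPv4-options rows below end in "0, 0xf, 0, 2" (take the IHL nibble at byte 0, shift left by 2 to get the header length in bytes), while the TCP-options rows end in "12, 0xf0, 1, 2" (take the data-offset nibble at byte 12 and shift right by 2 for the same effect).
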
-static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
- NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
- NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
},
-};
-
-static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
- NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU5_IP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU5_IP6, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_ARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_RARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_PTP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_FCOE, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_CTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
- 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_QINQ, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
- 0, 0, NPC_S_KPU2_ETAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU2_ITAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU4_NSH, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU5_IP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU5_IP6, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_ARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_RARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_PTP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_FCOE, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_CTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
- 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_QINQ, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
- 0, 0, NPC_S_KPU2_ETAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU2_ITAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_NSH, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu2_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_STAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_STAG, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_QINQ, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU3_ITAG, 12, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_STAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_QINQ, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_STAG, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLANGPE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GENEVE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPC, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPU, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ESP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_NVGRE,
+ NPC_F_LD_L_GRE_NVGRE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_NVGRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU5_MPLS, 12, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 7, 0, NPC_S_KPU12_TU_IP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU5_NSH, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_GRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu5_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_IGMP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_ESP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_AH, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_IGMP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_ESP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_AH, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
- 0, 0xf, 0, 2,
+ NPC_ERRLEV_LE, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ NPC_F_LE_L_VXLAN_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_VXLAN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP6, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ESP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_AH, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPC,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ NPC_F_LE_L_GTPU_G_PDU,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ NPC_F_LE_L_GTPU_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu6_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu7_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu8_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 4, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 8, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_NSH_UNK,
+ 6, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_PPP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu9_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
-};
-
-static struct npc_kpu_profile_action kpu10_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS | NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ARP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ NPC_F_LG_U_IP6_HAS_EXT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu11_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER,
- NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER,
- NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu12_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP,
- NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
- NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 40, 1,
- NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu13_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu14_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
-};
-
-static struct npc_kpu_profile_action kpu15_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_PPTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
- NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_UDP,
+ NPC_F_LH_L_UDP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_SCTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_IGMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ESP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_AH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_L4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
static struct npc_kpu_profile_action kpu16_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
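
A note on reading these tables: matched against struct npc_kpu_profile_action, the seventeen initialisers of each new-format entry appear to group as annotated below. The field names are inferred from the driver's profile header, which this hunk does not show, so treat them as a best-effort reading rather than the authoritative layout.

    /* One new-format entry, annotated (field names are an assumption) */
    {
        NPC_ERRLEV_RE, NPC_EC_NOERR,  /* errlev, errcode                  */
        0, 0, 0, 2, 0,                /* dp0/dp1/dp2_offset, bypass_count,
                                       * parse_done                       */
        NPC_S_KPU15_TU_ESP, 40, 1,    /* next_state, ptr_advance, cap_ena */
        NPC_LID_LG, NPC_LT_LG_TU_IP6, /* lid, ltype                       */
        0,                            /* flags                            */
        0, 0, 0, 0,                   /* offset, mask, right, shift       */
    },
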
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e581091c09c4..5c190c3ce898 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -56,12 +56,34 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static void rvu_setup_hw_capabilities(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
+ hw->cap.nix_fixed_txschq_mapping = false;
+ hw->cap.nix_shaping = true;
+ hw->cap.nix_tx_link_bp = true;
+ hw->cap.nix_rx_multicast = true;
+
+ if (is_rvu_96xx_B0(rvu)) {
+ hw->cap.nix_fixed_txschq_mapping = true;
+ hw->cap.nix_txsch_per_cgx_lmac = 4;
+ hw->cap.nix_txsch_per_lbk_lmac = 132;
+ hw->cap.nix_txsch_per_sdp_lmac = 76;
+ hw->cap.nix_shaping = false;
+ hw->cap.nix_tx_link_bp = false;
+ if (is_rvu_96xx_A0(rvu))
+ hw->cap.nix_rx_multicast = false;
+ }
+}
+
/* Poll a RVU block's register 'offset', for a 'zero'
* or 'nonzero' at bits specified by 'mask'
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
- unsigned long timeout = jiffies + usecs_to_jiffies(100);
+ unsigned long timeout = jiffies + usecs_to_jiffies(10000);
void __iomem *reg;
u64 reg_val;
@@ -73,7 +95,6 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
if (!zero && (reg_val & mask))
return 0;
usleep_range(1, 5);
- timeout--;
}
return -EBUSY;
}
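
Two fixes land in this hunk: the poll budget grows from 100us to 10ms, and the stray timeout-- goes away. Since timeout is a jiffies deadline, decrementing it on every iteration kept pulling the deadline closer and could cut the poll window short. A minimal sketch of the corrected pattern, assuming the loop condition (not visible in the hunk) is the usual time_before() check:

    unsigned long timeout = jiffies + usecs_to_jiffies(10000);

    while (time_before(jiffies, timeout)) { /* only jiffies advances */
        reg_val = readq(reg);
        if (zero && !(reg_val & mask))
            return 0;
        if (!zero && (reg_val & mask))
            return 0;
        usleep_range(1, 5);                 /* no timeout-- here */
    }
    return -EBUSY;
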
@@ -433,9 +454,9 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
@@ -877,8 +898,8 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
-static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
- struct ready_msg_rsp *rsp)
+int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
{
return 0;
}
@@ -1023,9 +1044,9 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
return 0;
}
-static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
- struct rsrc_detach *detach,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
{
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
@@ -1171,9 +1192,9 @@ fail:
return -ENOSPC;
}
-static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
- struct rsrc_attach *attach,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_attach_resources(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
{
u16 pcifunc = attach->hdr.pcifunc;
int err;
@@ -1294,8 +1315,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
-static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
- struct msix_offset_rsp *rsp)
+int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -1343,8 +1364,8 @@ static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
u16 vf, numvfs;
@@ -1363,6 +1384,17 @@ static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
+ struct get_hw_cap_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
+ rsp->nix_shaping = hw->cap.nix_shaping;
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -1440,12 +1472,12 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
- if (req_hdr->num_msgs == 0)
+ if (mw->mbox_wrk[devid].num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < req_hdr->num_msgs; id++) {
+ for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
@@ -1471,13 +1503,14 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, devid,
+ msg->id, rvu_get_pf(msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+ mw->mbox_wrk[devid].num_msgs = 0;
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
@@ -1523,14 +1556,14 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
- if (rsp_hdr->num_msgs == 0) {
+ if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
return;
}
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
@@ -1560,6 +1593,7 @@ end:
offset = mbox->rx_start + msg->next_msgoff;
mdev->msgs_acked++;
}
+ mw->mbox_wrk_up[devid].up_num_msgs = 0;
otx2_mbox_reset(mbox, devid);
}
@@ -1697,14 +1731,28 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+	/* hdr->num_msgs is cleared immediately in the interrupt handler
+	 * so that it holds a correct value the next time the handler
+	 * runs.
+	 * mbox_wrk[].num_msgs holds the snapshot consumed by
+	 * __rvu_mbox_handler() and mbox_wrk_up[].up_num_msgs the one
+	 * consumed by __rvu_mbox_up_handler().
+	 */
+
+ if (hdr->num_msgs) {
+ mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ }
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
+ if (hdr->num_msgs) {
+ mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
}
}
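
The snapshot-and-clear dance above is the crux of this hunk: the IRQ-side queuing path copies hdr->num_msgs into the work struct and zeroes the shared header, so a new batch can accumulate while the worker drains the snapshot, and the worker zeroes its own copy when done. A standalone model of the handoff (illustrative only, no kernel APIs):

    #include <stdio.h>

    struct mbox_hdr  { int num_msgs; };    /* shared with the producer */
    struct mbox_work { int num_msgs; };    /* private to the worker    */

    static void irq_path(struct mbox_hdr *hdr, struct mbox_work *wrk)
    {
        if (hdr->num_msgs) {
            wrk->num_msgs = hdr->num_msgs; /* snapshot for the worker  */
            hdr->num_msgs = 0;             /* header may refill now    */
        }
    }

    static void worker(struct mbox_work *wrk)
    {
        for (int id = 0; id < wrk->num_msgs; id++)
            printf("process msg %d\n", id);
        wrk->num_msgs = 0;                 /* mirrors the driver's reset */
    }
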
@@ -2316,18 +2364,6 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (vfs > chans)
vfs = chans;
- /* AF's VFs work in pairs and talk over consecutive loopback channels.
- * Thus we want to enable maximum even number of VFs. In case
- * odd number of VFs are available then the last VF on the list
- * remains disabled.
- */
- if (vfs & 0x1) {
- dev_warn(&pdev->dev,
- "Number of VFs should be even. Enabling %d out of %d.\n",
- vfs - 1, vfs);
- vfs--;
- }
-
if (!vfs)
return 0;
@@ -2432,6 +2468,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu_reset_all_blocks(rvu);
+ rvu_setup_hw_capabilities(rvu);
+
err = rvu_setup_hw_resources(rvu);
if (err)
goto err_release_regions;
@@ -2456,6 +2494,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_irq;
+ /* Initialize debugfs */
+ rvu_dbg_init(rvu);
+
return 0;
err_irq:
rvu_unregister_interrupts(rvu);
@@ -2482,6 +2523,7 @@ static void rvu_remove(struct pci_dev *pdev)
{
struct rvu *rvu = pci_get_drvdata(pdev);
+ rvu_dbg_exit(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c9d60b0554c0..51c206f4fe6f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -35,9 +35,36 @@
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
+#ifdef CONFIG_DEBUG_FS
+struct dump_ctx {
+ int lf;
+ int id;
+ bool all;
+};
+
+struct rvu_debugfs {
+ struct dentry *root;
+ struct dentry *cgx_root;
+ struct dentry *cgx;
+ struct dentry *lmac;
+ struct dentry *npa;
+ struct dentry *nix;
+ struct dentry *npc;
+ struct dump_ctx npa_aura_ctx;
+ struct dump_ctx npa_pool_ctx;
+ struct dump_ctx nix_cq_ctx;
+ struct dump_ctx nix_rq_ctx;
+ struct dump_ctx nix_sq_ctx;
+ int npa_qsize_id;
+ int nix_qsize_id;
+};
+#endif
+
struct rvu_work {
struct work_struct work;
struct rvu *rvu;
+ int num_msgs;
+ int up_num_msgs;
};
struct rsrc_bmap {
@@ -99,6 +126,7 @@ struct npc_mcam {
u16 lprio_start;
u16 hprio_count;
u16 hprio_end;
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
};
/* Structure for per RVU func info ie PF/VF */
@@ -151,15 +179,20 @@ struct rvu_pfvf {
struct mcam_entry entry;
int rxvlan_index;
bool rxvlan;
+
+ bool cgx_in_use; /* this PF/VF using CGX? */
+ int cgx_users; /* number of cgx users - used only by PFs */
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
-#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
+#define NIX_TXSCHQ_FREE BIT_ULL(1)
+#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
u32 *pfvf_map;
};
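
A worked example of the pfvf_map encoding these macros define: the low 16 bits carry the owning pcifunc and the high 16 bits carry scheduler-queue flags.

    u32 map = TXSCH_MAP(0x0401, NIX_TXSCHQ_FREE);   /* func | (flags << 16) */

    u16 func  = TXSCH_MAP_FUNC(map);                /* == 0x0401            */
    u16 flags = TXSCH_MAP_FLAGS(map);               /* == NIX_TXSCHQ_FREE   */

    map = TXSCH_SET_FLAG(map, NIX_TXSCHQ_CFG_DONE); /* OR in another flag   */
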
@@ -193,6 +226,21 @@ struct nix_hw {
struct nix_lso lso;
};
+/* RVU block's capabilities or functionality,
+ * which vary by silicon version/skew.
+ */
+struct hw_cap {
+ /* Transmit side supported functionality */
+ u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
+ u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
+ u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
+ u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
+ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
+ bool nix_rx_multicast; /* Rx packet replication support */
+};
+
struct rvu_hwinfo {
u8 total_pfs; /* MAX RVU PFs HW supports */
u16 total_vfs; /* Max RVU VFs HW supports */
@@ -204,7 +252,7 @@ struct rvu_hwinfo {
u8 sdp_links;
u8 npc_kpus; /* No of parser units */
-
+ struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
struct nix_hw *nix0;
struct npc_pkind pkind;
@@ -261,8 +309,13 @@ struct rvu {
struct workqueue_struct *cgx_evh_wq;
spinlock_t cgx_evq_lock; /* cgx event queue lock */
struct list_head cgx_evq_head; /* cgx event queue head */
+ struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+
+#ifdef CONFIG_DEBUG_FS
+ struct rvu_debugfs rvu_dbg;
+#endif
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -285,7 +338,8 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
-static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+/* Silicon revisions */
+static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
@@ -293,6 +347,14 @@ static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
+static inline bool is_rvu_96xx_B0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
/* Function Prototypes
* RVU
*/
@@ -342,52 +404,25 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+#define M(_name, _id, fn_name, req, rsp) \
+int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
+MBOX_MESSAGES
+#undef M
+
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
- struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
+ int rxtxflag, u64 *stat);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
-int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
- struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
- struct hwctx_disable_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
- struct npa_lf_alloc_req *req,
- struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
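
The M() stanza above replaces the hand-maintained prototype list being deleted across the rest of this header: each MBOX_MESSAGES entry now emits its handler's declaration automatically. For a representative entry such as M(READY, 0x001, ready, msg_req, ready_msg_rsp) (quoted from mbox.h from memory, so an assumption here), the expansion is:

    int rvu_mbox_handler_ready(struct rvu *, struct msg_req *,
                               struct ready_msg_rsp *);
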
@@ -397,55 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
-int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
- struct nix_lf_alloc_req *req,
- struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
- struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
- struct hwctx_disable_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
- struct nix_txsch_alloc_req *req,
- struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
- struct nix_txsch_free_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
- struct nix_txschq_config *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
- struct nix_vtag_config *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
- struct nix_rss_flowkey_cfg *req,
- struct nix_rss_flowkey_cfg_rsp *rsp);
-int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
- struct nix_set_mac_addr *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
- struct nix_mark_format_cfg *req,
- struct nix_mark_format_cfg_rsp *rsp);
-int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
- struct nix_lso_format_cfg *req,
- struct nix_lso_format_cfg_rsp *rsp);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -460,45 +447,25 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
+void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
-int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
- struct npc_mcam_alloc_entry_req *req,
- struct npc_mcam_alloc_entry_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
- struct npc_mcam_free_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
- struct npc_mcam_write_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
- struct npc_mcam_ena_dis_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
- struct npc_mcam_ena_dis_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
- struct npc_mcam_shift_entry_req *req,
- struct npc_mcam_shift_entry_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
- struct npc_mcam_alloc_counter_req *req,
- struct npc_mcam_alloc_counter_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
- struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req,
- struct npc_mcam_oper_counter_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
- struct npc_mcam_alloc_and_write_entry_req *req,
- struct npc_mcam_alloc_and_write_entry_rsp *rsp);
-int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
- struct npc_get_kex_cfg_rsp *rsp);
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+
+#ifdef CONFIG_DEBUG_FS
+void rvu_dbg_init(struct rvu *rvu);
+void rvu_dbg_exit(struct rvu *rvu);
+#else
+static inline void rvu_dbg_init(struct rvu *rvu) {}
+static inline void rvu_dbg_exit(struct rvu *rvu) {}
+#endif
#endif /* RVU_H */
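
For orientation, a pcifunc composes as implied by the RVU_PFVF_* masks near the top of this header. The PF shift value of 10 below is an assumption, not shown in this hunk, though it matches the "pf << 10 | vf" arithmetic used in rvu_debugfs.c further down.

    #define RVU_PFVF_PF_SHIFT  10      /* assumed; not shown in this hunk */
    #define RVU_PFVF_FUNC_MASK 0x3FF   /* from this header */

    /* func == 0 addresses the PF itself; func == N addresses VF N-1 */
    static inline unsigned short make_pcifunc(int pf, int vf) /* hypothetical */
    {
        return (pf << RVU_PFVF_PF_SHIFT) | (vf & RVU_PFVF_FUNC_MASK);
    }
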
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 7d7133c5f799..11e5921c55b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "rvu_reg.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -40,12 +41,25 @@ MBOX_UP_CGX_MESSAGES
#undef M
/* Returns bitmap of mapped PFs */
-static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+ unsigned long pfmap;
+
+ pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
+
+ /* Assumes only one pf mapped to a cgx lmac port */
+ if (!pfmap)
+ return -ENODEV;
+ else
+ return find_first_bit(&pfmap, 16);
+}
+
+static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
@@ -294,6 +308,8 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
+ mutex_init(&rvu->cgx_cfg_lock);
+
/* Ensure event handler registration is completed, before
* we turn on the links
*/
@@ -334,6 +350,24 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
+ if (enable)
+ cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ else
+ cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
@@ -562,3 +596,95 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
return 0;
}
+
+/* Finds the cumulative status of NIX rx/tx counters from the LFs of a PF
+ * and of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level.
+ */
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
+ int index, int rxtxflag, u64 *stat)
+{
+ struct rvu_block *block;
+ int blkaddr;
+ u16 pcifunc;
+ int pf, lf;
+
+ *stat = 0;
+
+ if (!cgxd || !rvu)
+ return -EINVAL;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ if (pf < 0)
+ return pf;
+
+ /* Assumes LF of a PF and all of its VF belongs to the same
+ * NIX block
+ */
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ /* Check if a lf is attached to this PF or one of its VFs */
+ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ if (rxtxflag == NIX_STATS_RX)
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_STATX(lf, index));
+ else
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_TX_STATX(lf, index));
+ }
+
+ return 0;
+}
+
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ struct rvu_pfvf *parent_pf, *pfvf;
+ int cgx_users, err = 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return 0;
+
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ mutex_lock(&rvu->cgx_cfg_lock);
+
+ if (start && pfvf->cgx_in_use)
+ goto exit; /* CGX is already started hence nothing to do */
+ if (!start && !pfvf->cgx_in_use)
+ goto exit; /* CGX is already stopped hence nothing to do */
+
+ if (start) {
+ cgx_users = parent_pf->cgx_users;
+ parent_pf->cgx_users++;
+ } else {
+ parent_pf->cgx_users--;
+ cgx_users = parent_pf->cgx_users;
+ }
+
+	/* Start CGX when the first NIXLF of this PF/VF group is started;
+	 * stop CGX when the last one is stopped.
+	 */
+ if (!cgx_users) {
+ err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ start);
+ if (err) {
+ dev_err(rvu->dev, "Unable to %s CGX\n",
+ start ? "start" : "stop");
+ /* Revert the usage count in case of error */
+ parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
+ : parent_pf->cgx_users + 1;
+ goto exit;
+ }
+ }
+ pfvf->cgx_in_use = start;
+exit:
+ mutex_unlock(&rvu->cgx_cfg_lock);
+ return err;
+}
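
The cgx_users count on the parent PF is what lets a PF and its VFs share one CGX LMAC: the link only toggles on the 0->1 and 1->0 transitions. A minimal userspace model of that refcount logic (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    static int cgx_users;

    static void start_stop_io(bool start)
    {
        int users;

        if (start)
            users = cgx_users++;   /* old count: 0 means first user */
        else
            users = --cgx_users;   /* new count: 0 means last user  */

        if (!users)
            printf("%s CGX\n", start ? "start" : "stop");
    }
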
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
new file mode 100644
index 000000000000..77adad4adb1b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -0,0 +1,1711 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "npc.h"
+
+#define DEBUGFS_DIR_NAME "octeontx2"
+
+enum {
+ CGX_STAT0,
+ CGX_STAT1,
+ CGX_STAT2,
+ CGX_STAT3,
+ CGX_STAT4,
+ CGX_STAT5,
+ CGX_STAT6,
+ CGX_STAT7,
+ CGX_STAT8,
+ CGX_STAT9,
+ CGX_STAT10,
+ CGX_STAT11,
+ CGX_STAT12,
+ CGX_STAT13,
+ CGX_STAT14,
+ CGX_STAT15,
+ CGX_STAT16,
+ CGX_STAT17,
+ CGX_STAT18,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+static char *cgx_rx_stats_fields[] = {
+ [CGX_STAT0] = "Received packets",
+ [CGX_STAT1] = "Octets of received packets",
+ [CGX_STAT2] = "Received PAUSE packets",
+ [CGX_STAT3] = "Received PAUSE and control packets",
+ [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
+ [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
+ [CGX_STAT6] = "Packets dropped due to RX FIFO full",
+ [CGX_STAT7] = "Octets dropped due to RX FIFO full",
+ [CGX_STAT8] = "Error packets",
+ [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
+ [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
+ [CGX_STAT11] = "NCSI-bound packets dropped",
+ [CGX_STAT12] = "NCSI-bound octets dropped",
+};
+
+static char *cgx_tx_stats_fields[] = {
+ [CGX_STAT0] = "Packets dropped due to excessive collisions",
+ [CGX_STAT1] = "Packets dropped due to excessive deferral",
+ [CGX_STAT2] = "Multiple collisions before successful transmission",
+ [CGX_STAT3] = "Single collisions before successful transmission",
+ [CGX_STAT4] = "Total octets sent on the interface",
+ [CGX_STAT5] = "Total frames sent on the interface",
+ [CGX_STAT6] = "Packets sent with an octet count < 64",
+ [CGX_STAT7] = "Packets sent with an octet count == 64",
+	[CGX_STAT8] = "Packets sent with an octet count of 65-127",
+ [CGX_STAT9] = "Packets sent with an octet count of 128-255",
+ [CGX_STAT10] = "Packets sent with an octet count of 256-511",
+ [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
+ [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
+ [CGX_STAT13] = "Packets sent with an octet count of > 1518",
+ [CGX_STAT14] = "Packets sent to a broadcast DMAC",
+ [CGX_STAT15] = "Packets sent to the multicast DMAC",
+	[CGX_STAT16] = "Packets truncated due to transmit underflow",
+ [CGX_STAT17] = "Control/PAUSE packets sent",
+};
+
+#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
+ blk_addr, NDC_AF_CONST) & 0xFF)
+
+#define rvu_dbg_NULL NULL
+#define rvu_dbg_open_NULL NULL
+
+#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
+static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, rvu_dbg_##read_op, inode->i_private); \
+} \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = rvu_dbg_open_##name, \
+ .read = seq_read, \
+ .write = rvu_dbg_##write_op, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
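
To make the macro concrete: RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write), used further down, expands to:

    static int rvu_dbg_open_npa_qsize(struct inode *inode, struct file *file)
    {
        return single_open(file, rvu_dbg_npa_qsize_display, inode->i_private);
    }
    static const struct file_operations rvu_dbg_npa_qsize_fops = {
        .owner   = THIS_MODULE,
        .open    = rvu_dbg_open_npa_qsize,
        .read    = seq_read,
        .write   = rvu_dbg_npa_qsize_write,
        .llseek  = seq_lseek,
        .release = single_release,
    };
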
+
+#define RVU_DEBUG_FOPS(name, read_op, write_op) \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = rvu_dbg_##read_op, \
+ .write = rvu_dbg_##write_op \
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
+/* Dumps current provisioning status of all RVU block LFs */
+static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int index, off = 0, flag = 0, go_back = 0, off_prev;
+ struct rvu *rvu = filp->private_data;
+ int lf, pf, vf, pcifunc;
+ struct rvu_block block;
+ int bytes_not_copied;
+ int buf_size = 2048;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+		return -ENOMEM;
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
+ for (index = 0; index < BLK_COUNT; index++)
+ if (strlen(rvu->hw->block[index].name))
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%*s\t", (index - 1) * 2,
+ rvu->hw->block[index].name);
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ go_back = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "PF%d:VF%d\t\t", pf,
+ vf - 1);
+ } else {
+ go_back = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "PF%d\t\t", pf);
+ }
+
+ off += go_back;
+ for (index = 0; index < BLKTYPE_MAX; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ off_prev = off;
+ for (lf = 0; lf < block.lf.max; lf++) {
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+ flag = 1;
+ off += scnprintf(&buf[off], buf_size - 1
+ - off, "%3d,", lf);
+ }
+ if (flag && off_prev != off)
+ off--;
+ else
+ go_back++;
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\t");
+ }
+ if (!flag)
+ off -= go_back;
+ else
+ flag = 0;
+ off--;
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ }
+ }
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+ u16 *pcifunc)
+{
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0) {
+ dev_warn(rvu->dev, "Invalid blktype\n");
+ return false;
+ }
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+
+ if (lf < 0 || lf >= block->lf.max) {
+ dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
+ block->lf.max - 1);
+ return false;
+ }
+
+ *pcifunc = block->fn_map[lf];
+ if (!*pcifunc) {
+ dev_warn(rvu->dev,
+ "This LF is not attached to any RVU PFFUNC\n");
+ return false;
+ }
+ return true;
+}
+
+static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (!pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
+ pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
+ }
+
+ if (!pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
+ pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
+ }
+ kfree(buf);
+}
+
+/* The 'qsize' entry dumps current Aura/Pool context Qsize
+ * and each context's current enable/disable status in a bitmap.
+ */
+static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
+ int blktype)
+{
+ void (*print_qsize)(struct seq_file *filp,
+ struct rvu_pfvf *pfvf) = NULL;
+ struct rvu_pfvf *pfvf;
+ struct rvu *rvu;
+ int qsize_id;
+ u16 pcifunc;
+
+ rvu = filp->private;
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ qsize_id = rvu->rvu_dbg.npa_qsize_id;
+ print_qsize = print_npa_qsize;
+ break;
+
+ case BLKTYPE_NIX:
+ qsize_id = rvu->rvu_dbg.nix_qsize_id;
+ print_qsize = print_nix_qsize;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ print_qsize(filp, pfvf);
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, int blktype)
+{
+ char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
+ struct seq_file *seqfile = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ struct rvu *rvu = seqfile->private;
+ u16 pcifunc;
+ int ret, lf;
+
+	cmd_buf = memdup_user_nul(buffer, count);
+	if (IS_ERR(cmd_buf))
+		return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
+ if (cmd_buf)
+ ret = -EINVAL;
+
+	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
+ dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
+ goto qsize_write_done;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ ret = -EINVAL;
+ goto qsize_write_done;
+ }
+ if (blktype == BLKTYPE_NPA)
+ rvu->rvu_dbg.npa_qsize_id = lf;
+ else
+ rvu->rvu_dbg.nix_qsize_id = lf;
+
+qsize_write_done:
+ kfree(cmd_buf_tmp);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NPA);
+}
+
+static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
+
+/* Dumps given NPA Aura's context */
+static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_aura_s *aura = &rsp->aura;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
+ aura->pool_way_mask, aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
+ (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+}
+
+/* Dumps given NPA Pool's context */
+static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_pool_s *pool = &rsp->pool;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
+ pool->stack_caching, pool->stack_way_mask);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+}
+
+/* Reads aura/pool's ctx from admin queue */
+static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+{
+ void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
+ struct npa_aq_enq_req aq_req;
+ struct npa_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ int aura, rc, max_id;
+ int npalf, id, all;
+ struct rvu *rvu;
+ u16 pcifunc;
+
+ rvu = m->private;
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
+ id = rvu->rvu_dbg.npa_aura_ctx.id;
+ all = rvu->rvu_dbg.npa_aura_ctx.all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
+ id = rvu->rvu_dbg.npa_pool_ctx.id;
+ all = rvu->rvu_dbg.npa_pool_ctx.all;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NPA_AQ_INSTOP_READ;
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ max_id = pfvf->aura_ctx->qsize;
+ print_npa_ctx = print_npa_aura_ctx;
+ } else {
+ max_id = pfvf->pool_ctx->qsize;
+ print_npa_ctx = print_npa_pool_ctx;
+ }
+
+ if (id < 0 || id >= max_id) {
+ seq_printf(m, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+
+ for (aura = id; aura < max_id; aura++) {
+ aq_req.aura_id = aura;
+ seq_printf(m, "======%s : %d=======\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ aq_req.aura_id);
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(m, "Failed to read context\n");
+ return -EINVAL;
+ }
+ print_npa_ctx(m, &rsp);
+ }
+ return 0;
+}
+
+static int write_npa_ctx(struct rvu *rvu, bool all,
+ int npalf, int id, int ctype)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ if (!pfvf->aura_ctx) {
+ dev_warn(rvu->dev, "Aura context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->aura_ctx->qsize;
+ } else if (ctype == NPA_AQ_CTYPE_POOL) {
+ if (!pfvf->pool_ctx) {
+ dev_warn(rvu->dev, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->pool_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_aura_ctx.id = id;
+ rvu->rvu_dbg.npa_aura_ctx.all = all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_pool_ctx.id = id;
+ rvu->rvu_dbg.npa_pool_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *npalf,
+ int *id, bool *all)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+ int ret;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
+
+static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos, int ctype)
+{
+ char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
+ "aura" : "pool";
+ struct seq_file *seqfp = filp->private_data;
+ struct rvu *rvu = seqfp->private;
+ int npalf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &npalf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_npa_ctx(rvu, all, npalf, id, ctype);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_AURA);
+}
+
+static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
+
+static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_POOL);
+}
+
+static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
+
+static void ndc_cache_stats(struct seq_file *s, int blk_addr,
+ int ctype, int transaction)
+{
+ u64 req, out_req, lat, cant_alloc;
+ struct rvu *rvu = s->private;
+ int port;
+
+ for (port = 0; port < NDC_MAX_PORT; port++) {
+ req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
+ (port, ctype, transaction));
+ lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
+ (port, ctype, transaction));
+ out_req = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_RWX_OSTDN_PC
+ (port, ctype, transaction));
+ cant_alloc = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_CANT_ALLOC_PC
+ (port, transaction));
+ seq_printf(s, "\nPort:%d\n", port);
+ seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
+ seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
+		/* Guard against divide-by-zero when no requests were made */
+		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n",
+			   req ? lat / req : 0);
+ seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
+ seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
+ }
+}
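+
+/* LAT_PC accumulates the total cycles spent across all requests seen on
+ * a port, so "Avg Latency" above is simply total cycles / total requests.
+ */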
+
+static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ seq_puts(s, "\n***** CACHE mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
+ seq_puts(s, "\n***** CACHE mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
+ return 0;
+}
+
+static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
+
+static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ struct rvu *rvu = s->private;
+ int bank, max_bank;
+
+ max_bank = NDC_MAX_BANK(rvu, blk_addr);
+ for (bank = 0; bank < max_bank; bank++) {
+ seq_printf(s, "BANK:%d\n", bank);
+ seq_printf(s, "\tHits:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_HIT_PC(bank)));
+ seq_printf(s, "\tMiss:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_MISS_PC(bank)));
+ }
+ return 0;
+}
+
+static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NIX0_RX,
+ BLKADDR_NDC_NIX0_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NIX0_TX,
+ BLKADDR_NDC_NIX0_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
+
+static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp,
+ NPA0_U, BLKADDR_NDC_NIX0_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp,
+ NPA0_U, BLKADDR_NDC_NIX0_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+
+/* Dumps given nix_sq's context */
+static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+
+ seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
+ sq_ctx->sqe_way_mask, sq_ctx->cq);
+ seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ sq_ctx->sdp_mcast, sq_ctx->substream);
+ seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
+ sq_ctx->qint_idx, sq_ctx->ena);
+
+ seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
+ sq_ctx->sqb_count, sq_ctx->default_chan);
+ seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
+ sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
+ seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
+ sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
+
+ seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
+ sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
+ seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
+ sq_ctx->sq_int, sq_ctx->sqb_aura);
+ seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
+ sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
+ seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
+ sq_ctx->smenq_offset, sq_ctx->tail_offset);
+ seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
+ sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
+ seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
+ sq_ctx->cq_limit, sq_ctx->max_sqe_size);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
+ sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
+ seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
+ sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+/* Dumps given nix_rq's context */
+static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ rq_ctx->wqe_aura, rq_ctx->substream);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
+
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->sso_tt);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
+
+ seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
+ rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
+ seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
+ rq_ctx->xqe_imm_size, rq_ctx->later_skip);
+ seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
+ rq_ctx->first_skip, rq_ctx->lpb_sizem1);
+ seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
+ rq_ctx->spb_ena, rq_ctx->wqe_skip);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
+
+ seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
+ rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
+ seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+ seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
+ rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
+ seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
+ rq_ctx->xqe_pass, rq_ctx->xqe_drop);
+
+ seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
+ rq_ctx->qint_idx, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
+
+ seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
+ rq_ctx->flow_tagw, rq_ctx->bad_utag);
+ seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
+ rq_ctx->good_utag, rq_ctx->ltag);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_cq's context */
+static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+	seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
+		   cq_ctx->update_time, cq_ctx->avg_level);
+	seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
+		   cq_ctx->head, cq_ctx->tail);
+
+	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
+		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+	seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
+		   cq_ctx->qsize, cq_ctx->caching);
+ seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
+ cq_ctx->substream, cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+}
+
+static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
+ void *unused, int ctype)
+{
+ void (*print_nix_ctx)(struct seq_file *filp,
+ struct nix_aq_enq_rsp *rsp) = NULL;
+ struct rvu *rvu = filp->private;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ char *ctype_string = NULL;
+ int qidx, rc, max_id = 0;
+ struct rvu_pfvf *pfvf;
+ int nixlf, id, all;
+ u16 pcifunc;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
+ id = rvu->rvu_dbg.nix_cq_ctx.id;
+ all = rvu->rvu_dbg.nix_cq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
+ id = rvu->rvu_dbg.nix_sq_ctx.id;
+ all = rvu->rvu_dbg.nix_sq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
+ id = rvu->rvu_dbg.nix_rq_ctx.id;
+ all = rvu->rvu_dbg.nix_rq_ctx.all;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
+ seq_puts(filp, "SQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
+ seq_puts(filp, "RQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
+ seq_puts(filp, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ max_id = pfvf->sq_ctx->qsize;
+ ctype_string = "sq";
+ print_nix_ctx = print_nix_sq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ max_id = pfvf->rq_ctx->qsize;
+ ctype_string = "rq";
+ print_nix_ctx = print_nix_rq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ max_id = pfvf->cq_ctx->qsize;
+ ctype_string = "cq";
+ print_nix_ctx = print_nix_cq_ctx;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+ seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
+ ctype_string, nixlf, aq_req.qidx);
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(filp, "Failed to read the context\n");
+ return -EINVAL;
+ }
+ print_nix_ctx(filp, &rsp);
+ }
+ return 0;
+}
+
+static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
+ int id, int ctype, char *ctype_string)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->sq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ if (!pfvf->rq_ctx) {
+ dev_warn(rvu->dev, "RQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->rq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ if (!pfvf->cq_ctx) {
+ dev_warn(rvu->dev, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->cq_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+		dev_warn(rvu->dev, "Invalid %s_ctx, valid range is 0-%d\n",
+			 ctype_string, max_id - 1);
+ return -EINVAL;
+ }
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_cq_ctx.id = id;
+ rvu->rvu_dbg.nix_cq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_sq_ctx.id = id;
+ rvu->rvu_dbg.nix_sq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_rq_ctx.id = id;
+ rvu->rvu_dbg.nix_rq_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos,
+ int ctype)
+{
+ struct seq_file *m = filp->private_data;
+ struct rvu *rvu = m->private;
+ char *cmd_buf, *ctype_string;
+ int nixlf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_SQ:
+ ctype_string = "sq";
+ break;
+ case NIX_AQ_CTYPE_RQ:
+ ctype_string = "rq";
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ ctype_string = "cq";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return -ENOMEM;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &nixlf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
+ ctype_string);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_SQ);
+}
+
+static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
+
+static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_RQ);
+}
+
+static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
+
+static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_CQ);
+}
+
+static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
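+
+/* Illustrative usage of the queue context files (the debugfs root
+ * directory name comes from DEBUGFS_DIR_NAME):
+ *   echo "0 all" > <debugfs>/<root>/nix/sq_ctx   - select NIXLF 0, all SQs
+ *   cat <debugfs>/<root>/nix/sq_ctx              - dump the selected SQs
+ * The same syntax applies to rq_ctx and cq_ctx.
+ */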
+
+static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
+ unsigned long *bmap, char *qtype)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ bitmap_print_to_pagebuf(false, buf, bmap, qsize);
+ seq_printf(filp, "%s context count : %d\n", qtype, qsize);
+ seq_printf(filp, "%s context ena/dis bitmap : %s\n",
+ qtype, buf);
+ kfree(buf);
+}
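+
+/* bitmap_print_to_pagebuf() assumes a page-sized output buffer, hence
+ * the PAGE_SIZE allocation above.
+ */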
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
+{
+ if (!pfvf->cq_ctx)
+ seq_puts(filp, "cq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
+ "cq");
+
+ if (!pfvf->rq_ctx)
+ seq_puts(filp, "rq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
+ "rq");
+
+ if (!pfvf->sq_ctx)
+ seq_puts(filp, "sq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
+ "sq");
+}
+
+static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NIX);
+}
+
+static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+
+static void rvu_dbg_nix_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev, "create debugfs dir failed for nix\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_sq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_rq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_cq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.nix);
+}
+
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.npa)
+ return;
+
+ pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
+ rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.npa);
+}
+
+#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_RX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
+
+#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_TX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
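+
+/* Both macros are GCC statement expressions: on success they print the
+ * counter, and in all cases they evaluate to its value so callers can
+ * sum them (e.g. rx_frames = ucast + mcast + bcast below). 'err' must
+ * still be checked after each use.
+ */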
+
+static int cgx_print_stats(struct seq_file *s, int lmac_id)
+{
+ struct cgx_link_user_info linfo;
+ void *cgxd = s->private;
+ u64 ucast, mcast, bcast;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ struct rvu *rvu;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ /* Link status */
+ seq_puts(s, "\n=======Link Status======\n\n");
+ err = cgx_get_link_info(cgxd, lmac_id, &linfo);
+ if (err)
+ seq_puts(s, "Failed to read link status\n");
+ seq_printf(s, "\nLink is %s %d Mbps\n\n",
+ linfo.link_up ? "UP" : "DOWN", linfo.speed);
+
+ /* Rx stats */
+ seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
+ if (err)
+ return err;
+
+ /* Tx stats */
+ seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
+ if (err)
+ return err;
+
+ /* Rx stats */
+ seq_puts(s, "\n=======CGX RX_STATS======\n\n");
+ while (stat < CGX_RX_STATS_COUNT) {
+ err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ if (err)
+ return err;
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ seq_puts(s, "\n=======CGX TX_STATS======\n\n");
+ while (stat < CGX_TX_STATS_COUNT) {
+ err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ if (err)
+ return err;
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
+ stat++;
+ }
+
+ return err;
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ struct dentry *current_dir;
+ int err, lmac_id;
+ char *buf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ buf = strrchr(current_dir->d_name.name, 'c');
+ if (!buf)
+ return -EINVAL;
+
+ err = kstrtoint(buf + 1, 10, &lmac_id);
+ if (!err) {
+ err = cgx_print_stats(filp, lmac_id);
+ if (err)
+ return err;
+ }
+ return err;
+}
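+
+/* The LMAC id is recovered from the parent debugfs directory name
+ * ("lmac<N>", created in rvu_dbg_cgx_init() below): strrchr() finds the
+ * last 'c' and kstrtoint() parses the digits that follow it.
+ */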
+
+RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+
+static void rvu_dbg_cgx_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+ int i, lmac_id;
+ char dname[20];
+ void *cgx;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+
+ for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
+ cgx = rvu_cgx_pdata(i, rvu);
+ if (!cgx)
+ continue;
+ /* cgx debugfs dir */
+ sprintf(dname, "cgx%d", i);
+ rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
+ rvu->rvu_dbg.cgx_root);
+ for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+ /* lmac debugfs dir */
+ sprintf(dname, "lmac%d", lmac_id);
+ rvu->rvu_dbg.lmac =
+ debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
+
+ pfile = debugfs_create_file("stats", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_stat_fops);
+ if (!pfile)
+ goto create_failed;
+ }
+ }
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
+}
+
+/* NPC debugfs APIs */
+static void rvu_print_npc_mcam_info(struct seq_file *s,
+ u16 pcifunc, int blkaddr)
+{
+ struct rvu *rvu = s->private;
+ int entry_acnt, entry_ecnt;
+ int cntr_acnt, cntr_ecnt;
+
+ /* Skip PF0 */
+ if (!pcifunc)
+ return;
+ rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
+ &entry_acnt, &entry_ecnt);
+ rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
+ &cntr_acnt, &cntr_ecnt);
+ if (!entry_acnt && !cntr_acnt)
+ return;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+
+ if (entry_acnt) {
+ seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
+ seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
+ }
+ if (cntr_acnt) {
+ seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
+ seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
+ }
+}
+
+static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ int pf, vf, numvfs, blkaddr;
+ struct npc_mcam *mcam;
+ u16 pcifunc;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM info:\n");
+ /* MCAM keywidth on receive and transmit sides */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+
+ mutex_lock(&mcam->lock);
+ /* MCAM entries */
+ seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ mcam->total_entries - mcam->bmap_entries);
+ seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
+
+ /* MCAM counters */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ cfg = (cfg >> 48) & 0xFFFF;
+ seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
+ seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\t\t Available \t: %d\n",
+ rvu_rsrc_free_count(&mcam->counters));
+
+ if (mcam->bmap_entries == mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ seq_puts(filp, "\n\t\t Current allocation\n");
+ seq_puts(filp, "\t\t====================\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
+
+static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
+ void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct npc_mcam *mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
+ seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.npc)
+ return;
+
+ pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_mcam_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_rx_miss_act_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.npc);
+}
+
+void rvu_dbg_init(struct rvu *rvu)
+{
+ struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ if (!rvu->rvu_dbg.root) {
+ dev_err(rvu->dev, "%s failed\n", __func__);
+ return;
+ }
+ pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+ if (!pfile)
+ goto create_failed;
+
+ rvu_dbg_npa_init(rvu);
+ rvu_dbg_nix_init(rvu);
+ rvu_dbg_cgx_init(rvu);
+ rvu_dbg_npc_init(rvu);
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+void rvu_dbg_exit(struct rvu *rvu)
+{
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4a7609fd6dd0..8a59f7d53fbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -64,7 +64,6 @@ enum nix_makr_fmt_indexes {
struct mce {
struct hlist_node node;
- u16 idx;
u16 pcifunc;
};
@@ -127,17 +126,12 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
dev_err(rvu->dev, "NIX RX software sync failed\n");
-
- /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
- * bit too early. Hence wait for 50us more.
- */
- if (is_rvu_9xxx_A0(rvu))
- usleep_range(50, 60);
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
@@ -155,13 +149,15 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
mutex_unlock(&rvu->rsrc_lock);
- /* For TL1 schq, sharing across VF's of same PF is ok */
- if (lvl == NIX_TXSCH_LVL_TL1 &&
- rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
- return false;
+	/* TLs aggregating traffic are shared across a PF and its VFs */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ return false;
+ else
+ return true;
+ }
- if (lvl != NIX_TXSCH_LVL_TL1 &&
- map_func != pcifunc)
+ if (map_func != pcifunc)
return false;
return true;
@@ -198,6 +194,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+
+	/* Note that AF's VFs work in pairs and talk over consecutive
+	 * loopback channels. Therefore, if an odd number of AF VFs is
+	 * enabled, the last VF is left without a pair.
+	 */
pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
NIX_CHAN_LBK_CHX(0, vf + 1);
@@ -382,7 +383,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
- int rss_sz, int rss_grps, int hwctx_size)
+ int rss_sz, int rss_grps, int hwctx_size,
+ u64 way_mask)
{
int err, grp, num_indices;
@@ -402,7 +404,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
/* Config full RSS table size, enable RSS and caching */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
BIT_ULL(36) | BIT_ULL(4) |
- ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
+ way_mask << 20);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -663,6 +666,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static const char *nix_get_ctx_name(int ctype)
+{
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ return "CQ";
+ case NIX_AQ_CTYPE_SQ:
+ return "SQ";
+ case NIX_AQ_CTYPE_RQ:
+ return "RQ";
+ case NIX_AQ_CTYPE_RSS:
+ return "RSS";
+ }
+ return "";
+}
+
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -707,21 +725,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
- (req->ctype == NIX_AQ_CTYPE_CQ) ?
- "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
- "RQ" : "SQ"), qidx);
+ nix_get_ctx_name(req->ctype), qidx);
}
}
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+ struct nix_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NIX_AQ_INSTOP_INIT)
+ return 0;
+
+ if (req->ctype == NIX_AQ_CTYPE_MCE ||
+ req->ctype == NIX_AQ_CTYPE_DYNO)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+ lock_ctx_req.qidx = req->qidx;
+ err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+ req->hdr.pcifunc,
+ nix_get_ctx_name(req->ctype), req->qidx);
+ return err;
+}
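+
+/* With CONFIG_NDC_DIS_DYNAMIC_CACHING set, AQ enqueue is wrapped so that
+ * every freshly initialized context (except MCE and DYNO types) is also
+ * locked down in the NDC with a NIX_AQ_INSTOP_LOCK instruction.
+ */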
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = nix_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -745,6 +802,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
return NIX_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -810,7 +870,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
(u64)pfvf->rq_ctx->iova);
/* Set caching and queue count in HW */
- cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
/* Alloc NIX SQ HW context memory and config the base */
@@ -825,7 +885,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
(u64)pfvf->sq_ctx->iova);
- cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
/* Alloc NIX CQ HW context memory and config the base */
@@ -840,13 +901,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
(u64)pfvf->cq_ctx->iova);
- cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
- err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
- req->rss_sz, req->rss_grps, hwctx_size);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
+ req->rss_grps, hwctx_size, req->way_mask);
if (err)
goto free_mem;
@@ -860,7 +922,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
(u64)pfvf->cq_ints_ctx->iova);
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
/* Alloc memory for QINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
@@ -872,7 +936,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
(u64)pfvf->nix_qints_ctx->iova);
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
/* Setup VLANX TPID's.
* Use VLAN1 for 802.1Q
@@ -1048,6 +1113,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int link;
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ return;
+
/* Reset TL4's SDP link config */
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
@@ -1061,83 +1129,185 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
-static int
-rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
- u16 *schq_list, u16 *schq_cnt)
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
- struct nix_txsch *txsch;
- struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
- u8 cgx_id, lmac_id;
- u16 schq_base;
- u32 *pfvf_map;
- int pf, intf;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return -ENODEV;
+ if (is_afvf(pcifunc)) {/* LBK links */
+ return hw->cgx_links;
+ } else if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return (cgx_id * hw->lmac_per_cgx) + lmac_id;
+ }
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
- pfvf_map = txsch->pfvf_map;
- pf = rvu_get_pf(pcifunc);
+ /* SDP link */
+ return hw->cgx_links + hw->lbk_links;
+}
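+
+/* Transmit link numbering used above: CGX LMACs occupy links
+ * 0..cgx_links-1, LBK links follow them, and the SDP link comes last.
+ */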
- /* static allocation as two TL1's per link */
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
+ int link, int *start, int *end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
- switch (intf) {
- case NIX_INTF_TYPE_CGX:
- rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
- schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
- break;
- case NIX_INTF_TYPE_LBK:
- schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
- break;
- default:
- return -ENODEV;
+ if (is_afvf(pcifunc)) { /* LBK links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
+ } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
+ } else { /* SDP link */
+ *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
+ (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
+ *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
}
+}
- if (schq_base + 1 > txsch->schq.max)
- return -ENODEV;
+static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ struct nix_hw *nix_hw,
+ struct nix_txsch_alloc_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int schq, req_schq, free_cnt;
+ struct nix_txsch *txsch;
+ int link, start, end;
- /* init pfvf_map as we store flags */
- if (pfvf_map[schq_base] == U32_MAX) {
- pfvf_map[schq_base] =
- TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
- pfvf_map[schq_base + 1] =
- TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
- /* Onetime reset for TL1 */
- nix_reset_tx_linkcfg(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base);
- nix_reset_tx_shaping(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base);
+ if (!req_schq)
+ return 0;
- nix_reset_tx_linkcfg(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base + 1);
- nix_reset_tx_shaping(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base + 1);
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ /* For traffic aggregating scheduler level, one queue is enough */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (req_schq != 1)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+ return 0;
}
- if (schq_list && schq_cnt) {
- schq_list[0] = schq_base;
- schq_list[1] = schq_base + 1;
- *schq_cnt = 2;
+	/* Get free SCHQ count and check if request can be accommodated */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
+ if (end <= txsch->schq.max && schq < end &&
+ !test_bit(schq, txsch->schq.bmap))
+ free_cnt = 1;
+ else
+ free_cnt = 0;
+ } else {
+ free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
+ if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ /* If contiguous queues are needed, check for availability */
+ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
return 0;
}
+static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
+ struct nix_txsch_alloc_rsp *rsp,
+ int lvl, int start, int end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = rsp->hdr.pcifunc;
+ int idx, schq;
+
+	/* For traffic aggregating levels, queue alloc is based
+	 * on the transmit link to which the PF_FUNC is mapped.
+	 */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ /* A single TL queue is allocated */
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ rsp->schq_contig_list[lvl][0] = start;
+ }
+
+		/* Both contig and non-contig reqs don't make sense here */
+ if (rsp->schq_contig[lvl])
+ rsp->schq[lvl] = 0;
+
+ if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ rsp->schq_list[lvl][0] = start;
+ }
+ return;
+ }
+
+	/* Adjust the queue request count if HW supports
+	 * only one queue per level per PF_FUNC (fixed mapping).
+	 */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ idx = pcifunc & RVU_PFVF_FUNC_MASK;
+ schq = start + idx;
+ if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
+ rsp->schq_contig[lvl] = 0;
+ rsp->schq[lvl] = 0;
+ return;
+ }
+
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][0] = schq;
+ rsp->schq[lvl] = 0;
+ } else if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][0] = schq;
+ }
+ return;
+ }
+
+	/* Allocate requested contiguous queue indices first */
+ if (rsp->schq_contig[lvl]) {
+ schq = bitmap_find_next_zero_area(txsch->schq.bmap,
+ txsch->schq.max, start,
+ rsp->schq_contig[lvl], 0);
+ if (schq >= end)
+ rsp->schq_contig[lvl] = 0;
+ for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Allocate non-contiguous queue indices */
+ if (rsp->schq[lvl]) {
+ idx = 0;
+ for (schq = start; schq < end; schq++) {
+ if (!test_bit(schq, txsch->schq.bmap)) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][idx++] = schq;
+ }
+ if (idx == rsp->schq[lvl])
+ break;
+ }
+ /* Update how many were allocated */
+ rsp->schq[lvl] = idx;
+ }
+}
+
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
+ struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int link, blkaddr, rc = 0;
+ int lvl, idx, start, end;
struct nix_txsch *txsch;
- int lvl, idx, req_schq;
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
- int blkaddr, rc = 0;
u32 *pfvf_map;
u16 schq;
@@ -1151,83 +1321,66 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
return -EINVAL;
mutex_lock(&rvu->rsrc_lock);
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- txsch = &nix_hw->txsch[lvl];
- req_schq = req->schq_contig[lvl] + req->schq[lvl];
- pfvf_map = txsch->pfvf_map;
-
- if (!req_schq)
- continue;
- /* There are only 28 TL1s */
- if (lvl == NIX_TXSCH_LVL_TL1) {
- if (req->schq_contig[lvl] ||
- req->schq[lvl] > 2 ||
- rvu_get_tl1_schqs(rvu, blkaddr,
- pcifunc, NULL, NULL))
- goto err;
- continue;
- }
-
- /* Check if request is valid */
- if (req_schq > MAX_TXSCHQ_PER_FUNC)
- goto err;
-
- /* If contiguous queues are needed, check for availability */
- if (req->schq_contig[lvl] &&
- !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
- goto err;
-
- /* Check if full request can be accommodated */
- if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
+	/* Check if request is valid as per HW capabilities
+	 * and can be accommodated.
+	 */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
+ if (rc)
goto err;
}
+ /* Allocate requested Tx scheduler queues */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
- rsp->schq_contig[lvl] = req->schq_contig[lvl];
pfvf_map = txsch->pfvf_map;
- rsp->schq[lvl] = req->schq[lvl];
if (!req->schq[lvl] && !req->schq_contig[lvl])
continue;
- /* Handle TL1 specially as it is
- * allocation is restricted to 2 TL1's
- * per link
- */
+ rsp->schq[lvl] = req->schq[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
- if (lvl == NIX_TXSCH_LVL_TL1) {
- rsp->schq_contig[lvl] = 0;
- rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
- &rsp->schq_list[lvl][0],
- &rsp->schq[lvl]);
- continue;
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ start = link;
+ end = link;
+ } else if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ } else {
+ start = 0;
+ end = txsch->schq.max;
}
- /* Alloc contiguous queues first */
- if (req->schq_contig[lvl]) {
- schq = rvu_alloc_rsrc_contig(&txsch->schq,
- req->schq_contig[lvl]);
+ nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
- for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ /* Reset queue config */
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ schq = rsp->schq_contig_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
- nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
- rsp->schq_contig_list[lvl][idx] = schq;
- schq++;
- }
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
}
- /* Alloc non-contiguous queues */
for (idx = 0; idx < req->schq[lvl]; idx++) {
- schq = rvu_alloc_rsrc(&txsch->schq);
- pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ schq = rsp->schq_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
- rsp->schq_list[lvl][idx] = schq;
}
}
+
+ rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
+ rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+ rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
+ NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
goto exit;
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
@@ -1236,13 +1389,50 @@ exit:
return rc;
}
+static void nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ int err, restore_tx_en = 0;
+ u64 cfg;
+
+ /* enable cgx tx if disabled */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ if (err)
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+
+ rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ /* restore cgx tx state */
+ if (restore_tx_en)
+ cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+}
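+
+/* The flush poll above can stall if the attached link stops draining
+ * packets, hence CGX TX is force-enabled and link RX backpressure is
+ * disabled for the duration of the flush, then both are restored.
+ */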
+
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
int blkaddr, nixlf, lvl, schq, err;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
- u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1275,26 +1465,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
-
- /* Wait for flush to complete */
- err = rvu_poll_reg(rvu, blkaddr,
- NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
- if (err) {
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
- }
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
}
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- /* Free all SCHQ's except TL1 as
- * TL1 is shared across all VF's for a RVU PF
- */
- if (lvl == NIX_TXSCH_LVL_TL1)
+		/* TLs above the aggregation level are shared across a PF
+		 * and all its VFs, hence skip freeing them.
+		 */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
txsch = &nix_hw->txsch[lvl];
@@ -1302,7 +1481,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
- txsch->pfvf_map[schq] = 0;
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
}
mutex_unlock(&rvu->rsrc_lock);
@@ -1319,13 +1498,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch_free_req *req)
{
- int lvl, schq, nixlf, blkaddr, rc;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int lvl, schq, nixlf, blkaddr;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
- u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1343,10 +1521,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
- /* Don't allow freeing TL1 */
- if (lvl > NIX_TXSCH_LVL_TL2 ||
- schq >= txsch->schq.max)
- goto err;
+ if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
+ return 0;
pfvf_map = txsch->pfvf_map;
mutex_lock(&rvu->rsrc_lock);
@@ -1359,24 +1535,12 @@ static int nix_txschq_free_one(struct rvu *rvu,
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
- if (lvl == NIX_TXSCH_LVL_SMQ) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
-
- /* Wait for flush to complete */
- rc = rvu_poll_reg(rvu, blkaddr,
- NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
- if (rc) {
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
- }
- }
+ if (lvl == NIX_TXSCH_LVL_SMQ)
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
- txsch->pfvf_map[schq] = 0;
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
@@ -1393,8 +1557,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
return nix_txschq_free_one(rvu, req);
}
-static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
- int lvl, u64 reg, u64 regval)
+static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
{
u64 regbase = reg & 0xFFFF;
u16 schq, parent;
@@ -1431,79 +1595,82 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
-static int
-nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
- u16 schq_list[2], schq_cnt, schq;
- int blkaddr, idx, err = 0;
- u16 map_func, map_flags;
- struct nix_hw *nix_hw;
- u64 reg, regval;
- u32 *pfvf_map;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ u64 regbase;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return -EINVAL;
-
- pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
-
- mutex_lock(&rvu->rsrc_lock);
-
- err = rvu_get_tl1_schqs(rvu, blkaddr,
- pcifunc, schq_list, &schq_cnt);
- if (err)
- goto unlock;
+ if (hw->cap.nix_shaping)
+ return true;
- for (idx = 0; idx < schq_cnt; idx++) {
- schq = schq_list[idx];
- map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
- map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+ /* If shaping and coloring is not supported, then
+ * *_CIR and *_PIR registers should not be configured.
+ */
+ regbase = reg & 0xFFFF;
- /* check if config is already done or this is pf */
- if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
- continue;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ if (regbase == NIX_AF_TL1X_CIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ if (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ if (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ if (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0))
+ return false;
+ break;
+ }
+ return true;
+}
- /* default configuration */
- reg = NIX_AF_TL1X_TOPOLOGY(schq);
- regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
- rvu_write64(rvu, blkaddr, reg, regval);
- reg = NIX_AF_TL1X_SCHEDULE(schq);
- regval = TXSCH_TL1_DFLT_RR_QTM;
- rvu_write64(rvu, blkaddr, reg, regval);
- reg = NIX_AF_TL1X_CIR(schq);
- regval = 0;
- rvu_write64(rvu, blkaddr, reg, regval);
+static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
+ u16 pcifunc, int blkaddr)
+{
+ u32 *pfvf_map;
+ int schq;
- map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
- pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
- }
-unlock:
- mutex_unlock(&rvu->rsrc_lock);
- return err;
+ schq = nix_get_tx_link(rvu, pcifunc);
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+ /* Skip if PF has already done the config */
+ if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
+ return;
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
+ (TXSCH_TL1_DFLT_RR_PRIO << 1));
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
- u16 schq, pcifunc = req->hdr.pcifunc;
struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
- u16 map_func, map_flags;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
+ int nixlf, schq;
u32 *pfvf_map;
- int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
@@ -1512,19 +1679,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
- /* VF is only allowed to trigger
- * setting default cfg on TL1
- */
- if (pcifunc & RVU_PFVF_FUNC_MASK &&
- req->lvl == NIX_TXSCH_LVL_TL1) {
- return nix_tl1_default_cfg(rvu, pcifunc);
+ if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
+ pcifunc & RVU_PFVF_FUNC_MASK) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (req->lvl == NIX_TXSCH_LVL_TL1)
+ nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
}
for (idx = 0; idx < req->num_regs; idx++) {
@@ -1532,10 +1696,14 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
- if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
- txsch->lvl, reg, regval))
+ if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
return NIX_AF_INVAL_TXSCHQ_CFG;
+ /* Check if shaping and coloring is supported */
+ if (!is_txschq_shaping_valid(hw, req->lvl, reg))
+ continue;
+
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -1544,32 +1712,36 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval |= ((u64)nixlf << 24);
}
+ /* Clear 'BP_ENA' config, if it's not allowed */
+ if (!hw->cap.nix_tx_link_bp) {
+ if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
+ (schq_regbase & 0xFF00) ==
+ NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
+ regval &= ~BIT_ULL(13);
+ }
+
/* Mark config as done for TL1 by PF */
if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
-
mutex_lock(&rvu->rsrc_lock);
-
- map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
- map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
-
- map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
- pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
+ NIX_TXSCHQ_CFG_DONE);
mutex_unlock(&rvu->rsrc_lock);
}
- rvu_write64(rvu, blkaddr, reg, regval);
-
- /* Check for SMQ flush, if so, poll for its completion */
+ /* SMQ flush is special, hence the register write is split:
+ * issue the flush first, then write the remaining bits.
+ */
if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
(regval & BIT_ULL(49))) {
- err = rvu_poll_reg(rvu, blkaddr,
- reg, BIT_ULL(49), true);
- if (err)
- return NIX_AF_SMQ_FLUSH_FAILED;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ regval &= ~BIT_ULL(49);
}
+ rvu_write64(rvu, blkaddr, reg, regval);
}
+
return 0;
}
@@ -1650,7 +1822,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
}
static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, int idx, bool add)
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -1679,7 +1851,6 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
- mce->idx = idx;
mce->pcifunc = pcifunc;
if (!tail)
hlist_add_head(&mce->node, &mce_list->head);
@@ -1691,12 +1862,12 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
- int err = 0, idx, next_idx, count;
+ int err = 0, idx, next_idx, last_idx;
struct nix_mce_list *mce_list;
- struct mce *mce, *next_mce;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
+ struct mce *mce;
int blkaddr;
/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
@@ -1728,31 +1899,31 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+ err = nix_update_mce_list(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
-
- if (!mce_list->count)
+ if (!mce_list->count) {
+ rvu_npc_disable_bcast_entry(rvu, pcifunc);
goto end;
- count = mce_list->count;
+ }
/* Dump the updated list to HW */
+ idx = pfvf->bcast_mce_idx;
+ last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
- next_idx = 0;
- count--;
- if (count) {
- next_mce = hlist_entry(mce->node.next,
- struct mce, node);
- next_idx = next_mce->idx;
- }
+ if (idx > last_idx)
+ break;
+
+ next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, mce->idx,
- NIX_AQ_INSTOP_WRITE, mce->pcifunc,
- next_idx, count ? false : true);
+ err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
+ idx++;
}
end:
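The rework above drops the per-node idx bookkeeping: broadcast MCEs for a PF now occupy a contiguous index range starting at pfvf->bcast_mce_idx, so the hardware next pointer is simply idx + 1 and EOL marks the final entry. A minimal user-space model of the dump loop, with write_mce() standing in for nix_setup_mce():

static void dump_mce_range(int base, int count,
			   void (*write_mce)(int idx, int next, bool eol))
{
	int idx, last = base + count - 1;

	for (idx = base; idx <= last; idx++)
		write_mce(idx, idx + 1, idx == last);	/* EOL on last entry */
}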
@@ -1849,8 +2020,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
+ int err, lvl, schq;
u64 cfg, reg;
- int err, lvl;
/* Get scheduler queue count of each type and alloc
* bitmap for each for alloc/free/attach operations.
@@ -1888,7 +2059,8 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
sizeof(u32), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
- memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
+ for (schq = 0; schq < txsch->schq.max; schq++)
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
return 0;
}
@@ -2032,51 +2204,82 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
if (field_marker)
memset(&tmp, 0, sizeof(tmp));
+ field_marker = true;
+ keyoff_marker = true;
switch (key_type) {
case NIX_FLOW_KEY_TYPE_PORT:
field->sel_chan = true;
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
- field_marker = true;
- keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP;
+ }
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
field->ltype_mask = 0xF; /* Match only IPv4 */
- field_marker = true;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV6:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP6;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP6;
+ }
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
field->ltype_mask = 0xF; /* Match only IPv6 */
- field_marker = true;
- keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
case NIX_FLOW_KEY_TYPE_SCTP:
+ case NIX_FLOW_KEY_TYPE_INNR_TCP:
+ case NIX_FLOW_KEY_TYPE_INNR_UDP:
+ case NIX_FLOW_KEY_TYPE_INNR_SCTP:
field->lid = NPC_LID_LD;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
+ field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
- if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
+
+ /* Enum values for NPC_LID_LD and NPC_LID_LH are the same,
+ * so no need to change the ltype_match, just change
+ * the lid for inner protocols
+ */
+ BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
+ (int)NPC_LT_LH_TU_TCP);
+ BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
+ (int)NPC_LT_LH_TU_UDP);
+ BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
+ (int)NPC_LT_LH_TU_SCTP);
+
+ if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_TCP;
group_member = true;
- } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_UDP;
group_member = true;
- } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_SCTP;
group_member = true;
}
field->ltype_mask = ~field->ltype_match;
- if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
/* Handle the case where any of the group item
* is enabled in the group but not the final one
*/
@@ -2084,13 +2287,73 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
valid_key = true;
group_member = false;
}
- field_marker = true;
- keyoff_marker = true;
} else {
field_marker = false;
keyoff_marker = false;
}
break;
+ case NIX_FLOW_KEY_TYPE_NVGRE:
+ field->lid = NPC_LID_LD;
+ field->hdr_offset = 4; /* VSID offset */
+ field->bytesm1 = 2;
+ field->ltype_match = NPC_LT_LD_NVGRE;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VXLAN:
+ case NIX_FLOW_KEY_TYPE_GENEVE:
+ field->lid = NPC_LID_LE;
+ field->bytesm1 = 2;
+ field->hdr_offset = 4;
+ field->ltype_mask = 0xF;
+ field_marker = false;
+ keyoff_marker = false;
+
+ if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
+ field->ltype_match |= NPC_LT_LE_VXLAN;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
+ field->ltype_match |= NPC_LT_LE_GENEVE;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
+ if (group_member) {
+ field->ltype_mask = ~field->ltype_match;
+ field_marker = true;
+ keyoff_marker = true;
+ valid_key = true;
+ group_member = false;
+ }
+ }
+ break;
+ case NIX_FLOW_KEY_TYPE_ETH_DMAC:
+ case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
+ field->lid = NPC_LID_LA;
+ field->ltype_match = NPC_LT_LA_ETHER;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
+ field->lid = NPC_LID_LF;
+ field->ltype_match = NPC_LT_LF_TU_ETHER;
+ }
+ field->hdr_offset = 0;
+ field->bytesm1 = 5; /* DMAC 6 Byte */
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6_EXT:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 40; /* IPV6 hdr */
+ field->bytesm1 = 0; /* 1 byte ext hdr */
+ field->ltype_match = NPC_LT_LC_IP6_EXT;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_GTPU:
+ field->lid = NPC_LID_LE;
+ field->hdr_offset = 4;
+ field->bytesm1 = 3; /* 4 bytes TID */
+ field->ltype_match = NPC_LT_LE_GTPU;
+ field->ltype_mask = 0xF;
+ break;
}
field->ena = 1;
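Each case above fills one extraction descriptor: lid selects the parsed layer, ltype_match/ltype_mask qualify the layer type, and hdr_offset/bytesm1 pick the bytes fed into the RSS flow key. An abbreviated model using the outer-IPv4 values from this hunk (struct layout simplified for illustration):

struct flowkey_field_model {
	unsigned int hdr_offset;	/* byte offset within the matched layer */
	unsigned int bytesm1;		/* bytes to extract, minus one */
	unsigned int ltype_mask;	/* 0xF: match the full layer type */
};

/* outer IPv4: 8 bytes (SIP + DIP) starting at header offset 12 */
static const struct flowkey_field_model ipv4_sip_dip = {
	.hdr_offset = 12,
	.bytesm1 = 7,
	.ltype_mask = 0xF,
};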
@@ -2449,8 +2712,6 @@ linkcfg:
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
-
return 0;
}
@@ -2591,9 +2852,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link),
tx_credits);
- rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_EXPR_CREDIT(link),
- tx_credits);
}
}
@@ -2605,8 +2863,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
- rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
}
}
@@ -2674,6 +2930,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of SQB aka SQEs */
+ cfg |= 0x04ULL;
+#endif
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
/* Result structure can be followed by RQ/SQ/CQ context at
@@ -2704,13 +2964,25 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
- /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
- * internal state when conditional clocks are turned off.
- * Hence enable them.
- */
- if (is_rvu_9xxx_A0(rvu))
+ if (is_rvu_96xx_B0(rvu)) {
+ /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
- rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+ /* Disable the SQ manager's sticky mode operation (set TM6 = 0).
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to the same SMQ and transmit pkts at the same time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+ }
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
@@ -2763,23 +3035,23 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
(NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
(NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
(NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
0x0F);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
@@ -2825,7 +3097,7 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
-static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -2853,7 +3125,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
- return 0;
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
@@ -2867,7 +3140,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
- return 0;
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
@@ -2883,6 +3157,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ rvu_cgx_start_stop_io(rvu, pcifunc, false);
+
if (pfvf->sq_ctx) {
ctx_req.ctype = NIX_AQ_CTYPE_SQ;
err = nix_lf_hwctx_disable(rvu, &ctx_req);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index c0e165dfc403..6e7c7f459f74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp)
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+ struct npa_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NPA_AQ_INSTOP_INIT)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+ lock_ctx_req.aura_id = req->aura_id;
+ err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+ req->hdr.pcifunc,
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", req->aura_id);
+ return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = npa_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
return NPA_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
@@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
/* Clear way partition mask and set aura offset to '0' */
cfg &= ~(BIT_ULL(34) - 1);
/* Set aura size & enable caching of contexts */
- cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
/* Configure aura HW context's base */
@@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
(u64)pfvf->aura_ctx->iova);
/* Enable caching of qints hw context */
- rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+ BIT_ULL(36) | req->way_mask << 20);
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
(u64)pfvf->npa_qints_ctx->iova);
@@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
/* Result structure can be followed by Aura/Pool context at
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 15f70273e29c..40e431debbe9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -120,6 +120,31 @@ static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
}
+static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
+ }
+}
+
static void npc_get_keyword(struct mcam_entry *entry, int idx,
u64 *cam0, u64 *cam1)
{
@@ -211,6 +236,12 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
actindex = index;
index &= (mcam->banksize - 1);
+ /* Disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
+
+ /* Clear mcam entry to avoid writes being suppressed by NPC */
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
+
/* CAM1 takes the comparison value and
* CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
* CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
@@ -251,8 +282,6 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
/* Enable the entry */
if (enable)
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
- else
- npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
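The CAM1/CAM0 comment above implies a simple per-bit encoding: CAM1 carries the value to match, CAM0 its inverse, and clearing both bits makes the position don't-care. A sketch of that derivation from a (key, mask) pair, assuming kernel-style types:

static void kw_to_cam(u64 kw, u64 kw_mask, u64 *cam0, u64 *cam1)
{
	*cam1 = kw & kw_mask;	/* bits that must be 1 */
	*cam0 = ~kw & kw_mask;	/* bits that must be 0 */
	/* kw_mask == 0 leaves both CAM bits 0 => don't care */
}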
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
@@ -354,8 +383,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
NIX_INTF_RX, &entry, true);
/* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+ entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
+ entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
entry.vtag_action = VTAG0_VALID_BIT |
FIELD_PREP(VTAG0_TYPE_MASK, 0) |
@@ -448,68 +477,75 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
-#ifdef MCAST_MCE
struct rvu_pfvf *pfvf;
-#endif
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- /* Only PF can add a bcast match entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
+ /* Skip LBK VFs */
+ if (is_afvf(pcifunc))
return;
-#ifdef MCAST_MCE
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
-#endif
+ /* If pkt replication is not supported,
+ * then only PF is allowed to add a bcast match entry.
+ */
+ if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Check for L2B bit and LMAC channel
- * NOTE: Since MKEX default profile(a reduced version intended to
- * accommodate more capability but igoring few bits) a stap-gap
- * approach.
- * Since we care for L2B which by HRM NPC_PARSE_KEX_S at BIT_POS[25], So
- * moved to BIT_POS[13], ignoring ERRCODE, ERRLEV as we'll loose out
- * on capability features needed for CoS (/from ODP PoV) e.g: VLAN,
- * DSCP.
- *
- * Reduced layout of MKEX default profile -
- * Includes following are (i.e.CHAN, L2/3{B/M}, LA, LB, LC, LD):
- *
- * BIT_POS[31:28] : LD
- * BIT_POS[27:24] : LC
- * BIT_POS[23:20] : LB
- * BIT_POS[19:16] : LA
- * BIT_POS[15:12] : L3B, L3M, L2B, L2M
- * BIT_POS[11:00] : CHAN
- *
+ /* Match ingress channel */
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xfffull;
+
+ /* Match broadcast MAC address.
+ * DMAC is extracted at bit 0 of PARSE_KEX::KW1
*/
- entry.kw[0] = BIT_ULL(13) | chan;
- entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
+ entry.kw[1] = 0xffffffffffffull;
+ entry.kw_mask[1] = 0xffffffffffffull;
*(u64 *)&action = 0x00;
-#ifdef MCAST_MCE
- /* Early silicon doesn't support pkt replication,
- * so install entry with UCAST action, so that PF
- * receives all broadcast packets.
- */
- action.op = NIX_RX_ACTIONOP_MCAST;
- action.pf_func = pcifunc;
- action.index = pfvf->bcast_mce_idx;
-#else
- action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
-#endif
+ if (!hw->cap.nix_rx_multicast) {
+ /* Early silicon doesn't support pkt replication,
+ * so install entry with UCAST action, so that PF
+ * receives all broadcast packets.
+ */
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ } else {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ action.index = pfvf->bcast_mce_idx;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ }
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
}
+void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
@@ -704,8 +740,7 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
/* Layer B: Stacked VLAN (STAG|QinQ) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
/* Layer C: IPv4 */
/* SIP+DIP: 8 bytes, KW2[63:0] */
@@ -806,11 +841,11 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+ /* Due to an erratum (35786) in A0/B0 pass silicon,
+ /* Due to an errata (35786) in A0/B0 pass silicon,
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
- if (is_rvu_9xxx_A0(rvu) &&
+ if (is_rvu_96xx_B0(rvu) &&
mcam_kex->keyx_cfg[NIX_INTF_RX] !=
mcam_kex->keyx_cfg[NIX_INTF_TX])
goto load_default;
@@ -1064,6 +1099,13 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
+ /* Reserve the last counter for the MCAM RX miss action, which is set
+ * to drop pkts. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -1101,6 +1143,7 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
u64 keyz = NPC_MCAM_KEY_X2;
int blkaddr, entry, bank, err;
u64 cfg, nibble_ena;
@@ -1143,7 +1186,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
@@ -1165,7 +1208,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
- if (!is_rvu_9xxx_A0(rvu))
+ if (!is_rvu_96xx_B0(rvu))
nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
((keyz & 0x3) << 32) | nibble_ena);
@@ -1183,9 +1226,13 @@ int rvu_npc_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
NIX_TX_ACTIONOP_UCAST_DEFAULT);
- /* If MCAM lookup doesn't result in a match, drop the received packet */
+ /* If MCAM lookup doesn't result in a match, drop the received packet.
+ * And map this action to a counter to count dropped pkts.
+ */
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
NIX_RX_ACTIONOP_DROP);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
+ BIT_ULL(9) | mcam->rx_miss_act_cntr);
return 0;
}
@@ -1200,6 +1247,44 @@ void rvu_npc_freemem(struct rvu *rvu)
mutex_destroy(&mcam->lock);
}
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int entry;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (entry = 0; entry < mcam->bmap_entries; entry++) {
+ if (mcam->entry2pfvf_map[entry] == pcifunc) {
+ (*alloc_cnt)++;
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
+ (*enable_cnt)++;
+ }
+ }
+}
+
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int cntr;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ (*alloc_cnt)++;
+ if (mcam->cntr_refcnt[cntr])
+ (*enable_cnt)++;
+ }
+ }
+}
+
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 09a8d61f3144..7ca599b973c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -246,6 +246,7 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
#define NIX_AF_TX_EXPR_CREDIT (0x830)
@@ -435,7 +436,6 @@
#define CPT_AF_LF_RST (0x44000)
#define CPT_AF_BLK_RST (0x46000)
-#define NDC_AF_BLK_RST (0x002F0)
#define NPC_AF_BLK_RST (0x00040)
/* NPC */
@@ -499,4 +499,30 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+/* NDC */
+#define NDC_AF_CONST (0x00000)
+#define NDC_AF_CLK_EN (0x00020)
+#define NDC_AF_CTL (0x00030)
+#define NDC_AF_BANK_CTL (0x00040)
+#define NDC_AF_BANK_CTL_DONE (0x00048)
+#define NDC_AF_INTR (0x00058)
+#define NDC_AF_INTR_W1S (0x00060)
+#define NDC_AF_INTR_ENA_W1S (0x00068)
+#define NDC_AF_INTR_ENA_W1C (0x00070)
+#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_BP_TEST_ENABLE (0x001F8)
+#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
+#define NDC_AF_BLK_RST (0x002F0)
+#define NDC_PRIV_AF_INT_CFG (0x002F8)
+#define NDC_AF_HASHX(a) (0x00300 | (a) << 3)
+#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
+ (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
+ (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
+ (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
+ (0x00F00 | (a) << 5 | (b) << 4)
+#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
+#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
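As a worked example of the offset encoding above (the port/transaction/read-write meanings of a, b, c are inferred from the macro names):

/* 0x00C00 | (1 << 5) | (0 << 4) | (1 << 3) = 0x00C28 */
static const u64 ndc_req_pc_example = NDC_AF_PORTX_RTX_RWX_REQ_PC(1, 0, 1);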
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index f920dac74e6c..9d8942acc232 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -13,22 +13,22 @@
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
- BLKADDR_RVUM = 0x0ULL,
- BLKADDR_LMT = 0x1ULL,
- BLKADDR_MSIX = 0x2ULL,
- BLKADDR_NPA = 0x3ULL,
- BLKADDR_NIX0 = 0x4ULL,
- BLKADDR_NIX1 = 0x5ULL,
- BLKADDR_NPC = 0x6ULL,
- BLKADDR_SSO = 0x7ULL,
- BLKADDR_SSOW = 0x8ULL,
- BLKADDR_TIM = 0x9ULL,
- BLKADDR_CPT0 = 0xaULL,
- BLKADDR_CPT1 = 0xbULL,
- BLKADDR_NDC0 = 0xcULL,
- BLKADDR_NDC1 = 0xdULL,
- BLKADDR_NDC2 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLK_COUNT = 0xfULL,
};
/* RVU Block Type Enumeration */
@@ -474,9 +474,9 @@ struct nix_cq_ctx_s {
u64 ena : 1;
u64 drop_ena : 1;
u64 drop : 8;
- u64 dp : 8;
+ u64 bp : 8;
#else
- u64 dp : 8;
+ u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 51b77c2de400..3fb7ee3d4d13 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1489,8 +1489,10 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
- pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
of_node_put(np);
+ err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
+ if (err && err != -ENODEV)
+ goto err_netdev;
}
/* Hardware supports only 3 ports */
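This hunk (and the mtk_eth ones below) follows the reworked of_get_phy_mode() convention: the mode comes back through a phy_interface_t pointer and the return value is 0 or a negative errno, with -ENODEV meaning the phy-mode property is simply absent. A schematic caller:

phy_interface_t mode;
int err;

err = of_get_phy_mode(np, &mode);
if (err == -ENODEV)
	mode = PHY_INTERFACE_MODE_NA;	/* property absent: fall back */
else if (err)
	return err;			/* property present but malformed */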
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index ef11cf3d1ccc..0fe97155dd8f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -57,7 +57,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
default:
updated = false;
break;
- };
+ }
if (updated) {
val = mtk_r32(eth, MTK_MAC_MISC);
@@ -143,7 +143,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
default:
updated = false;
break;
- };
+ }
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
@@ -174,7 +174,7 @@ static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
break;
default:
updated = false;
- };
+ }
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 703adb96429e..527ad2aadcca 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -361,8 +361,8 @@ init_err:
mac->id, phy_modes(state->interface), err);
}
-static int mtk_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mtk_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
@@ -391,8 +391,6 @@ static int mtk_mac_link_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_RX;
if (pmsr & MAC_MSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static void mtk_mac_an_restart(struct phylink_config *config)
@@ -514,7 +512,7 @@ static void mtk_validate(struct phylink_config *config,
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = mtk_validate,
- .mac_link_state = mtk_mac_link_state,
+ .mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
.mac_link_down = mtk_mac_link_down,
@@ -2180,6 +2178,31 @@ static int mtk_start_dma(struct mtk_eth *eth)
return 0;
}
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+ int i;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+ /* by default, set up the forward port to send frames to PDMA */
+ val &= ~0xffff;
+
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+ val |= config;
+
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
+}
+
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -2200,6 +2223,8 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
+ mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
@@ -2252,6 +2277,8 @@ static int mtk_stop(struct net_device *dev)
if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
@@ -2375,8 +2402,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
@@ -2385,19 +2410,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
-
- /* setup the forward port to send frame to PDMA */
- val &= ~0xffff;
-
- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
-
- /* setup the mac dma */
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
-
return 0;
err_disable_pm:
@@ -2758,9 +2770,10 @@ static const struct net_device_ops mtk_netdev_ops = {
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
+ phy_interface_t phy_mode;
struct phylink *phylink;
- int phy_mode, id, err;
struct mtk_mac *mac;
+ int id, err;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2805,10 +2818,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
/* phylink create */
- phy_mode = of_get_phy_mode(np);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(np, &phy_mode);
+ if (err) {
dev_err(eth->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto free_netdev;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 76bd12cb8150..85830fe14a1b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -84,6 +84,8 @@
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_TO_PDMA 0x0
+#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 4db27dfc7ec1..32d83421226a 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -93,7 +93,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
case SPEED_1000:
val |= SGMII_SPEED_1000;
break;
- };
+ }
if (state->duplex == DUPLEX_FULL)
val |= SGMII_DUPLEX_FULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d8313e2ee600..a1202e53710c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
+ cmd->data = MAX_NUM_OF_FS_RULES;
while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
err = mlx4_en_get_flow(dev, cmd, i);
if (!err)
@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
+ int total_tx_count;
int port_up = 0;
int xdp_count;
int err = 0;
@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
- if (channel->tx_count * priv->prof->num_up + xdp_count >
- priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
+ total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
+ if (total_tx_count > MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
- channel->tx_count * priv->prof->num_up + xdp_count,
- MAX_TX_RINGS);
+ total_tx_count, MAX_TX_RINGS);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 40ec5acf79c0..7af75b63245f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
+ int total_count;
int port_up = 0;
int err = 0;
@@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
MLX4_EN_NUM_UP_HIGH;
new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
new_prof.num_up;
+ total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
+ if (total_count > MAX_TX_RINGS) {
+ err = -EINVAL;
+ en_err(priv,
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+ total_count, MAX_TX_RINGS);
+ goto out;
+ }
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
@@ -2286,11 +2295,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
lockdep_is_held(&priv->mdev->state_lock));
if (xdp_prog && carry_xdp_prog) {
- xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
- if (IS_ERR(xdp_prog)) {
- mlx4_en_free_resources(tmp);
- return PTR_ERR(xdp_prog);
- }
+ bpf_prog_add(xdp_prog, tmp->rx_ring_num);
for (i = 0; i < tmp->rx_ring_num; i++)
rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
xdp_prog);
@@ -2782,11 +2787,9 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
* program for a new one.
*/
if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
- if (prog) {
- prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
- }
+ if (prog)
+ bpf_prog_add(prog, priv->rx_ring_num - 1);
+
mutex_lock(&mdev->state_lock);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
@@ -2807,13 +2810,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (!tmp)
return -ENOMEM;
- if (prog) {
- prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto out;
- }
- }
+ if (prog)
+ bpf_prog_add(prog, priv->rx_ring_num - 1);
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
@@ -2862,7 +2860,6 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
unlock_out:
mutex_unlock(&mdev->state_lock);
-out:
kfree(tmp);
return err;
}
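The conversions in this file (and in mlx5 below) follow from bpf_prog_add() no longer returning a pointer in this series: it takes the references unconditionally and returns void, so the IS_ERR()/PTR_ERR() unwinding disappears. Schematically:

/* before: the call could fail and had to be checked */
prog = bpf_prog_add(prog, n);
if (IS_ERR(prog))
	return PTR_ERR(prog);

/* after: just take the references */
if (prog)
	bpf_prog_add(prog, n);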
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 69bb6bb06e76..5716c3d2bb86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3934,13 +3934,17 @@ static void mlx4_restart_one_down(struct pci_dev *pdev);
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink);
-static int mlx4_devlink_reload_down(struct devlink *devlink,
+static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_dev_persistent *persist = dev->persist;
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
if (persist->num_vfs)
mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
mlx4_restart_one_down(persist->pdev);
@@ -4010,6 +4014,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
pci_save_state(pdev);
return 0;
@@ -4121,6 +4126,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
+ devlink_reload_disable(devlink);
+
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 5708fcc079ca..a6f390fdb971 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -70,7 +70,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o steering/dr_crc32.o \
+ steering/dr_icm_pool.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index ea934cd02448..34cba97f7bf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -866,7 +866,7 @@ static void cmd_work_handler(struct work_struct *work)
if (!ent->page_queue) {
alloc_ret = alloc_ent(cmd);
if (alloc_ret < 0) {
- mlx5_core_err(dev, "failed to allocate command entry\n");
+ mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
ent->callback(-EAGAIN, ent->context);
mlx5_free_cmd_msg(dev, ent->out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 381925c90d94..ac108f1e5bd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -85,6 +85,22 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
return 0;
}
+static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ return mlx5_unload_one(dev, false);
+}
+
+static int mlx5_devlink_reload_up(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ return mlx5_load_one(dev, false);
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -96,6 +112,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
+ .reload_down = mlx5_devlink_reload_down,
+ .reload_up = mlx5_devlink_reload_up,
};
struct devlink *mlx5_devlink_alloc(void)
@@ -177,12 +195,29 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
};
+static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_enable_roce_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -197,6 +232,11 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
value);
+
+ value.vbool = MLX5_CAP_GEN(dev, roce);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
@@ -213,6 +253,7 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
return 0;
params_reg_err:
@@ -222,6 +263,7 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ devlink_reload_disable(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 633b117eb13e..7b672ada63a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -175,7 +175,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @port_buffer: <output> port receive buffer configuration
* @change: <output>
*
- * Update buffer configuration based on pfc configuraiton and
+ * Update buffer configuration based on pfc configuration and
* priority to buffer mapping.
* Buffer's lossy bit is changed to:
* lossless if there is at least one PFC enabled priority
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index b860569d4247..6c72b592315b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -222,7 +222,8 @@ static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
}
static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
- void *context)
+ void *context,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_err_ctx *err_ctx = context;
@@ -301,7 +302,8 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_params *params = &priv->channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bfed558637c2..b468549e96ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -135,7 +135,8 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
}
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
- void *context)
+ void *context,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_err_ctx *err_ctx = context;
@@ -205,7 +206,8 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
}
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 13af72556987..784b1e26f414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,8 +77,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct neighbour **out_n,
u8 *out_ttl)
{
+ struct neighbour *n;
struct rtable *rt;
- struct neighbour *n = NULL;
#if IS_ENABLED(CONFIG_INET)
struct mlx5_core_dev *mdev = priv->mdev;
@@ -138,10 +138,9 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct neighbour **out_n,
u8 *out_ttl)
{
- struct neighbour *n = NULL;
struct dst_entry *dst;
+ struct neighbour *n;
-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int ret;
ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
@@ -157,9 +156,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
dst_release(dst);
return ret;
}
-#else
- return -EOPNOTSUPP;
-#endif
n = dst_neigh_lookup(dst, &fl6->daddr);
dst_release(dst);
@@ -212,8 +208,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
- struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ struct neighbour *n;
int ipv4_encap_size;
char *encap_header;
u8 nud_state, ttl;
@@ -239,12 +235,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv4_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto release_neigh;
}
encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -295,7 +294,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto out;
+ goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
@@ -315,9 +314,8 @@ destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
+release_neigh:
+ neigh_release(n);
return err;
}
@@ -328,9 +326,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
- struct neighbour *n = NULL;
struct flowi6 fl6 = {};
struct ipv6hdr *ip6h;
+ struct neighbour *n;
int ipv6_encap_size;
char *encap_header;
u8 nud_state, ttl;
@@ -355,12 +353,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv6_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto release_neigh;
}
encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -410,7 +411,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto out;
+ goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
@@ -431,9 +432,8 @@ destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
+release_neigh:
+ neigh_release(n);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index c362b9225dc2..6f9a78c85ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -58,9 +58,16 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+#else
+static inline int
+mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
+#endif
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 327c93a7bd55..95601269fa2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
- u8 connector_type)
+ u8 connector_type, bool ext)
{
- if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
{
- if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
return ptys2connector_type[connector_type];
if (eth_proto &
@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
link_ksettings->base.port = get_connector_port(eth_proto_oper,
- connector_type);
+ connector_type, ext);
ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
- connector_type);
+ connector_type, ext);
get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
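The added ext flag appears to distinguish the extended protocol mode, where a connector_type of 0 is a legitimate table index, from the legacy mode, where 0 means "not reported" and the port type must be inferred from the protocol mask. A toy model of that guard; the names and table values are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define TYPE_NUMBER 4
static const char *type_table[TYPE_NUMBER] = {
        "none", "fibre", "copper", "other"
};

static const char *resolve_port(unsigned int connector_type, bool ext)
{
        if ((connector_type || ext) && connector_type < TYPE_NUMBER)
                return type_table[connector_type];      /* trust the table */
        return "inferred-from-proto";                   /* legacy 0: infer */
}

int main(void)
{
        printf("%s\n", resolve_port(0, false));         /* inferred-from-proto */
        printf("%s\n", resolve_port(0, true));          /* none */
        printf("%s\n", resolve_port(2, false));         /* copper */
        return 0;
}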
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 772bfdbdeb9c..09ed7f5f688b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
+#include "lib/mlx5.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -408,12 +409,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->stats = &c->priv->channel_stats[c->ix].rq;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
- rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
- if (IS_ERR(rq->xdp_prog)) {
- err = PTR_ERR(rq->xdp_prog);
- rq->xdp_prog = NULL;
- goto err_rq_wq_destroy;
- }
+ if (params->xdp_prog)
+ bpf_prog_inc(params->xdp_prog);
+ rq->xdp_prog = params->xdp_prog;
rq_xdp_ix = rq->ix;
if (xsk)
@@ -4252,9 +4250,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
switch (proto) {
case IPPROTO_GRE:
+ return features;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
- return features;
+ if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+ return features;
+ break;
case IPPROTO_UDP:
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest);
@@ -4406,16 +4407,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
- if (was_opened && !reset) {
+ if (was_opened && !reset)
/* num_channels is invariant here, so we can take the
* batched reference right upfront.
*/
- prog = bpf_prog_add(prog, priv->channels.num);
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto unlock;
- }
- }
+ bpf_prog_add(prog, priv->channels.num);
if (was_opened && reset) {
struct mlx5e_channels new_channels = {};
@@ -5427,6 +5423,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
}
+ dev_net_set(netdev, mlx5_core_net(mdev));
priv = netdev_priv(netdev);
err = mlx5e_attach(mdev, priv);
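The en_main.c hunks drop the IS_ERR() handling around bpf_prog_inc()/bpf_prog_add(); this tracks the BPF refcount conversion in which taking a program reference can no longer fail, so the helpers became void. A sketch of why the error paths disappear, using stand-in types rather than the real bpf API:

#include <stdint.h>
#include <stdio.h>

struct prog { uint64_t refcnt; };

static void prog_inc(struct prog *p)    /* cannot fail, hence void */
{
        p->refcnt++;    /* 64-bit count: overflow is not a practical concern */
}

static int attach(struct prog *p)
{
        if (p)
                prog_inc(p);            /* no IS_ERR() dance needed anymore */
        /* ... rq->xdp_prog = p; ... */
        return 0;
}

int main(void)
{
        struct prog p = { .refcnt = 1 };

        attach(&p);
        printf("refcnt=%llu\n", (unsigned long long)p.refcnt); /* 2 */
        return 0;
}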
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index cd9bb7c7b341..f175cb24bb67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -47,6 +47,7 @@
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
+#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
@@ -1243,21 +1244,60 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
}
}
-static LIST_HEAD(mlx5e_rep_block_cb_list);
+static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload cls_flower;
+ struct mlx5e_priv *priv = cb_priv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ flags = MLX5_TC_FLAG(INGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+ esw = priv->mdev->priv.eswitch;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+ return -EOPNOTSUPP;
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ */
+ memcpy(&cls_flower, f, sizeof(*f));
+ cls_flower.common.chain_index = FDB_FT_CHAIN;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
+ memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
+static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct flow_block_offload *f = type_data;
+ f->unlocked_driver_cb = true;
+
switch (type) {
case TC_SETUP_BLOCK:
- f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_cb_list,
+ &mlx5e_rep_block_tc_cb_list,
mlx5e_rep_setup_tc_cb,
priv, priv, true);
+ case TC_SETUP_FT:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_ft_cb_list,
+ mlx5e_rep_setup_ft_cb,
+ priv, priv, true);
default:
return -EOPNOTSUPP;
}
@@ -1877,6 +1917,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
+ dev_net_set(netdev, mlx5_core_net(dev));
rpriv->netdev = netdev;
rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
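mlx5e_rep_setup_ft_cb() above reuses the existing flower offload path by copying the FT rule, retargeting the copy at FDB_FT_CHAIN, a chain number tc itself can never request, and then copying the hardware stats back into the caller's structure. A compact model of that reserved-chain trick; the constants and stubs are illustrative:

#include <stdio.h>
#include <string.h>

#define TC_MAX_CHAIN    3
#define FT_CHAIN        (TC_MAX_CHAIN + 1)      /* reserved, unreachable from tc */

struct cls_offload {
        unsigned int chain_index;
        unsigned long stats;
};

static int offload_flower(struct cls_offload *f)
{
        f->stats = 1;                           /* pretend hardware counters */
        return 0;
}

static int setup_ft_cb(struct cls_offload *f)
{
        struct cls_offload cls;
        int err;

        if (f->chain_index)                     /* FT rules must start at chain 0 */
                return -95;

        memcpy(&cls, f, sizeof(*f));
        cls.chain_index = FT_CHAIN;             /* re-use the tc offload path */
        err = offload_flower(&cls);
        f->stats = cls.stats;                   /* hand stats back to the original */
        return err;
}

int main(void)
{
        struct cls_offload f = { .chain_index = 0 };

        printf("err=%d stats=%lu\n", setup_ft_cb(&f), f.stats);
        return 0;
}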
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 82cffb3a9964..9e9960146e5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1386,6 +1386,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
if (rq->cqd.left || work_done >= budget)
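The RX hunk lets the page pool track NUMA drift: the poll loop runs on whichever CPU currently services the interrupt, so it is a cheap place to nudge the pool's allocation node toward numa_mem_id(). A stub sketch of the check; pool_nid_changed() and numa_mem_id_stub() are stand-ins for the kernel page_pool API, not that API itself:

#include <stdio.h>

struct pool { int nid; };

static int numa_mem_id_stub(void)       /* node of the executing CPU */
{
        return 1;
}

static void pool_nid_changed(struct pool *p, int nid)
{
        if (p->nid != nid)
                p->nid = nid;           /* future allocations follow the CPU */
}

static int poll_rx(struct pool *p)
{
        if (p)                          /* mirrors the "if (rq->page_pool)" guard */
                pool_nid_changed(p, numa_mem_id_stub());
        return 0;
}

int main(void)
{
        struct pool p = { .nid = 0 };

        poll_rx(&p);
        printf("pool nid=%d\n", p.nid); /* 1 */
        return 0;
}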
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fda0b37075e8..0d5d84b5fa23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -74,6 +74,7 @@ enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
@@ -276,6 +277,11 @@ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
+static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, FT);
+}
+
static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
@@ -1074,7 +1080,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1091,7 +1097,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1168,7 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->chain > max_chain) {
+ /* We check chain range only for tc flows.
+ * For ft flows, we checked attr->chain was originally 0 and set it to
+ * FDB_FT_CHAIN which is outside tc range.
+ * See mlx5e_rep_setup_ft_cb().
+ */
+ if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
@@ -2241,13 +2252,14 @@ out_err:
struct mlx5_fields {
u8 field;
- u8 size;
+ u8 field_bsize;
+ u32 field_mask;
u32 offset;
u32 match_offset;
};
-#define OFFLOAD(fw_field, size, field, off, match_field) \
- {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
offsetof(struct pedit_headers, field) + (off), \
MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
@@ -2265,18 +2277,18 @@ struct mlx5_fields {
})
static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
- void *matchmaskp, int size)
+ void *matchmaskp, u8 bsize)
{
bool same = false;
- switch (size) {
- case sizeof(u8):
+ switch (bsize) {
+ case 8:
same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
break;
- case sizeof(u16):
+ case 16:
same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
break;
- case sizeof(u32):
+ case 32:
same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
break;
}
@@ -2285,41 +2297,43 @@ static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
}
static struct mlx5_fields fields[] = {
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
- OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0),
- OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
- OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0),
- OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype),
- OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid),
-
- OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit),
- OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-
- OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+ OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
+ OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
+ OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
+ OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
+ OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
+ OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
+
+ OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
+ OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
+ OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+ OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
- OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0,
+ OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
- OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0,
+ OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
- OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0,
+ OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+ OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
- OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0,
+ OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
- OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0,
+ OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
- OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0,
+ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
- OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
- OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport),
- OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
+ /* in linux tcphdr tcp_flags is 8 bits long */
+ OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
- OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
- OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport),
+ OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
+ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -2332,19 +2346,17 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- void *headers_c = get_match_headers_criteria(*action_flags,
- &parse_attr->spec);
- void *headers_v = get_match_headers_value(*action_flags,
- &parse_attr->spec);
int i, action_size, nactions, max_actions, first, last, next_z;
- void *s_masks_p, *a_masks_p, *vals_p;
+ void *headers_c, *headers_v, *action, *vals_p;
+ u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5_fields *f;
- u8 cmd, field_bsize;
- u32 s_mask, a_mask;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
- void *action;
+ u8 cmd;
+
+ headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
+ headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
@@ -2369,8 +2381,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
- memcpy(&s_mask, s_masks_p, f->size);
- memcpy(&a_mask, a_masks_p, f->size);
+ s_mask = *s_masks_p & f->field_mask;
+ a_mask = *a_masks_p & f->field_mask;
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
@@ -2399,38 +2411,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
vals_p = (void *)set_vals + f->offset;
/* don't rewrite if we have a match on the same value */
if (cmp_val_mask(vals_p, s_masks_p, match_val,
- match_mask, f->size))
+ match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
- memset(s_masks_p, 0, f->size);
+ *s_masks_p &= ~f->field_mask;
} else {
- u32 zero = 0;
-
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
- if (!memcmp(vals_p, &zero, f->size))
+ if ((*(u32 *)vals_p & f->field_mask) == 0)
skip = true;
/* clear to denote we consumed this field */
- memset(a_masks_p, 0, f->size);
+ *a_masks_p &= ~f->field_mask;
}
if (skip)
continue;
- field_bsize = f->size * BITS_PER_BYTE;
-
- if (field_bsize == 32) {
+ if (f->field_bsize == 32) {
mask_be32 = *(__be32 *)&mask;
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (field_bsize == 16) {
+ } else if (f->field_bsize == 16) {
mask_be16 = *(__be16 *)&mask;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
- first = find_first_bit(&mask, field_bsize);
- next_z = find_next_zero_bit(&mask, field_bsize, first);
- last = find_last_bit(&mask, field_bsize);
+ first = find_first_bit(&mask, f->field_bsize);
+ next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ last = find_last_bit(&mask, f->field_bsize);
if (first < next_z && next_z < last) {
NL_SET_ERR_MSG_MOD(extack,
"rewrite of few sub-fields isn't supported");
@@ -2443,16 +2451,22 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
- MLX5_SET(set_action_in, action, offset, first);
+ int start;
+
+ /* a sub-byte (bit-sized) field need not start at bit 0 */
+ start = find_first_bit((unsigned long *)&f->field_mask,
+ f->field_bsize);
+
+ MLX5_SET(set_action_in, action, offset, first - start);
/* length is num of bits to be written, zero means length of 32 */
MLX5_SET(set_action_in, action, length, (last - first + 1));
}
- if (field_bsize == 32)
+ if (f->field_bsize == 32)
MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
- else if (field_bsize == 16)
+ else if (f->field_bsize == 16)
MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
- else if (field_bsize == 8)
+ else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
action += action_size;
@@ -3214,6 +3228,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
bool encap = false;
u32 action = 0;
@@ -3258,6 +3273,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
+ if (ft_flow && out_dev == priv->netdev) {
+ /* Ignore forward to self rules generated
+ * by adding both mlx5 devs to the flow table
+ * block on a normal nft offload setup.
+ */
+ return -EOPNOTSUPP;
+ }
+
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
@@ -3268,7 +3291,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+ if (encap) {
+ parse_attr->mirred_ifindex[attr->out_count] =
+ out_dev->ifindex;
+ parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[attr->out_count])
+ return -ENOMEM;
+ encap = false;
+ attr->dests[attr->out_count].flags |=
+ MLX5_ESW_DEST_ENCAP;
+ attr->out_count++;
+ /* attr->dests[].rep is resolved when we
+ * handle encap
+ */
+ } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
@@ -3310,19 +3346,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dests[attr->out_count].rep = rpriv->rep;
attr->dests[attr->out_count].mdev = out_priv->mdev;
attr->out_count++;
- } else if (encap) {
- parse_attr->mirred_ifindex[attr->out_count] =
- out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
- if (!parse_attr->tun_info[attr->out_count])
- return -ENOMEM;
- encap = false;
- attr->dests[attr->out_count].flags |=
- MLX5_ESW_DEST_ENCAP;
- attr->out_count++;
- /* attr->dests[].rep is resolved when we
- * handle encap
- */
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -3382,6 +3405,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
u32 dest_chain = act->chain_index;
u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
if (dest_chain <= attr->chain) {
NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
return -EOPNOTSUPP;
@@ -3443,6 +3470,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
+ if (!(attr->action &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+ return -EOPNOTSUPP;
+ }
+
if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
@@ -3466,6 +3499,8 @@ static void get_flags(int flags, unsigned long *flow_flags)
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
+ if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
*flow_flags = __flow_flags;
}
@@ -3841,7 +3876,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
int err;
rcu_read_lock();
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
if (!flow || !same_flow_direction(flow, flags)) {
err = -EINVAL;
goto errout;
@@ -4000,9 +4035,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
struct netlink_ext_ack *extack = ma->common.extack;
- int prio = TC_H_MAJ(ma->common.prio) >> 16;
- if (prio != 1) {
+ if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
}
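The widened OFFLOAD() table plus the field_mask handling in offload_pedit_fields() exists mainly for sub-byte fields such as DSCP, whose 0xfc mask sits inside the ToS byte; the hardware offset is rebased on the field's own first mask bit via "first - start". A worked sketch of that offset/length arithmetic, assuming POSIX ffs() and the GCC/Clang __builtin_clz; the mask values are illustrative:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static void plan_set_action(unsigned int field_mask, unsigned int rewrite_mask)
{
        int start = ffs(field_mask) - 1;        /* first bit of the field itself */
        int first = ffs(rewrite_mask) - 1;      /* first bit being rewritten */
        int last = 31 - __builtin_clz(rewrite_mask);

        printf("offset=%d length=%d\n", first - start, last - first + 1);
}

int main(void)
{
        plan_set_action(0xfc, 0xfc);    /* DSCP, full field: offset=0 length=6 */
        plan_set_action(0xff, 0xff);    /* TTL:              offset=0 length=8 */
        plan_set_action(0xfc, 0xe0);    /* DSCP, top bits:   offset=3 length=3 */
        return 0;
}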
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 924c6ef86a14..262cdb7b69b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -44,7 +44,8 @@ enum {
MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
- MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 67dc4f0921b6..66951ff975f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -461,8 +461,14 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
+ struct mlx5e_tx_wqe_info *wi;
+ u16 ci;
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
mlx5e_dump_error_cqe(sq,
(struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq,
&sq->recover_work);
}
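The TX hunk dumps the WQE that produced the error CQE: the SQ consumer counter is folded into a ring index (the cyclic WQ size is a power of two, so a mask suffices) and the matching wqe_info supplies the dump length. A toy version of that counter-to-index step with stand-in structures:

#include <stdio.h>

#define RING_SIZE 8                     /* power of two, as in the cyclic WQ */

struct wqe_info { int num_wqebbs; };

static unsigned int ctr2ix(unsigned int ctr)
{
        return ctr & (RING_SIZE - 1);   /* mirrors mlx5_wq_cyc_ctr2ix() */
}

int main(void)
{
        struct wqe_info db[RING_SIZE] = { [5] = { .num_wqebbs = 2 } };
        unsigned int sqcc = 13;         /* consumer counter at error time */
        unsigned int ci = ctr2ix(sqcc); /* 13 & 7 == 5 */

        printf("dump wqe at ci=%u, %d wqebbs\n", ci, db[ci].num_wqebbs);
        return 0;
}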
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 30aae76b6a1d..2c965ad0d744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -111,42 +111,32 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
}
/* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
- void *in, int inlen)
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
+ void *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
- void *in, int inlen)
-{
- return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
-}
-
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
- void *out, int outlen)
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
+ void *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
MLX5_SET(query_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
- void *out, int outlen)
-{
- return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
-}
-
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
@@ -179,7 +169,8 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
- return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+ return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
+ in, sizeof(in));
}
/* E-Switch FDB */
@@ -452,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+ esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_vepa_table(esw);
+}
+
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
MLX5_VPORT_MC_ADDR_CHANGE | \
MLX5_VPORT_PROMISC_CHANGE)
@@ -464,15 +462,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw)
if (ret)
return ret;
- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
- return 0;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
- esw_cleanup_vepa_rules(esw);
- esw_destroy_legacy_fdb_table(esw);
- esw_destroy_legacy_vepa_table(esw);
+ ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ if (ret)
+ esw_destroy_legacy_table(esw);
+ return ret;
}
static void esw_legacy_disable(struct mlx5_eswitch *esw)
@@ -501,7 +494,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Skip mlx5_mpfs_add_mac for eswitch_managers,
* it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (esw->manager_vport == vport)
+ if (mlx5_esw_is_manager_vport(esw, vport))
goto fdb_add;
err = mlx5_mpfs_add_mac(esw->dev, mac);
@@ -530,10 +523,10 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
u16 vport = vaddr->vport;
int err = 0;
- /* Skip mlx5_mpfs_del_mac for eswitch managerss,
+ /* Skip mlx5_mpfs_del_mac for eswitch managers,
* it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (!vaddr->mpfs || esw->manager_vport == vport)
+ if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
goto fdb_del;
err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -1040,14 +1033,15 @@ out:
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
mlx5_del_flow_rules(vport->egress.allowed_vlan);
+ vport->egress.allowed_vlan = NULL;
+ }
- if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
- mlx5_del_flow_rules(vport->egress.drop_rule);
-
- vport->egress.allowed_vlan = NULL;
- vport->egress.drop_rule = NULL;
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+ mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+ vport->egress.legacy.drop_rule = NULL;
+ }
}
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -1067,57 +1061,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
- */
- int table_size = 4;
- int err = 0;
-
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- return 0;
-
- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- mlx5_eswitch_vport_num_to_index(esw, vport->vport));
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
- return -EOPNOTSUPP;
- }
+ int err;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
- vport->vport, err);
- goto out;
- }
- vport->ingress.acl = acl;
-
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1127,14 +1085,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto spoof_err;
}
- vport->ingress.allow_untagged_spoofchk_grp = g;
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1142,14 +1100,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto untagged_err;
}
- vport->ingress.allow_untagged_only_grp = g;
+ vport->ingress.legacy.allow_untagged_only_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1158,108 +1116,178 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto allow_spoof_err;
}
- vport->ingress.allow_spoofchk_only_grp = g;
+ vport->ingress.legacy.allow_spoofchk_only_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto drop_err;
}
- vport->ingress.drop_grp = g;
+ vport->ingress.legacy.drop_grp = g;
+ kvfree(flow_group_in);
+ return 0;
-out:
- if (err) {
- if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_spoofchk_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_spoofchk_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- mlx5_destroy_flow_table(vport->ingress.acl);
+drop_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
}
-
+allow_spoof_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+untagged_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+spoof_err:
kvfree(flow_group_in);
return err;
}
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, int table_size)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ int vport_index;
+ int err;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport_index);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+ vport->vport);
+ return -EOPNOTSUPP;
+ }
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "vport[%d] ingress create flow table, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+ vport->ingress.acl = acl;
+ return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.acl)
+ return;
+
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+}
+
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
- mlx5_del_flow_rules(vport->ingress.drop_rule);
+ if (vport->ingress.legacy.drop_rule) {
+ mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+ vport->ingress.legacy.drop_rule = NULL;
+ }
- if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ if (vport->ingress.allow_rule) {
mlx5_del_flow_rules(vport->ingress.allow_rule);
-
- vport->ingress.drop_rule = NULL;
- vport->ingress.allow_rule = NULL;
-
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ vport->ingress.allow_rule = NULL;
+ }
}
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (IS_ERR_OR_NULL(vport->ingress.acl))
+ if (!vport->ingress.acl)
return;
esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
esw_vport_cleanup_ingress_rules(esw, vport);
- mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
- mlx5_destroy_flow_group(vport->ingress.drop_grp);
- mlx5_destroy_flow_table(vport->ingress.acl);
- vport->ingress.acl = NULL;
- vport->ingress.drop_grp = NULL;
- vport->ingress.allow_spoofchk_only_grp = NULL;
- vport->ingress.allow_untagged_only_grp = NULL;
- vport->ingress.allow_untagged_spoofchk_grp = NULL;
+ if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+ if (vport->ingress.legacy.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+ vport->ingress.legacy.drop_grp = NULL;
+ }
+ esw_vport_destroy_ingress_acl_table(vport);
}
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
+ struct mlx5_flow_spec *spec = NULL;
int dest_num = 0;
int err = 0;
u8 *smac_v;
+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups and
+ * 1 drop rule from the last group):
+ * 1) Allow untagged traffic with smac=original mac.
+ * 2) Allow untagged traffic.
+ * 3) Allow traffic with smac=original mac.
+ * 4) Drop all other traffic.
+ */
+ int table_size = 4;
+
esw_vport_cleanup_ingress_rules(esw, vport);
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
return 0;
}
- err = esw_vport_enable_ingress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
+ if (!vport->ingress.acl) {
+ err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] enable ingress acl err (%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+ if (err)
+ goto out;
}
esw_debug(esw->dev,
@@ -1309,21 +1337,59 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
dst = &drop_ctr_dst;
dest_num++;
}
- vport->ingress.drop_rule =
+ vport->ingress.legacy.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, dst, dest_num);
- if (IS_ERR(vport->ingress.drop_rule)) {
- err = PTR_ERR(vport->ingress.drop_rule);
+ if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->ingress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
- vport->ingress.drop_rule = NULL;
+ vport->ingress.legacy.drop_rule = NULL;
goto out;
}
+ kvfree(spec);
+ return 0;
out:
- if (err)
- esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ kvfree(spec);
+ return err;
+}
+
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u16 vlan_id, u32 flow_action)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ if (vport->egress.allowed_vlan)
+ return -EEXIST;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = flow_action;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ }
+
kvfree(spec);
return err;
}
@@ -1331,7 +1397,7 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
@@ -1358,34 +1424,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ if (err)
+ return err;
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
- esw_warn(esw->dev,
- "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
+ /* Drop others rule (star rule) */
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
goto out;
}
- /* Drop others rule (star rule) */
- memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1396,15 +1445,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
dst = &drop_ctr_dst;
dest_num++;
}
- vport->egress.drop_rule =
+ vport->egress.legacy.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, dst, dest_num);
- if (IS_ERR(vport->egress.drop_rule)) {
- err = PTR_ERR(vport->egress.drop_rule);
+ if (IS_ERR(vport->egress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->egress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
- vport->egress.drop_rule = NULL;
+ vport->egress.legacy.drop_rule = NULL;
}
out:
kvfree(spec);
@@ -1619,7 +1668,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
u16 vport_num = vport->vport;
int flags;
- if (esw->manager_vport == vport_num)
+ if (mlx5_esw_is_manager_vport(esw, vport_num))
return;
mlx5_modify_vport_admin_state(esw->dev,
@@ -1639,66 +1688,112 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
flags);
-
- /* Only legacy mode needs ACLs */
- if (esw->mode == MLX5_ESWITCH_LEGACY) {
- esw_vport_ingress_config(esw, vport);
- esw_vport_egress_config(esw, vport);
- }
}
-static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = vport->dev;
+ int ret;
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
- vport->ingress.drop_counter = mlx5_fc_create(dev, false);
- if (IS_ERR(vport->ingress.drop_counter)) {
- esw_warn(dev,
+ /* Only non-manager vports need ACLs in legacy mode */
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return 0;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(vport->ingress.legacy.drop_counter)) {
+ esw_warn(esw->dev,
"vport[%d] configure ingress drop rule counter failed\n",
vport->vport);
- vport->ingress.drop_counter = NULL;
+ vport->ingress.legacy.drop_counter = NULL;
}
}
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
- vport->egress.drop_counter = mlx5_fc_create(dev, false);
- if (IS_ERR(vport->egress.drop_counter)) {
- esw_warn(dev,
+ ret = esw_vport_ingress_config(esw, vport);
+ if (ret)
+ goto ingress_err;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(vport->egress.legacy.drop_counter)) {
+ esw_warn(esw->dev,
"vport[%d] configure egress drop rule counter failed\n",
vport->vport);
- vport->egress.drop_counter = NULL;
+ vport->egress.legacy.drop_counter = NULL;
}
}
+
+ ret = esw_vport_egress_config(esw, vport);
+ if (ret)
+ goto egress_err;
+
+ return 0;
+
+egress_err:
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+
+ingress_err:
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
+ return ret;
}
-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = vport->dev;
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ return esw_vport_create_legacy_acl_tables(esw, vport);
+ else
+ return esw_vport_create_offloads_acl_tables(esw, vport);
+}
- if (vport->ingress.drop_counter)
- mlx5_fc_destroy(dev, vport->ingress.drop_counter);
- if (vport->egress.drop_counter)
- mlx5_fc_destroy(dev, vport->egress.drop_counter);
+static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return;
+
+ esw_vport_disable_egress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
}
-static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- enum mlx5_eswitch_vport_event enabled_events)
+static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_vport_destroy_legacy_acl_tables(esw, vport);
+ else
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
+}
+
+static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
+ int ret;
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
- esw_vport_create_drop_counters(vport);
-
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
+ ret = esw_vport_setup_acl(esw, vport);
+ if (ret)
+ goto done;
+
/* Attach vport to the eswitch rate limiter */
if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
@@ -1711,7 +1806,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
*/
- if (esw->manager_vport == vport_num ||
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
@@ -1719,7 +1814,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
+done:
mutex_unlock(&esw->state_lock);
+ return ret;
}
static void esw_disable_vport(struct mlx5_eswitch *esw,
@@ -1727,18 +1824,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
{
u16 vport_num = vport->vport;
+ mutex_lock(&esw->state_lock);
if (!vport->enabled)
- return;
+ goto done;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
vport->enabled = false;
- /* Wait for current already scheduled events to complete */
- flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- mutex_lock(&esw->state_lock);
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -1746,17 +1841,18 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
- if (esw->manager_vport != vport_num &&
- esw->mode == MLX5_ESWITCH_LEGACY) {
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- esw_vport_destroy_drop_counters(vport);
- }
+
+ esw_vport_cleanup_acl(esw, vport);
esw->enabled_vports--;
+
+done:
mutex_unlock(&esw->state_lock);
}
@@ -1770,12 +1866,8 @@ static int eswitch_vport_event(struct notifier_block *nb,
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return NOTIFY_OK;
-
- if (vport->enabled)
+ if (!IS_ERR(vport))
queue_work(esw->work_queue, &vport->vport_change_handler);
-
return NOTIFY_OK;
}
@@ -1831,32 +1923,66 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
flush_workqueue(esw->work_queue);
}
+static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ memset(&vport->info, 0, sizeof(vport->info));
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
-void
+int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int num_vfs;
+ int ret;
int i;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ return ret;
- /* Enable ECPF vports */
+ /* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ goto ecpf_err;
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ goto vf_err;
+ }
+ return 0;
+
+vf_err:
+ num_vfs = i - 1;
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
+ esw_disable_vport(esw, vport);
+
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_disable_vport(esw, vport);
+ }
+
+ecpf_err:
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_disable_vport(esw, vport);
+ return ret;
}
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
@@ -1923,7 +2049,7 @@ abort:
return err;
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
int old_mode;
@@ -1952,6 +2078,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -2117,7 +2245,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
unlock:
mutex_unlock(&esw->state_lock);
- return 0;
+ return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
@@ -2474,12 +2602,12 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
- if (vport->egress.drop_counter)
- mlx5_fc_query(dev, vport->egress.drop_counter,
+ if (vport->egress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
- if (vport->ingress.drop_counter)
- mlx5_fc_query(dev, vport->ingress.drop_counter,
+ if (vport->ingress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
&stats->tx_dropped, &bytes);
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
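The eswitch.c changes convert vport enabling into a fallible operation with full rollback: PF, then ECPF, then VFs are enabled, and a failure at VF i tears down VFs in reverse starting from the last one that succeeded (num_vfs = i - 1) before disabling the ECPF and PF vports. A small standalone sketch of that enable/rollback ordering, using toy indices rather than mlx5 vport numbers:

#include <stdio.h>

#define NUM_VFS 4

static int enable_vport(int idx)
{
        return idx == 2 ? -1 : 0;       /* pretend VF 2 fails */
}

static void disable_vport(int idx)
{
        printf("disable %d\n", idx);
}

static int enable_all(void)
{
        int i, ret;

        for (i = 0; i < NUM_VFS; i++) {
                ret = enable_vport(i);
                if (ret)
                        goto vf_err;
        }
        return 0;

vf_err:
        while (--i >= 0)                /* reverse, over the enabled ones only */
                disable_vport(i);
        return ret;
}

int main(void)
{
        printf("ret=%d\n", enable_all());       /* disables 1 then 0, ret=-1 */
        return 0;
}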
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6bd6f5895244..962888a7c3c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -43,6 +43,16 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -59,21 +69,22 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
-#define FDB_MAX_CHAIN 3
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 16
-
struct vport_ingress {
struct mlx5_flow_table *acl;
- struct mlx5_flow_group *allow_untagged_spoofchk_grp;
- struct mlx5_flow_group *allow_spoofchk_only_grp;
- struct mlx5_flow_group *allow_untagged_only_grp;
- struct mlx5_flow_group *drop_grp;
- struct mlx5_modify_hdr *modify_metadata;
- struct mlx5_flow_handle *modify_metadata_rule;
- struct mlx5_flow_handle *allow_rule;
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
+ struct mlx5_flow_handle *allow_rule;
+ struct {
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
+ struct {
+ struct mlx5_flow_group *metadata_grp;
+ struct mlx5_modify_hdr *modify_metadata;
+ struct mlx5_flow_handle *modify_metadata_rule;
+ } offloads;
};
struct vport_egress {
@@ -81,8 +92,10 @@ struct vport_egress {
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
+ struct {
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
};
struct mlx5_vport_drop_stats {
@@ -139,7 +152,6 @@ enum offloads_fdb_flags {
extern const unsigned int ESW_POOLS[4];
-#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -166,7 +178,7 @@ struct mlx5_eswitch_fdb {
struct {
struct mlx5_flow_table *fdb;
u32 num_rules;
- } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
+ } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
/* Protects fdb_prio table */
struct mutex fdb_prio_lock;
@@ -217,8 +229,8 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
- /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
+ /* legacy data structures */
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct esw_mc_addr mc_promisc;
/* end of legacy */
@@ -251,18 +263,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ int table_size);
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);
@@ -270,7 +280,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -292,9 +302,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
void *out, int outlen);
struct mlx5_flow_spec;
@@ -421,6 +433,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u16 vlan_id, u32 flow_action);
+
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
@@ -459,6 +475,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
+static inline bool
+mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return esw->manager_vport == vport_num;
+}
+
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev) ?
@@ -593,17 +615,24 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
-void
+int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
@@ -613,10 +642,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
-#define FDB_MAX_CHAIN 1
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 1
-
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9004a07e457a..8ba59a21a163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_MAX_CHAIN;
+ return FDB_TC_MAX_CHAIN;
return 0;
}
@@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_MAX_PRIO;
+ return FDB_TC_MAX_PRIO;
return 1;
}
@@ -599,7 +599,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
+ err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
out, sizeof(out));
if (err)
return err;
@@ -618,7 +618,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
+ return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
in, sizeof(in));
}
@@ -927,7 +927,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
int table_prio, l = 0;
u32 flags = 0;
- if (chain == FDB_SLOW_PATH_CHAIN)
+ if (chain == FDB_TC_SLOW_PATH_CHAIN)
return esw->fdb_table.offloads.slow_fdb;
mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
+ table_prio = prio - 1;
/* create earlier levels for correct fs_core lookup when
* connecting tables
@@ -989,7 +989,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
int l;
- if (chain == FDB_SLOW_PATH_CHAIN)
+ if (chain == FDB_TC_SLOW_PATH_CHAIN)
return;
mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -1369,7 +1369,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw);
+ mlx5_eswitch_disable(esw, false);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -1777,9 +1777,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
flow_act.vlan[0].vid = 0;
flow_act.vlan[0].prio = 0;
- if (vport->ingress.modify_metadata_rule) {
+ if (vport->ingress.offloads.modify_metadata_rule) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_hdr = vport->ingress.modify_metadata;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
}
vport->ingress.allow_rule =
@@ -1815,11 +1815,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
MLX5_SET(set_action_in, action, data,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
- vport->ingress.modify_metadata =
+ vport->ingress.offloads.modify_metadata =
mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1, action);
- if (IS_ERR(vport->ingress.modify_metadata)) {
- err = PTR_ERR(vport->ingress.modify_metadata);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata);
esw_warn(esw->dev,
"failed to alloc modify header for vport %d ingress acl (%d)\n",
vport->vport, err);
@@ -1827,100 +1827,76 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_hdr = vport->ingress.modify_metadata;
- vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
- &spec, &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.modify_metadata_rule)) {
- err = PTR_ERR(vport->ingress.modify_metadata_rule);
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ vport->ingress.offloads.modify_metadata_rule =
+ mlx5_add_flow_rules(vport->ingress.acl,
+ &spec, &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
esw_warn(esw->dev,
"failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
vport->vport, err);
- vport->ingress.modify_metadata_rule = NULL;
+ vport->ingress.offloads.modify_metadata_rule = NULL;
goto out;
}
out:
if (err)
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
return err;
}
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (vport->ingress.modify_metadata_rule) {
- mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+ if (vport->ingress.offloads.modify_metadata_rule) {
+ mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
- vport->ingress.modify_metadata_rule = NULL;
+ vport->ingress.offloads.modify_metadata_rule = NULL;
}
}
-static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
- return 0;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) prio tag packets - pop the prio tag VLAN, allow
- * Unmatched traffic is allowed by default
- */
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable egress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure prio tag egress rules\n", vport->vport);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int ret = 0;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out_no_mem;
- }
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
- /* prio tag vlan rule - pop it so VF receives untagged packets */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
esw_warn(esw->dev,
- "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
- goto out;
+ "Failed to create vport[%d] ingress metadata group, err(%d)\n",
+ vport->vport, ret);
+ goto grp_err;
}
+ vport->ingress.offloads.metadata_grp = g;
+grp_err:
+ kvfree(flow_group_in);
+ return ret;
+}
-out:
- kvfree(spec);
-out_no_mem:
- if (err)
- esw_vport_cleanup_egress_rules(esw, vport);
- return err;
+static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
+{
+ if (vport->ingress.offloads.metadata_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
+ vport->ingress.offloads.metadata_grp = NULL;
+ }
}
-static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int err;
@@ -1929,8 +1905,7 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
return 0;
esw_vport_cleanup_ingress_rules(esw, vport);
-
- err = esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_create_ingress_acl_table(esw, vport, 1);
if (err) {
esw_warn(esw->dev,
"failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1938,25 +1913,65 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
return err;
}
+ err = esw_vport_create_ingress_acl_group(esw, vport);
+ if (err)
+ goto group_err;
+
esw_debug(esw->dev,
"vport[%d] configure ingress rules\n", vport->vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
if (err)
- goto out;
+ goto metadata_err;
}
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_vport_ingress_prio_tag_config(esw, vport);
if (err)
- goto out;
+ goto prio_tag_err;
}
+ return 0;
-out:
+prio_tag_err:
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+metadata_err:
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+group_err:
+ esw_vport_destroy_ingress_acl_table(vport);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err)
+ return err;
+
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ /* prio tag vlan rule - pop it so VF receives untagged packets */
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
if (err)
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_egress_acl(esw, vport);
+
return err;
}
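The error unwinding in esw_vport_ingress_config() above is the usual reverse-order goto ladder: each label undoes only the steps that had already succeeded. A minimal sketch of the idiom, with hypothetical step_*/undo_* helpers standing in for the real table/group/metadata-rule steps:

/* Hypothetical helpers standing in for the real setup/teardown steps. */
static int step_a(void) { return 0; }	/* e.g. create the ACL table */
static int step_b(void) { return 0; }	/* e.g. create the flow group */
static int step_c(void) { return 0; }	/* e.g. add the metadata rule */
static void undo_b(void) { }
static void undo_a(void) { }

static int example_setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto b_err;

	err = step_c();
	if (err)
		goto c_err;

	return 0;

c_err:
	undo_b();
b_err:
	undo_a();
	return err;
}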
@@ -1980,54 +1995,59 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
- int i, j;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- err = esw_vport_ingress_common_config(esw, vport);
- if (err)
- goto err_ingress;
+ err = esw_vport_ingress_config(esw, vport);
+ if (err)
+ return err;
- if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_vport_egress_prio_tag_config(esw, vport);
- if (err)
- goto err_egress;
+ if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+ err = esw_vport_egress_config(esw, vport);
+ if (err) {
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_table(vport);
}
}
+ return err;
+}
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+ esw_vport_destroy_ingress_acl_table(vport);
+}
- return 0;
+static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
-err_egress:
- esw_vport_disable_ingress_acl(esw, vport);
-err_ingress:
- for (j = MLX5_VPORT_PF; j < i; j++) {
- vport = &esw->vports[j];
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- }
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ err = esw_vport_create_offloads_acl_tables(esw, vport);
+ if (err)
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
return err;
}
-static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- }
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
@@ -2045,7 +2065,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
- err = esw_create_offloads_acl_tables(esw);
+ err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
return err;
@@ -2070,7 +2090,7 @@ create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
- esw_destroy_offloads_acl_tables(esw);
+ esw_destroy_uplink_offloads_acl_tables(esw);
return err;
}
@@ -2080,7 +2100,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
- esw_destroy_offloads_acl_tables(esw);
+ esw_destroy_uplink_offloads_acl_tables(esw);
}
static void
@@ -2169,7 +2189,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ goto err_vports;
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2182,6 +2204,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err_reps:
mlx5_eswitch_disable_pf_vf_vports(esw);
+err_vports:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
@@ -2195,7 +2218,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw);
+ mlx5_eswitch_disable(esw, false);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index eb8b0fe0b4e1..11621d265d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -35,11 +35,11 @@
#include <linux/mlx5/driver.h>
-enum mlx5_fpga_device_id {
- MLX5_FPGA_DEVICE_UNKNOWN = 0,
- MLX5_FPGA_DEVICE_KU040 = 1,
- MLX5_FPGA_DEVICE_KU060 = 2,
- MLX5_FPGA_DEVICE_KU060_2 = 3,
+enum mlx5_fpga_id {
+ MLX5_FPGA_NEWTON = 0,
+ MLX5_FPGA_EDISON = 1,
+ MLX5_FPGA_MORSE = 2,
+ MLX5_FPGA_MORSEQ = 3,
};
enum mlx5_fpga_image {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index d046d1ec2a86..2ce4241459ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -81,19 +81,28 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
}
}
-static const char *mlx5_fpga_device_name(u32 device)
+static const char *mlx5_fpga_name(u32 fpga_id)
{
- switch (device) {
- case MLX5_FPGA_DEVICE_KU040:
- return "ku040";
- case MLX5_FPGA_DEVICE_KU060:
- return "ku060";
- case MLX5_FPGA_DEVICE_KU060_2:
- return "ku060_2";
- case MLX5_FPGA_DEVICE_UNKNOWN:
- default:
- return "unknown";
+ static char ret[32];
+
+ switch (fpga_id) {
+ case MLX5_FPGA_NEWTON:
+ return "Newton";
+ case MLX5_FPGA_EDISON:
+ return "Edison";
+ case MLX5_FPGA_MORSE:
+ return "Morse";
+ case MLX5_FPGA_MORSEQ:
+ return "MorseQ";
}
+
+ snprintf(ret, sizeof(ret), "Unknown %d", fpga_id);
+ return ret;
+}
+
+static int mlx5_is_fpga_lookaside(u32 fpga_id)
+{
+ return fpga_id != MLX5_FPGA_NEWTON && fpga_id != MLX5_FPGA_EDISON;
}
static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
@@ -110,8 +119,12 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
fdev->last_admin_image = query.admin_image;
fdev->last_oper_image = query.oper_image;
- mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
- query.status, query.admin_image, query.oper_image);
+ mlx5_fpga_info(fdev, "Status %u; Admin image %u; Oper image %u\n",
+ query.status, query.admin_image, query.oper_image);
+
+ /* for FPGA lookaside projects, the FPGA load status is not important */
+ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+ return 0;
if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
@@ -167,25 +180,30 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned int max_num_qps;
unsigned long flags;
- u32 fpga_device_id;
+ u32 fpga_id;
int err;
if (!fdev)
return 0;
- err = mlx5_fpga_device_load_check(fdev);
+ err = mlx5_fpga_caps(fdev->mdev);
if (err)
goto out;
- err = mlx5_fpga_caps(fdev->mdev);
+ err = mlx5_fpga_device_load_check(fdev);
if (err)
goto out;
- fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device);
- mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n",
- mlx5_fpga_device_name(fpga_device_id),
- fpga_device_id,
+ fpga_id = MLX5_CAP_FPGA(fdev->mdev, fpga_id);
+ mlx5_fpga_info(fdev, "FPGA card %s:%u\n", mlx5_fpga_name(fpga_id), fpga_id);
+
+ /* No QPs if FPGA does not participate in net processing */
+ if (mlx5_is_fpga_lookaside(fpga_id))
+ goto out;
+
+ mlx5_fpga_info(fdev, "%s(%d): image, version %u; SBU %06x:%04x version %d\n",
mlx5_fpga_image_name(fdev->last_oper_image),
+ fdev->last_oper_image,
MLX5_CAP_FPGA(fdev->mdev, image_version),
MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id),
MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id),
@@ -264,6 +282,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
if (!fdev)
return;
+ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+ return;
+
spin_lock_irqsave(&fdev->state_lock, flags);
if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
spin_unlock_irqrestore(&fdev->state_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3bbb49354829..d60577484567 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -531,9 +531,16 @@ static void del_hw_fte(struct fs_node *node)
}
}
+static void del_sw_fte_rcu(struct rcu_head *head)
+{
+ struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
+ struct mlx5_flow_steering *steering = get_steering(&fte->node);
+
+ kmem_cache_free(steering->ftes_cache, fte);
+}
+
static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
struct fs_fte *fte;
int err;
@@ -546,7 +553,8 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
- kmem_cache_free(steering->ftes_cache, fte);
+
+ call_rcu(&fte->rcu, del_sw_fte_rcu);
}
static void del_hw_flow_group(struct fs_node *node)
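del_sw_fte() now defers the kmem_cache_free() through call_rcu(), so a reader that found the FTE without holding the group lock (see the lookup changes later in this file) can never touch freed memory. A minimal sketch of the same deferred-free pattern, using hypothetical example_* names:

#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Embed an rcu_head in the object and free it from the RCU callback,
 * once all pre-existing RCU readers are guaranteed to be done. */
struct example_obj {
	int key;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	struct example_obj *obj = container_of(head, struct example_obj, rcu);

	kfree(obj);
}

static void example_del(struct example_obj *obj)
{
	/* Unlink obj from all lookup structures first (not shown),
	 * then defer the actual free past the RCU grace period. */
	call_rcu(&obj->rcu, example_free_rcu);
}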
@@ -579,7 +587,7 @@ static void del_sw_flow_group(struct fs_node *node)
rhashtable_destroy(&fg->ftes_hash);
ida_destroy(&fg->fte_allocator);
- if (ft->autogroup.active)
+ if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
@@ -1126,6 +1134,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
+ /* We reserve room for flow groups in addition to max types */
+ ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
return ft;
}
@@ -1328,8 +1338,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
return ERR_PTR(-ENOENT);
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
- /* We save place for flow groups in addition to max types */
- group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+ group_size = ft->autogroup.group_size;
/* ft->max_fte == ft->autogroup.max_types */
if (group_size == 0)
@@ -1356,7 +1365,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
if (IS_ERR(fg))
goto out;
- ft->autogroup.num_groups++;
+ if (group_size == ft->autogroup.group_size)
+ ft->autogroup.num_groups++;
out:
return fg;
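A worked example (hypothetical numbers) of the autogroup sizing introduced above: mlx5_create_auto_grouped_flow_table() reserves one extra group's worth of entries, and only groups of exactly group_size participate in the num_groups accounting, which is what the del_sw_flow_group() and alloc_auto_flow_group() changes test for:

#include <stdio.h>

int main(void)
{
	unsigned int max_fte = 128;      /* hypothetical table capacity */
	unsigned int max_num_groups = 3; /* hypothetical requested groups */

	/* One group's worth of FTEs is kept in reserve beyond the
	 * requested number of groups. */
	unsigned int group_size = max_fte / (max_num_groups + 1);

	printf("group_size = %u\n", group_size); /* 128 / 4 == 32 */
	return 0;
}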
@@ -1623,22 +1633,47 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
}
static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g,
- const u32 *match_value,
- bool take_write)
+lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
{
struct fs_fte *fte_tmp;
- if (take_write)
- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
- else
- nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
- rhash_fte);
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
fte_tmp = NULL;
goto out;
}
+
+ if (!fte_tmp->node.active) {
+ tree_put_node(&fte_tmp->node, false);
+ fte_tmp = NULL;
+ goto out;
+ }
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
+out:
+ up_write_ref_node(&g->node, false);
+ return fte_tmp;
+}
+
+static struct fs_fte *
+lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
+{
+ struct fs_fte *fte_tmp;
+
+ if (!tree_get_node(&g->node))
+ return NULL;
+
+ rcu_read_lock();
+ fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ rcu_read_unlock();
+ fte_tmp = NULL;
+ goto out;
+ }
+ rcu_read_unlock();
+
if (!fte_tmp->node.active) {
tree_put_node(&fte_tmp->node, false);
fte_tmp = NULL;
@@ -1646,14 +1681,21 @@ lookup_fte_locked(struct mlx5_flow_group *g,
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
out:
- if (take_write)
- up_write_ref_node(&g->node, false);
- else
- up_read_ref_node(&g->node);
+ tree_put_node(&g->node, false);
return fte_tmp;
}
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
+{
+ if (write)
+ return lookup_fte_for_write_locked(g, match_value);
+ else
+ return lookup_fte_for_read_locked(g, match_value);
+}
+
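The read-side lookup above no longer takes the parent group lock at all; it only needs rcu_read_lock() around the rhashtable walk plus a reference taken before the RCU section ends, which is exactly why the FTE free was converted to call_rcu() earlier in this patch. A minimal sketch of the pattern, with a hypothetical example_obj type and a caller-supplied refcount helper:

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>

/* Hypothetical object; only the lookup pattern matters here. */
struct example_obj {
	int key;
	struct rhash_head hash;
};

/* rhashtable_lookup() must run under rcu_read_lock(), and the object
 * has to be pinned (via get_ref) before leaving the read section.
 * This is only safe if the free side defers through call_rcu(). */
static struct example_obj *
example_lookup(struct rhashtable *ht, const void *key,
	       const struct rhashtable_params params,
	       bool (*get_ref)(struct example_obj *obj))
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	if (obj && !get_ref(obj))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}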
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1814,6 +1856,13 @@ search_again_locked:
return rule;
}
+ fte = alloc_fte(ft, spec, flow_act);
+ if (IS_ERR(fte)) {
+ up_write_ref_node(&ft->node, false);
+ err = PTR_ERR(fte);
+ goto err_alloc_fte;
+ }
+
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
up_write_ref_node(&ft->node, false);
@@ -1821,17 +1870,9 @@ search_again_locked:
if (err)
goto err_release_fg;
- fte = alloc_fte(ft, spec, flow_act);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- goto err_release_fg;
- }
-
err = insert_fte(g, fte);
- if (err) {
- kmem_cache_free(steering->ftes_cache, fte);
+ if (err)
goto err_release_fg;
- }
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
up_write_ref_node(&g->node, false);
@@ -1843,6 +1884,8 @@ search_again_locked:
err_release_fg:
up_write_ref_node(&g->node, false);
+ kmem_cache_free(steering->ftes_cache, fte);
+err_alloc_fte:
tree_put_node(&g->node, false);
return ERR_PTR(err);
}
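The hunk above moves alloc_fte() ahead of taking the group lock, presumably so the sleepable, failure-prone allocation happens outside the critical section and the error path frees only after the lock is dropped. A minimal sketch of that reordering, with hypothetical names:

#include <linux/slab.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(parent_lock);

/* Allocate outside the lock; insert (and on failure, free) around a
 * critical section that is kept as short as possible. */
static int example_add(struct kmem_cache *cache, int (*insert)(void *obj))
{
	void *obj;
	int err;

	obj = kmem_cache_zalloc(cache, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	mutex_lock(&parent_lock);
	err = insert(obj);
	mutex_unlock(&parent_lock);
	if (err)
		kmem_cache_free(cache, obj);

	return err;
}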
@@ -2359,9 +2402,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
int acc_level_ns = acc_level;
prio->start_level = acc_level;
- fs_for_each_ns(ns, prio)
+ fs_for_each_ns(ns, prio) {
/* This updates start_level and num_levels of ns's priority descendants */
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
+
+ /* If this is a prio with chains, and we can jump from one chain
+ * (namespace) to another, we accumulate the levels
+ */
+ if (prio->node.type == FS_TYPE_PRIO_CHAINS)
+ acc_level = acc_level_ns;
+ }
+
if (!prio->num_levels)
prio->num_levels = acc_level_ns - prio->start_level;
WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
@@ -2550,58 +2601,109 @@ out_err:
steering->rdma_rx_root_ns = NULL;
return err;
}
-static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+
+/* FT and tc chains are stored in the same array so we can reuse
+ * mlx5_get_fdb_sub_ns() and the tc API for FT chains.
+ * When creating a new ns for each chain, store it in the first available slot.
+ * Assume tc chains are created and stored first, and only then the FT chain.
+ */
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_namespace *ns)
+{
+ int chain = 0;
+
+ while (steering->fdb_sub_ns[chain])
+ ++chain;
+
+ steering->fdb_sub_ns[chain] = ns;
+}
+
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct fs_prio *maj_prio)
{
struct mlx5_flow_namespace *ns;
- struct fs_prio *maj_prio;
struct fs_prio *min_prio;
+ int prio;
+
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
+ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
+ if (IS_ERR(min_prio))
+ return PTR_ERR(min_prio);
+ }
+
+ store_fdb_sub_ns_prio_chain(steering, ns);
+
+ return 0;
+}
+
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
+ int fs_prio,
+ int chains)
+{
+ struct fs_prio *maj_prio;
int levels;
int chain;
- int prio;
int err;
- steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
- if (!steering->fdb_root_ns)
- return -ENOMEM;
+ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+ fs_prio,
+ levels);
+ if (IS_ERR(maj_prio))
+ return PTR_ERR(maj_prio);
+
+ for (chain = 0; chain < chains; chain++) {
+ err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
+{
+ int err;
- steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
- (FDB_MAX_CHAIN + 1), GFP_KERNEL);
+ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
+ sizeof(*steering->fdb_sub_ns),
+ GFP_KERNEL);
if (!steering->fdb_sub_ns)
return -ENOMEM;
+ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
+ if (err)
+ return err;
+
+ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *maj_prio;
+ int err;
+
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
+ return -ENOMEM;
+
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
}
-
- levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
- maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
- FDB_FAST_PATH,
- levels);
- if (IS_ERR(maj_prio)) {
- err = PTR_ERR(maj_prio);
+ err = create_fdb_fast_path(steering);
+ if (err)
goto out_err;
- }
-
- for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
- ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
- if (IS_ERR(ns)) {
- err = PTR_ERR(ns);
- goto out_err;
- }
-
- for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
- min_prio = fs_create_prio(ns, prio, 2);
- if (IS_ERR(min_prio)) {
- err = PTR_ERR(min_prio);
- goto out_err;
- }
- }
-
- steering->fdb_sub_ns[chain] = ns;
- }
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
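For reference, the level budget reserved by the refactored fast path above: the tc part matches the old open-coded computation (2 levels per prio, 16 prios, 4 chains), and the new FDB_FT_OFFLOAD prio adds one more chain's worth. A standalone check, with macro values copied from eswitch.h in this patch:

#include <stdio.h>

#define FDB_TC_MAX_CHAIN 3
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2

int main(void)
{
	/* create_fdb_chains(): levels = LEVELS_PER_PRIO * MAX_PRIO * chains */
	int tc_levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO *
			(FDB_TC_MAX_CHAIN + 1);
	int ft_levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * 1;

	printf("FDB_TC_OFFLOAD levels: %d\n", tc_levels); /* 2*16*4 == 128 */
	printf("FDB_FT_OFFLOAD levels: %d\n", ft_levels); /* 2*16*1 == 32 */
	return 0;
}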
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 00717eba2256..e8cd997f413e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -162,6 +162,7 @@ struct mlx5_flow_table {
struct {
bool active;
unsigned int required_groups;
+ unsigned int group_size;
unsigned int num_groups;
} autogroup;
/* Protect fwd_rules */
@@ -202,6 +203,7 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
+ struct rcu_head rcu;
int modify_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index c07f3154437c..d9f4e8c59c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -390,7 +390,8 @@ static void print_health_info(struct mlx5_core_dev *dev)
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
struct mlx5_core_health *health = &dev->priv.health;
@@ -491,7 +492,8 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx)
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
int err;
@@ -545,23 +547,22 @@ static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
return mlx5_health_try_recover(dev);
}
-#define MLX5_CR_DUMP_CHUNK_SIZE 256
static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx)
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
u32 crdump_size = dev->priv.health.crdump_size;
u32 *cr_data;
- u32 data_size;
- u32 offset;
int err;
if (!mlx5_core_is_pf(dev))
@@ -582,20 +583,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
goto free_data;
}
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data");
- if (err)
- goto free_data;
- for (offset = 0; offset < crdump_size; offset += data_size) {
- if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE)
- data_size = crdump_size - offset;
- else
- data_size = MLX5_CR_DUMP_CHUNK_SIZE;
- err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
- data_size);
- if (err)
- goto free_data;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
free_data:
kvfree(cr_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index c5ef2ff26465..fc0d9583475d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -145,34 +145,35 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
{
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[0].tx_enabled ||
- !tracker->netdev_state[0].link_up) {
+ if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
+ !tracker->netdev_state[MLX5_LAG_P1].link_up) {
*port1 = 2;
return;
}
- if (!tracker->netdev_state[1].tx_enabled ||
- !tracker->netdev_state[1].link_up)
+ if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
+ !tracker->netdev_state[MLX5_LAG_P2].link_up)
*port2 = 1;
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u8 v2p_port1, v2p_port2;
int err;
mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
&v2p_port2);
- if (v2p_port1 != ldev->v2p_map[0] ||
- v2p_port2 != ldev->v2p_map[1]) {
- ldev->v2p_map[0] = v2p_port1;
- ldev->v2p_map[1] = v2p_port2;
+ if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
+ v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
+ ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
- ldev->v2p_map[0], ldev->v2p_map[1]);
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
if (err)
@@ -185,16 +186,17 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
- &ldev->v2p_map[1]);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
- ldev->v2p_map[0], ldev->v2p_map[1]);
+ ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
if (err)
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -207,7 +209,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
u8 flags)
{
bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
err = mlx5_create_lag(ldev, tracker);
@@ -229,7 +231,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
@@ -252,14 +254,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+ return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
+ ldev->pf[MLX5_LAG_P2].dev);
#else
- return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
- !mlx5_sriov_is_enabled(ldev->pf[1].dev));
+ return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
+ !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}
@@ -285,8 +288,8 @@ static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker;
bool do_bond, roce_lag;
int err;
@@ -692,10 +695,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- ndev = ldev->tracker.netdev_state[0].tx_enabled ?
- ldev->pf[0].netdev : ldev->pf[1].netdev;
+ ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
+ ldev->pf[MLX5_LAG_P1].netdev :
+ ldev->pf[MLX5_LAG_P2].netdev;
} else {
- ndev = ldev->pf[0].netdev;
+ ndev = ldev->pf[MLX5_LAG_P1].netdev;
}
if (ndev)
dev_hold(ndev);
@@ -717,7 +721,8 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
return true;
ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
+ if (!ldev || !__mlx5_lag_is_roce(ldev) ||
+ ldev->pf[MLX5_LAG_P1].dev == dev)
return true;
/* If bonded, we do not add an IB device for PF1. */
@@ -746,11 +751,11 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
- mdev[0] = ldev->pf[0].dev;
- mdev[1] = ldev->pf[1].dev;
+ mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
+ mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
} else {
num_ports = 1;
- mdev[0] = dev;
+ mdev[MLX5_LAG_P1] = dev;
}
for (i = 0; i < num_ports; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 1dea0b1c9826..f1068aac6406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -8,6 +8,11 @@
#include "lag_mp.h"
enum {
+ MLX5_LAG_P1,
+ MLX5_LAG_P2,
+};
+
+enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 5d20d615663e..b70afa310ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,10 +11,11 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
+ ldev->pf[MLX5_LAG_P2].dev);
}
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
@@ -43,7 +44,8 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
* 2 - set affinity to port 2.
*
**/
-static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
+static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
+ enum mlx5_lag_port_affinity port)
{
struct lag_tracker tracker;
@@ -51,37 +53,37 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
return;
switch (port) {
- case 0:
- tracker.netdev_state[0].tx_enabled = true;
- tracker.netdev_state[1].tx_enabled = true;
- tracker.netdev_state[0].link_up = true;
- tracker.netdev_state[1].link_up = true;
+ case MLX5_LAG_NORMAL_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = true;
break;
- case 1:
- tracker.netdev_state[0].tx_enabled = true;
- tracker.netdev_state[0].link_up = true;
- tracker.netdev_state[1].tx_enabled = false;
- tracker.netdev_state[1].link_up = false;
+ case MLX5_LAG_P1_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = false;
break;
- case 2:
- tracker.netdev_state[0].tx_enabled = false;
- tracker.netdev_state[0].link_up = false;
- tracker.netdev_state[1].tx_enabled = true;
- tracker.netdev_state[1].link_up = true;
+ case MLX5_LAG_P2_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = false;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d",
- port);
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[0].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events,
+ if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events,
+ if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -141,11 +143,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Verify next hops are ports of the same hca */
fib_nh0 = fib_info_nh(fi, 0);
fib_nh1 = fib_info_nh(fi, 1);
- if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[1].netdev) &&
- !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) {
- mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
+ if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
+ !(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Multipath offload require two ports of the same HCA\n");
return;
}
@@ -157,7 +160,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH);
}
- mlx5_lag_set_port_affinity(ldev, 0);
+ mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
mp->mfi = fi;
}
@@ -182,7 +185,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
- mlx5_lag_set_port_affinity(ldev, 0);
+ mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
}
}
@@ -248,9 +251,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct net_device *fib_dev;
struct fib_info *fi;
- if (!net_eq(info->net, &init_net))
- return NOTIFY_DONE;
-
if (info->family != AF_INET)
return NOTIFY_DONE;
@@ -270,8 +270,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return notifier_from_errno(-EINVAL);
}
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev != ldev->pf[0].netdev &&
- fib_dev != ldev->pf[1].netdev) {
+ if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
+ fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
return NOTIFY_DONE;
}
fib_work = mlx5_lag_init_fib_work(ldev, event);
@@ -311,8 +311,8 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
return 0;
mp->fib_nb.notifier_call = mlx5_lag_fib_event;
- err = register_fib_notifier(&mp->fib_nb,
- mlx5_lag_fib_event_flush);
+ err = register_fib_notifier(&init_net, &mp->fib_nb,
+ mlx5_lag_fib_event_flush, NULL);
if (err)
mp->fib_nb.notifier_call = NULL;
@@ -326,6 +326,6 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
if (!mp->fib_nb.notifier_call)
return;
- unregister_fib_notifier(&mp->fib_nb);
+ unregister_fib_notifier(&init_net, &mp->fib_nb);
mp->fib_nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
index 6d14b1100be9..79be89e9c7a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -7,6 +7,12 @@
#include "lag.h"
#include "mlx5_core.h"
+enum mlx5_lag_port_affinity {
+ MLX5_LAG_NORMAL_AFFINITY,
+ MLX5_LAG_P1_AFFINITY,
+ MLX5_LAG_P2_AFFINITY,
+};
+
struct lag_mp {
struct notifier_block fib_nb;
struct fib_info *mfi; /* used in tracking fib events */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0059b290e095..43f97601b500 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
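The validation added above refuses flag bits the driver does not implement, as well as a strict-mode request to timestamp both edges. A userspace-flavored sketch (assuming the uapi flag definitions in <linux/ptp_clock.h>, including PTP_EXTTS_EDGES) of which extts requests would now be accepted:

#include <stdio.h>
#include <linux/ptp_clock.h>

/* Hypothetical mirror of the two checks added to mlx5_extts_configure(). */
static int mlx5_would_accept(unsigned int flags)
{
	if (flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
		      PTP_FALLING_EDGE | PTP_STRICT_FLAGS))
		return 0; /* unknown flag bits -> -EOPNOTSUPP */
	if ((flags & PTP_STRICT_FLAGS) && (flags & PTP_ENABLE_FEATURE) &&
	    (flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return 0; /* both edges under strict mode -> -EOPNOTSUPP */
	return 1;
}

int main(void)
{
	printf("%d\n", mlx5_would_accept(PTP_ENABLE_FEATURE |
					 PTP_RISING_EDGE)); /* 1 */
	printf("%d\n", mlx5_would_accept(PTP_ENABLE_FEATURE |
					 PTP_STRICT_FLAGS |
					 PTP_RISING_EDGE |
					 PTP_FALLING_EDGE)); /* 0 */
	return 0;
}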
@@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index b99d469e4e64..249539247e2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -84,4 +84,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
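mlx5_core_net() resolves the net namespace that owns the device's devlink instance. The lag_mp.c change in this same series still registers its FIB notifier against init_net, but a hypothetical caller of the new helper would look like this (sketch only; register_fib_notifier() uses the per-netns signature as changed in this tree):

#include <net/fib_notifier.h>
#include "lib/mlx5.h"

/* Register a FIB notifier in whatever namespace the device's devlink
 * instance lives in, rather than hard-coding init_net. */
static int example_register_fib(struct mlx5_core_dev *dev,
				struct notifier_block *nb,
				void (*flush)(struct notifier_block *nb))
{
	return register_fib_notifier(mlx5_core_net(dev), nb, flush, NULL);
}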
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e47dd7c1b909..173e2c12e1c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -837,8 +837,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_qp_table(dev);
- mlx5_init_mkey_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +894,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
@@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
@@ -1168,7 +1164,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_put_uars_page(dev, dev->priv.uar);
}
-static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
int err = 0;
@@ -1226,10 +1222,8 @@ function_teardown:
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
- int err = 0;
-
if (cleanup) {
mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
@@ -1257,7 +1251,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
- return err;
+ return 0;
}
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
@@ -1566,6 +1560,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
+ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b100489dc85c..da67b28d6e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -243,4 +243,7 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+
+int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
+int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index c501bf2a0252..42cc3c7ac5b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,16 +36,6 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
-{
- xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
-}
-
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
-{
- WARN_ON(!xa_empty(&dev->priv.mkey_table));
-}
-
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -54,7 +44,6 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_async_work *context)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
u32 mkey_index;
void *mkc;
int err;
@@ -84,16 +73,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
mkey_index, key, mkey->key);
-
- err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
- GFP_KERNEL));
- if (err) {
- mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
- mlx5_base_mkey(mkey->key), err);
- mlx5_core_destroy_mkey(dev, mkey);
- }
-
- return err;
+ return 0;
}
EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
@@ -111,12 +91,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
{
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
- unsigned long flags;
-
- xa_lock_irqsave(mkeys, flags);
- __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
- xa_unlock_irqrestore(mkeys, flags);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 61fcfd8b39b4..03f037811f1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -108,10 +108,10 @@ enable_vfs_hca:
return 0;
}
-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
+static void
+mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int num_vfs = pci_num_vf(dev->pdev);
int err;
int vf;
@@ -127,7 +127,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch);
+ mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, num_vfs, true);
}
return err;
}
@@ -155,9 +155,10 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, num_vfs, true);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -192,7 +193,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return;
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
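Two things change in this disable path. First, ordering: pci_num_vf() reports 0 once pci_disable_sriov() has run, so mlx5_sriov_disable() must snapshot the VF count before disabling, as the hunk above now does. Second, the new clear_vf flag lets mlx5_sriov_detach() tear down without wiping VF configuration (it passes false), presumably so VF state survives a driver detach, while a genuine SR-IOV disable passes true. A minimal sketch of the ordering hazard (the fake_* helpers are stand-ins, not the PCI core):

/* Sketch of why num_vfs is captured before disabling. */
#include <stdio.h>

static int vfs_enabled = 4;

static int fake_pci_num_vf(void) { return vfs_enabled; }
static void fake_pci_disable_sriov(void) { vfs_enabled = 0; }

int main(void)
{
	/* Wrong order: the count is gone by the time cleanup needs it. */
	fake_pci_disable_sriov();
	printf("after disable: %d VFs to clean\n", fake_pci_num_vf());

	/* Patched order: snapshot first, then disable. */
	vfs_enabled = 4;
	int num_vfs = fake_pci_num_vf();
	fake_pci_disable_sriov();
	printf("snapshot: %d VFs to clean\n", num_vfs);
	return 0;
}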
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
deleted file mode 100644
index 9e2eccbb1eb8..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
- * Slicing-by-16 contributed by Bulat Ziganshin
- *
- * This software is provided 'as-is', without any express or implied warranty.
- * In no event will the author be held liable for any damages arising from the
- * use of this software.
- *
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
- *
- * 1. The origin of this software must not be misrepresented; you must not
- * claim that you wrote the original software.
- * 2. If you use this software in a product, an acknowledgment in the product
- * documentation would be appreciated but is not required.
- * 3. Altered source versions must be plainly marked as such, and must not be
- * misrepresented as being the original software.
- *
- * Taken from http://create.stephan-brumme.com/crc32/ and adapted.
- */
-
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-
-static u32 dr_ste_crc_tab32[8][256];
-
-static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
-{
- tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
-}
-
-void mlx5dr_crc32_init_table(void)
-{
- u32 crc, i, j;
-
- for (i = 0; i < 256; i++) {
- crc = i;
- for (j = 0; j < 8; j++) {
- if (crc & 0x00000001L)
- crc = (crc >> 1) ^ DR_STE_CRC_POLY;
- else
- crc = crc >> 1;
- }
- dr_ste_crc_tab32[0][i] = crc;
- }
-
- /* Init CRC lookup tables according to crc_slice_8 algorithm */
- for (i = 0; i < 256; i++) {
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i);
- }
-}
-
-/* Compute CRC32 (Slicing-by-8 algorithm) */
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length)
-{
- const u32 *curr = (const u32 *)input_data;
- const u8 *curr_char;
- u32 crc = 0, one, two;
-
- if (!input_data)
- return 0;
-
- /* Process eight bytes at once (Slicing-by-8) */
- while (length >= 8) {
- one = *curr++ ^ crc;
- two = *curr++;
-
- crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff]
- ^ dr_ste_crc_tab32[1][(two >> 16) & 0xff]
- ^ dr_ste_crc_tab32[2][(two >> 8) & 0xff]
- ^ dr_ste_crc_tab32[3][two & 0xff]
- ^ dr_ste_crc_tab32[4][(one >> 24) & 0xff]
- ^ dr_ste_crc_tab32[5][(one >> 16) & 0xff]
- ^ dr_ste_crc_tab32[6][(one >> 8) & 0xff]
- ^ dr_ste_crc_tab32[7][one & 0xff];
-
- length -= 8;
- }
-
- curr_char = (const u8 *)curr;
- /* Remaining 1 to 7 bytes (standard algorithm) */
- while (length-- != 0)
- crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff)
- ^ *curr_char++];
-
- return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
- ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
-}
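Nothing in this deleted file was load-bearing beyond a plain (uninverted) CRC-32: it seeds with 0, applies no final XOR, and its closing expression is just a full 32-bit byte swap, i.e. swab32(), which equals htonl() on little-endian hosts. That is what lets the dr_ste.c hunk further down replace all of it with the kernel's crc32(0, data, len) followed by htonl(), since the kernel lib applies no inversion either. A standalone check of that equivalence, using a plain bitwise CRC-32 (poly 0xEDB88320, no ~0 pre/post-conditioning) as the reference:

/* Illustrative check: the deleted return expression is swab32(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_ref(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t crc = 0;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

static uint32_t swab32(uint32_t x)
{
	return ((x >> 24) & 0xff) | ((x << 8) & 0xff0000) |
	       ((x >> 8) & 0xff00) | ((x << 24) & 0xff000000);
}

int main(void)
{
	const char tag[] = "example STE tag bytes";
	uint32_t crc = crc32_ref(tag, strlen(tag));

	/* What mlx5dr_crc32_slice8_calc() returned, minus the slicing. */
	printf("0x%08x\n", swab32(crc));
	return 0;
}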
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5b24732b18c0..a9da961d4d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -326,9 +326,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_resourses;
}
- /* Init CRC table for htbl CRC calculation */
- mlx5dr_crc32_init_table();
-
return dmn;
uninit_resourses:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 67dea7698fc9..c6dbd856df94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -102,13 +102,52 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
-static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3)
+static bool
+dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->outer_vxlan_gpe_vni ||
misc3->outer_vxlan_gpe_next_protocol ||
misc3->outer_vxlan_gpe_flags);
}
+static bool
+dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols &
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) &&
+ dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps);
+}
+
+static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
+{
+ return misc->geneve_vni ||
+ misc->geneve_oam ||
+ misc->geneve_protocol_type ||
+ misc->geneve_opt_len;
+}
+
+static bool
+dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols &
+ MLX5_FLEX_PARSER_GENEVE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_misc_geneve_set(&mask->misc) &&
+ dr_matcher_supp_flex_parser_geneve(&dmn->info.caps);
+}
+
static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->icmpv6_type || misc3->icmpv6_code ||
@@ -137,24 +176,15 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
return (misc->source_sqn || misc->source_port);
}
-static bool
-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
-{
- return dmn->info.caps.flex_protocols &
- MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
-}
-
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6)
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv)
{
- if (ipv6) {
- nic_matcher->ste_builder = nic_matcher->ste_builder6;
- nic_matcher->num_of_builders = nic_matcher->num_of_builders6;
- } else {
- nic_matcher->ste_builder = nic_matcher->ste_builder4;
- nic_matcher->num_of_builders = nic_matcher->num_of_builders4;
- }
+ nic_matcher->ste_builder =
+ nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
+ nic_matcher->num_of_builders =
+ nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
if (!nic_matcher->num_of_builders) {
mlx5dr_dbg(matcher->tbl->dmn,
@@ -167,26 +197,19 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6)
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param mask = {};
struct mlx5dr_match_misc3 *misc3;
struct mlx5dr_ste_build *sb;
- u8 *num_of_builders;
bool inner, rx;
int idx = 0;
int ret, i;
- if (ipv6) {
- sb = nic_matcher->ste_builder6;
- num_of_builders = &nic_matcher->num_of_builders6;
- } else {
- sb = nic_matcher->ste_builder4;
- num_of_builders = &nic_matcher->num_of_builders4;
- }
-
+ sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
/* Create a temporary mask to track and clear used mask fields */
@@ -249,7 +272,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
- if (ipv6) {
+ if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
@@ -271,10 +294,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
}
- if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) &&
- dr_matcher_supp_flex_parser_vxlan_gpe(dmn))
- mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask,
- inner, rx);
+ if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn))
+ mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++],
+ &mask,
+ inner, rx);
+ else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn))
+ mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++],
+ &mask,
+ inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
@@ -325,7 +352,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
- if (ipv6) {
+ if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
@@ -373,7 +400,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
}
}
- *num_of_builders = idx;
+ nic_matcher->ste_builder = sb;
+ nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;
return 0;
}
@@ -524,24 +552,33 @@ static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
}
}
-static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
- struct mlx5dr_matcher_rx_tx *nic_matcher)
+static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- int ret, ret_v4, ret_v6;
- ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false);
- ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
- if (ret_v4 && ret_v6) {
+ if (!nic_matcher->ste_builder) {
mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
- if (!ret_v4)
- nic_matcher->ste_builder = nic_matcher->ste_builder4;
- else
- nic_matcher->ste_builder = nic_matcher->ste_builder6;
+ return 0;
+}
+
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret;
+
+ ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
+ if (ret)
+ return ret;
nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
DR_CHUNK_SIZE_1,
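The builder storage goes from two flat lists (v4/v6) to a 2-D array indexed by (outer, inner) IP version: all four combinations are precomputed at matcher init, selection becomes two array lookups, and a zero builder count simply marks a combination the mask cannot express. A toy version of that selection, with hypothetical types:

/* Sketch of the (outer, inner) builder selection -- hypothetical types. */
#include <stdio.h>

enum ipv { IPV4, IPV6, IPV_MAX };

struct matcher {
	int num_of_builders_arr[IPV_MAX][IPV_MAX];
};

static int select_builders(struct matcher *m, enum ipv outer, enum ipv inner)
{
	int n = m->num_of_builders_arr[outer][inner];

	if (!n)		/* this (outer, inner) combination has no builders */
		return -1;
	return n;
}

int main(void)
{
	struct matcher m = { .num_of_builders_arr = {
		[IPV4][IPV4] = 3, [IPV4][IPV6] = 4,
		[IPV6][IPV4] = 4, /* [IPV6][IPV6] left 0: unsupported mask */
	} };
	int n = select_builders(&m, IPV6, IPV6);

	printf("v6/v6: %s\n", n < 0 ? "no builders for this mask" : "ok");
	printf("v4/v6: %d builders\n", select_builders(&m, IPV4, IPV6));
	return 0;
}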
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 5dcb8baf491a..32e94d2ee5e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
}
}
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+ u16 bits = 0;
+
+ while (byte_mask) {
+ byte_mask = byte_mask & (byte_mask - 1);
+ bits++;
+ }
+
+ return bits;
+}
+
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
if (!ctrl->may_grow)
return false;
+ if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+ return false;
+
if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
(ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
return true;
@@ -954,12 +969,12 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
return 0;
}
-static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
+static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
- return (param->outer.ip_version == 6 ||
- param->inner.ip_version == 6 ||
- param->outer.ethertype == ETH_P_IPV6 ||
- param->inner.ethertype == ETH_P_IPV6);
+ if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
+ return DR_RULE_IPV6;
+
+ return DR_RULE_IPV4;
}
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
@@ -1023,7 +1038,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
- dr_rule_is_ipv6(param));
+ dr_rule_get_ipv(&param->outer),
+ dr_rule_get_ipv(&param->inner));
if (ret)
goto out_err;
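dr_get_bits_per_mask() is Kernighan's popcount: each byte_mask &= byte_mask - 1 clears the lowest set bit, so the loop runs once per set bit. The new guard in dr_rule_need_enlarge_hash() uses it to stop growing a table once chunk_size (the log2 of the entry count) reaches the number of tag bits that can actually vary, 8 * popcount(byte_mask); past that point every distinct masked tag already has its own slot and growing cannot reduce collisions. A standalone illustration:

/* Kernighan popcount, as dr_get_bits_per_mask() implements it. */
#include <stdint.h>
#include <stdio.h>

static unsigned int bits_per_mask(uint16_t byte_mask)
{
	unsigned int bits = 0;

	while (byte_mask) {
		byte_mask &= byte_mask - 1;	/* clear lowest set bit */
		bits++;
	}
	return bits;
}

int main(void)
{
	/* 3 participating bytes -> 24 varying tag bits -> at most 2^24
	 * distinct masked tags, so a table with log2(size) >= 24 never
	 * needs to grow.
	 */
	uint16_t byte_mask = 0x0015;	/* bits 0, 2, 4 set */

	printf("%u bytes set\n", bits_per_mask(byte_mask));
	return 0;
}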
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 5df8436b2ae3..51803eef13dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
unsigned int irqn;
void *cqc, *in;
__be64 *pas;
+ int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
goto err_cqwq;
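The dr_send.c fix is pure range arithmetic: smp_processor_id() is a raw CPU number, but mlx5_vector2eqn() wants a completion-vector index, and the device can expose far fewer vectors than the host has CPUs. Taking the id modulo mlx5_comp_vectors_count() folds it into range: on a 64-CPU host with 8 completion vectors, CPU 37 maps to vector 37 % 8 = 5, always within [0, 7].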
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 4efe1b0be4a8..a5a266983dd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/types.h>
+#include <linux/crc32.h>
#include "dr_types.h"
#define DR_STE_CRC_POLY 0xEDB88320L
@@ -107,6 +108,13 @@ struct dr_hw_ste_format {
u8 mask[DR_STE_SIZE_MASK];
};
+static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
+{
+ u32 crc = crc32(0, input_data, length);
+
+ return htonl(crc);
+}
+
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -128,7 +136,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
bit = bit >> 1;
}
- crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+ crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
index = crc32 & (htbl->chunk->num_of_entries - 1);
return index;
@@ -560,18 +568,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
return !refcount_read(&ste->refcount);
}
-static u16 get_bits_per_mask(u16 byte_mask)
-{
- u16 bits = 0;
-
- while (byte_mask) {
- byte_mask = byte_mask & (byte_mask - 1);
- bits++;
- }
-
- return bits;
-}
-
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -620,20 +616,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u32 bits_in_mask;
u8 next_lu_type;
u16 byte_mask;
next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
- /* Don't allocate table more than required,
- * the size of the table defined via the byte_mask, so no need
- * to allocate more than that.
- */
- bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
- log_table_size = min(log_table_size, bits_in_mask);
-
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
next_lu_type,
@@ -671,7 +659,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
htbl->ctrl.may_grow = true;
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+ if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
htbl->ctrl.may_grow = false;
/* Threshold is 50%, one is added to table of size 1 */
@@ -2095,68 +2083,110 @@ void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
}
-static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
+static void
+dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
- if (misc_3_mask->outer_vxlan_gpe_flags ||
- misc_3_mask->outer_vxlan_gpe_next_protocol) {
- MLX5_SET(ste_flex_parser_tnl, bit_mask,
- flex_parser_tunneling_header_63_32,
- (misc_3_mask->outer_vxlan_gpe_flags << 24) |
- (misc_3_mask->outer_vxlan_gpe_next_protocol));
- misc_3_mask->outer_vxlan_gpe_flags = 0;
- misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
- }
-
- if (misc_3_mask->outer_vxlan_gpe_vni) {
- MLX5_SET(ste_flex_parser_tnl, bit_mask,
- flex_parser_tunneling_header_31_0,
- misc_3_mask->outer_vxlan_gpe_vni << 8);
- misc_3_mask->outer_vxlan_gpe_vni = 0;
- }
+ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+ outer_vxlan_gpe_flags,
+ misc_3_mask, outer_vxlan_gpe_flags);
+ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+ outer_vxlan_gpe_next_protocol,
+ misc_3_mask, outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+ outer_vxlan_gpe_vni,
+ misc_3_mask, outer_vxlan_gpe_vni);
}
-static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+static int
+dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
u8 *tag = hw_ste->tag;
- if (misc3->outer_vxlan_gpe_flags ||
- misc3->outer_vxlan_gpe_next_protocol) {
- MLX5_SET(ste_flex_parser_tnl, tag,
- flex_parser_tunneling_header_63_32,
- (misc3->outer_vxlan_gpe_flags << 24) |
- (misc3->outer_vxlan_gpe_next_protocol));
- misc3->outer_vxlan_gpe_flags = 0;
- misc3->outer_vxlan_gpe_next_protocol = 0;
- }
-
- if (misc3->outer_vxlan_gpe_vni) {
- MLX5_SET(ste_flex_parser_tnl, tag,
- flex_parser_tunneling_header_31_0,
- misc3->outer_vxlan_gpe_vni << 8);
- misc3->outer_vxlan_gpe_vni = 0;
- }
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
return 0;
}
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
+ sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static void
+dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_protocol_type,
+ misc_mask, geneve_protocol_type);
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_oam,
+ misc_mask, geneve_oam);
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_opt_len,
+ misc_mask, geneve_opt_len);
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_vni,
+ misc_mask, geneve_vni);
+}
+
+static int
+dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
{
- dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
}
static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
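Worth noting about the hash path this file keeps: mlx5dr_ste_calc_hash_index() masks with num_of_entries - 1, which computes crc % n only because the ICM chunk sizes are powers of two; and since the CRC is passed through htonl() first, on a little-endian host it is the high CRC bytes that end up selecting the bucket (presumably to match the device's own hash definition). The masking trick, standalone:

/* Power-of-two masking: index = crc & (n - 1) equals crc % n only
 * when n is a power of two. Illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crc = 0xdeadbeef;
	uint32_t n = 1u << 10;			/* 1024-entry table */

	printf("index = 0x%x\n", crc & (n - 1));	/* == crc % 1024 */
	return 0;
}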
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1cb3769d4e3c..290fe61c33d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -106,6 +106,12 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_MAX,
};
+enum mlx5dr_ipv {
+ DR_RULE_IPV4,
+ DR_RULE_IPV6,
+ DR_RULE_IPV_MAX,
+};
+
struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
struct mlx5dr_icm_bucket;
@@ -319,9 +325,12 @@ int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@@ -679,11 +688,11 @@ struct mlx5dr_matcher_rx_tx {
struct mlx5dr_ste_htbl *s_htbl;
struct mlx5dr_ste_htbl *e_anchor;
struct mlx5dr_ste_build *ste_builder;
- struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
- struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
+ struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
+ [DR_RULE_IPV_MAX]
+ [DR_RULE_MAX_STES];
u8 num_of_builders;
- u8 num_of_builders4;
- u8 num_of_builders6;
+ u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
u64 default_icm_addr;
struct mlx5dr_table_rx_tx *nic_tbl;
};
@@ -812,7 +821,8 @@ mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6);
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv);
static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
@@ -962,9 +972,6 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask);
-void mlx5dr_crc32_init_table(void);
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
-
struct mlx5dr_qp {
struct mlx5_core_dev *mdev;
struct mlx5_wq_qp wq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 596c927220d9..1722f4668269 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -548,6 +548,30 @@ struct mlx5_ifc_ste_flex_parser_tnl_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits {
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_8[0x10];
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+
+ u8 outer_vxlan_gpe_vni[0x18];
+ u8 reserved_at_38[0x8];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
+ u8 reserved_at_0[0x2];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_oam[0x1];
+ u8 reserved_at_9[0x7];
+ u8 geneve_protocol_type[0x10];
+
+ u8 geneve_vni[0x18];
+ u8 reserved_at_38[0x8];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];
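These u8 name[0xN] members follow the mlx5 ifc convention: the array length is the field's width in bits, fields are laid out MSB-first, and each group sums to a 32-bit dword (2+6+1+7+16 = 32 for the first GENEVE dword, 24+8 = 32 for the second), mirroring the GENEVE header. A hand-packed equivalent of the first dword, assuming exactly that layout (the driver itself goes through MLX5_SET() on these structs rather than packing by hand):

/* Hand-packing the first GENEVE dword per the widths above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_geneve_dw0(uint8_t opt_len, uint8_t oam, uint16_t proto)
{
	uint32_t dw = 0;

	dw |= (uint32_t)(opt_len & 0x3f) << 24;	/* bits 29..24 */
	dw |= (uint32_t)(oam & 0x1) << 23;	/* bit 23 */
	dw |= proto;				/* bits 15..0 */
	return dw;
}

int main(void)
{
	printf("0x%08x\n", pack_geneve_dw0(2, 1, 0x6558));
	return 0;
}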
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 30f7848a6f88..1faac31f74d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1064,26 +1064,13 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
- MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
- MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
- MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
- MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
- MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
- MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
- MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
- MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
- MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
- MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
- MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
- MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
- MLX5_SET(hca_vport_context, ctx, lid, req->lid);
- MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
- MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
- MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
- MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
- MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
- MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
- MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+ if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
+ MLX5_SET(hca_vport_context, ctx, vport_state_policy,
+ req->policy);
+ if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
+ MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+ if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
+ MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
kfree(in);
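The old code wrote every context field regardless of req->field_select, even though that bitmask is what tells firmware which fields to apply; the rewrite fills only the selected fields and trims the list to the three this driver actually modifies. The gating pattern, standalone (the SEL_* bit assignments here are hypothetical, not the real MLX5_HCA_VPORT_SEL_* values):

/* field_select gating: only write what the caller asked to change. */
#include <stdint.h>
#include <stdio.h>

#define SEL_STATE_POLICY	(1u << 0)
#define SEL_PORT_GUID		(1u << 1)
#define SEL_NODE_GUID		(1u << 2)

struct req {
	uint32_t field_select;
	uint64_t port_guid, node_guid;
	uint8_t policy;
};

struct ctx {
	uint64_t port_guid, node_guid;
	uint8_t policy;
};

static void apply(struct ctx *ctx, const struct req *req)
{
	if (req->field_select & SEL_STATE_POLICY)
		ctx->policy = req->policy;
	if (req->field_select & SEL_PORT_GUID)
		ctx->port_guid = req->port_guid;
	if (req->field_select & SEL_NODE_GUID)
		ctx->node_guid = req->node_guid;
	/* everything else in the request is ignored, as in the patch */
}

int main(void)
{
	struct ctx ctx = { .policy = 7 };
	struct req req = { .field_select = SEL_PORT_GUID, .port_guid = 0xabcd };

	apply(&ctx, &req);
	printf("policy=%u (untouched), port_guid=0x%llx\n",
	       ctx.policy, (unsigned long long)ctx.port_guid);
	return 0;
}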
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index dd2315ce4441..f2a0e72285ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -34,26 +34,6 @@
#include "wq.h"
#include "mlx5_core.h"
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
-{
- return (u32)wq->fbc.sz_m1 + 1;
-}
-
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
-{
- return wq->fbc.sz_m1 + 1;
-}
-
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
-{
- return wq->fbc.log_stride;
-}
-
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
-{
- return (u32)wq->fbc.sz_m1 + 1;
-}
-
static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
return ((u32)1 << log_sz) << log_stride;
@@ -96,6 +76,24 @@ err_db_free:
return err;
}
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
+{
+ size_t len;
+ void *wqe;
+
+ if (!net_ratelimit())
+ return;
+
+ nstrides = max_t(u8, nstrides, 1);
+
+ len = nstrides << wq->fbc.log_stride;
+ wqe = mlx5_wq_cyc_get_wqe(wq, ix);
+
+ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+ mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
+}
+
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 55791f71a778..d9a94bc223c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -79,7 +79,7 @@ struct mlx5_wq_ll {
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -88,16 +88,18 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
{
return wq->cur_sz == wq->sz;
@@ -168,6 +170,16 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
return !equal && !smaller;
}
+static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.sz_m1 + 1;
+}
+
+static inline u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.log_stride;
+}
+
static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
{
return ctr & wq->fbc.sz_m1;
@@ -224,6 +236,11 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
return cqe;
}
+static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->fbc.sz_m1;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 67990406cba2..29e95d0a6ad1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -66,6 +66,8 @@ retry:
return err;
if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+ fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+ fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
pr_err("Firmware flash failed: %s\n",
mlxfw_fsm_state_err_str[fsm_state_err]);
NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
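fsm_state_err comes from the device, so a value reported by newer firmware could index past the end of mlxsw_fsm_state_err_str[]; clamping to MLXFW_FSM_STATE_ERR_MAX bounds the array access, which implies the MAX slot carries a catch-all string. The pattern, standalone:

/* Clamp-before-index: MAX doubles as the "unknown" slot. */
#include <stdio.h>

enum err { ERR_OK, ERR_CRC, ERR_TIMEOUT, ERR_MAX };

static const char * const err_str[] = {
	[ERR_OK] = "ok",
	[ERR_CRC] = "crc failed",
	[ERR_TIMEOUT] = "timeout",
	[ERR_MAX] = "unknown error",
};

int main(void)
{
	int from_device = 57;	/* out of range: future firmware code */
	enum err e = from_device < ERR_MAX ? from_device : ERR_MAX;

	printf("%s\n", err_str[e]);
	return 0;
}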
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 4421ab22182f..e9f791c43f20 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -71,6 +71,7 @@ struct mlxsw_core {
struct list_head trans_list;
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
+ bool enable_string_tlv;
} emad;
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
@@ -127,6 +128,16 @@ bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev)
+{
+ return rev->minor > req_rev->minor ||
+ (rev->minor == req_rev->minor &&
+ rev->subminor >= req_rev->subminor);
+}
+EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
+
struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
@@ -239,6 +250,25 @@ MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
*/
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+/* emad_string_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x2 (string TLV).
+ */
+MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
+
+/* emad_string_tlv_len
+ * Length of the string TLV in u32.
+ */
+MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
+
+#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
+
+/* emad_string_tlv_string
+ * String provided by the device's firmware in case of erroneous register access
+ */
+MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+
/* emad_reg_tlv_type
* Type of the TLV.
* Must be set to 0x3 (register TLV).
@@ -294,6 +324,12 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
+static void mlxsw_emad_pack_string_tlv(char *string_tlv)
+{
+ mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
+ mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
+}
+
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
const struct mlxsw_reg_info *reg,
enum mlxsw_core_reg_access_type type,
@@ -335,7 +371,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
- u64 tid)
+ u64 tid, bool enable_string_tlv)
{
char *buf;
@@ -345,26 +381,82 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
buf = skb_push(skb, reg->len + sizeof(u32));
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+ if (enable_string_tlv) {
+ buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_string_tlv(buf);
+ }
+
buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
mlxsw_emad_construct_eth_hdr(skb);
}
+struct mlxsw_emad_tlv_offsets {
+ u16 op_tlv;
+ u16 string_tlv;
+ u16 reg_tlv;
+};
+
+static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
+{
+ u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
+
+ return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
+}
+
+static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
+ offsets->string_tlv = 0;
+ offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+
+ /* If string TLV is present, it must come after the operation TLV. */
+ if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
+ offsets->string_tlv = offsets->reg_tlv;
+ offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ }
+}
+
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
- return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->op_tlv));
+}
+
+static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ if (!offsets->string_tlv)
+ return NULL;
+
+ return ((char *) (skb->data + offsets->string_tlv));
}
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
- return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
- MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->reg_tlv));
}
-static char *mlxsw_emad_reg_payload(const char *op_tlv)
+static char *mlxsw_emad_reg_payload(const char *reg_tlv)
{
- return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+ return ((char *) (reg_tlv + sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
+{
+ return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
@@ -430,10 +522,31 @@ struct mlxsw_reg_trans {
const struct mlxsw_reg_info *reg;
enum mlxsw_core_reg_access_type type;
int err;
+ char *emad_err_string;
enum mlxsw_emad_op_tlv_status emad_status;
struct rcu_head rcu;
};
+static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
+ struct mlxsw_reg_trans *trans)
+{
+ char *string_tlv;
+ char *string;
+
+ string_tlv = mlxsw_emad_string_tlv(skb);
+ if (!string_tlv)
+ return;
+
+ trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
+ GFP_ATOMIC);
+ if (!trans->emad_err_string)
+ return;
+
+ string = mlxsw_emad_string_tlv_string_data(string_tlv);
+ strlcpy(trans->emad_err_string, string,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+}
+
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
#define MLXSW_EMAD_TIMEOUT_MS 200
@@ -525,12 +638,14 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
mlxsw_emad_transmit_retry(mlxsw_core, trans);
} else {
if (err == 0) {
- char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
if (trans->cb)
trans->cb(mlxsw_core,
- mlxsw_emad_reg_payload(op_tlv),
+ mlxsw_emad_reg_payload(reg_tlv),
trans->reg->len, trans->cb_priv);
+ } else {
+ mlxsw_emad_process_string_tlv(skb, trans);
}
mlxsw_emad_trans_finish(trans, err);
}
@@ -546,6 +661,8 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
skb->data, skb->len);
+ mlxsw_emad_tlv_parse(skb);
+
if (!mlxsw_emad_is_resp(skb))
goto free_skb;
@@ -621,7 +738,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
- u16 reg_len)
+ u16 reg_len, bool enable_string_tlv)
{
struct sk_buff *skb;
u16 emad_len;
@@ -629,6 +746,8 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (enable_string_tlv)
+ emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
return NULL;
@@ -650,6 +769,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb,
unsigned long cb_priv, u64 tid)
{
+ bool enable_string_tlv;
struct sk_buff *skb;
int err;
@@ -657,7 +777,12 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
tid, reg->id, mlxsw_reg_id_str(reg->id),
mlxsw_core_reg_access_type_str(type));
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ /* Since this can be changed during emad_reg_access, read it once and
+ * use the value all the way.
+ */
+ enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
if (!skb)
return -ENOMEM;
@@ -674,7 +799,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->reg = reg;
trans->type = type;
- mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
+ enable_string_tlv);
mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
@@ -985,6 +1111,7 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
+ bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -1005,7 +1132,7 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
- devlink);
+ devlink, extack);
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -1098,7 +1225,8 @@ static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink)
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
@@ -1172,7 +1300,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
if (err)
goto err_driver_init;
}
@@ -1189,6 +1317,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (mlxsw_driver->params_register)
devlink_params_publish(devlink);
+ if (!reload)
+ devlink_reload_enable(devlink);
+
return 0;
err_thermal_init:
@@ -1223,14 +1354,16 @@ err_devlink_alloc:
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink)
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
bool called_again = false;
int err;
again:
err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
- bus_priv, reload, devlink);
+ bus_priv, reload,
+ devlink, extack);
/* -EAGAIN is returned in case the FW was updated. FW needs
* a reset, so lets try to call __mlxsw_core_bus_device_register()
* again.
@@ -1249,6 +1382,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (!reload)
+ devlink_reload_disable(devlink);
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -1376,12 +1511,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_event_listener_item *event_listener_item = priv;
struct mlxsw_reg_info reg;
char *payload;
- char *op_tlv = mlxsw_emad_op_tlv(skb);
- char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+ char *reg_tlv;
+ char *op_tlv;
+
+ mlxsw_emad_tlv_parse(skb);
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ reg_tlv = mlxsw_emad_reg_tlv(skb);
reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
- payload = mlxsw_emad_reg_payload(op_tlv);
+ payload = mlxsw_emad_reg_payload(reg_tlv);
event_listener_item->el.func(&reg, payload, event_listener_item->priv);
dev_kfree_skb(skb);
}
@@ -1599,8 +1738,11 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
+#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
+
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
+ char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
struct mlxsw_core *mlxsw_core = trans->core;
int err;
@@ -1618,9 +1760,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
mlxsw_core_reg_access_type_str(trans->type),
trans->emad_status,
mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
+ "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
+ trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_emad_op_tlv_status_str(trans->emad_status),
+ trans->emad_err_string ? trans->emad_err_string : "");
+
trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
- trans->emad_status,
- mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ trans->emad_status, err_string);
+
+ kfree(trans->emad_err_string);
}
list_del(&trans->bulk_list);
@@ -1694,7 +1844,7 @@ retry:
}
if (!err)
- memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
reg->len);
mlxsw_cmd_mbox_free(out_mbox);
@@ -2003,6 +2153,35 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ enum mlxsw_reg_pmtm_module_type module_type;
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
+
+ /* Here we need to get the module width according to the module type. */
+
+ switch (module_type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
+ return 4;
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ return 2;
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_module_max_width);
+
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
@@ -2162,6 +2341,12 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->emad.enable_string_tlv = true;
+}
+EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
+
static int __init mlxsw_core_module_init(void)
{
int err;
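The string-TLV plumbing hinges on one layout fact: an EMAD frame is the Ethernet header, the op TLV, an optional string TLV, then the register TLV, so the reg TLV no longer sits at a fixed offset. mlxsw_emad_tlv_parse() therefore computes the offsets once per skb, stashes them in skb->cb, and every accessor reads them back from there. A standalone model of that computation (ETH_HDR_LEN below is an assumption for the sketch, not the real MLXSW_EMAD_ETH_HDR_LEN value):

/* EMAD offset bookkeeping, modeled after mlxsw_emad_tlv_parse(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HDR_LEN	0x10	/* assumed header size for the sketch */
#define OP_TLV_U32	4	/* MLXSW_EMAD_OP_TLV_LEN */
#define STRING_TLV_U32	33	/* 4-byte header + 128-byte string = 132 */

struct offsets { uint16_t op, string, reg; };

static void parse(struct offsets *o, bool has_string_tlv)
{
	o->op = ETH_HDR_LEN;
	o->string = 0;
	o->reg = ETH_HDR_LEN + OP_TLV_U32 * 4;
	if (has_string_tlv) {	/* string TLV sits before the reg TLV */
		o->string = o->reg;
		o->reg += STRING_TLV_U32 * 4;
	}
}

int main(void)
{
	struct offsets o;

	parse(&o, true);
	printf("op=%u string=%u reg=%u\n", o.op, o.string, o.reg);
	return 0;
}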
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 5d7d2ab6d155..543476a2e503 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/net_namespace.h>
#include <net/devlink.h>
#include "trap.h"
@@ -23,6 +24,7 @@ struct mlxsw_core_port;
struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
@@ -30,13 +32,18 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev);
+
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink);
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
@@ -193,6 +200,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
@@ -252,7 +260,8 @@ struct mlxsw_driver {
const char *kind;
size_t priv_size;
int (*init)(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info);
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
@@ -338,6 +347,8 @@ void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -350,6 +361,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \
mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
+static inline struct net *mlxsw_core_net(struct mlxsw_core *mlxsw_core)
+{
+ return devlink_net(priv_to_devlink(mlxsw_core));
+}
+
#define MLXSW_BUS_F_TXRX BIT(0)
#define MLXSW_BUS_F_RESET BIT(1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index d2c7ce67c300..08215fed193d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -50,6 +50,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
u16 i2c_addr;
+ u8 page = 0;
int status;
int err;
@@ -62,11 +63,21 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
- offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ page = MLXSW_REG_MCIA_PAGE_GET(offset);
+ offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
+ /* When reading upper pages 1, 2 and 3 the offset starts at
+ * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436
+ * specification for graphical depiction.
+ * MCIA register accepts buffer size <= 48. Page of size 128
+ * should be read by chunks of size 48, 48, 32. Align the size
+ * of the last chunk to avoid reading after the end of the
+ * page.
+ */
+ if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, 0, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -168,7 +179,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
switch (module_id) {
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
@@ -176,10 +187,10 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
module_rev_id >=
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) {
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
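Worked numbers make the upper-page math clearer. ethtool hands the driver a flat offset: 0..255 covers the lower memory plus page 0, and each 128-byte upper page follows, always appearing at in-page addresses 128..255. Assuming MLXSW_REG_MCIA_PAGE_GET(off) is ((off - 256) / 128) + 1, a flat offset of 480 with a 48-byte chunk lands on page 2 at in-page offset 224, and the clamp trims the read to 32 bytes so it stops at the page end (the 48/48/32 pattern the comment describes). Standalone:

/* Worked page math for upper-page EEPROM reads (SFF-8436 layout). */
#include <stdio.h>

#define PAGE_LEN	256	/* in-page addresses run 0..255 */
#define UP_PAGE_LEN	128	/* each upper page is 128 bytes */

int main(void)
{
	unsigned int off = 480, size = 48;	/* flat offset from ethtool */
	unsigned int page = 0;

	if (off >= PAGE_LEN) {
		page = (off - PAGE_LEN) / UP_PAGE_LEN + 1;
		off -= UP_PAGE_LEN * page;	/* 480 -> page 2, offset 224 */
		if (off + size > PAGE_LEN)	/* clamp the 48/48/32 tail */
			size = PAGE_LEN - off;
	}
	printf("page=%u offset=%u size=%u\n", page, off, size);
	return 0;
}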
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 5b00726c4346..9bf8da5f6daf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -41,7 +41,7 @@ struct mlxsw_hwmon {
struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
unsigned int attrs_count;
u8 sensor_count;
- u8 module_sensor_count;
+ u8 module_sensor_max;
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -56,7 +56,7 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
int err;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -79,7 +79,7 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
int err;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -109,7 +109,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
return -EINVAL;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -336,7 +336,7 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
int index = mlwsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_count + 1;
+ mlxsw_hwmon->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -528,51 +528,45 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
- unsigned int module_count = mlxsw_core_max_ports(mlxsw_hwmon->core);
- char pmlp_pl[MLXSW_REG_PMLP_LEN] = {0};
- int i, index;
- u8 width;
- int err;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 module_sensor_max;
+ int i, err;
if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
return 0;
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &module_sensor_max);
+
/* Add extra attributes for module temperature. Sensor index is
* assigned to sensor_count value, while all indexed before
* sensor_count are already utilized by the sensors connected through
* mtmp register by mlxsw_hwmon_temp_init().
*/
- index = mlxsw_hwmon->sensor_count;
- for (i = 1; i < module_count; i++) {
- mlxsw_reg_pmlp_pack(pmlp_pl, i);
- err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(pmlp),
- pmlp_pl);
- if (err) {
- dev_err(mlxsw_hwmon->bus_info->dev, "Failed to read module index %d\n",
- i);
- return err;
- }
- width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- if (!width)
- continue;
+ mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon->sensor_count;
+ i < mlxsw_hwmon->module_sensor_max; i++) {
mlxsw_hwmon_attr_add(mlxsw_hwmon,
- MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, index,
- index);
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
- index, index);
+ i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
- MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
- index, index);
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
+ i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
- index, index);
+ i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
- index, index);
- index++;
+ i, i);
}
- mlxsw_hwmon->module_sensor_count = index;
return 0;
}
@@ -590,14 +584,14 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
if (!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_count;
- max_index = mlxsw_hwmon->module_sensor_count + gbox_num;
+ index = mlxsw_hwmon->module_sensor_max;
+ max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_count +
+ sensor_index = index % mlxsw_hwmon->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 35a1dc89c28a..c721b171bd8d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -112,6 +112,7 @@ struct mlxsw_thermal {
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
struct mlxsw_thermal_module *tz_gearbox_arr;
u8 tz_gearbox_num;
unsigned int tz_highest_score;
@@ -775,23 +776,10 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 local_port)
+ struct mlxsw_thermal *thermal, u8 module)
{
struct mlxsw_thermal_module *module_tz;
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
- u8 width, module;
- int err;
-
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
- width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- if (!width)
- return 0;
-
- module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
module_tz = &thermal->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
@@ -819,26 +807,34 @@ static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
- unsigned int module_count = mlxsw_core_max_ports(core);
struct mlxsw_thermal_module *module_tz;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
if (!mlxsw_core_res_query_enabled(core))
return 0;
- thermal->tz_module_arr = kcalloc(module_count,
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &thermal->tz_module_num);
+
+ thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
sizeof(*thermal->tz_module_arr),
GFP_KERNEL);
if (!thermal->tz_module_arr)
return -ENOMEM;
- for (i = 1; i < module_count; i++) {
+ for (i = 0; i < thermal->tz_module_num; i++) {
err = mlxsw_thermal_module_init(dev, core, thermal, i);
if (err)
goto err_unreg_tz_module_arr;
}
- for (i = 0; i < module_count - 1; i++) {
+ for (i = 0; i < thermal->tz_module_num; i++) {
module_tz = &thermal->tz_module_arr[i];
if (!module_tz->parent)
continue;
@@ -850,7 +846,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
return 0;
err_unreg_tz_module_arr:
- for (i = module_count - 1; i >= 0; i--)
+ for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
return err;
@@ -859,13 +855,12 @@ err_unreg_tz_module_arr:
static void
mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
{
- unsigned int module_count = mlxsw_core_max_ports(thermal->core);
int i;
if (!mlxsw_core_res_query_enabled(thermal->core))
return;
- for (i = module_count - 1; i >= 0; i--)
+ for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
}
@@ -913,7 +908,8 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+ NULL);
if (!thermal->tz_gearbox_num)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index a33b896f4bb8..acfbbec52424 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -19,10 +19,8 @@
enum {
MLXSW_EMAD_TLV_TYPE_END,
MLXSW_EMAD_TLV_TYPE_OP,
- MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_STRING,
MLXSW_EMAD_TLV_TYPE_REG,
- MLXSW_EMAD_TLV_TYPE_USERDATA,
- MLXSW_EMAD_TLV_TYPE_OOBETH,
};
/* OP TLV */
@@ -89,6 +87,9 @@ enum {
MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
};
+/* STRING TLV */
+#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */
+
/* END TLV */
#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
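Since the enum above relies on implicit positional values, swapping MLXSW_EMAD_TLV_TYPE_DR for MLXSW_EMAD_TLV_TYPE_STRING reuses wire value 2 and drops the trailing USERDATA/OOBETH values. A standalone restatement of the resulting assignments:

#include <stdio.h>

enum {
	TLV_TYPE_END,	 /* 0 */
	TLV_TYPE_OP,	 /* 1 */
	TLV_TYPE_STRING, /* 2, the slot formerly used by the DR TLV */
	TLV_TYPE_REG,	 /* 3 */
};

int main(void)
{
	printf("STRING TLV: type %d, length %d u32s\n", TLV_TYPE_STRING, 33);
	return 0;
}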
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 95f408d0e103..34566eb62c47 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -640,7 +640,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c, false,
- NULL);
+ NULL, NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 471b0ca6d69a..2b543911ae00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -16,6 +16,14 @@
static const char mlxsw_m_driver_name[] = "mlxsw_minimal";
+#define MLXSW_M_FWREV_MINOR 2000
+#define MLXSW_M_FWREV_SUBMINOR 1886
+
+static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
+ .minor = MLXSW_M_FWREV_MINOR,
+ .subminor = MLXSW_M_FWREV_SUBMINOR,
+};
+
struct mlxsw_m_port;
struct mlxsw_m {
@@ -172,6 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
}
SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_m->core));
mlxsw_m_port = netdev_priv(dev);
mlxsw_m_port->dev = dev;
mlxsw_m_port->mlxsw_m = mlxsw_m;
@@ -325,8 +334,27 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
kfree(mlxsw_m->ports);
}
+static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_m->bus_info->fw_rev;
+
+ /* Validate that the driver and the firmware are compatible.
+ * The major version is deliberately not checked, since it identifies
+ * the chip type, and the driver is supposed to support any chip type.
+ */
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, &mlxsw_m_fw_rev))
+ return 0;
+
+ dev_err(mlxsw_m->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, rev->major,
+ mlxsw_m_fw_rev.minor, mlxsw_m_fw_rev.subminor);
+
+ return -EINVAL;
+}
+
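mlxsw_core_fw_rev_minor_subminor_validate() is declared elsewhere; assuming it performs the usual lexicographic comparison on (minor, subminor) while ignoring major, which matches the comment above, a minimal standalone sketch would be:

#include <stdbool.h>
#include <stdio.h>

struct fw_rev { int major, minor, subminor; };

/* Assumed semantics: true when rev is at least req on (minor, subminor);
 * the major number is deliberately ignored.
 */
static bool minor_subminor_ok(const struct fw_rev *rev,
			      const struct fw_rev *req)
{
	return rev->minor > req->minor ||
	       (rev->minor == req->minor && rev->subminor >= req->subminor);
}

int main(void)
{
	struct fw_rev have = { 13, 2000, 1886 };
	struct fw_rev need = { 0, 2000, 1886 };

	printf("compatible: %d\n", minor_subminor_ok(&have, &need));
	return 0;
}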
static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
int err;
@@ -334,6 +362,10 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
mlxsw_m->core = mlxsw_core;
mlxsw_m->bus_info = mlxsw_bus_info;
+ err = mlxsw_m_fw_rev_validate(mlxsw_m);
+ if (err)
+ return err;
+
err = mlxsw_m_base_mac_get(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 615455a21567..914c33e46fb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -284,15 +284,18 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ int tclass;
int i;
int err;
q->producer_counter = 0;
q->consumer_counter = 0;
+ tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
+ MLXSW_PCI_SDQ_CTL_TC;
/* Set CQ of same number of this SDQ. */
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
- mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -963,6 +966,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
if (num_sdqs + num_rdqs > num_cqs ||
+ num_sdqs < MLXSW_PCI_SDQS_MIN ||
num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
@@ -1520,7 +1524,15 @@ static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
{
- u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+ u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 sdqn;
+
+ if (tx_info->is_emad) {
+ sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
+ } else {
+ BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
+ sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
+ }
return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}
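Restated standalone, the pick rule above pins EMADs to SDQ 0 and hashes control traffic by local port across the remaining queues; the queue count is illustrative.

#include <stdio.h>

#define SDQ_EMAD_INDEX 0

int main(void)
{
	unsigned int sdq_count = 4;		/* queues reported by FW, assumed */
	unsigned int ctl_sdq_count = sdq_count - 1;
	unsigned int local_port;

	printf("EMAD -> SDQ %d\n", SDQ_EMAD_INDEX);
	for (local_port = 1; local_port <= 6; local_port++)
		printf("port %u -> SDQ %u\n", local_port,
		       1 + local_port % ctl_sdq_count);
	return 0;
}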
@@ -1790,7 +1802,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
- NULL);
+ NULL, NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index e57e42e2d2b2..e0d7d2d9a0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -51,6 +51,11 @@
#define MLXSW_PCI_EQ_ASYNC_NUM 0
#define MLXSW_PCI_EQ_COMP_NUM 1
+#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
+#define MLXSW_PCI_SDQ_EMAD_INDEX 0
+#define MLXSW_PCI_SDQ_EMAD_TC 0
+#define MLXSW_PCI_SDQ_CTL_TC 3
+
#define MLXSW_PCI_AQ_PAGES 8
#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index a33eeef0b00c..741fd2989d12 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -24,8 +24,6 @@
#define MLXSW_PORT_DONT_CARE 0xFF
-#define MLXSW_PORT_MODULE_MAX_WIDTH 4
-
enum mlxsw_port_admin_status {
MLXSW_PORT_ADMIN_STATUS_UP = 1,
MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5494cf93f34c..5294a1622643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3969,6 +3969,7 @@ MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
* 1 - Lane 0 is used.
* 2 - Lanes 0 and 1 are used.
* 4 - Lanes 0, 1, 2 and 3 are used.
+ * 8 - Lanes 0-7 are used.
* Access: RW
*/
MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
@@ -3983,14 +3984,14 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
/* reg_pmlp_rx_lane
* Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
* equal to Tx lane.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
{
@@ -4111,6 +4112,7 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
/* reg_ptys_ext_eth_proto_cap
* Extended Ethernet port supported speeds and protocols.
@@ -5373,6 +5375,55 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM allows query or configuration of module types.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ /* Backplane with 4 lanes */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
+ /* QSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP,
+ /* SFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP,
+ /* Backplane with a single lane */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
+ /* Backplane with two lanes */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
+ /* Chip2Chip */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C = 10,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
+
+static inline void
+mlxsw_reg_pmtm_unpack(char *payload,
+ enum mlxsw_reg_pmtm_module_type *module_type)
+{
+ *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+}
+
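A hedged sketch of how a caller might use the new register; mlxsw_reg_query() and the pmtm helpers are the ones defined here, but example_module_type_get() itself is illustrative and not part of the patch.

/* Illustrative only -- query the type of one front-panel module,
 * assuming the usual driver context.
 */
static int example_module_type_get(struct mlxsw_core *core, u8 module,
				   enum mlxsw_reg_pmtm_module_type *type)
{
	char pmtm_pl[MLXSW_REG_PMTM_LEN];
	int err;

	mlxsw_reg_pmtm_pack(pmtm_pl, module);
	err = mlxsw_reg_query(core, MLXSW_REG(pmtm), pmtm_pl);
	if (err)
		return err;
	mlxsw_reg_pmtm_unpack(pmtm_pl, type);
	return 0;
}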
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -5429,6 +5480,7 @@ enum mlxsw_reg_htgt_trap_group {
enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
};
/* reg_htgt_trap_group
@@ -8411,6 +8463,7 @@ MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
+#define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128
#define MLXSW_REG_MCIA_EEPROM_SIZE 48
#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
@@ -8446,6 +8499,14 @@ enum mlxsw_reg_mcia_eeprom_module_info {
*/
MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
+/* This is used to access the optional upper pages (1-3) in the QSFP+
+ * memory map. Page 1 occupies offsets 256 through 383, page 2 offsets
+ * 384 through 511, and page 3 offsets 512 through 639.
+ */
+#define MLXSW_REG_MCIA_PAGE_GET(off) (((off) - \
+ MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
+ MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
+
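Worked examples of the macro above; the lower page covers bytes 0-255, and each 128-byte upper window maps to one page:

#include <stdio.h>

#define PAGE_LEN	256
#define UP_PAGE_LEN	128
#define PAGE_GET(off)	(((off) - PAGE_LEN) / UP_PAGE_LEN + 1)

int main(void)
{
	/* offset 256 -> page 1, 400 -> page 2, 639 -> page 3 */
	printf("%d %d %d\n", PAGE_GET(256), PAGE_GET(400), PAGE_GET(639));
	return 0;
}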
static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
u8 page_number, u16 device_addr,
u8 size, u8 i2c_device_addr)
@@ -8670,7 +8731,7 @@ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
* properties.
*/
#define MLXSW_REG_MPAR_ID 0x901B
-#define MLXSW_REG_MPAR_LEN 0x08
+#define MLXSW_REG_MPAR_LEN 0x0C
MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
@@ -9531,6 +9592,12 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+/* reg_mgpir_num_of_modules
+ * Number of modules.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
+
static inline void mlxsw_reg_mgpir_pack(char *payload)
{
MLXSW_REG_ZERO(mgpir, payload);
@@ -9539,7 +9606,7 @@ static inline void mlxsw_reg_mgpir_pack(char *payload)
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash)
+ u8 *devices_per_flash, u8 *num_of_modules)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -9548,6 +9615,8 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
if (devices_per_flash)
*devices_per_flash =
mlxsw_reg_mgpir_devices_per_flash_get(payload);
+ if (num_of_modules)
+ *num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
}
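A hedged sketch of reading only the new module count through MGPIR, relying on the NULL-tolerant unpack above; the wrapper itself is illustrative, though it mirrors the callers added in core_hwmon.c and core_thermal.c.

/* Illustrative only -- fetch just the module count. */
static int example_module_count_get(struct mlxsw_core *core, u8 *num_modules)
{
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	int err;

	mlxsw_reg_mgpir_pack(mgpir_pl);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	/* Uninteresting outputs may simply be passed as NULL. */
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, num_modules);
	return 0;
}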
/* TNGCR - Tunneling NVE General Configuration Register
@@ -10526,6 +10595,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
MLXSW_REG(pplr),
+ MLXSW_REG(pmtm),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 33a9fc9ef6a4..6534184cb942 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -26,7 +26,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
- MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
+ MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
@@ -82,7 +83,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
[MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
- [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
+ [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index dcf9562bce8a..556dca328bb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -22,6 +22,7 @@
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
+#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -48,7 +49,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 1886
+#define MLXSW_SP1_FWREV_SUBMINOR 2308
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,6 +64,21 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_MINOR) \
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP2_FWREV_MAJOR 29
+#define MLXSW_SP2_FWREV_MINOR 2000
+#define MLXSW_SP2_FWREV_SUBMINOR 2308
+
+static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
+ .major = MLXSW_SP2_FWREV_MAJOR,
+ .minor = MLXSW_SP2_FWREV_MINOR,
+ .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP2_FW_FILENAME \
+ "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP2_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -409,9 +425,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
}
if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
- (rev->minor > req_rev->minor ||
- (rev->minor == req_rev->minor &&
- rev->subminor >= req_rev->subminor)))
+ mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
@@ -735,35 +749,69 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ bool separate_rxtx;
+ u8 module;
+ u8 width;
int err;
+ int i;
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
if (err)
return err;
- *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
- *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
+ module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+
+ if (width && !is_power_of_2(width)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < width; i++) {
+ if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (separate_rxtx &&
+ mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
+ mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
+ local_port);
+ return -EINVAL;
+ }
+ }
+
+ port_mapping->module = module;
+ port_mapping->width = width;
+ port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 module, u8 width, u8 lane)
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i;
mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
- mlxsw_reg_pmlp_width_set(pmlp_pl, width);
- for (i = 0; i < width; i++) {
- mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
- mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
+ mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
+ for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
@@ -2914,9 +2962,22 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_8[] = {
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
+
#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
+#define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3)
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
@@ -2927,6 +2988,8 @@ static u8 mlxsw_sp_port_mask_width_get(u8 width)
return MLXSW_SP_PORT_MASK_WIDTH_2X;
case 4:
return MLXSW_SP_PORT_MASK_WIDTH_4X;
+ case 8:
+ return MLXSW_SP_PORT_MASK_WIDTH_8X;
default:
WARN_ON_ONCE(1);
return 0;
@@ -2948,7 +3011,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100,
},
{
@@ -2957,7 +3021,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_1000,
},
{
@@ -2966,7 +3031,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_2500,
},
{
@@ -2975,7 +3041,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_5000,
},
{
@@ -2984,14 +3051,16 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_40000,
},
{
@@ -3000,7 +3069,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_25000,
},
{
@@ -3008,7 +3078,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_50000,
},
{
@@ -3022,7 +3093,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100000,
},
{
@@ -3036,9 +3108,17 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_200000,
},
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_400000,
+ },
};
#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
@@ -3435,7 +3515,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
};
static int
-mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3451,7 +3531,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
&base_speed);
if (err)
return err;
- upper_speed = base_speed * width;
+ upper_speed = base_speed * mlxsw_sp_port->mapping.width;
eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
@@ -3612,15 +3692,18 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
+ u8 split_base_local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool split = !!split_base_local_port;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
int err;
err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
- module + 1, split, lane / width,
+ port_mapping->module + 1, split,
+ port_mapping->lane / port_mapping->width,
mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
if (err) {
@@ -3635,15 +3718,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_etherdev;
}
SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
+ dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
- mlxsw_sp_port->mapping.module = module;
- mlxsw_sp_port->mapping.width = width;
- mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->split_base_local_port = split_base_local_port;
+ mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
@@ -3668,7 +3751,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
- err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+ err = mlxsw_sp_port_module_map(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
mlxsw_sp_port->local_port);
@@ -3710,7 +3793,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
+ err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
mlxsw_sp_port->local_port);
@@ -3933,14 +4016,13 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
- kfree(mlxsw_sp->port_to_module);
kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- u8 module, width, lane;
+ struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
int err;
@@ -3950,66 +4032,100 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
- GFP_KERNEL);
- if (!mlxsw_sp->port_to_module) {
- err = -ENOMEM;
- goto err_port_to_module_alloc;
- }
-
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- /* Mark as invalid */
- mlxsw_sp->port_to_module[i] = -1;
-
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width, &lane);
- if (err)
- goto err_port_module_info_get;
- if (!width)
+ port_mapping = mlxsw_sp->port_mapping[i];
+ if (!port_mapping)
continue;
- mlxsw_sp->port_to_module[i] = module;
- err = mlxsw_sp_port_create(mlxsw_sp, i, false,
- module, width, lane);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
-err_port_module_info_get:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
- kfree(mlxsw_sp->port_to_module);
-err_port_to_module_alloc:
kfree(mlxsw_sp->ports);
return err;
}
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
+static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping port_mapping;
+ int i;
+ int err;
+
+ mlxsw_sp->port_mapping = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_port_mapping *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping)
+ return -ENOMEM;
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ if (err)
+ goto err_port_module_info_get;
+ if (!port_mapping.width)
+ continue;
+
+ mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
+ sizeof(port_mapping),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping[i]) {
+ err = -ENOMEM;
+ goto err_port_module_info_dup;
+ }
+ }
+ return 0;
+
+err_port_module_info_get:
+err_port_module_info_dup:
+ for (i--; i >= 1; i--)
+ kfree(mlxsw_sp->port_mapping[i]);
+ kfree(mlxsw_sp->port_mapping);
+ return err;
+}
+
+static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
- u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
+ int i;
+
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ kfree(mlxsw_sp->port_mapping[i]);
+ kfree(mlxsw_sp->port_mapping);
+}
+
+static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
+{
+ u8 offset = (local_port - 1) % max_width;
return local_port - offset;
}
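Worked example of the generalized base-port computation above, assuming a module maximum width of 4: local ports 1-4 map to base port 1, and ports 5-8 map to base port 5.

#include <stdio.h>

static unsigned int cluster_base_port_get(unsigned int local_port,
					  unsigned int max_width)
{
	return local_port - (local_port - 1) % max_width;
}

int main(void)
{
	unsigned int p;

	for (p = 1; p <= 8; p++)
		printf("local port %u -> base port %u\n",
		       p, cluster_base_port_get(p, 4));
	return 0;
}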
-static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
- u8 module, unsigned int count, u8 offset)
+static int
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ struct mlxsw_sp_port_mapping *port_mapping,
+ unsigned int count, u8 offset)
{
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ struct mlxsw_sp_port_mapping split_port_mapping;
int err, i;
+ split_port_mapping = *port_mapping;
+ split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
- true, module, width, i * width);
+ base_port, &split_port_mapping);
if (err)
goto err_port_create;
+ split_port_mapping.lane += split_port_mapping.width;
}
return 0;
@@ -4022,45 +4138,55 @@ err_port_create:
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
- u8 base_port, unsigned int count)
+ u8 base_port,
+ unsigned int count, u8 offset)
{
- u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
- /* Split by four means we need to re-create two ports, otherwise
- * only one.
- */
- count = count / 2;
-
- for (i = 0; i < count; i++) {
- local_port = base_port + i * 2;
- if (mlxsw_sp->port_to_module[local_port] < 0)
+ /* Go over original unsplit ports in the gap and recreate them. */
+ for (i = 0; i < count * offset; i++) {
+ port_mapping = mlxsw_sp->port_mapping[base_port + i];
+ if (!port_mapping)
continue;
- module = mlxsw_sp->port_to_module[local_port];
-
- mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
- width, 0);
+ mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
}
}
+static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
+ unsigned int count,
+ unsigned int max_width)
+{
+ enum mlxsw_res_id local_ports_in_x_res_id;
+ int split_width = max_width / count;
+
+ if (split_width == 1)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
+ else if (split_width == 2)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
+ else if (split_width == 4)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
+ else
+ return -EINVAL;
+
+ if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
+ return -EINVAL;
+ return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 local_ports_in_1x, local_ports_in_2x, offset;
+ struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ int max_width;
+ u8 base_port;
+ int offset;
int i;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
- return -EIO;
-
- local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
- local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4069,47 +4195,70 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- module = mlxsw_sp_port->mapping.module;
- cur_width = mlxsw_sp_port->mapping.width;
+ /* A port that is already split cannot be split further. */
+ if (mlxsw_sp_port->split) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+ return -EINVAL;
+ }
+
+ max_width = mlxsw_core_module_max_width(mlxsw_core,
+ mlxsw_sp_port->mapping.module);
+ if (max_width < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+ return max_width;
+ }
- if (count != 2 && count != 4) {
- netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
- NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
+ /* A port that does not use its module's full width, or whose module
+ * supports only a single lane, cannot be split.
+ */
+ if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
return -EINVAL;
}
- if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
- netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
- NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+ if (count == 1 || !is_power_of_2(count) || count > max_width) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
return -EINVAL;
}
- /* Make sure we have enough slave (even) ports for the split. */
- if (count == 2) {
- offset = local_ports_in_2x;
- base_port = local_port;
- if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
- netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
- NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
- return -EINVAL;
- }
- } else {
- offset = local_ports_in_1x;
- base_port = mlxsw_sp_cluster_base_port_get(local_port);
- if (mlxsw_sp->ports[base_port + 1] ||
- mlxsw_sp->ports[base_port + 3]) {
+ offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+ if (offset < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ return -EINVAL;
+ }
+
+ /* The local port and the base port may differ only when a maximal
+ * split is requested.
+ */
+ base_port = count == max_width ?
+ mlxsw_sp_cluster_base_port_get(local_port, max_width) :
+ local_port;
+
+ for (i = 0; i < count * offset; i++) {
+ /* The base port is expected to exist, and so is the port in the
+ * middle of the cluster in case of a maximal split count.
+ */
+ if (i == 0 || (count == max_width && i == count / 2))
+ continue;
+
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
}
+ port_mapping = mlxsw_sp_port->mapping;
+
for (i = 0; i < count; i++)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
- offset);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
+ count, offset);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -4118,7 +4267,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
err_port_split_create:
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
return err;
}
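Standalone illustration of how count, offset and base_port interact in the split path above. The LOCAL_PORTS_IN_*X values are firmware resources; offset = 1 here is an assumption for the example, not a value taken from a device.

#include <stdio.h>

int main(void)
{
	unsigned int max_width = 4, count = 4;
	unsigned int offset = 1;	/* assumed LOCAL_PORTS_IN_1X */
	unsigned int base_port = 1;	/* cluster base for a maximal split */
	unsigned int i;

	/* Split ports are removed/created at base_port + i * offset. */
	for (i = 0; i < count; i++)
		printf("split port %u at local port %u, width %u\n",
		       i, base_port + i * offset, max_width / count);
	return 0;
}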
@@ -4126,19 +4275,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 cur_width, base_port;
unsigned int count;
+ int max_width;
+ u8 base_port;
+ int offset;
int i;
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
- return -EIO;
-
- local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
- local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4153,25 +4296,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- cur_width = mlxsw_sp_port->mapping.width;
- count = cur_width == 1 ? 4 : 2;
+ max_width = mlxsw_core_module_max_width(mlxsw_core,
+ mlxsw_sp_port->mapping.module);
+ if (max_width < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+ return max_width;
+ }
- if (count == 2)
- offset = local_ports_in_2x;
- else
- offset = local_ports_in_1x;
+ count = max_width / mlxsw_sp_port->mapping.width;
- base_port = mlxsw_sp_cluster_base_port_get(local_port);
+ offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+ if (WARN_ON(offset < 0)) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ return -EINVAL;
+ }
- /* Determine which ports to remove. */
- if (count == 2 && local_port >= base_port + 2)
- base_port = base_port + 2;
+ base_port = mlxsw_sp_port->split_base_local_port;
for (i = 0; i < count; i++)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
return 0;
}
@@ -4364,8 +4512,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
false),
/* L3 traps */
- MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
@@ -4392,8 +4538,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
- MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
@@ -4408,7 +4552,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
- MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
@@ -4738,7 +4881,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
@@ -4750,6 +4894,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
if (err)
return err;
+ mlxsw_core_emad_string_tlv_enable(mlxsw_core);
+
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
@@ -4831,7 +4977,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
- err = mlxsw_sp_router_init(mlxsw_sp);
+ err = mlxsw_sp_router_init(mlxsw_sp, extack);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
@@ -4864,7 +5010,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
* respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
- err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
goto err_netdev_notifier;
@@ -4876,6 +5023,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_dpipe_init;
}
+ err = mlxsw_sp_port_module_info_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
+ goto err_port_module_info_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -4885,9 +5038,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
+err_port_module_info_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
if (mlxsw_sp->clock)
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
@@ -4924,7 +5080,8 @@ err_fids_init:
}
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -4944,14 +5101,17 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
- return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -4964,7 +5124,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
- return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
@@ -4972,8 +5132,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
if (mlxsw_sp->clock) {
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
@@ -5165,14 +5327,61 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&kvd_size_params);
}
+static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params span_size_params;
+ u32 max_span;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
+ return -EIO;
+
+ max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
+ devlink_resource_size_params_init(&span_size_params, max_span, max_span,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ return 0;
+
+err_resources_span_register:
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ return err;
}
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ return 0;
+
+err_resources_span_register:
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ return err;
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -6565,3 +6774,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index b2a0028b1694..347bec9d1ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -14,6 +14,7 @@
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
+#include <linux/net_namespace.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
@@ -31,8 +32,6 @@
#define MLXSW_SP_MID_MAX 7000
-#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-
#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
@@ -47,6 +46,8 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
+#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"
+
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
@@ -55,6 +56,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_SPAN,
};
struct mlxsw_sp_port;
@@ -139,6 +141,12 @@ struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_port_mapping {
+ u8 module;
+ u8 width;
+ u8 lane;
+};
+
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
@@ -146,7 +154,7 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- int *port_to_module;
+ struct mlxsw_sp_port_mapping **port_mapping;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
@@ -255,11 +263,11 @@ struct mlxsw_sp_port {
struct ieee_pfc *pfc;
enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
- struct {
- u8 module;
- u8 width;
- u8 lane;
- } mapping;
+ struct mlxsw_sp_port_mapping mapping; /* The mapping is constant for
+ * the lifetime of a given
+ * mlxsw_sp_port; however, the
+ * same local port may be
+ * re-created with a different
+ * mapping.
+ */
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -283,6 +291,7 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
+ u8 split_base_local_port;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -524,7 +533,8 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr);
@@ -982,4 +992,9 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_net(mlxsw_sp->core);
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index b9eeae37a4dc..968f0902e4fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -35,6 +35,7 @@ struct mlxsw_sp_sb_cm {
};
#define MLXSW_SP_SB_INFI -1U
+#define MLXSW_SP_SB_REST -2U
struct mlxsw_sp_sb_pm {
u32 min_buff;
@@ -421,19 +422,16 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
.freeze_size = _freeze_size, \
}
-#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
-#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
@@ -445,19 +443,16 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
- MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
@@ -471,11 +466,33 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sb_pr *prs,
+ const struct mlxsw_sp_sb_pool_des *pool_dess,
size_t prs_len)
{
+ /* Round down, unlike mlxsw_sp_bytes_cells(). */
+ u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
+ u32 rest_cells[2] = {sb_cells, sb_cells};
int i;
int err;
+ /* Calculate how much space to give to the "REST" pools in either
+ * direction.
+ */
+ for (i = 0; i < prs_len; i++) {
+ enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
+ u32 size = prs[i].size;
+ u32 size_cells;
+
+ if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
+ continue;
+
+ size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
+ continue;
+
+ rest_cells[dir] -= size_cells;
+ }
+
for (i = 0; i < prs_len; i++) {
u32 size = prs[i].size;
u32 size_cells;
@@ -483,6 +500,10 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
if (size == MLXSW_SP_SB_INFI) {
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
0, true);
+ } else if (size == MLXSW_SP_SB_REST) {
+ size_cells = rest_cells[pool_dess[i].dir];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ size_cells, false);
} else {
size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
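The REST accounting above boils down to simple subtraction per direction; a standalone sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int sb_cells = 1000;		/* sb_size / cell_size, assumed */
	unsigned int sized_pools[] = { 100, 50 }; /* non-REST, non-INFI pools */
	unsigned int rest = sb_cells;
	unsigned int i;

	/* Whatever explicitly sized pools do not claim goes to REST. */
	for (i = 0; i < 2; i++)
		rest -= sized_pools[i];
	printf("REST pool gets %u cells\n", rest); /* prints 850 */
	return 0;
}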
@@ -904,7 +925,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
return -EIO;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
return -EIO;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
@@ -915,7 +936,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_BUFFER_SIZE);
+ GUARANTEED_SHARED_BUFFER);
max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_HEADROOM_SIZE);
/* Round down, because this limit must not be overstepped. */
@@ -926,6 +947,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
+ mlxsw_sp->sb_vals->pool_dess,
mlxsw_sp->sb_vals->pool_count);
if (err)
goto err_sb_prs_init;
@@ -1013,7 +1035,8 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
pr = &mlxsw_sp->sb_vals->prs[pool_index];
- if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
return -EINVAL;
}
@@ -1021,12 +1044,12 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
if (pr->freeze_mode && pr->mode != mode) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
return -EINVAL;
- };
+ }
if (pr->freeze_size && pr->size != size) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
return -EINVAL;
- };
+ }
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
pool_size, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 17f334b46c40..2153bcc4b585 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -870,7 +870,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_vni(fid, &vni)))
goto out;
- nve_dev = dev_get_by_index(&init_net, nve_ifindex);
+ nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!nve_dev)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index bdf53cf350f6..68cc6737d45c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -305,7 +305,8 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
p->max);
return -EINVAL;
}
- if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index a330b369e899..30bfe3880faf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -16,6 +16,7 @@
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
+#include <linux/net_namespace.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -76,6 +77,8 @@ struct mlxsw_sp_router {
struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
+ u32 adj_discard_index;
+ bool adj_discard_index_valid;
};
struct mlxsw_sp_rif {
@@ -366,6 +369,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
+ MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
/* This is a special case of local delivery, where a packet should be
* decapsulated on reception. Note that there is no corresponding ENCAP,
@@ -994,7 +998,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
if (d)
return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
- return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
+ return RT_TABLE_MAIN;
}
static struct mlxsw_sp_rif *
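
The statement above uses the GNU "?:" (Elvis) extension: it yields the left operand when it is non-zero, otherwise the right, evaluating the left only once. A small compilable illustration (GCC/Clang only); RT_TABLE_MAIN's value matches the rtnetlink ABI.

#include <stdio.h>

#define RT_TABLE_MAIN 254	/* value from the kernel's rtnetlink ABI */

/* x ?: y is equivalent to x ? x : y with x evaluated once; a zero
 * return from l3mdev_fib_table() falls back to the main table.
 */
static unsigned int ul_tb_id(unsigned int l3mdev_table)
{
	return l3mdev_table ?: RT_TABLE_MAIN;
}

int main(void)
{
	printf("%u %u\n", ul_tb_id(0), ul_tb_id(10));	/* prints: 254 10 */
	return 0;
}
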
@@ -1598,27 +1602,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry =
mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
- enum mlxsw_sp_l3proto ul_proto;
- union mlxsw_sp_l3addr saddr;
- u32 ul_tb_id;
if (!ipip_entry)
return 0;
- /* For flat configuration cases, moving overlay to a different VRF might
- * cause local address conflict, and the conflicting tunnels need to be
- * demoted.
- */
- ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
- ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
- if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
- saddr, ul_tb_id,
- ipip_entry)) {
- mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
- return 0;
- }
-
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, false, false, extack);
}
@@ -1627,8 +1614,25 @@ static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct net_device *ul_dev,
+ bool *demote_this,
struct netlink_ext_ack *extack)
{
+ u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+
+ /* Moving underlay to a different VRF might cause local address
+ * conflict, and the conflicting tunnels need to be demoted.
+ */
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ ipip_entry)) {
+ *demote_this = true;
+ return 0;
+ }
+
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, true, false, extack);
}
@@ -1779,6 +1783,7 @@ static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct net_device *ul_dev,
+ bool *demote_this,
unsigned long event,
struct netdev_notifier_info *info)
{
@@ -1793,6 +1798,7 @@ __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
ipip_entry,
ul_dev,
+ demote_this,
extack);
break;
@@ -1819,13 +1825,31 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
ul_dev,
ipip_entry))) {
+ struct mlxsw_sp_ipip_entry *prev;
+ bool demote_this = false;
+
err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
- ul_dev, event, info);
+ ul_dev, &demote_this,
+ event, info);
if (err) {
mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
ul_dev);
return err;
}
+
+ if (demote_this) {
+ if (list_is_first(&ipip_entry->ipip_list_node,
+ &mlxsw_sp->router->ipip_list))
+ prev = NULL;
+ else
+ /* This can't be cached from the previous iteration,
+ * because that entry could be gone by now.
+ */
+ prev = list_prev_entry(ipip_entry,
+ ipip_list_node);
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ ipip_entry = prev;
+ }
}
return 0;
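
When an entry is demoted, the loop above resumes from its predecessor, recomputed on each pass rather than cached, since an entry remembered earlier could itself have been removed. A userspace sketch of the same pattern on a plain singly linked list; all names are hypothetical.

#include <stdlib.h>

struct entry {
	struct entry *next;
	int demote;		/* stands in for the demote_this decision */
};

/* Entry after 'pos', or the head when pos is NULL; models the
 * find-by-underlay helper restarting mid-list.
 */
static struct entry *find_next(struct entry *head, struct entry *pos)
{
	return pos ? pos->next : head;
}

static void walk_and_demote(struct entry **headp)
{
	struct entry *e = NULL;

	while ((e = find_next(*headp, e)) != NULL) {
		struct entry *prev = NULL, *it;

		if (!e->demote)
			continue;
		/* Recompute the predecessor now; a pointer cached on an
		 * earlier pass could point at an already-freed entry.
		 */
		for (it = *headp; it != e; it = it->next)
			prev = it;
		if (prev)
			prev->next = e->next;
		else
			*headp = e->next;
		free(e);	/* the "demote" */
		e = prev;	/* resume the walk from the predecessor */
	}
}

int main(void)
{
	struct entry *head = NULL, *e;
	int i;

	for (i = 0; i < 3; i++) {	/* allocation checks elided */
		e = calloc(1, sizeof(*e));
		e->demote = (i == 1);
		e->next = head;
		head = e;
	}
	walk_and_demote(&head);
	return 0;
}
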
@@ -2551,14 +2575,14 @@ static int mlxsw_sp_router_schedule_work(struct net *net,
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_router *router;
- if (!net_eq(net, &init_net))
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
return NOTIFY_DONE;
net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
if (!net_work)
return NOTIFY_BAD;
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
INIT_WORK(&net_work->work, cb);
net_work->mlxsw_sp = router->mlxsw_sp;
mlxsw_core_schedule_work(&net_work->work);
@@ -4195,15 +4219,50 @@ mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
}
}
+static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
+{
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ if (mlxsw_sp->router->adj_discard_index_valid)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_discard_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ mlxsw_sp->router->adj_discard_index, rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ mlxsw_sp->router->adj_discard_index_valid = true;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_discard_index);
+ return err;
+}
+
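
mlxsw_sp_adj_discard_write() above is idempotent and unwinds the KVDL allocation if the register write fails, so adj_discard_index_valid is only set once both steps succeed. A minimal userspace sketch of that shape; the helpers are stubs standing in for the real allocator and register write.

#include <stdbool.h>

struct ctx {
	unsigned int index;
	bool index_valid;
};

/* Stubs for the KVDL allocator and RATR write; both succeed here. */
static int alloc_index(struct ctx *c, unsigned int *index)
{
	*index = 7;
	return 0;
}

static int write_register(struct ctx *c, unsigned int index)
{
	return 0;
}

static void free_index(struct ctx *c, unsigned int index)
{
}

static int discard_entry_setup(struct ctx *c)
{
	int err;

	if (c->index_valid)
		return 0;		/* already set up: idempotent */

	err = alloc_index(c, &c->index);
	if (err)
		return err;

	err = write_register(c, c->index);
	if (err)
		goto err_write;

	c->index_valid = true;		/* only once both steps succeeded */
	return 0;

err_write:
	free_index(c, c->index);	/* roll the allocation back */
	return err;
}

int main(void)
{
	struct ctx c = { 0 };

	return discard_entry_setup(&c);
}

The write-then-flag ordering is what lets every later caller treat the function as a cheap no-op.
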
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
u16 ecmp_size = 0;
+ int err;
/* In case the nexthop group adjacency index is valid, use it
* with provided ECMP size. Otherwise, setup trap and pass
@@ -4213,6 +4272,15 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
adjacency_index = fib_entry->nh_group->adj_index;
ecmp_size = fib_entry->nh_group->ecmp_size;
+ } else if (!nh_group->adj_index_valid && nh_group->count &&
+ nh_group->nh_rif) {
+ err = mlxsw_sp_adj_discard_write(mlxsw_sp,
+ nh_group->nh_rif->rif_index);
+ if (err)
+ return err;
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = mlxsw_sp->router->adj_discard_index;
+ ecmp_size = 1;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
@@ -4273,6 +4341,23 @@ static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u16 trap_id;
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -4313,6 +4398,9 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
@@ -4390,7 +4478,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
* can do so with a lower priority than packets directed
* at the host, so use action type local instead of trap.
*/
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
return 0;
case RTN_UNICAST:
if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
@@ -5350,7 +5438,7 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
@@ -5908,6 +5996,16 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
continue;
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
}
+
+ /* After flushing all the routes, no one can still be using the
+ * adjacency index that discards packets, so free it in case it was
+ * allocated.
+ */
+ if (!mlxsw_sp->router->adj_discard_index_valid)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_discard_index);
+ mlxsw_sp->router->adj_discard_index_valid = false;
}
static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
@@ -6019,12 +6117,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
@@ -6065,12 +6157,6 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
fib_work->fib6_work.nrt6);
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
}
rtnl_unlock();
kfree(fib_work);
@@ -6112,12 +6198,6 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
&fib_work->ven_info);
dev_put(fib_work->ven_info.dev);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
}
rtnl_unlock();
kfree(fib_work);
@@ -6213,7 +6293,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
rule = fr_info->rule;
/* Rule only affects locally generated traffic */
- if (rule->iifindex == info->net->loopback_dev->ifindex)
+ if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
return 0;
switch (info->family) {
@@ -6250,8 +6330,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct mlxsw_sp_router *router;
int err;
- if (!net_eq(info->net, &init_net) ||
- (info->family != AF_INET && info->family != AF_INET6 &&
+ if ((info->family != AF_INET && info->family != AF_INET6 &&
info->family != RTNL_FAMILY_IPMR &&
info->family != RTNL_FAMILY_IP6MR))
return NOTIFY_DONE;
@@ -6263,9 +6342,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
- if (!err || info->extack)
- return notifier_from_errno(err);
- break;
+ return notifier_from_errno(err);
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
@@ -7974,9 +8051,10 @@ static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}
-static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
+static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
- bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
@@ -7991,9 +8069,9 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}
-static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
+static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
- bool only_l3 = !ip6_multipath_hash_policy(&init_net);
+ bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
@@ -8021,8 +8099,8 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_recr2_pack(recr2_pl, seed);
- mlxsw_sp_mp4_hash_init(recr2_pl);
- mlxsw_sp_mp6_hash_init(recr2_pl);
+ mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
+ mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
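
Seeding the hash with jhash over the base MAC keeps ECMP hashing stable across reboots of one switch while differing between switches. A userspace model of the idea; FNV-1a stands in for jhash here, and the MAC value is invented.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* FNV-1a as a stand-in for jhash(); the kernel code above hashes
 * the 6-byte base MAC with seed 0.
 */
static uint32_t fnv1a(const uint8_t *data, size_t len)
{
	uint32_t h = 2166136261u;

	while (len--)
		h = (h ^ *data++) * 16777619u;
	return h;
}

int main(void)
{
	const uint8_t base_mac[6] = { 0x52, 0x54, 0x00, 0xab, 0xcd, 0xef };

	printf("ecmp seed = 0x%08x\n", fnv1a(base_mac, sizeof(base_mac)));
	return 0;
}
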
@@ -8053,7 +8131,8 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
- bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -8079,7 +8158,8 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_router *router;
int err;
@@ -8155,8 +8235,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
goto err_dscp_init;
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
- mlxsw_sp_router_fib_dump_flush);
+ err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb,
+ mlxsw_sp_router_fib_dump_flush, extack);
if (err)
goto err_register_fib_notifier;
@@ -8195,7 +8276,8 @@ err_register_inetaddr_notifier:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 560a60e522f9..200d324e6d99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -14,8 +14,23 @@
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
+static u64 mlxsw_sp_span_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (mlxsw_sp->span.entries[i].ref_count)
+ occ++;
+ }
+
+ return occ;
+}
+
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
@@ -36,13 +51,19 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
curr->id = i;
}
+ devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
+
return 0;
}
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5ecb45118400..a3af171c6358 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2591,7 +2591,7 @@ __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- dev = __dev_get_by_index(&init_net, nve_ifindex);
+ dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!dev)
return -EINVAL;
*nve_dev = dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 7c03b661ae7e..e0d7c49ffae0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -13,16 +13,27 @@
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
void *priv);
+static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx);
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
false, SP_##_group_id, DISCARD)
+#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
+ MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
+ _action, false, SP_##_group_id, DISCARD)
+
static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
@@ -30,6 +41,23 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(RPF, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
};
static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
@@ -40,6 +68,28 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, ROUTER_EXP, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, ROUTER_EXP, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RPF, RPF, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, REMOTE_ROUTE, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, HOST_MISS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, HOST_MISS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, REMOTE_ROUTE,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
@@ -54,6 +104,25 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
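
mlxsw_sp_listener_devlink_map is index-parallel with mlxsw_sp_listeners_arr, so several hardware listeners can report as one generic devlink trap (note the three UNRESOLVED_NEIGH entries above). A toy model of the lookup with invented IDs.

#include <stdio.h>

enum { TRAP_MTU_ERROR = 1, TRAP_UNRESOLVED_NEIGH = 2 };	/* invented IDs */

static const int listener_devlink_map[] = {
	TRAP_MTU_ERROR,
	TRAP_UNRESOLVED_NEIGH,
	TRAP_UNRESOLVED_NEIGH,	/* two listeners, one devlink trap */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("listener %u -> devlink trap %d\n", i,
		       listener_devlink_map[i]);
	return 0;
}
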
@@ -104,6 +173,30 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
+static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ skb_pull(skb, ETH_HLEN);
+ skb->offload_fwd_mark = 1;
+ netif_receive_skb(skb);
+}
+
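
The exception listener briefly rewinds the skb so devlink_trap_report() sees the full frame including the Ethernet header, then restores the offset before handing the packet to the stack. A userspace model of the pointer bookkeeping only.

#include <stdio.h>

#define ETH_HLEN 14	/* Ethernet header length */

struct fake_skb {
	unsigned char buf[64];
	unsigned char *data;	/* current start of the packet data */
};

static void report(const struct fake_skb *skb)
{
	printf("report sees data at offset %ld\n",
	       (long)(skb->data - skb->buf));
}

int main(void)
{
	struct fake_skb skb;

	skb.data = skb.buf + ETH_HLEN;	/* MAC header already pulled */
	skb.data -= ETH_HLEN;		/* skb_push(skb, ETH_HLEN) */
	report(&skb);			/* offset 0: full frame visible */
	skb.data += ETH_HLEN;		/* skb_pull(skb, ETH_HLEN) */
	return 0;
}
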
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -211,6 +304,7 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
u32 rate;
switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:/* fall through */
case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
policer_id = MLXSW_SP_DISCARD_POLICER_ID;
ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
@@ -242,6 +336,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
priority = 0;
tc = 1;
break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index 0d9356b3f65d..4ff1e623aa76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -446,7 +446,8 @@ static int mlxsw_sib_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
}
static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 1c14c051ee52..de6cb22f68b1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -992,6 +992,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
mlxsw_sx_port = netdev_priv(dev);
mlxsw_sx_port->dev = dev;
mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
@@ -1563,7 +1564,8 @@ static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
}
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 7618f084cae9..0c1c142bb6b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -49,6 +49,7 @@ enum {
MLXSW_TRAP_ID_IPV6_DHCP = 0x69,
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
@@ -66,6 +67,8 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
+ MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
@@ -73,6 +76,18 @@ enum {
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_NON_IP_PACKET = 0x160,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
/* Multicast trap used for routes with trap action */
MLXSW_TRAP_ID_ACL1 = 0x1C1,
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 57b26c2acf87..afe52463dc57 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -11,7 +11,9 @@
#include "lan743x_ptp.h"
-#define LAN743X_NUMBER_OF_GPIO (12)
+#define LAN743X_LED0_ENABLE 20 /* LED0 offset in HW_CFG */
+#define LAN743X_LED_ENABLE(pin) BIT(LAN743X_LED0_ENABLE + (pin))
+
#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
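
LAN743X_LED_ENABLE(pin) above maps LED n to bit 20 + n of HW_CFG; clearing the bit hands the pin to the GPIO block, setting it restores the LED function. A standalone model of the bit math.

#include <stdint.h>
#include <stdio.h>

#define LED0_ENABLE	20
#define LED_ENABLE(pin)	(1u << (LED0_ENABLE + (pin)))

int main(void)
{
	uint32_t hw_cfg = 0;

	hw_cfg |= LED_ENABLE(2);	/* route pin 2 to the LED: bit 22 */
	printf("HW_CFG = 0x%08x\n", hw_cfg);
	hw_cfg &= ~LED_ENABLE(2);	/* hand pin 2 to GPIO */
	printf("HW_CFG = 0x%08x\n", hw_cfg);
	return 0;
}
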
@@ -139,19 +141,20 @@ done:
spin_unlock_bh(&ptp->tx_ts_lock);
}
-static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter)
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
{
struct lan743x_ptp *ptp = &adapter->ptp;
int result = -ENODEV;
- int index = 0;
mutex_lock(&ptp->command_lock);
- for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) {
- if (!(test_bit(index, &ptp->used_event_ch))) {
- ptp->used_event_ch |= BIT(index);
- result = index;
- break;
- }
+ if (!(test_bit(event_channel, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(event_channel);
+ result = event_channel;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted to reserved a used event_channel = %d\n",
+ event_channel);
}
mutex_unlock(&ptp->command_lock);
return result;
@@ -179,12 +182,62 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns);
+static void lan743x_led_mux_enable(struct lan743x_adapter *adapter,
+ int pin, bool enable)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ if (ptp->leds_multiplexed &&
+ ptp->led_enabled[pin]) {
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ if (enable)
+ val |= LAN743X_LED_ENABLE(pin);
+ else
+ val &= ~LAN743X_LED_ENABLE(pin);
+
+ lan743x_csr_write(adapter, HW_CFG, val);
+ }
+}
+
+static void lan743x_led_mux_save(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ for (i = 0; i < LAN7430_N_LED; i++) {
+ bool led_enabled = (val & LAN743X_LED_ENABLE(i)) != 0;
+
+ ptp->led_enabled[i] = led_enabled;
+ }
+ ptp->leds_multiplexed = true;
+ } else {
+ ptp->leds_multiplexed = false;
+ }
+}
+
+static void lan743x_led_mux_restore(struct lan743x_adapter *adapter)
+{
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+
+ for (i = 0; i < LAN7430_N_LED; i++)
+ lan743x_led_mux_enable(adapter, i, true);
+ }
+}
+
static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
- int bit, int ptp_channel)
+ int pin, int event_channel)
{
struct lan743x_gpio *gpio = &adapter->gpio;
unsigned long irq_flags = 0;
- int bit_mask = BIT(bit);
+ int bit_mask = BIT(pin);
int ret = -EBUSY;
spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
@@ -194,41 +247,44 @@ static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
gpio->output_bits |= bit_mask;
gpio->ptp_bits |= bit_mask;
+ /* assign pin to GPIO function */
+ lan743x_led_mux_enable(adapter, pin, false);
+
/* set as output, and zero initial value */
- gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit);
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
/* enable gpio, and set buffer type to push pull */
- gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit);
- gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
/* set 1588 polarity to high */
- gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit);
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
- if (!ptp_channel) {
+ if (event_channel == 0) {
/* use channel A */
- gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(pin);
} else {
/* use channel B */
- gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(pin);
}
- gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit);
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
- ret = bit;
+ ret = pin;
}
spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
return ret;
}
-static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int pin)
{
struct lan743x_gpio *gpio = &adapter->gpio;
unsigned long irq_flags = 0;
- int bit_mask = BIT(bit);
+ int bit_mask = BIT(pin);
spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
if (gpio->used_bits & bit_mask) {
@@ -239,21 +295,24 @@ static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
if (gpio->ptp_bits & bit_mask) {
gpio->ptp_bits &= ~bit_mask;
/* disable ptp output */
- gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit);
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG3,
gpio->gpio_cfg3);
}
/* release gpio output */
/* disable gpio */
- gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit);
- gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
/* reset back to input */
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit);
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* assign pin to original function */
+ lan743x_led_mux_enable(adapter, pin, true);
}
}
spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
@@ -391,89 +450,95 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
return 0;
}
-static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter)
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter,
+ unsigned int index)
{
struct lan743x_ptp *ptp = &adapter->ptp;
u32 general_config = 0;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
- if (ptp->perout_gpio_bit >= 0) {
- lan743x_gpio_release(adapter, ptp->perout_gpio_bit);
- ptp->perout_gpio_bit = -1;
+ if (perout->gpio_pin >= 0) {
+ lan743x_gpio_release(adapter, perout->gpio_pin);
+ perout->gpio_pin = -1;
}
- if (ptp->perout_event_ch >= 0) {
+ if (perout->event_ch >= 0) {
/* set target to far in the future, effectively disabling it */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
0xFFFF0000);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
0);
general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
- (ptp->perout_event_ch);
+ (perout->event_ch);
lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
- lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch);
- ptp->perout_event_ch = -1;
+ lan743x_ptp_release_event_ch(adapter, perout->event_ch);
+ perout->event_ch = -1;
}
}
static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
- struct ptp_perout_request *perout)
+ struct ptp_perout_request *perout_request)
{
struct lan743x_ptp *ptp = &adapter->ptp;
u32 period_sec = 0, period_nsec = 0;
u32 start_sec = 0, start_nsec = 0;
u32 general_config = 0;
int pulse_width = 0;
- int perout_bit = 0;
-
- if (!on) {
- lan743x_ptp_perout_off(adapter);
+ int perout_pin = 0;
+ unsigned int index = perout_request->index;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
+
+ /* Reject requests with unsupported flags */
+ if (perout_request->flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ perout_request->index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_perout_off(adapter, index);
return 0;
}
- if (ptp->perout_event_ch >= 0 ||
- ptp->perout_gpio_bit >= 0) {
+ if (perout->event_ch >= 0 ||
+ perout->gpio_pin >= 0) {
/* already on, turn off first */
- lan743x_ptp_perout_off(adapter);
+ lan743x_ptp_perout_off(adapter, index);
}
- ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter);
- if (ptp->perout_event_ch < 0) {
+ perout->event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+
+ if (perout->event_ch < 0) {
netif_warn(adapter, drv, adapter->netdev,
- "Failed to reserve event channel for PEROUT\n");
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
goto failed;
}
- switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
- case ID_REV_ID_LAN7430_:
- perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */
- break;
- case ID_REV_ID_LAN7431_:
- perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */
- break;
- }
+ perout->gpio_pin = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_pin,
+ perout->event_ch);
- ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter,
- perout_bit,
- ptp->perout_event_ch);
-
- if (ptp->perout_gpio_bit < 0) {
+ if (perout->gpio_pin < 0) {
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
- perout_bit);
+ perout_pin);
goto failed;
}
- start_sec = perout->start.sec;
- start_sec += perout->start.nsec / 1000000000;
- start_nsec = perout->start.nsec % 1000000000;
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
- period_sec = perout->period.sec;
- period_sec += perout->period.nsec / 1000000000;
- period_nsec = perout->period.nsec % 1000000000;
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
if (period_sec == 0) {
if (period_nsec >= 400000000) {
@@ -499,41 +564,41 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
/* turn off by setting target far in future */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
0xFFFF0000);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0);
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0);
/* Configure to pulse every period */
general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
- (ptp->perout_event_ch));
+ (perout->event_ch));
general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
- (ptp->perout_event_ch, pulse_width);
+ (perout->event_ch, pulse_width);
general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
- (ptp->perout_event_ch);
+ (perout->event_ch);
lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
/* set the reload to one toggle cycle */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(perout->event_ch),
period_sec);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_RELOAD_NS_X(perout->event_ch),
period_nsec);
/* set the start time */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
start_sec);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
start_nsec);
return 0;
failed:
- lan743x_ptp_perout_off(adapter);
+ lan743x_ptp_perout_off(adapter, index);
return -ENODEV;
}
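
The perout path above normalizes (sec, nsec) pairs by folding whole seconds out of the nanosecond field before programming the target and reload registers. The arithmetic in isolation, with illustrative values.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000u

int main(void)
{
	uint32_t sec = 5, nsec = 2500000000u;	/* 2.5 s stuck in the ns field */

	sec += nsec / NSEC_PER_SEC;	/* carry whole seconds */
	nsec %= NSEC_PER_SEC;		/* keep the remainder < 10^9 */
	printf("%u s + %u ns\n", sec, nsec);	/* 7 s + 500000000 ns */
	return 0;
}
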
@@ -550,7 +615,7 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
case PTP_CLK_REQ_EXTTS:
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
- if (request->perout.index == 0)
+ if (request->perout.index < ptpci->n_per_out)
return lan743x_ptp_perout(adapter, on,
&request->perout);
return -EINVAL;
@@ -568,6 +633,29 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
return 0;
}
+static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ int result = 0;
+
+ /* Confirm the requested function is supported. Parameter
+ * validation is done by the caller.
+ */
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ default:
+ result = -1;
+ break;
+ }
+ return result;
+}
+
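
With pin_config and the verify callback wired up above, userspace can assign PTP_PF_PEROUT to any advertised pin through the standard PTP character device. A sketch of that request; the /dev/ptp0 node and the pin index are assumptions, not values from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc desc;
	int fd = open("/dev/ptp0", O_RDWR);	/* device node assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&desc, 0, sizeof(desc));
	desc.index = 2;			/* pin number: an assumption */
	desc.func = PTP_PF_PEROUT;	/* vetted by the driver's .verify */
	desc.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &desc))
		perror("PTP_PIN_SETFUNC");
	return 0;
}
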
static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
{
struct lan743x_ptp *ptp =
@@ -861,12 +949,19 @@ void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
int lan743x_ptp_init(struct lan743x_adapter *adapter)
{
struct lan743x_ptp *ptp = &adapter->ptp;
+ int i;
mutex_init(&ptp->command_lock);
spin_lock_init(&ptp->tx_ts_lock);
ptp->used_event_ch = 0;
- ptp->perout_event_ch = -1;
- ptp->perout_gpio_bit = -1;
+
+ for (i = 0; i < LAN743X_PTP_N_EVENT_CHAN; i++) {
+ ptp->perout[i].event_ch = -1;
+ ptp->perout[i].gpio_pin = -1;
+ }
+
+ lan743x_led_mux_save(adapter);
+
return 0;
}
@@ -875,6 +970,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
int ret = -ENODEV;
u32 temp;
+ int i;
+ int n_pins;
lan743x_ptp_reset(adapter);
lan743x_ptp_sync_to_system_clock(adapter);
@@ -890,10 +987,32 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
return 0;
- snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0");
- ptp->pin_config[0].index = 0;
- ptp->pin_config[0].func = PTP_PF_PEROUT;
- ptp->pin_config[0].chan = 0;
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ n_pins = LAN7430_N_GPIO;
+ break;
+ case ID_REV_ID_LAN7431_:
+ n_pins = LAN7431_N_GPIO;
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ "Unknown LAN743x (%08x). Assuming no GPIO\n",
+ adapter->csr.id_rev);
+ n_pins = 0;
+ break;
+ }
+
+ if (n_pins > LAN743X_PTP_N_GPIO)
+ n_pins = LAN743X_PTP_N_GPIO;
+
+ for (i = 0; i < n_pins; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "lan743x_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
ptp->ptp_clock_info.owner = THIS_MODULE;
snprintf(ptp->ptp_clock_info.name, 16, "%pm",
@@ -901,10 +1020,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
ptp->ptp_clock_info.n_alarm = 0;
ptp->ptp_clock_info.n_ext_ts = 0;
- ptp->ptp_clock_info.n_per_out = 1;
- ptp->ptp_clock_info.n_pins = 0;
+ ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
+ ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = 0;
- ptp->ptp_clock_info.pin_config = NULL;
+ ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -913,7 +1032,7 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
- ptp->ptp_clock_info.verify = NULL;
+ ptp->ptp_clock_info.verify = lan743x_ptpci_verify_pin_config;
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
&adapter->pdev->dev);
@@ -939,7 +1058,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
int index;
if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
- ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) {
+ (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED)) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
@@ -973,6 +1092,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
ptp->pending_tx_timestamps = 0;
spin_unlock_bh(&ptp->tx_ts_lock);
+ lan743x_led_mux_restore(adapter);
+
lan743x_ptp_disable(adapter);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 5fc1b3cd5e33..7663bf5d2e33 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -7,6 +7,18 @@
#include "linux/ptp_clock_kernel.h"
#include "linux/netdevice.h"
+#define LAN7430_N_LED 4
+#define LAN7430_N_GPIO 4 /* multiplexed with PHY LEDs */
+#define LAN7431_N_GPIO 12
+
+#define LAN743X_PTP_N_GPIO LAN7431_N_GPIO
+
+/* the number of periodic outputs is limited by the number of
+ * PTP clock event channels
+ */
+#define LAN743X_PTP_N_EVENT_CHAN 2
+#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+
struct lan743x_adapter;
/* GPIO */
@@ -40,9 +52,14 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
-#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
#define PTP_FLAG_ISR_ENABLED BIT(2)
+struct lan743x_ptp_perout {
+ int event_ch; /* PTP event channel (0=channel A, 1=channel B) */
+ int gpio_pin; /* GPIO pin where output appears */
+};
+
struct lan743x_ptp {
int flags;
@@ -51,13 +68,13 @@ struct lan743x_ptp {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
- struct ptp_pin_desc pin_config[1];
+ struct ptp_pin_desc pin_config[LAN743X_PTP_N_GPIO];
-#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2)
unsigned long used_event_ch;
+ struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
- int perout_event_ch;
- int perout_gpio_bit;
+ bool leds_multiplexed;
+ bool led_enabled[LAN7430_N_LED];
/* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
spinlock_t tx_ts_lock;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 672ea1342add..2cccadc204fd 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -132,11 +132,11 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
-static void ocelot_vcap_enable(struct ocelot *ocelot, struct ocelot_port *port)
+static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
- ANA_PORT_VCAP_S2_CFG, port->chip_port);
+ ANA_PORT_VCAP_S2_CFG, port);
}
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
@@ -169,117 +169,178 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_vlan_mode(struct ocelot_port *port,
+static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
netdev_features_t features)
{
- struct ocelot *ocelot = port->ocelot;
- u8 p = port->chip_port;
u32 val;
/* Filtering */
val = ocelot_read(ocelot, ANA_VLANMASK);
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- val |= BIT(p);
+ val |= BIT(port);
else
- val &= ~BIT(p);
+ val &= ~BIT(port);
ocelot_write(ocelot, val, ANA_VLANMASK);
}
-static void ocelot_vlan_port_apply(struct ocelot *ocelot,
- struct ocelot_port *port)
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val;
- /* Ingress clasification (ANA_PORT_VLAN_CFG) */
- /* Default vlan to clasify for untagged frames (may be zero) */
- val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
- if (port->vlan_aware)
- val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
-
+ if (vlan_aware)
+ val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+ else
+ val = 0;
ocelot_rmw_gix(ocelot, val,
- ANA_PORT_VLAN_CFG_VLAN_VID_M |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
- ANA_PORT_VLAN_CFG, port->chip_port);
+ ANA_PORT_VLAN_CFG, port);
- /* Drop frames with multicast source address */
- val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
- if (port->vlan_aware && !port->vid)
+ if (vlan_aware && !ocelot_port->vid)
/* If port is vlan-aware and tagged, drop untagged and priority
* tagged frames.
*/
- val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ else
+ val = 0;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
- ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
-
- /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
- val = REW_TAG_CFG_TAG_TPID_CFG(0);
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
- if (port->vlan_aware) {
- if (port->vid)
+ if (vlan_aware) {
+ if (ocelot_port->vid)
/* Tag all frames except when VID == DEFAULT_VLAN */
val |= REW_TAG_CFG_TAG_CFG(1);
else
/* Tag all frames */
val |= REW_TAG_CFG_TAG_CFG(3);
+ } else {
+ /* Port tagging disabled. */
+ val = REW_TAG_CFG_TAG_CFG(0);
}
ocelot_rmw_gix(ocelot, val,
- REW_TAG_CFG_TAG_TPID_CFG_M |
REW_TAG_CFG_TAG_CFG_M,
- REW_TAG_CFG, port->chip_port);
+ REW_TAG_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_vlan_filtering);
- /* Set default VLAN and tag type to 8021Q. */
- val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
- REW_PORT_VLAN_CFG_PORT_VID(port->vid);
- ocelot_rmw_gix(ocelot, val,
- REW_PORT_VLAN_CFG_PORT_TPID_M |
+static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (ocelot_port->vid != vid) {
+ /* Always permit deleting the native VLAN (vid = 0) */
+ if (ocelot_port->vid && vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->vid);
+ return -EBUSY;
+ }
+ ocelot_port->vid = vid;
+ }
+
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port->chip_port);
+ REW_PORT_VLAN_CFG, port);
+
+ return 0;
}
-static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
- bool untagged)
+/* Default vlan to classify for untagged frames (may be zero) */
+static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int ret;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- /* Add the port MAC address to with the right VLAN information */
- ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
- ENTRYTYPE_LOCKED);
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
+ ANA_PORT_VLAN_CFG_VLAN_VID_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ ocelot_port->pvid = pvid;
+}
+
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
+{
+ int ret;
/* Make the port a member of the VLAN */
- ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ocelot->vlan_mask[vid] |= BIT(port);
ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
if (ret)
return ret;
/* Default ingress vlan classification */
if (pvid)
- port->pvid = vid;
+ ocelot_port_set_pvid(ocelot, port, vid);
/* Untagged egress vlan classification */
- if (untagged && port->vid != vid) {
- if (port->vid) {
- dev_err(ocelot->dev,
- "Port already has a native VLAN: %d\n",
- port->vid);
- return -EBUSY;
- }
- port->vid = vid;
+ if (untagged) {
+ ret = ocelot_port_set_native_vlan(ocelot, port, vid);
+ if (ret)
+ return ret;
}
- ocelot_vlan_port_apply(ocelot, port);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vlan_add);
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
+ if (ret)
+ return ret;
+
+ /* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
return 0;
}
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int ret;
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (ocelot_port->pvid == vid)
+ ocelot_port_set_pvid(ocelot, port, 0);
+
+ /* Egress */
+ if (ocelot_port->vid == vid)
+ ocelot_port_set_native_vlan(ocelot, port, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vlan_del);
+
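
ocelot_vlan_add()/ocelot_vlan_del() above become library calls driven by a port index instead of a netdev. A userspace model of the bookkeeping they implement: VLAN membership as a per-VID port bitmask, plus the one-native-VLAN-per-port rule that produces -EBUSY in the driver.

#include <stdint.h>
#include <stdio.h>

#define N_VLANS	4096
#define N_PORTS	8

struct sw {
	uint32_t vlan_mask[N_VLANS];	/* per-VID port membership bitmask */
	uint16_t pvid[N_PORTS];		/* ingress default VID */
	uint16_t native[N_PORTS];	/* egress untagged ("native") VID */
};

static int vlan_add(struct sw *sw, int port, uint16_t vid,
		    int pvid, int untagged)
{
	sw->vlan_mask[vid] |= 1u << port;
	if (pvid)
		sw->pvid[port] = vid;
	if (untagged) {
		/* one native VLAN per port, mirroring the -EBUSY check */
		if (sw->native[port] && sw->native[port] != vid)
			return -1;
		sw->native[port] = vid;
	}
	return 0;
}

int main(void)
{
	static struct sw sw;	/* zero-initialized */

	vlan_add(&sw, 2, 1, 1, 1);
	printf("port 2: pvid %u native %u mask[1]=0x%x\n",
	       sw.pvid[2], sw.native[2], sw.vlan_mask[1]);
	return 0;
}
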
static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
int ret;
/* 8021q removes VID 0 on module unload for all interfaces
@@ -289,24 +350,12 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
if (vid == 0)
return 0;
- /* Del the port MAC address to with the right VLAN information */
- ocelot_mact_forget(ocelot, dev->dev_addr, vid);
-
- /* Stop the port from being a member of the vlan */
- ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
- ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ ret = ocelot_vlan_del(ocelot, port, vid);
if (ret)
return ret;
- /* Ingress */
- if (port->pvid == vid)
- port->pvid = 0;
-
- /* Egress */
- if (port->vid == vid)
- port->vid = 0;
-
- ocelot_vlan_port_apply(ocelot, port);
+ /* Delete the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
return 0;
}
@@ -333,16 +382,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
- /* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
- ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
-
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
*/
- ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+ ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_VLANMASK);
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
@@ -362,14 +406,13 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
-static void ocelot_port_adjust_link(struct net_device *dev)
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- u8 p = port->chip_port;
- int speed, atop_wm, mode = 0;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int speed, mode = 0;
- switch (dev->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
speed = OCELOT_SPEED_10;
break;
@@ -385,87 +428,41 @@ static void ocelot_port_adjust_link(struct net_device *dev)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
break;
default:
- netdev_err(dev, "Unsupported PHY speed: %d\n",
- dev->phydev->speed);
+ dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n",
+ port, phydev->speed);
return;
}
- phy_print_status(dev->phydev);
+ phy_print_status(phydev);
- if (!dev->phydev->link)
+ if (!phydev->link)
return;
/* Only full duplex supported for now */
- ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA |
+ ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
mode, DEV_MAC_MODE_CFG);
- /* Set MAC IFG Gaps
- * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
- * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
- */
- ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG);
-
- /* Load seed (0) and set MAC HDX late collision */
- ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
- DEV_MAC_HDX_CFG_SEED_LOAD,
- DEV_MAC_HDX_CFG);
- mdelay(1);
- ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
- DEV_MAC_HDX_CFG);
-
- /* Disable HDX fast control */
- ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG);
- ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(port, 0, PCS1G_ANEG_CFG);
-
- /* No loopback */
- ocelot_port_writel(port, 0, PCS1G_LB_CFG);
-
- /* Set Max Length and maximum tags allowed */
- ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG);
- ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
- DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
- DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
- DEV_MAC_TAGS_CFG);
+ if (ocelot->ops->pcs_init)
+ ocelot->ops->pcs_init(ocelot, port);
/* Enable MAC module */
- ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA |
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
/* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
* reset
*/
- ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed),
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
DEV_CLOCK_CFG);
- /* Set SMAC of Pause frame (00:00:00:00:00:00) */
- ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
- ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG);
-
/* No PFC */
ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, p);
-
- /* Set Pause WM hysteresis
- * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
- * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
- */
- ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
- SYS_PAUSE_CFG_PAUSE_STOP(101) |
- SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p);
+ ANA_PFC_PFC_CFG, port);
/* Core: Enable port for frame transfer */
ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, p);
+ QSYS_SWITCH_PORT_MODE, port);
/* Flow control */
ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
@@ -473,64 +470,88 @@ static void ocelot_port_adjust_link(struct net_device *dev)
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
- SYS_MAC_FC_CFG, p);
- ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p);
-
- /* Tail dropping watermark */
- atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ;
- ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN),
- SYS_ATOP, p);
- ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+ SYS_MAC_FC_CFG, port);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
}
+EXPORT_SYMBOL(ocelot_adjust_link);
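
The link-adjustment path above now delegates PCS setup to an optional ops->pcs_init hook, so the SGMII-specific programming deleted from this function can move into the SoC driver that instantiates the switch. A minimal sketch of such an ops table, assuming struct ocelot_ops carries only the reset and pcs_init hooks visible in this patch; the example_* names are hypothetical, and pcs_init is assumed to return void since its return value is never checked:

static void example_pcs_init(struct ocelot *ocelot, int port)
{
        /* SoC-specific PCS/SerDes programming, e.g. the SGMII setup
         * that used to be hardcoded in ocelot_port_adjust_link()
         */
}

static int example_reset(struct ocelot *ocelot)
{
        /* Soft-reset the switch core; called from ocelot_init() */
        return 0;
}

static const struct ocelot_ops example_ops = {
        .pcs_init       = example_pcs_init,
        .reset          = example_reset,
};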
-static int ocelot_port_open(struct net_device *dev)
+static void ocelot_port_adjust_link(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int err;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_adjust_link(ocelot, port, dev->phydev);
+}
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy)
+{
/* Enable receiving frames on the port, and activate auto-learning of
* MAC addresses.
*/
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
- ANA_PORT_PORT_CFG, port->chip_port);
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_enable);
+
+static int ocelot_port_open(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int err;
- if (port->serdes) {
- err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
- port->phy_mode);
+ if (priv->serdes) {
+ err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
+ priv->phy_mode);
if (err) {
netdev_err(dev, "Could not set mode of SerDes\n");
return err;
}
}
- err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
- port->phy_mode);
+ err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
+ priv->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
}
- dev->phydev = port->phy;
+ dev->phydev = priv->phy;
+
+ phy_attached_info(priv->phy);
+ phy_start(priv->phy);
+
+ ocelot_port_enable(ocelot, port, priv->phy);
- phy_attached_info(port->phy);
- phy_start(port->phy);
return 0;
}
+void ocelot_port_disable(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+EXPORT_SYMBOL(ocelot_port_disable);
+
static int ocelot_port_stop(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
- phy_disconnect(port->phy);
+ phy_disconnect(priv->phy);
dev->phydev = NULL;
- ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG);
- ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, port->chip_port);
+ ocelot_port_disable(ocelot, port);
+
return 0;
}
@@ -554,15 +575,35 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
return 0;
}
+int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
+ struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
+ ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ shinfo->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in cb[0] of sk_buff */
+ skb->cb[0] = ocelot_port->ts_id % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, skb);
+ return 0;
+ }
+ return -ENODATA;
+}
+EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
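
Worked example of the contract above: with ocelot_port->ts_id at 5, the skb is queued with skb->cb[0] = 1 (5 % 4), and the transmit path below encodes the same two-bit ID into the rewriter op as (5 % 4) << 3; ocelot_get_txtstamp() later uses that ID to match the hardware timestamp FIFO entry back to this skb.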
+
static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ocelot_port_private *priv = netdev_priv(dev);
struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- u32 val, ifh[IFH_LEN];
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ u32 val, ifh[OCELOT_TAG_LEN / 4];
struct frame_info info = {};
u8 grp = 0; /* Send everything on CPU group 0 */
unsigned int i, count, last;
+ int port = priv->chip_port;
val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
@@ -572,20 +613,20 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- info.port = BIT(port->chip_port);
+ info.port = BIT(port);
info.tag_type = IFH_TAG_TYPE_C;
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
- info.rew_op = port->ptp_cmd;
- if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= (port->ts_id % 4) << 3;
+ info.rew_op = ocelot_port->ptp_cmd;
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ info.rew_op |= (ocelot_port->ts_id % 4) << 3;
}
ocelot_gen_ifh(ifh, &info);
- for (i = 0; i < IFH_LEN; i++)
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
QS_INJ_WR, grp);
@@ -614,31 +655,17 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
- port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- struct ocelot_skb *oskb =
- kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
-
- if (unlikely(!oskb))
- goto out;
-
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
- oskb->skb = skb;
- oskb->id = port->ts_id % 4;
- port->ts_id++;
-
- list_add_tail(&oskb->head, &port->skbs);
-
+ if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) {
+ ocelot_port->ts_id++;
return NETDEV_TX_OK;
}
-out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
+static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ struct timespec64 *ts)
{
unsigned long flags;
u32 val;
@@ -663,29 +690,90 @@ void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
-EXPORT_SYMBOL(ocelot_get_hwtimestamp);
+
+void ocelot_get_txtstamp(struct ocelot *ocelot)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+
+ /* Retrieve its associated skb */
+ port = ocelot->ports[txport];
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (skb->cb[0] != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+
+ if (unlikely(!skb_match))
+ continue;
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_txtstamp);
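
ocelot_get_txtstamp() drains up to OCELOT_PTP_QUEUE_SZ pending two-step timestamps and pairs each with the queued skb whose cb[0] carries the matching ID. A minimal sketch of wiring it to an interrupt source, assuming the SoC exposes a PTP-ready IRQ that is requested elsewhere (for instance with devm_request_threaded_irq()); the handler name is hypothetical:

#include <linux/interrupt.h>

static irqreturn_t example_ptp_rdy_irq_handler(int irq, void *data)
{
        struct ocelot *ocelot = data;

        /* Consume every valid entry in the timestamp FIFO */
        ocelot_get_txtstamp(ocelot);

        return IRQ_HANDLED;
}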
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_forget(port->ocelot, addr, port->pvid);
+ return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
}
static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_learn(port->ocelot, PGID_CPU, addr, port->pvid,
+ return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
}
static void ocelot_set_rx_mode(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int i;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
u32 val;
+ int i;
/* This doesn't handle promiscuous mode because the bridge core is
* setting IFF_PROMISC on all slave interfaces and all frames would be
@@ -701,10 +789,11 @@ static void ocelot_set_rx_mode(struct net_device *dev)
static int ocelot_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ int port = priv->chip_port;
int ret;
- ret = snprintf(buf, len, "p%d", port->chip_port);
+ ret = snprintf(buf, len, "p%d", port);
if (ret >= len)
return -EINVAL;
@@ -713,15 +802,16 @@ static int ocelot_port_get_phys_port_name(struct net_device *dev,
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
const struct sockaddr *addr = p;
/* Learn the new net device MAC address in the mac table. */
- ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid,
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
ether_addr_copy(dev->dev_addr, addr->sa_data);
return 0;
@@ -730,11 +820,12 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
static void ocelot_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
/* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port),
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
SYS_STAT_CFG);
/* Get Rx stats */
@@ -765,21 +856,18 @@ static void ocelot_get_stats64(struct net_device *dev,
stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
}
-static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid, bool vlan_aware)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!vid) {
- if (!port->vlan_aware)
+ if (!vlan_aware)
/* If the bridge is not VLAN aware and no VID was
* provided, set it to pvid to ensure the MAC entry
* matches incoming untagged packets
*/
- vid = port->pvid;
+ vid = ocelot_port->pvid;
else
/* If the bridge is VLAN aware a VID must be provided as
* otherwise the learnt entry wouldn't match any frame.
@@ -787,19 +875,40 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
- ENTRYTYPE_LOCKED);
+ return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
+EXPORT_SYMBOL(ocelot_fdb_add);
-static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware);
+}
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid)
+{
return ocelot_mact_forget(ocelot, addr, vid);
}
+EXPORT_SYMBOL(ocelot_fdb_del);
+
+static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
struct ocelot_dump_ctx {
struct net_device *dev;
@@ -808,9 +917,10 @@ struct ocelot_dump_ctx {
int idx;
};
-static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
- struct ocelot_dump_ctx *dump)
+static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
{
+ struct ocelot_dump_ctx *dump = data;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
@@ -831,12 +941,12 @@ static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac))
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
- if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid))
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
goto nla_put_failure;
nlmsg_end(dump->skb, nlh);
@@ -850,12 +960,11 @@ nla_put_failure:
return -EMSGSIZE;
}
-static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
- struct ocelot_mact_entry *entry)
+static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
+ struct ocelot_mact_entry *entry)
{
- struct ocelot *ocelot = port->ocelot;
- char mac[ETH_ALEN];
u32 val, dst, macl, mach;
+ char mac[ETH_ALEN];
/* Set row and column to read from */
ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
@@ -878,7 +987,7 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
* do not report it.
*/
dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
- if (dst != port->chip_port)
+ if (dst != port)
return -EINVAL;
/* Get the entry's MAC address and VLAN id */
@@ -898,43 +1007,61 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
return 0;
}
-static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev, int *idx)
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- int i, j, ret = 0;
- struct ocelot_dump_ctx dump = {
- .dev = dev,
- .skb = skb,
- .cb = cb,
- .idx = *idx,
- };
-
- struct ocelot_mact_entry entry;
+ int i, j;
/* Loop through all the mac tables entries. There are 1024 rows of 4
* entries.
*/
for (i = 0; i < 1024; i++) {
for (j = 0; j < 4; j++) {
- ret = ocelot_mact_read(port, i, j, &entry);
+ struct ocelot_mact_entry entry;
+ bool is_static;
+ int ret;
+
+ ret = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, not in use, ...),
* skip it.
*/
if (ret == -EINVAL)
continue;
else if (ret)
- goto end;
+ return ret;
+
+ is_static = (entry.type == ENTRYTYPE_LOCKED);
- ret = ocelot_fdb_do_dump(&entry, &dump);
+ ret = cb(entry.mac, entry.vid, is_static, data);
if (ret)
- goto end;
+ return ret;
}
}
-end:
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_fdb_dump);
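
ocelot_fdb_dump() now reports each valid MAC table entry through a dsa_fdb_dump_cb_t, the callback type DSA uses for .port_fdb_dump, so the same walker serves both the switchdev wrapper below and a future DSA front-end. A minimal sketch of a callback, assuming only the (addr, vid, is_static, data) signature used above; the logging is illustrative:

/* Returning nonzero from the callback stops the table walk */
static int example_fdb_dump_cb(const unsigned char *addr, u16 vid,
                               bool is_static, void *data)
{
        pr_info("fdb entry: %pM vid %d %s\n", addr, vid,
                is_static ? "static" : "dynamic");
        return 0;
}

A consumer would then call ocelot_fdb_dump(ocelot, port, example_fdb_dump_cb, NULL).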
+
+static int ocelot_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
+
*idx = dump.idx;
+
return ret;
}
@@ -953,18 +1080,20 @@ static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
static int ocelot_set_features(struct net_device *dev,
netdev_features_t features)
{
- struct ocelot_port *port = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
- port->tc.offload_cnt) {
+ priv->tc.offload_cnt) {
netdev_err(dev,
"Cannot disable HW TC offload while offloads active\n");
return -EBUSY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
- ocelot_vlan_mode(port, features);
+ ocelot_vlan_mode(ocelot, port, features);
return 0;
}
@@ -972,8 +1101,8 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
ppid->id_len = sizeof(ocelot->base_mac);
memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
@@ -981,17 +1110,16 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
return 0;
}
-static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
- struct ocelot *ocelot = port->ocelot;
-
return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
-static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1004,16 +1132,16 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
- port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
/* IFH_REW_OP_ONE_STEP_PTP updates the correction field; we
* need to update the origin time.
*/
- port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
break;
case HWTSTAMP_TX_OFF:
- port->ptp_cmd = 0;
+ ocelot_port->ptp_cmd = 0;
break;
default:
return -ERANGE;
@@ -1052,11 +1180,13 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
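
The hwtstamp pair is reached through the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls handled just below. For context, a minimal user-space sketch of the request that ends up in ocelot_hwstamp_set(), assuming a datagram socket fd and the standard <linux/net_tstamp.h> definitions (error handling trimmed):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_request_tx_timestamps(int fd, const char *ifname)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}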
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
/* The function is only used for PTP operations for now */
if (!ocelot->ptp)
@@ -1064,9 +1194,9 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(port, ifr);
+ return ocelot_hwstamp_set(ocelot, port, ifr);
case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(port, ifr);
+ return ocelot_hwstamp_get(ocelot, port, ifr);
default:
return -EOPNOTSUPP;
}
@@ -1080,9 +1210,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
- .ndo_fdb_add = ocelot_fdb_add,
- .ndo_fdb_del = ocelot_fdb_del,
- .ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_fdb_add = ocelot_port_fdb_add,
+ .ndo_fdb_del = ocelot_port_fdb_del,
+ .ndo_fdb_dump = ocelot_port_fdb_dump,
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
@@ -1091,10 +1221,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_do_ioctl = ocelot_ioctl,
};
-static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
- struct ocelot_port *port = netdev_priv(netdev);
- struct ocelot *ocelot = port->ocelot;
int i;
if (sset != ETH_SS_STATS)
@@ -1104,6 +1232,17 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ struct ocelot_port_private *priv = netdev_priv(netdev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_get_strings(ocelot, port, sset, data);
+}
static void ocelot_update_stats(struct ocelot *ocelot)
{
@@ -1145,11 +1284,8 @@ static void ocelot_check_stats_work(struct work_struct *work)
OCELOT_STATS_CHECK_DELAY);
}
-static void ocelot_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
int i;
/* check and update now */
@@ -1157,28 +1293,42 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i];
+ *data++ = ocelot->stats[port * ocelot->num_stats + i];
}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
-static int ocelot_get_sset_count(struct net_device *dev, int sset)
+static void ocelot_port_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
+
return ocelot->num_stats;
}
+EXPORT_SYMBOL(ocelot_get_sset_count);
-static int ocelot_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
- if (!ocelot->ptp)
- return ethtool_op_get_ts_info(dev, info);
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1193,36 +1343,43 @@ static int ocelot_get_ts_info(struct net_device *dev,
return 0;
}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+static int ocelot_port_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
static const struct ethtool_ops ocelot_ethtool_ops = {
- .get_strings = ocelot_get_strings,
- .get_ethtool_stats = ocelot_get_ethtool_stats,
- .get_sset_count = ocelot_get_sset_count,
+ .get_strings = ocelot_port_get_strings,
+ .get_ethtool_stats = ocelot_port_get_ethtool_stats,
+ .get_sset_count = ocelot_port_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
- .get_ts_info = ocelot_get_ts_info,
+ .get_ts_info = ocelot_port_get_ts_info,
};
-static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
- struct switchdev_trans *trans,
- u8 state)
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
u32 port_cfg;
- int port, i;
-
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ int p, i;
- if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask))
- return 0;
+ if (!(BIT(port) & ocelot->bridge_mask))
+ return;
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG,
- ocelot_port->chip_port);
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
switch (state) {
case BR_STATE_FORWARDING:
- ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port);
+ ocelot->bridge_fwd_mask |= BIT(port);
/* Fallthrough */
case BR_STATE_LEARNING:
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
@@ -1230,19 +1387,18 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
default:
port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
- ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port);
+ ocelot->bridge_fwd_mask &= ~BIT(port);
break;
}
- ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG,
- ocelot_port->chip_port);
+ ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
/* Apply FWD mask. The loop is needed to add/remove the current port as
* a source for the other ports.
*/
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (ocelot->bridge_fwd_mask & BIT(port)) {
- unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+ for (p = 0; p < ocelot->num_phys_ports; p++) {
+ if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) {
+ unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) {
unsigned long bond_mask = ocelot->lags[i];
@@ -1250,78 +1406,93 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
if (!bond_mask)
continue;
- if (bond_mask & BIT(port)) {
+ if (bond_mask & BIT(p)) {
mask &= ~bond_mask;
break;
}
}
- ocelot_write_rix(ocelot,
- BIT(ocelot->num_phys_ports) | mask,
- ANA_PGID_PGID, PGID_SRC + port);
+ /* Prevent the NPI port from looping back to itself */
+ if (p != ocelot->cpu)
+ mask |= BIT(ocelot->cpu);
+
+ ocelot_write_rix(ocelot, mask,
+ ANA_PGID_PGID, PGID_SRC + p);
} else {
/* Only the CPU port, this is compatible with link
* aggregation.
*/
ocelot_write_rix(ocelot,
- BIT(ocelot->num_phys_ports),
- ANA_PGID_PGID, PGID_SRC + port);
+ BIT(ocelot->cpu),
+ ANA_PGID_PGID, PGID_SRC + p);
}
}
+}
+EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
- return 0;
+static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+
+ ocelot_bridge_stp_state_set(ocelot, port, state);
}
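
Worked example of the FWD mask loop above, assuming a 4-port switch with an internal CPU port (ocelot->cpu == 4, so p never equals it), bridge_fwd_mask = BIT(0) | BIT(1) and no LAGs: port 0 gets PGID_SRC + 0 = (bridge_fwd_mask & ~BIT(0)) | BIT(4) = BIT(1) | BIT(4), port 1 symmetrically gets BIT(0) | BIT(4), while the non-forwarding ports 2 and 3 get BIT(4) alone, i.e. they may only send towards the CPU port.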
-static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port,
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int secs)
+{
+ ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(secs / 2),
+ ANA_AUTOAGE);
+}
+EXPORT_SYMBOL(ocelot_set_ageing_time);
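
For example, the default bridge ageing time of 300 seconds programs ANA_AUTOAGE_AGE_PERIOD to 150; the division by two presumably reflects that an entry must survive two hardware age periods before it is removed.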
+
+static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
unsigned long ageing_clock_t)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2),
- ANA_AUTOAGE);
+ ocelot_set_ageing_time(ocelot, ageing_time);
}
-static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc)
+static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
{
- struct ocelot *ocelot = port->ocelot;
- u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG,
- port->chip_port);
+ u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+ u32 val = 0;
if (mc)
- val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
- else
- val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA);
+ val = cpu_fwd_mcast;
- ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port);
+ ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
+ ANA_PORT_CPU_FWD_CFG, port);
}
static int ocelot_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot_port, trans,
+ ocelot_port_attr_stp_state_set(ocelot, port, trans,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
+ ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port->vlan_aware = attr->u.vlan_filtering;
- ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ priv->vlan_aware = attr->u.vlan_filtering;
+ ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
+ ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
default:
err = -EOPNOTSUPP;
@@ -1383,15 +1554,17 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb,
struct switchdev_trans *trans)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- struct ocelot_multicast *mc;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ int port = priv->chip_port;
u16 vid = mdb->vid;
bool new = false;
if (!vid)
- vid = port->pvid;
+ vid = ocelot_port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -1415,7 +1588,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
ocelot_mact_forget(ocelot, addr, vid);
}
- mc->ports |= BIT(port->chip_port);
+ mc->ports |= BIT(port);
addr[2] = mc->ports << 0;
addr[1] = mc->ports >> 8;
@@ -1425,14 +1598,16 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
static int ocelot_port_obj_del_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- struct ocelot_multicast *mc;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ int port = priv->chip_port;
u16 vid = mdb->vid;
if (!vid)
- vid = port->pvid;
+ vid = ocelot_port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1444,7 +1619,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
addr[0] = 0;
ocelot_mact_forget(ocelot, addr, vid);
- mc->ports &= ~BIT(port->chip_port);
+ mc->ports &= ~BIT(port);
if (!mc->ports) {
list_del(&mc->list);
devm_kfree(ocelot->dev, mc);
@@ -1501,11 +1676,9 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
- struct net_device *bridge)
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
-
if (!ocelot->bridge_mask) {
ocelot->hw_bridge_dev = bridge;
} else {
@@ -1515,26 +1688,25 @@ static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
return -ENODEV;
}
- ocelot->bridge_mask |= BIT(ocelot_port->chip_port);
+ ocelot->bridge_mask |= BIT(port);
return 0;
}
+EXPORT_SYMBOL(ocelot_port_bridge_join);
-static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
- struct net_device *bridge)
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
-
- ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port);
+ ocelot->bridge_mask &= ~BIT(port);
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
- ocelot_port->vlan_aware = 0;
- ocelot_port->pvid = 0;
- ocelot_port->vid = 0;
+ ocelot_port_vlan_filtering(ocelot, port, 0);
+ ocelot_port_set_pvid(ocelot, port, 0);
+ return ocelot_port_set_native_vlan(ocelot, port, 0);
}
+EXPORT_SYMBOL(ocelot_port_bridge_leave);
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
@@ -1594,20 +1766,18 @@ static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
}
}
-static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+static int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
- int p = ocelot_port->chip_port;
- int lag, lp;
struct net_device *ndev;
u32 bond_mask = 0;
+ int lag, lp;
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond, ndev) {
- struct ocelot_port *port = netdev_priv(ndev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
- bond_mask |= BIT(port->chip_port);
+ bond_mask |= BIT(priv->chip_port);
}
rcu_read_unlock();
@@ -1616,17 +1786,17 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
/* If the new port is the lowest one, use it as the logical port from
* now on
*/
- if (p == lp) {
- lag = p;
- ocelot->lags[p] = bond_mask;
- bond_mask &= ~BIT(p);
+ if (port == lp) {
+ lag = port;
+ ocelot->lags[port] = bond_mask;
+ bond_mask &= ~BIT(port);
if (bond_mask) {
lp = __ffs(bond_mask);
ocelot->lags[lp] = 0;
}
} else {
lag = lp;
- ocelot->lags[lp] |= BIT(p);
+ ocelot->lags[lp] |= BIT(port);
}
ocelot_setup_lag(ocelot, lag);
@@ -1635,34 +1805,32 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
return 0;
}
-static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+static void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
- int p = ocelot_port->chip_port;
u32 port_cfg;
int i;
/* Remove port from any lag */
for (i = 0; i < ocelot->num_phys_ports; i++)
- ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+ ocelot->lags[i] &= ~BIT(port);
/* if it was the logical port of the lag, move the lag config to the
* next port
*/
- if (ocelot->lags[p]) {
- int n = __ffs(ocelot->lags[p]);
+ if (ocelot->lags[port]) {
+ int n = __ffs(ocelot->lags[port]);
- ocelot->lags[n] = ocelot->lags[p];
- ocelot->lags[p] = 0;
+ ocelot->lags[n] = ocelot->lags[port];
+ ocelot->lags[port] = 0;
ocelot_setup_lag(ocelot, n);
}
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
- ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
- ANA_PORT_PORT_CFG, p);
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
ocelot_set_aggr_pgids(ocelot);
}
@@ -1677,28 +1845,30 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
unsigned long event,
struct netdev_notifier_changeupper_info *info)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
int err = 0;
switch (event) {
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(info->upper_dev)) {
- if (info->linking)
- err = ocelot_port_bridge_join(ocelot_port,
+ if (info->linking) {
+ err = ocelot_port_bridge_join(ocelot, port,
info->upper_dev);
- else
- ocelot_port_bridge_leave(ocelot_port,
- info->upper_dev);
-
- ocelot_vlan_port_apply(ocelot_port->ocelot,
- ocelot_port);
+ } else {
+ err = ocelot_port_bridge_leave(ocelot, port,
+ info->upper_dev);
+ priv->vlan_aware = false;
+ }
}
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_port_lag_join(ocelot_port,
+ err = ocelot_port_lag_join(ocelot, port,
info->upper_dev);
else
- ocelot_port_lag_leave(ocelot_port,
+ ocelot_port_lag_leave(ocelot, port,
info->upper_dev);
}
break;
@@ -2001,24 +2171,97 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
return 0;
}
+static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int atop_wm;
+
+ ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+
+ /* Set Pause WM hysteresis
+ * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
+ */
+ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
+ SYS_PAUSE_CFG_PAUSE_STOP(101) |
+ SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
+
+ /* Tail dropping watermark */
+ atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+ SYS_ATOP, port);
+ ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+}
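
The arithmetic behind these watermarks, assuming OCELOT_BUFFER_CELL_SZ is 60 bytes as in the existing driver: for the default VLAN_ETH_FRAME_LEN of 1522 bytes, 6 * 1522 / 60 ≈ 152 and 4 * 1522 / 60 ≈ 101, which is where the hardcoded PAUSE_START/PAUSE_STOP values come from. Note that only the SYS_ATOP tail-drop watermark is recomputed from the mtu argument; the pause thresholds stay fixed.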
+
+void ocelot_init_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ skb_queue_head_init(&ocelot_port->tx_skbs);
+
+ /* Basic L2 initialization */
+
+ /* Set MAC IFG Gaps
+ * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
+ * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
+ */
+ ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
+ DEV_MAC_IFG_CFG);
+
+ /* Load seed (0) and set MAC HDX late collision */
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ DEV_MAC_HDX_CFG);
+ mdelay(1);
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
+ DEV_MAC_HDX_CFG);
+
+ /* Set Max Length and maximum tags allowed */
+ ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+ ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
+
+ /* Drop frames with multicast source address */
+ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG, port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
+ REW_PORT_VLAN_CFG_PORT_TPID_M,
+ REW_PORT_VLAN_CFG, port);
+
+ /* Enable vcap lookups */
+ ocelot_vcap_enable(ocelot, port);
+}
+EXPORT_SYMBOL(ocelot_init_port);
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
{
+ struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct net_device *dev;
int err;
- dev = alloc_etherdev(sizeof(struct ocelot_port));
+ dev = alloc_etherdev(sizeof(struct ocelot_port_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, ocelot->dev);
- ocelot_port = netdev_priv(dev);
- ocelot_port->dev = dev;
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->phy = phy;
+ priv->chip_port = port;
+ ocelot_port = &priv->port;
ocelot_port->ocelot = ocelot;
ocelot_port->regs = regs;
- ocelot_port->chip_port = port;
- ocelot_port->phy = phy;
ocelot->ports[port] = ocelot_port;
dev->netdev_ops = &ocelot_port_netdev_ops;
@@ -2033,33 +2276,81 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
- INIT_LIST_HEAD(&ocelot_port->skbs);
+ ocelot_init_port(ocelot, port);
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- goto err_register_netdev;
+ free_netdev(dev);
}
- /* Basic L2 initialization */
- ocelot_vlan_port_apply(ocelot, ocelot_port);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_probe_port);
- /* Enable vcap lookups */
- ocelot_vcap_enable(ocelot, ocelot_port);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction)
+{
+ /* Configure and enable the CPU port. */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+ ANA_PORT_PORT_CFG, cpu);
- return 0;
+ /* If the CPU port is a physical port, set up the port in Node
+ * Processor Interface (NPI) mode. This is the mode through which
+ * frames can be injected from and extracted to an external CPU.
+ * Only one port can be in NPI mode at a time.
+ */
+ if (cpu < ocelot->num_phys_ports) {
+ int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
-err_register_netdev:
- free_netdev(dev);
- return err;
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
+ QSYS_EXT_CPU_CFG);
+
+ if (injection == OCELOT_TAG_PREFIX_SHORT)
+ mtu += OCELOT_SHORT_PREFIX_LEN;
+ else if (injection == OCELOT_TAG_PREFIX_LONG)
+ mtu += OCELOT_LONG_PREFIX_LEN;
+
+ ocelot_port_set_mtu(ocelot, cpu, mtu);
+ }
+
+ /* CPU port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, cpu);
+ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
+ SYS_PORT_MODE_INCL_INJ_HDR(injection),
+ SYS_PORT_MODE, cpu);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, cpu);
+
+ ocelot->cpu = cpu;
}
-EXPORT_SYMBOL(ocelot_probe_port);
+EXPORT_SYMBOL(ocelot_set_cpu_port);
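
A minimal sketch of how a consumer that does not go through ocelot_probe_port() (e.g. a DSA-style front-end) might sequence these library calls at probe time. The helper name is hypothetical, and OCELOT_TAG_PREFIX_NONE is assumed to encode the value 1 that the deleted code below hardcoded into SYS_PORT_MODE:

static int example_switch_setup(struct ocelot *ocelot)
{
        int port, err;

        err = ocelot_init(ocelot);
        if (err)
                return err;

        for (port = 0; port < ocelot->num_phys_ports; port++)
                ocelot_init_port(ocelot, port);

        /* An internal CPU port sits right after the physical ports */
        ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
                            OCELOT_TAG_PREFIX_NONE,
                            OCELOT_TAG_PREFIX_NONE);

        return 0;
}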
int ocelot_init(struct ocelot *ocelot)
{
- u32 port;
- int i, ret, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ int i, ret;
+ u32 port;
+
+ if (ocelot->ops->reset) {
+ ret = ocelot->ops->reset(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev, "Switch reset failed\n");
+ return ret;
+ }
+ }
ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(u32), GFP_KERNEL);
@@ -2081,6 +2372,7 @@ int ocelot_init(struct ocelot *ocelot)
if (!ocelot->stats_queue)
return -ENOMEM;
+ INIT_LIST_HEAD(&ocelot->multicast);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_ace_init(ocelot);
@@ -2138,13 +2430,6 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
- /* Configure and enable the CPU port. */
- ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
- ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
- ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
- ANA_PORT_PORT_CFG, cpu);
-
/* Allow broadcast MAC frames. */
for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
@@ -2157,13 +2442,6 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
- /* CPU port Injection/Extraction configuration */
- ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
- QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
- QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, cpu);
- ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) |
- SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
/* Allow manual injection via DEVCPU_QS registers, and byte swap these
* registers' endianness.
*/
@@ -2204,9 +2482,7 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- struct list_head *pos, *tmp;
struct ocelot_port *port;
- struct ocelot_skb *entry;
int i;
cancel_delayed_work(&ocelot->stats_work);
@@ -2216,14 +2492,7 @@ void ocelot_deinit(struct ocelot *ocelot)
for (i = 0; i < ocelot->num_phys_ports; i++) {
port = ocelot->ports[i];
-
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
-
- list_del(pos);
- dev_kfree_skb_any(entry->skb);
- kfree(entry);
- }
+ skb_queue_purge(&port->tx_skbs);
}
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 06ac806052bc..c259114c48fd 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -18,11 +18,12 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
#include "ocelot_ana.h"
#include "ocelot_dev.h"
#include "ocelot_qsys.h"
#include "ocelot_rew.h"
-#include "ocelot_sys.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
#include "ocelot_ptp.h"
@@ -43,8 +44,6 @@
#define OCELOT_PTP_QUEUE_SZ 128
-#define IFH_LEN 4
-
struct frame_info {
u32 len;
u16 port;
@@ -54,372 +53,6 @@ struct frame_info {
u32 timestamp; /* rew_val */
};
-#define IFH_INJ_BYPASS BIT(31)
-#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
-
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
-
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
-
-#define OCELOT_SPEED_2500 0
-#define OCELOT_SPEED_1000 1
-#define OCELOT_SPEED_100 2
-#define OCELOT_SPEED_10 3
-
-#define TARGET_OFFSET 24
-#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
-#define REG(reg, offset) [reg & REG_MASK] = offset
-
-enum ocelot_target {
- ANA = 1,
- QS,
- QSYS,
- REW,
- SYS,
- S2,
- HSIO,
- PTP,
- TARGET_MAX,
-};
-
-enum ocelot_reg {
- ANA_ADVLEARN = ANA << TARGET_OFFSET,
- ANA_VLANMASK,
- ANA_PORT_B_DOMAIN,
- ANA_ANAGEFIL,
- ANA_ANEVENTS,
- ANA_STORMLIMIT_BURST,
- ANA_STORMLIMIT_CFG,
- ANA_ISOLATED_PORTS,
- ANA_COMMUNITY_PORTS,
- ANA_AUTOAGE,
- ANA_MACTOPTIONS,
- ANA_LEARNDISC,
- ANA_AGENCTRL,
- ANA_MIRRORPORTS,
- ANA_EMIRRORPORTS,
- ANA_FLOODING,
- ANA_FLOODING_IPMC,
- ANA_SFLOW_CFG,
- ANA_PORT_MODE,
- ANA_CUT_THRU_CFG,
- ANA_PGID_PGID,
- ANA_TABLES_ANMOVED,
- ANA_TABLES_MACHDATA,
- ANA_TABLES_MACLDATA,
- ANA_TABLES_STREAMDATA,
- ANA_TABLES_MACACCESS,
- ANA_TABLES_MACTINDX,
- ANA_TABLES_VLANACCESS,
- ANA_TABLES_VLANTIDX,
- ANA_TABLES_ISDXACCESS,
- ANA_TABLES_ISDXTIDX,
- ANA_TABLES_ENTRYLIM,
- ANA_TABLES_PTP_ID_HIGH,
- ANA_TABLES_PTP_ID_LOW,
- ANA_TABLES_STREAMACCESS,
- ANA_TABLES_STREAMTIDX,
- ANA_TABLES_SEQ_HISTORY,
- ANA_TABLES_SEQ_MASK,
- ANA_TABLES_SFID_MASK,
- ANA_TABLES_SFIDACCESS,
- ANA_TABLES_SFIDTIDX,
- ANA_MSTI_STATE,
- ANA_OAM_UPM_LM_CNT,
- ANA_SG_ACCESS_CTRL,
- ANA_SG_CONFIG_REG_1,
- ANA_SG_CONFIG_REG_2,
- ANA_SG_CONFIG_REG_3,
- ANA_SG_CONFIG_REG_4,
- ANA_SG_CONFIG_REG_5,
- ANA_SG_GCL_GS_CONFIG,
- ANA_SG_GCL_TI_CONFIG,
- ANA_SG_STATUS_REG_1,
- ANA_SG_STATUS_REG_2,
- ANA_SG_STATUS_REG_3,
- ANA_PORT_VLAN_CFG,
- ANA_PORT_DROP_CFG,
- ANA_PORT_QOS_CFG,
- ANA_PORT_VCAP_CFG,
- ANA_PORT_VCAP_S1_KEY_CFG,
- ANA_PORT_VCAP_S2_CFG,
- ANA_PORT_PCP_DEI_MAP,
- ANA_PORT_CPU_FWD_CFG,
- ANA_PORT_CPU_FWD_BPDU_CFG,
- ANA_PORT_CPU_FWD_GARP_CFG,
- ANA_PORT_CPU_FWD_CCM_CFG,
- ANA_PORT_PORT_CFG,
- ANA_PORT_POL_CFG,
- ANA_PORT_PTP_CFG,
- ANA_PORT_PTP_DLY1_CFG,
- ANA_PORT_PTP_DLY2_CFG,
- ANA_PORT_SFID_CFG,
- ANA_PFC_PFC_CFG,
- ANA_PFC_PFC_TIMER,
- ANA_IPT_OAM_MEP_CFG,
- ANA_IPT_IPT,
- ANA_PPT_PPT,
- ANA_FID_MAP_FID_MAP,
- ANA_AGGR_CFG,
- ANA_CPUQ_CFG,
- ANA_CPUQ_CFG2,
- ANA_CPUQ_8021_CFG,
- ANA_DSCP_CFG,
- ANA_DSCP_REWR_CFG,
- ANA_VCAP_RNG_TYPE_CFG,
- ANA_VCAP_RNG_VAL_CFG,
- ANA_VRAP_CFG,
- ANA_VRAP_HDR_DATA,
- ANA_VRAP_HDR_MASK,
- ANA_DISCARD_CFG,
- ANA_FID_CFG,
- ANA_POL_PIR_CFG,
- ANA_POL_CIR_CFG,
- ANA_POL_MODE_CFG,
- ANA_POL_PIR_STATE,
- ANA_POL_CIR_STATE,
- ANA_POL_STATE,
- ANA_POL_FLOWC,
- ANA_POL_HYST,
- ANA_POL_MISC_CFG,
- QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
- QS_XTR_RD,
- QS_XTR_FRM_PRUNING,
- QS_XTR_FLUSH,
- QS_XTR_DATA_PRESENT,
- QS_XTR_CFG,
- QS_INJ_GRP_CFG,
- QS_INJ_WR,
- QS_INJ_CTRL,
- QS_INJ_STATUS,
- QS_INJ_ERR,
- QS_INH_DBG,
- QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
- QSYS_SWITCH_PORT_MODE,
- QSYS_STAT_CNT_CFG,
- QSYS_EEE_CFG,
- QSYS_EEE_THRES,
- QSYS_IGR_NO_SHARING,
- QSYS_EGR_NO_SHARING,
- QSYS_SW_STATUS,
- QSYS_EXT_CPU_CFG,
- QSYS_PAD_CFG,
- QSYS_CPU_GROUP_MAP,
- QSYS_QMAP,
- QSYS_ISDX_SGRP,
- QSYS_TIMED_FRAME_ENTRY,
- QSYS_TFRM_MISC,
- QSYS_TFRM_PORT_DLY,
- QSYS_TFRM_TIMER_CFG_1,
- QSYS_TFRM_TIMER_CFG_2,
- QSYS_TFRM_TIMER_CFG_3,
- QSYS_TFRM_TIMER_CFG_4,
- QSYS_TFRM_TIMER_CFG_5,
- QSYS_TFRM_TIMER_CFG_6,
- QSYS_TFRM_TIMER_CFG_7,
- QSYS_TFRM_TIMER_CFG_8,
- QSYS_RED_PROFILE,
- QSYS_RES_QOS_MODE,
- QSYS_RES_CFG,
- QSYS_RES_STAT,
- QSYS_EGR_DROP_MODE,
- QSYS_EQ_CTRL,
- QSYS_EVENTS_CORE,
- QSYS_QMAXSDU_CFG_0,
- QSYS_QMAXSDU_CFG_1,
- QSYS_QMAXSDU_CFG_2,
- QSYS_QMAXSDU_CFG_3,
- QSYS_QMAXSDU_CFG_4,
- QSYS_QMAXSDU_CFG_5,
- QSYS_QMAXSDU_CFG_6,
- QSYS_QMAXSDU_CFG_7,
- QSYS_PREEMPTION_CFG,
- QSYS_CIR_CFG,
- QSYS_EIR_CFG,
- QSYS_SE_CFG,
- QSYS_SE_DWRR_CFG,
- QSYS_SE_CONNECT,
- QSYS_SE_DLB_SENSE,
- QSYS_CIR_STATE,
- QSYS_EIR_STATE,
- QSYS_SE_STATE,
- QSYS_HSCH_MISC_CFG,
- QSYS_TAG_CONFIG,
- QSYS_TAS_PARAM_CFG_CTRL,
- QSYS_PORT_MAX_SDU,
- QSYS_PARAM_CFG_REG_1,
- QSYS_PARAM_CFG_REG_2,
- QSYS_PARAM_CFG_REG_3,
- QSYS_PARAM_CFG_REG_4,
- QSYS_PARAM_CFG_REG_5,
- QSYS_GCL_CFG_REG_1,
- QSYS_GCL_CFG_REG_2,
- QSYS_PARAM_STATUS_REG_1,
- QSYS_PARAM_STATUS_REG_2,
- QSYS_PARAM_STATUS_REG_3,
- QSYS_PARAM_STATUS_REG_4,
- QSYS_PARAM_STATUS_REG_5,
- QSYS_PARAM_STATUS_REG_6,
- QSYS_PARAM_STATUS_REG_7,
- QSYS_PARAM_STATUS_REG_8,
- QSYS_PARAM_STATUS_REG_9,
- QSYS_GCL_STATUS_REG_1,
- QSYS_GCL_STATUS_REG_2,
- REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
- REW_TAG_CFG,
- REW_PORT_CFG,
- REW_DSCP_CFG,
- REW_PCP_DEI_QOS_MAP_CFG,
- REW_PTP_CFG,
- REW_PTP_DLY1_CFG,
- REW_RED_TAG_CFG,
- REW_DSCP_REMAP_DP1_CFG,
- REW_DSCP_REMAP_CFG,
- REW_STAT_CFG,
- REW_REW_STICKY,
- REW_PPT,
- SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
- SYS_COUNT_RX_UNICAST,
- SYS_COUNT_RX_MULTICAST,
- SYS_COUNT_RX_BROADCAST,
- SYS_COUNT_RX_SHORTS,
- SYS_COUNT_RX_FRAGMENTS,
- SYS_COUNT_RX_JABBERS,
- SYS_COUNT_RX_CRC_ALIGN_ERRS,
- SYS_COUNT_RX_SYM_ERRS,
- SYS_COUNT_RX_64,
- SYS_COUNT_RX_65_127,
- SYS_COUNT_RX_128_255,
- SYS_COUNT_RX_256_1023,
- SYS_COUNT_RX_1024_1526,
- SYS_COUNT_RX_1527_MAX,
- SYS_COUNT_RX_PAUSE,
- SYS_COUNT_RX_CONTROL,
- SYS_COUNT_RX_LONGS,
- SYS_COUNT_RX_CLASSIFIED_DROPS,
- SYS_COUNT_TX_OCTETS,
- SYS_COUNT_TX_UNICAST,
- SYS_COUNT_TX_MULTICAST,
- SYS_COUNT_TX_BROADCAST,
- SYS_COUNT_TX_COLLISION,
- SYS_COUNT_TX_DROPS,
- SYS_COUNT_TX_PAUSE,
- SYS_COUNT_TX_64,
- SYS_COUNT_TX_65_127,
- SYS_COUNT_TX_128_511,
- SYS_COUNT_TX_512_1023,
- SYS_COUNT_TX_1024_1526,
- SYS_COUNT_TX_1527_MAX,
- SYS_COUNT_TX_AGING,
- SYS_RESET_CFG,
- SYS_SR_ETYPE_CFG,
- SYS_VLAN_ETYPE_CFG,
- SYS_PORT_MODE,
- SYS_FRONT_PORT_MODE,
- SYS_FRM_AGING,
- SYS_STAT_CFG,
- SYS_SW_STATUS,
- SYS_MISC_CFG,
- SYS_REW_MAC_HIGH_CFG,
- SYS_REW_MAC_LOW_CFG,
- SYS_TIMESTAMP_OFFSET,
- SYS_CMID,
- SYS_PAUSE_CFG,
- SYS_PAUSE_TOT_CFG,
- SYS_ATOP,
- SYS_ATOP_TOT_CFG,
- SYS_MAC_FC_CFG,
- SYS_MMGT,
- SYS_MMGT_FAST,
- SYS_EVENTS_DIF,
- SYS_EVENTS_CORE,
- SYS_CNT,
- SYS_PTP_STATUS,
- SYS_PTP_TXSTAMP,
- SYS_PTP_NXT,
- SYS_PTP_CFG,
- SYS_RAM_INIT,
- SYS_CM_ADDR,
- SYS_CM_DATA_WR,
- SYS_CM_DATA_RD,
- SYS_CM_OP,
- SYS_CM_DATA,
- S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
- S2_CORE_MV_CFG,
- S2_CACHE_ENTRY_DAT,
- S2_CACHE_MASK_DAT,
- S2_CACHE_ACTION_DAT,
- S2_CACHE_CNT_DAT,
- S2_CACHE_TG_DAT,
- PTP_PIN_CFG = PTP << TARGET_OFFSET,
- PTP_PIN_TOD_SEC_MSB,
- PTP_PIN_TOD_SEC_LSB,
- PTP_PIN_TOD_NSEC,
- PTP_CFG_MISC,
- PTP_CLK_CFG_ADJ_CFG,
- PTP_CLK_CFG_ADJ_FREQ,
-};
-
-enum ocelot_regfield {
- ANA_ADVLEARN_VLAN_CHK,
- ANA_ADVLEARN_LEARN_MIRROR,
- ANA_ANEVENTS_FLOOD_DISCARD,
- ANA_ANEVENTS_MSTI_DROP,
- ANA_ANEVENTS_ACLKILL,
- ANA_ANEVENTS_ACLUSED,
- ANA_ANEVENTS_AUTOAGE,
- ANA_ANEVENTS_VS2TTL1,
- ANA_ANEVENTS_STORM_DROP,
- ANA_ANEVENTS_LEARN_DROP,
- ANA_ANEVENTS_AGED_ENTRY,
- ANA_ANEVENTS_CPU_LEARN_FAILED,
- ANA_ANEVENTS_AUTO_LEARN_FAILED,
- ANA_ANEVENTS_LEARN_REMOVE,
- ANA_ANEVENTS_AUTO_LEARNED,
- ANA_ANEVENTS_AUTO_MOVED,
- ANA_ANEVENTS_DROPPED,
- ANA_ANEVENTS_CLASSIFIED_DROP,
- ANA_ANEVENTS_CLASSIFIED_COPY,
- ANA_ANEVENTS_VLAN_DISCARD,
- ANA_ANEVENTS_FWD_DISCARD,
- ANA_ANEVENTS_MULTICAST_FLOOD,
- ANA_ANEVENTS_UNICAST_FLOOD,
- ANA_ANEVENTS_DEST_KNOWN,
- ANA_ANEVENTS_BUCKET3_MATCH,
- ANA_ANEVENTS_BUCKET2_MATCH,
- ANA_ANEVENTS_BUCKET1_MATCH,
- ANA_ANEVENTS_BUCKET0_MATCH,
- ANA_ANEVENTS_CPU_OPERATION,
- ANA_ANEVENTS_DMAC_LOOKUP,
- ANA_ANEVENTS_SMAC_LOOKUP,
- ANA_ANEVENTS_SEQ_GEN_ERR_0,
- ANA_ANEVENTS_SEQ_GEN_ERR_1,
- ANA_TABLES_MACACCESS_B_DOM,
- ANA_TABLES_MACTINDX_BUCKET,
- ANA_TABLES_MACTINDX_M_INDEX,
- QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
- QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
- QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
- QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
- QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
- SYS_RESET_CFG_CORE_ENA,
- SYS_RESET_CFG_MEM_ENA,
- SYS_RESET_CFG_MEM_INIT,
- REGFIELD_MAX
-};
-
-enum ocelot_clk_pins {
- ALT_PPS_PIN = 1,
- EXT_CLK_PIN,
- ALT_LDST_PIN,
- TOD_ACC_PIN
-};
-
struct ocelot_multicast {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -427,133 +60,40 @@ struct ocelot_multicast {
u16 ports;
};
-struct ocelot_port;
-
-struct ocelot_stat_layout {
- u32 offset;
- char name[ETH_GSTRING_LEN];
-};
-
-struct ocelot {
- struct device *dev;
-
- struct regmap *targets[TARGET_MAX];
- struct regmap_field *regfields[REGFIELD_MAX];
- const u32 *const *map;
- const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
-
- u8 base_mac[ETH_ALEN];
-
- struct net_device *hw_bridge_dev;
- u16 bridge_mask;
- u16 bridge_fwd_mask;
-
- struct workqueue_struct *ocelot_owq;
-
- int shared_queue_sz;
-
- u8 num_phys_ports;
- u8 num_cpu_ports;
- struct ocelot_port **ports;
-
- u32 *lags;
-
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
-
- struct list_head multicast;
-
- /* Workqueue to check statistics for overflow with its lock */
- struct mutex stats_lock;
- u64 *stats;
- struct delayed_work stats_work;
- struct workqueue_struct *stats_queue;
-
- u8 ptp:1;
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_info;
- struct hwtstamp_config hwtstamp_config;
- struct mutex ptp_lock; /* Protects the PTP interface state */
- spinlock_t ptp_clock_lock; /* Protects the PTP clock */
-};
-
-struct ocelot_port {
+struct ocelot_port_private {
+ struct ocelot_port port;
struct net_device *dev;
- struct ocelot *ocelot;
struct phy_device *phy;
- void __iomem *regs;
u8 chip_port;
- /* Ingress default VLAN (pvid) */
- u16 pvid;
-
- /* Egress default VLAN (vid) */
- u16 vid;
-
u8 vlan_aware;
- u64 *stats;
-
phy_interface_t phy_mode;
struct phy *serdes;
struct ocelot_port_tc tc;
-
- u8 ptp_cmd;
- struct list_head skbs;
- u8 ts_id;
-};
-
-struct ocelot_skb {
- struct list_head head;
- struct sk_buff *skb;
- u8 id;
};
-u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
-#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
-#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
-
-void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
-#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
-#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
-#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
-
-void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
- u32 offset);
-#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
-#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
-#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
-
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
-int ocelot_regfields_init(struct ocelot *ocelot,
- const struct reg_field *const regfields);
-struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
- struct platform_device *pdev,
- const char *name);
-
#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-int ocelot_init(struct ocelot *ocelot);
-void ocelot_deinit(struct ocelot *ocelot);
-int ocelot_chip_init(struct ocelot *ocelot);
+int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops);
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
-void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
#endif
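
The key structural move above is embedding the common struct ocelot_port as the member 'port' inside the driver-private struct ocelot_port_private. Given that layout, the private wrapper can always be recovered from a common pointer with container_of(), as the board code further down does inline; a minimal helper sketch (the helper name is illustrative, not part of the patch):

    static inline struct ocelot_port_private *
    ocelot_port_to_priv(struct ocelot_port *ocelot_port)
    {
        /* 'port' is the embedded struct ocelot_port member above */
        return container_of(ocelot_port, struct ocelot_port_private, port);
    }
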
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index e98944c87259..c08e3e8482e7 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -224,9 +224,9 @@ int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
int ocelot_ace_init(struct ocelot *ocelot);
void ocelot_ace_deinit(void);
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
struct flow_block_offload *f);
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
struct flow_block_offload *f);
#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index aac115136720..2da8eee27e98 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -95,6 +95,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
do {
struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
u64 tod_in_ns, full_ts_in_ns;
struct frame_info info = {};
struct net_device *dev;
@@ -103,7 +105,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
int sz, len, buf_len;
struct sk_buff *skb;
- for (i = 0; i < IFH_LEN; i++) {
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
if (err != 4)
break;
@@ -114,7 +116,10 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
ocelot_parse_ifh(ifh, &info);
- dev = ocelot->ports[info.port]->dev;
+ ocelot_port = ocelot->ports[info.port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
skb = netdev_alloc_skb(dev, info.len);
@@ -185,69 +190,69 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
{
- int budget = OCELOT_PTP_QUEUE_SZ;
struct ocelot *ocelot = arg;
- while (budget--) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct list_head *pos, *tmp;
- struct sk_buff *skb = NULL;
- struct ocelot_skb *entry;
- struct ocelot_port *port;
- struct timespec64 ts;
- u32 val, id, txport;
+ ocelot_get_txtstamp(ocelot);
- val = ocelot_read(ocelot, SYS_PTP_STATUS);
+ return IRQ_HANDLED;
+}
- /* Check if a timestamp can be retrieved */
- if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
- break;
+static const struct of_device_id mscc_ocelot_match[] = {
+ { .compatible = "mscc,vsc7514-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
- WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+static void ocelot_port_pcs_init(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- /* Retrieve the ts ID and Tx port */
- id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
- txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
- /* Retrieve its associated skb */
- port = ocelot->ports[txport];
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
- if (entry->id != id)
- continue;
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
- skb = entry->skb;
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
- list_del(pos);
- kfree(entry);
- }
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+}
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+static int ocelot_reset(struct ocelot *ocelot)
+{
+ int retries = 100;
+ u32 val;
- if (unlikely(!skb))
- continue;
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
+ do {
+ msleep(1);
+ regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+ } while (val && --retries);
- /* Set the timestamp into the skb */
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
+ if (!retries)
+ return -ETIMEDOUT;
- dev_kfree_skb_any(skb);
- }
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
- return IRQ_HANDLED;
+ return 0;
}
-static const struct of_device_id mscc_ocelot_match[] = {
- { .compatible = "mscc,vsc7514-switch" },
- { }
+static const struct ocelot_ops ocelot_ops = {
+ .pcs_init = ocelot_port_pcs_init,
+ .reset = ocelot_reset,
};
-MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
static int mscc_ocelot_probe(struct platform_device *pdev)
{
@@ -257,13 +262,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
- u32 val;
struct {
enum ocelot_target id;
char *name;
u8 optional:1;
- } res[] = {
+ } io_target[] = {
{ SYS, "sys" },
{ REW, "rew" },
{ QSYS, "qsys" },
@@ -283,20 +287,23 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
- for (i = 0; i < ARRAY_SIZE(res); i++) {
+ for (i = 0; i < ARRAY_SIZE(io_target); i++) {
struct regmap *target;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ io_target[i].name);
- target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
+ target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
- if (res[i].optional) {
- ocelot->targets[res[i].id] = NULL;
+ if (io_target[i].optional) {
+ ocelot->targets[io_target[i].id] = NULL;
continue;
}
-
return PTR_ERR(target);
}
- ocelot->targets[res[i].id] = target;
+ ocelot->targets[io_target[i].id] = target;
}
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
@@ -307,7 +314,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[HSIO] = hsio;
- err = ocelot_chip_init(ocelot);
+ err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
return err;
@@ -334,18 +341,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ptp = 1;
}
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
-
- do {
- msleep(1);
- regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
- } while (val);
-
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-
ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */
ports = of_get_child_by_name(np, "ethernet-ports");
@@ -359,17 +354,20 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
- INIT_LIST_HEAD(&ocelot->multicast);
ocelot_init(ocelot);
+ ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
+ OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
for_each_available_child_of_node(ports, portnp) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ phy_interface_t phy_mode;
struct phy_device *phy;
struct resource *res;
struct phy *serdes;
void __iomem *regs;
char res_name[8];
- int phy_mode;
u32 port;
if (of_property_read_u32(portnp, "reg", &port))
@@ -398,13 +396,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
goto out_put_ports;
}
- phy_mode = of_get_phy_mode(portnp);
- if (phy_mode < 0)
- ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
- else
- ocelot->ports[port]->phy_mode = phy_mode;
+ ocelot_port = ocelot->ports[port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ of_get_phy_mode(portnp, &phy_mode);
+
+ priv->phy_mode = phy_mode;
- switch (ocelot->ports[port]->phy_mode) {
+ switch (priv->phy_mode) {
case PHY_INTERFACE_MODE_NA:
continue;
case PHY_INTERFACE_MODE_SGMII:
@@ -413,7 +413,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
/* Ensure clock signals and speed is set on all
* QSGMII links
*/
- ocelot_port_writel(ocelot->ports[port],
+ ocelot_port_writel(ocelot_port,
DEV_CLOCK_CFG_LINK_SPEED
(OCELOT_SPEED_1000),
DEV_CLOCK_CFG);
@@ -441,7 +441,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
goto out_put_ports;
}
- ocelot->ports[port]->serdes = serdes;
+ priv->serdes = serdes;
}
register_netdevice_notifier(&ocelot_netdevice_nb);
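
Note that ocelot_reset() above also fixes a latent robustness issue: the old probe-time loop polled SYS_RESET_CFG_MEM_INIT forever, while the new one gives up after roughly 100 iterations of ~1 ms and returns -ETIMEDOUT. The same bounded wait can equivalently be written against a jiffies deadline; a sketch reusing the regfield names from the patch (not part of the patch itself):

    static int ocelot_wait_mem_init(struct ocelot *ocelot)
    {
        unsigned long deadline = jiffies + msecs_to_jiffies(100);
        u32 val;

        do {
            usleep_range(1000, 2000);
            regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
                              &val);
            if (!val)
                return 0;    /* RAM init complete */
        } while (time_before(jiffies, deadline));

        return -ETIMEDOUT;
    }
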
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index b894bc0c9c16..3d65b99b9734 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -10,7 +10,7 @@
struct ocelot_port_block {
struct ocelot_acl_block *block;
- struct ocelot_port *port;
+ struct ocelot_port_private *priv;
};
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
@@ -177,8 +177,8 @@ struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
if (!rule)
return NULL;
- rule->port = block->port;
- rule->chip_port = block->port->chip_port;
+ rule->port = &block->priv->port;
+ rule->chip_port = block->priv->chip_port;
return rule;
}
@@ -202,7 +202,7 @@ static int ocelot_flower_replace(struct flow_cls_offload *f,
if (ret)
return ret;
- port_block->port->tc.offload_cnt++;
+ port_block->priv->tc.offload_cnt++;
return 0;
}
@@ -213,14 +213,14 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
int ret;
rule.prio = f->common.prio;
- rule.port = port_block->port;
+ rule.port = &port_block->priv->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_offload_del(&rule);
if (ret)
return ret;
- port_block->port->tc.offload_cnt--;
+ port_block->priv->tc.offload_cnt--;
return 0;
}
@@ -231,7 +231,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
int ret;
rule.prio = f->common.prio;
- rule.port = port_block->port;
+ rule.port = &port_block->priv->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_stats_update(&rule);
if (ret)
@@ -261,7 +261,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
{
struct ocelot_port_block *port_block = cb_priv;
- if (!tc_cls_can_offload_and_chain0(port_block->port->dev, type_data))
+ if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -275,7 +275,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
}
static struct ocelot_port_block*
-ocelot_port_block_create(struct ocelot_port *port)
+ocelot_port_block_create(struct ocelot_port_private *priv)
{
struct ocelot_port_block *port_block;
@@ -283,7 +283,7 @@ ocelot_port_block_create(struct ocelot_port *port)
if (!port_block)
return NULL;
- port_block->port = port;
+ port_block->priv = priv;
return port_block;
}
@@ -300,7 +300,7 @@ static void ocelot_tc_block_unbind(void *cb_priv)
ocelot_port_block_destroy(port_block);
}
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct ocelot_port_block *port_block;
@@ -311,14 +311,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
return -EOPNOTSUPP;
block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, port);
+ ocelot_setup_tc_block_cb_flower, priv);
if (!block_cb) {
- port_block = ocelot_port_block_create(port);
+ port_block = ocelot_port_block_create(priv);
if (!port_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
- port, port_block,
+ priv, port_block,
ocelot_tc_block_unbind);
if (IS_ERR(block_cb)) {
ret = PTR_ERR(block_cb);
@@ -339,13 +339,13 @@ err_cb_register:
return ret;
}
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, port);
+ ocelot_setup_tc_block_cb_flower, priv);
if (!block_cb)
return;
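
The mechanical port -> priv switch throughout this file matters because flow block callbacks are keyed by the (callback, cb_ident) pair: flow_block_cb_lookup() must be given the same identity pointer that was passed to flow_block_cb_alloc(), or the unbind path silently fails to find the registration. The bind-side sequence reduced to its essentials (a sketch; 'release' stands in for ocelot_tc_block_unbind):

    block_cb = flow_block_cb_lookup(f->block, cb, priv);
    if (!block_cb) {
        block_cb = flow_block_cb_alloc(cb, priv, block_priv, release);
        if (IS_ERR(block_cb))
            return PTR_ERR(block_cb);
        flow_block_cb_add(block_cb, f);
    }
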
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index c6db8ad31fdf..b229b1cb68ef 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -97,20 +97,16 @@ static struct regmap_config ocelot_regmap_config = {
.reg_stride = 4,
};
-struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
- struct platform_device *pdev,
- const char *name)
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
{
- struct resource *res;
void __iomem *regs;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(regs))
return ERR_CAST(regs);
- ocelot_regmap_config.name = name;
- return devm_regmap_init_mmio(ocelot->dev, regs,
- &ocelot_regmap_config);
+ ocelot_regmap_config.name = res->name;
+
+ return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
-EXPORT_SYMBOL(ocelot_io_platform_init);
+EXPORT_SYMBOL(ocelot_regmap_init);
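
Taking a bare struct resource instead of a platform_device plus name decouples the regmap helper from the platform bus, so a caller that discovers its memory regions some other way can still use it. Under the new signature, a platform-bus caller looks like this (the "sys" resource name mirrors the probe code above):

    struct resource *res;
    struct regmap *target;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sys");
    target = ocelot_regmap_init(ocelot, res);
    if (IS_ERR(target))
        return PTR_ERR(target);
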
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index 701e82dd749a..faddce43f2e3 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -40,13 +40,12 @@ struct qos_policer_conf {
u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
};
-static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
+static int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
struct qos_policer_conf *conf)
{
u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
bool cir_discard = 0, pir_discard = 0;
- struct ocelot *ocelot = port->ocelot;
u32 pbs_max = 0, cbs_max = 0;
u8 ipg = 20;
u32 value;
@@ -123,22 +122,26 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
/* Check limits */
if (pir > GENMASK(15, 0)) {
- netdev_err(port->dev, "Invalid pir\n");
+ dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n",
+ port, pir, GENMASK(15, 0));
return -EINVAL;
}
if (cir > GENMASK(15, 0)) {
- netdev_err(port->dev, "Invalid cir\n");
+ dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n",
+ port, cir, GENMASK(15, 0));
return -EINVAL;
}
if (pbs > pbs_max) {
- netdev_err(port->dev, "Invalid pbs\n");
+ dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n",
+ port, pbs, pbs_max);
return -EINVAL;
}
if (cbs > cbs_max) {
- netdev_err(port->dev, "Invalid cbs\n");
+ dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n",
+ port, cbs, cbs_max);
return -EINVAL;
}
@@ -171,10 +174,9 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
return 0;
}
-int ocelot_port_policer_add(struct ocelot_port *port,
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
- struct ocelot *ocelot = port->ocelot;
struct qos_policer_conf pp = { 0 };
int err;
@@ -185,11 +187,10 @@ int ocelot_port_policer_add(struct ocelot_port *port,
pp.pir = pol->rate;
pp.pbs = pol->burst;
- netdev_dbg(port->dev,
- "%s: port %u pir %u kbps, pbs %u bytes\n",
- __func__, port->chip_port, pp.pir, pp.pbs);
+ dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
+ __func__, port, pp.pir, pp.pbs);
- err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -198,22 +199,21 @@ int ocelot_port_policer_add(struct ocelot_port *port,
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
- ANA_PORT_POL_CFG, port->chip_port);
+ ANA_PORT_POL_CFG, port);
return 0;
}
-int ocelot_port_policer_del(struct ocelot_port *port)
+int ocelot_port_policer_del(struct ocelot *ocelot, int port)
{
- struct ocelot *ocelot = port->ocelot;
struct qos_policer_conf pp = { 0 };
int err;
- netdev_dbg(port->dev, "%s: port %u\n", __func__, port->chip_port);
+ dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port);
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -221,7 +221,7 @@ int ocelot_port_policer_del(struct ocelot_port *port)
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
- ANA_PORT_POL_CFG, port->chip_port);
+ ANA_PORT_POL_CFG, port);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index d1137f79efda..ae9509229463 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -14,9 +14,9 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
-int ocelot_port_policer_add(struct ocelot_port *port,
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol);
-int ocelot_port_policer_del(struct ocelot_port *port);
+int ocelot_port_policer_del(struct ocelot *ocelot, int port);
#endif /* _MSCC_OCELOT_POLICE_H_ */
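
Switching the policer API from struct ocelot_port * to (struct ocelot *, int port) removes the implicit dependency on a netdev-backed port structure, keeping the policer code usable by callers that only know the switch instance and a port index. A minimal caller under the new prototypes (rate and burst values arbitrary):

    struct ocelot_policer pol = {
        .rate  = 1000,     /* kbps */
        .burst = 10000,    /* bytes */
    };
    int err;

    err = ocelot_port_policer_add(ocelot, port, &pol);
    if (err)
        return err;
    /* ... later, to disable policing on the port ... */
    err = ocelot_port_policer_del(ocelot, port);
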
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e59977d20400..b88b5899b227 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -423,7 +423,7 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
-int ocelot_chip_init(struct ocelot *ocelot)
+int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot)
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->shared_queue_sz = 224 * 1024;
+ ocelot->ops = ops;
ret = ocelot_regfields_init(ocelot, ocelot_regfields);
if (ret)
diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/drivers/net/ethernet/mscc/ocelot_sys.h
deleted file mode 100644
index 16f91e172bcb..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_sys.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_SYS_H_
-#define _MSCC_OCELOT_SYS_H_
-
-#define SYS_COUNT_RX_OCTETS_RSZ 0x4
-
-#define SYS_COUNT_TX_OCTETS_RSZ 0x4
-
-#define SYS_PORT_MODE_RSZ 0x4
-
-#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5))
-#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5)
-#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5)
-#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3))
-#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3)
-#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1))
-#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1)
-#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0)
-
-#define SYS_FRONT_PORT_MODE_RSZ 0x4
-
-#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0)
-
-#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
-#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0))
-#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0)
-
-#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10))
-#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10)
-#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10)
-#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0))
-#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0)
-
-#define SYS_SW_STATUS_RSZ 0x4
-
-#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0)
-
-#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1)
-#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0)
-
-#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4
-
-#define SYS_REW_MAC_LOW_CFG_RSZ 0x4
-
-#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6))
-#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6)
-#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6)
-#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0))
-#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0)
-
-#define SYS_PAUSE_CFG_RSZ 0x4
-
-#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10))
-#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10)
-#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10)
-#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1))
-#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1)
-#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1)
-#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
-
-#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9))
-#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9)
-#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9)
-#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0))
-#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0)
-
-#define SYS_ATOP_RSZ 0x4
-
-#define SYS_MAC_FC_CFG_RSZ 0x4
-
-#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26))
-#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26)
-#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26)
-#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20))
-#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20)
-#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20)
-#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
-#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
-#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
-#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0))
-#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0)
-
-#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16))
-#define SYS_MMGT_RELCNT_M GENMASK(31, 16)
-#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0))
-#define SYS_MMGT_FREECNT_M GENMASK(15, 0)
-
-#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4))
-#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4)
-#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0))
-#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0)
-
-#define SYS_EVENTS_DIF_RSZ 0x4
-
-#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & GENMASK(8, 6))
-#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6)
-#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0))
-#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0)
-
-#define SYS_EVENTS_CORE_EV_FWR BIT(2)
-#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0))
-#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0)
-
-#define SYS_CNT_GSZ 0x4
-
-#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29)
-#define SYS_PTP_STATUS_PTP_OVFL BIT(28)
-#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27)
-#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21))
-#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21)
-#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21)
-#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16))
-#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16)
-#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16)
-#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0))
-#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0)
-
-#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0))
-#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0)
-#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31)
-
-#define SYS_PTP_NXT_PTP_NXT BIT(0)
-
-#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2))
-#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2)
-#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2)
-#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0))
-#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0)
-
-#define SYS_RAM_INIT_RAM_INIT BIT(1)
-#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0)
-
-#endif
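
The register macros deleted above all follow one triplet convention: FIELD(x) shifts and masks a value into register position, FIELD_M is the in-register mask, and FIELD_X(x) extracts the field back out. Using the SYS_MAC_FC_CFG definitions just removed, a round-trip looks like this (values arbitrary):

    u32 val, speed;

    /* pack: 2-bit link-speed field plus a flag bit */
    val = SYS_MAC_FC_CFG_FC_LINK_SPEED(2) | SYS_MAC_FC_CFG_TX_FC_ENA;

    /* unpack: mask and shift the field back down */
    speed = SYS_MAC_FC_CFG_FC_LINK_SPEED_X(val);    /* speed == 2 */
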
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 16a6db71ca5e..a4f7fbd76507 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -9,17 +9,19 @@
#include "ocelot_ace.h"
#include <net/pkt_cls.h>
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
struct tc_cls_matchall_offload *f,
bool ingress)
{
struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
struct flow_action_entry *action;
+ int port = priv->chip_port;
int err;
- netdev_dbg(port->dev, "%s: port %u command %d cookie %lu\n",
- __func__, port->chip_port, f->command, f->cookie);
+ netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n",
+ __func__, port, f->command, f->cookie);
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
@@ -34,7 +36,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
return -EOPNOTSUPP;
}
- if (port->tc.block_shared) {
+ if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
"Rate limit is not supported on shared blocks");
return -EOPNOTSUPP;
@@ -47,7 +49,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
return -EOPNOTSUPP;
}
- if (port->tc.police_id && port->tc.police_id != f->cookie) {
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
"Only one policer per port is supported\n");
return -EEXIST;
@@ -58,27 +60,27 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
PSCHED_NS2TICKS(action->police.burst),
PSCHED_TICKS_PER_SEC);
- err = ocelot_port_policer_add(port, &pol);
+ err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
return err;
}
- port->tc.police_id = f->cookie;
- port->tc.offload_cnt++;
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
return 0;
case TC_CLSMATCHALL_DESTROY:
- if (port->tc.police_id != f->cookie)
+ if (priv->tc.police_id != f->cookie)
return -ENOENT;
- err = ocelot_port_policer_del(port);
+ err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Could not delete policer\n");
return err;
}
- port->tc.police_id = 0;
- port->tc.offload_cnt--;
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
return 0;
case TC_CLSMATCHALL_STATS: /* fall through */
default:
@@ -90,21 +92,21 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
void *type_data,
void *cb_priv, bool ingress)
{
- struct ocelot_port *port = cb_priv;
+ struct ocelot_port_private *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(port->dev, type_data))
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
- netdev_dbg(port->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
+ netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
ingress ? "ingress" : "egress");
- return ocelot_setup_tc_cls_matchall(port, type_data, ingress);
+ return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
return 0;
default:
- netdev_dbg(port->dev, "tc_block_cb: type %d %s\n",
+ netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n",
type,
ingress ? "ingress" : "egress");
@@ -130,19 +132,19 @@ static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
static LIST_HEAD(ocelot_block_cb_list);
-static int ocelot_setup_tc_block(struct ocelot_port *port,
+static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
int err;
- netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
+ netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n",
f->command, f->binder_type);
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
- port->tc.block_shared = f->block_shared;
+ priv->tc.block_shared = f->block_shared;
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = ocelot_setup_tc_block_cb_eg;
} else {
@@ -153,14 +155,14 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
switch (f->command) {
case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
+ if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
return -EBUSY;
- block_cb = flow_block_cb_alloc(cb, port, port, NULL);
+ block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
- err = ocelot_setup_tc_block_flower_bind(port, f);
+ err = ocelot_setup_tc_block_flower_bind(priv, f);
if (err < 0) {
flow_block_cb_free(block_cb);
return err;
@@ -169,11 +171,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
- block_cb = flow_block_cb_lookup(f->block, cb, port);
+ block_cb = flow_block_cb_lookup(f->block, cb, priv);
if (!block_cb)
return -ENOENT;
- ocelot_setup_tc_block_flower_unbind(port, f);
+ ocelot_setup_tc_block_flower_unbind(priv, f);
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
@@ -185,11 +187,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
switch (type) {
case TC_SETUP_BLOCK:
- return ocelot_setup_tc_block(port, type_data);
+ return ocelot_setup_tc_block(priv, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 5afcb3c4c2ef..c80bb83c8ac9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3952,7 +3952,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta1, *meta2;
- const s32 exp_mask[] = {
+ static const s32 exp_mask[] = {
[BPF_B] = 0x000000ffU,
[BPF_H] = 0x0000ffffU,
[BPF_W] = 0xffffffffU,
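
Adding static to exp_mask is a small but real win: a plain const array at function scope may be rebuilt on the stack at each call, while static const places it once in read-only data. The idiom in isolation (a sketch, not driver code; the byte-size indexing is an illustrative simplification):

    static u32 bpf_size_mask(unsigned int size)
    {
        static const u32 masks[] = {
            [1] = 0x000000ffU,    /* byte  */
            [2] = 0x0000ffffU,    /* half  */
            [4] = 0xffffffffU,    /* word  */
        };

        return size < ARRAY_SIZE(masks) ? masks[size] : 0;
    }
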
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 88fab6a82acf..95a0d3910e31 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -46,9 +46,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
/* Grab a single ref to the map for our record. The prog destroy ndo
* happens after free_used_maps().
*/
- map = bpf_map_inc(map, false);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ bpf_map_inc(map);
record = kmalloc(sizeof(*record), GFP_KERNEL);
if (!record) {
@@ -460,8 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return -EINVAL;
rcu_read_lock();
- record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
- nfp_bpf_maps_neutral_params);
+ record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
+ nfp_bpf_maps_neutral_params);
if (!record || map_id_full > U32_MAX) {
rcu_read_unlock();
cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 61aabffc8888..bcdcd6de7dea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -872,7 +872,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
/* jump forward, a TX may have gotten lost, need to sync TX */
if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
- tls_offload_tx_resync_request(nskb->sk);
+ tls_offload_tx_resync_request(nskb->sk, seq,
+ ntls->next_seq);
*nr_frags = 0;
return nskb;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2761f3a3ae50..49c7987c2abd 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1346,10 +1346,9 @@ static int nixge_probe(struct platform_device *pdev)
}
}
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)priv->phy_mode < 0) {
+ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (err) {
 netdev_err(ndev, "could not find \"phy-mode\" property\n");
- err = -EINVAL;
goto unregister_mdio;
}
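
This hunk adapts nixge to the reworked of_get_phy_mode(), which now reports success or failure through its return value and hands the parsed mode back via a phy_interface_t out-parameter, instead of overloading a single return value for both. The new calling convention in general form:

    phy_interface_t mode;
    int err;

    err = of_get_phy_mode(np, &mode);
    if (err)    /* property missing or unrecognized */
        mode = PHY_INTERFACE_MODE_NA;
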
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 05d2b478c99b..6b54cb3b681d 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2225,6 +2225,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct nv_skb_map *prev_tx_ctx;
struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
unsigned long flags;
+ netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -2240,7 +2241,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_BUSY;
+
+ /* When normal and/or xmit_more packets have filled the Tx
+ * descriptor ring, the NIC Tx doorbell must still be kicked
+ * so the already-queued descriptors drain.
+ */
+ ret = NETDEV_TX_BUSY;
+ goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
@@ -2259,7 +2265,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
@@ -2305,7 +2314,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
@@ -2357,8 +2369,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- return NETDEV_TX_OK;
+txkick:
+ if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+ u32 txrxctl_kick;
+dma_error:
+ txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+ writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+ }
+
+ return ret;
}
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
@@ -2381,6 +2400,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
struct nv_skb_map *start_tx_ctx = NULL;
struct nv_skb_map *tmp_tx_ctx = NULL;
unsigned long flags;
+ netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -2396,7 +2416,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_BUSY;
+
+ /* When normal and/or xmit_more packets have filled the Tx
+ * descriptor ring, the NIC Tx doorbell must still be kicked
+ * so the already-queued descriptors drain.
+ */
+ ret = NETDEV_TX_BUSY;
+
+ goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
@@ -2416,7 +2442,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
@@ -2463,7 +2492,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
@@ -2542,8 +2574,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
spin_unlock_irqrestore(&np->lock, flags);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- return NETDEV_TX_OK;
+txkick:
+ if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+ u32 txrxctl_kick;
+dma_error:
+ txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+ writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+ }
+
+ return ret;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
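
The restructured exit paths above exist to honor netdev_xmit_more(): when the stack signals that more skbs are about to be queued, the doorbell write is skipped so a whole batch costs one MMIO kick, and the kick is forced whenever the queue has stopped (including the NETDEV_TX_BUSY path) so descriptors already on the ring still drain. The deferred-kick condition in isolation:

    /* Kick the NIC only at the end of a batch, or when the queue
     * stopped and the already-queued descriptors must still drain.
     */
    if (netif_queue_stopped(dev) || !netdev_xmit_more())
        writel(NVREG_TXRXCTL_KICK | np->txrxctl_bits,
               get_hwbase(dev) + NvRegTxRxControl);
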
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 544012a67221..ebb81d6d4ca1 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -391,6 +392,7 @@ struct rx_status_t {
struct netdata_local {
struct platform_device *pdev;
struct net_device *ndev;
+ struct device_node *phy_node;
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
@@ -749,22 +751,26 @@ static void lpc_handle_link_change(struct net_device *ndev)
static int lpc_mii_probe(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = phy_find_first(pldat->mii_bus);
-
- if (!phydev) {
- netdev_err(ndev, "no PHY found\n");
- return -ENODEV;
- }
+ struct phy_device *phydev;
/* Attach to the PHY */
if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
netdev_info(ndev, "using MII interface\n");
else
netdev_info(ndev, "using RMII interface\n");
+
+ if (pldat->phy_node)
+ phydev = of_phy_find_device(pldat->phy_node);
+ else
+ phydev = phy_find_first(pldat->mii_bus);
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+
phydev = phy_connect(ndev, phydev_name(phydev),
&lpc_handle_link_change,
lpc_phy_interface_mode(&pldat->pdev->dev));
-
if (IS_ERR(phydev)) {
netdev_err(ndev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
@@ -783,6 +789,7 @@ static int lpc_mii_probe(struct net_device *ndev)
static int lpc_mii_init(struct netdata_local *pldat)
{
+ struct device_node *node;
int err = -ENXIO;
pldat->mii_bus = mdiobus_alloc();
@@ -812,7 +819,10 @@ static int lpc_mii_init(struct netdata_local *pldat)
platform_set_drvdata(pldat->pdev, pldat->mii_bus);
- if (mdiobus_register(pldat->mii_bus))
+ node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
+ err = of_mdiobus_register(pldat->mii_bus, node);
+ of_node_put(node);
+ if (err)
goto err_out_unregister_bus;
if (lpc_mii_probe(pldat->ndev) != 0)
@@ -1345,6 +1355,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
pldat->dma_buff_base_v);
+ pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
/* Get MAC address from current HW setting (POR state is all zeros) */
__lpc_get_mac(pldat, ndev->dev_addr);
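
With the phy-handle parsing above, PHY discovery becomes: use the exact PHY the device tree points at when the property exists, and only fall back to scanning the freshly registered MDIO bus otherwise. Condensed (a sketch; error handling and node reference management trimmed):

    /* probe time: may be NULL if the property is absent */
    pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);

    /* mii probe time: explicit handle first, bus scan as fallback */
    phydev = pldat->phy_node ? of_phy_find_device(pldat->phy_node)
                             : phy_find_first(pldat->mii_bus);
    if (!phydev)
        return -ENODEV;
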
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 7a7060677f15..98e102af7756 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,7 +12,7 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.15.0-k"
+#define IONIC_DRV_VERSION "0.18.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
@@ -46,6 +46,8 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
struct notifier_block nb;
+ struct timer_list watchdog_timer;
+ int watchdog_period;
};
struct ionic_admin_ctx {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d168a6435322..5f9d2ec70446 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -11,6 +11,16 @@
#include "ionic_dev.h"
#include "ionic_lif.h"
+static void ionic_watchdog_cb(struct timer_list *t)
+{
+ struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
+ ionic_heartbeat_check(ionic);
+}
+
void ionic_init_devinfo(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -72,6 +82,11 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
+ timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
+ ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -80,10 +95,53 @@ int ionic_dev_setup(struct ionic *ionic)
void ionic_dev_teardown(struct ionic *ionic)
{
- /* place holder */
+ del_timer_sync(&ionic->watchdog_timer);
}
/* Devcmd Interface */
+int ionic_heartbeat_check(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ unsigned long hb_time;
+ u32 fw_status;
+ u32 hb;
+
+ /* don't recheck until at least one watchdog period has elapsed */
+ hb_time = jiffies;
+ if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
+ return 0;
+
+ /* firmware is useful only if fw_status is non-zero */
+ fw_status = ioread32(&idev->dev_info_regs->fw_status);
+ if (!fw_status)
+ return -ENXIO;
+
+ /* early FW has no heartbeat, else FW will return non-zero */
+ hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
+ if (!hb)
+ return 0;
+
+ /* are we stalled? */
+ if (hb == idev->last_hb) {
+ /* only complain once for each stall seen */
+ if (idev->last_hb_time != 1) {
+ dev_info(ionic->dev, "FW heartbeat stalled at %d\n",
+ idev->last_hb);
+ idev->last_hb_time = 1;
+ }
+
+ return -ENXIO;
+ }
+
+ if (idev->last_hb_time == 1)
+ dev_info(ionic->dev, "FW heartbeat restored at %d\n", hb);
+
+ idev->last_hb = hb;
+ idev->last_hb_time = hb_time;
+
+ return 0;
+}
+
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
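
The watchdog added here is the standard self-rearming timer_list pattern: timer_setup() binds the callback at init, the callback recovers its container with from_timer() and re-queues itself with mod_timer() before running the heartbeat check, and teardown must use del_timer_sync() so an in-flight callback cannot outlive the device. Its lifecycle, condensed from the hunks above:

    /* init */
    timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
    mod_timer(&ionic->watchdog_timer,
              round_jiffies(jiffies + ionic->watchdog_period));

    /* teardown: cancel and wait out any running callback */
    del_timer_sync(&ionic->watchdog_timer);
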
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 9610aeb7d5f4..4665c5dc5324 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -16,6 +16,7 @@
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
+#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
#define IONIC_DEV_CMD_REG_VERSION 1
@@ -123,6 +124,9 @@ struct ionic_dev {
union ionic_dev_info_regs __iomem *dev_info_regs;
union ionic_dev_cmd_regs __iomem *dev_cmd_regs;
+ unsigned long last_hb_time;
+ u32 last_hb;
+
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -151,12 +155,19 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
+struct ionic_page_info {
+ struct page *page;
+ dma_addr_t dma_addr;
+};
+
struct ionic_desc_info {
void *desc;
void *sg_desc;
struct ionic_desc_info *next;
unsigned int index;
unsigned int left;
+ unsigned int npages;
+ struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
ionic_desc_cb cb;
void *cb_arg;
};
@@ -295,5 +306,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
+int ionic_heartbeat_check(struct ionic *ionic);
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index af1647afa4e8..6fb27dcc5787 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -19,31 +19,30 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
if (err)
- goto info_out;
+ return err;
err = devlink_info_version_running_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW,
idev->dev_info.fw_version);
if (err)
- goto info_out;
+ return err;
snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type);
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
buf);
if (err)
- goto info_out;
+ return err;
snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev);
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
buf);
if (err)
- goto info_out;
+ return err;
err = devlink_info_serial_number_put(req, idev->dev_info.serial_num);
-info_out:
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 7d10265f782a..f778fff034f5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -254,12 +254,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
struct ionic_dev *idev;
- u32 req_rs, req_fc;
- u8 fec_type;
int err = 0;
idev = &lif->ionic->idev;
- fec_type = IONIC_PORT_FEC_TYPE_NONE;
/* set autoneg */
if (ks->base.autoneg != idev->port_info->config.an_enable) {
@@ -281,29 +278,6 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
return err;
}
- /* set FEC */
- req_rs = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_RS);
- req_fc = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_BASER);
- if (req_rs && req_fc) {
- netdev_info(netdev, "Only select one FEC mode at a time\n");
- return -EINVAL;
- } else if (req_fc) {
- fec_type = IONIC_PORT_FEC_TYPE_FC;
- } else if (req_rs) {
- fec_type = IONIC_PORT_FEC_TYPE_RS;
- } else if (!(req_rs | req_fc)) {
- fec_type = IONIC_PORT_FEC_TYPE_NONE;
- }
-
- if (fec_type != idev->port_info->config.fec_type) {
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_port_fec(idev, fec_type);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
- mutex_unlock(&ionic->dev_cmd_lock);
- if (err)
- return err;
- }
-
return 0;
}
@@ -353,6 +327,70 @@ static int ionic_set_pauseparam(struct net_device *netdev,
return 0;
}
+static int ionic_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ switch (lif->ionic->idev.port_info->config.fec_type) {
+ case IONIC_PORT_FEC_TYPE_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case IONIC_PORT_FEC_TYPE_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ case IONIC_PORT_FEC_TYPE_FC:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ }
+
+ fec->fec = ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;
+
+ return 0;
+}
+
+static int ionic_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ u8 fec_type;
+ int ret = 0;
+
+ if (lif->ionic->idev.port_info->config.an_enable) {
+ netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n");
+ return -EINVAL;
+ }
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_NONE:
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ break;
+ case ETHTOOL_FEC_OFF:
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_type = IONIC_PORT_FEC_TYPE_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_type = IONIC_PORT_FEC_TYPE_FC;
+ break;
+ case ETHTOOL_FEC_AUTO:
+ default:
+ netdev_err(netdev, "FEC request 0x%04x not supported\n",
+ fec->fec);
+ return -EINVAL;
+ }
+
+ if (fec_type != lif->ionic->idev.port_info->config.fec_type) {
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_fec(&lif->ionic->idev, fec_type);
+ ret = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&lif->ionic->dev_cmd_lock);
+ }
+
+ return ret;
+}
+
static int ionic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coalesce)
{
@@ -372,7 +410,6 @@ static int ionic_set_coalesce(struct net_device *netdev,
struct ionic_identity *ident;
struct ionic_qcq *qcq;
unsigned int i;
- u32 usecs;
u32 coal;
if (coalesce->rx_max_coalesced_frames ||
@@ -410,26 +447,27 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ /* Convert the usec request to a HW-usable value. If they asked
+ * for non-zero and it resolved to zero, bump it up
+ */
coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
-
- if (coal > IONIC_INTR_CTRL_COAL_MAX)
- return -ERANGE;
-
- /* If they asked for non-zero and it resolved to zero, bump it up */
if (!coal && coalesce->rx_coalesce_usecs)
coal = 1;
- /* Convert it back to get device resolution */
- usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+ if (coal > IONIC_INTR_CTRL_COAL_MAX)
+ return -ERANGE;
- if (usecs != lif->rx_coalesce_usecs) {
- lif->rx_coalesce_usecs = usecs;
+ /* Save the new value */
+ lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
+ if (coal != lif->rx_coalesce_hw) {
+ lif->rx_coalesce_hw = coal;
if (test_bit(IONIC_LIF_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index, coal);
+ qcq->intr.index,
+ lif->rx_coalesce_hw);
}
}
}
@@ -453,6 +491,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
bool running;
+ int err;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -470,8 +509,9 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = test_bit(IONIC_LIF_UP, lif->state);
if (running)
@@ -504,6 +544,7 @@ static int ionic_set_channels(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
bool running;
+ int err;
if (!ch->combined_count || ch->other_count ||
ch->rx_count || ch->tx_count)
@@ -512,8 +553,9 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = test_bit(IONIC_LIF_UP, lif->state);
if (running)
@@ -747,6 +789,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_regs = ionic_get_regs,
.get_link = ethtool_op_get_link,
.get_link_ksettings = ionic_get_link_ksettings,
+ .set_link_ksettings = ionic_set_link_ksettings,
.get_coalesce = ionic_get_coalesce,
.set_coalesce = ionic_set_coalesce,
.get_ringparam = ionic_get_ringparam,
@@ -769,7 +812,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_module_eeprom = ionic_get_module_eeprom,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
- .set_link_ksettings = ionic_set_link_ksettings,
+ .get_fecparam = ionic_get_fecparam,
+ .set_fecparam = ionic_set_fecparam,
.nway_reset = ionic_nway_reset,
};
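
Moving FEC control out of set_link_ksettings and into dedicated fecparam ops matches how userspace drives it; once these hooks are registered, the mode can be inspected and set with standard ethtool (interface name illustrative):

    # ethtool --show-fec eth0
    # ethtool --set-fec eth0 encoding rs
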
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 5bfdda19f64d..dbdb7c5ae8f1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -111,7 +111,7 @@ struct ionic_admin_cmd {
};
/**
- * struct admin_comp - General admin command completion format
+ * struct ionic_admin_comp - General admin command completion format
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -134,7 +134,7 @@ static inline u8 color_match(u8 color, u8 done_color)
}
/**
- * struct nop_cmd - NOP command
+ * struct ionic_nop_cmd - NOP command
* @opcode: opcode
*/
struct ionic_nop_cmd {
@@ -143,7 +143,7 @@ struct ionic_nop_cmd {
};
/**
- * struct nop_comp - NOP command completion
+ * struct ionic_nop_comp - NOP command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_nop_comp {
@@ -152,7 +152,7 @@ struct ionic_nop_comp {
};
/**
- * struct dev_init_cmd - Device init command
+ * struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
* @type: device type
*/
@@ -172,7 +172,7 @@ struct ionic_dev_init_comp {
};
/**
- * struct dev_reset_cmd - Device reset command
+ * struct ionic_dev_reset_cmd - Device reset command
* @opcode: opcode
*/
struct ionic_dev_reset_cmd {
@@ -192,7 +192,7 @@ struct ionic_dev_reset_comp {
#define IONIC_IDENTITY_VERSION_1 1
/**
- * struct dev_identify_cmd - Driver/device identify command
+ * struct ionic_dev_identify_cmd - Driver/device identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
*/
@@ -284,7 +284,7 @@ enum ionic_lif_type {
};
/**
- * struct lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - lif identify command
* @opcode: opcode
* @type: lif type (enum lif_type)
* @ver: version of identify returned by device
@@ -297,7 +297,7 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct lif_identify_comp - lif identify command completion
+ * struct ionic_lif_identify_comp - lif identify command completion
* @status: status of the command (enum status_code)
* @ver: version of identify returned by device
*/
@@ -325,7 +325,7 @@ enum ionic_logical_qtype {
};
/**
- * struct lif_logical_qtype - Descriptor of logical to hardware queue type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
* @qtype: Hardware Queue Type.
* @qid_count: Number of Queue IDs of the logical type.
* @qid_base: Minimum Queue ID of the logical type.
@@ -349,7 +349,7 @@ enum ionic_lif_state {
* @name: lif name
* @mtu: mtu
* @mac: station mac address
- * @features: features (enum eth_hw_features)
+ * @features: features (enum ionic_eth_hw_features)
* @queue_count: queue counts per queue-type
*/
union ionic_lif_config {
@@ -367,7 +367,7 @@ union ionic_lif_config {
};
/**
- * struct lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - lif identity information (type-specific)
*
* @capabilities LIF capabilities
*
@@ -441,11 +441,11 @@ union ionic_lif_identity {
};
/**
- * struct lif_init_cmd - LIF init command
+ * struct ionic_lif_init_cmd - LIF init command
* @opcode: opcode
* @type: LIF type (enum lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct lif_info)
+ * @info_pa: destination address for lif info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -457,7 +457,7 @@ struct ionic_lif_init_cmd {
};
/**
- * struct lif_init_comp - LIF init command completion
+ * struct ionic_lif_init_comp - LIF init command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_lif_init_comp {
@@ -468,7 +468,7 @@ struct ionic_lif_init_comp {
};
/**
- * struct q_init_cmd - Queue init command
+ * struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
* @ver: Queue version (defines opcode/descriptor scope)
@@ -525,7 +525,7 @@ struct ionic_q_init_cmd {
};
/**
- * struct q_init_comp - Queue init command completion
+ * struct ionic_q_init_comp - Queue init command completion
* @status: The status of the command (enum status_code)
* @ver: Queue version (defines opcode/descriptor scope)
* @comp_index: The index in the descriptor ring for which this
@@ -556,7 +556,7 @@ enum ionic_txq_desc_opcode {
};
/**
- * struct txq_desc - Ethernet Tx queue descriptor format
+ * struct ionic_txq_desc - Ethernet Tx queue descriptor format
* @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
@@ -735,7 +735,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
#define IONIC_RX_MAX_SG_ELEMS 8
/**
- * struct txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
@@ -748,7 +748,7 @@ struct ionic_txq_sg_desc {
};
/**
- * struct txq_comp - Ethernet transmit queue completion descriptor
+ * struct ionic_txq_comp - Ethernet transmit queue completion descriptor
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -768,7 +768,7 @@ enum ionic_rxq_desc_opcode {
};
/**
- * struct rxq_desc - Ethernet Rx queue descriptor format
+ * struct ionic_rxq_desc - Ethernet Rx queue descriptor format
* @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
*
* RXQ_DESC_OPCODE_SIMPLE:
@@ -789,7 +789,7 @@ struct ionic_rxq_desc {
};
/**
- * struct rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
@@ -802,7 +802,7 @@ struct ionic_rxq_sg_desc {
};
/**
- * struct rxq_comp - Ethernet receive queue completion descriptor
+ * struct ionic_rxq_comp - Ethernet receive queue completion descriptor
* @status: The status of the command (enum status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
* @comp_index: The index in the descriptor ring for which this
@@ -896,7 +896,7 @@ enum ionic_eth_hw_features {
};
/**
- * struct q_control_cmd - Queue control command
+ * struct ionic_q_control_cmd - Queue control command
* @opcode: opcode
* @type: Queue type
* @lif_index: LIF index
@@ -1033,8 +1033,8 @@ enum ionic_port_loopback_mode {
/**
* Transceiver Status information
- * @state: Transceiver status (enum xcvr_state)
- * @phy: Physical connection type (enum phy_type)
+ * @state: Transceiver status (enum ionic_xcvr_state)
+ * @phy: Physical connection type (enum ionic_phy_type)
* @pid: Transceiver link mode (enum pid)
* @sprom: Transceiver sprom contents
*/
@@ -1051,9 +1051,9 @@ struct ionic_xcvr_status {
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
* @an_enable: autoneg enable
- * @fec_type: fec type (enum port_fec_type)
- * @pause_type: pause type (enum port_pause_type)
- * @loopback_mode: loopback mode (enum port_loopback_mode)
+ * @fec_type: fec type (enum ionic_port_fec_type)
+ * @pause_type: pause type (enum ionic_port_pause_type)
+ * @loopback_mode: loopback mode (enum ionic_port_loopback_mode)
*/
union ionic_port_config {
struct {
@@ -1080,7 +1080,7 @@ union ionic_port_config {
/**
* Port Status information
- * @status: link status (enum port_oper_status)
+ * @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
 * @xcvr:              transceiver status
@@ -1094,7 +1094,7 @@ struct ionic_port_status {
};
/**
- * struct port_identify_cmd - Port identify command
+ * struct ionic_port_identify_cmd - Port identify command
* @opcode: opcode
* @index: port index
* @ver: Highest version of identify supported by driver
@@ -1107,7 +1107,7 @@ struct ionic_port_identify_cmd {
};
/**
- * struct port_identify_comp - Port identify command completion
+ * struct ionic_port_identify_comp - Port identify command completion
* @status: The status of the command (enum status_code)
* @ver: Version of identify returned by device
*/
@@ -1118,10 +1118,10 @@ struct ionic_port_identify_comp {
};
/**
- * struct port_init_cmd - Port initialization command
+ * struct ionic_port_init_cmd - Port initialization command
* @opcode: opcode
* @index: port index
- * @info_pa: destination address for port info (struct port_info)
+ * @info_pa: destination address for port info (struct ionic_port_info)
*/
struct ionic_port_init_cmd {
u8 opcode;
@@ -1132,7 +1132,7 @@ struct ionic_port_init_cmd {
};
/**
- * struct port_init_comp - Port initialization command completion
+ * struct ionic_port_init_comp - Port initialization command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_port_init_comp {
@@ -1141,7 +1141,7 @@ struct ionic_port_init_comp {
};
/**
- * struct port_reset_cmd - Port reset command
+ * struct ionic_port_reset_cmd - Port reset command
* @opcode: opcode
* @index: port index
*/
@@ -1152,7 +1152,7 @@ struct ionic_port_reset_cmd {
};
/**
- * struct port_reset_comp - Port reset command completion
+ * struct ionic_port_reset_comp - Port reset command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_port_reset_comp {
@@ -1183,7 +1183,7 @@ enum ionic_port_attr {
};
/**
- * struct port_setattr_cmd - Set port attributes on the NIC
+ * struct ionic_port_setattr_cmd - Set port attributes on the NIC
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
@@ -1207,7 +1207,7 @@ struct ionic_port_setattr_cmd {
};
/**
- * struct port_setattr_comp - Port set attr command completion
+ * struct ionic_port_setattr_comp - Port set attr command completion
* @status: The status of the command (enum status_code)
* @color: Color bit
*/
@@ -1218,7 +1218,7 @@ struct ionic_port_setattr_comp {
};
/**
- * struct port_getattr_cmd - Get port attributes from the NIC
+ * struct ionic_port_getattr_cmd - Get port attributes from the NIC
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
@@ -1231,7 +1231,7 @@ struct ionic_port_getattr_cmd {
};
/**
- * struct port_getattr_comp - Port get attr command completion
+ * struct ionic_port_getattr_comp - Port get attr command completion
* @status: The status of the command (enum status_code)
* @color: Color bit
*/
@@ -1252,10 +1252,10 @@ struct ionic_port_getattr_comp {
};
/**
- * struct lif_status - Lif status register
+ * struct ionic_lif_status - Lif status register
* @eid: most recent NotifyQ event id
* @port_num: port the lif is connected to
- * @link_status: port status (enum port_oper_status)
+ * @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
* @link_down_count: number of times link status changes
*/
@@ -1270,7 +1270,7 @@ struct ionic_lif_status {
};
/**
- * struct lif_reset_cmd - LIF reset command
+ * struct ionic_lif_reset_cmd - LIF reset command
* @opcode: opcode
* @index: LIF index
*/
@@ -1290,7 +1290,7 @@ enum ionic_dev_state {
};
/**
- * enum dev_attr - List of device attributes
+ * enum ionic_dev_attr - List of device attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1299,10 +1299,10 @@ enum ionic_dev_attr {
};
/**
- * struct dev_setattr_cmd - Set Device attributes on the NIC
+ * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
* @opcode: Opcode
- * @attr: Attribute type (enum dev_attr)
- * @state: Device state (enum dev_state)
+ * @attr: Attribute type (enum ionic_dev_attr)
+ * @state: Device state (enum ionic_dev_state)
* @name: The bus info, e.g. PCI slot-device-function, 0 terminated
* @features: Device features
*/
@@ -1319,7 +1319,7 @@ struct ionic_dev_setattr_cmd {
};
/**
- * struct dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_setattr_comp - Device set attr command completion
* @status: The status of the command (enum status_code)
* @features: Device features
* @color: Color bit
@@ -1335,9 +1335,9 @@ struct ionic_dev_setattr_comp {
};
/**
- * struct dev_getattr_cmd - Get Device attributes from the NIC
+ * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
* @opcode: opcode
- * @attr: Attribute type (enum dev_attr)
+ * @attr: Attribute type (enum ionic_dev_attr)
*/
struct ionic_dev_getattr_cmd {
u8 opcode;
@@ -1346,7 +1346,7 @@ struct ionic_dev_getattr_cmd {
};
/**
- * struct dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_setattr_comp - Device set attr command completion
* @status: The status of the command (enum status_code)
* @features: Device features
* @color: Color bit
@@ -1376,7 +1376,7 @@ enum ionic_rss_hash_types {
};
/**
- * enum lif_attr - List of LIF attributes
+ * enum ionic_lif_attr - List of LIF attributes
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1389,15 +1389,15 @@ enum ionic_lif_attr {
};
/**
- * struct lif_setattr_cmd - Set LIF attributes on the NIC
+ * struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum lif_attr)
+ * @type: Attribute type (enum ionic_lif_attr)
* @index: LIF index
* @state: lif state (enum lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
- * @features: Features (enum eth_hw_features)
+ * @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
* @types: The hash types to enable (see rss_hash_types).
* @key: The hash secret key.
@@ -1426,11 +1426,11 @@ struct ionic_lif_setattr_cmd {
};
/**
- * struct lif_setattr_comp - LIF set attr command completion
+ * struct ionic_lif_setattr_comp - LIF set attr command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
- * @features: features (enum eth_hw_features)
+ * @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
struct ionic_lif_setattr_comp {
@@ -1445,9 +1445,9 @@ struct ionic_lif_setattr_comp {
};
/**
- * struct lif_getattr_cmd - Get LIF attributes from the NIC
+ * struct ionic_lif_getattr_cmd - Get LIF attributes from the NIC
* @opcode: Opcode
- * @attr: Attribute type (enum lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
*/
struct ionic_lif_getattr_cmd {
@@ -1458,7 +1458,7 @@ struct ionic_lif_getattr_cmd {
};
/**
- * struct lif_getattr_comp - LIF get attr command completion
+ * struct ionic_lif_getattr_comp - LIF get attr command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -1466,7 +1466,7 @@ struct ionic_lif_getattr_cmd {
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
- * @features: Features (enum eth_hw_features)
+ * @features: Features (enum ionic_eth_hw_features)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1492,7 +1492,7 @@ enum ionic_rx_mode {
};
/**
- * struct rx_mode_set_cmd - Set LIF's Rx mode command
+ * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
@@ -1519,7 +1519,7 @@ enum ionic_rx_filter_match_type {
};
/**
- * struct rx_filter_add_cmd - Add LIF Rx filter command
+ * struct ionic_rx_filter_add_cmd - Add LIF Rx filter command
* @opcode: opcode
* @qtype: Queue type
* @lif_index: LIF index
@@ -1550,7 +1550,7 @@ struct ionic_rx_filter_add_cmd {
};
/**
- * struct rx_filter_add_comp - Add LIF Rx filter command completion
+ * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -1567,7 +1567,7 @@ struct ionic_rx_filter_add_comp {
};
/**
- * struct rx_filter_del_cmd - Delete LIF Rx filter command
+ * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
* @opcode: opcode
* @lif_index: LIF index
* @filter_id: Filter ID
@@ -1583,7 +1583,7 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
/**
- * struct qos_identify_cmd - QoS identify command
+ * struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
*
@@ -1595,7 +1595,7 @@ struct ionic_qos_identify_cmd {
};
/**
- * struct qos_identify_comp - QoS identify command completion
+ * struct ionic_qos_identify_comp - QoS identify command completion
* @status: The status of the command (enum status_code)
* @ver: Version of identify returned by device
*/
@@ -1610,7 +1610,7 @@ struct ionic_qos_identify_comp {
#define IONIC_QOS_DSCP_MAX_VALUES 64
/**
- * enum qos_class
+ * enum ionic_qos_class
*/
enum ionic_qos_class {
IONIC_QOS_CLASS_DEFAULT = 0,
@@ -1623,7 +1623,7 @@ enum ionic_qos_class {
};
/**
- * enum qos_class_type - Traffic classification criteria
+ * enum ionic_qos_class_type - Traffic classification criteria
*/
enum ionic_qos_class_type {
IONIC_QOS_CLASS_TYPE_NONE = 0,
@@ -1632,7 +1632,7 @@ enum ionic_qos_class_type {
};
/**
- * enum qos_sched_type - Qos class scheduling type
+ * enum ionic_qos_sched_type - Qos class scheduling type
*/
enum ionic_qos_sched_type {
IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
@@ -1640,15 +1640,15 @@ enum ionic_qos_sched_type {
};
/**
- * union qos_config - Qos configuration structure
+ * union ionic_qos_config - Qos configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
* IONIC_QOS_CONFIG_F_DROP drop/nodrop
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum qos_sched_type)
- * @class_type: Qos class type (enum qos_class_type)
- * @pause_type: Qos pause type (enum qos_pause_type)
+ * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: Qos class type (enum ionic_qos_class_type)
+ * @pause_type: Qos pause type (enum ionic_qos_pause_type)
* @name: Qos class name
* @mtu: MTU of the class
* @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
@@ -1697,7 +1697,7 @@ union ionic_qos_config {
};
/**
- * union qos_identity - QoS identity structure
+ * union ionic_qos_identity - QoS identity structure
* @version: Version of the identify structure
* @type: QoS system type
* @nclasses: Number of usable QoS classes
@@ -1730,7 +1730,7 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - Qos config reset command
* @opcode: Opcode
*/
struct ionic_qos_reset_cmd {
@@ -1742,7 +1742,7 @@ struct ionic_qos_reset_cmd {
typedef struct ionic_admin_comp ionic_qos_reset_comp;
/**
- * struct fw_download_cmd - Firmware download command
+ * struct ionic_fw_download_cmd - Firmware download command
* @opcode: opcode
* @addr: dma address of the firmware buffer
* @offset: offset of the firmware buffer within the full image
@@ -1765,9 +1765,9 @@ enum ionic_fw_control_oper {
};
/**
- * struct fw_control_cmd - Firmware control command
+ * struct ionic_fw_control_cmd - Firmware control command
* @opcode: opcode
- * @oper: firmware control operation (enum fw_control_oper)
+ * @oper: firmware control operation (enum ionic_fw_control_oper)
* @slot: slot to activate
*/
struct ionic_fw_control_cmd {
@@ -1779,7 +1779,7 @@ struct ionic_fw_control_cmd {
};
/**
- * struct fw_control_comp - Firmware control copletion
+ * struct ionic_fw_control_comp - Firmware control completion
* @opcode: opcode
* @slot: slot where the firmware was installed
*/
@@ -1797,13 +1797,13 @@ struct ionic_fw_control_comp {
******************************************************************/
/**
- * struct rdma_reset_cmd - Reset RDMA LIF cmd
+ * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
* @lif_index: lif index
*
* There is no rdma specific dev command completion struct. Completion uses
- * the common struct admin_comp. Only the status is indicated. Nonzero status
- * means the LIF does not support rdma.
+ * the common struct ionic_admin_comp. Only the status is indicated.
+ * Nonzero status means the LIF does not support rdma.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1813,7 +1813,7 @@ struct ionic_rdma_reset_cmd {
};
/**
- * struct rdma_queue_cmd - Create RDMA Queue command
+ * struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
* @lif_index lif index
* @qid_ver: (qid | (rdma version << 24))
@@ -1839,7 +1839,7 @@ struct ionic_rdma_reset_cmd {
* memory registration.
*
* There is no rdma specific dev command completion struct. Completion uses
- * the common struct admin_comp. Only the status is indicated.
+ * the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
u8 opcode;
@@ -1860,7 +1860,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct notifyq_event
+ * struct ionic_notifyq_event
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1875,7 +1875,7 @@ struct ionic_notifyq_event {
};
/**
- * struct link_change_event
+ * struct ionic_link_change_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
@@ -1892,7 +1892,7 @@ struct ionic_link_change_event {
};
/**
- * struct reset_event
+ * struct ionic_reset_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_RESET
* @reset_code: reset type
@@ -1910,7 +1910,7 @@ struct ionic_reset_event {
};
/**
- * struct heartbeat_event
+ * struct ionic_heartbeat_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_HEARTBEAT
*
@@ -1923,7 +1923,7 @@ struct ionic_heartbeat_event {
};
/**
- * struct log_event
+ * struct ionic_log_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_LOG
* @data: log data
@@ -1937,7 +1937,7 @@ struct ionic_log_event {
};
/**
- * struct port_stats
+ * struct ionic_port_stats
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2067,7 +2067,7 @@ struct ionic_mgmt_port_stats {
};
/**
- * struct port_identity - port identity structure
+ * struct ionic_port_identity - port identity structure
* @version: identity structure version
* @type: type of port (enum port_type)
* @num_lanes: number of lanes for the port
@@ -2099,7 +2099,7 @@ union ionic_port_identity {
};
/**
- * struct port_info - port info structure
+ * struct ionic_port_info - port info structure
* @port_status: port status
* @port_stats: port stats
*/
@@ -2110,7 +2110,7 @@ struct ionic_port_info {
};
/**
- * struct lif_stats
+ * struct ionic_lif_stats
*/
struct ionic_lif_stats {
/* RX */
@@ -2264,7 +2264,7 @@ struct ionic_lif_stats {
};
/**
- * struct lif_info - lif info structure
+ * struct ionic_lif_info - lif info structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2357,7 +2357,7 @@ union ionic_dev_info_regs {
};
/**
- * union dev_cmd_regs - Device command register format (read-write)
+ * union ionic_dev_cmd_regs - Device command register format (read-write)
* @doorbell: Device Cmd Doorbell, write-only.
* Write a 1 to signal device to process cmd,
* poll done for completion.
@@ -2379,7 +2379,7 @@ union ionic_dev_cmd_regs {
};
/**
- * union dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
@@ -2433,7 +2433,7 @@ union ionic_adminq_comp {
#define IONIC_ASIC_TYPE_CAPRI 0
/**
- * struct doorbell - Doorbell register layout
+ * struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
* @ring: Selects the specific ring of the queue to update.
* Type-specific meaning:
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 20faa8d24c9f..60fd14df49d7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -244,6 +244,21 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
+static void ionic_lif_quiesce(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .attr = IONIC_LIF_ATTR_STATE,
+ .index = lif->index,
+ .state = IONIC_LIF_DISABLE
+ },
+ };
+
+ ionic_adminq_post_wait(lif, &ctx);
+}
+
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
@@ -609,12 +624,14 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
+ IONIC_QINIT_F_SG),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
.cq_ring_base = cpu_to_le64(cq->base_pa),
+ .sg_ring_base = cpu_to_le64(q->sg_base_pa),
},
};
int err;
@@ -1432,7 +1449,6 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
unsigned int flags;
unsigned int i;
int err = 0;
- u32 coal;
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
@@ -1448,21 +1464,22 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
}
- flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
- coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs);
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
lif->nrxq_descs,
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
- 0, lif->kern_pid, &lif->rxqcqs[i].qcq);
+ sizeof(struct ionic_rxq_sg_desc),
+ lif->kern_pid, &lif->rxqcqs[i].qcq);
if (err)
goto err_out;
lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[i].qcq->intr.index, coal);
+ lif->rxqcqs[i].qcq->intr.index,
+ lif->rx_coalesce_hw);
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
lif->txqcqs[i].qcq);
}
@@ -1592,6 +1609,7 @@ int ionic_stop(struct net_device *netdev)
netif_tx_disable(netdev);
ionic_txrx_disable(lif);
+ ionic_lif_quiesce(lif);
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
@@ -1621,8 +1639,9 @@ int ionic_reset_queues(struct ionic_lif *lif)
/* Put off the next watchdog timeout */
netif_trans_update(lif->netdev);
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = netif_running(lif->netdev);
if (running)
@@ -1641,7 +1660,6 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
struct net_device *netdev;
struct ionic_lif *lif;
int tbl_sz;
- u32 coal;
int err;
netdev = alloc_etherdev_mqs(sizeof(*lif),
@@ -1672,8 +1690,9 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
/* Convert the default coalesce value to actual hw resolution */
- coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
- lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
+ lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
+ lif->rx_coalesce_usecs);
snprintf(lif->name, sizeof(lif->name), "lif%u", index);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 6a95b42a8d8c..a55fd1f8c31b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -175,7 +175,9 @@ struct ionic_lif {
unsigned long *dbid_inuse;
unsigned int dbid_count;
struct dentry *dentry;
- u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs; /* what the user asked for */
+ u32 rx_coalesce_hw; /* what the hw is using */
+
u32 flags;
struct work_struct tx_timeout_work;
};
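Storing both values sidesteps a lossy round-trip: converting the user's microseconds to hardware units and back can drift, so the driver now reports rx_coalesce_usecs exactly as entered and programs rx_coalesce_hw separately. A minimal sketch of the intended set_coalesce flow, using only names visible in this diff:

	/* sketch: keep the user's value verbatim, derive hw units once */
	lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
			     lif->rxqcqs[i].qcq->intr.index,
			     lif->rx_coalesce_hw);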
@@ -187,15 +189,10 @@ struct ionic_lif {
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
+/* return 0 if successfully set the bit, else non-zero */
static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
{
- unsigned long tlimit = jiffies + HZ;
-
- while (test_and_set_bit(bitname, lif->state) &&
- time_before(jiffies, tlimit))
- usleep_range(100, 200);
-
- return test_bit(bitname, lif->state);
+ return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
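The rewritten ionic_wait_for_bit() trades the hand-rolled test_and_set_bit()/usleep_range() poll for wait_on_bit_lock(), which sleeps until the bit can be taken atomically and returns non-zero if interrupted, giving callers a real errno to propagate instead of a blanket -EBUSY. A hedged sketch of the lock/unlock pairing this relies on; 'state' and RESET_BIT stand in for lif->state and IONIC_LIF_QUEUE_RESET, and the unlock side is assumed to use clear_and_wake_up_bit() elsewhere in the driver:

#include <linux/sched.h>
#include <linux/wait_bit.h>

static unsigned long state;
#define RESET_BIT	0

static int take_reset_bit(void)
{
	/* sleeps until the bit is atomically set; -EINTR on signal */
	return wait_on_bit_lock(&state, RESET_BIT, TASK_INTERRUPTIBLE);
}

static void release_reset_bit(void)
{
	/* clears the bit and wakes waiters in wait_on_bit_lock() */
	clear_and_wake_up_bit(RESET_BIT, &state);
}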
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index aab311413412..3590ea7fd88a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -247,6 +247,10 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
goto err_out;
}
+ err = ionic_heartbeat_check(lif->ionic);
+ if (err)
+ goto err_out;
+
memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
@@ -307,6 +311,14 @@ int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
return work_done;
}
+static void ionic_dev_cmd_clean(struct ionic *ionic)
+{
+ union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+
+ iowrite32(0, &regs->doorbell);
+ memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+}
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
{
struct ionic_dev *idev = &ionic->idev;
@@ -316,6 +328,7 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
int opcode;
int done;
int err;
+ int hb;
WARN_ON(in_interrupt());
@@ -330,7 +343,8 @@ try_again:
if (done)
break;
msleep(20);
- } while (!done && time_before(jiffies, max_wait));
+ hb = ionic_heartbeat_check(ionic);
+ } while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
@@ -338,7 +352,15 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
+ if (!done && hb) {
+ ionic_dev_cmd_clean(ionic);
+ dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
+ ionic_opcode_to_str(opcode), opcode);
+ return -ENXIO;
+ }
+
if (!done && !time_before(jiffies, max_wait)) {
+ ionic_dev_cmd_clean(ionic);
dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n",
ionic_opcode_to_str(opcode), opcode, max_seconds);
return -ETIMEDOUT;
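ionic_dev_cmd_wait() now rechecks firmware liveness on each poll iteration and, when the heartbeat is gone, cleans the command registers and fails with -ENXIO rather than spinning out the full timeout; ionic_adminq_post() gets the same early-out before posting. A condensed sketch of the pattern, where device_done() and heartbeat_dead() are hypothetical stand-ins for the done-bit read and ionic_heartbeat_check():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static bool device_done(void);		/* hypothetical done-bit read */
static int heartbeat_dead(void);	/* hypothetical liveness check */

static int poll_cmd(unsigned long max_wait)
{
	int hb = 0;

	do {
		if (device_done())
			return 0;
		msleep(20);
		hb = heartbeat_dead();
	} while (!hb && time_before(jiffies, max_wait));

	return hb ? -ENXIO : -ETIMEDOUT;	/* dead FW vs. merely slow */
}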
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ab6663d94f42..97e79949b359 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -34,52 +34,110 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct sk_buff *skb)
+static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
+ unsigned int len, bool frags)
{
- struct ionic_rxq_desc *old = desc_info->desc;
- struct ionic_rxq_desc *new = q->head->desc;
+ struct ionic_lif *lif = q->lif;
+ struct ionic_rx_stats *stats;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+
+ netdev = lif->netdev;
+ stats = q_to_rx_stats(q);
+
+ if (frags)
+ skb = napi_get_frags(&q_to_qcq(q)->napi);
+ else
+ skb = netdev_alloc_skb_ip_align(netdev, len);
- new->addr = old->addr;
- new->len = old->len;
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+ netdev->name, q->name);
+ stats->alloc_err++;
+ return NULL;
+ }
- ionic_rxq_post(q, true, ionic_rx_clean, skb);
+ return skb;
}
-static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, struct sk_buff **skb)
+static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct ionic_rxq_desc *desc = desc_info->desc;
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->lif->ionic->dev;
- struct sk_buff *new_skb;
- u16 clen, dlen;
-
- clen = le16_to_cpu(comp->len);
- dlen = le16_to_cpu(desc->len);
- if (clen > q->lif->rx_copybreak) {
- dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- dlen, DMA_FROM_DEVICE);
- return false;
- }
+ struct ionic_page_info *page_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ u16 frag_len;
+ u16 len;
- new_skb = netdev_alloc_skb_ip_align(netdev, clen);
- if (!new_skb) {
- dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- dlen, DMA_FROM_DEVICE);
- return false;
- }
+ page_info = &desc_info->pages[0];
+ len = le16_to_cpu(comp->len);
- dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- clen, DMA_FROM_DEVICE);
+ prefetch(page_address(page_info->page) + NET_IP_ALIGN);
- memcpy(new_skb->data, (*skb)->data, clen);
+ skb = ionic_rx_skb_alloc(q, len, true);
+ if (unlikely(!skb))
+ return NULL;
- ionic_rx_recycle(q, desc_info, *skb);
- *skb = new_skb;
+ i = comp->num_sg_elems + 1;
+ do {
+ if (unlikely(!page_info->page)) {
+ struct napi_struct *napi = &q_to_qcq(q)->napi;
- return true;
+ napi->skb = NULL;
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ frag_len = min(len, (u16)PAGE_SIZE);
+ len -= frag_len;
+
+ dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page_info->page, 0, frag_len, PAGE_SIZE);
+ page_info->page = NULL;
+ page_info++;
+ i--;
+ } while (i > 0);
+
+ return skb;
+}
+
+static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info)
+{
+ struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_page_info *page_info;
+ struct sk_buff *skb;
+ u16 len;
+
+ page_info = &desc_info->pages[0];
+ len = le16_to_cpu(comp->len);
+
+ skb = ionic_rx_skb_alloc(q, len, false);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (unlikely(!page_info->page)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
+ len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, page_address(page_info->page), len);
+ dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
+ len, DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, q->lif->netdev);
+
+ return skb;
}
static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
@@ -87,35 +145,34 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
- struct sk_buff *skb = cb_arg;
struct ionic_rx_stats *stats;
struct net_device *netdev;
+ struct sk_buff *skb;
stats = q_to_rx_stats(q);
netdev = q->lif->netdev;
- if (comp->status) {
- ionic_rx_recycle(q, desc_info, skb);
+ if (comp->status)
return;
- }
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
- /* no packet processing while resetting */
- ionic_rx_recycle(q, desc_info, skb);
+ /* no packet processing while resetting */
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
return;
- }
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
- ionic_rx_copybreak(q, desc_info, cq_info, &skb);
+ if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ skb = ionic_rx_copybreak(q, desc_info, cq_info);
+ else
+ skb = ionic_rx_frags(q, desc_info, cq_info);
- skb_put(skb, le16_to_cpu(comp->len));
- skb->protocol = eth_type_trans(skb, netdev);
+ if (unlikely(!skb))
+ return;
skb_record_rx_queue(skb, q->index);
- if (netdev->features & NETIF_F_RXHASH) {
+ if (likely(netdev->features & NETIF_F_RXHASH)) {
switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
case IONIC_PKT_TYPE_IPV4:
case IONIC_PKT_TYPE_IPV6:
@@ -132,7 +189,7 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
}
}
- if (netdev->features & NETIF_F_RXCSUM) {
+ if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__wsum)le16_to_cpu(comp->csum);
@@ -142,18 +199,21 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
stats->csum_none++;
}
- if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
- (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
- (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))
+ if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(comp->vlan_tci));
}
- napi_gro_receive(&qcq->napi, skb);
+ if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ napi_gro_receive(&qcq->napi, skb);
+ else
+ napi_gro_frags(&qcq->napi);
}
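The clean path now chooses per completion: small frames (up to rx_copybreak) are copied into a fresh linear skb and passed to napi_gro_receive(), while larger ones are assembled zero-copy from the receive pages onto napi's frag skb and finished with napi_gro_frags(), which consumes napi->skb itself. A reduced sketch of that dispatch, assuming the helpers shown above:

	/* illustrative dispatch mirroring ionic_rx_clean() */
	len = le16_to_cpu(comp->len);
	if (len <= q->lif->rx_copybreak) {
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
		if (skb)
			napi_gro_receive(&qcq->napi, skb);	/* linear copy */
	} else {
		skb = ionic_rx_frags(q, desc_info, cq_info);
		if (skb)
			napi_gro_frags(&qcq->napi);	/* frag skb owned by napi */
	}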
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
@@ -213,66 +273,125 @@ void ionic_rx_flush(struct ionic_cq *cq)
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
-static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len,
- dma_addr_t *dma_addr)
+static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
+ dma_addr_t *dma_addr)
{
struct ionic_lif *lif = q->lif;
struct ionic_rx_stats *stats;
struct net_device *netdev;
- struct sk_buff *skb;
struct device *dev;
+ struct page *page;
netdev = lif->netdev;
dev = lif->ionic->dev;
stats = q_to_rx_stats(q);
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
+ page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!page)) {
+ net_err_ratelimited("%s: Page alloc failed on %s!\n",
+ netdev->name, q->name);
stats->alloc_err++;
return NULL;
}
- *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *dma_addr)) {
- dev_kfree_skb(skb);
- net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- netdev->name, q->name);
+ *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, *dma_addr))) {
+ __free_page(page);
+ net_err_ratelimited("%s: DMA single map failed on %s!\n",
+ netdev->name, q->name);
stats->dma_map_err++;
return NULL;
}
- return skb;
+ return page;
+}
+
+static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
+ dma_addr_t dma_addr)
+{
+ struct ionic_lif *lif = q->lif;
+ struct net_device *netdev;
+ struct device *dev;
+
+ netdev = lif->netdev;
+ dev = lif->ionic->dev;
+
+ if (unlikely(!page)) {
+ net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+ netdev->name, q->name);
+ return;
+ }
+
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ __free_page(page);
}
-#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1)
+#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1)
+#define IONIC_RX_RING_HEAD_BUF_SZ 2048
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
+ struct ionic_desc_info *desc_info;
+ struct ionic_page_info *page_info;
+ struct ionic_rxq_sg_desc *sg_desc;
+ struct ionic_rxq_sg_elem *sg_elem;
struct ionic_rxq_desc *desc;
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ unsigned int nfrags;
bool ring_doorbell;
+ unsigned int i, j;
unsigned int len;
- unsigned int i;
len = netdev->mtu + ETH_HLEN;
+ nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
- skb = ionic_rx_skb_alloc(q, len, &dma_addr);
- if (!skb)
- return;
+ desc_info = q->head;
+ desc = desc_info->desc;
+ sg_desc = desc_info->sg_desc;
+ page_info = &desc_info->pages[0];
+
+ if (page_info->page) { /* recycle the buffer */
+ ring_doorbell = ((q->head->index + 1) &
+ IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ continue;
+ }
- desc = q->head->desc;
- desc->addr = cpu_to_le64(dma_addr);
- desc->len = cpu_to_le16(len);
- desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ /* fill main descriptor - pages[0] */
+ desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+ IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ desc_info->npages = nfrags;
+ page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
+ if (unlikely(!page_info->page)) {
+ desc->addr = 0;
+ desc->len = 0;
+ return;
+ }
+ desc->addr = cpu_to_le64(page_info->dma_addr);
+ desc->len = cpu_to_le16(PAGE_SIZE);
+ page_info++;
+
+ /* fill sg descriptors - pages[1..n] */
+ for (j = 0; j < nfrags - 1; j++) {
+ if (page_info->page) /* recycle the sg buffer */
+ continue;
+
+ sg_elem = &sg_desc->elems[j];
+ page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
+ if (unlikely(!page_info->page)) {
+ sg_elem->addr = 0;
+ sg_elem->len = 0;
+ return;
+ }
+ sg_elem->addr = cpu_to_le64(page_info->dma_addr);
+ sg_elem->len = cpu_to_le16(PAGE_SIZE);
+ page_info++;
+ }
ring_doorbell = ((q->head->index + 1) &
IONIC_RX_RING_DOORBELL_STRIDE) == 0;
-
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb);
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
}
}
@@ -283,15 +402,24 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *cur;
struct ionic_rxq_desc *desc;
+ unsigned int i;
for (cur = q->tail; cur != q->head; cur = cur->next) {
desc = cur->desc;
- dma_unmap_single(dev, le64_to_cpu(desc->addr),
- le16_to_cpu(desc->len), DMA_FROM_DEVICE);
- dev_kfree_skb(cur->cb_arg);
+ desc->addr = 0;
+ desc->len = 0;
+
+ for (i = 0; i < cur->npages; i++) {
+ if (likely(cur->pages[i].page)) {
+ ionic_rx_page_free(q, cur->pages[i].page,
+ cur->pages[i].dma_addr);
+ cur->pages[i].page = NULL;
+ cur->pages[i].dma_addr = 0;
+ }
+ }
+
cur->cb_arg = NULL;
}
}
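The buffer strategy changes from one pre-mapped skb per descriptor to one DMA-mapped page per SG element, recycled in place while still populated. The core allocate/map and unmap/free pairing, as a self-contained sketch ('dev' being the DMA-capable struct device):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static struct page *rx_page_get(struct device *dev, dma_addr_t *dma)
{
	struct page *page = alloc_page(GFP_ATOMIC);	/* napi context */

	if (!page)
		return NULL;

	*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static void rx_page_put(struct device *dev, struct page *page, dma_addr_t dma)
{
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
	__free_page(page);
}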
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index d473b522afc5..9ad568d93ae6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -37,14 +37,14 @@
#include <linux/slab.h>
#include "qed.h"
-/* Fields of IGU PF CONFIGRATION REGISTER */
+/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
-/* Fields of IGU VF CONFIGRATION REGISTER */
+/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 9a8fd79611f2..368e88565783 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -305,7 +305,7 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
/**
* @brief Read sriov related information and allocated resources
- * reads from configuraiton space, shmem, etc.
+ * reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9a6a9a008714..d6cfe4ffbaf3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1298,7 +1298,7 @@ void qede_config_rx_mode(struct net_device *ndev)
rx_mode.type = QED_FILTER_TYPE_RX_MODE;
/* Remove all previous unicast secondary macs and multicast macs
- * (configrue / leave the primary mac)
+ * (configure / leave the primary mac)
*/
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
edev->ndev->dev_addr);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a220cc7c947a..481b096e984d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2115,12 +2115,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
if (rc)
goto out;
- fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
- if (IS_ERR(fp->rxq->xdp_prog)) {
- rc = PTR_ERR(fp->rxq->xdp_prog);
- fp->rxq->xdp_prog = NULL;
- goto out;
- }
+ bpf_prog_add(edev->xdp_prog, 1);
+ fp->rxq->xdp_prog = edev->xdp_prog;
}
if (fp->type & QEDE_FASTPATH_TX) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index c84ab052ef26..98f92268cbaa 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -213,9 +213,9 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
struct emac_adapter *adpt = netdev_priv(netdev);
- netif_info(adpt, hw, adpt->netdev,
- "changing MTU from %d to %d\n", netdev->mtu,
- new_mtu);
+ netif_dbg(adpt, hw, adpt->netdev,
+ "changing MTU from %d to %d\n", netdev->mtu,
+ new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5ecf61df78bd..baac016f3ec0 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -363,7 +363,7 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available > QCASPI_HW_BUF_LEN) {
+ if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
/* This could only happen by interferences on the SPI line.
* So retry later ...
*/
@@ -496,7 +496,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
u16 signature = 0;
u16 spi_config;
u16 wrbuf_space = 0;
- static u16 reset_count;
if (event == QCASPI_EVENT_CPUON) {
/* Read signature twice, if not valid
@@ -549,13 +548,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qca->sync = QCASPI_SYNC_RESET;
qca->stats.trig_reset++;
- reset_count = 0;
+ qca->reset_count = 0;
break;
case QCASPI_SYNC_RESET:
- reset_count++;
+ qca->reset_count++;
netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
- reset_count);
- if (reset_count >= QCASPI_RESET_TIMEOUT) {
+ qca->reset_count);
+ if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
/* reset did not seem to take place, try again */
qca->sync = QCASPI_SYNC_UNKNOWN;
qca->stats.reset_timeout++;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index eb9af45fcc5e..d13a67e20d65 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -94,6 +94,7 @@ struct qcaspi {
unsigned int intr_req;
unsigned int intr_svc;
+ u16 reset_count;
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
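Moving reset_count out of qcaspi_qca7k_sync() matters because a function-static is shared by every bound device: with two QCA7000s attached, each instance would bump and clear the other's counter. Keeping it in struct qcaspi makes the reset-timeout bookkeeping per-device. An illustrative contrast, with qcaspi_like as a stand-in type:

/* before: one counter for all devices (wrong with multiple NICs) */
static u16 reset_count;			/* shared across instances */

/* after: counter lives in the per-device private struct */
struct qcaspi_like {
	u16 reset_count;
};

static void note_reset(struct qcaspi_like *qca)
{
	qca->reset_count++;		/* device-local, no cross-talk */
}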
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index 8f54a2c832eb..355cc810e322 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -37,7 +37,7 @@ struct fw_info {
u8 chksum;
} __packed;
-#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0])
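FIELD_SIZEOF() is the kernel's named form of the same null-pointer-member trick, so the replacement is behavior-neutral but states the intent: the size of one opcode element. Roughly, assuming pa->code points at __le32 opcodes:

/* kernel.h definition, approximately: */
/*   #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))                */
/* so FW_OPCODE_SIZE == sizeof(((struct rtl_fw_phy_action *)0)->code[0])
 *                   == sizeof(__le32) == 4 (assumed layout)         */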
static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
{
@@ -92,19 +92,24 @@ static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw)
for (index = 0; index < pa->size; index++) {
u32 action = le32_to_cpu(pa->code[index]);
+ u32 val = action & 0x0000ffff;
u32 regno = (action & 0x0fff0000) >> 16;
switch (action >> 28) {
case PHY_READ:
case PHY_DATA_OR:
case PHY_DATA_AND:
- case PHY_MDIO_CHG:
case PHY_CLEAR_READCOUNT:
case PHY_WRITE:
case PHY_WRITE_PREVIOUS:
case PHY_DELAY_MS:
break;
+ case PHY_MDIO_CHG:
+ if (val > 1)
+ goto out;
+ break;
+
case PHY_BJMPN:
if (regno > index)
goto out;
@@ -164,12 +169,12 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= (regno + 1);
break;
case PHY_MDIO_CHG:
- if (data == 0) {
- fw_write = rtl_fw->phy_write;
- fw_read = rtl_fw->phy_read;
- } else if (data == 1) {
+ if (data) {
fw_write = rtl_fw->mac_mcu_write;
fw_read = rtl_fw->mac_mcu_read;
+ } else {
+ fw_write = rtl_fw->phy_write;
+ fw_read = rtl_fw->phy_read;
}
break;
@@ -198,7 +203,7 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index += regno;
break;
case PHY_DELAY_MS:
- mdelay(data);
+ msleep(data);
break;
}
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index c4e961ea44d5..d47a038cb8d0 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -52,6 +52,7 @@
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
+#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
@@ -135,6 +136,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_52,
RTL_GIGA_MAC_VER_60,
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_NONE
@@ -202,6 +204,7 @@ static const struct {
[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
[RTL_GIGA_MAC_VER_60] = {"RTL8125" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3},
};
@@ -680,6 +683,7 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
+ int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -712,6 +716,7 @@ MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
+MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
@@ -741,12 +746,6 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
-static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
-{
- pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ, force);
-}
-
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_60;
@@ -756,7 +755,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_51;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -1092,6 +1091,39 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
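Each new helper brackets its accesses with phy_select_page()/phy_restore_page(), which also take and release the MDIO bus lock, replacing the open-coded 0x1f page flips scattered through the PHY config routines. For example, the rtl8168f EEE sequence later in this patch collapses to:

	/* was: select page 7, write 0x1e=0x0020, set bit 8 of 0x15,
	 *      select page 5, write 0x05=0x8b85, set bit 13 of 0x06,
	 *      restore page 0 -- now two locked helper calls:
	 */
	r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
	r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));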
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1262,9 +1294,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
rtl8168dp_driver_start(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_driver_start(tp);
break;
default:
@@ -1296,9 +1326,7 @@ static void rtl8168_driver_stop(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
rtl8168dp_driver_stop(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_driver_stop(tp);
break;
default:
@@ -1326,9 +1354,7 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp);
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1503,7 +1529,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
@@ -1571,7 +1597,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
if (dev->mtu > JUMBO_1K &&
tp->mac_version > RTL_GIGA_MAC_VER_06)
- features &= ~NETIF_F_IP_CSUM;
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
return features;
}
@@ -2074,6 +2100,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
}
ret = phy_ethtool_set_eee(tp->phydev, data);
+
+ if (!ret)
+ tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV);
out:
pm_runtime_put_noidle(d);
return ret;
@@ -2104,10 +2134,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl_enable_eee(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ int adv;
+
+ /* respect EEE advertisement the user may have set */
+ if (tp->eee_adv >= 0)
+ adv = tp->eee_adv;
+ else
+ adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (supported > 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
+ if (adv >= 0)
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
}
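Caching the last user-programmed advertisement in tp->eee_adv lets rtl_enable_eee() restore the user's EEE choice after chip resets instead of re-advertising the full PHY capability mask. This presumes the field is initialized to a negative sentinel at probe time (not shown in these hunks); a minimal sketch of that assumption:

	/* assumed probe-time init so the first rtl_enable_eee() falls
	 * back to MDIO_PCS_EEE_ABLE until the user sets a preference
	 */
	tp->eee_adv = -1;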
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
@@ -2132,6 +2168,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
{ 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ /* RTL8117 */
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
+
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
{ 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
@@ -2260,14 +2299,6 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
rtl_fw_write_firmware(tp, tp->rtl_fw);
}
-static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
-{
- if (rtl_readphy(tp, reg) != val)
- netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
- else
- rtl_apply_firmware(tp);
-}
-
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
/* Adjust EEE LED frequency */
@@ -2287,15 +2318,8 @@ static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- phy_write(phydev, 0x1f, 0x0007);
- phy_write(phydev, 0x1e, 0x0020);
- phy_set_bits(phydev, 0x15, BIT(8));
-
- phy_write(phydev, 0x1f, 0x0005);
- phy_write(phydev, 0x05, 0x8b85);
- phy_set_bits(phydev, 0x06, BIT(13));
-
- phy_write(phydev, 0x1f, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
}
static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
@@ -2392,13 +2416,7 @@ static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x01, 0x90d0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0);
}
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
@@ -2409,9 +2427,7 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
(pdev->subsystem_device != 0xe000))
return;
- rtl_writephy(tp, 0x1f, 0x0001);
- rtl_writephy(tp, 0x10, 0xf01b);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
}
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
@@ -2516,54 +2532,28 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x10, 0xf41b },
- { 0x1f, 0x0000 }
- };
-
rtl_writephy(tp, 0x1f, 0x0001);
rtl_patchphy(tp, 0x16, 1 << 0);
-
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy(tp, 0x10, 0xf41b);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x10, 0xf41b },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b);
}
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0000 },
- { 0x1d, 0x0f00 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x1ec8 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write(tp->phydev, 0x1d, 0x0f00);
+ phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8);
}
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_set_bits(tp->phydev, 0x14, BIT(5));
+ phy_set_bits(tp->phydev, 0x0d, BIT(5));
+ phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98);
}
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2645,11 +2635,6 @@ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl8168c_3_hw_phy_config(tp);
-}
-
static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
/* Channel Estimation */
{ 0x1f, 0x0001 },
@@ -2700,6 +2685,21 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
{ 0x1f, 0x0002 }
};
+static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val)
+{
+ u16 reg_val;
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+ reg_val = rtl_readphy(tp, 0x06);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ if (reg_val != val)
+ netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+ else
+ rtl_apply_firmware(tp);
+}
+
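/*
 * [Editor's note] The sequence above reads a PHY parameter through the
 * same page-0x0005 address/data window that r8168d_phy_param() writes
 * through; 0xbf00 (8168d_1) and 0xb300 (8168d_2) are presumably ROM
 * signatures that must match before PHY firmware may be loaded, e.g.:
 *
 *	rtl8168d_apply_firmware_cond(tp, 0xbf00);
 */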
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
@@ -2733,15 +2733,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x6662 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x6662 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662);
+ r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662);
}
/* RSET couple improve */
@@ -2753,13 +2746,9 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0002);
rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
-
- rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
-
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, 0xbf00);
}
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
@@ -2786,15 +2775,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x2642 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x2642 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642);
+ r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642);
}
/* Fine tune PLL performance */
@@ -2805,13 +2787,9 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
/* Switching regulator Slew rate */
rtl_writephy(tp, 0x1f, 0x0002);
rtl_patchphy(tp, 0x0f, 0x0017);
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
-
- rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
-
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, 0xb300);
}
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
@@ -2865,41 +2843,23 @@ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x04, 0xf800 },
{ 0x04, 0xf000 },
{ 0x1f, 0x0000 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x0023 },
- { 0x16, 0x0000 },
- { 0x1f, 0x0000 }
};
rtl_writephy_batch(tp, phy_reg_init);
+
+ r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000);
}
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x002d },
- { 0x18, 0x0040 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
- rtl_patchphy(tp, 0x0d, 1 << 5);
+ phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0);
+ r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040);
+ phy_set_bits(tp->phydev, 0x0d, BIT(5));
}
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
- /* Enable Delay cap */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b80 },
- { 0x06, 0xc896 },
- { 0x1f, 0x0000 },
-
/* Channel estimation fine tune */
{ 0x1f, 0x0001 },
{ 0x0b, 0x6c20 },
@@ -2908,60 +2868,38 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0003 },
{ 0x14, 0x6420 },
{ 0x1f, 0x0000 },
-
- /* Update PFM & 10M TX idle timer */
- { 0x1f, 0x0007 },
- { 0x1e, 0x002f },
- { 0x15, 0x1919 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x00ac },
- { 0x18, 0x0006 },
- { 0x1f, 0x0000 }
};
+ struct phy_device *phydev = tp->phydev;
rtl_apply_firmware(tp);
+ /* Enable Delay cap */
+ r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
+
rtl_writephy_batch(tp, phy_reg_init);
+ /* Update PFM & 10M TX idle timer */
+ r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
+
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
/* DCO enable for 10M IDLE Power */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0023);
- rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
/* For impedance matching */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
+ phy_set_bits(phydev, 0x14, BIT(15));
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+ r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
- rtl_writephy(tp, 0x1f, 0x0006);
- rtl_writephy(tp, 0x00, 0x5a00);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
+ phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
}
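/*
 * [Editor's illustration, assumed equivalence] The removed 0x0d/0x0e
 * writes at the end of this function are the Clause-22 indirect MMD
 * access that phy_write_mmd() performs internally: devad 7 is
 * MDIO_MMD_AN and register 0x3c is MDIO_AN_EEE_ADV, so writing 0
 * clears the EEE advertisement. Spelled out:
 */
static void c22_indirect_mmd_write(struct phy_device *phydev, int devad,
				   u16 regnum, u16 val)
{
	phy_write(phydev, 0x0d, devad);		 /* MII_MMD_CTRL: address op */
	phy_write(phydev, 0x0e, regnum);	 /* MII_MMD_DATA: reg 0x003c */
	phy_write(phydev, 0x0d, 0x4000 | devad); /* switch to data op */
	phy_write(phydev, 0x0e, val);		 /* 0x0000 = no EEE modes */
	phy_write(phydev, 0x0d, 0x0000);	 /* restore address mode */
}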
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
@@ -2980,36 +2918,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Enable Delay cap */
- { 0x1f, 0x0004 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x00ac },
- { 0x18, 0x0006 },
- { 0x1f, 0x0002 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
+ struct phy_device *phydev = tp->phydev;
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
+ rtl_apply_firmware(tp);
- /* Green Setting */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b5b },
- { 0x06, 0x9222 },
- { 0x05, 0x8b6d },
- { 0x06, 0x8000 },
- { 0x05, 0x8b76 },
- { 0x06, 0x8000 },
- { 0x1f, 0x0000 }
- };
+ /* Enable Delay cap */
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
- rtl_apply_firmware(tp);
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Green Setting */
+ r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
+ r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
+ r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
/* For 4-corner performance improve */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3018,25 +2940,14 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
/* improve 10M EEE waveform */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
rtl8168f_config_eee_phy(tp);
rtl_enable_eee(tp);
@@ -3056,24 +2967,17 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
/* For 4-corner performance improve */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b80);
- rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
/* Improve 10M EEE waveform */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
rtl8168f_config_eee_phy(tp);
rtl_enable_eee(tp);
@@ -3081,52 +2985,31 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
+ struct phy_device *phydev = tp->phydev;
- /* Modify green table for giga & fnet */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b55 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b5e },
- { 0x06, 0x0000 },
- { 0x05, 0x8b67 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b70 },
- { 0x06, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x0078 },
- { 0x17, 0x0000 },
- { 0x19, 0x00fb },
- { 0x1f, 0x0000 },
+ rtl_apply_firmware(tp);
- /* Modify green table for 10M */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b79 },
- { 0x06, 0xaa00 },
- { 0x1f, 0x0000 },
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
- /* Disable hiimpedance detection (RTCT) */
- { 0x1f, 0x0003 },
- { 0x01, 0x328a },
- { 0x1f, 0x0000 }
- };
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
- rtl_apply_firmware(tp);
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
rtl8168f_hw_phy_config(tp);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3138,77 +3021,43 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
-
- /* Modify green table for giga & fnet */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b55 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b5e },
- { 0x06, 0x0000 },
- { 0x05, 0x8b67 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b70 },
- { 0x06, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x0078 },
- { 0x17, 0x0000 },
- { 0x19, 0x00aa },
- { 0x1f, 0x0000 },
-
- /* Modify green table for 10M */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b79 },
- { 0x06, 0xaa00 },
- { 0x1f, 0x0000 },
-
- /* Disable hiimpedance detection (RTCT) */
- { 0x1f, 0x0003 },
- { 0x01, 0x328a },
- { 0x1f, 0x0000 }
- };
-
+ struct phy_device *phydev = tp->phydev;
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
/* Modify green table for giga */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b54);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
- rtl_writephy(tp, 0x05, 0x8b5d);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
- rtl_writephy(tp, 0x05, 0x8a7c);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a7f);
- rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
- rtl_writephy(tp, 0x05, 0x8a82);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a88);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
+ r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
/* uc same-seed solution */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
@@ -3228,12 +3077,8 @@ static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x8084);
- phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
- phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));
-
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
}
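/*
 * [Editor's sketch, assumed helper] r8168g_phy_param() is introduced
 * elsewhere in the series; from the sequences it replaces it plausibly
 * reduces to an address/data access on page 0x0a43:
 */
static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
			     u16 mask, u16 set)
{
	phy_write(phydev, 0x1f, 0x0a43);	/* parameter page */
	phy_write(phydev, 0x13, parm);		/* parameter address */
	phy_modify(phydev, 0x14, mask, set);	/* parameter data */
	phy_write(phydev, 0x1f, 0x0000);
}
/*
 * Mask arithmetic for the conversion above:
 *   BIT(14) | BIT(13)         == 0x6000  (cleared at param 0x8084)
 *   BIT(12) | BIT(1) | BIT(0) == 0x1003  (set in page 0x0a43 reg 0x10)
 */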
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3263,9 +3108,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000);
phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
@@ -3295,73 +3138,48 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
u16 dout_tapbin;
u32 data;
rtl_apply_firmware(tp);
/* CHN EST parameters adjust - giga master */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x809b);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
- rtl_writephy(tp, 0x13, 0x80a2);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
- rtl_writephy(tp, 0x13, 0x80a4);
- rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
- rtl_writephy(tp, 0x13, 0x809c);
- rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
+ r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
/* CHN EST parameters adjust - giga slave */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80ad);
- rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
- rtl_writephy(tp, 0x13, 0x80b4);
- rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
- rtl_writephy(tp, 0x13, 0x80ac);
- rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
+ r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
/* CHN EST parameters adjust - fnet */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x808e);
- rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
- rtl_writephy(tp, 0x13, 0x8090);
- rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
- rtl_writephy(tp, 0x13, 0x8092);
- rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
/* enable R-tune & PGA-retune function */
dout_tapbin = 0;
- rtl_writephy(tp, 0x1f, 0x0a46);
- data = rtl_readphy(tp, 0x13);
+ data = phy_read_paged(phydev, 0x0a46, 0x13);
data &= 3;
data <<= 2;
dout_tapbin |= data;
- data = rtl_readphy(tp, 0x12);
+ data = phy_read_paged(phydev, 0x0a46, 0x12);
data &= 0xc000;
data >>= 14;
dout_tapbin |= data;
dout_tapbin = ~(dout_tapbin^0x08);
dout_tapbin <<= 12;
dout_tapbin &= 0xf000;
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x827a);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827b);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827c);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827d);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
-
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x0811);
- rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a42);
- rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
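/*
 * [Editor's worked example, readings assumed] Suppose page 0x0a46
 * reg 0x13 reads 0x0002 and reg 0x12 reads 0x4000:
 *   (0x0002 & 3) << 2        = 0x0008
 *   (0x4000 & 0xc000) >> 14  = 0x0001	-> dout_tapbin = 0x0009
 *   ~(0x0009 ^ 0x08)         = 0xfffe	(bit 3 flipped, rest inverted)
 *   (0xfffe << 12) & 0xf000  = 0xe000	-> bits 15:12 of reg 0x14
 * which is then written unchanged to params 0x827a..0x827d.
 */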
/* enable GPHY 10M */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
@@ -3369,22 +3187,13 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
/* SAR ADC performance */
phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x803f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8047);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x804f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8057);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x805f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8067);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x806f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
/* disable phy pfm mode */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
@@ -3397,24 +3206,18 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
{
u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
+ struct phy_device *phydev = tp->phydev;
u16 rlen;
u32 data;
rtl_apply_firmware(tp);
/* CHN EST parameter update */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x808a);
- rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
/* enable R-tune & PGA-retune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x0811);
- rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a42);
- rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
/* enable GPHY 10M */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
@@ -3434,26 +3237,20 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
- rtl_writephy(tp, 0x1f, 0x0bcf);
- rtl_writephy(tp, 0x16, data);
- rtl_writephy(tp, 0x1f, 0x0000);
- }
+ (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f))
+ phy_write_paged(phydev, 0x0bcf, 0x16, data);
/* Modify rlen (TX LPF corner frequency) level */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- data = rtl_readphy(tp, 0x16);
+ data = phy_read_paged(phydev, 0x0bcd, 0x16);
data &= 0x000f;
rlen = 0;
if (data > 3)
rlen = data - 3;
data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
- rtl_writephy(tp, 0x17, data);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_write_paged(phydev, 0x0bcd, 0x17, data);
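/*
 * [Editor's worked example, reading assumed] If page 0x0bcd reg 0x16
 * reads 0x0009: data &= 0x000f gives 9, 9 > 3 so rlen = 6, and the
 * nibble is replicated, 0x6 | 0x60 | 0x600 | 0x6000 = 0x6666, putting
 * the same TX LPF corner level in all four nibbles of reg 0x17.
 */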
/* disable phy pfm mode */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3462,22 +3259,21 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
/* Enable PHY auto speed down */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
- phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
/* set rg_sel_sdm_rate */
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3486,63 +3282,38 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
/* Set rg_sel_sdm_rate */
phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Channel estimation parameters */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80f3);
- rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
- rtl_writephy(tp, 0x13, 0x80f0);
- rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
- rtl_writephy(tp, 0x13, 0x80ef);
- rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
- rtl_writephy(tp, 0x13, 0x80f6);
- rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
- rtl_writephy(tp, 0x13, 0x80ec);
- rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
- rtl_writephy(tp, 0x13, 0x80ed);
- rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
- rtl_writephy(tp, 0x13, 0x80f2);
- rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
- rtl_writephy(tp, 0x13, 0x80f4);
- rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8110);
- rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
- rtl_writephy(tp, 0x13, 0x810f);
- rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
- rtl_writephy(tp, 0x13, 0x8111);
- rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
- rtl_writephy(tp, 0x13, 0x8113);
- rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
- rtl_writephy(tp, 0x13, 0x8115);
- rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
- rtl_writephy(tp, 0x13, 0x810e);
- rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
- rtl_writephy(tp, 0x13, 0x810c);
- rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
- rtl_writephy(tp, 0x13, 0x810b);
- rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80d1);
- rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
- rtl_writephy(tp, 0x13, 0x80cd);
- rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
- rtl_writephy(tp, 0x13, 0x80d3);
- rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
- rtl_writephy(tp, 0x13, 0x80d5);
- rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
- rtl_writephy(tp, 0x13, 0x80d7);
- rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);
+ r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
+ r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
+ r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
+ r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
+ r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
+ r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
+ r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
+ r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
+ r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
+ r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
+ r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
+ r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
+ r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
+ r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
+ r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
+ r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
+ r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
/* Force PWM-mode */
rtl_writephy(tp, 0x1f, 0x0bcd);
@@ -3561,6 +3332,46 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_enable_eee(tp);
}
+static void rtl8117_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
+
+ r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
+ r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
+ r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
+ r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
+ r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
+ r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
+ r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
+ r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
+ r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
+ r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
+
+ r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
+
+ rtl8168g_disable_aldps(tp);
+ rtl8168h_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3580,35 +3391,21 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0005 },
- { 0x1a, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0004 },
- { 0x1c, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x15, 0x7701 },
- { 0x1f, 0x0000 }
- };
-
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(100);
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000);
+ phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000);
+ phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(20);
rtl_apply_firmware(tp);
@@ -3631,8 +3428,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(100);
rtl_apply_firmware(tp);
@@ -3657,38 +3453,22 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x80ea);
- phy_modify(phydev, 0x14, 0xff00, 0xc400);
- phy_write(phydev, 0x13, 0x80eb);
- phy_modify(phydev, 0x14, 0x0700, 0x0300);
- phy_write(phydev, 0x13, 0x80f8);
- phy_modify(phydev, 0x14, 0xff00, 0x1c00);
- phy_write(phydev, 0x13, 0x80f1);
- phy_modify(phydev, 0x14, 0xff00, 0x3000);
- phy_write(phydev, 0x13, 0x80fe);
- phy_modify(phydev, 0x14, 0xff00, 0xa500);
- phy_write(phydev, 0x13, 0x8102);
- phy_modify(phydev, 0x14, 0xff00, 0x5000);
- phy_write(phydev, 0x13, 0x8105);
- phy_modify(phydev, 0x14, 0xff00, 0x3300);
- phy_write(phydev, 0x13, 0x8100);
- phy_modify(phydev, 0x14, 0xff00, 0x7000);
- phy_write(phydev, 0x13, 0x8104);
- phy_modify(phydev, 0x14, 0xff00, 0xf000);
- phy_write(phydev, 0x13, 0x8106);
- phy_modify(phydev, 0x14, 0xff00, 0x6500);
- phy_write(phydev, 0x13, 0x80dc);
- phy_modify(phydev, 0x14, 0xff00, 0xed00);
- phy_write(phydev, 0x13, 0x80df);
- phy_set_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x13, 0x80e1);
- phy_clear_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
+ r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
+ r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
+ r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
+ r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
+ r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
+ r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
+ r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
+ r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
+ r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
+ r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- phy_write_paged(phydev, 0xa43, 0x13, 0x819f);
- phy_write_paged(phydev, 0xa43, 0x14, 0xd0b6);
+ r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
@@ -3743,22 +3523,16 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
phy_write(phydev, 0x14, 0x0002);
for (i = 0; i < 25; i++)
phy_write(phydev, 0x14, 0x0000);
-
- phy_write(phydev, 0x13, 0x8257);
- phy_write(phydev, 0x14, 0x020F);
-
- phy_write(phydev, 0x13, 0x80EA);
- phy_write(phydev, 0x14, 0x7843);
phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020f);
+ r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
+
rtl_apply_firmware(tp);
phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x81a2);
- phy_set_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
@@ -3796,7 +3570,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
[RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
[RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
[RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
@@ -3826,6 +3600,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
};
@@ -3919,7 +3694,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3955,6 +3730,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_52:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
@@ -3986,6 +3762,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_52:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
@@ -4017,7 +3794,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
@@ -4039,14 +3816,12 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -4064,7 +3839,6 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x3f);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4072,32 +3846,15 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x0c);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-}
-
-static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- rtl_tx_performance_tweak(tp,
- PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
-}
-
-static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- rtl_tx_performance_tweak(tp,
- PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
- r8168b_0_hw_jumbo_enable(tp);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
}
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
- r8168b_0_hw_jumbo_disable(tp);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
@@ -4105,9 +3862,6 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- r8168b_0_hw_jumbo_enable(tp);
- break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
r8168b_1_hw_jumbo_enable(tp);
@@ -4131,9 +3885,6 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- r8168b_0_hw_jumbo_disable(tp);
- break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
r8168b_1_hw_jumbo_disable(tp);
@@ -4229,7 +3980,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
break;
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
@@ -4454,18 +4205,11 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
}
-static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
+static void rtl_hw_start_8168b(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}
-static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
-{
- rtl_hw_start_8168bb(tp);
-
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
-}
-
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
@@ -4557,19 +4301,6 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-}
-
-static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
-{
- rtl_set_def_aspm_entry_latency(tp);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
- rtl_disable_clock_request(tp);
}
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
@@ -4583,8 +4314,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp);
@@ -4659,8 +4388,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -4723,8 +4450,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
@@ -4775,8 +4500,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_2);
}
@@ -4961,8 +4685,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
@@ -5020,8 +4742,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
@@ -5106,6 +4826,71 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+static void rtl_hw_start_8117(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8117[] = {
+ { 0x19, 0x0040, 0x1100 },
+ { 0x59, 0x0040, 0x1100 },
+ };
+ int rg_saw_cnt;
+
+ rtl8168ep_stop_cmac(tp);
+
+ /* disable ASPM and clock request before accessing the EPHY */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8117);
+
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ rtl_reset_packet_filter(tp);
+
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90);
+
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+
+ rtl8168_config_eee_mac(tp);
+
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
+
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+
+ rtl_pcie_state_l2l3_disable(tp);
+
+ rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
+ if (rg_saw_cnt > 0) {
+ u16 sw_cnt_1ms_ini;
+
+ sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
+ r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
+ }
+
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_write(tp, 0xea80, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
+ r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
+
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc094, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ /* firmware is for MAC only */
+ rtl_apply_firmware(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
+}
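/*
 * [Editor's worked example, reading assumed] With rg_saw_cnt = 8000
 * (0x1f40), sw_cnt_1ms_ini = (16000000 / 8000) & 0x0fff = 2000 = 0x07d0,
 * folded into the low 12 bits of MAC OCP register 0xd412. The 16 MHz
 * constant suggests the counter is being scaled to a 1 ms tick,
 * matching the variable name.
 */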
+
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -5124,8 +4909,6 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
RTL_W8(tp, DBG_REG, FIX_NAK_1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
RTL_W8(tp, Config1,
LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5141,8 +4924,6 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}
@@ -5203,8 +4984,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8402);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
@@ -5358,13 +5137,13 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
- [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
+ [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = NULL,
[RTL_GIGA_MAC_VER_15] = NULL,
[RTL_GIGA_MAC_VER_16] = NULL,
- [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
@@ -5378,7 +5157,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
- [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
+ [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
@@ -5399,6 +5178,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
};
@@ -5420,11 +5200,6 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
- tp->mac_version == RTL_GIGA_MAC_VER_16)
- pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_NOSNOOP_EN);
-
if (rtl_is_8168evl_up(tp))
RTL_W8(tp, MaxTxPacketSize, EarlySize);
else
@@ -5573,18 +5348,15 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
- goto err_out;
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
}
tp->Rx_databuff[i] = data;
}
rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
- return 0;
-err_out:
- rtl8169_rx_clear(tp);
- return -ENOMEM;
+ return 0;
}
static int rtl8169_init_ring(struct rtl8169_private *tp)
@@ -6953,7 +6725,7 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
/* fall through */
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
@@ -7063,6 +6835,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
+ tp->eee_adv = -1;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
@@ -7179,8 +6952,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
- /* RTL8168e-vl has a HW issue with TSO */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ /* RTL8168e-vl and one RTL8168c variant are known to have a
+ * HW issue with TSO.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a9c89d5d8898..9f88b5db4f89 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
+#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
+
/* TX descriptors per packet */
#define NUM_TX_DESC_GEN2 2
#define NUM_TX_DESC_GEN3 1
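/*
 * [Editor's note] RX_BUF_SZ = 2048 - ETH_FCS_LEN + sizeof(__sum16)
 * = 2048 - 4 + 2 = 2046 bytes: a fixed 2 KiB-class buffer minus the
 * stripped FCS, plus a trailing slot for the checksum the hardware can
 * append. Using a constant instead of the old MTU-derived rx_buf_sz is
 * what lets ravb_change_mtu() below avoid reallocating the rings.
 */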
@@ -1018,7 +1020,6 @@ struct ravb_private {
u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
u32 cur_tx[NUM_TX_QUEUE];
u32 dirty_tx[NUM_TX_QUEUE];
- u32 rx_buf_sz; /* Based on MTU+slack. */
struct napi_struct napi[NUM_RX_QUEUE];
struct work_struct work;
/* MII transceiver section. */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index de9aa8c47f1c..4b13a184bfc7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+ rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int ring_size;
int i;
- priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
- ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
-
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+ desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
- priv->rx_buf_sz +
+ RX_BUF_SZ +
RAVB_ALIGN - 1);
if (!skb)
break; /* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
- if (netif_running(ndev))
- return -EBUSY;
+ struct ravb_private *priv = netdev_priv(ndev);
ndev->mtu = new_mtu;
+
+ if (netif_running(ndev)) {
+ synchronize_irq(priv->emac_irq);
+ ravb_emac_init(ndev);
+ }
+
netdev_update_features(ndev);
return 0;
@@ -2046,7 +2048,9 @@ static int ravb_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
- priv->phy_interface = of_get_phy_mode(np);
+ error = of_get_phy_mode(np, &priv->phy_interface);
+ if (error && error != -ENODEV)
+ goto out_release;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 9a42580693cb..6984bd5b7da9 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
if (req->index)
return -EINVAL;
@@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
+ /* Reject requests with unsupported flags */
+ if (req->flags)
+ return -EOPNOTSUPP;
+
if (req->index)
return -EINVAL;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7ba35a0bdb29..e19b49c4013e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3183,6 +3183,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
+ phy_interface_t interface;
const char *mac_addr;
int ret;
@@ -3190,10 +3191,10 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
if (!pdata)
return NULL;
- ret = of_get_phy_mode(np);
- if (ret < 0)
+ ret = of_get_phy_mode(np, &interface);
+ if (ret)
return NULL;
- pdata->phy_interface = ret;
+ pdata->phy_interface = interface;
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
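/*
 * [Editor's note] These call sites assume the reworked OF helper, which
 * reports the mode through a pointer instead of the return value:
 *
 *	int of_get_phy_mode(struct device_node *np,
 *			    phy_interface_t *interface);
 *
 * 0 means *interface is valid; a negative errno means it is not. The
 * ravb/sxgbe conversions treat -ENODEV as "no usable phy-mode property,
 * keep the default" and propagate every other error, while sh_eth
 * requires a valid mode and bails out on any error.
 */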
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 786b158bd305..bc4f951315da 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2189,9 +2189,6 @@ static int rocker_router_fib_event(struct notifier_block *nb,
struct rocker_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
- if (!net_eq(info->net, &init_net))
- return NOTIFY_DONE;
-
if (info->family != AF_INET)
return NOTIFY_DONE;
@@ -2994,7 +2991,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* the device, so no need to pass a callback.
*/
rocker->fib_nb.notifier_call = rocker_router_fib_event;
- err = register_fib_notifier(&rocker->fib_nb, NULL);
+ err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
if (err)
goto err_register_fib_notifier;
@@ -3021,7 +3018,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_register_switchdev_blocking_notifier:
unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
- unregister_fib_notifier(&rocker->fib_nb);
+ unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
rocker_remove_ports(rocker);
err_probe_ports:
@@ -3057,7 +3054,7 @@ static void rocker_remove(struct pci_dev *pdev)
unregister_switchdev_blocking_notifier(nb);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
- unregister_fib_notifier(&rocker->fib_nb);
+ unregister_fib_notifier(&init_net, &rocker->fib_nb);
rocker_remove_ports(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
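/*
 * [Editor's note] The rocker changes track the FIB notifier API move to
 * per-netns registration; the signatures assumed here are:
 *
 *	int register_fib_notifier(struct net *net, struct notifier_block *nb,
 *				  void (*cb)(struct notifier_block *nb),
 *				  struct netlink_ext_ack *extack);
 *	int unregister_fib_notifier(struct net *net, struct notifier_block *nb);
 *
 * With registration scoped to &init_net, the open-coded
 * net_eq(info->net, &init_net) filter in rocker_router_fib_event()
 * becomes redundant, which is why it is dropped above.
 */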
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 2412c87561e0..33f79402850d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -30,12 +30,15 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct sxgbe_dma_cfg *dma_cfg;
+ int err;
if (!np)
return -ENODEV;
*mac = of_get_mac_address(np);
- plat->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &plat->interface);
+ if (err && err != -ENODEV)
+ return err;
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 0ec13f520e90..4d9bbccc6f89 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -946,8 +946,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
/* Extra channels, even those with TXQs (PTP), do not require
* PIO resources.
*/
- if (!channel->type->want_pio)
+ if (!channel->type->want_pio ||
+ channel->channel >= efx->xdp_channel_offset)
continue;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
/* We assign the PIO buffers to queues in
* reverse order to allow for the following
@@ -1296,8 +1298,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
int rc;
channel_vis = max(efx->n_channels,
- (efx->n_tx_channels + efx->n_extra_tx_channels) *
- EFX_TXQ_TYPES);
+ ((efx->n_tx_channels + efx->n_extra_tx_channels) *
+ EFX_TXQ_TYPES) +
+ efx->n_xdp_channels * efx->xdp_tx_per_channel);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -2434,11 +2437,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
- * TSOv2 cannot be used with Hardware timestamping.
+ * TSOv2 cannot be used with hardware timestamping, and is never needed
+ * for XDP TX.
*/
if (csum_offload && (nic_data->datapath_caps2 &
(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
- !tx_queue->timestamping) {
+ !tx_queue->timestamping && !tx_queue->xdp_tx) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
@@ -4198,11 +4202,15 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
+ size_t outlen;
int rc;
efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc && spec->priority != EFX_FILTER_PRI_HINT)
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
+ outbuf, outlen, rc);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
if (rc == -ENOSPC)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2fef7402233e..992c773620ec 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -226,6 +226,10 @@ static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
@@ -340,6 +344,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
+ xdp_do_flush_map();
+
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
@@ -349,7 +355,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_RFS_ACCEL
/* Perhaps expire some ARFS filters */
- schedule_work(&channel->filter_work);
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
@@ -481,7 +487,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
}
#ifdef CONFIG_RFS_ACCEL
- INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif
rx_queue = &channel->rx_queue;
@@ -527,7 +533,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
- INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif
return channel;
@@ -579,9 +585,14 @@ efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
int number;
number = channel->channel;
- if (efx->tx_channel_offset == 0) {
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
type = "";
- } else if (channel->channel < efx->tx_channel_offset) {
+ } else if (number < efx->tx_channel_offset) {
type = "-rx";
} else {
type = "-tx";
@@ -651,7 +662,7 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
efx->rx_ip_align + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
@@ -774,6 +785,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
}
+ efx->xdp_rxq_info_failed = false;
}
static void efx_remove_channel(struct efx_channel *channel)
@@ -798,6 +810,8 @@ static void efx_remove_channels(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
}
int
@@ -1435,6 +1449,101 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
return count;
}
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ efx->n_channels = n_channels;
+
+ /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
+ if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
+ n_channels--;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ if (efx->n_xdp_channels)
+ efx->xdp_channel_offset = efx->tx_channel_offset +
+ efx->n_tx_channels;
+ else
+ efx->xdp_channel_offset = efx->n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
+
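To make the channel arithmetic concrete, a worked example assuming EFX_TXQ_TYPES is 4, as in this generation of the sfc driver (the CPU count is invented for illustration):

    /* A box with 32 possible CPUs:
     *   n_xdp_tx = num_possible_cpus()             = 32 (one TX queue per CPU)
     *   n_xdp_ev = DIV_ROUND_UP(32, EFX_TXQ_TYPES) =  8 extra channels,
     *              each hosting up to 4 XDP TX queues
     * If the regular channels plus those 8 would exceed max_channels, the
     * code above disables XDP TX entirely rather than shrink the normal
     * datapath.
     */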
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
@@ -1449,19 +1558,19 @@ static int efx_probe_interrupts(struct efx_nic *efx)
++extra_channels;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
struct msix_entry xentries[EFX_MAX_CHANNELS];
unsigned int n_channels;
- n_channels = efx_wanted_parallelism(efx);
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
- n_channels = min(n_channels, efx->max_channels);
-
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev,
- xentries, 1, n_channels);
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
if (rc < 0) {
/* Fall back to single channel MSI */
netif_err(efx, drv, efx->net_dev,
@@ -1480,21 +1589,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
if (rc > 0) {
- efx->n_channels = n_channels;
- if (n_channels > extra_channels)
- n_channels -= extra_channels;
- if (efx_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
- efx->n_rx_channels = max(n_channels -
- efx->n_tx_channels,
- 1U);
- } else {
- efx->n_tx_channels = min(n_channels,
- efx->max_tx_channels);
- efx->n_rx_channels = n_channels;
- }
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
@@ -1506,6 +1600,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -1524,12 +1620,14 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
}
- /* Assign extra channels if possible */
+ /* Assign extra channels if possible, before XDP channels */
efx->n_extra_tx_channels = 0;
- j = efx->n_channels;
+ j = efx->xdp_channel_offset;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
continue;
@@ -1724,29 +1822,50 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-static void efx_set_channels(struct efx_nic *efx)
+static int efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
efx->tx_channel_offset =
efx_separate_tx_channels ?
efx->n_channels - efx->n_tx_channels : 0;
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
/* We need to mark which channels really have RX and TX
* queues, and adjust the TX queue numbers if we have separate
* RX-only and TX-only channels.
*/
+ xdp_queue_number = 0;
efx_for_each_channel(channel, efx) {
if (channel->channel < efx->n_rx_channels)
channel->rx_queue.core_index = channel->channel;
else
channel->rx_queue.core_index = -1;
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
}
+ return 0;
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1776,7 +1895,9 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
- efx_set_channels(efx);
+ rc = efx_set_channels(efx);
+ if (rc)
+ goto fail1;
/* dimension_resources can fail with EAGAIN */
rc = efx->type->dimension_resources(efx);
@@ -1848,6 +1969,8 @@ static int efx_probe_filters(struct efx_nic *efx)
++i)
channel->rps_flow_id[i] =
RPS_FLOW_ID_INVALID;
+ channel->rfs_expire_index = 0;
+ channel->rfs_filter_count = 0;
}
if (!success) {
@@ -1857,8 +1980,6 @@ static int efx_probe_filters(struct efx_nic *efx)
rc = -ENOMEM;
goto out_unlock;
}
-
- efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
@@ -1872,8 +1993,10 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
struct efx_channel *channel;
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
+ cancel_delayed_work_sync(&channel->filter_work);
kfree(channel->rps_flow_id);
+ }
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
@@ -2022,6 +2145,10 @@ static void efx_stop_all(struct efx_nic *efx)
static void efx_remove_all(struct efx_nic *efx)
{
+ rtnl_lock();
+ efx_xdp_setup_prog(efx, NULL);
+ rtnl_unlock();
+
efx_remove_channels(efx);
efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
@@ -2082,6 +2209,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
channel->irq_moderation_us = rx_usecs;
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation_us = tx_usecs;
+ else if (efx_channel_is_xdp_tx(channel))
+ channel->irq_moderation_us = tx_usecs;
}
return 0;
@@ -2277,6 +2406,17 @@ static void efx_watchdog(struct net_device *net_dev)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
+static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+ return PAGE_SIZE - overhead;
+}
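A rough feel for the resulting ceiling (numbers indicative only; the RX prefix and padding terms are NIC-specific):

    /* With 4 KiB pages: PAGE_SIZE (4096) less XDP_PACKET_HEADROOM (256),
     * the rx page state, the hardware RX prefix, IP alignment and maximum
     * Ethernet framing overhead leaves an XDP MTU ceiling somewhat under
     * 3.8 KB. efx_change_mtu() below rejects anything larger while a
     * program is attached, and efx_xdp_setup_prog() refuses to attach if
     * the current MTU is already over the limit.
     */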
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
@@ -2288,6 +2428,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
if (rc)
return rc;
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
@@ -2489,8 +2637,65 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_udp_tunnel_add = efx_udp_tunnel_add,
.ndo_udp_tunnel_del = efx_udp_tunnel_del,
+ .ndo_xdp_xmit = efx_xdp_xmit,
+ .ndo_bpf = efx_xdp,
};
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+
+ if (efx->xdp_rxq_info_failed) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to bind XDP program due to previous failure of rxq_info\n");
+ return -EINVAL;
+ }
+
+ if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to configure XDP with MTU of %d (max: %d)\n",
+ efx->net_dev->mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ old_prog = rtnl_dereference(efx->xdp_prog);
+ rcu_assign_pointer(efx->xdp_prog, prog);
+ /* Release the reference that was originally passed by the caller. */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+ struct bpf_prog *xdp_prog;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return efx_xdp_setup_prog(efx, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp_prog = rtnl_dereference(efx->xdp_prog);
+ xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
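With the ndo_bpf and ndo_xdp_xmit hooks wired up, programs attach and detach through the usual netlink path, e.g. "ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp" to attach in native mode and "ip link set dev eth0 xdpdrv off" to detach (device and object names are placeholders); efx_xdp_setup_prog() then enforces the MTU and rxq_info preconditions at attach time.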
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 04fed7c06618..2dd8d5002315 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -166,15 +166,20 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
static inline void efx_filter_rfs_expire(struct work_struct *data)
{
- struct efx_channel *channel = container_of(data, struct efx_channel,
- filter_work);
-
- if (channel->rfs_filters_added >= 60 &&
- __efx_filter_rfs_expire(channel->efx, 100))
- channel->rfs_filters_added -= 60;
+ struct delayed_work *dwork = to_delayed_work(data);
+ struct efx_channel *channel;
+ unsigned int time, quota;
+
+ channel = container_of(dwork, struct efx_channel, filter_work);
+ time = jiffies - channel->rfs_last_expiry;
+ quota = channel->rfs_filter_count * time / (30 * HZ);
+ if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+ channel->rfs_last_expiry += time;
+ /* Ensure we do more work eventually even if NAPI poll is not happening */
+ schedule_delayed_work(dwork, 30 * HZ);
}
#define efx_filter_rfs_enabled() 1
#else
@@ -322,4 +327,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
return true;
}
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush);
+
#endif /* EFX_EFX_H */
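The quota formula makes the expiry pass self-scaling with table occupancy; a worked example (values invented for illustration):

    /* rfs_filter_count = 600, three seconds since the last pass:
     *   quota = 600 * (3 * HZ) / (30 * HZ) = 60 -> scan up to 60 filters.
     * With 200 filters at the same interval, quota = 20, which fails the
     * "quota > 20" test, so the pass is deferred until more time has
     * accumulated. Either way each filter is examined roughly once per 30
     * seconds, and the trailing schedule_delayed_work() guarantees forward
     * progress even when NAPI polling is idle.
     */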
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 86b965875540..b31032da4bcb 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -56,6 +56,9 @@ static u64 efx_get_atomic_stat(void *field)
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
EFX_ETHTOOL_STAT(field, channel, n_##field, \
unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+ EFX_ETHTOOL_STAT(field, channel, field, \
+ unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
@@ -83,6 +86,15 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+ EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
};
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
@@ -399,6 +411,19 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
}
}
}
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
return n_stats;
}
@@ -509,6 +534,14 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
data++;
}
}
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
efx_ptp_update_stats(efx, data);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 284a1b047ac2..1f88212be085 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
+#include <net/xdp.h>
#include "enum.h"
#include "bitfield.h"
@@ -136,7 +137,8 @@ struct efx_special_buffer {
* struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* freed when descriptor completes
- * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ * member is the associated buffer to drop a page reference on.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -146,7 +148,10 @@ struct efx_special_buffer {
* Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
- const struct sk_buff *skb;
+ union {
+ const struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
union {
efx_qword_t option;
dma_addr_t dma_addr;
@@ -160,6 +165,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -189,6 +195,7 @@ struct efx_tx_buffer {
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@@ -250,6 +257,7 @@ struct efx_tx_queue {
unsigned int piobuf_offset;
bool initialised;
bool timestamping;
+ bool xdp_tx;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -363,6 +371,8 @@ struct efx_rx_page_state {
* refill was triggered.
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -394,6 +404,8 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
+ struct xdp_rxq_info xdp_rxq_info;
+ bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -427,6 +439,13 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rfs_filter_count: number of accelerated RFS filters currently in place;
+ * equals the count of @rps_flow_id slots filled
+ * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters
+ * were checked for expiry
+ * @rfs_expire_index: next accelerated RFS filter ID to check for expiry
+ * @n_rfs_succeeded: number of successful accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
* @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -441,6 +460,10 @@ enum efx_sync_events_state {
* lack of descriptors
* @n_rx_merge_events: Number of RX merged completion events
* @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -473,8 +496,12 @@ struct efx_channel {
unsigned int irq_count;
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
- unsigned int rfs_filters_added;
- struct work_struct filter_work;
+ unsigned int rfs_filter_count;
+ unsigned int rfs_last_expiry;
+ unsigned int rfs_expire_index;
+ unsigned int n_rfs_succeeded;
+ unsigned int n_rfs_failed;
+ struct delayed_work filter_work;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
u32 *rps_flow_id;
#endif
@@ -494,6 +521,10 @@ struct efx_channel {
unsigned int n_rx_nodesc_trunc;
unsigned int n_rx_merge_events;
unsigned int n_rx_merge_packets;
+ unsigned int n_rx_xdp_drops;
+ unsigned int n_rx_xdp_bad_drops;
+ unsigned int n_rx_xdp_tx;
+ unsigned int n_rx_xdp_redirect;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -818,6 +849,8 @@ struct efx_async_filter_insertion {
* @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
+ * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -830,6 +863,9 @@ struct efx_async_filter_insertion {
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
* @n_extra_tx_channels: Number of extra channels with TX queues
+ * @n_xdp_channels: Number of channels used for XDP TX
+ * @xdp_channel_offset: Offset of zeroth channel used for XDP TX.
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
* @rx_ip_align: RX DMA address offset to have IP header aligned in
* in accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
@@ -894,12 +930,10 @@ struct efx_async_filter_insertion {
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
+ * @xdp_prog: Current XDP programme for this interface
* @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
* @filter_state: Architecture-dependent filter table state
* @rps_mutex: Protects RPS state of all channels
- * @rps_expire_channel: Next channel to check for expiry
- * @rps_expire_index: Next index to check for expiry in
- * @rps_expire_channel's @rps_flow_id
* @rps_slot_map: bitmap of in-flight entries in @rps_slot
* @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
* @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
@@ -919,6 +953,8 @@ struct efx_async_filter_insertion {
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
+ * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
+ * xdp_rxq_info structures?
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -966,6 +1002,9 @@ struct efx_nic {
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+ unsigned int xdp_tx_queue_count;
+ struct efx_tx_queue **xdp_tx_queues;
+
unsigned rxq_entries;
unsigned txq_entries;
unsigned int txq_stop_thresh;
@@ -984,6 +1023,9 @@ struct efx_nic {
unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned n_extra_tx_channels;
+ unsigned int n_xdp_channels;
+ unsigned int xdp_channel_offset;
+ unsigned int xdp_tx_per_channel;
unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
@@ -1053,13 +1095,15 @@ struct efx_nic {
u64 loopback_modes;
void *loopback_selftest;
+ /* We access loopback_selftest immediately before running XDP,
+ * so we want them next to each other.
+ */
+ struct bpf_prog __rcu *xdp_prog;
struct rw_semaphore filter_sem;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
struct mutex rps_mutex;
- unsigned int rps_expire_channel;
- unsigned int rps_expire_index;
unsigned long rps_slot_map;
struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
spinlock_t rps_hash_lock;
@@ -1082,6 +1126,7 @@ struct efx_nic {
bool ptp_warned;
char *vpd_sn;
+ bool xdp_rxq_info_failed;
/* The following fields may be written more often */
@@ -1473,10 +1518,24 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+ return efx->channel[efx->xdp_channel_offset + index];
+}
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->xdp_channel_offset <
+ channel->efx->n_xdp_channels;
+}
+
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return channel->type && channel->type->want_txqs &&
- channel->type->want_txqs(channel);
+ return efx_channel_is_xdp_tx(channel) ||
+ (channel->type && channel->type->want_txqs &&
+ channel->type->want_txqs(channel));
}
static inline struct efx_tx_queue *
@@ -1500,7 +1559,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
else \
for (_tx_queue = (_channel)->tx_queue; \
_tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
- efx_tx_queue_used(_tx_queue); \
+ (efx_tx_queue_used(_tx_queue) || \
+ efx_channel_is_xdp_tx(_channel)); \
_tx_queue++)
/* Iterate over all possible TX queues belonging to a channel */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 02ed6d1b716c..af15a737c675 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx)
(void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
- cancel_work_sync(&efx->ptp_data->pps_work);
+ if (efx->ptp_data->pps_workwq)
+ cancel_work_sync(&efx->ptp_data->pps_work);
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 85ec07f5a674..ef52b24ad9e7 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -17,6 +17,8 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
@@ -27,6 +29,9 @@
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
@@ -95,7 +100,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- efx->rx_page_buf_step);
+ (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
efx->rx_bufs_per_page;
efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -185,6 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
page_offset = sizeof(struct efx_rx_page_state);
do {
+ page_offset += XDP_PACKET_HEADROOM;
+ dma_addr += XDP_PACKET_HEADROOM;
+
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
@@ -635,6 +643,126 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
netif_receive_skb(skb);
}
+/** efx_do_xdp: perform XDP processing on a received packet
+ *
+ * Returns true if the packet should still be delivered.
+ */
+static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+ u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+ struct efx_rx_queue *rx_queue;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 xdp_act;
+ s16 offset;
+ int err;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(efx->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return true;
+ }
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(channel->rx_pkt_n_frags > 1)) {
+ /* We can't do XDP on fragmented packets - drop. */
+ rcu_read_unlock();
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP is not possible with multiple receive fragments (%d)\n",
+ channel->rx_pkt_n_frags);
+ channel->n_rx_xdp_bad_drops++;
+ return false;
+ }
+
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
+
+ /* Save the rx prefix. */
+ EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+ memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+ efx->rx_prefix_size);
+
+ xdp.data = *ehp;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+
+ /* No support yet for XDP metadata */
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + rx_buf->len;
+ xdp.rxq = &rx_queue->xdp_rxq_info;
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ offset = (u8 *)xdp.data - *ehp;
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ /* Fix up rx prefix. */
+ if (offset) {
+ *ehp += offset;
+ rx_buf->page_offset += offset;
+ rx_buf->len -= offset;
+ memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+ efx->rx_prefix_size);
+ }
+ break;
+
+ case XDP_TX:
+ /* Buffer ownership passes to tx on success. */
+ xdpf = convert_to_xdp_frame(&xdp);
+ err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+ if (unlikely(err != 1)) {
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP TX failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ } else {
+ channel->n_rx_xdp_tx++;
+ }
+ break;
+
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
+ if (unlikely(err)) {
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP redirect failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ } else {
+ channel->n_rx_xdp_redirect++;
+ }
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ /* Fall through */
+ case XDP_DROP:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_drops++;
+ break;
+ }
+
+ return xdp_act == XDP_PASS;
+}
+
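The rx-prefix save/restore above exists because a program may legitimately move the packet start, e.g. with bpf_xdp_adjust_head(); efx_do_xdp() then shifts the buffer bookkeeping by the resulting offset and copies the hardware prefix back in front of the new start on XDP_PASS. A hypothetical BPF program exercising that path (sketch only; section and function names are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_strip_four(struct xdp_md *ctx)
    {
            /* Consume 4 bytes from the head; the driver sees a non-zero
             * offset and fixes up rx_buf and the RX prefix. */
            if (bpf_xdp_adjust_head(ctx, 4))
                    return XDP_ABORTED;
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";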
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
@@ -663,6 +791,9 @@ void __efx_rx_packet(struct efx_channel *channel)
goto out;
}
+ if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+ goto out;
+
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
@@ -731,6 +862,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, max_trigger;
+ int rc = 0;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -764,6 +896,19 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->fast_fill_trigger = trigger;
rx_queue->refill_enabled = true;
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
/* Set up RX descriptor ring */
efx_nic_init_rx(rx_queue);
}
@@ -805,6 +950,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
}
kfree(rx_queue->page_ring);
rx_queue->page_ring = NULL;
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -838,6 +988,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
rc = efx->type->filter_insert(efx, &req->spec, true);
if (rc >= 0)
+ /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
rc %= efx->type->max_rx_ip_filters;
if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock);
@@ -862,8 +1013,9 @@ static void efx_filter_rfs_work(struct work_struct *data)
* later.
*/
mutex_lock(&efx->rps_mutex);
+ if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
+ channel->rfs_filter_count++;
channel->rps_flow_id[rc] = req->flow_id;
- ++channel->rfs_filters_added;
mutex_unlock(&efx->rps_mutex);
if (req->spec.ether_type == htons(ETH_P_IP))
@@ -880,6 +1032,28 @@ static void efx_filter_rfs_work(struct work_struct *data)
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc, arfs_id);
+ channel->n_rfs_succeeded++;
+ } else {
+ if (req->spec.ether_type == htons(ETH_P_IP))
+ netif_dbg(efx, rx_status, efx->net_dev,
+ "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ else
+ netif_dbg(efx, rx_status, efx->net_dev,
+ "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ channel->n_rfs_failed++;
+ /* We're overloading the NIC's filter tables, so let's do a
+ * chunk of extra expiry work.
+ */
+ __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
+ 100u));
}
/* Release references */
@@ -989,38 +1163,44 @@ out_clear:
return rc;
}
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int channel_idx, index, size;
+ struct efx_nic *efx = channel->efx;
+ unsigned int index, size, start;
u32 flow_id;
if (!mutex_trylock(&efx->rps_mutex))
return false;
expire_one = efx->type->filter_rfs_expire_one;
- channel_idx = efx->rps_expire_channel;
- index = efx->rps_expire_index;
+ index = channel->rfs_expire_index;
+ start = index;
size = efx->type->max_rx_ip_filters;
- while (quota--) {
- struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ while (quota) {
flow_id = channel->rps_flow_id[index];
- if (flow_id != RPS_FLOW_ID_INVALID &&
- expire_one(efx, flow_id, index)) {
- netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [queue %u flow %u]\n",
- index, channel_idx, flow_id);
- channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ if (flow_id != RPS_FLOW_ID_INVALID) {
+ quota--;
+ if (expire_one(efx, flow_id, index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [channel %u flow %u]\n",
+ index, channel->channel, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ channel->rfs_filter_count--;
+ }
}
- if (++index == size) {
- if (++channel_idx == efx->n_channels)
- channel_idx = 0;
+ if (++index == size)
index = 0;
- }
+ /* If we were called with a quota that exceeds the total number
+ * of filters in the table (which shouldn't happen, but could
+ * if two callers race), ensure that we don't loop forever -
+ * stop when we've examined every row of the table.
+ */
+ if (index == start)
+ break;
}
- efx->rps_expire_channel = channel_idx;
- efx->rps_expire_index = index;
+ channel->rfs_expire_index = index;
mutex_unlock(&efx->rps_mutex);
return true;
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 65e81ec1b314..00c1c4402451 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -95,6 +95,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
}
buffer->len = 0;
@@ -597,6 +599,94 @@ err:
return NETDEV_TX_OK;
}
+static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ xdp_return_frame_rx_napi(xdpfs[i]);
+}
+
+/* Transmit packets from XDP buffers
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
+ * (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush)
+{
+ struct efx_tx_buffer *tx_buffer;
+ struct efx_tx_queue *tx_queue;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ unsigned int len;
+ int space;
+ int cpu;
+ int i;
+
+ cpu = raw_smp_processor_id();
+
+ if (!efx->xdp_tx_queue_count ||
+ unlikely(cpu >= efx->xdp_tx_queue_count))
+ return -EINVAL;
+
+ tx_queue = efx->xdp_tx_queues[cpu];
+ if (unlikely(!tx_queue))
+ return -EINVAL;
+
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+
+ if (!n)
+ return 0;
+
+ /* Check for available space. We should never need multiple
+ * descriptors per frame.
+ */
+ space = efx->txq_entries +
+ tx_queue->read_count - tx_queue->insert_count;
+
+ for (i = 0; i < n; i++) {
+ xdpf = xdpfs[i];
+
+ if (i >= space)
+ break;
+
+ /* We'll want a descriptor for this tx. */
+ prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+ len = xdpf->len;
+
+ /* Map for DMA. */
+ dma_addr = dma_map_single(&efx->pci_dev->dev,
+ xdpf->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+ break;
+
+ /* Create descriptor and set up for unmapping DMA. */
+ tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+ tx_buffer->xdpf = xdpf;
+ tx_buffer->flags = EFX_TX_BUF_XDP |
+ EFX_TX_BUF_MAP_SINGLE;
+ tx_buffer->dma_offset = 0;
+ tx_buffer->unmap_len = len;
+ tx_queue->tx_packets++;
+ }
+
+ /* Pass mapped frames to hardware. */
+ if (flush && i > 0)
+ efx_nic_push_buffers(tx_queue);
+
+ if (i == 0)
+ return -EIO;
+
+ efx_xdp_return_frames(n - i, xdpfs + i);
+
+ return i;
+}
+
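The free-space check uses the standard sfc ring accounting; a worked example (numbers invented):

    /* txq_entries = 512, read_count = 100, insert_count = 600:
     *   space = 512 + 100 - 600 = 12 free descriptors.
     * Each XDP frame needs exactly one descriptor, so at most 12 of the n
     * frames are mapped and pushed; the rest are returned immediately via
     * efx_xdp_return_frames() and the caller sees the short count i. The
     * per-CPU queue lookup above is what makes this path lock-free: each
     * CPU only ever touches efx->xdp_tx_queues[raw_smp_processor_id()].
     */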
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
@@ -857,6 +947,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
/* Set up default function pointers. These may get replaced by
* efx_nic_init_tx() based off NIC/queue capabilities.
*/
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index deb636d653f3..d242906ae233 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -48,7 +48,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
-#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -89,6 +89,7 @@ struct ioc3_private {
struct device *dma_dev;
u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
+ void *tx_ring;
struct ioc3_etxd *txr;
dma_addr_t rxr_dma;
dma_addr_t txr_dma;
@@ -1173,26 +1174,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ioc3 *ioc3;
unsigned long ioc3_base, ioc3_size;
u32 vendor, model, rev;
- int err, pci_using_dac;
+ int err;
/* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err < 0) {
- pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
- pci_name(pdev));
- goto out;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("%s: No usable DMA configuration, aborting.\n",
- pci_name(pdev));
- goto out;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
+ goto out;
}
if (pci_enable_device(pdev))
@@ -1204,9 +1193,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable;
}
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
-
err = pci_request_regions(pdev, "ioc3");
if (err)
goto out_free;
@@ -1242,8 +1228,8 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_stop(ip);
/* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
- ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
- &ip->rxr_dma, GFP_ATOMIC, 0);
+ ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
+ GFP_KERNEL);
if (!ip->rxr) {
pr_err("ioc3-eth: rx ring allocation failed\n");
err = -ENOMEM;
@@ -1251,14 +1237,16 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
- ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
- &ip->txr_dma,
- GFP_KERNEL | __GFP_ZERO, 0);
- if (!ip->txr) {
+ ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
+ &ip->txr_dma, GFP_KERNEL);
+ if (!ip->tx_ring) {
pr_err("ioc3-eth: tx ring allocation failed\n");
err = -ENOMEM;
goto out_stop;
}
+ /* Align TX ring */
+ ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
+ ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);
ioc3_init(dev);
@@ -1288,7 +1276,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &ioc3_netdev_ops;
dev->ethtool_ops = &ioc3_ethtool_ops;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
@@ -1313,11 +1301,11 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_stop:
del_timer_sync(&ip->ioc3_timer);
if (ip->rxr)
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- if (ip->txr)
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma);
+ if (ip->tx_ring)
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
+ ip->txr_dma);
out_res:
pci_release_regions(pdev);
out_free:
@@ -1335,10 +1323,8 @@ static void ioc3_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
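The 16 KiB alignment of the TX ring uses the classic over-allocate-then-align idiom. In isolation it looks like the sketch below (hedged: dev and the local names stand in for the driver's own fields; note that dma_free_coherent() must be given the same size and the original, unaligned pointer and handle that dma_alloc_coherent() returned):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    void *raw;              /* unaligned CPU pointer, kept for freeing */
    dma_addr_t raw_dma;     /* unaligned DMA handle, kept for freeing */
    struct ioc3_etxd *txr;
    dma_addr_t txr_dma;

    raw = dma_alloc_coherent(dev, TX_RING_SIZE + SZ_16K - 1, &raw_dma,
                             GFP_KERNEL);
    if (!raw)
            return -ENOMEM;
    txr = PTR_ALIGN(raw, SZ_16K);       /* CPU view of the ring */
    txr_dma = ALIGN(raw_dma, SZ_16K);   /* device view, same rounding */
    /* ... use txr/txr_dma ... */
    dma_free_coherent(dev, TX_RING_SIZE + SZ_16K - 1, raw, raw_dma);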
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f9e6744d8fd6..869a498e3b5e 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -252,7 +252,6 @@
#define NETSEC_XDP_CONSUMED BIT(0)
#define NETSEC_XDP_TX BIT(1)
#define NETSEC_XDP_REDIR BIT(2)
-#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
enum ring_id {
NETSEC_RING_TX = 0,
@@ -661,6 +660,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
} else {
+ bytes += desc->xdpf->len;
xdp_return_frame(desc->xdpf);
}
next:
@@ -847,8 +847,8 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
enum dma_data_direction dma_dir =
page_pool_get_dma_dir(rx_ring->page_pool);
- dma_handle = page_pool_get_dma_addr(page) +
- NETSEC_RXBUF_HEADROOM;
+ dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
+ sizeof(*xdpf);
dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
dma_dir);
tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
@@ -858,6 +858,7 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
tx_desc.addr = xdpf->data;
tx_desc.len = xdpf->len;
+ netdev_sent_queue(priv->ndev, xdpf->len);
netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
return NETSEC_XDP_TX;
@@ -1030,7 +1031,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
next:
if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result & NETSEC_XDP_RX_OK) {
+ xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
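The DMA-offset fix above follows from where convert_to_xdp_frame() puts its metadata: struct xdp_frame is written at the very start of the buffer (the old data_hard_start), with the leftover headroom recorded in xdpf->headroom. The layout the new computation assumes, roughly:

    /* page-pool buffer, as seen by netsec_xdp_queue_one():
     *
     *   base --> struct xdp_frame     (sizeof(*xdpf) bytes)
     *            leftover headroom    (xdpf->headroom bytes)
     *            packet data          (xdpf->len bytes)  <-- DMA sync target
     *
     * hence dma = page_pool_get_dma_addr(page) + xdpf->headroom
     *             + sizeof(*xdpf); the fixed NETSEC_RXBUF_HEADROOM would be
     * wrong whenever a program has adjusted the packet head.
     */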
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6e984d5a729f..f7e927ad67fa 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1565,10 +1565,10 @@ static int ave_probe(struct platform_device *pdev)
return -EINVAL;
np = dev->of_node;
- phy_mode = of_get_phy_mode(np);
- if ((int)phy_mode < 0) {
+ ret = of_get_phy_mode(np, &phy_mode);
+ if (ret) {
dev_err(dev, "phy-mode not found\n");
- return -EINVAL;
+ return ret;
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 912bbb6515b2..b210e987a1db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -248,12 +248,13 @@ struct stmmac_safety_stats {
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x10
+#define DEF_DMA_RIWT 0xa0
/* Tx coalesce parameters */
#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 1
-#define STMMAC_RX_FRAMES 25
+#define STMMAC_TX_FRAMES 25
+#define STMMAC_RX_FRAMES 0
/* Packets types */
enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 527f93320a5a..d0d2d0fc5f0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -61,9 +61,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
- int phy_mode;
- void __iomem *ctl_block;
struct anarion_gmac *gmac;
+ phy_interface_t phy_mode;
+ void __iomem *ctl_block;
+ int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
@@ -78,7 +79,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = (uintptr_t)ctl_block;
- phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
+ if (err)
+ return ERR_PTR(err);
+
switch (phy_mode) {
case PHY_INTERFACE_MODE_RGMII: /* Fall through */
case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 0d21082ceb93..6ae13dc19510 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -189,9 +189,10 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
{
struct device *dev = &gmac->pdev->dev;
+ int ret;
- gmac->phy_mode = of_get_phy_mode(dev->of_node);
- if ((int)gmac->phy_mode < 0) {
+ ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
+ if (ret) {
dev_err(dev, "missing phy mode property\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 79f2ee37afed..bdb80421acac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -54,7 +54,7 @@ struct mediatek_dwmac_plat_data {
struct device_node *np;
struct regmap *peri_regmap;
struct device *dev;
- int phy_mode;
+ phy_interface_t phy_mode;
bool rmii_rxc;
};
@@ -130,6 +130,31 @@ static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
}
}
+static void mt2712_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay *= 550;
+ mac_delay->rx_delay *= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay *= 170;
+ mac_delay->rx_delay *= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -199,6 +224,8 @@ static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val);
regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val);
+ mt2712_delay_stage2ps(plat);
+
return 0;
}
@@ -216,6 +243,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
+ int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -223,10 +251,10 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- plat->phy_mode = of_get_phy_mode(plat->np);
- if (plat->phy_mode < 0) {
+ err = of_get_phy_mode(plat->np, &plat->phy_mode);
+ if (err) {
dev_err(plat->dev, "not find phy-mode\n");
- return -EINVAL;
+ return err;
}
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 306da8f6b7d5..bd6c01004913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -338,10 +338,9 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
}
dwmac->dev = &pdev->dev;
- dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)dwmac->phy_mode < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
+ if (ret) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- ret = -EINVAL;
goto err_remove_config_dt;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index e2e469c37a4d..dc50ba13a746 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -37,7 +37,7 @@ struct rk_gmac_ops {
struct rk_priv_data {
struct platform_device *pdev;
- int phy_iface;
+ phy_interface_t phy_iface;
struct regulator *regulator;
bool suspended;
const struct rk_gmac_ops *ops;
@@ -1224,7 +1224,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+ of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
bsp_priv->ops = ops;
bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index e9fd661f7995..e1b63df6f96f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -116,7 +116,7 @@
#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
- int interface; /* MII interface */
+ phy_interface_t interface; /* MII interface */
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK Retiming*/
struct clk *clk; /* PHY clock */
@@ -269,7 +269,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return err;
}
- dwmac->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &dwmac->interface);
+ if (err && err != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ return err;
+ }
+
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 4ef041bdf6a1..9b7be996d07b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -155,18 +155,14 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
return ret;
-
- if (dwmac->clk_eth_ck) {
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
- }
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->clk_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
}
@@ -175,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val, ret;
+ int val;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
@@ -211,8 +207,8 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
}
/* Need to update PMCCLRR (clear register) */
- ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
@@ -320,12 +316,10 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return PTR_ERR(dwmac->clk_ethstp);
}
- /* Clock for sysconfig */
+ /* Optional Clock for sysconfig */
dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk");
- if (IS_ERR(dwmac->syscfg_clk)) {
- dev_err(dev, "No syscfg clock provided...\n");
- return PTR_ERR(dwmac->syscfg_clk);
- }
+ if (IS_ERR(dwmac->syscfg_clk))
+ dwmac->syscfg_clk = NULL;
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
@@ -437,8 +431,7 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->clk_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
}
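The guards dropped in this stm32 hunk rely on the common clock framework convention that a NULL struct clk pointer is a valid no-op handle: clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) does nothing, so once the optional clock is normalized to NULL the call sites need no conditionals. A minimal model of that convention:

#include <stdio.h>

struct clk {
	const char *name;
	int enable_count;
};

/* NULL is a valid "no clock" handle, as in the kernel's clk API. */
static int clk_prepare_enable_sketch(struct clk *clk)
{
	if (!clk)
		return 0;
	clk->enable_count++;
	printf("%s on (count=%d)\n", clk->name, clk->enable_count);
	return 0;
}

static void clk_disable_unprepare_sketch(struct clk *clk)
{
	if (!clk)
		return;
	clk->enable_count--;
	printf("%s off (count=%d)\n", clk->name, clk->enable_count);
}

int main(void)
{
	struct clk eth_ck = { "clk_eth_ck", 0 };
	struct clk *missing = NULL;	/* optional clock not provided */

	clk_prepare_enable_sketch(&eth_ck);
	clk_prepare_enable_sketch(missing);	/* harmless no-op */
	clk_disable_unprepare_sketch(&eth_ck);
	clk_disable_unprepare_sketch(missing);
	return 0;
}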
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index ddcc191febdb..1c8d84ed8410 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1105,6 +1105,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
+ phy_interface_t interface;
int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
@@ -1178,10 +1179,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node);
- if (ret < 0)
+ ret = of_get_phy_mode(dev->of_node, &interface);
+ if (ret)
return -EINVAL;
- plat_dat->interface = ret;
+ plat_dat->interface = interface;
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
@@ -1226,7 +1227,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
dwmac_mux:
sun8i_dwmac_unset_syscon(gmac);
dwmac_exit:
- sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+ stmmac_pltfr_remove(pdev);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index a299da3971b4..26353ef616b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,7 @@
#include "stmmac_platform.h"
struct sunxi_priv_data {
- int interface;
+ phy_interface_t interface;
int clk_enabled;
struct clk *tx_clk;
struct regulator *regulator;
@@ -118,7 +118,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- gmac->interface = of_get_phy_mode(dev->of_node);
+ ret = of_get_phy_mode(dev->of_node, &gmac->interface);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ goto err_remove_config_dt;
+ }
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3d69da112625..d0356fbd1e43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -130,7 +130,6 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW);
writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH);
return;
- break;
case 7:
numhashregs = 4;
break;
@@ -140,7 +139,6 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
default:
pr_debug("STMMAC: err in setting multicast filter\n");
return;
- break;
}
for (regs = 0; regs < numhashregs; regs++)
writel(mcfilterbits[regs],
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 89a3420eba42..2dc70d104161 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -14,6 +14,7 @@
/* MAC registers */
#define GMAC_CONFIG 0x00000000
+#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
#define GMAC_VLAN_TAG 0x00000050
@@ -43,6 +44,10 @@
#define GMAC_ARP_ADDR 0x00000210
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
+#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30)
+#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30)
+#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30)
/* RX Queues Routing */
#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
@@ -67,6 +72,7 @@
#define GMAC_PACKET_FILTER_PCF BIT(7)
#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_PACKET_FILTER_VTFE BIT(16)
+#define GMAC_PACKET_FILTER_IPFE BIT(20)
#define GMAC_MAX_PERFECT_ADDRESSES 128
@@ -183,6 +189,11 @@ enum power_event {
#define GMAC_CONFIG_TE BIT(1)
#define GMAC_CONFIG_RE BIT(0)
+/* MAC extended config */
+#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
+#define GMAC_CONFIG_HDSMS_SHIFT 20
+#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+
/* MAC HW features0 bitmap */
#define GMAC_HW_FEAT_SAVLANINS BIT(27)
#define GMAC_HW_FEAT_ADDMAC BIT(18)
@@ -202,9 +213,12 @@ enum power_event {
#define GMAC_HW_FEAT_MIISEL BIT(0)
/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_L3L4FNUM GENMASK(30, 27)
#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_FEAT_SPHEN BIT(17)
+#define GMAC_HW_ADDR64 GENMASK(15, 14)
#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
@@ -227,6 +241,21 @@ enum power_event {
#define GMAC_HI_DCS_SHIFT 16
#define GMAC_HI_REG_AE BIT(31)
+/* L3/L4 Filters regs */
+#define GMAC_L4DPIM0 BIT(21)
+#define GMAC_L4DPM0 BIT(20)
+#define GMAC_L4SPIM0 BIT(19)
+#define GMAC_L4SPM0 BIT(18)
+#define GMAC_L4PEN0 BIT(16)
+#define GMAC_L3DAIM0 BIT(5)
+#define GMAC_L3DAM0 BIT(4)
+#define GMAC_L3SAIM0 BIT(3)
+#define GMAC_L3SAM0 BIT(2)
+#define GMAC_L3PEN0 BIT(0)
+#define GMAC_L4DP0 GENMASK(31, 16)
+#define GMAC_L4DP0_SHIFT 16
+#define GMAC_L4SP0 GENMASK(15, 0)
+
/* MTL registers */
#define MTL_OPERATION_MODE 0x00000c00
#define MTL_FRPE BIT(15)
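The new L3/L4 filter registers are laid out as one 0x30-byte block per filter starting at 0x900, which is what the (reg) * 0x30 stride in the macros encodes. A quick host-side expansion of the layout (GENMASK rewritten for plain C, offsets copied from the header above):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

#define GMAC_L3L4_CTRL(reg)	(0x900 + (reg) * 0x30)
#define GMAC_L4_ADDR(reg)	(0x904 + (reg) * 0x30)
#define GMAC_L3_ADDR0(reg)	(0x910 + (reg) * 0x30)
#define GMAC_L3_ADDR1(reg)	(0x914 + (reg) * 0x30)

int main(void)
{
	for (int n = 0; n < 4; n++)
		printf("filter %d: ctrl=0x%03x l4=0x%03x l3.0=0x%03x l3.1=0x%03x\n",
		       n, GMAC_L3L4_CTRL(n), GMAC_L4_ADDR(n),
		       GMAC_L3_ADDR0(n), GMAC_L3_ADDR1(n));
	printf("L4DP0=0x%08x L4SP0=0x%08x\n", GENMASK(31, 16), GENMASK(15, 0));
	return 0;
}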
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 66e60c7e9850..40ca00e596dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -733,7 +733,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- bool is_double)
+ __le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -748,6 +748,16 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
}
writel(value, ioaddr + GMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = GMAC_VLAN_ETV;
+
+ if (is_double) {
+ value |= GMAC_VLAN_EDVLP;
+ value |= GMAC_VLAN_ESVL;
+ value |= GMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + GMAC_VLAN_TAG);
@@ -799,6 +809,106 @@ static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + GMAC_CONFIG);
}
+static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ /* For IPv6, the SA and DA filters cannot both be active */
+ if (ipv6) {
+ value |= GMAC_L3PEN0;
+ value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
+ value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ } else {
+ value &= ~GMAC_L3PEN0;
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa) {
+ writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
+ } else {
+ writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
+ }
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
+static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+ if (udp) {
+ value |= GMAC_L4PEN0;
+ } else {
+ value &= ~GMAC_L4PEN0;
+ }
+
+ value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
+ value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
+ if (sa) {
+ value |= GMAC_L4SPM0;
+ if (inv)
+ value |= GMAC_L4SPIM0;
+ } else {
+ value |= GMAC_L4DPM0;
+ if (inv)
+ value |= GMAC_L4DPIM0;
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa) {
+ value = match & GMAC_L4SP0;
+ } else {
+ value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
+ }
+
+ writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
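dwmac4_config_l4_filter() above packs the whole per-filter policy into one control word (protocol select, source/destination match enable, optional inverse match) plus a port-number register. The bit packing, extracted into a standalone sketch using the constants from the dwmac4.h hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GMAC_L4DPIM0	(1U << 21)	/* dest port inverse match */
#define GMAC_L4DPM0	(1U << 20)	/* dest port match enable */
#define GMAC_L4SPIM0	(1U << 19)	/* source port inverse match */
#define GMAC_L4SPM0	(1U << 18)	/* source port match enable */
#define GMAC_L4PEN0	(1U << 16)	/* match UDP instead of TCP */
#define GMAC_L4DP0_SHIFT	16

static uint32_t l4_ctrl(bool udp, bool sa, bool inv)
{
	uint32_t v = udp ? GMAC_L4PEN0 : 0;

	if (sa) {
		v |= GMAC_L4SPM0;
		if (inv)
			v |= GMAC_L4SPIM0;
	} else {
		v |= GMAC_L4DPM0;
		if (inv)
			v |= GMAC_L4DPIM0;
	}
	return v;
}

static uint32_t l4_port(bool sa, uint16_t match)
{
	/* source port lives in [15:0], destination port in [31:16] */
	return sa ? match : (uint32_t)match << GMAC_L4DP0_SHIFT;
}

int main(void)
{
	printf("match UDP dport 80: ctrl=0x%08x port=0x%08x\n",
	       l4_ctrl(true, false, false), l4_port(false, 80));
	return 0;
}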
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
@@ -828,11 +938,14 @@ const struct stmmac_ops dwmac4_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
const struct stmmac_ops dwmac410_ops = {
@@ -869,6 +982,8 @@ const struct stmmac_ops dwmac410_ops = {
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
const struct stmmac_ops dwmac510_ops = {
@@ -910,6 +1025,8 @@ const struct stmmac_ops dwmac510_ops = {
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 15eb1abba91d..3e14da69f378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -83,9 +83,10 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes3 & RDES3_OWN))
return dma_own;
- /* Verify rx error by looking at the last segment. */
- if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
return discard_frame;
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return rx_not_ls;
if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
if (unlikely(rdes3 & RDES3_GIANT_PACKET))
@@ -188,7 +189,7 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+ p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
@@ -431,8 +432,8 @@ static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
- p->des0 = cpu_to_le32(addr);
- p->des1 = 0;
+ p->des0 = cpu_to_le32(lower_32_bits(addr));
+ p->des1 = cpu_to_le32(upper_32_bits(addr));
}
static void dwmac4_clear(struct dma_desc *p)
@@ -492,6 +493,18 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}
+static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & RDES2_HL;
+ return 0;
+}
+
+static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -519,6 +532,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_sarc = dwmac4_set_sarc,
.set_vlan_tag = dwmac4_set_vlan_tag,
.set_vlan = dwmac4_set_vlan,
+ .get_rx_header_len = dwmac4_get_rx_header_len,
+ .set_sec_addr = dwmac4_set_sec_addr,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
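The descriptor changes above are what let GMAC4 address more than 32 bits: dwmac4_set_addr() now splits the DMA address across des0 (low word) and des1 (high word) instead of zeroing des1. The split itself is just two shifts, modeled here (the cpu_to_le32 byte-order conversion is omitted in this host-side sketch):

#include <stdint.h>
#include <stdio.h>

struct dma_desc {
	uint32_t des0, des1, des2, des3;
};

static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Model of the new dwmac4_set_addr(). */
static void set_addr(struct dma_desc *p, uint64_t addr)
{
	p->des0 = lower_32_bits(addr);
	p->des1 = upper_32_bits(addr);
}

int main(void)
{
	struct dma_desc d = { 0 };

	set_addr(&d, 0x123456789aULL);	/* a 40-bit bus address */
	printf("des0=0x%08x des1=0x%08x\n", d.des0, d.des1);
	return 0;
}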
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0d7b3bbcd5a7..6d92109dc9aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -109,6 +109,7 @@
#define RDES2_L4_FILTER_MATCH BIT(28)
#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+#define RDES2_HL GENMASK(9, 0)
/* RDES3 (write back format) */
#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 68c157979b94..c15409030710 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -79,6 +79,10 @@ static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_rx_phy),
+ ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));
+
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}
@@ -97,6 +101,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_tx_phy),
+ ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));
+
writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
@@ -132,6 +140,9 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= DMA_SYS_BUS_AAL;
+ if (dma_cfg->eame)
+ value |= DMA_SYS_BUS_EAME;
+
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
@@ -241,19 +252,9 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
rfa = 0x01; /* Full-1.5K */
break;
- case 8192:
- rfd = 0x06; /* Full-4K */
- rfa = 0x0a; /* Full-6K */
- break;
-
- case 16384:
- rfd = 0x06; /* Full-4K */
- rfa = 0x12; /* Full-10K */
- break;
-
default:
- rfd = 0x06; /* Full-4K */
- rfa = 0x1e; /* Full-16K */
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
break;
}
@@ -353,9 +354,28 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
+
+ dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
+ switch (dma_cap->addr64) {
+ case 0:
+ dma_cap->addr64 = 32;
+ break;
+ case 1:
+ dma_cap->addr64 = 40;
+ break;
+ case 2:
+ dma_cap->addr64 = 48;
+ break;
+ default:
+ dma_cap->addr64 = 32;
+ break;
+ }
+
/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
* shifting and store the sizes in bytes.
*/
@@ -431,6 +451,22 @@ static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
}
+static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
+
+ value &= ~GMAC_CONFIG_HDSMS;
+ value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + GMAC_EXT_CONFIG);
+
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (en)
+ value |= DMA_CONTROL_SPH;
+ else
+ value &= ~DMA_CONTROL_SPH;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -457,6 +493,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
};
const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -485,4 +522,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
};
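Two related pieces land in dwmac4_dma.c: the EAME bit and the _HI base-address writes are used only when dma_cfg->eame is set, and dwmac4_get_hw_feature() translates the 2-bit ADDR64 field of HW_FEATURE1 into an address width in bits. The field decode on its own:

#include <stdint.h>
#include <stdio.h>

/* Decode HW_FEATURE1[15:14] into an addressing width, mirroring the
 * switch added in dwmac4_get_hw_feature(). */
static int decode_addr64(uint32_t hw_cap)
{
	switch ((hw_cap >> 14) & 0x3) {
	case 1:
		return 40;
	case 2:
		return 48;
	default:
		return 32;	/* 0 and reserved values fall back to 32 */
	}
}

int main(void)
{
	for (uint32_t f = 0; f < 4; f++)
		printf("field %u -> %d-bit addressing\n", f,
		       decode_addr64(f << 14));
	return 0;
}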
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index b66da0237d2a..589931795847 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -65,6 +65,7 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_EAME BIT(11)
#define DMA_AXI_BLEN256 BIT(7)
#define DMA_AXI_BLEN128 BIT(6)
#define DMA_AXI_BLEN64 BIT(5)
@@ -91,7 +92,9 @@
#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18)
#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
@@ -107,6 +110,7 @@
#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
/* DMA Control X */
+#define DMA_CONTROL_SPH BIT(24)
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 775db776b3cc..23fecf68f781 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 99037386080a..3b6e559aa0b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
* stmmac XGMAC definitions.
@@ -360,7 +360,7 @@
#define XGMAC_TBUE BIT(2)
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
- XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE)
+ XGMAC_RIE | XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 070bd7d1ae4c..082f5ee9e525 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -556,7 +556,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
}
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- bool is_double)
+ __le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -577,6 +577,21 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
}
writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index f70ca5300b82..22a7f0cc1b90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -27,7 +27,10 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= XGMAC_AAL;
- writel(value | XGMAC_EAME, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ if (dma_cfg->eame)
+ value |= XGMAC_EAME;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
@@ -180,19 +183,9 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
rfa = 0x01; /* Full-1.5K */
break;
- case 8192:
- rfd = 0x06; /* Full-4K */
- rfa = 0x0a; /* Full-6K */
- break;
-
- case 16384:
- rfd = 0x06; /* Full-4K */
- rfa = 0x12; /* Full-10K */
- break;
-
default:
- rfd = 0x06; /* Full-4K */
- rfa = 0x1e; /* Full-16K */
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ddb851d99618..aa5b917398fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
// stmmac HW Interface Callbacks
@@ -357,7 +357,7 @@ struct stmmac_ops {
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- bool is_double);
+ __le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f826365c979d..644cb5d1fd4f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -36,6 +36,7 @@
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
+#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -867,10 +868,10 @@ static void stmmac_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int stmmac_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void stmmac_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
- return -EOPNOTSUPP;
+ state->link = 0;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -964,7 +965,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.validate = stmmac_validate,
- .mac_link_state = stmmac_mac_link_state,
+ .mac_pcs_get_state = stmmac_mac_pcs_get_state,
.mac_config = stmmac_mac_config,
.mac_an_restart = stmmac_mac_an_restart,
.mac_link_down = stmmac_mac_link_down,
@@ -1502,10 +1503,8 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
rx_q->dma_erx, rx_q->dma_rx_phy);
kfree(rx_q->buf_pool);
- if (rx_q->page_pool) {
- page_pool_request_shutdown(rx_q->page_pool);
+ if (rx_q->page_pool)
page_pool_destroy(rx_q->page_pool);
- }
}
}
@@ -2604,9 +2603,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
- if (!ret)
- priv->rx_riwt = MIN_DMA_RIWT;
+ if (!priv->rx_riwt)
+ priv->rx_riwt = DEF_DMA_RIWT;
+
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
}
if (priv->hw->pcs)
@@ -2914,19 +2914,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
+ unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- unsigned int first_entry;
- int tmp_pay_len = 0;
+ u8 proto_hdr_len, hdr;
+ bool has_vlan, set_ic;
u32 pay_len, mss;
- u8 proto_hdr_len;
dma_addr_t des;
- bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
/* Compute header lengths */
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ hdr = sizeof(struct udphdr);
+ } else {
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr = tcp_hdrlen(skb);
+ }
/* Desc availability based on threshold should be enough safe */
if (unlikely(stmmac_tx_avail(priv, queue) <
@@ -2956,8 +2963,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (netif_msg_tx_queued(priv)) {
- pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
- __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, hdr, proto_hdr_len, pay_len, mss);
pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
skb->data_len);
}
@@ -3025,16 +3032,27 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
/* Manage tx mitigation */
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames)
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames)
+ set_ic = true;
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
desc = &tx_q->dma_tx[tx_q->cur_tx];
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
/* We've used all descriptors we need for this skb, however,
@@ -3071,7 +3089,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len,
pay_len,
1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -3125,27 +3143,30 @@ dma_map_err:
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ unsigned int first_entry, tx_packets, enh_desc;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ int gso = skb_shinfo(skb)->gso_type;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- unsigned int first_entry;
- unsigned int enh_desc;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
dma_addr_t des;
- bool has_vlan;
- int entry;
tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+ if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
@@ -3230,12 +3251,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* This approach takes care about the fragments: desc is the first
* element in case of no SG.
*/
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
+ tx_packets = (entry + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames)
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames)
+ set_ic = true;
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic;
else
@@ -3244,6 +3274,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
/* We've used all descriptors we need for this skb, however,
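Both transmit paths now share this interrupt-coalescing decision: a hardware-timestamped skb always gets the IC bit, otherwise the bit is raised whenever the running frame counter crosses a multiple of tx_coal_frames; the modulo comparison catches the crossing even when one skb consumes several descriptors. The predicate in isolation, with a small driver loop:

#include <stdbool.h>
#include <stdio.h>

/* tx_count_frames already includes this skb's tx_packets, as in the
 * hunks above. */
static bool should_set_ic(unsigned int tx_packets,
			  unsigned int tx_count_frames,
			  unsigned int coal_frames, bool hw_tstamp)
{
	if (hw_tstamp)
		return true;
	if (!coal_frames)
		return false;
	if (tx_packets > coal_frames)
		return true;
	return (tx_count_frames % coal_frames) < tx_packets;
}

int main(void)
{
	unsigned int count = 0, coal = 4;

	for (int i = 1; i <= 10; i++) {
		bool ic;

		count += 1;	/* one frame per transmit here */
		ic = should_set_ic(1, count, coal, false);
		printf("frame %2d: set_ic=%d\n", i, ic);
		if (ic)
			count = 0;	/* the driver resets the counter */
	}
	return 0;
}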
@@ -3430,7 +3462,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_count_frames += priv->rx_coal_frames;
if (rx_q->rx_count_frames > priv->rx_coal_frames)
rx_q->rx_count_frames = 0;
- use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
+
+ use_rx_wd = !priv->rx_coal_frames;
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
@@ -3443,6 +3479,55 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int ret, coe = priv->hw->rx_csum;
+ unsigned int plen = 0, hlen = 0;
+
+ /* Not first descriptor, buffer is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ ret = stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+ /* Not split header, buffer is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
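These two helpers replace the open-coded hlen/prev_len bookkeeping in stmmac_rx(): with split header (SPH) enabled a frame's header lands in buffer 1 and its payload in buffer 2, so per-descriptor lengths depend on whether this is the first and/or last descriptor of the frame. A compact host-side model of the same rules (descriptor accessors replaced by plain parameters):

#include <stdbool.h>
#include <stdio.h>

#define DMA_BUF_SZ 1536

static unsigned int buf1_len(bool sph, unsigned int hlen, bool not_ls,
			     unsigned int plen, unsigned int len_so_far)
{
	if (sph && len_so_far)
		return 0;	/* not the first descriptor: buf1 unused */
	if (sph && hlen)
		return hlen;	/* first descriptor: buf1 holds the header */
	if (not_ls)
		return DMA_BUF_SZ;	/* middle descriptor: full buffer */
	return plen < DMA_BUF_SZ ? plen : DMA_BUF_SZ;
}

static unsigned int buf2_len(bool sph, bool not_ls, unsigned int plen,
			     unsigned int len_so_far)
{
	if (!sph)
		return 0;	/* buf2 only exists with split header */
	if (not_ls)
		return DMA_BUF_SZ;
	return plen - len_so_far;	/* last descriptor: the remainder */
}

int main(void)
{
	/* one 190-byte frame with a 64-byte header split into buf1 */
	unsigned int len = 0, b1, b2;

	b1 = buf1_len(true, 64, false, 190, len);
	len += b1;
	b2 = buf2_len(true, false, 190, len);
	len += b2;
	printf("buf1=%u buf2=%u total=%u\n", b1, b2, len);
	return 0;
}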
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
@@ -3472,11 +3557,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- unsigned int hlen = 0, prev_len = 0;
+ unsigned int buf1_len = 0, buf2_len = 0;
enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- unsigned int sec_len;
int entry;
u32 hash;
@@ -3495,7 +3579,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
break;
read_again:
- sec_len = 0;
+ buf1_len = 0;
+ buf2_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3520,7 +3605,6 @@ read_again:
np = rx_q->dma_rx + next_entry;
prefetch(np);
- prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3537,69 +3621,61 @@ read_again:
goto read_again;
if (unlikely(error)) {
dev_kfree_skb(skb);
+ skb = NULL;
count++;
continue;
}
/* Buffer is good. Go on. */
- if (likely(status & rx_not_ls)) {
- len += priv->dma_buf_sz;
- } else {
- prev_len = len;
- len = stmmac_get_rx_frame_len(priv, p, coe);
-
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))
- len -= ETH_FCS_LEN;
+ prefetch(page_address(buf->page));
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap)) {
+ if (buf2_len)
+ buf2_len -= ETH_FCS_LEN;
+ else
+ buf1_len -= ETH_FCS_LEN;
+
+ len -= ETH_FCS_LEN;
}
if (!skb) {
- int ret = stmmac_get_rx_header_len(priv, p, &hlen);
-
- if (priv->sph && !ret && (hlen > 0)) {
- sec_len = len;
- if (!(status & rx_not_ls))
- sec_len = sec_len - hlen;
- len = hlen;
-
- prefetch(page_address(buf->sec_page));
- priv->xstats.rx_split_hdr_pkt_n++;
- }
-
- skb = napi_alloc_skb(&ch->rx_napi, len);
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
priv->dev->stats.rx_dropped++;
count++;
- continue;
+ goto drain_data;
}
- dma_sync_single_for_cpu(priv->device, buf->addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- len);
- skb_put(skb, len);
+ buf1_len);
+ skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
- } else {
- unsigned int buf_len = len - prev_len;
-
- if (likely(status & rx_not_ls))
- buf_len = priv->dma_buf_sz;
-
+ } else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
- buf_len, DMA_FROM_DEVICE);
+ buf1_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->page, 0, buf_len,
+ buf->page, 0, buf1_len,
priv->dma_buf_sz);
/* Data payload appended into SKB */
@@ -3607,22 +3683,23 @@ read_again:
buf->page = NULL;
}
- if (sec_len > 0) {
+ if (buf2_len) {
dma_sync_single_for_cpu(priv->device, buf->sec_addr,
- sec_len, DMA_FROM_DEVICE);
+ buf2_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->sec_page, 0, sec_len,
+ buf->sec_page, 0, buf2_len,
priv->dma_buf_sz);
- len += sec_len;
-
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
+drain_data:
if (likely(status & rx_not_ls))
goto read_again;
+ if (!skb)
+ continue;
/* Got entire packet into SKB. Finish it. */
@@ -3640,13 +3717,14 @@ read_again:
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += len;
count++;
}
- if (status & rx_not_ls) {
+ if (status & rx_not_ls || skb) {
rx_q->state_saved = true;
rx_q->state.skb = skb;
rx_q->state.error = error;
@@ -3994,11 +4072,13 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ int gso = skb_shinfo(skb)->gso_type;
+
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
/*
- * There is no way to determine the number of TSO
+ * There is no way to determine the number of TSO/USO
* capable Queues. Let's use always the Queue 0
- * because if TSO is supported then at least this
+ * because if TSO/USO is supported then at least this
* one will be capable.
*/
return 0;
@@ -4214,15 +4294,26 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- u16 vid;
+ __le16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
__le16 vid_le = cpu_to_le16(vid);
crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
hash |= (1 << crc);
+ count++;
}
- return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ pmatch = cpu_to_le16(vid);
+ hash = 0;
+ }
+
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
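The stmmac_vlan_update() change gives MACs without VLAN hash filtering a fallback: when at most one real VID is registered (VID 0 always passes the filter), the driver programs a single perfect-match entry instead of returning -EOPNOTSUPP, which is also why the vlhash checks disappear from the add/kill VID callbacks below. The decision logic in isolation; the CRC-based hash is stubbed out here:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct vlan_cfg {
	uint32_t hash;
	uint16_t pmatch;
};

static int vlan_update(const uint16_t *vids, int count, int has_vlhash,
		       struct vlan_cfg *out)
{
	uint32_t hash = 0;
	uint16_t last = 0;

	for (int i = 0; i < count; i++) {
		hash |= 1U << (vids[i] & 0xf);	/* stand-in for the CRC hash */
		last = vids[i];
	}

	out->hash = hash;
	out->pmatch = 0;
	if (!has_vlhash) {
		if (count > 2)	/* VID 0 plus at most one real VID */
			return -EOPNOTSUPP;
		out->pmatch = last;	/* single perfect-match entry */
		out->hash = 0;
	}
	return 0;
}

int main(void)
{
	struct vlan_cfg cfg;
	uint16_t vids[] = { 0, 100 };

	if (!vlan_update(vids, 2, 0, &cfg))
		printf("pmatch=%u hash=0x%x\n", cfg.pmatch, cfg.hash);
	return 0;
}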
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -4231,8 +4322,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4251,8 +4340,6 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4506,6 +4593,8 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (priv->plat->has_gmac4)
+ ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
@@ -4522,6 +4611,13 @@ int stmmac_dvr_probe(struct device *device,
if (!ret) {
dev_info(priv->device, "Using %d bits DMA width\n",
priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
} else {
ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 40c42637ad75..cfe5d8b73142 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -41,20 +41,32 @@
#define MII_XGMAC_BUSY BIT(22)
#define MII_XGMAC_MAX_C22ADDR 3
#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+#define MII_XGMAC_PA_SHIFT 16
+#define MII_XGMAC_DA_SHIFT 21
+
+static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ u32 tmp;
+
+ /* Set port as Clause 45 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
+ *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
+ return 0;
+}
static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
int phyreg, u32 *hw_addr)
{
- unsigned int mii_data = priv->hw->mii.data;
u32 tmp;
/* HW does not support C22 addr >= 4 */
if (phyaddr > MII_XGMAC_MAX_C22ADDR)
return -ENODEV;
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
/* Set port as Clause 22 */
tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
@@ -62,7 +74,7 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
tmp |= BIT(phyaddr);
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
- *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
return 0;
}
@@ -75,17 +87,28 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
if (phyreg & MII_ADDR_C45) {
- return -EOPNOTSUPP;
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
return ret;
+
+ value |= MII_XGMAC_SADDR;
}
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+ value |= MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -115,17 +138,28 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
if (phyreg & MII_ADDR_C45) {
- return -EOPNOTSUPP;
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
return ret;
+
+ value |= MII_XGMAC_SADDR;
}
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- value |= phydata | MII_XGMAC_SADDR;
+ value |= phydata;
value |= MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
@@ -363,6 +397,10 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
+ /* Looks like XGMAC needs a dummy read when C45 PHYs are in use */
+ if (priv->plat->has_xgmac)
+ stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
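stmmac_xgmac2_c45_format() above splits the Clause 45 register encoding, where the device address rides in the upper bits of phyreg, into the XGMAC's port-address and device-address fields. The packing alone (the C22-port bit manipulation and the MMIO accesses are left out of this sketch):

#include <stdint.h>
#include <stdio.h>

#define MII_DEVADDR_C45_SHIFT	16	/* devad at phyreg[20:16] */
#define MII_XGMAC_PA_SHIFT	16
#define MII_XGMAC_DA_SHIFT	21

/* phyreg is the C45 encoding with the MII_ADDR_C45 flag already
 * stripped, as in the read/write paths above. */
static uint32_t c45_hw_addr(uint32_t phyaddr, uint32_t phyreg)
{
	uint32_t addr;

	addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
	addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
	return addr;
}

int main(void)
{
	/* devad 1 (PMA/PMD), register 0x0007, PHY address 0 */
	uint32_t phyreg = (1U << MII_DEVADDR_C45_SHIFT) | 0x0007;

	printf("hw_addr = 0x%08x\n", c45_hw_addr(0, phyreg));
	return 0;
}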
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 170c3a052b14..bedaff0c13bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- plat->phy_interface = of_get_phy_mode(np);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ rc = of_get_phy_mode(np, &plat->phy_interface);
+ if (rc)
+ return ERR_PTR(rc);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index df638b18b72c..0989e2bb6ee3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
cfg = &priv->pps[rq->perout.index];
cfg->start.tv_sec = rq->perout.start.sec;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index ac3f658105c0..f3d8b9336b8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -877,16 +877,13 @@ out:
return 0;
}
-static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
struct stmmac_test_priv *tpriv;
struct sk_buff *skb = NULL;
int ret = 0, i;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
-
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
if (!tpriv)
return -ENOMEM;
@@ -952,16 +949,32 @@ cleanup:
return ret;
}
-static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_vlanfilt(priv);
+}
+
+static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_vlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
+static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
struct stmmac_test_priv *tpriv;
struct sk_buff *skb = NULL;
int ret = 0, i;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
-
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
if (!tpriv)
return -ENOMEM;
@@ -1028,6 +1041,25 @@ cleanup:
return ret;
}
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_dvlanfilt(priv);
+}
+
+static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_dvlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
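The new *_perfect selftests reuse the shared test body but force the perfect-match code path by masking dma_cap.vlhash for the duration of the run. The save/override/restore pattern in generic form:

#include <stdio.h>

struct caps {
	int vlhash;
};

static int test_body(const struct caps *c)
{
	/* the real body loops tagged frames through the MAC */
	printf("running with vlhash=%d\n", c->vlhash);
	return 0;
}

/* As in stmmac_test_vlanfilt_perfect(): clear the capability, run,
 * then restore it whatever the result. */
static int test_perfect(struct caps *c)
{
	int prev = c->vlhash, ret;

	c->vlhash = 0;
	ret = test_body(c);
	c->vlhash = prev;
	return ret;
}

int main(void)
{
	struct caps c = { .vlhash = 1 };

	test_body(&c);		/* hash filter path */
	test_perfect(&c);	/* perfect-match path */
	return 0;
}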
#ifdef CONFIG_NET_CLS_ACT
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
@@ -1702,119 +1734,127 @@ static const struct stmmac_test {
int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
{
- .name = "MAC Loopback ",
+ .name = "MAC Loopback ",
.lb = STMMAC_LOOPBACK_MAC,
.fn = stmmac_test_mac_loopback,
}, {
- .name = "PHY Loopback ",
+ .name = "PHY Loopback ",
.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
.fn = stmmac_test_phy_loopback,
}, {
- .name = "MMC Counters ",
+ .name = "MMC Counters ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mmc,
}, {
- .name = "EEE ",
+ .name = "EEE ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_eee,
}, {
- .name = "Hash Filter MC ",
+ .name = "Hash Filter MC ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_hfilt,
}, {
- .name = "Perfect Filter UC ",
+ .name = "Perfect Filter UC ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_pfilt,
}, {
- .name = "MC Filter ",
+ .name = "MC Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mcfilt,
}, {
- .name = "UC Filter ",
+ .name = "UC Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_ucfilt,
}, {
- .name = "Flow Control ",
+ .name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
}, {
- .name = "RSS ",
+ .name = "RSS ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rss,
}, {
- .name = "VLAN Filtering ",
+ .name = "VLAN Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanfilt,
}, {
- .name = "Double VLAN Filtering",
+ .name = "VLAN Filtering (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt_perfect,
+ }, {
+ .name = "Double VLAN Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_dvlanfilt,
}, {
- .name = "Flexible RX Parser ",
+ .name = "Double VLAN Filter (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt_perfect,
+ }, {
+ .name = "Flexible RX Parser ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rxp,
}, {
- .name = "SA Insertion (desc) ",
+ .name = "SA Insertion (desc) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_desc_sai,
}, {
- .name = "SA Replacement (desc)",
+ .name = "SA Replacement (desc) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_desc_sar,
}, {
- .name = "SA Insertion (reg) ",
+ .name = "SA Insertion (reg) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_reg_sai,
}, {
- .name = "SA Replacement (reg)",
+ .name = "SA Replacement (reg) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_reg_sar,
}, {
- .name = "VLAN TX Insertion ",
+ .name = "VLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanoff,
}, {
- .name = "SVLAN TX Insertion ",
+ .name = "SVLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_svlanoff,
}, {
- .name = "L3 DA Filtering ",
+ .name = "L3 DA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_da,
}, {
- .name = "L3 SA Filtering ",
+ .name = "L3 SA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_sa,
}, {
- .name = "L4 DA TCP Filtering ",
+ .name = "L4 DA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_tcp,
}, {
- .name = "L4 SA TCP Filtering ",
+ .name = "L4 SA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_tcp,
}, {
- .name = "L4 DA UDP Filtering ",
+ .name = "L4 DA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_udp,
}, {
- .name = "L4 SA UDP Filtering ",
+ .name = "L4 SA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_udp,
}, {
- .name = "ARP Offload ",
+ .name = "ARP Offload ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_arpoffload,
}, {
- .name = "Jumbo Frame ",
+ .name = "Jumbo Frame ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_jumbo,
}, {
- .name = "Multichannel Jumbo ",
+ .name = "Multichannel Jumbo ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mjumbo,
}, {
- .name = "Split Header ",
+ .name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f9a9a9d82233..7d972e0fd2b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -321,8 +321,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return -EINVAL;
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
- return -EOPNOTSUPP;
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 834afca3a019..9170572346b5 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -22,6 +22,7 @@ config TI_DAVINCI_EMAC
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
select PHYLIB
+ select GENERIC_ALLOCATOR
---help---
This driver supports TI's DaVinci Ethernet.
@@ -58,9 +59,24 @@ config TI_CPSW
To compile this driver as a module, choose M here: the module
will be called cpsw.
+config TI_CPSW_SWITCHDEV
+ tristate "TI CPSW Switch Support with switchdev"
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ select NET_SWITCHDEV
+ select TI_DAVINCI_MDIO
+ select MFD_SYSCON
+ select REGMAP
+ select NET_DEVLINK
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI's CPSW Ethernet Switch.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cpsw_new.
+
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
+ depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
depends on COMMON_CLK
depends on POSIX_TIMERS
---help---
@@ -72,7 +88,7 @@ config TI_CPTS
config TI_CPTS_MOD
tristate
depends on TI_CPTS
- default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+ default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
select NET_PTP_CLASSIFY
imply PTP_1588_CLOCK
default m
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index ed12e1e5df2f..d34df8e5cf94 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
+ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o cpsw_ale.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f298d714efd6..6ae4a72e6f43 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,7 +34,6 @@
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
-#include <linux/filter.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>
@@ -64,10 +63,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
-/* The buf includes headroom compatible with both skb and xdpf */
-#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
-
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
@@ -82,10 +77,16 @@ MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
(func)(slave++, ##arg); \
} while (0)
-#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ return cpsw->data.dual_emac ? priv->emac_port : cpsw->data.active_slave;
+}
-#define CPSW_XDP_CONSUMED 1
-#define CPSW_XDP_PASS 0
+static int cpsw_get_slave_port(u32 slave_num)
+{
+ return slave_num + 1;
+}
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid);
@@ -332,218 +333,6 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_del_mc_addr);
}
-void cpsw_intr_enable(struct cpsw_common *cpsw)
-{
- writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
- writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
-
- cpdma_ctlr_int_ctrl(cpsw->dma, true);
- return;
-}
-
-void cpsw_intr_disable(struct cpsw_common *cpsw)
-{
- writel_relaxed(0, &cpsw->wr_regs->tx_en);
- writel_relaxed(0, &cpsw->wr_regs->rx_en);
-
- cpdma_ctlr_int_ctrl(cpsw->dma, false);
- return;
-}
-
-static int cpsw_is_xdpf_handle(void *handle)
-{
- return (unsigned long)handle & BIT(0);
-}
-
-static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
-{
- return (void *)((unsigned long)xdpf | BIT(0));
-}
-
-static struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
-{
- return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
-}
-
-struct __aligned(sizeof(long)) cpsw_meta_xdp {
- struct net_device *ndev;
- int ch;
-};
-
-void cpsw_tx_handler(void *token, int len, int status)
-{
- struct cpsw_meta_xdp *xmeta;
- struct xdp_frame *xdpf;
- struct net_device *ndev;
- struct netdev_queue *txq;
- struct sk_buff *skb;
- int ch;
-
- if (cpsw_is_xdpf_handle(token)) {
- xdpf = cpsw_handle_to_xdpf(token);
- xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
- ndev = xmeta->ndev;
- ch = xmeta->ch;
- xdp_return_frame(xdpf);
- } else {
- skb = token;
- ndev = skb->dev;
- ch = skb_get_queue_mapping(skb);
- cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
- dev_kfree_skb_any(skb);
- }
-
- /* Check whether the queue is stopped due to stalled tx dma, if the
- * queue is stopped then start the queue as we have free desc for tx
- */
- txq = netdev_get_tx_queue(ndev, ch);
- if (unlikely(netif_tx_queue_stopped(txq)))
- netif_tx_wake_queue(txq);
-
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += len;
-}
-
-static void cpsw_rx_vlan_encap(struct sk_buff *skb)
-{
- struct cpsw_priv *priv = netdev_priv(skb->dev);
- struct cpsw_common *cpsw = priv->cpsw;
- u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
- u16 vtag, vid, prio, pkt_type;
-
- /* Remove VLAN header encapsulation word */
- skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
-
- pkt_type = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
- CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
- /* Ignore unknown & Priority-tagged packets*/
- if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
- pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
- return;
-
- vid = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
- VLAN_VID_MASK;
- /* Ignore vid 0 and pass packet as is */
- if (!vid)
- return;
- /* Ignore default vlans in dual mac mode */
- if (cpsw->data.dual_emac &&
- vid == cpsw->slaves[priv->emac_port].port_vlan)
- return;
-
- prio = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
- CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
-
- vtag = (prio << VLAN_PRIO_SHIFT) | vid;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
-
- /* strip vlan tag for VLAN-tagged packet */
- if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
- skb_pull(skb, VLAN_HLEN);
- }
-}
-
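/* Editorial note: a minimal sketch of the field-extraction pattern used by
 * cpsw_rx_vlan_encap() above. The shift and mask values here are made-up
 * placeholders, not the real CPSW register layout; only the shift-and-mask
 * technique and the vtag assembly are the point.
 */
#include <stdint.h>
#include <stdio.h>

#define HDR_PRIO_SHIFT  29
#define HDR_PRIO_MASK   0x7
#define HDR_VID_SHIFT   16
#define HDR_VID_MASK    0xfff
#define VLAN_PRIO_SHIFT 13

int main(void)
{
	uint32_t hdr = (5u << HDR_PRIO_SHIFT) | (100u << HDR_VID_SHIFT);
	uint16_t prio = (hdr >> HDR_PRIO_SHIFT) & HDR_PRIO_MASK;
	uint16_t vid  = (hdr >> HDR_VID_SHIFT) & HDR_VID_MASK;
	uint16_t vtag = (prio << VLAN_PRIO_SHIFT) | vid;   /* PCP | VID */

	printf("prio=%u vid=%u vtag=0x%04x\n", prio, vid, vtag);
	return 0;
}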
-static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
- struct page *page)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_meta_xdp *xmeta;
- struct cpdma_chan *txch;
- dma_addr_t dma;
- int ret, port;
-
- xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
- xmeta->ndev = priv->ndev;
- xmeta->ch = 0;
- txch = cpsw->txv[0].ch;
-
- port = priv->emac_port + cpsw->data.dual_emac;
- if (page) {
- dma = page_pool_get_dma_addr(page);
- dma += xdpf->headroom + sizeof(struct xdp_frame);
- ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
- dma, xdpf->len, port);
- } else {
- if (sizeof(*xmeta) > xdpf->headroom) {
- xdp_return_frame_rx_napi(xdpf);
- return -EINVAL;
- }
-
- ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
- xdpf->data, xdpf->len, port);
- }
-
- if (ret) {
- priv->ndev->stats.tx_dropped++;
- xdp_return_frame_rx_napi(xdpf);
- }
-
- return ret;
-}
-
-static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct net_device *ndev = priv->ndev;
- int ret = CPSW_XDP_CONSUMED;
- struct xdp_frame *xdpf;
- struct bpf_prog *prog;
- u32 act;
-
- rcu_read_lock();
-
- prog = READ_ONCE(priv->xdp_prog);
- if (!prog) {
- ret = CPSW_XDP_PASS;
- goto out;
- }
-
- act = bpf_prog_run_xdp(prog, xdp);
- switch (act) {
- case XDP_PASS:
- ret = CPSW_XDP_PASS;
- break;
- case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
- if (unlikely(!xdpf))
- goto drop;
-
- cpsw_xdp_tx_frame(priv, xdpf, page);
- break;
- case XDP_REDIRECT:
- if (xdp_do_redirect(ndev, xdp, prog))
- goto drop;
-
- /* Have to flush here, per packet, instead of doing it in bulk
- * at the end of the napi handler. The RX devices on this
- * particular hardware is sharing a common queue, so the
- * incoming device might change per packet.
- */
- xdp_do_flush_map();
- break;
- default:
- bpf_warn_invalid_xdp_action(act);
- /* fall through */
- case XDP_ABORTED:
- trace_xdp_exception(ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
- case XDP_DROP:
- goto drop;
- }
-out:
- rcu_read_unlock();
- return ret;
-drop:
- rcu_read_unlock();
- page_pool_recycle_direct(cpsw->page_pool[ch], page);
- return ret;
-}
-
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
len += CPSW_HEADROOM;
@@ -552,123 +341,6 @@ static unsigned int cpsw_rxbuf_total_len(unsigned int len)
return SKB_DATA_ALIGN(len);
}
-static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
- int size)
-{
- struct page_pool_params pp_params;
- struct page_pool *pool;
-
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = size;
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dma_dir = DMA_BIDIRECTIONAL;
- pp_params.dev = cpsw->dev;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- dev_err(cpsw->dev, "cannot create rx page pool\n");
-
- return pool;
-}
-
-static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct xdp_rxq_info *rxq;
- struct page_pool *pool;
- int ret;
-
- pool = cpsw->page_pool[ch];
- rxq = &priv->xdp_rxq[ch];
-
- ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
- if (ret)
- return ret;
-
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- xdp_rxq_info_unreg(rxq);
-
- return ret;
-}
-
-static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
-{
- struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
-
- if (!xdp_rxq_info_is_reg(rxq))
- return;
-
- xdp_rxq_info_unreg(rxq);
-}
-
-static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
-{
- struct page_pool *pool;
- int ret = 0, pool_size;
-
- pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
- pool = cpsw_create_page_pool(cpsw, pool_size);
- if (IS_ERR(pool))
- ret = PTR_ERR(pool);
- else
- cpsw->page_pool[ch] = pool;
-
- return ret;
-}
-
-void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
-{
- struct net_device *ndev;
- int i, ch;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (!ndev)
- continue;
-
- cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
- }
-
- page_pool_destroy(cpsw->page_pool[ch]);
- cpsw->page_pool[ch] = NULL;
- }
-}
-
-int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
-{
- struct net_device *ndev;
- int i, ch, ret;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- ret = cpsw_create_rx_pool(cpsw, ch);
- if (ret)
- goto err_cleanup;
-
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (!ndev)
- continue;
-
- ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
- if (ret)
- goto err_cleanup;
- }
- }
-
- return 0;
-
-err_cleanup:
- cpsw_destroy_xdp_rxqs(cpsw);
-
- return ret;
-}
-
static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
@@ -735,7 +407,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
- ret = cpsw_run_xdp(priv, ch, &xdp, page);
+ port = priv->emac_port + cpsw->data.dual_emac;
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
if (ret != CPSW_XDP_PASS)
goto requeue;
@@ -785,274 +458,6 @@ requeue:
}
}
-void cpsw_split_res(struct cpsw_common *cpsw)
-{
- u32 consumed_rate = 0, bigest_rate = 0;
- struct cpsw_vector *txv = cpsw->txv;
- int i, ch_weight, rlim_ch_num = 0;
- int budget, bigest_rate_ch = 0;
- u32 ch_rate, max_rate;
- int ch_budget = 0;
-
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (!ch_rate)
- continue;
-
- rlim_ch_num++;
- consumed_rate += ch_rate;
- }
-
- if (cpsw->tx_ch_num == rlim_ch_num) {
- max_rate = consumed_rate;
- } else if (!rlim_ch_num) {
- ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
- bigest_rate = 0;
- max_rate = consumed_rate;
- } else {
- max_rate = cpsw->speed * 1000;
-
- /* if max_rate is less then expected due to reduced link speed,
- * split proportionally according next potential max speed
- */
- if (max_rate < consumed_rate)
- max_rate *= 10;
-
- if (max_rate < consumed_rate)
- max_rate *= 10;
-
- ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
- ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
- (cpsw->tx_ch_num - rlim_ch_num);
- bigest_rate = (max_rate - consumed_rate) /
- (cpsw->tx_ch_num - rlim_ch_num);
- }
-
- /* split tx weight/budget */
- budget = CPSW_POLL_WEIGHT;
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (ch_rate) {
- txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
- if (!txv[i].budget)
- txv[i].budget++;
- if (ch_rate > bigest_rate) {
- bigest_rate_ch = i;
- bigest_rate = ch_rate;
- }
-
- ch_weight = (ch_rate * 100) / max_rate;
- if (!ch_weight)
- ch_weight++;
- cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
- } else {
- txv[i].budget = ch_budget;
- if (!bigest_rate_ch)
- bigest_rate_ch = i;
- cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
- }
-
- budget -= txv[i].budget;
- }
-
- if (budget)
- txv[bigest_rate_ch].budget += budget;
-
- /* split rx budget */
- budget = CPSW_POLL_WEIGHT;
- ch_budget = budget / cpsw->rx_ch_num;
- for (i = 0; i < cpsw->rx_ch_num; i++) {
- cpsw->rxv[i].budget = ch_budget;
- budget -= ch_budget;
- }
-
- if (budget)
- cpsw->rxv[0].budget += budget;
-}
-
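/* Editorial note: a standalone sketch of the proportional budget split done
 * by cpsw_split_res() above, for the simple case where every channel is
 * rate limited: each channel gets roughly rate/max_rate of the poll weight,
 * never less than 1, and the remainder goes to the fastest channel. The
 * channel rates below are made-up example numbers.
 */
#include <stdio.h>

#define POLL_WEIGHT 64

int main(void)
{
	unsigned int rate[] = { 100, 300, 600 };   /* kbit/s, hypothetical */
	int n = 3, budget[3], left = POLL_WEIGHT, biggest = 0, i;
	unsigned int max_rate = 0;

	for (i = 0; i < n; i++)
		max_rate += rate[i];               /* all channels limited */

	for (i = 0; i < n; i++) {
		budget[i] = (int)((unsigned long long)rate[i] * POLL_WEIGHT /
				  max_rate);
		if (!budget[i])
			budget[i] = 1;             /* every channel gets a slot */
		if (rate[i] > rate[biggest])
			biggest = i;
		left -= budget[i];
	}
	budget[biggest] += left;                   /* remainder to the fastest */

	for (i = 0; i < n; i++)
		printf("ch%d: rate=%u budget=%d\n", i, rate[i], budget[i]);
	return 0;
}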
-static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
-{
- struct cpsw_common *cpsw = dev_id;
-
- writel(0, &cpsw->wr_regs->tx_en);
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
-
- if (cpsw->quirk_irq) {
- disable_irq_nosync(cpsw->irqs_table[1]);
- cpsw->tx_irq_disabled = true;
- }
-
- napi_schedule(&cpsw->napi_tx);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
-{
- struct cpsw_common *cpsw = dev_id;
-
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
- writel(0, &cpsw->wr_regs->rx_en);
-
- if (cpsw->quirk_irq) {
- disable_irq_nosync(cpsw->irqs_table[0]);
- cpsw->rx_irq_disabled = true;
- }
-
- napi_schedule(&cpsw->napi_rx);
- return IRQ_HANDLED;
-}
-
-static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
-{
- u32 ch_map;
- int num_tx, cur_budget, ch;
- struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
- struct cpsw_vector *txv;
-
- /* process every unprocessed channel */
- ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
- if (!(ch_map & 0x80))
- continue;
-
- txv = &cpsw->txv[ch];
- if (unlikely(txv->budget > budget - num_tx))
- cur_budget = budget - num_tx;
- else
- cur_budget = txv->budget;
-
- num_tx += cpdma_chan_process(txv->ch, cur_budget);
- if (num_tx >= budget)
- break;
- }
-
- if (num_tx < budget) {
- napi_complete(napi_tx);
- writel(0xff, &cpsw->wr_regs->tx_en);
- }
-
- return num_tx;
-}
-
-static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
-{
- struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
- int num_tx;
-
- num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
- if (num_tx < budget) {
- napi_complete(napi_tx);
- writel(0xff, &cpsw->wr_regs->tx_en);
- if (cpsw->tx_irq_disabled) {
- cpsw->tx_irq_disabled = false;
- enable_irq(cpsw->irqs_table[1]);
- }
- }
-
- return num_tx;
-}
-
-static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
-{
- u32 ch_map;
- int num_rx, cur_budget, ch;
- struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
- struct cpsw_vector *rxv;
-
- /* process every unprocessed channel */
- ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
- for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
- if (!(ch_map & 0x01))
- continue;
-
- rxv = &cpsw->rxv[ch];
- if (unlikely(rxv->budget > budget - num_rx))
- cur_budget = budget - num_rx;
- else
- cur_budget = rxv->budget;
-
- num_rx += cpdma_chan_process(rxv->ch, cur_budget);
- if (num_rx >= budget)
- break;
- }
-
- if (num_rx < budget) {
- napi_complete_done(napi_rx, num_rx);
- writel(0xff, &cpsw->wr_regs->rx_en);
- }
-
- return num_rx;
-}
-
-static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
-{
- struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
- int num_rx;
-
- num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
- if (num_rx < budget) {
- napi_complete_done(napi_rx, num_rx);
- writel(0xff, &cpsw->wr_regs->rx_en);
- if (cpsw->rx_irq_disabled) {
- cpsw->rx_irq_disabled = false;
- enable_irq(cpsw->irqs_table[0]);
- }
- }
-
- return num_rx;
-}
-
-static inline void soft_reset(const char *module, void __iomem *reg)
-{
- unsigned long timeout = jiffies + HZ;
-
- writel_relaxed(1, reg);
- do {
- cpu_relax();
- } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
-
- WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
-}
-
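/* Editorial note: a userspace sketch of the write-1-then-poll-until-clear
 * handshake in soft_reset() above. A plain variable stands in for the MMIO
 * register and a loop counter stands in for the jiffies-based timeout; the
 * "hardware" here arbitrarily finishes after three polls.
 */
#include <stdio.h>

static volatile unsigned int fake_reg;
static int ticks;

static unsigned int reg_read(void)
{
	if (++ticks >= 3)
		fake_reg &= ~1u;        /* "hardware" finished resetting */
	return fake_reg;
}

int main(void)
{
	int timeout = 1000;             /* stand-in for jiffies + HZ */

	fake_reg = 1;                   /* writing 1 starts the reset */
	while ((reg_read() & 1) && --timeout)
		;                       /* cpu_relax() in the kernel */

	if (reg_read() & 1)
		fprintf(stderr, "failed to soft-reset\n");
	else
		printf("reset done after %d polls\n", ticks);
	return 0;
}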
-static void cpsw_set_slave_mac(struct cpsw_slave *slave,
- struct cpsw_priv *priv)
-{
- slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
- slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
-}
-
-static bool cpsw_shp_is_off(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 shift, mask, val;
-
- val = readl_relaxed(&cpsw->regs->ptype);
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
- mask = 7 << shift;
- val = val & mask;
-
- return !val;
-}
-
-static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 shift, mask, val;
-
- val = readl_relaxed(&cpsw->regs->ptype);
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
- mask = (1 << --fifo) << shift;
- val = on ? val | mask : val & ~mask;
-
- writel_relaxed(val, &cpsw->regs->ptype);
-}
-
static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct cpsw_priv *priv, bool *link)
{
@@ -1118,44 +523,6 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
slave->mac_control = mac_control;
}
-static int cpsw_get_common_speed(struct cpsw_common *cpsw)
-{
- int i, speed;
-
- for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
- if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
- speed += cpsw->slaves[i].phy->speed;
-
- return speed;
-}
-
-static int cpsw_need_resplit(struct cpsw_common *cpsw)
-{
- int i, rlim_ch_num;
- int speed, ch_rate;
-
- /* re-split resources only in case speed was changed */
- speed = cpsw_get_common_speed(cpsw);
- if (speed == cpsw->speed || !speed)
- return 0;
-
- cpsw->speed = speed;
-
- for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
- if (!ch_rate)
- break;
-
- rlim_ch_num++;
- }
-
- /* cases not dependent on speed */
- if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
- return 0;
-
- return 1;
-}
-
static void cpsw_adjust_link(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1348,51 +715,6 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
-int cpsw_fill_rx_channels(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_meta_xdp *xmeta;
- struct page_pool *pool;
- struct page *page;
- int ch_buf_num;
- int ch, i, ret;
- dma_addr_t dma;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- pool = cpsw->page_pool[ch];
- ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
- for (i = 0; i < ch_buf_num; i++) {
- page = page_pool_dev_alloc_pages(pool);
- if (!page) {
- cpsw_err(priv, ifup, "allocate rx page err\n");
- return -ENOMEM;
- }
-
- xmeta = page_address(page) + CPSW_XMETA_OFFSET;
- xmeta->ndev = priv->ndev;
- xmeta->ch = ch;
-
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
- ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
- page, dma,
- cpsw->rx_packet_max,
- 0);
- if (ret < 0) {
- cpsw_err(priv, ifup,
- "cannot submit page to channel %d rx, error %d\n",
- ch, ret);
- page_pool_recycle_direct(pool, page);
- return ret;
- }
- }
-
- cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
- ch, ch_buf_num);
- }
-
- return 0;
-}
-
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
u32 slave_port;
@@ -1410,221 +732,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
cpsw_sl_ctl_reset(slave->mac_sl);
}
-static int cpsw_tc_to_fifo(int tc, int num_tc)
-{
- if (tc == num_tc - 1)
- return 0;
-
- return CPSW_FIFO_SHAPERS_NUM - tc;
-}
-
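/* Editorial note: a worked example of the tc-to-fifo mapping removed above.
 * The lowest-priority (last) traffic class always lands on FIFO 0, which
 * cannot be shaped, and higher classes count down from the top shaped FIFO.
 * This assumes CPSW_FIFO_SHAPERS_NUM is 3, i.e. CPSW_TC_NUM - 1.
 */
#include <stdio.h>

#define FIFO_SHAPERS_NUM 3

static int tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;                    /* last tc: unshaped FIFO 0     */
	return FIFO_SHAPERS_NUM - tc;        /* tc0 -> FIFO3, tc1 -> FIFO2.. */
}

int main(void)
{
	int num_tc = 4, tc;

	for (tc = 0; tc < num_tc; tc++)
		printf("tc%d -> fifo%d\n", tc, tc_to_fifo(tc, num_tc));
	return 0;
}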
-static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- u32 val = 0, send_pct, shift;
- struct cpsw_slave *slave;
- int pct = 0, i;
-
- if (bw > priv->shp_cfg_speed * 1000)
- goto err;
-
- /* shaping has to stay enabled for highest fifos linearly
- * and fifo bw no more then interface can allow
- */
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- send_pct = slave_read(slave, SEND_PERCENT);
- for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
- if (!bw) {
- if (i >= fifo || !priv->fifo_bw[i])
- continue;
-
- dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
- continue;
- }
-
- if (!priv->fifo_bw[i] && i > fifo) {
- dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
- return -EINVAL;
- }
-
- shift = (i - 1) * 8;
- if (i == fifo) {
- send_pct &= ~(CPSW_PCT_MASK << shift);
- val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
- if (!val)
- val = 1;
-
- send_pct |= val << shift;
- pct += val;
- continue;
- }
-
- if (priv->fifo_bw[i])
- pct += (send_pct >> shift) & CPSW_PCT_MASK;
- }
-
- if (pct >= 100)
- goto err;
-
- slave_write(slave, send_pct, SEND_PERCENT);
- priv->fifo_bw[fifo] = bw;
-
- dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
- DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
-
- return 0;
-err:
- dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
- return -EINVAL;
-}
-
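/* Editorial note: a worked example of the percentage math in
 * cpsw_set_fifo_bw() above, assuming bw is in kbit/s and the configured
 * shaper speed is in Mbit/s: speed * 10 is 1% of the link in kbit/s, so
 * DIV_ROUND_UP(bw, speed * 10) yields the FIFO send-percentage, rounded up
 * and clamped to at least 1.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int speed = 1000;            /* link speed, Mbit/s        */
	unsigned int bw = 250000;             /* requested shaper, kbit/s  */
	unsigned int pct = DIV_ROUND_UP(bw, speed * 10);

	if (!pct)
		pct = 1;                      /* never program a 0% shaper */
	printf("%u kbit/s on a %u Mbit/s link -> %u%%\n", bw, speed, pct);
	return 0;
}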
-static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 tx_in_ctl_rg, val;
- int ret;
-
- ret = cpsw_set_fifo_bw(priv, fifo, bw);
- if (ret)
- return ret;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
-
- if (!bw)
- cpsw_fifo_shp_on(priv, fifo, bw);
-
- val = slave_read(slave, tx_in_ctl_rg);
- if (cpsw_shp_is_off(priv)) {
- /* disable FIFOs rate limited queues */
- val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
-
- /* set type of FIFO queues to normal priority mode */
- val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
-
- /* set type of FIFO queues to be rate limited */
- if (bw)
- val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
- else
- priv->shp_cfg_speed = 0;
- }
-
- /* toggle a FIFO rate limited queue */
- if (bw)
- val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
- else
- val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
- slave_write(slave, val, tx_in_ctl_rg);
-
- /* FIFO transmit shape enable */
- cpsw_fifo_shp_on(priv, fifo, bw);
- return 0;
-}
-
-/* Defaults:
- * class A - prio 3
- * class B - prio 2
- * shaping for class A should be set first
- */
-static int cpsw_set_cbs(struct net_device *ndev,
- struct tc_cbs_qopt_offload *qopt)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int prev_speed = 0;
- int tc, ret, fifo;
- u32 bw = 0;
-
- tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
-
- /* enable channels in backward order, as highest FIFOs must be rate
- * limited first and for compliance with CPDMA rate limited channels
- * that also used in bacward order. FIFO0 cannot be rate limited.
- */
- fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
- if (!fifo) {
- dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
- return -EINVAL;
- }
-
- /* do nothing, it's disabled anyway */
- if (!qopt->enable && !priv->fifo_bw[fifo])
- return 0;
-
- /* shapers can be set if link speed is known */
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- if (slave->phy && slave->phy->link) {
- if (priv->shp_cfg_speed &&
- priv->shp_cfg_speed != slave->phy->speed)
- prev_speed = priv->shp_cfg_speed;
-
- priv->shp_cfg_speed = slave->phy->speed;
- }
-
- if (!priv->shp_cfg_speed) {
- dev_err(priv->dev, "Link speed is not known");
- return -1;
- }
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- bw = qopt->enable ? qopt->idleslope : 0;
- ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
- if (ret) {
- priv->shp_cfg_speed = prev_speed;
- prev_speed = 0;
- }
-
- if (bw && prev_speed)
- dev_warn(priv->dev,
- "Speed was changed, CBS shaper speeds are changed!");
-
- pm_runtime_put_sync(cpsw->dev);
- return ret;
-}
-
-static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- int fifo, bw;
-
- for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
- bw = priv->fifo_bw[fifo];
- if (!bw)
- continue;
-
- cpsw_set_fifo_rlimit(priv, fifo, bw);
- }
-}
-
-static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- u32 tx_prio_map = 0;
- int i, tc, fifo;
- u32 tx_prio_rg;
-
- if (!priv->mqprio_hw)
- return;
-
- for (i = 0; i < 8; i++) {
- tc = netdev_get_prio_tc_map(priv->ndev, i);
- fifo = CPSW_FIFO_SHAPERS_NUM - tc;
- tx_prio_map |= fifo << (4 * i);
- }
-
- tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
-
- slave_write(slave, tx_prio_map, tx_prio_rg);
-}
-
static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
struct cpsw_priv *priv = arg;
@@ -1853,207 +960,6 @@ fail:
return NETDEV_TX_BUSY;
}
-#if IS_ENABLED(CONFIG_TI_CPTS)
-
-static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
- u32 ts_en, seq_id;
-
- if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
- slave_write(slave, 0, CPSW1_TS_CTL);
- return;
- }
-
- seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
- ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
-
- if (priv->tx_ts_enabled)
- ts_en |= CPSW_V1_TS_TX_EN;
-
- if (priv->rx_ts_enabled)
- ts_en |= CPSW_V1_TS_RX_EN;
-
- slave_write(slave, ts_en, CPSW1_TS_CTL);
- slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
-}
-
-static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
-{
- struct cpsw_slave *slave;
- struct cpsw_common *cpsw = priv->cpsw;
- u32 ctrl, mtype;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
-
- ctrl = slave_read(slave, CPSW2_CONTROL);
- switch (cpsw->version) {
- case CPSW_VERSION_2:
- ctrl &= ~CTRL_V2_ALL_TS_MASK;
-
- if (priv->tx_ts_enabled)
- ctrl |= CTRL_V2_TX_TS_BITS;
-
- if (priv->rx_ts_enabled)
- ctrl |= CTRL_V2_RX_TS_BITS;
- break;
- case CPSW_VERSION_3:
- default:
- ctrl &= ~CTRL_V3_ALL_TS_MASK;
-
- if (priv->tx_ts_enabled)
- ctrl |= CTRL_V3_TX_TS_BITS;
-
- if (priv->rx_ts_enabled)
- ctrl |= CTRL_V3_RX_TS_BITS;
- break;
- }
-
- mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
-
- slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
- slave_write(slave, ctrl, CPSW2_CONTROL);
- writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
- writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->version != CPSW_VERSION_1 &&
- cpsw->version != CPSW_VERSION_2 &&
- cpsw->version != CPSW_VERSION_3)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
- return -ERANGE;
-
- switch (cfg.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- priv->rx_ts_enabled = 0;
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- break;
- default:
- return -ERANGE;
- }
-
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
-
- switch (cpsw->version) {
- case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(priv);
- break;
- case CPSW_VERSION_2:
- case CPSW_VERSION_3:
- cpsw_hwtstamp_v2(priv);
- break;
- default:
- WARN_ON(1);
- }
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
-
- if (cpsw->version != CPSW_VERSION_1 &&
- cpsw->version != CPSW_VERSION_2 &&
- cpsw->version != CPSW_VERSION_3)
- return -EOPNOTSUPP;
-
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-#endif /*CONFIG_TI_CPTS*/
-
-static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
-
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
- return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
-}
-
-static void cpsw_ndo_tx_timeout(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ch;
-
- cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
- ndev->stats.tx_errors++;
- cpsw_intr_disable(cpsw);
- for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_stop(cpsw->txv[ch].ch);
- cpdma_chan_start(cpsw->txv[ch].ch);
- }
-
- cpsw_intr_enable(cpsw);
- netif_trans_update(ndev);
- netif_tx_wake_all_queues(ndev);
-}
-
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -2215,168 +1121,13 @@ err:
return ret;
}
-static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 min_rate;
- u32 ch_rate;
- int i, ret;
-
- ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
- if (ch_rate == rate)
- return 0;
-
- ch_rate = rate * 1000;
- min_rate = cpdma_chan_get_min_rate(cpsw->dma);
- if ((ch_rate < min_rate && ch_rate)) {
- dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
- min_rate);
- return -EINVAL;
- }
-
- if (rate > cpsw->speed) {
- dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
- return -EINVAL;
- }
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
- pm_runtime_put(cpsw->dev);
-
- if (ret)
- return ret;
-
- /* update rates for slaves tx queues */
- for (i = 0; i < cpsw->data.slaves; i++) {
- slave = &cpsw->slaves[i];
- if (!slave->ndev)
- continue;
-
- netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
- }
-
- cpsw_split_res(cpsw);
- return ret;
-}
-
-static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
-{
- struct tc_mqprio_qopt_offload *mqprio = type_data;
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int fifo, num_tc, count, offset;
- struct cpsw_slave *slave;
- u32 tx_prio_map = 0;
- int i, tc, ret;
-
- num_tc = mqprio->qopt.num_tc;
- if (num_tc > CPSW_TC_NUM)
- return -EINVAL;
-
- if (mqprio->mode != TC_MQPRIO_MODE_DCB)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- if (num_tc) {
- for (i = 0; i < 8; i++) {
- tc = mqprio->qopt.prio_tc_map[i];
- fifo = cpsw_tc_to_fifo(tc, num_tc);
- tx_prio_map |= fifo << (4 * i);
- }
-
- netdev_set_num_tc(ndev, num_tc);
- for (i = 0; i < num_tc; i++) {
- count = mqprio->qopt.count[i];
- offset = mqprio->qopt.offset[i];
- netdev_set_tc_queue(ndev, i, count, offset);
- }
- }
-
- if (!mqprio->qopt.hw) {
- /* restore default configuration */
- netdev_reset_tc(ndev);
- tx_prio_map = TX_PRIORITY_MAPPING;
- }
-
- priv->mqprio_hw = mqprio->qopt.hw;
-
- offset = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- slave_write(slave, tx_prio_map, offset);
-
- pm_runtime_put_sync(cpsw->dev);
-
- return 0;
-}
-
-static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_CBS:
- return cpsw_set_cbs(ndev, type_data);
-
- case TC_SETUP_QDISC_MQPRIO:
- return cpsw_set_mqprio(ndev, type_data);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
-{
- struct bpf_prog *prog = bpf->prog;
-
- if (!priv->xdpi.prog && !prog)
- return 0;
-
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
- WRITE_ONCE(priv->xdp_prog, prog);
-
- xdp_attachment_setup(&priv->xdpi, bpf);
-
- return 0;
-}
-
-static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
-
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return cpsw_xdp_prog_setup(priv, bpf);
-
- case XDP_QUERY_PROG:
- return xdp_attachment_query(&priv->xdpi, bpf);
-
- default:
- return -EINVAL;
- }
-}
-
static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
struct xdp_frame *xdpf;
- int i, drops = 0;
+ int i, drops = 0, port;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -2389,7 +1140,8 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
continue;
}
- if (cpsw_xdp_tx_frame(priv, xdpf, NULL))
+ port = priv->emac_port + cpsw->data.dual_emac;
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
drops++;
}
@@ -2619,11 +1371,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
i);
goto no_phy_slave;
}
- slave_data->phy_if = of_get_phy_mode(slave_node);
- if (slave_data->phy_if < 0) {
+ ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
+ if (ret) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
- ret = slave_data->phy_if;
goto err_node_put;
}
@@ -2776,6 +1527,8 @@ static int cpsw_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, cpsw);
+ cpsw_slave_index = cpsw_slave_index_priv;
+
cpsw->dev = dev;
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 84025dcc78d5..929f3d3354e3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -5,6 +5,8 @@
* Copyright (C) 2012 Texas Instruments
*
*/
+#include <linux/bitmap.h>
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -382,6 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int mcast_members;
int idx;
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
@@ -390,11 +393,15 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask)
- cpsw_ale_set_port_mask(ale_entry, port_mask,
+ if (port_mask) {
+ mcast_members = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
+ mcast_members &= ~port_mask;
+ cpsw_ale_set_port_mask(ale_entry, mcast_members,
ale->port_mask_bits);
- else
+ } else {
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
cpsw_ale_write(ale, idx, ale_entry);
return 0;
@@ -415,7 +422,18 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}
-int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int untag_mask)
+{
+ cpsw_ale_set_vlan_untag_force(ale_entry,
+ untag_mask, ale->vlan_field_bits);
+ if (untag_mask & ALE_PORT_HOST)
+ bitmap_set(ale->p0_untag_vid_mask, vid, 1);
+ else
+ bitmap_clear(ale->p0_untag_vid_mask, vid, 1);
+}
+
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
int reg_mcast, int unreg_mcast)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -427,8 +445,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
cpsw_ale_set_vlan_id(ale_entry, vid);
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
- cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
if (!ale->params.nu_switch_ale) {
cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
ale->vlan_field_bits);
@@ -437,7 +455,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
} else {
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
+ ale->vlan_field_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -450,6 +469,41 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
return 0;
}
+static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
+{
+ int reg_mcast, unreg_mcast;
+ int members, untag;
+
+ members = cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+ members &= ~port_mask;
+
+ untag = cpsw_ale_get_vlan_untag_force(ale_entry,
+ ale->vlan_field_bits);
+ reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ untag &= members;
+ reg_mcast &= members;
+ unreg_mcast &= members;
+
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
+
+ if (!ale->params.nu_switch_ale) {
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
+ ale->vlan_field_bits);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+ ale->vlan_field_bits);
+ } else {
+ cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast,
+ unreg_mcast);
+ }
+ cpsw_ale_set_vlan_member_list(ale_entry, members,
+ ale->vlan_field_bits);
+}
+
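/* Editorial note: a toy illustration of the mask bookkeeping done by
 * cpsw_ale_del_vlan_modify() above. Removing ports from the member list
 * must also clear them from the untag/reg-mcast/unreg-mcast masks, since a
 * port that left the VLAN must not keep per-port attribute bits. The port
 * numbering is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	int members  = 0x7;          /* ports 0..2 in the VLAN          */
	int untag    = 0x5;          /* ports 0 and 2 egress untagged   */
	int del_mask = 0x4;          /* remove port 2                   */

	members &= ~del_mask;
	untag &= members;            /* attributes follow membership    */

	printf("members=0x%x untag=0x%x\n", members, untag);
	return 0;
}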
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -461,16 +515,83 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask)
- cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
- ale->vlan_field_bits);
- else
+ if (port_mask) {
+ cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
+ } else {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
cpsw_ale_write(ale, idx, ale_entry);
+
return 0;
}
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mask, int unreg_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int reg_mcast_members, unreg_mcast_members;
+ int vlan_members, untag_members;
+ int idx, ret = 0;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ vlan_members = cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+ reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ unreg_mcast_members =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ untag_members = cpsw_ale_get_vlan_untag_force(ale_entry,
+ ale->vlan_field_bits);
+
+ vlan_members |= port_mask;
+ untag_members = (untag_members & ~port_mask) | untag_mask;
+ reg_mcast_members = (reg_mcast_members & ~port_mask) | reg_mask;
+ unreg_mcast_members = (unreg_mcast_members & ~port_mask) | unreg_mask;
+
+ ret = cpsw_ale_add_vlan(ale, vid, vlan_members, untag_members,
+ reg_mcast_members, unreg_mcast_members);
+ if (ret) {
+ dev_err(ale->params.dev, "Unable to add vlan\n");
+ return ret;
+ }
+ dev_dbg(ale->params.dev, "port mask 0x%x untag 0x%x\n", vlan_members,
+ untag_mask);
+
+ return ret;
+}
+
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int unreg_members = 0;
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+
+ unreg_members =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ if (add)
+ unreg_members |= unreg_mcast_mask;
+ else
+ unreg_members &= ~unreg_mcast_mask;
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members,
+ ale->vlan_field_bits);
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+}
+
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -779,6 +900,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
@@ -791,6 +913,13 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
if (!ale)
return NULL;
+ ale->p0_untag_vid_mask =
+ devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!ale->p0_untag_vid_mask)
+ return ERR_PTR(-ENOMEM);
+
ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
@@ -862,6 +991,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
}
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
return ale;
}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 370df254eb12..70d0955c2652 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -35,6 +35,7 @@ struct cpsw_ale {
u32 port_mask_bits;
u32 port_num_bits;
u32 vlan_field_bits;
+ unsigned long *p0_untag_vid_mask;
};
enum cpsw_ale_control {
@@ -115,4 +116,14 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
+{
+ return test_bit(vid, ale->p0_untag_vid_mask);
+}
+
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mcast, int unreg_mcast);
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add);
+
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
new file mode 100644
index 000000000000..71215db7934b
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -0,0 +1,2048 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
+
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "cpsw_switchdev.h"
+#include "cpts.h"
+#include "davinci_cpdma.h"
+
+#include <net/pkt_sched.h>
+
+static int debug_level;
+static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+
+struct cpsw_devlink {
+ struct cpsw_common *cpsw;
+};
+
+enum cpsw_devlink_param_id {
+ CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ CPSW_DL_PARAM_SWITCH_MODE,
+ CPSW_DL_PARAM_ALE_BYPASS,
+};
+
+/* struct cpsw_common is not needed, kept here for compatibility
+ * reasons with the old driver
+ */
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ if (priv->emac_port == HOST_PORT_NUM)
+ return -1;
+
+ return priv->emac_port - 1;
+}
+
+static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
+{
+ return !cpsw->data.dual_emac;
+}
+
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ bool enable_uni = false;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw))
+ return;
+
+ /* Enabling promiscuous mode for one interface will be
+ * common for both interfaces, as they share the same
+ * hardware resource.
+ */
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev &&
+ (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
+ enable_uni = true;
+
+ if (!enable && enable_uni) {
+ enable = enable_uni;
+ dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable unknown unicast, reg/unreg mcast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 1);
+
+ dev_dbg(cpsw->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable unknown unicast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "promiscuity disabled\n");
+ }
+}
+
+/**
+ * cpsw_set_mc - add a multicast address to the ALE table if it is not yet
+ * present, or delete it otherwise
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret, slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (vid < 0)
+ vid = cpsw->slaves[slave_no].port_vlan;
+
+ mask = ALE_PORT_HOST;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ cpsw_set_promiscious(ndev, true);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
+ return;
+ }
+
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, priv->emac_port);
+
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
+}
+
+static unsigned int cpsw_rxbuf_total_len(unsigned int len)
+{
+ len += CPSW_HEADROOM;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
+
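/* Editorial note: a sketch of the build_skb() size budget computed by
 * cpsw_rxbuf_total_len() above. The buffer must hold the headroom, the
 * packet itself, and an aligned skb_shared_info tail. The constants below
 * are illustrative stand-ins, not the driver's real values.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define CACHE_BYTES     64
#define HEADROOM        ALIGN_UP(256 + 2, sizeof(long)) /* hdroom + ip align */
#define SHINFO_SIZE     320                  /* pretend skb_shared_info size */

static unsigned int rxbuf_total_len(unsigned int len)
{
	len += HEADROOM;                               /* reserved front space */
	len += ALIGN_UP(SHINFO_SIZE, CACHE_BYTES);     /* shared-info tail     */
	return ALIGN_UP(len, CACHE_BYTES);             /* overall buffer size  */
}

int main(void)
{
	printf("pkt 1522 -> buffer %u bytes\n", rxbuf_total_len(1522));
	return 0;
}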
+static void cpsw_rx_handler(void *token, int len, int status)
+{
+ struct page *new_page, *page = token;
+ void *pa = page_address(page);
+ int headroom = CPSW_HEADROOM;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpsw_common *cpsw;
+ struct net_device *ndev;
+ int port, ch, pkt_size;
+ struct cpsw_priv *priv;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int ret = 0;
+ dma_addr_t dma;
+
+ xmeta = pa + CPSW_XMETA_OFFSET;
+ cpsw = ndev_to_cpsw(xmeta->ndev);
+ ndev = xmeta->ndev;
+ pkt_size = cpsw->rx_packet_max;
+ ch = xmeta->ch;
+
+ if (status >= 0) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port)
+ ndev = cpsw->slaves[--port].ndev;
+ }
+
+ priv = netdev_priv(ndev);
+ pool = cpsw->page_pool[ch];
+
+ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->usage_count && status >= 0) {
+ /* The packet was received on an interface which
+ * is already down while the other interface is
+ * still up and running. Instead of freeing the
+ * page, which would reduce the number of rx
+ * descriptors in the DMA engine, requeue it
+ * back to cpdma.
+ */
+ new_page = page;
+ goto requeue;
+ }
+
+ /* the interface is going down, pages are purged */
+ page_pool_recycle_direct(pool, page);
+ return;
+ }
+
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ if (priv->xdp_prog) {
+ if (status & CPDMA_RX_VLAN_ENCAP) {
+ xdp.data = pa + CPSW_HEADROOM +
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ xdp.data_end = xdp.data + len -
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ } else {
+ xdp.data = pa + CPSW_HEADROOM;
+ xdp.data_end = xdp.data + len;
+ }
+
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp.data_hard_start = pa;
+ xdp.rxq = &priv->xdp_rxq[ch];
+
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
+ if (ret != CPSW_XDP_PASS)
+ goto requeue;
+
+ /* XDP prog might have changed packet data and boundaries */
+ len = xdp.data_end - xdp.data;
+ headroom = xdp.data - xdp.data_hard_start;
+
+ /* XDP prog can modify vlan tag, so can't use encap header */
+ status &= ~CPDMA_RX_VLAN_ENCAP;
+ }
+
+ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
+ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb->offload_fwd_mark = priv->offload_fwd_mark;
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+ skb->dev = ndev;
+ if (status & CPDMA_RX_VLAN_ENCAP)
+ cpsw_rx_vlan_encap(skb);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* unmap page as no netstack skb page recycling */
+ page_pool_release_page(pool, page);
+ netif_receive_skb(skb);
+
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
+ pkt_size, 0);
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
+ page_pool_recycle_direct(pool, new_page);
+ }
+}
+
+static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int mcast_mask;
+ u32 port_mask;
+ int ret;
+
+ port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
+
+ mcast_mask = ALE_PORT_HOST;
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = mcast_mask;
+
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
+ unreg_mcast_mask);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ mcast_mask, ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ /* In dual EMAC, reserved VLAN id should not be used for
+ * creating VLAN interfaces as this can break the dual
+ * EMAC port separation
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev || !vid)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
+ /* restore MQPRIO offload */
+ cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* restore CBS offload */
+ cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+}
+
+static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
+{
+ char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
+
+ cpsw_ale_add_mcast(cpsw->ale, stpa,
+ ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ ALE_PORT_1 | ALE_PORT_2);
+
+ cpsw_init_stp_ale_entry(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
+ /* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 control_reg;
+
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
+
+ /* switch to vlan aware mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&cpsw->regs->control);
+ control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
+ writel(control_reg, &cpsw->regs->control);
+
+ /* setup host port priority mapping */
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+
+ /* disable priority elevation */
+ writel_relaxed(0, &cpsw->regs->ptype);
+
+ /* enable statistics collection on all ports */
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
+
+ /* Enable internal fifo flow control */
+ writel(0x7, &cpsw->regs->flow_control);
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_init_host_port_switch(cpsw);
+ else
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+}
+
+static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+ /* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 1);
+}
+
+static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 0);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 0);
+ /* Disabling SA_UPDATE is required to make STP work; without this
+ * setting, host MAC addresses will jump between ports.
+ * As per the TRM, a MAC address can be defined as unicast supervisory
+ * (super) by setting both (ALE_BLOCKED | ALE_SECURE), which should
+ * prevent SA_UPDATE, but the HW seems to work incorrectly and setting
+ * ALE_SECURE causes STP packets to be dropped due to the ingress
+ * filter:
+ * if (source address found) and (secure) and
+ * (receive port number != port_number))
+ * then discard the packet
+ */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NO_SA_UPDATE, 1);
+
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ struct phy_device *phy;
+ u32 mac_control = 0;
+
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ phy = slave->phy;
+
+ if (!phy)
+ return;
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* set speed_in input in case RMII mode is used in 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
+
+ if (priv->rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (priv->tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
+ } else {
+ netif_tx_stop_all_queues(ndev);
+
+ mac_control = 0;
+ /* disable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
+ }
+
+ if (mac_control != slave->mac_control)
+ phy_print_status(phy);
+
+ slave->mac_control = mac_control;
+
+ if (phy->link && cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+}
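+
+/* For example, a 1000Mbps full-duplex link resolves above to
+ * mac_control = GMII_EN | GIG | FULLDUPLEX (plus the RX/TX flow-control
+ * bits when pause was negotiated), while link loss waits for the MAC SL
+ * to idle and then resets it, so the next link-up reprograms it from a
+ * clean state.
+ */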
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct phy_device *phy;
+
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
+
+ /* setup priority mapping */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+ break;
+ }
+
+ /* setup max packet size, and mac address */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_port_add_switch_def_ale_entries(priv, slave);
+ else
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+
+ if (!slave->data->phy_node)
+ dev_err(priv->dev, "no phy found on slave %d\n",
+ slave->slave_num);
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (!phy) {
+ dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
+ slave->data->phy_node,
+ slave->slave_num);
+ return;
+ }
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
+ slave->data->phy_if);
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+
+ cpsw_info(priv, ifdown, "shutting down ndev\n");
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ if (slave->phy)
+ phy_stop(slave->phy);
+
+ netif_tx_stop_all_queues(priv->ndev);
+
+ if (slave->phy) {
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+ }
+
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
+
+ if (cpsw->usage_count <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+
+ cpsw->usage_count--;
+ pm_runtime_put_sync(cpsw->dev);
+ return 0;
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ dev_info(priv->dev, "starting ndev. mode: %s\n",
+ cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto pm_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto pm_cleanup;
+ }
+
+ /* Initialize host and slave ports */
+ if (!cpsw->usage_count)
+ cpsw_init_host_port(priv);
+ cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* initialize shared resources for every ndev */
+ if (!cpsw->usage_count) {
+		/* create rxqs for both interfaces in dual_mac mode, as they
+		 * use the same pool and must be destroyed together when there
+		 * are no users left.
+		 */
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
+
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ cpsw_restore(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (cpsw->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
+ cpsw_set_coalesce(ndev, &coal);
+ }
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ cpsw->usage_count++;
+
+ return 0;
+
+err_cleanup:
+ cpsw_ndo_stop(ndev);
+
+pm_cleanup:
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
+
+ if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ cpsw_err(priv, tx_err, "packet pad failed\n");
+ ndev->stats.tx_dropped++;
+ return NET_XMIT_DROP;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
+	/* If there are no more free tx descriptors, tell the kernel
+	 * to stop sending us tx frames.
+	 */
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ netif_tx_stop_queue(txq);
+
+		/* Barrier, so that stop_queue is visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+ }
+
+ return NETDEV_TX_OK;
+fail:
+ ndev->stats.tx_dropped++;
+ netif_tx_stop_queue(txq);
+
+	/* Barrier, so that stop_queue is visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+}
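+
+/* The stop-then-recheck sequence above is the usual lost-wakeup guard:
+ * the tx completion path may free descriptors between the failed
+ * cpdma_check_free_tx_desc() and netif_tx_stop_queue(); the barrier
+ * orders the stop against the final recheck so the queue gets woken
+ * instead of being left stalled while descriptors are free.
+ */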
+
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, slave_no;
+ int flags = 0;
+ u16 vid = 0;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ vid = cpsw->slaves[slave_no].port_vlan;
+ flags = ALE_VLAN | ALE_SECURE;
+
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+ flags, vid);
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
+ flags, vid);
+
+ ether_addr_copy(priv->mac_addr, addr->sa_data);
+ ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
+
+ pm_runtime_put(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan)
+ goto err;
+ }
+
+ dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = snprintf(name, len, "p%d", priv->emac_port);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
+static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct xdp_frame *xdpf;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
+ drops++;
+ }
+
+ return n - drops;
+}
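+
+/* The return value is the number of frames actually queued for
+ * transmit; runts below CPSW_MIN_PACKET_SIZE are recycled immediately
+ * via xdp_return_frame_rx_napi() and counted as drops rather than
+ * submitted to the hardware.
+ */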
+
+static int cpsw_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ppid->id_len = sizeof(cpsw->base_mac);
+ memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+ .ndo_start_xmit = cpsw_ndo_start_xmit,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
+ .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
+ .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
+ .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
+ .ndo_bpf = cpsw_ndo_bpf,
+ .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_get_port_parent_id = cpsw_get_port_parent_id,
+};
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(cpsw->dev);
+ strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strlcpy(info->version, "2.0", sizeof(info->version));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!cpsw->slaves[slave_no].phy)
+ return -EINVAL;
+
+ if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
+ return -EINVAL;
+
+ priv->rx_pause = pause->rx_pause ? true : false;
+ priv->tx_pause = pause->tx_pause ? true : false;
+
+ phy_set_asym_pause(cpsw->slaves[slave_no].phy,
+ priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_pauseparam = cpsw_get_pauseparam,
+ .set_pauseparam = cpsw_set_pauseparam,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
+ .get_regs_len = cpsw_get_regs_len,
+ .get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
+ .get_link_ksettings = cpsw_get_link_ksettings,
+ .set_link_ksettings = cpsw_set_link_ksettings,
+ .get_eee = cpsw_get_eee,
+ .set_eee = cpsw_set_eee,
+ .nway_reset = cpsw_nway_reset,
+ .get_ringparam = cpsw_get_ringparam,
+ .set_ringparam = cpsw_set_ringparam,
+};
+
+static int cpsw_probe_dt(struct cpsw_common *cpsw)
+{
+ struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device *dev = cpsw->dev;
+ int ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ tmp_node = of_get_child_by_name(node, "ethernet-ports");
+ if (!tmp_node)
+ return -ENOENT;
+ data->slaves = of_get_child_count(tmp_node);
+ if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
+ of_node_put(tmp_node);
+ return -ENOENT;
+ }
+
+ data->active_slave = 0;
+ data->channels = CPSW_MAX_QUEUES;
+ data->ale_entries = CPSW_ALE_NUM_ENTRIES;
+ data->dual_emac = 1;
+ data->bd_ram_size = CPSW_BD_RAM_SIZE;
+ data->mac_control = 0;
+
+ data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
+ return -ENOMEM;
+
+ /* Populate all the child nodes here...
+ */
+ ret = devm_of_platform_populate(dev);
+	/* We do not want to force this, as in some cases the node may have no children */
+ if (ret)
+ dev_warn(dev, "Doesn't have any child node\n");
+
+ for_each_child_of_node(tmp_node, port_np) {
+ struct cpsw_slave_data *slave_data;
+ const void *mac_addr;
+ u32 port_id;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
+ dev_err(dev, "%pOF has invalid port_id %u\n",
+ port_np, port_id);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ slave_data = &data->slave_data[port_id - 1];
+
+ slave_data->disabled = !of_device_is_available(port_np);
+ if (slave_data->disabled)
+ continue;
+
+ slave_data->slave_node = port_np;
+ slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(slave_data->ifphy)) {
+ ret = PTR_ERR(slave_data->ifphy);
+ dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (of_phy_is_fixed_link(port_np)) {
+ ret = of_phy_register_fixed_link(port_np);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+ slave_data->phy_node = of_node_get(port_np);
+ } else {
+ slave_data->phy_node =
+ of_parse_phandle(port_np, "phy-handle", 0);
+ }
+
+ if (!slave_data->phy_node) {
+ dev_err(dev, "%pOF no phy found\n", port_np);
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ ret = of_get_phy_mode(port_np, &slave_data->phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ mac_addr = of_get_mac_address(port_np);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(slave_data->mac_addr, mac_addr);
+ } else {
+ ret = ti_cm_get_macid(dev, port_id - 1,
+ slave_data->mac_addr);
+ if (ret)
+ goto err_node_put;
+ }
+
+ if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
+ &prop)) {
+ dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
+ port_np);
+ slave_data->dual_emac_res_vlan = port_id;
+ dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
+ port_np, slave_data->dual_emac_res_vlan);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
+ of_node_put(tmp_node);
+ return 0;
+
+err_node_put:
+ of_node_put(port_np);
+ return ret;
+}
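+
+/* The DT shape this parser expects, as a sketch (labels and values are
+ * hypothetical):
+ *
+ *	ethernet-ports {
+ *		port@1 {
+ *			reg = <1>;
+ *			phy-handle = <&ethphy0>;
+ *			phy-mode = "rgmii";
+ *			ti,dual-emac-pvid = <1>;
+ *		};
+ *		port@2 { ... };
+ *	};
+ *
+ * A port without ti,dual-emac-pvid falls back to its port id as the
+ * reserved VLAN, as warned about above.
+ */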
+
+static void cpsw_remove_dt(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+ struct device_node *port_np = slave_data->phy_node;
+
+ if (port_np) {
+ if (of_phy_is_fixed_link(port_np))
+ of_phy_deregister_fixed_link(port_np);
+
+ of_node_put(port_np);
+ }
+ }
+}
+
+static int cpsw_create_ports(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct net_device *ndev, *napi_ndev = NULL;
+ struct device *dev = cpsw->dev;
+ struct cpsw_priv *priv;
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (slave_data->disabled)
+ continue;
+
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES,
+ CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = i + 1;
+
+ if (is_valid_ether_addr(slave_data->mac_addr)) {
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+ dev_info(cpsw->dev, "Detected MACID = %pM\n",
+ priv->mac_addr);
+ } else {
+ eth_random_addr(slave_data->mac_addr);
+ dev_info(cpsw->dev, "Random MACID = %pM\n",
+ priv->mac_addr);
+ }
+ ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+
+ cpsw->slaves[i].ndev = ndev;
+
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!napi_ndev) {
+			/* The CPSW host port CPDMA interface is shared
+			 * between ports, and there is only one TX and one
+			 * RX IRQ available for all possible TX and RX
+			 * channels respectively.
+			 */
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ?
+ cpsw_rx_poll : cpsw_rx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ?
+ cpsw_tx_poll : cpsw_tx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ }
+
+ napi_ndev = ndev;
+ }
+
+ return ret;
+}
+
+static void cpsw_unregister_ports(struct cpsw_common *cpsw)
+{
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ unregister_netdev(cpsw->slaves[i].ndev);
+ }
+}
+
+static int cpsw_register_ports(struct cpsw_common *cpsw)
+{
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ /* register the network device */
+ ret = register_netdev(cpsw->slaves[i].ndev);
+ if (ret) {
+ dev_err(cpsw->dev,
+ "cpsw: err registering net device%d\n", i);
+ cpsw->slaves[i].ndev = NULL;
+ break;
+ }
+ }
+
+ if (ret)
+ cpsw_unregister_ports(cpsw);
+ return ret;
+}
+
+bool cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &cpsw_netdev_ops) {
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return !cpsw->data.dual_emac;
+ }
+
+ return false;
+}
+
+static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
+{
+ int set_val = 0;
+ int i;
+
+ if (!cpsw->ale_bypass &&
+ (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
+ set_val = 1;
+
+ dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *sl_ndev = cpsw->slaves[i].ndev;
+ struct cpsw_priv *priv = netdev_priv(sl_ndev);
+
+ priv->offload_fwd_mark = set_val;
+ }
+}
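+
+/* offload_fwd_mark is set only when both external ports sit in the same
+ * bridge and the ALE is not bypassed; it is meant to let the RX path
+ * mark skbs as already forwarded in hardware so the software bridge
+ * does not forward them a second time.
+ */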
+
+static int cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (!cpsw->br_members) {
+ cpsw->hw_bridge_dev = br_ndev;
+ } else {
+		/* Adding the port to a second bridge is not
+		 * supported.
+		 */
+ if (cpsw->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ cpsw->br_members |= BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ return NOTIFY_DONE;
+}
+
+static void cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ cpsw->br_members &= ~BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ if (!cpsw->br_members)
+ cpsw->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = cpsw_netdevice_port_link(ndev,
+ info->upper_dev);
+ else
+ cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block cpsw_netdevice_nb __read_mostly = {
+ .notifier_call = cpsw_netdevice_event,
+};
+
+static int cpsw_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_netdevice_notifier(&cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops cpsw_devlink_ops = {
+};
+
+static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !cpsw->data.dual_emac;
+
+ return 0;
+}
+
+static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int vlan = cpsw->data.default_vlan;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->data.dual_emac)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ cpsw->data.dual_emac = !switch_en;
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ if (switch_en)
+ vlan = cpsw->data.default_vlan;
+ else
+ vlan = slave->data->dual_emac_res_vlan;
+ slave->port_vlan = vlan;
+ }
+ goto exit;
+ }
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ slave->port_vlan = vlan;
+ if (netif_running(sl_ndev))
+ cpsw_port_add_switch_def_ale_entries(priv,
+ slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = false;
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(slave->ndev);
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = true;
+ }
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int ret = -EOPNOTSUPP;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
+ ctx->val.vbool);
+ if (!ret) {
+ cpsw->ale_bypass = ctx->val.vbool;
+ cpsw_port_offload_fwd_mark_update(cpsw);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
+ "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
+ NULL),
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
+ "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
+};
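+
+/* Runtime usage, as a sketch (the devlink device name is hypothetical):
+ *
+ *	devlink dev param set platform/4a100000.switch \
+ *		name switch_mode value true cmode runtime
+ *	devlink dev param set platform/4a100000.switch \
+ *		name ale_bypass value false cmode runtime
+ */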
+
+static int cpsw_register_devlink(struct cpsw_common *cpsw)
+{
+ struct device *dev = cpsw->dev;
+ struct cpsw_devlink *dl_priv;
+ int ret = 0;
+
+ cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
+ if (!cpsw->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(cpsw->devlink);
+ dl_priv->cpsw = cpsw;
+
+ ret = devlink_register(cpsw->devlink, dev);
+ if (ret) {
+ dev_err(dev, "DL reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
+
+ ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "DL params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+
+ devlink_params_publish(cpsw->devlink);
+ return ret;
+
+dl_unreg:
+ devlink_unregister(cpsw->devlink);
+dl_free:
+ devlink_free(cpsw->devlink);
+ return ret;
+}
+
+static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
+{
+ devlink_params_unpublish(cpsw->devlink);
+ devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ devlink_unregister(cpsw->devlink);
+ devlink_free(cpsw->devlink);
+}
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw-switch"},
+ { .compatible = "ti,am335x-cpsw-switch"},
+ { .compatible = "ti,am4372-cpsw-switch"},
+ { .compatible = "ti,dra7-cpsw-switch"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
+
+static int cpsw_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ struct cpsw_common *cpsw;
+ struct resource *ss_res;
+ struct gpio_descs *mode;
+ void __iomem *ss_regs;
+ int ret = 0, ch;
+ struct clk *clk;
+ int irq;
+
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
+ cpsw_slave_index = cpsw_slave_index_priv;
+
+ cpsw->dev = dev;
+
+ cpsw->slaves = devm_kcalloc(dev,
+ CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave),
+ GFP_KERNEL);
+ if (!cpsw->slaves)
+ return -ENOMEM;
+
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck is not found %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(dev, ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ return ret;
+ }
+ cpsw->regs = ss_regs;
+
+ irq = platform_get_irq_byname(pdev, "rx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ irq = platform_get_irq_byname(pdev, "tx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
+ platform_set_drvdata(pdev, cpsw);
+ /* This may be required here for child devices. */
+ pm_runtime_enable(dev);
+
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = cpsw_probe_dt(cpsw);
+ if (ret)
+ goto clean_dt_ret;
+
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
+
+ cpsw->rx_packet_max = rx_packet_max;
+ cpsw->descs_pool_size = descs_pool_size;
+ eth_random_addr(cpsw->base_mac);
+
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
+ goto clean_dt_ret;
+
+ cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
+ ss_regs + CPSW1_WR_OFFSET :
+ ss_regs + CPSW2_WR_OFFSET;
+
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_cpts;
+ }
+
+ cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
+ goto clean_cpts;
+ }
+ cpsw_split_res(cpsw);
+
+ /* setup netdevs */
+ ret = cpsw_create_ports(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+ * MISC IRQs which are always kept disabled with this driver so
+ * we will not request them.
+ *
+ * If anyone wants to implement support for those, make sure to
+ * first request and append them to irqs_table array.
+ */
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = cpsw_register_notifiers(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ ret = cpsw_register_devlink(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ ret = cpsw_register_ports(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
+ &ss_res->start, descs_pool_size,
+ cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
+ CPSW_MINOR_VERSION(cpsw->version),
+ CPSW_RTL_VERSION(cpsw->version));
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+clean_unregister_notifiers:
+ cpsw_unregister_notifiers(cpsw);
+clean_unregister_netdev:
+ cpsw_unregister_ports(cpsw);
+clean_cpts:
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int cpsw_remove(struct platform_device *pdev)
+{
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ cpsw_unregister_notifiers(cpsw);
+ cpsw_unregister_devlink(cpsw);
+ cpsw_unregister_ports(cpsw);
+
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver cpsw_driver = {
+ .driver = {
+ .name = "cpsw-switch",
+ .of_match_table = cpsw_of_mtable,
+ },
+ .probe = cpsw_probe,
+ .remove = cpsw_remove,
+};
+
+module_platform_driver(cpsw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 476d050a022c..b833cc1d188c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -5,20 +5,415 @@
* Copyright (C) 2019 Texas Instruments
*/
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
+int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
+
+void cpsw_intr_enable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, true);
+}
+
+void cpsw_intr_disable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, false);
+}
+
+void cpsw_tx_handler(void *token, int len, int status)
+{
+ struct cpsw_meta_xdp *xmeta;
+ struct xdp_frame *xdpf;
+ struct net_device *ndev;
+ struct netdev_queue *txq;
+ struct sk_buff *skb;
+ int ch;
+
+ if (cpsw_is_xdpf_handle(token)) {
+ xdpf = cpsw_handle_to_xdpf(token);
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ ndev = xmeta->ndev;
+ ch = xmeta->ch;
+ xdp_return_frame(xdpf);
+ } else {
+ skb = token;
+ ndev = skb->dev;
+ ch = skb_get_queue_mapping(skb);
+ cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
+ dev_kfree_skb_any(skb);
+ }
+
+	/* Check whether the queue is stopped due to stalled tx dma; if it
+	 * is, wake the queue since we now have free descriptors for tx.
+	 */
+ txq = netdev_get_tx_queue(ndev, ch);
+ if (unlikely(netif_tx_queue_stopped(txq)))
+ netif_tx_wake_queue(txq);
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+}
+
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->tx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_tx);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ writel(0, &cpsw->wr_regs->rx_en);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_rx);
+ return IRQ_HANDLED;
+}
+
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *txv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
+ continue;
+
+ txv = &cpsw->txv[ch];
+ if (unlikely(txv->budget > budget - num_tx))
+ cur_budget = budget - num_tx;
+ else
+ cur_budget = txv->budget;
+
+ num_tx += cpdma_chan_process(txv->ch, cur_budget);
+ if (num_tx >= budget)
+ break;
+ }
+
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ }
+
+ return num_tx;
+}
+
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx;
+
+ num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+ }
+
+ return num_tx;
+}
+
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *rxv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
+ if (!(ch_map & 0x01))
+ continue;
+
+ rxv = &cpsw->rxv[ch];
+ if (unlikely(rxv->budget > budget - num_rx))
+ cur_budget = budget - num_rx;
+ else
+ cur_budget = rxv->budget;
+
+ num_rx += cpdma_chan_process(rxv->ch, cur_budget);
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ }
+
+ return num_rx;
+}
+
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx;
+
+ num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ return num_rx;
+}
+
+void cpsw_rx_vlan_encap(struct sk_buff *skb)
+{
+ struct cpsw_priv *priv = netdev_priv(skb->dev);
+ u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
+ struct cpsw_common *cpsw = priv->cpsw;
+ u16 vtag, vid, prio, pkt_type;
+
+ /* Remove VLAN header encapsulation word */
+ skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
+
+ pkt_type = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
+	/* Ignore unknown and priority-tagged packets */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
+ pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
+ return;
+
+ vid = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
+ VLAN_VID_MASK;
+ /* Ignore vid 0 and pass packet as is */
+ if (!vid)
+ return;
+
+	/* The HW untags P0 packets when configured for the vlan; carry the
+	 * tag in the skb only when it is not untagged at P0
+	 */
+ if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
+ prio = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
+
+ vtag = (prio << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ /* strip vlan tag for VLAN-tagged packet */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, VLAN_HLEN);
+ }
+}
+
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+ slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
+}
+
+void soft_reset(const char *module, void __iomem *reg)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ writel_relaxed(1, reg);
+ do {
+ cpu_relax();
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
+
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
+}
+
+void cpsw_ndo_tx_timeout(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ch;
+
+ cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
+ ndev->stats.tx_errors++;
+ cpsw_intr_disable(cpsw);
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_stop(cpsw->txv[ch].ch);
+ cpdma_chan_start(cpsw->txv[ch].ch);
+ }
+
+ cpsw_intr_enable(cpsw);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int cpsw_get_common_speed(struct cpsw_common *cpsw)
+{
+ int i, speed;
+
+ for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
+ speed += cpsw->slaves[i].phy->speed;
+
+ return speed;
+}
+
+int cpsw_need_resplit(struct cpsw_common *cpsw)
+{
+ int i, rlim_ch_num;
+ int speed, ch_rate;
+
+ /* re-split resources only in case speed was changed */
+ speed = cpsw_get_common_speed(cpsw);
+ if (speed == cpsw->speed || !speed)
+ return 0;
+
+ cpsw->speed = speed;
+
+ for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
+ if (!ch_rate)
+ break;
+
+ rlim_ch_num++;
+ }
+
+ /* cases not dependent on speed */
+ if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
+ return 0;
+
+ return 1;
+}
+
+void cpsw_split_res(struct cpsw_common *cpsw)
+{
+ u32 consumed_rate = 0, bigest_rate = 0;
+ struct cpsw_vector *txv = cpsw->txv;
+ int i, ch_weight, rlim_ch_num = 0;
+ int budget, bigest_rate_ch = 0;
+ u32 ch_rate, max_rate;
+ int ch_budget = 0;
+
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (!ch_rate)
+ continue;
+
+ rlim_ch_num++;
+ consumed_rate += ch_rate;
+ }
+
+ if (cpsw->tx_ch_num == rlim_ch_num) {
+ max_rate = consumed_rate;
+ } else if (!rlim_ch_num) {
+ ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+ bigest_rate = 0;
+ max_rate = consumed_rate;
+ } else {
+ max_rate = cpsw->speed * 1000;
+
+		/* if max_rate is less than expected due to reduced link
+		 * speed, split proportionally according to the next
+		 * potential max speed
+		 */
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
+ ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ bigest_rate = (max_rate - consumed_rate) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ }
+
+ /* split tx weight/budget */
+ budget = CPSW_POLL_WEIGHT;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (ch_rate) {
+ txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ if (!txv[i].budget)
+ txv[i].budget++;
+ if (ch_rate > bigest_rate) {
+ bigest_rate_ch = i;
+ bigest_rate = ch_rate;
+ }
+
+ ch_weight = (ch_rate * 100) / max_rate;
+ if (!ch_weight)
+ ch_weight++;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
+ } else {
+ txv[i].budget = ch_budget;
+ if (!bigest_rate_ch)
+ bigest_rate_ch = i;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
+ }
+
+ budget -= txv[i].budget;
+ }
+
+ if (budget)
+ txv[bigest_rate_ch].budget += budget;
+
+ /* split rx budget */
+ budget = CPSW_POLL_WEIGHT;
+ ch_budget = budget / cpsw->rx_ch_num;
+ for (i = 0; i < cpsw->rx_ch_num; i++) {
+ cpsw->rxv[i].budget = ch_budget;
+ budget -= ch_budget;
+ }
+
+ if (budget)
+ cpsw->rxv[0].budget += budget;
+}
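+
+/* Worked example, assuming CPSW_POLL_WEIGHT is 64: with four tx
+ * channels where one is rate limited to 40% of max_rate, that channel
+ * gets budget 64 * 40% = 25 and cpdma weight 40, the three unlimited
+ * channels each get (64 - 25) / 3 = 13 with weight 0, and any rounding
+ * remainder tops up the biggest-rate channel.
+ */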
+
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
int ale_ageout, phys_addr_t desc_mem_phys,
int descs_pool_size)
@@ -28,6 +423,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
struct cpsw_platform_data *data;
struct cpdma_params dma_params;
struct device *dev = cpsw->dev;
+ struct device_node *cpts_node;
void __iomem *cpts_regs;
int ret = 0, i;
@@ -122,11 +518,859 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
return -ENOMEM;
}
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
+ if (!cpts_node)
+ cpts_node = cpsw->dev->of_node;
+
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
}
+ of_node_put(cpts_node);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ u32 ts_en, seq_id;
+
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+ if (priv->tx_ts_enabled)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+ if (priv->rx_ts_enabled)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+ slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 ctrl, mtype;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ switch (cpsw->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V2_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+ slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+ slave_write(slave, ctrl, CPSW2_CONTROL);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw_hwtstamp_v1(priv);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ cpsw_hwtstamp_v2(priv);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
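+
+/* Userspace counterpart, as a minimal sketch (socket setup and error
+ * handling omitted; the interface name is hypothetical):
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type   = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *	};
+ *	struct ifreq ifr = {};
+ *
+ *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
+ *	ifr.ifr_data = (char *)&cfg;
+ *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * Note that any accepted PTP v1/v2 filter is widened to the matching
+ * *_EVENT filter, which is reported back in cfg.rx_filter.
+ */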
+
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /*CONFIG_TI_CPTS*/
+
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+}
+
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 min_rate;
+ u32 ch_rate;
+ int i, ret;
+
+ ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+ if (ch_rate == rate)
+ return 0;
+
+ ch_rate = rate * 1000;
+ min_rate = cpdma_chan_get_min_rate(cpsw->dma);
+	if (ch_rate < min_rate && ch_rate) {
+ dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
+ min_rate);
+ return -EINVAL;
+ }
+
+ if (rate > cpsw->speed) {
+ dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
+ pm_runtime_put(cpsw->dev);
+
+ if (ret)
+ return ret;
+
+	/* update rates for the slaves' tx queues */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ slave = &cpsw->slaves[i];
+ if (!slave->ndev)
+ continue;
+
+ netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
+ }
+
+ cpsw_split_res(cpsw);
+ return ret;
+}
+
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
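+
+/* E.g. with num_tc == 3 (and assuming CPSW_FIFO_SHAPERS_NUM is 3):
+ * tc0 -> FIFO3, tc1 -> FIFO2, and the last tc2 -> FIFO0, which can
+ * never be shaped.
+ */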
+
+bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+	/* shaping has to stay enabled for the highest fifos contiguously,
+	 * and fifo bw can be no more than the interface allows
+	 */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+	/* Enable channels in backward order, as the highest FIFOs must be
+	 * rate limited first, and for consistency with CPDMA rate-limited
+	 * channels, which are also used in backward order. FIFO0 cannot be
+	 * rate limited.
+	 */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -1;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ if (num_tc) {
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
+
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
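+ /* re-apply the shapers in backward order, mirroring cpsw_set_cbs() */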
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct page_pool *pool;
+ struct page *page;
+ int ch_buf_num;
+ int ch, i, ret;
+ dma_addr_t dma;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ pool = cpsw->page_pool[ch];
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ for (i = 0; i < ch_buf_num; i++) {
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ cpsw_err(priv, ifup, "allocate rx page err\n");
+ return -ENOMEM;
+ }
+
+ xmeta = page_address(page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = ch;
+
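+ /* the DMA address handed to CPDMA starts after the shared skb/XDP headroom */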
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
+ page, dma,
+ cpsw->rx_packet_max,
+ 0);
+ if (ret < 0) {
+ cpsw_err(priv, ifup,
+ "cannot submit page to channel %d rx, error %d\n",
+ ch, ret);
+ page_pool_recycle_direct(pool, page);
+ return ret;
+ }
+ }
+
+ cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+ ch, ch_buf_num);
+ }
+
+ return 0;
+}
+
+static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+ int size)
+{
+ struct page_pool_params pp_params;
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = size;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = cpsw->dev;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ dev_err(cpsw->dev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
+{
+ struct page_pool *pool;
+ int ret = 0, pool_size;
+
+ pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ pool = cpsw_create_page_pool(cpsw, pool_size);
+ if (IS_ERR(pool))
+ ret = PTR_ERR(pool);
+ else
+ cpsw->page_pool[ch] = pool;
+
+ return ret;
+}
+
+static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int ret;
+
+ pool = cpsw->page_pool[ch];
+ rxq = &priv->xdp_rxq[ch];
+
+ ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
+ }
+
+ page_pool_destroy(cpsw->page_pool[ch]);
+ cpsw->page_pool[ch] = NULL;
+ }
+}
+
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ret = cpsw_create_rx_pool(cpsw, ch);
+ if (ret)
+ goto err_cleanup;
+
+ /* using the same page pool is allowed, as rx handlers never run
+ * simultaneously for both ndevs
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
+ if (ret)
+ goto err_cleanup;
+ }
+ }
+
+ return 0;
+
+err_cleanup:
+ cpsw_destroy_xdp_rxqs(cpsw);
+
+ return ret;
+}
+
+static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!priv->xdpi.prog && !prog)
+ return 0;
+
+ if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
+ return -EBUSY;
+
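+ /* paired with the READ_ONCE() in cpsw_run_xdp() */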
+ WRITE_ONCE(priv->xdp_prog, prog);
+
+ xdp_attachment_setup(&priv->xdpi, bpf);
+
+ return 0;
+}
+
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return cpsw_xdp_prog_setup(priv, bpf);
+
+ case XDP_QUERY_PROG:
+ return xdp_attachment_query(&priv->xdpi, bpf);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpdma_chan *txch;
+ dma_addr_t dma;
+ int ret;
+
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = 0;
+ txch = cpsw->txv[0].ch;
+
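+ /* pages from the local pool are already DMA mapped; other buffers are mapped by CPDMA */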
+ if (page) {
+ dma = page_pool_get_dma_addr(page);
+ dma += xdpf->headroom + sizeof(struct xdp_frame);
+ ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
+ dma, xdpf->len, port);
+ } else {
+ if (sizeof(*xmeta) > xdpf->headroom) {
+ xdp_return_frame_rx_napi(xdpf);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
+ xdpf->data, xdpf->len, port);
+ }
+
+ if (ret) {
+ priv->ndev->stats.tx_dropped++;
+ xdp_return_frame_rx_napi(xdpf);
+ }
+
+ return ret;
+}
+
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
+ int ret = CPSW_XDP_CONSUMED;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ ret = CPSW_XDP_PASS;
+ goto out;
+ }
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ ret = CPSW_XDP_PASS;
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ goto drop;
+
+ cpsw_xdp_tx_frame(priv, xdpf, page, port);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(ndev, xdp, prog))
+ goto drop;
+
+ /* Have to flush here, per packet, instead of doing it in bulk
+ * at the end of the napi handler. The RX devices on this
+ * particular hardware share a common queue, so the incoming
+ * device might change per packet.
+ */
+ xdp_do_flush_map();
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* fall through -- handle aborts by dropping packet */
+ case XDP_DROP:
+ goto drop;
+ }
+out:
+ rcu_read_unlock();
+ return ret;
+drop:
+ rcu_read_unlock();
+ page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 362c5a986869..bc726356a72c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -54,6 +54,7 @@ do { \
#define HOST_PORT_NUM 0
#define CPSW_ALE_PORTS_NUM 3
+#define CPSW_SLAVE_PORTS_NUM 2
#define SLIVER_SIZE 0x40
#define CPSW1_HOST_PORT_OFFSET 0x028
@@ -65,6 +66,7 @@ do { \
#define CPSW1_CPTS_OFFSET 0x500
#define CPSW1_ALE_OFFSET 0x600
#define CPSW1_SLIVER_OFFSET 0x700
+#define CPSW1_WR_OFFSET 0x900
#define CPSW2_HOST_PORT_OFFSET 0x108
#define CPSW2_SLAVE_OFFSET 0x200
@@ -76,6 +78,7 @@ do { \
#define CPSW2_ALE_OFFSET 0xd00
#define CPSW2_SLIVER_OFFSET 0xd80
#define CPSW2_BD_OFFSET 0x2000
+#define CPSW2_WR_OFFSET 0x1200
#define CPDMA_RXTHRESH 0x0c0
#define CPDMA_RXFREE 0x0e0
@@ -113,12 +116,15 @@ do { \
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */
+#define CPSW_ALE_NUM_ENTRIES 1024
#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
#define CPSW_FIFO_SHAPE_EN_SHIFT 16
#define CPSW_FIFO_RATE_EN_SHIFT 20
#define CPSW_TC_NUM 4
#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK 0x7f
+#define CPSW_BD_RAM_SIZE 0x2000
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
@@ -275,10 +281,11 @@ struct cpsw_slave_data {
struct device_node *slave_node;
struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
+ phy_interface_t phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
struct phy *ifphy;
+ bool disabled;
};
struct cpsw_platform_data {
@@ -286,9 +293,9 @@ struct cpsw_platform_data {
u32 ss_reg_ofs; /* Subsystem control register offset */
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */
u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
+ u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
bool dual_emac; /* Enable Dual EMAC mode */
@@ -344,10 +351,15 @@ struct cpsw_common {
bool tx_irq_disabled;
u32 irqs_table[IRQ_NUM];
struct cpts *cpts;
+ struct devlink *devlink;
int rx_ch_num, tx_ch_num;
int speed;
int usage_count;
struct page_pool *page_pool[CPSW_MAX_QUEUES];
+ u8 br_members;
+ struct net_device *hw_bridge_dev;
+ bool ale_bypass;
+ u8 base_mac[ETH_ALEN];
};
struct cpsw_priv {
@@ -368,19 +380,14 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
+ int offload_fwd_mark;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
-#define cpsw_slave_index(cpsw, priv) \
- ((cpsw->data.dual_emac) ? priv->emac_port : \
- cpsw->data.active_slave)
-
-static inline int cpsw_get_slave_port(u32 slave_num)
-{
- return slave_num + 1;
-}
+extern int (*cpsw_slave_index)(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv);
struct addr_sync_ctx {
struct net_device *ndev;
@@ -389,6 +396,35 @@ struct addr_sync_ctx {
int flush; /* flush flag */
};
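+/* per-page metadata is stored in the page headroom, right after the area reserved for the xdp_frame */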
+#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+
+#define CPSW_XDP_CONSUMED 1
+#define CPSW_XDP_PASS 0
+
+struct __aligned(sizeof(long)) cpsw_meta_xdp {
+ struct net_device *ndev;
+ int ch;
+};
+
+/* The buffer includes headroom compatible with both skb and xdpf */
+#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
+
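+/* CPDMA completion tokens carry either an skb or an xdp_frame; BIT(0) of the pointer tags the xdp_frame case */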
+static inline int cpsw_is_xdpf_handle(void *handle)
+{
+ return (unsigned long)handle & BIT(0);
+}
+
+static inline void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
+{
+ return (void *)((unsigned long)xdpf | BIT(0));
+}
+
+static inline struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
+{
+ return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
+}
+
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
int ale_ageout, phys_addr_t desc_mem_phys,
int descs_pool_size);
@@ -399,6 +435,29 @@ void cpsw_intr_disable(struct cpsw_common *cpsw);
void cpsw_tx_handler(void *token, int len, int status);
int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw);
void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw);
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port);
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port);
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
+void cpsw_rx_vlan_encap(struct sk_buff *skb);
+void soft_reset(const char *module, void __iomem *reg);
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_ndo_tx_timeout(struct net_device *ndev);
+int cpsw_need_resplit(struct cpsw_common *cpsw);
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+bool cpsw_shp_is_off(struct cpsw_priv *priv);
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
new file mode 100644
index 000000000000..985a929bb957
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments switchdev Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_switchdev.h"
+
+struct cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct cpsw_priv *priv;
+ unsigned long event;
+};
+
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
+ struct switchdev_trans *trans, u8 state)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u8 cpsw_state;
+ int ret = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, cpsw_state);
+ dev_dbg(priv->dev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ unsigned long brport_flags)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ bool unreg_mcast_add = false;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (brport_flags & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+
+ return 0;
+}
+
+static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_port_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ dev_dbg(priv->dev, "attr: id %u port: %u\n", attr->id, priv->emac_port);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 cpsw_get_pvid(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 __iomem *port_vlan_reg;
+ u32 pvid;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ pvid = slave_read(cpsw->slaves + (priv->emac_port - 1), reg);
+ } else {
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ pvid = readl(port_vlan_reg);
+ }
+
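+ /* the VID occupies the low 12 bits of the port VLAN register */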
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void cpsw_set_pvid(struct cpsw_priv *priv, u16 vid, bool cfi, u32 cos)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void __iomem *port_vlan_reg;
+ u32 pvid;
+
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ /* no barrier */
+ slave_write(cpsw->slaves + (priv->emac_port - 1), pvid, reg);
+ } else {
+ /* CPU port */
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ writel(pvid, port_vlan_reg);
+ }
+}
+
+static int cpsw_port_vlan_add(struct cpsw_priv *priv, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(priv->emac_port);
+ flags = priv->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ dev_err(priv->dev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (!pvid)
+ return ret;
+
+ cpsw_set_pvid(priv, vid, 0, 0);
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+ return ret;
+}
+
+static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+ /* We don't care about the return value here; an error is returned
+ * only if the unicast entry is not present
+ */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == cpsw_get_pvid(priv))
+ cpsw_set_pvid(priv, 0, 0, 0);
+
+ /* We don't care about the return value here; an error is returned
+ * only if the multicast entry is not present
+ */
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, vid);
+ dev_dbg(priv->dev, "VID del: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int cpsw_port_vlans_add(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
+ priv->ndev->name, vlan->vid_begin, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cpsw_port_vlans_del(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = cpsw_port_vlan_del(priv, vid, orig_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cpsw_port_mdb_add(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ dev_dbg(priv->dev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int cpsw_port_mdb_del(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int del_mask;
+ int err;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ dev_dbg(priv->dev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return err;
+}
+
+static int cpsw_port_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_add: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_add(priv, vlan, trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_add(priv, mdb, trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int cpsw_port_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_del: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_del(priv, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_del(priv, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct cpsw_switchdev_event_work, work);
+ struct cpsw_priv *priv = switchdev_work->priv;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port = priv->emac_port;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user)
+ break;
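+ /* an entry for the port's own MAC address belongs on the host (CPU) port */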
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ cpsw_fdb_offload_notify(priv->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(priv->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct cpsw_switchdev_event_work *switchdev_work;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
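+ /* defer FDB programming to process context; this notifier runs under rcu_read_lock() */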
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work);
+ switchdev_work->priv = priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = cpsw_switchdev_event,
+};
+
+static int cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = cpsw_switchdev_blocking_event,
+};
+
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.h b/drivers/net/ethernet/ti/cpsw_switchdev.h
new file mode 100644
index 000000000000..04a045dba7d4
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+
+#include <net/switchdev.h>
+
+bool cpsw_port_dev_check(const struct net_device *dev);
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw);
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 61136428e2c0..729ce09dded9 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -459,7 +459,7 @@ int cpts_register(struct cpts *cpts)
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
- timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+ timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());
cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
if (IS_ERR(cpts->clock)) {
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2c1fac33136c..86a3f42a3dcc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2291,6 +2291,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
phy_interface_t phy_mode;
bool has_phy = false;
+ int err;
void (*hndlr)(struct net_device *) = gbe_adjust_link;
@@ -2320,11 +2321,11 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
slave->phy_port_t = PORT_MII;
} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
has_phy = true;
- phy_mode = of_get_phy_mode(slave->node);
+ err = of_get_phy_mode(slave->node, &phy_mode);
/* if phy-mode is not present, default to
* PHY_INTERFACE_MODE_RGMII
*/
- if (phy_mode < 0)
+ if (err)
phy_mode = PHY_INTERFACE_MODE_RGMII;
if (!phy_interface_mode_is_rgmii(phy_mode)) {
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 8d994cebb6b0..6304ebd8b5c6 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,6 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -26,11 +25,10 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
select PHYLINK
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
- AXI bus interface used in Xilinx Virtex FPGAs.
+ AXI bus interface used in Xilinx Virtex FPGAs and SoCs.
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 676006f32f91..20746b801959 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1405,8 +1405,8 @@ static void axienet_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int axienet_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void axienet_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
@@ -1431,8 +1431,6 @@ static int axienet_mac_link_state(struct phylink_config *config,
state->an_complete = 0;
state->duplex = 1;
-
- return 1;
}
static void axienet_mac_an_restart(struct phylink_config *config)
@@ -1497,7 +1495,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
- .mac_link_state = axienet_mac_link_state,
+ .mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
@@ -1761,11 +1759,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
} else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)lp->phy_mode < 0) {
- ret = -EINVAL;
+ ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ if (ret)
goto free_netdev;
- }
}
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
@@ -1790,10 +1786,6 @@ static int axienet_probe(struct platform_device *pdev)
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "unable to get DMA memory resource\n");
- goto free_netdev;
- }
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 670ef682f268..250bd90627a5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -609,7 +609,8 @@ struct nvsp_5_send_indirect_table {
/* The number of entries in the send indirection table */
u32 count;
- /* The offset of the send indirection table from top of this struct.
+ /* The offset of the send indirection table from the beginning of
+ * struct nvsp_message.
* The send indirection table tells which channel to put the send
* traffic on. Each entry is a channel number.
*/
@@ -822,7 +823,8 @@ struct nvsp_message {
#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
- NETIF_F_TSO6 | NETIF_F_LRO | NETIF_F_SG)
+ NETIF_F_TSO6 | NETIF_F_LRO | \
+ NETIF_F_SG | NETIF_F_RXHASH)
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
@@ -853,6 +855,7 @@ struct multi_recv_comp {
struct nvsc_rsc {
const struct ndis_pkt_8021q_info *vlan;
const struct ndis_tcp_ip_checksum_info *csum_info;
+ const u32 *hash_info;
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d22a36fc7a7c..eab83e71567a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1178,20 +1178,39 @@ static int netvsc_receive(struct net_device *ndev,
}
static void netvsc_send_table(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- u32 count, *tab;
+ u32 count, offset, *tab;
int i;
count = nvmsg->msg.v5_msg.send_table.count;
+ offset = nvmsg->msg.v5_msg.send_table.offset;
+
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
return;
}
- tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
- nvmsg->msg.v5_msg.send_table.offset);
+ /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+ * wrong due to a host bug. So fix the offset here.
+ */
+ if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+ msglen >= sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+ offset = sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber);
+
+ /* Boundary check for all versions */
+ if (offset > msglen - count * sizeof(u32)) {
+ netdev_err(ndev, "Received send-table offset too big:%u\n",
+ offset);
+ return;
+ }
+
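+ /* the offset is relative to the beginning of struct nvsp_message */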
+ tab = (void *)nvmsg + offset;
for (i = 0; i < count; i++)
net_device_ctx->tx_table[i] = tab[i];
@@ -1209,12 +1228,14 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc ? "added" : "removed");
}
-static void netvsc_receive_inband(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+static void netvsc_receive_inband(struct net_device *ndev,
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
- netvsc_send_table(ndev, nvmsg);
+ netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
@@ -1232,6 +1253,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
{
struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
@@ -1247,7 +1269,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(ndev, nvmsg);
+ netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
break;
default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 963509add611..868e22e286ca 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash(
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
else
- hash = 0;
+ return 0;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ __skb_set_sw_hash(skb, hash, false);
}
return hash;
@@ -766,6 +766,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
nvchan->rsc.csum_info;
+ const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
int i;
@@ -795,14 +796,16 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->protocol == htons(ETH_P_IP))
netvsc_comp_ipcsum(skb);
- /* Do L4 checksum offload if enabled and present.
- */
+ /* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+ if (hash_info && (net->features & NETIF_F_RXHASH))
+ skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
+
if (vlan) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index abaf8156d19d..206b4e77eaf0 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -358,6 +358,7 @@ static inline
void rsc_add_data(struct netvsc_channel *nvchan,
const struct ndis_pkt_8021q_info *vlan,
const struct ndis_tcp_ip_checksum_info *csum_info,
+ const u32 *hash_info,
void *data, u32 len)
{
u32 cnt = nvchan->rsc.cnt;
@@ -368,6 +369,7 @@ void rsc_add_data(struct netvsc_channel *nvchan,
nvchan->rsc.vlan = vlan;
nvchan->rsc.csum_info = csum_info;
nvchan->rsc.pktlen = len;
+ nvchan->rsc.hash_info = hash_info;
}
nvchan->rsc.data[cnt] = data;
@@ -385,6 +387,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
+ const u32 *hash_info;
u32 data_offset;
void *data;
bool rsc_more = false;
@@ -411,6 +414,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
+ hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
+
pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
data = (void *)msg + data_offset;
@@ -441,7 +446,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
- rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len);
+ rsc_add_data(nvchan, vlan, csum_info, hash_info,
+ data, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
@@ -1208,6 +1214,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
/* Compute tx offload settings based on hw capabilities */
net->hw_features |= NETIF_F_RXCSUM;
net->hw_features |= NETIF_F_SG;
+ net->hw_features |= NETIF_F_RXHASH;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 8af5b7e9f4ed..c92a62dbf398 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -74,9 +74,9 @@ config IEEE802154_ATUSB
The module will be called 'atusb'.
config IEEE802154_ADF7242
- tristate "ADF7242 transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
+ tristate "ADF7242 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
---help---
Say Y here to enable the ADF7242 SPI 802.15.4 wireless
controller.
@@ -107,9 +107,9 @@ config IEEE802154_CA8210_DEBUGFS
management entities.
config IEEE802154_MCR20A
- tristate "MCR20A transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
+ tristate "MCR20A transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
---help---
Say Y here to enable the MCR20A SPI 802.15.4 wireless
controller.
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 43506948e444..89c046b204e0 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -218,7 +218,6 @@ static int
cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
{
int ret;
- u8 status = 0xff;
struct spi_message msg;
struct spi_transfer xfer = {
.len = 0,
@@ -236,8 +235,6 @@ cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
priv->buf[0]);
ret = spi_sync(priv->spi, &msg);
- if (!ret)
- status = priv->buf[0];
dev_vdbg(&priv->spi->dev,
"buf[0] = %02x\n", priv->buf[0]);
mutex_unlock(&priv->buffer_mutex);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index ba3dfac1d904..a70662261a5a 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -108,8 +108,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
- NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
#define IPVLAN_STATE_MASK \
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 14545a8797a8..a1c77cc00416 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,7 +68,6 @@ EXPORT_SYMBOL(blackhole_netdev);
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct pcpu_lstats *lb_stats;
int len;
skb_tx_timestamp(skb);
@@ -85,27 +84,20 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
- /* it's OK to use per_cpu_ptr() because BHs are off */
- lb_stats = this_cpu_ptr(dev->lstats);
-
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
- u64_stats_update_begin(&lb_stats->syncp);
- lb_stats->bytes += len;
- lb_stats->packets++;
- u64_stats_update_end(&lb_stats->syncp);
- }
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ dev_lstats_add(dev, len);
return NETDEV_TX_OK;
}
-static void loopback_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
{
- u64 bytes = 0;
- u64 packets = 0;
int i;
+ *packets = 0;
+ *bytes = 0;
+
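+ /* sum the per-cpu counters; the syncp retry loop guards against torn reads on 32-bit */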
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
u64 tbytes, tpackets;
@@ -114,12 +106,22 @@ static void loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
- tbytes = lb_stats->bytes;
- tpackets = lb_stats->packets;
+ tpackets = u64_stats_read(&lb_stats->packets);
+ tbytes = u64_stats_read(&lb_stats->bytes);
} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
- bytes += tbytes;
- packets += tpackets;
+ *bytes += tbytes;
+ *packets += tpackets;
}
+}
+EXPORT_SYMBOL(dev_lstats_read);
+
+static void loopback_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 packets, bytes;
+
+ dev_lstats_read(dev, &packets, &bytes);
+
stats->rx_packets = packets;
stats->tx_packets = packets;
stats->rx_bytes = bytes;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 34fc59bd1e20..05631d97eeb4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -359,10 +359,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
+ schedule_work(&port->bc_work);
+
if (err)
goto free_nskb;
- schedule_work(&port->bc_work);
return;
free_nskb:
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 09f1315d2f2a..f4d8f62f28c2 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o fib.o bus.o
+ netdev.o dev.o fib.o bus.o health.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 1a0ff3d7747b..6aeed0c600f8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -283,6 +283,7 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.bus = &nsim_bus;
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->initial_net = current->nsproxy->net_ns;
err = device_register(&nsim_bus_dev->dev);
if (err)
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 54ca6681ba31..059711edfc61 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -90,6 +90,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->test1);
debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
&nsim_dev_take_snapshot_fops);
+ debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->dont_allow_reload);
+ debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->fail_reload);
return 0;
}
@@ -123,39 +127,6 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
debugfs_remove_recursive(nsim_dev_port->ddir);
}
-static struct net *nsim_devlink_net(struct devlink *devlink)
-{
- return &init_net;
-}
-
-static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
-}
-
-static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
-}
-
-static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
-}
-
-static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
-}
-
static int nsim_dev_resources_register(struct devlink *devlink)
{
struct devlink_resource_size_params params = {
@@ -163,9 +134,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY
};
- struct net *net = nsim_devlink_net(devlink);
int err;
- u64 n;
/* Resources for IPv4 */
err = devlink_resource_register(devlink, "IPv4", (u64)-1,
@@ -177,8 +146,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV4_FIB,
NSIM_RESOURCE_IPV4, &params);
if (err) {
@@ -186,8 +154,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV4, &params);
if (err) {
@@ -205,8 +172,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6, &params);
if (err) {
@@ -214,8 +180,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_IPV6, &params);
if (err) {
@@ -223,22 +188,6 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_dev_ipv4_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_dev_ipv4_fib_rules_res_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_dev_ipv6_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_dev_ipv6_fib_rules_res_occ_get,
- net);
out:
return err;
}
@@ -524,36 +473,48 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
kfree(nsim_dev->trap_data);
}
-static int nsim_dev_reload_down(struct devlink *devlink,
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack);
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
+
+static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->dont_allow_reload) {
+ /* For testing purposes, the user set the debugfs
+ * dont_allow_reload value to true, so forbid the reload.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
+ return -EOPNOTSUPP;
+ }
+
+ nsim_dev_reload_destroy(nsim_dev);
return 0;
}
static int nsim_dev_reload_up(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- struct net *net = nsim_devlink_net(devlink);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- int err;
- u64 val;
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
- if (!err) {
- err = nsim_fib_set_max(net, res_ids[i], val, extack);
- if (err)
- return err;
- }
+ if (nsim_dev->fail_reload) {
+ /* For testing purposes, the user set the debugfs
+ * fail_reload value to true, so fail right away.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
+ return -EINVAL;
}
- nsim_devlink_param_load_driverinit_values(devlink);
- return 0;
+ return nsim_dev_reload_create(nsim_dev, extack);
+}
+
+static int nsim_dev_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
}
#define NSIM_DEV_FLASH_SIZE 500000
@@ -649,6 +610,7 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
+ .info_get = nsim_dev_info_get,
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
@@ -657,93 +619,6 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
#define NSIM_DEV_MAX_MACS_DEFAULT 32
#define NSIM_DEV_TEST1_DEFAULT true
-static struct nsim_dev *
-nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
-{
- struct nsim_dev *nsim_dev;
- struct devlink *devlink;
- int err;
-
- devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
- if (!devlink)
- return ERR_PTR(-ENOMEM);
- nsim_dev = devlink_priv(devlink);
- nsim_dev->nsim_bus_dev = nsim_bus_dev;
- nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
- get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
- INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->port_list_lock);
- nsim_dev->fw_update_status = true;
- nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
- nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
-
- err = nsim_dev_resources_register(devlink);
- if (err)
- goto err_devlink_free;
-
- err = devlink_register(devlink, &nsim_bus_dev->dev);
- if (err)
- goto err_resources_unregister;
-
- err = devlink_params_register(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
- if (err)
- goto err_dl_unregister;
- nsim_devlink_set_params_init_values(nsim_dev, devlink);
-
- err = nsim_dev_dummy_region_init(nsim_dev, devlink);
- if (err)
- goto err_params_unregister;
-
- err = nsim_dev_traps_init(devlink);
- if (err)
- goto err_dummy_region_exit;
-
- err = nsim_dev_debugfs_init(nsim_dev);
- if (err)
- goto err_traps_exit;
-
- err = nsim_bpf_dev_init(nsim_dev);
- if (err)
- goto err_debugfs_exit;
-
- devlink_params_publish(devlink);
- return nsim_dev;
-
-err_debugfs_exit:
- nsim_dev_debugfs_exit(nsim_dev);
-err_traps_exit:
- nsim_dev_traps_exit(devlink);
-err_dummy_region_exit:
- nsim_dev_dummy_region_exit(nsim_dev);
-err_params_unregister:
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
-err_dl_unregister:
- devlink_unregister(devlink);
-err_resources_unregister:
- devlink_resources_unregister(devlink, NULL);
-err_devlink_free:
- devlink_free(devlink);
- return ERR_PTR(err);
-}
-
-static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
-{
- struct devlink *devlink = priv_to_devlink(nsim_dev);
-
- nsim_bpf_dev_exit(nsim_dev);
- nsim_dev_debugfs_exit(nsim_dev);
- nsim_dev_traps_exit(devlink);
- nsim_dev_dummy_region_exit(nsim_dev);
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
- devlink_unregister(devlink);
- devlink_resources_unregister(devlink, NULL);
- mutex_destroy(&nsim_dev->port_list_lock);
- devlink_free(devlink);
-}
-
static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
unsigned int port_index)
{
@@ -813,39 +688,195 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
mutex_unlock(&nsim_dev->port_list_lock);
}
-int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
+ unsigned int port_count)
{
- struct nsim_dev *nsim_dev;
- int i;
- int err;
-
- nsim_dev = nsim_dev_create(nsim_bus_dev, nsim_bus_dev->port_count);
- if (IS_ERR(nsim_dev))
- return PTR_ERR(nsim_dev);
- dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+ int i, err;
- mutex_lock(&nsim_dev->port_list_lock);
- for (i = 0; i < nsim_bus_dev->port_count; i++) {
+ for (i = 0; i < port_count; i++) {
err = __nsim_dev_port_add(nsim_dev, i);
if (err)
goto err_port_del_all;
}
- mutex_unlock(&nsim_dev->port_list_lock);
return 0;
err_port_del_all:
- mutex_unlock(&nsim_dev->port_list_lock);
nsim_dev_port_del_all(nsim_dev);
- nsim_dev_destroy(nsim_dev);
return err;
}
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = priv_to_devlink(nsim_dev);
+ nsim_dev = devlink_priv(devlink);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+ if (IS_ERR(nsim_dev->fib_data))
+ return PTR_ERR(nsim_dev->fib_data);
+
+ nsim_devlink_param_load_driverinit_values(devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_traps_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_health_exit;
+
+ return 0;
+
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ return err;
+}
+
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ if (!devlink)
+ return -ENOMEM;
+ devlink_net_set(devlink, nsim_bus_dev->initial_net);
+ nsim_dev = devlink_priv(devlink);
+ nsim_dev->nsim_bus_dev = nsim_bus_dev;
+ nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
+ get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+ nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
+ nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
+ err = nsim_dev_resources_register(devlink);
+ if (err)
+ goto err_devlink_free;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_resources_unregister;
+ }
+
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+ goto err_fib_destroy;
+
+ err = devlink_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ if (err)
+ goto err_dl_unregister;
+ nsim_devlink_set_params_init_values(nsim_dev, devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_params_unregister;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_traps_exit;
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_debugfs_exit;
+
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+ goto err_health_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_bpf_dev_exit;
+
+ devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
+ return 0;
+
+err_bpf_dev_exit:
+ nsim_bpf_dev_exit(nsim_dev);
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_debugfs_exit:
+ nsim_dev_debugfs_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_params_unregister:
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+err_dl_unregister:
+ devlink_unregister(devlink);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+err_devlink_free:
+ devlink_free(devlink);
+ return err;
+}
+
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+
+ if (devlink_is_reload_failed(devlink))
+ return;
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_health_exit(nsim_dev);
+ nsim_dev_traps_exit(devlink);
+ nsim_dev_dummy_region_exit(nsim_dev);
+ mutex_destroy(&nsim_dev->port_list_lock);
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+}
+
void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
- nsim_dev_port_del_all(nsim_dev);
- nsim_dev_destroy(nsim_dev);
+ devlink_reload_disable(devlink);
+
+ nsim_dev_reload_destroy(nsim_dev);
+
+ nsim_bpf_dev_exit(nsim_dev);
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+ devlink_free(devlink);
}
static struct nsim_dev_port *
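/* Aside (illustration only, not part of the patch): nsim_dev_probe() above
 * follows the kernel's mirrored goto-unwind convention -- each err_* label
 * undoes exactly the steps that already succeeded, in reverse order, and
 * nsim_dev_remove() repeats the same teardown tail. A minimal compilable
 * sketch of the shape; step_a()/step_b()/undo_a() are hypothetical stand-ins.
 */
#include <stdio.h>

static int step_a(void) { return 0; }	/* succeeds */
static int step_b(void) { return -1; }	/* fails, forcing the unwind */
static void undo_a(void) { puts("undo_a"); }

static int probe_like(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_out;
	err = step_b();
	if (err)
		goto err_undo_a;	/* unwind only what succeeded */
	return 0;

err_undo_a:
	undo_a();
err_out:
	return err;
}

int main(void)
{
	return probe_like() ? 1 : 0;
}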
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 1a251f76d09b..13540dee7364 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,7 +18,7 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
-#include <net/netns/generic.h>
+#include <net/net_namespace.h>
#include "netdevsim.h"
@@ -33,15 +33,14 @@ struct nsim_per_fib_data {
};
struct nsim_fib_data {
+ struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-static unsigned int nsim_fib_net_id;
-
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
@@ -64,12 +63,10 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
- struct netlink_ext_ack *extack)
+static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
- int err = 0;
switch (res_id) {
case NSIM_RESOURCE_IPV4_FIB:
@@ -85,20 +82,10 @@ int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
entry = &fib_data->ipv6.rules;
break;
default:
- return 0;
- }
-
-	/* not allowing a new max to be less than current occupancy
- * --> no means of evicting entries
- */
- if (val < entry->num) {
- NL_SET_ERR_MSG_MOD(extack, "New size is less than current occupancy");
- err = -EINVAL;
- } else {
- entry->max = val;
+ WARN_ON(1);
+ return;
}
-
- return err;
+ entry->max = val;
}
static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
@@ -120,9 +107,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -157,9 +144,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -178,18 +165,22 @@ static int nsim_fib_event(struct fib_notifier_info *info, bool add)
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(data, info,
+ event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info,
+ event == FIB_EVENT_ENTRY_ADD);
break;
}
@@ -199,69 +190,116 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data;
- struct net *net;
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
- rcu_read_lock();
- for_each_net_rcu(net) {
- data = net_generic(net, nsim_fib_net_id);
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+}
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
+static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
- }
- rcu_read_unlock();
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB, false);
}
-static struct notifier_block nsim_fib_nb = {
- .notifier_call = nsim_fib_event_nb,
-};
-
-/* Initialize per network namespace state */
-static int __net_init nsim_fib_netns_init(struct net *net)
+static u64 nsim_fib_ipv4_rules_res_occ_get(void *priv)
{
- struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_data *data = priv;
- data->ipv4.fib.max = (u64)-1;
- data->ipv4.rules.max = (u64)-1;
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
- data->ipv6.fib.max = (u64)-1;
- data->ipv6.rules.max = (u64)-1;
+static u64 nsim_fib_ipv6_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
- return 0;
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB, false);
}
-static struct pernet_operations nsim_fib_net_ops = {
- .init = nsim_fib_netns_init,
- .id = &nsim_fib_net_id,
- .size = sizeof(struct nsim_fib_data),
-};
+static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
-void nsim_fib_exit(void)
+static void nsim_fib_set_max_all(struct nsim_fib_data *data,
+ struct devlink *devlink)
{
- unregister_fib_notifier(&nsim_fib_nb);
- unregister_pernet_subsys(&nsim_fib_net_ops);
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); i++) {
+ int err;
+ u64 val;
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (err)
+ val = (u64) -1;
+ nsim_fib_set_max(data, res_ids[i], val);
+ }
}
-int nsim_fib_init(void)
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
+ struct nsim_fib_data *data;
int err;
- err = register_pernet_subsys(&nsim_fib_net_ops);
- if (err < 0) {
- pr_err("Failed to register pernet subsystem\n");
- goto err_out;
- }
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_set_max_all(data, devlink);
- err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
- if (err < 0) {
+ data->fib_nb.notifier_call = nsim_fib_event_nb;
+ err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
+ nsim_fib_dump_inconsistent, extack);
+ if (err) {
pr_err("Failed to register fib notifier\n");
- unregister_pernet_subsys(&nsim_fib_net_ops);
goto err_out;
}
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_fib_ipv4_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_fib_ipv4_rules_res_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_fib_ipv6_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_fib_ipv6_rules_res_occ_get,
+ data);
+ return data;
+
err_out:
- return err;
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
+{
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB);
+ unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ kfree(data);
}
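/* Aside (illustration only, not part of the patch): the fib notifier moves
 * from pernet state to per-device state by embedding the notifier_block in
 * nsim_fib_data, so every callback recovers its context via container_of(),
 * a constant-offset subtraction. User-space sketch with stand-in types; the
 * macro below is the plain form of the kernel's (which adds type checking).
 */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block_like { int dummy; };

struct fib_data_like {
	int counters;
	struct notifier_block_like fib_nb;	/* embedded, as in nsim_fib_data */
};

int main(void)
{
	struct fib_data_like data;
	/* the notifier core hands callbacks only the embedded member */
	struct notifier_block_like *nb = &data.fib_nb;

	assert(container_of(nb, struct fib_data_like, fib_nb) == &data);
	return 0;
}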
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
new file mode 100644
index 000000000000..9aa637d162eb
--- /dev/null
+++ b/drivers/net/netdevsim/health.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static int
+nsim_dev_empty_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_dev_empty_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_empty_reporter_ops = {
+ .name = "empty",
+ .dump = nsim_dev_empty_reporter_dump,
+ .diagnose = nsim_dev_empty_reporter_diagnose,
+};
+
+struct nsim_dev_dummy_reporter_ctx {
+ char *break_msg;
+};
+
+static int
+nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+
+ if (health->fail_recover) {
+		/* For testing purposes, the user set the debugfs
+		 * fail_recover value to true. Fail right away.
+		 */
+		NL_SET_ERR_MSG_MOD(extack, "User set up the recover to fail for testing purposes");
+ return -EINVAL;
+ }
+ if (ctx) {
+ kfree(health->recovered_break_msg);
+ health->recovered_break_msg = kstrdup(ctx->break_msg,
+ GFP_KERNEL);
+ if (!health->recovered_break_msg)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
+{
+ char *binary;
+ int err;
+ int i;
+
+ err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring");
+ if (err)
+ return err;
+
+ binary = kmalloc(binary_len, GFP_KERNEL);
+ if (!binary)
+ return -ENOMEM;
+ get_random_bytes(binary, binary_len);
+ err = devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len);
+ kfree(binary);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, "test_nest");
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_bool_put(fmsg, true);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u8_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u32_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u64_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg,
+ "in_array_nested_test_bool",
+ false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg,
+ "in_array_nested_test_u8",
+ i);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int
+nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+ int err;
+
+ if (ctx) {
+ err = devlink_fmsg_string_pair_put(fmsg, "break_message",
+ ctx->break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static int
+nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ int err;
+
+ if (health->recovered_break_msg) {
+ err = devlink_fmsg_string_pair_put(fmsg,
+ "recovered_break_message",
+ health->recovered_break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_dummy_reporter_ops = {
+ .name = "dummy",
+ .recover = nsim_dev_dummy_reporter_recover,
+ .dump = nsim_dev_dummy_reporter_dump,
+ .diagnose = nsim_dev_dummy_reporter_diagnose,
+};
+
+static ssize_t nsim_dev_health_break_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_health *health = file->private_data;
+ struct nsim_dev_dummy_reporter_ctx ctx;
+ char *break_msg;
+ int err;
+
+ break_msg = kmalloc(count + 1, GFP_KERNEL);
+ if (!break_msg)
+ return -ENOMEM;
+
+ if (copy_from_user(break_msg, data, count)) {
+ err = -EFAULT;
+ goto out;
+ }
+ break_msg[count] = '\0';
+	if (count && break_msg[count - 1] == '\n')
+		break_msg[count - 1] = '\0';
+
+ ctx.break_msg = break_msg;
+ err = devlink_health_report(health->dummy_reporter, break_msg, &ctx);
+
+out:
+ kfree(break_msg);
+ return err ?: count;
+}
+
+static const struct file_operations nsim_dev_health_break_fops = {
+ .open = simple_open,
+ .write = nsim_dev_health_break_write,
+ .llseek = generic_file_llseek,
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+ int err;
+
+ health->empty_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_empty_reporter_ops,
+ 0, false, health);
+ if (IS_ERR(health->empty_reporter))
+ return PTR_ERR(health->empty_reporter);
+
+ health->dummy_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_dummy_reporter_ops,
+ 0, false, health);
+ if (IS_ERR(health->dummy_reporter)) {
+ err = PTR_ERR(health->dummy_reporter);
+ goto err_empty_reporter_destroy;
+ }
+
+ health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(health->ddir)) {
+ err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
+ goto err_dummy_reporter_destroy;
+ }
+
+ health->recovered_break_msg = NULL;
+ debugfs_create_file("break_health", 0200, health->ddir, health,
+ &nsim_dev_health_break_fops);
+ health->binary_len = 16;
+ debugfs_create_u32("binary_len", 0600, health->ddir,
+ &health->binary_len);
+ health->fail_recover = false;
+ debugfs_create_bool("fail_recover", 0600, health->ddir,
+ &health->fail_recover);
+ return 0;
+
+err_dummy_reporter_destroy:
+ devlink_health_reporter_destroy(health->dummy_reporter);
+err_empty_reporter_destroy:
+ devlink_health_reporter_destroy(health->empty_reporter);
+ return err;
+}
+
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+
+ debugfs_remove_recursive(health->ddir);
+ kfree(health->recovered_break_msg);
+ devlink_health_reporter_destroy(health->dummy_reporter);
+ devlink_health_reporter_destroy(health->empty_reporter);
+}
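/* Aside (illustration only, not part of the patch): the fmsg builders above
 * follow a strict bracketing discipline -- every *_nest_start() is matched
 * by the corresponding *_nest_end(), pairs nest like parentheses, and every
 * call's return value is checked. This stand-in models only the balancing;
 * the real devlink_fmsg API emits netlink attributes.
 */
#include <assert.h>

struct fmsg_like { int depth; };

static int nest_start(struct fmsg_like *f) { f->depth++; return 0; }
static int nest_end(struct fmsg_like *f) { return --f->depth < 0 ? -1 : 0; }

int main(void)
{
	struct fmsg_like f = { 0 };

	/* pair nest -> obj nest -> (scalar puts) -> obj end -> pair end */
	assert(!nest_start(&f));
	assert(!nest_start(&f));
	assert(!nest_end(&f));
	assert(!nest_end(&f));
	assert(f.depth == 0);	/* balanced, as a valid dump must be */
	return 0;
}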
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 55f57f76d01b..2908e0a0d6e1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -290,6 +290,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
if (!dev)
return ERR_PTR(-ENOMEM);
+ dev_net_set(dev, nsim_dev_net(nsim_dev));
ns = netdev_priv(dev);
ns->netdev = dev;
ns->nsim_dev = nsim_dev;
@@ -357,18 +358,12 @@ static int __init nsim_module_init(void)
if (err)
goto err_dev_exit;
- err = nsim_fib_init();
- if (err)
- goto err_bus_exit;
-
err = rtnl_link_register(&nsim_link_ops);
if (err)
- goto err_fib_exit;
+ goto err_bus_exit;
return 0;
-err_fib_exit:
- nsim_fib_exit();
err_bus_exit:
nsim_bus_exit();
err_dev_exit:
@@ -379,7 +374,6 @@ err_dev_exit:
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
- nsim_fib_exit();
nsim_bus_exit();
nsim_dev_exit();
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 66bf13765ad0..94df795ef4d3 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -134,6 +134,18 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6_FIB_RULES,
};
+struct nsim_dev_health {
+ struct devlink_health_reporter *empty_reporter;
+ struct devlink_health_reporter *dummy_reporter;
+ struct dentry *ddir;
+ char *recovered_break_msg;
+ u32 binary_len;
+ bool fail_recover;
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+
struct nsim_dev_port {
struct list_head list;
struct devlink_port devlink_port;
@@ -161,9 +173,17 @@ struct nsim_dev {
bool fw_update_status;
u32 max_macs;
bool test1;
+ bool dont_allow_reload;
+ bool fail_reload;
struct devlink_region *dummy_region;
+ struct nsim_dev_health health;
};
+static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
+{
+ return devlink_net(priv_to_devlink(nsim_dev));
+}
+
int nsim_dev_init(void);
void nsim_dev_exit(void);
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
@@ -173,11 +193,11 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
unsigned int port_index);
-int nsim_fib_init(void);
-void nsim_fib_exit(void);
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
- struct netlink_ext_ack *extack);
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack);
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max);
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
@@ -215,6 +235,9 @@ struct nsim_bus_dev {
struct device dev;
struct list_head list;
unsigned int port_count;
+	struct net *initial_net; /* Purpose of this is to carry the net
+				  * pointer during probe time only.
+				  */
unsigned int num_vfs;
struct nsim_vf_config *vfconfigs;
};
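/* Aside (illustration only, not part of the patch): devlink_alloc() in the
 * probe path reserves sizeof(struct nsim_dev) of driver-private space right
 * behind the core object, which is what makes devlink_priv(),
 * priv_to_devlink() and the nsim_dev_net() helper above cheap constant-offset
 * conversions. Sketch with stand-in types (the real layout is an
 * implementation detail of devlink, assumed here for illustration):
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct devlink_like { int core_state; char priv[]; };	/* priv area follows */
struct nsim_dev_like { int max_macs; };

static void *devlink_priv_like(struct devlink_like *dl) { return dl->priv; }

static struct devlink_like *priv_to_devlink_like(void *priv)
{
	return (struct devlink_like *)((char *)priv -
				       offsetof(struct devlink_like, priv));
}

int main(void)
{
	struct devlink_like *dl = malloc(sizeof(*dl) +
					 sizeof(struct nsim_dev_like));
	struct nsim_dev_like *nsim_dev = devlink_priv_like(dl);

	assert(priv_to_devlink_like(nsim_dev) == dl);	/* exact inverses */
	free(dl);
	return 0;
}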
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 68771b2f351a..afb119f38325 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -9,13 +9,7 @@
static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len = skb->len;
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += len;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_lstats_add(dev, skb->len);
dev_kfree_skb(skb);
@@ -56,25 +50,9 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
- u64 bytes = 0, packets = 0;
-
- for_each_possible_cpu(i) {
- const struct pcpu_lstats *nl_stats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- nl_stats = per_cpu_ptr(dev->lstats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
- tbytes = nl_stats->bytes;
- tpackets = nl_stats->packets;
- } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
+ u64 packets, bytes;
- packets += tpackets;
- bytes += tbytes;
- }
+ dev_lstats_read(dev, &packets, &bytes);
stats->rx_packets = packets;
stats->tx_packets = 0;
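/* Aside (illustration only, not part of the patch): dev_lstats_add() and
 * dev_lstats_read() centralize exactly the bookkeeping removed above -- a
 * seqcount-protected update of the local CPU's counters and a retry loop
 * summing all CPUs on the read side. This sketch models only the summing
 * step; the u64_stats begin/retry dance seen in the removed lines is what
 * the real helpers keep for torn-read protection on 32-bit systems.
 */
#include <stdio.h>

struct lstats_like { unsigned long long packets, bytes; };

static void lstats_read_like(const struct lstats_like *per_cpu, int ncpus,
			     unsigned long long *packets,
			     unsigned long long *bytes)
{
	*packets = 0;
	*bytes = 0;
	for (int i = 0; i < ncpus; i++) {	/* for_each_possible_cpu() */
		*packets += per_cpu[i].packets;
		*bytes += per_cpu[i].bytes;
	}
}

int main(void)
{
	struct lstats_like cpus[2] = { { 3, 300 }, { 4, 400 } };
	unsigned long long p, b;

	lstats_read_like(cpus, 2, &p, &b);
	printf("%llu packets, %llu bytes\n", p, b);	/* 7 packets, 700 bytes */
	return 0;
}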
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fe602648b99f..5848219005d7 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -282,11 +282,6 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
-config AT803X_PHY
- tristate "AT803X PHYs"
- ---help---
- Currently supports the AT8030 and AT8035 model
-
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX || COMPILE_TEST
@@ -364,6 +359,12 @@ config DP83867_PHY
---help---
Currently supports the DP83867 PHY.
+config DP83869_PHY
+ tristate "Texas Instruments DP83869 Gigabit PHY"
+ ---help---
+ Currently supports the DP83869 PHY. This PHY supports copper and
+ fiber connections.
+
config FIXED_PHY
tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -444,6 +445,12 @@ config NXP_TJA11XX_PHY
---help---
Currently supports the NXP TJA1100 and TJA1101 PHY.
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ depends on REGULATOR
+ help
+	  Currently supports the AR8030, AR8031, AR8033 and AR8035 models
+
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a03437e091f3..b433ec3bf9a6 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_DP83822_PHY) += dp83822.o
obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
+obj-$(CONFIG_DP83869_PHY) += dp83869.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
diff --git a/drivers/net/phy/aquantia.h b/drivers/net/phy/aquantia.h
index 5a16caab7b2f..c684b65c642c 100644
--- a/drivers/net/phy/aquantia.h
+++ b/drivers/net/phy/aquantia.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * HWMON driver for Aquantia PHY
+/* SPDX-License-Identifier: GPL-2.0 */
+/* HWMON driver for Aquantia PHY
*
* Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
* Author: Andrew Lunn <andrew@lunn.ch>
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 1eb5d4fb8925..aee62610bade 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2,7 +2,7 @@
/*
* drivers/net/phy/at803x.c
*
- * Driver for Atheros 803x PHY
+ * Driver for Qualcomm Atheros AR803x PHY
*
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
*/
@@ -13,7 +13,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of_gpio.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/net/qca-ar803x.h>
#define AT803X_SPECIFIC_STATUS 0x11
#define AT803X_SS_SPEED_MASK (3 << 14)
@@ -62,17 +67,60 @@
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for 25 MHz output; all other frequencies need the PLL.
+ * The DSP as a clock reference is used in synchronous Ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into a low power state and disable the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
+#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
#define AT803X_PHY_ID_MASK 0xffffffef
-MODULE_DESCRIPTION("Atheros 803x PHY driver");
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
struct at803x_priv {
- bool phy_reset:1;
+ int flags;
+#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+ struct regulator *vddio;
};
struct at803x_context {
@@ -240,6 +288,193 @@ static int at803x_resume(struct phy_device *phydev)
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
+static int at803x_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static bool at803x_match_phy_id(struct phy_device *phydev, u32 phy_id)
+{
+ return (phydev->phy_id & phydev->drv->phy_id_mask)
+ == (phy_id & phydev->drv->phy_id_mask);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ unsigned int sel, mask;
+ u32 freq, strength;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ mask = AT803X_CLK_OUT_MASK;
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(mask, sel);
+ priv->clk_25m_mask |= mask;
+
+		/* Fixup for the AR8030/AR8035. This chip has another mask and
+		 * doesn't support the DSP reference, i.e. the lowest bit of
+		 * the mask. The upper two bits select the same frequencies,
+		 * so mask the lowest bit off here.
+		 *
+		 * Warning:
+		 * There was no datasheet for the AR8030 available, so this is
+		 * just a guess. But the AR8035 is listed as pin compatible
+		 * with the AR8030, so there is a good chance it works on
+		 * the AR8030 too.
+		 */
+ if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
+ at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
+ priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
+ }
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev,
+ "vddio");
+ if (IS_ERR(priv->vddio)) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return PTR_ERR(priv->vddio);
+ }
+
+ ret = regulator_enable(priv->vddio);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -251,7 +486,40 @@ static int at803x_probe(struct phy_device *phydev)
phydev->priv = priv;
- return 0;
+ return at803x_parse_dt(phydev);
+}
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int val;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M);
+ if (val < 0)
+ return val;
+
+ val &= ~priv->clk_25m_mask;
+ val |= priv->clk_25m_reg;
+
+ return phy_write_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M, val);
+}
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
}
static int at803x_config_init(struct phy_device *phydev)
@@ -276,8 +544,20 @@ static int at803x_config_init(struct phy_device *phydev)
ret = at803x_enable_tx_delay(phydev);
else
ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
- return ret;
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
static int at803x_ack_interrupt(struct phy_device *phydev)
@@ -426,9 +706,9 @@ static int at803x_read_status(struct phy_device *phydev)
static struct phy_driver at803x_driver[] = {
{
- /* ATHEROS 8035 */
+ /* Qualcomm Atheros AR8035 */
.phy_id = ATH8035_PHY_ID,
- .name = "Atheros 8035 ethernet",
+ .name = "Qualcomm Atheros AR8035",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
@@ -441,9 +721,9 @@ static struct phy_driver at803x_driver[] = {
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
- /* ATHEROS 8030 */
+ /* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
- .name = "Atheros 8030 ethernet",
+ .name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
@@ -456,9 +736,9 @@ static struct phy_driver at803x_driver[] = {
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
- /* ATHEROS 8031 */
+ /* Qualcomm Atheros AR8031/AR8033 */
.phy_id = ATH8031_PHY_ID,
- .name = "Atheros 8031 ethernet",
+ .name = "Qualcomm Atheros AR8031/AR8033",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
@@ -471,6 +751,15 @@ static struct phy_driver at803x_driver[] = {
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
} };
module_phy_driver(at803x_driver);
@@ -479,6 +768,7 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
{ }
};
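/* Aside (illustration only, not part of the patch): the CLK25M handling above
 * is a masked read-modify-write -- at803x_parse_dt() accumulates
 * clk_25m_mask/clk_25m_reg with GENMASK()/FIELD_PREP(), and
 * at803x_clk_out_config() clears the mask in the MMD register before ORing
 * the new field in. Local, simplified 32-bit reimplementations of the two
 * macros (the kernel versions are more general; __builtin_ctz is a
 * GCC/Clang builtin):
 */
#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint16_t reg = 0xffff;			/* pretend CLK25M content */
	unsigned int mask = GENMASK(4, 2);	/* AT803X_CLK_OUT_MASK */
	unsigned int sel = 6;			/* AT803X_CLK_OUT_125MHZ_PLL */

	reg = (reg & ~mask) | FIELD_PREP(mask, sel);
	assert((reg & mask) == sel << 2);	/* field landed at bits 4:2 */
	return 0;
}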
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 5ecacb4e64f0..c86fb9d1240c 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
*/
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 937d0059e8ac..7d68b28bb893 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -26,18 +26,13 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+static int bcm54xx_config_clock_delay(struct phy_device *phydev);
+
static int bcm54210e_config_init(struct phy_device *phydev)
{
int val;
- val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- val |= MII_BCM54XX_AUXCTL_MISC_WREN;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
-
- val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
- val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
- bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ bcm54xx_config_clock_delay(phydev);
if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) {
val = phy_read(phydev, MII_CTRL1000);
@@ -52,26 +47,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
{
int reg;
- /* Clear TX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
- /* Disable TXD to GTXCLK clock delay (default set) */
- /* Bit 9 is the only field in shadow register 00011 */
- bcm_phy_write_shadow(phydev, 0x03, 0);
- }
-
- /* Clear RX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
- reg = bcm54xx_auxctl_read(phydev,
- MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- /* Disable RXD to RXC delay (default set) */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- /* Clear shadow selector field */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
- MII_BCM54XX_AUXCTL_MISC_WREN | reg);
- }
+ bcm54xx_config_clock_delay(phydev);
/* Enable CLK125 MUX on LED4 if ref clock is enabled. */
if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
@@ -383,9 +359,9 @@ static int bcm5482_config_init(struct phy_device *phydev)
/*
* Select 1000BASE-X register set (primary SerDes)
*/
- reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_MODE);
- bcm_phy_write_shadow(phydev, BCM5482_SHD_MODE,
- reg | BCM5482_SHD_MODE_1000BX);
+ reg = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE,
+ reg | BCM54XX_SHD_MODE_1000BX);
/*
* LED1=ACTIVITYLED, LED3=LINKSPD[2]
@@ -451,12 +427,47 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_probe(struct phy_device *phydev)
+{
+ int val, intf_sel;
+
+ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ if (val < 0)
+ return val;
+
+	/* The PHY is strapped in RGMII-fiber mode when INTERF_SEL[1:0]
+	 * is 01b, and the link between the PHY and its link partner can
+	 * be either 1000Base-X or 100Base-FX.
+	 * RGMII-1000Base-X is properly supported, but RGMII-100Base-FX
+	 * support is still missing.
+ */
+ intf_sel = (val & BCM54XX_SHD_INTF_SEL_MASK) >> 1;
+ if (intf_sel == 1) {
+ val = bcm_phy_read_shadow(phydev, BCM54616S_SHD_100FX_CTRL);
+ if (val < 0)
+ return val;
+
+ /* Bit 0 of the SerDes 100-FX Control register, when set
+ * to 1, sets the MII/RGMII -> 100BASE-FX configuration.
+ * When this bit is set to 0, it sets the GMII/RGMII ->
+ * 1000BASE-X configuration.
+ */
+ if (!(val & BCM54616S_100FX_MODE))
+ phydev->dev_flags |= PHY_BCM_FLAGS_MODE_1000BX;
+ }
+
+ return 0;
+}
+
static int bcm54616s_config_aneg(struct phy_device *phydev)
{
int ret;
	/* Aneg first. */
- ret = genphy_config_aneg(phydev);
+ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ ret = genphy_c37_config_aneg(phydev);
+ else
+ ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
bcm54xx_config_clock_delay(phydev);
@@ -464,6 +475,18 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_read_status(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ err = genphy_c37_read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
+
+ return err;
+}
+
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -655,6 +678,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .read_status = bcm54616s_read_status,
+ .probe = bcm54616s_probe,
}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
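/* Aside (illustration only, not part of the patch): the BCM54616S changes
 * above are a probe-time strap detect -- bcm54616s_probe() reads
 * INTERF_SEL[1:0] once, latches the result in dev_flags, and the
 * config_aneg()/read_status() paths branch on that flag from then on.
 * Minimal sketch of the pattern; the names are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct phy_like { unsigned long dev_flags; };

#define MODE_1000BX (1UL << 0)	/* stands in for PHY_BCM_FLAGS_MODE_1000BX */

static void probe_strap(struct phy_like *phy, bool strapped_fiber)
{
	if (strapped_fiber)	/* read once from the hardware straps */
		phy->dev_flags |= MODE_1000BX;
}

static const char *read_status_path(const struct phy_like *phy)
{
	return (phy->dev_flags & MODE_1000BX) ? "genphy_c37_read_status"
					      : "genphy_read_status";
}

int main(void)
{
	struct phy_like phy = { 0 };

	probe_strap(&phy, true);
	puts(read_status_path(&phy));	/* prints genphy_c37_read_status */
	return 0;
}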
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6580094161a9..8f241b57fcf6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
index = rq->extts.index;
if (index >= N_EXT_TS)
return -EINVAL;
@@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);
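/* Aside (illustration only, not part of the patch): the EXTTS validation
 * above first rejects any flag outside the supported set, then -- only when
 * userspace opted into PTP_STRICT_FLAGS -- rejects enabling both edges at
 * once. Worked example of that second test, assuming the usual UAPI bit
 * values and PTP_EXTTS_EDGES == PTP_RISING_EDGE | PTP_FALLING_EDGE:
 */
#include <assert.h>

#define PTP_ENABLE_FEATURE	(1 << 0)
#define PTP_RISING_EDGE		(1 << 1)
#define PTP_FALLING_EDGE	(1 << 2)
#define PTP_STRICT_FLAGS	(1 << 3)
#define PTP_EXTTS_EDGES		(PTP_RISING_EDGE | PTP_FALLING_EDGE)

static int both_edges_rejected(unsigned int flags)
{
	return (flags & PTP_STRICT_FLAGS) &&
	       (flags & PTP_ENABLE_FEATURE) &&
	       (flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES;
}

int main(void)
{
	/* strict + both edges: refused with -EOPNOTSUPP by the code above */
	assert(both_edges_rejected(PTP_STRICT_FLAGS | PTP_ENABLE_FEATURE |
				   PTP_EXTTS_EDGES));
	/* a single edge stays acceptable */
	assert(!both_edges_rejected(PTP_STRICT_FLAGS | PTP_ENABLE_FEATURE |
				    PTP_RISING_EDGE));
	return 0;
}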
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 37fceaf9fa10..0b95e7a2e273 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -21,8 +23,9 @@
#define MII_DP83867_PHYCTRL 0x10
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
-#define DP83867_CTRL 0x1f
+#define DP83867_CFG2 0x14
#define DP83867_CFG3 0x1e
+#define DP83867_CTRL 0x1f
/* Extended Registers */
#define DP83867_CFG4 0x0031
@@ -36,6 +39,13 @@
#define DP83867_STRAP_STS1 0x006E
#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
+#define DP83867_RXFCFG 0x0134
+#define DP83867_RXFPMD1 0x0136
+#define DP83867_RXFPMD2 0x0137
+#define DP83867_RXFPMD3 0x0138
+#define DP83867_RXFSOP1 0x0139
+#define DP83867_RXFSOP2 0x013A
+#define DP83867_RXFSOP3 0x013B
#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_SGMIICTL 0x00D3
#define DP83867_10M_SGMII_CFG 0x016F
@@ -65,6 +75,13 @@
/* SGMIICTL bits */
#define DP83867_SGMII_TYPE BIT(14)
+/* RXFCFG bits */
+#define DP83867_WOL_MAGIC_EN BIT(0)
+#define DP83867_WOL_BCAST_EN BIT(2)
+#define DP83867_WOL_UCAST_EN BIT(4)
+#define DP83867_WOL_SEC_EN BIT(5)
+#define DP83867_WOL_ENH_MAC BIT(7)
+
/* STRAP_STS1 bits */
#define DP83867_STRAP_STS1_RESERVED BIT(11)
@@ -95,6 +112,10 @@
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+/* CFG3 bits */
+#define DP83867_CFG3_INT_OE BIT(7)
+#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
+
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
@@ -126,6 +147,115 @@ static int dp83867_ack_interrupt(struct phy_device *phydev)
return 0;
}
+static int dp83867_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ u16 val_rxcfg, val_micr;
+ u8 *mac;
+
+ val_rxcfg = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+ val_micr = phy_read(phydev, MII_DP83867_MICR);
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_UCAST |
+ WAKE_BCAST)) {
+ val_rxcfg |= DP83867_WOL_ENH_MAC;
+ val_micr |= MII_DP83867_MICR_WOL_INT_EN;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ mac = (u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD1,
+ (mac[1] << 8 | mac[0]));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD2,
+ (mac[3] << 8 | mac[2]));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD3,
+ (mac[5] << 8 | mac[4]));
+
+ val_rxcfg |= DP83867_WOL_MAGIC_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_MAGIC_EN;
+ }
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+			phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
+				      (wol->sopass[3] << 8) | wol->sopass[2]);
+			phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
+				      (wol->sopass[5] << 8) | wol->sopass[4]);
+
+ val_rxcfg |= DP83867_WOL_SEC_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_SEC_EN;
+ }
+
+ if (wol->wolopts & WAKE_UCAST)
+ val_rxcfg |= DP83867_WOL_UCAST_EN;
+ else
+ val_rxcfg &= ~DP83867_WOL_UCAST_EN;
+
+ if (wol->wolopts & WAKE_BCAST)
+ val_rxcfg |= DP83867_WOL_BCAST_EN;
+ else
+ val_rxcfg &= ~DP83867_WOL_BCAST_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_ENH_MAC;
+ val_micr &= ~MII_DP83867_MICR_WOL_INT_EN;
+ }
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG, val_rxcfg);
+ phy_write(phydev, MII_DP83867_MICR, val_micr);
+
+ return 0;
+}
+
+static void dp83867_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ u16 value, sopass_val;
+
+ wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+
+ if (value & DP83867_WOL_UCAST_EN)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (value & DP83867_WOL_BCAST_EN)
+ wol->wolopts |= WAKE_BCAST;
+
+ if (value & DP83867_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83867_WOL_SEC_EN) {
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP1);
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP2);
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP3);
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ if (!(value & DP83867_WOL_ENH_MAC))
+ wol->wolopts = 0;
+}
+
static int dp83867_config_intr(struct phy_device *phydev)
{
int micr_status;
@@ -295,7 +425,7 @@ static int dp83867_probe(struct phy_device *phydev)
phydev->priv = dp83867;
- return 0;
+ return dp83867_of_init(phydev);
}
static int dp83867_config_init(struct phy_device *phydev)
@@ -304,10 +434,6 @@ static int dp83867_config_init(struct phy_device *phydev)
int ret, val, bs;
u16 delay;
- ret = dp83867_of_init(phydev);
- if (ret)
- return ret;
-
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -410,12 +536,13 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
}
+ val = phy_read(phydev, DP83867_CFG3);
/* Enable Interrupt output INT_OE in CFG3 register */
- if (phy_interrupt_is_valid(phydev)) {
- val = phy_read(phydev, DP83867_CFG3);
- val |= BIT(7);
- phy_write(phydev, DP83867_CFG3, val);
- }
+ if (phy_interrupt_is_valid(phydev))
+ val |= DP83867_CFG3_INT_OE;
+
+ val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
+ phy_write(phydev, DP83867_CFG3, val);
if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
dp83867_config_port_mirroring(phydev);
@@ -463,6 +590,9 @@ static struct phy_driver dp83867_driver[] = {
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
+ .get_wol = dp83867_get_wol,
+ .set_wol = dp83867_set_wol,
+
/* IRQ related */
.ack_interrupt = dp83867_ack_interrupt,
.config_intr = dp83867_config_intr,
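/* Aside (illustration only, not part of the patch): the WoL code above packs
 * six bytes (MAC address or SecureOn password) into three 16-bit MMD
 * registers low byte first, and dp83867_get_wol() unpacks them with the
 * mirror-image shifts. Round-trip of that packing in plain C:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t rxfpmd[3];
	uint8_t out[6];

	for (int i = 0; i < 3; i++)	/* set_wol() direction */
		rxfpmd[i] = mac[2 * i + 1] << 8 | mac[2 * i];

	for (int i = 0; i < 3; i++) {	/* get_wol() direction */
		out[2 * i] = rxfpmd[i] & 0xff;
		out[2 * i + 1] = rxfpmd[i] >> 8;
	}

	for (int i = 0; i < 6; i++)
		assert(out[i] == mac[i]);	/* lossless round trip */
	return 0;
}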
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
new file mode 100644
index 000000000000..93021904c5e4
--- /dev/null
+++ b/drivers/net/phy/dp83869.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the Texas Instruments DP83869 PHY
+ * Copyright (C) 2019 Texas Instruments Inc.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+
+#include <dt-bindings/net/ti-dp83869.h>
+
+#define DP83869_PHY_ID 0x2000a0f1
+#define DP83869_DEVADDR 0x1f
+
+#define MII_DP83869_PHYCTRL 0x10
+#define MII_DP83869_MICR 0x12
+#define MII_DP83869_ISR 0x13
+#define DP83869_CTRL 0x1f
+#define DP83869_CFG4 0x1e
+
+/* Extended Registers */
+#define DP83869_GEN_CFG3 0x0031
+#define DP83869_RGMIICTL 0x0032
+#define DP83869_STRAP_STS1 0x006e
+#define DP83869_RGMIIDCTL 0x0086
+#define DP83869_IO_MUX_CFG 0x0170
+#define DP83869_OP_MODE 0x01df
+#define DP83869_FX_CTRL 0x0c00
+
+#define DP83869_SW_RESET BIT(15)
+#define DP83869_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83869_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83869_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83869_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83869_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83869_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83869_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83869_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83869_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83869_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83869_MICR_JABBER_INT_EN BIT(0)
+
+#define MII_DP83869_BMCR_DEFAULT (BMCR_ANENABLE | \
+ BMCR_FULLDPLX | \
+ BMCR_SPEED1000)
+
+/* This is the same bit mask as the BMCR so re-use the BMCR default */
+#define DP83869_FX_CTRL_DEFAULT MII_DP83869_BMCR_DEFAULT
+
+/* CFG1 bits */
+#define DP83869_CFG1_DEFAULT (ADVERTISE_1000HALF | \
+ ADVERTISE_1000FULL | \
+ CTL1000_AS_MASTER)
+
+/* RGMIICTL bits */
+#define DP83869_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83869_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* STRAP_STS1 bits */
+#define DP83869_STRAP_STS1_RESERVED BIT(11)
+
+/* PHYCTRL bits */
+#define DP83869_RX_FIFO_SHIFT 12
+#define DP83869_TX_FIFO_SHIFT 14
+
+/* PHY_CTRL lower bytes 0x48 are declared as reserved */
+#define DP83869_PHY_CTRL_DEFAULT 0x48
+#define DP83869_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 12)
+#define DP83869_PHYCR_RESERVED_MASK BIT(11)
+
+/* RGMIIDCTL bits */
+#define DP83869_RGMII_TX_CLK_DELAY_SHIFT 4
+
+/* IO_MUX_CFG bits */
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
+
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+#define DP83869_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
+#define DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+
+/* CFG3 bits */
+#define DP83869_CFG3_PORT_MIRROR_EN BIT(0)
+
+/* CFG4 bits */
+#define DP83869_INT_OE BIT(7)
+
+/* OP MODE */
+#define DP83869_OP_MODE_MII BIT(5)
+#define DP83869_SGMII_RGMII_BRIDGE BIT(6)
+
+enum {
+ DP83869_PORT_MIRRORING_KEEP,
+ DP83869_PORT_MIRRORING_EN,
+ DP83869_PORT_MIRRORING_DIS,
+};
+
+struct dp83869_private {
+ int tx_fifo_depth;
+ int rx_fifo_depth;
+ int io_impedance;
+ int port_mirroring;
+ bool rxctrl_strap_quirk;
+ int clk_output_sel;
+ int mode;
+};
+
+static int dp83869_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DP83869_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83869_config_intr(struct phy_device *phydev)
+{
+ int micr_status = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ micr_status = phy_read(phydev, MII_DP83869_MICR);
+ if (micr_status < 0)
+ return micr_status;
+
+ micr_status |=
+ (MII_DP83869_MICR_AN_ERR_INT_EN |
+ MII_DP83869_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83869_MICR_AUTONEG_COMP_INT_EN |
+ MII_DP83869_MICR_LINK_STS_CHNG_INT_EN |
+ MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+ return phy_write(phydev, MII_DP83869_MICR, micr_status);
+ }
+
+ return phy_write(phydev, MII_DP83869_MICR, micr_status);
+}
+
+static int dp83869_config_port_mirroring(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+
+ if (dp83869->port_mirroring == DP83869_PORT_MIRRORING_EN)
+ return phy_set_bits_mmd(phydev, DP83869_DEVADDR,
+ DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
+ else
+ return phy_clear_bits_mmd(phydev, DP83869_DEVADDR,
+ DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83869_of_init(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ int ret;
+
+ if (!of_node)
+ return -ENODEV;
+
+ dp83869->io_impedance = -EINVAL;
+
+ /* Optional configuration */
+ ret = of_property_read_u32(of_node, "ti,clk-output-sel",
+ &dp83869->clk_output_sel);
+ if (ret || dp83869->clk_output_sel > DP83869_CLK_O_SEL_REF_CLK)
+ dp83869->clk_output_sel = DP83869_CLK_O_SEL_REF_CLK;
+
+ ret = of_property_read_u32(of_node, "ti,op-mode", &dp83869->mode);
+ if (ret == 0) {
+ if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
+ dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node, "ti,max-output-impedance"))
+ dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MAX;
+ else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
+ dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+
+ if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
+ else
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+
+ if (of_property_read_u32(of_node, "rx-fifo-depth",
+ &dp83869->rx_fifo_depth))
+ dp83869->rx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ if (of_property_read_u32(of_node, "tx-fifo-depth",
+ &dp83869->tx_fifo_depth))
+ dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ return ret;
+}
+#else
+static int dp83869_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
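
of_property_read_u32() returns 0 on success and a negative errno when the property is absent or malformed, which is why each optional lookup above either keeps the parsed value or falls back to a default. A minimal sketch of that idiom (the property name and default are hypothetical placeholders):

/* Sketch of the optional-DT-property idiom used in dp83869_of_init().
 * "ti,example-prop" and EXAMPLE_DEFAULT are hypothetical placeholders.
 */
static u32 example_read_optional_u32(struct device_node *np)
{
	u32 val;

	/* of_property_read_u32() leaves 'val' untouched on failure,
	 * so the default must be assigned explicitly.
	 */
	if (of_property_read_u32(np, "ti,example-prop", &val))
		val = EXAMPLE_DEFAULT;

	return val;
}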
+
+static int dp83869_configure_rgmii(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int ret = 0, val;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ val = phy_read(phydev, MII_DP83869_PHYCTRL);
+ if (val < 0)
+ return val;
+
+ val &= ~DP83869_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83869->tx_fifo_depth << DP83869_TX_FIFO_SHIFT);
+ val |= (dp83869->rx_fifo_depth << DP83869_RX_FIFO_SHIFT);
+
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83869->io_impedance >= 0)
+ ret = phy_modify_mmd(phydev, DP83869_DEVADDR,
+ DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83869->io_impedance &
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
+
+ return ret;
+}
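
The open-coded read/modify/write on MII_DP83869_PHYCTRL above could also be expressed with the phy_modify() helper; a behavior-equivalent sketch (the function name is hypothetical):

/* Sketch: setting the FIFO depth fields via phy_modify() instead of an
 * explicit phy_read()/phy_write() pair.
 */
static int example_set_fifo_depths(struct phy_device *phydev,
				   int tx_depth, int rx_depth)
{
	u16 set = (tx_depth << DP83869_TX_FIFO_SHIFT) |
		  (rx_depth << DP83869_RX_FIFO_SHIFT);

	return phy_modify(phydev, MII_DP83869_PHYCTRL,
			  DP83869_PHYCR_FIFO_DEPTH_MASK, set);
}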
+
+static int dp83869_configure_mode(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int phy_ctrl_val;
+ int ret;
+
+ if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
+ dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
+ return -EINVAL;
+
+	/* The init sequence for each operational mode below is defined in
+	 * section 9.4.8 of the datasheet.
+	 */
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ dp83869->mode);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_BMCR, MII_DP83869_BMCR_DEFAULT);
+ if (ret)
+ return ret;
+
+ phy_ctrl_val = (dp83869->rx_fifo_depth << DP83869_RX_FIFO_SHIFT |
+ dp83869->tx_fifo_depth << DP83869_TX_FIFO_SHIFT |
+ DP83869_PHY_CTRL_DEFAULT);
+
+ switch (dp83869->mode) {
+ case DP83869_RGMII_COPPER_ETHERNET:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_CTRL1000, DP83869_CFG1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = dp83869_configure_rgmii(phydev, dp83869);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_RGMII_SGMII_BRIDGE:
+ ret = phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ DP83869_SGMII_RGMII_BRIDGE,
+ DP83869_SGMII_RGMII_BRIDGE);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+
+ break;
+ case DP83869_1000M_MEDIA_CONVERT:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_100M_MEDIA_CONVERT:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_SGMII_COPPER_ETHERNET:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_CTRL1000, DP83869_CFG1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+
+ break;
+ case DP83869_RGMII_1000_BASE:
+ case DP83869_RGMII_100_BASE:
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ return ret;
+}
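
phy_write_mmd() with DP83869_DEVADDR reaches the device's vendor MMD register space. For a Clause 22 PHY such as this one, phylib performs the access indirectly through the MMD access control/data registers; a simplified sketch of that indirection, mirroring phylib's mmd_phy_indirect():

/* Simplified sketch of the indirect Clause 22 MMD addressing used by
 * phy_write_mmd()/phy_read_mmd() for this PHY.
 */
static void example_mmd_indirect(struct mii_bus *bus, int addr,
				 int devad, u16 regnum)
{
	/* Select the MMD device and the register address within it */
	__mdiobus_write(bus, addr, MII_MMD_CTRL, devad);
	__mdiobus_write(bus, addr, MII_MMD_DATA, regnum);

	/* Switch to data mode; MII_MMD_DATA now accesses the register */
	__mdiobus_write(bus, addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
}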
+
+static int dp83869_config_init(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int ret, val;
+
+ ret = dp83869_configure_mode(phydev, dp83869);
+ if (ret)
+ return ret;
+
+	/* Enable interrupt output INT_OE in the CFG4 register */
+	if (phy_interrupt_is_valid(phydev)) {
+		val = phy_read(phydev, DP83869_CFG4);
+		if (val < 0)
+			return val;
+
+		ret = phy_write(phydev, DP83869_CFG4, val | DP83869_INT_OE);
+		if (ret)
+			return ret;
+	}
+
+	if (dp83869->port_mirroring != DP83869_PORT_MIRRORING_KEEP) {
+		ret = dp83869_config_port_mirroring(phydev);
+		if (ret)
+			return ret;
+	}
+
+ /* Clock output selection if muxing property is set */
+ if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK)
+ ret = phy_modify_mmd(phydev,
+ DP83869_DEVADDR, DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83869->clk_output_sel <<
+ DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+
+ return ret;
+}
+
+static int dp83869_probe(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869;
+ int ret;
+
+ dp83869 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83869),
+ GFP_KERNEL);
+ if (!dp83869)
+ return -ENOMEM;
+
+ phydev->priv = dp83869;
+
+ ret = dp83869_of_init(phydev);
+ if (ret)
+ return ret;
+
+ return dp83869_config_init(phydev);
+}
+
+static int dp83869_phy_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, DP83869_CTRL, DP83869_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10, 20);
+
+	/* A global software reset restores all registers to their defaults,
+	 * so the PHY configuration must be reapplied afterwards.
+	 */
+ return dp83869_config_init(phydev);
+}
+
+static struct phy_driver dp83869_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(DP83869_PHY_ID),
+ .name = "TI DP83869",
+
+ .probe = dp83869_probe,
+ .config_init = dp83869_config_init,
+ .soft_reset = dp83869_phy_reset,
+
+ /* IRQ related */
+ .ack_interrupt = dp83869_ack_interrupt,
+ .config_intr = dp83869_config_intr,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ },
+};
+module_phy_driver(dp83869_driver);
+
+static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+ { PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83869_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83869 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a7796134e3be..b1fbd1937328 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -53,16 +53,22 @@
#define MII_M1011_PHY_SCR 0x10
#define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11)
-#define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12
-#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK 0x7800
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MASK GENMASK(14, 12)
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MAX 8
#define MII_M1011_PHY_SCR_MDI (0x0 << 5)
#define MII_M1011_PHY_SCR_MDI_X (0x1 << 5)
#define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5)
+#define MII_M1011_PHY_SSR 0x11
+#define MII_M1011_PHY_SSR_DOWNSHIFT BIT(5)
+
#define MII_M1111_PHY_LED_CONTROL 0x18
#define MII_M1111_PHY_LED_DIRECT 0x4100
#define MII_M1111_PHY_LED_COMBINE 0x411c
#define MII_M1111_PHY_EXT_CR 0x14
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK GENMASK(11, 9)
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX 8
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN BIT(8)
#define MII_M1111_RGMII_RX_DELAY BIT(7)
#define MII_M1111_RGMII_TX_DELAY BIT(1)
#define MII_M1111_PHY_EXT_SR 0x1b
@@ -273,23 +279,6 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity)
return val != reg;
}
-static int marvell_set_downshift(struct phy_device *phydev, bool enable,
- u8 retries)
-{
- int reg;
-
- reg = phy_read(phydev, MII_M1011_PHY_SCR);
- if (reg < 0)
- return reg;
-
- reg &= MII_M1011_PHY_SRC_DOWNSHIFT_MASK;
- reg |= ((retries - 1) << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT);
- if (enable)
- reg |= MII_M1011_PHY_SCR_DOWNSHIFT_EN;
-
- return phy_write(phydev, MII_M1011_PHY_SCR, reg);
-}
-
static int marvell_config_aneg(struct phy_device *phydev)
{
int changed = 0;
@@ -658,41 +647,6 @@ static int marvell_config_init(struct phy_device *phydev)
return marvell_of_reg_init(phydev);
}
-static int m88e1116r_config_init(struct phy_device *phydev)
-{
- int err;
-
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
-
- msleep(500);
-
- err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
- if (err < 0)
- return err;
-
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
- if (err < 0)
- return err;
-
- err = marvell_set_downshift(phydev, true, 8);
- if (err < 0)
- return err;
-
- if (phy_interface_is_rgmii(phydev)) {
- err = m88e1121_config_aneg_rgmii_delays(phydev);
- if (err < 0)
- return err;
- }
-
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
-
- return marvell_config_init(phydev);
-}
-
static int m88e3016_config_init(struct phy_device *phydev)
{
int ret;
@@ -833,6 +787,172 @@ static int m88e1111_config_init(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
+static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, val) + 1;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val;
+
+ if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+
+ val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+
+ return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+ val);
+}
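
FIELD_GET() and FIELD_PREP() from <linux/bitfield.h> extract and insert a value relative to a contiguous bit mask, so the downshift counter is stored zero-based in bits 11:9. A small illustration of the encoding (values follow from the GENMASK(11, 9) definition above):

/* Illustration: with MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK == GENMASK(11, 9),
 * a counter of 5 retries is stored as (5 - 1) = 4 in bits 11:9, i.e. 0x0800.
 */
static u16 example_downshift_bits(u8 cnt)
{
	return FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
}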
+
+static int m88e1111_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1111_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1111_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1111_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, MII_M1011_PHY_SCR);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, val) + 1;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val;
+
+ if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN);
+
+ val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+
+ return phy_modify(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+ MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+ val);
+}
+
+static int m88e1011_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1011_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void m88e1011_link_change_notify(struct phy_device *phydev)
+{
+ int status;
+
+ if (phydev->state != PHY_RUNNING)
+ return;
+
+ /* we may be on fiber page currently */
+ status = phy_read_paged(phydev, MII_MARVELL_COPPER_PAGE,
+ MII_M1011_PHY_SSR);
+
+	if (status > 0 && (status & MII_M1011_PHY_SSR_DOWNSHIFT))
+ phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
+}
+
+static int m88e1116r_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ msleep(500);
+
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ return err;
+
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ if (err < 0)
+ return err;
+
+ err = m88e1011_set_downshift(phydev, 8);
+ if (err < 0)
+ return err;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ err = m88e1121_config_aneg_rgmii_delays(phydev);
+ if (err < 0)
+ return err;
+ }
+
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ return marvell_config_init(phydev);
+}
+
static int m88e1318_config_init(struct phy_device *phydev)
{
if (phy_interrupt_is_valid(phydev)) {
@@ -1117,6 +1237,8 @@ static int m88e1540_get_tunable(struct phy_device *phydev,
switch (tuna->id) {
case ETHTOOL_PHY_FAST_LINK_DOWN:
return m88e1540_get_fld(phydev, data);
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_get_downshift(phydev, data);
default:
return -EOPNOTSUPP;
}
@@ -1128,6 +1250,8 @@ static int m88e1540_set_tunable(struct phy_device *phydev,
switch (tuna->id) {
case ETHTOOL_PHY_FAST_LINK_DOWN:
return m88e1540_set_fld(phydev, data);
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_set_downshift(phydev, *(const u8 *)data);
default:
return -EOPNOTSUPP;
}
@@ -2163,6 +2287,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1111,
@@ -2182,6 +2309,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1118,
@@ -2220,6 +2350,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1318S,
@@ -2261,6 +2394,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -2314,6 +2450,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1510,
@@ -2337,6 +2476,9 @@ static struct phy_driver marvell_drivers[] = {
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
.set_loopback = genphy_loopback,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
@@ -2359,6 +2501,7 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2379,6 +2522,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2421,6 +2567,7 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 3b99882692e3..1bf13017d288 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,7 @@
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
@@ -206,6 +207,28 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+
+ sfp_parse_support(phydev->sfp_bus, id, support);
+ iface = sfp_select_interface(phydev->sfp_bus, id, support);
+
+ if (iface != PHY_INTERFACE_MODE_10GKR) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct sfp_upstream_ops mv3310_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = mv3310_sfp_insert,
+};
+
static int mv3310_probe(struct phy_device *phydev)
{
struct mv3310_priv *priv;
@@ -236,7 +259,7 @@ static int mv3310_probe(struct phy_device *phydev)
if (ret)
return ret;
- return 0;
+ return phy_sfp_probe(phydev, &mv3310_sfp_ops);
}
static int mv3310_suspend(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index b7f89ad27465..e33d3ea9a907 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2009-2016 Cavium, Inc.
*/
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
index 751dab281f57..b1d27f7cd23f 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/drivers/net/phy/mdio-i2c.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MDIO I2C bridge
*
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 58d6504495e0..f798de3276dc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -145,8 +145,11 @@ err_out_free_mdiobus:
static int sun4i_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct sun4i_mdio_data *data = bus->priv;
mdiobus_unregister(bus);
+ if (data->regulator)
+ regulator_disable(data->regulator);
mdiobus_free(bus);
return 0;
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index b1f5ccb4ad9c..8af93ada8b64 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e29ab841b4d..229e480179ff 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -62,13 +62,14 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
struct reset_control *reset = NULL;
if (mdiodev->dev.of_node)
- reset = devm_reset_control_get_exclusive(&mdiodev->dev,
- "phy");
- if (PTR_ERR(reset) == -ENOENT ||
- PTR_ERR(reset) == -ENOTSUPP)
- reset = NULL;
- else if (IS_ERR(reset))
- return PTR_ERR(reset);
+ reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
+ "phy");
+ if (IS_ERR(reset)) {
+ if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
+ reset = NULL;
+ else
+ return PTR_ERR(reset);
+ }
mdiodev->reset_ctrl = reset;
@@ -106,6 +107,8 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
+ reset_control_put(mdiodev->reset_ctrl);
+
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
return 0;
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7ada1fd9ca71..d5f8f351d9ef 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -252,13 +252,21 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_TR_LSB 17
#define MSCC_PHY_TR_MSB 18
-/* Microsemi PHY ID's */
+/* Microsemi PHY IDs
+ * The code assumes the lowest nibble is 0
+ */
+#define PHY_ID_VSC8504 0x000704c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8552 0x000704e0
+#define PHY_ID_VSC856X 0x000707e0
+#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8575 0x000707d0
+#define PHY_ID_VSC8582 0x000707b0
#define PHY_ID_VSC8584 0x000707c0
#define MSCC_VDDMAC_1500 1500
@@ -895,7 +903,7 @@ static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
static int vsc8531_pre_init_seq_set(struct phy_device *phydev)
{
int rc;
- const struct reg_val init_seq[] = {
+ static const struct reg_val init_seq[] = {
{0x0f90, 0x00688980},
{0x0696, 0x00000003},
{0x07fa, 0x0050100f},
@@ -939,7 +947,7 @@ out_unlock:
static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
{
- const struct reg_val init_eee[] = {
+ static const struct reg_val init_eee[] = {
{0x0f82, 0x0012b00a},
{0x1686, 0x00000004},
{0x168c, 0x00d2c46f},
@@ -1224,7 +1232,7 @@ out:
/* bus->mdio_lock should be locked when using this function */
static int vsc8574_config_pre_init(struct phy_device *phydev)
{
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x0fae, 0x000401bd},
{0x0fac, 0x000f000f},
{0x17a0, 0x00a0f147},
@@ -1272,7 +1280,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev)
{0x0fee, 0x0004a6a1},
{0x0ffe, 0x00b01807},
};
- const struct reg_val pre_init2[] = {
+ static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
@@ -1427,7 +1435,7 @@ out:
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_config_pre_init(struct phy_device *phydev)
{
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x07fa, 0x0050100f},
{0x1688, 0x00049f81},
{0x0f90, 0x00688980},
@@ -1451,7 +1459,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
- const struct reg_val pre_init2[] = {
+ static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
@@ -1595,6 +1603,9 @@ static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
else
addr = vsc8531->base_addr + i;
+ if (!map[addr])
+ continue;
+
phy = container_of(map[addr], struct phy_device, mdio);
if ((phy->phy_id & phydev->drv->phy_id_mask) !=
@@ -1647,14 +1658,29 @@ static int vsc8584_config_init(struct phy_device *phydev)
* in this pre-init function.
*/
if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
- if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
- (PHY_ID_VSC8574 & phydev->drv->phy_id_mask))
+		/* The following switch statement assumes that the lowest
+		 * nibble of the phy_id_mask is always 0. This works because
+		 * the lowest nibble of each PHY_ID below is also 0.
+		 */
+ WARN_ON(phydev->drv->phy_id_mask & 0xf);
+
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC8504:
+ case PHY_ID_VSC8552:
+ case PHY_ID_VSC8572:
+ case PHY_ID_VSC8574:
ret = vsc8574_config_pre_init(phydev);
- else if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
- (PHY_ID_VSC8584 & phydev->drv->phy_id_mask))
+ break;
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
ret = vsc8584_config_pre_init(phydev);
- else
+ break;
+ default:
ret = -EINVAL;
+ break;
+ }
if (ret)
goto err;
@@ -1786,7 +1812,7 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
* values to handle hardware performance of PHY. They
* are set at Power-On state and remain until PHY Reset.
*/
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x0f90, 0x00688980},
{0x0786, 0x00000003},
{0x07fa, 0x0050100f},
@@ -2322,6 +2348,32 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8504,
+ .name = "Microsemi GE VSC8504 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8514,
.name = "Microsemi GE VSC8514 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2445,6 +2497,82 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8552,
+ .name = "Microsemi GE VSC8552 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC856X,
+ .name = "Microsemi GE VSC856X SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8572,
+ .name = "Microsemi GE VSC8572 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8574,
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2471,6 +2599,54 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8575,
+ .name = "Microsemi GE VSC8575 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8582,
+ .name = "Microsemi GE VSC8582 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8584,
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2500,12 +2676,18 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8504, 0xfffffff0, },
{ PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
+ { PHY_ID_VSC8552, 0xfffffff0, },
+ { PHY_ID_VSC856X, 0xfffffff0, },
+ { PHY_ID_VSC8572, 0xfffffff0, },
{ PHY_ID_VSC8574, 0xfffffff0, },
+ { PHY_ID_VSC8575, 0xfffffff0, },
+ { PHY_ID_VSC8582, 0xfffffff0, },
{ PHY_ID_VSC8584, 0xfffffff0, },
{ }
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 9412669b579c..769a076514b0 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 69,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 74,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -42,6 +42,8 @@ const char *phy_speed_to_str(int speed)
return "100Gbps";
case SPEED_200000:
return "200Gbps";
+ case SPEED_400000:
+ return "400Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -70,6 +72,12 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
.bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
static const struct phy_setting settings[] = {
+ /* 400G */
+ PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseDR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseSR8_Full ),
/* 200G */
PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
@@ -411,9 +419,9 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_read_mmd(phydev, devad, regnum);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
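
phy_lock_mdio_bus() and phy_unlock_mdio_bus() are thin wrappers around the bus mutex that these hunks previously took directly; a sketch of the helper pair as assumed by this conversion (static inlines in <linux/phy.h>):

static inline void phy_lock_mdio_bus(struct phy_device *phydev)
{
	mutex_lock(&phydev->mdio.bus->mdio_lock);
}

static inline void phy_unlock_mdio_bus(struct phy_device *phydev)
{
	mutex_unlock(&phydev->mdio.bus->mdio_lock);
}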
@@ -472,9 +480,9 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_write_mmd(phydev, devad, regnum, val);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -528,9 +536,9 @@ int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify_changed(phydev, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -572,9 +580,9 @@ int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify(phydev, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -631,9 +639,9 @@ int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -679,9 +687,9 @@ int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify_mmd(phydev, devad, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -689,11 +697,17 @@ EXPORT_SYMBOL_GPL(phy_modify_mmd);
static int __phy_read_page(struct phy_device *phydev)
{
+ if (WARN_ONCE(!phydev->drv->read_page, "read_page callback not available, PHY driver not loaded?\n"))
+ return -EOPNOTSUPP;
+
return phydev->drv->read_page(phydev);
}
static int __phy_write_page(struct phy_device *phydev, int page)
{
+ if (WARN_ONCE(!phydev->drv->write_page, "write_page callback not available, PHY driver not loaded?\n"))
+ return -EOPNOTSUPP;
+
return phydev->drv->write_page(phydev, page);
}
@@ -707,7 +721,7 @@ static int __phy_write_page(struct phy_device *phydev, int page)
*/
int phy_save_page(struct phy_device *phydev)
{
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
return __phy_read_page(phydev);
}
EXPORT_SYMBOL_GPL(phy_save_page);
@@ -774,7 +788,7 @@ int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
ret = oldpage;
}
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
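
phy_save_page() takes the bus lock and returns the current page, and phy_restore_page() restores it and drops the lock; helpers such as phy_read_paged() compose these primitives. A sketch of that composition (mirroring the phy_read_paged() helper, for illustration only):

/* Sketch: how a paged read composes the page-save/restore primitives. */
static int example_read_paged(struct phy_device *phydev, int page, u32 regnum)
{
	int ret = 0, oldpage;

	oldpage = phy_select_page(phydev, page);	/* takes the bus lock */
	if (oldpage >= 0)
		ret = __phy_read(phydev, regnum);

	return phy_restore_page(phydev, oldpage, ret);	/* drops the lock */
}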
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 105d389b58e7..80be4d691e5b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -23,6 +23,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/sfp.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
@@ -252,66 +253,6 @@ static void phy_sanitize_settings(struct phy_device *phydev)
}
}
-/**
- * phy_ethtool_sset - generic ethtool sset function, handles all the details
- * @phydev: target phy_device struct
- * @cmd: ethtool_cmd
- *
- * A few notes about parameter checking:
- *
- * - We don't set port or transceiver, so we don't care what they
- * were set to.
- * - phy_start_aneg() will make sure forced settings are sane, and
- * choose the next best ones from the ones selected, so we don't
- * care if ethtool tries to give us bad values.
- */
-int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
- u32 speed = ethtool_cmd_speed(cmd);
-
- if (cmd->phy_address != phydev->mdio.addr)
- return -EINVAL;
-
- /* We make sure that we don't pass unsupported values in to the PHY */
- ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising);
- linkmode_and(advertising, advertising, phydev->supported);
-
- /* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
- return -EINVAL;
-
- if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
- return -EINVAL;
-
- if (cmd->autoneg == AUTONEG_DISABLE &&
- ((speed != SPEED_1000 &&
- speed != SPEED_100 &&
- speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
- return -EINVAL;
-
- phydev->autoneg = cmd->autoneg;
-
- phydev->speed = speed;
-
- linkmode_copy(phydev->advertising, advertising);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising, AUTONEG_ENABLE == cmd->autoneg);
-
- phydev->duplex = cmd->duplex;
-
- phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;
-
- /* Restart the PHY */
- phy_start_aneg(phydev);
-
- return 0;
-}
-EXPORT_SYMBOL(phy_ethtool_sset);
-
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
@@ -841,6 +782,9 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
+ if (phydev->sfp_bus)
+ sfp_upstream_stop(phydev->sfp_bus);
+
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
@@ -875,6 +819,9 @@ void phy_start(struct phy_device *phydev)
goto out;
}
+ if (phydev->sfp_bus)
+ sfp_upstream_start(phydev->sfp_bus);
+
/* if phy was suspended, bring the physical link up again */
__phy_resume(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index adb66a2fae18..0887ed2bb050 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -27,6 +27,7 @@
#include <linux/bitmap.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/sfp.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -488,7 +489,7 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv)
if (phydev->is_c45) {
for (i = 1; i < num_ids; i++) {
- if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+ if (phydev->c45_ids.device_ids[i] == 0xffffffff)
continue;
if ((phydrv->phy_id & phydrv->phy_id_mask) ==
@@ -596,8 +597,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mdiodev->device_free = phy_mdio_device_free;
mdiodev->device_remove = phy_mdio_device_remove;
- dev->speed = 0;
- dev->duplex = -1;
+ dev->speed = SPEED_UNKNOWN;
+ dev->duplex = DUPLEX_UNKNOWN;
dev->pause = 0;
dev->asym_pause = 0;
dev->link = 0;
@@ -632,7 +633,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
int i;
for (i = 1; i < num_ids; i++) {
- if (!(c45_ids->devices_in_package & (1 << i)))
+ if (c45_ids->device_ids[i] == 0xffffffff)
continue;
ret = phy_request_driver_module(dev,
@@ -812,10 +813,13 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
*/
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
- struct phy_c45_device_ids c45_ids = {0};
+ struct phy_c45_device_ids c45_ids;
u32 phy_id = 0;
int r;
+ c45_ids.devices_in_package = 0;
+ memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids));
+
r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
if (r)
return ERR_PTR(r);
@@ -1175,6 +1179,65 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(phy_standalone);
/**
+ * phy_sfp_attach - attach the SFP bus to the PHY upstream network device
+ * @upstream: pointer to the phy device
+ * @bus: sfp bus representing cage being attached
+ *
+ * This is used to fill in the sfp_upstream_ops .attach member.
+ */
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus)
+{
+ struct phy_device *phydev = upstream;
+
+ if (phydev->attached_dev)
+ phydev->attached_dev->sfp_bus = bus;
+ phydev->sfp_bus_attached = true;
+}
+EXPORT_SYMBOL(phy_sfp_attach);
+
+/**
+ * phy_sfp_detach - detach the SFP bus from the PHY upstream network device
+ * @upstream: pointer to the phy device
+ * @bus: sfp bus representing cage being detached
+ *
+ * This is used to fill in the sfp_upstream_ops .detach member.
+ */
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus)
+{
+ struct phy_device *phydev = upstream;
+
+ if (phydev->attached_dev)
+ phydev->attached_dev->sfp_bus = NULL;
+ phydev->sfp_bus_attached = false;
+}
+EXPORT_SYMBOL(phy_sfp_detach);
+
+/**
+ * phy_sfp_probe - probe for a SFP cage attached to this PHY device
+ * @phydev: Pointer to phy_device
+ * @ops: SFP's upstream operations
+ */
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops)
+{
+ struct sfp_bus *bus;
+ int ret;
+
+ if (phydev->mdio.dev.fwnode) {
+ bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode);
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ phydev->sfp_bus = bus;
+
+ ret = sfp_bus_add_upstream(bus, phydev, ops);
+ sfp_bus_put(bus);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(phy_sfp_probe);
+
+/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
* @phydev: Pointer to phy_device to attach
@@ -1249,6 +1312,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (dev) {
phydev->attached_dev = dev;
dev->phydev = phydev;
+
+ if (phydev->sfp_bus_attached)
+ dev->sfp_bus = phydev->sfp_bus;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -1270,7 +1336,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev_err(phydev, "error creating 'phy_standalone' sysfs entry\n");
}
- phydev->dev_flags = flags;
+ phydev->dev_flags |= flags;
phydev->interface = interface;
@@ -1608,6 +1674,40 @@ static int genphy_config_advert(struct phy_device *phydev)
}
/**
+ * genphy_c37_config_advert - sanitize and advertise auto-negotiation parameters
+ * @phydev: target phy_device struct
+ *
+ * Description: Writes MII_ADVERTISE with the appropriate values,
+ * after sanitizing the values to make sure we only advertise
+ * what is supported. Returns < 0 on error, 0 if the PHY's advertisement
+ * hasn't changed, and > 0 if it has changed. This function is intended
+ * for Clause 37 1000Base-X mode.
+ */
+static int genphy_c37_config_advert(struct phy_device *phydev)
+{
+ u16 adv = 0;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XHALF | ADVERTISE_1000XPSE_ASYM,
+ adv);
+}
+
+/**
* genphy_config_eee_advert - disable unwanted eee mode advertisement
* @phydev: target phy_device struct
*
@@ -1716,6 +1816,54 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
EXPORT_SYMBOL(__genphy_config_aneg);
/**
+ * genphy_c37_config_aneg - restart auto-negotiation or write BMCR
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR. This function is intended
+ * for use with Clause 37 1000Base-X mode.
+ */
+int genphy_c37_config_aneg(struct phy_device *phydev)
+{
+ int err, changed;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return genphy_setup_forced(phydev);
+
+ err = phy_modify(phydev, MII_BMCR, BMCR_SPEED1000 | BMCR_SPEED100,
+ BMCR_SPEED1000);
+ if (err)
+ return err;
+
+ changed = genphy_c37_config_advert(phydev);
+ if (changed < 0) /* error */
+ return changed;
+
+ if (!changed) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+ changed = 1; /* do restart aneg */
+ }
+
+ /* Only restart aneg if we are advertising something different
+ * than we were before.
+ */
+ if (changed > 0)
+ return genphy_restart_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c37_config_aneg);
+
+/**
* genphy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
@@ -1887,6 +2035,63 @@ int genphy_read_status(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_read_status);
/**
+ * genphy_c37_read_status - check the link status and update current link state
+ * @phydev: target phy_device struct
+ *
+ * Description: Check the link, then figure out the current state
+ * by comparing what we advertise with what the link partner
+ * advertises. This function is for Clause 37 1000Base-X mode.
+ */
+int genphy_c37_read_status(struct phy_device *phydev)
+{
+ int lpa, err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->lp_advertising, lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising,
+ lpa & LPA_1000XPAUSE_ASYM);
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c37_read_status);
+
+/**
* genphy_soft_reset - software reset the PHY via BMCR_RESET bit
* @phydev: target phy_device struct
*
@@ -2279,6 +2484,9 @@ static int phy_remove(struct device *dev)
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
+ sfp_bus_del_upstream(phydev->sfp_bus);
+ phydev->sfp_bus = NULL;
+
if (phydev->drv && phydev->drv->remove) {
phydev->drv->remove(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a578f7ebf715..9a616d6bc4eb 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -133,9 +133,7 @@ static int phylink_is_empty_linkmode(const unsigned long *linkmode)
phylink_set(tmp, Pause);
phylink_set(tmp, Asym_Pause);
- bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- return linkmode_empty(tmp);
+ return linkmode_subset(linkmode, tmp);
}
static const char *phylink_an_mode_str(unsigned int mode)
@@ -359,9 +357,9 @@ static void phylink_mac_an_restart(struct phylink *pl)
pl->ops->mac_an_restart(pl->config);
}
-static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *state)
+static void phylink_mac_pcs_get_state(struct phylink *pl,
+ struct phylink_link_state *state)
{
-
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
@@ -372,7 +370,7 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
state->an_complete = 0;
state->link = 1;
- return pl->ops->mac_link_state(pl->config, state);
+ pl->ops->mac_pcs_get_state(pl->config, state);
}
/* The fixed state is... fixed except for the link state,
@@ -495,7 +493,7 @@ static void phylink_resolve(struct work_struct *w)
break;
case MLO_AN_INBAND:
- phylink_get_mac_state(pl, &link_state);
+ phylink_mac_pcs_get_state(pl, &link_state);
/* If we have a phy, the "up" state is the union of
* both the PHY and the MAC */
@@ -566,28 +564,22 @@ static const struct sfp_upstream_ops sfp_phylink_ops;
static int phylink_register_sfp(struct phylink *pl,
struct fwnode_handle *fwnode)
{
- struct fwnode_reference_args ref;
+ struct sfp_bus *bus;
int ret;
- if (!fwnode)
- return 0;
-
- ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
- 0, 0, &ref);
- if (ret < 0) {
- if (ret == -ENOENT)
- return 0;
-
- phylink_err(pl, "unable to parse \"sfp\" node: %d\n",
- ret);
+ bus = sfp_bus_find_fwnode(fwnode);
+ if (IS_ERR(bus)) {
+ ret = PTR_ERR(bus);
+ phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
return ret;
}
- pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
- if (!pl->sfp_bus)
- return -ENOMEM;
+ pl->sfp_bus = bus;
- return 0;
+ ret = sfp_bus_add_upstream(bus, pl, &sfp_phylink_ops);
+ sfp_bus_put(bus);
+
+ return ret;
}
/**
@@ -601,6 +593,8 @@ static int phylink_register_sfp(struct phylink *pl,
* Create a new phylink instance, and parse the link parameters found in @np.
* This will parse in-band modes, fixed-link or SFP configuration.
*
+ * Note: the rtnl lock must not be held when calling this function.
+ *
* Returns a pointer to a &struct phylink, or an error-pointer value. Users
* must use IS_ERR() to check for errors from this function.
*/
@@ -678,11 +672,12 @@ EXPORT_SYMBOL_GPL(phylink_create);
*
* Destroy a phylink instance. Any PHY that has been attached must have been
* cleaned up via phylink_disconnect_phy() prior to calling this function.
+ *
+ * Note: the rtnl lock must not be held when calling this function.
*/
void phylink_destroy(struct phylink *pl)
{
- if (pl->sfp_bus)
- sfp_unregister_upstream(pl->sfp_bus);
+ sfp_bus_del_upstream(pl->sfp_bus);
if (pl->link_gpio)
gpiod_put(pl->link_gpio);
@@ -722,11 +717,6 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
int ret;
- memset(&config, 0, sizeof(config));
- linkmode_copy(supported, phy->supported);
- linkmode_copy(config.advertising, phy->advertising);
- config.interface = pl->link_config.interface;
-
/*
* This is the new way of dealing with flow control for PHYs,
* as described by Timur Tabi in commit 529ed1275263 ("net: phy:
@@ -734,10 +724,12 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
* using our validate call to the MAC, we rely upon the MAC
* clearing the bits from both supported and advertising fields.
*/
- if (phylink_test(supported, Pause))
- phylink_set(config.advertising, Pause);
- if (phylink_test(supported, Asym_Pause))
- phylink_set(config.advertising, Asym_Pause);
+ phy_support_asym_pause(phy);
+
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
+ config.interface = pl->link_config.interface;
ret = phylink_validate(pl, supported, &config);
if (ret)
@@ -1150,7 +1142,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
if (pl->phydev)
break;
- phylink_get_mac_state(pl, &link_state);
+ phylink_mac_pcs_get_state(pl, &link_state);
/* The MAC is reporting the link results from its own PCS
* layer via in-band status. Report these as the current
@@ -1254,7 +1246,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
pl->link_config.duplex = our_kset.base.duplex;
pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
- if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ /* If we have a PHY, phylib will call our link state function if the
+ * mode has changed, which will trigger a resolve and update the MAC
+ * configuration. For a fixed link, this isn't able to change any
+ * parameters, which just leaves inband mode.
+ */
+ if (pl->link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
phylink_mac_config(pl, &pl->link_config);
phylink_mac_an_restart(pl);
}
@@ -1334,15 +1332,16 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
if (pause->tx_pause)
config->pause |= MLO_PAUSE_TX;
- if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ /* If we have a PHY, phylib will call our link state function if the
+ * mode has changed, which will trigger a resolve and update the MAC
+ * configuration.
+ */
+ if (pl->phydev) {
+ phy_set_asym_pause(pl->phydev, pause->rx_pause,
+ pause->tx_pause);
+ } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
switch (pl->link_an_mode) {
- case MLO_AN_PHY:
- /* Silently mark the carrier down, and then trigger a resolve */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- phylink_run_resolve(pl);
- break;
-
case MLO_AN_FIXED:
/* Should we allow fixed links to change against the config? */
phylink_resolve_flow(pl, config);
@@ -1562,10 +1561,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
case MLO_AN_INBAND:
if (phy_id == 0) {
- val = phylink_get_mac_state(pl, &state);
- if (val < 0)
- return val;
-
+ phylink_mac_pcs_get_state(pl, &state);
val = phylink_mii_emul_read(reg, &state);
}
break;
@@ -1744,8 +1740,7 @@ static int phylink_sfp_module_insert(void *upstream,
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
- changed = !bitmap_equal(pl->supported, support,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ changed = !linkmode_equal(pl->supported, support);
if (changed) {
linkmode_copy(pl->supported, support);
linkmode_copy(pl->link_config.advertising, config.advertising);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index b23fc41896ef..5a72093ab6e7 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -4,11 +4,18 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/phylink.h>
+#include <linux/property.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "sfp.h"
+struct sfp_quirk {
+ const char *vendor;
+ const char *part;
+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes);
+};
+
/**
* struct sfp_bus - internal representation of a sfp bus
*/
@@ -21,6 +28,7 @@ struct sfp_bus {
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
struct sfp *sfp;
+ const struct sfp_quirk *sfp_quirk;
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
@@ -30,6 +38,71 @@ struct sfp_bus {
bool started;
};
+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ unsigned long *modes)
+{
+ phylink_set(modes, 2500baseX_Full);
+}
+
+static const struct sfp_quirk sfp_quirks[] = {
+ {
+		// Alcatel Lucent G-010S-P can operate at 2500base-X, but
+		// incorrectly reports 2500MBd NRZ in its EEPROM
+ .vendor = "ALCATELLUCENT",
+ .part = "G010SP",
+ .modes = sfp_quirk_2500basex,
+ }, {
+		// Alcatel Lucent G-010S-A can operate at 2500base-X, but
+		// reports 3.2GBd NRZ in its EEPROM
+ .vendor = "ALCATELLUCENT",
+ .part = "3FE46541AA",
+ .modes = sfp_quirk_2500basex,
+ }, {
+		// Huawei MA5671A can operate at 2500base-X, but reports
+		// 1.2GBd NRZ in its EEPROM
+ .vendor = "HUAWEI",
+ .part = "MA5671A",
+ .modes = sfp_quirk_2500basex,
+ },
+};
+
+static size_t sfp_strlen(const char *str, size_t maxlen)
+{
+ size_t size, i;
+
+ /* Trailing characters should be filled with space chars */
+ for (i = 0, size = 0; i < maxlen; i++)
+ if (str[i] != ' ')
+ size = i + 1;
+
+ return size;
+}
+
+static bool sfp_match(const char *qs, const char *str, size_t len)
+{
+ if (!qs)
+ return true;
+ if (strlen(qs) != len)
+ return false;
+ return !strncmp(qs, str, len);
+}
+
+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
+{
+ const struct sfp_quirk *q;
+ unsigned int i;
+ size_t vs, ps;
+
+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
+
+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
+ if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
+ sfp_match(q->part, id->base.vendor_pn, ps))
+ return q;
+
+ return NULL;
+}
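
SFP EEPROM identification strings are fixed-width, space-padded fields rather than NUL-terminated C strings, which is why sfp_strlen() measures up to the last non-space byte. An illustration with a hypothetical buffer:

/* Illustration: a 16-byte, space-padded vendor name field. */
static const char example_vendor[16] = "HUAWEI          ";

/* sfp_strlen(example_vendor, 16) == 6, so
 * sfp_match("HUAWEI", example_vendor, 6) compares exactly the
 * significant bytes and returns true.
 */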
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -233,6 +306,9 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(modes, 1000baseX_Full);
}
+ if (bus->sfp_quirk)
+ bus->sfp_quirk->modes(id, modes);
+
bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
phylink_set(support, Autoneg);
@@ -328,10 +404,19 @@ static void sfp_bus_release(struct kref *kref)
kfree(bus);
}
-static void sfp_bus_put(struct sfp_bus *bus)
+/**
+ * sfp_bus_put() - put a reference on the &struct sfp_bus
+ * @bus: the &struct sfp_bus found via sfp_bus_find_fwnode()
+ *
+ * Put a reference on the &struct sfp_bus and free the underlying structure
+ * if this was the last reference.
+ */
+void sfp_bus_put(struct sfp_bus *bus)
{
- kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex);
+ if (bus)
+ kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex);
}
+EXPORT_SYMBOL_GPL(sfp_bus_put);
static int sfp_register_bus(struct sfp_bus *bus)
{
@@ -347,11 +432,11 @@ static int sfp_register_bus(struct sfp_bus *bus)
return ret;
}
}
+ bus->registered = true;
bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
bus->upstream_ops->attach(bus->upstream, bus);
- bus->registered = true;
return 0;
}
@@ -445,64 +530,111 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
}
/**
- * sfp_register_upstream() - Register the neighbouring device
- * @fwnode: firmware node for the SFP bus
+ * sfp_bus_find_fwnode() - parse and locate the SFP bus from fwnode
+ * @fwnode: firmware node for the parent device (MAC or PHY)
+ *
+ * Parse the parent device's firmware node for an SFP bus, and locate
+ * the sfp_bus structure, incrementing its reference count. The
+ * reference must be put via sfp_bus_put() when done.
+ *
+ * Returns: on success, a pointer to the sfp_bus structure,
+ *	   %NULL if no SFP is specified,
+ * on failure, an error pointer value:
+ *	   corresponding to the errors detailed for
+ *	   fwnode_property_get_reference_args(), or
+ *	   %-ENOMEM if we failed to allocate the bus.
+ */
+struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+{
+ struct fwnode_reference_args ref;
+ struct sfp_bus *bus;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+ 0, 0, &ref);
+ if (ret == -ENOENT)
+ return NULL;
+ else if (ret < 0)
+ return ERR_PTR(ret);
+
+ bus = sfp_bus_get(ref.fwnode);
+ fwnode_handle_put(ref.fwnode);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
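For context, a hedged sketch (not part of this patch) of how an upstream driver would consume the new lookup; struct example_priv and the function name are illustrative assumptions:

#include <linux/err.h>
#include <linux/property.h>
#include <linux/sfp.h>

struct example_priv {
	struct sfp_bus *sfp_bus;
};

static int example_get_sfp(struct device *dev, struct example_priv *priv)
{
	struct sfp_bus *bus = sfp_bus_find_fwnode(dev_fwnode(dev));

	if (IS_ERR(bus))
		return PTR_ERR(bus);	/* fwnode reference parse error */

	priv->sfp_bus = bus;		/* may be NULL: no "sfp" property */
	return 0;
}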
+
+/**
+ * sfp_bus_add_upstream() - parse and register the neighbouring device
+ * @bus: the &struct sfp_bus found via sfp_bus_find_fwnode()
* @upstream: the upstream private data
* @ops: the upstream's &struct sfp_upstream_ops
*
- * Register the upstream device (eg, PHY) with the SFP bus. MAC drivers
- * should use phylink, which will call this function for them. Returns
- * a pointer to the allocated &struct sfp_bus.
+ * Add an upstream driver for the SFP bus, and if the bus is complete,
+ * register the SFP bus using sfp_register_bus(). This takes its own
+ * reference on the bus, so it is safe to put the bus after this call.
*
- * On error, returns %NULL.
+ * Returns: 0 if no SFP bus is specified or on successful registration;
+ *	otherwise a negative error code from registering the bus, such
+ *	as an error returned by the upstream's connect_phy() method.
*/
-struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops)
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(fwnode);
- int ret = 0;
+ int ret;
- if (bus) {
- rtnl_lock();
- bus->upstream_ops = ops;
- bus->upstream = upstream;
+ /* If no bus, return success */
+ if (!bus)
+ return 0;
- if (bus->sfp) {
- ret = sfp_register_bus(bus);
- if (ret)
- sfp_upstream_clear(bus);
- }
- rtnl_unlock();
+ rtnl_lock();
+ kref_get(&bus->kref);
+ bus->upstream_ops = ops;
+ bus->upstream = upstream;
+
+ if (bus->sfp) {
+ ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_upstream_clear(bus);
+ } else {
+ ret = 0;
}
+ rtnl_unlock();
- if (ret) {
+ if (ret)
sfp_bus_put(bus);
- bus = NULL;
- }
- return bus;
+ return ret;
}
-EXPORT_SYMBOL_GPL(sfp_register_upstream);
+EXPORT_SYMBOL_GPL(sfp_bus_add_upstream);
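Continuing the illustrative sketch: because registration takes its own reference, the caller can drop the sfp_bus_find_fwnode() reference immediately after this call; example_sfp_ops is an assumed &struct sfp_upstream_ops defined elsewhere in the driver:

extern const struct sfp_upstream_ops example_sfp_ops;	/* assumed */

static int example_attach_sfp(struct example_priv *priv, void *upstream)
{
	int err = sfp_bus_add_upstream(priv->sfp_bus, upstream,
				       &example_sfp_ops);

	sfp_bus_put(priv->sfp_bus);	/* safe even on failure or NULL */
	if (err)
		priv->sfp_bus = NULL;	/* our reference is gone */
	return err;
}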
/**
- * sfp_unregister_upstream() - Unregister sfp bus
+ * sfp_bus_del_upstream() - Remove the upstream device from an SFP bus
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
*
- * Unregister a previously registered upstream connection for the SFP
- * module. @bus is returned from sfp_register_upstream().
+ * Delete a previously registered upstream connection for the SFP
+ * module. @bus should have been added by sfp_bus_add_upstream().
*/
-void sfp_unregister_upstream(struct sfp_bus *bus)
+void sfp_bus_del_upstream(struct sfp_bus *bus)
{
- rtnl_lock();
- if (bus->sfp)
- sfp_unregister_bus(bus);
- sfp_upstream_clear(bus);
- rtnl_unlock();
+ if (bus) {
+ rtnl_lock();
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
+ sfp_upstream_clear(bus);
+ rtnl_unlock();
- sfp_bus_put(bus);
+ sfp_bus_put(bus);
+ }
}
-EXPORT_SYMBOL_GPL(sfp_unregister_upstream);
+EXPORT_SYMBOL_GPL(sfp_bus_del_upstream);
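And the matching illustrative teardown; sfp_bus_del_upstream() is NULL-safe, mirroring sfp_bus_put():

static void example_detach_sfp(struct example_priv *priv)
{
	sfp_bus_del_upstream(priv->sfp_bus);	/* NULL bus is a no-op */
	priv->sfp_bus = NULL;
}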
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
@@ -553,6 +685,8 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
+ bus->sfp_quirk = sfp_lookup_quirk(id);
+
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
@@ -566,6 +700,8 @@ void sfp_module_remove(struct sfp_bus *bus)
if (ops && ops->module_remove)
ops->module_remove(bus->upstream);
+
+ bus->sfp_quirk = NULL;
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 272d5773573e..bdbbb76f8fd3 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -36,6 +36,8 @@ enum {
SFP_E_INSERT = 0,
SFP_E_REMOVE,
+ SFP_E_DEV_ATTACH,
+ SFP_E_DEV_DETACH,
SFP_E_DEV_DOWN,
SFP_E_DEV_UP,
SFP_E_TX_FAULT,
@@ -45,16 +47,21 @@ enum {
SFP_E_TIMEOUT,
SFP_MOD_EMPTY = 0,
+ SFP_MOD_ERROR,
SFP_MOD_PROBE,
+ SFP_MOD_WAITDEV,
SFP_MOD_HPOWER,
+ SFP_MOD_WAITPWR,
SFP_MOD_PRESENT,
- SFP_MOD_ERROR,
- SFP_DEV_DOWN = 0,
+ SFP_DEV_DETACHED = 0,
+ SFP_DEV_DOWN,
SFP_DEV_UP,
SFP_S_DOWN = 0,
+ SFP_S_WAIT,
SFP_S_INIT,
+ SFP_S_INIT_TX_FAULT,
SFP_S_WAIT_LOS,
SFP_S_LINK_UP,
SFP_S_TX_FAULT,
@@ -64,10 +71,12 @@ enum {
static const char * const mod_state_strings[] = {
[SFP_MOD_EMPTY] = "empty",
+ [SFP_MOD_ERROR] = "error",
[SFP_MOD_PROBE] = "probe",
+ [SFP_MOD_WAITDEV] = "waitdev",
[SFP_MOD_HPOWER] = "hpower",
+ [SFP_MOD_WAITPWR] = "waitpwr",
[SFP_MOD_PRESENT] = "present",
- [SFP_MOD_ERROR] = "error",
};
static const char *mod_state_to_str(unsigned short mod_state)
@@ -78,6 +87,7 @@ static const char *mod_state_to_str(unsigned short mod_state)
}
static const char * const dev_state_strings[] = {
+ [SFP_DEV_DETACHED] = "detached",
[SFP_DEV_DOWN] = "down",
[SFP_DEV_UP] = "up",
};
@@ -92,6 +102,8 @@ static const char *dev_state_to_str(unsigned short dev_state)
static const char * const event_strings[] = {
[SFP_E_INSERT] = "insert",
[SFP_E_REMOVE] = "remove",
+ [SFP_E_DEV_ATTACH] = "dev_attach",
+ [SFP_E_DEV_DETACH] = "dev_detach",
[SFP_E_DEV_DOWN] = "dev_down",
[SFP_E_DEV_UP] = "dev_up",
[SFP_E_TX_FAULT] = "tx_fault",
@@ -110,7 +122,9 @@ static const char *event_to_str(unsigned short event)
static const char * const sm_state_strings[] = {
[SFP_S_DOWN] = "down",
+ [SFP_S_WAIT] = "wait",
[SFP_S_INIT] = "init",
+ [SFP_S_INIT_TX_FAULT] = "init_tx_fault",
[SFP_S_WAIT_LOS] = "wait_los",
[SFP_S_LINK_UP] = "link_up",
[SFP_S_TX_FAULT] = "tx_fault",
@@ -141,6 +155,7 @@ static const enum gpiod_flags gpio_flags[] = {
GPIOD_ASIS,
};
+#define T_WAIT msecs_to_jiffies(50)
#define T_INIT_JIFFIES msecs_to_jiffies(300)
#define T_RESET_US 10
#define T_FAULT_RECOVER msecs_to_jiffies(1000)
@@ -149,22 +164,21 @@ static const enum gpiod_flags gpio_flags[] = {
* the same length on the PCB, which means it's possible for MOD DEF 0 to
* connect before the I2C bus on MOD DEF 1/2.
*
- * The SFP MSA specifies 300ms as t_init (the time taken for TX_FAULT to
- * be deasserted) but makes no mention of the earliest time before we can
- * access the I2C EEPROM. However, Avago modules require 300ms.
+ * SFF-8472 specifies t_serial ("Time from power on until module is
+ * ready for data transmission over the two wire serial bus.") as 300ms.
*/
-#define T_PROBE_INIT msecs_to_jiffies(300)
-#define T_HPOWER_LEVEL msecs_to_jiffies(300)
-#define T_PROBE_RETRY msecs_to_jiffies(100)
+#define T_SERIAL msecs_to_jiffies(300)
+#define T_HPOWER_LEVEL msecs_to_jiffies(300)
+#define T_PROBE_RETRY_INIT msecs_to_jiffies(100)
+#define R_PROBE_RETRY_INIT 10
+#define T_PROBE_RETRY_SLOW msecs_to_jiffies(5000)
+#define R_PROBE_RETRY_SLOW 12
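These constants bound how long a freshly inserted module is probed: t_serial, then up to 10 fast retries at 100ms, then up to 12 slow retries at 5s. A rough worked example in plain milliseconds (ignoring jiffies rounding and the exact decrement pattern in the state machine below):

#include <stdio.h>

int main(void)
{
	/* Mirror the retry constants above (values in ms) */
	const unsigned int t_serial = 300, t_fast = 100, t_slow = 5000;
	const unsigned int r_fast = 10, r_slow = 12;

	/* Worst case: initial wait, all fast retries, all slow retries */
	unsigned int total = t_serial + r_fast * t_fast + r_slow * t_slow;

	/* 300 + 1000 + 60000 = 61300ms, i.e. roughly a minute */
	printf("worst-case module probe window: %u.%us\n",
	       total / 1000, (total % 1000) / 100);
	return 0;
}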
/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
*/
#define SFP_PHY_ADDR 22
-/* Give this long for the PHY to reset. */
-#define T_PHY_RESET_MS 50
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -187,20 +201,28 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];
int gpio_irq[GPIO_MAX];
- bool attached;
+ bool need_poll;
+
struct mutex st_mutex; /* Protects state */
+ unsigned int state_soft_mask;
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
struct mutex sm_mutex; /* Protects state machine */
unsigned char sm_mod_state;
+ unsigned char sm_mod_tries_init;
+ unsigned char sm_mod_tries;
unsigned char sm_dev_state;
unsigned short sm_state;
unsigned int sm_retries;
struct sfp_eeprom_id id;
+ unsigned int module_power_mW;
+
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
+ struct delayed_work hwmon_probe;
+ unsigned int hwmon_tries;
struct device *hwmon_dev;
char *hwmon_name;
#endif
@@ -376,24 +398,90 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
}
/* Interface */
-static unsigned int sfp_get_state(struct sfp *sfp)
+static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
- return sfp->get_state(sfp);
+ return sfp->read(sfp, a2, addr, buf, len);
}
-static void sfp_set_state(struct sfp *sfp, unsigned int state)
+static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
- sfp->set_state(sfp, state);
+ return sfp->write(sfp, a2, addr, buf, len);
}
-static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
+static unsigned int sfp_soft_get_state(struct sfp *sfp)
{
- return sfp->read(sfp, a2, addr, buf, len);
+ unsigned int state = 0;
+ u8 status;
+
+ if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
+ sizeof(status)) {
+ if (status & SFP_STATUS_RX_LOS)
+ state |= SFP_F_LOS;
+ if (status & SFP_STATUS_TX_FAULT)
+ state |= SFP_F_TX_FAULT;
+ }
+
+ return state & sfp->state_soft_mask;
}
-static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
+static void sfp_soft_set_state(struct sfp *sfp, unsigned int state)
{
- return sfp->write(sfp, a2, addr, buf, len);
+ u8 status;
+
+ if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
+ sizeof(status)) {
+ if (state & SFP_F_TX_DISABLE)
+ status |= SFP_STATUS_TX_DISABLE_FORCE;
+ else
+ status &= ~SFP_STATUS_TX_DISABLE_FORCE;
+
+ sfp_write(sfp, true, SFP_STATUS, &status, sizeof(status));
+ }
+}
+
+static void sfp_soft_start_poll(struct sfp *sfp)
+{
+ const struct sfp_eeprom_id *id = &sfp->id;
+
+ sfp->state_soft_mask = 0;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE &&
+ !sfp->gpio[GPIO_TX_DISABLE])
+ sfp->state_soft_mask |= SFP_F_TX_DISABLE;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT &&
+ !sfp->gpio[GPIO_TX_FAULT])
+ sfp->state_soft_mask |= SFP_F_TX_FAULT;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS &&
+ !sfp->gpio[GPIO_LOS])
+ sfp->state_soft_mask |= SFP_F_LOS;
+
+ if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
+ !sfp->need_poll)
+ mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+}
+
+static void sfp_soft_stop_poll(struct sfp *sfp)
+{
+ sfp->state_soft_mask = 0;
+}
+
+static unsigned int sfp_get_state(struct sfp *sfp)
+{
+ unsigned int state = sfp->get_state(sfp);
+
+ if (state & SFP_F_PRESENT &&
+ sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT))
+ state |= sfp_soft_get_state(sfp);
+
+ return state;
+}
+
+static void sfp_set_state(struct sfp *sfp, unsigned int state)
+{
+ sfp->set_state(sfp, state);
+
+ if (state & SFP_F_PRESENT &&
+ sfp->state_soft_mask & SFP_F_TX_DISABLE)
+ sfp_soft_set_state(sfp, state);
}
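Taken together, sfp_get_state()/sfp_set_state() overlay the two-wire (A2h status byte) signals on top of the GPIO state, gated by state_soft_mask and module presence. A runnable model of the merge, using illustrative flag values rather than the driver's real bit assignments:

#include <assert.h>
#include <stdio.h>

#define F_PRESENT    (1 << 0)	/* illustrative bit positions */
#define F_LOS        (1 << 1)
#define F_TX_FAULT   (1 << 2)
#define F_TX_DISABLE (1 << 3)

/* Model of sfp_get_state(): merge GPIO bits with soft-status bits,
 * but only for signals in the soft mask and only while present.
 */
static unsigned int get_state(unsigned int gpio, unsigned int soft,
			      unsigned int mask)
{
	unsigned int state = gpio;

	if ((state & F_PRESENT) && (mask & (F_LOS | F_TX_FAULT)))
		state |= soft & mask;

	return state;
}

int main(void)
{
	/* No LOS GPIO wired up: LOS comes from the A2h status byte */
	unsigned int mask = F_LOS;

	assert(get_state(F_PRESENT, F_LOS, mask) == (F_PRESENT | F_LOS));
	/* Module absent: soft status is ignored entirely */
	assert(get_state(0, F_LOS, mask) == 0);
	puts("soft-state merge ok");
	return 0;
}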
static unsigned int sfp_check(void *buf, size_t len)
@@ -1142,29 +1230,27 @@ static const struct hwmon_chip_info sfp_hwmon_chip_info = {
.info = sfp_hwmon_info,
};
-static int sfp_hwmon_insert(struct sfp *sfp)
+static void sfp_hwmon_probe(struct work_struct *work)
{
+ struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
int err, i;
- if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
- return 0;
-
- if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
- return 0;
-
- if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
- /* This driver in general does not support address
- * change.
- */
- return 0;
-
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
- if (err < 0)
- return err;
+ if (err < 0) {
+ if (sfp->hwmon_tries--) {
+ mod_delayed_work(system_wq, &sfp->hwmon_probe,
+ T_PROBE_RETRY_SLOW);
+ } else {
+ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ }
+ return;
+ }
sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
- if (!sfp->hwmon_name)
- return -ENODEV;
+ if (!sfp->hwmon_name) {
+ dev_err(sfp->dev, "out of memory for hwmon name\n");
+ return;
+ }
for (i = 0; sfp->hwmon_name[i]; i++)
if (hwmon_is_bad_char(sfp->hwmon_name[i]))
@@ -1174,18 +1260,52 @@ static int sfp_hwmon_insert(struct sfp *sfp)
sfp->hwmon_name, sfp,
&sfp_hwmon_chip_info,
NULL);
+ if (IS_ERR(sfp->hwmon_dev))
+ dev_err(sfp->dev, "failed to register hwmon device: %ld\n",
+ PTR_ERR(sfp->hwmon_dev));
+}
+
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
+ return 0;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
+ return 0;
- return PTR_ERR_OR_ZERO(sfp->hwmon_dev);
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+ /* This driver in general does not support address
+ * change.
+ */
+ return 0;
+
+ mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+ sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
+
+ return 0;
}
static void sfp_hwmon_remove(struct sfp *sfp)
{
+ cancel_delayed_work_sync(&sfp->hwmon_probe);
if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
hwmon_device_unregister(sfp->hwmon_dev);
sfp->hwmon_dev = NULL;
kfree(sfp->hwmon_name);
}
}
+
+static int sfp_hwmon_init(struct sfp *sfp)
+{
+ INIT_DELAYED_WORK(&sfp->hwmon_probe, sfp_hwmon_probe);
+
+ return 0;
+}
+
+static void sfp_hwmon_exit(struct sfp *sfp)
+{
+ cancel_delayed_work_sync(&sfp->hwmon_probe);
+}
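The hwmon probe above moves a potentially failing diagnostics read out of the insert path and into delayed work that backs off and retries. A hedged sketch of that generic retry shape; my_dev and my_try_read() are hypothetical stand-ins:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work probe;
	unsigned int tries;
};

extern int my_try_read(struct my_dev *d);	/* hypothetical I/O */

static void my_probe_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, probe.work);

	if (my_try_read(d) < 0 && d->tries--) {
		/* back off and retry from the workqueue, as above */
		mod_delayed_work(system_wq, &d->probe,
				 msecs_to_jiffies(5000));
	}
	/* else: success, or retries exhausted */
}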
#else
static int sfp_hwmon_insert(struct sfp *sfp)
{
@@ -1195,6 +1315,15 @@ static int sfp_hwmon_insert(struct sfp *sfp)
static void sfp_hwmon_remove(struct sfp *sfp)
{
}
+
+static int sfp_hwmon_init(struct sfp *sfp)
+{
+ return 0;
+}
+
+static void sfp_hwmon_exit(struct sfp *sfp)
+{
+}
#endif
/* Helpers */
@@ -1245,7 +1374,7 @@ static void sfp_sm_next(struct sfp *sfp, unsigned int state,
sfp_sm_set_timer(sfp, timeout);
}
-static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state,
+static void sfp_sm_mod_next(struct sfp *sfp, unsigned int state,
unsigned int timeout)
{
sfp->sm_mod_state = state;
@@ -1266,8 +1395,6 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
struct phy_device *phy;
int err;
- msleep(T_PHY_RESET_MS);
-
phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
if (phy == ERR_PTR(-ENODEV)) {
dev_info(sfp->dev, "no PHY detected\n");
@@ -1335,7 +1462,7 @@ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
event == SFP_E_LOS_LOW);
}
-static void sfp_sm_fault(struct sfp *sfp, bool warn)
+static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
{
if (sfp->sm_retries && !--sfp->sm_retries) {
dev_err(sfp->dev,
@@ -1345,21 +1472,12 @@ static void sfp_sm_fault(struct sfp *sfp, bool warn)
if (warn)
dev_err(sfp->dev, "module transmit fault indicated\n");
- sfp_sm_next(sfp, SFP_S_TX_FAULT, T_FAULT_RECOVER);
+ sfp_sm_next(sfp, next_state, T_FAULT_RECOVER);
}
}
-static void sfp_sm_mod_init(struct sfp *sfp)
+static void sfp_sm_probe_for_phy(struct sfp *sfp)
{
- sfp_module_tx_enable(sfp);
-
- /* Wait t_init before indicating that the link is up, provided the
- * current state indicates no TX_FAULT. If TX_FAULT clears before
- * this time, that's fine too.
- */
- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
- sfp->sm_retries = 5;
-
/* Setting the serdes link mode is guesswork: there's no
* field in the EEPROM which indicates what mode should
* be used.
@@ -1375,69 +1493,83 @@ static void sfp_sm_mod_init(struct sfp *sfp)
sfp_sm_probe_phy(sfp);
}
-static int sfp_sm_mod_hpower(struct sfp *sfp)
+static int sfp_module_parse_power(struct sfp *sfp)
{
- u32 power;
- u8 val;
- int err;
+ u32 power_mW = 1000;
- power = 1000;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
- power = 1500;
+ power_mW = 1500;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
- power = 2000;
-
- if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE &&
- (sfp->id.ext.diagmon & (SFP_DIAGMON_DDM | SFP_DIAGMON_ADDRMODE)) !=
- SFP_DIAGMON_DDM) {
- /* The module appears not to implement bus address 0xa2,
- * or requires an address change sequence, so assume that
- * the module powers up in the indicated power mode.
- */
- if (power > sfp->max_power_mW) {
+ power_mW = 2000;
+
+ if (power_mW > sfp->max_power_mW) {
+ /* Module power specification exceeds the allowed maximum. */
+ if (sfp->id.ext.sff8472_compliance ==
+ SFP_SFF8472_COMPLIANCE_NONE &&
+ !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) {
+ /* The module appears not to implement bus address
+ * 0xa2, so assume that the module powers up in the
+ * indicated mode.
+ */
dev_err(sfp->dev,
"Host does not support %u.%uW modules\n",
- power / 1000, (power / 100) % 10);
+ power_mW / 1000, (power_mW / 100) % 10);
return -EINVAL;
+ } else {
+ dev_warn(sfp->dev,
+ "Host does not support %u.%uW modules, module left in power mode 1\n",
+ power_mW / 1000, (power_mW / 100) % 10);
+ return 0;
}
- return 0;
}
- if (power > sfp->max_power_mW) {
+ /* If the module requires a higher power mode, but also requires
+ * an address change sequence, warn the user that the module may
+ * not be functional.
+ */
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) {
dev_warn(sfp->dev,
- "Host does not support %u.%uW modules, module left in power mode 1\n",
- power / 1000, (power / 100) % 10);
+ "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n",
+ power_mW / 1000, (power_mW / 100) % 10);
return 0;
}
- if (power <= 1000)
- return 0;
+ sfp->module_power_mW = power_mW;
+
+ return 0;
+}
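sfp_module_parse_power() maps the two option bits to the declared power level. A small userspace model of that classification and of the %u.%uW formatting used in the log messages (the bit values here are illustrative; the real masks live in <linux/sfp.h>):

#include <stdio.h>

#define OPT_POWER_DECL       (1 << 1)	/* illustrative bit positions */
#define OPT_HIGH_POWER_LEVEL (1 << 2)

static unsigned int parse_power_mW(unsigned int options)
{
	unsigned int power_mW = 1000;	/* Power Level 1 default */

	if (options & OPT_POWER_DECL)
		power_mW = 1500;	/* Power Level 2 */
	if (options & OPT_HIGH_POWER_LEVEL)
		power_mW = 2000;	/* Power Level 3 */

	return power_mW;
}

int main(void)
{
	unsigned int mW = parse_power_mW(OPT_POWER_DECL |
					 OPT_HIGH_POWER_LEVEL);

	/* Matches the driver's formatting: 2000 prints as "2.0W" */
	printf("%u.%uW\n", mW / 1000, (mW / 100) % 10);
	return 0;
}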
+
+static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
+{
+ u8 val;
+ int err;
err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
- err = -EAGAIN;
- goto err;
+ return -EAGAIN;
}
- val |= BIT(0);
+ if (enable)
+ val |= BIT(0);
+ else
+ val &= ~BIT(0);
err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
- err = -EAGAIN;
- goto err;
+ return -EAGAIN;
}
- dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
- power / 1000, (power / 100) % 10);
- return T_HPOWER_LEVEL;
+ if (enable)
+ dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
+ sfp->module_power_mW / 1000,
+ (sfp->module_power_mW / 100) % 10);
-err:
- return err;
+ return 0;
}
-static int sfp_sm_mod_probe(struct sfp *sfp)
+static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
@@ -1447,7 +1579,8 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
ret = sfp_read(sfp, false, 0, &id, sizeof(id));
if (ret < 0) {
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ if (report)
+ dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
return -EAGAIN;
}
@@ -1505,7 +1638,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
(int)sizeof(id.ext.datecode), id.ext.datecode);
/* Check whether we support this module */
- if (!sfp->type->module_supported(&sfp->id)) {
+ if (!sfp->type->module_supported(&id)) {
dev_err(sfp->dev,
"module is not supported - phys id 0x%02x 0x%02x\n",
sfp->id.base.phys_id, sfp->id.base.phys_ext_id);
@@ -1517,106 +1650,174 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
- ret = sfp_hwmon_insert(sfp);
- if (ret < 0)
- return ret;
-
- ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ /* Parse the module power requirement */
+ ret = sfp_module_parse_power(sfp);
if (ret < 0)
return ret;
- return sfp_sm_mod_hpower(sfp);
+ return 0;
}
static void sfp_sm_mod_remove(struct sfp *sfp)
{
- sfp_module_remove(sfp->sfp_bus);
+ if (sfp->sm_mod_state > SFP_MOD_WAITDEV)
+ sfp_module_remove(sfp->sfp_bus);
sfp_hwmon_remove(sfp);
- if (sfp->mod_phy)
- sfp_sm_phy_detach(sfp);
-
- sfp_module_tx_disable(sfp);
-
memset(&sfp->id, 0, sizeof(sfp->id));
+ sfp->module_power_mW = 0;
dev_info(sfp->dev, "module removed\n");
}
-static void sfp_sm_event(struct sfp *sfp, unsigned int event)
+/* This state machine tracks the upstream's state */
+static void sfp_sm_device(struct sfp *sfp, unsigned int event)
{
- mutex_lock(&sfp->sm_mutex);
+ switch (sfp->sm_dev_state) {
+ default:
+ if (event == SFP_E_DEV_ATTACH)
+ sfp->sm_dev_state = SFP_DEV_DOWN;
+ break;
- dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
- mod_state_to_str(sfp->sm_mod_state),
- dev_state_to_str(sfp->sm_dev_state),
- sm_state_to_str(sfp->sm_state),
- event_to_str(event));
+ case SFP_DEV_DOWN:
+ if (event == SFP_E_DEV_DETACH)
+ sfp->sm_dev_state = SFP_DEV_DETACHED;
+ else if (event == SFP_E_DEV_UP)
+ sfp->sm_dev_state = SFP_DEV_UP;
+ break;
+
+ case SFP_DEV_UP:
+ if (event == SFP_E_DEV_DETACH)
+ sfp->sm_dev_state = SFP_DEV_DETACHED;
+ else if (event == SFP_E_DEV_DOWN)
+ sfp->sm_dev_state = SFP_DEV_DOWN;
+ break;
+ }
+}
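sfp_sm_device() is small enough to verify by hand; a runnable userspace transcription of its transition table, showing how attach/detach gates the up/down events:

#include <assert.h>

enum { DEV_DETACHED, DEV_DOWN, DEV_UP };
enum { E_ATTACH, E_DETACH, E_DOWN, E_UP };

/* Transcription of the switch above */
static int dev_sm(int state, int event)
{
	switch (state) {
	default:		/* DEV_DETACHED */
		return event == E_ATTACH ? DEV_DOWN : state;
	case DEV_DOWN:
		if (event == E_DETACH)
			return DEV_DETACHED;
		return event == E_UP ? DEV_UP : state;
	case DEV_UP:
		if (event == E_DETACH)
			return DEV_DETACHED;
		return event == E_DOWN ? DEV_DOWN : state;
	}
}

int main(void)
{
	int s = DEV_DETACHED;

	s = dev_sm(s, E_UP);	/* ignored: upstream not attached yet */
	assert(s == DEV_DETACHED);
	s = dev_sm(s, E_ATTACH);
	s = dev_sm(s, E_UP);
	assert(s == DEV_UP);
	return 0;
}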
+
+/* This state machine tracks the insert/remove state of the module, probes
+ * the on-board EEPROM, and sets up the power level.
+ */
+static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+{
+ int err;
+
+ /* Handle remove event globally, it resets this state machine */
+ if (event == SFP_E_REMOVE) {
+ if (sfp->sm_mod_state > SFP_MOD_PROBE)
+ sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
+ return;
+ }
+
+ /* Handle device detach globally */
+ if (sfp->sm_dev_state < SFP_DEV_DOWN &&
+ sfp->sm_mod_state > SFP_MOD_WAITDEV) {
+ if (sfp->module_power_mW > 1000 &&
+ sfp->sm_mod_state > SFP_MOD_HPOWER)
+ sfp_sm_mod_hpower(sfp, false);
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ return;
+ }
- /* This state machine tracks the insert/remove state of
- * the module, and handles probing the on-board EEPROM.
- */
switch (sfp->sm_mod_state) {
default:
- if (event == SFP_E_INSERT && sfp->attached) {
- sfp_module_tx_disable(sfp);
- sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
+ if (event == SFP_E_INSERT) {
+ sfp_sm_mod_next(sfp, SFP_MOD_PROBE, T_SERIAL);
+ sfp->sm_mod_tries_init = R_PROBE_RETRY_INIT;
+ sfp->sm_mod_tries = R_PROBE_RETRY_SLOW;
}
break;
case SFP_MOD_PROBE:
- if (event == SFP_E_REMOVE) {
- sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
- } else if (event == SFP_E_TIMEOUT) {
- int val = sfp_sm_mod_probe(sfp);
-
- if (val == 0)
- sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
- else if (val > 0)
- sfp_sm_ins_next(sfp, SFP_MOD_HPOWER, val);
- else if (val != -EAGAIN)
- sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0);
- else
- sfp_sm_set_timer(sfp, T_PROBE_RETRY);
+ /* Wait for T_PROBE_INIT to time out */
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ err = sfp_sm_mod_probe(sfp, sfp->sm_mod_tries == 1);
+ if (err == -EAGAIN) {
+ if (sfp->sm_mod_tries_init &&
+ --sfp->sm_mod_tries_init) {
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_INIT);
+ break;
+ } else if (sfp->sm_mod_tries && --sfp->sm_mod_tries) {
+ if (sfp->sm_mod_tries == R_PROBE_RETRY_SLOW - 1)
+ dev_warn(sfp->dev,
+ "please wait, module slow to respond\n");
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_SLOW);
+ break;
+ }
+ }
+ if (err < 0) {
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
+ break;
}
- break;
- case SFP_MOD_HPOWER:
- if (event == SFP_E_TIMEOUT) {
- sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ /* fall through */
+ case SFP_MOD_WAITDEV:
+ /* Ensure that the device is attached before proceeding */
+ if (sfp->sm_dev_state < SFP_DEV_DOWN)
+ break;
+
+ /* Report the module insertion to the upstream device */
+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ if (err < 0) {
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
break;
}
- /* fallthrough */
- case SFP_MOD_PRESENT:
- case SFP_MOD_ERROR:
- if (event == SFP_E_REMOVE) {
- sfp_sm_mod_remove(sfp);
- sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
+
+ /* If this is a power level 1 module, we are done */
+ if (sfp->module_power_mW <= 1000)
+ goto insert;
+
+ sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0);
+ /* fall through */
+ case SFP_MOD_HPOWER:
+ /* Enable high power mode */
+ err = sfp_sm_mod_hpower(sfp, true);
+ if (err < 0) {
+ if (err != -EAGAIN) {
+ sfp_module_remove(sfp->sfp_bus);
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
+ } else {
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_INIT);
+ }
+ break;
}
+
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITPWR, T_HPOWER_LEVEL);
break;
- }
- /* This state machine tracks the netdev up/down state */
- switch (sfp->sm_dev_state) {
- default:
- if (event == SFP_E_DEV_UP)
- sfp->sm_dev_state = SFP_DEV_UP;
+ case SFP_MOD_WAITPWR:
+ /* Wait for T_HPOWER_LEVEL to time out */
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ insert:
+ sfp_sm_mod_next(sfp, SFP_MOD_PRESENT, 0);
break;
- case SFP_DEV_UP:
- if (event == SFP_E_DEV_DOWN) {
- /* If the module has a PHY, avoid raising TX disable
- * as this resets the PHY. Otherwise, raise it to
- * turn the laser off.
- */
- if (!sfp->mod_phy)
- sfp_module_tx_disable(sfp);
- sfp->sm_dev_state = SFP_DEV_DOWN;
- }
+ case SFP_MOD_PRESENT:
+ case SFP_MOD_ERROR:
break;
}
+#if IS_ENABLED(CONFIG_HWMON)
+ if (sfp->sm_mod_state >= SFP_MOD_WAITDEV &&
+ IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+ err = sfp_hwmon_insert(sfp);
+ if (err)
+ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ }
+#endif
+}
+
+static void sfp_sm_main(struct sfp *sfp, unsigned int event)
+{
+ unsigned long timeout;
+
/* Some events are global */
if (sfp->sm_state != SFP_S_DOWN &&
(sfp->sm_mod_state != SFP_MOD_PRESENT ||
@@ -1626,29 +1827,87 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
sfp_sm_link_down(sfp);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
+ sfp_module_tx_disable(sfp);
+ sfp_soft_stop_poll(sfp);
sfp_sm_next(sfp, SFP_S_DOWN, 0);
- mutex_unlock(&sfp->sm_mutex);
return;
}
/* The main state machine */
switch (sfp->sm_state) {
case SFP_S_DOWN:
- if (sfp->sm_mod_state == SFP_MOD_PRESENT &&
- sfp->sm_dev_state == SFP_DEV_UP)
- sfp_sm_mod_init(sfp);
+ if (sfp->sm_mod_state != SFP_MOD_PRESENT ||
+ sfp->sm_dev_state != SFP_DEV_UP)
+ break;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE))
+ sfp_soft_start_poll(sfp);
+
+ sfp_module_tx_enable(sfp);
+
+ /* Initialise the fault clearance retries */
+ sfp->sm_retries = 5;
+
+ /* We need to check the TX_FAULT state, which is not defined
+ * while TX_DISABLE is asserted. The earliest we want to do
+ * anything (such as probe for a PHY) is 50ms.
+ */
+ sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT);
+ break;
+
+ case SFP_S_WAIT:
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ if (sfp->state & SFP_F_TX_FAULT) {
+ /* Wait t_init before indicating that the link is up,
+ * provided the current state indicates no TX_FAULT. If
+ * TX_FAULT clears before this time, that's fine too.
+ */
+ timeout = T_INIT_JIFFIES;
+ if (timeout > T_WAIT)
+ timeout -= T_WAIT;
+ else
+ timeout = 1;
+
+ sfp_sm_next(sfp, SFP_S_INIT, timeout);
+ } else {
+ /* TX_FAULT is not asserted, assume the module has
+ * finished initialising.
+ */
+ goto init_done;
+ }
break;
case SFP_S_INIT:
- if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT)
- sfp_sm_fault(sfp, true);
- else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR)
+ if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
+ /* TX_FAULT is still asserted after t_init, so assume
+ * there is a fault.
+ */
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+ sfp->sm_retries == 5);
+ } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
+ init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ sfp_sm_probe_for_phy(sfp);
sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+ sfp->sm_retries = 5;
+ }
+ break;
+
+ case SFP_S_INIT_TX_FAULT:
+ if (event == SFP_E_TIMEOUT) {
+ sfp_module_tx_fault_reset(sfp);
+ sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
+ }
break;
case SFP_S_WAIT_LOS:
if (event == SFP_E_TX_FAULT)
- sfp_sm_fault(sfp, true);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, true);
else if (sfp_los_event_inactive(sfp, event))
sfp_sm_link_up(sfp);
break;
@@ -1656,7 +1915,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_LINK_UP:
if (event == SFP_E_TX_FAULT) {
sfp_sm_link_down(sfp);
- sfp_sm_fault(sfp, true);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, true);
} else if (sfp_los_event_active(sfp, event)) {
sfp_sm_link_down(sfp);
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1672,7 +1931,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_REINIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- sfp_sm_fault(sfp, false);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, false);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
dev_info(sfp->dev, "module transmit fault recovered\n");
sfp_sm_link_check_los(sfp);
@@ -1682,6 +1941,21 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_TX_DISABLE:
break;
}
+}
+
+static void sfp_sm_event(struct sfp *sfp, unsigned int event)
+{
+ mutex_lock(&sfp->sm_mutex);
+
+ dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state),
+ event_to_str(event));
+
+ sfp_sm_device(sfp, event);
+ sfp_sm_module(sfp, event);
+ sfp_sm_main(sfp, event);
dev_dbg(sfp->dev, "SM: exit %s:%s:%s\n",
mod_state_to_str(sfp->sm_mod_state),
@@ -1693,15 +1967,12 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
static void sfp_attach(struct sfp *sfp)
{
- sfp->attached = true;
- if (sfp->state & SFP_F_PRESENT)
- sfp_sm_event(sfp, SFP_E_INSERT);
+ sfp_sm_event(sfp, SFP_E_DEV_ATTACH);
}
static void sfp_detach(struct sfp *sfp)
{
- sfp->attached = false;
- sfp_sm_event(sfp, SFP_E_REMOVE);
+ sfp_sm_event(sfp, SFP_E_DEV_DETACH);
}
static void sfp_start(struct sfp *sfp)
@@ -1828,7 +2099,10 @@ static void sfp_poll(struct work_struct *work)
struct sfp *sfp = container_of(work, struct sfp, poll.work);
sfp_check_state(sfp);
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+
+ if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) ||
+ sfp->need_poll)
+ mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
}
static struct sfp *sfp_alloc(struct device *dev)
@@ -1846,6 +2120,8 @@ static struct sfp *sfp_alloc(struct device *dev)
INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
+ sfp_hwmon_init(sfp);
+
return sfp;
}
@@ -1853,6 +2129,8 @@ static void sfp_cleanup(void *data)
{
struct sfp *sfp = data;
+ sfp_hwmon_exit(sfp);
+
cancel_delayed_work_sync(&sfp->poll);
cancel_delayed_work_sync(&sfp->timeout);
if (sfp->i2c_mii) {
@@ -1869,7 +2147,6 @@ static int sfp_probe(struct platform_device *pdev)
const struct sff_data *sff;
struct i2c_adapter *i2c;
struct sfp *sfp;
- bool poll = false;
int err, i;
sfp = sfp_alloc(&pdev->dev);
@@ -1964,6 +2241,11 @@ static int sfp_probe(struct platform_device *pdev)
sfp->state |= SFP_F_RATE_SELECT;
sfp_set_state(sfp, sfp->state);
sfp_module_tx_disable(sfp);
+ if (sfp->state & SFP_F_PRESENT) {
+ rtnl_lock();
+ sfp_sm_event(sfp, SFP_E_INSERT);
+ rtnl_unlock();
+ }
for (i = 0; i < GPIO_MAX; i++) {
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1971,7 +2253,7 @@ static int sfp_probe(struct platform_device *pdev)
sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
if (!sfp->gpio_irq[i]) {
- poll = true;
+ sfp->need_poll = true;
continue;
}
@@ -1983,11 +2265,11 @@ static int sfp_probe(struct platform_device *pdev)
dev_name(sfp->dev), sfp);
if (err) {
sfp->gpio_irq[i] = 0;
- poll = true;
+ sfp->need_poll = true;
}
}
- if (poll)
+ if (sfp->need_poll)
mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
/* We could have an issue in cases no Tx disable pin is available or
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cac64b96d545..2a91c192659f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -855,6 +855,8 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ sl_free_netdev(sl->dev);
+ free_netdev(sl->dev);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8156b33ee3e7..ca70a1d840eb 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2074,7 +2074,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
if (team_port_txable(port)) {
if (port->state.speed != SPEED_UNKNOWN)
speed += port->state.speed;
@@ -2083,6 +2084,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.duplex = port->state.duplex;
}
}
+ rcu_read_unlock();
+
cmd->base.speed = speed ? : SPEED_UNKNOWN;
return 0;
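The team change above converts the port-list walk to the RCU reader pattern, protecting the traversal against concurrent port removal. A minimal self-contained sketch of that reader shape (generic item type, not team's real structures):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	struct list_head list;
	int value;
};

/* Reader: safe against concurrent removal of list entries */
static int sum_values(struct list_head *head)
{
	struct item *it;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, head, list)
		sum += it->value;
	rcu_read_unlock();

	return sum;
}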
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a8d3141582a5..683d371e6e82 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -136,10 +136,10 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
struct tun_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -313,8 +313,8 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
tfile->napi_enabled = napi_en;
tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
- netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
+ NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
}
}
@@ -1167,10 +1167,10 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
p = per_cpu_ptr(tun->pcpu_stats, i);
do {
start = u64_stats_fetch_begin(&p->syncp);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
+ rxpackets = u64_stats_read(&p->rx_packets);
+ rxbytes = u64_stats_read(&p->rx_bytes);
+ txpackets = u64_stats_read(&p->tx_packets);
+ txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rxpackets;
@@ -1998,8 +1998,8 @@ drop:
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(stats);
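The tun statistics are converted from plain u64 fields to the u64_stats_t accessors. A hedged sketch of the writer/reader discipline behind that API ("my_stats" is illustrative):

#include <linux/u64_stats_sync.h>

struct my_stats {
	u64_stats_t packets;
	struct u64_stats_sync syncp;
};

/* Writer: updates are bracketed so 32-bit readers see a consistent
 * 64-bit value.
 */
static void my_stats_inc(struct my_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* Reader: retry the snapshot if a writer interleaved */
static u64 my_stats_get(struct my_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}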
@@ -2052,8 +2052,8 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += ret;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, ret);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);
@@ -2147,8 +2147,8 @@ done:
/* caller is in process context, */
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len + vlan_hlen;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);
@@ -2290,7 +2290,13 @@ static void tun_free_netdev(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
BUG_ON(!(list_empty(&tun->disabled)));
+
free_percpu(tun->pcpu_stats);
+ /* We clear pcpu_stats so that tun_set_iff() can tell if
+ * tun_free_netdev() has been called from register_netdevice().
+ */
+ tun->pcpu_stats = NULL;
+
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2505,8 +2511,8 @@ build:
*/
stats = this_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += datasize;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, datasize);
u64_stats_update_end(&stats->syncp);
if (rxhash)
@@ -2782,9 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
- err = dev_get_valid_name(net, dev, name);
- if (err < 0)
- goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
@@ -2859,8 +2862,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err_detach:
tun_detach_all(dev);
- /* register_netdevice() already called tun_free_netdev() */
- goto err_free_dev;
+ /* We are here because register_netdevice() has failed.
+ * If register_netdevice() already called tun_free_netdev()
+ * while dealing with the error, tun->pcpu_stats has been cleared.
+ */
+ if (!tun->pcpu_stats)
+ goto err_free_dev;
err_free_flow:
tun_flow_uninit(tun);
diff --git a/drivers/net/usb/aqc111.h b/drivers/net/usb/aqc111.h
index 4d68b3a6067c..b562db4da337 100644
--- a/drivers/net/usb/aqc111.h
+++ b/drivers/net/usb/aqc111.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later
- * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (C) 2002-2003 TiVo Inc.
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 011bd4cb546e..af3994e0853b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
- if (ret < 0) {
+ if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
goto free;
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index daa54486ab09..93044cf1417a 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -827,6 +827,7 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
.get_link_ksettings = ax88179_get_link_ksettings,
.set_link_ksettings = ax88179_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void ax88179_set_multicast(struct net_device *net)
@@ -1214,6 +1215,32 @@ static int ax88179_led_setting(struct usbnet *dev)
return 0;
}
+static void ax88179_get_mac_addr(struct usbnet *dev)
+{
+ u8 mac[ETH_ALEN];
+
+ /* Maybe the boot loader passed the MAC address via device tree */
+ if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from device tree");
+ } else {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, mac);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from ASIX chip");
+ }
+
+ if (is_valid_ether_addr(mac)) {
+ memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ } else {
+ netdev_info(dev->net, "invalid MAC address, using random\n");
+ eth_hw_addr_random(dev->net);
+ }
+
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
+ dev->net->dev_addr);
+}
+
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
u8 buf[5];
@@ -1240,8 +1267,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
msleep(100);
- ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
- ETH_ALEN, dev->net->dev_addr);
+ /* Read MAC address from DTB or asix chip */
+ ax88179_get_mac_addr(dev);
memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
/* RX bulk configuration */
@@ -1541,8 +1568,8 @@ static int ax88179_reset(struct usbnet *dev)
/* Ethernet PHY Auto Detach*/
ax88179_auto_detach(dev, 0);
- ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
- dev->net->dev_addr);
+ /* Read MAC address from DTB or asix chip */
+ ax88179_get_mac_addr(dev);
/* RX bulk configuration */
memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe630438f67b..0cdb2ce47645 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -766,6 +766,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* ThinkPad Thunderbolt 3 Dock Gen 2 (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a245597a3902..c2c82e6391b4 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -579,7 +579,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
- if (err < sizeof(max_datagram_size)) {
+ if (err != sizeof(max_datagram_size)) {
dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
goto out;
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 74849da031fa..ca827802f291 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1214,8 +1214,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
* This needs to be a tasklet otherwise we will
* end up recursively calling this function.
*/
-static void hso_unthrottle_tasklet(struct hso_serial *serial)
+static void hso_unthrottle_tasklet(unsigned long data)
{
+ struct hso_serial *serial = (struct hso_serial *)data;
unsigned long flags;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1265,7 +1266,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
/* Force default termio settings */
_hso_serial_set_termios(tty, NULL);
tasklet_init(&serial->unthrottle_tasklet,
- (void (*)(unsigned long))hso_unthrottle_tasklet,
+ hso_unthrottle_tasklet,
(unsigned long)serial);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f24a1b0b801f..cf1f3f0a4b9b 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3995,9 +3995,6 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
u32 buf;
int ret;
- int event;
-
- event = message.event;
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 56d334b9ad45..4196c0e32740 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1371,6 +1371,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
+ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d4a95b50bda6..c5ebf35d2488 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,9 +24,11 @@
#include <linux/suspend.h>
#include <linux/atomic.h>
#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <crypto/hash.h>
/* Information for net-next */
-#define NETNEXT_VERSION "10"
+#define NETNEXT_VERSION "11"
/* Information for net */
#define NET_VERSION "10"
@@ -54,8 +56,11 @@
#define PLA_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
+#define PLA_UPHY_TIMER 0xd388
#define PLA_SUSPEND_FLAG 0xd38a
#define PLA_INDICATE_FALG 0xd38c
+#define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */
+#define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */
#define PLA_EXTRA_STATUS 0xd398
#define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02
@@ -110,7 +115,12 @@
#define USB_CONNECT_TIMER 0xcbf8
#define USB_MSC_TIMER 0xcbfc
#define USB_BURST_SIZE 0xcfc0
+#define USB_FW_FIX_EN0 0xcfca
+#define USB_FW_FIX_EN1 0xcfcc
#define USB_LPM_CONFIG 0xcfd8
+#define USB_CSTMR 0xcfef /* RTL8153A */
+#define USB_FW_CTRL 0xd334 /* RTL8153B */
+#define USB_FC_TIMER 0xd340
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
@@ -126,6 +136,7 @@
#define USB_LPM_CTRL 0xd41a
#define USB_BMU_RESET 0xd4b0
#define USB_U1U2_TIMER 0xd4da
+#define USB_FW_TASK 0xd4e8 /* RTL8153B */
#define USB_UPS_CTRL 0xd800
#define USB_POWER_CUT 0xd80a
#define USB_MISC_0 0xd81a
@@ -133,18 +144,19 @@
#define USB_AFE_CTRL2 0xd824
#define USB_UPS_CFG 0xd842
#define USB_UPS_FLAGS 0xd848
+#define USB_WDT1_CTRL 0xe404
#define USB_WDT11_CTRL 0xe43c
-#define USB_BP_BA 0xfc26
-#define USB_BP_0 0xfc28
-#define USB_BP_1 0xfc2a
-#define USB_BP_2 0xfc2c
-#define USB_BP_3 0xfc2e
-#define USB_BP_4 0xfc30
-#define USB_BP_5 0xfc32
-#define USB_BP_6 0xfc34
-#define USB_BP_7 0xfc36
-#define USB_BP_EN 0xfc38
-#define USB_BP_8 0xfc38
+#define USB_BP_BA PLA_BP_BA
+#define USB_BP_0 PLA_BP_0
+#define USB_BP_1 PLA_BP_1
+#define USB_BP_2 PLA_BP_2
+#define USB_BP_3 PLA_BP_3
+#define USB_BP_4 PLA_BP_4
+#define USB_BP_5 PLA_BP_5
+#define USB_BP_6 PLA_BP_6
+#define USB_BP_7 PLA_BP_7
+#define USB_BP_EN PLA_BP_EN /* RTL8153A */
+#define USB_BP_8 0xfc38 /* RTL8153B */
#define USB_BP_9 0xfc3a
#define USB_BP_10 0xfc3c
#define USB_BP_11 0xfc3e
@@ -175,6 +187,7 @@
#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_PHY_PATCH_STAT 0xb800
#define OCP_PHY_PATCH_CMD 0xb820
+#define OCP_PHY_LOCK 0xb82e
#define OCP_ADC_IOFFSET 0xbcfc
#define OCP_ADC_CFG 0xbc06
#define OCP_SYSCLK_CFG 0xc416
@@ -185,6 +198,7 @@
#define SRAM_10M_AMP1 0x8080
#define SRAM_10M_AMP2 0x8082
#define SRAM_IMPEDANCE 0x8084
+#define SRAM_PHY_LOCK 0xb82e
/* PLA_RCR */
#define RCR_AAP 0x00000001
@@ -346,7 +360,12 @@
/* PLA_INDICATE_FALG */
#define UPCOMING_RUNTIME_D3 BIT(0)
+/* PLA_MACDBG_PRE and PLA_MACDBG_POST */
+#define DEBUG_OE BIT(0)
+#define DEBUG_LTSSM 0x0082
+
/* PLA_EXTRA_STATUS */
+#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
/* USB_USB2PHY */
@@ -368,6 +387,12 @@
#define STAT_SPEED_HIGH 0x0000
#define STAT_SPEED_FULL 0x0002
+/* USB_FW_FIX_EN0 */
+#define FW_FIX_SUSPEND BIT(14)
+
+/* USB_FW_FIX_EN1 */
+#define FW_IP_RESET_EN BIT(9)
+
/* USB_LPM_CONFIG */
#define LPM_U1U2_EN BIT(0)
@@ -392,12 +417,24 @@
#define OWN_UPDATE BIT(0)
#define OWN_CLEAR BIT(1)
+/* USB_FW_TASK */
+#define FC_PATCH_TASK BIT(1)
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
#define RESUME_INDICATE 0x0001
+/* USB_CSTMR */
+#define FORCE_SUPER BIT(0)
+
+/* USB_FW_CTRL */
+#define FLOW_CTRL_PATCH_OPT BIT(1)
+
+/* USB_FC_TIMER */
+#define CTRL_TIMER_EN BIT(15)
+
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
#define RX_ZERO_EN 0x0080
@@ -419,6 +456,9 @@
#define COALESCE_HIGH 250000U
#define COALESCE_SLOW 524280U
+/* USB_WDT1_CTRL */
+#define WTD1_EN BIT(0)
+
/* USB_WDT11_CTRL */
#define TIMER11_EN 0x0001
@@ -539,6 +579,9 @@ enum spd_duplex {
/* OCP_PHY_PATCH_CMD */
#define PATCH_REQUEST BIT(4)
+/* OCP_PHY_LOCK */
+#define PATCH_LOCK BIT(0)
+
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
@@ -563,6 +606,9 @@ enum spd_duplex {
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* SRAM_PHY_LOCK */
+#define PHY_PATCH_LOCK 0x0001
+
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
#define BND_MASK 0x0004
@@ -570,6 +616,8 @@ enum spd_duplex {
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
+#define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
@@ -622,6 +670,7 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
+ LENOVO_MACPASSTHRU,
};
/* Define these values to match your device */
@@ -736,16 +785,16 @@ struct r8152 {
struct tasklet_struct tx_tl;
struct rtl_ops {
- void (*init)(struct r8152 *);
- int (*enable)(struct r8152 *);
- void (*disable)(struct r8152 *);
- void (*up)(struct r8152 *);
- void (*down)(struct r8152 *);
- void (*unload)(struct r8152 *);
- int (*eee_get)(struct r8152 *, struct ethtool_eee *);
- int (*eee_set)(struct r8152 *, struct ethtool_eee *);
- bool (*in_nway)(struct r8152 *);
- void (*hw_phy_cfg)(struct r8152 *);
+ void (*init)(struct r8152 *tp);
+ int (*enable)(struct r8152 *tp);
+ void (*disable)(struct r8152 *tp);
+ void (*up)(struct r8152 *tp);
+ void (*down)(struct r8152 *tp);
+ void (*unload)(struct r8152 *tp);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ bool (*in_nway)(struct r8152 *tp);
+ void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
@@ -766,6 +815,19 @@ struct r8152 {
u32 ctap_short_off:1;
} ups_info;
+#define RTL_VER_SIZE 32
+
+ struct rtl_fw {
+ const char *fw_name;
+ const struct firmware *fw;
+
+ char version[RTL_VER_SIZE];
+ int (*pre_fw)(struct r8152 *tp);
+ int (*post_fw)(struct r8152 *tp);
+
+ bool retry;
+ } rtl_fw;
+
atomic_t rx_count;
bool eee_en;
@@ -788,6 +850,131 @@ struct r8152 {
u8 autoneg;
};
+/**
+ * struct fw_block - block type and total length
+ * @type: type of the current block, such as RTL_FW_END, RTL_FW_PLA,
+ * RTL_FW_USB and so on.
+ * @length: total length of the current block.
+ */
+struct fw_block {
+ __le32 type;
+ __le32 length;
+} __packed;
+
+/**
+ * struct fw_header - header of the firmware file
+ * @checksum: SHA-256 checksum calculated over the whole file except the
+ *	checksum field itself; that is, over everything from the version
+ *	field to the end of the file.
+ * @version: version of this firmware.
+ * @blocks: the first firmware block of the file
+ */
+struct fw_header {
+ u8 checksum[32];
+ char version[RTL_VER_SIZE];
+ struct fw_block blocks[0];
+} __packed;
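A sketch of the verification rule this documents: hash everything after the 32-byte checksum field and compare against it. sha256() here is a hypothetical helper standing in for the real backend (the driver itself uses <crypto/hash.h>):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: any SHA-256 implementation with this shape */
extern void sha256(const void *data, size_t len, uint8_t out[32]);

static int fw_checksum_ok(const uint8_t *file, size_t len)
{
	uint8_t digest[32];

	if (len <= 32)
		return 0;

	/* Hash from the version field (offset 32) to the end of file */
	sha256(file + 32, len - 32, digest);
	return !memcmp(file, digest, sizeof(digest));
}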
+
+/**
+ * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
+ * The layout of the firmware block is:
+ * <struct fw_mac> + <info> + <firmware data>.
+ * @fw_offset: offset of the firmware binary data. The start address of
+ * the data would be the address of struct fw_mac + @fw_offset.
+ * @fw_reg: the register to load the firmware. Depends on chip.
+ * @bp_ba_addr: the register to write break point base address. Depends on
+ * chip.
+ * @bp_ba_value: break point base address. Depends on chip.
+ * @bp_en_addr: the register to write break point enabled mask. Depends
+ * on chip.
+ * @bp_en_value: break point enabled mask. Depends on the firmware.
+ * @bp_start: the start register of break points. Depends on chip.
+ * @bp_num: the number of break points which need to be set for this
+ *	firmware. Depends on the firmware.
+ * @bp: break points. Depends on firmware.
+ * @fw_ver_reg: the register to store the fw version.
+ * @fw_ver_data: the firmware version of the current type.
+ * @info: additional information for debugging; it is followed by the
+ *	firmware binary data.
+ */
+struct fw_mac {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ __le16 bp_ba_addr;
+ __le16 bp_ba_value;
+ __le16 bp_en_addr;
+ __le16 bp_en_value;
+ __le16 bp_start;
+ __le16 bp_num;
+ __le16 bp[16]; /* any value determined by firmware */
+ __le32 reserved;
+ __le16 fw_ver_reg;
+ u8 fw_ver_data;
+ char info[0];
+} __packed;
+
+/**
+ * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START.
+ * This is used to set patch key when loading the firmware of PHY.
+ * @key_reg: the register to write the patch key.
+ * @key_data: patch key.
+ */
+struct fw_phy_patch_key {
+ struct fw_block blk_hdr;
+ __le16 key_reg;
+ __le16 key_data;
+ __le32 reserved;
+} __packed;
+
+/**
+ * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC.
+ * The layout of the firmware block is:
+ * <struct fw_phy_nc> + <info> + <firmware data>.
+ * @fw_offset: offset of the firmware binary data. The start address of
+ * the data would be the address of struct fw_phy_nc + @fw_offset.
+ * @fw_reg: the register to load the firmware. Depends on chip.
+ * @ba_reg: the register to write the base address. Depends on chip.
+ * @ba_data: base address. Depends on chip.
+ * @patch_en_addr: the register of enabling patch mode. Depends on chip.
+ * @patch_en_value: patch mode enabled mask. Depends on the firmware.
+ * @mode_reg: the register used to switch the mode.
+ * @mode_pre: the mode to set before loading the firmware.
+ * @mode_post: the mode to set after the firmware has been loaded.
+ * @bp_start: the start register of break points. Depends on chip.
+ * @bp_num: the number of break points which need to be set for this
+ *	firmware. Depends on the firmware.
+ * @bp: break points. Depends on firmware.
+ * @info: additional information for debugging; it is followed by the
+ *	firmware binary data.
+ */
+struct fw_phy_nc {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ __le16 ba_reg;
+ __le16 ba_data;
+ __le16 patch_en_addr;
+ __le16 patch_en_value;
+ __le16 mode_reg;
+ __le16 mode_pre;
+ __le16 mode_post;
+ __le16 reserved;
+ __le16 bp_start;
+ __le16 bp_num;
+ __le16 bp[4];
+ char info[0];
+} __packed;
+
+enum rtl_fw_type {
+ RTL_FW_END = 0,
+ RTL_FW_PLA,
+ RTL_FW_USB,
+ RTL_FW_PHY_START,
+ RTL_FW_PHY_STOP,
+ RTL_FW_PHY_NC,
+};
+
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
@@ -1222,38 +1409,52 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
int ret = -EINVAL;
u32 ocp_data;
unsigned char buf[6];
-
- /* test for -AD variant of RTL8153 */
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- if ((ocp_data & AD_MASK) == 0x1000) {
- /* test for MAC address pass-through bit */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
- if ((ocp_data & PASS_THRU_MASK) != 1) {
- netif_dbg(tp, probe, tp->netdev,
- "No efuse for RTL8153-AD MAC pass through\n");
- return -ENODEV;
- }
+ char *mac_obj_name;
+ acpi_object_type mac_obj_type;
+ int mac_strlen;
+
+ if (test_bit(LENOVO_MACPASSTHRU, &tp->flags)) {
+ mac_obj_name = "\\MACA";
+ mac_obj_type = ACPI_TYPE_STRING;
+ mac_strlen = 0x16;
} else {
- /* test for RTL8153-BND and RTL8153-BD */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
- netif_dbg(tp, probe, tp->netdev,
- "Invalid variant for MAC pass through\n");
- return -ENODEV;
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) == 0x1000) {
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1) {
+ netif_dbg(tp, probe, tp->netdev,
+ "No efuse for RTL8153-AD MAC pass through\n");
+ return -ENODEV;
+ }
+ } else {
+ /* test for RTL8153-BND and RTL8153-BD */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
+ netif_dbg(tp, probe, tp->netdev,
+ "Invalid variant for MAC pass through\n");
+ return -ENODEV;
+ }
}
+
+ mac_obj_name = "\\_SB.AMAC";
+ mac_obj_type = ACPI_TYPE_BUFFER;
+ mac_strlen = 0x17;
}
/* returns _AUXMAC_#AABBCCDDEEFF# */
- status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer);
obj = (union acpi_object *)buffer.pointer;
if (!ACPI_SUCCESS(status))
return -ENODEV;
- if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ if (obj->type != mac_obj_type || obj->string.length != mac_strlen) {
netif_warn(tp, probe, tp->netdev,
"Invalid buffer for pass-thru MAC addr: (%d, %d)\n",
obj->type, obj->string.length);
goto amacout;
}
+
if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
netif_warn(tp, probe, tp->netdev,
@@ -1688,7 +1889,7 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
}
/* r8152_csum_workaround()
- * The hw limites the value the transport offset. When the offset is out of the
+ * The hw limits the value of the transport offset. When the offset is out of
* range, calculate the checksum by sw.
*/
static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
@@ -2178,6 +2379,7 @@ static void tx_bottom(struct r8152 *tp)
int res;
do {
+ struct net_device *netdev = tp->netdev;
struct tx_agg *agg;
if (skb_queue_empty(&tp->tx_queue))
@@ -2188,24 +2390,23 @@ static void tx_bottom(struct r8152 *tp)
break;
res = r8152_tx_agg_fill(tp, agg);
- if (res) {
- struct net_device *netdev = tp->netdev;
+ if (!res)
+ continue;
- if (res == -ENODEV) {
- rtl_set_unplug(tp);
- netif_device_detach(netdev);
- } else {
- struct net_device_stats *stats = &netdev->stats;
- unsigned long flags;
+ if (res == -ENODEV) {
+ rtl_set_unplug(tp);
+ netif_device_detach(netdev);
+ } else {
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned long flags;
- netif_warn(tp, tx_err, netdev,
- "failed tx_urb %d\n", res);
- stats->tx_dropped += agg->skb_num;
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
- spin_lock_irqsave(&tp->tx_lock, flags);
- list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock_irqrestore(&tp->tx_lock, flags);
- }
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
}
} while (res == 0);
}
@@ -3226,6 +3427,688 @@ static void rtl_reset_bmu(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
}
+/* Clear the bp to stop the firmware before loading a new one */
+static void rtl_clear_bp(struct r8152 *tp, u16 type)
+{
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ ocp_write_byte(tp, type, PLA_BP_EN, 0);
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ default:
+ if (type == MCU_TYPE_USB) {
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0);
+ } else {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ }
+ break;
+ }
+
+ ocp_write_word(tp, type, PLA_BP_0, 0);
+ ocp_write_word(tp, type, PLA_BP_1, 0);
+ ocp_write_word(tp, type, PLA_BP_2, 0);
+ ocp_write_word(tp, type, PLA_BP_3, 0);
+ ocp_write_word(tp, type, PLA_BP_4, 0);
+ ocp_write_word(tp, type, PLA_BP_5, 0);
+ ocp_write_word(tp, type, PLA_BP_6, 0);
+ ocp_write_word(tp, type, PLA_BP_7, 0);
+
+ /* wait 3 ms to make sure the firmware is stopped */
+ usleep_range(3000, 6000);
+ ocp_write_word(tp, type, PLA_BP_BA, 0);
+}
+
+static int r8153_patch_request(struct r8152 *tp, bool request)
+{
+ u16 data;
+ int i;
+
+ data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
+ if (request)
+ data |= PATCH_REQUEST;
+ else
+ data &= ~PATCH_REQUEST;
+ ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+
+ for (i = 0; request && i < 5000; i++) {
+ usleep_range(1000, 2000);
+ if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ break;
+ }
+
+ if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
+ netif_err(tp, drv, tp->netdev, "patch request failed\n");
+ r8153_patch_request(tp, false);
+ return -ETIME;
+ } else {
+ return 0;
+ }
+}
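+
+ /* Acquiring the patch request can take a while: PATCH_READY is polled
+ * up to 5000 times at 1-2 ms intervals before the request is rolled
+ * back with -ETIME.
+ */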
+
+static int r8153_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key)
+{
+ if (r8153_patch_request(tp, true)) {
+ dev_err(&tp->intf->dev, "patch request failed\n");
+ return -ETIME;
+ }
+
+ sram_write(tp, key_addr, patch_key);
+ sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK);
+
+ return 0;
+}
+
+static int r8153_post_ram_code(struct r8152 *tp, u16 key_addr)
+{
+ u16 data;
+
+ sram_write(tp, 0x0000, 0x0000);
+
+ data = ocp_reg_read(tp, OCP_PHY_LOCK);
+ data &= ~PATCH_LOCK;
+ ocp_reg_write(tp, OCP_PHY_LOCK, data);
+
+ sram_write(tp, key_addr, 0x0000);
+
+ r8153_patch_request(tp, false);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+
+ return 0;
+}
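+
+ /* r8153_pre_ram_code() and r8153_post_ram_code() are meant to bracket
+ * a PHY RAM-code update: acquire the patch request and write the patch
+ * key and lock, apply the code in between, then clear the lock and key,
+ * release the request and restore the OCP base. Sketch of the expected
+ * call sequence:
+ *
+ *	if (!r8153_pre_ram_code(tp, key_addr, patch_key)) {
+ *		... load the PHY RAM code ...
+ *		r8153_post_ram_code(tp, key_addr);
+ *	}
+ */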
+
+static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy)
+{
+ u32 length;
+ u16 fw_offset, fw_reg, ba_reg, patch_en_addr, mode_reg, bp_start;
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ fw_reg = 0xa014;
+ ba_reg = 0xa012;
+ patch_en_addr = 0xa01a;
+ mode_reg = 0xb820;
+ bp_start = 0xa000;
+ break;
+ default:
+ goto out;
+ }
+
+ fw_offset = __le16_to_cpu(phy->fw_offset);
+ if (fw_offset < sizeof(*phy)) {
+ dev_err(&tp->intf->dev, "fw_offset too small\n");
+ goto out;
+ }
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ if (length < fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= __le16_to_cpu(phy->fw_offset);
+ if (!length || (length & 1)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->fw_reg) != fw_reg) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->ba_reg) != ba_reg) {
+ dev_err(&tp->intf->dev, "invalid base address register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->patch_en_addr) != patch_en_addr) {
+ dev_err(&tp->intf->dev,
+ "invalid patch mode enabled register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->mode_reg) != mode_reg) {
+ dev_err(&tp->intf->dev,
+ "invalid register to switch the mode\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->bp_start) != bp_start) {
+ dev_err(&tp->intf->dev,
+ "invalid start register of break point\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->bp_num) > 4) {
+ dev_err(&tp->intf->dev, "invalid break point number\n");
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
+{
+ u16 fw_reg, bp_ba_addr, bp_en_addr, bp_start, fw_offset;
+ bool rc = false;
+ u32 length, type;
+ int i, max_bp;
+
+ type = __le32_to_cpu(mac->blk_hdr.type);
+ if (type == RTL_FW_PLA) {
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = 0;
+ bp_start = PLA_BP_0;
+ max_bp = 8;
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_08:
+ case RTL_VER_09:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = PLA_BP_EN;
+ bp_start = PLA_BP_0;
+ max_bp = 8;
+ break;
+ default:
+ goto out;
+ }
+ } else if (type == RTL_FW_USB) {
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ fw_reg = 0xf800;
+ bp_ba_addr = USB_BP_BA;
+ bp_en_addr = USB_BP_EN;
+ bp_start = USB_BP_0;
+ max_bp = 8;
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ fw_reg = 0xe600;
+ bp_ba_addr = USB_BP_BA;
+ bp_en_addr = USB_BP2_EN;
+ bp_start = USB_BP_0;
+ max_bp = 16;
+ break;
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ default:
+ goto out;
+ }
+ } else {
+ goto out;
+ }
+
+ fw_offset = __le16_to_cpu(mac->fw_offset);
+ if (fw_offset < sizeof(*mac)) {
+ dev_err(&tp->intf->dev, "fw_offset too small\n");
+ goto out;
+ }
+
+ length = __le32_to_cpu(mac->blk_hdr.length);
+ if (length < fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= fw_offset;
+ if (length < 4 || (length & 3)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->fw_reg) != fw_reg) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_ba_addr) != bp_ba_addr) {
+ dev_err(&tp->intf->dev, "invalid base address register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_en_addr) != bp_en_addr) {
+ dev_err(&tp->intf->dev, "invalid enabled mask register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_start) != bp_start) {
+ dev_err(&tp->intf->dev,
+ "invalid start register of break point\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_num) > max_bp) {
+ dev_err(&tp->intf->dev, "invalid break point number\n");
+ goto out;
+ }
+
+ for (i = __le16_to_cpu(mac->bp_num); i < max_bp; i++) {
+ if (mac->bp[i]) {
+ dev_err(&tp->intf->dev, "unused bp%u is not zero\n", i);
+ goto out;
+ }
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+/* Verify the checksum for the firmware file. It is calculated from the version
+ * field to the end of the file. Compare the result with the checksum field to
+ * make sure the file is correct.
+ */
+static long rtl8152_fw_verify_checksum(struct r8152 *tp,
+ struct fw_header *fw_hdr, size_t size)
+{
+ unsigned char checksum[sizeof(fw_hdr->checksum)];
+ struct crypto_shash *alg;
+ struct shash_desc *sdesc;
+ size_t len;
+ long rc;
+
+ alg = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(alg)) {
+ rc = PTR_ERR(alg);
+ goto out;
+ }
+
+ if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) {
+ rc = -EFAULT;
+ dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n",
+ crypto_shash_digestsize(alg));
+ goto free_shash;
+ }
+
+ len = sizeof(*sdesc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(len, GFP_KERNEL);
+ if (!sdesc) {
+ rc = -ENOMEM;
+ goto free_shash;
+ }
+ sdesc->tfm = alg;
+
+ len = size - sizeof(fw_hdr->checksum);
+ rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum);
+ kfree(sdesc);
+ if (rc)
+ goto free_shash;
+
+ if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) {
+ dev_err(&tp->intf->dev, "checksum fail\n");
+ rc = -EFAULT;
+ }
+
+free_shash:
+ crypto_free_shash(alg);
+out:
+ return rc;
+}
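+
+ /* Equivalent check outside the driver (illustrative): hash the file
+ * with SHA-256 starting at offsetof(struct fw_header, version) for
+ * (file size - sizeof(fw_hdr->checksum)) bytes and compare the digest
+ * with the leading checksum field.
+ */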
+
+static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_header *fw_hdr = (struct fw_header *)fw->data;
+ struct fw_mac *pla = NULL, *usb = NULL;
+ struct fw_phy_patch_key *start = NULL;
+ struct fw_phy_nc *phy_nc = NULL;
+ struct fw_block *stop = NULL;
+ long ret = -EFAULT;
+ int i;
+
+ if (fw->size < sizeof(*fw_hdr)) {
+ dev_err(&tp->intf->dev, "file too small\n");
+ goto fail;
+ }
+
+ ret = rtl8152_fw_verify_checksum(tp, fw_hdr, fw->size);
+ if (ret)
+ goto fail;
+
+ ret = -EFAULT;
+
+ for (i = sizeof(*fw_hdr); i < fw->size;) {
+ struct fw_block *block = (struct fw_block *)&fw->data[i];
+ u32 type;
+
+ if ((i + sizeof(*block)) > fw->size)
+ goto fail;
+
+ type = __le32_to_cpu(block->type);
+ switch (type) {
+ case RTL_FW_END:
+ if (__le32_to_cpu(block->length) != sizeof(*block))
+ goto fail;
+ goto fw_end;
+ case RTL_FW_PLA:
+ if (pla) {
+ dev_err(&tp->intf->dev,
+ "multiple PLA firmware encountered");
+ goto fail;
+ }
+
+ pla = (struct fw_mac *)block;
+ if (!rtl8152_is_fw_mac_ok(tp, pla)) {
+ dev_err(&tp->intf->dev,
+ "check PLA firmware failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_USB:
+ if (usb) {
+ dev_err(&tp->intf->dev,
+ "multiple USB firmware encountered");
+ goto fail;
+ }
+
+ usb = (struct fw_mac *)block;
+ if (!rtl8152_is_fw_mac_ok(tp, usb)) {
+ dev_err(&tp->intf->dev,
+ "check USB firmware failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_PHY_START:
+ if (start || phy_nc || stop) {
+ dev_err(&tp->intf->dev,
+ "check PHY_START fail\n");
+ goto fail;
+ }
+
+ if (__le32_to_cpu(block->length) != sizeof(*start)) {
+ dev_err(&tp->intf->dev,
+ "Invalid length for PHY_START\n");
+ goto fail;
+ }
+
+ start = (struct fw_phy_patch_key *)block;
+ break;
+ case RTL_FW_PHY_STOP:
+ if (stop || !start) {
+ dev_err(&tp->intf->dev,
+ "Check PHY_STOP fail\n");
+ goto fail;
+ }
+
+ if (__le32_to_cpu(block->length) != sizeof(*block)) {
+ dev_err(&tp->intf->dev,
+ "Invalid length for PHY_STOP\n");
+ goto fail;
+ }
+
+ stop = block;
+ break;
+ case RTL_FW_PHY_NC:
+ if (!start || stop) {
+ dev_err(&tp->intf->dev,
+ "check PHY_NC fail\n");
+ goto fail;
+ }
+
+ if (phy_nc) {
+ dev_err(&tp->intf->dev,
+ "multiple PHY NC encountered\n");
+ goto fail;
+ }
+
+ phy_nc = (struct fw_phy_nc *)block;
+ if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) {
+ dev_err(&tp->intf->dev,
+ "check PHY NC firmware failed\n");
+ goto fail;
+ }
+
+ break;
+ default:
+ dev_warn(&tp->intf->dev, "unknown block type %u found\n",
+ type);
+ break;
+ }
+
+ /* next block */
+ i += ALIGN(__le32_to_cpu(block->length), 8);
+ }
+
+fw_end:
+ if ((phy_nc || start) && !stop) {
+ dev_err(&tp->intf->dev, "without PHY_STOP\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return ret;
+}
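+
+ /* Ordering rules enforced above: at most one PLA and one USB MAC
+ * block; PHY blocks must appear as PHY_START, then at most one PHY_NC,
+ * then PHY_STOP; unknown block types are skipped with a warning rather
+ * than rejected.
+ */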
+
+static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
+{
+ u16 mode_reg, bp_index;
+ u32 length, i, num;
+ __le16 *data;
+
+ mode_reg = __le16_to_cpu(phy->mode_reg);
+ sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
+ sram_write(tp, __le16_to_cpu(phy->ba_reg),
+ __le16_to_cpu(phy->ba_data));
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ length -= __le16_to_cpu(phy->fw_offset);
+ num = length / 2;
+ data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset));
+
+ ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg));
+ for (i = 0; i < num; i++)
+ ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i]));
+
+ sram_write(tp, __le16_to_cpu(phy->patch_en_addr),
+ __le16_to_cpu(phy->patch_en_value));
+
+ bp_index = __le16_to_cpu(phy->bp_start);
+ num = __le16_to_cpu(phy->bp_num);
+ for (i = 0; i < num; i++) {
+ sram_write(tp, bp_index, __le16_to_cpu(phy->bp[i]));
+ bp_index += 2;
+ }
+
+ sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_post));
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info);
+}
+
+static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
+{
+ u16 bp_en_addr, bp_index, type, bp_num, fw_ver_reg;
+ u32 length;
+ u8 *data;
+ int i;
+
+ switch (__le32_to_cpu(mac->blk_hdr.type)) {
+ case RTL_FW_PLA:
+ type = MCU_TYPE_PLA;
+ break;
+ case RTL_FW_USB:
+ type = MCU_TYPE_USB;
+ break;
+ default:
+ return;
+ }
+
+ rtl_clear_bp(tp, type);
+
+ /* Enable backup/restore of MACDBG. This is required after clearing PLA
+ * break points and before applying the PLA firmware.
+ */
+ if (tp->version == RTL_VER_04 && type == MCU_TYPE_PLA &&
+ !(ocp_read_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST) & DEBUG_OE)) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_PRE, DEBUG_LTSSM);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST, DEBUG_LTSSM);
+ }
+
+ length = __le32_to_cpu(mac->blk_hdr.length);
+ length -= __le16_to_cpu(mac->fw_offset);
+
+ data = (u8 *)mac;
+ data += __le16_to_cpu(mac->fw_offset);
+
+ generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
+ type);
+
+ ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
+ __le16_to_cpu(mac->bp_ba_value));
+
+ bp_index = __le16_to_cpu(mac->bp_start);
+ bp_num = __le16_to_cpu(mac->bp_num);
+ for (i = 0; i < bp_num; i++) {
+ ocp_write_word(tp, type, bp_index, __le16_to_cpu(mac->bp[i]));
+ bp_index += 2;
+ }
+
+ bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
+ if (bp_en_addr)
+ ocp_write_word(tp, type, bp_en_addr,
+ __le16_to_cpu(mac->bp_en_value));
+
+ fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg);
+ if (fw_ver_reg)
+ ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg,
+ mac->fw_ver_data);
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info);
+}
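+
+ /* The application order above matters: old break points are cleared
+ * first (rtl_clear_bp() waits for the running patch to stop), the new
+ * code is written to fw_reg, and only then are the base address, break
+ * points and enable mask programmed, which lets the MCU pick up the
+ * patch.
+ */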
+
+static void rtl8152_apply_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+ const struct firmware *fw;
+ struct fw_header *fw_hdr;
+ struct fw_phy_patch_key *key;
+ u16 key_addr = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rtl_fw->fw))
+ return;
+
+ fw = rtl_fw->fw;
+ fw_hdr = (struct fw_header *)fw->data;
+
+ if (rtl_fw->pre_fw)
+ rtl_fw->pre_fw(tp);
+
+ for (i = offsetof(struct fw_header, blocks); i < fw->size;) {
+ struct fw_block *block = (struct fw_block *)&fw->data[i];
+
+ switch (__le32_to_cpu(block->type)) {
+ case RTL_FW_END:
+ goto post_fw;
+ case RTL_FW_PLA:
+ case RTL_FW_USB:
+ rtl8152_fw_mac_apply(tp, (struct fw_mac *)block);
+ break;
+ case RTL_FW_PHY_START:
+ key = (struct fw_phy_patch_key *)block;
+ key_addr = __le16_to_cpu(key->key_reg);
+ r8153_pre_ram_code(tp, key_addr,
+ __le16_to_cpu(key->key_data));
+ break;
+ case RTL_FW_PHY_STOP:
+ WARN_ON(!key_addr);
+ r8153_post_ram_code(tp, key_addr);
+ break;
+ case RTL_FW_PHY_NC:
+ rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block);
+ break;
+ default:
+ break;
+ }
+
+ i += ALIGN(__le32_to_cpu(block->length), 8);
+ }
+
+post_fw:
+ if (rtl_fw->post_fw)
+ rtl_fw->post_fw(tp);
+
+ strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
+ dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
+}
+
+static void rtl8152_release_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+
+ if (!IS_ERR_OR_NULL(rtl_fw->fw)) {
+ release_firmware(rtl_fw->fw);
+ rtl_fw->fw = NULL;
+ }
+}
+
+static int rtl8152_request_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+ long rc;
+
+ if (rtl_fw->fw || !rtl_fw->fw_name) {
+ dev_info(&tp->intf->dev, "skip request firmware\n");
+ rc = 0;
+ goto result;
+ }
+
+ rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, &tp->intf->dev);
+ if (rc < 0)
+ goto result;
+
+ rc = rtl8152_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ release_firmware(rtl_fw->fw);
+
+result:
+ if (rc) {
+ rtl_fw->fw = ERR_PTR(rc);
+
+ dev_warn(&tp->intf->dev,
+ "unable to load firmware patch %s (%ld)\n",
+ rtl_fw->fw_name, rc);
+ }
+
+ return rc;
+}
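+
+ /* On failure the ERR_PTR() is kept in rtl_fw->fw, so later calls see a
+ * non-NULL pointer and skip the request instead of retrying on every
+ * run of the PHY work (the built-in retry path resets it to NULL once
+ * to try again).
+ */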
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@@ -3370,6 +4253,7 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
+ rtl8152_apply_firmware(tp);
rtl_eee_enable(tp, tp->eee_en);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3377,11 +4261,23 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
set_bit(PHY_RESET, &tp->flags);
}
-static void r8152b_exit_oob(struct r8152 *tp)
+static void wait_oob_link_list_ready(struct r8152 *tp)
{
u32 ocp_data;
int i;
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ usleep_range(1000, 2000);
+ }
+}
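+
+ /* Polls LINK_LIST_READY every 1-2 ms and gives up silently after 1000
+ * iterations (one to two seconds); callers proceed regardless, matching
+ * the open-coded loops this helper replaces.
+ */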
+
+static void r8152b_exit_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -3399,23 +4295,13 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
rtl8152_nic_reset(tp);
@@ -3457,7 +4343,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
static void r8152b_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
- int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -3469,23 +4354,13 @@ static void r8152b_enter_oob(struct r8152 *tp)
rtl_disable(tp);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
@@ -3506,31 +4381,124 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static int r8153_patch_request(struct r8152 *tp, bool request)
+static int r8153_pre_firmware_1(struct r8152 *tp)
{
- u16 data;
int i;
- data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
- if (request)
- data |= PATCH_REQUEST;
- else
- data &= ~PATCH_REQUEST;
- ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+ /* Wait until the WTD1 (watchdog) timer is ready. It takes at most 104 ms. */
+ for (i = 0; i < 104; i++) {
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
- for (i = 0; request && i < 5000; i++) {
- usleep_range(1000, 2000);
- if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ if (!(ocp_data & WTD1_EN))
break;
+ usleep_range(1000, 2000);
}
- if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
- netif_err(tp, drv, tp->netdev, "patch request fail\n");
- r8153_patch_request(tp, false);
- return -ETIME;
- } else {
- return 0;
+ return 0;
+}
+
+static int r8153_post_firmware_1(struct r8152 *tp)
+{
+ /* set USB_BP_4 to support USB_SPEED_SUPER only */
+ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER)
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, BP4_SUPER_ONLY);
+
+ /* reset UPHY timer to 36 ms */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);
+
+ return 0;
+}
+
+static int r8153_pre_firmware_2(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ r8153_pre_firmware_1(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
+ ocp_data &= ~FW_FIX_SUSPEND;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);
+
+ return 0;
+}
+
+static int r8153_post_firmware_2(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ /* enable bp0 if only USB_SPEED_SUPER is supported */
+ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
+ ocp_data |= BIT(0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
+ }
+
+ /* reset UPHY timer to 36 ms */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);
+
+ /* enable U3P3 check, set the counter to 4 */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, U3P3_CHECK_EN | 4);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
+ ocp_data |= FW_FIX_SUSPEND;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+ ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+
+ return 0;
+}
+
+static int r8153_post_firmware_3(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+ ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ return 0;
+}
+
+static int r8153b_pre_firmware_1(struct r8152 *tp)
+{
+ /* enable the fc (flow control) timer and set it to 1 second. */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER,
+ CTRL_TIMER_EN | (1000 / 8));
+
+ return 0;
+}
+
+static int r8153b_post_firmware_1(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ /* enable bp0 for RTL8153-BND */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if (ocp_data & BND_MASK) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
+ ocp_data |= BIT(0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
}
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
+ ocp_data |= FLOW_CTRL_PATCH_OPT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ return 0;
}
static void r8153_aldps_en(struct r8152 *tp, bool enable)
@@ -3567,6 +4535,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
+ rtl8152_apply_firmware(tp);
+
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
data &= ~CTAP_SHORT_EN;
@@ -3639,6 +4609,8 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
+ rtl8152_apply_firmware(tp);
+
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
data = sram_read(tp, SRAM_GREEN_CFG);
@@ -3711,7 +4683,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
r8153_mac_clk_spd(tp, false);
rxdy_gated_en(tp, true);
@@ -3732,23 +4703,13 @@ static void r8153_first_init(struct r8152 *tp)
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
@@ -3773,7 +4734,6 @@ static void r8153_first_init(struct r8152 *tp)
static void r8153_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
- int i;
r8153_mac_clk_spd(tp, true);
@@ -3784,23 +4744,13 @@ static void r8153_enter_oob(struct r8152 *tp)
rtl_disable(tp);
rtl_reset_bmu(tp);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
@@ -4187,11 +5137,22 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
mutex_lock(&tp->control);
+ if (rtl8152_request_firmware(tp) == -ENODEV && tp->rtl_fw.retry) {
+ tp->rtl_fw.retry = false;
+ tp->rtl_fw.fw = NULL;
+
+ /* Delay execution in case request_firmware() is not ready yet. */
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, HZ * 10);
+ goto ignore_once;
+ }
+
tp->rtl_ops.hw_phy_cfg(tp);
rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex,
tp->advertising);
+ignore_once:
mutex_unlock(&tp->control);
usb_autopm_put_interface(tp->intf);
@@ -4229,6 +5190,11 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
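+
+ /* The delayed hw_phy_work may still be pending from the firmware retry
+ * path; flush it and run it synchronously so the PHY is configured
+ * before the device is brought up.
+ */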
+ if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) {
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ rtl_hw_phy_work_func_t(&tp->hw_phy_work.work);
+ }
+
res = alloc_all_mem(tp);
if (res)
goto out;
@@ -4283,10 +5249,10 @@ static int rtl8152_close(struct net_device *netdev)
unregister_pm_notifier(&tp->pm_notifier);
#endif
tasklet_disable(&tp->tx_tl);
- napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ napi_disable(&tp->napi);
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
@@ -4552,10 +5518,10 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
netif_stop_queue(netdev);
tasklet_disable(&tp->tx_tl);
- napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
@@ -4673,7 +5639,7 @@ static int rtl8152_system_resume(struct r8152 *tp)
netif_device_attach(netdev);
- if (netif_running(netdev) && netdev->flags & IFF_UP) {
+ if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
tp->rtl_ops.up(tp);
netif_carrier_off(netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -4875,6 +5841,9 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
+ if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
+ strlcpy(info->fw_version, tp->rtl_fw.version,
+ sizeof(info->fw_version));
}
static
@@ -5244,9 +6213,15 @@ static int rtl8152_set_tunable(struct net_device *netdev,
}
if (tp->rx_copybreak != val) {
- napi_disable(&tp->napi);
- tp->rx_copybreak = val;
- napi_enable(&tp->napi);
+ if (netdev->flags & IFF_UP) {
+ mutex_lock(&tp->control);
+ napi_disable(&tp->napi);
+ tp->rx_copybreak = val;
+ napi_enable(&tp->napi);
+ mutex_unlock(&tp->control);
+ } else {
+ tp->rx_copybreak = val;
+ }
}
break;
default:
@@ -5274,9 +6249,15 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
return -EINVAL;
if (tp->rx_pending != ring->rx_pending) {
- napi_disable(&tp->napi);
- tp->rx_pending = ring->rx_pending;
- napi_enable(&tp->napi);
+ if (netdev->flags & IFF_UP) {
+ mutex_lock(&tp->control);
+ napi_disable(&tp->napi);
+ tp->rx_pending = ring->rx_pending;
+ napi_enable(&tp->napi);
+ mutex_unlock(&tp->control);
+ } else {
+ tp->rx_pending = ring->rx_pending;
+ }
}
return 0;
@@ -5499,6 +6480,47 @@ static int rtl_ops_init(struct r8152 *tp)
return ret;
}
+#define FIRMWARE_8153A_2 "rtl_nic/rtl8153a-2.fw"
+#define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw"
+#define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw"
+#define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw"
+
+MODULE_FIRMWARE(FIRMWARE_8153A_2);
+MODULE_FIRMWARE(FIRMWARE_8153A_3);
+MODULE_FIRMWARE(FIRMWARE_8153A_4);
+MODULE_FIRMWARE(FIRMWARE_8153B_2);
+
+static int rtl_fw_init(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+
+ switch (tp->version) {
+ case RTL_VER_04:
+ rtl_fw->fw_name = FIRMWARE_8153A_2;
+ rtl_fw->pre_fw = r8153_pre_firmware_1;
+ rtl_fw->post_fw = r8153_post_firmware_1;
+ break;
+ case RTL_VER_05:
+ rtl_fw->fw_name = FIRMWARE_8153A_3;
+ rtl_fw->pre_fw = r8153_pre_firmware_2;
+ rtl_fw->post_fw = r8153_post_firmware_2;
+ break;
+ case RTL_VER_06:
+ rtl_fw->fw_name = FIRMWARE_8153A_4;
+ rtl_fw->post_fw = r8153_post_firmware_3;
+ break;
+ case RTL_VER_09:
+ rtl_fw->fw_name = FIRMWARE_8153B_2;
+ rtl_fw->pre_fw = r8153b_pre_firmware_1;
+ rtl_fw->post_fw = r8153b_post_firmware_1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
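+
+ /* Firmware selection is keyed purely on the MCU version detected at
+ * probe time; versions without an entry simply run on their ROM code,
+ * since rtl8152_apply_firmware() bails out when no file was requested.
+ */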
+
static u8 rtl_get_version(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
@@ -5606,6 +6628,8 @@ static int rtl8152_probe(struct usb_interface *intf,
if (ret)
goto out;
+ rtl_fw_init(tp);
+
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
@@ -5632,8 +6656,13 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
+ set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
- (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
+ (!strcmp(udev->serial, "000001000000") ||
+ !strcmp(udev->serial, "000002000000"))) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
}
@@ -5676,6 +6705,10 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
+#if IS_BUILTIN(CONFIG_USB_RTL8152)
+ /* Retry in case request_firmware() is not ready yet. */
+ tp->rtl_fw.retry = true;
+#endif
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
@@ -5721,6 +6754,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
+ rtl8152_release_firmware(tp);
free_netdev(tp->netdev);
}
}
@@ -5752,6 +6786,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dde05e2fdc3e..30e511c2c8d0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,6 +1573,13 @@ static void usbnet_bh (struct timer_list *t)
}
}
+static void usbnet_bh_tasklet(unsigned long data)
+{
+ struct timer_list *t = (struct timer_list *)data;
+
+ usbnet_bh(t);
+}
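+
+ /* Wrapper so the tasklet callback has the prototype tasklets expect
+ * (an unsigned long argument) instead of casting usbnet_bh(), which
+ * takes a struct timer_list *; calling through a mismatched function
+ * pointer is undefined behaviour and breaks control-flow-integrity
+ * checking.
+ */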
+
/*-------------------------------------------------------------------------
*
@@ -1700,7 +1707,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = (void (*)(unsigned long))usbnet_bh;
+ dev->bh.func = usbnet_bh_tasklet;
dev->bh.data = (unsigned long)&dev->delay;
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9f3c839f9e5f..a552df37a347 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -260,14 +260,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- if (!rcv_xdp) {
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
- }
+ if (!rcv_xdp)
+ dev_lstats_add(dev, length);
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -281,26 +275,11 @@ drop:
return NETDEV_TX_OK;
}
-static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
struct veth_priv *priv = netdev_priv(dev);
- int cpu;
-
- result->packets = 0;
- result->bytes = 0;
- for_each_possible_cpu(cpu) {
- struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
- u64 packets, bytes;
- unsigned int start;
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- result->packets += packets;
- result->bytes += bytes;
- }
+ dev_lstats_read(dev, packets, bytes);
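+ /* dev_lstats_read() folds the per-cpu pcpu_lstats counters, with
+ * u64_stats syncp retry loops, exactly as the removed open-coded
+ * loop did.
+ */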
return atomic64_read(&priv->dropped);
}
@@ -335,11 +314,11 @@ static void veth_get_stats64(struct net_device *dev,
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct veth_rq_stats rx;
- struct pcpu_lstats tx;
+ u64 packets, bytes;
- tot->tx_dropped = veth_stats_tx(&tx, dev);
- tot->tx_bytes = tx.bytes;
- tot->tx_packets = tx.packets;
+ tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+ tot->tx_bytes = bytes;
+ tot->tx_packets = packets;
veth_stats_rx(&rx, dev);
tot->rx_dropped = rx.xdp_drops;
@@ -349,9 +328,9 @@ static void veth_get_stats64(struct net_device *dev,
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped += veth_stats_tx(&tx, peer);
- tot->rx_bytes += tx.bytes;
- tot->rx_packets += tx.packets;
+ tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+ tot->rx_bytes += bytes;
+ tot->rx_packets += packets;
veth_stats_rx(&rx, peer);
tot->tx_bytes += rx.xdp_bytes;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5a635f028bdc..4d7d5434cc5d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2445,11 +2445,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (!prog && !old_prog)
return 0;
- if (prog) {
- prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
- }
+ if (prog)
+ bpf_prog_add(prog, vi->max_queue_pairs - 1);
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 14e324b84617..e8563acf98e8 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -47,13 +47,7 @@ static int vsockmon_close(struct net_device *dev)
static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len = skb->len;
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += len;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_lstats_add(dev, skb->len);
dev_kfree_skb(skb);
@@ -63,30 +57,9 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
- u64 bytes = 0, packets = 0;
-
- for_each_possible_cpu(i) {
- const struct pcpu_lstats *vstats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- vstats = per_cpu_ptr(dev->lstats, i);
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
- do {
- start = u64_stats_fetch_begin_irq(&vstats->syncp);
- tbytes = vstats->bytes;
- tpackets = vstats->packets;
- } while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
-
- packets += tpackets;
- bytes += tbytes;
- }
-
- stats->rx_packets = packets;
stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
stats->tx_bytes = 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8869154fad88..bf04bc2e68c2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -793,8 +793,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
- const u8 *mac, __u16 state,
+static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state,
__be32 src_vni, __u16 ndm_flags)
{
struct vxlan_fdb *f;
@@ -835,7 +834,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return -ENOSPC;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+ f = vxlan_fdb_alloc(mac, state, src_vni, ndm_flags);
if (!f)
return -ENOMEM;
@@ -3176,9 +3175,29 @@ static void vxlan_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
+static int vxlan_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+ dst->remote_ifindex);
+
+ if (!lowerdev) {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ return 0;
+ }
+
+ return __ethtool_get_link_ksettings(lowerdev, cmd);
+}
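+
+ /* Reports the lower device's link settings so ethtool shows the
+ * underlay speed; __dev_get_by_index() needs no extra locking here
+ * because ethtool ops run under RTNL.
+ */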
+
static const struct ethtool_ops vxlan_ethtool_ops = {
- .get_drvinfo = vxlan_get_drvinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = vxlan_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = vxlan_get_link_ksettings,
};
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 73f5892ce6c1..1c640b41ea4c 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -26,7 +26,7 @@ int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
*val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_netdev_queue_stopped,
debugfs_netdev_queue_stopped_get,
NULL, "%llu\n");
@@ -154,7 +154,7 @@ int debugfs_i2400m_suspend_set(void *data, u64 val)
result = 0;
return result;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_suspend,
NULL, debugfs_i2400m_suspend_set,
"%llu\n");
@@ -183,7 +183,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
}
return result;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_reset,
NULL, debugfs_i2400m_reset_set,
"%llu\n");
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 6953f904232f..9659f9e1aaa6 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -511,7 +511,7 @@ error_alloc_netdev:
/*
- * Disconect a i2400m from the system.
+ * Disconnect a i2400m from the system.
*
* i2400m_stop() has been called before, so al the rx and tx contexts
* have been taken down already. Make sure the queue is stopped,
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 46f1427e6e9e..ba326f6c1214 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1781,8 +1781,8 @@ static int adm8211_probe(struct pci_dev *pdev,
{
struct ieee80211_hw *dev;
struct adm8211_priv *priv;
- unsigned long mem_addr, mem_len;
- unsigned int io_addr, io_len;
+ unsigned long mem_len;
+ unsigned int io_len;
int err;
u32 reg;
u8 perm_addr[ETH_ALEN];
@@ -1794,9 +1794,7 @@ static int adm8211_probe(struct pci_dev *pdev,
return err;
}
- io_addr = pci_resource_start(pdev, 0);
io_len = pci_resource_len(pdev, 0);
- mem_addr = pci_resource_start(pdev, 1);
mem_len = pci_resource_len(pdev, 1);
if (io_len < 256 || mem_len < 1024) {
printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 56616d988c96..7b90b8546162 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -30,12 +30,12 @@ config ATH_DEBUG
Right now only ath9k makes use of this.
config ATH_TRACEPOINTS
- bool "Atheros wireless tracing"
- depends on ATH_DEBUG
- depends on EVENT_TRACING
- ---help---
- This option enables tracepoints for atheros wireless drivers.
- Currently, ath9k makes use of this facility.
+ bool "Atheros wireless tracing"
+ depends on ATH_DEBUG
+ depends on EVENT_TRACING
+ ---help---
+ This option enables tracepoints for Atheros wireless drivers.
+ Currently, ath9k makes use of this facility.
config ATH_REG_DYNAMIC_USER_REG_HINTS
bool "Atheros dynamic user regulatory hints"
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
index 65b39c7d035d..e82df5f1ea67 100644
--- a/drivers/net/wireless/ath/ar5523/Kconfig
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: ISC
config AR5523
- tristate "Atheros AR5523 wireless driver support"
- depends on MAC80211 && USB
- select ATH_COMMON
- select FW_LOADER
- ---help---
- This module add support for AR5523 based USB dongles such as D-Link
- DWL-G132, Netgear WPN111 and many more.
+ tristate "Atheros AR5523 wireless driver support"
+ depends on MAC80211 && USB
+ select ATH_COMMON
+ select FW_LOADER
+ ---help---
+ This module adds support for AR5523-based USB dongles such as D-Link
+ DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index b94759daeacc..da2d179430ca 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -255,7 +255,8 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
if (flags & AR5523_CMD_FLAG_MAGIC)
hdr->magic = cpu_to_be32(1 << 24);
- memcpy(hdr + 1, idata, ilen);
+ if (ilen)
+ memcpy(hdr + 1, idata, ilen);
cmd->odata = odata;
cmd->olen = olen;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index eca87f7c5b6c..294fbc1e89ab 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1704,9 +1704,6 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
/* Correctly initialize memory to 0 to prevent garbage
* data crashing system when download firmware
*/
- memset(dest_ring->base_addr_owner_space_unaligned, 0,
- nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
-
dest_ring->base_addr_owner_space =
PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
@@ -2019,8 +2016,6 @@ void ath10k_ce_alloc_rri(struct ath10k *ar)
value |= ar->hw_ce_regs->upd->mask;
ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
}
-
- memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 383d4fa555a8..4f76ba5d78a9 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -11,6 +11,7 @@
#include <linux/property.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/pm_qos.h>
#include <asm/byteorder.h>
#include "core.h"
@@ -677,13 +678,22 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
+ int ret;
u32 param = 0;
- ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
- ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
- ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ ret = ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ if (ret)
+ return ret;
/* Data transfer is not initiated, when reduced Tx completion
* is used for SDIO. disable it until fixed
@@ -700,14 +710,23 @@ static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
else
param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
- ath10k_bmi_write32(ar, hi_acs_flags, param);
+ ret = ath10k_bmi_write32(ar, hi_acs_flags, param);
+ if (ret)
+ return ret;
/* Explicitly set fwlog prints to zero as target may turn it on
* based on scratch registers.
*/
- ath10k_bmi_read32(ar, hi_option_flag, &param);
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
+ if (ret)
+ return ret;
+
param |= HI_OPTION_DISABLE_DBGLOG;
- ath10k_bmi_write32(ar, hi_option_flag, param);
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int ath10k_init_configure_target(struct ath10k *ar)
@@ -1009,6 +1028,7 @@ static int ath10k_download_fw(struct ath10k *ar)
u32 address, data_len;
const void *data;
int ret;
+ struct pm_qos_request latency_qos;
address = ar->hw_params.patch_load_addr;
@@ -1042,8 +1062,14 @@ static int ath10k_download_fw(struct ath10k *ar)
ret);
}
- return ath10k_bmi_fast_download(ar, address,
- data, data_len);
+ memset(&latency_qos, 0, sizeof(latency_qos));
+ pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+
+ pm_qos_remove_request(&latency_qos);
+
+ return ret;
}
void ath10k_core_free_board_files(struct ath10k *ar)
@@ -2565,8 +2591,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (status)
goto err;
- if (ar->hif.bus == ATH10K_BUS_SDIO)
- ath10k_init_sdio(ar, mode);
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ status = ath10k_init_sdio(ar, mode);
+ if (status) {
+ ath10k_err(ar, "failed to init SDIO: %d\n", status);
+ goto err;
+ }
+ }
}
ar->htc.htc_ops.target_send_suspend_complete =
@@ -2787,7 +2818,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
status = ath10k_hif_set_target_log_mode(ar, fw_diag_log);
if (status && status != -EOPNOTSUPP) {
- ath10k_warn(ar, "set traget log mode faileds: %d\n", status);
+ ath10k_warn(ar, "set target log mode failed: %d\n", status);
goto err_hif_stop;
}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 4d7db07db6ba..af68eb5d0776 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -169,6 +169,7 @@ struct ath10k_wmi {
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
+ struct wmi_peer_param_map *peer_param;
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
@@ -963,12 +964,20 @@ struct ath10k {
u32 hw_eeprom_rd;
u32 ht_cap_info;
u32 vht_cap_info;
+ u32 vht_supp_mcs;
u32 num_rf_chains;
u32 max_spatial_stream;
/* protected by conf_mutex */
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
u32 low_5ghz_chan;
u32 high_5ghz_chan;
bool ani_enabled;
+ u32 sys_cap_info;
+
+ /* protected by data_lock */
+ bool hw_rfkill_on;
+
/* protected by conf_mutex */
u8 ps_state_enable;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index b6d2932383cf..2a4498067024 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -703,7 +703,7 @@ static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -786,7 +786,7 @@ static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -891,7 +891,7 @@ static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -951,6 +951,19 @@ static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
},
};
+static const struct ath10k_mem_region wcn399x_hw10_mem_regions[] = {
+ {
+ /* MSA region start is not fixed, hence it is assigned at runtime */
+ .type = ATH10K_MEM_REGION_TYPE_MSA,
+ .len = 0x100000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
@@ -1048,6 +1061,14 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
.size = ARRAY_SIZE(qca4019_hw10_mem_regions),
},
},
+ {
+ .hw_id = WCN3990_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_WCN3990,
+ .region_table = {
+ .regions = wcn399x_hw10_mem_regions,
+ .size = ARRAY_SIZE(wcn399x_hw10_mem_regions),
+ },
+ },
};
static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
@@ -1208,9 +1229,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
- memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
- crash_data->ramdump_buf_len);
- sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ if (crash_data->ramdump_buf_len) {
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
}
mutex_unlock(&ar->dump_mutex);
@@ -1257,6 +1280,9 @@ int ath10k_coredump_register(struct ath10k *ar)
if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+ if (!crash_data->ramdump_buf_len)
+ return 0;
+
crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
if (!crash_data->ramdump_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 09de41922f97..8bf03e8c1d3a 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -115,6 +115,7 @@ enum ath10k_mem_region_type {
ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
ATH10K_MEM_REGION_TYPE_IOSRAM = 6,
ATH10K_MEM_REGION_TYPE_IOREG = 7,
+ ATH10K_MEM_REGION_TYPE_MSA = 8,
};
/* Define a section of the region which should be copied. As not all parts
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index bd2b5628f850..04c50a26a4f4 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1516,7 +1516,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
*len += scnprintf(buf + *len, buf_len - *len,
"No. Preamble Rate_code ");
- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
+ for (i = 0; i < tpc_stats->num_tx_chain; i++)
*len += scnprintf(buf + *len, buf_len - *len,
"tpc_value%d ", i);
@@ -2532,6 +2532,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
+ kfree(ar->debug.tpc_stats_final);
}
int ath10k_debug_register(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 42931a669b02..367539f2c370 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -430,7 +430,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
}
ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
- WMI_PEER_DEBUG, peer_debug_trigger);
+ ar->wmi.peer_param->debug, peer_debug_trigger);
if (ret) {
ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
ret);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 53f1095de8ff..d95b63f133ab 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2073,7 +2073,7 @@ static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
case 24:
pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
break;
- };
+ }
}
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
@@ -2726,7 +2726,7 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
- if (!peer) {
+ if (!peer || !peer->sta) {
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
continue;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index c415e971735b..2451e0fb8ee5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -155,6 +155,9 @@ const struct ath10k_hw_values qca6174_values = {
.num_target_ce_config_wlan = 7,
.ce_desc_meta_data_mask = 0xFFFC,
.ce_desc_meta_data_lsb = 2,
+ .rfkill_pin = 16,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
};
const struct ath10k_hw_values qca99x0_values = {
@@ -1145,6 +1148,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
.rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
const struct ath10k_hw_ops qca6174_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 2ae57c1de7b5..35a362329a4f 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -379,6 +379,9 @@ struct ath10k_hw_values {
u8 num_target_ce_config_wlan;
u16 ce_desc_meta_data_mask;
u8 ce_desc_meta_data_lsb;
+ u32 rfkill_pin;
+ u32 rfkill_cfg;
+ bool rfkill_on_level;
};
extern const struct ath10k_hw_values qca988x_values;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a6d21856b7e7..83cc8778ca1e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
#include "hif.h"
#include "core.h"
@@ -2773,7 +2774,7 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
return -EINVAL;
return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
- WMI_PEER_SMPS_STATE,
+ ar->wmi.peer_param->smps_state,
ath10k_smps_map[smps]);
}
@@ -2930,7 +2931,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
* poked with peer param command.
*/
ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
- WMI_PEER_DUMMY_VAR, 1);
+ ar->wmi.peer_param->dummy_var, 1);
if (ret) {
ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
arvif->bssid, arvif->vdev_id, ret);
@@ -3708,7 +3709,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
struct ieee80211_vif *vif,
enum ath10k_hw_txrx_mode txmode,
enum ath10k_mac_tx_path txpath,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool noque_offchan)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -3738,10 +3739,10 @@ static int ath10k_mac_tx(struct ath10k *ar,
}
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
ieee80211_queue_work(hw, &ar->offchan_tx_work);
@@ -3803,8 +3804,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
@@ -3850,7 +3851,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
if (ret) {
ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
ret);
@@ -3860,8 +3861,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
- skb);
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ skb, skb->len);
if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
@@ -3903,8 +3904,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ar->running_fw->fw_file.fw_features)) {
paddr = dma_map_single(ar->dev, skb->data,
skb->len, DMA_TO_DEVICE);
- if (!paddr)
+ if (dma_mapping_error(ar->dev, paddr)) {
+ ieee80211_free_txskb(ar->hw, skb);
continue;
+ }
ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
if (ret) {
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
@@ -4065,7 +4068,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
if (ret)
return ret;
- skb = ieee80211_tx_dequeue(hw, txq);
+ skb = ieee80211_tx_dequeue_ni(hw, txq);
if (!skb) {
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
@@ -4097,7 +4100,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (unlikely(ret)) {
ath10k_warn(ar, "failed to push frame: %d\n", ret);
@@ -4378,7 +4381,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (ret) {
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
if (is_htt) {
@@ -4754,6 +4757,63 @@ static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
return 0;
}
+static int ath10k_mac_rfkill_config(struct ath10k *ar)
+{
+ u32 param;
+ int ret;
+
+ if (ar->hw_values->rfkill_pin == 0) {
+ ath10k_warn(ar, "ath10k does not support hardware rfkill with this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
+ ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg,
+ ar->hw_values->rfkill_on_level);
+
+ param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL,
+ ar->hw_values->rfkill_on_level) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM,
+ ar->hw_values->rfkill_pin) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO,
+ ar->hw_values->rfkill_cfg);
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->rfkill_config,
+ param);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set rfkill config 0x%x: %d\n",
+ param, ret);
+ return ret;
+ }
+ return 0;
+}
+
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable)
+{
+ enum wmi_tlv_rfkill_enable_radio param;
+ int ret;
+
+ if (enable)
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_ON;
+ else
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param);
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rfkill_enable,
+ param);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rfkill enable param %d: %d\n",
+ param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
@@ -4788,6 +4848,16 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err;
}
+ spin_lock_bh(&ar->data_lock);
+
+ if (ar->hw_rfkill_on) {
+ ar->hw_rfkill_on = false;
+ spin_unlock_bh(&ar->data_lock);
+ goto err;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath10k_err(ar, "Could not init hif: %d\n", ret);
@@ -4801,6 +4871,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_power_down;
}
+ if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
+ ret = ath10k_mac_rfkill_config(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure rfkill: %d", ret);
+ goto err_core_stop;
+ }
+ }
+
param = ar->wmi.pdev_param->pmf_qos;
ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
@@ -4960,7 +5038,8 @@ static void ath10k_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on)
+ ath10k_halt(ar);
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
@@ -5635,6 +5714,37 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = ar->wmi.vdev_param->mgmt_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+}
+
static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -5645,10 +5755,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate, basic_rate_idx, rateidx;
- int ret = 0, hw_rate_code, mcast_rate;
+ u8 rate, rateidx;
+ int ret = 0, mcast_rate;
enum nl80211_band band;
- const struct ieee80211_supported_band *sband;
mutex_lock(&ar->conf_mutex);
@@ -5872,29 +5981,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- if (ath10k_mac_vif_chan(vif, &def)) {
- mutex_unlock(&ar->conf_mutex);
- return;
- }
-
- sband = ar->hw->wiphy->bands[def.chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
- bitrate = sband->bitrates[basic_rate_idx].bitrate;
-
- hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
- if (hw_rate_code < 0) {
- ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
- mutex_unlock(&ar->conf_mutex);
- return;
- }
-
- vdev_param = ar->wmi.vdev_param->mgmt_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- hw_rate_code);
- if (ret)
- ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
- }
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath10k_mac_vif_chan(arvif->vif, &def))
+ ath10k_recalculate_mgmt_rate(ar, vif, &def);
mutex_unlock(&ar->conf_mutex);
}
@@ -6239,7 +6328,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_AUTHORIZE, 1);
+ ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -6330,7 +6419,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_PHYMODE, mode);
+ ar->wmi.peer_param->phymode, mode);
if (err) {
ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
sta->addr, mode, err);
@@ -6338,7 +6427,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_CHAN_WIDTH, bw);
+ ar->wmi.peer_param->chan_width, bw);
if (err)
ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
@@ -6349,7 +6438,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, nss);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_NSS, nss);
+ ar->wmi.peer_param->nss, nss);
if (err)
ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
sta->addr, nss, err);
@@ -6360,7 +6449,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_SMPS_STATE, smps);
+ ar->wmi.peer_param->smps_state, smps);
if (err)
ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
sta->addr, smps, err);
@@ -6434,7 +6523,7 @@ static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_USE_FIXED_PWR, txpwr);
+ ar->wmi.peer_param->use_fixed_power, txpwr);
if (ret) {
ath10k_warn(ar, "failed to set tx power for station ret: %d\n",
ret);
@@ -6515,6 +6604,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
GFP_KERNEL);
if (!arsta->tx_stats) {
+ ath10k_mac_dec_num_stations(arvif, sta);
ret = -ENOMEM;
goto exit;
}
@@ -7419,7 +7509,7 @@ static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_PARAM_FIXED_RATE, rate);
if (err)
- ath10k_warn(ar, "failed to eanble STA %pM peer fixed rate: %d\n",
+ ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n",
sta->addr, err);
return true;
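
The ath10k_mac_rfkill_config() addition above packs three hardware parameters into one WMI pdev param word via FIELD_PREP(). A standalone sketch of that packing, with the mask layout copied from the wmi-tlv.h hunk further down and a field_prep() helper standing in for the kernel macro (all names here are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define RFKILL_CFG_GPIO_PIN_NUM 0x0000003Fu /* GENMASK(5, 0) */
#define RFKILL_CFG_RADIO_LEVEL  0x00000040u /* BIT(6) */
#define RFKILL_CFG_PIN_AS_GPIO  0x00000780u /* GENMASK(10, 7) */

/* Mimic FIELD_PREP(): shift the value to the mask's lowest set bit,
 * then clip it to the mask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	/* qca6174 values from the hw.c hunk: pin 16, cfg 0, active high */
	uint32_t param = field_prep(RFKILL_CFG_RADIO_LEVEL, 1) |
			 field_prep(RFKILL_CFG_GPIO_PIN_NUM, 16) |
			 field_prep(RFKILL_CFG_PIN_AS_GPIO, 0);

	printf("rfkill config word: 0x%x\n", param); /* prints 0x50 */
	return 0;
}
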
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 1fe84948b868..98d83a26ea60 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -72,6 +72,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
void ath10k_mac_wait_tx_complete(struct ath10k *ar);
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable);
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
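
The ath10k_mgmt_over_wmi_tx_work() fix in the mac.c hunks above is worth spelling out: dma_map_single() failure cannot be detected by comparing the returned dma_addr_t against 0 (0 can be a valid bus address), and on failure the skb must be freed rather than leaked. A minimal sketch of the corrected pattern (kernel C; example_map is an illustrative name):

#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *paddr)
{
	*paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* A zero address proves nothing; only this check detects failure. */
	if (dma_mapping_error(dev, *paddr))
		return -ENOMEM;

	return 0;
}
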
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index a0b4d265c6eb..bb44f5a0941b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2567,35 +2567,31 @@ static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CE_RST_MASK);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
msleep(10);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_LF_TIMER_CONTROL0_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_LF_TIMER_CONTROL0_ADDRESS,
- val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
@@ -3490,7 +3486,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
struct ath10k_bus_params bus_params = {};
- bool pci_ps;
+ bool pci_ps, is_qca988x = false;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
@@ -3500,6 +3496,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ is_qca988x = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
@@ -3619,25 +3616,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.link_can_suspend = true;
+ /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
+ * fall off the bus during chip_reset. These chips have the same pci
+ * device id as the QCA9880 BR4A or 2R4E, hence the explicit check.
+ */
+ if (is_qca988x) {
+ bus_params.chip_id =
+ ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id != 0xffffffff) {
+ if (!ath10k_pci_chip_is_supported(pdev->device,
+ bus_params.chip_id))
+ goto err_unsupported;
+ }
+ }
+
ret = ath10k_pci_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_free_irq;
}
- bus_params.dev_type = ATH10K_DEV_TYPE_LL;
- bus_params.link_can_suspend = true;
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (bus_params.chip_id == 0xffffffff) {
- ath10k_err(ar, "failed to get chip id\n");
- goto err_free_irq;
- }
+ if (bus_params.chip_id == 0xffffffff)
+ goto err_unsupported;
- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
- ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
- pdev->device, bus_params.chip_id);
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
goto err_free_irq;
- }
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
@@ -3647,6 +3653,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
return 0;
+err_unsupported:
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, bus_params.chip_id);
+
err_free_irq:
ath10k_pci_free_irq(ar);
ath10k_pci_rx_retry_sync(ar);
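
The pci.c probe rework above reads CHIP_ID before the chip reset on QCA988x so that QCA9880-AR1A v1 parts, which drop off the bus during reset, are rejected while the register is still readable; both unsupported-device paths then share the single err_unsupported label. The early check reduces to roughly the following sketch, reusing helpers that already exist in pci.c (wrapper name illustrative):

static int example_early_chip_check(struct ath10k *ar, u16 pci_device)
{
	u32 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	/* 0xffffffff means the read itself failed; in that case defer
	 * the decision to the post-reset check. */
	if (chip_id != 0xffffffff &&
	    !ath10k_pci_chip_is_supported(pci_device, chip_id))
		return -ENODEV;

	return 0;
}
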
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 3b63b6257c43..a0ba07b85362 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -111,6 +111,7 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_msa_info_resp_msg_v01 resp = {};
struct wlfw_msa_info_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ phys_addr_t max_mapped_addr;
struct qmi_txn txn;
int ret;
int i;
@@ -150,8 +151,20 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
}
+ max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
qmi->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
+ if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
+ resp.mem_region_info[i].region_addr > max_mapped_addr ||
+ resp.mem_region_info[i].region_addr < qmi->msa_pa ||
+ resp.mem_region_info[i].size +
+ resp.mem_region_info[i].region_addr > max_mapped_addr) {
+ ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
+ resp.mem_region_info[i].region_addr,
+ resp.mem_region_info[i].size);
+ ret = -EINVAL;
+ goto fail_unwind;
+ }
qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
qmi->mem_region[i].size = resp.mem_region_info[i].size;
qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
@@ -165,6 +178,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
return 0;
+fail_unwind:
+ memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
return ret;
}
@@ -291,10 +306,16 @@ static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
struct wlfw_cal_report_resp_msg_v01 resp = {};
struct wlfw_cal_report_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int i, j = 0;
int ret;
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_data_valid = 1;
+ req.xo_cal_data = ar_snoc->xo_cal_data;
+ }
+
ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
&resp);
if (ret < 0)
@@ -581,22 +602,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
struct wlfw_host_cap_resp_msg_v01 resp = {};
struct wlfw_host_cap_req_msg_v01 req = {};
+ struct qmi_elem_info *req_ei;
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
req.daemon_support_valid = 1;
req.daemon_support = 0;
- ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
- wlfw_host_cap_resp_msg_v01_ei, &resp);
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
+ &resp);
if (ret < 0)
goto out;
+ if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
+ req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
+ else
+ req_ei = wlfw_host_cap_req_msg_v01_ei;
+
ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
QMI_WLFW_HOST_CAP_REQ_V01,
WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
- wlfw_host_cap_req_msg_v01_ei, &req);
+ req_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath10k_err(ar, "failed to send host capability request: %d\n", ret);
@@ -643,7 +671,7 @@ int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
wlfw_ini_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- ath10k_err(ar, "fail to send fw log reqest: %d\n", ret);
+ ath10k_err(ar, "failed to send fw log request: %d\n", ret);
goto out;
}
@@ -652,7 +680,7 @@ int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath10k_err(ar, "fw log request rejectedr: %d\n",
+ ath10k_err(ar, "fw log request rejected: %d\n",
resp.resp.error);
ret = -EINVAL;
goto out;
@@ -671,6 +699,7 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_ind_register_resp_msg_v01 resp = {};
struct wlfw_ind_register_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
@@ -681,6 +710,11 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
req.msa_ready_enable_valid = 1;
req.msa_ready_enable = 1;
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_enable_valid = 1;
+ req.xo_cal_enable = 1;
+ }
+
ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
wlfw_ind_register_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -739,6 +773,13 @@ static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
if (ret)
return;
+ /*
+ * HACK: sleep for a while in between receiving the msa info response
+ * and the XPU update to prevent SDM845 from crashing due to a security
+ * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
+ */
+ msleep(20);
+
ret = ath10k_qmi_setup_msa_permissions(qmi);
if (ret)
return;
@@ -795,9 +836,13 @@ ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
ath10k_qmi_remove_msa_permission(qmi);
ath10k_core_free_board_files(ar);
+ if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
+ ath10k_snoc_fw_crashed_dump(ar);
+
ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}
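
The msa-mem-info change above validates each firmware-reported region against the MSA window before caching it and unwinds already-copied entries on the first bad one. The acceptance test boils down to this standalone check (userspace C, names illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool region_in_msa_window(uint64_t msa_pa, uint64_t msa_size,
				 uint64_t region_addr, uint32_t region_size)
{
	uint64_t max_mapped_addr = msa_pa + msa_size;

	if (region_size > msa_size)
		return false;
	if (region_addr < msa_pa || region_addr > max_mapped_addr)
		return false;
	if (region_addr + region_size > max_mapped_addr)
		return false;

	return true;
}
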
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
index 1fe05c6218c3..86fcf4e1de5f 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{}
};
+struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {}
+};
+
struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
index bca1186e1560..4d107e1364a8 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 {
#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 9870d2d095c8..120200a93bcc 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2086,9 +2086,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
- /* TODO: remove this once SDIO support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k SDIO support is work-in-progress, problems may arise!\n");
-
return 0;
err_free_wq:
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index b491361e6ed4..16177497bba7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -9,9 +9,11 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include "ce.h"
+#include "coredump.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
@@ -36,15 +38,15 @@ static char *const ce_name[] = {
"WLAN_CE_11",
};
-static struct ath10k_vreg_info vreg_cfg[] = {
- {NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
- {NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
- {NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
- {NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
+static const char * const ath10k_regulators[] = {
+ "vdd-0.8-cx-mx",
+ "vdd-1.8-xo",
+ "vdd-1.3-rfa",
+ "vdd-3.3-ch0",
};
-static struct ath10k_clk_info clk_cfg[] = {
- {NULL, "cxo_ref_clk_pin", 0, false},
+static const char * const ath10k_clocks[] = {
+ "cxo_ref_clk_pin",
};
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -976,8 +978,7 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar,
sizeof(struct ath10k_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
- cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
- sizeof(struct ath10k_shadow_reg_cfg);
+ cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
@@ -1257,10 +1258,29 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
ar_snoc->ce_irqs[i].irq_line = res->start;
}
+ ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
+ &ar_snoc->xo_cal_data);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data read returned %d\n", ret);
+ if (ret == 0) {
+ ar_snoc->xo_cal_supported = true;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
+ ar_snoc->xo_cal_data);
+ }
+ ret = 0;
+
out:
return ret;
}
+static void ath10k_snoc_quirks_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = &ar_snoc->dev->dev;
+
+ if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
+ set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
+}
+
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1337,296 +1357,102 @@ static void ath10k_snoc_release_resource(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
- struct ath10k_vreg_info *vreg_info)
-{
- struct regulator *reg;
- int ret = 0;
-
- reg = devm_regulator_get_optional(dev, vreg_info->name);
-
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
-
- if (ret == -EPROBE_DEFER) {
- ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
- vreg_info->name);
- return ret;
- }
- if (vreg_info->required) {
- ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- return ret;
- }
- ath10k_dbg(ar, ATH10K_DBG_SNOC,
- "Optional regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- goto done;
- }
-
- vreg_info->reg = reg;
-
-done:
- ath10k_dbg(ar, ATH10K_DBG_SNOC,
- "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v,
- vreg_info->load_ua, vreg_info->settle_delay);
-
- return 0;
-}
-
-static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
- struct ath10k_clk_info *clk_info)
-{
- struct clk *handle;
- int ret = 0;
-
- handle = devm_clk_get(dev, clk_info->name);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- if (clk_info->required) {
- ath10k_err(ar, "snoc clock %s isn't available: %d\n",
- clk_info->name, ret);
- return ret;
- }
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
- clk_info->name,
- ret);
- return 0;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
- clk_info->name, clk_info->freq);
-
- clk_info->handle = handle;
-
- return ret;
-}
-
-static int __ath10k_snoc_vreg_on(struct ath10k *ar,
- struct ath10k_vreg_info *vreg_info)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
- vreg_info->name);
-
- ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
- vreg_info->max_v);
- if (ret) {
- ath10k_err(ar,
- "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v);
- return ret;
- }
-
- if (vreg_info->load_ua) {
- ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
- if (ret < 0) {
- ath10k_err(ar, "failed to set regulator %s load: %d\n",
- vreg_info->name, vreg_info->load_ua);
- goto err_set_load;
- }
- }
-
- ret = regulator_enable(vreg_info->reg);
- if (ret) {
- ath10k_err(ar, "failed to enable regulator %s\n",
- vreg_info->name);
- goto err_enable;
- }
-
- if (vreg_info->settle_delay)
- udelay(vreg_info->settle_delay);
-
- return 0;
-
-err_enable:
- regulator_set_load(vreg_info->reg, 0);
-err_set_load:
- regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
-
- return ret;
-}
-
-static int __ath10k_snoc_vreg_off(struct ath10k *ar,
- struct ath10k_vreg_info *vreg_info)
+static int ath10k_hw_power_on(struct ath10k *ar)
{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int ret;
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
- vreg_info->name);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
- ret = regulator_disable(vreg_info->reg);
+ ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
if (ret)
- ath10k_err(ar, "failed to disable regulator %s\n",
- vreg_info->name);
-
- ret = regulator_set_load(vreg_info->reg, 0);
- if (ret < 0)
- ath10k_err(ar, "failed to set load %s\n", vreg_info->name);
+ return ret;
- ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
if (ret)
- ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);
+ goto vreg_off;
return ret;
-}
-
-static int ath10k_snoc_vreg_on(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_vreg_info *vreg_info;
- int ret = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
- vreg_info = &ar_snoc->vreg[i];
-
- if (!vreg_info->reg)
- continue;
-
- ret = __ath10k_snoc_vreg_on(ar, vreg_info);
- if (ret)
- goto err_reg_config;
- }
-
- return 0;
-
-err_reg_config:
- for (i = i - 1; i >= 0; i--) {
- vreg_info = &ar_snoc->vreg[i];
-
- if (!vreg_info->reg)
- continue;
-
- __ath10k_snoc_vreg_off(ar, vreg_info);
- }
+vreg_off:
+ regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
return ret;
}
-static int ath10k_snoc_vreg_off(struct ath10k *ar)
+static int ath10k_hw_power_off(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_vreg_info *vreg_info;
- int ret = 0;
- int i;
-
- for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
- vreg_info = &ar_snoc->vreg[i];
- if (!vreg_info->reg)
- continue;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
- ret = __ath10k_snoc_vreg_off(ar, vreg_info);
- }
+ clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
- return ret;
+ return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
}
-static int ath10k_snoc_clk_init(struct ath10k *ar)
+static void ath10k_msa_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_clk_info *clk_info;
- int ret = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- clk_info = &ar_snoc->clk[i];
-
- if (!clk_info->handle)
- continue;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
- clk_info->name);
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ size_t buf_len;
+ u8 *buf;
- if (clk_info->freq) {
- ret = clk_set_rate(clk_info->handle, clk_info->freq);
-
- if (ret) {
- ath10k_err(ar, "failed to set clock %s freq %u\n",
- clk_info->name, clk_info->freq);
- goto err_clock_config;
- }
- }
-
- ret = clk_prepare_enable(clk_info->handle);
- if (ret) {
- ath10k_err(ar, "failed to enable clock %s\n",
- clk_info->name);
- goto err_clock_config;
- }
- }
-
- return 0;
-
-err_clock_config:
- for (i = i - 1; i >= 0; i--) {
- clk_info = &ar_snoc->clk[i];
-
- if (!clk_info->handle)
- continue;
-
- clk_disable_unprepare(clk_info->handle);
- }
+ if (!crash_data || !crash_data->ramdump_buf)
+ return;
- return ret;
-}
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
-static int ath10k_snoc_clk_deinit(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_clk_info *clk_info;
- int i;
+ current_region = &mem_layout->region_table.regions[0];
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- clk_info = &ar_snoc->clk[i];
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+ memset(buf, 0, buf_len);
- if (!clk_info->handle)
- continue;
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
- clk_info->name);
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
+ hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);
- clk_disable_unprepare(clk_info->handle);
+ if (current_region->len < ar_snoc->qmi->msa_mem_size) {
+ memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
+ ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
+ current_region->len, ar_snoc->qmi->msa_mem_size);
+ } else {
+ memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
}
-
- return 0;
}
-static int ath10k_hw_power_on(struct ath10k *ar)
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
- int ret;
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+ mutex_lock(&ar->dump_mutex);
- ret = ath10k_snoc_vreg_on(ar);
- if (ret)
- return ret;
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
- ret = ath10k_snoc_clk_init(ar);
- if (ret)
- goto vreg_off;
+ crash_data = ath10k_coredump_new(ar);
- return ret;
-
-vreg_off:
- ath10k_snoc_vreg_off(ar);
- return ret;
-}
-
-static int ath10k_hw_power_off(struct ath10k *ar)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
-
- ath10k_snoc_clk_deinit(ar);
-
- ret = ath10k_snoc_vreg_off(ar);
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
- return ret;
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_msa_dump_memory(ar, crash_data);
+ mutex_unlock(&ar->dump_mutex);
}
static const struct of_device_id ath10k_snoc_dt_match[] = {
@@ -1678,6 +1504,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar->ce_priv = &ar_snoc->ce;
msa_size = drv_data->msa_size;
+ ath10k_snoc_quirks_init(ar);
+
ret = ath10k_snoc_resource_init(ar);
if (ret) {
ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
@@ -1695,20 +1523,37 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_release_resource;
}
- ar_snoc->vreg = vreg_cfg;
- for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
- ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
- if (ret)
- goto err_free_irq;
+ ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
+ ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
+ sizeof(*ar_snoc->vregs), GFP_KERNEL);
+ if (!ar_snoc->vregs) {
+ ret = -ENOMEM;
+ goto err_free_irq;
}
+ for (i = 0; i < ar_snoc->num_vregs; i++)
+ ar_snoc->vregs[i].supply = ath10k_regulators[i];
- ar_snoc->clk = clk_cfg;
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
- if (ret)
- goto err_free_irq;
+ ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
+ ar_snoc->vregs);
+ if (ret < 0)
+ goto err_free_irq;
+
+ ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
+ ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
+ sizeof(*ar_snoc->clks), GFP_KERNEL);
+ if (!ar_snoc->clks) {
+ ret = -ENOMEM;
+ goto err_free_irq;
}
+ for (i = 0; i < ar_snoc->num_clks; i++)
+ ar_snoc->clks[i].id = ath10k_clocks[i];
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
+ ar_snoc->clks);
+ if (ret)
+ goto err_free_irq;
+
ret = ath10k_hw_power_on(ar);
if (ret) {
ath10k_err(ar, "failed to power on device: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d62f53501fbb..c05df45a3945 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -42,29 +42,16 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
-struct ath10k_vreg_info {
- struct regulator *reg;
- const char *name;
- u32 min_v;
- u32 max_v;
- u32 load_ua;
- unsigned long settle_delay;
- bool required;
-};
-
-struct ath10k_clk_info {
- struct clk *handle;
- const char *name;
- u32 freq;
- bool required;
-};
-
enum ath10k_snoc_flags {
ATH10K_SNOC_FLAG_REGISTERED,
ATH10K_SNOC_FLAG_UNREGISTERING,
ATH10K_SNOC_FLAG_RECOVERY,
+ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
};
+struct clk_bulk_data;
+struct regulator_bulk_data;
+
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
@@ -76,10 +63,14 @@ struct ath10k_snoc {
struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
struct ath10k_ce ce;
struct timer_list rx_post_retry;
- struct ath10k_vreg_info *vreg;
- struct ath10k_clk_info *clk;
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+ struct clk_bulk_data *clks;
+ size_t num_clks;
struct ath10k_qmi *qmi;
unsigned long flags;
+ bool xo_cal_supported;
+ u32 xo_cal_data;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -88,5 +79,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
}
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar);
#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 4102df016931..39abf8b12903 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -95,6 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index e1420f67f776..1e0343081be9 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
struct ath10k_urb_context *urb_context = NULL;
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return NULL;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context = list_first_entry(&pipe->urb_list_head,
@@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
{
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;
@@ -435,6 +443,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
"usb bulk transmit failed: %d\n", ret);
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
ret = -EINVAL;
goto err_free_urb_to_pipe;
}
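
The usb.c change above plugs a reference leak: when usb_submit_urb() fails, the caller still holds its reference, so the URB has to be freed in addition to being unanchored. The corrected error path, sketched (kernel C, wrapper name illustrative):

#include <linux/usb.h>

static int example_submit(struct urb *urb, struct usb_anchor *anchor)
{
	int ret;

	usb_anchor_urb(urb, anchor);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb); /* drop the reference we still hold */
	}

	return ret;
}
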
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 4d5d10c01064..69a1ec53df29 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -409,6 +409,49 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
return 0;
}
+static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct wmi_tlv_rfkill_state_change_ev *ev;
+ const void **tb;
+ bool radio;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar,
+ "failed to parse rfkill state change event: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
+ __le32_to_cpu(ev->gpio_pin_num),
+ __le32_to_cpu(ev->int_type),
+ __le32_to_cpu(ev->radio_state));
+
+ radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!radio)
+ ar->hw_rfkill_on = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* notify cfg80211 radio state change */
+ ath10k_mac_rfkill_enable_radio(ar, radio);
+ wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
+}
+
static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -629,6 +672,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TX_PAUSE_EVENTID:
ath10k_wmi_tlv_event_tx_pause(ar, skb);
break;
+ case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
+ ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
+ break;
case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_tlv_event_temperature(ar, skb);
break;
@@ -1201,17 +1247,21 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->abi.abi_ver0;
arg->sw_ver1 = ev->abi.abi_ver1;
arg->fw_build = ev->fw_build_vers;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_2ghz_chan = reg->low_2ghz_chan;
+ arg->high_2ghz_chan = reg->high_2ghz_chan;
arg->low_5ghz_chan = reg->low_5ghz_chan;
arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+ arg->sys_cap_info = ev->sys_cap_info;
ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
ath10k_wmi_tlv_parse_mem_reqs, arg);
@@ -1649,8 +1699,9 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
- struct host_memory_chunk *chunk;
+ struct host_memory_chunk_tlv *chunk;
struct wmi_tlv *tlv;
+ dma_addr_t paddr;
int i;
__le16 tlv_len, tlv_tag;
@@ -1666,6 +1717,12 @@ ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+ if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ ar->wmi.svc_map)) {
+ paddr = ar->wmi.mem_chunks[i].paddr;
+ chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
+ }
+
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
i,
@@ -1689,7 +1746,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
void *ptr;
chunks_len = ar->wmi.num_mem_chunks *
- (sizeof(struct host_memory_chunk) + sizeof(*tlv));
+ (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
len = (sizeof(*tlv) + sizeof(*cmd)) +
(sizeof(*tlv) + sizeof(*cfg)) +
(sizeof(*tlv) + chunks_len);
@@ -4204,6 +4261,26 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+};
+
+static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
+ .smps_state = WMI_TLV_PEER_SMPS_STATE,
+ .ampdu = WMI_TLV_PEER_AMPDU,
+ .authorize = WMI_TLV_PEER_AUTHORIZE,
+ .chan_width = WMI_TLV_PEER_CHAN_WIDTH,
+ .nss = WMI_TLV_PEER_NSS,
+ .use_4addr = WMI_TLV_PEER_USE_4ADDR,
+ .membership = WMI_TLV_PEER_MEMBERSHIP,
+ .user_pos = WMI_TLV_PEER_USERPOS,
+ .crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
+ .tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
+ .set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
+ .ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
+ .phymode = WMI_TLV_PEER_PHYMODE,
+ .use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
+ .dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -4394,6 +4471,7 @@ void ath10k_wmi_tlv_attach(struct ath10k *ar)
ar->wmi.cmd = &wmi_tlv_cmd_map;
ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.peer_param = &wmi_tlv_peer_param_map;
ar->wmi.ops = &wmi_tlv_ops;
ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
}
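
The polarity in the rfkill event handler above is easy to misread: the firmware reports a radio state, while wiphy_rfkill_set_hw_state() takes "blocked", hence the inversion. Reduced to its core (kernel C, names illustrative; 2 is WMI_TLV_RFKILL_RADIO_STATE_ON from the wmi-tlv.h hunk below):

#include <net/cfg80211.h>

static void example_report_rfkill(struct wiphy *wiphy, u32 radio_state)
{
	bool radio_on = (radio_state == 2);

	/* cfg80211 tracks "hardware blocked", the opposite of radio on */
	wiphy_rfkill_set_hw_state(wiphy, !radio_on);
}
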
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 649b229a41e9..4972dc12991c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -7,6 +7,8 @@
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
+#include <linux/bitops.h>
+
#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_CMD_UNSUPPORTED 0
@@ -528,6 +530,24 @@ enum wmi_tlv_vdev_param {
WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
};
+enum wmi_tlv_peer_param {
+ WMI_TLV_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_TLV_PEER_AMPDU = 0x2,
+ WMI_TLV_PEER_AUTHORIZE = 0x3,
+ WMI_TLV_PEER_CHAN_WIDTH = 0x4,
+ WMI_TLV_PEER_NSS = 0x5,
+ WMI_TLV_PEER_USE_4ADDR = 0x6,
+ WMI_TLV_PEER_MEMBERSHIP = 0x7,
+ WMI_TLV_PEER_USERPOS = 0x8,
+ WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED = 0x9,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR = 0xa,
+ WMI_TLV_PEER_SET_HW_RETRY_CTS2S = 0xb,
+ WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH = 0xc,
+ WMI_TLV_PEER_PHYMODE = 0xd,
+ WMI_TLV_PEER_USE_FIXED_PWR = 0xe,
+ WMI_TLV_PEER_DUMMY_VAR = 0xff,
+};
+
enum wmi_tlv_peer_flags {
WMI_TLV_PEER_AUTH = 0x00000001,
WMI_TLV_PEER_QOS = 0x00000002,
@@ -1409,6 +1429,11 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
WMI_TLV_MAX_EXT_SERVICE = 256,
};
@@ -1588,6 +1613,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
WMI_TLV_MAX_SERVICE);
SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
@@ -1743,6 +1771,21 @@ struct wmi_tlv_resource_config {
__le32 host_capab;
} __packed;
+/* structure describing host memory chunk. */
+struct host_memory_chunk_tlv {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+
+ /* size of the chunk */
+ __le32 size;
+
+ /* upper 32 bits of the address, valid only on targets with >32-bit addressing */
+ __le32 ptr_high;
+} __packed;
+
struct wmi_tlv_init_cmd {
struct wmi_tlv_abi_version abi;
__le32 num_host_mem_chunks;
@@ -2235,6 +2278,31 @@ struct wmi_tlv_tdls_peer_event {
__le32 vdev_id;
} __packed;
+enum wmi_tlv_sys_cap_info_flags {
+ WMI_TLV_SYS_CAP_INFO_RXTX_LED = BIT(0),
+ WMI_TLV_SYS_CAP_INFO_RFKILL = BIT(1),
+};
+
+#define WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
+#define WMI_TLV_RFKILL_CFG_RADIO_LEVEL BIT(6)
+#define WMI_TLV_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
+
+enum wmi_tlv_rfkill_enable_radio {
+ WMI_TLV_RFKILL_ENABLE_RADIO_ON = 0,
+ WMI_TLV_RFKILL_ENABLE_RADIO_OFF = 1,
+};
+
+enum wmi_tlv_rfkill_radio_state {
+ WMI_TLV_RFKILL_RADIO_STATE_OFF = 1,
+ WMI_TLV_RFKILL_RADIO_STATE_ON = 2,
+};
+
+struct wmi_tlv_rfkill_state_change_ev {
+ __le32 gpio_pin_num;
+ __le32 int_type;
+ __le32 radio_state;
+};
+
void ath10k_wmi_tlv_attach(struct ath10k *ar);
enum wmi_nlo_auth_algorithm {
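
host_memory_chunk_tlv above grows a ptr_high word so chunk addresses can exceed 32 bits; the wmi-tlv.c hunk earlier fills it only when the firmware advertises WMI_SERVICE_SUPPORT_EXTEND_ADDRESS. The fill itself is a plain split of the DMA address (kernel C sketch, helper name illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_fill_chunk_addr(__le32 *ptr, __le32 *ptr_high,
				    dma_addr_t paddr)
{
	*ptr = cpu_to_le32(lower_32_bits(paddr));

	/* set only when the extend-address service bit is present */
	*ptr_high = cpu_to_le32(upper_32_bits(paddr));
}
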
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 4f707c6394bb..9f564e2b7a14 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -742,6 +742,19 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
};
+static struct wmi_peer_param_map wmi_peer_param_map = {
+ .smps_state = WMI_PEER_SMPS_STATE,
+ .ampdu = WMI_PEER_AMPDU,
+ .authorize = WMI_PEER_AUTHORIZE,
+ .chan_width = WMI_PEER_CHAN_WIDTH,
+ .nss = WMI_PEER_NSS,
+ .use_4addr = WMI_PEER_USE_4ADDR,
+ .use_fixed_power = WMI_PEER_USE_FIXED_PWR,
+ .debug = WMI_PEER_DEBUG,
+ .phymode = WMI_PEER_PHYMODE,
+ .dummy_var = WMI_PEER_DUMMY_VAR,
+};
+
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
@@ -4668,16 +4681,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
rate_code[i],
type);
@@ -4790,7 +4800,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_config_event *ev;
@@ -4806,6 +4816,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
+ rate_max, WMI_TPC_RATE_MAX);
+ rate_max = WMI_TPC_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
@@ -4822,8 +4839,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -5018,16 +5035,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
rate_code[i],
type, pream_idx);
@@ -5043,7 +5057,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_final_table_event *ev;
@@ -5051,12 +5065,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
+ rate_max, WMI_TPC_FINAL_RATE_MAX);
+ rate_max = WMI_TPC_FINAL_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
- num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
-
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
@@ -5069,8 +5095,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -5344,11 +5370,14 @@ ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->sw_version;
arg->sw_ver1 = ev->sw_version_1;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
@@ -5383,16 +5412,25 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->sw_version;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+ /* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
+ * different values. We would need a translation to handle that,
+ * but as we don't currently need anything from sys_cap_info on the
+ * WMI interface (only on WMI-TLV), it is safest to skip it.
+ */
+
n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
ARRAY_SIZE(arg->mem_reqs));
for (i = 0; i < n; i++)
@@ -5432,6 +5470,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+ ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
ar->fw_version_major =
(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
@@ -5441,11 +5480,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->phy_capability = __le32_to_cpu(arg.phy_capab);
ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
+ ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
+ ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
+ ar->sys_cap_info);
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
@@ -5544,17 +5588,22 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
skip_mem_alloc:
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
__le32_to_cpu(arg.min_tx_power),
__le32_to_cpu(arg.max_tx_power),
__le32_to_cpu(arg.ht_cap),
__le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.vht_supp_mcs),
__le32_to_cpu(arg.sw_ver0),
__le32_to_cpu(arg.sw_ver1),
__le32_to_cpu(arg.fw_build),
__le32_to_cpu(arg.phy_capab),
__le32_to_cpu(arg.num_rf_chains),
__le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.low_2ghz_chan),
+ __le32_to_cpu(arg.high_2ghz_chan),
+ __le32_to_cpu(arg.low_5ghz_chan),
+ __le32_to_cpu(arg.high_5ghz_chan),
__le32_to_cpu(arg.num_mem_reqs));
dev_kfree_skb(skb);
@@ -5623,7 +5672,7 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
__le32_to_cpu(arg.sw_version),
__le32_to_cpu(arg.abi_version),
arg.mac_addr,
@@ -9332,6 +9381,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.cmd = &wmi_10_4_cmd_map;
ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9340,6 +9390,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_2_4_ops;
ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9348,6 +9399,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_2_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9356,6 +9408,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_1_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9364,6 +9417,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_ops;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9440,7 +9494,5 @@ void ath10k_wmi_detach(struct ath10k *ar)
}
cancel_work_sync(&ar->svc_rdy_work);
-
- if (ar->svc_rdy_skb)
- dev_kfree_skb(ar->svc_rdy_skb);
+ dev_kfree_skb(ar->svc_rdy_skb);
}
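The detach cleanup above works because dev_kfree_skb() (an alias for consume_skb()) is a no-op when passed NULL, much like kfree(); the explicit NULL check it replaces was therefore redundant. A minimal sketch of the pattern, with names outside the skb API invented for illustration:

#include <linux/skbuff.h>

struct foo_ctx {
	struct sk_buff *pending;	/* may legitimately be NULL */
};

static void foo_detach(struct foo_ctx *ctx)
{
	/* No "if (ctx->pending)" guard needed: a NULL skb is ignored. */
	dev_kfree_skb(ctx->pending);
	ctx->pending = NULL;
}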
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index e80dbe7e8f4c..74adce1dd3a9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -202,6 +202,7 @@ enum wmi_service {
WMI_SERVICE_REPORT_AIRTIME,
WMI_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_TX_PWR_PER_PEER,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
/* Remember to add the new value to wmi_service_name()! */
@@ -496,6 +497,7 @@ static inline char *wmi_service_name(enum wmi_service service_id)
SVCSTR(WMI_SERVICE_REPORT_AIRTIME);
SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
+ SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS);
case WMI_SERVICE_MAX:
return NULL;
@@ -3786,6 +3788,8 @@ struct wmi_pdev_param_map {
u32 arp_srcaddr;
u32 arp_dstaddr;
u32 enable_btcoex;
+ u32 rfkill_config;
+ u32 rfkill_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -5071,6 +5075,25 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
+struct wmi_peer_param_map {
+ u32 smps_state;
+ u32 ampdu;
+ u32 authorize;
+ u32 chan_width;
+ u32 nss;
+ u32 use_4addr;
+ u32 membership;
+ u32 use_fixed_power;
+ u32 user_pos;
+ u32 crit_proto_hint_enabled;
+ u32 tx_fail_cnt_thr;
+ u32 set_hw_retry_cts2s;
+ u32 ibss_atim_win_len;
+ u32 debug;
+ u32 phymode;
+ u32 dummy_var;
+};
+
struct wmi_vdev_param_map {
u32 rts_threshold;
u32 fragmentation_threshold;
@@ -6842,6 +6865,7 @@ struct wmi_svc_rdy_ev_arg {
__le32 max_tx_power;
__le32 ht_cap;
__le32 vht_cap;
+ __le32 vht_supp_mcs;
__le32 sw_ver0;
__le32 sw_ver1;
__le32 fw_build;
@@ -6849,8 +6873,11 @@ struct wmi_svc_rdy_ev_arg {
__le32 num_rf_chains;
__le32 eeprom_rd;
__le32 num_mem_reqs;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
__le32 low_5ghz_chan;
__le32 high_5ghz_chan;
+ __le32 sys_cap_info;
const __le32 *service_map;
size_t service_map_len;
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
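The new wmi_peer_param_map above follows the same indirection the driver already uses for its vdev and pdev parameter maps: common code refers to an abstract field, and each firmware branch fills in the WMI parameter ID it actually speaks. A hedged sketch of the idea; the names and numeric IDs below are made up, not the real firmware constants:

#include <linux/types.h>

/* Illustrative only: abstract fields on the left, invented
 * firmware-branch-specific IDs on the right.
 */
struct foo_peer_param_map {
	u32 authorize;
	u32 chan_width;
	u32 nss;
};

static const struct foo_peer_param_map foo_map_10_2 = {
	.authorize  = 3,
	.chan_width = 4,
	.nss        = 5,
};

/* Common code asks for the abstract field; the per-version map
 * resolves it to whatever this firmware branch expects.
 */
static u32 foo_peer_authorize_id(const struct foo_peer_param_map *map)
{
	return map->authorize;
}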
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 94d34ee02265..307f1fea0a88 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1707,7 +1707,7 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 offset;
u16 val;
- int ret = 0, i;
+ int i;
offset = AR5K_EEPROM_CTL(ee->ee_version) +
AR5K_EEPROM_N_CTLS(ee->ee_version);
@@ -1730,7 +1730,7 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
}
}
- return ret;
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index d5ee32ce9eb3..43b4ae86e5fb 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -300,8 +300,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
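This suspend hook is one of several in the series (ath9k and wil6210 get the same treatment below) that drop the detour through the PCI device: pci_get_drvdata(pdev) is just dev_get_drvdata(&pdev->dev), so a handler that already receives the struct device can read the driver data directly. A sketch of the before/after shape, with hypothetical driver names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Before: bounce through the PCI device to reach the drvdata. */
static int foo_suspend_old(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	void *priv = pci_get_drvdata(pdev);

	return priv ? 0 : -ENODEV;
}

/* After: dev_get_drvdata() reads the same pointer directly. */
static int foo_suspend_new(struct device *dev)
{
	void *priv = dev_get_drvdata(dev);

	return priv ? 0 : -ENODEV;
}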
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 2382c6c46851..6885d2ded53a 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -3650,7 +3650,7 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
if (wait)
return -EINVAL; /* Offload for wait not supported */
- buf = kmalloc(data_len, GFP_KERNEL);
+ buf = kmemdup(data, data_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -3661,7 +3661,6 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
}
kfree(wmi->last_mgmt_tx_frame);
- memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
@@ -3689,7 +3688,7 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
if (wait)
return -EINVAL; /* Offload for wait not supported */
- buf = kmalloc(data_len, GFP_KERNEL);
+ buf = kmemdup(data, data_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -3700,7 +3699,6 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
}
kfree(wmi->last_mgmt_tx_frame);
- memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
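Both ath6kl hunks are the standard kmalloc()+memcpy() to kmemdup() conversion: kmemdup() allocates and copies in one step and reports failure exactly as kmalloc() would, which also removes the window where the buffer exists but has not yet been filled. A small sketch of the equivalence (function names illustrative):

#include <linux/slab.h>
#include <linux/string.h>

/* The open-coded form ... */
static void *dup_open_coded(const void *src, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, src, len);
	return buf;
}

/* ... is exactly what kmemdup() already provides. */
static void *dup_helper(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}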
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index c99f42284465..78620c6b64a2 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -144,13 +144,13 @@ config ATH9K_RFKILL
a platform that can toggle the RF-Kill GPIO.
config ATH9K_CHANNEL_CONTEXT
- bool "Channel Context support"
- depends on ATH9K
- default n
- ---help---
- This option enables channel context support in ath9k, which is needed
- for multi-channel concurrency. Enable this if P2P PowerSave support
- is required.
+ bool "Channel Context support"
+ depends on ATH9K
+ default n
+ ---help---
+ This option enables channel context support in ath9k, which is needed
+ for multi-channel concurrency. Enable this if P2P PowerSave support
+ is required.
config ATH9K_PCOEM
bool "Atheros ath9k support for PC OEM cards" if EXPERT
@@ -162,32 +162,32 @@ config ATH9K_PCI_NO_EEPROM
depends on ATH9K_PCI
default n
help
- This separate driver provides a loader in order to support the
- AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have
- their initialization data (which contains the real PCI Device ID
- that ath9k will need) stored together with the calibration data out
- of reach for the ath9k chip.
+ This separate driver provides a loader in order to support the
+ AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have
+ their initialization data (which contains the real PCI Device ID
+ that ath9k will need) stored together with the calibration data out
+ of reach for the ath9k chip.
- These devices are usually various network appliances, routers or
- access Points and such.
+ These devices are usually various network appliances, routers or
+ access Points and such.
- If unsure say N.
+ If unsure say N.
config ATH9K_HTC
- tristate "Atheros HTC based wireless cards support"
- depends on USB && MAC80211
- select ATH9K_HW
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
- select ATH9K_COMMON
- ---help---
- Support for Atheros HTC based cards.
- Chipsets supported: AR9271
-
- For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
-
- The built module will be ath9k_htc.
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
config ATH9K_HTC_DEBUGFS
bool "Atheros ath9k_htc debugging"
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 2b29bf4730f6..b4885a700296 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
{
- u32 data, ko, kg;
+ u32 data = 0, ko, kg;
if (!AR_SREV_9462_20_OR_LATER(ah))
return;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 2fe12b0de5b4..42f00a2a8c80 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1037,7 +1037,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
}
/*
- * Configire PCIE after Ini init. SERDES values now come from ini file
+ * Configure PCIE after Ini init. SERDES values now come from ini file
* This enables PCIe low power mode.
*/
array = power_off ? &ah->iniPcieSerdes :
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 159490f5a111..956fa7828d0c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -12,7 +12,6 @@
* initialize the chip when the user-space is ready to extract the init code.
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index a82ad739ab80..791f6633667c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1674,7 +1674,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
if (!ret)
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
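This hunk, and the matching ones in ath9k/main.c, carl9170 and wcn36xx further down, adopt the newer mac80211 contract for IEEE80211_AMPDU_TX_START: a driver that can start the aggregation session immediately no longer calls ieee80211_start_tx_ba_cb_irqsafe() itself but returns IEEE80211_AMPDU_TX_START_IMMEDIATE and lets mac80211 finish the handshake. A hedged sketch of the handler shape; everything outside the mac80211 API is hypothetical:

#include <net/mac80211.h>

/* Stub standing in for the driver's hardware setup. */
static int foo_hw_tx_aggr_start(struct ieee80211_hw *hw,
				struct ieee80211_ampdu_params *params)
{
	return 0;	/* pretend the hardware accepted the session */
}

static int foo_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_ampdu_params *params)
{
	int ret;

	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		ret = foo_hw_tx_aggr_start(hw, params);
		if (!ret)
			/* mac80211 runs the start_tx_ba callback for us. */
			ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}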
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 4e8e80ac8341..9cec5c216e1f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
+ __be16 rs_datalen;
+ bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
rxstatus = (struct ath_htc_rx_status *)skb->data;
- if (be16_to_cpu(rxstatus->rs_datalen) -
- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
+ if (unlikely(rs_datalen -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
ath_err(common,
"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
- rxstatus->rs_datalen, skb->len);
+ rs_datalen, skb->len);
+ goto rx_next;
+ }
+
+ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
+ /*
+ * Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
+ */
+ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
+ ath_warn(common,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* Process PHY errors and return so that the packet
* can be dropped.
*/
- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ if (unlikely(is_phyerr)) {
/* TODO: Not using DFS processing now. */
if (ath_cmn_process_fft(&priv->spec_priv, hdr,
&rx_stats, rx_status->mactime)) {
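The added length check above drops zero-length frames outright and anything shorter than an ACK frame (10 bytes) unless it is a PHY-error report, since short radar pulses are legitimately delivered as 3-byte PHY-error frames. The predicate in isolation, as a self-contained sketch:

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the reported RX length is plausible. */
static bool rx_len_is_sane(uint16_t datalen, bool is_phyerr)
{
	if (!datalen)
		return false;	/* zero-length: always drop */
	if (datalen < 10 && !is_phyerr)
		return false;	/* shorter than an ACK and not a PHY error */
	return true;
}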
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 34121fbf32e3..0548aa3702e3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1921,7 +1921,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 92b2dd396436..f3461b193c7a 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -1021,13 +1021,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
static int ath_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(device);
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
if (test_bit(ATH_OP_WOW_ENABLED, &common->op_flags)) {
- dev_info(&pdev->dev, "WOW is enabled, bypassing PCI suspend\n");
+ dev_info(device, "WOW is enabled, bypassing PCI suspend\n");
return 0;
}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 40a8054f8aa6..5914926a5c5b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1449,8 +1449,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
rcu_assign_pointer(sta_info->agg[tid], tid_info);
spin_unlock_bh(&ar->tx_ampdu_list_lock);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index e25bfdf78c2e..20f4f8ea9f89 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -33,33 +33,33 @@ static int __ath_regd_init(struct ath_regulatory *reg);
*/
/* Only these channels all allow active scan on all world regulatory domains */
-#define ATH9K_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+#define ATH_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
/* We enable active scan on these a case by case basis by regulatory domain */
-#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+#define ATH_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
+#define ATH_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
NL80211_RRF_NO_IR | \
NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
-#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
+#define ATH_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
+#define ATH_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
+#define ATH_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
- ATH9K_2GHZ_CH12_13, \
- ATH9K_2GHZ_CH14
+#define ATH_2GHZ_ALL ATH_2GHZ_CH01_11, \
+ ATH_2GHZ_CH12_13, \
+ ATH_2GHZ_CH14
-#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
- ATH9K_5GHZ_5470_5850
+#define ATH_5GHZ_ALL ATH_5GHZ_5150_5350, \
+ ATH_5GHZ_5470_5850
/* This one skips what we call "mid band" */
-#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
- ATH9K_5GHZ_5725_5850
+#define ATH_5GHZ_NO_MIDBAND ATH_5GHZ_5150_5350, \
+ ATH_5GHZ_5725_5850
/* Can be used for:
* 0x60, 0x61, 0x62 */
@@ -67,8 +67,8 @@ static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
.n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_ALL,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_ALL,
+ ATH_5GHZ_ALL,
}
};
@@ -77,9 +77,9 @@ static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_2GHZ_CH12_13,
- ATH9K_5GHZ_NO_MIDBAND,
+ ATH_2GHZ_CH01_11,
+ ATH_2GHZ_CH12_13,
+ ATH_5GHZ_NO_MIDBAND,
}
};
@@ -88,8 +88,8 @@ static const struct ieee80211_regdomain ath_world_regdom_64 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_5GHZ_NO_MIDBAND,
+ ATH_2GHZ_CH01_11,
+ ATH_5GHZ_NO_MIDBAND,
}
};
@@ -98,8 +98,8 @@ static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_CH01_11,
+ ATH_5GHZ_ALL,
}
};
@@ -108,9 +108,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_2GHZ_CH12_13,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_CH01_11,
+ ATH_2GHZ_CH12_13,
+ ATH_5GHZ_ALL,
}
};
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 8abda2760e04..6ba0fd57c951 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2091,7 +2091,7 @@ struct wcn36xx_hal_set_bss_key_rsp_msg {
/*
* This is used configure the key information on a given station.
* When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
- * a preconfigured key from a BSS the station assoicated with; otherwise
+ * a preconfigured key from a BSS the station associated with; otherwise
* a new key descriptor is created based on the key field.
*/
struct wcn36xx_hal_set_sta_key_req_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 79998a3ddb7a..c30fdd0cbf1e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -935,8 +935,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
out:
mutex_unlock(&wcn->conf_mutex);
-
- return;
}
/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
@@ -1084,6 +1082,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
+ int ret = 0;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@@ -1106,7 +1105,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
spin_unlock_bh(&sta_priv->ampdu_lock);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
spin_lock_bh(&sta_priv->ampdu_lock);
@@ -1131,7 +1130,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
mutex_unlock(&wcn->conf_mutex);
- return 0;
+ return ret;
}
static const struct ieee80211_ops wcn36xx_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index d32c1f4e533a..a8a43c25f843 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains the definitions for the boot loader
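The long run of wil6210 hunks that follows is a mechanical license cleanup: the full ISC permission/disclaimer boilerplate is collapsed into a single SPDX tag. The two comment styles are deliberate and follow the kernel's license-rules convention: C sources carry a C99-style tag on line 1, while headers use a C-style comment:

// SPDX-License-Identifier: ISC		(first line of a .c file)

/* SPDX-License-Identifier: ISC */	(first line of a .h file)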
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c70854ea5634..7d6f14420855 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index a9befb971cc4..396c94c53702 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 304b4d4e506a..11d0c79e9056 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index a04c87ffd37b..912c4eaf017b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 3e2bbbceca06..6d3413a44b05 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/firmware.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index fa3164765b20..540fa1607794 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL_FW_H__
#define __WIL_FW_H__
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 94ebfa338e3f..fbc84c03406b 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Algorithmic part of the firmware download.
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index b00a13d6d530..b1480b41cd3a 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 9b72202eeadc..06091d8a9e23 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index a87bb84a8286..07b4a252a23c 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index db087ea58ddf..f26bf046d889 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 18dd8b246022..c174323c5c0b 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -629,8 +618,7 @@ static int __maybe_unused wil6210_pm_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
wil_dbg_pm(wil, "Runtime idle\n");
@@ -644,8 +632,7 @@ static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 56143e7670ed..ed4df561e5c5 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 4b7ac14fc2a7..9b4ca6b256d2 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
index 92b8c4d84a6a..b3d79eb50a43 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.h
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: ISC */
+/* Copyright (c) 2012-2015 Qualcomm Atheros, Inc. */
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 13246d216803..d385bc03033a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
index cd2534b9c5aa..6909e989baec 100644
--- a/drivers/net/wireless/ath/wil6210/trace.c
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 36ebfcf9ef30..11c989e95880 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 598c1fba9dac..8ebc6d59aa74 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5120475b0cd7..1f4c8ec75be8 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_H
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 04d576deae72..778b63be6a9a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index 136c51c338cf..c744c65225da 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_EDMA_H
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 0783c7963621..97626bfd4dac 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL6210_H__
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 772cb00c2002..1332eb8c831f 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 4eed05bddb60..10e10dc9fedf 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index bca090611477..5ff66202238d 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL_PLATFORM_H__
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 153b84447e40..7a0d934eb271 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
@@ -2505,7 +2494,8 @@ int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie)
cmd->mgmt_frm_type = type;
/* BUG: FW API define ieLen as u8. Will fix FW */
cmd->ie_len = cpu_to_le16(ie_len);
- memcpy(cmd->ie_info, ie, ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len);
kfree(cmd);
out:
@@ -2541,7 +2531,8 @@ int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
}
cmd->ie_len = cpu_to_le16(ie_len);
- memcpy(cmd->ie_info, ie, ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
kfree(cmd);
@@ -2715,7 +2706,7 @@ int wmi_get_all_temperatures(struct wil6210_priv *wil,
return rc;
if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
- wil_err(wil, "Failed geting TEMP_SENSE_ALL\n");
+ wil_err(wil, "Failed getting TEMP_SENSE_ALL\n");
return -EINVAL;
}
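The two ie_len guards above matter because callers can legitimately pass ie == NULL together with ie_len == 0, and memcpy() with a NULL source is undefined behavior even when the length is zero. The defensive pattern in isolation:

#include <linux/string.h>
#include <linux/types.h>

/* Copy an optional IE blob; skip the call entirely when empty,
 * since memcpy(dst, NULL, 0) is formally undefined.
 */
static void copy_optional_ie(void *dst, const void *ie, size_t ie_len)
{
	if (ie_len)
		memcpy(dst, ie, ie_len);
}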
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index a2f7034489ae..6bd4ccee28ab 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
diff --git a/drivers/net/wireless/atmel/Kconfig b/drivers/net/wireless/atmel/Kconfig
index 4c0556b3a5ba..c2142c70f25d 100644
--- a/drivers/net/wireless/atmel/Kconfig
+++ b/drivers/net/wireless/atmel/Kconfig
@@ -13,29 +13,29 @@ config WLAN_VENDOR_ATMEL
if WLAN_VENDOR_ATMEL
config ATMEL
- tristate "Atmel at76c50x chipset 802.11b support"
- depends on CFG80211 && (PCI || PCMCIA)
- select WIRELESS_EXT
- select WEXT_PRIV
- select FW_LOADER
- select CRC32
- ---help---
- A driver 802.11b wireless cards based on the Atmel fast-vnet
- chips. This driver supports standard Linux wireless extensions.
-
- Many cards based on this chipset do not have flash memory
- and need their firmware loaded at start-up. If yours is
- one of these, you will need to provide a firmware image
- to be loaded into the card by the driver. The Atmel
- firmware package can be downloaded from
- <http://www.thekelleys.org.uk/atmel>
+ tristate "Atmel at76c50x chipset 802.11b support"
+ depends on CFG80211 && (PCI || PCMCIA)
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select FW_LOADER
+ select CRC32
+ ---help---
+ A driver for 802.11b wireless cards based on the Atmel fast-vnet
+ chips. This driver supports standard Linux wireless extensions.
+
+ Many cards based on this chipset do not have flash memory
+ and need their firmware loaded at start-up. If yours is
+ one of these, you will need to provide a firmware image
+ to be loaded into the card by the driver. The Atmel
+ firmware package can be downloaded from
+ <http://www.thekelleys.org.uk/atmel>
config PCI_ATMEL
- tristate "Atmel at76c506 PCI cards"
- depends on ATMEL && PCI
- ---help---
- Enable support for PCI and mini-PCI cards containing the
- Atmel at76c506 chip.
+ tristate "Atmel at76c506 PCI cards"
+ depends on ATMEL && PCI
+ ---help---
+ Enable support for PCI and mini-PCI cards containing the
+ Atmel at76c506 chip.
config PCMCIA_ATMEL
tristate "Atmel at76c502/at76c504 PCMCIA cards"
diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
index 7afc9c5329fb..368eebefa741 100644
--- a/drivers/net/wireless/atmel/atmel_cs.c
+++ b/drivers/net/wireless/atmel/atmel_cs.c
@@ -117,11 +117,9 @@ static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int atmel_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
const struct pcmcia_device_id *did;
- dev = link->priv;
did = dev_get_drvdata(&link->dev);
dev_dbg(&link->dev, "atmel_config\n");
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 31bf71a80c26..9733c64bf978 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1400,7 +1400,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
ieee80211_stop_queue(dev->wl->hw, skb_mapping);
- dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1566,7 +1566,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
- dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ dev->wl->tx_queue_stopped[ring->queue_prio] = false;
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
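
The pairing in this hunk -- set tx_queue_stopped and stop the mac80211 queue when a ring fills, clear the flag and wake the queue from the tx-status path -- is easiest to see in isolation. Below is a minimal userspace model of that handshake; the queue array and the stop/wake printfs are stand-ins, not the b43 or mac80211 API.

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

static bool queue_stopped[NUM_QUEUES];	/* models wl->tx_queue_stopped[] */

static void tx_submit(int q, bool ring_full)
{
	if (ring_full) {
		queue_stopped[q] = true;	/* stop feeding this queue */
		printf("queue %d stopped\n", q);
	}
}

static void tx_status(int q, bool ring_has_room)
{
	if (!ring_has_room)
		return;
	if (queue_stopped[q]) {
		queue_stopped[q] = false;	/* models ieee80211_wake_queue() */
		printf("queue %d woken\n", q);
	}
}

int main(void)
{
	tx_submit(1, true);
	tx_status(1, true);
	return 0;
}
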
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index b85603e91c7a..39da1a4c30ac 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3600,7 +3600,7 @@ static void b43_tx_work(struct work_struct *work)
else
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
- wl->tx_queue_stopped[queue_num] = 1;
+ wl->tx_queue_stopped[queue_num] = true;
ieee80211_stop_queue(wl->hw, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
@@ -3611,7 +3611,7 @@ static void b43_tx_work(struct work_struct *work)
}
if (!err)
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
#if B43_DEBUG
@@ -5603,7 +5603,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
/* Initialize queues and flags. */
for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
skb_queue_head_init(&wl->tx_queue[queue_num]);
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
snprintf(chip_name, ARRAY_SIZE(chip_name),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index fc12598b2dd3..96fd8e2bf773 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1108,7 +1108,8 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct sdio_func *func;
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
- mmc_pm_flag_t sdio_flags;
+ mmc_pm_flag_t pm_caps, sdio_flags;
+ int ret = 0;
func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
@@ -1119,19 +1120,33 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- brcmf_sdiod_freezer_on(sdiodev);
- brcmf_sdio_wd_timer(sdiodev->bus, 0);
+ pm_caps = sdio_get_host_pm_caps(func);
+
+ if (pm_caps & MMC_PM_KEEP_POWER) {
+ /* preserve card power during suspend */
+ brcmf_sdiod_freezer_on(sdiodev);
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+ sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
- sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->wowl_enabled) {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ } else {
+ /* power will be cut so remove device, probe again in resume */
+ brcmf_sdiod_intr_unregister(sdiodev);
+ ret = brcmf_sdiod_remove(sdiodev);
+ if (ret)
+ brcmf_err("Failed to remove device on suspend\n");
}
- if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
- brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
- return 0;
+
+ return ret;
}
static int brcmf_ops_sdio_resume(struct device *dev)
@@ -1139,13 +1154,23 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
+ mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
+ int ret = 0;
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- brcmf_sdiod_freezer_off(sdiodev);
- return 0;
+ if (!(pm_caps & MMC_PM_KEEP_POWER)) {
+ /* bus was powered off and device removed, probe again */
+ ret = brcmf_sdiod_probe(sdiodev);
+ if (ret)
+ brcmf_err("Failed to probe device on resume\n");
+ } else {
+ brcmf_sdiod_freezer_off(sdiodev);
+ }
+
+ return ret;
}
static const struct dev_pm_ops brcmf_sdio_pm_ops = {
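
The suspend path now branches on whether the SDIO host can keep the card powered across suspend. The following is a minimal userspace model of that decision, assuming made-up flag values; the MMC_PM_* constants and the remove/re-probe steps are stand-ins for the kernel helpers used in the diff.

#include <stdio.h>

#define MMC_PM_KEEP_POWER	0x1	/* stand-in for the kernel flag */
#define MMC_PM_WAKE_SDIO_IRQ	0x2

/* models brcmf_ops_sdio_suspend(): keep power if the host allows it,
 * otherwise tear the device down so resume can re-probe it */
static int model_suspend(unsigned int pm_caps, int wowl, int oob_irq)
{
	if (pm_caps & MMC_PM_KEEP_POWER) {
		unsigned int flags = MMC_PM_KEEP_POWER;

		if (wowl && !oob_irq)
			flags |= MMC_PM_WAKE_SDIO_IRQ;
		printf("keep power, pm flags 0x%x\n", flags);
		return 0;
	}
	printf("power will be cut: remove device, re-probe on resume\n");
	return 0;
}

int main(void)
{
	model_suspend(MMC_PM_KEEP_POWER, 1, 0);	/* wowl via in-band SDIO IRQ */
	model_suspend(0, 0, 0);			/* host cuts power */
	return 0;
}
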
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index e3ebb7abbdae..5598bbd09b62 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1282,6 +1282,31 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
return err;
}
+static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
+ u16 pwd_len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, pwd_data, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
+}
+
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
@@ -1505,6 +1530,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+ val = WPA3_AUTH_SAE_PSK;
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
@@ -1537,6 +1564,10 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
val = 1;
brcmf_dbg(CONN, "shared key\n");
break;
+ case NL80211_AUTHTYPE_SAE:
+ val = 3;
+ brcmf_dbg(CONN, "SAE authentication\n");
+ break;
default:
val = 2;
brcmf_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type);
@@ -1647,6 +1678,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
u16 count;
profile->use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
+ profile->is_ft = false;
if (!sme->crypto.n_akm_suites)
return 0;
@@ -1691,11 +1723,23 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
break;
case WLAN_AKM_SUITE_FT_8021X:
val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
+ profile->is_ft = true;
if (sme->want_1x)
profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
break;
case WLAN_AKM_SUITE_FT_PSK:
val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
+ profile->is_ft = true;
+ break;
+ default:
+ bphy_err(drvr, "invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
+ }
+ } else if (val & WPA3_AUTH_SAE_PSK) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_SAE:
+ val = WPA3_AUTH_SAE_PSK;
break;
default:
bphy_err(drvr, "invalid cipher group (%d)\n",
@@ -1773,7 +1817,8 @@ brcmf_set_sharedkey(struct net_device *ndev,
brcmf_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise);
- if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
+ if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3))
return 0;
if (!(sec->cipher_pairwise &
@@ -1980,7 +2025,13 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- if (sme->crypto.psk) {
+ if (sme->crypto.sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
+ }
+
+ if (sme->crypto.psk &&
+ profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) {
if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) {
err = -EINVAL;
goto done;
@@ -1998,12 +2049,23 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
}
}
- if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK) {
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
- if (err)
+ else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) {
+ /* clean up user-space RSNE */
+ if (brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) {
+ bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
+ }
+ err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
+ sme->crypto.sae_pwd_len);
+ if (!err && sme->crypto.psk)
+ err = brcmf_set_pmk(ifp, sme->crypto.psk,
+ BRCMF_WSEC_MAX_PSK_LEN);
}
+ if (err)
+ goto done;
/* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
@@ -5359,7 +5421,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
brcmf_dbg(CONN, "Processing set ssid\n");
memcpy(vif->profile.bssid, e->addr, ETH_ALEN);
- if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_PSK)
+ if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_PSK &&
+ vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_SAE)
return true;
set_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
@@ -5554,6 +5617,11 @@ done:
cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
brcmf_dbg(CONN, "Report roaming result\n");
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X && profile->is_ft) {
+ cfg80211_port_authorized(ndev, profile->bssid, GFP_KERNEL);
+ brcmf_dbg(CONN, "Report port authorized\n");
+ }
+
set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
brcmf_dbg(TRACE, "Exit\n");
return err;
@@ -6664,6 +6732,9 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD);
}
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
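
The sae_password iovar payload added above is a little-endian u16 length followed by up to 128 password octets. Here is a standalone sketch of that packing with the byte-order handling written out by hand; the struct and the 128-byte cap mirror the diff, but nothing below is the brcmfmac API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SAE_PASSWORD_LEN 128	/* BRCMF_WSEC_MAX_SAE_PASSWORD_LEN */

struct sae_pwd_le {
	uint8_t key_len[2];		/* __le16 on the wire */
	uint8_t key[MAX_SAE_PASSWORD_LEN];
};

static int build_sae_pwd(struct sae_pwd_le *out, const void *pwd, uint16_t len)
{
	if (len > MAX_SAE_PASSWORD_LEN)
		return -1;		/* mirrors the -EINVAL check */
	out->key_len[0] = len & 0xff;	/* cpu_to_le16(), spelled out */
	out->key_len[1] = len >> 8;
	memset(out->key, 0, sizeof(out->key));
	memcpy(out->key, pwd, len);
	return 0;
}

int main(void)
{
	struct sae_pwd_le pwd;

	if (!build_sae_pwd(&pwd, "hunter22", 8))
		printf("length field: %d\n",
		       pwd.key_len[0] | (pwd.key_len[1] << 8));
	return 0;
}
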
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 14d5bbad1db1..6ce48f6275a4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -107,7 +107,8 @@ struct brcmf_cfg80211_security {
enum brcmf_profile_fwsup {
BRCMF_PROFILE_FWSUP_NONE,
BRCMF_PROFILE_FWSUP_PSK,
- BRCMF_PROFILE_FWSUP_1X
+ BRCMF_PROFILE_FWSUP_1X,
+ BRCMF_PROFILE_FWSUP_SAE
};
/**
@@ -122,6 +123,7 @@ struct brcmf_cfg80211_profile {
struct brcmf_cfg80211_security sec;
struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
enum brcmf_profile_fwsup use_fwsup;
+ bool is_ft;
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index dd586a96b57a..a795d781b4c5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -778,7 +778,6 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
{
u8 desc;
u32 val, szdesc;
- u8 mpnum = 0;
u8 stype, sztype, wraptype;
*regbase = 0;
@@ -786,7 +785,6 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
if (desc == DMP_DESC_MASTER_PORT) {
- mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
wraptype = DMP_SLAVE_TYPE_MWRAP;
} else if (desc == DMP_DESC_ADDRESS) {
/* revert erom address */
@@ -854,7 +852,7 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
u8 desc_type = 0;
u32 val;
u16 id;
- u8 nmp, nsp, nmw, nsw, rev;
+ u8 nmw, nsw, rev;
u32 base, wrap;
int err;
@@ -880,8 +878,6 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
return -EFAULT;
/* only look at cores with master port(s) */
- nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
- nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 406b367c284c..85cf96461dde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1350,6 +1350,11 @@ void brcmf_detach(struct device *dev)
brcmf_fweh_detach(drvr);
brcmf_proto_detach(drvr);
+ if (drvr->mon_if) {
+ brcmf_net_detach(drvr->mon_if->ndev, false);
+ drvr->mon_if = NULL;
+ }
+
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
if (drvr->iflist[i])
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 2c3526aeca6f..1c9c74cc958e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -39,7 +39,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
- { BRCMF_FEAT_DOT11H, "802.11h" }
+ { BRCMF_FEAT_DOT11H, "802.11h" },
+ { BRCMF_FEAT_SAE, "sae" },
};
#ifdef DEBUG
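
The fwcap table above maps firmware capability strings ("sae", "p2p", ...) to feature bits. A minimal model of that lookup -- match each whitespace-separated token of the firmware capability string against the table -- is sketched below; the table contents and flag values are illustrative, and the real driver matches substrings rather than tokens.

#include <stdio.h>
#include <string.h>

struct feat_fwcap {
	unsigned int flag;
	const char *name;
};

static const struct feat_fwcap fwcap_map[] = {
	{ 0x1, "p2p" },
	{ 0x2, "monitor" },
	{ 0x4, "sae" },		/* the entry added in this diff */
};

/* returns a bitmask of recognized capabilities in @caps */
static unsigned int parse_fwcaps(const char *caps)
{
	char buf[256];
	unsigned int flags = 0;
	char *tok, *save;

	strncpy(buf, caps, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	for (tok = strtok_r(buf, " ", &save); tok;
	     tok = strtok_r(NULL, " ", &save)) {
		size_t i;

		for (i = 0; i < sizeof(fwcap_map) / sizeof(fwcap_map[0]); i++)
			if (!strcmp(tok, fwcap_map[i].name))
				flags |= fwcap_map[i].flag;
	}
	return flags;
}

int main(void)
{
	printf("feature flags: 0x%x\n", parse_fwcaps("p2p sae mbss"));
	return 0;
}
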
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 736a8179f62f..280a1f6412d4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -26,6 +26,7 @@
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
+ * SAE: simultaneous authentication of equals
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -45,7 +46,8 @@
BRCMF_FEAT_DEF(MONITOR) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
- BRCMF_FEAT_DEF(DOT11H)
+ BRCMF_FEAT_DEF(DOT11H) \
+ BRCMF_FEAT_DEF(SAE)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 37c512036e0e..de0ef1b545c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -61,6 +61,8 @@
#define BRCMF_WSEC_MAX_PSK_LEN 32
#define BRCMF_WSEC_PASSPHRASE BIT(0)
+#define BRCMF_WSEC_MAX_SAE_PASSWORD_LEN 128
+
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
@@ -518,6 +520,17 @@ struct brcmf_wsec_pmk_le {
u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
};
+/**
+ * struct brcmf_wsec_sae_pwd_le - firmware SAE password material.
+ *
+ * @key_len: number of octets in key material.
+ * @key: SAE password material.
+ */
+struct brcmf_wsec_sae_pwd_le {
+ __le16 key_len;
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
+};
+
/* Used to get specific STA parameters */
struct brcmf_scb_val_le {
__le32 val;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 6c463475e90b..f64ce5074a55 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1024,8 +1024,6 @@ brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
- memset(ring, 0, size);
-
return (ring);
}
@@ -1427,6 +1425,8 @@ static int brcmf_pcie_reset(struct device *dev)
struct brcmf_fw_request *fwreq;
int err;
+ brcmf_pcie_intr_disable(devinfo);
+
brcmf_pcie_bus_console_read(devinfo, true);
brcmf_detach(dev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 14e530601ef3..fabfbb0b40b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -57,6 +57,10 @@ static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
mutex_lock(&pi->req_lock);
+ /* Nothing to do if we have no requests */
+ if (pi->n_reqs == 0)
+ goto done;
+
/* find request */
for (i = 0; i < pi->n_reqs; i++) {
if (pi->reqs[i]->reqid == reqid)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index db783e94f929..5a6d9c86552a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -496,13 +496,11 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
* table and override CDD later
*/
if (li_mimo == &locale_bn) {
- if (li_mimo == &locale_bn) {
- maxpwr20 = QDB(16);
- maxpwr40 = 0;
+ maxpwr20 = QDB(16);
+ maxpwr40 = 0;
- if (chan >= 3 && chan <= 11)
- maxpwr40 = QDB(16);
- }
+ if (chan >= 3 && chan <= 11)
+ maxpwr40 = QDB(16);
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
txpwr->mcs_20_siso[i] = (u8) maxpwr20;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 6188275b17e5..8e8b685cfe09 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -850,8 +850,7 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
"START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
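
This is part of a tree-wide mac80211 change (also visible in the iwlegacy and iwlwifi dvm hunks below): instead of calling ieee80211_start_tx_ba_cb_irqsafe() from inside ampdu_action, a driver that is ready at once returns IEEE80211_AMPDU_TX_START_IMMEDIATE and mac80211 completes the start itself. A schematic handler follows, using stand-in types and constants rather than the real mac80211 ones.

#include <stdio.h>

enum ampdu_mlme_action { AMPDU_TX_START, AMPDU_TX_STOP };

#define AMPDU_TX_START_IMMEDIATE 1	/* stand-in for the mac80211 value */

/* returns 0 when a start callback will follow later, the IMMEDIATE code
 * when the session can start right away, or a negative error */
static int ampdu_action(enum ampdu_mlme_action action, int tid_aggregatable)
{
	switch (action) {
	case AMPDU_TX_START:
		if (!tid_aggregatable)
			return -22;	/* -EINVAL */
		return AMPDU_TX_START_IMMEDIATE;
	case AMPDU_TX_STOP:
		return 0;
	}
	return -95;			/* -EOPNOTSUPP */
}

int main(void)
{
	printf("start: %d\n", ampdu_action(AMPDU_TX_START, 1));
	return 0;
}
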
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 080e829da9b3..3f09d89ba922 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -838,9 +838,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
struct dma_pub *dma = NULL;
struct d11txh *txh = NULL;
struct scb *scb = NULL;
- bool free_pdu;
- int tx_rts, tx_frame_count, tx_rts_count;
- uint totlen, supr_status;
+ int tx_frame_count;
+ uint supr_status;
bool lastframe;
struct ieee80211_hdr *h;
u16 mcl;
@@ -917,11 +916,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
CHSPEC_CHANNEL(wlc->default_bss->chanspec));
}
- tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
tx_frame_count =
(txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
- tx_rts_count =
- (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
lastframe = !ieee80211_has_morefrags(h->frame_control);
@@ -989,9 +985,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = p->len;
- free_pdu = true;
-
if (lastframe) {
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
@@ -1816,8 +1809,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
udelay(2);
brcms_b_core_phy_clk(wlc_hw, ON);
- if (pih)
- wlc_phy_anacore(pih, ON);
+ wlc_phy_anacore(pih, ON);
}
/* switch to and initialize new band */
@@ -7384,9 +7376,7 @@ static void brcms_c_update_beacon_hw(struct brcms_c_info *wlc,
false, true);
/* mark beacon0 valid */
bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN1VLD);
- return;
}
- return;
}
/*
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index 7b31c212694d..7552bdb91991 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -231,6 +231,8 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WPA2_AUTH_FT 0x4000 /* Fast BSS Transition */
#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
+#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
+
#define DOT11_DEFAULT_RTS_LEN 2347
#define DOT11_DEFAULT_FRAG_LEN 2346
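
Reading the new bit together with the brcmf_set_wpa_version() hunk above, the wpa_versions-to-wpa_auth mapping becomes a three-way split. A compact model is below; the NL80211_* bits follow nl80211, but the *_AUTH_* values other than WPA3_AUTH_SAE_PSK are illustrative, and the real code also ORs in the *_AUTH_UNSPECIFIED variants.

#include <stdio.h>

#define NL80211_WPA_VERSION_1	(1 << 0)
#define NL80211_WPA_VERSION_2	(1 << 1)
#define NL80211_WPA_VERSION_3	(1 << 2)

#define WPA_AUTH_DISABLED	0x0000
#define WPA_AUTH_PSK		0x0004	/* illustrative */
#define WPA2_AUTH_PSK		0x0080	/* illustrative */
#define WPA3_AUTH_SAE_PSK	0x40000	/* SAE with 4-way handshake */

static unsigned int map_wpa_auth(unsigned int wpa_versions)
{
	if (wpa_versions & NL80211_WPA_VERSION_1)
		return WPA_AUTH_PSK;
	if (wpa_versions & NL80211_WPA_VERSION_2)
		return WPA2_AUTH_PSK;
	if (wpa_versions & NL80211_WPA_VERSION_3)
		return WPA3_AUTH_SAE_PSK;
	return WPA_AUTH_DISABLED;
}

int main(void)
{
	printf("wpa_auth = 0x%x\n", map_wpa_auth(NL80211_WPA_VERSION_3));
	return 0;
}
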
diff --git a/drivers/net/wireless/cisco/Kconfig b/drivers/net/wireless/cisco/Kconfig
index 01e173ede894..7a3b3bb2ce15 100644
--- a/drivers/net/wireless/cisco/Kconfig
+++ b/drivers/net/wireless/cisco/Kconfig
@@ -17,7 +17,7 @@ config AIRO
depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
select WIRELESS_EXT
select CRYPTO
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select WEXT_SPY
select WEXT_PRIV
---help---
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 8dfbaff2d1fe..c4c83ab60cbc 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5565,7 +5565,7 @@ static void shim__set_security(struct net_device *dev,
struct libipw_security *sec)
{
struct ipw2100_priv *priv = libipw_priv(dev);
- int i, force_update = 0;
+ int i;
mutex_lock(&priv->action_mutex);
if (!(priv->status & STATUS_INITIALIZED))
@@ -5605,7 +5605,6 @@ static void shim__set_security(struct net_device *dev,
priv->ieee->sec.flags |= SEC_ENABLED;
priv->ieee->sec.enabled = sec->enabled;
priv->status |= STATUS_SECURITY_UPDATED;
- force_update = 1;
}
if (sec->flags & SEC_ENCRYPT)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ed0f06532d5e..31e43fc1d12b 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -6788,9 +6788,6 @@ static int ipw_wx_set_mlme(struct net_device *dev,
{
struct ipw_priv *priv = libipw_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 34cfd8162855..0cb36d1b983a 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -999,13 +999,12 @@ static int libipw_read_qos_info_element(struct
/*
* Write QoS parameters from the ac parameters.
*/
-static int libipw_qos_convert_ac_to_parameters(struct
+static void libipw_qos_convert_ac_to_parameters(struct
libipw_qos_parameter_info
*param_elm, struct
libipw_qos_parameters
*qos_param)
{
- int rc = 0;
int i;
struct libipw_qos_ac_parameter *ac_params;
u32 txop;
@@ -1030,7 +1029,6 @@ static int libipw_qos_convert_ac_to_parameters(struct
txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
qos_param->tx_op_limit[i] = cpu_to_le16(txop);
}
- return rc;
}
/*
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 4fbcc7fba3cc..1168055da182 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2301,9 +2301,7 @@ __il3945_down(struct il_priv *il)
il3945_hw_txq_ctx_free(il);
exit:
memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
/* clear out any free frames */
@@ -3846,9 +3844,7 @@ il3945_pci_remove(struct pci_dev *pdev)
il_free_channel_map(il);
il_free_geos(il);
kfree(il->scan_cmd);
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
ieee80211_free_hw(il->hw);
}
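
Both hunks drop the `if (il->beacon_skb)` guard because dev_kfree_skb(), like kfree(), accepts NULL and does nothing. The idiom generalizes to any NULL-tolerant release function; a tiny illustration with plain free():

#include <stdlib.h>

struct ctx {
	void *beacon;
};

static void ctx_reset(struct ctx *c)
{
	/* free(NULL) is a no-op, so no guard is needed -- the same
	 * property the diff relies on for dev_kfree_skb() */
	free(c->beacon);
	c->beacon = NULL;
}

int main(void)
{
	struct ctx c = { .beacon = malloc(64) };

	ctx_reset(&c);
	ctx_reset(&c);	/* safe to call again: beacon is NULL */
	return 0;
}
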
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index ffb705b18fb1..3664f56f8cbd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -2265,7 +2265,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
if (tid_data->tfds_in_queue == 0) {
D_HT("HW queue is empty\n");
tid_data->agg.state = IL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
D_HT("HW queue is NOT empty: %d packets in HW queue\n",
tid_data->tfds_in_queue);
@@ -3331,7 +3331,6 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
- int ret = 0;
__le16 key_flags = 0;
key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
@@ -3368,7 +3367,7 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
spin_unlock_irqrestore(&il->sta_lock, flags);
- return ret;
+ return 0;
}
void
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 73f7bbf742bc..d966b29b45ee 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1072,7 +1072,7 @@ EXPORT_SYMBOL(il_get_channel_info);
static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
- const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+ static const __le32 interval[3][IL_POWER_VEC_SIZE] = {
SLP_VEC(2, 2, 4, 6, 0xFF),
SLP_VEC(2, 4, 7, 10, 10),
SLP_VEC(4, 7, 10, 10, 0xFF)
@@ -5182,8 +5182,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
il->timestamp = 0;
@@ -5302,10 +5301,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_irqsave(&il->lock, flags);
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = skb;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
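
Marking the interval table `static const` places it in read-only data built once at compile time; without `static`, a `const` local array may be re-initialized on the stack on every call. A small sketch of the difference, with placeholder contents standing in for the SLP_VEC entries:

#include <stdint.h>
#include <stdio.h>

static const uint32_t interval_static[3][5] = {	/* built once, in .rodata */
	{ 2, 2, 4, 6, 0xFF },
	{ 2, 4, 7, 10, 10 },
	{ 4, 7, 10, 10, 0xFF },
};

static uint32_t lookup(int lvl, int idx)
{
	/* with a plain "const uint32_t t[3][5] = {...};" here, the
	 * compiler may copy all 60 bytes to the stack on each call */
	return interval_static[lvl][idx];
}

int main(void)
{
	printf("%u\n", lookup(1, 2));
	return 0;
}
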
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff41987a7e35..0aae3fa4128c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -14,7 +14,8 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
+iwlwifi-objs += fw/dbg.o
+iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 5e355c4957df..56dc335a788c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -54,9 +54,10 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 50
+#define IWL_22000_UCODE_API_MAX 52
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -137,7 +138,7 @@ static const struct iwl_base_params iwl_22000_base_params = {
.pcie_l1_allowed = true,
};
-static const struct iwl_base_params iwl_22560_base_params = {
+static const struct iwl_base_params iwl_ax210_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -183,33 +184,57 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.min_umac_error_event_table = 0x400000, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 60 * 1024, \
- .fw_mon_smem_write_ptr_addr = 0xa0c16c, \
- .fw_mon_smem_write_ptr_msk = 0xfffff, \
- .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
- .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
.trans.csr = &iwl_csr_v1, \
- .gp2_reg_addr = 0xa02c68
-
-#define IWL_DEVICE_22560 \
- IWL_DEVICE_22000_COMMON, \
- .trans.device_family = IWL_DEVICE_FAMILY_22560, \
- .trans.base_params = &iwl_22560_base_params, \
- .trans.csr = &iwl_csr_v2
+ .gp2_reg_addr = 0xa02c68, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = MON_BUFF_WRPTR_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ .cycle_cnt = { \
+ .addr = MON_BUFF_CYCLE_CNT_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ }
#define IWL_DEVICE_AX210 \
IWL_DEVICE_22000_COMMON, \
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_AX210, \
- .trans.base_params = &iwl_22560_base_params, \
+ .trans.base_params = &iwl_ax210_base_params, \
.trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 512
+ .min_256_ba_txq_size = 512, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = DBGC_DBGBUF_WRAP_AROUND, \
+ .mask = 0xffffffff, \
+ }, \
+ .cur_frag = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
+ }, \
+ }
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
@@ -292,39 +317,39 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
};
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
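
The old scalar fw_mon_* fields become {addr, mask} descriptor pairs, so the dump code can read any monitor register the same way: read at the address, then AND with the mask. A schematic of that access pattern over a fake register file; the addresses and masks here are invented, not the LDBG_/DBGC_ values.

#include <stdint.h>
#include <stdio.h>

struct reg_desc {
	uint32_t addr;
	uint32_t mask;
};

/* fake 16-word register file standing in for the device */
static uint32_t regs[16] = { [4] = 0xdeadbeef };

static uint32_t read_masked(const struct reg_desc *d)
{
	return regs[d->addr] & d->mask;	/* models a prph read + mask */
}

int main(void)
{
	const struct reg_desc write_ptr = { .addr = 4, .mask = 0xfffff };

	printf("write ptr = 0x%x\n", (unsigned)read_masked(&write_ptr));
	return 0;
}
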
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e8372b67df03..e9155b9b5ee4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,6 +55,7 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "fw/file.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 46
@@ -149,10 +150,26 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .fw_mon_smem_write_ptr_addr = 0xa0476c, \
- .fw_mon_smem_write_ptr_msk = 0xfffff, \
- .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
- .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = MON_BUFF_WRPTR_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ .cycle_cnt = { \
+ .addr = MON_BUFF_CYCLE_CNT_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ }
const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index dd387aba3317..e8a4d604b910 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -171,6 +171,9 @@ void iwl_leds_init(struct iwl_priv *priv)
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
+ if (!priv->led.name)
+ return;
+
priv->led.brightness_set = iwl_led_brightness_set;
priv->led.blink_set = iwl_led_blink_set;
priv->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 74229fcb63a9..226165db7dfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -851,7 +851,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
+ if (priv->bt_ci_compliance)
full_concurrent = true;
else
full_concurrent = false;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 3029e3f6de63..cd73fc5cfcbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -621,7 +621,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
tid_data->agg.ssn);
tid_data->agg.state = IWL_AGG_STARTING;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
"next_reclaimed = %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index c2db758b9d54..40fe2d667622 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -61,6 +61,7 @@
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
+#include "fw/runtime.h"
void *iwl_acpi_get_object(struct device *dev, acpi_string method)
{
@@ -245,3 +246,289 @@ out_free:
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
+
+int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
+{
+ int i;
+
+ profile->enabled = enabled;
+
+ for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
+ if (table[i].type != ACPI_TYPE_INTEGER ||
+ table[i].integer.value > U8_MAX)
+ return -EINVAL;
+
+ profile->table[i] = table[i].integer.value;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_set_profile);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b)
+{
+ int i, j, idx;
+ int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
+
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
+ ACPI_SAR_TABLE_SIZE);
+
+ for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+ if (profs[i] > ACPI_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /* if one of the profiles is disabled, we fail all */
+ return -ENOENT;
+ }
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
+ idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
+ per_chain_restriction[i][j] =
+ cpu_to_le16(prof->table[idx]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->table[idx]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *table, *data;
+ bool enabled;
+ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled);
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ bool enabled;
+ int i, n_profiles, tbl_rev;
+ int ret = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+ * ACPI_SAR_PROFILE_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < n_profiles; i++) {
+ /* the tables start at element 3 */
+ int pos = 3;
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
+ &fwrt->sar_profiles[i + 1],
+ enabled);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += ACPI_SAR_TABLE_SIZE;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int i, j, ret, tbl_rev;
+ int idx = 1;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ fwrt->geo_rev = tbl_rev;
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[idx++];
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > U8_MAX) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->geo_profiles[i].values[j] = entry->integer.value;
+ }
+ }
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_geo_tx_power_profiles_resp *resp;
+ int ret;
+
+ resp = (void *)cmd->resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
+ ret = -EIO;
+ IWL_WARN(fwrt, "Invalid geographic profile idx (%d)\n", ret);
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
+
+void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table)
+{
+ int ret, i, j;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return;
+
+ ret = iwl_sar_get_wgds_table(fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ return;
+ }
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
+
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+ (struct iwl_per_chain_offset *)&table[i];
+
+ for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ u8 *value;
+
+ value = &fwrt->geo_profiles[i].values[j *
+ ACPI_GEO_PER_CHAIN_SIZE];
+ chain[j].max_tx_power = cpu_to_le16(value[0]);
+ chain[j].chain_a = value[1];
+ chain[j].chain_b = value[2];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j, value[1], value[2], value[0]);
+ }
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
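
iwl_sar_select_profile() above flattens a chains-by-sub-bands grid into the linear per-profile table with idx = chain * NUM_SUB_BANDS + band, which is what the BUILD_BUG_ON on the table size guards. The indexing is worth seeing on its own; a standalone sketch with assumed dimensions:

#include <stdio.h>

#define NUM_CHAINS	2	/* plays ACPI_SAR_NUM_CHAIN_LIMITS */
#define NUM_SUB_BANDS	5	/* plays ACPI_SAR_NUM_SUB_BANDS */
#define TABLE_SIZE	(NUM_CHAINS * NUM_SUB_BANDS)

int main(void)
{
	unsigned char table[TABLE_SIZE];
	int i, j;

	for (i = 0; i < TABLE_SIZE; i++)
		table[i] = i;	/* stand-in power limits, 1/8 dBm units */

	for (i = 0; i < NUM_CHAINS; i++)
		for (j = 0; j < NUM_SUB_BANDS; j++) {
			int idx = i * NUM_SUB_BANDS + j;	/* the flattening */

			printf("chain %d band %d -> table[%d] = %u\n",
			       i, j, idx, table[idx]);
		}
	return 0;
}
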
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 6cb2d1f5efea..4a6e8262974b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -61,6 +61,12 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
#define ACPI_WRDS_METHOD "WRDS"
#define ACPI_EWRD_METHOD "EWRD"
@@ -104,9 +110,21 @@
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
+struct iwl_sar_profile {
+ bool enabled;
+ u8 table[ACPI_SAR_TABLE_SIZE];
+};
+
+struct iwl_geo_profile {
+ u8 values[ACPI_GEO_TABLE_SIZE];
+};
+
#ifdef CONFIG_ACPI
+struct iwl_fw_runtime;
+
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@@ -134,6 +152,27 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev);
*/
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
+int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd);
+
+void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -164,5 +203,50 @@ static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
return -ENOENT;
}
+static inline int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
+static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd)
+{
+ return -ENOENT;
+}
+
+static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table)
+{
+}
+
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
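
The header keeps callers ifdef-free by pairing every real function with a static inline stub under #else: with CONFIG_ACPI off, the calls compile down to -ENOENT, false, or a no-op. The same idiom in miniature, with a local toggle standing in for the Kconfig option:

#include <stdio.h>

#define ENOENT_ERR (-2)

/* flip this to 0 to compile the stub path instead */
#define HAVE_FEATURE 1

#if HAVE_FEATURE
static int feature_get_table(int *out)
{
	*out = 42;		/* real implementation */
	return 0;
}
#else
static inline int feature_get_table(int *out)
{
	(void)out;
	return ENOENT_ERR;	/* stub: feature compiled out */
}
#endif

int main(void)
{
	int v = 0;
	int ret = feature_get_table(&v);

	printf("ret=%d v=%d\n", ret, v);
	return 0;
}
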
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 4c3219e7beb6..3643b6ba6385 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -65,6 +65,14 @@
#define __iwl_fw_api_d3_h__
/**
+ * enum iwl_d0i3_flags - d0i3 flags
+ * @IWL_D0I3_RESET_REQUIRE: FW require reset upon resume
+ */
+enum iwl_d0i3_flags {
+ IWL_D0I3_RESET_REQUIRE = BIT(0),
+};
+
+/**
* enum iwl_d3_wakeup_flags - D3 manager wakeup flags
* @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index ba586f148c14..b9d7ed93311c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -60,52 +60,10 @@
#include <linux/bitops.h>
-/**
- * struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
- *
- * @tlv_version: version info
- * @apply_point: &enum iwl_fw_ini_apply_point
- * @data: TLV data followed
- */
-struct iwl_fw_ini_header {
- __le32 tlv_version;
- __le32 apply_point;
- u8 data[];
-} __packed; /* FW_DEBUG_TLV_HEADER_S */
-
-/**
- * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
- * buffer allocation TLV - for debug
- *
- * @iwl_fw_ini_header: header
- * @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd
- * if needed (DBGC1/DBGC2/SDFX/...)
- * @buffer_location: type of iwl_fw_ini_buffer_location
- * @size: size in bytes
- * @max_fragments: the maximum allowed fragmentation in the desired memory
- * allocation above
- * @min_frag_size: the minimum allowed fragmentation size in bytes
- */
-struct iwl_fw_ini_allocation_tlv {
- struct iwl_fw_ini_header header;
- __le32 allocation_id;
- __le32 buffer_location;
- __le32 size;
- __le32 max_fragments;
- __le32 min_frag_size;
-} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
-
-/**
- * enum iwl_fw_ini_dbg_domain - debug domains
- * allows to send host cmd or collect memory region if a given domain is enabled
- *
- * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
- * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
- */
-enum iwl_fw_ini_dbg_domain {
- IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
- IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
-}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
+#define IWL_FW_INI_MAX_REGION_ID 64
+#define IWL_FW_INI_MAX_NAME 32
+#define IWL_FW_INI_MAX_CFG_NAME 64
+#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
/**
* struct iwl_fw_ini_hcmd
@@ -123,279 +81,198 @@ struct iwl_fw_ini_hcmd {
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
- * Generic Host command pass through TLV
- *
- * @header: header
- * @domain: send command only if the specific domain is enabled
- * &enum iwl_fw_ini_dbg_domain
- * @period_msec: period in which the hcmd will be sent to FW. Measured in msec
- * (0 = one time command).
- * @hcmd: a variable length host-command to be sent to apply the configuration.
+ * struct iwl_fw_ini_header - Common Header for all ini debug TLV's structures
+ *
+ * @version: TLV version
+ * @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain
+ * @data: TLV data
*/
-struct iwl_fw_ini_hcmd_tlv {
- struct iwl_fw_ini_header header;
+struct iwl_fw_ini_header {
+ __le32 version;
__le32 domain;
- __le32 period_msec;
- struct iwl_fw_ini_hcmd hcmd;
-} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
+ u8 data[];
+} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
-#define IWL_FW_INI_MAX_REGION_ID 64
-#define IWL_FW_INI_MAX_NAME 32
+/**
+ * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses
+ *
+ * @size: size of each memory chunk
+ * @offset: offset to add to the base address of each chunk
+ */
+struct iwl_fw_ini_region_dev_addr {
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
+ * struct iwl_fw_ini_region_fifos - Configuration to read Tx/Rx fifos
*
- * @id_and_grp: id and group of dhc response.
- * @desc: dhc response descriptor.
+ * @fid: fifos ids array. Used to determine what fifos to collect
+ * @hdr_only: if non zero, collect only the registers
+ * @offset: offset to add to the registers addresses
*/
-struct iwl_fw_ini_region_cfg_dhc {
- __le32 id_and_grp;
- __le32 desc;
-} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
+struct iwl_fw_ini_region_fifos {
+ __le32 fid[2];
+ __le32 hdr_only;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_FIFOS_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
+ * struct iwl_fw_ini_region_err_table - error table region data
+ *
+ * Configuration to read Umac/Lmac error table
*
- * @num_of_range: the amount of ranges in the region
- * @range_data_size: size of the data to read per range, in bytes.
+ * @version: version of the error table
+ * @base_addr: base address of the error table
+ * @size: size of the error table
+ * @offset: offset to add to &base_addr
*/
-struct iwl_fw_ini_region_cfg_internal {
- __le32 num_of_ranges;
- __le32 range_data_size;
-} __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */
+struct iwl_fw_ini_region_err_table {
+ __le32 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
- *
- * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
- * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
- * It is unused for tx.
- * @num_of_registers: number of prph registers in the region, each register is
- * 4 bytes size.
- * @header_only: none zero value indicates that this region does not include
- * fifo data and includes only the given registers.
+ * struct iwl_fw_ini_region_internal_buffer - internal buffer region data
+ *
+ * Configuration to read internal monitor buffer
+ *
+ * @alloc_id: allocation id one of &enum iwl_fw_ini_allocation_id
+ * @base_addr: internal buffer base address
+ * @size: internal buffer size
*/
-struct iwl_fw_ini_region_cfg_fifos {
- __le32 fid1;
- __le32 fid2;
- __le32 num_of_registers;
- __le32 header_only;
-} __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */
+struct iwl_fw_ini_region_internal_buffer {
+ __le32 alloc_id;
+ __le32 base_addr;
+ __le32 size;
+} __packed; /* FW_TLV_DEBUG_REGION_INTERNAL_BUFFER_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg
- *
- * @region_id: ID of this dump configuration
- * @region_type: &enum iwl_fw_ini_region_type
- * @domain: dump this region only if the specific domain is enabled
- * &enum iwl_fw_ini_dbg_domain
- * @name_len: name length
- * @name: file name to use for this region
- * @internal: used in case the region uses internal memory.
- * @allocation_id: For DRAM type field substitutes for allocation_id
- * @fifos: used in case of fifos region.
- * @dhc_desc: dhc response descriptor.
- * @notif_id_and_grp: dump this region only if the specific notification
- * occurred.
- * @offset: offset to use for each memory base address
- * @start_addr: array of addresses.
+ * struct iwl_fw_ini_region_tlv - region TLV
+ *
+ * Configures parameters for region data collection
+ *
+ * @hdr: debug header
+ * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @type: region type. One of &enum iwl_fw_ini_region_type
+ * @name: region name
+ * @dev_addr: device address configuration. Used by
+ * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
+ * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
+ * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
+ * &IWL_FW_INI_REGION_RXF
+ * @err_table: error table configuration. Used by
+ * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * @internal_buffer: internal monitor buffer configuration. Used by
+ * &IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
+ * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * @addrs: array of addresses attached to the end of the region tlv
*/
-struct iwl_fw_ini_region_cfg {
- __le32 region_id;
- __le32 region_type;
- __le32 domain;
- __le32 name_len;
+struct iwl_fw_ini_region_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 id;
+ __le32 type;
u8 name[IWL_FW_INI_MAX_NAME];
union {
- struct iwl_fw_ini_region_cfg_internal internal;
- __le32 allocation_id;
- struct iwl_fw_ini_region_cfg_fifos fifos;
- struct iwl_fw_ini_region_cfg_dhc dhc_desc;
- __le32 notif_id_and_grp;
- }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
- __le32 offset;
- __le32 start_addr[];
-} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
+ struct iwl_fw_ini_region_dev_addr dev_addr;
+ struct iwl_fw_ini_region_fifos fifos;
+ struct iwl_fw_ini_region_err_table err_table;
+ struct iwl_fw_ini_region_internal_buffer internal_buffer;
+ __le32 dram_alloc_id;
+ __le32 tlv_mask;
+ }; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
+ __le32 addrs[];
+} __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
- * defines memory regions to dump
+ * struct iwl_fw_ini_debug_info_tlv
+ *
+ * debug configuration name for a specific image
*
- * @header: header
- * @num_regions: how many different region section and IDs are coming next
- * @region_config: list of dump configurations
+ * @hdr: debug header
+ * @image_type: image type
+ * @debug_cfg_name: debug configuration name
*/
-struct iwl_fw_ini_region_tlv {
- struct iwl_fw_ini_header header;
- __le32 num_regions;
- struct iwl_fw_ini_region_cfg region_config[];
-} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
+struct iwl_fw_ini_debug_info_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 image_type;
+ u8 debug_cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed; /* FW_TLV_DEBUG_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_allocation_tlv - Allocates DRAM buffers
+ *
+ * @hdr: debug header
+ * @alloc_id: allocation id. One of &enum iwl_fw_ini_allocation_id
+ * @buf_location: buffer location. One of &enum iwl_fw_ini_buffer_location
+ * @req_size: requested buffer size
+ * @max_frags_num: maximum number of fragments
+ * @min_size: minimum buffer size
+ */
+struct iwl_fw_ini_allocation_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 alloc_id;
+ __le32 buf_location;
+ __le32 req_size;
+ __le32 max_frags_num;
+ __le32 min_size;
+} __packed; /* FW_TLV_DEBUG_BUFFER_ALLOCATION_API_S_VER_1 */
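A minimal sketch of populating this TLV for DBGC1 in DRAM, assuming the DRAM value of &enum iwl_fw_ini_buffer_location (elided in this hunk) and made-up sizes (SZ_* from <linux/sizes.h>):

	struct iwl_fw_ini_allocation_tlv alloc = {
		.alloc_id	= cpu_to_le32(IWL_FW_INI_ALLOCATION_ID_DBGC1),
		.buf_location	= cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		.req_size	= cpu_to_le32(SZ_1M),	/* ask for 1 MiB */
		.max_frags_num	= cpu_to_le32(16),	/* in up to 16 frags */
		.min_size	= cpu_to_le32(SZ_64K),	/* never below 64 KiB */
	};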
/**
- * struct iwl_fw_ini_trigger
- *
- * @trigger_id: &enum iwl_fw_ini_trigger_id
- * @override_trig: determines how apply trigger in case a trigger with the
- * same id is already in use. Using the first 2 bytes:
- * Byte 0: if 0, override trigger configuration, otherwise use the
- * existing configuration.
- * Byte 1: if 0, override trigger regions, otherwise append regions to
- * existing trigger.
+ * struct iwl_fw_ini_trigger_tlv - trigger TLV
+ *
+ * Trigger that, upon firing, determines which regions to collect
+ *
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @trigger_reason: trigger reason
+ * @apply_policy: uses &enum iwl_fw_ini_trigger_apply_policy
* @dump_delay: delay from trigger fire to dump, in usec
- * @occurrences: max amount of times to be fired
- * @reserved: to align to FW struct
+ * @occurrences: max trigger fire occurrences allowed
+ * @reserved: unused
* @ignore_consec: ignore consecutive triggers, in usec
- * @force_restart: force FW restart
+ * @reset_fw: if non-zero, reset and reload the FW
* @multi_dut: initiate debug dump data on several DUTs
- * @trigger_data: generic data to be utilized per trigger
- * @num_regions: number of dump regions defined for this trigger
- * @data: region IDs
+ * @regions_mask: mask of regions to collect
+ * @data: trigger data
*/
-struct iwl_fw_ini_trigger {
- __le32 trigger_id;
- __le32 override_trig;
+struct iwl_fw_ini_trigger_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 trigger_reason;
+ __le32 apply_policy;
__le32 dump_delay;
__le32 occurrences;
__le32 reserved;
__le32 ignore_consec;
- __le32 force_restart;
+ __le32 reset_fw;
__le32 multi_dut;
- __le32 trigger_data;
- __le32 num_regions;
+ __le64 regions_mask;
__le32 data[];
-} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
-
-/**
- * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
- * Triggers that hold memory regions to dump in case a trigger fires
- *
- * @header: header
- * @num_triggers: how many different triggers section and IDs are coming next
- * @trigger_config: list of trigger configurations
- */
-struct iwl_fw_ini_trigger_tlv {
- struct iwl_fw_ini_header header;
- __le32 num_triggers;
- struct iwl_fw_ini_trigger trigger_config[];
-} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
-
-#define IWL_FW_INI_MAX_IMG_NAME_LEN 32
-#define IWL_FW_INI_MAX_DBG_CFG_NAME_LEN 64
+} __packed; /* FW_TLV_DEBUG_TRIGGER_API_S_VER_1 */
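Since @regions_mask replaces the old explicit list of region IDs, membership becomes a 64-bit bit test; a sketch:

	/* true iff this trigger collects the region with the given id */
	static bool trig_collects_region(const struct iwl_fw_ini_trigger_tlv *trig,
					 u32 region_id)
	{
		return le64_to_cpu(trig->regions_mask) & BIT_ULL(region_id);
	}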
/**
- * struct iwl_fw_ini_debug_info_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_INFO)
- *
- * holds image name and debug configuration name
+ * struct iwl_fw_ini_hcmd_tlv - Generic Host command pass through TLV
*
- * @header: header
- * @img_name_len: length of the image name string
- * @img_name: image name string
- * @dbg_cfg_name_len : length of the debug configuration name string
- * @dbg_cfg_name: debug configuration name string
- */
-struct iwl_fw_ini_debug_info_tlv {
- struct iwl_fw_ini_header header;
- __le32 img_name_len;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- __le32 dbg_cfg_name_len;
- u8 dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
-} __packed; /* FW_DEBUG_TLV_INFO_API_S_VER_1 */
-
-/**
- * enum iwl_fw_ini_trigger_id
- *
- * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
- * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
- * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
- * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
- * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
- * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
- * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
- * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
- * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
- * threshold was crossed
- * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
- * @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host
- * @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request
- * @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request
- * @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined
- * @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined
- * @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received
- * @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed
- * @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed
- * @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth
- * @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed
- * @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start
- * @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end
- * @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events
- * @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events
- * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined
- * @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association
- * failed
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete
- * @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received
- * @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed
- * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @period_msec: interval at which the hcmd will be sent to the FW.
+ * Measured in msec (0 = one-time command)
+ * @hcmd: a variable length host-command to be sent to apply the configuration
*/
-enum iwl_fw_ini_trigger_id {
- IWL_FW_TRIGGER_ID_INVALID = 0,
-
- /* Errors triggers */
- IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
- IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2,
- IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3,
-
- /* FW triggers */
- IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
- IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
-
- /* User trigger */
- IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
-
- /* periodic uses the data field for the interval time */
- IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
-
- /* Host triggers */
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
- IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
- IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
- IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
- IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
- IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
- IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
- IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
- IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
- IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
- IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
- IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
- IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
- IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
- IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
- IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
- IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
- IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
- IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
- IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
- IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
- IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
- IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
- IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
- IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
- IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
-
- IWL_FW_TRIGGER_ID_NUM,
-}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
+struct iwl_fw_ini_hcmd_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 period_msec;
+ struct iwl_fw_ini_hcmd hcmd;
+} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
* enum iwl_fw_ini_allocation_id
@@ -404,9 +281,6 @@ enum iwl_fw_ini_trigger_id {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
- * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module
- * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps
- * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -414,9 +288,6 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
- IWL_FW_INI_ALLOCATION_ID_SDFX,
- IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
- IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -436,58 +307,47 @@ enum iwl_fw_ini_buffer_location {
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
- * enum iwl_fw_ini_debug_flow
- *
- * @IWL_FW_INI_DEBUG_INVALID: invalid
- * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
- * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
- */
-enum iwl_fw_ini_debug_flow {
- IWL_FW_INI_DEBUG_INVALID,
- IWL_FW_INI_DEBUG_DBTR_FLOW,
- IWL_FW_INI_DEBUG_TB2DTF_FLOW,
-}; /* FW_DEBUG_TLV_FLOW_E_VER_1 */
-
-/**
* enum iwl_fw_ini_region_type
*
* @IWL_FW_INI_REGION_INVALID: invalid
+ * @IWL_FW_INI_REGION_TLV: uCode and debug TLVs
+ * @IWL_FW_INI_REGION_INTERNAL_BUFFER: monitor SMEM buffer
+ * @IWL_FW_INI_REGION_DRAM_BUFFER: monitor DRAM buffer
+ * @IWL_FW_INI_REGION_TXF: TX fifos
+ * @IWL_FW_INI_REGION_RXF: RX fifo
+ * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
+ * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_RSP_OR_NOTIF: FW response or notification data
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
* @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
* @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
- * @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer
- * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
- * @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined
- * @IWL_FW_INI_REGION_TXF: TX fifos
- * @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_PAGING: paging memory
* @IWL_FW_INI_REGION_CSR: CSR registers
- * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
- * @IWL_FW_INI_REGION_DHC: dhc response to dump
- * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
- * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
+ * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_INVALID,
+ IWL_FW_INI_REGION_TLV,
+ IWL_FW_INI_REGION_INTERNAL_BUFFER,
+ IWL_FW_INI_REGION_DRAM_BUFFER,
+ IWL_FW_INI_REGION_TXF,
+ IWL_FW_INI_REGION_RXF,
+ IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_RSP_OR_NOTIF,
IWL_FW_INI_REGION_DEVICE_MEMORY,
IWL_FW_INI_REGION_PERIPHERY_MAC,
IWL_FW_INI_REGION_PERIPHERY_PHY,
IWL_FW_INI_REGION_PERIPHERY_AUX,
- IWL_FW_INI_REGION_DRAM_BUFFER,
- IWL_FW_INI_REGION_DRAM_IMR,
- IWL_FW_INI_REGION_INTERNAL_BUFFER,
- IWL_FW_INI_REGION_TXF,
- IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
- IWL_FW_INI_REGION_NOTIFICATION,
- IWL_FW_INI_REGION_DHC,
- IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
- IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_DRAM_IMR,
+ IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
IWL_FW_INI_REGION_NUM
-}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
+}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
/**
* enum iwl_fw_ini_time_point
@@ -557,4 +417,22 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
+/**
+ * enum iwl_fw_ini_trigger_apply_policy - Determines how to apply triggers
+ *
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT: match by time point
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_DATA: match by trigger data
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS: override regions mask.
+ * Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
+ * Append otherwise
+ */
+enum iwl_fw_ini_trigger_apply_policy {
+ IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
+ IWL_FW_INI_APPLY_POLICY_MATCH_DATA = BIT(1),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+};
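A sketch of the override/append split when merging a freshly parsed trigger into an already-stored one ("active" is a hypothetical stored copy):

	u64 mask = le64_to_cpu(active->regions_mask);

	if (le32_to_cpu(trig->apply_policy) &
	    IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS)
		mask = le64_to_cpu(trig->regions_mask);		/* override */
	else
		mask |= le64_to_cpu(trig->regions_mask);	/* append */

	active->regions_mask = cpu_to_le64(mask);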
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 6b4d59daacd6..e7a1acedbcf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,6 +78,20 @@ enum iwl_mac_conf_subcmd_ids {
*/
CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4,
/**
+ * @MISSED_VAP_NOTIF: &struct iwl_missed_vap_notif
+ */
+ MISSED_VAP_NOTIF = 0xFA,
+ /**
+ * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
+ */
+ SESSION_PROTECTION_CMD = 0x5,
+
+ /**
+ * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
+ */
+ SESSION_PROTECTION_NOTIF = 0xFB,
+
+ /**
* @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
*/
PROBE_RESPONSE_DATA_NOTIF = 0xFC,
@@ -131,6 +145,21 @@ struct iwl_probe_resp_data_notif {
} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
/**
+ * struct iwl_missed_vap_notif - notification of missing vap detection
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @num_beacon_intervals_elapsed: beacons elapsed with no vap profile inside
+ * @profile_periodicity: beacon period within which our profile should appear
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_missed_vap_notif {
+ __le32 mac_id;
+ u8 num_beacon_intervals_elapsed;
+ u8 profile_periodicity;
+ u8 reserved[2];
+} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
+
+/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
* @id_and_color: ID and color of the MAC
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index a93449db7bb2..88bc7733065f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -260,6 +260,11 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
+#define RX_MPDU_BAND_POS 6
+#define RX_MPDU_BAND_MASK 0xC0
+#define BAND_IN_RX_STATUS(_val) \
+ (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
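For example, a status byte with bits 7:6 equal to 0b10 decodes to band 2 (the band values themselves are firmware-defined):

	u8 val = 0x80;				/* bits 7:6 = 0b10 */
	u8 band = BAND_IN_RX_STATUS(val);	/* (0x80 & 0xC0) >> 6 == 2 */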
+
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
IWL_RX_L3_TYPE_IPV4,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index c0750ced5ac2..408798f351c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -70,6 +70,9 @@
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
+#define SCAN_SHORT_SSID_MAX_SIZE 8
+#define SCAN_BSSID_MAX_SIZE 16
+
/**
* struct iwl_ssid_ie - directed scan network information element
*
@@ -278,6 +281,9 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
+ IWL_SCAN_CHANNEL_FLAG_FORCE_EBS = BIT(4),
+ IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER = BIT(5),
+ IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
};
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
@@ -638,6 +644,47 @@ enum iwl_umac_scan_general_flags2 {
};
/**
+ * enum iwl_umac_scan_general_flags_v2 - UMAC scan general flags version 2
+ *
+ * The FW flags were reordered, hence the driver introduces version 2
+ *
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons
+ * during scan iterations
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification
+ * on every iteration instead of only once after the last iteration
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH: whether this scan checks for profile matching
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell
+ * for active channel
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all the channels scanned
+ * as passive
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and
+ * 5.2GHz band scans, trigger a scan on the 6GHz band to discover
+ * the reported collocated APs
+ */
+enum iwl_umac_scan_general_flags_v2 {
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START = BIT(9),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
+};
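A sketch of combining these flags for a pass-all, adaptive-dwell scan with per-iteration notifications (the particular combination is illustrative policy, not mandated by the API):

	__le16 gen_flags =
		cpu_to_le16(IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL |
			    IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL |
			    IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE);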
+
+/**
* struct iwl_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
@@ -831,6 +878,217 @@ struct iwl_scan_req_umac {
#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
/**
+ * struct iwl_scan_probe_params_v3
+ * @preq: scan probe request params
+ * @ssid_num: number of valid SSIDs in direct scan array
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v3 {
+ struct iwl_scan_probe_req preq;
+ u8 ssid_num;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ u8 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */
+
+/**
+ * struct iwl_scan_probe_params_v4
+ * @preq: scan probe request params
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v4 {
+ struct iwl_scan_probe_req preq;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ __le16 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
+
+#define SCAN_MAX_NUM_CHANS_V3 67
+
+/**
+ * struct iwl_scan_channel_params_v3
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4GHz and 5.2GHz bands
+ */
+struct iwl_scan_channel_params_v3 {
+ u8 flags;
+ u8 count;
+ __le16 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
+
+/**
+ * struct iwl_scan_channel_params_v4
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @num_of_aps_override: override the number of APs the FW uses to calculate
+ * dwell time when adaptive dwell is used
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4GHz and 5.2GHz bands
+ * @adwell_ch_override_bitmap: when using adaptive dwell, override the number
+ * of APs value with &num_of_aps_override for the channel.
+ * To map a channel to its index, use &iwl_mvm_scan_ch_and_band_to_idx
+ */
+struct iwl_scan_channel_params_v4 {
+ u8 flags;
+ u8 count;
+ u8 num_of_aps_override;
+ u8 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+ u8 adwell_ch_override_bitmap[16];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 also
+ SCAN_CHANNEL_PARAMS_API_S_VER_5 */
+/**
+ * struct iwl_scan_general_params_v10
+ * @flags: &enum iwl_umac_scan_flags
+ * @reserved: reserved for future
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @active_dwell: dwell time for active scan per LMAC
+ * @adwell_default_2g: adaptive dwell default number of APs
+ * for 2.4GHz channel
+ * @adwell_default_5g: adaptive dwell default number of APs
+ * for 5GHz channels
+ * @adwell_default_social_chn: adaptive dwell default number of
+ * APs per social channel
+ * @reserved1: reserved for future
+ * @adwell_max_budget: the maximal number of TUs that adaptive dwell
+ * can add to the total scan time
+ * @max_out_of_time: max out of serving channel time, per LMAC
+ * @suspend_time: max suspend time, per LMAC
+ * @scan_priority: priority of the request
+ * @passive_dwell: continuous dwell time for passive channels
+ * (without adaptive dwell)
+ * @num_of_fragments: number of fragments needed for full fragmented
+ * scan coverage.
+ */
+struct iwl_scan_general_params_v10 {
+ __le16 flags;
+ u8 reserved;
+ u8 scan_start_mac_id;
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 adwell_default_2g;
+ u8 adwell_default_5g;
+ u8 adwell_default_social_chn;
+ u8 reserved1;
+ __le16 adwell_max_budget;
+ __le32 max_out_of_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
+
+/**
+ * struct iwl_scan_periodic_parms_v1
+ * @schedule: scan scheduling parameters
+ * @delay: initial delay of the periodic scan in seconds
+ * @reserved: reserved for future
+ */
+struct iwl_scan_periodic_parms_v1 {
+ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ __le16 delay;
+ __le16 reserved;
+} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_params_v11
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v3
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v11 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v3 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
+
+/**
+ * struct iwl_scan_req_params_v12
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v4
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v12 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v4 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_params_v13
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v4
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v4
+ */
+struct iwl_scan_req_params_v13 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v4 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
+
+/**
+ * struct iwl_scan_req_umac_v11
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v11 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v11 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
+
+/**
+ * struct iwl_scan_req_umac_v12
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v12 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v12 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_umac_v13
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v13 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v13 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
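To show how the parameter structs nest, a hypothetical initializer for the v13 request (all values are placeholders):

	static void init_scan_v13(struct iwl_scan_req_umac_v13 *cmd,
				  u32 uid, u8 n_channels)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->uid = cpu_to_le32(uid);
		/* one immediate iteration, no periodic delay */
		cmd->scan_params.periodic_params.delay = cpu_to_le16(0);
		cmd->scan_params.channel_params.count = n_channels;
		cmd->scan_params.general_params.flags =
			cpu_to_le16(IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL);
	}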
+
+/**
* struct iwl_umac_scan_abort
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 450227f81706..970e9e508ad0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -288,8 +288,7 @@ struct iwl_mvm_keyinfo {
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
- * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
- * alone. 1 - modify, 0 - don't change.
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
* @reserved3: reserved
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
@@ -369,8 +368,7 @@ enum iwl_sta_type {
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
- * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
- * alone. 1 - modify, 0 - don't change.
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
* @reserved3: reserved
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 4621ef93a2cf..a731f28e101a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -393,4 +393,82 @@ struct iwl_hs20_roc_res {
__le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+/**
+ * enum iwl_mvm_session_prot_conf_id - session protection's configurations
+ * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
+ * The firmware will allocate two events.
+ * Valid for BSS_STA and P2P_STA.
+ * * A rather short event that can't be fragmented and with a very
+ * high priority. If everything goes well (99% of the cases) the
+ * association should complete within this first event. During
+ * that event, no other activity will happen in the firmware,
+ * which is why it can't be too long.
+ * The length of this event is hard-coded in the firmware: 300TUs.
+ * * Another event which can be much longer (its duration is
+ * configurable by the driver) which has a slightly lower
+ * priority and that can be fragmented allowing other activities
+ * to run while this event is running.
+ * The firmware will automatically remove both events once the driver sets
+ * the BSS MAC as associated. Neither of the events will be removed
+ * for the P2P_STA MAC.
+ * Only the duration is configurable for this protection.
+ * @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
+ * @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
+ * listen mode. Will be fragmented. Valid only on the P2P Device MAC.
+ * The firmware will take into account
+ * the duration, the interval and the repetition count.
+ * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
+ * able to run the GO Negotiation. Will not be fragmented and not
+ * repetitive. Valid only on the P2P Device MAC. Only the duration will
+ * be taken into account.
+ */
+enum iwl_mvm_session_prot_conf_id {
+ SESSION_PROTECT_CONF_ASSOC,
+ SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+
+/**
+ * struct iwl_mvm_session_prot_cmd - configure a session protection
+ * @id_and_color: the id and color of the mac for which this session protection
+ * is sent
+ * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
+ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @duration_tu: the duration of the whole protection in TUs.
+ * @repetition_count: not used
+ * @interval: not used
+ *
+ * Note: the session protection will always be scheduled to start as
+ * early as possible, but the maximum delay is configuration dependent.
+ * The firmware supports only one concurrent session protection per vif.
+ * Adding a new session protection will remove any currently running session.
+ */
+struct iwl_mvm_session_prot_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 conf_id;
+ __le32 duration_tu;
+ __le32 repetition_count;
+ __le32 interval;
+} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
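A sketch of an association protection request built from this struct, assuming the usual MAC-context helpers (FW_CMD_ID_AND_COLOR(), FW_CTXT_ACTION_ADD) and a placeholder duration:

	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color	= cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)),
		.action		= cpu_to_le32(FW_CTXT_ACTION_ADD),
		.conf_id	= cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
		.duration_tu	= cpu_to_le32(600),	/* placeholder */
		/* repetition_count and interval are ignored for this conf_id */
	};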
+
+/**
+ * struct iwl_mvm_session_prot_notif - session protection started / ended
+ * @mac_id: the mac id for which the session protection started / ended
+ * @status: 1 means success, 0 means failure
+ * @start: 1 means the session protection started, 0 means it ended
+ * @conf_id: the configuration id of the session that started / ended
+ *
+ * Note that any session protection will always get two notifications: start
+ * and end, even if the firmware could not schedule it.
+ */
+struct iwl_mvm_session_prot_notif {
+ __le32 mac_id;
+ __le32 status;
+ __le32 start;
+ __le32 conf_id;
+} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
+
#endif /* __iwl_fw_api_time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 8511e735c374..f89a9e16a8c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -323,7 +323,7 @@ struct iwl_tx_cmd_gen2 {
} __packed; /* TX_CMD_API_S_VER_7 */
/**
- * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @flags: combination of &enum iwl_tx_cmd_flags
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 87421807e040..ed90dd104366 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1055,19 +1055,31 @@ out:
return dump_file;
}
+/**
+ * struct iwl_dump_ini_region_data - region data
+ * @reg_tlv: region TLV
+ * @dump_data: dump data
+ */
+struct iwl_dump_ini_region_data {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fwrt_dump_data *dump_data;
+};
+
static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
u32 prph_val;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
prph_val = iwl_read_prph(fwrt->trans, addr + i);
if (prph_val == 0x5a5a5a5a)
return -EBUSY;
@@ -1078,39 +1090,42 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4)
*val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
+ range->range_data_size = reg->dev_addr.size;
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
- le32_to_cpu(reg->internal.range_data_size));
+ le32_to_cpu(reg->dev_addr.size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
void *range_ptr, int idx)
{
/* increase idx by 1 since the pages are from 1 to
@@ -1133,14 +1148,14 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
if (!fwrt->trans->trans_cfg->gen2)
- return _iwl_dump_ini_paging_iter(fwrt, reg, range_ptr, idx);
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1155,45 +1170,61 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *range_ptr,
- int idx)
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 start_addr = iwl_read_umac_prph(fwrt->trans,
- MON_BUFF_BASE_ADDR_VER2);
+ struct iwl_dram_data *frag;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
- if (start_addr == 0x5a5a5a5a)
- return -EBUSY;
+ frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx];
+
+ range->dram_base_addr = cpu_to_le64(frag->physical);
+ range->range_data_size = cpu_to_le32(frag->size);
+
+ memcpy(range->data, frag->block, frag->size);
- range->dram_base_addr = cpu_to_le64(start_addr);
- range->range_data_size = cpu_to_le32(fwrt->trans->dbg.fw_mon[idx].size);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
- memcpy(range->data, fwrt->trans->dbg.fw_mon[idx].block,
- fwrt->trans->dbg.fw_mon[idx].size);
+static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(reg->internal_buffer.base_addr);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->internal_buffer.size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(reg->internal_buffer.size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, int idx)
+ struct iwl_dump_ini_region_data *reg_data, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
int txf_num = cfg->num_txfifo_entries;
int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size);
- u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1);
+ u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]);
if (!idx) {
- if (le32_to_cpu(reg->offset) &&
- WARN_ONCE(cfg->num_lmacs == 1,
- "Invalid lmac offset: 0x%x\n",
- le32_to_cpu(reg->offset)))
+ if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) {
+ IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n",
+ le32_to_cpu(reg->fifos.offset));
return false;
+ }
iter->internal_txf = 0;
iter->fifo_size = 0;
iter->fifo = -1;
- if (le32_to_cpu(reg->offset))
+ if (le32_to_cpu(reg->fifos.offset))
iter->lmac = 1;
else
iter->lmac = 0;
@@ -1224,27 +1255,28 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
- u32 offs = le32_to_cpu(reg->offset), addr;
- u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
- if (!iwl_ini_txf_iter(fwrt, reg, idx))
+ if (!iwl_ini_txf_iter(fwrt, reg_data, idx))
return -EIO;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
- range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
@@ -1253,8 +1285,8 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
* read txf registers. for each register, write to the dump the
* register address and its value
*/
- for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
- addr = le32_to_cpu(reg->start_addr[i]) + offs;
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
reg_dump->addr = cpu_to_le32(addr);
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
@@ -1263,7 +1295,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
reg_dump++;
}
- if (reg->fifos.header_only) {
+ if (reg->fifos.hdr_only) {
range->range_data_size = cpu_to_le32(registers_size);
goto out;
}
@@ -1294,11 +1326,12 @@ struct iwl_ini_rxf_data {
};
static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
struct iwl_ini_rxf_data *data)
{
- u32 fid1 = le32_to_cpu(reg->fifos.fid1);
- u32 fid2 = le32_to_cpu(reg->fifos.fid2);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
+ u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
u32 fifo_idx;
if (!data)
@@ -1330,20 +1363,21 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_ini_rxf_data rxf_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
- u32 offs = le32_to_cpu(reg->offset), addr;
- u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
- iwl_ini_get_rxf_data(fwrt, reg, &rxf_data);
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
if (!rxf_data.size)
return -EIO;
@@ -1351,15 +1385,15 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
- range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
/*
* read rxf registers. for each register, write to the dump the
* register address and its value
*/
- for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
- addr = le32_to_cpu(reg->start_addr[i]) + offs;
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
reg_dump->addr = cpu_to_le32(addr);
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
@@ -1368,7 +1402,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
reg_dump++;
}
- if (reg->fifos.header_only) {
+ if (reg->fifos.hdr_only) {
range->range_data_size = cpu_to_le32(registers_size);
goto out;
}
@@ -1399,9 +1433,50 @@ out:
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
-static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static int
+iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(err_table->base_addr) +
+ le32_to_cpu(err_table->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = err_table->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(err_table->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
+ u32 pkt_len;
+
+ if (!pkt)
+ return -EIO;
+
+ pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr));
+ range->range_data_size = cpu_to_le32(pkt_len);
+
+ memcpy(range->data, pkt->data, pkt_len);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static void *
+iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_error_dump *dump = data;
@@ -1410,14 +1485,45 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->ranges;
}
-static void
-*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- struct iwl_fw_ini_monitor_dump *data,
- u32 write_ptr_addr, u32 write_ptr_msk,
- u32 cycle_cnt_addr, u32 cycle_cnt_msk)
+/**
+ * mask_apply_and_normalize - apply a mask to val and normalize the result
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * @val: value
+ * @mask: mask to apply and to normalize with
+ */
+static u32 mask_apply_and_normalize(u32 val, u32 mask)
+{
+ return (val & mask) >> (ffs(mask) - 1);
+}
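Worked example of the normalization:

	/*
	 * mask_apply_and_normalize(0xabcd, 0x0ff0) == 0xbc:
	 * (0xabcd & 0x0ff0) == 0x0bc0, ffs(0x0ff0) - 1 == 4,
	 * 0x0bc0 >> 4 == 0xbc
	 */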
+
+static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ const struct iwl_fw_mon_reg *reg_info)
+{
+ u32 val, offs;
+
+ /* The header address of DBGCi is calculated as follows:
+ * DBGC1 address + (0x100 * i)
+ */
+ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100;
+
+ if (!reg_info || !reg_info->addr || !reg_info->mask)
+ return 0;
+
+ val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs);
+
+ return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask));
+}
+
+static void *
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ struct iwl_fw_ini_monitor_dump *data,
+ const struct iwl_fw_mon_regs *addrs)
{
- u32 write_ptr, cycle_cnt;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
unsigned long flags;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
@@ -1425,76 +1531,66 @@ static void
return NULL;
}
- write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr);
- cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr);
+ data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->write_ptr);
+ data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cycle_cnt);
+ data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cur_frag);
iwl_trans_release_nic_access(fwrt->trans, &flags);
data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
- data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
- data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
return data->ranges;
}
-static void
-*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static void *
+iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk;
-
- switch (fwrt->trans->trans_cfg->device_family) {
- case IWL_DEVICE_FAMILY_9000:
- case IWL_DEVICE_FAMILY_22000:
- write_ptr_addr = MON_BUFF_WRPTR_VER2;
- write_ptr_msk = -1;
- cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2;
- cycle_cnt_msk = -1;
- break;
- default:
- IWL_ERR(fwrt, "Unsupported device family %d\n",
- fwrt->trans->trans_cfg->device_family);
- return NULL;
- }
- return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr,
- write_ptr_msk, cycle_cnt_addr,
- cycle_cnt_msk);
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_dram_regs);
}
-static void
-*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static void *
+iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- const struct iwl_cfg *cfg = fwrt->trans->cfg;
- if (fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
- fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) {
- IWL_ERR(fwrt, "Unsupported device family %d\n",
- fwrt->trans->trans_cfg->device_family);
- return NULL;
- }
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_smem_regs);
+}
- return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump,
- cfg->fw_mon_smem_write_ptr_addr,
- cfg->fw_mon_smem_write_ptr_msk,
- cfg->fw_mon_smem_cycle_cnt_ptr_addr,
- cfg->fw_mon_smem_cycle_cnt_ptr_msk);
+static void *
+iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_err_table_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->version = reg->err_table.version;
+
+ return dump->ranges;
}
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
- return le32_to_cpu(reg->internal.num_of_ranges);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+
+ return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
}
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
if (fwrt->trans->trans_cfg->gen2)
return fwrt->trans->init_dram.paging_cnt;
@@ -1502,54 +1598,73 @@ static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
return fwrt->num_of_paging_blk;
}
-static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- return 1;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ if (!fw_mon->frags[i].size)
+ break;
+
+ ranges++;
+ }
+
+ return ranges;
}
static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
u32 num_of_fifos = 0;
- while (iwl_ini_txf_iter(fwrt, reg, num_of_fifos))
+ while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos))
num_of_fifos++;
return num_of_fifos;
}
-static u32 iwl_dump_ini_rxf_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- /* Each Rx fifo needs a different offset and therefore, it's
- * region can contain only one fifo, i.e. 1 memory range.
- */
return 1;
}
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
- return sizeof(struct iwl_fw_ini_error_dump) +
- iwl_dump_ini_mem_ranges(fwrt, reg) *
- (sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->internal.range_data_size));
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_error_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
}
-static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
int i;
u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
u32 size = sizeof(struct iwl_fw_ini_error_dump);
if (fwrt->trans->trans_cfg->gen2) {
- for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg_data); i++)
size += range_header_len +
fwrt->trans->init_dram.paging[i].size;
} else {
- for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data);
+ i++)
size += range_header_len +
fwrt->fw_paging_db[i].fw_paging_size;
}
@@ -1557,66 +1672,128 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
-static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
- sizeof(struct iwl_fw_ini_error_dump_range);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
- if (fwrt->trans->dbg.num_blocks)
- size += fwrt->trans->dbg.fw_mon[0].size;
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ if (!frag->size)
+ break;
+
+ size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size;
+ }
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_monitor_dump);
return size;
}
-static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- return sizeof(struct iwl_fw_ini_monitor_dump) +
- iwl_dump_ini_mem_ranges(fwrt, reg) *
- (sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->internal.range_data_size));
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id), size;
+
+ fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_SRAM_PATH)
+ return 0;
+
+ size = le32_to_cpu(reg->internal_buffer.size);
+ if (!size)
+ return 0;
+
+ size += sizeof(struct iwl_fw_ini_monitor_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
}
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 size = 0;
u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) *
- sizeof(struct iwl_fw_ini_error_dump_register);
+ registers_num *
+ sizeof(struct iwl_fw_ini_error_dump_register);
- while (iwl_ini_txf_iter(fwrt, reg, size)) {
+ while (iwl_ini_txf_iter(fwrt, reg_data, size)) {
size += fifo_hdr;
- if (!reg->fifos.header_only)
+ if (!reg->fifos.hdr_only)
size += iter->fifo_size;
}
- if (size)
- size += sizeof(struct iwl_fw_ini_error_dump);
+ if (!size)
+ return 0;
- return size;
+ return size + sizeof(struct iwl_fw_ini_error_dump);
}
static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_ini_rxf_data rx_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 size = sizeof(struct iwl_fw_ini_error_dump) +
sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) *
- sizeof(struct iwl_fw_ini_error_dump_register);
+ registers_num * sizeof(struct iwl_fw_ini_error_dump_register);
- if (reg->fifos.header_only)
+ if (reg->fifos.hdr_only)
return size;
- iwl_ini_get_rxf_data(fwrt, reg, &rx_data);
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data);
size += rx_data.size;
return size;
}
+static u32
+iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->err_table.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_err_table_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+
+ if (!reg_data->dump_data->fw_pkt)
+ return 0;
+
+ size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt);
+ if (size)
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1628,14 +1805,15 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
*/
struct iwl_dump_ini_mem_ops {
u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg);
+ struct iwl_dump_ini_region_data *reg_data);
u32 (*get_size)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg);
+ struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *data);
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *range,
- int idx);
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range, int idx);
};
/**
@@ -1650,60 +1828,61 @@ struct iwl_dump_ini_mem_ops {
* @ops: memory dump operations
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
const struct iwl_dump_ini_mem_ops *ops)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
- u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type), size;
+ u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
+ u32 num_of_ranges, i, size;
void *range;
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
return 0;
- size = ops->get_size(fwrt, reg);
+ size = ops->get_size(fwrt, reg_data);
if (!size)
return 0;
- entry = kmalloc(sizeof(*entry) + sizeof(*tlv) + size, GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
return 0;
entry->size = sizeof(*tlv) + size;
tlv = (void *)entry->data;
- tlv->type = cpu_to_le32(type);
+ tlv->type = reg->type;
tlv->len = cpu_to_le32(size);
- IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
+ type);
- num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
+ num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
- header->region_id = reg->region_id;
+ header->region_id = reg->id;
header->num_of_ranges = cpu_to_le32(num_of_ranges);
- header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
- le32_to_cpu(reg->name_len)));
- memcpy(header->name, reg->name, le32_to_cpu(header->name_len));
+ header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
+ memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
- range = ops->fill_mem_hdr(fwrt, reg, header);
+ range = ops->fill_mem_hdr(fwrt, reg_data, header);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ id, type);
goto out_err;
}
for (i = 0; i < num_of_ranges; i++) {
- int range_size = ops->fill_range(fwrt, reg, range, i);
+ int range_size = ops->fill_range(fwrt, reg_data, range, i);
if (range_size < 0) {
IWL_ERR(fwrt,
"WRT: Failed to dump region: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ id, type);
goto out_err;
}
range = range + range_size;
@@ -1714,22 +1893,29 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
return entry->size;
out_err:
- kfree(entry);
+ vfree(entry);
return 0;
}
static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fw_ini_trigger_tlv *trigger,
struct list_head *list)
{
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_dump_info *dump;
- u32 reg_ids_size = le32_to_cpu(trigger->num_regions) * sizeof(__le32);
- u32 size = sizeof(*tlv) + sizeof(*dump) + reg_ids_size;
+ struct iwl_dbg_tlv_node *node;
+ struct iwl_fw_ini_dump_cfg_name *cfg_name;
+ u32 size = sizeof(*tlv) + sizeof(*dump);
+ u32 num_of_cfg_names = 0;
+
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ size += sizeof(*cfg_name);
+ num_of_cfg_names++;
+ }
- entry = kmalloc(sizeof(*entry) + size, GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + size);
if (!entry)
return 0;
@@ -1737,13 +1923,14 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
tlv = (void *)entry->data;
tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE);
- tlv->len = cpu_to_le32(sizeof(*dump) + reg_ids_size);
+ tlv->len = cpu_to_le32(size - sizeof(*tlv));
dump = (void *)tlv->data;
dump->version = cpu_to_le32(IWL_INI_DUMP_VER);
- dump->trigger_id = trigger->trigger_id;
- dump->is_external_cfg =
+ dump->time_point = trigger->time_point;
+ dump->trigger_reason = trigger->trigger_reason;
+ dump->external_cfg_state =
cpu_to_le32(fwrt->trans->dbg.external_ini_cfg);
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
@@ -1763,26 +1950,26 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major);
dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
+ dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
+ dump->regions_mask = trigger->regions_mask;
+
dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
memcpy(dump->build_tag, fwrt->fw->human_readable,
sizeof(dump->build_tag));
- dump->img_name_len = cpu_to_le32(sizeof(dump->img_name));
- memcpy(dump->img_name, fwrt->dump.img_name, sizeof(dump->img_name));
-
- dump->internal_dbg_cfg_name_len =
- cpu_to_le32(sizeof(dump->internal_dbg_cfg_name));
- memcpy(dump->internal_dbg_cfg_name, fwrt->dump.internal_dbg_cfg_name,
- sizeof(dump->internal_dbg_cfg_name));
-
- dump->external_dbg_cfg_name_len =
- cpu_to_le32(sizeof(dump->external_dbg_cfg_name));
-
- memcpy(dump->external_dbg_cfg_name, fwrt->dump.external_dbg_cfg_name,
- sizeof(dump->external_dbg_cfg_name));
-
- dump->regions_num = trigger->num_regions;
- memcpy(dump->region_ids, trigger->data, reg_ids_size);
+ cfg_name = dump->cfg_names;
+ dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names);
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ struct iwl_fw_ini_debug_info_tlv *debug_info =
+ (void *)node->tlv.data;
+
+ cfg_name->image_type = debug_info->image_type;
+ cfg_name->cfg_name_len =
+ cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
+ sizeof(cfg_name->cfg_name));
+ cfg_name++;
+ }
/* add dump info TLV to the beginning of the list since it needs to be
* the first TLV in the dump
@@ -1794,33 +1981,18 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
[IWL_FW_INI_REGION_INVALID] = {},
- [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
- },
- [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_prph_iter,
+ [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_mon_smem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
+ .fill_range = iwl_dump_ini_mon_smem_iter,
},
- [IWL_FW_INI_REGION_PERIPHERY_PHY] = {},
- [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
[IWL_FW_INI_REGION_DRAM_BUFFER] = {
.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges,
.get_size = iwl_dump_ini_mon_dram_get_size,
.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header,
.fill_range = iwl_dump_ini_mon_dram_iter,
},
- [IWL_FW_INI_REGION_DRAM_IMR] = {},
- [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mon_smem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
- },
[IWL_FW_INI_REGION_TXF] = {
.get_num_of_ranges = iwl_dump_ini_txf_ranges,
.get_size = iwl_dump_ini_txf_get_size,
@@ -1828,70 +2000,91 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_range = iwl_dump_ini_txf_iter,
},
[IWL_FW_INI_REGION_RXF] = {
- .get_num_of_ranges = iwl_dump_ini_rxf_ranges,
+ .get_num_of_ranges = iwl_dump_ini_single_range,
.get_size = iwl_dump_ini_rxf_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_rxf_iter,
},
- [IWL_FW_INI_REGION_PAGING] = {
+ [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_RSP_OR_NOTIF] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_fw_pkt_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .get_num_of_ranges = iwl_dump_ini_paging_ranges,
- .get_size = iwl_dump_ini_paging_get_size,
- .fill_range = iwl_dump_ini_paging_iter,
+ .fill_range = iwl_dump_ini_fw_pkt_iter,
},
- [IWL_FW_INI_REGION_CSR] = {
+ [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_csr_iter,
+ .fill_range = iwl_dump_ini_dev_mem_iter,
},
- [IWL_FW_INI_REGION_NOTIFICATION] = {},
- [IWL_FW_INI_REGION_DHC] = {},
- [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
+ .fill_range = iwl_dump_ini_prph_iter,
},
- [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ [IWL_FW_INI_REGION_PERIPHERY_PHY] = {},
+ [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
+ [IWL_FW_INI_REGION_PAGING] = {
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_num_of_ranges = iwl_dump_ini_paging_ranges,
+ .get_size = iwl_dump_ini_paging_get_size,
+ .fill_range = iwl_dump_ini_paging_iter,
+ },
+ [IWL_FW_INI_REGION_CSR] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
+ .fill_range = iwl_dump_ini_csr_iter,
},
+ [IWL_FW_INI_REGION_DRAM_IMR] = {},
+ [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {},
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ struct iwl_dump_ini_region_data reg_data = {
+ .dump_data = dump_data,
+ };
int i;
u32 size = 0;
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask);
- for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
- u32 reg_id = le32_to_cpu(trigger->data[i]), reg_type;
- struct iwl_fw_ini_region_cfg *reg;
+ for (i = 0; i < 64; i++) {
+ u32 reg_type;
+ struct iwl_fw_ini_region_tlv *reg;
- if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
+ if (!(BIT_ULL(i) & regions_mask))
continue;
- reg = fwrt->dump.active_regs[reg_id];
- if (!reg) {
+ reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ if (!reg_data.reg_tlv) {
IWL_WARN(fwrt,
- "WRT: Unassigned region id %d, skipping\n",
- reg_id);
+ "WRT: Unassigned region id %d, skipping\n", i);
continue;
}
- /* currently the driver supports always on domain only */
- if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
- continue;
-
- reg_type = le32_to_cpu(reg->region_type);
+ reg = (void *)reg_data.reg_tlv->data;
+ reg_type = le32_to_cpu(reg->type);
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
- size += iwl_dump_ini_mem(fwrt, list, reg,
+ size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
@@ -1901,31 +2094,43 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
return size;
}
+static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger_tlv *trig)
+{
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
+ u32 usec = le32_to_cpu(trig->ignore_consec);
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM ||
+ iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec))
+ return false;
+
+ return true;
+}
+
static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id trig_id,
+ struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_ini_dump_file_hdr *hdr;
- struct iwl_fw_ini_trigger *trigger;
u32 size;
- if (!iwl_fw_ini_trigger_on(fwrt, trig_id))
- return 0;
-
- trigger = fwrt->dump.active_trigs[trig_id].trig;
- if (!trigger || !le32_to_cpu(trigger->num_regions))
+ if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) ||
+ !le64_to_cpu(trigger->regions_mask))
return 0;
- entry = kmalloc(sizeof(*entry) + sizeof(*hdr), GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + sizeof(*hdr));
if (!entry)
return 0;
entry->size = sizeof(*hdr);
- size = iwl_dump_ini_trigger(fwrt, trigger, list);
+ size = iwl_dump_ini_trigger(fwrt, dump_data, list);
if (!size) {
- kfree(entry);
+ vfree(entry);
return 0;
}
@@ -1991,18 +2196,24 @@ static void iwl_dump_ini_list_free(struct list_head *list)
list_entry(list->next, typeof(*entry), list);
list_del(&entry->list);
- kfree(entry);
+ vfree(entry);
}
}
-static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
+{
+ dump_data->trig = NULL;
+ kfree(dump_data->fw_pkt);
+ dump_data->fw_pkt = NULL;
+}
+
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
- enum iwl_fw_ini_trigger_id trig_id = fwrt->dump.wks[wk_idx].ini_trig_id;
struct list_head dump_list = LIST_HEAD_INIT(dump_list);
struct scatterlist *sg_dump_data;
- u32 file_len;
+ u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
- file_len = iwl_dump_ini_file_gen(fwrt, trig_id, &dump_list);
if (!file_len)
goto out;
@@ -2023,7 +2234,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_dump_ini_list_free(&dump_list);
out:
- fwrt->dump.wks[wk_idx].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@@ -2038,15 +2249,9 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
- u32 trig_type = le32_to_cpu(desc->trig_desc.type);
- int ret;
-
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
- ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
- if (!ret)
- iwl_fw_free_dump_desc(fwrt);
-
- return ret;
+ iwl_fw_free_dump_desc(fwrt);
+ return 0;
}
/* use wks[0] since dump flow prior to ini does not need to support
@@ -2138,35 +2343,29 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
-int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id)
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
- struct iwl_fw_ini_active_triggers *active;
+ struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
u32 occur, delay;
unsigned long idx;
- if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
- return -EINVAL;
+ if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
+ return -EBUSY;
- if (!iwl_fw_ini_trigger_on(fwrt, id)) {
+ if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
- id);
+ tp_id);
return -EINVAL;
}
- active = &fwrt->dump.active_trigs[id];
- delay = le32_to_cpu(active->trig->dump_delay);
- occur = le32_to_cpu(active->trig->occurrences);
+ delay = le32_to_cpu(trig->dump_delay);
+ occur = le32_to_cpu(trig->occurrences);
if (!occur)
return 0;
- active->trig->occurrences = cpu_to_le32(--occur);
-
- if (le32_to_cpu(active->trig->force_restart)) {
- IWL_WARN(fwrt, "WRT: Force restart: trigger %d fired.\n", id);
- iwl_force_nmi(fwrt->trans);
- return 0;
- }
+ trig->occurrences = cpu_to_le32(--occur);
/* Check there is an available worker.
* ffz return value is undefined if no zero exists,
@@ -2181,36 +2380,14 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
- fwrt->dump.wks[idx].ini_trig_id = id;
+ fwrt->dump.wks[idx].dump_data = *dump_data;
- IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", id);
+ IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id);
schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
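+/* Note on the slot claim above (illustrative): ffz() is well defined only
+ * when a zero bit exists, e.g. ffz(0b0111) == 3 while ffz(~0UL) is
+ * undefined, hence idx is bounds-checked against
+ * IWL_FW_RUNTIME_DUMP_WK_NUM before test_and_set_bit() atomically claims
+ * the worker slot.
+ */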
-IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect);
-
-int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id)
-{
- int id;
-
- switch (legacy_trigger_id) {
- case FW_DBG_TRIGGER_FW_ASSERT:
- case FW_DBG_TRIGGER_ALIVE_TIMEOUT:
- case FW_DBG_TRIGGER_DRIVER:
- id = IWL_FW_TRIGGER_ID_FW_ASSERT;
- break;
- case FW_DBG_TRIGGER_USER:
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
- break;
- default:
- return -EIO;
- }
-
- return _iwl_fw_dbg_ini_collect(fwrt, id);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
@@ -2219,6 +2396,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
int ret, len = 0;
char buf[64];
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ return 0;
+
if (fmt) {
va_list ap;
@@ -2322,7 +2502,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
- iwl_fw_error_ini_dump(fwrt, wk_idx);
+ iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
iwl_fw_error_dump(fwrt);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
@@ -2335,11 +2515,10 @@ out:
void iwl_fw_error_dump_wk(struct work_struct *work)
{
- struct iwl_fw_runtime *fwrt;
- typeof(fwrt->dump.wks[0]) *wks;
-
- wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work);
- fwrt = container_of(wks, struct iwl_fw_runtime, dump.wks[wks->idx]);
+ struct iwl_fwrt_wk_data *wks =
+ container_of(work, typeof(*wks), wk.work);
+ struct iwl_fw_runtime *fwrt =
+ container_of(wks, typeof(*fwrt), dump.wks[wks->idx]);
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index e3b5dd34643f..179f2905d56b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -114,9 +114,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
-int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id);
-int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig, const char *str,
size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
@@ -222,29 +221,6 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
})
-static inline bool
-iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id)
-{
- struct iwl_fw_ini_trigger *trig;
- u32 usec;
-
- if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
- id == IWL_FW_TRIGGER_ID_INVALID || id >= IWL_FW_TRIGGER_ID_NUM ||
- !fwrt->dump.active_trigs[id].active)
- return false;
-
- trig = fwrt->dump.active_trigs[id].trig;
- usec = le32_to_cpu(trig->ignore_consec);
-
- if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) {
- IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id);
- return false;
- }
-
- return true;
-}
-
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -315,10 +291,8 @@ static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt)
int i;
iwl_dbg_tlv_del_timers(fwrt->trans);
- for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
flush_delayed_work(&fwrt->dump.wks[i].wk);
- fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
- }
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -381,12 +355,21 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
- if (iwl_trans_dbg_ini_valid(fwrt->trans) && fwrt->trans->dbg.hw_error) {
- _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
+ enum iwl_fw_ini_time_point tp_id;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ return;
+ }
+
+ if (fwrt->trans->dbg.hw_error) {
+ tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
fwrt->trans->dbg.hw_error = false;
} else {
- iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}
+
+ iwl_dbg_tlv_time_point(fwrt, tp_id, NULL);
}
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index c1aa4360736b..ca3b1a461dea 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -320,10 +320,45 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
+static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ u32 new_domain;
+ int ret;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return -EIO;
+
+ ret = kstrtou32(buf, 0, &new_domain);
+ if (ret)
+ return ret;
+
+ if (new_domain != fwrt->trans->dbg.domains_bitmap) {
+ ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
+ if (ret)
+ return ret;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ return scnprintf(buf, size, "0x%08x\n",
+ fwrt->trans->dbg.domains_bitmap);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
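+/* Usage sketch (the debugfs path is assumed, it depends on the op mode):
+ * echo 0xffffffff > /sys/kernel/debug/iwlwifi/<dev>/iwlmvm/fw_dbg_domain
+ * regenerates the active trigger list for the new domain bitmap and kicks
+ * the periodic time point; reading the file prints the current bitmap.
+ */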
+
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 2e763678dbdb..f008e1bbfdf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -65,6 +65,7 @@
#define __fw_error_dump_h__
#include <linux/types.h>
+#include "fw/api/cmdhdr.h"
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
#define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633
@@ -327,6 +328,7 @@ struct iwl_fw_ini_fifo_hdr {
* @dram_base_addr: base address of dram monitor range
* @page_num: page number of memory range
* @fifo_hdr: fifo header of memory range
+ * @fw_pkt_hdr: FW packet header of memory range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
@@ -336,6 +338,7 @@ struct iwl_fw_ini_error_dump_range {
__le64 dram_base_addr;
__le32 page_num;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ struct iwl_cmd_header fw_pkt_hdr;
};
__le32 data[];
} __packed;
@@ -379,12 +382,23 @@ struct iwl_fw_ini_error_dump_register {
__le32 data;
} __packed;
+/**
+ * struct iwl_fw_ini_dump_cfg_name - configuration name
+ * @image_type: image type the configuration is related to
+ * @cfg_name_len: length of the configuration name
+ * @cfg_name: name of the configuration
+ */
+struct iwl_fw_ini_dump_cfg_name {
+ __le32 image_type;
+ __le32 cfg_name_len;
+ u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed;
+
/**
 * struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
- * @trigger_id: trigger id that caused the dump collection
- * @trigger_reason: not supported yet
- * @is_external_cfg: 1 if an external debug configuration was loaded
- * and 0 otherwise
+ * @time_point: time point that caused the dump collection
+ * @trigger_reason: reason of the trigger
+ * @external_cfg_state: &enum iwl_ini_cfg_state
* @ver_type: FW version type
 * @ver_subtype: FW version subtype
* @hw_step: HW step
@@ -397,22 +411,18 @@ struct iwl_fw_ini_error_dump_register {
* @lmac_minor: lmac minor version
* @umac_major: umac major version
* @umac_minor: umac minor version
+ * @fw_mon_mode: FW monitor mode &enum iwl_fw_ini_buffer_location
+ * @regions_mask: bitmap mask of regions ids in the dump
* @build_tag_len: length of the build tag
* @build_tag: build tag string
- * @img_name_len: length of the FW image name
- * @img_name: FW image name
- * @internal_dbg_cfg_name_len: length of the internal debug configuration name
- * @internal_dbg_cfg_name: internal debug configuration name
- * @external_dbg_cfg_name_len: length of the external debug configuration name
- * @external_dbg_cfg_name: external debug configuration name
- * @regions_num: number of region ids
- * @region_ids: region ids the trigger configured to collect
+ * @num_of_cfg_names: number of configuration name structs
+ * @cfg_names: configuration names
*/
struct iwl_fw_ini_dump_info {
__le32 version;
- __le32 trigger_id;
+ __le32 time_point;
__le32 trigger_reason;
- __le32 is_external_cfg;
+ __le32 external_cfg_state;
__le32 ver_type;
__le32 ver_subtype;
__le32 hw_step;
@@ -425,17 +435,24 @@ struct iwl_fw_ini_dump_info {
__le32 lmac_minor;
__le32 umac_major;
__le32 umac_minor;
+ __le32 fw_mon_mode;
+ __le64 regions_mask;
__le32 build_tag_len;
u8 build_tag[FW_VER_HUMAN_READABLE_SZ];
- __le32 img_name_len;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- __le32 internal_dbg_cfg_name_len;
- u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- __le32 external_dbg_cfg_name_len;
- u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- __le32 regions_num;
- __le32 region_ids[];
+ __le32 num_of_cfg_names;
+ struct iwl_fw_ini_dump_cfg_name cfg_names[];
+} __packed;
+/**
+ * struct iwl_fw_ini_err_table_dump - ini error table dump
+ * @header: header of the region
+ * @version: error table version
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_err_table_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ __le32 version;
+ struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
/**
@@ -457,12 +474,14 @@ struct iwl_fw_error_dump_rb {
* @header: header of the region
* @write_ptr: write pointer position in the buffer
* @cycle_cnt: cycles count
+ * @cur_frag: current fragment in use
 * @ranges: the memory ranges of this region
*/
struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
+ __le32 cur_frag;
struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 0d5bc4ce5c07..1554f5fdd483 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};
-#define IWL_UCODE_INI_TLV_GROUP 0x1000000
+#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
/*
* new TLV uCode file layout
@@ -151,7 +151,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
- IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_INI_TLV_GROUP,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
@@ -326,6 +325,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56,
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
+
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -449,6 +450,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 039576d71276..994880a83652 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -228,18 +228,6 @@ struct iwl_fw_dbg {
};
/**
- * struct iwl_fw_ini_active_triggers
- * @active: is this trigger active
- * @size: allocated memory size of the trigger
- * @trig: trigger
- */
-struct iwl_fw_ini_active_triggers {
- bool active;
- size_t size;
- struct iwl_fw_ini_trigger *trig;
-};
-
-/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -325,4 +313,22 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
+static inline u8 iwl_mvm_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return IWL_FW_CMD_VER_UNKNOWN;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWL_FW_CMD_VER_UNKNOWN;
+}
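+/* Usage sketch (group/command ids are assumed examples):
+ * iwl_mvm_lookup_cmd_ver(fw, LONG_GROUP, TX_CMD) returns the version the
+ * FW advertised for TX_CMD, or IWL_FW_CMD_VER_UNKNOWN if the capability
+ * list has no matching entry.
+ */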
+
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index be436c18a047..c24575ff0e54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -65,7 +65,11 @@
#include "img.h"
#include "fw/api/debug.h"
#include "fw/api/paging.h"
+#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
+#include "fw/acpi.h"
+
+#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
@@ -91,6 +95,27 @@ struct iwl_fwrt_shared_mem_cfg {
#define IWL_FW_RUNTIME_DUMP_WK_NUM 5
/**
+ * struct iwl_fwrt_dump_data - dump data
+ * @trig: trigger the worker was scheduled upon
+ * @fw_pkt: packet received from FW
+ */
+struct iwl_fwrt_dump_data {
+ struct iwl_fw_ini_trigger_tlv *trig;
+ struct iwl_rx_packet *fw_pkt;
+};
+
+/**
+ * struct iwl_fwrt_wk_data - dump worker data struct
+ * @idx: index of the worker
+ * @wk: worker
+ */
+struct iwl_fwrt_wk_data {
+ u8 idx;
+ struct delayed_work wk;
+ struct iwl_fwrt_dump_data dump_data;
+};
+
+/**
* struct iwl_txf_iter_data - Tx fifo iterator data struct
* @fifo: fifo number
* @lmac: lmac number
@@ -105,6 +130,14 @@ struct iwl_txf_iter_data {
};
/**
+ * enum iwl_fw_runtime_status - fw runtime status flags
+ * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
+ */
+enum iwl_fw_runtime_status {
+ STATUS_GEN_ACTIVE_TRIGS,
+};
+
+/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @cfg: NIC configuration
@@ -117,6 +150,7 @@ struct iwl_txf_iter_data {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
+ * @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -137,33 +171,25 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
+ unsigned long status;
+
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
- struct {
- u8 idx;
- enum iwl_fw_ini_trigger_id ini_trig_id;
- struct delayed_work wk;
- } wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
+ struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
- unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM];
+ unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
- struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID];
- struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
-
struct {
u8 type;
u8 subtype;
@@ -179,7 +205,16 @@ struct iwl_fw_runtime {
u32 delay;
u64 seq;
} timestamp;
+ bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
+#ifdef CONFIG_ACPI
+ struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ u8 sar_chain_a_profile;
+ u8 sar_chain_b_profile;
+ struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+ u32 geo_rev;
+ struct iwl_ppag_table_cmd ppag_table;
+#endif
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
@@ -194,16 +229,6 @@ static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
kfree(fwrt->dump.d3_debug_data);
fwrt->dump.d3_debug_data = NULL;
- for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) {
- struct iwl_fw_ini_active_triggers *active =
- &fwrt->dump.active_trigs[i];
-
- active->active = false;
- active->size = 0;
- kfree(active->trig);
- active->trig = NULL;
- }
-
iwl_dbg_tlv_del_timers(fwrt->trans);
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
cancel_delayed_work_sync(&fwrt->dump.wks[i].wk);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 214495a7165f..317eac066082 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -88,7 +88,6 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
- IWL_DEVICE_FAMILY_22560,
IWL_DEVICE_FAMILY_AX210,
};
@@ -360,6 +359,28 @@ struct iwl_cfg_trans_params {
};
/**
+ * struct iwl_fw_mon_reg - FW monitor register info
+ * @addr: register address
+ * @mask: register mask
+ */
+struct iwl_fw_mon_reg {
+ u32 addr;
+ u32 mask;
+};
+
+/**
+ * struct iwl_fw_mon_regs - FW monitor registers
+ * @write_ptr: write pointer register
+ * @cycle_cnt: cycle count register
+ * @cur_frag: current fragment register
+ */
+struct iwl_fw_mon_regs {
+ struct iwl_fw_mon_reg write_ptr;
+ struct iwl_fw_mon_reg cycle_cnt;
+ struct iwl_fw_mon_reg cur_frag;
+};
+
+/**
* struct iwl_cfg
* @trans: the trans-specific configuration part
* @name: Official name of the device
@@ -388,7 +409,6 @@ struct iwl_cfg_trans_params {
* @mac_addr_from_csr: read HW address from CSR registers
* @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
- * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
* station can receive in HT
@@ -460,7 +480,6 @@ struct iwl_cfg {
u8 valid_rx_ant;
u8 non_shared_ant;
u8 nvm_hw_section_num;
- u8 max_rx_agg_size;
u8 max_tx_agg_size;
u8 max_ht_ampdu_exponent;
u8 max_vht_ampdu_exponent;
@@ -471,12 +490,10 @@ struct iwl_cfg {
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
u32 min_txq_size;
- u32 fw_mon_smem_write_ptr_addr;
- u32 fw_mon_smem_write_ptr_msk;
- u32 fw_mon_smem_cycle_cnt_ptr_addr;
- u32 fw_mon_smem_cycle_cnt_ptr_msk;
u32 gp2_reg_addr;
u32 min_256_ba_txq_size;
+ const struct iwl_fw_mon_regs mon_dram_regs;
+ const struct iwl_fw_mon_regs mon_smem_regs;
};
extern const struct iwl_csr_params iwl_csr_v1;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 695bbaa86273..92d9898ab7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -603,9 +603,7 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
- MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
MSIX_HW_INT_CAUSES_REG_IML = BIT(2),
- MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3d7f8ff8ef58..f266647dc08c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -95,6 +95,20 @@ struct iwl_dbg_tlv_ver_data {
int max_ver;
};
+/**
+ * struct iwl_dbg_tlv_timer_node - timer node struct
+ * @list: list of &struct iwl_dbg_tlv_timer_node
+ * @timer: timer
+ * @fwrt: &struct iwl_fw_runtime
+ * @tlv: TLV attached to the timer node
+ */
+struct iwl_dbg_tlv_timer_node {
+ struct list_head list;
+ struct timer_list timer;
+ struct iwl_fw_runtime *fwrt;
+ struct iwl_ucode_tlv *tlv;
+};
+
static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
@@ -104,12 +118,27 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
+static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
+{
+ u32 len = le32_to_cpu(tlv->length);
+ struct iwl_dbg_tlv_node *node;
+
+ node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+ list_add_tail(&node->list, list);
+
+ return 0;
+}
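+/* Layout sketch (illustrative): node->tlv is the last member of
+ * struct iwl_dbg_tlv_node, so allocating sizeof(*node) + len leaves room
+ * for the TLV's flexible payload and the single memcpy() above copies
+ * the iwl_ucode_tlv header together with its 'len' data bytes.
+ */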
+
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
- u32 ver = le32_to_cpu(hdr->tlv_version);
+ u32 ver = le32_to_cpu(hdr->version);
if (ver < dbg_ver_table[tlv_idx].min_ver ||
ver > dbg_ver_table[tlv_idx].max_ver)
@@ -118,27 +147,169 @@ static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
return true;
}
+static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;
+
+ if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
+ return -EINVAL;
+
+ IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
+ debug_info->debug_cfg_name);
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+}
+
+static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
+ u32 buf_location = le32_to_cpu(alloc->buf_location);
+ u32 alloc_id = le32_to_cpu(alloc->alloc_id);
+
+ if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
+ (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
+ buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
+ return -EINVAL;
+
+ if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
+ (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
+ (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
+ IWL_ERR(trans,
+ "WRT: Invalid allocation id %u for allocation TLV\n",
+ alloc_id);
+ return -EINVAL;
+ }
+
+ trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
+ u32 tp = le32_to_cpu(hcmd->time_point);
+
+ if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
+ return -EINVAL;
+
+ /* Host commands cannot be sent at an early time point since the FW
+ * is not ready yet
+ */
+ if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM ||
+ tp == IWL_FW_INI_TIME_POINT_EARLY) {
+ IWL_ERR(trans,
+ "WRT: Invalid time point %u for host command TLV\n",
+ tp);
+ return -EINVAL;
+ }
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+}
+
+static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
+ struct iwl_ucode_tlv **active_reg;
+ u32 id = le32_to_cpu(reg->id);
+ u32 type = le32_to_cpu(reg->type);
+ u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
+
+ if (le32_to_cpu(tlv->length) < sizeof(*reg))
+ return -EINVAL;
+
+ if (id >= IWL_FW_INI_MAX_REGION_ID) {
+ IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
+ return -EINVAL;
+ }
+
+ if (type <= IWL_FW_INI_REGION_INVALID ||
+ type >= IWL_FW_INI_REGION_NUM) {
+ IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
+ return -EINVAL;
+ }
+
+ active_reg = &trans->dbg.active_regions[id];
+ if (*active_reg) {
+ IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
+
+ kfree(*active_reg);
+ }
+
+ *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
+ if (!*active_reg)
+ return -ENOMEM;
+
+ IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ u32 tp = le32_to_cpu(trig->time_point);
+
+ if (le32_to_cpu(tlv->length) < sizeof(*trig))
+ return -EINVAL;
+
+ if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM) {
+ IWL_ERR(trans,
+ "WRT: Invalid time point %u for trigger TLV\n",
+ tp);
+ return -EINVAL;
+ }
+
+ if (!le32_to_cpu(trig->occurrences))
+ trig->occurrences = cpu_to_le32(-1);
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+}
+
+static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv) = {
+ [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
+ [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
+ [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
+ [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
+ [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
+};
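+/* Example dispatch (enum value assumed): a TLV of type
+ * IWL_UCODE_TLV_TYPE_REGION (IWL_UCODE_TLV_DEBUG_BASE + 3) gives
+ * tlv_idx = 3 in iwl_dbg_tlv_alloc() below, which indexes this table and
+ * ends up in iwl_dbg_tlv_alloc_region().
+ */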
+
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
- u32 pnt = le32_to_cpu(hdr->apply_point);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
+ int ret;
- IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
- type, pnt);
-
- if (tlv_idx >= IWL_DBG_TLV_TYPE_NUM) {
- IWL_ERR(trans, "WRT: Unsupported TLV 0x%x\n", type);
+ if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
+ IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
}
if (!iwl_dbg_tlv_ver_support(tlv)) {
IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
- le32_to_cpu(hdr->tlv_version));
+ le32_to_cpu(hdr->version));
+ goto out_err;
+ }
+
+ ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
+ if (ret) {
+ IWL_ERR(trans,
+ "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
+ type, ret, ext);
goto out_err;
}
@@ -153,13 +324,91 @@ out_err:
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
- /* will be used later */
+ struct list_head *timer_list = &trans->dbg.periodic_trig_list;
+ struct iwl_dbg_tlv_timer_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, timer_list, list) {
+ del_timer(&node->timer);
+ list_del(&node->list);
+ kfree(node);
+ }
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);
+static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ int i;
+
+ if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return;
+
+ fw_mon = &trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ dma_free_coherent(trans->dev, frag->size, frag->block,
+ frag->physical);
+
+ frag->physical = 0;
+ frag->block = NULL;
+ frag->size = 0;
+ }
+
+ kfree(fw_mon->frags);
+ fw_mon->frags = NULL;
+ fw_mon->num_frags = 0;
+}
+
void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
- /* will be used again later */
+ struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
+ int i;
+
+ iwl_dbg_tlv_del_timers(trans);
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv **active_reg =
+ &trans->dbg.active_regions[i];
+
+ kfree(*active_reg);
+ *active_reg = NULL;
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &trans->dbg.debug_info_tlv_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &trans->dbg.time_point[i];
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
+ list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
+ list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &tp->active_trig_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
+ iwl_dbg_tlv_fragments_free(trans, i);
}
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
@@ -196,7 +445,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
if (!iwlwifi_mod_params.enable_ini)
return;
- res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
+ res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
if (res)
return;
@@ -205,10 +454,628 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
release_firmware(fw);
}
+void iwl_dbg_tlv_init(struct iwl_trans *trans)
+{
+ int i;
+
+ INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
+ INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &trans->dbg.time_point[i];
+
+ INIT_LIST_HEAD(&tp->trig_list);
+ INIT_LIST_HEAD(&tp->hcmd_list);
+ INIT_LIST_HEAD(&tp->active_trig_list);
+ }
+}
+
+static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
+ struct iwl_dram_data *frag, u32 pages)
+{
+ void *block = NULL;
+ dma_addr_t physical;
+
+ if (!frag || frag->size || !pages)
+ return -EIO;
+
+ while (pages) {
+ block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
+ &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (block)
+ break;
+
+ IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
+ pages * PAGE_SIZE);
+
+ pages = DIV_ROUND_UP(pages, 2);
+ }
+
+ if (!block)
+ return -ENOMEM;
+
+ frag->physical = physical;
+ frag->block = block;
+ frag->size = pages * PAGE_SIZE;
+
+ return pages;
+}
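+/* Back-off sketch (page counts assumed): a 64-page request retries with
+ * 64, 32, 16, ... pages until dma_alloc_coherent() succeeds, so a
+ * fragment may come back smaller than asked for; the caller learns the
+ * actual size from the returned page count.
+ */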
+
+static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 num_frags, remain_pages, frag_pages;
+ int i;
+
+ if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return -EIO;
+
+ fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ if (fw_mon->num_frags ||
+ fw_mon_cfg->buf_location !=
+ cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
+ return 0;
+
+ num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+ if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ return -EIO;
+ num_frags = 1;
+ }
+
+ remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
+ PAGE_SIZE);
+ num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+ num_frags = min_t(u32, num_frags, remain_pages);
+ frag_pages = DIV_ROUND_UP(remain_pages, num_frags);
+
+ fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
+ if (!fw_mon->frags)
+ return -ENOMEM;
+
+ for (i = 0; i < num_frags; i++) {
+ int pages = min_t(u32, frag_pages, remain_pages);
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
+ alloc_id, i, pages * PAGE_SIZE);
+
+ pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
+ pages);
+ if (pages < 0) {
+ u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
+ (remain_pages * PAGE_SIZE);
+
+ if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
+ iwl_dbg_tlv_fragments_free(fwrt->trans,
+ alloc_id);
+ return pages;
+ }
+ break;
+ }
+
+ remain_pages -= pages;
+ fw_mon->num_frags++;
+ }
+
+ return 0;
+}
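+/* Worked split (values assumed): a req_size worth 8 pages with
+ * max_frags_num = 4 gives remain_pages = 8, num_frags = min(4, 8) = 4
+ * and frag_pages = DIV_ROUND_UP(8, 4) = 2, i.e. four 2-page DMA
+ * fragments; without IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP the whole
+ * buffer collapses into a single DBGC1 fragment.
+ */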
+
+static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ u32 remain_frags, num_commands;
+ int i, fw_mon_idx = 0;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
+ return 0;
+
+ if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return -EIO;
+
+ if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH)
+ return 0;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ /* the first fragment of DBGC1 is given to the FW via register
+ * or context info
+ */
+ if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ fw_mon_idx++;
+
+ remain_frags = fw_mon->num_frags - fw_mon_idx;
+ if (!remain_frags)
+ return 0;
+
+ num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ for (i = 0; i < num_commands; i++) {
+ u32 num_frags = min_t(u32, remain_frags,
+ BUF_ALLOC_MAX_NUM_FRAGS);
+ struct iwl_buf_alloc_cmd data = {
+ .alloc_id = cpu_to_le32(alloc_id),
+ .num_frags = cpu_to_le32(num_frags),
+ .buf_location =
+ cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
+ .data[0] = &data,
+ .len[0] = sizeof(data),
+ };
+ int ret, j;
+
+ for (j = 0; j < num_frags; j++) {
+ struct iwl_buf_alloc_frag *frag = &data.frags[j];
+ struct iwl_dram_data *fw_mon_frag =
+ &fw_mon->frags[fw_mon_idx++];
+
+ frag->addr = cpu_to_le64(fw_mon_frag->physical);
+ frag->size = cpu_to_le32(fw_mon_frag->size);
+ }
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (ret)
+ return ret;
+
+ remain_frags -= num_frags;
+ }
+
+ return 0;
+}
+
+static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
+{
+ int ret, i;
+
+ for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
+ ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
+ if (ret)
+ IWL_WARN(fwrt,
+ "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
+ i, ret);
+ }
+}
+
+static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
+ struct list_head *hcmd_list)
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, hcmd_list, list) {
+ struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
+ struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
+ u32 domain = le32_to_cpu(hcmd->hdr.domain);
+ u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(hcmd_data->group, hcmd_data->id),
+ .len = { hcmd_len, },
+ .data = { hcmd_data->data, },
+ };
+
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & fwrt->trans->dbg.domains_bitmap))
+ continue;
+
+ iwl_trans_send_cmd(fwrt->trans, &cmd);
+ }
+}
+
+static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
+{
+ struct iwl_dbg_tlv_timer_node *timer_node =
+ from_timer(timer_node, t, timer);
+ struct iwl_fwrt_dump_data dump_data = {
+ .trig = (void *)timer_node->tlv->data,
+ };
+ int ret;
+
+ ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
+ if (!ret || ret == -EBUSY) {
+ u32 occur = le32_to_cpu(dump_data.trig->occurrences);
+ u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
+
+ if (!occur)
+ return;
+
+ mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
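+/* Re-arm policy (descriptive): the timer keeps firing every data[0] msec
+ * while the trigger has occurrences left; a collect that fails with
+ * anything other than -EBUSY, or an exhausted occurrence count, stops
+ * the periodic trigger.
+ */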
+
+static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_dbg_tlv_node *node;
+ struct list_head *trig_list =
+ &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;
+
+ list_for_each_entry(node, trig_list, list) {
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
+ struct iwl_dbg_tlv_timer_node *timer_node;
+ u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
+ u32 min_interval = 100;
+
+ if (!occur)
+ continue;
+
+ /* make sure there is at least one dword of data for the
+ * interval value
+ */
+ if (le32_to_cpu(node->tlv.length) <
+ sizeof(*trig) + sizeof(__le32)) {
+ IWL_ERR(fwrt,
+ "WRT: Invalid periodic trigger data was not given\n");
+ continue;
+ }
+
+ if (le32_to_cpu(trig->data[0]) < min_interval) {
+ IWL_WARN(fwrt,
+ "WRT: Override min interval from %u to %u msec\n",
+ le32_to_cpu(trig->data[0]), min_interval);
+ trig->data[0] = cpu_to_le32(min_interval);
+ }
+
+ collect_interval = le32_to_cpu(trig->data[0]);
+
+ timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
+ if (!timer_node) {
+ IWL_ERR(fwrt,
+ "WRT: Failed to allocate periodic trigger\n");
+ continue;
+ }
+
+ timer_node->fwrt = fwrt;
+ timer_node->tlv = &node->tlv;
+ timer_setup(&timer_node->timer,
+ iwl_dbg_tlv_periodic_trig_handler, 0);
+
+ list_add_tail(&timer_node->list,
+ &fwrt->trans->dbg.periodic_trig_list);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");
+
+ mod_timer(&timer_node->timer,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
+
+static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
+ struct iwl_ucode_tlv *old)
+{
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
+ struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
+ __le32 *new_data = new_trig->data, *old_data = old_trig->data;
+ u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
+ u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
+ int i, j;
+
+ for (i = 0; i < new_dwords_num; i++) {
+ bool match = false;
+
+ for (j = 0; j < old_dwords_num; j++) {
+ if (new_data[i] == old_data[j]) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+}
+
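is_trig_data_contained() treats the trigger payload as an unordered set: every dword of the new trigger must appear somewhere in the old one, an O(n*m) subset test with no ordering or duplicate requirements. The same logic as a runnable userspace check:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true iff every element of new_d[] appears somewhere in old_d[] */
    static bool is_contained(const uint32_t *new_d, size_t new_n,
                             const uint32_t *old_d, size_t old_n)
    {
            for (size_t i = 0; i < new_n; i++) {
                    bool match = false;

                    for (size_t j = 0; j < old_n; j++) {
                            if (new_d[i] == old_d[j]) {
                                    match = true;
                                    break;
                            }
                    }
                    if (!match)
                            return false;
            }
            return true;
    }

    int main(void)
    {
            uint32_t old_d[] = { 1, 2, 3 }, new_d[] = { 3, 1 };

            printf("%d\n", is_contained(new_d, 2, old_d, 3)); /* 1 */
            return 0;
    }
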
+static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
+ struct iwl_ucode_tlv *trig_tlv,
+ struct iwl_dbg_tlv_node *node)
+{
+ struct iwl_ucode_tlv *node_tlv = &node->tlv;
+ struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
+ u32 policy = le32_to_cpu(trig->apply_policy);
+ u32 size = le32_to_cpu(trig_tlv->length);
+ u32 trig_data_len = size - sizeof(*trig);
+ u32 offset = 0;
+
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
+ u32 data_len = le32_to_cpu(node_tlv->length) -
+ sizeof(*node_trig);
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Appending trigger data (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ offset += data_len;
+ size += data_len;
+ } else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger data (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+ }
+
+ if (size != le32_to_cpu(node_tlv->length)) {
+ struct list_head *prev = node->list.prev;
+ struct iwl_dbg_tlv_node *tmp;
+
+ list_del(&node->list);
+
+ tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
+ if (!tmp) {
+ IWL_WARN(fwrt,
+ "WRT: No memory to override trigger (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ list_add(&node->list, prev);
+
+ return -ENOMEM;
+ }
+
+ list_add(&tmp->list, prev);
+ node_tlv = &tmp->tlv;
+ node_trig = (void *)node_tlv->data;
+ }
+
+ memcpy(node_trig->data + offset, trig->data, trig_data_len);
+ node_tlv->length = cpu_to_le32(size);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger configuration (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ /* the first 11 dwords are configuration related */
+ memcpy(node_trig, trig, sizeof(__le32) * 11);
+ }
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger regions (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ node_trig->regions_mask = trig->regions_mask;
+ } else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Appending trigger regions (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ node_trig->regions_mask |= trig->regions_mask;
+ }
+
+ return 0;
+}
+
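Note the shape of the reallocation above: because the node is embedded in a linked list, krealloc() may move it and leave stale list pointers behind, so the function unlinks first, reallocates, and re-links whatever pointer comes back at the remembered position, restoring the untouched old node if allocation fails. A hedged sketch of that unlink/realloc/relink idiom (struct blob and grow_blob() are illustrative names):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct blob {
            struct list_head list;
            size_t len;
            u8 data[];
    };

    static struct blob *grow_blob(struct blob *b, size_t new_len)
    {
            struct list_head *prev = b->list.prev; /* remember position */
            struct blob *tmp;

            list_del(&b->list); /* pointers go stale across krealloc() */
            tmp = krealloc(b, sizeof(*tmp) + new_len, GFP_KERNEL);
            if (!tmp) {
                    list_add(&b->list, prev); /* old node is still valid */
                    return NULL;
            }
            list_add(&tmp->list, prev); /* re-link the (possibly moved) node */
            tmp->len = new_len;
            return tmp;
    }
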
+static int
+iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
+ struct list_head *trig_list,
+ struct iwl_ucode_tlv *trig_tlv)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
+ struct iwl_dbg_tlv_node *node, *match = NULL;
+ u32 policy = le32_to_cpu(trig->apply_policy);
+
+ list_for_each_entry(node, trig_list, list) {
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
+ break;
+
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
+ is_trig_data_contained(trig_tlv, &node->tlv)) {
+ match = node;
+ break;
+ }
+ }
+
+ if (!match) {
+ IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+ return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ }
+
+ return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
+}
+
+static void
+iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
+ struct iwl_dbg_tlv_time_point_data *tp)
+{
+ struct iwl_dbg_tlv_node *node, *tmp;
+ struct list_head *trig_list = &tp->trig_list;
+ struct list_head *active_trig_list = &tp->active_trig_list;
+
+ list_for_each_entry_safe(node, tmp, active_trig_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ list_for_each_entry(node, trig_list, list) {
+ struct iwl_ucode_tlv *tlv = &node->tlv;
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ u32 domain = le32_to_cpu(trig->hdr.domain);
+
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & fwrt->trans->dbg.domains_bitmap))
+ continue;
+
+ iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
+ }
+}
+
+int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
+{
+ int i;
+
+ if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
+ return -EBUSY;
+
+ iwl_fw_flush_dumps(fwrt);
+
+ fwrt->trans->dbg.domains_bitmap = new_domain;
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &fwrt->trans->dbg.time_point[i];
+
+ iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+ }
+
+ clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
+
+ return 0;
+}
+
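The STATUS_GEN_ACTIVE_TRIGS bit works as a lock-free guard here: the first caller atomically claims it and regenerates the lists, while a concurrent caller sees the bit already set and bails out with -EBUSY instead of blocking. A minimal sketch of the idiom (STATUS_BUSY and the exclusive work are illustrative):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define STATUS_BUSY 0

    static unsigned long status;

    static int regen_once(void)
    {
            if (test_and_set_bit(STATUS_BUSY, &status))
                    return -EBUSY; /* another caller owns the section */

            /* ... exclusive work goes here ... */

            clear_bit(STATUS_BUSY, &status);
            return 0;
    }
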
+static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ u32 trig_data)
+{
+ struct iwl_rx_packet *pkt = tp_data->fw_pkt;
+ struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
+
+ if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
+ (pkt->hdr.cmd == wanted_hdr->cmd &&
+ pkt->hdr.group_id == wanted_hdr->group_id))) {
+ struct iwl_rx_packet *fw_pkt =
+ kmemdup(pkt,
+ sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
+ GFP_ATOMIC);
+
+ if (!fw_pkt)
+ return false;
+
+ dump_data->fw_pkt = fw_pkt;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int
+iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
+ struct list_head *active_trig_list,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ bool (*data_check)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ u32 trig_data))
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, active_trig_list, list) {
+ struct iwl_fwrt_dump_data dump_data = {
+ .trig = (void *)node->tlv.data,
+ };
+ u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
+ data);
+ int ret, i;
+
+ if (!num_data) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_data; i++) {
+ if (!data_check ||
+ data_check(fwrt, &dump_data, tp_data,
+ le32_to_cpu(dump_data.trig->data[i]))) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
+ if (ret)
+ return ret;
+
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
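The optional data_check predicate gives this loop two modes: with no trigger data the dump fires unconditionally, otherwise the first data dword that the predicate accepts (or any dword when the predicate is NULL) starts exactly one collection per trigger. A compact sketch of that fire-once filter shape, with illustrative names:

    #include <stdbool.h>
    #include <stddef.h>

    typedef bool (*filter_t)(unsigned int datum);

    /* returns 1 if a collection should fire for this trigger */
    static int fire_trigger(const unsigned int *data, size_t n, filter_t f)
    {
            if (!n)
                    return 1; /* no data: collect unconditionally */

            for (size_t i = 0; i < n; i++)
                    if (!f || f(data[i]))
                            return 1; /* first accepted datum wins */

            return 0;
    }
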
+static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+{
+ enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
+ int ret, i;
+
+ iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+
+ *ini_dest = IWL_FW_INI_LOCATION_INVALID;
+ for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &fwrt->trans->dbg.fw_mon_cfg[i];
+ u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
+
+ if (dest == IWL_FW_INI_LOCATION_INVALID)
+ continue;
+
+ if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
+ *ini_dest = dest;
+
+ if (dest != *ini_dest)
+ continue;
+
+ ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
+ if (ret)
+ IWL_WARN(fwrt,
+ "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
+ i, ret);
+ }
+}
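The destination loop above implements a first-valid-wins policy: the first configured buf_location becomes the global ini_dest, and any later allocation that points somewhere else is skipped rather than mixed in. A runnable illustration of just that consolidation rule (the values are made up):

    #include <stdio.h>

    #define INVALID 0

    int main(void)
    {
            int dest[] = { INVALID, 2 /* DRAM */, 3 /* SMEM */, 2 };
            int ini_dest = INVALID;

            for (int i = 0; i < 4; i++) {
                    if (dest[i] == INVALID)
                            continue;
                    if (ini_dest == INVALID)
                            ini_dest = dest[i]; /* first valid one wins */
                    if (dest[i] != ini_dest)
                            continue; /* id 2 is skipped here */
                    printf("allocating for id %d\n", i); /* ids 1 and 3 */
            }
            return 0;
    }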
+
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
- /* will be used later */
+ struct list_head *hcmd_list, *trig_list;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM)
+ return;
+
+ hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
+ trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
+
+ switch (tp_id) {
+ case IWL_FW_INI_TIME_POINT_EARLY:
+ iwl_dbg_tlv_init_cfg(fwrt);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
+ iwl_dbg_tlv_apply_buffers(fwrt);
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ case IWL_FW_INI_TIME_POINT_PERIODIC:
+ iwl_dbg_tlv_set_periodic_trigs(fwrt);
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ break;
+ case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
+ case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
+ iwl_dbg_tlv_check_fw_pkt);
+ break;
+ default:
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ }
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);
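With this dispatcher, callers only name the time point and supply optional trigger data; the switch decides which mix of buffer application, host-command delivery and trigger evaluation applies. The missed-beacons call site changed later in this patch does exactly this; a sketch of such a caller, assuming fwrt and pkt come from the surrounding context:

    /* package the received packet as trigger data for data_check */
    union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };

    iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF,
                           &tp_data);
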
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index e257ad358c94..f18946872569 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -65,11 +65,11 @@
#include <linux/types.h>
/**
- * struct iwl_apply_point_data
- * @list: list to go through the TLVs of the apply point
- * @tlv: a debug TLV
+ * struct iwl_dbg_tlv_node - debug TLV node
+ * @list: list of &struct iwl_dbg_tlv_node
+ * @tlv: debug TLV
*/
-struct iwl_apply_point_data {
+struct iwl_dbg_tlv_node {
struct list_head list;
struct iwl_ucode_tlv tlv;
};
@@ -82,6 +82,18 @@ union iwl_dbg_tlv_tp_data {
struct iwl_rx_packet *fw_pkt;
};
+/**
+ * struct iwl_dbg_tlv_time_point_data - debug data per time point
+ * @trig_list: list of triggers
+ * @active_trig_list: list of active triggers
+ * @hcmd_list: list of host commands
+ */
+struct iwl_dbg_tlv_time_point_data {
+ struct list_head trig_list;
+ struct list_head active_trig_list;
+ struct list_head hcmd_list;
+};
+
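All three list heads must be initialized before the TLV-allocation paths append to them; presumably the new iwl_dbg_tlv_init() declared just below does this per time point, along these lines (a sketch, not the actual body):

    int i;

    for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
            struct iwl_dbg_tlv_time_point_data *tp =
                    &trans->dbg.time_point[i];

            INIT_LIST_HEAD(&tp->trig_list);
            INIT_LIST_HEAD(&tp->active_trig_list);
            INIT_LIST_HEAD(&tp->hcmd_list);
    }
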
struct iwl_trans;
struct iwl_fw_runtime;
@@ -89,9 +101,11 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
void iwl_dbg_tlv_free(struct iwl_trans *trans);
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext);
+void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
+int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 9e8643618578..1bc6ecc32140 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -21,16 +21,18 @@
TRACE_EVENT(iwlwifi_dev_tx_tb,
TP_PROTO(const struct device *dev, struct sk_buff *skb,
- u8 *data_src, size_t data_len),
- TP_ARGS(dev, skb, data_src, data_len),
+ u8 *data_src, dma_addr_t phys, size_t data_len),
+ TP_ARGS(dev, skb, data_src, phys, data_len),
TP_STRUCT__entry(
DEV_ENTRY
+ __field(u64, phys)
__dynamic_array(u8, data,
iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
+ __entry->phys = phys;
if (iwl_trace_data(skb))
memcpy(__get_dynamic_array(data), data_src, data_len);
),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ff0519ea00a5..4096ccf58b07 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1560,6 +1560,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
+ iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@@ -1636,8 +1638,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
- iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the device debugfs entries. */
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1804,7 +1804,7 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, "
"4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 0c12df558240..8836f85afe85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -148,7 +148,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*
* Bits 3:0:
* Define the maximum number of pending read requests.
- * Maximum configration value allowed is 0xC
+ * Maximum configuration value allowed is 0xC
* Bits 9:8:
* Define the maximum transfer size. (64 / 128 / 256)
* Bit 10:
@@ -768,7 +768,7 @@ struct iwlagn_scd_bc_tbl {
/**
* struct iwl_gen3_bc_tbl - scheduler byte count table gen3
- * For 22560 and on:
+ * For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c8972f6e38ba..1e240a2a8329 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,11 +156,10 @@ static const u16 iwl_uhb_nvm_channels[] = {
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165, 169, 173, 177, 181,
/* 6-7 GHz */
- 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
- 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
- 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
- 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
- 413, 417, 421
+ 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
+ 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
+ 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
+ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
@@ -256,12 +255,12 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
#undef CHECK_AND_PRINT_I
}
-static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
u32 nvm_flags, const struct iwl_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
- if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (ch_num <= LAST_2GHZ_HT_PLUS)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
if (ch_num >= FIRST_2GHZ_HT_MINUS)
@@ -299,6 +298,13 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
return flags;
}
+static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
+{
+ if (ch_idx >= NUM_2GHZ_CHANNELS)
+ return NL80211_BAND_5GHZ;
+ return NL80211_BAND_2GHZ;
+}
+
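The helper replaces the scattered is_5ghz booleans with a band derived purely from the channel's position in the NVM table: indexes below NUM_2GHZ_CHANNELS map to 2.4 GHz, everything after to 5 GHz. Assuming NUM_2GHZ_CHANNELS is 14 (channels 1-14), usage looks like:

    /* illustrative: index 13 is channel 14 (2.4 GHz), index 14 is
     * the first 5 GHz entry of the table
     */
    enum nl80211_band b0 = iwl_nl80211_band_from_channel_idx(13); /* 2GHZ */
    enum nl80211_band b1 = iwl_nl80211_band_from_channel_idx(14); /* 5GHZ */
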
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
@@ -308,7 +314,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int n_channels = 0;
struct ieee80211_channel *channel;
u32 ch_flags;
- int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
+ int num_of_ch;
const u16 *nvm_chan;
if (cfg->uhb_supported) {
@@ -323,7 +329,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
- bool is_5ghz = (ch_idx >= num_2ghz_channels);
+ enum nl80211_band band =
+ iwl_nl80211_band_from_channel_idx(ch_idx);
if (v4)
ch_flags =
@@ -332,12 +339,13 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ch_flags =
__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
- if (is_5ghz && !data->sku_cap_band_52ghz_enable)
+ if (band == NL80211_BAND_5GHZ &&
+ !data->sku_cap_band_52ghz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
- is_5ghz) {
+ band == NL80211_BAND_5GHZ) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
@@ -362,8 +370,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = is_5ghz ?
- NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+ channel->band = band;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -379,7 +386,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* don't put limitations in case we're using LAR */
if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
- ch_idx, is_5ghz,
+ ch_idx, band,
ch_flags, cfg);
else
channel->flags = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 23c25a7665f2..14c8ba23f3b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -374,6 +374,7 @@
#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
+#define DBGC_CUR_DBGBUF_STATUS_IDX_MSK (0x0f000000)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
@@ -381,6 +382,12 @@
#define DBGC_IN_SAMPLE (0xa03c00)
#define DBGC_OUT_CTRL (0xa03c0c)
+/* M2S registers */
+#define LDBG_M2S_BUF_WPTR (0xa0476c)
+#define LDBG_M2S_BUF_WRAP_CNT (0xa04774)
+#define LDBG_M2S_BUF_WPTR_VAL_MSK (0x000fffff)
+#define LDBG_M2S_BUF_WRAP_CNT_VAL_MSK (0x000fffff)
+
/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
#define WFMP_MAC_ADDR_0 0xA03080
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index a31408188ed0..8cadad7364ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -679,6 +679,16 @@ struct iwl_dram_data {
};
/**
+ * struct iwl_fw_mon - fw monitor per allocation id
+ * @num_frags: number of fragments
+ * @frags: an array of DRAM buffer fragments
+ */
+struct iwl_fw_mon {
+ u32 num_frags;
+ struct iwl_dram_data *frags;
+};
+
+/**
* struct iwl_self_init_dram - dram data used by self init process
* @fw: lmac and umac dram data
* @fw_cnt: total number of items in array
@@ -706,10 +716,17 @@ struct iwl_self_init_dram {
* pointers were received via TLV. Uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
* @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
- * @num_blocks: number of blocks in fw_mon
- * @fw_mon: address of the buffers for firmware monitor
+ * @fw_mon_cfg: debug buffer allocation configuration
+ * @fw_mon_ini: DRAM buffer fragments per allocation id
+ * @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @active_regions: active regions
+ * @debug_info_tlv_list: list of debug info TLVs
+ * @time_point: array of debug time points
+ * @periodic_trig_list: periodic triggers list
+ * @domains_bitmap: bitmap of active domains other than
+ * &IWL_FW_INI_DOMAIN_ALWAYS_ON
*/
struct iwl_trans_debug {
u8 n_dest_reg;
@@ -726,11 +743,21 @@ struct iwl_trans_debug {
enum iwl_ini_cfg_state internal_ini_cfg;
enum iwl_ini_cfg_state external_ini_cfg;
- int num_blocks;
- struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];
+ struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
+ struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
+
+ struct iwl_dram_data fw_mon;
bool hw_error;
enum iwl_fw_ini_buffer_location ini_dest;
+
+ struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
+ struct list_head debug_info_tlv_list;
+ struct iwl_dbg_tlv_time_point_data
+ time_point[IWL_FW_INI_TIME_POINT_NUM];
+ struct list_head periodic_trig_list;
+
+ u32 domains_bitmap;
};
/**
@@ -1222,6 +1249,11 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode);
}
+static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
+{
+ return trans->state == IWL_TRANS_FW_ALIVE;
+}
+
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
if (trans->ops->sync_nmi)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 86c2c587e755..43ebb2149b63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1939,6 +1939,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (iwl_mvm_check_rt_status(mvm, vif)) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
ret = 1;
@@ -1955,12 +1957,39 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
if (d0i3_first) {
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0) {
IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
ret);
goto err;
}
+ switch (mvm->cmd_ver.d0i3_resp) {
+ case 0:
+ break;
+ case 1:
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len != sizeof(u32)) {
+ IWL_ERR(mvm,
+ "Error with D0I3_END_CMD response size (%d)\n",
+ len);
+ goto err;
+ }
+ if (IWL_D0I3_RESET_REQUIRE &
+ le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
+ iwl_write32(mvm->trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+ iwl_free_resp(&cmd);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index ad18c2f1a806..aa659162a7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -148,7 +148,8 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
"FLUSHING all tids queues on sta_id = %d\n",
flush_arg);
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF, 0)
+ ? : count;
mutex_unlock(&mvm->mutex);
return ret;
}
@@ -377,7 +378,7 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
pos = scnprintf(buf, bufsz,
"SAR geographic profile disabled\n");
} else {
- value = &mvm->geo_profiles[tbl_idx - 1].values[0];
+ value = &mvm->fwrt.geo_profiles[tbl_idx - 1].values[0];
pos += scnprintf(buf + pos, bufsz - pos,
"Use geographic profile %d\n", tbl_idx);
@@ -1174,7 +1175,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
int bin_len = count / 2;
int ret = -EINVAL;
size_t mpdu_cmd_hdr_size = (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
@@ -1375,6 +1376,9 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (count == 0)
return 0;
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER,
+ NULL);
+
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index d9eb2b286438..dd685f7eb410 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -514,6 +514,19 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
struct iwl_phy_cfg_cmd phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+ if (iwl_mvm_has_unified_ucode(mvm) &&
+ !mvm->trans->cfg->tx_with_siso_diversity)
+ return 0;
+
+ if (mvm->trans->cfg->tx_with_siso_diversity) {
+ /*
+ * TODO: currently we don't set the antenna; we let the NIC
+ * decide which antenna to use. This should come from the BIOS.
+ */
+ phy_cfg_cmd.phy_cfg =
+ cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
+ }
+
/* Set parameters */
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@@ -665,181 +678,14 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
}
#ifdef CONFIG_ACPI
-static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
- union acpi_object *table,
- struct iwl_mvm_sar_profile *profile,
- bool enabled)
-{
- int i;
-
- profile->enabled = enabled;
-
- for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
- if ((table[i].type != ACPI_TYPE_INTEGER) ||
- (table[i].integer.value > U8_MAX))
- return -EINVAL;
-
- profile->table[i] = table[i].integer.value;
- }
-
- return 0;
-}
-
-static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *table, *data;
- bool enabled;
- int ret, tbl_rev;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
- tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
-
- /* position of the actual table */
- table = &wifi_pkg->package.elements[2];
-
- /* The profile from WRDS is officially profile 1, but goes
- * into sar_profiles[0] (because we don't have a profile 0).
- */
- ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
- enabled);
-out_free:
- kfree(data);
- return ret;
-}
-
-static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data;
- bool enabled;
- int i, n_profiles, ret, tbl_rev;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
- (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
- tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
- n_profiles = wifi_pkg->package.elements[2].integer.value;
-
- /*
- * Check the validity of n_profiles. The EWRD profiles start
- * from index 1, so the maximum value allowed here is
- * ACPI_SAR_PROFILES_NUM - 1.
- */
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
- ret = -EINVAL;
- goto out_free;
- }
-
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
-
- /* The EWRD profiles officially go from 2 to 4, but we
- * save them in sar_profiles[1-3] (because we don't
- * have profile 0). So in the array we start from 1.
- */
- ret = iwl_mvm_sar_set_profile(mvm,
- &wifi_pkg->package.elements[pos],
- &mvm->sar_profiles[i + 1],
- enabled);
- if (ret < 0)
- break;
-
- /* go to the next table */
- pos += ACPI_SAR_TABLE_SIZE;
- }
-
-out_free:
- kfree(data);
- return ret;
-}
-
-static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data;
- int i, j, ret, tbl_rev;
- int idx = 1;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->geo_rev = tbl_rev;
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
- union acpi_object *entry;
-
- entry = &wifi_pkg->package.elements[idx++];
- if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX)) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->geo_profiles[i].values[j] = entry->integer.value;
- }
- }
- ret = 0;
-out_free:
- kfree(data);
- return ret;
-}
-
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
union {
struct iwl_dev_tx_power_cmd v5;
struct iwl_dev_tx_power_cmd_v4 v4;
} cmd;
- int i, j, idx;
- int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
- int len;
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
- ACPI_SAR_TABLE_SIZE);
+ u16 len = 0;
cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
@@ -848,174 +694,76 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
len = sizeof(cmd.v5);
else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(cmd.v4);
+ len = sizeof(struct iwl_dev_tx_power_cmd_v4);
else
len = sizeof(cmd.v4.v3);
- for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
- struct iwl_mvm_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &mvm->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
- profs[i]);
- /* if one of the profiles is disabled, we fail all */
- return -ENOENT;
- }
-
- IWL_DEBUG_INFO(mvm,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
- idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
- cmd.v5.v3.per_chain_restriction[i][j] =
- cpu_to_le16(prof->table[idx]);
- IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
- j, prof->table[idx]);
- }
- }
+ if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
+ prof_a, prof_b))
+ return -ENOENT;
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
-
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
-static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
-{
- /*
- * The GEO_TX_POWER_LIMIT command is not supported on earlier
- * firmware versions. Unfortunately, we don't have a TLV API
- * flag to rely on, so rely on the major version which is in
- * the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
- ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
- struct iwl_geo_tx_power_profiles_resp *resp;
- int ret;
+ union geo_tx_power_profiles_cmd geo_tx_cmd;
u16 len;
- void *data;
- struct iwl_geo_tx_power_profiles_cmd geo_cmd;
- struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+ int ret;
struct iwl_host_cmd cmd;
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- geo_cmd.ops =
+ if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ geo_tx_cmd.geo_cmd.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_cmd);
- data = &geo_cmd;
+ len = sizeof(geo_tx_cmd.geo_cmd);
} else {
- geo_cmd_v1.ops =
+ geo_tx_cmd.geo_cmd_v1.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_cmd_v1);
- data = &geo_cmd_v1;
+ len = sizeof(geo_tx_cmd.geo_cmd_v1);
}
+ if (!iwl_sar_geo_support(&mvm->fwrt))
+ return -EOPNOTSUPP;
+
cmd = (struct iwl_host_cmd){
.id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
.len = { len, },
.flags = CMD_WANT_SKB,
- .data = { data },
+ .data = { &geo_tx_cmd },
};
- if (!iwl_mvm_sar_geo_support(mvm))
- return -EOPNOTSUPP;
-
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
return ret;
}
-
- resp = (void *)cmd.resp_pkt->data;
- ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
- ret = -EIO;
- IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
- }
-
+ ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd);
iwl_free_resp(&cmd);
return ret;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
- struct iwl_geo_tx_power_profiles_cmd cmd = {
- .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
- };
- int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ union geo_tx_power_profiles_cmd cmd;
+ u16 len;
- if (!iwl_mvm_sar_geo_support(mvm))
- return 0;
-
- ret = iwl_mvm_sar_get_wgds_table(mvm);
- if (ret < 0) {
- IWL_DEBUG_RADIO(mvm,
- "Geo SAR BIOS table invalid or unavailable. (%d)\n",
- ret);
- /* we don't fail if the table is not available */
- return 0;
- }
-
- IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
- ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
-
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- struct iwl_per_chain_offset *chain =
- (struct iwl_per_chain_offset *)&cmd.table[i];
-
- for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
- u8 *value;
+ cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
- value = &mvm->geo_profiles[i].values[j *
- ACPI_GEO_PER_CHAIN_SIZE];
- chain[j].max_tx_power = cpu_to_le16(value[0]);
- chain[j].chain_a = value[1];
- chain[j].chain_b = value[2];
- IWL_DEBUG_RADIO(mvm,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j, value[1], value[2], value[0]);
- }
- }
+ iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
- cmd.table_revision = cpu_to_le32(mvm->geo_rev);
+ cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
- sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
- &cmd);
+ if (!fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1);
+ } else {
+ len = sizeof(cmd.geo_cmd);
}
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd);
}
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
@@ -1024,7 +772,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
int i, j, ret, tbl_rev;
int idx = 2;
- mvm->ppag_table.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -1049,8 +797,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
- mvm->ppag_table.enabled = cpu_to_le32(enabled->integer.value);
- if (!mvm->ppag_table.enabled) {
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value);
+ if (!mvm->fwrt.ppag_table.enabled) {
ret = 0;
goto out_free;
}
@@ -1070,11 +818,11 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
(j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
(j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
(j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
- mvm->ppag_table.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
- mvm->ppag_table.gain[i][j] = ent->integer.value;
+ mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value;
}
}
ret = 0;
@@ -1095,20 +843,20 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->ppag_table.enabled ? "enabled" : "disabled");
+ mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, mvm->ppag_table.gain[i][j]);
+ i, j, mvm->fwrt.ppag_table.gain[i][j]);
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, sizeof(mvm->ppag_table),
- &mvm->ppag_table);
+ 0, sizeof(mvm->fwrt.ppag_table),
+ &mvm->fwrt.ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -1131,17 +879,14 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
}
#else /* CONFIG_ACPI */
-static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
+ int prof_a, int prof_b)
{
return -ENOENT;
}
-static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
return -ENOENT;
}
@@ -1151,17 +896,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
return 0;
}
-int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
- int prof_b)
-{
- return -ENOENT;
-}
-
-int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
return -ENOENT;
@@ -1169,7 +903,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
- return -ENOENT;
+ return 0;
}
#endif /* CONFIG_ACPI */
@@ -1228,7 +962,7 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
int ret;
- ret = iwl_mvm_sar_get_wrds_table(mvm);
+ ret = iwl_sar_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1240,16 +974,14 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
return 1;
}
- ret = iwl_mvm_sar_get_ewrd_table(mvm);
+ ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use WRDS, so don't fail */
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);
- /* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
-
/*
* If we don't have profile 0 from BIOS, just skip it. This
* means that SAR Geo will not be enabled either, even if we
@@ -1344,12 +1076,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
-
- ret = iwl_send_phy_cfg_cmd(mvm);
- if (ret)
- goto error;
}
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
ret = iwl_mvm_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -1480,7 +1212,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
- } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+ } else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index d104da9170ca..72c4b2b8399d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -129,6 +129,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(mvm->hw->wiphy));
+ if (!mvm->led.name)
+ return -ENOMEM;
+
mvm->led.brightness_set = iwl_led_brightness_set;
mvm->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9c417dd06291..b78992e341d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -855,11 +855,10 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
{
u8 rate;
-
- if (info->band == NL80211_BAND_5GHZ || vif->p2p)
- rate = IWL_FIRST_OFDM_RATE;
- else
+ if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
rate = IWL_FIRST_CCK_RATE;
+ else
+ rate = IWL_FIRST_OFDM_RATE;
return rate;
}
@@ -1404,6 +1403,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(mb->mac_id);
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
IWL_DEBUG_INFO(mvm,
"missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
@@ -1432,7 +1432,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
ieee80211_beacon_loss(vif);
iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_MISSED_BEACONS, NULL);
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
@@ -1609,3 +1609,26 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
out_unlock:
rcu_read_unlock();
}
+
+void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_vap_notif *mb = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(mb->mac_id);
+
+ IWL_DEBUG_INFO(mvm,
+ "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
+ le32_to_cpu(mb->mac_id),
+ mb->num_beacon_intervals_elapsed,
+ mb->profile_periodicity);
+
+ rcu_read_lock();
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (vif)
+ iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
+
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d31f96c3f925..32dc9d6f0fb6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -339,14 +339,14 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
-const static u8 he_if_types_ext_capa_sta[] = {
+static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
-const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
+static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
{
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = he_if_types_ext_capa_sta,
@@ -355,6 +355,15 @@ const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
},
};
+static int
+iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ *tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+ *rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+ return 0;
+}
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -734,6 +743,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
+ hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
+
ret = ieee80211_register_hw(mvm->hw);
if (ret) {
iwl_mvm_leds_exit(mvm);
@@ -2280,7 +2292,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
+ &mvm->status) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
/*
* If we're restarting then the firmware will
* obviously have lost synchronisation with
@@ -2294,6 +2308,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*
* Set a large maximum delay to allow for more
* than a single interface.
+ *
+ * For new firmware versions, rely on the
+ * firmware. This is relevant for DCM scenarios
+ * only anyway.
*/
u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
iwl_mvm_protect_session(mvm, vif, dur, dur,
@@ -2384,8 +2402,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
+ * A firmware with the new API will remove it automatically.
*/
- iwl_mvm_stop_session_protection(mvm, vif);
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -3255,8 +3276,22 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
duration = req_duration;
mutex_lock(&mvm->mutex);
- /* Try really hard to protect the session and hear a beacon */
- iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
+ /* Try really hard to protect the session and hear a beacon.
+ * The new session protection command allows us to protect the
+ * session for a much longer time since the firmware will internally
+ * create two events: a 300TU one with a very high priority that
+ * won't be fragmented which should be enough for 99% of the cases,
+ * and another one (which we configure here to be 900TU long) which
+ * will have a slightly lower priority, but more importantly, can be
+ * fragmented so that it'll allow other activities to run.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, 900,
+ min_duration);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ min_duration, 500, false);
mutex_unlock(&mvm->mutex);
}
@@ -3613,8 +3648,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
- (channel->band == NL80211_BAND_2GHZ) ?
- PHY_BAND_24 : PHY_BAND_5,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
PHY_VHT_CHANNEL_MODE20,
0);
@@ -3848,7 +3882,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex);
- iwl_mvm_stop_roc(mvm);
+ iwl_mvm_stop_roc(mvm, vif);
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
@@ -4622,7 +4656,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
continue;
if (drop)
- iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
else
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
@@ -5006,6 +5040,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
+ .get_antenna = iwl_mvm_op_get_antenna,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
.stop = iwl_mvm_mac_stop,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 5ca50f39a023..3ec8de00f3aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -188,6 +188,11 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
+union geo_tx_power_profiles_cmd {
+ struct iwl_geo_tx_power_profiles_cmd geo_cmd;
+ struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+};
+
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -774,14 +779,6 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_NUM_CIPHERS 10
-struct iwl_mvm_sar_profile {
- bool enabled;
- u8 table[ACPI_SAR_TABLE_SIZE];
-};
-
-struct iwl_mvm_geo_profile {
- u8 values[ACPI_GEO_TABLE_SIZE];
-};
struct iwl_mvm_txq {
struct list_head list;
@@ -1122,6 +1119,10 @@ struct iwl_mvm {
int responses[IWL_MVM_TOF_MAX_APS];
} ftm_initiator;
+ struct {
+ u8 d0i3_resp;
+ } cmd_ver;
+
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
@@ -1140,14 +1141,6 @@ struct iwl_mvm {
/* sniffer data to include in radiotap */
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
-
-#ifdef CONFIG_ACPI
- struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
- struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
- u32 geo_rev;
- struct iwl_ppag_table_cmd ppag_table;
- u32 ppag_rev;
-#endif
};
/* Extract MVM priv from op_mode and _hw */
@@ -1405,12 +1398,19 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
}
+
static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
}
+static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA);
+}
+
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
@@ -1682,6 +1682,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
@@ -2077,6 +2079,19 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return PHY_BAND_24;
+ case NL80211_BAND_5GHZ:
+ return PHY_BAND_5;
+ default:
+ WARN_ONCE(1, "Unsupported band (%u)\n", band);
+ return PHY_BAND_5;
+ }
+}
+
/* Channel info utils */
static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
{
@@ -2125,11 +2140,12 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
struct cfg80211_chan_def *chandef)
{
+ enum nl80211_band band = chandef->chan->band;
+
iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
- (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5),
- iwl_mvm_get_channel_width(chandef),
- iwl_mvm_get_ctrl_pos(chandef));
+ iwl_mvm_phy_band_from_nl80211(band),
+ iwl_mvm_get_channel_width(chandef),
+ iwl_mvm_get_ctrl_pos(chandef));
}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 3acbd5b7ab4b..1b07a8e8f069 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -263,6 +263,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+ iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED),
@@ -432,6 +434,8 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
+ HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(SESSION_PROTECTION_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
@@ -608,6 +612,27 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
+static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!mvm->fw->ucode_capa.cmd_versions ||
+ !mvm->fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = mvm->fw->ucode_capa.cmd_versions;
+ for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->notif_ver;
+ }
+ }
+
+ return def;
+}
+
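iwl_mvm_lookup_notif_ver() linearly scans the firmware's advertised command-version table and falls back to the caller's default when the table is absent or the entry reports IWL_FW_CMD_VER_UNKNOWN, so "firmware too old to say" and "version 0" are handled uniformly. The d0i3_resp lookup further down is the first user; the pattern is:

    /* sketch mirroring the call below: resolve the notification
     * version with a safe default of 0, refuse anything unknown
     */
    u8 resp_ver = iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP,
                                           D0I3_END_CMD, 0);

    if (WARN_ON_ONCE(resp_ver > 1)) /* driver handles versions 0-1 */
            goto out_free;
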
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -639,10 +664,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
- if (cfg->max_rx_agg_size)
- hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
- else
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
@@ -667,7 +689,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
op_mode->ops = &iwl_mvm_ops_mq;
trans->rx_mpdu_cmd_hdr_size =
(trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
} else {
@@ -722,6 +744,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+ mvm->cmd_ver.d0i3_resp =
+ iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
+ /* we only support version 1 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
+ goto out_free;
+
/*
* Populate the state variables that the transport layer needs
* to know about.
@@ -730,7 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
rb_size_default = IWL_AMSDU_2K;
else
rb_size_default = IWL_AMSDU_4K;
@@ -756,7 +784,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
- mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560;
+ mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 22136e4832ea..25d7faea1c62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -370,8 +370,6 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
if (dtimper >= 10)
return;
- /* TODO: check that multicast wake lock is off */
-
if (host_awake) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8f50e2b121bd..e2cf9e015ef8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -341,16 +341,24 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
lq_sta = &mvmsta->lq_sta.rs_fw;
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ char pretty_rate[100];
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
- IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
- lq_sta->last_rate_n_flags);
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+ lq_sta->last_rate_n_flags);
+ IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) {
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- if (WARN_ON(sta->max_amsdu_len < size))
+ /*
+ * A debugfs override can make sta->max_amsdu_len smaller than
+ * size, so also check orig_amsdu_len, which holds the original
+ * value from before debugfs changed it
+ */
+ if (WARN_ON(sta->max_amsdu_len < size &&
+ mvmsta->orig_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
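To see why the relaxed check is safe, take hypothetical numbers: debugfs
lowers sta->max_amsdu_len from 7935 to 3839, then the firmware reports an
A-MSDU size of 7935. The old check would warn; the new one stays quiet
because mvmsta->orig_amsdu_len (7935) still covers the reported size:

	/* cur = 3839, orig = 7935, size = 7935 */
	WARN_ON(3839 < 7935 && 7935 < 7935);	/* false: no warning */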
@@ -378,7 +386,7 @@ out:
rcu_read_unlock();
}
-static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 42d525e46e80..1a990ed9c3ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1533,6 +1533,8 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
+ sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
+
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
* or 0, since there is no per-TID alg.
@@ -3683,7 +3685,6 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
IWL_DEBUG_RATE(mvm, "leave\n");
}
-#ifdef CONFIG_MAC80211_DEBUGFS
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
@@ -3739,14 +3740,15 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
- type, rs_pretty_ant(ant), bw, mcs, nss,
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
+#ifdef CONFIG_MAC80211_DEBUGFS
/**
* Program the device to use fixed rate for frame transmit
* This is for debugging/testing only
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 428642e66658..32104c9f8f5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -445,10 +445,6 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
#endif
-#ifdef CONFIG_MAC80211_DEBUGFS
-void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
-#endif
-
void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update);
@@ -456,4 +452,6 @@ int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 0ad8ed23a455..5ee33c8ae9d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -60,6 +60,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <asm/unaligned.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
@@ -357,7 +358,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
len = le16_to_cpu(rx_res->byte_count);
- rx_pkt_status = le32_to_cpup((__le32 *)
+ rx_pkt_status = get_unaligned_le32((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
/* Dont use dev_alloc_skb(), we'll have enough headroom once
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 77b03b757193..ef99c49247b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -23,7 +23,7 @@
* in the file called COPYING.
*
* Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
@@ -1542,6 +1542,19 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
+static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
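The scan hunks further below call the inverse helper,
iwl_mvm_phy_band_from_nl80211(), which is not part of this diff; presumably
it mirrors the switch above, along these lines (hedged sketch):

	static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
	{
		switch (band) {
		case NL80211_BAND_2GHZ:
			return PHY_BAND_24;
		case NL80211_BAND_5GHZ:
			return PHY_BAND_5;
		default:
			WARN_ONCE(1, "Unsupported NL80211 band (%u)\n", band);
			return PHY_BAND_5;
		}
	}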
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -1565,7 +1578,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
channel = desc->v3.channel;
gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
@@ -1667,7 +1680,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u64 tsf_on_air_rise;
if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560)
+ IWL_DEVICE_FAMILY_AX210)
tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
else
tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
@@ -1678,8 +1691,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
rx_status->device_timestamp = gp2_on_air_rise;
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
+ if (iwl_mvm_is_band_in_rx_supported(mvm)) {
+ u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
+
+ rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
+ } else {
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ }
rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index fcafa22ec6ce..a046ac9fa852 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -79,9 +79,6 @@
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14
-#define IWL_SCAN_BAND_5_2 0
-#define IWL_SCAN_BAND_2_4 1
-
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
@@ -92,6 +89,10 @@
#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+/* number of scan channels */
+#define IWL_SCAN_NUM_CHANNELS 112
+/* adaptive dwell default number of APs override */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE 10
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
@@ -196,14 +197,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
-{
- if (band == NL80211_BAND_2GHZ)
- return cpu_to_le32(PHY_BAND_24);
- else
- return cpu_to_le32(PHY_BAND_5);
-}
-
static inline __le32
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
bool no_cck)
@@ -550,6 +543,7 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
{
int i, j;
int index;
+ u32 tmp_bitmap = 0;
/*
* copy SSIDs from match list.
@@ -569,7 +563,6 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
}
/* add SSIDs from scan SSID list */
- *ssid_bitmap = 0;
for (j = params->n_ssids - 1;
j >= 0 && i < PROBE_OPTION_MAX;
i++, j--) {
@@ -581,11 +574,13 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
ssids[i].len = params->ssids[j].ssid_len;
memcpy(ssids[i].ssid, params->ssids[j].ssid,
ssids[i].len);
- *ssid_bitmap |= BIT(i);
+ tmp_bitmap |= BIT(i);
} else {
- *ssid_bitmap |= BIT(index);
+ tmp_bitmap |= BIT(index);
}
}
+ if (ssid_bitmap)
+ *ssid_bitmap = tmp_bitmap;
}
static int
@@ -981,10 +976,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->fw->ucode_capa.n_scan_channels);
u32 ssid_bitmap = 0;
int i;
-
- lockdep_assert_held(&mvm->mutex);
-
- memset(cmd, 0, ksize(cmd));
+ u8 band;
if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
return -EINVAL;
@@ -1000,7 +992,8 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
vif));
- cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
+ band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
+ cmd->flags = cpu_to_le32(band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
@@ -1414,21 +1407,176 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
+static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
+{
+ return iwl_mvm_is_regular_scan(params) ?
+ IWL_SCAN_PRIORITY_EXT_6 :
+ IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static void
+iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
+ struct iwl_scan_general_params_v10 *general_params,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+ u8 active_dwell, passive_dwell;
+
+ timing = &scan_timing[params->type];
+ active_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+
+ general_params->adwell_default_social_chn =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ /* if custom max budget was configured with debugfs */
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+ else if (params->ssids && params->ssids[0].ssid_len)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ general_params->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+
+ hb_timing = &scan_timing[params->hb_type];
+
+ general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ general_params->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+
+ general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+ general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
+}
+
+struct iwl_mvm_scan_channel_segment {
+ u8 start_idx;
+ u8 end_idx;
+ u8 first_channel_id;
+ u8 last_channel_id;
+ u8 channel_spacing_shift;
+ u8 band;
+};
+
+static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
+ {
+ .start_idx = 0,
+ .end_idx = 13,
+ .first_channel_id = 1,
+ .last_channel_id = 14,
+ .channel_spacing_shift = 0,
+ .band = PHY_BAND_24
+ },
+ {
+ .start_idx = 14,
+ .end_idx = 41,
+ .first_channel_id = 36,
+ .last_channel_id = 144,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+ {
+ .start_idx = 42,
+ .end_idx = 50,
+ .first_channel_id = 149,
+ .last_channel_id = 181,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+};
+
+static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
+{
+ int i, index;
+
+ if (!channel_id)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
+ const struct iwl_mvm_scan_channel_segment *ch_segment =
+ &scan_channel_segments[i];
+ u32 ch_offset;
+
+ if (ch_segment->band != band ||
+ ch_segment->first_channel_id > channel_id ||
+ ch_segment->last_channel_id < channel_id)
+ continue;
+
+ ch_offset = (channel_id - ch_segment->first_channel_id) >>
+ ch_segment->channel_spacing_shift;
+
+ index = scan_channel_segments[i].start_idx + ch_offset;
+ if (index < IWL_SCAN_NUM_CHANNELS)
+ return index;
+
+ break;
+ }
+
+ return -EINVAL;
+}
+
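A worked example of the segment math above: channel 144 in the 5 GHz band
falls in the second segment (channels 36..144, 4-channel spacing, so shift 2):

	ch_offset = (144 - 36) >> 2 = 27
	index     = 14 + 27        = 41		/* that segment's end_idx */

Channel 149 then opens the third segment at index 42, and any result at or
beyond IWL_SCAN_NUM_CHANNELS (112) is rejected as -EINVAL.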
+static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
+ u8 ch_id, u8 band, u8 *ch_bitmap,
+ size_t bitmap_n_entries)
+{
+ int i;
+ static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+ };
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ int ch_idx, bitmap_idx;
+
+ ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
+ if (ch_idx < 0)
+ return;
+
+ bitmap_idx = ch_idx / 8;
+ if (bitmap_idx >= bitmap_n_entries)
+ return;
+
+ ch_idx = ch_idx % 8;
+ ch_bitmap[bitmap_idx] |= BIT(ch_idx);
+
+ return;
+ }
+ }
+}
+
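The override bitmap packs one bit per channel index, eight indices per byte.
For channel 44 in the 5 GHz band (index 16 = 14 + ((44 - 36) >> 2) by the
math above):

	bitmap_idx = 16 / 8 = 2;
	ch_bitmap[2] |= BIT(16 % 8);	/* sets bit 0 of byte 2 */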
static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
- int n_channels, u32 ssid_bitmap,
+ int n_channels, u32 flags,
struct iwl_scan_channel_cfg_umac *channel_cfg)
{
int i;
for (i = 0; i < n_channels; i++) {
- channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+ channel_cfg[i].flags = cpu_to_le32(flags);
channel_cfg[i].v1.channel_num = channels[i]->hw_value;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
+ enum nl80211_band band = channels[i]->band;
+
channel_cfg[i].v2.band =
- channels[i]->hw_value <= IWL_SCAN_LAST_2_4_CHN ?
- IWL_SCAN_BAND_2_4 : IWL_SCAN_BAND_5_2;
+ iwl_mvm_phy_band_from_nl80211(band);
channel_cfg[i].v2.iter_count = 1;
channel_cfg[i].v2.iter_interval = 0;
} else {
@@ -1438,6 +1586,92 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
+static void
+iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v4 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ u8 *bitmap = cp->adwell_ch_override_bitmap;
+ size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[i];
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+
+ iwl_mvm_scan_ch_add_n_aps_override(vif_type,
+ cfg->v2.channel_num,
+ cfg->v2.band, bitmap,
+ bitmap_n_entries);
+ }
+}
+
+static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ u8 flags = 0;
+
+ flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan on HB channels */
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+ return flags;
+}
+
+static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ int type)
+{
+ u16 flags = 0;
+
+ if (params->n_ssids == 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+
+ if (iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+ if (!iwl_mvm_is_regular_scan(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+ if (params->measurement_dwell ||
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+ if (IWL_MVM_ADWELL_ENABLE)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+ return flags;
+}
+
static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -1481,8 +1715,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
- if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE &&
- vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
/*
@@ -1517,9 +1750,42 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
return flags;
}
+static int
+iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_umac_schedule *schedule,
+ __le16 *delay)
+{
+ int i;
+
+ if (WARN_ON(!params->n_scan_plans ||
+ params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ schedule[i].iter_count = scan_plan->iterations;
+ schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+ /*
+ * If the number of iterations of the last scan plan is set to
+ * zero, it should run infinitely. However, this is not always the
+ * case: for example, when a regular scan is requested, the driver
+ * sets one scan plan with one iteration.
+ */
+ if (!schedule[params->n_scan_plans - 1].iter_count)
+ schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+ *delay = cpu_to_le16(params->delay);
+
+ return 0;
+}
+
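As an illustration of the open-ended convention, consider a two-plan
schedule (hypothetical values):

	struct cfg80211_sched_scan_plan plans[] = {
		{ .interval = 10, .iterations = 3 },
		{ .interval = 60, .iterations = 0 },	/* run until stopped */
	};

After the helper runs, schedule[0].iter_count is 3 and
schedule[1].iter_count is 0xff, the firmware's "infinite" marker.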
static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params,
- int type)
+ int type, int uid)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
@@ -1530,7 +1796,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
struct iwl_scan_req_umac_tail_v1 *tail_v1;
struct iwl_ssid_ie *direct_scan;
- int uid, i;
+ int ret = 0;
u32 ssid_bitmap = 0;
u8 channel_flags = 0;
u16 gen_flags;
@@ -1538,17 +1804,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
- lockdep_assert_held(&mvm->mutex);
-
- if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
- return -EINVAL;
-
- uid = iwl_mvm_scan_uid_by_status(mvm, 0);
- if (uid < 0)
- return uid;
-
- memset(cmd, 0, ksize(cmd));
-
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
mvm->scan_uid_status[uid] = type;
@@ -1591,25 +1846,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
chan_param->flags = channel_flags;
chan_param->count = params->n_channels;
- for (i = 0; i < params->n_scan_plans; i++) {
- struct cfg80211_sched_scan_plan *scan_plan =
- &params->scan_plans[i];
-
- tail_v2->schedule[i].iter_count = scan_plan->iterations;
- tail_v2->schedule[i].interval =
- cpu_to_le16(scan_plan->interval);
- }
-
- /*
- * If the number of iterations of the last scan plan is set to
- * zero, it should run infinitely. However, this is not always the case.
- * For example, when regular scan is requested the driver sets one scan
- * plan with one iteration.
- */
- if (!tail_v2->schedule[i - 1].iter_count)
- tail_v2->schedule[i - 1].iter_count = 0xff;
-
- tail_v2->delay = cpu_to_le16(params->delay);
+ ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
+ &tail_v2->delay);
+ if (ret)
+ return ret;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
tail_v2->preq = params->preq;
@@ -1627,6 +1867,174 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static void
+iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_general_params_v10 *gp,
+ u16 gen_flags)
+{
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_scan_umac_dwell_v10(mvm, gp, params);
+
+ gp->flags = cpu_to_le16(gen_flags);
+
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+ gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+ gp->scan_start_mac_id = scan_vif->id;
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v3 *pp)
+{
+ pp->preq = params->preq;
+ pp->ssid_num = params->n_ssids;
+ iwl_scan_build_ssids(params, pp->direct_scan, NULL);
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp,
+ u32 *bitmap_ssid)
+{
+ pp->preq = params->preq;
+ iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v3 *cp)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, 0,
+ cp->channel_config);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v4 *cp,
+ u32 channel_cfg_flags)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->num_of_aps_override = IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE;
+
+ iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
+ params->n_channels,
+ channel_cfg_flags,
+ vif->type);
+}
+
+static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
+ &scan_p->channel_params);
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
+ &scan_p->channel_params, 0);
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v13 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v13 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+ u32 bitmap_ssid = 0;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
+ &bitmap_ssid);
+ iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
+ &scan_p->channel_params, bitmap_ssid);
+
+ return 0;
+}
+
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@@ -1729,6 +2137,64 @@ static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
}
}
+struct iwl_scan_umac_handler {
+ u8 version;
+ int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type, int uid);
+};
+
+#define IWL_SCAN_UMAC_HANDLER(_ver) { \
+ .version = _ver, \
+ .handler = iwl_mvm_scan_umac_v##_ver, \
+}
+
+static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
+ /* set the newest version first to shorten the list traverse time */
+ IWL_SCAN_UMAC_HANDLER(13),
+ IWL_SCAN_UMAC_HANDLER(12),
+ IWL_SCAN_UMAC_HANDLER(11),
+};
+
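For reference, IWL_SCAN_UMAC_HANDLER(13) expands via token pasting to:

	{ .version = 13, .handler = iwl_mvm_scan_umac_v13, }

so supporting a new command version takes one new iwl_mvm_scan_umac_vN()
implementation plus one line at the head of this table.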
+static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_host_cmd *hcmd,
+ struct iwl_mvm_scan_params *params,
+ int type)
+{
+ int uid, i;
+ u8 scan_ver;
+
+ lockdep_assert_held(&mvm->mutex);
+ memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
+
+ return iwl_mvm_scan_lmac(mvm, vif, params);
+ }
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
+
+ hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+
+ scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
+ const struct iwl_scan_umac_handler *ver_handler =
+ &iwl_scan_umac_handlers[i];
+
+ if (ver_handler->version != scan_ver)
+ continue;
+
+ return ver_handler->handler(mvm, vif, params, type, uid);
+ }
+
+ return iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1786,14 +2252,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params,
- IWL_MVM_SCAN_REGULAR);
- } else {
- hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
- ret = iwl_mvm_scan_lmac(mvm, vif, &params);
- }
+ ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
+ IWL_MVM_SCAN_REGULAR);
if (ret)
return ret;
@@ -1891,13 +2351,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
- } else {
- hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
- ret = iwl_mvm_scan_lmac(mvm, vif, &params);
- }
+ ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
if (ret)
return ret;
@@ -2048,10 +2502,31 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
+#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \
+ case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \
+}
+
+static int iwl_scan_req_umac_get_size(u8 scan_ver)
+{
+ switch (scan_ver) {
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
+ }
+
+ return 0;
+}
+
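Each IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) line expands to a braced case
label; for version 13, for example:

	{ case (13): return sizeof(struct iwl_scan_req_umac_v13); }

Unknown versions fall through to return 0, which sends iwl_mvm_scan_size()
down the capability-based sizing path below.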
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- int tail_size;
+ int base_size, tail_size;
+ u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
+
+ base_size = iwl_scan_req_umac_get_size(scan_ver);
+ if (base_size)
+ return base_size;
+
if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
@@ -2059,6 +2534,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
else if (iwl_mvm_cdb_scan_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+ else
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
if (iwl_mvm_is_scan_ext_chan_supported(mvm))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b3768d5d852a..7b35f416404c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2844,13 +2844,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (normalized_ssn == tid_data->next_reclaimed) {
tid_data->state = IWL_AGG_STARTING;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ ret = 0;
}
- ret = 0;
-
out:
spin_unlock_bh(&mvmsta->lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a06bc63fb516..51b138673ddb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -734,6 +734,11 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
return;
}
+/*
+ * When the firmware supports the session protection API,
+ * this function is not needed since the firmware automatically
+ * removes the session protection after association + beacon reception.
+ */
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -757,6 +762,101 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
+ true);
+
+ if (!vif)
+ goto out_unlock;
+
+ /* The vif is not a P2P_DEVICE, maintain its time_event_data */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data =
+ &mvmvif->time_event_data;
+
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "Session protection failure");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ }
+
+ if (le32_to_cpu(notif->start)) {
+ spin_lock_bh(&mvm->time_event_lock);
+ te_data->running = le32_to_cpu(notif->start);
+ te_data->end_jiffies =
+ TU_TO_EXP_TIME(te_data->duration);
+ spin_unlock_bh(&mvm->time_event_lock);
+ } else {
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "No beacon heard and the session protection is over already...");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ }
+
+ goto out_unlock;
+ }
+
+ if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ /* End TE, notify mac80211 */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ iwl_mvm_roc_finished(mvm);
+ } else if (le32_to_cpu(notif->start)) {
+ set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ }
+
+ out_unlock:
+ rcu_read_unlock();
+}
+
+static int
+iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ cmd.conf_id =
+ cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ cmd.conf_id =
+ cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid ROC type\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
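Note that duration arrives in milliseconds and is converted with
MSEC_TO_TU(), assuming the usual 1 TU = 1024 usec definition; a 500 ms ROC
request, for example, becomes:

	duration_tu = 500 * 1000 / 1024 = 488 TU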
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type)
{
@@ -770,6 +870,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
+ duration,
+ type);
+
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
@@ -847,11 +953,44 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
-void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_cancel_session_protection(mvm, mvmvif);
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+
+ iwl_mvm_roc_finished(mvm);
+
+ return;
+ }
+
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
@@ -916,3 +1055,51 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
+
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (te_data->running &&
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return;
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ te_data->duration = le32_to_cpu(cmd.duration_tu);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 1dd3d01245ea..df6832b79666 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -178,12 +180,13 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/**
* iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is stopped
*
* This function can be used to cancel an ongoing ROC session.
* The function is async, it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session.
*/
-void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
@@ -242,4 +245,20 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
return !!te_data->uid;
}
+/**
+ * iwl_mvm_schedule_session_protection - schedule a session protection
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the duration of the protection
+ * @min_duration: do not schedule a new protection if a running one
+ *	still covers at least this much time
+ */
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration);
+
+/**
+ * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mvm: the mvm component
+ * @rxb: the RX buffer that carries the notification
+ */
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f0c539b37ea7..b5a16f00bada 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -8,6 +8,7 @@
* Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -482,26 +484,27 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
/* budget in mWatt */
static const u32 iwl_mvm_cdev_budgets[] = {
- 2000, /* cooling state 0 */
- 1800, /* cooling state 1 */
- 1600, /* cooling state 2 */
- 1400, /* cooling state 3 */
- 1200, /* cooling state 4 */
- 1000, /* cooling state 5 */
- 900, /* cooling state 6 */
- 800, /* cooling state 7 */
- 700, /* cooling state 8 */
- 650, /* cooling state 9 */
- 600, /* cooling state 10 */
- 550, /* cooling state 11 */
- 500, /* cooling state 12 */
- 450, /* cooling state 13 */
- 400, /* cooling state 14 */
- 350, /* cooling state 15 */
- 300, /* cooling state 16 */
- 250, /* cooling state 17 */
- 200, /* cooling state 18 */
- 150, /* cooling state 19 */
+ 2400, /* cooling state 0 */
+ 2000, /* cooling state 1 */
+ 1800, /* cooling state 2 */
+ 1600, /* cooling state 3 */
+ 1400, /* cooling state 4 */
+ 1200, /* cooling state 5 */
+ 1000, /* cooling state 6 */
+ 900, /* cooling state 7 */
+ 800, /* cooling state 8 */
+ 700, /* cooling state 9 */
+ 650, /* cooling state 10 */
+ 600, /* cooling state 11 */
+ 550, /* cooling state 12 */
+ 500, /* cooling state 13 */
+ 450, /* cooling state 14 */
+ 400, /* cooling state 15 */
+ 350, /* cooling state 16 */
+ 300, /* cooling state 17 */
+ 250, /* cooling state 18 */
+ 200, /* cooling state 19 */
+ 150, /* cooling state 20 */
};
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8a059da7a1fa..dc5c02fbc65a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -341,8 +341,11 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == NL80211_BAND_5GHZ)
+ /*
+ * For bands other than 2.4 GHz, remap mac80211 rate
+ * indices into driver indices
+ */
+ if (info->band != NL80211_BAND_2GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@@ -547,7 +550,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
}
if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) {
+ IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
cmd->offload_assist |= cpu_to_le32(offload_assist);
@@ -935,7 +938,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
- max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
+ /*
+ * Take the smaller of the mac80211 station's A-MSDU limit and
+ * the driver's per-TID limit
+ */
+ max_amsdu_len =
+ min_t(unsigned int, sta->max_amsdu_len,
+ iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
* Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
@@ -2051,7 +2059,7 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xff | BIT(IWL_MGMT_TID), flags);
+ 0xffff, flags);
if (internal)
return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 8686107da116..6096276cb0d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -217,7 +217,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
int band_offset = 0;
/* Legacy rate format, search for match in table */
- if (band == NL80211_BAND_5GHZ)
+ if (band != NL80211_BAND_2GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (fw_rate_idx_to_plcp[idx] == rate)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 74980382e64c..a4e09a5b1816 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -55,6 +55,66 @@
#include "internal.h"
#include "iwl-prph.h"
+static void
+iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
+ u32 *control_flags)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 dbg_flags = 0;
+
+ if (!iwl_trans_dbg_ini_valid(trans)) {
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+
+ if (fw_mon->size) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM buffer destination\n");
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
+ }
+
+ goto out;
+ }
+
+ fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying SMEM buffer destination\n");
+
+ goto out;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_DRAM_PATH &&
+ trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ }
+
+out:
+ if (dbg_flags)
+ *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
+}
+
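The caller in iwl_pcie_ctxt_info_gen3_init() (see the hunk below) now builds
the base control flags first and lets this helper OR in the debug bits only
when a destination was actually configured, roughly:

	u32 control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K;	/* plus MTR bits */

	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
				      &control_flags);
	/* DRAM case: control_flags now also carries
	 * IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | IWL_PRPH_SCRATCH_EDBG_DEST_DRAM
	 */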
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
@@ -86,24 +146,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
- IWL_PRPH_SCRATCH_MTR_FORMAT) |
- IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
- IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
- prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ IWL_PRPH_SCRATCH_MTR_FORMAT);
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
- /* Configure debug, for integration */
- if (!iwl_trans_dbg_ini_valid(trans))
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->dbg.num_blocks) {
- prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans->dbg.fw_mon[0].physical);
- prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans->dbg.fw_mon[0].size);
- }
+ iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
+ &control_flags);
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 1047d48beaa5..a091690f6c79 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -166,7 +166,7 @@ struct iwl_rx_completion_desc {
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
- * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
@@ -264,7 +264,7 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
return READ_ONCE(*rb_stts);
@@ -646,7 +646,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
-int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
@@ -660,7 +659,6 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct iwl_rxq *rxq);
-int iwl_pcie_rx_alloc(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
@@ -702,9 +700,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 19dd075f2f63..452da44a21e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -200,8 +200,8 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* TODO: remove this for 22560 once fw does it */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO: remove this once fw does it */
iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -247,11 +247,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560)
- iwl_write32(trans, HBUS_TARG_WRPTR,
- (rxq->write_actual |
- ((FIRST_RX_QUEUE + rxq->id) << 16)));
- else if (trans->trans_cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -279,7 +275,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -326,7 +322,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
- rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+ rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
rxq->free_count--;
}
spin_unlock(&rxq->lock);
@@ -691,7 +687,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
{
struct device *dev = trans->dev;
bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
if (rxq->bd)
@@ -712,7 +708,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (rxq->tr_tail)
@@ -736,7 +732,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
int i;
int free_size;
bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ IWL_DEVICE_FAMILY_AX210);
size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
sizeof(struct iwl_rb_status);
@@ -784,11 +780,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
&rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
- /*
- * W/A 22560 device step Z0 must be non zero bug
- * TODO: remove this when stop supporting Z0
- */
- *rxq->cr_tail = cpu_to_le16(500);
return 0;
@@ -802,13 +793,13 @@ err:
return -ENOMEM;
}
-int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
size_t rb_stts_size = trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
if (WARN_ON(trans_pcie->rxq))
@@ -1033,7 +1024,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-int _iwl_pcie_rx_init(struct iwl_trans *trans)
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *def_rxq;
@@ -1074,8 +1065,9 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ memset(rxq->rb_stts, 0,
+ (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1152,7 +1144,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
size_t rb_stts_size = trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
/*
@@ -1347,7 +1339,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1399,7 +1391,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
}
/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
else
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
@@ -1429,6 +1421,7 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct napi_struct *napi;
struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
@@ -1515,7 +1508,7 @@ out:
/* Backtrack one entry */
rxq->read = i;
/* update cr tail with the rxq read pointer */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
*rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
@@ -1534,8 +1527,16 @@ out:
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- if (rxq->napi.poll)
- napi_gro_flush(&rxq->napi, false);
+ napi = &rxq->napi;
+ if (napi->poll) {
+ if (napi->rx_count) {
+ netif_receive_skb_list(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+ }
+
+ napi_gro_flush(napi, false);
+ }
iwl_pcie_rxq_restock(trans, rxq);
}
@@ -2152,8 +2153,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -2185,17 +2185,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
- inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
- /* Reflect IML transfer status */
- int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
-
- IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
- if (res == IWL_IMAGE_RESP_FAIL) {
- isr_stats->sw++;
- iwl_pcie_irq_handle_error(trans);
- }
- } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ca3bb4d65b00..0252716c0b24 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -193,7 +193,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
@@ -365,7 +365,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 6961f00ff812..af9bc6b64542 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -190,32 +190,36 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
- int i;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- for (i = 0; i < trans->dbg.num_blocks; i++) {
- dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
- trans->dbg.fw_mon[i].block,
- trans->dbg.fw_mon[i].physical);
- trans->dbg.fw_mon[i].block = NULL;
- trans->dbg.fw_mon[i].physical = 0;
- trans->dbg.fw_mon[i].size = 0;
- trans->dbg.num_blocks--;
- }
+ if (!fw_mon->size)
+ return;
+
+ dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
+ fw_mon->physical);
+
+ fw_mon->block = NULL;
+ fw_mon->physical = 0;
+ fw_mon->size = 0;
}
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
u8 max_power, u8 min_power)
{
- void *cpu_addr = NULL;
- dma_addr_t phys = 0;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ void *block = NULL;
+ dma_addr_t physical = 0;
u32 size = 0;
u8 power;
+ if (fw_mon->size)
+ return;
+
for (power = max_power; power >= min_power; power--) {
size = BIT(power);
- cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
- GFP_KERNEL | __GFP_NOWARN);
- if (!cpu_addr)
+ block = dma_alloc_coherent(trans->dev, size, &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!block)
continue;
IWL_INFO(trans,
@@ -224,7 +228,7 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
break;
}
- if (WARN_ON_ONCE(!cpu_addr))
+ if (WARN_ON_ONCE(!block))
return;
if (power != max_power)
@@ -233,10 +237,9 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
- trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
- trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
- trans->dbg.num_blocks++;
+ fw_mon->block = block;
+ fw_mon->physical = physical;
+ fw_mon->size = size;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@@ -253,11 +256,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
max_power))
return;
- /*
- * This function allocats the default fw monitor.
- * The optional additional ones will be allocated in runtime
- */
- if (trans->dbg.num_blocks)
+ if (trans->dbg.fw_mon.size)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
@@ -891,24 +890,51 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
+static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &trans->dbg.fw_mon_cfg[alloc_id];
+ struct iwl_dram_data *frag;
+
+ if (!iwl_trans_dbg_ini_valid(trans))
+ return;
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+
+ return;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH ||
+ !trans->dbg.fw_mon_ini[alloc_id].num_frags)
+ return;
+
+ frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ frag->physical >> MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (frag->physical + frag->size - 256) >>
+ MON_BUFF_SHIFT_VER2);
+}
+
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
+ const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
int i;
if (iwl_trans_dbg_ini_valid(trans)) {
- if (!trans->dbg.num_blocks)
- return;
-
- IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM buffer[0] destination\n");
- iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->dbg.fw_mon[0].physical >>
- MON_BUFF_SHIFT_VER2);
- iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- MON_BUFF_SHIFT_VER2);
+ iwl_pcie_apply_destination_ini(trans);
return;
}
@@ -959,20 +985,17 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans->dbg.fw_mon[0].physical >>
- dest->base_shift);
+ fw_mon->physical >> dest->base_shift);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size -
+ 256) >> dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size) >>
+ dest->end_shift);
}
}
@@ -1006,14 +1029,14 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_pcie_alloc_fw_monitor(trans, 0);
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- if (trans->dbg.fw_mon[0].size) {
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ if (fw_mon->size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans->dbg.fw_mon[0].physical >> 4);
+ fw_mon->physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >> 4);
+ (fw_mon->physical + fw_mon->size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@@ -1112,30 +1135,12 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
-static struct iwl_causes_list causes_list_v2[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i, arr_size =
- (trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
- ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
+ int i, arr_size = ARRAY_SIZE(causes_list);
+ struct iwl_causes_list *causes = causes_list;
/*
* Access all non RX causes and map them to the default irq.
@@ -1143,11 +1148,6 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < arr_size; i++) {
- struct iwl_causes_list *causes =
- (trans->trans_cfg->device_family !=
- IWL_DEVICE_FAMILY_22560) ?
- causes_list : causes_list_v2;
-
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
causes[i].cause_num);
@@ -1871,7 +1871,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -2559,7 +2559,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
char *buf;
int pos = 0, i, ret;
- size_t bufsz = sizeof(buf);
+ size_t bufsz;
bufsz = sizeof(char) * 121 * trans->num_rx_queues;
@@ -2801,7 +2801,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
+ void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -2840,7 +2840,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
- size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
+ size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@@ -3087,10 +3087,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
u32 monitor_len)
{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
u32 len = 0;
if (trans->dbg.dest_tlv ||
- (trans->dbg.num_blocks &&
+ (fw_mon->size &&
(trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@@ -3101,12 +3102,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans->dbg.num_blocks) {
- memcpy(fw_mon_data->data,
- trans->dbg.fw_mon[0].block,
- trans->dbg.fw_mon[0].size);
-
- monitor_len = trans->dbg.fw_mon[0].size;
+ if (fw_mon->size) {
+ memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
+ monitor_len = fw_mon->size;
} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
@@ -3145,11 +3143,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
- if (trans->dbg.num_blocks) {
+ if (trans->dbg.fw_mon.size) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->dbg.fw_mon[0].size;
- return trans->dbg.fw_mon[0].size;
+ trans->dbg.fw_mon.size;
+ return trans->dbg.fw_mon.size;
} else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
@@ -3604,6 +3602,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif
+ iwl_dbg_tlv_init(trans);
+
return trans;
out_free_ict:
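The trans.c refactor replaces the trans->dbg.fw_mon[] array plus num_blocks counter with a single struct iwl_dram_data, and ->size now doubles as the "allocated" flag everywhere. A condensed sketch of the resulting lifecycle, with a hypothetical dram_buf type standing in for struct iwl_dram_data:

/* Sketch: single-block monitor lifecycle keyed on ->size. */
struct dram_buf {
        void *block;
        dma_addr_t physical;
        u32 size;
};

static int my_fw_mon_alloc(struct device *dev, struct dram_buf *m, u32 size)
{
        if (m->size)            /* already allocated */
                return 0;

        m->block = dma_alloc_coherent(dev, size, &m->physical,
                                      GFP_KERNEL | __GFP_NOWARN);
        if (!m->block)
                return -ENOMEM;

        m->size = size;
        return 0;
}

static void my_fw_mon_free(struct device *dev, struct dram_buf *m)
{
        if (!m->size)
                return;

        dma_free_coherent(dev, m->size, m->block, m->physical);
        memset(m, 0, sizeof(*m));
}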
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8894027429d6..8ca0250de99e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -86,9 +86,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -113,14 +113,14 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* Starting from 22560, the HW expects bytes */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
- /* Until 22560, the HW expects DW */
+ /* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
@@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, iv_len, amsdu_pad;
+ u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
struct page **page_ptr;
struct tso_t tso;
- /* if the packet is protected, then it must be CCMP or GCMP */
- iv_len = ieee80211_has_protected(hdr->frame_control) ?
- IEEE80211_CCMP_HDR_LEN : 0;
-
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
/* total amount of header we may need for this A-MSDU */
hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
hdr_page = get_page_hdr(trans, hdr_room);
@@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
start_hdr = hdr_page->pos;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
*page_ptr = hdr_page->page;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
/*
- * Pull the ieee80211 header + IV to be able to use TSO core,
+ * Pull the ieee80211 header to be able to use TSO core,
* we will restore it for the tx_status flow.
*/
- skb_pull(skb, hdr_len + iv_len);
+ skb_pull(skb, hdr_len);
/*
* Remove the length of all the headers that we don't actually
@@ -339,7 +333,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -357,15 +352,15 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_len);
+ tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
}
}
- /* re -add the WiFi header and IV */
- skb_push(skb, hdr_len + iv_len);
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
return 0;
@@ -447,9 +442,8 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb_frag_address(frag),
- skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+ tb_phys, skb_frag_size(frag));
if (tb_idx < 0)
return tb_idx;
@@ -474,6 +468,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
dma_addr_t tb_phys;
int len, tb1_len, tb2_len;
void *tb1_addr;
+ struct sk_buff *frag;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
@@ -514,14 +509,25 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb->data + hdr_len,
- tb2_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+ tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
+ skb_walk_frags(skb, frag) {
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
+ tb_phys, skb_headlen(frag));
+ if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
return tfd;
out_err:
@@ -547,7 +553,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
memset(tfd, 0, sizeof(*tfd));
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = sizeof(struct iwl_tx_cmd_gen2);
else
len = sizeof(struct iwl_tx_cmd_gen3);
@@ -629,7 +635,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
(void *)dev_cmd->payload;
@@ -1130,7 +1136,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
(trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
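The new skb_walk_frags() loop in iwl_pcie_gen2_build_tx() maps the linear head of every skb chained on skb_shinfo(skb)->frag_list and then adds that skb's own page fragments, which is what lets gen2 TX accept frag-list skbs. A minimal sketch of the traversal, with map_buf() as a hypothetical stand-in for the DMA-map-and-add-TB step:

/* Sketch: walk an skb's frag_list the way the hunk above does.
 * Page frags inside each chained skb still need their own
 * skb_shinfo() walk (iwl_pcie_gen2_tx_add_frags() in the diff).
 */
static int my_map_frag_list(struct sk_buff *skb,
                            int (*map_buf)(void *data, unsigned int len))
{
        struct sk_buff *frag;
        int ret;

        skb_walk_frags(skb, frag) {
                ret = map_buf(frag->data, skb_headlen(frag));
                if (ret)
                        return ret;
        }
        return 0;
}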
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4806a04cec8c..f21f16ab2a97 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -949,7 +949,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
bc_tbls_size *= (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
@@ -2019,9 +2019,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb->data + hdr_len,
- head_tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+ tb_phys, head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
@@ -2039,9 +2038,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb_frag_address(frag),
- skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+ tb_phys, skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
@@ -2222,7 +2220,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- hdr_tb_len);
+ hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -2248,7 +2246,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- size);
+ tb_phys, size);
data_left -= size;
tso_build_data(skb, &tso, size);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 14f562cd715c..03738107fd10 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -148,23 +148,25 @@ static const char *hwsim_alpha2s[] = {
};
static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = {
- .n_reg_rules = 4,
+ .n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(2484-10, 2484+10, 40, 0, 20, 0),
REG_RULE(5150-10, 5240+10, 40, 0, 30, 0),
REG_RULE(5745-10, 5825+10, 40, 0, 30, 0),
+ REG_RULE(5855-10, 5925+10, 40, 0, 33, 0),
}
};
static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
- .n_reg_rules = 2,
+ .n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(5725-10, 5850+10, 40, 0, 30,
NL80211_RRF_NO_IR),
+ REG_RULE(5855-10, 5925+10, 40, 0, 33, 0),
}
};
@@ -354,6 +356,24 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5805), /* Channel 161 */
CHAN5G(5825), /* Channel 165 */
CHAN5G(5845), /* Channel 169 */
+
+ CHAN5G(5855), /* Channel 171 */
+ CHAN5G(5860), /* Channel 172 */
+ CHAN5G(5865), /* Channel 173 */
+ CHAN5G(5870), /* Channel 174 */
+
+ CHAN5G(5875), /* Channel 175 */
+ CHAN5G(5880), /* Channel 176 */
+ CHAN5G(5885), /* Channel 177 */
+ CHAN5G(5890), /* Channel 178 */
+ CHAN5G(5895), /* Channel 179 */
+ CHAN5G(5900), /* Channel 180 */
+ CHAN5G(5905), /* Channel 181 */
+
+ CHAN5G(5910), /* Channel 182 */
+ CHAN5G(5915), /* Channel 183 */
+ CHAN5G(5920), /* Channel 184 */
+ CHAN5G(5925), /* Channel 185 */
};
static const struct ieee80211_rate hwsim_rates[] = {
@@ -749,8 +769,8 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
+ "%llu\n");
static int hwsim_write_simulate_radar(void *dat, u64 val)
{
@@ -761,8 +781,8 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
- hwsim_write_simulate_radar, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_simulate_radar, NULL,
+ hwsim_write_simulate_radar, "%llu\n");
static int hwsim_fops_group_read(void *dat, u64 *val)
{
@@ -778,9 +798,9 @@ static int hwsim_fops_group_write(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
- hwsim_fops_group_read, hwsim_fops_group_write,
- "%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
+ hwsim_fops_group_read, hwsim_fops_group_write,
+ "%llx\n");
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -1550,7 +1570,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_ADHOC)
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ vif->type != NL80211_IFTYPE_OCB)
return;
skb = ieee80211_beacon_get(hw, vif);
@@ -1604,6 +1625,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
}
static const char * const hwsim_chanwidths[] = {
+ [NL80211_CHAN_WIDTH_5] = "ht5",
+ [NL80211_CHAN_WIDTH_10] = "ht10",
[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
[NL80211_CHAN_WIDTH_20] = "ht20",
[NL80211_CHAN_WIDTH_40] = "ht40",
@@ -1979,8 +2002,7 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@@ -2723,7 +2745,8 @@ static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_P2P_CLIENT) | \
BIT(NL80211_IFTYPE_P2P_GO) | \
BIT(NL80211_IFTYPE_ADHOC) | \
- BIT(NL80211_IFTYPE_MESH_POINT))
+ BIT(NL80211_IFTYPE_MESH_POINT) | \
+ BIT(NL80211_IFTYPE_OCB))
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
@@ -2847,6 +2870,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
+ BIT(NL80211_CHAN_WIDTH_5) |
+ BIT(NL80211_CHAN_WIDTH_10) |
BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
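IEEE80211_AMPDU_TX_START_IMMEDIATE, used in the hwsim and mwl8k hunks, is a return code that tells mac80211 to move the TX BA session straight to the operational state when the driver needs no setup work, replacing the ieee80211_start_tx_ba_cb_irqsafe() round trip. A hedged sketch of the ampdu_action shape:

/* Sketch: TX-BA start for hardware needing no session setup. */
static int my_ampdu_action(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif,
                           struct ieee80211_ampdu_params *params)
{
        switch (params->action) {
        case IEEE80211_AMPDU_TX_START:
                return IEEE80211_AMPDU_TX_START_IMMEDIATE;
        default:
                /* a real driver still handles the STOP/RX cases */
                return -EOPNOTSUPP;
        }
}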
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 242d8845da3f..30f1025ecb9b 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1179,6 +1179,10 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!card->workqueue)) {
+ ret = -ENOMEM;
+ goto err_queue;
+ }
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1230,6 +1234,7 @@ err_activate_card:
lbs_remove_card(priv);
free:
destroy_workqueue(card->workqueue);
+err_queue:
while (card->packets) {
packet = card->packets;
card->packets = card->packets->next;
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 2747c957d18c..44c8a550da4c 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -1003,7 +1003,6 @@ static int lbs_add_mesh(struct lbs_private *priv)
if (priv->mesh_tlv) {
sprintf(mesh_wdev->ssid, "mesh");
mesh_wdev->mesh_id_up_len = 4;
- ret = 1;
}
mesh_wdev->netdev = mesh_dev;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a9657ae6d782..d14e55e3c9da 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
+ adapter->is_up = true;
goto done;
err_add_intf:
@@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
mwifiex_deauthenticate(priv, NULL);
mwifiex_uninit_sw(adapter);
+ adapter->is_up = false;
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
@@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
if (!adapter)
return 0;
- mwifiex_uninit_sw(adapter);
+ if (adapter->is_up)
+ mwifiex_uninit_sw(adapter);
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 095837fba300..547ff3c578ee 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1017,6 +1017,7 @@ struct mwifiex_adapter {
/* For synchronizing FW initialization with device lifecycle. */
struct completion *fw_done;
+ bool is_up;
bool ext_scan;
u8 fw_api_ver;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index eff06d59e9df..fc1706d0647d 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -687,8 +687,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
+ kfree(card->evtbd_ring_vbase);
return -1;
+ }
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1029,8 +1032,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
return -1;
+ }
card->cmdrsp_buf = skb;
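Both mwifiex/pcie.c hunks fix the same leak shape: an skb (and, in the event-ring case, the ring backing store) was allocated before a mapping call that can fail, and the error path returned without releasing it. The general unwind pattern, sketched with a mapping callback standing in for mwifiex_map_pci_memory():

/* Sketch: release everything allocated so far when a later setup
 * step fails, instead of returning early and leaking.
 */
static int my_setup_buf(struct sk_buff **out, unsigned int size,
                        int (*map)(struct sk_buff *skb))
{
        struct sk_buff *skb = dev_alloc_skb(size);

        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        if (map(skb)) {
                kfree_skb(skb);         /* the fix: don't leak on error */
                return -ENOMEM;
        }

        *out = skb;
        return 0;
}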
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 593c594982cb..98f942b797f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1270,7 +1270,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_FH_PARAMS:
- if (element_len + 2 < sizeof(*fh_param_set))
+ if (total_ie_len < sizeof(*fh_param_set))
return -EINVAL;
fh_param_set =
(struct ieee_types_fh_param_set *) current_ptr;
@@ -1280,7 +1280,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_DS_PARAMS:
- if (element_len + 2 < sizeof(*ds_param_set))
+ if (total_ie_len < sizeof(*ds_param_set))
return -EINVAL;
ds_param_set =
(struct ieee_types_ds_param_set *) current_ptr;
@@ -1293,7 +1293,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_CF_PARAMS:
- if (element_len + 2 < sizeof(*cf_param_set))
+ if (total_ie_len < sizeof(*cf_param_set))
return -EINVAL;
cf_param_set =
(struct ieee_types_cf_param_set *) current_ptr;
@@ -1303,7 +1303,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_IBSS_PARAMS:
- if (element_len + 2 < sizeof(*ibss_param_set))
+ if (total_ie_len < sizeof(*ibss_param_set))
return -EINVAL;
ibss_param_set =
(struct ieee_types_ibss_param_set *)
@@ -1460,10 +1460,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
}
- current_ptr += element_len + 2;
-
- /* Need to account for IE ID and IE Len */
- bytes_left -= (element_len + 2);
+ current_ptr += total_ie_len;
+ bytes_left -= total_ie_len;
} /* while (bytes_left > 2) */
return ret;
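The scan.c hunks replace per-case `element_len + 2` comparisons with a `total_ie_len` that covers the whole element (ID byte, length byte, payload) and is also used for the pointer advance, so the size check and the stride can no longer disagree. A minimal sketch of that IE-walk shape, assuming the usual 2-byte ID/length header:

/* Sketch: walk information elements safely. Each IE occupies
 * total_ie_len = element_len + 2 bytes including its header;
 * validate that against bytes_left before touching the payload.
 */
static int my_walk_ies(const u8 *buf, int bytes_left)
{
        while (bytes_left >= 2) {
                u8 element_len = buf[1];
                int total_ie_len = element_len + 2;

                if (bytes_left < total_ie_len)
                        return -EINVAL; /* truncated element */

                /* ... dispatch on buf[0], first checking that
                 * total_ie_len >= sizeof(the parsed struct) ...
                 */

                buf += total_ie_len;
                bytes_left -= total_ie_len;
        }
        return 0;
}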
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 24c041dad9f6..fec38b6e86ff 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
return 0;
}
+ if (!adapter->is_up)
+ return -EBUSY;
+
mwifiex_enable_wake(adapter);
/* Enable the Host Sleep */
@@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
struct sdio_func *func = card->func;
int ret;
+ /* Prepare the adapter for the reset. */
mwifiex_shutdown_sw(adapter);
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- /* power cycle the adapter */
+ /* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card->host);
sdio_release_host(func);
- /* Previous save_adapter won't be valid after this. We will cancel
- * pending work requests.
- */
- clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
-
- ret = mwifiex_reinit_sw(adapter);
- if (ret)
- dev_err(&func->dev, "reinit failed: %d\n", ret);
+ switch (ret) {
+ case 1:
+ dev_dbg(&func->dev, "SDIO HW reset asynchronous\n");
+ complete_all(adapter->fw_done);
+ break;
+ case 0:
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret)
+ dev_err(&func->dev, "reinit failed: %d\n", ret);
+ break;
+ default:
+ dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);
+ break;
+ }
}
/* This function reads/writes firmware */
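The reset rework above hinges on mmc_hw_reset() having three outcomes on kernels of this vintage (semantics taken from the hunk, not verified against the MMC core here): negative errno on failure, 0 when the reset completed synchronously, and 1 when it completes asynchronously through a scheduled card re-probe, in which case the driver must not re-init itself and only unblocks firmware waiters. Compressed:

/* Sketch of the tri-state handling from the hunk above:
 *   < 0  reset failed
 *     0  synchronous reset - rebuild software state now
 *     1  asynchronous reset via card re-probe - just unblock waiters
 */
sdio_claim_host(func);
ret = mmc_hw_reset(func->card->host);
sdio_release_host(func);

if (ret == 1)
        complete_all(adapter->fw_done);
else if (ret == 0)
        ret = mwifiex_reinit_sw(adapter);
else
        dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);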
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index c4db6417748f..d55f229abeea 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5520,7 +5520,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rc = -EBUSY;
break;
}
- ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+ rc = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index d7a1ddc9e407..99bbc74acda8 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o
+ tx.o agg-rx.o mcu.o airtime.o
mt76-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 8f3d36a15e17..53b5a4b2dcc5 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -130,8 +130,10 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
return;
spin_lock_bh(&tid->lock);
- mt76_rx_aggr_release_frames(tid, frames, seqno);
- mt76_rx_aggr_release_head(tid, frames);
+ if (!tid->stopped) {
+ mt76_rx_aggr_release_frames(tid, frames, seqno);
+ mt76_rx_aggr_release_head(tid, frames);
+ }
spin_unlock_bh(&tid->lock);
}
@@ -257,8 +259,6 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
u8 size = tid->size;
int i;
- cancel_delayed_work(&tid->reorder_work);
-
spin_lock_bh(&tid->lock);
tid->stopped = true;
@@ -273,21 +273,19 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
}
spin_unlock_bh(&tid->lock);
+
+ cancel_delayed_work_sync(&tid->reorder_work);
}
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
- struct mt76_rx_tid *tid;
+ struct mt76_rx_tid *tid = NULL;
- rcu_read_lock();
-
- tid = rcu_dereference(wcid->aggr[tidno]);
+ rcu_swap_protected(wcid->aggr[tidno], tid,
+ lockdep_is_held(&dev->mutex));
if (tid) {
- rcu_assign_pointer(wcid->aggr[tidno], NULL);
mt76_rx_aggr_shutdown(dev, tid);
kfree_rcu(tid, rcu_head);
}
-
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
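The agg-rx.c change swaps rcu_read_lock() for rcu_swap_protected() (later kernels spell this rcu_replace_pointer()): the NULL is published and the old pointer taken under dev->mutex, outside any RCU read section, which is what makes the blocking cancel_delayed_work_sync() in the shutdown path legal. A sketch of the detach-then-free pattern with hypothetical names:

/* Sketch: detach an RCU-published object under its update-side lock,
 * then tear it down where sleeping is allowed.
 */
static void my_tid_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
        struct mt76_rx_tid *tid = NULL;

        /* swap NULL in, old pointer out; updaters hold dev->mutex */
        rcu_swap_protected(wcid->aggr[tidno], tid,
                           lockdep_is_held(&dev->mutex));
        if (!tid)
                return;

        /* no new readers can find it; may block here */
        cancel_delayed_work_sync(&tid->reorder_work);
        kfree_rcu(tid, rcu_head);
}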
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
new file mode 100644
index 000000000000..55116f395f9a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/airtime.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76.h"
+
+#define AVG_PKT_SIZE 1024
+
+/* Number of bits for an average sized packet */
+#define MCS_NBITS (AVG_PKT_SIZE << 3)
+
+/* Number of symbols for a packet with (bps) bits per symbol */
+#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
+
+/* Transmission time (1024 usec) for a packet containing (syms) * symbols */
+#define MCS_SYMBOL_TIME(sgi, syms) \
+ (sgi ? \
+ ((syms) * 18 * 1024 + 4 * 1024) / 5 : /* syms * 3.6 us */ \
+ ((syms) * 1024) << 2 /* syms * 4 us */ \
+ )
+
+/* Transmit duration for the raw data part of an average sized packet */
+#define MCS_DURATION(streams, sgi, bps) \
+ MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+
+#define BW_20 0
+#define BW_40 1
+#define BW_80 2
+
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define MT_MAX_STREAMS 4
+#define MT_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
+#define MT_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
+
+#define MT_HT_GROUPS_NB (MT_MAX_STREAMS * \
+ MT_HT_STREAM_GROUPS)
+#define MT_VHT_GROUPS_NB (MT_MAX_STREAMS * \
+ MT_VHT_STREAM_GROUPS)
+#define MT_GROUPS_NB (MT_HT_GROUPS_NB + \
+ MT_VHT_GROUPS_NB)
+
+#define MT_HT_GROUP_0 0
+#define MT_VHT_GROUP_0 (MT_HT_GROUP_0 + MT_HT_GROUPS_NB)
+
+#define MCS_GROUP_RATES 10
+
+#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
+ MT_HT_GROUP_0 + \
+ MT_MAX_STREAMS * 2 * _ht40 + \
+ MT_MAX_STREAMS * _sgi + \
+ _streams - 1
+
+#define _MAX(a, b) (((a)>(b))?(a):(b))
+
+#define GROUP_SHIFT(duration) \
+ _MAX(0, 16 - __builtin_clz(duration))
+
+/* MCS rate information for an MCS group */
+#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
+ [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
+ .shift = _s, \
+ .duration = { \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
+ } \
+}
+
+#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
+ GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
+
+#define MCS_GROUP(_streams, _sgi, _ht40) \
+ __MCS_GROUP(_streams, _sgi, _ht40, \
+ MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
+
+#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
+ (MT_VHT_GROUP_0 + \
+ MT_MAX_STREAMS * 2 * (_bw) + \
+ MT_MAX_STREAMS * (_sgi) + \
+ (_streams) - 1)
+
+#define BW2VBPS(_bw, r3, r2, r1) \
+ (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
+
+#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
+ [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 117, 54, 26)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 234, 108, 52)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 351, 162, 78)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 468, 216, 104)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 702, 324, 156)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 936, 432, 208)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1560, 720, 346)) >> _s \
+ } \
+}
+
+#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
+ GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 117, 54, 26)))
+
+#define VHT_GROUP(_streams, _sgi, _bw) \
+ __VHT_GROUP(_streams, _sgi, _bw, \
+ VHT_GROUP_SHIFT(_streams, _sgi, _bw))
+
+struct mcs_group {
+ u8 shift;
+ u16 duration[MCS_GROUP_RATES];
+};
+
+static const struct mcs_group airtime_mcs_groups[] = {
+ MCS_GROUP(1, 0, BW_20),
+ MCS_GROUP(2, 0, BW_20),
+ MCS_GROUP(3, 0, BW_20),
+ MCS_GROUP(4, 0, BW_20),
+
+ MCS_GROUP(1, 1, BW_20),
+ MCS_GROUP(2, 1, BW_20),
+ MCS_GROUP(3, 1, BW_20),
+ MCS_GROUP(4, 1, BW_20),
+
+ MCS_GROUP(1, 0, BW_40),
+ MCS_GROUP(2, 0, BW_40),
+ MCS_GROUP(3, 0, BW_40),
+ MCS_GROUP(4, 0, BW_40),
+
+ MCS_GROUP(1, 1, BW_40),
+ MCS_GROUP(2, 1, BW_40),
+ MCS_GROUP(3, 1, BW_40),
+ MCS_GROUP(4, 1, BW_40),
+
+ VHT_GROUP(1, 0, BW_20),
+ VHT_GROUP(2, 0, BW_20),
+ VHT_GROUP(3, 0, BW_20),
+ VHT_GROUP(4, 0, BW_20),
+
+ VHT_GROUP(1, 1, BW_20),
+ VHT_GROUP(2, 1, BW_20),
+ VHT_GROUP(3, 1, BW_20),
+ VHT_GROUP(4, 1, BW_20),
+
+ VHT_GROUP(1, 0, BW_40),
+ VHT_GROUP(2, 0, BW_40),
+ VHT_GROUP(3, 0, BW_40),
+ VHT_GROUP(4, 0, BW_40),
+
+ VHT_GROUP(1, 1, BW_40),
+ VHT_GROUP(2, 1, BW_40),
+ VHT_GROUP(3, 1, BW_40),
+ VHT_GROUP(4, 1, BW_40),
+
+ VHT_GROUP(1, 0, BW_80),
+ VHT_GROUP(2, 0, BW_80),
+ VHT_GROUP(3, 0, BW_80),
+ VHT_GROUP(4, 0, BW_80),
+
+ VHT_GROUP(1, 1, BW_80),
+ VHT_GROUP(2, 1, BW_80),
+ VHT_GROUP(3, 1, BW_80),
+ VHT_GROUP(4, 1, BW_80),
+};
+
+static u32
+mt76_calc_legacy_rate_duration(const struct ieee80211_rate *rate, bool short_pre,
+ int len)
+{
+ u32 duration;
+
+ switch (rate->hw_value >> 8) {
+ case MT_PHY_TYPE_CCK:
+ duration = 144 + 48; /* preamble + PLCP */
+ if (short_pre)
+ duration >>= 1;
+
+ duration += 10; /* SIFS */
+ break;
+ case MT_PHY_TYPE_OFDM:
+ duration = 20 + 16; /* preamble + SIFS */
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ len <<= 3;
+ duration += (len * 10) / rate->bitrate;
+
+ return duration;
+}
+
+u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rate;
+ bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
+ bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+ int bw, streams;
+ u32 duration;
+ int group, idx;
+
+ switch (status->bw) {
+ case RATE_INFO_BW_20:
+ bw = BW_20;
+ break;
+ case RATE_INFO_BW_40:
+ bw = BW_40;
+ break;
+ case RATE_INFO_BW_80:
+ bw = BW_80;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ switch (status->encoding) {
+ case RX_ENC_LEGACY:
+ if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+ return 0;
+
+ sband = dev->hw->wiphy->bands[status->band];
+ if (!sband || status->rate_idx > sband->n_bitrates)
+ return 0;
+
+ rate = &sband->bitrates[status->rate_idx];
+
+ return mt76_calc_legacy_rate_duration(rate, sp, len);
+ case RX_ENC_VHT:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = VHT_GROUP_IDX(streams, sgi, bw);
+ break;
+ case RX_ENC_HT:
+ streams = ((status->rate_idx >> 3) & 3) + 1;
+ idx = status->rate_idx & 7;
+ group = HT_GROUP_IDX(streams, sgi, bw);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(streams > 4))
+ return 0;
+
+ duration = airtime_mcs_groups[group].duration[idx];
+ duration <<= airtime_mcs_groups[group].shift;
+ duration *= len;
+ duration /= AVG_PKT_SIZE;
+ duration /= 1024;
+
+ duration += 36 + (streams << 2);
+
+ return duration;
+}
+
+u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
+ int len)
+{
+ struct mt76_rx_status stat = {
+ .band = info->band,
+ };
+ u32 duration = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ struct ieee80211_tx_rate *rate = &info->status.rates[i];
+ u32 cur_duration;
+
+ if (rate->idx < 0 || !rate->count)
+ break;
+
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ stat.bw = RATE_INFO_BW_80;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ stat.bw = RATE_INFO_BW_40;
+ else
+ stat.bw = RATE_INFO_BW_20;
+
+ stat.enc_flags = 0;
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ stat.rate_idx = rate->idx;
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ stat.encoding = RX_ENC_VHT;
+ stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ stat.nss = ieee80211_rate_get_vht_nss(rate);
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ stat.encoding = RX_ENC_HT;
+ } else {
+ stat.encoding = RX_ENC_LEGACY;
+ }
+
+ cur_duration = mt76_calc_rx_airtime(dev, &stat, len);
+ duration += cur_duration * rate->count;
+ }
+
+ return duration;
+}
+EXPORT_SYMBOL_GPL(mt76_calc_tx_airtime);
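As a worked example of the tables above (values computed by hand from the macros, not taken from the file): HT MCS 7 at one stream, 20 MHz, long GI carries 260 bits per symbol, so an average 1024-byte packet needs DIV_ROUND_UP(8192, 260) = 32 symbols; at 4 us per symbol that is 128 us of raw data time, and mt76_calc_rx_airtime() then adds the fixed 36 + 4 * streams us of overhead, giving about 168 us for a 1024-byte frame. A sketch of the call, assuming mt76_calc_rx_airtime() is visible to the caller:

/* Sketch: query the airtime helper for a received HT frame. */
struct mt76_rx_status status = {
        .encoding = RX_ENC_HT,
        .bw = RATE_INFO_BW_20,
        .rate_idx = 7,          /* HT MCS 7, one spatial stream */
};
u32 airtime_us = mt76_calc_rx_airtime(dev, &status, 1024); /* ~168 */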
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index d95b73fd0d2b..d2202acb8dc6 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -25,8 +25,7 @@ mt76_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
"0x%08llx\n");
-static int
-mt76_queues_read(struct seq_file *s, void *data)
+int mt76_queues_read(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
int i;
@@ -45,6 +44,7 @@ mt76_queues_read(struct seq_file *s, void *data)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76_queues_read);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len)
@@ -90,7 +90,6 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
- debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
mt76_read_rate_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 8f69d00bd940..6173c80189ba 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -166,7 +166,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
- if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+ if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
@@ -301,7 +301,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
- if (dev->drv->tx_aligned4_skbs)
+ if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
@@ -365,7 +365,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
int frames = 0;
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
- int idx;
spin_lock_bh(&q->lock);
@@ -384,7 +383,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
- idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
@@ -539,10 +538,8 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
rcu_read_unlock();
- if (done < budget) {
- napi_complete(napi);
+ if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
- }
return done;
}
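The dma.c poll change relies on napi_complete() returning false when NAPI was rescheduled or is being disabled; only a true return makes it safe to re-arm device interrupts. A sketch of the idiom with hypothetical helpers:

/* Sketch: only re-enable RX interrupts if NAPI really completed. */
static int my_poll(struct napi_struct *napi, int budget)
{
        int done = my_process_rx(napi, budget); /* hypothetical */

        if (done < budget && napi_complete(napi))
                my_rx_irq_enable(napi);         /* hypothetical */

        return done;
}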
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 1a2c143b34d0..b9f2a401041a 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -105,7 +105,15 @@ static int mt76_led_init(struct mt76_dev *dev)
dev->led_al = of_property_read_bool(np, "led-active-low");
}
- return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+ return led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static void mt76_led_cleanup(struct mt76_dev *dev)
+{
+ if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ return;
+
+ led_classdev_unregister(&dev->led_cdev);
}
static void mt76_init_stream_cap(struct mt76_dev *dev,
@@ -180,6 +188,7 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
sband->bitrates = rates;
sband->n_bitrates = n_rates;
dev->chandef.chan = &sband->channels[0];
+ dev->chan_state = &msband->chan[0];
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
@@ -306,6 +315,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy->available_antennas_tx = dev->antenna_mask;
wiphy->available_antennas_rx = dev->antenna_mask;
@@ -327,8 +337,16 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
if (dev->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
@@ -360,6 +378,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
+ mt76_led_cleanup(dev);
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
}
@@ -398,28 +417,64 @@ bool mt76_has_tx_pending(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
+static struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+ struct mt76_sband *msband;
+ int idx;
+
+ if (c->band == NL80211_BAND_2GHZ)
+ msband = &dev->sband_2g;
+ else
+ msband = &dev->sband_5g;
+
+ idx = c - &msband->sband.channels[0];
+ return &msband->chan[idx];
+}
+
+void mt76_update_survey(struct mt76_dev *dev)
+{
+ struct mt76_channel_state *state = dev->chan_state;
+ ktime_t cur_time;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ return;
+
+ if (dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
+ cur_time = ktime_get_boottime();
+ state->cc_active += ktime_to_us(ktime_sub(cur_time,
+ dev->survey_time));
+ dev->survey_time = cur_time;
+
+ if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
+ spin_lock_bh(&dev->cc_lock);
+ state->cc_bss_rx += dev->cur_cc_bss_rx;
+ dev->cur_cc_bss_rx = 0;
+ spin_unlock_bh(&dev->cc_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_update_survey);
+
void mt76_set_channel(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
- struct mt76_channel_state *state;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
-
- if (dev->drv->update_survey)
- dev->drv->update_survey(dev);
+ mt76_update_survey(dev);
dev->chandef = *chandef;
+ dev->chan_state = mt76_channel_state(dev, chandef->chan);
if (!offchannel)
dev->main_chan = chandef->chan;
- if (chandef->chan != dev->main_chan) {
- state = mt76_channel_state(dev, chandef->chan);
- memset(state, 0, sizeof(*state));
- }
+ if (chandef->chan != dev->main_chan)
+ memset(dev->chan_state, 0, sizeof(*dev->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
@@ -432,8 +487,9 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct mt76_channel_state *state;
int ret = 0;
+ mutex_lock(&dev->mutex);
if (idx == 0 && dev->drv->update_survey)
- dev->drv->update_survey(dev);
+ mt76_update_survey(dev);
sband = &dev->sband_2g;
if (idx >= sband->sband.n_channels) {
@@ -441,8 +497,10 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
sband = &dev->sband_5g;
}
- if (idx >= sband->sband.n_channels)
- return -ENOENT;
+ if (idx >= sband->sband.n_channels) {
+ ret = -ENOENT;
+ goto out;
+ }
chan = &sband->sband.channels[idx];
state = mt76_channel_state(dev, chan);
@@ -450,14 +508,26 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
memset(survey, 0, sizeof(*survey));
survey->channel = chan;
survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
- if (chan == dev->main_chan)
+ survey->filled |= dev->drv->survey_flags;
+ if (chan == dev->main_chan) {
survey->filled |= SURVEY_INFO_IN_USE;
- spin_lock_bh(&dev->cc_lock);
- survey->time = div_u64(state->cc_active, 1000);
+ if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
+ survey->filled |= SURVEY_INFO_TIME_BSS_RX;
+ }
+
survey->time_busy = div_u64(state->cc_busy, 1000);
+ survey->time_rx = div_u64(state->cc_rx, 1000);
+ survey->time = div_u64(state->cc_active, 1000);
+
+ spin_lock_bh(&dev->cc_lock);
+ survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
+ survey->time_tx = div_u64(state->cc_tx, 1000);
spin_unlock_bh(&dev->cc_lock);
+out:
+ mutex_unlock(&dev->mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
@@ -502,6 +572,7 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
status->band = mstat.band;
status->signal = mstat.signal;
status->chains = mstat.chains;
+ status->ampdu_reference = mstat.ampdu_ref;
BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
BUILD_BUG_ON(sizeof(status->chain_signal) !=
@@ -552,6 +623,84 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
}
static void
+mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len)
+{
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_sta *sta;
+ u32 airtime;
+
+ airtime = mt76_calc_rx_airtime(dev, status, len);
+ spin_lock(&dev->cc_lock);
+ dev->cur_cc_bss_rx += airtime;
+ spin_unlock(&dev->cc_lock);
+
+ if (!wcid || !wcid->sta)
+ return;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ ieee80211_sta_register_airtime(sta, status->tid, 0, airtime);
+}
+
+static void
+mt76_airtime_flush_ampdu(struct mt76_dev *dev)
+{
+ struct mt76_wcid *wcid;
+ int wcid_idx;
+
+ if (!dev->rx_ampdu_len)
+ return;
+
+ wcid_idx = dev->rx_ampdu_status.wcid_idx;
+ if (wcid_idx < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+ else
+ wcid = NULL;
+ dev->rx_ampdu_status.wcid = wcid;
+
+ mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
+
+ dev->rx_ampdu_len = 0;
+ dev->rx_ampdu_ref = 0;
+}
+
+static void
+mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+
+ if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
+ return;
+
+ if (!wcid || !wcid->sta) {
+ if (!ether_addr_equal(hdr->addr1, dev->macaddr))
+ return;
+
+ wcid = NULL;
+ }
+
+ if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
+ status->ampdu_ref != dev->rx_ampdu_ref)
+ mt76_airtime_flush_ampdu(dev);
+
+ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+ if (!dev->rx_ampdu_len ||
+ status->ampdu_ref != dev->rx_ampdu_ref) {
+ dev->rx_ampdu_status = *status;
+ dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
+ dev->rx_ampdu_ref = status->ampdu_ref;
+ }
+
+ dev->rx_ampdu_len += skb->len;
+ return;
+ }
+
+ mt76_airtime_report(dev, status, skb->len);
+}
+
+static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -567,6 +716,8 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
}
+ mt76_airtime_check(dev, skb);
+
if (!wcid || !wcid->sta)
return;
@@ -886,3 +1037,16 @@ void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
clear_bit(MT76_SCANNING, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
+
+int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct mt76_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ *tx_ant = dev->antenna_mask;
+ *rx_ant = dev->antenna_mask;
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_antenna);
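In the survey hunks above, the cc_* counters are accumulated in microseconds while cfg80211's struct survey_info reports times in milliseconds, hence the div_u64(..., 1000) on every field; only the software-maintained counters (cc_bss_rx, cc_tx) need dev->cc_lock, since the RX path bumps them too. A condensed sketch of the reporting step under those assumptions:

/* Sketch: export accumulated channel time to cfg80211.
 * Counters are microseconds; survey_info wants milliseconds.
 */
survey->time      = div_u64(state->cc_active, 1000);
survey->time_busy = div_u64(state->cc_busy, 1000);
survey->time_rx   = div_u64(state->cc_rx, 1000);

spin_lock_bh(&dev->cc_lock);    /* also written from the RX path */
survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
survey->time_tx     = div_u64(state->cc_tx, 1000);
spin_unlock_bh(&dev->cc_lock);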
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8aec7ccf2d79..fb077760347a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -49,8 +49,8 @@ struct mt76_bus_ops {
enum mt76_bus_type type;
};
-#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
-#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
+#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
@@ -152,10 +152,6 @@ struct mt76_queue_ops {
int idx, int n_desc, int bufsize,
u32 ring_base);
- int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_buf *buf, int nbufs, u32 info,
- struct sk_buff *skb, void *txwi);
-
int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
@@ -191,8 +187,6 @@ DECLARE_EWMA(signal, 10, 8);
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
- struct work_struct aggr_work;
-
unsigned long flags;
struct ewma_signal rssi;
@@ -282,11 +276,13 @@ struct mt76_hw_cap {
bool has_5ghz;
};
-#define MT_TXWI_NO_FREE BIT(0)
+#define MT_DRV_TXWI_NO_FREE BIT(0)
+#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
+#define MT_DRV_SW_RX_AIRTIME BIT(2)
struct mt76_driver_ops {
- bool tx_aligned4_skbs;
- u32 txwi_flags;
+ u32 drv_flags;
+ u32 survey_flags;
u16 txwi_size;
void (*update_survey)(struct mt76_dev *dev);
@@ -322,6 +318,9 @@ struct mt76_driver_ops {
struct mt76_channel_state {
u64 cc_active;
u64 cc_busy;
+ u64 cc_rx;
+ u64 cc_bss_rx;
+ u64 cc_tx;
};
struct mt76_sband {
@@ -367,8 +366,8 @@ enum mt76u_in_ep {
enum mt76u_out_ep {
MT_EP_OUT_INBAND_CMD,
- MT_EP_OUT_AC_BK,
MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_BK,
MT_EP_OUT_AC_VI,
MT_EP_OUT_AC_VO,
MT_EP_OUT_HCCA,
@@ -388,7 +387,8 @@ struct mt76_usb {
};
struct tasklet_struct rx_tasklet;
- struct delayed_work stat_work;
+ struct workqueue_struct *stat_wq;
+ struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
u8 in_ep[__MT_EP_IN_MAX];
@@ -421,14 +421,49 @@ struct mt76_mmio {
u32 irqmask;
};
+struct mt76_rx_status {
+ union {
+ struct mt76_wcid *wcid;
+ u8 wcid_idx;
+ };
+
+ unsigned long reorder_time;
+
+ u32 ampdu_ref;
+
+ u8 iv[6];
+
+ u8 aggr:1;
+ u8 tid;
+ u16 seqno;
+
+ u16 freq;
+ u32 flag;
+ u8 enc_flags;
+ u8 encoding:2, bw:3;
+ u8 rate_idx;
+ u8 nss;
+ u8 band;
+ s8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
+ struct mt76_channel_state *chan_state;
spinlock_t lock;
spinlock_t cc_lock;
+ u32 cur_cc_bss_rx;
+
+ struct mt76_rx_status rx_ampdu_status;
+ u32 rx_ampdu_len;
+ u32 rx_ampdu_ref;
+
struct mutex mutex;
const struct mt76_bus_ops *bus;
@@ -440,6 +475,7 @@ struct mt76_dev {
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+ u32 ampdu_ref;
struct list_head txwi_cache;
struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
@@ -463,6 +499,8 @@ struct mt76_dev {
u32 rev;
unsigned long state;
+ u32 aggr_stats[32];
+
u8 antenna_mask;
u16 chainmask;
@@ -509,29 +547,6 @@ enum mt76_phy_type {
MT_PHY_TYPE_VHT,
};
-struct mt76_rx_status {
- struct mt76_wcid *wcid;
-
- unsigned long reorder_time;
-
- u8 iv[6];
-
- u8 aggr:1;
- u8 tid;
- u16 seqno;
-
- u16 freq;
- u32 flag;
- u8 enc_flags;
- u8 encoding:2, bw:3;
- u8 rate_idx;
- u8 nss;
- u8 band;
- s8 signal;
- u8 chains;
- s8 chain_signal[IEEE80211_MAX_CHAINS];
-};
-
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__)
@@ -602,21 +617,6 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
-static inline struct mt76_channel_state *
-mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
-{
- struct mt76_sband *msband;
- int idx;
-
- if (c->band == NL80211_BAND_2GHZ)
- msband = &dev->sband_2g;
- else
- msband = &dev->sband_5g;
-
- idx = c - &msband->sband.channels[0];
- return &msband->chan[idx];
-}
-
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops);
@@ -626,6 +626,7 @@ void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
@@ -718,6 +719,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
bool more_data);
bool mt76_has_tx_pending(struct mt76_dev *dev);
void mt76_set_channel(struct mt76_dev *dev);
+void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
@@ -759,6 +761,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
+int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
@@ -768,6 +771,8 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
+ int len);
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
@@ -778,6 +783,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -799,7 +806,8 @@ static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
@@ -817,6 +825,7 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
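Note: besides moving ahead of struct mt76_dev, mt76_rx_status gains a wcid/wcid_idx union — the RX path can stash a small station index in the status block first and overwrite it with the resolved pointer later, without growing the structure. A userspace sketch of that pattern (demo types are hypothetical, not the driver's):

#include <stdio.h>
#include <stdint.h>

struct demo_wcid { int idx; };

struct demo_rx_status {
        union {
                struct demo_wcid *wcid;
                uint8_t wcid_idx;
        };
};

int main(void)
{
        static struct demo_wcid table[4] = { {0}, {1}, {2}, {3} };
        struct demo_rx_status st = { .wcid_idx = 2 };

        /* later, once the station table is known, resolve in place */
        st.wcid = &table[st.wcid_idx];
        printf("resolved wcid idx=%d\n", st.wcid->idx);
        return 0;
}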
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index 5942fe76c6e9..47c85a9fac28 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -69,6 +69,41 @@ mt7603_edcca_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
mt7603_edcca_set, "%lld\n");
+static int
+mt7603_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7603_dev *dev = file->private;
+ int bound[3], i, range;
+
+ range = mt76_rr(dev, MT_AGG_ASRCR);
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i], bound[i + 1]);
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ return 0;
+}
+
+static int
+mt7603_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7603_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt7603_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
struct dentry *dir;
@@ -77,6 +112,9 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
if (!dir)
return;
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
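Note: the ampdu_stat file above follows the stock single_open()/seq_file idiom for read-only debugfs attributes. A stripped-down kernel-style sketch of the same boilerplate, with placeholder "demo" names that are not part of the driver:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_stat_read(struct seq_file *file, void *data)
{
        seq_puts(file, "demo statistics\n");
        return 0;
}

static int demo_stat_open(struct inode *inode, struct file *f)
{
        return single_open(f, demo_stat_read, inode->i_private);
}

static const struct file_operations fops_demo_stat = {
        .open = demo_stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* registered read-only, mirroring the call above:
 * debugfs_create_file("demo_stat", 0400, dir, priv, &fops_demo_stat);
 */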
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 24d82a20d046..a6ab73060aad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -152,6 +152,8 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
+ mt7603_mac_sta_poll(dev);
+
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index ad2ccdbe7258..0696dbf28c5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -7,6 +7,8 @@
const struct mt76_driver_ops mt7603_drv_ops = {
.txwi_size = MT_TXD_SIZE,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.tx_prepare_skb = mt7603_tx_prepare_skb,
.tx_complete_skb = mt7603_tx_complete_skb,
.rx_skb = mt7603_queue_rx_skb,
@@ -524,6 +526,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
bus_ops->rmw = mt7603_rmw;
dev->mt76.bus = bus_ops;
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
spin_lock_init(&dev->ps_lock);
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
@@ -552,7 +556,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
/* init led callbacks */
@@ -561,16 +564,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
}
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
-
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
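Note: mt7603 now advertises MT_DRV_SW_RX_AIRTIME through the consolidated drv_flags word (replacing the old per-feature booleans such as tx_aligned4_skbs) plus SURVEY_INFO_TIME_TX through survey_flags. A compilable sketch of the flags-word pattern (flag constants copied from the mt76.h hunk, the rest hypothetical):

#include <stdio.h>
#include <stdint.h>

#define MT_DRV_TXWI_NO_FREE      (1u << 0)
#define MT_DRV_TX_ALIGNED4_SKBS  (1u << 1)
#define MT_DRV_SW_RX_AIRTIME     (1u << 2)

struct demo_driver_ops { uint32_t drv_flags; };

int main(void)
{
        struct demo_driver_ops ops = {
                .drv_flags = MT_DRV_SW_RX_AIRTIME,
        };

        if (ops.drv_flags & MT_DRV_SW_RX_AIRTIME)
                printf("software RX airtime accounting enabled\n");
        return 0;
}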
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index c328192307c4..812d081ad943 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -31,6 +31,16 @@ mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}
+void mt7603_mac_reset_counters(struct mt7603_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+}
+
void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
@@ -150,6 +160,8 @@ void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
addr = mt7603_wtbl4_addr(idx);
for (i = 0; i < MT_WTBL4_SIZE; i += 4)
mt76_wr(dev, addr + i, 0);
+
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}
static void
@@ -370,6 +382,84 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}
+void mt7603_mac_sta_poll(struct mt7603_dev *dev)
+{
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7603_sta *msta;
+ u32 total_airtime = 0;
+ u32 airtime[4];
+ u32 addr;
+ int i;
+
+ rcu_read_lock();
+
+ while (1) {
+ bool clear = false;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&dev->sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+
+ msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
+ poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7603_wtbl4_addr(msta->wcid.idx);
+ for (i = 0; i < 4; i++) {
+ u32 airtime_last = msta->tx_airtime_ac[i];
+
+ msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
+ airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
+ airtime[i] *= 32;
+ total_airtime += airtime[i];
+
+ if (msta->tx_airtime_ac[i] & BIT(22))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7603_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->tx_airtime_ac, 0,
+ sizeof(msta->tx_airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ for (i = 0; i < 4; i++) {
+ struct mt76_queue *q = dev->mt76.q_tx[i].q;
+ u8 qidx = q->hw_idx;
+ u8 tid = ac_to_tid[i];
+ u32 txtime = airtime[qidx];
+
+ if (!txtime)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, txtime, 0);
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!total_airtime)
+ return;
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ dev->mt76.chan_state->cc_tx += total_airtime;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
@@ -435,6 +525,20 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (dev->rx_ampdu_ts != rxd[12]) {
+ if (!++dev->mt76.ampdu_ref)
+ dev->mt76.ampdu_ref++;
+ }
+ dev->rx_ampdu_ts = rxd[12];
+
+ status->ampdu_ref = dev->mt76.ampdu_ref;
+ }
+
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -1032,8 +1136,10 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
if (idx && (cur_rate->idx != info->status.rates[i].idx ||
cur_rate->flags != info->status.rates[i].flags)) {
i++;
- if (i == ARRAY_SIZE(info->status.rates))
+ if (i == ARRAY_SIZE(info->status.rates)) {
+ i--;
break;
+ }
info->status.rates[i] = *cur_rate;
info->status.rates[i].count = 0;
@@ -1135,6 +1241,12 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
msta = container_of(wcid, struct mt7603_sta, wcid);
sta = wcid_to_sta(wcid);
+ if (list_empty(&msta->poll_list)) {
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1461,22 +1573,9 @@ void mt7603_update_channel(struct mt76_dev *mdev)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
- ktime_t cur_time;
- u32 busy;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return;
- state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
- busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);
-
- spin_lock_bh(&dev->mt76.cc_lock);
- cur_time = ktime_get_boottime();
- state->cc_busy += busy;
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- dev->mt76.survey_time));
- dev->mt76.survey_time = cur_time;
- spin_unlock_bh(&dev->mt76.cc_lock);
+ state = mdev->chan_state;
+ state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
void
@@ -1677,15 +1776,23 @@ void mt7603_mac_work(struct work_struct *work)
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
mt76.mac_work.work);
bool reset = false;
+ int i, idx;
mt76_tx_status_check(&dev->mt76, NULL, false);
mutex_lock(&dev->mt76.mutex);
dev->mac_work_count++;
- mt7603_update_channel(&dev->mt76);
+ mt76_update_survey(&dev->mt76);
mt7603_edcca_check(dev);
+ for (i = 0, idx = 0; i < 2; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
+ }
+
if (dev->mac_work_count == 10)
mt7603_false_cca_check(dev);
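Note: in the mt7603_mac_fill_rx() hunk above, all subframes of one A-MPDU share a reference number and 0 is reserved to mean "no reference", so the counter skips 0 when the u32 wraps. A userspace check of that increment:

#include <stdio.h>
#include <stdint.h>

static uint32_t next_ampdu_ref(uint32_t ref)
{
        if (!++ref)     /* wrapped to 0: bump once more, 0 is reserved */
                ref++;
        return ref;
}

int main(void)
{
        printf("%u -> %u\n", 41u, next_ampdu_ref(41u));
        printf("%u -> %u\n", 0xffffffffu, next_ampdu_ref(0xffffffffu));
        return 0;
}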
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 25d5b1608bc9..962e2822d19f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -13,6 +13,7 @@ mt7603_start(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
+ mt7603_mac_reset_counters(dev);
mt7603_mac_start(dev);
dev->mt76.survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -65,6 +66,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->vif_mask |= BIT(mvif->idx);
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@@ -86,8 +88,9 @@ static void
mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_sta *msta = &mvif->sta;
struct mt7603_dev *dev = hw->priv;
- int idx = mvif->sta.wcid.idx;
+ int idx = msta->wcid.idx;
mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
@@ -98,6 +101,11 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mt76_txq_remove(&dev->mt76, vif->txq);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
@@ -324,6 +332,7 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ INIT_LIST_HEAD(&msta->poll_list);
__skb_queue_head_init(&msta->psq);
msta->ps = ~0;
msta->smps = ~0;
@@ -360,6 +369,11 @@ mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7603_filter_tx(dev, wcid->idx, true);
spin_unlock_bh(&dev->ps_lock);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
mt7603_wtbl_clear(dev, wcid->idx);
}
@@ -555,12 +569,14 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 ssn = params->ssn;
u8 ba_size = params->buf_size;
struct mt76_txq *mtxq;
+ int ret = 0;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
@@ -582,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
@@ -590,8 +606,9 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
static void
@@ -676,6 +693,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_coverage_class = mt7603_set_coverage_class,
.set_tim = mt76_set_tim,
.get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
};
MODULE_LICENSE("Dual BSD/GPL");
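Note: the interface/station hunks above all use the same poll-list handshake — producers (TX status, RX) queue a station only if it is not already queued, and the consumer detaches entries one at a time under the same spinlock. A kernel-style sketch with hypothetical demo types (each station's list head must be INIT_LIST_HEAD()-ed first, as the patch does in sta_add/add_interface):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_sta {
        struct list_head poll_list;
};

static LIST_HEAD(demo_poll_list);
static DEFINE_SPINLOCK(demo_poll_lock);

static void demo_sta_mark_active(struct demo_sta *sta)
{
        spin_lock_bh(&demo_poll_lock);
        if (list_empty(&sta->poll_list))        /* avoid double-queueing */
                list_add_tail(&sta->poll_list, &demo_poll_list);
        spin_unlock_bh(&demo_poll_lock);
}

static struct demo_sta *demo_sta_poll_next(void)
{
        struct demo_sta *sta = NULL;

        spin_lock_bh(&demo_poll_lock);
        if (!list_empty(&demo_poll_list)) {
                sta = list_first_entry(&demo_poll_list, struct demo_sta,
                                       poll_list);
                list_del_init(&sta->poll_list);
        }
        spin_unlock_bh(&demo_poll_lock);

        return sta;
}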
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 257300fec4f8..ab54b0612e98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -61,6 +61,9 @@ struct mt7603_sta {
struct mt7603_vif *vif;
+ struct list_head poll_list;
+ u32 tx_airtime_ac[4];
+
struct sk_buff_head psq;
struct ieee80211_tx_rate rates[4];
@@ -103,12 +106,16 @@ struct mt7603_dev {
u8 vif_mask;
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
struct mt7603_sta global_sta;
u32 agc0, agc3;
u32 false_cca_ofdm, false_cca_cck;
unsigned long last_cca_adj;
+ __le32 rx_ampdu_ts;
u8 rssi_offset[3];
u8 slottime;
@@ -118,8 +125,6 @@ struct mt7603_dev {
ktime_t ed_time;
- struct mt76_queue q_rx;
-
spinlock_t ps_lock;
u8 mac_work_count;
@@ -191,6 +196,7 @@ static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
+void mt7603_mac_reset_counters(struct mt7603_dev *dev);
void mt7603_mac_dma_start(struct mt7603_dev *dev);
void mt7603_mac_start(struct mt7603_dev *dev);
void mt7603_mac_stop(struct mt7603_dev *dev);
@@ -202,6 +208,7 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size);
+void mt7603_mac_sta_poll(struct mt7603_dev *dev);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index eb9eefe8e125..6e23ed3dfdff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -212,6 +212,9 @@
#define MT_AGG_PCR_RTS_THR GENMASK(19, 0)
#define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25)
+#define MT_AGG_ASRCR MT_WF_AGG(0x060)
+#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
+
#define MT_AGG_CONTROL MT_WF_AGG(0x070)
#define MT_AGG_CONTROL_NO_BA_RULE BIT(0)
#define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1)
@@ -555,6 +558,8 @@ enum {
#define MT_MIB_STAT_PSCCA MT_MIB_STAT(16)
#define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0)
+#define MT_TX_AGG_CNT(n) MT_MIB(0xa8 + ((n) << 2))
+
#define MT_MIB_STAT_ED MT_MIB_STAT(18)
#define MT_MIB_STAT_ED_MASK GENMASK(23, 0)
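Note: MT_AGG_ASRCR_RANGE() pulls a 6-bit aggregation-size bound out of each byte lane of the register, which is how mt7603_ampdu_stat_read() builds its histogram bounds. A userspace check of the extraction (the register value below is made up):

#include <stdio.h>
#include <stdint.h>

#define DEMO_RANGE(val, n) (((val) >> ((n) << 3)) & 0x3f) /* GENMASK(5, 0) */

int main(void)
{
        uint32_t reg = 0x20100884;      /* hypothetical ASRCR value */
        int i;

        for (i = 0; i < 4; i++)
                printf("range[%d] = %u\n", i, (unsigned int)DEMO_RANGE(reg, i));
        return 0;
}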
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 2428a4659a1c..f6b75f832e6a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -37,6 +37,44 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
mt7615_scs_set, "%lld\n");
static int
+mt7615_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7615_dev *dev = file->private;
+ int bound[7], i, range;
+
+ range = mt76_rr(dev, MT_AGG_ASRCR0);
+ for (i = 0; i < 4; i++)
+ bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+ range = mt76_rr(dev, MT_AGG_ASRCR1);
+ for (i = 0; i < 3; i++)
+ bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i], bound[i + 1]);
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ return 0;
+}
+
+static int
+mt7615_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7615_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt7615_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
mt7615_radio_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
@@ -61,6 +99,63 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
return 0;
}
+static int
+mt7615_queues_acq(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ int j, acs = i / 4, index = i % 4;
+ u32 ctrl, val, qlen = 0;
+
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ ctrl = BIT(31) | BIT(15) | (acs << 8);
+
+ for (j = 0; j < 32; j++) {
+ if (val & BIT(j))
+ continue;
+
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
+ ctrl | (j + (index << 5)));
+ qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ }
+ seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ }
+
+ return 0;
+}
+
+static int
+mt7615_queues_read(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ static const struct {
+ char *queue;
+ int id;
+ } queue_map[] = {
+ { "PDMA0", MT_TXQ_BE },
+ { "MCUQ", MT_TXQ_MCU },
+ { "MCUFWQ", MT_TXQ_FWDL },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+
+ if (!q->q)
+ continue;
+
+ seq_printf(s,
+ "%s: queued=%d head=%d tail=%d\n",
+ queue_map[i].queue, q->q->queued, q->q->head,
+ q->q->tail);
+ }
+
+ return 0;
+}
+
int mt7615_init_debugfs(struct mt7615_dev *dev)
{
struct dentry *dir;
@@ -69,6 +164,11 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
if (!dir)
return -ENOMEM;
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7615_queues_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
+ mt7615_queues_acq);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index fe532cecbbdd..285d4f1d6178 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -110,6 +110,8 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_mac_sta_poll(dev);
+
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 515bb58e19fd..eccad4987ac8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -93,6 +93,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
u8 val, *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask, rx_mask, max_nss;
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
@@ -108,6 +109,23 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
dev->mt76.cap.has_5ghz = true;
break;
}
+
+ /* read tx-rx mask from eeprom */
+ val = mt76_rr(dev, MT_TOP_STRAP_STA);
+ max_nss = val & MT_TOP_3NSS ? 3 : 4;
+
+ rx_mask = FIELD_GET(MT_EE_NIC_CONF_RX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ if (!rx_mask || rx_mask > max_nss)
+ rx_mask = max_nss;
+
+ tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+
+ dev->mt76.chainmask = tx_mask << 8 | rx_mask;
+ dev->mt76.antenna_mask = BIT(tx_mask) - 1;
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
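Note: the EEPROM hunk derives chainmask and antenna_mask from the per-NSS TX/RX masks, falling back to the strap-detected maximum when a field is zero or out of range. A compilable walk-through of that arithmetic with hypothetical EEPROM values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int max_nss = 4;               /* MT_TOP_3NSS clear */
        unsigned int tx_mask = 0, rx_mask = 2;  /* hypothetical EEPROM fields */
        uint32_t chainmask, antenna_mask;

        if (!tx_mask || tx_mask > max_nss)      /* 0 means "use maximum" */
                tx_mask = max_nss;
        if (!rx_mask || rx_mask > max_nss)
                rx_mask = max_nss;

        chainmask = tx_mask << 8 | rx_mask;     /* -> 0x402 here */
        antenna_mask = (1u << tx_mask) - 1;     /* -> 0xf here */

        printf("chainmask=0x%03x antenna_mask=0x%x\n",
               (unsigned int)chainmask, (unsigned int)antenna_mask);
        return 0;
}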
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index f4a4280768d2..c3bc69ac210e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -24,6 +24,9 @@ enum mt7615_eeprom_field {
__MT_EE_MAX = 0x3bf
};
+#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
+#define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0)
+
#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
#define MT_EE_NIC_CONF_TSSI_5G BIT(6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 1104e4c8aaa6..553bd4d988f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -20,7 +20,8 @@ static void mt7615_phy_init(struct mt7615_dev *dev)
static void mt7615_mac_init(struct mt7615_dev *dev)
{
- u32 val;
+ u32 val, mask, set;
+ int i;
/* enable band 0/1 clk */
mt76_set(dev, MT_CFG_CCR,
@@ -50,7 +51,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_TMAC_CTCR0_INS_DDLMT_EN);
mt7615_mcu_set_rts_thresh(dev, 0x92b);
- mt7615_mac_set_scs(dev, false);
+ mt7615_mac_set_scs(dev, true);
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
@@ -85,6 +86,24 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+ mask = MT_DMA_RCFR0_MCU_RX_MGMT |
+ MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_DMA_RCFR0_MCU_RX_CTL_BAR |
+ MT_DMA_RCFR0_MCU_RX_BYPASS |
+ MT_DMA_RCFR0_RX_DROPPED_UCAST |
+ MT_DMA_RCFR0_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
+ FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
+ mt76_rmw(dev, MT_DMA_BN0RCFR0, mask, set);
+ mt76_rmw(dev, MT_DMA_BN1RCFR0, mask, set);
+
+ for (i = 0; i < MT7615_WTBL_SIZE; i++)
+ mt7615_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
@@ -158,6 +177,9 @@ static struct ieee80211_rate mt7615_rates[] = {
static const struct ieee80211_iface_limit if_limits[] = {
{
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
.max = MT7615_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
@@ -249,12 +271,14 @@ int mt7615_register_device(struct mt7615_dev *dev)
struct wiphy *wiphy = hw->wiphy;
int ret;
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+
ret = mt7615_init_hardware(dev);
if (ret)
return ret;
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
-
hw->queues = 4;
hw->max_rates = 3;
hw->max_report_rates = 7;
@@ -268,7 +292,8 @@ int mt7615_register_device(struct mt7615_dev *dev)
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
@@ -278,16 +303,8 @@ int mt7615_register_device(struct mt7615_dev *dev)
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- dev->mt76.chainmask = 0x404;
- dev->mt76.antenna_mask = 0xf;
dev->dfs_state = -1;
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_AP);
-
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
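Note: the mt7615_mac_init() hunk programs the BN0/BN1 RX filters with a single read-modify-write — mask names every affected field, set carries the FIELD_PREP()-prepared values. A userspace model of that rmw (field offsets taken from the regs.h hunk below, register values invented):

#include <stdio.h>
#include <stdint.h>

static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t set)
{
        return (reg & ~mask) | set;     /* clear fields, then or in values */
}

int main(void)
{
        uint32_t ucast = 3u << 24;      /* GENMASK(25, 24) */
        uint32_t mcast = 3u << 26;      /* GENMASK(27, 26) */
        uint32_t set = (2u << 24) | (2u << 26); /* FIELD_PREP(..., 2) twice */
        uint32_t reg = 0xffffffff;      /* hypothetical current value */

        printf("RCFR0=0x%08x\n", rmw(reg, ucast | mcast, set));
        return 0;
}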
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index e07ce2c10013..c77adc5d2552 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -41,6 +41,25 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
+void mt7615_mac_reset_counters(struct mt7615_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+
+ /* TODO: add DBDC support */
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR37(0));
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -62,6 +81,16 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
+ if (status->wcid) {
+ struct mt7615_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7615_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
/* TODO: properly support DBDC */
status->freq = dev->mt76.chandef.chan->center_freq;
status->band = dev->mt76.chandef.chan->band;
@@ -83,6 +112,20 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (dev->rx_ampdu_ts != rxd[12]) {
+ if (!++dev->mt76.ampdu_ref)
+ dev->mt76.ampdu_ref++;
+ }
+ dev->rx_ampdu_ts = rxd[12];
+
+ status->ampdu_ref = dev->mt76.ampdu_ref;
+ }
+
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -460,6 +503,91 @@ static u32 mt7615_mac_wtbl_addr(int wcid)
return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
}
+bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+void mt7615_mac_sta_poll(struct mt7615_dev *dev)
+{
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ static const u8 hw_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+ u32 addr, tx_time[4], rx_time[4];
+ int i;
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&dev->sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&dev->sta_poll_list,
+ struct mt7615_sta, poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7615_mac_wtbl_addr(msta->wcid.idx) + 19 * 4;
+
+ for (i = 0; i < 4; i++, addr += 8) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7615_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < 4; i++) {
+ u32 tx_cur = tx_time[i];
+ u32 rx_cur = rx_time[hw_queue_map[i]];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
@@ -692,11 +820,8 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
mt76_wr(dev, MT_WTBL_RICR0, w0);
mt76_wr(dev, MT_WTBL_RICR1, w1);
- mt76_wr(dev, MT_WTBL_UPDATE,
- FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid->idx) |
- MT_WTBL_UPDATE_RXINFO_UPDATE);
-
- if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ if (!mt7615_mac_wtbl_update(dev, wcid->idx,
+ MT_WTBL_UPDATE_RXINFO_UPDATE))
return -ETIMEDOUT;
return 0;
@@ -914,8 +1039,10 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
if (idx && (cur_rate->idx != info->status.rates[i].idx ||
cur_rate->flags != info->status.rates[i].flags)) {
i++;
- if (i == ARRAY_SIZE(info->status.rates))
+ if (i == ARRAY_SIZE(info->status.rates)) {
+ i--;
break;
+ }
info->status.rates[i] = *cur_rate;
info->status.rates[i].count = 0;
@@ -1026,6 +1153,11 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
msta = container_of(wcid, struct mt7615_sta, wcid);
sta = wcid_to_sta(wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1239,38 +1371,49 @@ void mt7615_update_channel(struct mt76_dev *mdev)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_channel_state *state;
- ktime_t cur_time;
- u32 busy;
-
- if (!test_bit(MT76_STATE_RUNNING, &mdev->state))
- return;
+ u64 busy_time, tx_time, rx_time, obss_time;
- state = mt76_channel_state(mdev, mdev->chandef.chan);
/* TODO: add DBDC support */
- busy = mt76_get_field(dev, MT_MIB_SDR16(0), MT_MIB_BUSY_MASK);
-
- spin_lock_bh(&mdev->cc_lock);
- cur_time = ktime_get_boottime();
- state->cc_busy += busy;
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- mdev->survey_time));
- mdev->survey_time = cur_time;
- spin_unlock_bh(&mdev->cc_lock);
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(0),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(0),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(0),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_TIME5,
+ MT_MIB_OBSSTIME_MASK);
+
+ state = mdev->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_dev *dev;
+ int i, idx;
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
mac_work.work);
mutex_lock(&dev->mt76.mutex);
- mt7615_update_channel(&dev->mt76);
+ mt76_update_survey(&dev->mt76);
if (++dev->mac_work_count == 5) {
mt7615_mac_scs_check(dev);
dev->mac_work_count = 0;
}
+
+ for (i = 0, idx = 0; i < 4; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
+ }
mutex_unlock(&dev->mt76.mutex);
mt76_tx_status_check(&dev->mt76, NULL, false);
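Note: mt7615_update_channel() now feeds four cumulative counters instead of computing busy time against wall-clock deltas. Total RX time is own-BSS RX plus OBSS time while cc_bss_rx tracks only the former, which is what lets the survey code report SURVEY_INFO_TIME_RX and SURVEY_INFO_TIME_BSS_RX separately. A userspace sketch of the accounting (all numbers invented):

#include <stdio.h>
#include <stdint.h>

struct demo_chan_state {
        uint64_t cc_busy, cc_tx, cc_rx, cc_bss_rx;
};

int main(void)
{
        struct demo_chan_state st = { 0 };
        uint64_t busy = 5000, tx = 1200, rx = 800, obss = 300; /* us */

        st.cc_busy += busy;
        st.cc_tx += tx;
        st.cc_rx += rx + obss;  /* own BSS + overlapping BSS */
        st.cc_bss_rx += rx;     /* own BSS only */

        printf("busy=%llu tx=%llu rx=%llu bss_rx=%llu\n",
               (unsigned long long)st.cc_busy, (unsigned long long)st.cc_tx,
               (unsigned long long)st.cc_rx,
               (unsigned long long)st.cc_bss_rx);
        return 0;
}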
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 87c748715b5d..070b03403894 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -16,6 +16,8 @@ static int mt7615_start(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = hw->priv;
+ mt7615_mac_reset_counters(dev);
+
dev->mt76.survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
@@ -39,6 +41,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
switch (type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
/* ap use hw bssid 0 and ext bssid */
if (~mask & BIT(HW_BSSID_0))
return HW_BSSID_0;
@@ -58,7 +61,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
default:
WARN_ON(1);
break;
- };
+ }
return -1;
}
@@ -97,8 +100,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
idx = MT7615_WTBL_RESERVED - mvif->idx;
+
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
@@ -115,8 +122,9 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = &mvif->sta;
struct mt7615_dev *dev = hw->priv;
- int idx = mvif->sta.wcid.idx;
+ int idx = msta->wcid.idx;
/* TODO: disable beacon for the bss */
@@ -129,6 +137,11 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
dev->vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
}
static int mt7615_set_channel(struct mt7615_dev *dev)
@@ -151,8 +164,8 @@ static int mt7615_set_channel(struct mt7615_dev *dev)
ret = mt7615_dfs_init_radar_detector(dev);
mt7615_mac_cca_stats_reset(dev);
dev->mt76.survey_time = ktime_get_boottime();
- /* TODO: add DBDC support */
- mt76_rr(dev, MT_MIB_SDR16(0));
+
+ mt7615_mac_reset_counters(dev);
out:
clear_bit(MT76_RESET, &dev->mt76.state);
@@ -263,6 +276,11 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7615_dev *dev = hw->priv;
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
@@ -296,6 +314,11 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
*total_flags = flags;
mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1, ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1, ctl_flags);
}
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@@ -348,9 +371,12 @@ int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ INIT_LIST_HEAD(&msta->poll_list);
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
mt7615_mcu_add_wtbl(dev, vif, sta);
mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
@@ -371,9 +397,18 @@ void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
mt7615_mcu_del_wtbl(dev, sta);
+
+ mt7615_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
}
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
@@ -449,12 +484,14 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
+ int ret = 0;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
@@ -477,7 +514,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
@@ -485,8 +522,9 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
const struct ieee80211_ops mt7615_ops = {
@@ -511,4 +549,5 @@ const struct ieee80211_ops mt7615_ops = {
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7615_channel_switch_beacon,
.get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
};
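Note: mt7615_configure_filter() now keys the RFCR1 control-frame drop bits off FIF_CONTROL — cleared when mac80211 wants control frames delivered, set otherwise. A small model of that toggle (drop-bit positions copied from the regs.h hunk; the FIF_CONTROL stand-in value is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define DROP_ACK        (1u << 4)
#define DROP_BA         (1u << 6)
#define FIF_CONTROL     (1u << 0)       /* stand-in for the mac80211 flag */

int main(void)
{
        uint32_t ctl_flags = DROP_ACK | DROP_BA;
        uint32_t reg = 0;
        uint32_t total_flags = 0;       /* no FIF_CONTROL requested */

        if (total_flags & FIF_CONTROL)
                reg &= ~ctl_flags;      /* pass control frames up the stack */
        else
                reg |= ctl_flags;       /* let hardware drop them */

        printf("RFCR1=0x%x\n", reg);
        return 0;
}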
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 842cd81704db..f229c9ce9f65 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -848,6 +848,11 @@ int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
conn_type = CONNECTION_INFRA_STA;
break;
}
+ case NL80211_IFTYPE_ADHOC:
+ conn_type = CONNECTION_IBSS_ADHOC;
+ tx_wlan_idx = mvif->sta.wcid.idx;
+ net_type = NETWORK_IBSS;
+ break;
default:
WARN_ON(1);
break;
@@ -1073,10 +1078,13 @@ int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
case NL80211_IFTYPE_STATION:
req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
break;
+ case NL80211_IFTYPE_ADHOC:
+ req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
default:
WARN_ON(1);
break;
- };
+ }
if (en) {
req.basic.conn_state = CONN_STATE_PORT_SECURE;
@@ -1297,8 +1305,10 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
};
int ret;
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 7963e302d705..21486831172c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -56,6 +56,9 @@ struct mt7615_sta {
struct mt7615_vif *vif;
+ struct list_head poll_list;
+ u32 airtime_ac[8];
+
struct ieee80211_tx_rate rates[4];
struct mt7615_rate_set rateset[2];
@@ -81,6 +84,11 @@ struct mt7615_dev {
u32 vif_mask;
u32 omac_mask;
+ __le32 rx_ampdu_ts;
+
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
struct {
u8 n_pulses;
u32 period;
@@ -229,8 +237,11 @@ static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
}
void mt7615_update_channel(struct mt76_dev *mdev);
+bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
+void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev);
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index e250607e0a80..1eb1eb659c3f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -72,7 +72,10 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
- .txwi_flags = MT_TXWI_NO_FREE,
+ .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
.rx_skb = mt7615_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index b193814d5cf8..61a4aa9ac6e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -6,6 +6,8 @@
#define MT_HW_REV 0x1000
#define MT_HW_CHIPID 0x1008
+#define MT_TOP_STRAP_STA 0x1010
+#define MT_TOP_3NSS BIT(24)
#define MT_TOP_MISC2 0x1134
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
@@ -65,6 +67,17 @@
#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+#define MT_PLE_BASE 0x8000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+ ((n) << 2))
+
#define MT_WF_PHY_BASE 0x10000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
@@ -125,6 +138,10 @@
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+#define MT_AGG_ASRCR0 MT_WF_AGG(0x060)
+#define MT_AGG_ASRCR1 MT_WF_AGG(0x064)
+#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
+
#define MT_AGG_ACR0 MT_WF_AGG(0x070)
#define MT_AGG_ACR1 MT_WF_AGG(0x170)
#define MT_AGG_ACR_NO_BA_RULE BIT(0)
@@ -176,6 +193,22 @@
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+#define MT_WF_RFCR1 MT_WF_RMAC(0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
+
+#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380)
+
+#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8)
+#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
+
#define MT_WF_DMA_BASE 0x21800
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
@@ -183,6 +216,15 @@
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+#define MT_DMA_BN0RCFR0 MT_WF_DMA(0x070)
+#define MT_DMA_BN1RCFR0 MT_WF_DMA(0x0b0)
+#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
+#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
+#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
+#define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21)
+#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
+#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
+
#define MT_WTBL_BASE 0x30000
#define MT_WTBL_ENTRY_SIZE 256
@@ -198,6 +240,7 @@
#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030)
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
#define MT_WTBL_UPDATE_RXINFO_UPDATE BIT(11)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_BUSY BIT(31)
@@ -255,8 +298,18 @@
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR16(n) MT_WF_MIB(0x48 + ((n) << 9))
-#define MT_MIB_BUSY_MASK GENMASK(23, 0)
+#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
+#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR16(n) MT_WF_MIB(0x048 + ((n) << 9))
+#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR36(n) MT_WF_MIB(0x098 + ((n) << 9))
+#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
+#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+
+#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
#define MT_EFUSE_BASE 0x81070000
#define MT_EFUSE_BASE_CTRL 0x000
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 9d4426f6905f..a03e2d01fba7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -212,7 +212,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
void mt76x0_get_power_info(struct mt76x02_dev *dev,
struct ieee80211_channel *chan, s8 *tp)
{
- struct mt76x0_chan_map {
+ static const struct mt76x0_chan_map {
u8 chan;
u8 offset;
} chan_map[] = {
@@ -343,6 +343,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
version, fae);
mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76_eeprom_override(&dev->mt76);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index cf7fc307322b..388b54cded1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -150,31 +150,6 @@ static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201);
}
-static void mt76x0_reset_counters(struct mt76x02_dev *dev)
-{
- mt76_rr(dev, MT_RX_STAT_0);
- mt76_rr(dev, MT_RX_STAT_1);
- mt76_rr(dev, MT_RX_STAT_2);
- mt76_rr(dev, MT_TX_STA_0);
- mt76_rr(dev, MT_TX_STA_1);
- mt76_rr(dev, MT_TX_STA_2);
-}
-
-int mt76x0_mac_start(struct mt76x02_dev *dev)
-{
- mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
-
- if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
- return -ETIMEDOUT;
-
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-
- return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? -ETIMEDOUT : 0;
-}
-EXPORT_SYMBOL_GPL(mt76x0_mac_start);
-
void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
int i = 200, ok = 0;
@@ -244,8 +219,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
for (i = 0; i < 256; i++)
mt76x02_mac_wcid_setup(dev, i, 0, NULL);
- mt76x0_reset_counters(dev);
-
ret = mt76x0_eeprom_init(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index efb7ca93863d..b2ccf50512dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -13,19 +13,16 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
cancel_delayed_work_sync(&dev->cal_work);
mt76x02_pre_tbtt_enable(dev, false);
- if (mt76_is_mmio(dev))
+ if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mt76_set_channel(&dev->mt76);
mt76x0_phy_set_channel(dev, chandef);
- /* channel cycle counters read-and-clear */
- mt76_rr(dev, MT_CH_IDLE);
- mt76_rr(dev, MT_CH_BUSY);
-
+ mt76x02_mac_cc_reset(dev);
mt76x02_edcca_init(dev);
- if (mt76_is_mmio(dev)) {
+ if (mt76_is_mmio(&dev->mt76)) {
mt76x02_dfs_init_params(dev);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 26517e062bdb..6953f253a28a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -30,7 +30,7 @@
static inline bool is_mt7610e(struct mt76x02_dev *dev)
{
- if (!mt76_is_mmio(dev))
+ if (!mt76_is_mmio(&dev->mt76))
return false;
return mt76_chip(&dev->mt76) == 0x7610;
@@ -46,7 +46,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev);
int mt76x0_register_device(struct mt76x02_dev *dev);
void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
-int mt76x0_mac_start(struct mt76x02_dev *dev);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 7705e55aa3d1..e2974e0ae1fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -51,19 +51,6 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
mt76x0e_stop_hw(dev);
}
-static int
-mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- if (is_mt7630(dev))
- return -EOPNOTSUPP;
-
- return mt76x02_set_key(hw, cmd, vif, sta, key);
-}
-
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
@@ -80,7 +67,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
- .set_key = mt76x0e_set_key,
+ .set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,
@@ -94,6 +81,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x02_set_coverage_class,
.set_rts_threshold = mt76x02_set_rts_threshold,
+ .get_antenna = mt76_get_antenna,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -132,15 +120,6 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY |
- MT_CH_CCA_RC_EN |
- FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
-
err = mt76x0_register_device(dev);
if (err < 0)
return err;
@@ -155,7 +134,9 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
- .tx_aligned4_skbs = true,
+ .drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+ MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 711a352dfd5c..2ecd45f8af90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -102,7 +102,7 @@ out:
static int
mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
struct mt76_reg_pair pair = {
.reg = offset,
.value = val,
@@ -121,7 +121,7 @@ static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
int ret;
u32 val;
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
struct mt76_reg_pair pair = {
.reg = offset,
};
@@ -176,7 +176,7 @@ mt76x0_phy_rf_csr_wr_rp(struct mt76x02_dev *dev,
}
#define RF_RANDOM_WRITE(dev, tab) do { \
- if (mt76_is_mmio(dev)) \
+ if (mt76_is_mmio(&dev->mt76)) \
mt76x0_phy_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
else \
mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
@@ -744,7 +744,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
if (!tx_mode) {
data = mt76_rr(dev, MT_BBP(CORE, 1));
- if (is_mt7630(dev) && mt76_is_mmio(dev)) {
+ if (is_mt7630(dev) && mt76_is_mmio(&dev->mt76)) {
int offset;
/* 2.3 * 8192 or 1.5 * 8192 */
@@ -899,7 +899,6 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
}
mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val);
- msleep(350);
mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
usleep_range(15000, 20000);
@@ -967,7 +966,7 @@ void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
break;
}
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
mt76x0_phy_bbp_set_bw(dev, chandef->width);
} else {
if (chandef->width == NL80211_CHAN_WIDTH_80 ||
@@ -1123,7 +1122,7 @@ static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
switch (reg) {
case MT_RF(0, 3):
- if (mt76_is_mmio(dev)) {
+ if (mt76_is_mmio(&dev->mt76)) {
if (is_mt7630(dev))
val = 0x70;
else
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 00a445d27599..65ba9fc6ea0b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -103,7 +103,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- ret = mt76x0_mac_start(dev);
+ ret = mt76x02u_mac_start(dev);
if (ret)
return ret;
@@ -138,6 +138,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.get_survey = mt76_get_survey,
.set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
+ .get_antenna = mt76_get_antenna,
};
static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
@@ -165,13 +166,6 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY);
-
return 0;
}
@@ -211,6 +205,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
@@ -226,7 +222,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
u32 mac_rev;
int ret;
- mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -278,6 +274,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
err:
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
+ mt76u_deinit(&dev->mt76);
ieee80211_free_hw(mdev->hw);
return ret;
@@ -297,6 +294,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf)
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
+ mt76u_deinit(&dev->mt76);
ieee80211_free_hw(dev->mt76.hw);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index e858bba8c8ff..0ca0bbfe8769 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -81,6 +81,7 @@ struct mt76x02_dev {
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
spinlock_t txstatus_fifo_lock;
+ u32 tx_airtime;
struct sk_buff *rx_head;
@@ -92,8 +93,6 @@ struct mt76x02_dev {
const struct mt76x02_beacon_ops *beacon_ops;
- u32 aggr_stats[32];
-
struct sk_buff *beacons[8];
u8 beacon_data_mask;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 92305bd31aa1..4209209ac940 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -77,10 +77,7 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
+ dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 0cb2a7b35fe5..68b40d63a46d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -19,7 +19,8 @@ mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
seq_puts(file, "\n");
seq_puts(file, "Count: ");
for (j = 0; j < 8; j++)
- seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+ seq_printf(file, "%8d | ",
+ dev->mt76.aggr_stats[i * 8 + j]);
seq_puts(file, "\n");
seq_puts(file, "--------");
for (j = 0; j < 8; j++)
@@ -143,6 +144,8 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
if (!dir)
return;
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index abacb4ea7179..4460548f346a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -7,6 +7,27 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
+void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
+{
+ int i;
+
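+ /* the statistics registers are clear-on-read: read them all once to zero the counters and drain the TX status FIFO */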
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
+
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
@@ -462,8 +483,8 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
if (st->pktid & MT_PACKET_ID_HAS_RATE) {
- first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
- first_rate |= st->pktid & MT_RXWI_RATE_INDEX;
+ first_rate = st->rate & ~MT_PKTID_RATE;
+ first_rate |= st->pktid & MT_PKTID_RATE;
mt76x02_mac_process_tx_rate(&rate[0], first_rate,
dev->mt76.chandef.chan->band);
@@ -516,10 +537,20 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct ieee80211_tx_status status = {
.info = &info
};
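+ /* one representative TID per access category, as expected by ieee80211_sta_register_airtime() */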
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
struct mt76_wcid *wcid = NULL;
struct mt76x02_sta *msta = NULL;
struct mt76_dev *mdev = &dev->mt76;
struct sk_buff_head list;
+ u32 duration = 0;
+ u8 cur_pktid;
+ u32 ac = 0;
+ int len = 0;
if (stat->pktid == MT_PACKET_ID_NO_ACK)
return;
@@ -549,10 +580,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
mt76_tx_status_unlock(mdev, &list);
- rcu_read_unlock();
- return;
+ goto out;
}
+
if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache;
@@ -565,10 +596,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
stat->wcid == msta->status.wcid && msta->n_frames < 32) {
msta->n_frames++;
mt76_tx_status_unlock(mdev, &list);
- rcu_read_unlock();
- return;
+ goto out;
}
+ cur_pktid = msta->status.pktid;
mt76x02_mac_fill_tx_status(dev, msta, status.info,
&msta->status, msta->n_frames);
@@ -576,16 +607,39 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
msta->n_frames = 1;
*update = 0;
} else {
+ cur_pktid = stat->pktid;
mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
*update = 1;
}
- if (status.skb)
+ if (status.skb) {
+ info = *status.info;
+ len = status.skb->len;
+ ac = skb_get_queue_mapping(status.skb);
mt76_tx_status_skb_done(mdev, status.skb, &list);
+ } else if (msta) {
+ len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
+ ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
+ }
+
mt76_tx_status_unlock(mdev, &list);
if (!status.skb)
ieee80211_tx_status_ext(mt76_hw(dev), &status);
+
+ if (!len)
+ goto out;
+
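+ /* estimate TX airtime from the reported rate and frame length, then account it to the channel and to the station */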
+ duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ dev->tx_airtime += duration;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+
+ if (msta)
+ ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);
+
+out:
rcu_read_unlock();
}
@@ -768,6 +822,21 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
status->aggr = true;
+ if (rxinfo & MT_RXINFO_AMPDU) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+ status->ampdu_ref = dev->mt76.ampdu_ref;
+
+ /*
+ * When receiving an A-MPDU subframe and RSSI info is not valid,
+ * we can assume that more subframes belonging to the same A-MPDU
+ * are coming. The last one will have valid RSSI info.
+ */
+ if (rxinfo & MT_RXINFO_RSSI) {
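+ /* last subframe seen: advance the reference for the next A-MPDU, skipping 0 on wrap-around */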
+ if (!++dev->mt76.ampdu_ref)
+ dev->mt76.ampdu_ref++;
+ }
+ }
+
if (WARN_ON_ONCE(len > skb->len))
return -EINVAL;
@@ -948,16 +1017,13 @@ void mt76x02_update_channel(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
- u32 active, busy;
- state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
-
- busy = mt76_rr(dev, MT_CH_BUSY);
- active = busy + mt76_rr(dev, MT_CH_IDLE);
+ state = mdev->chan_state;
+ state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
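+ /* MT_CH_BUSY is clear-on-read, so each read returns the busy time accumulated since the previous survey update */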
spin_lock_bh(&dev->mt76.cc_lock);
- state->cc_busy += busy;
- state->cc_active += active;
+ state->cc_tx += dev->tx_airtime;
+ dev->tx_airtime = 0;
spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);
@@ -1094,12 +1160,12 @@ void mt76x02_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
- mt76x02_update_channel(&dev->mt76);
+ mt76_update_survey(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
- dev->aggr_stats[idx++] += val & 0xffff;
- dev->aggr_stats[idx++] += val >> 16;
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
}
if (!dev->mt76.beacon_mask)
@@ -1116,6 +1182,25 @@ void mt76x02_mac_work(struct work_struct *work)
MT_MAC_WORK_INTERVAL);
}
+void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
+{
+ dev->mt76.survey_time = ktime_get_boottime();
+
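+ /* restart the channel timers: count TX/RX/NAV/EIFS time as busy and clear the running counters */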
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+ /* channel cycle counters read-and-clear */
+ mt76_rr(dev, MT_CH_BUSY);
+ mt76_rr(dev, MT_CH_IDLE);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
+
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index efa4ef945e35..7d946aa77182 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -23,11 +23,16 @@ struct mt76x02_tx_status {
#define MT_VIF_WCID(_n) (254 - ((_n) & 7))
#define MT_MAX_VIFS 8
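+/* when MT_PACKET_ID_HAS_RATE is set, the pktid carries the rate index (bits 4:0) and the access category (bits 6:5) */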
+#define MT_PKTID_RATE GENMASK(4, 0)
+#define MT_PKTID_AC GENMASK(6, 5)
+
struct mt76x02_vif {
struct mt76_wcid group_wcid; /* must be first */
u8 idx;
};
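+/* per-station moving average of frame length (8 fractional bits, 1/8 weight), used to estimate A-MPDU airtime when no skb is available */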
+DECLARE_EWMA(pktlen, 8, 8);
+
struct mt76x02_sta {
struct mt76_wcid wcid; /* must be first */
@@ -35,6 +40,7 @@ struct mt76x02_sta {
struct mt76x02_tx_status status;
int n_frames;
+ struct ewma_pktlen pktlen;
};
#define MT_RXINFO_BA BIT(0)
@@ -161,6 +167,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
return false;
}
+void mt76x02_mac_reset_counters(struct mt76x02_dev *dev);
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
@@ -192,6 +199,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
+void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 4be7a24097cc..6274b6a24b07 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -114,7 +114,7 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
.id = cpu_to_le32(type),
.value = cpu_to_le32(param),
};
- bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev);
+ bool is_mt76x2e = mt76_is_mmio(&dev->mt76) && is_mt76x2(dev);
int ret;
if (is_mt76x2e)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index dc773070481d..4e2371c926d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -343,6 +343,7 @@ EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
void mt76x02_mac_start(struct mt76x02_dev *dev)
{
+ mt76x02_mac_reset_counters(dev);
mt76x02_dma_enable(dev);
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
mt76_wr(dev, MT_MAC_SYS_CTRL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index f27aade34c1e..13825f642087 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -158,7 +158,9 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
/* encode packet rate for no-skb packet id to fix up status reporting */
if (pid == MT_PACKET_ID_NO_SKB)
pid = MT_PACKET_ID_HAS_RATE |
- (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+ (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX) |
+ FIELD_PREP(MT_PKTID_AC,
+ skb_get_queue_mapping(tx_info->skb));
txwi->pktid = pid;
@@ -171,6 +173,12 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
tx_info->info |= MT_TXD_INFO_WIV;
+ if (sta) {
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+
+ ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 98329debc033..a57dcc8820aa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -8,6 +8,7 @@
#include "mt76x02.h"
+int mt76x02u_mac_start(struct mt76x02_dev *dev);
void mt76x02u_init_mcu(struct mt76_dev *dev);
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 78dfc1e7f27b..d03d3c8e296c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -23,6 +23,27 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
+int mt76x02u_mac_start(struct mt76x02_dev *dev)
+{
+ mt76x02_mac_reset_counters(dev);
+
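+ /* enable the TX path first and let WPDMA settle before turning on RX */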
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
+
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
struct sk_buff *iter, *last = skb;
@@ -83,7 +104,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
/* encode packet rate for no-skb packet id to fix up status reporting */
if (pid == MT_PACKET_ID_NO_SKB)
pid = MT_PACKET_ID_HAS_RATE |
- (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+ (le16_to_cpu(txwi->rate) & MT_PKTID_RATE) |
+ FIELD_PREP(MT_PKTID_AC,
+ skb_get_queue_mapping(tx_info->skb));
txwi->pktid = pid;
@@ -97,6 +120,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
flags |= MT_TXD_INFO_WIV;
+ if (sta) {
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+
+ ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
+ }
+
return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index aec73a0295e8..0960fc56b672 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -153,15 +153,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
wiphy->iface_combinations = mt76x02u_if_comb;
@@ -190,7 +182,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
@@ -264,6 +255,7 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.hw_key_idx = -1;
mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
mt76x02_mac_wcid_set_drop(dev, idx, false);
+ ewma_pktlen_init(&msta->pktlen);
if (vif->type == NL80211_IFTYPE_AP)
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
@@ -365,12 +357,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
+ int ret = 0;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
@@ -393,15 +387,16 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
@@ -443,7 +438,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* data registers and sent via the HW beacon engine, so they must
* already be encrypted.
*/
- if (mt76_is_usb(dev) &&
+ if (mt76_is_usb(&dev->mt76) &&
vif->type == NL80211_IFTYPE_AP &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
@@ -624,7 +619,7 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- if (mt76_is_mmio(dev))
+ if (mt76_is_mmio(mdev))
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
index a1583021e1e9..d5c3d26b94c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -12,7 +12,6 @@ struct mt76x02_dev;
struct mt76x2_sta;
struct mt76x02_vif;
-int mt76x2_mac_start(struct mt76x02_dev *dev);
void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
static inline void mt76x2_mac_resume(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
index c876bac43751..f9d37c6cf1f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -24,7 +24,6 @@ void mt76x2u_cleanup(struct mt76x02_dev *dev);
void mt76x2u_stop_hw(struct mt76x02_dev *dev);
int mt76x2u_mac_reset(struct mt76x02_dev *dev);
-int mt76x2u_mac_start(struct mt76x02_dev *dev);
int mt76x2u_mac_stop(struct mt76x02_dev *dev);
int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index cf611d1b817c..53ca0cedf026 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -21,7 +21,9 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
- .tx_aligned4_skbs = true,
+ .drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+ MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 343127f2d621..33fcec9179b2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -7,6 +7,7 @@
#include "mt76x2.h"
#include "eeprom.h"
#include "mcu.h"
+#include "../mt76x02_mac.h"
static void
mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
@@ -132,36 +133,11 @@ int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
for (i = 0; i < 16; i++)
mt76_rr(dev, MT_TX_STAT_FIFO);
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY |
- MT_CH_CCA_RC_EN |
- FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
-
mt76x02_set_tx_ackto(dev);
return 0;
}
-int mt76x2_mac_start(struct mt76x02_dev *dev)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- mt76_rr(dev, MT_TX_AGG_CNT(i));
-
- for (i = 0; i < 16; i++)
- mt76_rr(dev, MT_TX_STAT_FIFO);
-
- memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
- mt76x02_mac_start(dev);
-
- return 0;
-}
-
static void
mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
{
@@ -264,9 +240,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
- ret = mt76x2_mac_start(dev);
- if (ret)
- return ret;
+ mt76x02_mac_start(dev);
ret = mt76x2_mcu_init(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 4971685aafe8..cfe8905ce73f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -4,6 +4,7 @@
*/
#include "mt76x2.h"
+#include "../mt76x02_mac.h"
static int
mt76x2_start(struct ieee80211_hw *hw)
@@ -11,10 +12,7 @@ mt76x2_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- ret = mt76x2_mac_start(dev);
- if (ret)
- return ret;
-
+ mt76x02_mac_start(dev);
ret = mt76x2_phy_start(dev);
if (ret)
return ret;
@@ -54,10 +52,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
- /* channel cycle counters read-and-clear */
- mt76_rr(dev, MT_CH_IDLE);
- mt76_rr(dev, MT_CH_BUSY);
-
+ mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
@@ -140,19 +135,6 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
return 0;
}
-static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
- u32 *rx_ant)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
- *tx_ant = dev->mt76.antenna_mask;
- *rx_ant = dev->mt76.antenna_mask;
- mutex_unlock(&dev->mt76.mutex);
-
- return 0;
-}
-
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x02_tx,
.start = mt76x2_start,
@@ -177,7 +159,7 @@ const struct ieee80211_ops mt76x2_ops = {
.get_survey = mt76_get_survey,
.set_tim = mt76_set_tim,
.set_antenna = mt76x2_set_antenna,
- .get_antenna = mt76x2_get_antenna,
+ .get_antenna = mt76_get_antenna,
.set_rts_threshold = mt76x02_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index da5e0f9a8bae..b64ad816cc25 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -25,6 +25,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
@@ -39,7 +41,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
struct mt76_dev *mdev;
int err;
- mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
+ mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -71,6 +73,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
err:
ieee80211_free_hw(mt76_hw(dev));
+ mt76u_deinit(&dev->mt76);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
@@ -86,6 +89,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
set_bit(MT76_REMOVED, &dev->mt76.state);
ieee80211_unregister_hw(hw);
mt76x2u_cleanup(dev);
+ mt76u_deinit(&dev->mt76);
ieee80211_free_hw(hw);
usb_set_intfdata(intf, NULL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index e305b374c904..2910068f4e79 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -184,13 +184,6 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_phy_set_rxpath(dev);
mt76x02_phy_set_txdac(dev);
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY);
-
return mt76x2u_mac_stop(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index e7fea3a6f1fd..59cbe826188a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -6,16 +6,6 @@
#include "mt76x2u.h"
#include "eeprom.h"
-static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev)
-{
- mt76_rr(dev, MT_RX_STAT_0);
- mt76_rr(dev, MT_RX_STAT_1);
- mt76_rr(dev, MT_RX_STAT_2);
- mt76_rr(dev, MT_TX_STA_0);
- mt76_rr(dev, MT_TX_STA_1);
- mt76_rr(dev, MT_TX_STA_2);
-}
-
static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
{
s8 offset = 0;
@@ -102,23 +92,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
return 0;
}
-int mt76x2u_mac_start(struct mt76x02_dev *dev)
-{
- mt76x2u_mac_reset_counters(dev);
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
- mt76x02_wait_for_wpdma(&dev->mt76, 1000);
- usleep_range(50, 100);
-
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
-
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX |
- MT_MAC_SYS_CTRL_ENABLE_RX);
-
- return 0;
-}
-
int mt76x2u_mac_stop(struct mt76x02_dev *dev)
{
int i, count = 0, val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index eb73cb856c81..9e97204841f5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -4,13 +4,14 @@
*/
#include "mt76x2u.h"
+#include "../mt76x02_usb.h"
static int mt76x2u_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
int ret;
- ret = mt76x2u_mac_start(dev);
+ ret = mt76x02u_mac_start(dev);
if (ret)
return ret;
@@ -48,10 +49,7 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
err = mt76x2u_phy_set_channel(dev, chandef);
- /* channel cycle counters read-and-clear */
- mt76_rr(dev, MT_CH_IDLE);
- mt76_rr(dev, MT_CH_BUSY);
-
+ mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
clear_bit(MT76_RESET, &dev->mt76.state);
@@ -121,4 +119,5 @@ const struct ieee80211_ops mt76x2u_ops = {
.get_survey = mt76_get_survey,
.set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
+ .get_antenna = mt76_get_antenna,
};
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index c22a05f06fd0..7ee91d946882 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
- struct mt76_txq *mtxq, bool *empty)
+ struct mt76_txq *mtxq)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
@@ -392,16 +392,12 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
bool probe;
int idx;
- if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
- *empty = true;
+ if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- }
skb = mt76_txq_dequeue(dev, mtxq, false);
- if (!skb) {
- *empty = true;
+ if (!skb)
return 0;
- }
info = IEEE80211_SKB_CB(skb);
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
@@ -431,10 +427,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
return -EBUSY;
skb = mt76_txq_dequeue(dev, mtxq, false);
- if (!skb) {
- *empty = true;
+ if (!skb)
break;
- }
info = IEEE80211_SKB_CB(skb);
cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
@@ -481,8 +475,6 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
while (1) {
- bool empty = false;
-
if (sq->swq_queued >= 4)
break;
@@ -513,10 +505,9 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
}
- ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
- if (skb_queue_empty(&mtxq->retry_q))
- empty = true;
- ieee80211_return_txq(dev->hw, txq, !empty);
+ ret += mt76_txq_send_burst(dev, sq, mtxq);
+ ieee80211_return_txq(dev->hw, txq,
+ !skb_queue_empty(&mtxq->retry_q));
}
spin_unlock_bh(&hwq->lock);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 20c6fe510e9d..d6d47081e281 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -15,15 +15,17 @@ static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
-/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
int i, ret;
+ lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
+
pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
: usb_sndctrlpipe(udev, 0);
for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
@@ -60,7 +62,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
@@ -103,7 +104,6 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
@@ -235,7 +235,8 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
static bool mt76u_check_sg(struct mt76_dev *dev)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
@@ -370,7 +371,8 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
struct urb *urb, usb_complete_t complete_fn,
void *context)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -695,10 +697,7 @@ static void mt76u_tx_tasklet(unsigned long data)
mt76_txq_schedule(dev, i);
if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
- ieee80211_queue_delayed_work(dev->hw,
- &dev->usb.stat_work,
- msecs_to_jiffies(10));
-
+ queue_work(dev->usb.stat_wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@@ -711,7 +710,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
u8 update = 1;
u16 count = 0;
- usb = container_of(work, struct mt76_usb, stat_work.work);
+ usb = container_of(work, struct mt76_usb, stat_work);
dev = container_of(usb, struct mt76_dev, usb);
while (true) {
@@ -724,8 +723,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
- ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
- msecs_to_jiffies(10));
+ queue_work(usb->stat_wq, &usb->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->state);
}
@@ -906,7 +904,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
}
}
- cancel_delayed_work_sync(&dev->usb.stat_work);
+ cancel_work_sync(&dev->usb.stat_work);
clear_bit(MT76_READING_STATS, &dev->state);
mt76_tx_status_check(dev, NULL, true);
@@ -952,24 +950,40 @@ int mt76u_init(struct mt76_dev *dev,
.rd_rp = mt76u_rd_rp,
.type = MT76_BUS_USB,
};
+ struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
- INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+ INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
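+ /* TX status polling gets its own unbound workqueue so slow register reads do not hold up other work */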
+ usb->stat_wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
+ if (!usb->stat_wq)
+ return -ENOMEM;
+
mutex_init(&usb->mcu.mutex);
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
dev->queue_ops = &usb_queue_ops;
+ dev_set_drvdata(&udev->dev, dev);
+
usb->sg_en = mt76u_check_sg(dev);
return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
+void mt76u_deinit(struct mt76_dev *dev)
+{
+ if (dev->usb.stat_wq) {
+ destroy_workqueue(dev->usb.stat_wq);
+ dev->usb.stat_wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76u_deinit);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 5e549831370c..300242bce799 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -27,7 +27,7 @@ mt76_reg_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 72e608cc53af..671d8897ae76 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -372,8 +372,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
msta->agg_ssn[tid] = ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 06f5702ab4bd..d863ab4a66c9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -213,7 +213,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
do {
val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
- if (val && ~val)
+ if (val && val != 0xff)
break;
} while (--i);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 7cea08f71838..87d048df09d1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -12,6 +12,16 @@
#define QTNF_MAX_MAC 3
+#define HBM_FRAME_META_MAGIC_PATTERN_S 0xAB
+#define HBM_FRAME_META_MAGIC_PATTERN_E 0xBA
+
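+/* metadata trailer appended to data frames, bounded by magic bytes, that identifies the destination MAC and interface */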
+struct qtnf_frame_meta_info {
+ u8 magic_s;
+ u8 ifidx;
+ u8 macid;
+ u8 magic_e;
+} __packed;
+
enum qtnf_fw_state {
QTNF_FW_STATE_DETACHED,
QTNF_FW_STATE_BOOT_DONE,
@@ -31,8 +41,10 @@ struct qtnf_bus_ops {
int (*control_tx)(struct qtnf_bus *, struct sk_buff *);
/* data xfer methods */
- int (*data_tx)(struct qtnf_bus *, struct sk_buff *);
+ int (*data_tx)(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid);
void (*data_tx_timeout)(struct qtnf_bus *, struct net_device *);
+ void (*data_tx_use_meta_set)(struct qtnf_bus *bus, bool use_meta);
void (*data_rx_start)(struct qtnf_bus *);
void (*data_rx_stop)(struct qtnf_bus *);
};
@@ -42,7 +54,7 @@ struct qtnf_bus {
enum qtnf_fw_state fw_state;
u32 chip;
u32 chiprev;
- const struct qtnf_bus_ops *bus_ops;
+ struct qtnf_bus_ops *bus_ops;
struct qtnf_wmac *mac[QTNF_MAX_MAC];
struct qtnf_qlink_transport trans;
struct qtnf_hw_info hw_info;
@@ -54,6 +66,8 @@ struct qtnf_bus {
struct work_struct event_work;
struct mutex bus_lock; /* lock during command/event processing */
struct dentry *dbg_dir;
+ struct notifier_block netdev_nb;
+ u8 hw_id[ETH_ALEN];
/* bus private data */
char bus_priv[0] __aligned(sizeof(void *));
};
@@ -99,9 +113,10 @@ static inline void qtnf_bus_stop(struct qtnf_bus *bus)
bus->bus_ops->stop(bus);
}
-static inline int qtnf_bus_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+static inline int qtnf_bus_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
{
- return bus->bus_ops->data_tx(bus, skb);
+ return bus->bus_ops->data_tx(bus, skb, macid, vifid);
}
static inline void
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index d90016125dfc..59d089e092f9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -238,22 +238,29 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
mac->macid, vif->vifid, vif->mac_addr);
ret = -EINVAL;
- goto err_mac;
+ goto error_del_vif;
}
ret = qtnf_core_net_attach(mac, vif, name, name_assign_t);
if (ret) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
- goto err_net;
+ goto error_del_vif;
+ }
+
+ if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
+ if (ret) {
+ unregister_netdevice(vif->netdev);
+ vif->netdev = NULL;
+ goto error_del_vif;
+ }
}
vif->wdev.netdev = vif->netdev;
return &vif->wdev;
-err_net:
- vif->netdev = NULL;
-err_mac:
+error_del_vif:
qtnf_cmd_send_del_intf(vif);
err_cmd:
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
@@ -897,6 +904,45 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
+ int ret;
+
+ ret = qtnf_cmd_get_tx_power(vif, dbm);
+ if (ret)
+ pr_err("MAC%u: failed to get Tx power\n", vif->mac->macid);
+
+ return ret;
+}
+
+static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ struct qtnf_vif *vif;
+ int ret;
+
+ if (wdev) {
+ vif = qtnf_netdev_get_priv(wdev->netdev);
+ } else {
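+ /* no wdev given: apply the setting through the primary VIF of this wiphy */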
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n",
+ mac->macid);
+ return -EFAULT;
+ }
+ }
+
+ ret = qtnf_cmd_set_tx_power(vif, type, mbm);
+ if (ret)
+ pr_err("MAC%u: failed to set Tx power\n", vif->mac->macid);
+
+ return ret;
+}
+
#ifdef CONFIG_PM
static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
@@ -991,6 +1037,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
.set_power_mgmt = qtnf_set_power_mgmt,
+ .get_tx_power = qtnf_get_tx_power,
+ .set_tx_power = qtnf_set_tx_power,
#ifdef CONFIG_PM
.suspend = qtnf_suspend,
.resume = qtnf_resume,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index dc0c7244b60e..548f6ff6d0f2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -83,6 +83,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
struct qlink_cmd *cmd;
struct qlink_resp *resp = NULL;
struct sk_buff *resp_skb = NULL;
+ int resp_res = 0;
u16 cmd_id;
u8 mac_id;
u8 vif_id;
@@ -113,6 +114,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
}
resp = (struct qlink_resp *)resp_skb->data;
+ resp_res = le16_to_cpu(resp->result);
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
const_resp_size);
if (ret)
@@ -128,8 +130,8 @@ out:
else
consume_skb(resp_skb);
- if (!ret && resp)
- return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
+ if (!ret)
+ return qtnf_cmd_resp_result_decode(resp_res);
pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
mac_id, vif_id, cmd_id, ret);
@@ -212,6 +214,20 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
return true;
}
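+/* append an element carrying WLAN_EID_EXTENSION (255) plus the given extension ID to a command skb */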
+static void qtnf_cmd_tlv_ie_ext_add(struct sk_buff *cmd_skb, u8 eid_ext,
+ const void *buf, size_t len)
+{
+ struct qlink_tlv_ext_ie *tlv;
+
+ tlv = (struct qlink_tlv_ext_ie *)skb_put(cmd_skb, sizeof(*tlv) + len);
+ tlv->hdr.type = cpu_to_le16(WLAN_EID_EXTENSION);
+ tlv->hdr.len = cpu_to_le16(sizeof(*tlv) + len - sizeof(tlv->hdr));
+ tlv->eid_ext = eid_ext;
+
+ if (len && buf)
+ memcpy(tlv->ie_data, buf, len);
+}
+
int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s)
{
@@ -307,6 +323,10 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
}
+ if (s->he_cap)
+ qtnf_cmd_tlv_ie_ext_add(cmd_skb, WLAN_EID_EXT_HE_CAPABILITY,
+ s->he_cap, sizeof(*s->he_cap));
+
if (s->acl) {
size_t acl_size = struct_size(s->acl, mac_addrs,
s->acl->n_acl_entries);
@@ -1240,10 +1260,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info = &mac->macinfo;
mac_info->bands_cap = resp_info->bands_cap;
- memcpy(&mac_info->dev_mac, &resp_info->dev_mac,
- sizeof(mac_info->dev_mac));
-
- ether_addr_copy(mac->macaddr, mac_info->dev_mac);
+ ether_addr_copy(mac->macaddr, resp_info->dev_mac);
vif = qtnf_mac_get_base_vif(mac);
if (vif)
@@ -1293,6 +1310,69 @@ static void qtnf_cmd_resp_band_fill_vhtcap(const u8 *info,
memcpy(&bcap->vht_mcs, &vht_cap->supp_mcs, sizeof(bcap->vht_mcs));
}
+static void qtnf_cmd_conv_iftype(struct ieee80211_sband_iftype_data
+ *iftype_data,
+ const struct qlink_sband_iftype_data
+ *qlink_data)
+{
+ iftype_data->types_mask = le16_to_cpu(qlink_data->types_mask);
+
+ iftype_data->he_cap.has_he = true;
+ memcpy(&iftype_data->he_cap.he_cap_elem, &qlink_data->he_cap_elem,
+ sizeof(qlink_data->he_cap_elem));
+ memcpy(iftype_data->he_cap.ppe_thres, qlink_data->ppe_thres,
+ ARRAY_SIZE(qlink_data->ppe_thres));
+
+ iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 =
+ qlink_data->he_mcs_nss_supp.rx_mcs_80;
+ iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 =
+ qlink_data->he_mcs_nss_supp.tx_mcs_80;
+ iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 =
+ qlink_data->he_mcs_nss_supp.rx_mcs_160;
+ iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 =
+ qlink_data->he_mcs_nss_supp.tx_mcs_160;
+ iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80p80 =
+ qlink_data->he_mcs_nss_supp.rx_mcs_80p80;
+ iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80p80 =
+ qlink_data->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static int qtnf_cmd_band_fill_iftype(const u8 *data,
+ struct ieee80211_supported_band *band)
+{
+ unsigned int i;
+ struct ieee80211_sband_iftype_data *iftype_data;
+ const struct qlink_tlv_iftype_data *tlv =
+ (const struct qlink_tlv_iftype_data *)data;
+ size_t payload_len = tlv->n_iftype_data * sizeof(*tlv->iftype_data) +
+ sizeof(*tlv) -
+ sizeof(struct qlink_tlv_hdr);
+
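+ /* hdr.len must cover exactly n_iftype_data fixed-size entries */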
+ if (tlv->hdr.len != cpu_to_le16(payload_len)) {
+ pr_err("bad IFTYPE_DATA TLV len %u\n", tlv->hdr.len);
+ return -EINVAL;
+ }
+
+ kfree(band->iftype_data);
+ band->iftype_data = NULL;
+ band->n_iftype_data = tlv->n_iftype_data;
+ if (band->n_iftype_data == 0)
+ return 0;
+
+ iftype_data = kcalloc(band->n_iftype_data, sizeof(*iftype_data),
+ GFP_KERNEL);
+ if (!iftype_data) {
+ band->n_iftype_data = 0;
+ return -ENOMEM;
+ }
+ band->iftype_data = iftype_data;
+
+ for (i = 0; i < band->n_iftype_data; i++)
+ qtnf_cmd_conv_iftype(iftype_data++, &tlv->iftype_data[i]);
+
+ return 0;
+}
+
static int
qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
struct qlink_resp_band_info_get *resp,
@@ -1306,6 +1386,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
struct ieee80211_channel *chan;
unsigned int chidx = 0;
u32 qflags;
+ int ret = -EINVAL;
memset(&band->ht_cap, 0, sizeof(band->ht_cap));
memset(&band->vht_cap, 0, sizeof(band->vht_cap));
@@ -1443,6 +1524,12 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
qtnf_cmd_resp_band_fill_vhtcap(tlv->val,
&band->vht_cap);
break;
+ case QTN_TLV_ID_IFTYPE_DATA:
+ ret = qtnf_cmd_band_fill_iftype((const uint8_t *)tlv,
+ band);
+ if (ret)
+ goto error_ret;
+ break;
default:
pr_warn("unknown TLV type: %#x\n", tlv_type);
break;
@@ -1470,7 +1557,7 @@ error_ret:
band->channels = NULL;
band->n_channels = 0;
- return -EINVAL;
+ return ret;
}
static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
@@ -2641,6 +2728,71 @@ out:
return ret;
}
+int qtnf_cmd_get_tx_power(const struct qtnf_vif *vif, int *dbm)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_txpwr *resp;
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_cmd_txpwr *cmd;
+ struct sk_buff *cmd_skb;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_TXPWR, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;
+ cmd->op_type = QLINK_TXPWR_GET;
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), NULL);
+ if (ret)
+ goto out;
+
+ resp = (const struct qlink_resp_txpwr *)resp_skb->data;
+ *dbm = MBM_TO_DBM(le32_to_cpu(resp->txpwr));
+
+out:
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_txpwr *resp;
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_cmd_txpwr *cmd;
+ struct sk_buff *cmd_skb;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_TXPWR, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;
+ cmd->op_type = QLINK_TXPWR_SET;
+ cmd->txpwr_setting = type;
+ cmd->txpwr = cpu_to_le32(mbm);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), NULL);
+
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl)
{
@@ -2689,3 +2841,35 @@ out:
qtnf_bus_unlock(bus);
return ret;
}
+
+int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_ndev_changeupper *cmd;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_NDEV_EVENT,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ pr_debug("[VIF%u.%u] set broadcast domain to %d\n",
+ vif->mac->macid, vif->vifid, br_domain);
+
+ cmd = (struct qlink_cmd_ndev_changeupper *)cmd_skb->data;
+ cmd->nehdr.event = cpu_to_le16(QLINK_NDEV_EVENT_CHANGEUPPER);
+ cmd->upper_type = QLINK_NDEV_UPPER_TYPE_BRIDGE;
+ cmd->br_domain = cpu_to_le32(br_domain);
+
+ qtnf_bus_lock(bus);
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ qtnf_bus_unlock(bus);
+
+ if (ret)
+ pr_err("[VIF%u.%u] failed to set broadcast domain\n",
+ vif->mac->macid, vif->vifid);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 88d7a3cd90d2..761755bf9ede 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -70,7 +70,11 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_get_tx_power(const struct qtnf_vif *vif, int *dbm);
+int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
+ enum nl80211_tx_power_setting type, int mbm);
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl);
+int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 8d699cc03d26..5fb598389487 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -12,6 +12,7 @@
#include "cfg80211.h"
#include "event.h"
#include "util.h"
+#include "switchdev.h"
#define QTNF_DMP_MAX_LEN 48
#define QTNF_PRIMARY_VIF_IDX 0
@@ -22,13 +23,6 @@ MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
static struct dentry *qtnf_debugfs_dir;
-struct qtnf_frame_meta_info {
- u8 magic_s;
- u8 ifidx;
- u8 macid;
- u8 magic_e;
-} __packed;
-
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
@@ -67,6 +61,14 @@ static int qtnf_netdev_close(struct net_device *ndev)
return 0;
}
+static void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+ skb_queue_tail(&vif->high_pri_tx_queue, skb);
+ queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+
/* Netdev handler for data transmission.
*/
static netdev_tx_t
@@ -107,7 +109,13 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* tx path is enabled: reset vif timeout */
vif->cons_tx_timeout_cnt = 0;
- return qtnf_bus_data_tx(mac->bus, skb);
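+ /* EAPOL frames bypass the regular data path and are sent through the high-priority queue */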
+ if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+ qtnf_packet_send_hi_pri(skb);
+ qtnf_update_tx_stats(ndev, skb);
+ return NETDEV_TX_OK;
+ }
+
+ return qtnf_bus_data_tx(mac->bus, skb, mac->macid, vif->vifid);
}
/* Netdev handler for getting stats.
@@ -197,6 +205,21 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
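+/* all interfaces of one device report the same parent ID, marking them as ports of a single HW switch */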
+static int qtnf_netdev_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ const struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ const struct qtnf_bus *bus = vif->mac->bus;
+
+ if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bus->hw_id);
+ memcpy(&ppid->id, bus->hw_id, ppid->id_len);
+
+ return 0;
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
.ndo_open = qtnf_netdev_open,
@@ -205,6 +228,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
.ndo_get_stats64 = qtnf_netdev_get_stats64,
.ndo_set_mac_address = qtnf_netdev_set_mac_address,
+ .ndo_get_port_parent_id = qtnf_netdev_port_parent_id,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -451,10 +475,8 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev = alloc_netdev_mqs(sizeof(struct qtnf_vif *), name,
name_assign_type, ether_setup, 1, 1);
- if (!dev) {
- vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ if (!dev)
return -ENOMEM;
- }
vif->netdev = dev;
@@ -469,6 +491,9 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
+ if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)
+ dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
+
qdev_vif = netdev_priv(dev);
*((void **)qdev_vif) = vif;
@@ -477,7 +502,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
ret = register_netdevice(dev);
if (ret) {
free_netdev(dev);
- vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ vif->netdev = NULL;
}
return ret;
@@ -518,6 +543,9 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
if (!wiphy->bands[band])
continue;
+ kfree(wiphy->bands[band]->iftype_data);
+ wiphy->bands[band]->n_iftype_data = 0;
+
kfree(wiphy->bands[band]->channels);
wiphy->bands[band]->n_channels = 0;
@@ -557,6 +585,10 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
+ /* Use MAC address of the first active radio as a unique device ID */
+ if (is_zero_ether_addr(mac->bus->hw_id))
+ ether_addr_copy(mac->bus->hw_id, mac->macaddr);
+
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not ready\n", macid);
@@ -574,19 +606,19 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
ret = qtnf_cmd_send_get_phy_params(mac);
if (ret) {
pr_err("MAC%u: failed to get PHY settings\n", macid);
- goto error;
+ goto error_del_vif;
}
ret = qtnf_mac_init_bands(mac);
if (ret) {
pr_err("MAC%u: failed to init bands\n", macid);
- goto error;
+ goto error_del_vif;
}
ret = qtnf_wiphy_register(&bus->hw_info, mac);
if (ret) {
pr_err("MAC%u: wiphy registration failed\n", macid);
- goto error;
+ goto error_del_vif;
}
mac->wiphy_registered = 1;
@@ -598,20 +630,75 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
if (ret) {
pr_err("MAC%u: failed to attach netdev\n", macid);
- vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- vif->netdev = NULL;
- goto error;
+ goto error_del_vif;
+ }
+
+ if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
+ if (ret)
+ goto error;
}
pr_debug("MAC%u initialized\n", macid);
return 0;
+error_del_vif:
+ qtnf_cmd_send_del_intf(vif);
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
error:
qtnf_core_mac_detach(bus, macid);
return ret;
}
+bool qtnf_netdev_is_qtn(const struct net_device *ndev)
+{
+ return ndev->netdev_ops == &qtnf_netdev_ops;
+}
+
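+/* keep the firmware's hardware bridge in sync with bridge membership changes on the host */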
+static int qtnf_core_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ const struct netdev_notifier_changeupper_info *info;
+ struct qtnf_vif *vif;
+ int br_domain;
+ int ret = 0;
+
+ if (!qtnf_netdev_is_qtn(ndev))
+ return NOTIFY_DONE;
+
+ if (!net_eq(dev_net(ndev), &init_net))
+ return NOTIFY_OK;
+
+ vif = qtnf_netdev_get_priv(ndev);
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (!netif_is_bridge_master(info->upper_dev))
+ break;
+
+ pr_debug("[VIF%u.%u] change bridge: %s %s\n",
+ vif->mac->macid, vif->vifid,
+ netdev_name(info->upper_dev),
+ info->linking ? "add" : "del");
+
+ if (info->linking)
+ br_domain = info->upper_dev->ifindex;
+ else
+ br_domain = ndev->ifindex;
+
+ ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
int qtnf_core_attach(struct qtnf_bus *bus)
{
unsigned int i;
@@ -656,6 +743,10 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ if ((bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) &&
+ bus->bus_ops->data_tx_use_meta_set)
+ bus->bus_ops->data_tx_use_meta_set(bus, true);
+
if (bus->hw_info.num_mac > QTNF_MAX_MAC) {
pr_err("no support for number of MACs=%u\n",
bus->hw_info.num_mac);
@@ -672,6 +763,15 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
+ if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
+ ret = register_netdevice_notifier(&bus->netdev_nb);
+ if (ret) {
+ pr_err("failed to register netdev notifier: %d\n", ret);
+ goto error;
+ }
+ }
+
bus->fw_state = QTNF_FW_STATE_RUNNING;
return 0;
@@ -685,6 +785,7 @@ void qtnf_core_detach(struct qtnf_bus *bus)
{
unsigned int macid;
+ unregister_netdevice_notifier(&bus->netdev_nb);
qtnf_bus_data_rx_stop(bus);
for (macid = 0; macid < QTNF_MAX_MAC; macid++)
@@ -713,7 +814,8 @@ EXPORT_SYMBOL_GPL(qtnf_core_detach);
static inline int qtnf_is_frame_meta_magic_valid(struct qtnf_frame_meta_info *m)
{
- return m->magic_s == 0xAB && m->magic_e == 0xBA;
+ return m->magic_s == HBM_FRAME_META_MAGIC_PATTERN_S &&
+ m->magic_e == HBM_FRAME_META_MAGIC_PATTERN_E;
}
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
@@ -768,6 +870,8 @@ struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
}
__skb_trim(skb, skb->len - sizeof(*meta));
+ /* Firmware always handles packets that require flooding */
+ qtnfmac_switch_mark_skb_flooded(skb);
out:
return ndev;
@@ -841,15 +945,6 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
-
- skb_queue_tail(&vif->high_pri_tx_queue, skb);
- queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
-}
-EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
-
struct dentry *qtnf_get_debugfs_dir(void)
{
return qtnf_debugfs_dir;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 322858df600c..116ec16aa15b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -74,7 +74,6 @@ struct qtnf_vif {
struct qtnf_mac_info {
u8 bands_cap;
- u8 dev_mac[ETH_ALEN];
u8 num_tx_chain;
u8 num_rx_chain;
u16 max_ap_assoc_sta;
@@ -152,8 +151,8 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb);
struct dentry *qtnf_get_debugfs_dir(void);
+bool qtnf_netdev_is_qtn(const struct net_device *ndev);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index b57c8c18a8d0..51af93bdf06e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -171,8 +171,9 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- pr_debug("VIF%u.%u: BSSID:%pM status:%u\n",
- vif->mac->macid, vif->vifid, join_info->bssid, status);
+ pr_debug("VIF%u.%u: BSSID:%pM chan:%u status:%u\n",
+ vif->mac->macid, vif->vifid, join_info->bssid,
+ le16_to_cpu(join_info->chan.chan.center_freq), status);
if (status != WLAN_STATUS_SUCCESS)
goto done;
@@ -181,7 +182,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
if (!cfg80211_chandef_valid(&chandef)) {
pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
vif->mac->macid, vif->vifid,
- chandef.chan->center_freq,
+ chandef.chan ? chandef.chan->center_freq : 0,
chandef.center_freq1,
chandef.center_freq2,
chandef.width);
@@ -617,6 +618,42 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
return ret;
}
+static int
+qtnf_event_handle_mic_failure(struct qtnf_vif *vif,
+ const struct qlink_event_mic_failure *mic_ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ u8 pairwise;
+
+ if (len < sizeof(*mic_ev)) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_mic_failure));
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ pr_err("VIF%u.%u: MIC_FAILURE event when not in STA mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ pairwise = mic_ev->pairwise ?
+ NL80211_KEYTYPE_PAIRWISE : NL80211_KEYTYPE_GROUP;
+
+ pr_info("%s: MIC error: src=%pM key_index=%u pairwise=%u\n",
+ vif->netdev->name, mic_ev->src, mic_ev->key_index, pairwise);
+
+ cfg80211_michael_mic_failure(vif->netdev, mic_ev->src, pairwise,
+ mic_ev->key_index, NULL, GFP_KERNEL);
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -679,6 +716,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_external_auth(vif, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_MIC_FAILURE:
+ ret = qtnf_event_handle_mic_failure(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 8ae318b5fe54..5337e67092ca 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -33,7 +33,7 @@ static unsigned int tx_bd_size_param;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");
-static unsigned int rx_bd_size_param = 256;
+static unsigned int rx_bd_size_param;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");
@@ -130,6 +130,8 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
{
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ char card_id[64];
int ret;
bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
@@ -137,7 +139,9 @@ int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
if (ret) {
pr_err("failed to attach core\n");
} else {
- qtnf_debugfs_init(bus, DRV_NAME);
+ snprintf(card_id, sizeof(card_id), "%s:%s",
+ DRV_NAME, pci_name(priv->pdev));
+ qtnf_debugfs_init(bus, card_id);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
@@ -337,7 +341,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
- pcie_priv->rx_bd_num = rx_bd_size_param;
pcie_priv->flashboot = flashboot;
if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ)
@@ -354,7 +357,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->pcie_irq_count = 0;
pcie_priv->tx_reclaim_done = 0;
pcie_priv->tx_reclaim_req = 0;
- pcie_priv->tx_eapol = 0;
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
@@ -377,7 +379,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->epmem_bar = epmem_bar;
pci_save_state(pdev);
- ret = pcie_priv->probe_cb(bus, tx_bd_size_param);
+ ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param);
if (ret)
goto error;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index 5e8b9cb68419..2a6a928e13bd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -23,7 +23,8 @@
struct qtnf_pcie_bus_priv {
struct pci_dev *pdev;
- int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size);
+ int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size,
+ unsigned int rx_bd_size);
void (*remove_cb)(struct qtnf_bus *bus);
int (*suspend_cb)(struct qtnf_bus *bus);
int (*resume_cb)(struct qtnf_bus *bus);
@@ -62,7 +63,6 @@ struct qtnf_pcie_bus_priv {
u32 tx_done_count;
u32 tx_reclaim_done;
u32 tx_reclaim_req;
- u32 tx_eapol;
u8 msi_enabled;
u8 tx_stopped;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 3aa3714d4dfd..8e0d8018208a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -24,6 +24,7 @@
#include "debug.h"
#define PEARL_TX_BD_SIZE_DEFAULT 32
+#define PEARL_RX_BD_SIZE_DEFAULT 256
struct qtnf_pearl_bda {
__le16 bda_len;
@@ -244,8 +245,6 @@ static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
/* tx bd */
- memset(vaddr, 0, len);
-
ps->bd_table_vaddr = vaddr;
ps->bd_table_paddr = paddr;
ps->bd_table_len = len;
@@ -399,7 +398,8 @@ static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
}
static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
- unsigned int tx_bd_size)
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
@@ -411,28 +411,29 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("bad tx_bd_size value %u\n", tx_bd_size);
+ pr_warn("invalid tx_bd_size value %u, use default %u\n",
+ tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
} else {
priv->tx_bd_num = tx_bd_size;
}
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
+ if (rx_bd_size == 0)
+ rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;
- if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
- pr_err("rx_bd_size_param %u is not power of two\n",
- priv->rx_bd_num);
- return -EINVAL;
- }
+ val = rx_bd_size * sizeof(dma_addr_t);
- val = priv->rx_bd_num * sizeof(dma_addr_t);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("rx_bd_size_param %u is too large\n",
- priv->rx_bd_num);
- return -EINVAL;
+ if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("invalid rx_bd_size value %u, use default %u\n",
+ rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
+ priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
+ } else {
+ priv->rx_bd_num = rx_bd_size;
}
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
ret = pearl_hhbm_init(ps);
if (ret) {
pr_err("failed to init h/w queues\n");
@@ -531,7 +532,7 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
return 1;
}
-static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
{
struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
struct qtnf_pcie_bus_priv *priv = &ps->base;
@@ -607,6 +608,38 @@ tx_done:
return NETDEV_TX_OK;
}
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
+{
+ return qtnf_pcie_skb_send(bus, skb);
+}
+
+static int qtnf_pcie_data_tx_meta(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
+{
+ struct qtnf_frame_meta_info *meta;
+ int tail_need = sizeof(*meta) - skb_tailroom(skb);
+ int ret;
+
+ if (tail_need > 0 && pskb_expand_head(skb, 0, tail_need, GFP_ATOMIC)) {
+ skb->dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ meta = skb_put(skb, sizeof(*meta));
+ meta->magic_s = HBM_FRAME_META_MAGIC_PATTERN_S;
+ meta->magic_e = HBM_FRAME_META_MAGIC_PATTERN_E;
+ meta->macid = macid;
+ meta->ifidx = vifid;
+
+ ret = qtnf_pcie_skb_send(bus, skb);
+ if (unlikely(ret == NETDEV_TX_BUSY))
+ __skb_trim(skb, skb->len - sizeof(*meta));
+
+ return ret;
+}
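As a quick illustration of the tailroom arithmetic above, assuming the four u8 fields written here make up the whole 4-byte struct qtnf_frame_meta_info:

        /*
         * skb_tailroom(skb) == 1   ->  tail_need == 3: pskb_expand_head()
         *                              must grow the buffer first
         * skb_tailroom(skb) == 16  ->  tail_need == -12: the meta trailer
         *                              fits in place, no reallocation
         */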
+
static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
struct qtnf_bus *bus = (struct qtnf_bus *)data;
@@ -795,13 +828,22 @@ static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
qtnf_disable_hdp_irqs(ps);
}
-static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
+static void qtnf_pearl_tx_use_meta_info_set(struct qtnf_bus *bus, bool use_meta)
+{
+ if (use_meta)
+ bus->bus_ops->data_tx = qtnf_pcie_data_tx_meta;
+ else
+ bus->bus_ops->data_tx = qtnf_pcie_data_tx;
+}
+
+static struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
/* control path methods */
.control_tx = qtnf_pcie_control_tx,
/* data path methods */
.data_tx = qtnf_pcie_data_tx,
.data_tx_timeout = qtnf_pcie_data_tx_timeout,
+ .data_tx_use_meta_set = qtnf_pearl_tx_use_meta_info_set,
.data_rx_start = qtnf_pcie_data_rx_start,
.data_rx_stop = qtnf_pcie_data_rx_stop,
};
@@ -904,7 +946,7 @@ static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
memcpy(pdata, pblk, len);
hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
- ret = qtnf_pcie_data_tx(bus, skb);
+ ret = qtnf_pcie_skb_send(bus, skb);
return (ret == NETDEV_TX_OK) ? len : 0;
}
@@ -1066,7 +1108,8 @@ static u64 qtnf_pearl_dma_mask_get(void)
#endif
}
-static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
@@ -1081,7 +1124,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
- ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size);
+ ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size, rx_bd_size);
if (ret) {
pr_err("PCIE xfer init failed\n");
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 9a4380ed7f1b..dbf3c5fd751f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -23,6 +23,7 @@
#include "debug.h"
#define TOPAZ_TX_BD_SIZE_DEFAULT 128
+#define TOPAZ_RX_BD_SIZE_DEFAULT 256
struct qtnf_topaz_tx_bd {
__le32 addr;
@@ -199,8 +200,6 @@ static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
if (!vaddr)
return -ENOMEM;
- memset(vaddr, 0, len);
-
/* tx bd */
ts->tx_bd_vbase = vaddr;
@@ -333,7 +332,8 @@ static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
}
static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
- unsigned int tx_bd_size)
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_topaz_bda __iomem *bda = ts->bda;
struct qtnf_pcie_bus_priv *priv = &ts->base;
@@ -351,6 +351,17 @@ static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
priv->tx_bd_num = tx_bd_size;
qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
+
+ if (rx_bd_size == 0)
+ rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
+
+ if (rx_bd_size > TOPAZ_RX_BD_SIZE_DEFAULT) {
+ pr_warn("RX BD queue cannot exceed %d\n",
+ TOPAZ_RX_BD_SIZE_DEFAULT);
+ rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
+ }
+
+ priv->rx_bd_num = rx_bd_size;
qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);
priv->rx_bd_w_index = 0;
@@ -486,7 +497,8 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
return 1;
}
-static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
{
struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
struct qtnf_pcie_bus_priv *priv = &ts->base;
@@ -498,13 +510,6 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
int len;
int i;
- if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
- qtnf_packet_send_hi_pri(skb);
- qtnf_update_tx_stats(skb->dev, skb);
- priv->tx_eapol++;
- return NETDEV_TX_OK;
- }
-
spin_lock_irqsave(&priv->tx_lock, flags);
if (!qtnf_tx_queue_ready(ts)) {
@@ -736,7 +741,7 @@ static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
napi_disable(&bus->mux_napi);
}
-static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
+static struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
/* control path methods */
.control_tx = qtnf_pcie_control_tx,
@@ -768,7 +773,6 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
- seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
@@ -1113,7 +1117,8 @@ static u64 qtnf_topaz_dma_mask_get(void)
return DMA_BIT_MASK(32);
}
-static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
+static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
+ unsigned int tx_bd_num, unsigned int rx_bd_num)
{
struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
struct pci_dev *pdev = ts->base.pdev;
@@ -1147,7 +1152,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
return ret;
}
- ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
+ ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
if (ret) {
pr_err("PCIE xfer init failed\n");
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 8a3c6344fa8e..75527f1bb306 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -59,6 +59,7 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
* Randomization in probe requests.
* @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning.
+ * @QLINK_HW_CAPAB_HW_BRIDGE: device has hardware switch capabilities.
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
@@ -69,6 +70,7 @@ enum qlink_hw_capab {
QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
QLINK_HW_CAPAB_SAE = BIT(8),
+ QLINK_HW_CAPAB_HW_BRIDGE = BIT(9),
};
enum qlink_iface_type {
@@ -217,6 +219,10 @@ struct qlink_sta_info_state {
* command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
* capability.
* @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
+ * @QLINK_CMD_TXPWR: get or set current channel transmit power for
+ * the specified MAC.
+ * @QLINK_CMD_NDEV_EVENT: signals changes made to a corresponding network
+ * device.
*/
enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
@@ -249,11 +255,13 @@ enum qlink_cmd_type {
QLINK_CMD_DEL_STA = 0x0052,
QLINK_CMD_SCAN = 0x0053,
QLINK_CMD_CHAN_STATS = 0x0054,
+ QLINK_CMD_NDEV_EVENT = 0x0055,
QLINK_CMD_CONNECT = 0x0060,
QLINK_CMD_DISCONNECT = 0x0061,
QLINK_CMD_PM_SET = 0x0062,
QLINK_CMD_WOWLAN_SET = 0x0063,
QLINK_CMD_EXTERNAL_AUTH = 0x0066,
+ QLINK_CMD_TXPWR = 0x0067,
};
/**
@@ -719,6 +727,32 @@ struct qlink_cmd_pm_set {
} __packed;
/**
+ * enum qlink_txpwr_op - transmit power operation type
+ * @QLINK_TXPWR_SET: set tx power
+ * @QLINK_TXPWR_GET: get current tx power setting
+ */
+enum qlink_txpwr_op {
+ QLINK_TXPWR_SET,
+ QLINK_TXPWR_GET
+};
+
+/**
+ * struct qlink_cmd_txpwr - get or set current transmit power
+ *
+ * @txpwr: new transmit power setting, in mBm
+ * @txpwr_setting: transmit power setting type, one of
+ * &enum nl80211_tx_power_setting
+ * @op_type: type of operation, one of &enum qlink_txpwr_op
+ */
+struct qlink_cmd_txpwr {
+ struct qlink_cmd chdr;
+ __le32 txpwr;
+ u8 txpwr_setting;
+ u8 op_type;
+ u8 rsvd[2];
+} __packed;
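A minimal sketch of filling this structure for a set operation; the command allocation and transmission are omitted, and the helper name is hypothetical, not part of this patch:

static void qlink_txpwr_fill_set(struct qlink_cmd_txpwr *cmd, int mbm)
{
        cmd->txpwr = cpu_to_le32(mbm);                /* power in mBm */
        cmd->txpwr_setting = NL80211_TX_POWER_FIXED;  /* &enum nl80211_tx_power_setting */
        cmd->op_type = QLINK_TXPWR_SET;
}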
+
+/**
* enum qlink_wowlan_trigger
*
* @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
@@ -742,6 +776,42 @@ struct qlink_cmd_wowlan_set {
u8 data[0];
} __packed;
+enum qlink_ndev_event_type {
+ QLINK_NDEV_EVENT_CHANGEUPPER,
+};
+
+/**
+ * struct qlink_cmd_ndev_event - data for QLINK_CMD_NDEV_EVENT command
+ *
+ * @event: type of event, one of &enum qlink_ndev_event_type
+ */
+struct qlink_cmd_ndev_event {
+ struct qlink_cmd chdr;
+ __le16 event;
+ u8 rsvd[2];
+} __packed;
+
+enum qlink_ndev_upper_type {
+ QLINK_NDEV_UPPER_TYPE_NONE,
+ QLINK_NDEV_UPPER_TYPE_BRIDGE,
+};
+
+/**
+ * struct qlink_cmd_ndev_changeupper - data for QLINK_NDEV_EVENT_CHANGEUPPER
+ *
+ * @br_domain: layer 2 broadcast domain ID that ndev is a member of
+ * @upper_type: type of upper device, one of &enum qlink_ndev_upper_type
+ */
+struct qlink_cmd_ndev_changeupper {
+ struct qlink_cmd_ndev_event nehdr;
+ __le64 flags;
+ __le32 br_domain;
+ __le32 netspace_id;
+ __le16 vlanid;
+ u8 upper_type;
+ u8 rsvd[1];
+} __packed;
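An illustrative fill for the bridge case, mirroring the notifier logic in core.c earlier in this diff; command allocation is omitted and the remaining fields are assumed zeroed by the allocator:

        cmd->nehdr.event = cpu_to_le16(QLINK_NDEV_EVENT_CHANGEUPPER);
        cmd->upper_type = QLINK_NDEV_UPPER_TYPE_BRIDGE;
        /* bridge ifindex when linking, own ifindex when unlinking */
        cmd->br_domain = cpu_to_le32(br_domain);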
+
/* QLINK Command Responses messages related definitions
*/
@@ -944,6 +1014,19 @@ struct qlink_resp_channel_get {
struct qlink_chandef chan;
} __packed;
+/**
+ * struct qlink_resp_txpwr - response for QLINK_CMD_TXPWR command
+ *
+ * This response is used for the QLINK_TXPWR_GET operation and carries
+ * no meaningful information for the QLINK_TXPWR_SET operation.
+ *
+ * @txpwr: current transmit power setting, in mBm
+ */
+struct qlink_resp_txpwr {
+ struct qlink_resp rhdr;
+ __le32 txpwr;
+} __packed;
+
/* QLINK Events messages related definitions
*/
@@ -958,6 +1041,7 @@ enum qlink_event_type {
QLINK_EVENT_FREQ_CHANGE = 0x0028,
QLINK_EVENT_RADAR = 0x0029,
QLINK_EVENT_EXTERNAL_AUTH = 0x0030,
+ QLINK_EVENT_MIC_FAILURE = 0x0031,
};
/**
@@ -1151,6 +1235,20 @@ struct qlink_event_external_auth {
u8 action;
} __packed;
+/**
+ * struct qlink_event_mic_failure - data for QLINK_EVENT_MIC_FAILURE event
+ *
+ * @src: source MAC address of the frame
+ * @key_index: index of the key being reported
+ * @pairwise: whether the key is pairwise or group
+ */
+struct qlink_event_mic_failure {
+ struct qlink_event ehdr;
+ u8 src[ETH_ALEN];
+ u8 key_index;
+ u8 pairwise;
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
@@ -1171,6 +1269,7 @@ struct qlink_event_external_auth {
* @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
* during a scan including off-channel dwell time and operating channel
* time.
+ * @QTN_TLV_ID_IFTYPE_DATA: supported band data.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1206,6 +1305,7 @@ enum qlink_tlv_id {
QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
+ QTN_TLV_ID_IFTYPE_DATA = 0x0418,
};
struct qlink_tlv_hdr {
@@ -1367,6 +1467,39 @@ struct qlink_tlv_ie_set {
u8 ie_data[0];
} __packed;
+/**
+ * struct qlink_tlv_ext_ie - extension IE
+ *
+ * @eid_ext: element ID extension, one of &enum ieee80211_eid_ext.
+ * @ie_data: IEs data.
+ */
+struct qlink_tlv_ext_ie {
+ struct qlink_tlv_hdr hdr;
+ u8 eid_ext;
+ u8 ie_data[0];
+} __packed;
+
+#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
+struct qlink_sband_iftype_data {
+ __le16 types_mask;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+} __packed;
+
+/**
+ * struct qlink_tlv_iftype_data - data for QTN_TLV_ID_IFTYPE_DATA
+ *
+ * @n_iftype_data: number of entries in iftype_data.
+ * @iftype_data: interface type data entries.
+ */
+struct qlink_tlv_iftype_data {
+ struct qlink_tlv_hdr hdr;
+ u8 n_iftype_data;
+ u8 rsvd[3];
+ struct qlink_sband_iftype_data iftype_data[0];
+} __packed;
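A hedged sketch of the length check a receiver of this TLV would need; the helper is illustrative, not part of the patch:

static bool qlink_iftype_data_len_ok(const struct qlink_tlv_iftype_data *tlv,
                                     size_t payload_len)
{
        /* fixed fields past the TLV header plus the variable entries */
        size_t need = sizeof(*tlv) - sizeof(tlv->hdr) +
                      tlv->n_iftype_data *
                      sizeof(struct qlink_sband_iftype_data);

        return payload_len >= need;
}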
+
struct qlink_chan_stats {
__le32 chan_num;
__le32 cca_tx;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/switchdev.h b/drivers/net/wireless/quantenna/qtnfmac/switchdev.h
new file mode 100644
index 000000000000..b962e670c4b0
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/switchdev.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2019 Quantenna Communications. All rights reserved. */
+
+#ifndef QTNFMAC_SWITCHDEV_H_
+#define QTNFMAC_SWITCHDEV_H_
+
+#include <linux/skbuff.h>
+
+#ifdef CONFIG_NET_SWITCHDEV
+
+static inline void qtnfmac_switch_mark_skb_flooded(struct sk_buff *skb)
+{
+ skb->offload_fwd_mark = 1;
+}
+
+#else
+
+static inline void qtnfmac_switch_mark_skb_flooded(struct sk_buff *skb)
+{
+}
+
+#endif
+
+#endif /* QTNFMAC_SWITCHDEV_H_ */
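Setting skb->offload_fwd_mark tells the bridge layer that the device has already forwarded (here: flooded) the frame in hardware, so the software bridge will not flood it again to ports of the same switch; the field only exists when CONFIG_NET_SWITCHDEV is enabled, hence the empty stub above. In the RX path (see qtnf_classify_skb earlier in this diff) the usage reduces to the sketch below, where netif_receive_skb() stands in for the driver's actual RX delivery:

        /* firmware already flooded this frame; don't flood it again */
        qtnfmac_switch_mark_skb_flooded(skb);
        netif_receive_skb(skb);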
diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig
index f8a9244ce012..d4969d617822 100644
--- a/drivers/net/wireless/ralink/rt2x00/Kconfig
+++ b/drivers/net/wireless/ralink/rt2x00/Kconfig
@@ -95,20 +95,20 @@ config RT2800PCI_RT35XX
config RT2800PCI_RT53XX
- bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
- default y
- ---help---
- This adds support for rt53xx wireless chipset family to the
- rt2800pci driver.
- Supported chips: RT5390
+ bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
+ default y
+ ---help---
+ This adds support for rt53xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT5390
config RT2800PCI_RT3290
- bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
- default y
- ---help---
- This adds support for rt3290 wireless chipset family to the
- rt2800pci driver.
- Supported chips: RT3290
+ bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
+ default y
+ ---help---
+ This adds support for rt3290 wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3290
endif
config RT2500USB
@@ -174,18 +174,18 @@ config RT2800USB_RT3573
in the rt2800usb driver.
config RT2800USB_RT53XX
- bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
- ---help---
- This adds support for rt53xx wireless chipset family to the
- rt2800usb driver.
- Supported chips: RT5370
+ bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
+ ---help---
+ This adds support for rt53xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT5370
config RT2800USB_RT55XX
- bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
- ---help---
- This adds support for rt55xx wireless chipset family to the
- rt2800usb driver.
- Supported chips: RT5572
+ bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
+ ---help---
+ This adds support for rt55xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT5572
config RT2800USB_UNKNOWN
bool "rt2800usb - Include support for unknown (USB) devices"
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index f1cdcd61c54a..a36c3fea7495 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5839,8 +5839,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT6352)) {
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -5854,8 +5853,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
- rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
@@ -10476,7 +10473,7 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* when the hw reorders frames due to aggregation.
*/
if (sta_priv->wcid > WCID_END)
- return 1;
+ return -ENOSPC;
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -10489,7 +10486,7 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
break;
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index 23cd4ff78e54..e1bf41c278a5 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -37,53 +37,11 @@ static const u8 cck_ofdm_gain_settings[] = {
0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
};
-static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
- 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
-};
-
-static const u8 rtl8225se_tx_power_cck[] = {
- 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
- 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
- 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
- 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
- 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
- 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
-};
-
-static const u8 rtl8225se_tx_power_cck_ch14[] = {
- 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
- 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
- 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
- 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
- 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
- 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
-};
-
-static const u8 rtl8225se_tx_power_ofdm[] = {
- 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
-};
-
static const u32 rtl8225se_chan[] = {
0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
};
-static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
- 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
-};
-
-static const u8 rtl8225sez2_tx_power_cck_B[] = {
- 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
-};
-
-static const u8 rtl8225sez2_tx_power_cck_A[] = {
- 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
-};
-
-static const u8 rtl8225sez2_tx_power_cck[] = {
- 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
-};
-
static const u8 ZEBRA_AGC[] = {
0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index ade057d868f7..6598c8d786ea 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1187,6 +1187,79 @@ struct rtl8723bu_c2h {
struct rtl8xxxu_fileops;
+/* MLME related */
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0,
+ /* Sub-Element */
+ WIRELESS_MODE_B = BIT(0),
+ WIRELESS_MODE_G = BIT(1),
+ WIRELESS_MODE_A = BIT(2),
+ WIRELESS_MODE_N_24G = BIT(3),
+ WIRELESS_MODE_N_5G = BIT(4),
+ WIRELESS_AUTO = BIT(5),
+ WIRELESS_MODE_AC = BIT(6),
+ WIRELESS_MODE_MAX = 0x7F,
+};
+
+/* from rtlwifi/wifi.h */
+enum ratr_table_mode_new {
+ RATEID_IDX_BGN_40M_2SS = 0,
+ RATEID_IDX_BGN_40M_1SS = 1,
+ RATEID_IDX_BGN_20M_2SS_BN = 2,
+ RATEID_IDX_BGN_20M_1SS_BN = 3,
+ RATEID_IDX_GN_N2SS = 4,
+ RATEID_IDX_GN_N1SS = 5,
+ RATEID_IDX_BG = 6,
+ RATEID_IDX_G = 7,
+ RATEID_IDX_B = 8,
+ RATEID_IDX_VHT_2SS = 9,
+ RATEID_IDX_VHT_1SS = 10,
+ RATEID_IDX_MIX1 = 11,
+ RATEID_IDX_MIX2 = 12,
+ RATEID_IDX_VHT_3SS = 13,
+ RATEID_IDX_BGN_3SS = 14,
+};
+
+#define BT_INFO_8723B_1ANT_B_FTP BIT(7)
+#define BT_INFO_8723B_1ANT_B_A2DP BIT(6)
+#define BT_INFO_8723B_1ANT_B_HID BIT(5)
+#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT(4)
+#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT(3)
+#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT(2)
+#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT(1)
+#define BT_INFO_8723B_1ANT_B_CONNECTION BIT(0)
+
+enum _BT_8723B_1ANT_STATUS {
+ BT_8723B_1ANT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_1ANT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_1ANT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_1ANT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_1ANT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_1ANT_STATUS_MAX
+};
+
+struct rtl8xxxu_btcoex {
+ u8 bt_status;
+ bool bt_busy;
+ bool has_sco;
+ bool has_a2dp;
+ bool has_hid;
+ bool has_pan;
+ bool hid_only;
+ bool a2dp_only;
+ bool c2h_bt_inquiry;
+};
+
+#define RTL8XXXU_RATR_STA_INIT 0
+#define RTL8XXXU_RATR_STA_HIGH 1
+#define RTL8XXXU_RATR_STA_MID 2
+#define RTL8XXXU_RATR_STA_LOW 3
+
+#define RTL8XXXU_NOISE_FLOOR_MIN -100
+#define RTL8XXXU_SNR_THRESH_HIGH 50
+#define RTL8XXXU_SNR_THRESH_LOW 20
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1291,6 +1364,17 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
+ u8 rssi_level;
+ /*
+ * Only one virtual interface permitted because only STA mode
+ * is supported and no iface_combinations are provided.
+ */
+ struct ieee80211_vif *vif;
+ struct delayed_work ra_watchdog;
+ struct work_struct c2hcmd_work;
+ struct sk_buff_head c2hcmd_queue;
+ spinlock_t c2hcmd_lock;
+ struct rtl8xxxu_btcoex bt_coex;
};
struct rtl8xxxu_rx_urb {
@@ -1326,7 +1410,7 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1341,6 +1425,7 @@ struct rtl8xxxu_fileops {
u8 has_s0s1:1;
u8 has_tx_report:1;
u8 gen2_thermal_meter:1;
+ u8 needs_full_init:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
@@ -1411,9 +1496,9 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
@@ -1437,6 +1522,8 @@ void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
+void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index c747f6a1922d..9f1f93d04145 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1011,7 +1011,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok, path_b_ok;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -1021,11 +1021,11 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index ceffe05bd65b..a71e1816e632 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -882,7 +882,7 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok /*, path_b_ok */;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -892,11 +892,11 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
@@ -1580,9 +1580,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
/*
* Software control, antenna at WiFi side
*/
-#ifdef NEED_PS_TDMA
rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
-#endif
rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
@@ -1670,6 +1668,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.has_s0s1 = 1,
.has_tx_report = 1,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index c6c41fb962ff..aa2bb2ae9809 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1255,7 +1255,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
- u32 val32, rsr;
+ u32 val32;
u8 val8, subchannel;
u16 rf_mode_bw;
bool ht = true;
@@ -1264,7 +1264,6 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
rf_mode_bw = rtl8xxxu_read16(priv, REG_WMAC_TRXPTCL_CTL);
rf_mode_bw &= ~WMAC_TRXPTCL_CTL_BW_MASK;
- rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
channel = hw->conf.chandef.chan->hw_value;
/* Hack */
@@ -3115,7 +3114,7 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok, path_b_ok;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -3125,11 +3124,11 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
@@ -3820,9 +3819,8 @@ void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
}
-#ifdef NEED_PS_TDMA
-static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
- u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
+void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
{
struct h2c_cmd h2c;
@@ -3835,7 +3833,6 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
h2c.b_type_dma.data5 = arg5;
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
}
-#endif
void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
{
@@ -3902,6 +3899,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
+ if (fops->needs_full_init)
+ macpower = false;
+
ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
@@ -4304,7 +4304,8 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
-void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
@@ -4324,7 +4325,7 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+ u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
u8 bw = 0;
@@ -4338,7 +4339,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
h2c.ramask.arg = 0x80;
- h2c.b_macid_cfg.data1 = 0;
+ h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
@@ -4478,6 +4479,35 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
+static u16
+rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
+{
+ u16 network_type = WIRELESS_MODE_UNKNOWN;
+
+ if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) {
+ if (sta->vht_cap.vht_supported)
+ network_type = WIRELESS_MODE_AC;
+ else if (sta->ht_cap.ht_supported)
+ network_type = WIRELESS_MODE_N_5G;
+
+ network_type |= WIRELESS_MODE_A;
+ } else {
+ if (sta->vht_cap.vht_supported)
+ network_type = WIRELESS_MODE_AC;
+ else if (sta->ht_cap.ht_supported)
+ network_type = WIRELESS_MODE_N_24G;
+
+ if (sta->supp_rates[0] <= 0xf)
+ network_type |= WIRELESS_MODE_B;
+ else if (sta->supp_rates[0] & 0xf)
+ network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G);
+ else
+ network_type |= WIRELESS_MODE_G;
+ }
+
+ return network_type;
+}
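Worked examples for the 2.4 GHz branch, assuming mac80211's rate-bitmap layout in which bits 0-3 of supp_rates[0] are the CCK (11b) rates:

        /*
         * supp_rates[0] == 0x00f, no HT  ->  WIRELESS_MODE_B
         * supp_rates[0] == 0xff5, no HT  ->  WIRELESS_MODE_B | WIRELESS_MODE_G
         * supp_rates[0] == 0xff0, no HT  ->  WIRELESS_MODE_G
         * HT supported: WIRELESS_MODE_N_24G is set first, then the
         * same rate bits are OR'ed in as above
         */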
+
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changed)
@@ -4520,7 +4550,10 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sgi = 1;
rcu_read_unlock();
- priv->fops->update_rate_mask(priv, ramask, sgi);
+ priv->vif = vif;
+ priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
+
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -5148,12 +5181,263 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
+/*
+ * The RTL8723BU/RTL8192EU vendor drivers use coexistence table types
+ * 0-7 to represent writing different combinations of register values
+ * to the REG_BT_COEX_TABLEs. These cover different coexistence use
+ * cases for which Realtek provides no detailed documentation. Keep
+ * this aligned with the vendor driver for easier maintenance.
+ */
+static
+void rtl8723bu_set_coex_with_type(struct rtl8xxxu_priv *priv, u8 type)
+{
+ switch (type) {
+ case 0:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 1:
+ case 3:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 2:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 4:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaa5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 5:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaa5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 6:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 7:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ default:
+ break;
+ }
+}
+
+static
+void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
+{
+ struct rtl8xxxu_btcoex *btcoex = &priv->bt_coex;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+ btcoex->c2h_bt_inquiry = true;
+ else
+ btcoex->c2h_bt_inquiry = false;
+
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_NON_CONNECTED_IDLE;
+ btcoex->has_sco = false;
+ btcoex->has_hid = false;
+ btcoex->has_pan = false;
+ btcoex->has_a2dp = false;
+ } else {
+ if ((bt_info & 0x1f) == BT_INFO_8723B_1ANT_B_CONNECTION)
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_CONNECTED_IDLE;
+ else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY))
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_SCO_BUSY;
+ else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY)
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_ACL_BUSY;
+ else
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_MAX;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+ btcoex->has_pan = true;
+ else
+ btcoex->has_pan = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+ btcoex->has_a2dp = true;
+ else
+ btcoex->has_a2dp = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+ btcoex->has_hid = true;
+ else
+ btcoex->has_hid = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ btcoex->has_sco = true;
+ else
+ btcoex->has_sco = false;
+ }
+
+ if (!btcoex->has_a2dp && !btcoex->has_sco &&
+ !btcoex->has_pan && btcoex->has_hid)
+ btcoex->hid_only = true;
+ else
+ btcoex->hid_only = false;
+
+ if (!btcoex->has_sco && !btcoex->has_pan &&
+ !btcoex->has_hid && btcoex->has_a2dp)
+ btcoex->a2dp_only = true;
+ else
+ btcoex->a2dp_only = false;
+
+ if (btcoex->bt_status == BT_8723B_1ANT_STATUS_SCO_BUSY ||
+ btcoex->bt_status == BT_8723B_1ANT_STATUS_ACL_BUSY)
+ btcoex->bt_busy = true;
+ else
+ btcoex->bt_busy = false;
+}
+
+static
+void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_btcoex *btcoex;
+ bool wifi_connected;
+
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ wifi_connected = (vif && vif->bss_conf.assoc);
+
+ if (!wifi_connected) {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 0);
+ } else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35, 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if (btcoex->has_pan) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x3f, 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 7);
+ }
+}
+
+static
+void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_btcoex *btcoex;
+ bool wifi_connected;
+
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ wifi_connected = (vif && vif->bss_conf.assoc);
+
+ if (wifi_connected) {
+ u32 val32 = 0;
+ u32 high_prio_tx = 0, high_prio_rx = 0;
+
+ val32 = rtl8xxxu_read32(priv, 0x770);
+ high_prio_tx = val32 & 0x0000ffff;
+ high_prio_rx = (val32 & 0xffff0000) >> 16;
+
+ if (btcoex->bt_busy) {
+ if (btcoex->hid_only) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x20,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 5);
+ } else if (btcoex->a2dp_only) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if ((btcoex->has_a2dp && btcoex->has_pan) ||
+ (btcoex->has_hid && btcoex->has_a2dp &&
+ btcoex->has_pan)) {
+ rtl8723bu_set_ps_tdma(priv, 0x51, 0x21,
+ 0x3, 0x10, 0x10);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if (btcoex->has_hid && btcoex->has_a2dp) {
+ rtl8723bu_set_ps_tdma(priv, 0x51, 0x21,
+ 0x3, 0x10, 0x10);
+ rtl8723bu_set_coex_with_type(priv, 3);
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ }
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ if (high_prio_tx + high_prio_rx <= 60)
+ rtl8723bu_set_coex_with_type(priv, 2);
+ else
+ rtl8723bu_set_coex_with_type(priv, 7);
+ }
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 0);
+ }
+}
+
+static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv;
+ struct rtl8723bu_c2h *c2h;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u8 bt_info = 0;
+ struct rtl8xxxu_btcoex *btcoex;
+
+ priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
+ btcoex = &priv->bt_coex;
+
+ if (priv->rf_paths > 1)
+ return;
+
+ while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ spin_lock_irqsave(&priv->c2hcmd_lock, flags);
+ skb = __skb_dequeue(&priv->c2hcmd_queue);
+ spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+
+ c2h = (struct rtl8723bu_c2h *)skb->data;
+
+ switch (c2h->id) {
+ case C2H_8723B_BT_INFO:
+ bt_info = c2h->bt_info.bt_info;
+
+ rtl8723bu_update_bt_link_info(priv, bt_info);
+ if (btcoex->c2h_bt_inquiry) {
+ rtl8723bu_handle_bt_inquiry(priv);
+ break;
+ }
+ rtl8723bu_handle_bt_info(priv);
+ break;
+ default:
+ break;
+ }
+
+ dev_kfree_skb(skb);
+ }
+}
+
static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
struct sk_buff *skb)
{
struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
struct device *dev = &priv->udev->dev;
int len;
+ unsigned long flags;
len = skb->len - 2;
@@ -5191,6 +5475,12 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
16, 1, c2h->raw.payload, len, false);
break;
}
+
+ spin_lock_irqsave(&priv->c2hcmd_lock, flags);
+ __skb_queue_tail(&priv->c2hcmd_queue, skb);
+ spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+
+ schedule_work(&priv->c2hcmd_work);
}
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
@@ -5315,7 +5605,6 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "%s: C2H packet\n", __func__);
rtl8723bu_handle_c2h(priv, skb);
- dev_kfree_skb(skb);
return RX_TYPE_C2H;
}
@@ -5444,6 +5733,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
goto error;
}
@@ -5464,6 +5754,10 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ if (!priv->vif)
+ priv->vif = vif;
+ else
+ return -EOPNOTSUPP;
rtl8xxxu_stop_tx_beacon(priv);
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
@@ -5487,6 +5781,9 @@ static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
+
+ if (priv->vif)
+ priv->vif = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -5772,6 +6069,178 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static u8 rtl8xxxu_signal_to_snr(int signal)
+{
+ if (signal < RTL8XXXU_NOISE_FLOOR_MIN)
+ signal = RTL8XXXU_NOISE_FLOOR_MIN;
+ else if (signal > 0)
+ signal = 0;
+ return (u8)(signal - RTL8XXXU_NOISE_FLOOR_MIN);
+}
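With RTL8XXXU_NOISE_FLOOR_MIN at -100, this maps the clamped average RSSI onto a 0-100 scale that rtl8xxxu_refresh_rate_mask() below compares against the 50/20 thresholds, for example:

        /*
         * signal = -45 dBm  ->  snr = 55  (above RTL8XXXU_SNR_THRESH_HIGH)
         * signal = -70 dBm  ->  snr = 30  (between the two thresholds)
         * signal = -90 dBm  ->  snr = 10  (below RTL8XXXU_SNR_THRESH_LOW)
         */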
+
+static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
+ int signal, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ u16 wireless_mode;
+ u8 rssi_level, ratr_idx;
+ u8 txbw_40mhz;
+ u8 snr, snr_thresh_high, snr_thresh_low;
+ u8 go_up_gap = 5;
+
+ rssi_level = priv->rssi_level;
+ snr = rtl8xxxu_signal_to_snr(signal);
+ snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
+ snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
+ txbw_40mhz = (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) ? 1 : 0;
+
+ switch (rssi_level) {
+ case RTL8XXXU_RATR_STA_MID:
+ snr_thresh_high += go_up_gap;
+ break;
+ case RTL8XXXU_RATR_STA_LOW:
+ snr_thresh_high += go_up_gap;
+ snr_thresh_low += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (snr > snr_thresh_high)
+ rssi_level = RTL8XXXU_RATR_STA_HIGH;
+ else if (snr > snr_thresh_low)
+ rssi_level = RTL8XXXU_RATR_STA_MID;
+ else
+ rssi_level = RTL8XXXU_RATR_STA_LOW;
+
+ if (rssi_level != priv->rssi_level) {
+ int sgi = 0;
+ u32 rate_bitmap = 0;
+
+ rcu_read_lock();
+ rate_bitmap = (sta->supp_rates[0] & 0xfff) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12) |
+ (sta->ht_cap.mcs.rx_mask[1] << 20);
+ if (sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+ sgi = 1;
+ rcu_read_unlock();
+
+ wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
+ switch (wireless_mode) {
+ case WIRELESS_MODE_B:
+ ratr_idx = RATEID_IDX_B;
+ if (rate_bitmap & 0x0000000c)
+ rate_bitmap &= 0x0000000d;
+ else
+ rate_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_A:
+ case WIRELESS_MODE_G:
+ ratr_idx = RATEID_IDX_G;
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH)
+ rate_bitmap &= 0x00000f00;
+ else
+ rate_bitmap &= 0x00000ff0;
+ break;
+ case (WIRELESS_MODE_B | WIRELESS_MODE_G):
+ ratr_idx = RATEID_IDX_BG;
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH)
+ rate_bitmap &= 0x00000f00;
+ else if (rssi_level == RTL8XXXU_RATR_STA_MID)
+ rate_bitmap &= 0x00000ff0;
+ else
+ rate_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ case (WIRELESS_MODE_G | WIRELESS_MODE_N_24G):
+ case (WIRELESS_MODE_A | WIRELESS_MODE_N_5G):
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_GN_N2SS;
+ else
+ ratr_idx = RATEID_IDX_GN_N1SS;
+ break;
+ case (WIRELESS_MODE_B | WIRELESS_MODE_G | WIRELESS_MODE_N_24G):
+ case (WIRELESS_MODE_B | WIRELESS_MODE_N_24G):
+ if (txbw_40mhz) {
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_BGN_40M_2SS;
+ else
+ ratr_idx = RATEID_IDX_BGN_40M_1SS;
+ } else {
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_BGN_20M_2SS_BN;
+ else
+ ratr_idx = RATEID_IDX_BGN_20M_1SS_BN;
+ }
+
+ if (priv->tx_paths == 2 && priv->rx_paths == 2) {
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH) {
+ rate_bitmap &= 0x0f8f0000;
+ } else if (rssi_level == RTL8XXXU_RATR_STA_MID) {
+ rate_bitmap &= 0x0f8ff000;
+ } else {
+ if (txbw_40mhz)
+ rate_bitmap &= 0x0f8ff015;
+ else
+ rate_bitmap &= 0x0f8ff005;
+ }
+ } else {
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH) {
+ rate_bitmap &= 0x000f0000;
+ } else if (rssi_level == RTL8XXXU_RATR_STA_MID) {
+ rate_bitmap &= 0x000ff000;
+ } else {
+ if (txbw_40mhz)
+ rate_bitmap &= 0x000ff015;
+ else
+ rate_bitmap &= 0x000ff005;
+ }
+ }
+ break;
+ default:
+ ratr_idx = RATEID_IDX_BGN_40M_2SS;
+ rate_bitmap &= 0x0fffffff;
+ break;
+ }
+
+ priv->rssi_level = rssi_level;
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi);
+ }
+}
+
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ vif = priv->vif;
+
+ if (vif && vif->type == NL80211_IFTYPE_STATION) {
+ int signal;
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ struct device *dev = &priv->udev->dev;
+
+ dev_dbg(dev, "%s: no sta found\n", __func__);
+ rcu_read_unlock();
+ goto out;
+ }
+ rcu_read_unlock();
+
+ signal = ieee80211_ave_rssi(vif);
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta);
+ }
+
+out:
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
+}
+
static int rtl8xxxu_start(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -5828,6 +6297,8 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
}
+
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
exit:
/*
* Accept all data and mgmt frames
@@ -5879,6 +6350,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_delayed_work_sync(&priv->ra_watchdog);
+
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
}
@@ -6001,7 +6474,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
break;
case 0x7392:
- if (id->idProduct == 0x7811)
+ if (id->idProduct == 0x7811 || id->idProduct == 0xa611)
untested = 0;
break;
case 0x050d:
@@ -6051,6 +6524,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+ INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
+ spin_lock_init(&priv->c2hcmd_lock);
+ INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
+ skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -6205,6 +6682,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192eu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723bu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa611, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723bu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ac746c322554..c75192c4447f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1776,8 +1776,7 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
tid_data->agg.agg_state = RTL_AGG_START;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- return 0;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
}
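[Editor's note: the return-value change reflects a mac80211 interface addition. A driver whose hardware needs no delayed setup may return IEEE80211_AMPDU_TX_START_IMMEDIATE from the IEEE80211_AMPDU_TX_START case instead of calling ieee80211_start_tx_ba_cb_irqsafe(). A hedged sketch of the two completion styles in an ampdu_action handler (handler name illustrative):

	static int my_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_ampdu_params *params)
	{
		switch (params->action) {
		case IEEE80211_AMPDU_TX_START:
			/* Deferred completion (the old rtlwifi behaviour):
			 *   ieee80211_start_tx_ba_cb_irqsafe(vif,
			 *		params->sta->addr, params->tid);
			 *   return 0;
			 * Immediate completion (what the patch switches to):
			 */
			return IEEE80211_AMPDU_TX_START_IMMEDIATE;
		default:
			return -EOPNOTSUPP;
		}
	}
]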
int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 3ebc7c93b1e9..3c96c320236c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -1578,10 +1578,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
static int up, dn, m, n, wait_cnt;
- /* 0: no change, +1: increase WiFi duration,
- * -1: decrease WiFi duration
- */
- int result;
u8 retry_cnt = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -1669,7 +1665,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
m = 1;
n = 3;
- result = 0;
wait_cnt = 0;
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
@@ -1679,7 +1674,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
up, dn, m, n, wait_cnt);
- result = 0;
wait_cnt++;
/* no retry in the last 2-second duration */
if (retry_cnt == 0) {
@@ -1694,7 +1688,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
n = 3;
up = 0;
dn = 0;
- result = 1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex]Increase wifi duration!!\n");
}
@@ -1718,7 +1711,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- result = -1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Reduce wifi duration for retry<3\n");
}
@@ -1735,7 +1727,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- result = -1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Decrease wifi duration for retryCounter>3!!\n");
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 5f5739904edf..528e442f25a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -1424,17 +1424,11 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
* -1: decrease WiFi duration
*/
s32 result;
- u8 retry_count = 0, bt_info_ext;
- bool wifi_busy = false;
+ u8 retry_count = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], TdmaDurationAdjustForAcl()\n");
- if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY)
- wifi_busy = true;
- else
- wifi_busy = false;
-
if ((wifi_status ==
BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
(wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN) ||
@@ -1472,7 +1466,6 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- bt_info_ext = coex_sta->bt_info_ext;
if ((coex_sta->low_priority_tx) > 1050 ||
(coex_sta->low_priority_rx) > 1250)
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 264667203f6f..cef9f2a9303b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -915,7 +915,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct target_pkt;
u8 write_state = PG_STATE_HEADER;
- int continual = true, dataempty = true, result = true;
+ int continual = true, result = true;
u16 efuse_addr = 0;
u8 efuse_data;
u8 target_word_cnts = 0;
@@ -942,7 +942,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
if (write_state == PG_STATE_HEADER) {
- dataempty = true;
badworden = 0x0F;
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
"efuse PG_STATE_HEADER\n");
@@ -1179,13 +1178,12 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
int continual = true;
u16 efuse_addr = 0;
- u8 hoffset, hworden;
+ u8 hworden;
u8 efuse_data, word_cnts;
while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
(efuse_addr < EFUSE_MAX_SIZE)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = efuse_calculate_word_cnts(hworden);
efuse_addr = efuse_addr + (word_cnts * 2) + 1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index fff8dda14023..e5e1ec5a41dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -68,7 +68,6 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
bool actionallowed = false;
u16 rfwait_cnt = 0;
@@ -102,8 +101,6 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
}
}
- rtstate = ppsc->rfpwr_state;
-
switch (state_toset) {
case ERFON:
ppsc->rfoff_reason &= (~changesource);
@@ -161,8 +158,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
if (ppsc->inactive_pwrstate == ERFON &&
rtlhal->interface == INTF_PCI) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
- RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
- rtlhal->interface == INTF_PCI) {
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
rtlpriv->intf_ops->disable_aspm(hw);
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index c10432cd703e..8be31e0ad878 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -386,7 +386,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
struct wiphy *wiphy = hw->wiphy;
struct country_code_to_enum_rd *country = NULL;
- if (wiphy == NULL || &rtlpriv->regd == NULL)
+ if (!wiphy)
return -EINVAL;
/* init country_code from efuse channel plan */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 333e355c9281..dceb04a9b3f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -759,14 +759,8 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
}
/* Indicate Rx signal strength to FW. */
- if (rtlpriv->dm.useramask) {
- u8 h2c_parameter[3] = { 0 };
-
- h2c_parameter[2] = (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0x20;
- } else {
+ if (!rtlpriv->dm.useramask)
rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
- }
}
void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 96d8f25b120f..5ca900f97d66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -85,20 +85,19 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -112,13 +111,12 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl88e_phy_rf_serial_read(hw,
@@ -133,7 +131,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl88e_phy_rf_serial_write(hw, rfpath, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -166,9 +164,9 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
(newoffset << 23) | BLSSIREADEDGE;
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
- mdelay(1);
+ udelay(10);
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(2);
+ udelay(120);
if (rfpath == RF90_PATH_A)
rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
BIT(8));
@@ -649,7 +647,7 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
int i;
u32 *phy_reg_page;
u16 phy_reg_page_len;
- u32 v1 = 0, v2 = 0, v3 = 0;
+ u32 v1 = 0, v2 = 0;
phy_reg_page_len = RTL8188EEPHY_REG_ARRAY_PGLEN;
phy_reg_page = RTL8188EEPHY_REG_ARRAY_PG;
@@ -658,7 +656,6 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
for (i = 0; i < phy_reg_page_len; i = i + 3) {
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
if (v1 < 0xcdcdcdcd) {
if (phy_reg_page[i] == 0xfe)
@@ -689,13 +686,11 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
while (v2 != 0xDEAD &&
i < phy_reg_page_len - 5) {
i += 3;
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
}
}
}
@@ -769,7 +764,6 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
@@ -778,7 +772,6 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
process_path_a(hw, radioa_arraylen, radioa_array_table);
@@ -1940,9 +1933,9 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
struct rtl_phy *rtlphy = &rtlpriv->phy;
long result[4][8];
u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ bool b_patha_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[9] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1971,7 +1964,6 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
final_candidate = 0xff;
b_patha_ok = false;
- b_pathb_ok = false;
is12simular = false;
is23simular = false;
is13simular = false;
@@ -2014,27 +2006,20 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
rtlphy->reg_eb4 = reg_eb4;
rtlphy->reg_ebc = reg_ebc;
rtlphy->reg_e94 = reg_e94;
rtlphy->reg_e9c = reg_e9c;
b_patha_ok = true;
- b_pathb_ok = true;
} else {
rtlphy->reg_e94 = 0x100;
rtlphy->reg_eb4 = 0x100;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index f2908ee5f860..4bef237f488d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -1649,8 +1649,6 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
(rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_SPECIAL_LOW)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- } else if (rtlpriv->btcoexist.bt_service == BT_PAN) {
- rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 0efd19aa4fe5..661249d618c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -1369,8 +1369,8 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[10] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1445,21 +1445,17 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 56cc3bc30860..f070f25bb735 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1540,6 +1540,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
* This may be necessary:
* rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
*/
+ dev_kfree_skb(skb);
+
return true;
}
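[Editor's note: the added dev_kfree_skb() plugs an skb leak. A transmit helper that reports success is expected to consume the buffer on every path. Sketch of the ownership convention, with illustrative names:

	/* Returns true on success; in either case the skb is consumed
	 * here, so the caller must not touch it afterwards.
	 */
	static bool send_packet(struct my_dev *dev, struct sk_buff *skb)
	{
		/* ...hand the payload to the hardware (omitted)... */
		dev_kfree_skb(skb);
		return true;
	}
]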
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index c7f29a9be50d..146fe144f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1176,6 +1176,7 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
}
void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
@@ -1185,7 +1186,7 @@ void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- synchronize_irq(rtlpci->pdev->irq);
+ rtlpci->irq_enabled = false;
}
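[Editor's note: replacing synchronize_irq() with a software irq_enabled flag implies the interrupt handler checks that flag. A hedged sketch of the ISR-side check this pairs with (handler name and dev_id type illustrative):

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct rtl_pci *rtlpci = dev_id;

		if (!rtlpci->irq_enabled)
			return IRQ_NONE;	/* masked in software, not ours */

		/* ...read and dispatch interrupt status... */
		return IRQ_HANDLED;
	}
]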
static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
@@ -1351,7 +1352,7 @@ void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
bcn_interval = mac->beacon_interval;
atim_window = 2;
- /*rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
@@ -1371,9 +1372,9 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
"beacon_interval:%d\n", bcn_interval);
- /* rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
- /* rtl92de_enable_interrupt(hw); */
+ rtl92de_enable_interrupt(hw);
}
void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 0ae6371b6318..4b672199c81d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -307,16 +307,15 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
regaddr, rfpath, bitmask, original_value);
@@ -329,14 +328,13 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
if (bitmask == 0)
return;
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92d_phy_rf_serial_read(hw,
@@ -347,7 +345,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
}
_rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
}
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 99e5cd9a5c86..1dbdddce0823 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -216,6 +216,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
.get_desc = rtl92de_get_desc,
+ .is_tx_desc_closed = rtl92de_is_tx_desc_closed,
.tx_polling = rtl92de_tx_polling,
.enable_hw_sec = rtl92de_enable_hw_security_config,
.set_key = rtl92de_set_key,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 2494e1f118f8..92c9fb45f800 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -804,13 +804,15 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
break;
}
} else {
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = GET_RX_DESC_OWN(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = GET_RX_DESC_PKT_LEN(p_desc);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ ret = GET_RX_DESC_BUFF_ADDR(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
@@ -821,6 +823,23 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
return ret;
}
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN);
+
+ /* a beacon packet will only use the first
+ * descriptor by default, and the own bit may not
+ * be cleared by the hardware
+ */
+ if (own)
+ return false;
+ return true;
+}
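[Editor's note: wiring up .is_tx_desc_closed lets the shared rtlwifi PCI TX-reclaim path stop at descriptors the hardware still owns. Roughly how the core consults the hook, simplified from rtlwifi's pci.c:

	/* Inside the TX-completion loop, before reclaiming ring->idx: */
	if (rtlpriv->cfg->ops->is_tx_desc_closed &&
	    !rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
		break;	/* still owned by hardware; try again later */
]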
+
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 36820070fd76..635989e15282 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -715,6 +715,8 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
u64 rtl92de_get_desc(struct ieee80211_hw *hw,
u8 *p_desc, bool istx, u8 desc_name);
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 67305ce915ec..05462422d247 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -108,7 +108,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
- int err;
enum version_8192e version = rtlhal->version;
if (!rtlhal->pfirmware)
@@ -146,9 +145,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
_rtl92ee_write_fw(hw, version, pfwdata, fwsize);
_rtl92ee_enable_fw_download(hw, false);
- err = _rtl92ee_fw_free_to_go(hw);
-
- return 0;
+ return _rtl92ee_fw_free_to_go(hw);
}
static bool _rtl92ee_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 222abc41669c..6dba576aa81e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -84,19 +84,18 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n",
@@ -111,13 +110,12 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
addr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
@@ -127,7 +125,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl92ee_phy_rf_serial_write(hw, rfpath, addr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -160,9 +158,8 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
(newoffset << 23) | BLSSIREADEDGE;
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
- mdelay(1);
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(2);
+ udelay(20);
if (rfpath == RF90_PATH_A)
rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
BIT(8));
@@ -2801,8 +2798,8 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac;
- long reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ long reg_e94, reg_e9c, reg_ea4;
+ long reg_eb4, reg_ebc, reg_ec4;
bool is12simular, is13simular, is23simular;
u8 idx;
u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
@@ -2873,11 +2870,9 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
@@ -2886,13 +2881,11 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e9c = result[final_candidate][1];
rtlphy->reg_e9c = reg_e9c;
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
rtlphy->reg_eb4 = reg_eb4;
reg_ebc = result[final_candidate][5];
rtlphy->reg_ebc = reg_ebc;
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 27f1a631b569..dc7b515bdc85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -651,7 +651,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
u16 seq_number;
__le16 fc = hdr->frame_control;
- unsigned int buf_len;
u8 fw_qsel = _rtl92ee_map_hwqueue_to_fwqueue(skb, hw_queue);
bool firstseg = ((hdr->seq_ctrl &
cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
@@ -659,7 +658,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
dma_addr_t mapping;
u8 bw_40 = 0;
- u8 short_gi;
__le32 *pdesc = (__le32 *)pdesc8;
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -677,7 +675,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
skb_push(skb, EM_HDR_LEN);
memset(skb->data, 0, EM_HDR_LEN);
}
- buf_len = skb->len;
mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
@@ -724,11 +721,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- if (ptcb_desc->hw_rate > DESC_RATEMCS0)
- short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
- else
- short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
-
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(pdesc, 1);
set_tx_desc_max_agg_num(pdesc, 0x14);
@@ -1010,14 +1002,13 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
{
- u16 cur_tx_rp, cur_tx_wp;
+ u16 cur_tx_rp;
u32 tmpu32;
tmpu32 =
rtl_read_dword(rtlpriv,
get_desc_addr_fr_q_idx(hw_queue));
cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
- cur_tx_wp = (u16)(tmpu32 & 0x0fff);
/* don't need to update ring->cur_tx_wp */
ring->cur_tx_rp = cur_tx_rp;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
index bb6b60814762..f43331224851 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
@@ -24,192 +24,186 @@
#define TX_DESC_SIZE_RTL8192S (16 * 4)
#define TX_CMDDESC_SIZE_RTL8192S (16 * 4)
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
/* Dword 0 */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_AMSDU(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GREEN_FIELD(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
/* Dword 1 */
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
-#define SET_TX_DESC_MORE_DATA(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 5, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 6, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 8, 5, __val)
-#define SET_TX_DESC_ACK_POLICY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 13, 2, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
-#define SET_TX_DESC_NON_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 16, 1, __val)
-#define SET_TX_DESC_KEY_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 17, 2, __val)
-#define SET_TX_DESC_OUI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 19, 1, __val)
-#define SET_TX_DESC_PKT_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 22, 2, __val)
-#define SET_TX_DESC_WDS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 26, 5, __val)
-#define SET_TX_DESC_HWPC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_non_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
/* Dword 2 */
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 0, 6, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 6, 1, __val)
-#define SET_TX_DESC_TSFL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 7, 5, __val)
-#define SET_TX_DESC_RTS_RETRY_COUNT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 12, 6, __val)
-#define SET_TX_DESC_DATA_RETRY_COUNT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 18, 6, __val)
-#define SET_TX_DESC_RSVD_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(((__pdesc) + 8), 24, 5, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 29, 1, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
-#define SET_TX_DESC_OWN_MAC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 31, 1, __val)
+static inline void set_tx_desc_rsvd_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(28, 24));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(29));
+}
/* Dword 3 */
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 16, 12, __val)
-#define SET_TX_DESC_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 28, 4, __val)
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
/* Dword 4 */
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 0, 6, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 6, 1, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 7, 4, __val)
-#define SET_TX_DESC_CTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 12, 1, __val)
-#define SET_TX_DESC_RA_BRSR_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 13, 3, __val)
-#define SET_TX_DESC_TXHT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 16, 1, __val)
-#define SET_TX_DESC_TX_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 17, 1, __val)
-#define SET_TX_DESC_TX_BANDWIDTH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 18, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 19, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 21, 2, __val)
-#define SET_TX_DESC_TX_REVERSE_DIRECTION(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 23, 1, __val)
-#define SET_TX_DESC_RTS_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 24, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 25, 1, __val)
-#define SET_TX_DESC_RTS_BANDWIDTH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 26, 1, __val)
-#define SET_TX_DESC_RTS_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 27, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 29, 2, __val)
-#define SET_TX_DESC_USER_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 31, 1, __val)
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_cts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_ra_brsr_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(15, 13));
+}
+
+static inline void set_tx_desc_txht(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(16));
+}
+
+static inline void set_tx_desc_tx_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(17));
+}
+
+static inline void set_tx_desc_tx_bandwidth(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(18));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(20, 19));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_bandwidth(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(28, 27));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(30, 29));
+}
+
+static inline void set_tx_desc_user_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(31));
+}
/* Dword 5 */
-#define SET_TX_DESC_PACKET_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 0, 9, __val)
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 9, 6, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 15, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 16, 5, __val)
-#define SET_TX_DESC_TX_AGC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 21, 11, __val)
-
-/* Dword 6 */
-#define SET_TX_DESC_IP_CHECK_SUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 0, 16, __val)
-#define SET_TX_DESC_TCP_CHECK_SUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 16, 16, __val)
+static inline void set_tx_desc_packet_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(8, 0));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(14, 9));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(20, 16));
+}
/* Dword 7 */
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 0, 16, __val)
-#define SET_TX_DESC_IP_HEADER_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 16, 8, __val)
-#define SET_TX_DESC_TCP_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 31, 1, __val)
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
/* Dword 8 */
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 32, 0, 32, __val)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 32, 0, 32)
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*((__pdesc + 8)));
+}
/* Dword 9 */
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 36, 0, 32, __val)
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 9) = cpu_to_le32(__val);
+}
/* Because the PCI Tx descriptors are chained at the
* initialization and all the NextDescAddresses in
@@ -226,208 +220,115 @@
#define RX_DRV_INFO_SIZE_UNIT 8
/* DWORD 0 */
-#define SET_RX_STATUS_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_STATUS_DESC_CRC32(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 14, 1, __val)
-#define SET_RX_STATUS_DESC_ICV(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 15, 1, __val)
-#define SET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 4, __val)
-#define SET_RX_STATUS_DESC_SECURITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 20, 3, __val)
-#define SET_RX_STATUS_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 23, 1, __val)
-#define SET_RX_STATUS_DESC_SHIFT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
-#define SET_RX_STATUS_DESC_PHY_STATUS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_RX_STATUS_DESC_SWDEC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_RX_STATUS_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_RX_STATUS_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_RX_STATUS_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_STATUS_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_STATUS_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_STATUS_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_STATUS_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_STATUS_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_STATUS_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_STATUS_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_STATUS_DESC_PHY_STATUS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_STATUS_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_STATUS_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_STATUS_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_STATUS_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_STATUS_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+static inline void set_rx_status_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_status_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_status_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_status_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline u32 get_rx_status_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline u32 get_rx_status_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline u32 get_rx_status_desc_drvinfo_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline u32 get_rx_status_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline u32 get_rx_status_desc_phy_status(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline u32 get_rx_status_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline u32 get_rx_status_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
/* DWORD 1 */
-#define SET_RX_STATUS_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
-#define SET_RX_STATUS_DESC_TID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 5, 4, __val)
-#define SET_RX_STATUS_DESC_PAGGR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 14, 1, __val)
-#define SET_RX_STATUS_DESC_FAGGR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
-#define SET_RX_STATUS_DESC_A1_FIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 16, 4, __val)
-#define SET_RX_STATUS_DESC_A2_FIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 20, 4, __val)
-#define SET_RX_STATUS_DESC_PAM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
-#define SET_RX_STATUS_DESC_PWR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
-#define SET_RX_STATUS_DESC_MOREDATA(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 26, 1, __val)
-#define SET_RX_STATUS_DESC_MOREFRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
-#define SET_RX_STATUS_DESC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 28, 2, __val)
-#define SET_RX_STATUS_DESC_MC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 30, 1, __val)
-#define SET_RX_STATUS_DESC_BC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 31, 1, __val)
-
-#define GET_RX_STATUS_DEC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 0, 5)
-#define GET_RX_STATUS_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 5, 4)
-#define GET_RX_STATUS_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 14, 1)
-#define GET_RX_STATUS_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 15, 1)
-#define GET_RX_STATUS_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 16, 4)
-#define GET_RX_STATUS_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 20, 4)
-#define GET_RX_STATUS_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 24, 1)
-#define GET_RX_STATUS_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 25, 1)
-#define GET_RX_STATUS_DESC_MORE_DATA(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 26, 1)
-#define GET_RX_STATUS_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 27, 1)
-#define GET_RX_STATUS_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 28, 2)
-#define GET_RX_STATUS_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 30, 1)
-#define GET_RX_STATUS_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 31, 1)
-
-/* DWORD 2 */
-#define SET_RX_STATUS_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 0, 12, __val)
-#define SET_RX_STATUS_DESC_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 12, 4, __val)
-#define SET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 16, 8, __val)
-#define SET_RX_STATUS_DESC_NEXT_IND(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
-
-#define GET_RX_STATUS_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 0, 12)
-#define GET_RX_STATUS_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 12, 4)
-#define GET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 16, 8)
-#define GET_RX_STATUS_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 30, 1)
+static inline u32 get_rx_status_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_status_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
/* DWORD 3 */
-#define SET_RX_STATUS_DESC_RX_MCS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 0, 6, __val)
-#define SET_RX_STATUS_DESC_RX_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 6, 1, __val)
-#define SET_RX_STATUS_DESC_AMSDU(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 7, 1, __val)
-#define SET_RX_STATUS_DESC_SPLCP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 8, 1, __val)
-#define SET_RX_STATUS_DESC_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 9, 1, __val)
-#define SET_RX_STATUS_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 10, 1, __val)
-#define SET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 11, 1, __val)
-#define SET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 12, 1, __val)
-#define SET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 13, 1, __val)
-#define SET_RX_STATUS_DESC_HWPC_ERR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 14, 1, __val)
-#define SET_RX_STATUS_DESC_HWPC_IND(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 15, 1, __val)
-#define SET_RX_STATUS_DESC_IV0(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 16, 16, __val)
-
-#define GET_RX_STATUS_DESC_RX_MCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 0, 6)
-#define GET_RX_STATUS_DESC_RX_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 6, 1)
-#define GET_RX_STATUS_DESC_AMSDU(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 7, 1)
-#define GET_RX_STATUS_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 8, 1)
-#define GET_RX_STATUS_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 9, 1)
-#define GET_RX_STATUS_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 10, 1)
-#define GET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 11, 1)
-#define GET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 12, 1)
-#define GET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 13, 1)
-#define GET_RX_STATUS_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 14, 1)
-#define GET_RX_STATUS_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 15, 1)
-#define GET_RX_STATUS_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 16, 16)
-
-/* DWORD 4 */
-#define SET_RX_STATUS_DESC_IV1(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 0, 32, __val)
-#define GET_RX_STATUS_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 16, 0, 32)
+static inline u32 get_rx_status_desc_rx_mcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_status_desc_rx_ht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_status_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_status_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
/* DWORD 5 */
-#define SET_RX_STATUS_DESC_TSFL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 0, 32, __val)
-#define GET_RX_STATUS_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 20, 0, 32)
+static inline u32 get_rx_status_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*((__pdesc + 5)));
+}
/* DWORD 6 */
-#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
-#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
+static inline void set_rx_status__desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_status_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
+ (get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE1M || \
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE2M || \
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE5_5M ||\
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE11M)
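[Editor's note: the whole def.h conversion follows one mechanical rule: a (byte offset, shift, width) triple becomes a dword index plus a FIELD-style mask, and the open-coded shifts are delegated to the <linux/bitfield.h> helpers. A minimal sketch of the equivalence, assuming nothing beyond those headers:

	#include <linux/bits.h>
	#include <linux/bitfield.h>

	/* Old: byte offset 0, shift 31, width 1, hand-rolled. */
	#define OLD_GET_OWN(p) \
		((le32_to_cpu(*(__le32 *)(p)) >> 31) & 0x1)

	/* New: dword pointer plus a mask; the helpers do the
	 * byte-swapping and shifting.
	 */
	static inline u32 new_get_own(__le32 *p)
	{
		return le32_get_bits(*p, BIT(31));
	}

	static inline void new_set_own(__le32 *p, u32 val)
	{
		le32p_replace_bits(p, val, BIT(31));	/* read-modify-write */
	}
]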
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 541b7881735e..47a5b95ca2b9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -442,17 +442,20 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
memset((ph2c_buffer + totallen + tx_desclen), 0, len);
/* CMD len */
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 0, 16, pcmd_len[i]);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), pcmd_len[i],
+ GENMASK(15, 0));
/* CMD ID */
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 16, 8, pelement_id[i]);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), pelement_id[i],
+ GENMASK(23, 16));
/* CMD Sequence */
*cmd_start_seq = *cmd_start_seq % 0x80;
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 24, 7, *cmd_start_seq);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), *cmd_start_seq,
+ GENMASK(30, 24));
++*cmd_start_seq;
/* Copy memory */
@@ -462,8 +465,9 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
/* CMD continue */
/* set the continue in previous cmd. */
if (i < cmd_num - 1)
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + pre_continueoffset),
- 31, 1, 1);
+ le32p_replace_bits((__le32 *)(ph2c_buffer +
+ pre_continueoffset),
+ 1, BIT(31));
pre_continueoffset = totallen;
@@ -559,8 +563,8 @@ void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
pwrmode.flag_dps_en = 0;
pwrmode.bcn_rx_en = 0;
pwrmode.bcn_to = 0;
- SET_BITS_TO_LE_2BYTE((u8 *)(&pwrmode) + 8, 0, 16,
- mac->vif->bss_conf.beacon_int);
+ le16p_replace_bits((__le16 *)(((u8 *)(&pwrmode) + 8)),
+ mac->vif->bss_conf.beacon_int, GENMASK(15, 0));
pwrmode.app_itv = 0;
pwrmode.awake_bcn_itvl = ppsc->reg_max_lps_awakeintvl;
pwrmode.smart_ps = 1;
@@ -602,9 +606,10 @@ void rtl92s_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
joinbss_rpt.bssid[3] = mac->bssid[3];
joinbss_rpt.bssid[4] = mac->bssid[4];
joinbss_rpt.bssid[5] = mac->bssid[5];
- SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 8, 0, 16,
- mac->vif->bss_conf.beacon_int);
- SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 10, 0, 16, mac->assoc_id);
+ le16p_replace_bits((__le16 *)(((u8 *)(&joinbss_rpt) + 8)),
+ mac->vif->bss_conf.beacon_int, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)(((u8 *)(&joinbss_rpt) + 10)),
+ mac->assoc_id, GENMASK(15, 0));
_rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_JOINBSSRPT, (u8 *)&joinbss_rpt);
}
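[Editor's note: the SET_BITS_TO_LE_2BYTE conversions follow the same pattern at 16-bit width. A hedged example of writing a little-endian field at a byte offset inside a command structure (buffer and value are made up for illustration):

	u8 buf[16] = {};

	/* Store a beacon interval of 100 TU at byte offset 8,
	 * little-endian, regardless of host endianness.
	 */
	le16p_replace_bits((__le16 *)(buf + 8), 100, GENMASK(15, 0));
	/* buf[8] == 0x64, buf[9] == 0x00 */

Since the mask covers the full 16 bits, put_unaligned_le16() would be equivalent here; the replace_bits form keeps these call sites uniform with the partial-field cases.]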
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 6d6e8994460d..81313e0ca834 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1352,9 +1352,9 @@ static void _rtl92s_phy_set_rfhalt(struct ieee80211_hw *hw)
/* SW/HW radio off or halt adapter!! For example S3/S4 */
} else {
/* LED function disable. Power range is about 8mA now. */
- /* if write 0xF1 disconnet_pci power
+ /* if write 0xF1 disconnect_pci power
* ifconfig wlan0 down power are both high 35:70 */
- /* if write oxF9 disconnet_pci power
+ /* if write 0xF9 disconnect_pci power
* ifconfig wlan0 down power are both low 12:45*/
rtl_write_byte(rtlpriv, 0x03, 0xF9);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index efb432c6d785..9eaa5348b556 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -33,7 +33,7 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
}
static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstats, u8 *pdesc,
+ struct rtl_stats *pstats, __le32 *pdesc,
struct rx_fwinfo *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
@@ -193,11 +193,10 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb, struct rtl_stats *pstats,
- u8 *pdesc, struct rx_fwinfo *p_drvinfo)
+ __le32 *pdesc, struct rx_fwinfo *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
@@ -232,29 +231,30 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
}
bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status, u8 *pdesc,
+ struct ieee80211_rx_status *rx_status, u8 *pdesc8,
struct sk_buff *skb)
{
struct rx_fwinfo *p_drvinfo;
- u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
+ __le32 *pdesc = (__le32 *)pdesc8;
+ u32 phystatus = (u32)get_rx_status_desc_phy_status(pdesc);
struct ieee80211_hdr *hdr;
- stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
- stats->rx_bufshift = (u8)(GET_RX_STATUS_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16)GET_RX_STATUS_DESC_ICV(pdesc);
- stats->crc = (u16)GET_RX_STATUS_DESC_CRC32(pdesc);
+ stats->length = (u16)get_rx_status_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_status_desc_drvinfo_size(pdesc) * 8;
+ stats->rx_bufshift = (u8)(get_rx_status_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_status_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_status_desc_crc32(pdesc);
stats->hwerror = (u16)(stats->crc | stats->icv);
- stats->decrypted = !GET_RX_STATUS_DESC_SWDEC(pdesc);
-
- stats->rate = (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc);
- stats->shortpreamble = (u16)GET_RX_STATUS_DESC_SPLCP(pdesc);
- stats->isampdu = (bool)(GET_RX_STATUS_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc);
+ stats->decrypted = !get_rx_status_desc_swdec(pdesc);
+
+ stats->rate = (u8)get_rx_status_desc_rx_mcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_status_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_status_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_status_desc_paggr(pdesc) == 1) &&
+ (get_rx_status_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_status_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_status_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_status_desc_rx_ht(pdesc);
stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc);
if (stats->hwerror)
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -320,7 +320,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
@@ -360,13 +360,13 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
if (rtlpriv->dm.useramask) {
/* set txdesc macId */
if (ptcb_desc->mac_id < 32) {
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
reserved_macid |= ptcb_desc->mac_id;
}
}
- SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
+ set_tx_desc_rsvd_macid(pdesc, reserved_macid);
- SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
+ set_tx_desc_txht(pdesc, ((ptcb_desc->hw_rate >=
DESC_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
@@ -378,31 +378,32 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
- SET_TX_DESC_TX_SHORT(pdesc, 0);
+ set_tx_desc_tx_short(pdesc, 0);
/* Aggregation related */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ set_tx_desc_agg_enable(pdesc, 1);
/* For AMPDU, we must insert SSN into TX_DESC */
- SET_TX_DESC_SEQ(pdesc, seq_number);
+ set_tx_desc_seq(pdesc, seq_number);
/* Protection mode related */
/* For 92S, if RTS/CTS are set, HW will execute RTS. */
/* We choose only one protection mode to execute */
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS_ENABLE(pdesc, ((ptcb_desc->cts_enable) ?
+ set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ?
+ 1 : 0));
+ set_tx_desc_cts_enable(pdesc, ((ptcb_desc->cts_enable) ?
1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bandwidth(pdesc, 0);
+ set_tx_desc_rts_sub_carrier(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <=
DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -411,27 +412,27 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
/* Set Bandwidth and sub-channel settings. */
if (bw_40) {
if (ptcb_desc->packet_bw) {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 1);
+ set_tx_desc_tx_bandwidth(pdesc, 1);
/* use duplicated mode */
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
} else {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_tx_bandwidth(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_tx_bandwidth(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
/* 3 Fill necessary field in First Descriptor */
/*DWORD 0*/
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_OFFSET(pdesc, 32);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_offset(pdesc, 32);
+ set_tx_desc_pkt_size(pdesc, (u16)skb->len);
/*DWORD 1*/
- SET_TX_DESC_RA_BRSR_ID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_ra_brsr_id(pdesc, ptcb_desc->ratr_index);
/* Fill security related */
if (info->control.hw_key) {
@@ -441,62 +442,63 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x2);
+ set_tx_desc_sec_type(pdesc, 0x2);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
/* Set Packet ID */
- SET_TX_DESC_PACKET_ID(pdesc, 0);
+ set_tx_desc_packet_id(pdesc, 0);
/* We will assign the management queue to BK. */
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
/* Always enable all rate fallback range */
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
/* Fix: I don't know why hw uses 6.5M to tx when this is set */
- SET_TX_DESC_USER_RATE(pdesc,
+ set_tx_desc_user_rate(pdesc,
ptcb_desc->use_driver_rate ? 1 : 0);
/* Set NON_QOS bit. */
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_NON_QOS(pdesc, 1);
+ set_tx_desc_non_qos(pdesc, 1);
}
/* Fill fields that are required to be initialized
* in all of the descriptors */
/*DWORD 0 */
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
/* DWORD 7 */
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
/* DOWRD 8 */
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
-void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
- bool firstseg, bool lastseg, struct sk_buff *skb)
+void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
+ bool firstseg, bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -512,53 +514,55 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
/* This bit indicates this packet is used for FW download. */
if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
/* For firmware download we only need to set LINIP */
- SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);
+ set_tx_desc_linip(pdesc, tcb_desc->last_inipkt);
/* 92SE must set this to 1 to avoid a HW DMA error during firmware download */
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
/* 92SE need not set the TX packet size during firmware download */
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
} else { /* H2C Command Desc format (Host TXCMD) */
/* 92SE must set this to 1 to avoid a HW DMA error during firmware download */
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
/* Buffer size + command header */
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
/* Fixed queue of H2C command */
- SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);
-
- SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
+ set_tx_desc_queue_sel(pdesc, 0x13);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ le32p_replace_bits((__le32 *)skb->data, rtlhal->h2c_txcmd_seq,
+ GENMASK(30, 24));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
}
}
-void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
@@ -569,16 +573,16 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_STATUS_DESC_OWN(pdesc, 1);
+ set_rx_status_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_STATUS__DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_status__desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_STATUS_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_status_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_STATUS_DESC_EOR(pdesc, 1);
+ set_rx_status_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
@@ -589,17 +593,18 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
u64 rtl92se_get_desc(struct ieee80211_hw *hw,
- u8 *desc, bool istx, u8 desc_name)
+ u8 *desc8, bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *desc = (__le32 *)desc8;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(desc);
+ ret = get_tx_desc_own(desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
+ ret = get_tx_desc_tx_buffer_address(desc);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
@@ -609,13 +614,13 @@ u64 rtl92se_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_STATUS_DESC_OWN(desc);
+ ret = get_rx_status_desc_own(desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
+ ret = get_rx_status_desc_pkt_len(desc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
+ ret = get_rx_status_desc_buff_addr(desc);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 54a3aec1dfa7..772aecedf0b4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -37,13 +37,12 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value = 0, readback_value, bitshift;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
original_value = rtl8723_phy_rf_serial_read(hw,
@@ -53,7 +52,7 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -69,13 +68,12 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 original_value = 0, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
@@ -99,7 +97,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl8723e_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
}
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -485,15 +483,12 @@ bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
int i;
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- rtstatus = true;
-
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -1341,9 +1336,9 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ bool b_patha_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[10] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1372,7 +1367,6 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
final_candidate = 0xff;
b_patha_ok = false;
- b_pathb_ok = false;
is12simular = false;
is23simular = false;
is13simular = false;
@@ -1412,23 +1406,16 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
- b_pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
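
Dropping the irqsave variants is safe only on the assumption that locks.rf_lock is never taken from hard-IRQ context; the plain form then skips saving and restoring the interrupt flags on every RF register access. Sketch of the resulting pattern:

    /* assumption: rf_lock has no hard-IRQ users, so plain
     * spin_lock is sufficient and cheaper than spin_lock_irqsave
     */
    spin_lock(&rtlpriv->locks.rf_lock);
    original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
    spin_unlock(&rtlpriv->locks.rf_lock);

The same reasoning covers the identical conversions in the rtl8723be and rtl8821ae hunks below.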
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index aa8a0950fcea..9528ac3f3b87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -33,19 +33,18 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -59,13 +58,12 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, path);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = rtl8723_phy_rf_serial_read(hw, path,
@@ -77,7 +75,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
rtl8723_phy_rf_serial_write(hw, path, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -2251,8 +2249,8 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate, idx;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4;
- long reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4;
+ long reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[9] = {
ROFDM0_XARXIQIMBALANCE,
@@ -2334,11 +2332,9 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
@@ -2346,13 +2342,11 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e9c = result[final_candidate][1];
rtlphy->reg_e9c = reg_e9c;
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
rtlphy->reg_eb4 = reg_eb4;
reg_ebc = result[final_candidate][5];
rtlphy->reg_ebc = reg_ebc;
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 18ce2856a91b..37036e653e56 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -223,7 +223,6 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
struct sk_buff *pskb = NULL;
- u8 own;
unsigned long flags;
ring = &rtlpci->tx_ring[BEACON_QUEUE];
@@ -233,9 +232,6 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true,
- HW_DESC_OWN);
-
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
__skb_queue_tail(&ring->queue, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index aae14c68bf69..debecc623a01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -89,12 +89,10 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
(newoffset << 23) | BLSSIREADEDGE;
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
- mdelay(1);
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(1);
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong | BLSSIREADEDGE);
- mdelay(1);
+ udelay(120);
if (rfpath == RF90_PATH_A)
rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
BIT(8));
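
The three mdelay(1) calls busy-waited roughly 3 ms per RF register read; the replacement assumes the LSSI read edge latches well within 120 us. Rough budget:

    /* before: 3 * mdelay(1)    ~ 3000 us of busy-wait per read
     * after:  1 * udelay(120)  ~  120 us
     * assumption: 120 us is enough settle time for the HSSI read
     * edge; udelay keeps this usable in atomic context
     */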
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 979e434a4e73..b8a2b2326902 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -139,19 +139,18 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl8821ae_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -166,13 +165,12 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value =
@@ -183,7 +181,7 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -2076,7 +2074,6 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- bool rtstatus = true;
u32 *radioa_array_table_a, *radioa_array_table_b;
u16 radioa_arraylen_a, radioa_arraylen_b;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2088,7 +2085,6 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2111,7 +2107,6 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2121,7 +2116,6 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2351,7 +2345,7 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
short band_temp = -1, regulation = -1, bandwidth_temp = -1,
rate_section = -1, channel_temp = -1;
- u16 bd, regu, bdwidth, sec, chnl;
+ u16 regu, bdwidth, sec, chnl;
s8 power_limit = MAX_POWER_INDEX;
if (rtlefuse->eeprom_regulatory == 2)
@@ -2472,7 +2466,6 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
return MAX_POWER_INDEX;
}
- bd = band_temp;
regu = regulation;
bdwidth = bandwidth_temp;
sec = rate_section;
@@ -3553,8 +3546,6 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
if (36 <= channel && channel <= 64)
data = 0x114E9;
- else if (100 <= channel && channel <= 140)
- data = 0x110E9;
else
data = 0x110E9;
rtl8821ae_phy_set_rf_reg(hw, path, RF_APK,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 4b59f3b46b28..348b0072cdd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1021,8 +1021,10 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->hw = hw;
rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
GFP_KERNEL);
- if (!rtlpriv->usb_data)
+ if (!rtlpriv->usb_data) {
+ ieee80211_free_hw(hw);
return -ENOMEM;
+ }
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
@@ -1083,6 +1085,7 @@ error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
+ kfree(rtlpriv->usb_data);
return -ENODEV;
}
EXPORT_SYMBOL(rtl_usb_probe);
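
Both usb.c changes plug leaks on rtl_usb_probe() error paths: the hw from ieee80211_alloc_hw() was leaked when the usb_data allocation failed, and usb_data itself was leaked on the later error_out2 unwind. A minimal sketch of the last-in/first-out unwind this restores (all names hypothetical):

    static int example_probe(void)
    {
    	struct example_hw *hw;
    	u32 *usb_data;
    	int ret;

    	hw = example_alloc_hw();
    	if (!hw)
    		return -ENOMEM;

    	usb_data = kcalloc(32, sizeof(u32), GFP_KERNEL);
    	if (!usb_data) {
    		ret = -ENOMEM;
    		goto err_free_hw;	/* undo the hw allocation */
    	}

    	ret = example_start(hw, usb_data);
    	if (ret)
    		goto err_free_usb_data;

    	return 0;

    err_free_usb_data:
    	kfree(usb_data);
    err_free_hw:
    	example_free_hw(hw);
    	return ret;
    }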
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 77edee2df8b8..15e12155a04c 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -14,6 +14,7 @@ rtw88-y += main.o \
fw.o \
ps.o \
sec.o \
+ bf.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
new file mode 100644
index 000000000000..fda771d23f71
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation.
+ */
+
+#include "main.h"
+#include "reg.h"
+#include "bf.h"
+#include "debug.h"
+
+void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ if (bfee->role == RTW_BFEE_NONE)
+ return;
+
+ if (bfee->role == RTW_BFEE_MU)
+ bfinfo->bfer_mu_cnt--;
+ else if (bfee->role == RTW_BFEE_SU)
+ bfinfo->bfer_su_cnt--;
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, false);
+
+ bfee->role = RTW_BFEE_NONE;
+}
+
+void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_sta *sta;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_sta_vht_cap *ic_vht_cap;
+ const u8 *bssid = bss_conf->bssid;
+ u32 sound_dim;
+ u8 bfee_role = RTW_BFEE_NONE;
+ u8 i;
+
+ if (!(chip->band & RTW_BAND_5G))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, bssid);
+ if (!sta) {
+ rtw_warn(rtwdev, "failed to find station entry for bss %pM\n",
+ bssid);
+ goto out_unlock;
+ }
+
+ ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
+ vht_cap = &sta->vht_cap;
+
+ if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n");
+ goto out_unlock;
+ }
+
+ ether_addr_copy(bfee->mac_addr, bssid);
+ bfee_role = RTW_BFEE_MU;
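+ /* partial AID: BSSID bits [39:47], bit 39 as the LSB
+  * (bssid[4] bit 7, then bssid[5]); assumed to follow the
+  * 802.11ac partial-AID rule for frames addressed to an AP
+  */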
+ bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
+ bfee->aid = bss_conf->aid;
+ bfinfo->bfer_mu_cnt++;
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ } else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n");
+ goto out_unlock;
+ }
+
+ sound_dim = vht_cap->cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+ ether_addr_copy(bfee->mac_addr, bssid);
+ bfee_role = RTW_BFEE_SU;
+ bfee->sound_dim = (u8)sound_dim;
+ bfee->g_id = 0;
+ bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
+ bfinfo->bfer_su_cnt++;
+ for (i = 0; i < chip->bfer_su_max_num; i++) {
+ if (!test_bit(i, bfinfo->bfer_su_reg_maping)) {
+ set_bit(i, bfinfo->bfer_su_reg_maping);
+ bfee->su_reg_index = i;
+ break;
+ }
+ }
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ }
+
+out_unlock:
+ bfee->role = bfee_role;
+ rcu_read_unlock();
+}
+
+void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
+ struct mu_bfer_init_para *param)
+{
+ u16 mu_bf_ctl = 0;
+ u8 *addr = param->bfer_address;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, REG_ASSOCIATED_BFMER0_INFO + i, addr[i]);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 6, param->paid);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, param->csi_para);
+
+ mu_bf_ctl = rtw_read16(rtwdev, REG_WMAC_MU_BF_CTL) & 0xC000;
+ mu_bf_ctl |= param->my_aid | (param->csi_length_sel << 12);
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, mu_bf_ctl);
+}
+
+void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ enum rtw_trx_desc_rate rate)
+{
+ u32 psf_ctl = 0;
+ u8 csi_rsc = 0x1;
+
+ psf_ctl = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
+ BIT_WMAC_USE_NDPARATE |
+ (csi_rsc << 13);
+
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26);
+ rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL);
+ rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL);
+
+ if (vif->net_type == RTW_NET_AP_MODE)
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl | BIT(12));
+ else
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl & ~BIT(12));
+}
+
+void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param)
+{
+ u8 mu_tbl_sel;
+ u8 mu_valid;
+
+ mu_valid = rtw_read8(rtwdev, REG_MU_TX_CTL) &
+ ~BIT_MASK_R_MU_TABLE_VALID;
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL,
+ (mu_valid | BIT(0) | BIT(1)) & ~(BIT(7)));
+
+ mu_tbl_sel = rtw_read8(rtwdev, REG_MU_TX_CTL + 1) & 0xF8;
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel);
+ rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[0]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[0]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4,
+ param->given_user_pos[1]);
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel | 1);
+ rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[1]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[2]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4,
+ param->given_user_pos[3]);
+}
+
+void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0);
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0);
+ rtw_write8(rtwdev, REG_MU_TX_CTL, 0);
+}
+
+void rtw_bf_del_sounding(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, 0);
+}
+
+void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ u8 nc_index = 1;
+ u8 nr_index = bfee->sound_dim;
+ u8 grouping = 0, codebookinfo = 1, coefficientsize = 3;
+ u32 addr_bfer_info, addr_csi_rpt, csi_param;
+ u8 i;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "config as an su bfee\n");
+
+ switch (bfee->su_reg_index) {
+ case 1:
+ addr_bfer_info = REG_ASSOCIATED_BFMER1_INFO;
+ addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20 + 2;
+ break;
+ case 0:
+ default:
+ addr_bfer_info = REG_ASSOCIATED_BFMER0_INFO;
+ addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20;
+ break;
+ }
+
+ /* Sounding protocol control */
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+
+ /* MAC address/Partial AID of Beamformer */
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, addr_bfer_info + i, bfee->mac_addr[i]);
+
+ csi_param = (u16)((coefficientsize << 10) |
+ (codebookinfo << 8) |
+ (grouping << 6) |
+ (nr_index << 3) |
+ nc_index);
+ rtw_write16(rtwdev, addr_csi_rpt, csi_param);
+
+ /* ndp rx standby timer */
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME);
+}
+
+/* nc index:     1 -> 2T2R,     0 -> 1T1R
+ * nr index:     1 -> use Nsts, 0 -> use reg setting
+ * codebookinfo: 1 -> 802.11ac, 3 -> 802.11n
+ */
+void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bf_info = &rtwdev->bf_info;
+ struct mu_bfer_init_para param;
+ u8 nc_index = 1, nr_index = 1;
+ u8 grouping = 0, codebookinfo = 1, coefficientsize = 0;
+ u32 csi_param;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "config as an mu bfee\n");
+
+ csi_param = (u16)((coefficientsize << 10) |
+ (codebookinfo << 8) |
+ (grouping << 6) |
+ (nr_index << 3) |
+ nc_index);
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "nc=%d nr=%d group=%d codebookinfo=%d coefficientsize=%d\n",
+ nc_index, nr_index, grouping, codebookinfo,
+ coefficientsize);
+
+ param.paid = bfee->p_aid;
+ param.csi_para = csi_param;
+ param.my_aid = bfee->aid & 0xfff;
+ param.csi_length_sel = HAL_CSI_SEG_4K;
+ ether_addr_copy(param.bfer_address, bfee->mac_addr);
+
+ rtw_bf_init_bfer_entry_mu(rtwdev, &param);
+
+ bf_info->cur_csi_rpt_rate = DESC_RATE6M;
+ rtw_bf_cfg_sounding(rtwdev, vif, DESC_RATE6M);
+
+ /* accept action_no_ack */
+ rtw_write16_set(rtwdev, REG_RXFLTMAP0, BIT_RXFLTMAP0_ACTIONNOACK);
+
+ /* accept NDPA and BF report poll */
+ rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF);
+}
+
+void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n");
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+
+ switch (bfee->su_reg_index) {
+ case 0:
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, 0);
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER1_INFO, 0);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER1_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20 + 2, 0);
+ break;
+ }
+
+ clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping);
+ bfee->su_reg_index = 0xFF;
+}
+
+void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+
+ rtw_bf_del_bfer_entry_mu(rtwdev);
+
+ if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0)
+ rtw_bf_del_sounding(rtwdev);
+}
+
+void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct cfg_mumimo_para param;
+
+ if (bfee->role != RTW_BFEE_MU) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "this vif is not mu bfee\n");
+ return;
+ }
+
+ param.grouping_bitmap = 0;
+ param.mu_tx_en = 0;
+ memset(param.sounding_sts, 0, 6);
+ memcpy(param.given_gid_tab, conf->mu_group.membership, 8);
+ memcpy(param.given_user_pos, conf->mu_group.position, 16);
+ rtw_dbg(rtwdev, RTW_DBG_BF, "STA0: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n",
+ param.given_gid_tab[0], param.given_user_pos[0],
+ param.given_user_pos[1]);
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "STA1: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n",
+ param.given_gid_tab[1], param.given_user_pos[2],
+ param.given_user_pos[3]);
+
+ rtw_bf_cfg_mu_bfee(rtwdev, &param);
+}
+
+void rtw_bf_phy_init(struct rtw_dev *rtwdev)
+{
+ u8 tmp8;
+ u32 tmp32;
+ u8 retry_limit = 0xA;
+ u8 ndpa_rate = 0x10;
+ u8 ack_policy = 3;
+
+ tmp32 = rtw_read32(rtwdev, REG_MU_TX_CTL);
+ /* Enable P1 to aggregate new packets according to P0 transfer time */
+ tmp32 |= BIT_MU_P1_WAIT_STATE_EN;
+ /* MU Retry Limit */
+ tmp32 &= ~BIT_MASK_R_MU_RL;
+ tmp32 |= (retry_limit << BIT_SHIFT_R_MU_RL) & BIT_MASK_R_MU_RL;
+ /* Disable Tx MU-MIMO until sounding done */
+ tmp32 &= ~BIT_EN_MU_MIMO;
+ /* Clear validity of MU STAs */
+ tmp32 &= ~BIT_MASK_R_MU_TABLE_VALID;
+ rtw_write32(rtwdev, REG_MU_TX_CTL, tmp32);
+
+ /* MU-MIMO Option as default value */
+ tmp8 = ack_policy << BIT_SHIFT_WMAC_TXMU_ACKPOLICY;
+ tmp8 |= BIT_WMAC_TXMU_ACKPOLICY_EN;
+ rtw_write8(rtwdev, REG_WMAC_MU_BF_OPTION, tmp8);
+
+ /* MU-MIMO Control as default value */
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0);
+ /* Set MU NDPA rate & BW source */
+ rtw_write32_set(rtwdev, REG_TXBF_CTRL, BIT_USE_NDPA_PARAMETER);
+ /* Set NDPA Rate */
+ rtw_write8(rtwdev, REG_NDPA_OPT_CTRL, ndpa_rate);
+
+ rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE,
+ DESC_RATE6M);
+}
+
+void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate)
+{
+ u32 csi_cfg;
+ u16 cur_rrsr;
+
+ csi_cfg = rtw_read32(rtwdev, REG_BBPSF_CTRL) & ~BIT_MASK_CSI_RATE;
+ cur_rrsr = rtw_read16(rtwdev, REG_RRSR);
+
+ if (rssi >= 40) {
+ if (cur_rate != DESC_RATE54M) {
+ cur_rrsr |= BIT(DESC_RATE54M);
+ csi_cfg |= (DESC_RATE54M & BIT_MASK_CSI_RATE_VAL) <<
+ BIT_SHIFT_CSI_RATE;
+ rtw_write16(rtwdev, REG_RRSR, cur_rrsr);
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg);
+ }
+ *new_rate = DESC_RATE54M;
+ } else {
+ if (cur_rate != DESC_RATE24M) {
+ cur_rrsr &= ~BIT(DESC_RATE54M);
+ csi_cfg |= (DESC_RATE54M & BIT_MASK_CSI_RATE_VAL) <<
+ BIT_SHIFT_CSI_RATE;
+ rtw_write16(rtwdev, REG_RRSR, cur_rrsr);
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg);
+ }
+ *new_rate = DESC_RATE24M;
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h
new file mode 100644
index 000000000000..96a8216dd11f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/bf.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation.
+ */
+
+#ifndef __RTW_BF_H_
+#define __RTW_BF_H_
+
+#define REG_TXBF_CTRL 0x042C
+#define REG_RRSR 0x0440
+#define REG_NDPA_OPT_CTRL 0x045F
+
+#define REG_ASSOCIATED_BFMER0_INFO 0x06E4
+#define REG_ASSOCIATED_BFMER1_INFO 0x06EC
+#define REG_TX_CSI_RPT_PARAM_BW20 0x06F4
+#define REG_SND_PTCL_CTRL 0x0718
+#define REG_MU_TX_CTL 0x14C0
+#define REG_MU_STA_GID_VLD 0x14C4
+#define REG_MU_STA_USER_POS_INFO 0x14C8
+#define REG_CSI_RRSR 0x1678
+#define REG_WMAC_MU_BF_OPTION 0x167C
+#define REG_WMAC_MU_BF_CTL 0x1680
+
+#define BIT_WMAC_USE_NDPARATE BIT(30)
+#define BIT_WMAC_TXMU_ACKPOLICY_EN BIT(6)
+#define BIT_USE_NDPA_PARAMETER BIT(30)
+#define BIT_MU_P1_WAIT_STATE_EN BIT(16)
+#define BIT_EN_MU_MIMO BIT(7)
+
+#define R_MU_RL 0xf
+#define BIT_SHIFT_R_MU_RL 12
+#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY 4
+#define BIT_SHIFT_CSI_RATE 24
+
+#define BIT_MASK_R_MU_RL (R_MU_RL << BIT_SHIFT_R_MU_RL)
+#define BIT_MASK_R_MU_TABLE_VALID 0x3f
+#define BIT_MASK_CSI_RATE_VAL 0x3F
+#define BIT_MASK_CSI_RATE (BIT_MASK_CSI_RATE_VAL << BIT_SHIFT_CSI_RATE)
+
+#define BIT_RXFLTMAP0_ACTIONNOACK BIT(14)
+#define BIT_RXFLTMAP1_BF (BIT(4) | BIT(5))
+#define BIT_RXFLTMAP1_BF_REPORT_POLL BIT(4)
+#define BIT_RXFLTMAP4_BF_REPORT_POLL BIT(4)
+
+#define RTW_NDP_RX_STANDBY_TIME 0x70
+#define RTW_SND_CTRL_REMOVE 0xD8
+#define RTW_SND_CTRL_SOUNDING 0xDB
+
+enum csi_seg_len {
+ HAL_CSI_SEG_4K = 0,
+ HAL_CSI_SEG_8K = 1,
+ HAL_CSI_SEG_11K = 2,
+};
+
+struct cfg_mumimo_para {
+ u8 sounding_sts[6];
+ u16 grouping_bitmap;
+ u8 mu_tx_en;
+ u32 given_gid_tab[2];
+ u32 given_user_pos[4];
+};
+
+struct mu_bfer_init_para {
+ u16 paid;
+ u16 csi_para;
+ u16 my_aid;
+ enum csi_seg_len csi_length_sel;
+ u8 bfer_address[ETH_ALEN];
+};
+
+void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
+ struct mu_bfer_init_para *param);
+void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ enum rtw_trx_desc_rate rate);
+void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param);
+void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev);
+void rtw_bf_del_sounding(struct rtw_dev *rtwdev);
+void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee);
+void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee);
+void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bfee *bfee);
+void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, struct rtw_bfee *bfee);
+void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+void rtw_bf_phy_init(struct rtw_dev *rtwdev);
+void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate);
+#endif
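
Since BIT_MASK_R_MU_RL and friends are compile-time constant masks, the open-coded mask-and-shift updates in bf.c could equally use FIELD_PREP() from <linux/bitfield.h>. A sketch, with 0xA being the MU retry limit value bf.c happens to program:

    u32 v = rtw_read32(rtwdev, REG_MU_TX_CTL);

    v &= ~BIT_MASK_R_MU_RL;
    v |= FIELD_PREP(BIT_MASK_R_MU_RL, 0xA);	/* retry limit = 10 */
    rtw_write32(rtwdev, REG_MU_TX_CTL, v);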
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 793b40bdbf7c..4dfb2ec395ee 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -383,9 +383,9 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
u8 rssi_step;
u8 rssi;
- scan = rtw_flag_check(rtwdev, RTW_FLAG_SCANNING);
+ scan = test_bit(RTW_FLAG_SCANNING, rtwdev->flags);
coex_stat->wl_connected = !!rtwdev->sta_cnt;
- coex_stat->wl_gl_busy = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
if (stats->tx_throughput > stats->rx_throughput)
coex_stat->wl_tput_dir = COEX_WL_TPUT_TX;
@@ -810,8 +810,6 @@ static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable)
static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
u8 lps_val, u8 rpwm_val)
{
- struct rtw_lps_conf *lps_conf = &rtwdev->lps_conf;
- struct rtw_vif *rtwvif;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 lps_mode = 0x0;
@@ -823,18 +821,14 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
/* recover to original 32k low power setting */
coex_stat->wl_force_lps_ctrl = false;
- rtwvif = lps_conf->rtwvif;
- if (rtwvif && rtw_in_lps(rtwdev))
- rtw_leave_lps(rtwdev, rtwvif);
+ rtw_leave_lps(rtwdev);
break;
case COEX_PS_LPS_OFF:
coex_stat->wl_force_lps_ctrl = true;
if (lps_mode)
rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0);
- rtwvif = lps_conf->rtwvif;
- if (rtwvif && rtw_in_lps(rtwdev))
- rtw_leave_lps(rtwdev, rtwvif);
+ rtw_leave_lps(rtwdev);
break;
default:
break;
@@ -1308,6 +1302,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
bool wl_hi_pri = false;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 ||
coex_stat->wl_hi_pri_task2)
@@ -1318,14 +1313,16 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
if (wl_hi_pri) {
table_case = 15;
if (coex_stat->bt_a2dp_exist &&
- !coex_stat->bt_pan_exist)
+ !coex_stat->bt_pan_exist) {
+ slot_type = TDMA_4SLOT;
tdma_case = 11;
- else if (coex_stat->wl_hi_pri_task1)
+ } else if (coex_stat->wl_hi_pri_task1) {
tdma_case = 6;
- else if (!coex_stat->bt_page)
+ } else if (!coex_stat->bt_page) {
tdma_case = 8;
- else
+ } else {
tdma_case = 9;
+ }
} else if (coex_stat->wl_connected) {
table_case = 10;
tdma_case = 10;
@@ -1361,7 +1358,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
@@ -1475,13 +1472,13 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
+ slot_type = TDMA_4SLOT;
+
if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
table_case = 10;
else
table_case = 9;
- slot_type = TDMA_4SLOT;
-
if (coex_stat->wl_gl_busy)
tdma_case = 13;
else
@@ -1585,13 +1582,14 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
+ slot_type = TDMA_4SLOT;
+
if (coex_stat->bt_ble_exist)
table_case = 26;
else
table_case = 9;
if (coex_stat->wl_gl_busy) {
- slot_type = TDMA_4SLOT;
tdma_case = 13;
} else {
tdma_case = 14;
@@ -1794,10 +1792,12 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
if (efuse->share_ant) {
/* Shared-Ant */
if (coex_stat->bt_a2dp_exist) {
+ slot_type = TDMA_4SLOT;
table_case = 9;
tdma_case = 11;
} else {
@@ -1818,7 +1818,7 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
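
slot_type rides along in the same argument as the case id: rtw_coex_tdma() is assumed to treat TDMA_4SLOT as a flag bit above the case-id range, so OR-ing it in selects the same TDMA table entry but with 4-slot timing. Call-site pattern:

    u32 slot_type = 0;

    if (four_slot_needed)		/* hypothetical condition */
    	slot_type = TDMA_4SLOT;	/* flag bit, not a case id */
    rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);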
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 6ad985e98e42..5a181e01ebef 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -498,12 +498,32 @@ static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate)
seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n);
}
+static void rtw_print_rate(struct seq_file *m, u8 rate)
+{
+ switch (rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ rtw_print_cck_rate_txt(m, rate);
+ break;
+ case DESC_RATE6M...DESC_RATE54M:
+ rtw_print_ofdm_rate_txt(m, rate);
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS15:
+ rtw_print_ht_rate_txt(m, rate);
+ break;
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ rtw_print_vht_rate_txt(m, rate);
+ break;
+ default:
+ seq_printf(m, " Unknown rate=0x%x\n", rate);
+ break;
+ }
+}
+
static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_hal *hal = &rtwdev->hal;
- void (*print_rate)(struct seq_file *, u8) = NULL;
u8 path, rate;
struct rtw_power_params pwr_param = {0};
u8 bw = hal->current_band_width;
@@ -528,30 +548,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
rate < DESC_RATEVHT1SS_MCS0)
continue;
- switch (rate) {
- case DESC_RATE1M...DESC_RATE11M:
- print_rate = rtw_print_cck_rate_txt;
- break;
- case DESC_RATE6M...DESC_RATE54M:
- print_rate = rtw_print_ofdm_rate_txt;
- break;
- case DESC_RATEMCS0...DESC_RATEMCS15:
- print_rate = rtw_print_ht_rate_txt;
- break;
- case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
- print_rate = rtw_print_vht_rate_txt;
- break;
- default:
- print_rate = NULL;
- break;
- }
-
rtw_get_tx_power_params(rtwdev, path, rate, bw,
ch, regd, &pwr_param);
seq_printf(m, "%4c ", path + 'A');
- if (print_rate)
- print_rate(m, rate);
+ rtw_print_rate(m, rate);
seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n",
hal->tx_pwr_tbl[path][rate],
hal->tx_pwr_tbl[path][rate],
@@ -567,6 +568,132 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
return 0;
}
+static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_pkt_count *last_cnt = &dm_info->last_pkt_count;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct ewma_evm *ewma_evm = dm_info->ewma_evm;
+ struct ewma_snr *ewma_snr = dm_info->ewma_snr;
+ u8 ss, rate_id;
+
+ seq_puts(m, "==========[Common Info]========\n");
+ seq_printf(m, "Is link = %c\n", rtw_is_assoc(rtwdev) ? 'Y' : 'N');
+ seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
+ seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
+ seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
+ seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+ stats->tx_throughput, stats->rx_throughput);
+
+ seq_puts(m, "==========[Tx Phy Info]========\n");
+ seq_puts(m, "[Tx Rate] = ");
+ rtw_print_rate(m, dm_info->tx_rate);
+ seq_printf(m, "(0x%x)\n\n", dm_info->tx_rate);
+
+ seq_puts(m, "==========[Rx Phy Info]========\n");
+ seq_printf(m, "[Rx Beacon Count] = %u\n", last_cnt->num_bcn_pkt);
+ seq_puts(m, "[Rx Rate] = ");
+ rtw_print_rate(m, dm_info->curr_rx_rate);
+ seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate);
+
+ seq_puts(m, "[Rx Rate Count]:\n");
+ seq_printf(m, " * CCK = {%u, %u, %u, %u}\n",
+ last_cnt->num_qry_pkt[DESC_RATE1M],
+ last_cnt->num_qry_pkt[DESC_RATE2M],
+ last_cnt->num_qry_pkt[DESC_RATE5_5M],
+ last_cnt->num_qry_pkt[DESC_RATE11M]);
+
+ seq_printf(m, " * OFDM = {%u, %u, %u, %u, %u, %u, %u, %u}\n",
+ last_cnt->num_qry_pkt[DESC_RATE6M],
+ last_cnt->num_qry_pkt[DESC_RATE9M],
+ last_cnt->num_qry_pkt[DESC_RATE12M],
+ last_cnt->num_qry_pkt[DESC_RATE18M],
+ last_cnt->num_qry_pkt[DESC_RATE24M],
+ last_cnt->num_qry_pkt[DESC_RATE36M],
+ last_cnt->num_qry_pkt[DESC_RATE48M],
+ last_cnt->num_qry_pkt[DESC_RATE54M]);
+
+ for (ss = 0; ss < efuse->hw_cap.nss; ss++) {
+ rate_id = DESC_RATEMCS0 + ss * 8;
+ seq_printf(m, " * HT_MCS[%u:%u] = {%u, %u, %u, %u, %u, %u, %u, %u}\n",
+ ss * 8, ss * 8 + 7,
+ last_cnt->num_qry_pkt[rate_id],
+ last_cnt->num_qry_pkt[rate_id + 1],
+ last_cnt->num_qry_pkt[rate_id + 2],
+ last_cnt->num_qry_pkt[rate_id + 3],
+ last_cnt->num_qry_pkt[rate_id + 4],
+ last_cnt->num_qry_pkt[rate_id + 5],
+ last_cnt->num_qry_pkt[rate_id + 6],
+ last_cnt->num_qry_pkt[rate_id + 7]);
+ }
+
+ for (ss = 0; ss < efuse->hw_cap.nss; ss++) {
+ rate_id = DESC_RATEVHT1SS_MCS0 + ss * 10;
+ seq_printf(m, " * VHT_MCS-%uss MCS[0:9] = {%u, %u, %u, %u, %u, %u, %u, %u, %u, %u}\n",
+ ss + 1,
+ last_cnt->num_qry_pkt[rate_id],
+ last_cnt->num_qry_pkt[rate_id + 1],
+ last_cnt->num_qry_pkt[rate_id + 2],
+ last_cnt->num_qry_pkt[rate_id + 3],
+ last_cnt->num_qry_pkt[rate_id + 4],
+ last_cnt->num_qry_pkt[rate_id + 5],
+ last_cnt->num_qry_pkt[rate_id + 6],
+ last_cnt->num_qry_pkt[rate_id + 7],
+ last_cnt->num_qry_pkt[rate_id + 8],
+ last_cnt->num_qry_pkt[rate_id + 9]);
+ }
+
+ seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n",
+ dm_info->rssi[RF_PATH_A] - 100,
+ dm_info->rssi[RF_PATH_B] - 100);
+ seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n",
+ dm_info->rx_evm_dbm[RF_PATH_A],
+ dm_info->rx_evm_dbm[RF_PATH_B]);
+ seq_printf(m, "[Rx SNR] = {%d, %d}\n",
+ dm_info->rx_snr[RF_PATH_A],
+ dm_info->rx_snr[RF_PATH_B]);
+ seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n",
+ dm_info->cfo_tail[RF_PATH_A],
+ dm_info->cfo_tail[RF_PATH_B]);
+
+ if (dm_info->curr_rx_rate >= DESC_RATE11M) {
+ seq_puts(m, "[Rx Average Status]:\n");
+ seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_OFDM_A]));
+ seq_printf(m, " * 1SS, EVM: {-%d}, SNR: {%d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_1SS]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_1SS_A]));
+ seq_printf(m, " * 2SS, EVM: {-%d, -%d}, SNR: {%d, %d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_A]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B]));
+ }
+
+ seq_puts(m, "[Rx Counter]:\n");
+ seq_printf(m, " * CCA (CCK, OFDM, Total) = (%u, %u, %u)\n",
+ dm_info->cck_cca_cnt,
+ dm_info->ofdm_cca_cnt,
+ dm_info->total_cca_cnt);
+ seq_printf(m, " * False Alarm (CCK, OFDM, Total) = (%u, %u, %u)\n",
+ dm_info->cck_fa_cnt,
+ dm_info->ofdm_fa_cnt,
+ dm_info->total_fa_cnt);
+ seq_printf(m, " * CCK cnt (ok, err) = (%u, %u)\n",
+ dm_info->cck_ok_cnt, dm_info->cck_err_cnt);
+ seq_printf(m, " * OFDM cnt (ok, err) = (%u, %u)\n",
+ dm_info->ofdm_ok_cnt, dm_info->ofdm_err_cnt);
+ seq_printf(m, " * HT cnt (ok, err) = (%u, %u)\n",
+ dm_info->ht_ok_cnt, dm_info->ht_err_cnt);
+ seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n",
+ dm_info->vht_ok_cnt, dm_info->vht_err_cnt);
+ return 0;
+}
+
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
@@ -653,6 +780,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
.cb_read = rtw_debugfs_get_rsvd_page,
};
+static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
+ .cb_read = rtw_debugfs_get_phy_info,
+};
+
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -682,6 +813,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_rw(rf_read);
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
+ rtw_debugfs_add_r(phy_info);
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 45851cbbd2ab..cd28f675e9cb 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -16,6 +16,8 @@ enum rtw_debug_mask {
RTW_DBG_RFK = 0x00000080,
RTW_DBG_REGD = 0x00000100,
RTW_DBG_DEBUGFS = 0x00000200,
+ RTW_DBG_PS = 0x00000400,
+ RTW_DBG_BF = 0x00000800,
RTW_DBG_ALL = 0xffffffff
};
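
RTW_DBG_PS and RTW_DBG_BF extend the per-category debug mask, so prints such as those added in bf.c are emitted only when the matching bit is enabled in the driver's debug mask. Usage as in this series (the PS line is a hypothetical example):

    rtw_dbg(rtwdev, RTW_DBG_BF, "config as an su bfee\n");
    rtw_dbg(rtwdev, RTW_DBG_PS, "entering LPS\n");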
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index b082e2cc95f5..b8c581161f61 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -7,7 +7,9 @@
#include "fw.h"
#include "tx.h"
#include "reg.h"
+#include "sec.h"
#include "debug.h"
+#include "util.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -27,6 +29,100 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
}
}
+static u16 get_max_amsdu_len(u32 bit_rate)
+{
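+ /* bit_rate comes from cfg80211_calculate_bitrate(), i.e. units of
+  * 100 kbps, so the thresholds below are 55/180/400/700 Mbps;
+  * returning 0 leaves max_rc_amsdu_len unlimited
+  */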
+ /* lower than ofdm, do not aggregate */
+ if (bit_rate < 550)
+ return 1;
+
+ /* lower than 20M 2ss mcs8, make it small */
+ if (bit_rate < 1800)
+ return 1200;
+
+ /* lower than 40M 2ss mcs9, make it medium */
+ if (bit_rate < 4000)
+ return 2600;
+
+ /* not yet 80M 2ss mcs8/9, make it twice regular packet size */
+ if (bit_rate < 7000)
+ return 3500;
+
+ /* unlimited */
+ return 0;
+}
+
+struct rtw_fw_iter_ra_data {
+ struct rtw_dev *rtwdev;
+ u8 *payload;
+};
+
+static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_fw_iter_ra_data *ra_data = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ u8 mac_id, rate, sgi, bw;
+ u8 mcs, nss;
+ u32 bit_rate;
+
+ mac_id = GET_RA_REPORT_MACID(ra_data->payload);
+ if (si->mac_id != mac_id)
+ return;
+
+ si->ra_report.txrate.flags = 0;
+
+ rate = GET_RA_REPORT_RATE(ra_data->payload);
+ sgi = GET_RA_REPORT_SGI(ra_data->payload);
+ bw = GET_RA_REPORT_BW(ra_data->payload);
+
+ if (rate < DESC_RATEMCS0) {
+ si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
+ goto legacy;
+ }
+
+ rtw_desc_to_mcsrate(rate, &mcs, &nss);
+ if (rate >= DESC_RATEVHT1SS_MCS0)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+ else if (rate >= DESC_RATEMCS0)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ if (rate >= DESC_RATEMCS0) {
+ si->ra_report.txrate.mcs = mcs;
+ si->ra_report.txrate.nss = nss;
+ }
+
+ if (sgi)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (bw == RTW_CHANNEL_WIDTH_80)
+ si->ra_report.txrate.bw = RATE_INFO_BW_80;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ si->ra_report.txrate.bw = RATE_INFO_BW_40;
+ else
+ si->ra_report.txrate.bw = RATE_INFO_BW_20;
+
+legacy:
+ bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);
+
+ si->ra_report.desc_rate = rate;
+ si->ra_report.bit_rate = bit_rate;
+
+ sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
+}
+
+static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_fw_iter_ra_data ra_data;
+
+ if (WARN(length < 7, "invalid ra report c2h length\n"))
+ return;
+
+ rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
+ ra_data.rtwdev = rtwdev;
+ ra_data.payload = payload;
+ rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
+}
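
The handler insists on at least 7 payload bytes because GET_RA_REPORT_BW reads byte 6. A self-contained sketch of the same field extraction done by the GET_RA_REPORT_* macros in fw.h, on a hypothetical payload:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* hypothetical 7-byte C2H RA report payload:
     * byte 0: bit 7 = short GI, bits 6..0 = desc rate
     * byte 1: mac_id, byte 6: bandwidth
     */
    uint8_t payload[7] = { 0x87, 0x02, 0, 0, 0, 0, 0x01 };

    uint8_t rate   = payload[0] & 0x7f;         /* GET_RA_REPORT_RATE */
    uint8_t sgi    = (payload[0] & 0x80) >> 7;  /* GET_RA_REPORT_SGI */
    uint8_t mac_id = payload[1];                /* GET_RA_REPORT_MACID */
    uint8_t bw     = payload[6];                /* GET_RA_REPORT_BW */

    printf("macid=%u rate=0x%02x sgi=%u bw=%u\n", mac_id, rate, sgi, bw);
    return 0;
}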
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -49,6 +145,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
+ case C2H_RA_RPT:
+ rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
+ break;
default:
break;
}
@@ -397,6 +496,24 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
return location;
}
+void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 loc_pg, loc_dpk;
+
+ loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
+ loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);
+
+ LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
+ LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
+ LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
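
All three LPS_PG_* setters land in dword 0 of the H2C packet via le32p_replace_bits(). A rough userspace model of that bit packing; replace_bits() is a stand-in for the kernel helper, and placing the command id in bits 7:0 is an assumption based on SET_H2C_CMD_ID_CLASS:

#include <stdio.h>
#include <stdint.h>

/* stand-in for le32p_replace_bits(): overwrite only the masked bits */
static void replace_bits(uint32_t *word, uint32_t value, uint32_t mask)
{
    unsigned int shift = __builtin_ctz(mask);

    *word = (*word & ~mask) | ((value << shift) & mask);
}

int main(void)
{
    uint32_t dword0 = 0x2b;                    /* H2C_CMD_LPS_PG_INFO */

    replace_bits(&dword0, 1, 1u << 8);         /* LPS_PG_SEC_CAM_EN */
    replace_bits(&dword0, 0x10, 0xffu << 16);  /* LPS_PG_INFO_LOC */
    replace_bits(&dword0, 0x14, 0xffu << 24);  /* LPS_PG_DPK_LOC */

    printf("dword0 = 0x%08x\n", dword0);       /* prints 0x1410012b */
    return 0;
}

The kernel variant additionally keeps the word little-endian via __le32, which this model ignores.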
+
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -442,6 +559,58 @@ rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return skb_new;
}
+static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
+ struct rtw_lps_pg_dpk_hdr *dpk_hdr;
+ struct sk_buff *skb;
+ u32 size;
+
+ size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+ dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
+ dpk_hdr->dpk_ch = dpk_info->dpk_ch;
+ dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
+ memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
+ memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
+ memcpy(dpk_hdr->coef, dpk_info->coef, 160);
+
+ return skb;
+}
+
+static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ struct rtw_lps_pg_info_hdr *pg_info_hdr;
+ struct sk_buff *skb;
+ u32 size;
+
+ size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+ pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
+ pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
+ pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ pg_info_hdr->sec_cam_count =
+ rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
+
+ conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
+
+ return skb;
+}
+
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum rtw_rsvd_packet_type type)
@@ -464,6 +633,12 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
case RSVD_QOS_NULL:
skb_new = ieee80211_nullfunc_get(hw, vif, true);
break;
+ case RSVD_LPS_PG_DPK:
+ skb_new = rtw_lps_pg_dpk_get(hw);
+ break;
+ case RSVD_LPS_PG_INFO:
+ skb_new = rtw_lps_pg_info_get(hw, vif);
+ break;
default:
return NULL;
}
@@ -498,9 +673,6 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
{
struct sk_buff *skb = rsvd_pkt->skb;
- if (rsvd_pkt->add_txdesc)
- rtw_fill_rsvd_page_desc(rtwdev, skb);
-
if (page >= 1)
memcpy(buf + page_margin + page_size * (page - 1),
skb->data, skb->len);
@@ -625,16 +797,37 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
if (!iter) {
- rtw_err(rtwdev, "fail to build rsvd packet\n");
+ rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
}
+
+ /* Fill the tx_desc for the rsvd pkt that requires one;
+ * this also increases iter->len by tx_desc_sz.
+ */
+ if (rsvd_pkt->add_txdesc)
+ rtw_fill_rsvd_page_desc(rtwdev, iter);
+
rsvd_pkt->skb = iter;
rsvd_pkt->page = total_page;
- if (rsvd_pkt->add_txdesc)
+
+ /* Reserved pages are downloaded via the TX path, which
+ * generates a tx_desc at the head of the buffer to describe
+ * its length. If the page count of the first rsvd_pkt
+ * (always a beacon; by default the firmware treats the first
+ * page as the beacon content) did not include the size of
+ * that tx_desc, the buffer generated would be smaller than
+ * the actual size of the whole rsvd_page
+ */
+ if (total_page == 0) {
+ if (rsvd_pkt->type != RSVD_BEACON) {
+ rtw_err(rtwdev, "first page should be a beacon\n");
+ goto release_skb;
+ }
total_page += rtw_len_to_page(iter->len + tx_desc_sz,
page_size);
- else
+ } else {
total_page += rtw_len_to_page(iter->len, page_size);
+ }
}
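
Assuming rtw_len_to_page() simply rounds a length up to whole pages, the counting rule can be sketched in isolation; the page and descriptor sizes below are illustrative, not the chip's real values:

#include <stdio.h>
#include <stdint.h>

/* assumed behaviour of rtw_len_to_page(): round up to whole pages */
static uint32_t len_to_page(uint32_t len, uint32_t page_size)
{
    return (len + page_size - 1) / page_size;
}

int main(void)
{
    uint32_t page_size = 128, tx_desc_sz = 40;  /* made-up sizes */
    uint32_t pkt_len[] = { 300, 90, 200 };      /* beacon comes first */
    uint32_t total_page = 0;
    unsigned int i;

    for (i = 0; i < 3; i++) {
        if (total_page == 0)    /* beacon: count its tx_desc too */
            total_page += len_to_page(pkt_len[i] + tx_desc_sz, page_size);
        else
            total_page += len_to_page(pkt_len[i], page_size);
    }
    printf("total_page = %u\n", total_page);    /* 3 + 1 + 2 = 6 */
    return 0;
}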
if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
@@ -647,13 +840,24 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
if (!buf)
goto release_skb;
+ /* Copy the content of each rsvd_pkt into buf, aligned to
+ * page boundaries.
+ *
+ * Note that the first rsvd_pkt is always a beacon, whatever
+ * vif->type is. It does not need a tx_desc here because the
+ * TX path will generate one for it when it is downloaded.
+ */
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
- page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
- }
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ if (page == 0)
+ page += rtw_len_to_page(rsvd_pkt->skb->len +
+ tx_desc_sz, page_size);
+ else
+ page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
+
kfree_skb(rsvd_pkt->skb);
+ }
return buf;
@@ -706,6 +910,11 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
goto free;
}
+ /* Finally, download *only* the beacon again, because the
+ * previous tx_desc describes the whole rsvd page. Downloading
+ * just the beacon replaces that TX desc header, so the beacon
+ * in the rsvd page ends up with a correct tx_desc.
+ */
ret = rtw_download_beacon(rtwdev, vif);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index e95d85bd097f..73d1b9ca8efc 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -11,22 +11,6 @@
/* FW bin information */
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
-#define FW_HDR_VERSION 4
-#define FW_HDR_SUBVERSION 6
-#define FW_HDR_SUBINDEX 7
-#define FW_HDR_MONTH 16
-#define FW_HDR_DATE 17
-#define FW_HDR_HOUR 18
-#define FW_HDR_MIN 19
-#define FW_HDR_YEAR 20
-#define FW_HDR_MEM_USAGE 24
-#define FW_HDR_H2C_FMT_VER 28
-#define FW_HDR_DMEM_ADDR 32
-#define FW_HDR_DMEM_SIZE 36
-#define FW_HDR_IMEM_SIZE 48
-#define FW_HDR_EMEM_SIZE 52
-#define FW_HDR_EMEM_ADDR 56
-#define FW_HDR_IMEM_ADDR 60
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
@@ -36,6 +20,7 @@
enum rtw_c2h_cmd_id {
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
+ C2H_RA_RPT = 0x0c,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
C2H_HW_FEATURE_DUMP = 0xfd,
@@ -58,6 +43,8 @@ enum rtw_rsvd_packet_type {
RSVD_PROBE_RESP,
RSVD_NULL,
RSVD_QOS_NULL,
+ RSVD_LPS_PG_DPK,
+ RSVD_LPS_PG_INFO,
};
enum rtw_fw_rf_type {
@@ -86,6 +73,25 @@ struct rtw_iqk_para {
u8 segment_iqk;
};
+struct rtw_lps_pg_dpk_hdr {
+ u16 dpk_path_ok;
+ u8 dpk_txagc[2];
+ u16 dpk_gs[2];
+ u32 coef[2][20];
+ u8 dpk_ch;
+} __packed;
+
+struct rtw_lps_pg_info_hdr {
+ u8 macid;
+ u8 mbssid;
+ u8 pattern_count;
+ u8 mu_tab_group_id;
+ u8 sec_cam_count;
+ u8 tx_bu_page_count;
+ u16 rsvd;
+ u8 sec_cam[MAX_PG_CAM_BACKUP_NUM];
+} __packed;
+
struct rtw_rsvd_page {
struct list_head list;
struct sk_buff *skb;
@@ -94,10 +100,44 @@ struct rtw_rsvd_page {
bool add_txdesc;
};
+struct rtw_fw_hdr {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version; /* 0x04 */
+ u8 subversion;
+ u8 subindex;
+ __le32 rsvd; /* 0x08 */
+ __le32 rsvd2; /* 0x0C */
+ u8 month; /* 0x10 */
+ u8 day;
+ u8 hour;
+ u8 min;
+ __le16 year; /* 0x14 */
+ __le16 rsvd3;
+ u8 mem_usage; /* 0x18 */
+ u8 rsvd4[3];
+ __le16 h2c_fmt_ver; /* 0x1C */
+ __le16 rsvd5;
+ __le32 dmem_addr; /* 0x20 */
+ __le32 dmem_size;
+ __le32 rsvd6;
+ __le32 rsvd7;
+ __le32 imem_size; /* 0x30 */
+ __le32 emem_size;
+ __le32 emem_addr;
+ __le32 imem_addr;
+} __packed;
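
Since this struct replaces the raw FW_HDR_* byte offsets removed above, its layout must reproduce the same 64-byte header. A quick standalone check; the plain stdint types stand in for the kernel's, so this is a sketch rather than driver code:

#include <stddef.h>
#include <stdint.h>

struct fw_hdr {
    uint16_t signature;
    uint8_t  category, function;
    uint16_t version;
    uint8_t  subversion, subindex;
    uint32_t rsvd, rsvd2;
    uint8_t  month, day, hour, min;
    uint16_t year, rsvd3;
    uint8_t  mem_usage, rsvd4[3];
    uint16_t h2c_fmt_ver, rsvd5;
    uint32_t dmem_addr, dmem_size, rsvd6, rsvd7;
    uint32_t imem_size, emem_size, emem_addr, imem_addr;
} __attribute__((packed));

/* must match the old FW_HDR_* constants and FW_HDR_SIZE */
_Static_assert(offsetof(struct fw_hdr, version) == 4, "");
_Static_assert(offsetof(struct fw_hdr, month) == 16, "");
_Static_assert(offsetof(struct fw_hdr, mem_usage) == 24, "");
_Static_assert(offsetof(struct fw_hdr, h2c_fmt_ver) == 28, "");
_Static_assert(offsetof(struct fw_hdr, dmem_addr) == 32, "");
_Static_assert(offsetof(struct fw_hdr, imem_size) == 48, "");
_Static_assert(offsetof(struct fw_hdr, imem_addr) == 60, "");
_Static_assert(sizeof(struct fw_hdr) == 64, "");

int main(void) { return 0; }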
+
/* C2H */
#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f)
+#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7)
+#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6])
+#define GET_RA_REPORT_MACID(c2h_payload) (c2h_payload[1])
+
/* PKT H2C */
#define H2C_PKT_CMD_ID 0xFF
#define H2C_PKT_CATEGORY 0x01
@@ -146,6 +186,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
#define H2C_CMD_SET_PWR_MODE 0x20
+#define H2C_CMD_LPS_PG_INFO 0x2b
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
@@ -177,6 +218,12 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5))
#define SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define LPS_PG_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define LPS_PG_DPK_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
@@ -270,6 +317,7 @@ void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_set_pg_info(struct rtw_dev *rtwdev);
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw);
void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index aba329c9d0cf..3d91aea942c3 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -13,6 +13,8 @@ struct rtw_hci_ops {
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
+ void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
+ void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -47,6 +49,16 @@ static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
rtwdev->hci.ops->stop(rtwdev);
}
+static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ rtwdev->hci.ops->deep_ps(rtwdev, enter);
+}
+
+static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ rtwdev->hci.ops->link_ps(rtwdev, enter);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index b61b073031e5..507970387b2a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -47,7 +47,7 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
value8 = value8 & ~BIT_CHECK_CCK_EN;
- if (channel > 35)
+ if (IS_CH_5G_BAND(channel))
value8 |= BIT_CHECK_CCK_EN;
rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
@@ -261,7 +261,7 @@ static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
- rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
@@ -312,15 +312,16 @@ void rtw_mac_power_off(struct rtw_dev *rtwdev)
static bool check_firmware_size(const u8 *data, u32 size)
{
+ const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
u32 dmem_size;
u32 imem_size;
u32 emem_size;
u32 real_size;
- dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
- imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
- emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
- le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size = le32_to_cpu(fw_hdr->dmem_size);
+ imem_size = le32_to_cpu(fw_hdr->imem_size);
+ emem_size = (fw_hdr->mem_usage & BIT(4)) ?
+ le32_to_cpu(fw_hdr->emem_size) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
@@ -566,27 +567,10 @@ download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
return 0;
}
-static void update_firmware_info(struct rtw_dev *rtwdev,
- struct rtw_fw_state *fw)
-{
- const u8 *data = fw->firmware->data;
-
- fw->h2c_version =
- le16_to_cpu(*((__le16 *)(data + FW_HDR_H2C_FMT_VER)));
- fw->version =
- le16_to_cpu(*((__le16 *)(data + FW_HDR_VERSION)));
- fw->sub_version = *(data + FW_HDR_SUBVERSION);
- fw->sub_index = *(data + FW_HDR_SUBINDEX);
-
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw h2c version: %x\n", fw->h2c_version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw version: %x\n", fw->version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub version: %x\n", fw->sub_version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub index: %x\n", fw->sub_index);
-}
-
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
+ const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
const u8 *cur_fw;
u16 val;
u32 imem_size;
@@ -595,10 +579,10 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
u32 addr;
int ret;
- dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
- imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
- emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
- le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size = le32_to_cpu(fw_hdr->dmem_size);
+ imem_size = le32_to_cpu(fw_hdr->imem_size);
+ emem_size = (fw_hdr->mem_usage & BIT(4)) ?
+ le32_to_cpu(fw_hdr->emem_size) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
@@ -608,14 +592,14 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
cur_fw = data + FW_HDR_SIZE;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->dmem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
if (ret)
return ret;
cur_fw = data + FW_HDR_SIZE + dmem_size;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->imem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
if (ret)
@@ -623,7 +607,7 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
if (emem_size) {
cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->emem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
emem_size);
@@ -699,15 +683,13 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
if (ret)
goto dlfw_fail;
- update_firmware_info(rtwdev, fw);
-
/* reset desc and index */
rtw_hci_setup(rtwdev);
rtwdev->h2c.last_box_num = 0;
rtwdev->h2c.seq = 0;
- rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
+ set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
return 0;
@@ -719,6 +701,93 @@ dlfw_fail:
return ret;
}
+static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
+{
+ struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+ u32 prio_queues = 0;
+
+ if (queues & BIT(IEEE80211_AC_VO))
+ prio_queues |= BIT(rqpn->dma_map_vo);
+ if (queues & BIT(IEEE80211_AC_VI))
+ prio_queues |= BIT(rqpn->dma_map_vi);
+ if (queues & BIT(IEEE80211_AC_BE))
+ prio_queues |= BIT(rqpn->dma_map_be);
+ if (queues & BIT(IEEE80211_AC_BK))
+ prio_queues |= BIT(rqpn->dma_map_bk);
+
+ return prio_queues;
+}
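
A standalone illustration of the AC-to-priority-queue translation; the rqpn mapping below is hypothetical:

#include <stdio.h>
#include <stdint.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };  /* mac80211 AC indices 0..3 */

int main(void)
{
    /* hypothetical rqpn: which DMA priority queue each AC maps to */
    uint8_t map_vo = 3, map_vi = 2, map_be = 1, map_bk = 0;
    uint32_t queues = (1u << AC_VO) | (1u << AC_BE);  /* flush VO + BE */
    uint32_t prio_queues = 0;

    if (queues & (1u << AC_VO))
        prio_queues |= 1u << map_vo;
    if (queues & (1u << AC_VI))
        prio_queues |= 1u << map_vi;
    if (queues & (1u << AC_BE))
        prio_queues |= 1u << map_be;
    if (queues & (1u << AC_BK))
        prio_queues |= 1u << map_bk;

    printf("prio_queues = 0x%x\n", prio_queues);  /* 0xa: queues 3 and 1 */
    return 0;
}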
+
+static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
+ u32 prio_queue, bool drop)
+{
+ u32 addr;
+ u16 avail_page, rsvd_page;
+ int i;
+
+ switch (prio_queue) {
+ case RTW_DMA_MAPPING_EXTRA:
+ addr = REG_FIFOPAGE_INFO_4;
+ break;
+ case RTW_DMA_MAPPING_LOW:
+ addr = REG_FIFOPAGE_INFO_2;
+ break;
+ case RTW_DMA_MAPPING_NORMAL:
+ addr = REG_FIFOPAGE_INFO_3;
+ break;
+ case RTW_DMA_MAPPING_HIGH:
+ addr = REG_FIFOPAGE_INFO_1;
+ break;
+ default:
+ return;
+ }
+
+ /* poll for up to 100 msecs until all of the reserved pages are available */
+ for (i = 0; i < 5; i++) {
+ rsvd_page = rtw_read16(rtwdev, addr);
+ avail_page = rtw_read16(rtwdev, addr + 2);
+ if (rsvd_page == avail_page)
+ return;
+
+ msleep(20);
+ }
+
+ /* the priority queue is still not empty, so throw a warning
+ *
+ * Note that flushing the tx queue under heavy traffic (e.g. a
+ * 100 Mbps upload) may drop some packets, and flushing a full
+ * priority queue can take around 2 seconds.
+ */
+ if (!drop)
+ rtw_warn(rtwdev, "timed out flushing queue %d\n", prio_queue);
+}
+
+static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
+ u32 prio_queues, bool drop)
+{
+ u32 q;
+
+ for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
+ if (prio_queues & BIT(q))
+ __rtw_mac_flush_prio_queue(rtwdev, q, drop);
+}
+
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+ u32 prio_queues = 0;
+
+ /* If flushing is requested for all of the hardware queues,
+ * or the priority queues are not mapped yet,
+ * flush all of the priority queues
+ */
+ if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
+ prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
+ else
+ prio_queues = get_priority_queues(rtwdev, queues);
+
+ rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
+}
+
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -743,6 +812,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
return -EINVAL;
}
+ rtwdev->fifo.rqpn = rqpn;
txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index efe6f731f240..592dc830160c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -31,5 +31,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
+
+static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
+{
+ rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index e5e3605bb693..34a1c3b53cd4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -10,6 +10,7 @@
#include "coex.h"
#include "ps.h"
#include "reg.h"
+#include "bf.h"
#include "debug.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
@@ -17,19 +18,30 @@ static void rtw_ops_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_tx_pkt_info pkt_info = {0};
- if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
- goto out;
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
- rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
- if (rtw_hci_tx(rtwdev, &pkt_info, skb))
- goto out;
+ rtw_tx(rtwdev, control, skb);
+}
- return;
+static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;
-out:
- ieee80211_free_txskb(hw, skb);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ return;
+
+ spin_lock_bh(&rtwdev->txq_lock);
+ if (list_empty(&rtwtxq->list))
+ list_add_tail(&rtwtxq->list, &rtwdev->txqs);
+ spin_unlock_bh(&rtwdev->txq_lock);
+
+ tasklet_schedule(&rtwdev->tx_tasklet);
}
static int rtw_ops_start(struct ieee80211_hw *hw)
@@ -60,6 +72,8 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (hw->conf.flags & IEEE80211_CONF_IDLE) {
rtw_enter_ips(rtwdev);
@@ -72,6 +86,15 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ rtwdev->ps_enabled = true;
+ } else {
+ rtwdev->ps_enabled = false;
+ rtw_leave_lps(rtwdev);
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
@@ -135,10 +158,14 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
rtwvif->in_lps = false;
+ memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
+ rtw_txq_init(rtwdev, vif->txq);
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
@@ -181,6 +208,10 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
+ rtw_txq_cleanup(rtwdev, vif->txq);
+
eth_zero_addr(rtwvif->mac_addr);
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = RTW_NET_NO_LINK;
@@ -204,6 +235,8 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
rtwdev->hal.rcr |= BIT_AM | BIT_AB;
@@ -238,6 +271,54 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+/* Only one group of EDCA parameters is used for now */
+static const u32 ac_to_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+};
+
+static u8 rtw_aifsn_to_aifs(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif, u8 aifsn)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u8 slot_time;
+ u8 sifs;
+
+ slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+ sifs = rtwdev->hal.current_band_type == RTW_BAND_5G ? 16 : 10;
+
+ return aifsn * slot_time + sifs;
+}
+
+static void __rtw_conf_tx(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif, u16 ac)
+{
+ struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
+ u32 edca_param = ac_to_edca_param[ac];
+ u8 ecw_max, ecw_min;
+ u8 aifs;
+
+ /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */
+ ecw_max = ilog2(params->cw_max + 1);
+ ecw_min = ilog2(params->cw_min + 1);
+ aifs = rtw_aifsn_to_aifs(rtwdev, rtwvif, params->aifs);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_TXOP_LMT, params->txop);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMAX, ecw_max);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMIN, ecw_min);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_AIFS, aifs);
+}
+
+static void rtw_conf_tx(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ u16 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ __rtw_conf_tx(rtwdev, rtwvif, ac);
+}
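
A worked example of the conversions above, using common 802.11 numbers (the EDCA parameter values are assumed, not read from hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* typical best-effort parameters, 2.4 GHz, short slots */
    uint16_t cw_min = 15, cw_max = 1023;
    uint8_t aifsn = 3, slot_time = 9, sifs = 10;

    /* 2^ecw - 1 = cw, so ecw = log2(cw + 1) */
    uint8_t ecw_min = 31 - __builtin_clz(cw_min + 1u);  /* 4 */
    uint8_t ecw_max = 31 - __builtin_clz(cw_max + 1u);  /* 10 */
    uint8_t aifs = aifsn * slot_time + sifs;            /* 37 us */

    printf("ecw_min=%u ecw_max=%u aifs=%u us\n", ecw_min, ecw_max, aifs);
    return 0;
}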
+
static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
@@ -249,6 +330,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed & BSS_CHANGED_ASSOC) {
struct rtw_chip_info *chip = rtwdev->chip;
enum rtw_net_type net_type;
@@ -262,13 +345,19 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
+ if (rtw_bf_support)
+ rtw_bf_assoc(rtwdev, vif, conf);
} else {
+ rtw_leave_lps(rtwdev);
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
rtw_reset_rsvd_page(rtwdev);
+ rtw_bf_disassoc(rtwdev, vif, conf);
}
rtwvif->net_type = net_type;
@@ -284,11 +373,39 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON)
rtw_fw_download_rsvd_page(rtwdev, vif);
+ if (changed & BSS_CHANGED_MU_GROUPS) {
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->set_gid_table(rtwdev, vif, conf);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT)
+ rtw_conf_tx(rtwdev, rtwvif);
+
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ rtw_leave_lps_deep(rtwdev);
+
+ rtwvif->tx_params[ac] = *params;
+ __rtw_conf_tx(rtwdev, rtwvif, ac);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
@@ -311,6 +428,7 @@ static int rtw_ops_sta_add(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
int ret = 0;
mutex_lock(&rtwdev->mutex);
@@ -325,6 +443,8 @@ static int rtw_ops_sta_add(struct ieee80211_hw *hw,
si->vif = vif;
si->init_ra_lv = 1;
ewma_rssi_init(&si->avg_rssi);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_init(rtwdev, sta->txq[i]);
rtw_update_sta_info(rtwdev, si);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -345,12 +465,18 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
mutex_lock(&rtwdev->mutex);
rtw_release_macid(rtwdev, si->mac_id);
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_cleanup(rtwdev, sta->txq[i]);
+
+ kfree(si->mask);
+
rtwdev->sta_cnt--;
rtw_info(rtwdev, "sta %pM with macid %d left\n",
@@ -397,6 +523,8 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
hw_key_idx = rtw_sec_get_free_cam(sec);
} else {
@@ -418,10 +546,15 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
hw_key_type, hw_key_idx);
break;
case DISABLE_KEY:
+ rtw_mac_flush_all_queues(rtwdev, false);
rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
break;
}
+ /* download new cam settings for PG to backup */
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+
out:
mutex_unlock(&rtwdev->mutex);
@@ -434,17 +567,21 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
{
struct ieee80211_sta *sta = params->sta;
u16 tid = params->tid;
+ struct ieee80211_txq *txq = sta->txq[tid];
+ struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ clear_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ set_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
+ break;
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
break;
@@ -464,18 +601,18 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_leave_lps(rtwdev, rtwvif);
-
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps(rtwdev);
+
ether_addr_copy(rtwvif->mac_addr, mac_addr);
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
- rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
- rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
+ set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
+ set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
mutex_unlock(&rtwdev->mutex);
}
@@ -489,8 +626,8 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
- rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
+ clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
ether_addr_copy(rtwvif->mac_addr, vif->addr);
config |= PORT_SET_MAC_ADDR;
@@ -508,12 +645,99 @@ static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtwdev->rts_threshold = value;
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static void rtw_ops_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ sinfo->txrate = si->ra_report.txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static void rtw_ops_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
+ rtw_mac_flush_queues(rtwdev, queues, drop);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+struct rtw_iter_bitrate_mask_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_vif *vif;
+ const struct cfg80211_bitrate_mask *mask;
+};
+
+static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_iter_bitrate_mask_data *br_data = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ if (si->vif != br_data->vif)
+ return;
+
+ /* free previous mask setting */
+ kfree(si->mask);
+ si->mask = kmemdup(br_data->mask, sizeof(struct cfg80211_bitrate_mask),
+ GFP_ATOMIC);
+ if (!si->mask) {
+ si->use_cfg_mask = false;
+ return;
+ }
+
+ si->use_cfg_mask = true;
+ rtw_update_sta_info(br_data->rtwdev, si);
+}
+
+static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw_iter_bitrate_mask_data br_data;
+
+ br_data.rtwdev = rtwdev;
+ br_data.vif = vif;
+ br_data.mask = mask;
+ rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
+}
+
+static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_ra_mask_info_update(rtwdev, vif, mask);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
+ .wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
.stop = rtw_ops_stop,
.config = rtw_ops_config,
@@ -521,6 +745,7 @@ const struct ieee80211_ops rtw_ops = {
.remove_interface = rtw_ops_remove_interface,
.configure_filter = rtw_ops_configure_filter,
.bss_info_changed = rtw_ops_bss_info_changed,
+ .conf_tx = rtw_ops_conf_tx,
.sta_add = rtw_ops_sta_add,
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
@@ -528,5 +753,9 @@ const struct ieee80211_ops rtw_ops = {
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
+ .set_rts_threshold = rtw_ops_set_rts_threshold,
+ .sta_statistics = rtw_ops_sta_statistics,
+ .flush = rtw_ops_flush,
+ .set_bitrate_mask = rtw_ops_set_bitrate_mask,
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6dd457741b15..ae61415e1665 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -12,16 +12,22 @@
#include "phy.h"
#include "reg.h"
#include "efuse.h"
+#include "tx.h"
#include "debug.h"
+#include "bf.h"
-static bool rtw_fw_support_lps;
+unsigned int rtw_fw_lps_deep_mode;
+EXPORT_SYMBOL(rtw_fw_lps_deep_mode);
+bool rtw_bf_support = true;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
-module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
+module_param_named(lps_deep_mode, rtw_fw_lps_deep_mode, uint, 0644);
+module_param_named(support_bf, rtw_bf_support, bool, 0644);
module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
-MODULE_PARM_DESC(support_lps, "Set Y to enable Leisure Power Save support, to turn radio off between beacons");
+MODULE_PARM_DESC(lps_deep_mode, "Deeper PS mode. If 0, deep PS is disabled");
+MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support");
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static struct ieee80211_channel rtw_channeltable_2g[] = {
@@ -84,6 +90,18 @@ static struct ieee80211_rate rtw_ratetable[] = {
{.bitrate = 540, .hw_value = 0x0b,},
};
+u16 rtw_desc_to_bitrate(u8 desc_rate)
+{
+ struct ieee80211_rate rate;
+
+ if (WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n"))
+ return 0;
+
+ rate = rtw_ratetable[desc_rate];
+
+ return rate.bitrate;
+}
+
static struct ieee80211_supported_band rtw_band_2ghz = {
.band = NL80211_BAND_2GHZ,
@@ -112,29 +130,40 @@ static struct ieee80211_supported_band rtw_band_5ghz = {
};
struct rtw_watch_dog_iter_data {
+ struct rtw_dev *rtwdev;
struct rtw_vif *rtwvif;
- bool active;
- u8 assoc_cnt;
};
+static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_bf_info *bf_info = &rtwdev->bf_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 fix_rate_enable = 0;
+ u8 new_csi_rate_idx;
+
+ if (rtwvif->bfee.role != RTW_BFEE_SU &&
+ rtwvif->bfee.role != RTW_BFEE_MU)
+ return;
+
+ chip->ops->cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
+ bf_info->cur_csi_rpt_rate,
+ fix_rate_enable, &new_csi_rate_idx);
+
+ if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate)
+ bf_info->cur_csi_rpt_rate = new_csi_rate_idx;
+}
+
static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rtw_watch_dog_iter_data *iter_data = data;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
- if (vif->type == NL80211_IFTYPE_STATION) {
- if (vif->bss_conf.assoc) {
- iter_data->assoc_cnt++;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->bss_conf.assoc)
iter_data->rtwvif = rtwvif;
- }
- if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD ||
- rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
- iter_data->active = true;
- } else {
- /* only STATION mode can enter lps */
- iter_data->active = true;
- }
+
+ rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif);
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
@@ -149,46 +178,74 @@ static void rtw_watch_dog_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
watch_dog_work.work);
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
struct rtw_watch_dog_iter_data data = {};
- bool busy_traffic = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ bool ps_active;
- if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
- return;
+ mutex_lock(&rtwdev->mutex);
+
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ goto unlock;
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100)
- rtw_flag_set(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
else
- rtw_flag_clear(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
- if (busy_traffic != rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC))
+ if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev);
+ if (stats->tx_cnt > RTW_LPS_THRESHOLD ||
+ stats->rx_cnt > RTW_LPS_THRESHOLD)
+ ps_active = true;
+ else
+ ps_active = false;
+
+ ewma_tp_add(&stats->tx_ewma_tp,
+ (u32)(stats->tx_unicast >> RTW_TP_SHIFT));
+ ewma_tp_add(&stats->rx_ewma_tp,
+ (u32)(stats->rx_unicast >> RTW_TP_SHIFT));
+ stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
+ stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
+
/* reset tx/rx statistics */
- rtwdev->stats.tx_unicast = 0;
- rtwdev->stats.rx_unicast = 0;
- rtwdev->stats.tx_cnt = 0;
- rtwdev->stats.rx_cnt = 0;
+ stats->tx_unicast = 0;
+ stats->rx_unicast = 0;
+ stats->tx_cnt = 0;
+ stats->rx_cnt = 0;
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ goto unlock;
+
+ /* make sure BB/RF is working for dynamic mechanisms */
+ rtw_leave_lps(rtwdev);
+
+ rtw_phy_dynamic_mechanism(rtwdev);
+ data.rtwdev = rtwdev;
/* use atomic version to avoid taking local->iflist_mtx mutex */
rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
/* fw supports entering lps with only one associated station; if more
 * than one station is associated to the AP, we cannot enter lps,
 * because fw does not handle overlapped beacon intervals
+ *
+ * mac80211 iterates the vifs and determines whether the driver may
+ * enter ps by passing IEEE80211_CONF_PS to us; all we need to do is
+ * get that vif and check whether the device has more traffic than
+ * the threshold.
+ */
- if (rtw_fw_support_lps &&
- data.rtwvif && !data.active && data.assoc_cnt == 1)
- rtw_enter_lps(rtwdev, data.rtwvif);
-
- if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
- return;
-
- rtw_phy_dynamic_mechanism(rtwdev);
+ if (rtwdev->ps_enabled && data.rtwvif && !ps_active)
+ rtw_enter_lps(rtwdev, data.rtwvif->port);
rtwdev->watch_dog_cnt++;
+
+unlock:
+ mutex_unlock(&rtwdev->mutex);
}
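
The watchdog runs every 2 seconds, so tx_unicast/rx_unicast hold bytes per 2 s window when they are fed into the EWMA above. RTW_TP_SHIFT (18, defined in main.h) converts that to approximate Mbps: the exact divisor is 2 s * 10^6 / 8 = 250000 bytes per Mbps, and 2^18 = 262144 is a cheap shift that comes close. A small check of the approximation:

#include <stdio.h>
#include <stdint.h>

#define RTW_TP_SHIFT 18  /* bytes per 2 s window -> ~Mbps */

int main(void)
{
    uint64_t bytes_per_2s = 25000000;  /* 25 MB in a 2 s window */

    uint64_t exact  = bytes_per_2s / 250000;          /* 100 Mbps */
    uint64_t approx = bytes_per_2s >> RTW_TP_SHIFT;   /* 95 Mbps */

    printf("exact=%llu Mbps approx=%llu Mbps\n",
           (unsigned long long)exact, (unsigned long long)approx);
    return 0;
}

The shift under-reports by about 5%, which seems acceptable for the coarse traffic thresholds it feeds.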
static void rtw_c2h_work(struct work_struct *work)
@@ -203,6 +260,40 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+struct rtw_txq_ba_iter_data {
+};
+
+static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int ret;
+ u8 tid;
+
+ tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS);
+ while (tid != IEEE80211_NUM_TIDS) {
+ clear_bit(tid, si->tid_ba);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+ if (ret == -EINVAL) {
+ struct ieee80211_txq *txq;
+ struct rtw_txq *rtwtxq;
+
+ txq = sta->txq[tid];
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags);
+ }
+
+ tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS);
+ }
+}
+
+static void rtw_txq_ba_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work);
+ struct rtw_txq_ba_iter_data data;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
@@ -311,7 +402,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
if (hal->current_band_type == RTW_BAND_5G) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
} else {
- if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G);
else
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN);
@@ -529,12 +620,71 @@ static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
+static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
+ struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable,
+ u8 wireless_set)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct cfg80211_bitrate_mask *mask = si->mask;
+ u64 cfg_mask = GENMASK_ULL(63, 0);
+ u8 rssi_level, band;
+
+ if (wireless_set != WIRELESS_CCK) {
+ rssi_level = si->rssi_level;
+ if (rssi_level == 0)
+ ra_mask &= 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ ra_mask &= 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ ra_mask &= 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ ra_mask &= 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ ra_mask &= 0xffffffffffff8f80ULL;
+ else if (rssi_level >= 5)
+ ra_mask &= 0xffffffffffff0f00ULL;
+ }
+
+ if (!si->use_cfg_mask)
+ return ra_mask;
+
+ band = hal->current_band_type;
+ if (band == RTW_BAND_2G) {
+ band = NL80211_BAND_2GHZ;
+ cfg_mask = mask->control[band].legacy;
+ } else if (band == RTW_BAND_5G) {
+ band = NL80211_BAND_5GHZ;
+ cfg_mask = u64_encode_bits(mask->control[band].legacy,
+ RA_MASK_OFDM_RATES);
+ }
+
+ if (!is_vht_enable) {
+ if (ra_mask & RA_MASK_HT_RATES_1SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
+ RA_MASK_HT_RATES_1SS);
+ if (ra_mask & RA_MASK_HT_RATES_2SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
+ RA_MASK_HT_RATES_2SS);
+ } else {
+ if (ra_mask & RA_MASK_VHT_RATES_1SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
+ RA_MASK_VHT_RATES_1SS);
+ if (ra_mask & RA_MASK_VHT_RATES_2SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
+ RA_MASK_VHT_RATES_2SS);
+ }
+
+ ra_mask &= cfg_mask;
+
+ return ra_mask;
+}
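
The per-level masks strip progressively more of the lowest rates as the RSSI improves, before the cfg80211 bitrate mask is folded in. A standalone illustration; the 20-bit ra_mask (one bit per desc rate, bit 0 = CCK 1M) is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t ra_mask = 0xfffffULL;  /* pretend 20 supported rates */

    /* the per-RSSI-level masks from rtw_update_rate_mask() */
    const uint64_t level_mask[] = {
        0xffffffffffffffffULL,  /* level 0: keep everything */
        0xfffffffffffffff0ULL,  /* level 1: drop the 4 lowest */
        0xffffffffffffefe0ULL,
        0xffffffffffffcfc0ULL,
        0xffffffffffff8f80ULL,
        0xffffffffffff0f00ULL,  /* level 5+: high rates only */
    };
    unsigned int lvl;

    for (lvl = 0; lvl < 6; lvl++)
        printf("rssi level %u -> ra_mask 0x%llx\n", lvl,
               (unsigned long long)(ra_mask & level_mask[lvl]));
    return 0;
}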
+
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
struct ieee80211_sta *sta = si->sta;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_hal *hal = &rtwdev->hal;
- u8 rssi_level;
u8 wireless_set;
u8 bw_mode;
u8 rate_id;
@@ -627,21 +777,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
+ wireless_set);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
@@ -737,7 +874,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
- rtw_flag_set(rtwdev, RTW_FLAG_RUNNING);
+ set_bit(RTW_FLAG_RUNNING, rtwdev->flags);
return 0;
}
@@ -752,8 +889,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
{
struct rtw_coex *coex = &rtwdev->coex;
- rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
- rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
+ clear_bit(RTW_FLAG_RUNNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
@@ -814,6 +951,12 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
0;
+
+ vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ vht_cap->cap |= (rtwdev->hal.bfee_sts_cap <<
+ IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
@@ -879,12 +1022,25 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
struct rtw_dev *rtwdev = context;
struct rtw_fw_state *fw = &rtwdev->fw;
+ const struct rtw_fw_hdr *fw_hdr;
- if (!firmware)
+ if (!firmware || !firmware->data) {
rtw_err(rtwdev, "failed to request firmware\n");
+ complete_all(&fw->completion);
+ return;
+ }
+
+ fw_hdr = (const struct rtw_fw_hdr *)firmware->data;
+ fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
+ fw->version = le16_to_cpu(fw_hdr->version);
+ fw->sub_version = fw_hdr->subversion;
+ fw->sub_index = fw_hdr->subindex;
fw->firmware = firmware;
complete_all(&fw->completion);
+
+ rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n",
+ fw->version, fw->sub_version, fw->sub_index, fw->h2c_version);
}
static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
@@ -914,6 +1070,7 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtwdev->hci.rpwm_addr = 0x03d9;
+ rtwdev->hci.cpwm_addr = 0x03da;
break;
default:
rtw_err(rtwdev, "unsupported hci type\n");
@@ -948,6 +1105,8 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
/* default use ack */
rtwdev->hal.rcr |= BIT_VHT_DACK;
+ hal->bfee_sts_cap = 3;
+
return ret;
}
@@ -1020,7 +1179,8 @@ static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
- if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
+ if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE ||
+ efuse->hw_cap.nss > rtwdev->hal.rf_path_num)
efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
rtw_dbg(rtwdev, RTW_DBG_EFUSE,
@@ -1047,19 +1207,19 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
/* power on mac to read efuse */
ret = rtw_chip_efuse_enable(rtwdev);
if (ret)
- goto out;
+ goto out_unlock;
ret = rtw_parse_efuse_map(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
ret = rtw_dump_hw_feature(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
ret = rtw_check_supported_rfe(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
if (efuse->crystal_cap == 0xff)
efuse->crystal_cap = 0;
@@ -1086,9 +1246,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+out_disable:
rtw_chip_efuse_disable(rtwdev);
-out:
+out_unlock:
mutex_unlock(&rtwdev->mutex);
return ret;
}
@@ -1141,22 +1302,41 @@ err_out:
}
EXPORT_SYMBOL(rtw_chip_info_setup);
+static void rtw_stats_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ int i;
+
+ ewma_tp_init(&stats->tx_ewma_tp);
+ ewma_tp_init(&stats->rx_ewma_tp);
+
+ for (i = 0; i < RTW_EVM_NUM; i++)
+ ewma_evm_init(&dm_info->ewma_evm[i]);
+ for (i = 0; i < RTW_SNR_NUM; i++)
+ ewma_snr_init(&dm_info->ewma_snr[i]);
+}
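
DECLARE_EWMA(tp, 10, 2) keeps the running average internally scaled by 2^10 and gives each new sample a weight of 1/2. A rough userspace model of how ewma_tp_add()/ewma_tp_read() behave; this is a sketch of the semantics, not the kernel implementation:

#include <stdio.h>
#include <stdint.h>

/* model of DECLARE_EWMA(tp, 10, 2): scaled by 2^10, sample weight 1/2 */
struct ewma_tp { uint64_t internal; };

static void ewma_tp_add(struct ewma_tp *e, uint64_t val)
{
    uint64_t scaled = val << 10;

    /* the first sample seeds the average directly */
    e->internal = e->internal ? (e->internal + scaled) / 2 : scaled;
}

static uint64_t ewma_tp_read(const struct ewma_tp *e)
{
    return e->internal >> 10;
}

int main(void)
{
    struct ewma_tp tp = { 0 };
    uint64_t samples[] = { 100, 100, 0, 0 };  /* Mbps per 2 s window */
    unsigned int i;

    for (i = 0; i < 4; i++) {
        ewma_tp_add(&tp, samples[i]);
        printf("sample %u -> ewma %llu\n", (unsigned)samples[i],
               (unsigned long long)ewma_tp_read(&tp));
    }
    return 0;
}

The decay is visible in the output: 100, 100, 50, 25.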
+
int rtw_core_init(struct rtw_dev *rtwdev)
{
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
int ret;
INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
+ INIT_LIST_HEAD(&rtwdev->txqs);
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
+ tasklet_init(&rtwdev->tx_tasklet, rtw_tx_tasklet,
+ (unsigned long)rtwdev);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
- INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
@@ -1164,6 +1344,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
+ spin_lock_init(&rtwdev->txq_lock);
spin_lock_init(&rtwdev->tx_report.q_lock);
mutex_init(&rtwdev->mutex);
@@ -1175,11 +1356,17 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
+ if (!(BIT(rtw_fw_lps_deep_mode) & chip->lps_deep_mode_supported))
+ rtwdev->lps_conf.deep_mode = LPS_DEEP_MODE_NONE;
+ else
+ rtwdev->lps_conf.deep_mode = rtw_fw_lps_deep_mode;
mutex_lock(&rtwdev->mutex);
rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
mutex_unlock(&rtwdev->mutex);
+ rtw_stats_init(rtwdev);
+
/* default rx filter setting */
rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
@@ -1204,6 +1391,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
if (fw->firmware)
release_firmware(fw->firmware);
+ tasklet_kill(&rtwdev->tx_tasklet);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
@@ -1229,6 +1417,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->extra_tx_headroom = max_tx_headroom;
hw->queues = IEEE80211_NUM_ACS;
+ hw->txq_data_size = sizeof(struct rtw_txq);
hw->sta_data_size = sizeof(struct rtw_sta_info);
hw->vif_data_size = sizeof(struct rtw_vif);
@@ -1241,6 +1430,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, TX_AMSDU);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1252,6 +1443,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
@@ -1268,6 +1461,9 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_debugfs_init(rtwdev);
+ rtwdev->bf_info.bfer_mu_cnt = 0;
+ rtwdev->bf_info.bfer_su_cnt = 0;
+
return 0;
}
EXPORT_SYMBOL(rtw_register_hw);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index bede3f38516e..d012eefcd0da 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -11,11 +11,13 @@
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/interrupt.h>
#include "util.h"
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
+#define MAX_PG_CAM_BACKUP_NUM 8
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
@@ -27,6 +29,10 @@
#define RTW_RF_PATH_MAX 4
#define HW_FEATURE_LEN 13
+#define RTW_TP_SHIFT 18 /* bytes/2s --> Mbps */
+
+extern bool rtw_bf_support;
+extern unsigned int rtw_fw_lps_deep_mode;
extern unsigned int rtw_debug_mask;
extern const struct ieee80211_ops rtw_ops;
extern struct rtw_chip_info rtw8822b_hw_spec;
@@ -50,10 +56,24 @@ struct rtw_hci {
enum rtw_hci_type type;
u32 rpwm_addr;
+ u32 cpwm_addr;
u8 bulkout_num;
};
+#define IS_CH_5G_BAND_1(channel) ((channel) >= 36 && (channel) <= 48)
+#define IS_CH_5G_BAND_2(channel) ((channel) >= 52 && (channel) <= 64)
+#define IS_CH_5G_BAND_3(channel) ((channel) >= 100 && (channel) <= 144)
+#define IS_CH_5G_BAND_4(channel) ((channel) >= 149 && (channel) <= 177)
+
+#define IS_CH_5G_BAND_MID(channel) \
+ (IS_CH_5G_BAND_2(channel) || IS_CH_5G_BAND_3(channel))
+
+#define IS_CH_2G_BAND(channel) ((channel) <= 14)
+#define IS_CH_5G_BAND(channel) \
+ (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel) || \
+ IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel))
+
enum rtw_supported_band {
RTW_BAND_2G = 1 << 0,
RTW_BAND_5G = 1 << 1,
@@ -303,18 +323,50 @@ enum rtw_regulatory_domains {
RTW_REGD_MAX
};
+enum rtw_txq_flags {
+ RTW_TXQ_AMPDU,
+ RTW_TXQ_BLOCK_BA,
+};
+
enum rtw_flags {
RTW_FLAG_RUNNING,
RTW_FLAG_FW_RUNNING,
RTW_FLAG_SCANNING,
RTW_FLAG_INACTIVE_PS,
RTW_FLAG_LEISURE_PS,
+ RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
NUM_OF_RTW_FLAGS,
};
+enum rtw_evm {
+ RTW_EVM_OFDM = 0,
+ RTW_EVM_1SS,
+ RTW_EVM_2SS_A,
+ RTW_EVM_2SS_B,
+ /* keep it last */
+ RTW_EVM_NUM
+};
+
+enum rtw_snr {
+ RTW_SNR_OFDM_A = 0,
+ RTW_SNR_OFDM_B,
+ RTW_SNR_OFDM_C,
+ RTW_SNR_OFDM_D,
+ RTW_SNR_1SS_A,
+ RTW_SNR_1SS_B,
+ RTW_SNR_1SS_C,
+ RTW_SNR_1SS_D,
+ RTW_SNR_2SS_A,
+ RTW_SNR_2SS_B,
+ RTW_SNR_2SS_C,
+ RTW_SNR_2SS_D,
+ /* keep it last */
+ RTW_SNR_NUM
+};
+
/* the power index is represented as differences: cck-1s & ht40-1s are
* the base values, so for the 1ss differences only ht20 & ofdm exist
*/
@@ -480,6 +532,7 @@ struct rtw_tx_pkt_info {
bool fs;
bool short_gi;
bool report;
+ bool rts;
};
struct rtw_rx_pkt_stat {
@@ -502,10 +555,16 @@ struct rtw_rx_pkt_stat {
s8 rx_power[RTW_RF_PATH_MAX];
u8 rssi;
u8 rxsc;
+ s8 rx_snr[RTW_RF_PATH_MAX];
+ u8 rx_evm[RTW_RF_PATH_MAX];
+ s8 cfo_tail[RTW_RF_PATH_MAX];
+
struct rtw_sta_info *si;
struct ieee80211_vif *vif;
};
+DECLARE_EWMA(tp, 10, 2);
+
struct rtw_traffic_stats {
/* units in bytes */
u64 tx_unicast;
@@ -518,6 +577,8 @@ struct rtw_traffic_stats {
/* units in Mbps */
u32 tx_throughput;
u32 rx_throughput;
+ struct ewma_tp tx_ewma_tp;
+ struct ewma_tp rx_ewma_tp;
};
enum rtw_lps_mode {
@@ -526,6 +587,12 @@ enum rtw_lps_mode {
RTW_MODE_WMM_PS = 2,
};
+enum rtw_lps_deep_mode {
+ LPS_DEEP_MODE_NONE = 0,
+ LPS_DEEP_MODE_LCLK = 1,
+ LPS_DEEP_MODE_PG = 2,
+};
+
enum rtw_pwr_state {
RTW_RF_OFF = 0x0,
RTW_RF_ON = 0x4,
@@ -533,14 +600,14 @@ enum rtw_pwr_state {
};
struct rtw_lps_conf {
- /* the interface to enter lps */
- struct rtw_vif *rtwvif;
enum rtw_lps_mode mode;
+ enum rtw_lps_deep_mode deep_mode;
enum rtw_pwr_state state;
u8 awake_interval;
u8 rlbm;
u8 smart_ps;
u8 port_id;
+ bool sec_cam_backup;
};
enum rtw_hw_key_type {
@@ -576,6 +643,19 @@ struct rtw_tx_report {
struct timer_list purge_timer;
};
+struct rtw_ra_report {
+ struct rate_info txrate;
+ u32 bit_rate;
+ u8 desc_rate;
+};
+
+struct rtw_txq {
+ struct list_head list;
+
+ unsigned long flags;
+ unsigned long last_push;
+};
+
#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
@@ -598,6 +678,41 @@ struct rtw_sta_info {
bool updated;
u8 init_ra_lv;
u64 ra_mask;
+
+ DECLARE_BITMAP(tid_ba, IEEE80211_NUM_TIDS);
+
+ struct rtw_ra_report ra_report;
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask *mask;
+};
+
+enum rtw_bfee_role {
+ RTW_BFEE_NONE,
+ RTW_BFEE_SU,
+ RTW_BFEE_MU
+};
+
+struct rtw_bfee {
+ enum rtw_bfee_role role;
+
+ u16 p_aid;
+ u8 g_id;
+ u8 mac_addr[ETH_ALEN];
+ u8 sound_dim;
+
+ /* SU-MIMO */
+ u8 su_reg_index;
+
+ /* MU-MIMO */
+ u16 aid;
+};
+
+struct rtw_bf_info {
+ u8 bfer_mu_cnt;
+ u8 bfer_su_cnt;
+ DECLARE_BITMAP(bfer_su_reg_maping, 2);
+ u8 cur_csi_rpt_rate;
};
struct rtw_vif {
@@ -608,10 +723,13 @@ struct rtw_vif {
u8 bssid[ETH_ALEN];
u8 port;
u8 bcn_ctrl;
+ struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
bool in_lps;
+
+ struct rtw_bfee bfee;
};
struct rtw_regulatory {
@@ -643,6 +761,14 @@ struct rtw_chip_ops {
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
void (*cck_pd_set)(struct rtw_dev *rtwdev, u8 level);
+ void (*pwr_track)(struct rtw_dev *rtwdev);
+ void (*config_bfee)(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable);
+ void (*set_gid_table)(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+ void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -747,6 +873,7 @@ enum rtw_dma_mapping {
RTW_DMA_MAPPING_NORMAL = 2,
RTW_DMA_MAPPING_HIGH = 3,
+ RTW_DMA_MAPPING_MAX,
RTW_DMA_MAPPING_UNDEF,
};
@@ -818,6 +945,34 @@ struct rtw_rfe_def {
.txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
}
+#define RTW_PWR_TRK_5G_1 0
+#define RTW_PWR_TRK_5G_2 1
+#define RTW_PWR_TRK_5G_3 2
+#define RTW_PWR_TRK_5G_NUM 3
+
+#define RTW_PWR_TRK_TBL_SZ 30
+
+/* This table stores the TX power values that power tracking will adjust.
+ *
+ * The 5G band has three different settings.
+ * The 2G band has separate settings for CCK and OFDM rates.
+ */
+struct rtw_pwr_track_tbl {
+ const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_2gb_n;
+ const u8 *pwrtrk_2gb_p;
+ const u8 *pwrtrk_2ga_n;
+ const u8 *pwrtrk_2ga_p;
+ const u8 *pwrtrk_2g_cckb_n;
+ const u8 *pwrtrk_2g_cckb_p;
+ const u8 *pwrtrk_2g_ccka_n;
+ const u8 *pwrtrk_2g_ccka_p;
+};
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
@@ -844,6 +999,7 @@ struct rtw_chip_info {
bool ht_supported;
bool vht_supported;
+ u8 lps_deep_mode_supported;
/* init values */
u8 sys_func_en;
@@ -868,6 +1024,11 @@ struct rtw_chip_info {
bool en_dis_dpd;
u16 dpd_ratemask;
+ u8 iqk_threshold;
+ const struct rtw_pwr_track_tbl *pwr_track_tbl;
+
+ u8 bfer_su_max_num;
+ u8 bfer_mu_max_num;
/* coex paras */
u32 coex_para_ver;
@@ -1121,10 +1282,26 @@ struct rtw_phy_cck_pd_reg {
#define DACK_MSBK_BACKUP_NUM 0xf
#define DACK_DCK_BACKUP_NUM 0x2
+struct rtw_swing_table {
+ const u8 *p[RTW_RF_PATH_MAX];
+ const u8 *n[RTW_RF_PATH_MAX];
+};
+
+struct rtw_pkt_count {
+ u16 num_bcn_pkt;
+ u16 num_qry_pkt[DESC_RATE_MAX];
+};
+
+DECLARE_EWMA(evm, 10, 4);
+DECLARE_EWMA(snr, 10, 4);
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 total_fa_cnt;
+ u32 cck_cca_cnt;
+ u32 ofdm_cca_cnt;
+ u32 total_cca_cnt;
u32 cck_ok_cnt;
u32 cck_err_cnt;
@@ -1147,6 +1324,15 @@ struct rtw_dm_info {
u8 cck_gi_u_bnd;
u8 cck_gi_l_bnd;
+ u8 tx_rate;
+ u8 thermal_avg[RTW_RF_PATH_MAX];
+ u8 thermal_meter_k;
+ s8 delta_power_index[RTW_RF_PATH_MAX];
+ u8 default_ofdm_index;
+ bool pwr_trk_triggered;
+ bool pwr_trk_init_trigger;
+ struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX];
+
/* backup dack results for each path and I/Q */
u32 dack_adck[RTW_RF_PATH_MAX];
u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM];
@@ -1157,6 +1343,17 @@ struct rtw_dm_info {
/* [bandwidth 0:20M/1:40M][number of path] */
u8 cck_pd_lv[2][RTW_RF_PATH_MAX];
u32 cck_fa_avg;
+
+ /* save the last rx phy status for debug */
+ s8 rx_snr[RTW_RF_PATH_MAX];
+ u8 rx_evm_dbm[RTW_RF_PATH_MAX];
+ s16 cfo_tail[RTW_RF_PATH_MAX];
+ u8 rssi[RTW_RF_PATH_MAX];
+ u8 curr_rx_rate;
+ struct rtw_pkt_count cur_pkt_count;
+ struct rtw_pkt_count last_pkt_count;
+ struct ewma_evm ewma_evm[RTW_EVM_NUM];
+ struct ewma_snr ewma_snr[RTW_SNR_NUM];
};
struct rtw_efuse {
@@ -1170,7 +1367,9 @@ struct rtw_efuse {
u8 country_code[2];
u8 rf_board_option;
u8 rfe_option;
- u8 thermal_meter;
+ u8 power_track_type;
+ u8 thermal_meter[RTW_RF_PATH_MAX];
+ u8 thermal_meter_k;
u8 crystal_cap;
u8 ant_div_cfg;
u8 ant_div_type;
@@ -1252,7 +1451,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
- enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
+ struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
@@ -1289,6 +1488,7 @@ struct rtw_hal {
u8 rf_path_num;
u8 antenna_tx;
u8 antenna_rx;
+ u8 bfee_sts_cap;
/* protect tx power section */
struct mutex tx_power_mutex;
@@ -1326,6 +1526,7 @@ struct rtw_dev {
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
struct rtw_regulatory regd;
+ struct rtw_bf_info bf_info;
struct rtw_dm_info dm_info;
struct rtw_coex coex;
@@ -1349,6 +1550,12 @@ struct rtw_dev {
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ /* used to protect txqs list */
+ spinlock_t txq_lock;
+ struct list_head txqs;
+ struct tasklet_struct tx_tasklet;
+ struct work_struct ba_work;
+
struct rtw_tx_report tx_report;
struct {
@@ -1361,11 +1568,12 @@ struct rtw_dev {
/* lps power state & handler work */
struct rtw_lps_conf lps_conf;
- struct delayed_work lps_work;
+ bool ps_enabled;
struct dentry *debugfs;
u8 sta_cnt;
+ u32 rts_threshold;
DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
@@ -1378,24 +1586,23 @@ struct rtw_dev {
#include "hci.h"
-static inline bool rtw_flag_check(struct rtw_dev *rtwdev, enum rtw_flags flag)
+static inline bool rtw_is_assoc(struct rtw_dev *rtwdev)
{
- return test_bit(flag, rtwdev->flags);
+ return !!rtwdev->sta_cnt;
}
-static inline void rtw_flag_clear(struct rtw_dev *rtwdev, enum rtw_flags flag)
+static inline struct ieee80211_txq *rtwtxq_to_txq(struct rtw_txq *rtwtxq)
{
- clear_bit(flag, rtwdev->flags);
-}
+ void *p = rtwtxq;
-static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag)
-{
- set_bit(flag, rtwdev->flags);
+ return container_of(p, struct ieee80211_txq, drv_priv);
}
-static inline bool rtw_is_assoc(struct rtw_dev *rtwdev)
+static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
{
- return !!rtwdev->sta_cnt;
+ void *p = rtwvif;
+
+ return container_of(p, struct ieee80211_vif, drv_priv);
}
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -1405,6 +1612,7 @@ bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val);
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value);
void rtw_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp, u32 num);
+void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss);
void rtw_set_channel(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
@@ -1417,5 +1625,6 @@ int rtw_core_init(struct rtw_dev *rtwdev);
void rtw_core_deinit(struct rtw_dev *rtwdev);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+u16 rtw_desc_to_bitrate(u8 desc_rate);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index d90928be663b..a58e8276a41a 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -9,6 +9,7 @@
#include "tx.h"
#include "rx.h"
#include "fw.h"
+#include "ps.h"
#include "debug.h"
static bool rtw_disable_msi;
@@ -457,9 +458,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
/* reset read/write point */
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
- /* rest H2C Queue index */
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+ /* reset H2C Queue index in a single write */
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+ BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -536,6 +537,69 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
+static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ bool tx_empty = true;
+ u8 queue;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ /* TX DMA is not allowed while in deep PS state */
+ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+ /* BCN queue is a reserved page and does not raise a DMA interrupt;
+ * the H2C queue is managed by firmware
+ */
+ if (queue == RTW_TX_QUEUE_BCN ||
+ queue == RTW_TX_QUEUE_H2C)
+ continue;
+
+ tx_ring = &rtwpci->tx_rings[queue];
+
+ /* check if any skb is still under DMA */
+ if (skb_queue_len(&tx_ring->queue)) {
+ tx_empty = false;
+ break;
+ }
+ }
+
+ if (!tx_empty) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "TX path not empty, cannot enter deep power save state\n");
+ return;
+ }
+
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+ rtw_power_mode_change(rtwdev, true);
+}
+
+static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_power_mode_change(rtwdev, false);
+}
+
+static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_enter(rtwdev);
+
+ if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_leave(rtwdev);
+
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
static u8 ac_to_hwq[] = {
[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
@@ -616,6 +680,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
u32 bd_idx;
+ unsigned long flags;
ring = &rtwpci->tx_rings[queue];
@@ -651,6 +716,10 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
tx_data = rtw_pci_get_tx_data(skb);
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ rtw_pci_deep_ps_leave(rtwdev);
skb_queue_tail(&ring->queue, skb);
/* kick off tx queue */
@@ -666,6 +735,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
}
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return 0;
}
@@ -990,23 +1060,49 @@ static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
u16 write_addr;
- u16 remainder = addr & 0x3;
+ u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
u8 flag;
- u8 cnt = 20;
+ u8 cnt;
- write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
+ write_addr = addr & BITS_DBI_ADDR_MASK;
+ write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
- rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
+
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ if (flag == 0)
+ return;
- flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
- while (flag && (cnt != 0)) {
udelay(10);
+ }
+
+ WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
+}
+
+static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & BITS_DBI_ADDR_MASK;
+ u8 flag;
+ u8 cnt;
+
+ rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
+
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
- cnt--;
+ if (flag == 0) {
+ read_addr = REG_DBI_RDATA_V1 + (addr & 3);
+ *value = rtw_read8(rtwdev, read_addr);
+ return 0;
+ }
+
+ udelay(10);
}
- WARN(flag, "DBI write fail\n");
+ WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
+ return -EIO;
}
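Both rtw_dbi_write8() and rtw_dbi_read8() above now share a bounded-polling
idiom; a standalone sketch of that idiom (poll_done() and fake_flag() are
illustrative helpers, not driver API):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_CNT 20

/* Poll a completion flag a bounded number of times, sleeping between
 * attempts, and report failure instead of spinning forever; the caller
 * can then WARN or return -EIO.
 */
static bool poll_done(bool (*read_flag)(void))
{
	for (int cnt = 0; cnt < RETRY_CNT; cnt++) {
		if (!read_flag())	/* flag cleared: HW completed */
			return true;
		usleep(10);		/* stands in for udelay(10) */
	}
	return false;
}

static int busy_reads = 3;
static bool fake_flag(void) { return --busy_reads > 0; }

int main(void)
{
	printf("completed: %s\n", poll_done(fake_flag) ? "yes" : "no");
	return 0;
}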
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
@@ -1017,23 +1113,113 @@ static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
rtw_write16(rtwdev, REG_MDIO_V1, data);
- page = addr < 0x20 ? 0 : 1;
- page += g1 ? 0 : 2;
- rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
+ page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
+ page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
-
rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
- wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
- cnt = 20;
- while (wflag && (cnt != 0)) {
- udelay(10);
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
BIT_MDIO_WFLAG_V1);
- cnt--;
+ if (wflag == 0)
+ return;
+
+ udelay(10);
+ }
+
+ WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
+}
+
+static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
+ return;
}
- WARN(wflag, "MDIO write fail\n");
+ if (enable)
+ value |= BIT_CLKREQ_SW_EN;
+ else
+ value &= ~BIT_CLKREQ_SW_EN;
+
+ rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
+ return;
+ }
+
+ if (enable)
+ value |= BIT_L1_SW_EN;
+ else
+ value &= ~BIT_L1_SW_EN;
+
+ rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ /* Like CLKREQ, ASPM is also implemented by two HW modules, and it
+ * can only be enabled when the host supports it.
+ *
+ * The ASPM mechanism should be enabled when driver/firmware enters
+ * power save mode and there is no heavy traffic, because we have
+ * seen inter-operability issues where the link enters L1 state on
+ * the fly even while the driver is pushing high throughput. This
+ * is probably because ASPM behavior varies slightly between
+ * different SoCs.
+ */
+ if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+ rtw_pci_aspm_set(rtwdev, enter);
+}
+
+static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 link_ctrl;
+ int ret;
+
+ /* Although the standard PCIE configuration space can set the link
+ * control register, by Realtek's design the driver should check
+ * whether the host supports CLKREQ/ASPM before enabling the HW
+ * module.
+ *
+ * These functions are implemented by two associated HW modules:
+ * one accesses the PCIE configuration space to follow the host
+ * settings, and the other performs the actual CLKREQ/ASPM
+ * mechanisms and is disabled by default, because the host does
+ * not always support them, and wrong settings (e.g. CLKREQ# not
+ * bi-directional) could make the device fall off the bus if the
+ * HW misbehaves on the link.
+ *
+ * Hence the driver should first check that the PCIE configuration
+ * space is synced and enabled, and only then turn on the module
+ * that actually performs the mechanism.
+ */
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
+ return;
+ }
+
+ if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ rtw_pci_clkreq_set(rtwdev, true);
+
+ rtwpci->link_ctrl = link_ctrl;
}
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
@@ -1074,6 +1260,8 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
else
rtw_dbi_write8(rtwdev, offset, value);
}
+
+ rtw_pci_link_cfg(rtwdev);
}
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
@@ -1120,8 +1308,6 @@ static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
goto err_io_unmap;
}
- rtw_pci_phy_cfg(rtwdev);
-
return 0;
err_io_unmap:
@@ -1142,6 +1328,8 @@ static struct rtw_hci_ops rtw_pci_ops = {
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
+ .deep_ps = rtw_pci_deep_ps,
+ .link_ps = rtw_pci_link_ps,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1233,6 +1421,8 @@ static int rtw_pci_probe(struct pci_dev *pdev,
goto err_destroy_pci;
}
+ rtw_pci_phy_cfg(rtwdev);
+
ret = rtw_register_hw(rtwdev, hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 87824a4caba9..49bf29a92152 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -20,10 +20,25 @@
#define BIT_RST_TRXDMA_INTF BIT(20)
#define BIT_RX_TAG_EN BIT(15)
#define REG_DBI_WDATA_V1 0x03E8
+#define REG_DBI_RDATA_V1 0x03EC
#define REG_DBI_FLAG_V1 0x03F0
+#define BIT_DBI_RFLAG BIT(17)
+#define BIT_DBI_WFLAG BIT(16)
+#define BITS_DBI_WREN GENMASK(15, 12)
+#define BITS_DBI_ADDR_MASK GENMASK(11, 2)
+
#define REG_MDIO_V1 0x03F4
#define REG_PCIE_MIX_CFG 0x03F8
+#define BITS_MDIO_ADDR_MASK GENMASK(4, 0)
#define BIT_MDIO_WFLAG_V1 BIT(5)
+#define RTW_PCI_MDIO_PG_SZ BIT(5)
+#define RTW_PCI_MDIO_PG_OFFS_G1 0
+#define RTW_PCI_MDIO_PG_OFFS_G2 2
+#define RTW_PCI_WR_RETRY_CNT 20
+
+#define RTK_PCIE_LINK_CFG 0x0719
+#define BIT_CLKREQ_SW_EN BIT(4)
+#define BIT_L1_SW_EN BIT(3)
#define BIT_PCI_BCNQ_FLAG BIT(4)
#define RTK_PCI_TXBD_DESA_BCNQ 0x308
@@ -190,6 +205,7 @@ struct rtw_pci {
u16 rx_tag;
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
+ u16 link_ctrl;
void __iomem *mmap;
};
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index d3d3f40de75e..a3e1e9578b65 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -20,15 +20,6 @@ union phy_table_tile {
struct phy_cfg_pair cfg;
};
-struct phy_pg_cfg_pair {
- u32 band;
- u32 rf_path;
- u32 tx_num;
- u32 addr;
- u32 bitmask;
- u32 data;
-};
-
static const u32 db_invert_table[12][8] = {
{10, 13, 16, 20,
25, 32, 40, 50},
@@ -118,7 +109,7 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
for (j = 0; j < RTW_RF_PATH_MAX; j++)
- dm_info->cck_pd_lv[i][j] = 0;
+ dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
}
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
@@ -222,10 +213,19 @@ static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
dm_info->min_rssi = data.min_rssi;
}
+static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->last_pkt_count = dm_info->cur_pkt_count;
+ memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
+}
+
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
rtw_phy_stat_rssi(rtwdev);
rtw_phy_stat_false_alarm(rtwdev);
+ rtw_phy_stat_rate_cnt(rtwdev);
}
#define DIG_PERF_FA_TH_LOW 250
@@ -394,7 +394,7 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev)
u8 step[3];
bool linked;
- if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
+ if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags))
return;
if (rtw_phy_dig_check_damping(dm_info))
@@ -461,7 +461,6 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
chip->ops->dpk_track(rtwdev);
}
-#define CCK_PD_LV_MAX 5
#define CCK_PD_FA_LV1_MIN 1000
#define CCK_PD_FA_LV0_MAX 500
@@ -471,10 +470,10 @@ static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
u32 cck_fa_avg = dm_info->cck_fa_avg;
if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
- return 1;
+ return CCK_PD_LV1;
if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
- return 0;
+ return CCK_PD_LV0;
return CCK_PD_LV_MAX;
}
@@ -494,15 +493,15 @@ static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
u32 cck_fa_avg = dm_info->cck_fa_avg;
if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
- return 4;
+ return CCK_PD_LV4;
if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
- return 3;
+ return CCK_PD_LV3;
if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
- return 2;
+ return CCK_PD_LV2;
if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
- return 1;
+ return CCK_PD_LV1;
if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
- return 0;
+ return CCK_PD_LV0;
return CCK_PD_LV_MAX;
}
@@ -539,6 +538,11 @@ static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
chip->ops->cck_pd_set(rtwdev, level);
}
+static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
+{
+ rtwdev->chip->ops->pwr_track(rtwdev);
+}
+
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
/* for further calculation */
@@ -547,6 +551,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_cck_pd(rtwdev);
rtw_phy_ra_info_update(rtwdev);
rtw_phy_dpk_track(rtwdev);
+ rtw_phy_pwr_track(rtwdev);
}
#define FRAC_BITS 3
@@ -1211,10 +1216,8 @@ static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
- const struct phy_pg_cfg_pair *p = tbl->data;
- const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
-
- BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
+ const struct rtw_phy_pg_cfg_pair *p = tbl->data;
+ const struct rtw_phy_pg_cfg_pair *end = p + tbl->size;
for (; p < end; p++) {
if (p->addr == 0xfe || p->addr == 0xffe) {
@@ -1748,7 +1751,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
group = rtw_get_channel_group(ch);
/* base power index for 2.4G/5G */
- if (ch <= 14) {
+ if (IS_CH_2G_BAND(ch)) {
band = PHY_BAND_2G;
*base = rtw_phy_get_2g_tx_power_index(rtwdev,
&pwr_idx->pwr_idx_2g,
@@ -1968,3 +1971,123 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
rs);
}
+
+void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table)
+{
+ const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
+ u8 channel = rtwdev->hal.current_channel;
+
+ if (IS_CH_2G_BAND(channel)) {
+ if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
+ } else {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ }
+ } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
+ } else if (IS_CH_5G_BAND_3(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
+ } else if (IS_CH_5G_BAND_4(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
+ } else {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ }
+}
+
+void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ ewma_thermal_add(&dm_info->avg_thermal[path], thermal);
+ dm_info->thermal_avg[path] =
+ ewma_thermal_read(&dm_info->avg_thermal[path]);
+}
+
+bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]);
+
+ if (avg == thermal)
+ return false;
+
+ return true;
+}
+
+u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 therm_avg, therm_efuse, therm_delta;
+
+ therm_avg = dm_info->thermal_avg[path];
+ therm_efuse = rtwdev->efuse.thermal_meter[path];
+ therm_delta = abs(therm_avg - therm_efuse);
+
+ return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
+}
+
+s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 tbl_path, u8 therm_path, u8 delta)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ const u8 *delta_swing_table_idx_pos;
+ const u8 *delta_swing_table_idx_neg;
+
+ if (delta >= RTW_PWR_TRK_TBL_SZ) {
+ rtw_warn(rtwdev, "power track table overflow\n");
+ return 0;
+ }
+
+ if (!swing_table) {
+ rtw_warn(rtwdev, "swing table not configured\n");
+ return 0;
+ }
+
+ delta_swing_table_idx_pos = swing_table->p[tbl_path];
+ delta_swing_table_idx_neg = swing_table->n[tbl_path];
+
+ if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) {
+ rtw_warn(rtwdev, "invalid swing table index\n");
+ return 0;
+ }
+
+ if (dm_info->thermal_avg[therm_path] >
+ rtwdev->efuse.thermal_meter[therm_path])
+ return delta_swing_table_idx_pos[delta];
+ else
+ return -delta_swing_table_idx_neg[delta];
+}
+
+bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 delta_iqk;
+
+ delta_iqk = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_k);
+ if (delta_iqk >= rtwdev->chip->iqk_threshold) {
+ dm_info->thermal_meter_k = dm_info->thermal_avg[0];
+ return true;
+ }
+ return false;
+}
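Taken together, rtw_phy_pwrtrack_get_delta() and
rtw_phy_pwrtrack_get_pwridx() turn a thermal reading into a signed power
index; a worked example with invented table contents:

#include <stdio.h>
#include <stdlib.h>

#define TBL_SZ 30

/* The thermal delta indexes a per-direction table; the sign follows
 * whether the averaged thermal value sits above or below the efuse
 * calibration value.
 */
static int pwridx(unsigned int therm_avg, unsigned int therm_efuse,
		  const unsigned char *pos, const unsigned char *neg)
{
	unsigned int delta = abs((int)therm_avg - (int)therm_efuse);

	if (delta >= TBL_SZ)
		delta = TBL_SZ - 1;

	return therm_avg > therm_efuse ? pos[delta] : -neg[delta];
}

int main(void)
{
	static const unsigned char pos[TBL_SZ] = { 0, 1, 2, 2, 3, 4 };
	static const unsigned char neg[TBL_SZ] = { 0, 1, 1, 2, 3, 3 };

	printf("%d\n", pwridx(30, 25, pos, neg));	/* hot:  +pos[5] = 4  */
	printf("%d\n", pwridx(22, 25, pos, neg));	/* cold: -neg[3] = -2 */
	return 0;
}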
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index e79b084628e7..af916d8784cd 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -41,9 +41,21 @@ void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_init_tx_power(struct rtw_dev *rtwdev);
void rtw_phy_load_tables(struct rtw_dev *rtwdev);
+u8 rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
+ enum rtw_bandwidth bw, u8 channel, u8 regd);
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path);
+bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
+ u8 path);
+u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
+s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 tbl_path, u8 therm_path, u8 delta);
+bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
+void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table);
struct rtw_txpwr_lmt_cfg_pair {
u8 regd;
@@ -54,6 +66,15 @@ struct rtw_txpwr_lmt_cfg_pair {
s8 txpwr_lmt;
};
+struct rtw_phy_pg_cfg_pair {
+ u32 band;
+ u32 rf_path;
+ u32 tx_num;
+ u32 addr;
+ u32 bitmask;
+ u32 data;
+};
+
#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
const struct rtw_table name ## _tbl = { \
.data = name, \
@@ -125,6 +146,15 @@ rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path,
u8 rate, u8 bw, u8 ch, u8 regd,
struct rtw_power_params *pwr_param);
+enum rtw_phy_cck_pd_lv {
+ CCK_PD_LV0,
+ CCK_PD_LV1,
+ CCK_PD_LV2,
+ CCK_PD_LV3,
+ CCK_PD_LV4,
+ CCK_PD_LV_MAX,
+};
+
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 9ecd14feb76b..913e6f47130f 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "reg.h"
#include "fw.h"
#include "ps.h"
#include "mac.h"
@@ -18,18 +19,19 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
- rtw_flag_clear(rtwdev, RTW_FLAG_INACTIVE_PS);
+ clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+ set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
rtw_core_stop(rtwdev);
+ rtw_hci_link_ps(rtwdev, true);
return 0;
}
@@ -48,6 +50,8 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
+ rtw_hci_link_ps(rtwdev, false);
+
ret = rtw_ips_pwr_up(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to leave ips state\n");
@@ -61,6 +65,85 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
return 0;
}
+void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
+{
+ u8 request, confirm, polling;
+ u8 polling_cnt;
+ u8 retry_cnt;
+
+ for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) {
+ request = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+ confirm = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
+
+ /* toggle to request power mode, others remain 0 */
+ request ^= request | BIT_RPWM_TOGGLE;
+ if (!enter) {
+ request |= POWER_MODE_ACK;
+ } else {
+ request |= POWER_MODE_LCLK;
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ request |= POWER_MODE_PG;
+ }
+
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+ if (enter)
+ return;
+
+ /* check whether the confirmed power mode has left power save state */
+ for (polling_cnt = 0; polling_cnt < 3; polling_cnt++) {
+ polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
+ if ((polling ^ confirm) & BIT_RPWM_TOGGLE)
+ return;
+ mdelay(20);
+ }
+
+ /* in case fw/hw missed the request, retry */
+ rtw_warn(rtwdev, "failed to leave deep PS, retry=%d\n",
+ retry_cnt);
+ }
+
+ /* Reaching here means the driver failed to change the hardware power
+ * mode to active state after 3 retries. If the power state is locked
+ * in deep sleep, most of the hardware circuits are not working, not
+ * even register read/write. Treat this as a fatal error that requires
+ * a full analysis of the firmware/hardware behavior
+ */
+ WARN(1, "Hardware power state locked\n");
+}
+EXPORT_SYMBOL(rtw_power_mode_change);
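The expression request ^= request | BIT_RPWM_TOGGLE above yields a value
whose toggle bit is the inverse of the old one while every other bit is
cleared; a small userspace check (the 0x80 bit value is assumed purely for
illustration):

#include <stdio.h>
#include <stdint.h>

#define BIT_RPWM_TOGGLE 0x80	/* assumed value, illustration only */

int main(void)
{
	uint8_t samples[] = { 0x00, 0x80, 0xff, 0x7f };

	for (int i = 0; i < 4; i++) {
		uint8_t request = samples[i];
		uint8_t out = request ^ (request | BIT_RPWM_TOGGLE);

		/* out == (~request) & BIT_RPWM_TOGGLE */
		printf("request=0x%02x -> 0x%02x\n", request, out);
	}
	return 0;
}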
+
+static void __rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+{
+ rtw_hci_deep_ps(rtwdev, false);
+}
+
+static void rtw_fw_leave_lps_state_check(struct rtw_dev *rtwdev)
+{
+ int i;
+
+ /* The driver needs to wait for the firmware to leave LPS state
+ * successfully. The firmware sends a null packet to inform the AP,
+ * waits for the AP's ACK, and then restores the REG_TCR register.
+ *
+ * If the driver does not wait for the firmware, a null packet with
+ * the PS bit set could be sent due to an incorrect REG_TCR setting.
+ *
+ * In our tests, 100ms is enough for the firmware to finish the
+ * flow. If REG_TCR is still incorrect after 100ms, modify it
+ * directly and emit a warning message.
+ */
+ for (i = 0; i < LEAVE_LPS_TRY_CNT; i++) {
+ if (rtw_read32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN) == 0)
+ return;
+ msleep(20);
+ }
+
+ rtw_write32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN, 0);
+ rtw_warn(rtwdev, "firmware failed to restore hardware setting\n");
+}
+
static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -70,12 +153,32 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
conf->rlbm = 0;
conf->smart_ps = 0;
+ rtw_hci_link_ps(rtwdev, false);
rtw_fw_set_pwr_mode(rtwdev);
- rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+ rtw_fw_leave_lps_state_check(rtwdev);
+
+ clear_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags);
rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE);
}
+static void __rtw_enter_lps_deep(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->lps_conf.deep_mode == LPS_DEEP_MODE_NONE)
+ return;
+
+ if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags)) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "Should enter LPS before entering deep PS\n");
+ return;
+ }
+
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ rtw_fw_set_pg_info(rtwdev);
+
+ rtw_hci_deep_ps(rtwdev, true);
+}
+
static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -88,88 +191,64 @@ static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
rtw_coex_lps_notify(rtwdev, COEX_LPS_ENABLE);
rtw_fw_set_pwr_mode(rtwdev);
- rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
-}
+ rtw_hci_link_ps(rtwdev, true);
-void rtw_lps_work(struct work_struct *work)
-{
- struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
- lps_work.work);
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- struct rtw_vif *rtwvif = conf->rtwvif;
-
- if (WARN_ON(!rtwvif))
- return;
-
- if (conf->mode == RTW_MODE_LPS)
- rtw_enter_lps_core(rtwdev);
- else
- rtw_leave_lps_core(rtwdev);
+ set_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags);
}
-void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+static void __rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- if (rtwvif->in_lps)
+ if (test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags))
return;
conf->mode = RTW_MODE_LPS;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = true;
+ conf->port_id = port_id;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+ rtw_enter_lps_core(rtwdev);
}
-void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+static void __rtw_leave_lps(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- if (!rtwvif->in_lps)
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "Should leave deep PS before leaving LPS\n");
+ __rtw_leave_lps_deep(rtwdev);
+ }
+
+ if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags))
return;
conf->mode = RTW_MODE_ACTIVE;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = false;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
-}
-
-bool rtw_in_lps(struct rtw_dev *rtwdev)
-{
- return rtw_flag_check(rtwdev, RTW_FLAG_LEISURE_PS);
+ rtw_leave_lps_core(rtwdev);
}
-void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id)
{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ lockdep_assert_held(&rtwdev->mutex);
- if (WARN_ON(!rtwvif))
+ if (rtwdev->coex.stat.wl_force_lps_ctrl)
return;
- if (rtwvif->in_lps)
- return;
-
- conf->mode = RTW_MODE_LPS;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = true;
-
- rtw_enter_lps_core(rtwdev);
+ __rtw_enter_lps(rtwdev, port_id);
+ __rtw_enter_lps_deep(rtwdev);
}
-void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+void rtw_leave_lps(struct rtw_dev *rtwdev)
{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
-
- if (WARN_ON(!rtwvif))
- return;
+ lockdep_assert_held(&rtwdev->mutex);
- if (!rtwvif->in_lps)
- return;
+ __rtw_leave_lps_deep(rtwdev);
+ __rtw_leave_lps(rtwdev);
+}
- conf->mode = RTW_MODE_ACTIVE;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = false;
+void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+{
+ lockdep_assert_held(&rtwdev->mutex);
- rtw_leave_lps_core(rtwdev);
+ __rtw_leave_lps_deep(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
index 09e57405dc1b..19afceca7d0e 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.h
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -5,16 +5,20 @@
#ifndef __RTW_PS_H_
#define __RTW_PS_H_
-#define RTW_LPS_THRESHOLD 2
+#define RTW_LPS_THRESHOLD 50
+
+#define POWER_MODE_ACK BIT(6)
+#define POWER_MODE_PG BIT(4)
+#define POWER_MODE_LCLK BIT(0)
+
+#define LEAVE_LPS_TRY_CNT 5
int rtw_enter_ips(struct rtw_dev *rtwdev);
int rtw_leave_ips(struct rtw_dev *rtwdev);
-void rtw_lps_work(struct work_struct *work);
-void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-bool rtw_in_lps(struct rtw_dev *rtwdev);
+void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter);
+void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
+void rtw_leave_lps(struct rtw_dev *rtwdev);
+void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index fe793e270d22..7e817bc997eb 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -239,6 +239,10 @@
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
#define REG_EDCA_BK_PARAM 0x050C
+#define BIT_MASK_TXOP_LMT GENMASK(26, 16)
+#define BIT_MASK_CWMAX GENMASK(15, 12)
+#define BIT_MASK_CWMIN GENMASK(11, 8)
+#define BIT_MASK_AIFS GENMASK(7, 0)
#define REG_PIFS 0x0512
#define REG_SIFS 0x0514
#define BIT_SHIFT_SIFS_OFDM_CTX 8
@@ -271,6 +275,7 @@
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
#define REG_TCR 0x0604
+#define BIT_PWRMGT_HWDATA_EN BIT(7)
#define REG_RCR 0x0608
#define BIT_APP_FCS BIT(31)
#define BIT_APP_MIC BIT(30)
@@ -305,6 +310,7 @@
#define REG_RX_PKT_LIMIT 0x060C
#define REG_RX_DRVINFO_SZ 0x060F
#define BIT_APP_PHYSTS BIT(28)
+#define REG_MAR 0x0620
#define REG_USTIME_EDCA 0x0638
#define REG_ACKTO_CCK 0x0639
#define REG_RESP_SIFS_CCK 0x063C
@@ -320,6 +326,7 @@
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
+#define REG_RXFLTMAP4 0x068A
#define REG_BT_COEX_TABLE0 0x06C0
#define REG_BT_COEX_TABLE1 0x06C4
#define REG_BT_COEX_BRK_TABLE 0x06C8
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 63abda3b0ebf..4bc14b1a6340 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -13,6 +13,7 @@
#include "mac.h"
#include "reg.h"
#include "debug.h"
+#include "bf.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -43,6 +44,8 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
@@ -75,6 +78,56 @@ static void rtw8822b_phy_rfe_init(struct rtw_dev *rtwdev)
rtw_write32_mask(rtwdev, 0x974, (BIT(11) | BIT(10)), 0x3);
}
+#define RTW_TXSCALE_SIZE 37
+static const u32 rtw8822b_txscale_tbl[RTW_TXSCALE_SIZE] = {
+ 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8,
+ 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180,
+ 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab,
+ 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
+};
+
+static u8 rtw8822b_get_swing_index(struct rtw_dev *rtwdev)
+{
+ u8 i;
+ u32 swing, table_value;
+
+ swing = rtw_read32_mask(rtwdev, 0xc1c, 0xffe00000);
+ for (i = 0; i < RTW_TXSCALE_SIZE; i++) {
+ table_value = rtw8822b_txscale_tbl[i];
+ if (swing == table_value)
+ break;
+ }
+
+ return i;
+}
+
+static void rtw8822b_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 swing_idx = rtw8822b_get_swing_index(rtwdev);
+ u8 path;
+
+ if (swing_idx >= RTW_TXSCALE_SIZE)
+ dm_info->default_ofdm_index = 24;
+ else
+ dm_info->default_ofdm_index = swing_idx;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
+static void rtw8822b_phy_bf_init(struct rtw_dev *rtwdev)
+{
+ rtw_bf_phy_init(rtwdev);
+ /* Grouping bitmap parameters */
+ rtw_write32(rtwdev, 0x1C94, 0xAFFFAFFF);
+}
+
static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
@@ -106,6 +159,9 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
rtw_phy_init(rtwdev);
rtw8822b_phy_rfe_init(rtwdev);
+ rtw8822b_pwrtrack_init(rtwdev);
+
+ rtw8822b_phy_bf_init(rtwdev);
}
#define WLAN_SLOT_TIME 0x09
@@ -211,9 +267,8 @@ static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
- bool is_channel_2g = (channel <= 14) ? true : false;
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x705770);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(4), 0);
@@ -241,9 +296,8 @@ static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
- bool is_channel_2g = (channel <= 14) ? true : false;
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
/* signal source */
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x745774);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
@@ -255,7 +309,7 @@ static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
if (hal->antenna_rx == BB_PATH_AB ||
hal->antenna_tx == BB_PATH_AB) {
/* 2TX or 2RX */
@@ -337,6 +391,7 @@ struct rtw8822b_rfe_info {
static const struct rtw8822b_rfe_info rtw8822b_rfe_info[] = {
[2] = I2GE5G_CCUT(efem),
+ [3] = IFEM_EXT_CCUT(ifem),
[5] = IFEM_EXT_CCUT(ifem),
};
@@ -350,7 +405,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u32 reg82c, reg830, reg838;
bool is_efem_cca = false, is_ifem_cca = false, is_rfe_type = false;
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
cca_ccut = rfe_info->cca_ccut_2g;
if (hal->antenna_rx == BB_PATH_A ||
@@ -381,7 +436,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
is_efem_cca = true;
break;
case RTW_RFE_IFEM2G_EFEM5G:
- if (channel <= 14)
+ if (IS_CH_2G_BAND(channel))
is_ifem_cca = true;
else
is_efem_cca = true;
@@ -405,9 +460,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
if (is_efem_cca && !(hal->cut_version == RTW_CHIP_VER_CUT_B))
rtw_write32_mask(rtwdev, REG_L1WT, MASKDWORD, 0x9194b2b9);
- if (bw == RTW_CHANNEL_WIDTH_20 &&
- ((channel >= 52 && channel <= 64) ||
- (channel >= 100 && channel <= 144)))
+ if (bw == RTW_CHANNEL_WIDTH_20 && IS_CH_5G_BAND_MID(channel))
rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0, 0x4);
}
@@ -442,7 +495,7 @@ static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
- rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (IS_CH_2G_BAND(channel) ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
if (channel > 144)
rf_reg18 |= RF18_RFSI_GT_CH144;
@@ -464,13 +517,13 @@ static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
break;
}
- if (channel <= 14)
+ if (IS_CH_2G_BAND(channel))
rf_reg_be = 0x0;
- else if (channel >= 36 && channel <= 64)
+ else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel))
rf_reg_be = low_band[(channel - 36) >> 1];
- else if (channel >= 100 && channel <= 144)
+ else if (IS_CH_5G_BAND_3(channel))
rf_reg_be = middle_band[(channel - 100) >> 1];
- else if (channel >= 149 && channel <= 177)
+ else if (IS_CH_5G_BAND_4(channel))
rf_reg_be = high_band[(channel - 149) >> 1];
else
goto err;
@@ -539,7 +592,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 rfe_option = efuse->rfe_option;
u32 val32;
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
@@ -556,22 +609,22 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
}
rtw_write32_mask(rtwdev, REG_RFEINV, 0x300, 0x2);
- } else if (channel > 35) {
+ } else if (IS_CH_5G_BAND(channel)) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 34);
- if (channel >= 36 && channel <= 64)
+ if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x1);
- else if (channel >= 100 && channel <= 144)
+ else if (IS_CH_5G_BAND_3(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x2);
- else if (channel >= 149)
+ else if (IS_CH_5G_BAND_4(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x3);
- if (channel >= 36 && channel <= 48)
+ if (IS_CH_5G_BAND_1(channel))
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
- else if (channel >= 52 && channel <= 64)
+ else if (IS_CH_5G_BAND_2(channel))
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
else if (channel >= 100 && channel <= 116)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
@@ -612,7 +665,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
- if (rfe_option == 2) {
+ if (rfe_option == 2 || rfe_option == 3) {
rtw_write32_mask(rtwdev, REG_L1PKWT, 0x0000f000, 0x6);
rtw_write32_mask(rtwdev, REG_ADC40, BIT(10), 0x1);
}
@@ -763,6 +816,7 @@ static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s8 min_rx_power = -120;
u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
@@ -772,13 +826,19 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
}
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
+ s8 rx_evm;
+ u8 evm_dbm = 0;
+ u8 rssi;
+ int path;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -801,6 +861,34 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_evm[RF_PATH_B] = GET_PHY_STAT_P1_RXEVM_B(phy_status);
+
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_B] = GET_PHY_STAT_P1_RXSNR_B(phy_status);
+
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_B] = GET_PHY_STAT_P1_CFO_TAIL_B(phy_status);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
+ dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
+
+ rx_evm = pkt_stat->rx_evm[path];
+
+ if (rx_evm < 0) {
+ if (rx_evm == S8_MIN)
+ evm_dbm = 0;
+ else
+ evm_dbm = ((u8)-rx_evm >> 1);
+ }
+ dm_info->rx_evm_dbm[path] = evm_dbm;
+ }
}
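The EVM handling above interprets raw values as negative half-dB steps,
with S8_MIN kept as an invalid sentinel; a small userspace sketch of the
conversion:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int8_t raw[] = { -40, -17, 0, INT8_MIN };

	for (int i = 0; i < 4; i++) {
		uint8_t evm_dbm = 0;

		/* negative half-dB raw EVM -> positive dB magnitude;
		 * INT8_MIN (S8_MIN) stays 0 as an invalid reading
		 */
		if (raw[i] < 0 && raw[i] != INT8_MIN)
			evm_dbm = (uint8_t)-raw[i] >> 1;
		printf("raw=%d -> %u dB\n", raw[i], evm_dbm);
	}
	return 0;
}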
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -836,7 +924,8 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
@@ -946,6 +1035,7 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 crc32_cnt;
+ u32 cca32_cnt;
cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
@@ -969,6 +1059,15 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ cca32_cnt = rtw_read32(rtwdev, 0xf08);
+ dm_info->ofdm_cca_cnt = ((cca32_cnt & 0xffff0000) >> 16);
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable) {
+ cca32_cnt = rtw_read32(rtwdev, 0xfcc);
+ dm_info->cck_cca_cnt = cca32_cnt & 0xffff;
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+ }
+
rtw_write32_set(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
@@ -1255,6 +1354,195 @@ static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
+static void rtw8822b_txagc_swing_offset(struct rtw_dev *rtwdev, u8 path,
+ u8 tx_pwr_idx_offset,
+ s8 *txagc_idx, u8 *swing_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 delta_pwr_idx = dm_info->delta_power_index[path];
+ u8 swing_upper_bound = dm_info->default_ofdm_index + 10;
+ u8 swing_lower_bound = 0;
+ u8 max_tx_pwr_idx_offset = 0xf;
+ s8 agc_index = 0;
+ u8 swing_index = dm_info->default_ofdm_index;
+
+ tx_pwr_idx_offset = min_t(u8, tx_pwr_idx_offset, max_tx_pwr_idx_offset);
+
+ if (delta_pwr_idx >= 0) {
+ if (delta_pwr_idx <= tx_pwr_idx_offset) {
+ agc_index = delta_pwr_idx;
+ swing_index = dm_info->default_ofdm_index;
+ } else if (delta_pwr_idx > tx_pwr_idx_offset) {
+ agc_index = tx_pwr_idx_offset;
+ swing_index = dm_info->default_ofdm_index +
+ delta_pwr_idx - tx_pwr_idx_offset;
+ swing_index = min_t(u8, swing_index, swing_upper_bound);
+ }
+ } else {
+ if (dm_info->default_ofdm_index > abs(delta_pwr_idx))
+ swing_index =
+ dm_info->default_ofdm_index + delta_pwr_idx;
+ else
+ swing_index = swing_lower_bound;
+ swing_index = max_t(u8, swing_index, swing_lower_bound);
+
+ agc_index = 0;
+ }
+
+ if (swing_index >= RTW_TXSCALE_SIZE) {
+ rtw_warn(rtwdev, "swing index overflow\n");
+ swing_index = RTW_TXSCALE_SIZE - 1;
+ }
+ *txagc_idx = agc_index;
+ *swing_idx = swing_index;
+}
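A numeric walk-through of the AGC/swing split performed above, with invented
values for default_ofdm_index and tx_pwr_idx_offset (the clamp to
default_ofdm_index + 10 is omitted for brevity):

#include <stdio.h>

int main(void)
{
	int default_ofdm_index = 24, tx_pwr_idx_offset = 3;
	int deltas[] = { 2, 5, -4 };

	for (int i = 0; i < 3; i++) {
		int d = deltas[i], agc, swing;

		if (d >= 0) {
			/* AGC absorbs up to tx_pwr_idx_offset steps,
			 * the remainder raises the swing index
			 */
			agc = d <= tx_pwr_idx_offset ? d : tx_pwr_idx_offset;
			swing = default_ofdm_index + (d - agc);
		} else {
			/* negative deltas only lower the swing index */
			agc = 0;
			swing = default_ofdm_index + d;
			if (swing < 0)
				swing = 0;
		}
		printf("delta=%d -> agc=%d swing=%d\n", d, agc, swing);
	}
	return 0;
}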
+
+static void rtw8822b_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 path,
+ u8 pwr_idx_offset)
+{
+ s8 txagc_idx;
+ u8 swing_idx;
+ u32 reg1, reg2;
+
+ if (path == RF_PATH_A) {
+ reg1 = 0xc94;
+ reg2 = 0xc1c;
+ } else if (path == RF_PATH_B) {
+ reg1 = 0xe94;
+ reg2 = 0xe1c;
+ } else {
+ return;
+ }
+
+ rtw8822b_txagc_swing_offset(rtwdev, path, pwr_idx_offset,
+ &txagc_idx, &swing_idx);
+ rtw_write32_mask(rtwdev, reg1, GENMASK(29, 25), txagc_idx);
+ rtw_write32_mask(rtwdev, reg2, GENMASK(31, 21),
+ rtw8822b_txscale_tbl[swing_idx]);
+}
+
+static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 pwr_idx_offset, tx_pwr_idx;
+ u8 channel = rtwdev->hal.current_channel;
+ u8 band_width = rtwdev->hal.current_band_width;
+ u8 regd = rtwdev->regd.txpwr_regd;
+ u8 tx_rate = dm_info->tx_rate;
+ u8 max_pwr_idx = rtwdev->chip->max_power_index;
+
+ tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, tx_rate,
+ band_width, channel, regd);
+
+ tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx);
+
+ pwr_idx_offset = max_pwr_idx - tx_pwr_idx;
+
+ rtw8822b_pwrtrack_set_pwr(rtwdev, path, pwr_idx_offset);
+}
+
+static void rtw8822b_phy_pwrtrack_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 power_idx_cur, power_idx_last;
+ u8 delta;
+
+ /* 8822B only has one thermal meter at PATH A */
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ power_idx_last = dm_info->delta_power_index[path];
+ power_idx_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table,
+ path, RF_PATH_A, delta);
+
+ /* skip if the delta power index did not change */
+ if (power_idx_cur == power_idx_last)
+ return;
+
+ dm_info->delta_power_index[path] = power_idx_cur;
+ rtw8822b_pwrtrack_set(rtwdev, path);
+}
+
+static void rtw8822b_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, path;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[RF_PATH_A] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++)
+ rtw8822b_phy_pwrtrack_path(rtwdev, &swing_table, path);
+
+iqk:
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8822b_do_iqk(rtwdev);
+}
+
+static void rtw8822b_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8822b_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8822b_bf_config_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_su(rtwdev, bfee);
+}
+
+static void rtw8822b_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_mu(rtwdev, bfee);
+}
+
+static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (bfee->role == RTW_BFEE_SU)
+ rtw8822b_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+ else if (bfee->role == RTW_BFEE_MU)
+ rtw8822b_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+ else
+ rtw_warn(rtwdev, "wrong bfee role\n");
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -1754,6 +2042,7 @@ static struct rtw_intf_phy_para_table phy_para_table_8822b = {
static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822b, 2, 2),
+ [3] = RTW_DEF_RFE(8822b, 3, 0),
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
@@ -1801,6 +2090,10 @@ static struct rtw_chip_ops rtw8822b_ops = {
.cfg_ldo25 = rtw8822b_cfg_ldo25,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.phy_calibration = rtw8822b_phy_calibration,
+ .pwr_track = rtw8822b_pwr_track,
+ .config_bfee = rtw8822b_bf_config_bfee,
+ .set_gid_table = rtw_bf_set_gid_table,
+ .cfg_csi_rate = rtw_bf_cfg_csi_rate,
.coex_set_init = rtw8822b_coex_cfg_init,
.coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
@@ -1955,6 +2248,129 @@ static const struct coex_rf_para rf_para_rx_8822b[] = {
static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b));
+static const u8
+rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+};
+
+static const u8 rtw8822b_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 12, 13, 13
+};
+
+static const u8 rtw8822b_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 15
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 12, 13, 13
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 15
+};
+
+static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = {
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gb_n = rtw8822b_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8822b_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8822b_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8822b_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8822b_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8822b_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8822b_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8822b_pwrtrk_2g_cck_a_p,
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -1977,6 +2393,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
.sys_func_en = 0xDC,
.pwr_on_seq = card_enable_flow_8822b,
.pwr_off_seq = card_disable_flow_8822b,
@@ -1992,6 +2409,10 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
.rfe_defs = rtw8822b_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+ .pwr_track_tbl = &rtw8822b_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+ .bfer_su_max_num = 2,
+ .bfer_mu_max_num = 1,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 0cb93d7d4cfd..6211f4b547b9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -127,6 +127,18 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
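+/* Dwords 4-6 of the page-1 PHY status carry per-path EVM, CFO tail and SNR */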
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
index 465f58411cab..b9010b111a13 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -11643,104 +11643,155 @@ static const u32 rtw8822b_bb[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822b_bb, rtw_phy_cfg_bb);
-static const u32 rtw8822b_bb_pg_type2[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type2[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032, },
};
RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type2);
-static const u32 rtw8822b_bb_pg_type5[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type3[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type3);
+
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type5[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
};
RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type5);
@@ -20382,6 +20433,596 @@ static const u32 rtw8822b_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 26, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 28, },
+ { 2, 1, 0, 1, 144, 32, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 32, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 26, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 32, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 32, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 32, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 32, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 32, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 22, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 30, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 24, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 24, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 30, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 30, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 30, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 30, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 30, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 20, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 22, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 22, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 20, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 20, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 20, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 30, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 30, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 18, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 18, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 30, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 30, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type0);
+
static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type2[] = {
{ 0, 0, 0, 0, 1, 32, },
{ 2, 0, 0, 0, 1, 28, },
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
index d4c268889368..4140e1ccb7b1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
@@ -9,9 +9,11 @@ extern const struct rtw_table rtw8822b_mac_tbl;
extern const struct rtw_table rtw8822b_agc_tbl;
extern const struct rtw_table rtw8822b_bb_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type3_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type5_tbl;
extern const struct rtw_table rtw8822b_rf_a_tbl;
extern const struct rtw_table rtw8822b_rf_b_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type0_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type2_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type5_tbl;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index c2f6cd76a658..174029836833 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -14,6 +14,7 @@
#include "reg.h"
#include "debug.h"
#include "util.h"
+#include "bf.h"
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -40,6 +41,11 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->path_a_thermal;
+ efuse->thermal_meter[RF_PATH_B] = map->path_b_thermal;
+ efuse->thermal_meter_k =
+ (map->path_a_thermal + map->path_b_thermal) >> 1;
+ efuse->power_track_type = (map->tx_pwr_calibrate_rate >> 4) & 0xf;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
@@ -1000,6 +1006,21 @@ static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
rtw8822c_rf_x2_check(rtwdev);
}
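+/* Reset per-path power tracking state and seed the thermal baseline from efuse */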
+static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++) {
+ dm_info->delta_power_index[path] = 0;
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->thermal_avg[path] = 0xff;
+ }
+
+ dm_info->pwr_trk_triggered = false;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -1047,6 +1068,9 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
rtw8822c_rf_init(rtwdev);
+ rtw8822c_pwrtrack_init(rtwdev);
+
+ rtw_bf_phy_init(rtwdev);
}
#define WLAN_TXQ_RPT_EN 0x1F
@@ -1088,8 +1112,8 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_AMPDU_MAX_TIME 0x70
#define WLAN_RTS_LEN_TH 0xFF
#define WLAN_RTS_TX_TIME_TH 0x08
-#define WLAN_MAX_AGG_PKT_LIMIT 0x20
-#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_MAX_AGG_PKT_LIMIT 0x3f
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x3f
#define WLAN_PRE_TXCNT_TIME_TH 0x1E0
#define FAST_EDCA_VO_TH 0x06
#define FAST_EDCA_VI_TH 0x06
@@ -1112,6 +1136,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_RTS_RATE_FB_RATE4_H 0x400003E0
#define WLAN_RTS_RATE_FB_RATE5 0x0600F015
#define WLAN_RTS_RATE_FB_RATE5_H 0x000000E0
+#define WLAN_MULTI_ADDR 0xFFFFFFFF
#define WLAN_TX_FUNC_CFG1 0x30
#define WLAN_TX_FUNC_CFG2 0x30
@@ -1221,6 +1246,8 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_BCN_MAX_ERR, WLAN_BCN_MAX_ERR);
/* WMAC configuration */
+ rtw_write32(rtwdev, REG_MAR, WLAN_MULTI_ADDR);
+ rtw_write32(rtwdev, REG_MAR + 4, WLAN_MULTI_ADDR);
rtw_write8(rtwdev, REG_BBPSF_CTRL + 2, WLAN_RESP_TXRATE);
rtw_write8(rtwdev, REG_ACKTO, WLAN_ACK_TO);
rtw_write8(rtwdev, REG_ACKTO_CCK, WLAN_ACK_TO_CCK);
@@ -1284,11 +1311,11 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
- rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (IS_CH_2G_BAND(channel) ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
- if (channel > 144)
+ if (IS_CH_5G_BAND_4(channel))
rf_reg18 |= RF18_RFSI_GT_CH140;
- else if (channel >= 80)
+ else if (IS_CH_5G_BAND_3(channel))
rf_reg18 |= RF18_RFSI_GE_CH80;
switch (bw) {
@@ -1338,7 +1365,7 @@ static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
@@ -1403,7 +1430,7 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
else
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x1);
- } else if (channel > 35) {
+ } else if (IS_CH_5G_BAND(channel)) {
rtw_write32_set(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
@@ -1411,17 +1438,17 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
- if (channel >= 36 && channel <= 64) {
+ if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x1);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
0x1);
- } else if (channel >= 100 && channel <= 144) {
+ } else if (IS_CH_5G_BAND_3(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x2);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
0x2);
- } else if (channel >= 149) {
+ } else if (IS_CH_5G_BAND_4(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x3);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
@@ -1616,6 +1643,8 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
u8 gain_a, gain_b;
s8 rx_power[RTW_RF_PATH_MAX];
s8 min_rx_power = -120;
+ u8 rssi;
+ int path;
rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
rx_power[RF_PATH_B] = GET_PHY_STAT_P0_PWDB_B(phy_status);
@@ -1638,6 +1667,11 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A];
pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B];
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ }
+
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
@@ -1647,8 +1681,13 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
+ s8 rx_evm;
+ u8 evm_dbm = 0;
+ u8 rssi;
+ int path;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -1669,6 +1708,34 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
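+ /* Per-path EVM, SNR and CFO tail are reported in the page-1 PHY status */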
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_evm[RF_PATH_B] = GET_PHY_STAT_P1_RXEVM_B(phy_status);
+
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_B] = GET_PHY_STAT_P1_RXSNR_B(phy_status);
+
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_B] = GET_PHY_STAT_P1_CFO_TAIL_B(phy_status);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
+ dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
+
+ rx_evm = pkt_stat->rx_evm[path];
+
+ if (rx_evm < 0) {
+ if (rx_evm == S8_MIN)
+ evm_dbm = 0;
+ else
+ evm_dbm = ((u8)-rx_evm >> 1);
+ }
+ dm_info->rx_evm_dbm[path] = evm_dbm;
+ }
}
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -1704,7 +1771,8 @@ static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
@@ -1822,6 +1890,7 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_enable;
u32 cck_fa_cnt;
u32 crc32_cnt;
+ u32 cca32_cnt;
u32 ofdm_fa_cnt;
u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5;
u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail,
@@ -1866,6 +1935,13 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ cca32_cnt = rtw_read32(rtwdev, 0x2c08);
+ dm_info->ofdm_cca_cnt = ((cca32_cnt & 0xffff0000) >> 16);
+ dm_info->cck_cca_cnt = cca32_cnt & 0xffff;
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable)
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
@@ -2053,6 +2129,57 @@ static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
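+/* The 8822c layers chip-specific BBPSF control (NDPA rate source, AP-mode
+ * bit) and CSI report rate setup on top of the common SU-BFee enablement.
+ */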
+static void rtw8822c_bf_enable_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ u8 csi_rsc = 0;
+ u32 tmp6dc;
+
+ rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+
+ tmp6dc = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
+ BIT_WMAC_USE_NDPARATE |
+ (csi_rsc << 13);
+ if (vif->net_type == RTW_NET_AP_MODE)
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, tmp6dc | BIT(12));
+ else
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, tmp6dc & ~BIT(12));
+
+ rtw_write32(rtwdev, REG_CSI_RRSR, 0x550);
+}
+
+static void rtw8822c_bf_config_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw8822c_bf_enable_bfee_su(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_su(rtwdev, bfee);
+}
+
+static void rtw8822c_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_mu(rtwdev, bfee);
+}
+
+static void rtw8822c_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (bfee->role == RTW_BFEE_SU)
+ rtw8822c_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+ else if (bfee->role == RTW_BFEE_MU)
+ rtw8822c_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+ else
+ rtw_warn(rtwdev, "wrong bfee role\n");
+}
+
struct dpk_cfg_pair {
u32 addr;
u32 bitmask;
@@ -2603,9 +2730,9 @@ static bool rtw8822c_dpk_coef_iq_check(struct rtw_dev *rtwdev,
{
if (coef_i == 0x1000 || coef_i == 0x0fff ||
coef_q == 0x1000 || coef_q == 0x0fff)
- return 1;
- else
- return 0;
+ return true;
+
+ return false;
}
static u32 rtw8822c_dpk_coef_transfer(struct rtw_dev *rtwdev)
@@ -2843,7 +2970,7 @@ static void rtw8822c_dpk_cal_gs(struct rtw_dev *rtwdev, u8 path)
dpk_info->dpk_gs[path] = tmp_gs;
}
-void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
+static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
{
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
u32 offset[DPK_RF_PATH_NUM] = {0, 0x58};
@@ -3084,7 +3211,7 @@ static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev)
rtw8822c_do_dpk(rtwdev);
}
-void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
+static void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
{
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
u8 path;
@@ -3168,8 +3295,8 @@ rtw8822c_phy_cck_pd_set_reg(struct rtw_dev *rtwdev,
static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- s8 pd_lvl[4] = {2, 4, 6, 8};
- s8 cs_lvl[4] = {2, 2, 2, 4};
+ s8 pd_lvl[CCK_PD_LV_MAX] = {0, 2, 4, 6, 8};
+ s8 cs_lvl[CCK_PD_LV_MAX] = {0, 2, 2, 2, 4};
u8 cur_lvl;
u8 nrx, bw;
@@ -3191,6 +3318,87 @@ static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
dm_info->cck_pd_lv[bw][nrx] = new_lvl;
}
+#define PWR_TRACK_MASK 0x7f
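+/* Program the per-path power index delta computed by power tracking */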
+static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ rtw_write32_mask(rtwdev, 0x18a0, PWR_TRACK_MASK,
+ dm_info->delta_power_index[rf_path]);
+ break;
+ case RF_PATH_B:
+ rtw_write32_mask(rtwdev, 0x41a0, PWR_TRACK_MASK,
+ dm_info->delta_power_index[rf_path]);
+ break;
+ default:
+ break;
+ }
+}
+
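+/* Track one RF path: average the thermal reading, convert the delta to a
+ * power index via the swing table, then program the compensation.
+ */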
+static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 thermal_value, delta;
+
+ if (rtwdev->efuse.thermal_meter[path] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
+
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+
+ dm_info->delta_power_index[path] =
+ rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
+ delta);
+
+ rtw8822c_pwrtrack_set(rtwdev, path);
+}
+
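+/* Compensate every path, then redo IQK if the thermal drift is large enough */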
+static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_swing_table swing_table;
+ u8 i;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
+
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8822c_do_iqk(rtwdev);
+}
+
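+/* Power tracking is split across two calls: the first only restarts the RF
+ * thermal meter, the next reads the settled value back and compensates.
+ */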
+static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x00);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x01);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x00);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x01);
+
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ __rtw8822c_pwr_track(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -3571,6 +3779,10 @@ static struct rtw_chip_ops rtw8822c_ops = {
.dpk_track = rtw8822c_dpk_track,
.phy_calibration = rtw8822c_phy_calibration,
.cck_pd_set = rtw8822c_phy_cck_pd_set,
+ .pwr_track = rtw8822c_pwr_track,
+ .config_bfee = rtw8822c_bf_config_bfee,
+ .set_gid_table = rtw_bf_set_gid_table,
+ .cfg_csi_rate = rtw_bf_cfg_csi_rate,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -3725,6 +3937,129 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
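+/* Same power tracking table layout as the 8822b above, with 8822c values */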
+static const u8
+rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+};
+
+static const u8 rtw8822c_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 4, 5, 6, 7, 8,
+ 9, 9, 10, 11, 12, 13, 14, 15, 15, 16,
+ 17, 18, 19, 20, 20, 21, 22, 23, 24, 25
+};
+
+static const u8 rtw8822c_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28
+};
+
+static const u8 rtw8822c_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6,
+ 7, 8, 8, 9, 9, 10, 11, 11, 12, 13,
+ 13, 14, 15, 15, 16, 17, 17, 18, 19, 19
+};
+
+static const u8 rtw8822c_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 25, 26, 27
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 13, 14, 15, 16, 17,
+ 17, 18, 19, 20, 21, 22, 23, 23, 24, 25
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7,
+ 8, 9, 9, 10, 11, 12, 12, 13, 14, 15,
+ 15, 16, 17, 18, 18, 19, 20, 21, 21, 22
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 13, 14, 15, 16, 17,
+ 18, 18, 19, 20, 21, 22, 23, 24, 24, 25
+};
+
+static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gb_n = rtw8822c_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8822c_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8822c_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8822c_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8822c_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8822c_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8822c_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
+};
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -3747,6 +4082,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK) | BIT(LPS_DEEP_MODE_PG),
.sys_func_en = 0xD8,
.pwr_on_seq = card_enable_flow_8822c,
.pwr_off_seq = card_disable_flow_8822c,
@@ -3765,6 +4101,10 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
.en_dis_dpd = true,
.dpd_ratemask = DIS_DPD_RATEALL,
+ .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+ .bfer_su_max_num = 2,
+ .bfer_mu_max_num = 1,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 438db74d8e7a..abd9f300bedd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -149,6 +149,18 @@ const struct rtw_table name ## _tbl = { \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index e2dd4c766077..d102a2c27757 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -1762,53 +1762,53 @@ static const u32 rtw8822c_bb[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
-static const u32 rtw8822c_bb_pg_type0[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044
+static const struct rtw_phy_pg_cfg_pair rtw8822c_bb_pg_type0[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
};
RTW_DECL_TABLE_BB_PG(rtw8822c_bb_pg_type0);
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 48b9ed49b79a..9b90339ab697 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -5,6 +5,7 @@
#include "main.h"
#include "rx.h"
#include "ps.h"
+#include "debug.h"
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb)
@@ -25,8 +26,6 @@ void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.rx_unicast += skb->len;
rtwvif->stats.rx_cnt++;
- if (rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
- rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
@@ -39,6 +38,60 @@ struct rtw_rx_addr_match_data {
u8 *bssid;
};
+static void rtw_rx_phy_stat(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_pkt_count *cur_pkt_cnt = &dm_info->cur_pkt_count;
+ u8 rate_ss, rate_ss_evm, evm_id;
+ u8 i, idx;
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ cur_pkt_cnt->num_bcn_pkt++;
+
+ switch (pkt_stat->rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ goto pkt_num;
+ case DESC_RATE6M...DESC_RATE54M:
+ rate_ss = 0;
+ rate_ss_evm = 1;
+ evm_id = RTW_EVM_OFDM;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS7:
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT1SS_MCS9:
+ rate_ss = 1;
+ rate_ss_evm = 1;
+ evm_id = RTW_EVM_1SS;
+ break;
+ case DESC_RATEMCS8...DESC_RATEMCS15:
+ case DESC_RATEVHT2SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ rate_ss = 2;
+ rate_ss_evm = 2;
+ evm_id = RTW_EVM_2SS_A;
+ break;
+ default:
+ rtw_warn(rtwdev, "unknown pkt rate = %d\n", pkt_stat->rate);
+ return;
+ }
+
+ for (i = 0; i < rate_ss_evm; i++) {
+ idx = evm_id + i;
+ ewma_evm_add(&dm_info->ewma_evm[idx],
+ dm_info->rx_evm_dbm[i]);
+ }
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++) {
+ idx = RTW_SNR_OFDM_A + 4 * rate_ss + i;
+ ewma_snr_add(&dm_info->ewma_snr[idx],
+ dm_info->rx_snr[i]);
+ }
+pkt_num:
+ cur_pkt_cnt->num_qry_pkt[pkt_stat->rate]++;
+}
+
static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -50,14 +103,16 @@ static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
u8 *bssid = iter_data->bssid;
- if (ether_addr_equal(vif->bss_conf.bssid, bssid) &&
- (ether_addr_equal(vif->addr, hdr->addr1) ||
- ieee80211_is_beacon(hdr->frame_control)))
- sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
- vif->addr);
- else
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
return;
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ rtw_rx_phy_stat(rtwdev, pkt_stat, hdr);
+ sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
+ vif->addr);
if (!sta)
return;
@@ -105,35 +160,17 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
else if (pkt_stat->rate >= DESC_RATEMCS0)
rx_status->encoding = RX_ENC_HT;
- if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT1SS_MCS9) {
- rx_status->nss = 1;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT1SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT2SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT2SS_MCS9) {
- rx_status->nss = 2;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT2SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT3SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT3SS_MCS9) {
- rx_status->nss = 3;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT3SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT4SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT4SS_MCS9) {
- rx_status->nss = 4;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT4SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEMCS0 &&
- pkt_stat->rate <= DESC_RATEMCS15) {
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEMCS0;
- } else if (rx_status->band == NL80211_BAND_5GHZ &&
- pkt_stat->rate >= DESC_RATE6M &&
- pkt_stat->rate <= DESC_RATE54M) {
+ if (rx_status->band == NL80211_BAND_5GHZ &&
+ pkt_stat->rate >= DESC_RATE6M &&
+ pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE6M;
} else if (rx_status->band == NL80211_BAND_2GHZ &&
pkt_stat->rate >= DESC_RATE1M &&
pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE1M;
- } else {
- rx_status->rate_idx = 0;
+ } else if (pkt_stat->rate >= DESC_RATEMCS0) {
+ rtw_desc_to_mcsrate(pkt_stat->rate, &rx_status->rate_idx,
+ &rx_status->nss);
}
rx_status->flag |= RX_FLAG_MACTIME_START;
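[Illustrative sketch, not part of this patch, of the EWMA helpers that rtw_rx_phy_stat() relies on: DECLARE_EWMA() from <linux/average.h> generates the struct and its _init/_add/_read accessors. The name and the precision/weight parameters below are assumptions for the example.]

#include <linux/average.h>

DECLARE_EWMA(snr_demo, 10, 4);	/* struct ewma_snr_demo + helpers */

static unsigned long demo_average_snr(const u8 *samples, int num)
{
	struct ewma_snr_demo avg;
	int i;

	ewma_snr_demo_init(&avg);
	for (i = 0; i < num; i++)
		ewma_snr_demo_add(&avg, samples[i]);

	/* returns the current exponentially weighted moving average */
	return ewma_snr_demo_read(&avg);
}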
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index 383f3b2babc1..3342e3761281 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -5,6 +5,15 @@
#ifndef __RTW_RX_H_
#define __RTW_RX_H_
+enum rtw_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
#define GET_RX_DESC_PHYST(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(26))
#define GET_RX_DESC_ICV_ERR(rxdesc) \
@@ -21,6 +30,8 @@
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(19, 16))
#define GET_RX_DESC_SHIFT(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(25, 24))
+#define GET_RX_DESC_ENC_TYPE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(22, 20))
#define GET_RX_DESC_RX_RATE(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x03), GENMASK(6, 0))
#define GET_RX_DESC_MACID(rxdesc) \
diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c
index c594fc02804d..d0d7fbb10d58 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.c
+++ b/drivers/net/wireless/realtek/rtw88/sec.c
@@ -96,6 +96,27 @@ void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
}
+u8 rtw_sec_cam_pg_backup(struct rtw_dev *rtwdev, u8 *used_cam)
+{
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u8 offset = 0;
+ u8 count, n;
+
+ if (!used_cam)
+ return 0;
+
+ for (count = 0; count < MAX_PG_CAM_BACKUP_NUM; count++) {
+ n = find_next_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM, offset);
+ if (n == RTW_MAX_SEC_CAM_NUM)
+ break;
+
+ used_cam[count] = n;
+ offset = n + 1;
+ }
+
+ return count;
+}
+
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev)
{
struct rtw_sec_desc *sec = &rtwdev->sec;
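[Illustrative sketch, not part of this patch, of the bitmap walk rtw_sec_cam_pg_backup() performs: find_next_bit() returns the bitmap size when no further bit is set, which terminates the loop. All names below are hypothetical.]

#include <linux/bitmap.h>

#define DEMO_CAM_NUM	32
#define DEMO_BACKUP_MAX	8

static u8 demo_collect_used_cam(const unsigned long *cam_map, u8 *out)
{
	unsigned long bit = 0;
	u8 count = 0;

	while (count < DEMO_BACKUP_MAX) {
		bit = find_next_bit(cam_map, DEMO_CAM_NUM, bit);
		if (bit == DEMO_CAM_NUM)	/* no more set bits */
			break;

		out[count++] = bit;
		bit++;			/* resume search after this bit */
	}

	return count;
}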
diff --git a/drivers/net/wireless/realtek/rtw88/sec.h b/drivers/net/wireless/realtek/rtw88/sec.h
index 8c50a895c797..efcf45433999 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.h
+++ b/drivers/net/wireless/realtek/rtw88/sec.h
@@ -34,6 +34,7 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev,
void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
u8 hw_key_idx);
+u8 rtw_sec_cam_pg_backup(struct rtw_dev *rtwdev, u8 *used_cam);
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 8eaa9809ca44..24c39c60c99a 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -27,8 +27,6 @@ void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.tx_unicast += skb->len;
rtwvif->stats.tx_cnt++;
- if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD)
- rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
@@ -58,6 +56,7 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
+ SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
@@ -260,6 +259,9 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
ampdu_density = get_tx_ampdu_density(sta);
}
+ if (info->control.use_rts)
+ pkt_info->rts = true;
+
if (sta->vht_cap.vht_supported)
rate = get_highest_vht_tx_rate(rtwdev, sta);
else if (sta->ht_cap.ht_supported)
@@ -365,3 +367,132 @@ void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
pkt_info->qsel = TX_DESC_QSEL_MGMT;
pkt_info->ls = true;
}
+
+void rtw_tx(struct rtw_dev *rtwdev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_tx_pkt_info pkt_info = {0};
+
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
+ if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ goto out;
+
+ return;
+
+out:
+ ieee80211_free_txskb(rtwdev->hw, skb);
+}
+
+static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct ieee80211_tx_info *info;
+ struct rtw_sta_info *si;
+
+ if (test_bit(RTW_TXQ_AMPDU, &rtwtxq->flags)) {
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ return;
+ }
+
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ if (test_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags))
+ return;
+
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ if (!txq->sta)
+ return;
+
+ si = (struct rtw_sta_info *)txq->sta->drv_priv;
+ set_bit(txq->tid, si->tid_ba);
+
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
+}
+
+static bool rtw_txq_dequeue(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct ieee80211_tx_control control;
+ struct sk_buff *skb;
+
+ skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
+ if (!skb)
+ return false;
+
+ rtw_txq_check_agg(rtwdev, rtwtxq, skb);
+
+ control.sta = txq->sta;
+ rtw_tx(rtwdev, &control, skb);
+ rtwtxq->last_push = jiffies;
+
+ return true;
+}
+
+static void rtw_txq_push(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ unsigned long frames)
+{
+ int i;
+
+ rcu_read_lock();
+
+ for (i = 0; i < frames; i++)
+ if (!rtw_txq_dequeue(rtwdev, rtwtxq))
+ break;
+
+ rcu_read_unlock();
+}
+
+void rtw_tx_tasklet(unsigned long data)
+{
+ struct rtw_dev *rtwdev = (void *)data;
+ struct rtw_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->txq_lock);
+
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ rtw_txq_push(rtwdev, rtwtxq, frame_cnt);
+
+ list_del_init(&rtwtxq->list);
+ }
+
+ spin_unlock_bh(&rtwdev->txq_lock);
+}
+
+void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
+{
+ struct rtw_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ INIT_LIST_HEAD(&rtwtxq->list);
+}
+
+void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
+{
+ struct rtw_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ spin_lock_bh(&rtwdev->txq_lock);
+ if (!list_empty(&rtwtxq->list))
+ list_del_init(&rtwtxq->list);
+ spin_unlock_bh(&rtwdev->txq_lock);
+}
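[Illustrative sketch, not part of this patch, of how a mac80211 wake_tx_queue handler would typically feed rtw_tx_tasklet(): enqueue the txq on the device list under txq_lock, then schedule the tasklet that drains the list. The tx_tasklet field name is an assumption here.]

static void demo_wake_tx_queue(struct rtw_dev *rtwdev, struct rtw_txq *rtwtxq)
{
	spin_lock_bh(&rtwdev->txq_lock);
	if (list_empty(&rtwtxq->list))
		list_add_tail(&rtwtxq->list, &rtwdev->txqs);
	spin_unlock_bh(&rtwdev->txq_lock);

	/* rtw_tx_tasklet() dequeues and pushes the pending frames */
	tasklet_schedule(&rtwdev->tx_tasklet);
}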
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 8338dbf55576..9ca4f74a501b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -35,6 +35,8 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
+#define SET_TX_DESC_USE_RTS(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(12))
#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
#define SET_TX_DESC_DATA_STBC(txdesc, value) \
@@ -75,6 +77,12 @@ enum rtw_tx_desc_queue_select {
TX_DESC_QSEL_H2C = 19,
};
+void rtw_tx(struct rtw_dev *rtwdev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
+void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
+void rtw_tx_tasklet(unsigned long data);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 212070c2baa8..10f1117c0cfb 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -70,3 +70,30 @@ void rtw_restore_reg(struct rtw_dev *rtwdev,
}
}
}
+
+void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE54M)
+ return;
+
+ if (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT1SS_MCS9) {
+ *nss = 1;
+ *mcs = rate - DESC_RATEVHT1SS_MCS0;
+ } else if (rate >= DESC_RATEVHT2SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9) {
+ *nss = 2;
+ *mcs = rate - DESC_RATEVHT2SS_MCS0;
+ } else if (rate >= DESC_RATEVHT3SS_MCS0 &&
+ rate <= DESC_RATEVHT3SS_MCS9) {
+ *nss = 3;
+ *mcs = rate - DESC_RATEVHT3SS_MCS0;
+ } else if (rate >= DESC_RATEVHT4SS_MCS0 &&
+ rate <= DESC_RATEVHT4SS_MCS9) {
+ *nss = 4;
+ *mcs = rate - DESC_RATEVHT4SS_MCS0;
+ } else if (rate >= DESC_RATEMCS0 &&
+ rate <= DESC_RATEMCS15) {
+ *mcs = rate - DESC_RATEMCS0;
+ }
+}
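[Usage sketch, not part of this patch: rtw_desc_to_mcsrate() returns early for legacy rates and leaves *mcs/*nss untouched, so a caller should gate on DESC_RATEMCS0, as rtw_rx_fill_rx_status() now does.]

static void demo_fill_mcs(u16 desc_rate, struct ieee80211_rx_status *status)
{
	/* only HT/VHT descriptor rates map to an MCS index */
	if (desc_rate >= DESC_RATEMCS0)
		rtw_desc_to_mcsrate(desc_rate, &status->rate_idx,
				    &status->nss);
}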
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index ce5e92d82efc..440088293aff 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1140,8 +1140,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
else if ((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO))
rsta->seq_start[tid] = seq_no;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- status = 0;
+ status = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 6c7f26ef6476..9cc8a335d519 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1756,6 +1756,7 @@ static int rsi_send_beacon(struct rsi_common *common)
skb_pull(skb, (64 - dword_align_bytes));
if (rsi_prepare_beacon(common, skb)) {
rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
+ dev_kfree_skb(skb);
return -EINVAL;
}
skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 760eaffeebd6..53f41fc2cadf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -785,10 +785,10 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
- if (id && id->idProduct == RSI_USB_PID_9113) {
+ if (id->idProduct == RSI_USB_PID_9113) {
rsi_dbg(INIT_ZONE, "%s: 9113 module detected\n", __func__);
adapter->device_model = RSI_DEV_9113;
- } else if (id && id->idProduct == RSI_USB_PID_9116) {
+ } else if (id->idProduct == RSI_USB_PID_9116) {
rsi_dbg(INIT_ZONE, "%s: 9116 module detected\n", __func__);
adapter->device_model = RSI_DEV_9116;
} else {
diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c
index 6574e78e05ea..2a03dc533b6a 100644
--- a/drivers/net/wireless/st/cw1200/fwio.c
+++ b/drivers/net/wireless/st/cw1200/fwio.c
@@ -320,12 +320,12 @@ int cw1200_load_firmware(struct cw1200_common *priv)
goto out;
}
- priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
- if (priv->hw_type < 0) {
+ ret = cw1200_get_hw_type(val32, &major_revision);
+ if (ret < 0) {
pr_err("Can't deduce hardware type.\n");
- ret = -ENOTSUPP;
goto out;
}
+ priv->hw_type = ret;
/* Set DPLL Reg value, and read back to confirm writes work */
ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 14133eedb3b6..12952b1c29df 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -79,10 +79,9 @@ static void cw1200_queue_register_post_gc(struct list_head *gc_list,
struct cw1200_queue_item *item)
{
struct cw1200_queue_item *gc_item;
- gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+ gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
GFP_ATOMIC);
BUG_ON(!gc_item);
- memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
list_add_tail(&gc_item->head, gc_list);
}
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index c46b044b7f7b..988581cc134b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -120,8 +120,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- if (frame.skb)
- dev_kfree_skb(frame.skb);
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 677f1146ccf0..94569cd695c8 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -16,17 +16,12 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include "wl1251.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x104c
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1251
-#define SDIO_DEVICE_ID_TI_WL1251 0x9066
-#endif
-
struct wl1251_sdio {
struct sdio_func *func;
u32 elp_val;
@@ -49,7 +44,7 @@ static void wl1251_sdio_interrupt(struct sdio_func *func)
}
static const struct sdio_device_id wl1251_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
@@ -217,6 +212,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
const struct wl1251_platform_data *wl1251_board_data;
+ struct device_node *np = func->dev.of_node;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -248,6 +244,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
+ } else if (np) {
+ wl->use_eeprom = of_property_read_bool(np,
+ "ti,wl1251-has-eeprom");
+ wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->irq = of_irq_get(np, 0);
+
+ if (wl->power_gpio == -EPROBE_DEFER ||
+ wl->irq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto disable;
+ }
}
if (gpio_is_valid(wl->power_gpio)) {
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
index e409042ee9a0..9c4511604b67 100644
--- a/drivers/net/wireless/ti/wl12xx/Kconfig
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config WL12XX
- tristate "TI wl12xx support"
+ tristate "TI wl12xx support"
depends on MAC80211
- select WLCORE
- ---help---
+ select WLCORE
+ ---help---
This module adds support for wireless adapters based on TI wl1271,
wl1273, wl1281 and wl1283 chipsets. This module does *not* include
support for wl1251. For wl1251 support, use the separate homonymous
- driver instead.
+ driver instead.
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 547ad538d8b6..e994995d79ab 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -544,11 +544,6 @@ static int wlcore_irq_locked(struct wl1271 *wl)
}
while (!done && loopcount--) {
- /*
- * In order to avoid a race with the hardirq, clear the flag
- * before acknowledging the chip.
- */
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status);
@@ -668,7 +663,7 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
disable_irq_nosync(wl->irq);
pm_wakeup_event(wl->dev, 0);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ goto out_handled;
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -692,6 +687,11 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
mutex_unlock(&wl->mutex);
+out_handled:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
return IRQ_HANDLED;
}
@@ -1434,7 +1434,7 @@ int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
field = &filter->fields[filter->num_fields];
- field->pattern = kzalloc(len, GFP_KERNEL);
+ field->pattern = kmemdup(pattern, len, GFP_KERNEL);
if (!field->pattern) {
wl1271_warning("Failed to allocate RX filter pattern");
return -ENOMEM;
@@ -1445,7 +1445,6 @@ int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
field->offset = cpu_to_le16(offset);
field->flags = flags;
field->len = len;
- memcpy(field->pattern, pattern, len);
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 7afaf35f2453..9fd8cf2d270c 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -26,14 +26,6 @@
#include "wl12xx_80211.h"
#include "io.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
static bool dump = false;
struct wl12xx_sdio_glue {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index d4c09e54fd63..18c4d998ce4b 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -186,7 +186,7 @@ static void wl12xx_spi_init(struct device *child)
spi_sync(to_spi_device(glue->dev), &m);
- /* Restore chip select configration to normal */
+ /* Restore chip select configuration to normal */
spi->mode ^= SPI_CS_HIGH;
kfree(cmd);
}
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 7997cc6de334..01305ba2d3aa 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -450,7 +450,6 @@ static void virt_wifi_net_device_destructor(struct net_device *dev)
*/
kfree(dev->ieee80211_ptr);
dev->ieee80211_ptr = NULL;
- free_netdev(dev);
}
/* No lock interaction. */
@@ -458,7 +457,7 @@ static void virt_wifi_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &virt_wifi_ops;
- dev->priv_destructor = virt_wifi_net_device_destructor;
+ dev->needs_free_netdev = true;
}
/* Called in a RCU read critical section from netif_receive_skb */
@@ -544,6 +543,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
goto unregister_netdev;
}
+ dev->priv_destructor = virt_wifi_net_device_destructor;
priv->being_deleted = false;
priv->is_connected = false;
priv->is_up = false;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 103ed00775eb..68dd7bb07ca6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -626,6 +626,38 @@ err:
return err;
}
+static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+{
+ if (queue->tx_irq) {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq == queue->rx_irq)
+ queue->rx_irq = 0;
+ queue->tx_irq = 0;
+ }
+
+ if (queue->rx_irq) {
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ queue->rx_irq = 0;
+ }
+
+ if (queue->task) {
+ kthread_stop(queue->task);
+ queue->task = NULL;
+ }
+
+ if (queue->dealloc_task) {
+ kthread_stop(queue->dealloc_task);
+ queue->dealloc_task = NULL;
+ }
+
+ if (queue->napi.poll) {
+ netif_napi_del(&queue->napi);
+ queue->napi.poll = NULL;
+ }
+
+ xenvif_unmap_frontend_data_rings(queue);
+}
+
int xenvif_connect_data(struct xenvif_queue *queue,
unsigned long tx_ring_ref,
unsigned long rx_ring_ref,
@@ -651,13 +683,27 @@ int xenvif_connect_data(struct xenvif_queue *queue,
netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
XENVIF_NAPI_WEIGHT);
+ queue->stalled = true;
+
+ task = kthread_run(xenvif_kthread_guest_rx, queue,
+ "%s-guest-rx", queue->name);
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->task = task;
+
+ task = kthread_run(xenvif_dealloc_kthread, queue,
+ "%s-dealloc", queue->name);
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->dealloc_task = task;
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
- goto err_unmap;
+ goto err;
queue->tx_irq = queue->rx_irq = err;
disable_irq(queue->tx_irq);
} else {
@@ -668,7 +714,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
- goto err_unmap;
+ goto err;
queue->tx_irq = err;
disable_irq(queue->tx_irq);
@@ -678,47 +724,18 @@ int xenvif_connect_data(struct xenvif_queue *queue,
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
- goto err_tx_unbind;
+ goto err;
queue->rx_irq = err;
disable_irq(queue->rx_irq);
}
- queue->stalled = true;
-
- task = kthread_create(xenvif_kthread_guest_rx,
- (void *)queue, "%s-guest-rx", queue->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", queue->name);
- err = PTR_ERR(task);
- goto err_rx_unbind;
- }
- queue->task = task;
- get_task_struct(task);
-
- task = kthread_create(xenvif_dealloc_kthread,
- (void *)queue, "%s-dealloc", queue->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", queue->name);
- err = PTR_ERR(task);
- goto err_rx_unbind;
- }
- queue->dealloc_task = task;
-
- wake_up_process(queue->task);
- wake_up_process(queue->dealloc_task);
-
return 0;
-err_rx_unbind:
- unbind_from_irqhandler(queue->rx_irq, queue);
- queue->rx_irq = 0;
-err_tx_unbind:
- unbind_from_irqhandler(queue->tx_irq, queue);
- queue->tx_irq = 0;
-err_unmap:
- xenvif_unmap_frontend_data_rings(queue);
- netif_napi_del(&queue->napi);
+kthread_err:
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
+ err = PTR_ERR(task);
err:
+ xenvif_disconnect_queue(queue);
return err;
}
@@ -746,30 +763,7 @@ void xenvif_disconnect_data(struct xenvif *vif)
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
- netif_napi_del(&queue->napi);
-
- if (queue->task) {
- kthread_stop(queue->task);
- put_task_struct(queue->task);
- queue->task = NULL;
- }
-
- if (queue->dealloc_task) {
- kthread_stop(queue->dealloc_task);
- queue->dealloc_task = NULL;
- }
-
- if (queue->tx_irq) {
- if (queue->tx_irq == queue->rx_irq)
- unbind_from_irqhandler(queue->tx_irq, queue);
- else {
- unbind_from_irqhandler(queue->tx_irq, queue);
- unbind_from_irqhandler(queue->rx_irq, queue);
- }
- queue->tx_irq = 0;
- }
-
- xenvif_unmap_frontend_data_rings(queue);
+ xenvif_disconnect_queue(queue);
}
xenvif_mcast_addr_list_free(vif);
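[For reference, not part of this patch: the conversion above from kthread_create() + wake_up_process() to kthread_run() is behavior-preserving, because kthread_run() is exactly that two-step pattern. A minimal sketch:]

#include <linux/kthread.h>

static struct task_struct *demo_start_kthread(int (*fn)(void *), void *data,
					      const char *name)
{
	struct task_struct *task;

	task = kthread_create(fn, data, "%s", name);
	if (IS_ERR(task))
		return task;

	wake_up_process(task);	/* kthread_run() = create + wake */
	return task;
}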
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
index 06f34fb4e0b0..ded0d03c0015 100644
--- a/drivers/nfc/nfcmrvl/Kconfig
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -15,7 +15,7 @@ config NFC_MRVL_USB
Marvell NFC-over-USB driver.
This driver provides support for Marvell NFC-over-USB devices:
- 8897.
+ 8897.
Say Y here to compile support for Marvell NFC-over-USB driver
into the kernel or say M to compile it as module.
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 0f22379887ca..18cd96284b77 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -278,7 +278,6 @@ static struct i2c_driver nfcmrvl_i2c_driver = {
.remove = nfcmrvl_i2c_remove,
.driver = {
.name = "nfcmrvl_i2c",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_nfcmrvl_i2c_match),
},
};
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 307bd2afbe05..4d1909aecd6c 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
if (r == -EREMOTEIO) {
phy->hard_fault = r;
- skb = NULL;
- } else if (r < 0) {
+ if (info->mode == NXP_NCI_MODE_FW)
+ nxp_nci_fw_recv_frame(phy->ndev, NULL);
+ }
+ if (r < 0) {
nfc_err(&client->dev, "Read failed with error %d\n", r);
goto exit_irq_handled;
}
diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig
index f6d6b345ba0d..7fe1bbe26568 100644
--- a/drivers/nfc/pn533/Kconfig
+++ b/drivers/nfc/pn533/Kconfig
@@ -26,3 +26,14 @@ config NFC_PN533_I2C
If you choose to build a module, it'll be called pn533_i2c.
Say N if unsure.
+
+config NFC_PN532_UART
+ tristate "NFC PN532 device support (UART)"
+ depends on SERIAL_DEV_BUS
+ select NFC_PN533
+ ---help---
+ This module adds support for the NXP pn532 UART interface.
+ Select this if your platform is using the UART bus.
+
+ If you choose to build a module, it'll be called pn532_uart.
+ Say N if unsure.
diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile
index 43c25b4f9466..b9648337576f 100644
--- a/drivers/nfc/pn533/Makefile
+++ b/drivers/nfc/pn533/Makefile
@@ -4,7 +4,9 @@
#
pn533_usb-objs = usb.o
pn533_i2c-objs = i2c.o
+pn532_uart-objs = uart.o
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o
obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o
+obj-$(CONFIG_NFC_PN532_UART) += pn532_uart.o
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 1832cd921ea7..7507176cca0a 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -193,12 +193,10 @@ static int pn533_i2c_probe(struct i2c_client *client,
phy->i2c_dev = client;
i2c_set_clientdata(client, phy);
- priv = pn533_register_device(PN533_DEVICE_PN532,
- PN533_NO_TYPE_B_PROTOCOLS,
- PN533_PROTO_REQ_ACK_RESP,
- phy, &i2c_phy_ops, NULL,
- &phy->i2c_dev->dev,
- &client->dev);
+ priv = pn53x_common_init(PN533_DEVICE_PN532,
+ PN533_PROTO_REQ_ACK_RESP,
+ phy, &i2c_phy_ops, NULL,
+ &phy->i2c_dev->dev);
if (IS_ERR(priv)) {
r = PTR_ERR(priv);
@@ -206,6 +204,9 @@ static int pn533_i2c_probe(struct i2c_client *client,
}
phy->priv = priv;
+ r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev);
+ if (r)
+ goto nfc_alloc_err;
r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn,
IRQF_TRIGGER_FALLING |
@@ -220,13 +221,20 @@ static int pn533_i2c_probe(struct i2c_client *client,
if (r)
goto fn_setup_err;
- return 0;
+ r = nfc_register_device(priv->nfc_dev);
+ if (r)
+ goto fn_setup_err;
+
+ return r;
fn_setup_err:
free_irq(client->irq, phy);
irq_rqst_err:
- pn533_unregister_device(phy->priv);
+ nfc_free_device(priv->nfc_dev);
+
+nfc_alloc_err:
+ pn53x_common_clean(phy->priv);
return r;
}
@@ -239,12 +247,18 @@ static int pn533_i2c_remove(struct i2c_client *client)
free_irq(client->irq, phy);
- pn533_unregister_device(phy->priv);
+ pn53x_unregister_nfc(phy->priv);
+ pn53x_common_clean(phy->priv);
return 0;
}
static const struct of_device_id of_pn533_i2c_match[] = {
+ { .compatible = "nxp,pn532", },
+ /*
+ * NOTE: The use of the compatibles with the trailing "...-i2c" is
+ * deprecated and will be removed.
+ */
{ .compatible = "nxp,pn533-i2c", },
{ .compatible = "nxp,pn532-i2c", },
{},
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a172a32aa9d9..346e084387f7 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -185,6 +185,32 @@ struct pn533_cmd_jump_dep_response {
u8 gt[];
} __packed;
+struct pn532_autopoll_resp {
+ u8 type;
+ u8 ln;
+ u8 tg;
+ u8 tgdata[];
+};
+
+/* PN532_CMD_IN_AUTOPOLL */
+#define PN532_AUTOPOLL_POLLNR_INFINITE 0xff
+#define PN532_AUTOPOLL_PERIOD 0x03 /* in units of 150 ms */
+
+#define PN532_AUTOPOLL_TYPE_GENERIC_106 0x00
+#define PN532_AUTOPOLL_TYPE_GENERIC_212 0x01
+#define PN532_AUTOPOLL_TYPE_GENERIC_424 0x02
+#define PN532_AUTOPOLL_TYPE_JEWEL 0x04
+#define PN532_AUTOPOLL_TYPE_MIFARE 0x10
+#define PN532_AUTOPOLL_TYPE_FELICA212 0x11
+#define PN532_AUTOPOLL_TYPE_FELICA424 0x12
+#define PN532_AUTOPOLL_TYPE_ISOA 0x20
+#define PN532_AUTOPOLL_TYPE_ISOB 0x23
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106 0x40
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212 0x41
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424 0x42
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106 0x80
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_212 0x81
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_424 0x82
/* PN533_TG_INIT_AS_TARGET */
#define PN533_INIT_TARGET_PASSIVE 0x1
@@ -1389,6 +1415,101 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
return rc;
}
+static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct pn532_autopoll_resp *apr;
+ struct nfc_target nfc_tgt;
+ u8 nbtg;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+
+ nfc_err(dev->dev, "%s autopoll complete error %d\n",
+ __func__, rc);
+
+ if (rc == -ENOENT) {
+ if (dev->poll_mod_count != 0)
+ return rc;
+ goto stop_poll;
+ } else if (rc < 0) {
+ nfc_err(dev->dev,
+ "Error %d when running autopoll\n", rc);
+ goto stop_poll;
+ }
+ }
+
+ nbtg = resp->data[0];
+ if ((nbtg > 2) || (nbtg <= 0))
+ return -EAGAIN;
+
+ apr = (struct pn532_autopoll_resp *)&resp->data[1];
+ while (nbtg--) {
+ memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+ switch (apr->type) {
+ case PN532_AUTOPOLL_TYPE_ISOA:
+ dev_dbg(dev->dev, "ISOA\n");
+ rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_FELICA212:
+ case PN532_AUTOPOLL_TYPE_FELICA424:
+ dev_dbg(dev->dev, "FELICA\n");
+ rc = pn533_target_found_felica(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_JEWEL:
+ dev_dbg(dev->dev, "JEWEL\n");
+ rc = pn533_target_found_jewel(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_ISOB:
+ dev_dbg(dev->dev, "ISOB\n");
+ rc = pn533_target_found_type_b(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_MIFARE:
+ dev_dbg(dev->dev, "Mifare\n");
+ rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ default:
+ nfc_err(dev->dev,
+ "Unknown current poll modulation\n");
+ rc = -EPROTO;
+ }
+
+ if (rc)
+ goto done;
+
+ if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
+ nfc_err(dev->dev,
+ "The Tg found doesn't have the desired protocol\n");
+ rc = -EAGAIN;
+ goto done;
+ }
+
+ dev->tgt_available_prots = nfc_tgt.supported_protocols;
+ apr = (struct pn532_autopoll_resp *)
+ (apr->tgdata + (apr->ln - 1));
+ }
+
+ pn533_poll_reset_mod_list(dev);
+ nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+done:
+ dev_kfree_skb(resp);
+ return rc;
+
+stop_poll:
+ nfc_err(dev->dev, "autopoll operation has been stopped\n");
+
+ pn533_poll_reset_mod_list(dev);
+ dev->poll_protocols = 0;
+ return rc;
+}
+
static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
@@ -1532,6 +1653,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_poll_modulations *cur_mod;
+ struct sk_buff *skb;
u8 rand_mod;
int rc;
@@ -1557,9 +1679,73 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
tm_protocols = 0;
}
- pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
dev->poll_protocols = im_protocols;
dev->listen_protocols = tm_protocols;
+ if (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL) {
+ skb = pn533_alloc_skb(dev, 4 + 6);
+ if (!skb)
+ return -ENOMEM;
+
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_POLLNR_INFINITE;
+ *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_PERIOD;
+
+ if ((im_protocols & NFC_PROTO_MIFARE_MASK) &&
+ (im_protocols & NFC_PROTO_ISO14443_MASK) &&
+ (im_protocols & NFC_PROTO_NFC_DEP_MASK))
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_GENERIC_106;
+ else {
+ if (im_protocols & NFC_PROTO_MIFARE_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_MIFARE;
+
+ if (im_protocols & NFC_PROTO_ISO14443_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_ISOA;
+
+ if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424;
+ }
+ }
+
+ if (im_protocols & NFC_PROTO_FELICA_MASK ||
+ im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_FELICA212;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_FELICA424;
+ }
+
+ if (im_protocols & NFC_PROTO_JEWEL_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_JEWEL;
+
+ if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_ISOB;
+
+ if (tm_protocols)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106;
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_AUTOPOLL, skb,
+ pn533_autopoll_complete, NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+ else
+ dev->poll_mod_count++;
+
+ return rc;
+ }
+
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
/* Do not always start polling from the same modulation */
get_random_bytes(&rand_mod, sizeof(rand_mod));
@@ -2457,9 +2643,17 @@ static int pn532_sam_configuration(struct nfc_dev *nfc_dev)
static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ if (dev->phy_ops->dev_up) {
+ rc = dev->phy_ops->dev_up(dev);
+ if (rc)
+ return rc;
+ }
- if (dev->device_type == PN533_DEVICE_PN532) {
- int rc = pn532_sam_configuration(nfc_dev);
+ if ((dev->device_type == PN533_DEVICE_PN532) ||
+ (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) {
+ rc = pn532_sam_configuration(nfc_dev);
if (rc)
return rc;
@@ -2470,7 +2664,14 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
static int pn533_dev_down(struct nfc_dev *nfc_dev)
{
- return pn533_rf_field(nfc_dev, 0);
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int ret;
+
+ ret = pn533_rf_field(nfc_dev, 0);
+ if (dev->phy_ops->dev_down && !ret)
+ ret = dev->phy_ops->dev_down(dev);
+
+ return ret;
}
static struct nfc_ops pn533_nfc_ops = {
@@ -2498,6 +2699,7 @@ static int pn533_setup(struct pn533 *dev)
case PN533_DEVICE_PASORI:
case PN533_DEVICE_ACR122U:
case PN533_DEVICE_PN532:
+ case PN533_DEVICE_PN532_AUTOPOLL:
max_retries.mx_rty_atr = 0x2;
max_retries.mx_rty_psl = 0x1;
max_retries.mx_rty_passive_act =
@@ -2534,6 +2736,7 @@ static int pn533_setup(struct pn533 *dev)
switch (dev->device_type) {
case PN533_DEVICE_STD:
case PN533_DEVICE_PN532:
+ case PN533_DEVICE_PN532_AUTOPOLL:
break;
case PN533_DEVICE_PASORI:
@@ -2580,14 +2783,12 @@ int pn533_finalize_setup(struct pn533 *dev)
}
EXPORT_SYMBOL_GPL(pn533_finalize_setup);
-struct pn533 *pn533_register_device(u32 device_type,
- u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
- struct device *dev,
- struct device *parent)
+ struct device *dev)
{
struct pn533 *priv;
int rc = -ENOMEM;
@@ -2628,43 +2829,18 @@ struct pn533 *pn533_register_device(u32 device_type,
skb_queue_head_init(&priv->fragment_skb);
INIT_LIST_HEAD(&priv->cmd_queue);
-
- priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
- priv->ops->tx_header_len +
- PN533_CMD_DATAEXCH_HEAD_LEN,
- priv->ops->tx_tail_len);
- if (!priv->nfc_dev) {
- rc = -ENOMEM;
- goto destroy_wq;
- }
-
- nfc_set_parent_dev(priv->nfc_dev, parent);
- nfc_set_drvdata(priv->nfc_dev, priv);
-
- rc = nfc_register_device(priv->nfc_dev);
- if (rc)
- goto free_nfc_dev;
-
return priv;
-free_nfc_dev:
- nfc_free_device(priv->nfc_dev);
-
-destroy_wq:
- destroy_workqueue(priv->wq);
error:
kfree(priv);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(pn533_register_device);
+EXPORT_SYMBOL_GPL(pn53x_common_init);
-void pn533_unregister_device(struct pn533 *priv)
+void pn53x_common_clean(struct pn533 *priv)
{
struct pn533_cmd *cmd, *n;
- nfc_unregister_device(priv->nfc_dev);
- nfc_free_device(priv->nfc_dev);
-
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
@@ -2679,8 +2855,47 @@ void pn533_unregister_device(struct pn533 *priv)
kfree(priv);
}
-EXPORT_SYMBOL_GPL(pn533_unregister_device);
+EXPORT_SYMBOL_GPL(pn53x_common_clean);
+
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+ struct device *parent)
+{
+ priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+ priv->ops->tx_header_len +
+ PN533_CMD_DATAEXCH_HEAD_LEN,
+ priv->ops->tx_tail_len);
+ if (!priv->nfc_dev)
+ return -ENOMEM;
+
+ nfc_set_parent_dev(priv->nfc_dev, parent);
+ nfc_set_drvdata(priv->nfc_dev, priv);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pn532_i2c_nfc_alloc);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+ struct device *parent)
+{
+ int rc;
+
+ rc = pn532_i2c_nfc_alloc(priv, protocols, parent);
+ if (rc)
+ return rc;
+
+ rc = nfc_register_device(priv->nfc_dev);
+ if (rc)
+ nfc_free_device(priv->nfc_dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pn53x_register_nfc);
+
+void pn53x_unregister_nfc(struct pn533 *priv)
+{
+ nfc_unregister_device(priv->nfc_dev);
+ nfc_free_device(priv->nfc_dev);
+}
+EXPORT_SYMBOL_GPL(pn53x_unregister_nfc);
MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index 8bf9d6ece0f5..5f94f38a2a08 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -6,10 +6,11 @@
* Copyright (C) 2012-2013 Tieto Poland
*/
-#define PN533_DEVICE_STD 0x1
-#define PN533_DEVICE_PASORI 0x2
-#define PN533_DEVICE_ACR122U 0x3
-#define PN533_DEVICE_PN532 0x4
+#define PN533_DEVICE_STD 0x1
+#define PN533_DEVICE_PASORI 0x2
+#define PN533_DEVICE_ACR122U 0x3
+#define PN533_DEVICE_PN532 0x4
+#define PN533_DEVICE_PN532_AUTOPOLL 0x5
#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
@@ -43,6 +44,11 @@
/* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
#define PN533_STD_FRAME_ACK_SIZE 6
+/*
+ * Preamble (1), SoPC (2), Packet Length (1), Packet Length Checksum (1),
+ * Specific Application Level Error Code (1) , Postamble (1)
+ */
+#define PN533_STD_ERROR_FRAME_SIZE 8
#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
/* Half start code (3), LEN (4) should be 0xffff for extended frame */
@@ -70,6 +76,7 @@
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+#define PN533_CMD_IN_AUTOPOLL 0x60
#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
#define PN533_CMD_TG_GET_DATA 0x86
@@ -84,6 +91,9 @@
#define PN533_CMD_MI_MASK 0x40
#define PN533_CMD_RET_SUCCESS 0x00
+#define PN533_FRAME_DATALEN_ACK 0x00
+#define PN533_FRAME_DATALEN_ERROR 0x01
+#define PN533_FRAME_DATALEN_EXTENDED 0xFF
enum pn533_protocol_type {
PN533_PROTO_REQ_ACK_RESP = 0,
@@ -207,21 +217,33 @@ struct pn533_phy_ops {
struct sk_buff *out);
int (*send_ack)(struct pn533 *dev, gfp_t flags);
void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
+ /*
+ * dev_up and dev_down are optional.
+ * They are used to inform the phy layer that the nfc chip is
+ * about to be used. The phy layer can then bring up its
+ * interface to the chip, and otherwise keep it suspended to
+ * save power.
+ */
+ int (*dev_up)(struct pn533 *priv);
+ int (*dev_down)(struct pn533 *priv);
};
-struct pn533 *pn533_register_device(u32 device_type,
- u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
- struct device *dev,
- struct device *parent);
+ struct device *dev);
int pn533_finalize_setup(struct pn533 *dev);
-void pn533_unregister_device(struct pn533 *priv);
+void pn53x_common_clean(struct pn533 *priv);
void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+ struct device *parent);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+ struct device *parent);
+void pn53x_unregister_nfc(struct pn533 *priv);
bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame);
bool pn533_rx_frame_is_ack(void *_frame);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
new file mode 100644
index 000000000000..a0665d8ea85b
--- /dev/null
+++ b/drivers/nfc/pn533/uart.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for NXP PN532 NFC Chip - UART transport layer
+ *
+ * Copyright (C) 2018 Lemonage Software GmbH
+ * Author: Lars Pöschel <poeschel@lemonage.de>
+ * All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include "pn533.h"
+
+#define PN532_UART_SKB_BUFF_LEN (PN533_CMD_DATAEXCH_DATA_MAXLEN * 2)
+
+enum send_wakeup {
+ PN532_SEND_NO_WAKEUP = 0,
+ PN532_SEND_WAKEUP,
+ PN532_SEND_LAST_WAKEUP,
+};
+
+struct pn532_uart_phy {
+ struct serdev_device *serdev;
+ struct sk_buff *recv_skb;
+ struct pn533 *priv;
+ /*
+ * send_wakeup controls whether a wakeup request must be sent to the
+ * pn532 chip prior to the actual command. There is a small
+ * probability of a race condition. We decided not to protect the
+ * variable with a mutex, as the worst that can happen is that we
+ * send a wakeup to a chip that is already awake. This does not
+ * hurt; it is a no-op to the chip.
+ */
+ enum send_wakeup send_wakeup;
+ struct timer_list cmd_timeout;
+ struct sk_buff *cur_out_buf;
+};
+
+static int pn532_uart_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+{
+ /* wakeup sequence and dummy bytes for waiting time */
+ static const u8 wakeup[] = {
+ 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int err;
+
+ print_hex_dump_debug("PN532_uart TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ pn532->cur_out_buf = out;
+ if (pn532->send_wakeup) {
+ err = serdev_device_write(pn532->serdev,
+ wakeup, sizeof(wakeup),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+ }
+
+ if (pn532->send_wakeup == PN532_SEND_LAST_WAKEUP)
+ pn532->send_wakeup = PN532_SEND_NO_WAKEUP;
+
+ err = serdev_device_write(pn532->serdev, out->data, out->len,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ mod_timer(&pn532->cmd_timeout, HZ / 40 + jiffies);
+ return 0;
+}
+
+static int pn532_uart_send_ack(struct pn533 *dev, gfp_t flags)
+{
+ /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
+ static const u8 ack[PN533_STD_FRAME_ACK_SIZE] = {
+ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int err;
+
+ err = serdev_device_write(pn532->serdev, ack, sizeof(ack),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void pn532_uart_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+ /* An ack will cancel the last issued command */
+ pn532_uart_send_ack(dev, flags);
+ /* schedule cmd_complete_work to finish current command execution */
+ pn533_recv_frame(dev, NULL, -ENOENT);
+}
+
+static int pn532_dev_up(struct pn533 *dev)
+{
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int ret = 0;
+
+ ret = serdev_device_open(pn532->serdev);
+ if (ret)
+ return ret;
+
+ pn532->send_wakeup = PN532_SEND_LAST_WAKEUP;
+ return ret;
+}
+
+static int pn532_dev_down(struct pn533 *dev)
+{
+ struct pn532_uart_phy *pn532 = dev->phy;
+
+ serdev_device_close(pn532->serdev);
+ pn532->send_wakeup = PN532_SEND_WAKEUP;
+
+ return 0;
+}
+
+static struct pn533_phy_ops uart_phy_ops = {
+ .send_frame = pn532_uart_send_frame,
+ .send_ack = pn532_uart_send_ack,
+ .abort_cmd = pn532_uart_abort_cmd,
+ .dev_up = pn532_dev_up,
+ .dev_down = pn532_dev_down,
+};
+
+static void pn532_cmd_timeout(struct timer_list *t)
+{
+ struct pn532_uart_phy *dev = from_timer(dev, t, cmd_timeout);
+
+ pn532_uart_send_frame(dev->priv, dev->cur_out_buf);
+}
+
+/*
+ * Scans the buffer for a pn532 frame. The frame is not checked for
+ * validity here; that is done later by pn533_rx_frame_is_valid. This
+ * makes the scan robust against malformed or erroneously transmitted
+ * frames. Adjusts the buffer position to where the frame starts,
+ * since pn533_recv_frame expects a well-formed frame.
+ */
+static int pn532_uart_rx_is_frame(struct sk_buff *skb)
+{
+ struct pn533_std_frame *std;
+ struct pn533_ext_frame *ext;
+ u16 frame_len;
+ int i;
+
+ for (i = 0; i + PN533_STD_FRAME_ACK_SIZE <= skb->len; i++) {
+ std = (struct pn533_std_frame *)&skb->data[i];
+ /* search start code */
+ if (std->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+ continue;
+
+ /* frame type */
+ switch (std->datalen) {
+ case PN533_FRAME_DATALEN_ACK:
+ if (std->datalen_checksum == 0xff) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ case PN533_FRAME_DATALEN_ERROR:
+ if ((std->datalen_checksum == 0xff) &&
+ (skb->len >=
+ PN533_STD_ERROR_FRAME_SIZE)) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ case PN533_FRAME_DATALEN_EXTENDED:
+ ext = (struct pn533_ext_frame *)&skb->data[i];
+ frame_len = be16_to_cpu(ext->datalen);
+ if (skb->len >= frame_len +
+ sizeof(struct pn533_ext_frame) +
+ 2 /* CKS + Postamble */) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ default: /* normal information frame */
+ frame_len = std->datalen;
+ if (skb->len >= frame_len +
+ sizeof(struct pn533_std_frame) +
+ 2 /* CKS + Postamble */) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int pn532_receive_buf(struct serdev_device *serdev,
+ const unsigned char *data, size_t count)
+{
+ struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
+ size_t i;
+
+ del_timer(&dev->cmd_timeout);
+ for (i = 0; i < count; i++) {
+ skb_put_u8(dev->recv_skb, *data++);
+ if (!pn532_uart_rx_is_frame(dev->recv_skb))
+ continue;
+
+ pn533_recv_frame(dev->priv, dev->recv_skb, 0);
+ dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!dev->recv_skb)
+ return 0;
+ }
+
+ return i;
+}
+
+static struct serdev_device_ops pn532_serdev_ops = {
+ .receive_buf = pn532_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static const struct of_device_id pn532_uart_of_match[] = {
+ { .compatible = "nxp,pn532", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pn532_uart_of_match);
+
+static int pn532_uart_probe(struct serdev_device *serdev)
+{
+ struct pn532_uart_phy *pn532;
+ struct pn533 *priv;
+ int err;
+
+ err = -ENOMEM;
+ pn532 = kzalloc(sizeof(*pn532), GFP_KERNEL);
+ if (!pn532)
+ goto err_exit;
+
+ pn532->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!pn532->recv_skb)
+ goto err_free;
+
+ pn532->serdev = serdev;
+ serdev_device_set_drvdata(serdev, pn532);
+ serdev_device_set_client_ops(serdev, &pn532_serdev_ops);
+ err = serdev_device_open(serdev);
+ if (err) {
+ dev_err(&serdev->dev, "Unable to open device\n");
+ goto err_skb;
+ }
+
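+	/* serdev_device_set_baudrate() returns the baud rate actually set */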
+ err = serdev_device_set_baudrate(serdev, 115200);
+ if (err != 115200) {
+ err = -EINVAL;
+ goto err_serdev;
+ }
+
+ serdev_device_set_flow_control(serdev, false);
+ pn532->send_wakeup = PN532_SEND_WAKEUP;
+ timer_setup(&pn532->cmd_timeout, pn532_cmd_timeout, 0);
+ priv = pn53x_common_init(PN533_DEVICE_PN532_AUTOPOLL,
+ PN533_PROTO_REQ_ACK_RESP,
+ pn532, &uart_phy_ops, NULL,
+ &pn532->serdev->dev);
+ if (IS_ERR(priv)) {
+ err = PTR_ERR(priv);
+ goto err_serdev;
+ }
+
+ pn532->priv = priv;
+ err = pn533_finalize_setup(pn532->priv);
+ if (err)
+ goto err_clean;
+
+ serdev_device_close(serdev);
+ err = pn53x_register_nfc(priv, PN533_NO_TYPE_B_PROTOCOLS, &serdev->dev);
+ if (err) {
+ pn53x_common_clean(pn532->priv);
+ goto err_skb;
+ }
+
+ return err;
+
+err_clean:
+ pn53x_common_clean(pn532->priv);
+err_serdev:
+ serdev_device_close(serdev);
+err_skb:
+ kfree_skb(pn532->recv_skb);
+err_free:
+ kfree(pn532);
+err_exit:
+ return err;
+}
+
+static void pn532_uart_remove(struct serdev_device *serdev)
+{
+ struct pn532_uart_phy *pn532 = serdev_device_get_drvdata(serdev);
+
+ pn53x_unregister_nfc(pn532->priv);
+ serdev_device_close(serdev);
+ pn53x_common_clean(pn532->priv);
+ kfree_skb(pn532->recv_skb);
+ kfree(pn532);
+}
+
+static struct serdev_device_driver pn532_uart_driver = {
+ .probe = pn532_uart_probe,
+ .remove = pn532_uart_remove,
+ .driver = {
+ .name = "pn532_uart",
+ .of_match_table = of_match_ptr(pn532_uart_of_match),
+ },
+};
+
+module_serdev_device_driver(pn532_uart_driver);
+
+MODULE_AUTHOR("Lars Pöschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("PN532 UART driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index e897e4d768ef..4590fbf82dc2 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -534,9 +534,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
goto error;
}
- priv = pn533_register_device(id->driver_info, protocols, protocol_type,
+ priv = pn53x_common_init(id->driver_info, protocol_type,
phy, &usb_phy_ops, fops,
- &phy->udev->dev, &interface->dev);
+ &phy->udev->dev);
if (IS_ERR(priv)) {
rc = PTR_ERR(priv);
@@ -547,14 +547,17 @@ static int pn533_usb_probe(struct usb_interface *interface,
rc = pn533_finalize_setup(priv);
if (rc)
- goto err_deregister;
+ goto err_clean;
usb_set_intfdata(interface, phy);
+ rc = pn53x_register_nfc(priv, protocols, &interface->dev);
+ if (rc)
+ goto err_clean;
return 0;
-err_deregister:
- pn533_unregister_device(phy->priv);
+err_clean:
+ pn53x_common_clean(priv);
error:
usb_kill_urb(phy->in_urb);
usb_kill_urb(phy->out_urb);
@@ -577,7 +580,8 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
if (!phy)
return;
- pn533_unregister_device(phy->priv);
+ pn53x_unregister_nfc(phy->priv);
+ pn53x_common_clean(phy->priv);
usb_set_intfdata(interface, NULL);
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 145ddf3f0a45..604dba4f18af 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
if (rc)
- usb_unlink_urb(dev->out_urb);
+ usb_kill_urb(dev->out_urb);
exit:
mutex_unlock(&dev->out_urb_lock);
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index e4f7fa00862d..b4eb926d220a 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -279,7 +279,6 @@ MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
static struct i2c_driver s3fwrn5_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = S3FWRN5_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
},
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 65865e460ab8..04dd46647db3 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -354,13 +354,10 @@ static void pp_clear_ctx(struct pp_ctx *pp)
static void pp_setup_dbgfs(struct pp_ctx *pp)
{
struct pci_dev *pdev = pp->ntb->pdev;
- void *ret;
pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);
- ret = debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
- if (!ret)
- dev_warn(&pp->ntb->dev, "DebugFS unsupported\n");
+ debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
}
static void pp_clear_dbgfs(struct pp_ctx *pp)
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index bb0d63a44f41..f70ba58dbc55 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -163,7 +163,7 @@ unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ unsigned char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
while (len) {
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 36af7af6b7cf..b7d1eb38b27d 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -4,6 +4,7 @@ menuconfig LIBNVDIMM
depends on PHYS_ADDR_T_64BIT
depends on HAS_IOMEM
depends on BLK_DEV
+ select MEMREGION
help
Generic support for non-volatile memory devices including
ACPI-6-NFIT defined resources. On platforms that define an
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 9204f1e9fd14..e592c4964674 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -455,7 +455,6 @@ static __exit void libnvdimm_exit(void)
nd_region_exit();
nvdimm_exit();
nvdimm_bus_exit();
- nd_region_devs_exit();
nvdimm_devs_exit();
}
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 25fa121104d0..aa059439fca0 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -114,7 +114,6 @@ struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
-void nd_region_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef423ba1a711..fbf34cf688f4 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -3,6 +3,7 @@
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/scatterlist.h>
+#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -19,7 +20,6 @@
*/
#include <linux/io-64-nonatomic-hi-lo.h>
-static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
@@ -133,7 +133,7 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- ida_simple_remove(&region_ida, nd_region->id);
+ memregion_free(nd_region->id);
if (is_nd_blk(dev))
kfree(to_nd_blk_region(dev));
else
@@ -985,7 +985,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!region_buf)
return NULL;
- nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
+ nd_region->id = memregion_alloc(GFP_KERNEL);
if (nd_region->id < 0)
goto err_id;
@@ -1044,7 +1044,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
err_percpu:
- ida_simple_remove(&region_ida, nd_region->id);
+ memregion_free(nd_region->id);
err_id:
kfree(region_buf);
return NULL;
@@ -1216,8 +1216,3 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
-
-void __exit nd_region_devs_exit(void)
-{
- ida_destroy(&region_ida);
-}
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 2b36f052bfb9..c6439638a419 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -23,6 +23,16 @@ config NVME_MULTIPATH
	  /dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
+config NVME_HWMON
+ bool "NVMe hardware monitoring"
+ depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
+ help
+ This provides support for NVMe hardware monitoring. If enabled,
+ a hardware monitoring device will be created for each NVMe drive
+ in the system.
+
+ If unsure, say N.
+
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 8a4b671c5f0c..fc7b26be692d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -14,6 +14,7 @@ nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
+nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa7ba09dca77..8e8527408db3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -283,6 +283,8 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
if (nvme_req(req)->ctrl->kas)
nvme_req(req)->ctrl->comp_seen = true;
@@ -313,7 +315,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
if (blk_mq_request_completed(req))
return true;
- nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(req);
return true;
}
@@ -611,8 +613,14 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_dsm_range *range;
struct bio *bio;
- range = kmalloc_array(segments, sizeof(*range),
- GFP_ATOMIC | __GFP_NOWARN);
+ /*
+ * Some devices do not consider the DSM 'Number of Ranges' field when
+	 * determining how much data to DMA. Always allocate memory for the
+	 * maximum number of segments to prevent the device from reading
+	 * beyond the end of the buffer.
+ */
+ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
+
+ range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
if (!range) {
/*
* If we fail allocation our range, fallback to the controller
@@ -626,7 +634,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
}
__rq_for_each_bio(bio, req) {
- u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
if (n < segments) {
@@ -652,7 +660,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range) * segments;
+ req->special_vec.bv_len = alloc_size;
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return BLK_STS_OK;
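
Editor's note, as a worked check of the comment above: struct nvme_dsm_range is 16 bytes (two __le32 plus one __le64) and NVME_DSM_MAX_RANGES is 256, so the fixed allocation is 4096 bytes, exactly one page on most configurations. A standalone sketch of the arithmetic:

#include <stdio.h>
#include <stdint.h>

/* mirrors the kernel layout: __le32 cattr, __le32 nlb, __le64 slba */
struct dsm_range { uint32_t cattr, nlb; uint64_t slba; };

int main(void)
{
	printf("%zu\n", sizeof(struct dsm_range) * 256);	/* prints 4096 */
	return 0;
}
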
@@ -667,7 +675,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->write_zeroes.slba =
- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->write_zeroes.control = 0;
@@ -691,7 +699,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
@@ -1647,7 +1655,7 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
static void nvme_set_chunk_size(struct nvme_ns *ns)
{
- u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
+ u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
@@ -1684,8 +1692,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
- u32 max_sectors;
- unsigned short bs = 1 << ns->lba_shift;
+ u64 max_blocks;
if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
(ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
@@ -1701,11 +1708,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
* nvme_init_identify() if available.
*/
if (ns->ctrl->max_hw_sectors == UINT_MAX)
- max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
+ max_blocks = (u64)USHRT_MAX + 1;
else
- max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
+ max_blocks = ns->ctrl->max_hw_sectors + 1;
- blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
+ blk_queue_max_write_zeroes_sectors(disk->queue,
+ nvme_lba_to_sect(ns, max_blocks));
}
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1748,7 +1756,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
- sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
+ sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
unsigned short bs = 1 << ns->lba_shift;
u32 atomic_bs, phys_bs, io_opt;
@@ -2796,6 +2804,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->mtfa = le16_to_cpu(id->mtfa);
ctrl->oaes = le32_to_cpu(id->oaes);
+ ctrl->wctemp = le16_to_cpu(id->wctemp);
+ ctrl->cctemp = le16_to_cpu(id->cctemp);
+
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
if (id->mdts)
@@ -2895,6 +2906,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
+ if (!ctrl->identified)
+ nvme_hwmon_init(ctrl);
+
ctrl->identified = true;
return 0;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 265f89e11d8b..679a721ae229 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1224,7 +1224,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
lsreq->rqstlen = sizeof(*assoc_rqst);
lsreq->rspaddr = assoc_acc;
lsreq->rsplen = sizeof(*assoc_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1264,7 +1264,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
ctrl->association_id =
@@ -1332,7 +1332,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
lsreq->rqstlen = sizeof(*conn_rqst);
lsreq->rspaddr = conn_acc;
lsreq->rsplen = sizeof(*conn_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1363,7 +1363,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create I/O Connection LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
queue->connection_id =
@@ -1376,7 +1376,7 @@ out_free_buffer:
out_no_memory:
if (ret)
dev_err(ctrl->dev,
- "queue %d connect command failed (%d).\n",
+ "queue %d connect I/O queue failed (%d).\n",
queue->qnum, ret);
return ret;
}
@@ -1413,8 +1413,8 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
- struct fcnvme_ls_disconnect_rqst *discon_rqst;
- struct fcnvme_ls_disconnect_acc *discon_acc;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
struct nvmefc_ls_req_op *lsop;
struct nvmefc_ls_req *lsreq;
int ret;
@@ -1430,11 +1430,11 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
lsreq = &lsop->ls_req;
lsreq->private = (void *)&lsop[1];
- discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
- discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
- discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
discon_rqst->desc_list_len = cpu_to_be32(
sizeof(struct fcnvme_lsdesc_assoc_id) +
sizeof(struct fcnvme_lsdesc_disconn_cmd));
@@ -1451,22 +1451,17 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
discon_rqst->discon_cmd.desc_len =
fcnvme_lsdesc_len(
sizeof(struct fcnvme_lsdesc_disconn_cmd));
- discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
- discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
lsreq->rqstaddr = discon_rqst;
lsreq->rqstlen = sizeof(*discon_rqst);
lsreq->rspaddr = discon_acc;
lsreq->rsplen = sizeof(*discon_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
nvme_fc_disconnect_assoc_done);
if (ret)
kfree(lsop);
-
- /* only meaningful part to terminating the association */
- ctrl->association_id = 0;
}
@@ -1662,7 +1657,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
(freq->rcv_rsplen / 4) ||
be32_to_cpu(op->rsp_iu.xfrd_len) !=
freq->transferred_length ||
- op->rsp_iu.status_code ||
+ op->rsp_iu.ersp_result ||
sqe->common.command_id != cqe->command_id)) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device,
@@ -1672,7 +1667,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
be32_to_cpu(op->rsp_iu.xfrd_len),
freq->transferred_length,
- op->rsp_iu.status_code,
+ op->rsp_iu.ersp_result,
sqe->common.command_id,
cqe->command_id);
goto done;
@@ -1731,9 +1726,14 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
op->rq = rq;
op->rqno = rqno;
- cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->format_id = NVME_CMD_FORMAT_ID;
cmdiu->fc_id = NVME_CMD_FC_ID;
cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ if (queue->qnum)
+ cmdiu->rsv_cat = fccmnd_set_cat_css(0,
+ (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
+ else
+ cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
rq_dma_dir(rq));
- nvme_cleanup_cmd(rq);
-
sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0;
@@ -2305,6 +2303,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
@@ -2695,7 +2694,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
/* warn if maxcmd is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl maxcmd %u, reducing "
- "to queue_size\n",
+ "to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
@@ -2703,7 +2702,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
/* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ "queue_size %zu > ctrl sqsize %u, reducing "
+ "to sqsize\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
opts->queue_size = ctrl->ctrl.sqsize + 1;
}
@@ -2739,6 +2739,7 @@ out_term_aen_ops:
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
+ ctrl->association_id = 0;
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
@@ -2830,6 +2831,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
+ ctrl->association_id = 0;
+
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
nvme_fc_free_io_queues(ctrl);
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
new file mode 100644
index 000000000000..a5af21f5d370
--- /dev/null
+++ b/drivers/nvme/host/hwmon.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express hardware monitoring support
+ * Copyright (c) 2019, Guenter Roeck
+ */
+
+#include <linux/hwmon.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+/* These macros should be moved to linux/temperature.h */
+#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
+#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)
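
Editor's note, a quick worked example of the rounding behaviour, assuming DIV_ROUND_CLOSEST rounds to the nearest integer:

/*
 * MILLICELSIUS_TO_KELVIN(25000) = DIV_ROUND_CLOSEST(298150, 1000) = 298
 * KELVIN_TO_MILLICELSIUS(298)   = 298 * 1000 - 273150            = 24850
 * A round trip quantizes to whole kelvins (here losing 150 m°C), which
 * matches the 1 K resolution of the NVMe temperature fields.
 */
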
+
+struct nvme_hwmon_data {
+ struct nvme_ctrl *ctrl;
+ struct nvme_smart_log log;
+ struct mutex read_lock;
+};
+
+static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long *temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ u32 status;
+ int ret;
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ &status);
+ if (ret > 0)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ *temp = KELVIN_TO_MILLICELSIUS(status & NVME_TEMP_THRESH_MASK);
+
+ return 0;
+}
+
+static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ int ret;
+
+ temp = MILLICELSIUS_TO_KELVIN(temp);
+ threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ NULL);
+ if (ret > 0)
+ return -EIO;
+
+ return ret;
+}
+
+static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
+{
+ int ret;
+
+ ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+ &data->log, sizeof(data->log), 0);
+
+ return ret <= 0 ? ret : -EIO;
+}
+
+static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+ struct nvme_smart_log *log = &data->log;
+ int temp;
+ int err;
+
+ /*
+ * First handle attributes which don't require us to read
+ * the smart log.
+ */
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_get_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_get_temp_thresh(data->ctrl, channel, true, val);
+ case hwmon_temp_crit:
+ *val = KELVIN_TO_MILLICELSIUS(data->ctrl->cctemp);
+ return 0;
+ default:
+ break;
+ }
+
+ mutex_lock(&data->read_lock);
+ err = nvme_hwmon_get_smart_log(data);
+ if (err)
+ goto unlock;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!channel)
+ temp = get_unaligned_le16(log->temperature);
+ else
+ temp = le16_to_cpu(log->temp_sensor[channel - 1]);
+ *val = KELVIN_TO_MILLICELSIUS(temp);
+ break;
+ case hwmon_temp_alarm:
+ *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&data->read_lock);
+ return err;
+}
+
+static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_set_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_set_temp_thresh(data->ctrl, channel, true, val);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const char * const nvme_hwmon_sensor_names[] = {
+ "Composite",
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4",
+ "Sensor 5",
+ "Sensor 6",
+ "Sensor 7",
+ "Sensor 8",
+};
+
+static int nvme_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ *str = nvme_hwmon_sensor_names[channel];
+ return 0;
+}
+
+static umode_t nvme_hwmon_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct nvme_hwmon_data *data = _data;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ if (!channel && data->ctrl->cctemp)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ if ((!channel && data->ctrl->wctemp) ||
+ (channel && data->log.temp_sensor[channel - 1])) {
+ if (data->ctrl->quirks &
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ return 0444;
+ return 0644;
+ }
+ break;
+ case hwmon_temp_alarm:
+ if (!channel)
+ return 0444;
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ if (!channel || data->log.temp_sensor[channel - 1])
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
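
Editor's note, summarizing the visibility rules above as data (illustrative comment derived from the code, not part of the patch):

/*
 * channel 0 ("Composite"): input and alarm always; crit iff cctemp != 0;
 *	max/min iff wctemp != 0 (read-only with the no-thresh quirk).
 * channel n >= 1 ("Sensor n"): shown only while temp_sensor[n - 1] is
 *	nonzero in the SMART log cached at init time.
 */
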
+
+static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops nvme_hwmon_ops = {
+ .is_visible = nvme_hwmon_is_visible,
+ .read = nvme_hwmon_read,
+ .read_string = nvme_hwmon_read_string,
+ .write = nvme_hwmon_write,
+};
+
+static const struct hwmon_chip_info nvme_hwmon_chip_info = {
+ .ops = &nvme_hwmon_ops,
+ .info = nvme_hwmon_info,
+};
+
+void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ struct nvme_hwmon_data *data;
+ struct device *hwmon;
+ int err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ devm_kfree(dev, data);
+ return;
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
+ &nvme_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ devm_kfree(dev, data);
+ }
+}
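
Editor's note: once registered, the device is reachable through the standard hwmon sysfs attributes (tempN_input, tempN_max, tempN_min, tempN_crit, tempN_alarm, tempN_label). A minimal userspace sketch; the hwmon index varies per system, so the path is illustrative:

#include <stdio.h>

int main(void)
{
	long mdegc;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mdegc) == 1)	/* millidegrees Celsius */
		printf("composite: %ld mC\n", mdegc);
	fclose(f);
	return 0;
}
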
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e0f064dcbd02..797c18337d96 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -95,6 +95,7 @@ void nvme_failover_req(struct request *req)
}
break;
case NVME_SC_HOST_PATH_ERROR:
+ case NVME_SC_HOST_ABORTED_CMD:
/*
* Temporary transport disruption in talking to the controller.
* Try to send on a new path.
@@ -446,8 +447,14 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
struct nvme_ana_group_desc *desc = base + offset;
- u32 nr_nsids = le32_to_cpu(desc->nnsids);
- size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+ u32 nr_nsids;
+ size_t nsid_buf_size;
+
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+
+ nr_nsids = le32_to_cpu(desc->nnsids);
+ nsid_buf_size = nr_nsids * sizeof(__le32);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
@@ -467,8 +474,6 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return error;
offset += nsid_buf_size;
- if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
- return -EINVAL;
}
return 0;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 22e8401352c2..3b9cbe0668fa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -115,6 +115,11 @@ enum nvme_quirks {
* Prevent tag overlap between queues
*/
NVME_QUIRK_SHARED_TAGS = (1 << 13),
+
+ /*
+ * Don't change the value of the temperature threshold feature
+ */
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
};
/*
@@ -231,6 +236,8 @@ struct nvme_ctrl {
u16 kas;
u8 npss;
u8 apsta;
+ u16 wctemp;
+ u16 cctemp;
u32 oaes;
u32 aen_result;
u32 ctratt;
@@ -419,9 +426,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+/*
+ * Convert a 512B sector number to a device logical block number.
+ */
+static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+{
+ return sector >> (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert a device logical block number to a 512B sector number.
+ */
+static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
- return (sector >> (ns->lba_shift - 9));
+ return lba << (ns->lba_shift - SECTOR_SHIFT);
}
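
Editor's note, a worked example of the two helpers, assuming a namespace formatted with 4096-byte logical blocks (lba_shift = 12, SECTOR_SHIFT = 9):

#include <stdint.h>

/* sketch: with 4K LBAs, lba_shift - SECTOR_SHIFT == 12 - 9 == 3 */
static uint64_t sect_to_lba_4k(uint64_t sector) { return sector >> 3; }
static uint64_t lba_to_sect_4k(uint64_t lba)    { return lba << 3; }
/* e.g. sectors 8..15 all fall in LBA 1, and LBA 1 starts at sector 8 */
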
static inline void nvme_end_request(struct request *req, __le16 status,
@@ -446,6 +464,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
put_device(ctrl->device);
}
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+ return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
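
Editor's note: with the default 32-entry admin queue reserving one tag for async events, NVME_AQ_BLK_MQ_DEPTH is 31, so the helper behaves as follows (illustrative values):

/*
 * nvme_is_aen_req(0, 31) -> true   admin queue, reserved AEN tag
 * nvme_is_aen_req(0,  3) -> false  ordinary admin command
 * nvme_is_aen_req(1, 31) -> false  I/O queues never carry AENs
 */
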
+
void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -652,4 +675,10 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
return dev_to_disk(dev)->private_data;
}
+#ifdef CONFIG_NVME_HWMON
+void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+#endif
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 869f462e6b6e..dcaad5831cee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -925,7 +925,6 @@ static void nvme_pci_complete_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_cleanup_cmd(req);
if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
@@ -968,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvmeq->qid == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
@@ -2982,7 +2980,7 @@ static int nvme_suspend(struct device *dev)
/*
* Clearing npss forces a controller reset on resume. The
- * correct value will be resdicovered then.
+ * correct value will be rediscovered then.
*/
ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
@@ -3082,7 +3080,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
- NVME_QUIRK_MEDIUM_PRIO_SQ },
+ NVME_QUIRK_MEDIUM_PRIO_SQ |
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cb4c3000a57e..dce59459ed41 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-
- nvme_cleanup_cmd(rq);
sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
}
@@ -1501,8 +1499,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -1768,7 +1766,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(err < 0)) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", err);
- nvme_cleanup_cmd(rq);
goto err;
}
@@ -1779,18 +1776,19 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr ? &req->reg_wr.wr : NULL);
- if (unlikely(err)) {
- nvme_rdma_unmap_data(queue, rq);
- goto err;
- }
+ if (unlikely(err))
+ goto err_unmap;
return BLK_STS_OK;
+err_unmap:
+ nvme_rdma_unmap_data(queue, rq);
err:
if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
+ nvme_cleanup_cmd(rq);
unmap_qe:
ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7544be84ab35..6d43b23a0fc8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 831a062d27cb..56c21b501185 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -31,7 +31,7 @@ u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
- nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
@@ -134,7 +134,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
u16 status = NVME_SC_INTERNAL;
unsigned long flags;
- if (req->data_len != sizeof(*log))
+ if (req->transfer_len != sizeof(*log))
goto out;
log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -196,7 +196,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
u16 status = NVME_SC_INTERNAL;
size_t len;
- if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+ if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
goto out;
mutex_lock(&ctrl->lock);
@@ -206,7 +206,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
len = ctrl->nr_changed_ns * sizeof(__le32);
status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
if (!status)
- status = nvmet_zero_sgl(req, len, req->data_len - len);
+ status = nvmet_zero_sgl(req, len, req->transfer_len - len);
ctrl->nr_changed_ns = 0;
nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
mutex_unlock(&ctrl->lock);
@@ -282,6 +282,36 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+ return;
+
+ switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_ERROR:
+ return nvmet_execute_get_log_page_error(req);
+ case NVME_LOG_SMART:
+ return nvmet_execute_get_log_page_smart(req);
+ case NVME_LOG_FW_SLOT:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ return nvmet_execute_get_log_page_noop(req);
+ case NVME_LOG_CHANGED_NS:
+ return nvmet_execute_get_log_changed_ns(req);
+ case NVME_LOG_CMD_EFFECTS:
+ return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ANA:
+ return nvmet_execute_get_log_page_ana(req);
+ }
+ pr_err("unhandled lid %d on qid %d\n",
+ req->cmd->get_log_page.lid, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
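
Editor's note: this hunk establishes the pattern the rest of the series follows. Because req->data_len is removed, each execute handler now validates its own expected transfer length first. The common shape, as a sketch with expected_len standing in for the per-command length:

static void nvmet_execute_foo(struct nvmet_req *req)
{
	/* completes the request with SGL_INVALID_DATA | DNR on mismatch */
	if (!nvmet_check_data_len(req, expected_len))
		return;
	/* ... perform the command, then nvmet_req_complete(req, status) */
}
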
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -565,6 +595,28 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_NS:
+ return nvmet_execute_identify_ns(req);
+ case NVME_ID_CNS_CTRL:
+ return nvmet_execute_identify_ctrl(req);
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
+ return nvmet_execute_identify_nslist(req);
+ case NVME_ID_CNS_NS_DESC_LIST:
+ return nvmet_execute_identify_desclist(req);
+ }
+
+ pr_err("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
/*
* A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
@@ -574,6 +626,8 @@ out:
*/
static void nvmet_execute_abort(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
nvmet_set_result(req, 1);
nvmet_req_complete(req, 0);
}
@@ -658,6 +712,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
nvmet_set_result(req,
@@ -721,6 +778,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
/*
* These features are mandatory in the spec, but we don't
@@ -785,6 +845,9 @@ void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
mutex_lock(&ctrl->lock);
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
mutex_unlock(&ctrl->lock);
@@ -801,6 +864,9 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
@@ -813,77 +879,36 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_cmd(req);
+ if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ return nvmet_parse_discovery_cmd(req);
+
ret = nvmet_check_ctrl_status(req, cmd);
if (unlikely(ret))
return ret;
switch (cmd->common.opcode) {
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_ERROR:
- req->execute = nvmet_execute_get_log_page_error;
- return 0;
- case NVME_LOG_SMART:
- req->execute = nvmet_execute_get_log_page_smart;
- return 0;
- case NVME_LOG_FW_SLOT:
- /*
- * We only support a single firmware slot which always
- * is active, so we can zero out the whole firmware slot
- * log and still claim to fully implement this mandatory
- * log page.
- */
- req->execute = nvmet_execute_get_log_page_noop;
- return 0;
- case NVME_LOG_CHANGED_NS:
- req->execute = nvmet_execute_get_log_changed_ns;
- return 0;
- case NVME_LOG_CMD_EFFECTS:
- req->execute = nvmet_execute_get_log_cmd_effects_ns;
- return 0;
- case NVME_LOG_ANA:
- req->execute = nvmet_execute_get_log_page_ana;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_NS:
- req->execute = nvmet_execute_identify_ns;
- return 0;
- case NVME_ID_CNS_CTRL:
- req->execute = nvmet_execute_identify_ctrl;
- return 0;
- case NVME_ID_CNS_NS_ACTIVE_LIST:
- req->execute = nvmet_execute_identify_nslist;
- return 0;
- case NVME_ID_CNS_NS_DESC_LIST:
- req->execute = nvmet_execute_identify_desclist;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_identify;
+ return 0;
case nvme_admin_abort_cmd:
req->execute = nvmet_execute_abort;
- req->data_len = 0;
return 0;
case nvme_admin_set_features:
req->execute = nvmet_execute_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 3a67e244e568..28438b833c1b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -892,14 +892,10 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
}
if (unlikely(!req->sq->ctrl))
- /* will return an error for any Non-connect command: */
+ /* will return an error for any non-connect command: */
status = nvmet_parse_connect_cmd(req);
else if (likely(req->sq->qid != 0))
status = nvmet_parse_io_cmd(req);
- else if (nvme_is_fabrics(req->cmd))
- status = nvmet_parse_fabrics_cmd(req);
- else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
- status = nvmet_parse_discovery_cmd(req);
else
status = nvmet_parse_admin_cmd(req);
@@ -930,15 +926,17 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
{
- if (unlikely(req->data_len != req->transfer_len)) {
+ if (unlikely(data_len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
- } else
- req->execute(req);
+ return false;
+ }
+
+ return true;
}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_data_len);
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
@@ -966,7 +964,7 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req)
}
req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
- if (!req->sg)
+ if (unlikely(!req->sg))
return -ENOMEM;
return 0;
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 3764a8900850..0c2274b21e15 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -157,7 +157,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
return entries;
}
-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -171,6 +171,16 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
u16 status = 0;
void *buffer;
+ if (!nvmet_check_data_len(req, data_len))
+ return;
+
+ if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lid);
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ goto out;
+ }
+
/* Spec requires dword aligned offsets */
if (offset & 0x3) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -227,20 +237,35 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
+ const char model[] = "Linux";
u16 status = 0;
+ if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ goto out;
+ }
+
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
status = NVME_SC_INTERNAL;
goto out;
}
+ memset(id->sn, ' ', sizeof(id->sn));
+ bin2hex(id->sn, &ctrl->subsys->serial,
+ min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ UTS_RELEASE, strlen(UTS_RELEASE), ' ');
/* no limit on data transfer sizes for now */
id->mdts = 0;
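
Editor's note: memcpy_and_pad() copies the source and then fills the remainder of the fixed-width destination with the pad byte, which keeps the identify strings free of trailing garbage. A standalone sketch of its semantics, assuming it mirrors the linux/string.h helper:

#include <string.h>

static void memcpy_and_pad_sketch(void *dest, size_t dest_len,
				  const void *src, size_t count, int pad)
{
	if (count >= dest_len) {
		memcpy(dest, src, dest_len);	/* truncate to fit */
	} else {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	}
}
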
@@ -273,6 +298,9 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
stat = nvmet_set_feat_kato(req);
@@ -296,6 +324,9 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
nvmet_get_feat_kato(req);
@@ -328,47 +359,22 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_admin_set_features:
req->execute = nvmet_execute_disc_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_disc_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_DISC:
- req->execute = nvmet_execute_get_disc_log_page;
- return 0;
- default:
- pr_err("unsupported get_log_page lid %d\n",
- cmd->get_log_page.lid);
- req->error_loc =
- offsetof(struct nvme_get_log_page_command, lid);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_CTRL:
- req->execute =
- nvmet_execute_identify_disc_ctrl;
- return 0;
- default:
- pr_err("unsupported identify cns %d\n",
- cmd->identify.cns);
- req->error_loc = offsetof(struct nvme_identify, cns);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_identify;
+ return 0;
default:
pr_err("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d16b55ffe79f..f7297473d9eb 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,6 +12,9 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
if (req->cmd->prop_set.attrib & 1) {
req->error_loc =
offsetof(struct nvmf_property_set_command, attrib);
@@ -38,6 +41,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
u16 status = 0;
u64 val = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
if (req->cmd->prop_get.attrib & 1) {
switch (le32_to_cpu(req->cmd->prop_get.offset)) {
case NVME_REG_CAP:
@@ -82,11 +88,9 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
switch (cmd->fabrics.fctype) {
case nvme_fabrics_type_property_set:
- req->data_len = 0;
req->execute = nvmet_execute_prop_set;
break;
case nvme_fabrics_type_property_get:
- req->data_len = 0;
req->execute = nvmet_execute_prop_get;
break;
default:
@@ -147,6 +151,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -211,6 +218,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
+ if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -281,7 +291,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
- req->data_len = sizeof(struct nvmf_connect_data);
if (cmd->connect.qid == 0)
req->execute = nvmet_execute_admin_connect;
else
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ce8d819f86cc..a0db6371b43e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1495,20 +1495,20 @@ static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_disconnect_rqst *rqst =
- (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
- struct fcnvme_ls_disconnect_acc *acc =
- (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
memset(acc, 0, sizeof(*acc));
- if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
ret = VERR_DISCONN_LEN;
else if (rqst->desc_list_len !=
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_rqst)))
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
ret = VERR_DISCONN_RQST_LEN;
else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
ret = VERR_ASSOC_ID;
@@ -1523,8 +1523,11 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_lsdesc_disconn_cmd)))
ret = VERR_DISCONN_CMD_LEN;
- else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
- (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+ /*
+	 * The standard changed the Disconnect LS: the scope field is now
+	 * reserved. Reject an old-format request only if it set a scope
+	 * other than Association (i.e. the now-reserved byte is nonzero).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
ret = VERR_DISCONN_SCOPE;
else {
/* match an active association */
@@ -1556,8 +1559,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_acc)),
- FCNVME_LS_DISCONNECT);
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
nvmet_fc_tgt_a_put(iod->assoc);
@@ -1632,7 +1635,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
/* Creates an IO Queue/Connection */
nvmet_fc_ls_create_connection(tgtport, iod);
break;
- case FCNVME_LS_DISCONNECT:
+ case FCNVME_LS_DISCONNECT_ASSOC:
/* Terminate a Queue/Connection or the Association */
nvmet_fc_ls_disconnect(tgtport, iod);
break;
@@ -2015,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
/* data transfer complete, resume with nvmet layer */
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
break;
case NVMET_FCOP_READDATA:
@@ -2231,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
* can invoke the nvmet_layer now. If read data, cmd completion will
* push the data
*/
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
return;
transport_error:
@@ -2299,7 +2302,7 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
- (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+ (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
(cmdiu->fc_id != NVME_CMD_FC_ID) ||
(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
return -EIO;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 32008d85172b..b6fca0e421ef 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -147,8 +147,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
+ struct blk_plug plug;
sector_t sector;
- int op, op_flags = 0, i;
+ int op, i;
+
+ if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ return;
if (!req->sg_cnt) {
nvmet_req_complete(req, 0);
@@ -156,21 +160,20 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE;
- op_flags = REQ_SYNC | REQ_IDLE;
+ op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op_flags |= REQ_FUA;
+ op |= REQ_FUA;
} else {
op = REQ_OP_READ;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op_flags |= REQ_NOMERGE;
+ op |= REQ_NOMERGE;
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
@@ -180,8 +183,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
+ blk_start_plug(&plug);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
@@ -190,7 +194,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -201,12 +205,16 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
submit_bio(bio);
+ blk_finish_plug(&plug);
}
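
Editor's note: the plug added here batches the per-segment submit_bio() calls so the block layer can merge and dispatch them together. The general pattern, as a sketch:

struct blk_plug plug;

blk_start_plug(&plug);
/* every submit_bio() here is queued on the plug and may be merged */
submit_bio(bio);
blk_finish_plug(&plug);		/* flushes the batch to the driver */
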
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
@@ -261,12 +269,10 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- if (status) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- } else {
+ if (status)
+ bio_io_error(bio);
+ else
submit_bio(bio);
- }
} else {
nvmet_req_complete(req, status);
}
@@ -274,6 +280,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ return;
+
switch (le32_to_cpu(req->cmd->dsm.attributes)) {
case NVME_DSMGMT_AD:
nvmet_bdev_execute_discard(req);
@@ -295,6 +304,9 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
sector_t nr_sector;
int ret;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
@@ -319,20 +331,15 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_bdev_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_bdev_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 05453f5d1448..caebfce06605 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -126,7 +126,7 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
mempool_free(req->f.bvec, req->ns->bvec_pool);
}
- if (unlikely(ret != req->data_len))
+ if (unlikely(ret != req->transfer_len))
status = errno_to_nvme_status(req, ret);
nvmet_req_complete(req, status);
}
@@ -146,7 +146,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
is_sync = true;
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
- if (unlikely(pos + req->data_len > req->ns->size)) {
+ if (unlikely(pos + req->transfer_len > req->ns->size)) {
nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
return true;
}
@@ -173,7 +173,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
nr_bvec--;
}
- if (WARN_ON_ONCE(total_len != req->data_len)) {
+ if (WARN_ON_ONCE(total_len != req->transfer_len)) {
ret = -EIO;
goto complete;
}
@@ -232,6 +232,9 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
{
ssize_t nr_bvec = req->sg_cnt;
+ if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ return;
+
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
return;
@@ -273,6 +276,8 @@ static void nvmet_file_flush_work(struct work_struct *w)
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
}
@@ -331,6 +336,8 @@ static void nvmet_file_dsm_work(struct work_struct *w)
static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
}
@@ -359,6 +366,8 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
}
@@ -371,20 +380,15 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_file_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_file_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_file_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..a758bb3d5dd4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -76,7 +76,6 @@ static void nvme_loop_complete_rq(struct request *req)
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
- nvme_cleanup_cmd(req);
sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
nvme_complete_rq(req);
}
@@ -102,8 +101,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+ cqe->command_id))) {
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
@@ -126,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
struct nvme_loop_iod *iod =
container_of(work, struct nvme_loop_iod, work);
- nvmet_req_execute(&iod->req);
+ iod->req.execute(&iod->req);
}
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
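The open-coded admin-queue check above is replaced by the nvme_is_aen_req() helper. A sketch of what that helper boils down to (the actual definition lives in the host-side nvme.h from the same series; treat this as illustrative):

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	/* AEN completions are posted only on the admin queue (qid 0),
	 * with command ids at or above the blk-mq admin queue depth.
	 */
	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}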
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd01dc4..46df45e837c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -304,8 +304,6 @@ struct nvmet_req {
} f;
};
int sg_cnt;
- /* data length as parsed from the command: */
- size_t data_len;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
@@ -375,7 +373,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
-void nvmet_req_execute(struct nvmet_req *req);
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
@@ -495,6 +493,12 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
req->ns->blksize_shift;
}
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+ return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+ sizeof(struct nvme_dsm_range);
+}
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
/* Convert a 32-bit number to a 16-bit 0's based number */
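Taken together, these hunks move data-length validation out of the parse phase: ->parse() now only picks the execute handler, and each handler starts by checking the SGL-derived transfer_len against what the command implies. A minimal sketch of a handler following this convention (the handler itself is hypothetical; the helpers are the ones declared above):

#include "nvmet.h"	/* nvmet_req, nvmet_check_data_len(), nvmet_rw_len() */

static void nvmet_example_execute_rw(struct nvmet_req *req)
{
	/* nvmet_check_data_len() completes the request with an error
	 * status and returns false if req->transfer_len does not match
	 * the length implied by the command itself.
	 */
	if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
		return;

	/* ... perform the actual I/O using req->transfer_len ... */
}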
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 36d906a7f70d..37d262a65877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -672,13 +672,13 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
return 0;
ret = nvmet_req_alloc_sgl(&rsp->req);
- if (ret < 0)
+ if (unlikely(ret < 0))
goto error_out;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
nvmet_data_dir(&rsp->req));
- if (ret < 0)
+ if (unlikely(ret < 0))
goto error_out;
rsp->n_rdma += ret;
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
return true;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d535080b781f..af674fc0bb1e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -320,7 +320,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
u32 len = le32_to_cpu(sgl->length);
- if (!cmd->req.data_len)
+ if (!len)
return 0;
if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
@@ -813,13 +813,11 @@ free_crypto:
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
+ size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- /* recover the expected data transfer length */
- req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
-
if (!nvme_is_write(cmd->req.cmd) ||
- req->data_len > cmd->req.port->inline_data_size) {
+ data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
}
@@ -932,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
goto out;
}
- nvmet_req_execute(&queue->cmd->req);
+ queue->cmd->req.execute(&queue->cmd->req);
out:
nvmet_prepare_receive_pdu(queue);
return ret;
@@ -1052,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
nvmet_tcp_prep_recv_ddgst(cmd);
return 0;
}
- nvmet_req_execute(&cmd->req);
+ cmd->req.execute(&cmd->req);
}
nvmet_prepare_receive_pdu(queue);
@@ -1092,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len)
- nvmet_req_execute(&cmd->req);
+ cmd->req.execute(&cmd->req);
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index c2ec750cae6e..73567e922491 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -50,6 +50,7 @@ config NVMEM_IMX_OCOTP
config NVMEM_IMX_OCOTP_SCU
tristate "i.MX8 SCU On-Chip OTP Controller support"
depends on IMX_SCU
+ depends on HAVE_ARM_SMCCC
help
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
@@ -119,6 +120,17 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config ROCKCHIP_OTP
+ tristate "Rockchip OTP controller support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Rockchip SoCs
+ from OTP, such as cpu-leakage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_rockchip_otp.
+
config NVMEM_BCM_OCOTP
tristate "Broadcom On-Chip OTP Controller support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -230,4 +242,15 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
+config SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SoCs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index e5c153d99a67..9e667823edb3 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -30,6 +30,8 @@ obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_stm32_romem-y := stm32-romem.o
obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
@@ -50,3 +52,5 @@ obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 057d1ff87d5d..9f1ee9c766ec 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -76,33 +76,6 @@ static struct bus_type nvmem_bus_type = {
.name = "nvmem",
};
-static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
-{
- struct device *d;
-
- if (!nvmem_np)
- return NULL;
-
- d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);
-
- if (!d)
- return NULL;
-
- return to_nvmem_device(d);
-}
-
-static struct nvmem_device *nvmem_find(const char *name)
-{
- struct device *d;
-
- d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
-
- if (!d)
- return NULL;
-
- return to_nvmem_device(d);
-}
-
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
@@ -532,13 +505,16 @@ int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
}
EXPORT_SYMBOL(devm_nvmem_unregister);
-static struct nvmem_device *__nvmem_device_get(struct device_node *np,
- const char *nvmem_name)
+static struct nvmem_device *__nvmem_device_get(void *data,
+ int (*match)(struct device *dev, const void *data))
{
struct nvmem_device *nvmem = NULL;
+ struct device *dev;
mutex_lock(&nvmem_mutex);
- nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
+ dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
+ if (dev)
+ nvmem = to_nvmem_device(dev);
mutex_unlock(&nvmem_mutex);
if (!nvmem)
return ERR_PTR(-EPROBE_DEFER);
@@ -587,7 +563,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-ENOENT);
- return __nvmem_device_get(nvmem_np, NULL);
+ return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
@@ -613,10 +589,26 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
}
- return __nvmem_device_get(NULL, dev_name);
+ return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
+/**
+ * nvmem_device_find() - Find nvmem device with matching function
+ *
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ return __nvmem_device_get(data, match);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_find);
+
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
struct nvmem_device **nvmem = res;
@@ -710,7 +702,8 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if ((strcmp(lookup->dev_id, dev_id) == 0) &&
(strcmp(lookup->con_id, con_id) == 0)) {
/* This is the right entry. */
- nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
+ nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
+ device_match_name);
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
@@ -780,7 +773,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-EINVAL);
- nvmem = __nvmem_device_get(nvmem_np, NULL);
+ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
if (IS_ERR(nvmem))
return ERR_CAST(nvmem);
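With the lookup now keyed on a generic bus match callback, name-based and OF-node-based lookups share one path, and nvmem_device_find() exposes the same mechanism to callers. A hedged usage sketch (the device name is purely illustrative):

#include <linux/device.h>
#include <linux/nvmem-consumer.h>

static struct nvmem_device *example_find_nvmem(void)
{
	/* device_match_name() compares against dev_name(); the string
	 * "example-ocotp0" is an assumption for illustration only.
	 */
	return nvmem_device_find((void *)"example-ocotp0",
				 device_match_name);
}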
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 61a17f943f47..03f1ab23ad51 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -7,6 +7,7 @@
* Peng Fan <peng.fan@nxp.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
@@ -14,14 +15,28 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define IMX_SIP_OTP 0xC200000A
+#define IMX_SIP_OTP_WRITE 0x2
+
enum ocotp_devtype {
IMX8QXP,
IMX8QM,
};
+#define ECC_REGION BIT(0)
+#define HOLE_REGION BIT(1)
+
+struct ocotp_region {
+ u32 start;
+ u32 end;
+ u32 flag;
+};
+
struct ocotp_devtype_data {
int devtype;
int nregs;
+ u32 num_region;
+ struct ocotp_region region[];
};
struct ocotp_priv {
@@ -35,16 +50,63 @@ struct imx_sc_msg_misc_fuse_read {
u32 word;
} __packed;
+static DEFINE_MUTEX(scu_ocotp_mutex);
+
static struct ocotp_devtype_data imx8qxp_data = {
.devtype = IMX8QXP,
.nregs = 800,
+ .num_region = 3,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x110, 0x21F, HOLE_REGION},
+ {0x220, 0x31F, ECC_REGION},
+ },
};
static struct ocotp_devtype_data imx8qm_data = {
.devtype = IMX8QM,
.nregs = 800,
+ .num_region = 2,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x1a0, 0x1ff, ECC_REGION},
+ },
};
+static bool in_hole(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & HOLE_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool in_ecc(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & ECC_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
@@ -88,18 +150,19 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
if (!p)
return -ENOMEM;
+ mutex_lock(&scu_ocotp_mutex);
+
buf = p;
for (i = index; i < (index + count); i++) {
- if (priv->data->devtype == IMX8QXP) {
- if ((i > 271) && (i < 544)) {
- *buf++ = 0;
- continue;
- }
+ if (in_hole(context, i)) {
+ *buf++ = 0;
+ continue;
}
ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
if (ret) {
+ mutex_unlock(&scu_ocotp_mutex);
kfree(p);
return ret;
}
@@ -108,18 +171,63 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
memcpy(val, (u8 *)p + offset % 4, bytes);
+ mutex_unlock(&scu_ocotp_mutex);
+
kfree(p);
return 0;
}
+static int imx_scu_ocotp_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ struct arm_smccc_res res;
+ u32 *buf = val;
+ u32 tmp;
+ u32 index;
+ int ret;
+
+ /* allow only writing one complete OTP word at a time */
+ if ((bytes != 4) || (offset % 4))
+ return -EINVAL;
+
+ index = offset >> 2;
+
+ if (in_hole(context, index))
+ return -EINVAL;
+
+ if (in_ecc(context, index)) {
+ pr_warn("ECC region, only program once\n");
+ mutex_lock(&scu_ocotp_mutex);
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp);
+ mutex_unlock(&scu_ocotp_mutex);
+ if (ret)
+ return ret;
+ if (tmp) {
+ pr_warn("ECC region, already has value: %x\n", tmp);
+ return -EIO;
+ }
+ }
+
+ mutex_lock(&scu_ocotp_mutex);
+
+ arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
+ 0, 0, 0, 0, &res);
+
+ mutex_unlock(&scu_ocotp_mutex);
+
+ return res.a0;
+}
+
static struct nvmem_config imx_scu_ocotp_nvmem_config = {
.name = "imx-scu-ocotp",
- .read_only = true,
+ .read_only = false,
.word_size = 4,
.stride = 1,
.owner = THIS_MODULE,
.reg_read = imx_scu_ocotp_read,
+ .reg_write = imx_scu_ocotp_write,
};
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
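Because the config flips to read_only = false and gains a reg_write hook, a fuse word can now be programmed through the regular nvmem consumer API. A minimal, hypothetical sketch (offset and value are illustrative; recall that the write path accepts exactly one aligned 4-byte word):

#include <linux/nvmem-consumer.h>

static int example_program_fuse(struct nvmem_device *nvmem)
{
	u32 word = 0x1234;

	/* Returns the number of bytes written on success, or a negative
	 * errno (e.g. -EINVAL for a hole or misaligned offset, -EIO for
	 * an already-programmed ECC word).
	 */
	return nvmem_device_write(nvmem, 0x20, sizeof(word), &word);
}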
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index dff2f3c357f5..fc40555ca4cd 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -521,6 +521,10 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv->base);
+ clk_disable_unprepare(priv->clk);
+
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
new file mode 100644
index 000000000000..9f53bcce2f87
--- /dev/null
+++ b/drivers/nvmem/rockchip-otp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip OTP Driver
+ *
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* OTP Register Offsets */
+#define OTPC_SBPI_CTRL 0x0020
+#define OTPC_SBPI_CMD_VALID_PRE 0x0024
+#define OTPC_SBPI_CS_VALID_PRE 0x0028
+#define OTPC_SBPI_STATUS 0x002C
+#define OTPC_USER_CTRL 0x0100
+#define OTPC_USER_ADDR 0x0104
+#define OTPC_USER_ENABLE 0x0108
+#define OTPC_USER_Q 0x0124
+#define OTPC_INT_STATUS 0x0304
+#define OTPC_SBPI_CMD0_OFFSET 0x1000
+#define OTPC_SBPI_CMD1_OFFSET 0x1004
+
+/* OTP Register bits and masks */
+#define OTPC_USER_ADDR_MASK GENMASK(31, 16)
+#define OTPC_USE_USER BIT(0)
+#define OTPC_USE_USER_MASK GENMASK(16, 16)
+#define OTPC_USER_FSM_ENABLE BIT(0)
+#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16)
+#define OTPC_SBPI_DONE BIT(1)
+#define OTPC_USER_DONE BIT(2)
+
+#define SBPI_DAP_ADDR 0x02
+#define SBPI_DAP_ADDR_SHIFT 8
+#define SBPI_DAP_ADDR_MASK GENMASK(31, 24)
+#define SBPI_CMD_VALID_MASK GENMASK(31, 16)
+#define SBPI_DAP_CMD_WRF 0xC0
+#define SBPI_DAP_REG_ECC 0x3A
+#define SBPI_ECC_ENABLE 0x00
+#define SBPI_ECC_DISABLE 0x09
+#define SBPI_ENABLE BIT(0)
+#define SBPI_ENABLE_MASK GENMASK(16, 16)
+
+#define OTPC_TIMEOUT 10000
+
+struct rockchip_otp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rst;
+};
+
+/* list of required clocks */
+static const char * const rockchip_otp_clocks[] = {
+ "otp", "apb_pclk", "phy",
+};
+
+struct rockchip_data {
+ int size;
+};
+
+static int rockchip_otp_reset(struct rockchip_otp *otp)
+{
+ int ret;
+
+ ret = reset_control_assert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to assert otp phy %d\n", ret);
+ return ret;
+ }
+
+ udelay(2);
+
+ ret = reset_control_deassert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to deassert otp phy %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
+{
+ u32 status = 0;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(otp->base + OTPC_INT_STATUS, status,
+ (status & flag), 1, OTPC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* clear the interrupt status */
+ writel(flag, otp->base + OTPC_INT_STATUS);
+
+ return 0;
+}
+
+static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
+{
+ int ret = 0;
+
+ writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT),
+ otp->base + OTPC_SBPI_CTRL);
+
+ writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE);
+ writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC,
+ otp->base + OTPC_SBPI_CMD0_OFFSET);
+ if (enable)
+ writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+ else
+ writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+
+ writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
+
+ ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE);
+ if (ret < 0)
+ dev_err(otp->dev, "timeout during ecc_enable\n");
+
+ return ret;
+}
+
+static int rockchip_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ u8 *buf = val;
+ int ret = 0;
+
+ ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
+ if (ret < 0) {
+ dev_err(otp->dev, "failed to prepare/enable clks\n");
+ return ret;
+ }
+
+ ret = rockchip_otp_reset(otp);
+ if (ret) {
+ dev_err(otp->dev, "failed to reset otp phy\n");
+ goto disable_clks;
+ }
+
+ ret = rockchip_otp_ecc_enable(otp, false);
+ if (ret < 0) {
+ dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
+ goto disable_clks;
+ }
+
+ writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+ udelay(5);
+ while (bytes--) {
+ writel(offset++ | OTPC_USER_ADDR_MASK,
+ otp->base + OTPC_USER_ADDR);
+ writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
+ otp->base + OTPC_USER_ENABLE);
+ ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+ *buf++ = readb(otp->base + OTPC_USER_Q);
+ }
+
+read_end:
+ writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+disable_clks:
+ clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
+
+ return ret;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "rockchip-otp",
+ .owner = THIS_MODULE,
+ .read_only = true,
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = rockchip_otp_read,
+};
+
+static const struct rockchip_data px30_data = {
+ .size = 0x40,
+};
+
+static const struct of_device_id rockchip_otp_match[] = {
+ {
+ .compatible = "rockchip,px30-otp",
+ .data = (void *)&px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3308-otp",
+ .data = (void *)&px30_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_otp_match);
+
+static int rockchip_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_otp *otp;
+ const struct rockchip_data *data;
+ struct nvmem_device *nvmem;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
+ GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks);
+ otp->clks = devm_kcalloc(dev, otp->num_clks,
+ sizeof(*otp->clks), GFP_KERNEL);
+ if (!otp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < otp->num_clks; ++i)
+ otp->clks[i].id = rockchip_otp_clocks[i];
+
+ ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks);
+ if (ret)
+ return ret;
+
+ otp->rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(otp->rst))
+ return PTR_ERR(otp->rst);
+
+ otp_config.size = data->size;
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver rockchip_otp_driver = {
+ .probe = rockchip_otp_probe,
+ .driver = {
+ .name = "rockchip-otp",
+ .of_match_table = rockchip_otp_match,
+ },
+};
+
+module_platform_driver(rockchip_otp_driver);
+MODULE_DESCRIPTION("Rockchip OTP driver");
+MODULE_LICENSE("GPL v2");
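A consumer-side sketch for this driver, matching the cpu-leakage use case named in the Kconfig help; the cell name and its device-tree description are assumptions:

#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_leakage(struct device *dev, u8 *value)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *buf;

	cell = nvmem_cell_get(dev, "cpu-leakage");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* nvmem_cell_read() allocates the buffer; free it when done. */
	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	*value = buf[0];
	kfree(buf);
	return 0;
}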
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index c6ee21018d80..ab5e7e0bc3d8 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -211,7 +211,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
return ret;
}
- efuse->hwlock = hwspin_lock_request_specific(ret);
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!efuse->hwlock) {
dev_err(&pdev->dev, "failed to request hwspinlock\n");
return -ENXIO;
@@ -219,7 +219,6 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
- platform_set_drvdata(pdev, efuse);
econfig.stride = 1;
econfig.word_size = 1;
@@ -232,21 +231,12 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem config\n");
- hwspin_lock_free(efuse->hwlock);
return PTR_ERR(nvmem);
}
return 0;
}
-static int sc27xx_efuse_remove(struct platform_device *pdev)
-{
- struct sc27xx_efuse *efuse = platform_get_drvdata(pdev);
-
- hwspin_lock_free(efuse->hwlock);
- return 0;
-}
-
static const struct of_device_id sc27xx_efuse_of_match[] = {
{ .compatible = "sprd,sc2731-efuse" },
{ }
@@ -254,7 +244,6 @@ static const struct of_device_id sc27xx_efuse_of_match[] = {
static struct platform_driver sc27xx_efuse_driver = {
.probe = sc27xx_efuse_probe,
- .remove = sc27xx_efuse_remove,
.driver = {
.name = "sc27xx-efuse",
.of_match_table = sc27xx_efuse_of_match,
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
new file mode 100644
index 000000000000..2f1e0fbd1901
--- /dev/null
+++ b/drivers/nvmem/sprd-efuse.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE 0x20
+#define SPRD_EFUSE_ERR_FLAG 0x24
+#define SPRD_EFUSE_ERR_CLR 0x28
+#define SPRD_EFUSE_MAGIC_NUM 0x2c
+#define SPRD_EFUSE_FW_CFG 0x50
+#define SPRD_EFUSE_PW_SWT 0x54
+#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON BIT(0)
+#define SPRD_EFUSE_ENK2_ON BIT(1)
+#define SPRD_EFUSE_PROG_EN BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH 4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
+ * and only the normal efuse is accessible from the kernel. So define the
+ * normal block offset index and normal block count.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
+
+/*
+ * Different Spreadtrum SoCs can have different normal block numbers and
+ * offsets. Some SoCs also support a block double feature: when reading or
+ * writing efuse memory, the controller stores the data twice, in case one
+ * copy becomes corrupted over time.
+ *
+ * Thus we keep these parameters in the per-variant data structure.
+ */
+struct sprd_efuse_variant_data {
+ u32 blk_nums;
+ u32 blk_offset;
+ bool blk_double;
+};
+
+struct sprd_efuse {
+ struct device *dev;
+ struct clk *clk;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ void __iomem *base;
+ const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+ .blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+ .blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+ .blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the single efuse
+ * controller, so a hardware spinlock is needed to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SPRD_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timeout get the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val &= ~SPRD_EFUSE_ENK2_ON;
+ else
+ val &= ~SPRD_EFUSE_ENK1_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+ /* Wait 1000us after switching efuse power to let it stabilize. */
+ usleep_range(1000, 1200);
+
+ if (en)
+ val |= SPRD_EFUSE_ENK1_ON;
+ else
+ val |= SPRD_EFUSE_ENK2_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+ /* Wait 1000us after switching efuse power to let it stabilize. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_VDD_EN;
+ else
+ val &= ~SPRD_EFUSE_VDD_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+ /* Wait 1000us after switching efuse power to let it stabilize. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_LOCK_WR_EN;
+ else
+ val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_AUTO_CHECK_EN;
+ else
+ val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_DOUBLE_EN;
+ else
+ val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val |= SPRD_EFUSE_PROG_EN;
+ else
+ val &= ~SPRD_EFUSE_PROG_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+ bool lock, u32 *data)
+{
+ u32 status;
+ int ret = 0;
+
+ /*
+ * We must set the correct magic number before writing the efuse to
+ * allow programming, and clear it afterwards to block further
+ * programming.
+ */
+ writel(SPRD_EFUSE_MAGIC_NUMBER,
+ efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ /*
+ * Power on the efuse, enable programming, and enable double data
+ * if requested.
+ */
+ sprd_efuse_set_prog_power(efuse, true);
+ sprd_efuse_set_prog_en(efuse, true);
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /*
+ * Enable the auto-check function to validate if the programming is
+ * successful.
+ */
+ sprd_efuse_set_auto_check(efuse, true);
+
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable auto-check and data double after programming */
+ sprd_efuse_set_auto_check(efuse, false);
+ sprd_efuse_set_data_double(efuse, false);
+
+ /*
+ * Check the efuse error status; if the programming was successful,
+ * lock this efuse block to prevent it being programmed again.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "write error status %d of block %d\n", ret, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ ret = -EBUSY;
+ } else {
+ sprd_efuse_set_prog_lock(efuse, lock);
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+ sprd_efuse_set_prog_lock(efuse, false);
+ }
+
+ sprd_efuse_set_prog_power(efuse, false);
+ writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+ bool doub)
+{
+ u32 status;
+
+ /*
+ * The efuse must be powered on before reading data from it, and
+ * powered off again once the read has finished.
+ */
+ sprd_efuse_set_read_power(efuse, true);
+
+ /* Enable double data if asked */
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /* Start to read data from efuse block */
+ *val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable double data */
+ sprd_efuse_set_data_double(efuse, false);
+
+ /* Power off the efuse */
+ sprd_efuse_set_read_power(efuse, false);
+
+ /*
+ * Check the efuse error status and clear any errors that
+ * occurred.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "read error status %d of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+ u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+ u32 data;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+ if (!ret) {
+ data >>= blk_offset;
+ memcpy(val, &data, bytes);
+ }
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = { };
+ struct sprd_efuse *efuse;
+ const struct sprd_efuse_variant_data *pdata;
+ int ret;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwlock\n");
+ return -ENXIO;
+ }
+
+ efuse->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(efuse->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(efuse->clk);
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ efuse->data = pdata;
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = false;
+ econfig.name = "sprd-efuse";
+ econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sprd_efuse_read;
+ econfig.reg_write = sprd_efuse_write;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_efuse_of_match[] = {
+ { .compatible = "sprd,ums312-efuse", .data = &ums312_data },
+ { }
+};
+
+static struct platform_driver sprd_efuse_driver = {
+ .probe = sprd_efuse_probe,
+ .driver = {
+ .name = "sprd-efuse",
+ .of_match_table = sprd_efuse_of_match,
+ },
+};
+
+module_platform_driver(sprd_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum AP efuse driver");
+MODULE_LICENSE("GPL v2");
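For reference, the offset arithmetic in sprd_efuse_read() works out as follows (a worked example assuming the ums312 parameters from this patch):

/*
 * With SPRD_EFUSE_BLOCK_WIDTH = 4 and blk_offset = 72:
 *
 *   offset = 6  =>  index      = 6 / 4 + 72  = 73
 *                   blk_offset = (6 % 4) * 8 = 16 bits
 *
 * so the 32-bit word of block 73 is read, shifted right by 16 bits,
 * and the requested bytes are copied from the low end of the result.
 */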
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 223d617ecfe1..f1c23aad951e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -412,8 +412,8 @@ void *__unflatten_device_tree(const void *blob,
/* Second pass, do actual unflattening */
unflatten_dt_nodes(blob, mem, dad, mynodes);
if (be32_to_cpup(mem + size) != 0xdeadbeef)
- pr_warning("End of tree marker overwritten: %08x\n",
- be32_to_cpup(mem + size));
+ pr_warn("End of tree marker overwritten: %08x\n",
+ be32_to_cpup(mem + size));
if (detached && mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
@@ -1120,25 +1120,25 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
size &= PAGE_MASK;
if (base > MAX_MEMBLOCK_ADDR) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
+ pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
return;
}
if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
+ pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
+ ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
size = MAX_MEMBLOCK_ADDR - base + 1;
}
if (base + size < phys_offset) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
+ pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
return;
}
if (base < phys_offset) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- base, phys_offset);
+ pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, phys_offset);
size -= phys_offset - base;
base = phys_offset;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index bd6129db6417..c6b87ce2b0cc 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -361,8 +361,8 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
struct phy_device *phy;
int ret;
- iface = of_get_phy_mode(np);
- if ((int)iface < 0)
+ ret = of_get_phy_mode(np, &iface);
+ if (ret)
return NULL;
if (of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index b02734aff8c1..6e411821583e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -15,16 +15,20 @@
/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
+ * @interface: Pointer to the result
*
* The function gets phy interface string from property 'phy-mode' or
- * 'phy-connection-type', and return its index in phy_modes table, or errno in
- * error case.
+ * 'phy-connection-type'. The index into the phy_modes table is stored
+ * in @interface and 0 is returned. On error, @interface is set to
+ * PHY_INTERFACE_MODE_NA and an errno is returned, e.g. -ENODEV.
*/
-int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
{
const char *pm;
int err, i;
+ *interface = PHY_INTERFACE_MODE_NA;
+
err = of_property_read_string(np, "phy-mode", &pm);
if (err < 0)
err = of_property_read_string(np, "phy-connection-type", &pm);
@@ -32,8 +36,10 @@ int of_get_phy_mode(struct device_node *np)
return err;
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
- if (!strcasecmp(pm, phy_modes(i)))
- return i;
+ if (!strcasecmp(pm, phy_modes(i))) {
+ *interface = i;
+ return 0;
+ }
return -ENODEV;
}
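The of_get_phy_mode() signature change turns the return value into a plain errno and hands the mode back through an out-parameter, so callers migrate along these lines (a hedged sketch):

#include <linux/of_net.h>
#include <linux/phy.h>

static int example_get_mode(struct device_node *np)
{
	phy_interface_t iface;
	int err;

	/* New convention: errno return, mode via out-parameter;
	 * iface is left as PHY_INTERFACE_MODE_NA on failure.
	 */
	err = of_get_phy_mode(np, &iface);
	if (err)
		return err;

	return iface;
}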
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b47a2292fe8e..d93891a05f60 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -480,6 +480,7 @@ int of_platform_populate(struct device_node *root,
pr_debug("%s()\n", __func__);
pr_debug(" starting at: %pOF\n", root);
+ device_links_supplier_sync_state_pause();
for_each_child_of_node(root, child) {
rc = of_platform_bus_create(child, matches, lookup, parent, true);
if (rc) {
@@ -487,6 +488,8 @@ int of_platform_populate(struct device_node *root,
break;
}
}
+ device_links_supplier_sync_state_resume();
+
of_node_set_flag(root, OF_POPULATED_BUS);
of_node_put(root);
@@ -518,6 +521,7 @@ static int __init of_platform_default_populate_init(void)
if (!of_have_populated_dt())
return -ENODEV;
+ device_links_supplier_sync_state_pause();
/*
* Handle certain compatibles explicitly, since we don't want to create
* platform_devices for every node in /reserved-memory with a
@@ -538,6 +542,14 @@ static int __init of_platform_default_populate_init(void)
return 0;
}
arch_initcall_sync(of_platform_default_populate_init);
+
+static int __init of_platform_sync_state_init(void)
+{
+ if (of_have_populated_dt())
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall_sync(of_platform_sync_state_init);
#endif
int of_platform_device_destroy(struct device *dev, void *data)
diff --git a/drivers/of/property.c b/drivers/of/property.c
index d7fa75e31f22..477966d2421a 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/string.h>
+#include <linux/moduleparam.h>
#include "of_private.h"
@@ -872,6 +873,20 @@ of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
of_property_count_strings(node, propname);
}
+static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return kbasename(to_of_node(fwnode)->full_name);
+}
+
+static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ /* Root needs no prefix here (its name is "/"). */
+ if (!to_of_node(fwnode)->parent)
+ return "";
+
+ return "/";
+}
+
static struct fwnode_handle *
of_fwnode_get_parent(const struct fwnode_handle *fwnode)
{
@@ -985,6 +1000,320 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
+static bool of_is_ancestor_of(struct device_node *test_ancestor,
+ struct device_node *child)
+{
+ of_node_get(child);
+ while (child) {
+ if (child == test_ancestor) {
+ of_node_put(child);
+ return true;
+ }
+ child = of_get_next_parent(child);
+ }
+ return false;
+}
+
+/**
+ * of_link_to_phandle - Add device link to supplier from supplier phandle
+ * @dev: consumer device
+ * @sup_np: phandle to supplier device tree node
+ *
+ * Given a phandle to a supplier device tree node (@sup_np), this function
+ * finds the device that owns the supplier device tree node and creates a
+ * device link from @dev consumer device to the supplier device. This function
+ * doesn't create device links for invalid scenarios such as trying to create a
+ * link with a parent device as the consumer of its child device. In such
+ * cases, it returns an error.
+ *
+ * Returns:
+ * - 0 if link successfully created to supplier
+ * - -EAGAIN if linking to the supplier should be reattempted
+ * - -EINVAL if the supplier link is invalid and should not be created
+ * - -ENODEV if there is no device that corresponds to the supplier phandle
+ */
+static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
+ u32 dl_flags)
+{
+ struct device *sup_dev;
+ int ret = 0;
+ struct device_node *tmp_np = sup_np;
+ int is_populated;
+
+ of_node_get(sup_np);
+ /*
+ * Find the device node that contains the supplier phandle. It may be
+ * @sup_np or it may be an ancestor of @sup_np.
+ */
+ while (sup_np && !of_find_property(sup_np, "compatible", NULL))
+ sup_np = of_get_next_parent(sup_np);
+ if (!sup_np) {
+ dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np);
+ return -ENODEV;
+ }
+
+ /*
+ * Don't allow linking a device node as a consumer of one of its
+ * descendant nodes. By definition, a child node can't be a functional
+ * dependency for the parent node.
+ */
+ if (of_is_ancestor_of(dev->of_node, sup_np)) {
+ dev_dbg(dev, "Not linking to %pOFP - is descendant\n", sup_np);
+ of_node_put(sup_np);
+ return -EINVAL;
+ }
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ is_populated = of_node_check_flag(sup_np, OF_POPULATED);
+ of_node_put(sup_np);
+ if (!sup_dev && is_populated) {
+ /* Early device without struct device. */
+ dev_dbg(dev, "Not linking to %pOFP - No struct device\n",
+ sup_np);
+ return -ENODEV;
+ } else if (!sup_dev) {
+ return -EAGAIN;
+ }
+ if (!device_link_add(dev, sup_dev, dl_flags))
+ ret = -EAGAIN;
+ put_device(sup_dev);
+ return ret;
+}
+
+/**
+ * parse_prop_cells - Property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @list_name: Property name that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed name
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *list_name,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp(prop_name, list_name))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, list_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SIMPLE_PROP(fname, name, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_prop_cells(np, prop_name, index, name, cells); \
+}
+
+static int strcmp_suffix(const char *str, const char *suffix)
+{
+ unsigned int len, suffix_len;
+
+ len = strlen(str);
+ suffix_len = strlen(suffix);
+ if (len <= suffix_len)
+ return -1;
+ return strcmp(str + len - suffix_len, suffix);
+}
+
+/**
+ * parse_suffix_prop_cells - Suffix property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @suffix: Property suffix that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed suffix
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_suffix_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *suffix,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp_suffix(prop_name, suffix))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, prop_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SUFFIX_PROP(fname, suffix, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_suffix_prop_cells(np, prop_name, index, suffix, cells); \
+}
+
+/**
+ * struct supplier_bindings - Property parsing functions for suppliers
+ *
+ * @parse_prop: function name
+ * parse_prop() finds the node corresponding to a supplier phandle
+ * @parse_prop.np: Pointer to device node holding supplier phandle property
+ * @parse_prop.prop_name: Name of property holding a phandle value
+ * @parse_prop.index: For properties holding a list of phandles, this is the
+ * index into the list
+ *
+ * Returns:
+ * parse_prop() return values are
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+struct supplier_bindings {
+ struct device_node *(*parse_prop)(struct device_node *np,
+ const char *prop_name, int index);
+};
+
+DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
+DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_iommu_maps(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (strcmp(prop_name, "iommu-map"))
+ return NULL;
+
+ return of_parse_phandle(np, prop_name, (index * 4) + 1);
+}
+
+static const struct supplier_bindings of_supplier_bindings[] = {
+ { .parse_prop = parse_clocks, },
+ { .parse_prop = parse_interconnects, },
+ { .parse_prop = parse_iommus, },
+ { .parse_prop = parse_iommu_maps, },
+ { .parse_prop = parse_mboxes, },
+ { .parse_prop = parse_io_channels, },
+ { .parse_prop = parse_interrupt_parent, },
+ { .parse_prop = parse_dmas, },
+ { .parse_prop = parse_regulators, },
+ { .parse_prop = parse_gpio, },
+ { .parse_prop = parse_gpios, },
+ {}
+};
+
+/**
+ * of_link_property - Create device links to suppliers listed in a property
+ * @dev: Consumer device
+ * @con_np: The consumer device tree node which contains the property
+ * @prop_name: Name of property to be parsed
+ *
+ * This function checks if the property @prop_name that is present in the
+ * @con_np device tree node is one of the known common device tree bindings
+ * that list phandles to suppliers. If @prop_name isn't one, this function
+ * doesn't do anything.
+ *
+ * If @prop_name is one, this function attempts to create device links from the
+ * consumer device @dev to all the devices of the suppliers listed in
+ * @prop_name.
+ *
+ * Any failed attempt to create a device link will NOT result in an immediate
+ * return. of_link_property() must create links to all the available supplier
+ * devices even when attempts to create a link to one or more suppliers fail.
+ */
+static int of_link_property(struct device *dev, struct device_node *con_np,
+ const char *prop_name)
+{
+ struct device_node *phandle;
+ const struct supplier_bindings *s = of_supplier_bindings;
+ unsigned int i = 0;
+ bool matched = false;
+ int ret = 0;
+ u32 dl_flags;
+
+ if (dev->of_node == con_np)
+ dl_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ else
+ dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+
+ /* Do not stop at first failed link, link all available suppliers. */
+ while (!matched && s->parse_prop) {
+ while ((phandle = s->parse_prop(con_np, prop_name, i))) {
+ matched = true;
+ i++;
+ if (of_link_to_phandle(dev, phandle, dl_flags)
+ == -EAGAIN)
+ ret = -EAGAIN;
+ of_node_put(phandle);
+ }
+ s++;
+ }
+ return ret;
+}
+
+static int of_link_to_suppliers(struct device *dev,
+ struct device_node *con_np)
+{
+ struct device_node *child;
+ struct property *p;
+ int ret = 0;
+
+ for_each_property_of_node(con_np, p)
+ if (of_link_property(dev, con_np, p->name))
+ ret = -ENODEV;
+
+ for_each_child_of_node(con_np, child)
+ if (of_link_to_suppliers(dev, child) && !ret)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static bool of_devlink;
+core_param(of_devlink, of_devlink, bool, 0);
+
+static int of_fwnode_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ if (!of_devlink)
+ return 0;
+
+ if (unlikely(!is_of_node(fwnode)))
+ return 0;
+
+ return of_link_to_suppliers(dev, to_of_node(fwnode));
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
@@ -993,6 +1322,8 @@ const struct fwnode_operations of_fwnode_ops = {
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
+ .get_name = of_fwnode_get_name,
+ .get_name_prefix = of_fwnode_get_name_prefix,
.get_parent = of_fwnode_get_parent,
.get_next_child_node = of_fwnode_get_next_child_node,
.get_named_child_node = of_fwnode_get_named_child_node,
@@ -1001,5 +1332,6 @@ const struct fwnode_operations of_fwnode_ops = {
.graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint,
.graph_get_port_parent = of_fwnode_graph_get_port_parent,
.graph_parse_endpoint = of_fwnode_graph_parse_endpoint,
+ .add_links = of_fwnode_add_links,
};
EXPORT_SYMBOL_GPL(of_fwnode_ops);
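For orientation, DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") in the hunk above expands to roughly the following parser; this is a sketch of the macro expansion, not additional code:

static struct device_node *parse_clocks(struct device_node *np,
					const char *prop_name, int index)
{
	/* Only properties literally named "clocks" match; the phandle
	 * at @index is resolved honouring the supplier's #clock-cells.
	 */
	return parse_prop_cells(np, prop_name, index,
				"clocks", "#clock-cells");
}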
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9ff0538ee83a..be7a7d332332 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2103,6 +2103,75 @@ put_table:
}
/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to adjust voltage of
+ * @u_volt: new OPP target voltage
+ * @u_volt_min: new OPP min voltage
+ * @u_volt_max: new OPP max voltage
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for
+ * the copy operation, or 0 if no modification was needed or the
+ * modification was successful.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto adjust_unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->supplies->u_volt == u_volt)
+ goto adjust_unlock;
+
+ opp->supplies->u_volt = u_volt;
+ opp->supplies->u_volt_min = u_volt_min;
+ opp->supplies->u_volt_max = u_volt_max;
+
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
+ /* Notify the voltage change of the OPP */
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
+ opp);
+
+ dev_pm_opp_put(opp);
+ goto adjust_put_table;
+
+adjust_unlock:
+ mutex_unlock(&opp_table->lock);
+adjust_put_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return r;
+}
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
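A hedged caller sketch for the new helper; the frequency and voltages are illustrative values a real user would take from characterization data:

#include <linux/pm_opp.h>

static int example_adjust(struct device *dev)
{
	/* Retarget the 1.2 GHz OPP to 1.10 V with a 1.05-1.15 V window;
	 * returns 0 when nothing changed or the change succeeded.
	 */
	return dev_pm_opp_adjust_voltage(dev, 1200000000UL,
					 1100000, 1050000, 1150000);
}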
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index 4b150a754890..98a63a5f8763 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -46,8 +46,8 @@ static void op_overflow_handler(struct perf_event *event,
if (id != num_counters)
oprofile_add_sample(regs, id);
else
- pr_warning("oprofile: ignoring spurious overflow "
- "on cpu %u\n", cpu);
+ pr_warn("oprofile: ignoring spurious overflow on cpu %u\n",
+ cpu);
}
/*
@@ -88,8 +88,8 @@ static int op_create_counter(int cpu, int event)
if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
perf_event_release_kernel(pevent);
- pr_warning("oprofile: failed to enable event %d "
- "on CPU %d\n", event, cpu);
+ pr_warn("oprofile: failed to enable event %d on CPU %d\n",
+ event, cpu);
return -EBUSY;
}
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..3b00e2c8e2e9 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -45,6 +45,7 @@ static struct daisydev {
static DEFINE_SPINLOCK(topology_lock);
static int numdevs;
+static bool daisy_init_done;
/* Forward-declaration of lower-level functions. */
static int mux_present(struct parport *port);
@@ -87,6 +88,24 @@ static struct parport *clone_parport(struct parport *real, int muxport)
return extra;
}
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
* Return value is number of devices actually detected. */
int parport_daisy_init(struct parport *port)
@@ -98,6 +117,23 @@ int parport_daisy_init(struct parport *port)
int i;
int last_try = 0;
+ if (!daisy_init_done) {
+ /*
+ * The flag must be set true first, because
+ * parport_register_driver() may load the low-level
+ * driver, which announces new ports and thereby
+ * re-enters parport_daisy_init().
+ */
+ daisy_init_done = true;
+ i = parport_register_driver(&daisy_driver);
+ if (i) {
+ pr_err("daisy registration failed\n");
+ daisy_init_done = false;
+ return i;
+ }
+ }
+
again:
/* Because this is called before any other devices exist,
* we don't have to claim exclusive access. */
@@ -213,10 +249,12 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
+ struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
+ memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -230,7 +268,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open (devnum, "Device ID probe");
+ struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 7b4ee33c1935..d6920ebeabcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
return 0;
}
+/*
+ * Iterates through all the devices connected to the bus and returns 1
+ * if the device is a parallel port.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+ if (is_parport(dev))
+ return 1;
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
@@ -266,9 +278,6 @@ static int port_check(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (list_empty(&portlist))
- get_lowlevel_driver();
-
if (drv->devmodel) {
/* using device model */
int ret;
@@ -282,6 +291,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
if (ret)
return ret;
+ /*
+ * Check whether the bus already has any parallel port
+ * registered; if none is found, load the low-level driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
@@ -292,6 +310,8 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
drv->devmodel = false;
+ if (list_empty(&portlist))
+ get_lowlevel_driver();
mutex_lock(&registration_lock);
list_for_each_entry(port, &portlist, list)
drv->attach(port);
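bus_for_each_dev() aborts the walk as soon as the callback returns non-zero and propagates that value, which is why port_detect() above doubles as an "any port registered?" test. A minimal sketch of the idiom, assuming the port_detect() helper from this patch (wrapper name illustrative):

#include <linux/device.h>

/* Non-zero iff at least one parallel port device sits on the bus;
 * the walk stops at the first hit, so this is cheap. */
static int example_bus_has_parport(void)
{
	return bus_for_each_dev(&parport_bus_type, NULL, NULL, port_detect);
}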
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a97e2571a527..fcfaadc774ee 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5854,6 +5854,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+#ifdef CONFIG_ACPI
+bool pci_pr3_present(struct pci_dev *pdev)
+{
+ struct acpi_device *adev;
+
+ if (acpi_disabled)
+ return false;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return false;
+
+ return adev->power.flags.power_resources &&
+ acpi_has_method(adev->handle, "_PR3");
+}
+EXPORT_SYMBOL_GPL(pci_pr3_present);
+#endif
+
/**
* pci_add_dma_alias - Add a DMA devfn alias for a device
* @dev: the PCI device for which alias is added
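pci_pr3_present() only reports whether the firmware models _PR3 (D3cold) power resources for the device; acting on that is left to the caller. A hedged sketch of a typical caller, with an illustrative name:

#include <linux/pci.h>

/* Hypothetical policy helper: prefer ACPI power-resource control when
 * the platform exposes _PR3 for this device, else use driver methods. */
static bool example_prefer_acpi_pm(struct pci_dev *pdev)
{
	return pci_pr3_present(pdev);
}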
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index c502dfbf66e3..45c8252c8edc 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -22,7 +22,9 @@
#include <linux/pci.h>
#include <pcmcia/ss.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
{
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 629359fe3513..cf109d9a1112 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -28,6 +28,7 @@
#include <pcmcia/ss.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
#include "cs_internal.h"
static const u_char mantissa[] = {
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 245d60189375..aad8a46605be 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -431,27 +431,25 @@ static int i82092aa_get_status(struct pcmcia_socket *socket, u_int *value)
/* IO cards have a different meaning of bits 0,1 */
/* Also notice the inverse-logic on the bits */
- if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
- /* IO card */
- if (!(status & I365_CS_STSCHG))
- *value |= SS_STSCHG;
- } else { /* non I/O card */
- if (!(status & I365_CS_BVD1))
- *value |= SS_BATDEAD;
- if (!(status & I365_CS_BVD2))
- *value |= SS_BATWARN;
-
- }
+ if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
+ /* IO card */
+ if (!(status & I365_CS_STSCHG))
+ *value |= SS_STSCHG;
+ } else { /* non I/O card */
+ if (!(status & I365_CS_BVD1))
+ *value |= SS_BATDEAD;
+ if (!(status & I365_CS_BVD2))
+ *value |= SS_BATWARN;
+ }
- if (status & I365_CS_WRPROT)
- (*value) |= SS_WRPROT; /* card is write protected */
+ if (status & I365_CS_WRPROT)
+ (*value) |= SS_WRPROT; /* card is write protected */
- if (status & I365_CS_READY)
- (*value) |= SS_READY; /* card is not busy */
+ if (status & I365_CS_READY)
+ (*value) |= SS_READY; /* card is not busy */
- if (status & I365_CS_POWERON)
- (*value) |= SS_POWERON; /* power is applied to the card */
-
+ if (status & I365_CS_POWERON)
+ (*value) |= SS_POWERON; /* power is applied to the card */
leave("i82092aa_get_status");
return 0;
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index fabe08c3e33d..4586c43c78e2 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -8,11 +8,9 @@
#ifdef NOTRACE
#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
#define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
-#define dprintk(fmt, args...) printk(fmt , ## args)
#else
#define enter(x) do {} while (0)
#define leave(x) do {} while (0)
-#define dprintk(fmt, args...) do {} while (0)
#endif
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 810761ab8e9d..49b1c6a1bdbe 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -173,8 +173,7 @@ static void exca_writew(struct yenta_socket *socket, unsigned reg, u16 val)
static ssize_t show_yenta_registers(struct device *yentadev, struct device_attribute *attr, char *buf)
{
- struct pci_dev *dev = to_pci_dev(yentadev);
- struct yenta_socket *socket = pci_get_drvdata(dev);
+ struct yenta_socket *socket = dev_get_drvdata(yentadev);
int offset = 0, i;
offset = snprintf(buf, PAGE_SIZE, "CB registers:");
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 8f8606b9bc9e..1b8e337a29ca 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1642,7 +1642,6 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev)
static int cci_pmu_probe(struct platform_device *pdev)
{
- struct resource *res;
struct cci_pmu *cci_pmu;
int i, ret, irq;
@@ -1650,8 +1649,7 @@ static int cci_pmu_probe(struct platform_device *pdev)
if (IS_ERR(cci_pmu))
return PTR_ERR(cci_pmu);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cci_pmu->base))
return -ENOMEM;
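devm_platform_ioremap_resource(), used in this and the following conversions, is in effect the two calls it replaces folded into one; a sketch of the equivalent open-coded form:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Approximate open-coded equivalent of
 * devm_platform_ioremap_resource(pdev, index). */
static void __iomem *example_ioremap(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

One detail of the cci_pmu hunk worth noting: it returns -ENOMEM when IS_ERR(cci_pmu->base), whereas the other conversions propagate PTR_ERR(), which preserves the underlying error code.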
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 6fc0273b6129..fea354d6fb29 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1477,8 +1477,7 @@ static int arm_ccn_probe(struct platform_device *pdev)
ccn->dev = &pdev->dev;
platform_set_drvdata(pdev, ccn);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ccn->base = devm_ioremap_resource(ccn->dev, res);
+ ccn->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccn->base))
return PTR_ERR(ccn->base);
@@ -1537,6 +1536,7 @@ static int arm_ccn_remove(struct platform_device *pdev)
static const struct of_device_id arm_ccn_match[] = {
{ .compatible = "arm,ccn-502", },
{ .compatible = "arm,ccn-504", },
+ { .compatible = "arm,ccn-512", },
{},
};
MODULE_DEVICE_TABLE(of, arm_ccn_match);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index abcf54f7d19c..773128f411f1 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -727,7 +727,7 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
static int smmu_pmu_probe(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu;
- struct resource *res_0, *res_1;
+ struct resource *res_0;
u32 cfgr, reg_size;
u64 ceid_64[2];
int irq, err;
@@ -764,8 +764,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
/* Determine if page 1 is present */
if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
- res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
+ smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(smmu_pmu->reloc_base))
return PTR_ERR(smmu_pmu->reloc_base);
} else {
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index ce7345745b42..55083c67b2bb 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -45,7 +45,8 @@
static DEFINE_IDA(ddr_ida);
/* DDR Perf hardware feature */
-#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */
@@ -57,9 +58,14 @@ static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
};
+static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
+ { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -78,6 +84,61 @@ struct ddr_pmu {
int id;
};
+enum ddr_perf_filter_capabilities {
+ PERF_CAP_AXI_ID_FILTER = 0,
+ PERF_CAP_AXI_ID_FILTER_ENHANCED,
+ PERF_CAP_AXI_ID_FEAT_MAX,
+};
+
+static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
+{
+ u32 quirks = pmu->devtype_data->quirks;
+
+ switch (cap) {
+ case PERF_CAP_AXI_ID_FILTER:
+ return !!(quirks & DDR_CAP_AXI_ID_FILTER);
+ case PERF_CAP_AXI_ID_FILTER_ENHANCED:
+ quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ default:
+ WARN(1, "unknown filter cap %d\n", cap);
+ }
+
+ return 0;
+}
+
+static ssize_t ddr_perf_filter_cap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ ddr_perf_filter_cap_get(pmu, cap));
+}
+
+#define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \
+ (&((struct dev_ext_attribute) { \
+ __ATTR(_name, 0444, _func, NULL), (void *)_var \
+ }).attr.attr)
+
+#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var) \
+ PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)
+
+static struct attribute *ddr_perf_filter_cap_attr[] = {
+ PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
+ PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
+ NULL,
+};
+
+static struct attribute_group ddr_perf_filter_cap_attr_group = {
+ .name = "caps",
+ .attrs = ddr_perf_filter_cap_attr,
+};
+
static ssize_t ddr_perf_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -175,9 +236,40 @@ static const struct attribute_group *attr_groups[] = {
&ddr_perf_events_attr_group,
&ddr_perf_format_attr_group,
&ddr_perf_cpumask_attr_group,
+ &ddr_perf_filter_cap_attr_group,
NULL,
};
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+ return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+ return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+ struct perf_event *b)
+{
+ if (!ddr_perf_is_filtered(a))
+ return true;
+ if (!ddr_perf_is_filtered(b))
+ return true;
+ return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
+static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
+{
+ unsigned int filt;
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+
+ filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
+ ddr_perf_is_filtered(event);
+}
+
static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
int i;
@@ -209,27 +301,17 @@ static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
- return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
-}
-
-static bool ddr_perf_is_filtered(struct perf_event *event)
-{
- return event->attr.config == 0x41 || event->attr.config == 0x42;
-}
+ struct perf_event *event = pmu->events[counter];
+ void __iomem *base = pmu->base;
-static u32 ddr_perf_filter_val(struct perf_event *event)
-{
- return event->attr.config1;
-}
-
-static bool ddr_perf_filters_compatible(struct perf_event *a,
- struct perf_event *b)
-{
- if (!ddr_perf_is_filtered(a))
- return true;
- if (!ddr_perf_is_filtered(b))
- return true;
- return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+ /*
+ * Return bytes instead of bursts of the DDR transaction for
+ * the axid-read and axid-write events if the PMU core supports
+ * the enhanced AXI ID filter.
+ */
+ base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
+ COUNTER_READ;
+ return readl_relaxed(base + counter * 4);
}
static int ddr_perf_event_init(struct perf_event *event)
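The caps attributes lean on a compound-literal dev_ext_attribute (from <linux/device.h>) so each sysfs file carries its capability index in ->var; the show handler then recovers it with container_of(). A condensed sketch of the round trip, with an illustrative handler name:

#include <linux/device.h>
#include <linux/sysfs.h>

/* What ddr_perf_filter_cap_show() does, reduced to the essentials:
 * attr is embedded in a dev_ext_attribute, so container_of() gives
 * back the entry built by PERF_EXT_ATTR_ENTRY and its ->var payload. */
static ssize_t example_cap_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);

	return snprintf(buf, PAGE_SIZE, "%ld\n", (long)ea->var);
}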
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index e42d4464c2cf..453f1c6a16ca 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -243,8 +243,6 @@ MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
- struct resource *res;
-
/*
* Use the SCCL_ID and DDRC channel ID to identify the
* DDRC PMU, while SCCL_ID is in MPIDR[aff2].
@@ -263,8 +261,7 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
/* DDRC PMUs only share the same SCCL */
ddrc_pmu->ccl_id = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
return PTR_ERR(ddrc_pmu->base);
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index f28063873e11..6a1dd72d8abb 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -234,7 +234,6 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *hha_pmu)
{
unsigned long long id;
- struct resource *res;
acpi_status status;
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
@@ -256,8 +255,7 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
/* HHA PMUs only share the same SCCL */
hha_pmu->ccl_id = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
return PTR_ERR(hha_pmu->base);
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 078b8dc57250..1151e99b241c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -233,7 +233,6 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
unsigned long long id;
- struct resource *res;
acpi_status status;
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
@@ -259,8 +258,7 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- l3c_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(l3c_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
return PTR_ERR(l3c_pmu->base);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 79f76f8dda8e..96183e31b96a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <asm/cputype.h>
#include <asm/local64.h>
#include "hisi_uncore_pmu.h"
@@ -338,8 +339,10 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
/*
* Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, CCL_ID is the low 3-bits in MPIDR[Aff2]
- * and SCCL_ID is the upper 5-bits of Aff2 field; if not, SCCL_ID
+ * If multi-threading is supported: on the Huawei Kunpeng 920 SoC,
+ * whose CPU core is tsv110, CCL_ID is the low 3 bits of MPIDR[Aff2]
+ * and SCCL_ID is the upper 5 bits of Aff2, while for other CPU types
+ * SCCL_ID is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2]; if not, SCCL_ID
* is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
*/
static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
@@ -347,12 +350,19 @@ static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
u64 mpidr = read_cpuid_mpidr();
if (mpidr & MPIDR_MT_BITMASK) {
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
- if (sccl_id)
- *sccl_id = aff2 >> 3;
- if (ccl_id)
- *ccl_id = aff2 & 0x7;
+ if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+
+ if (sccl_id)
+ *sccl_id = aff2 >> 3;
+ if (ccl_id)
+ *ccl_id = aff2 & 0x7;
+ } else {
+ if (sccl_id)
+ *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ if (ccl_id)
+ *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ }
} else {
if (sccl_id)
*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
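As a worked example of the decoding above: with MPIDR_EL1 Aff2 = 0x2b on a multi-threaded tsv110 part, SCCL_ID = 0x2b >> 3 = 5 and CCL_ID = 0x2b & 0x7 = 3. A sketch mirroring the branch structure (helper name illustrative):

#include <asm/cputype.h>

static void example_decode_mpidr(u64 mpidr, bool is_tsv110,
				 int *sccl_id, int *ccl_id)
{
	if ((mpidr & MPIDR_MT_BITMASK) && is_tsv110) {
		int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);

		*sccl_id = aff2 >> 3;	/* upper 5 bits of Aff2 */
		*ccl_id = aff2 & 0x7;	/* low 3 bits of Aff2 */
	} else if (mpidr & MPIDR_MT_BITMASK) {
		*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
		*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	} else {
		*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	}
}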
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 43d76c85da56..51b31d6ff2c4 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -16,23 +16,36 @@
* they need to be sampled before overflow (i.e., at every 2 seconds).
*/
-#define TX2_PMU_MAX_COUNTERS 4
+#define TX2_PMU_DMC_L3C_MAX_COUNTERS 4
+#define TX2_PMU_CCPI2_MAX_COUNTERS 8
+#define TX2_PMU_MAX_COUNTERS TX2_PMU_CCPI2_MAX_COUNTERS
+
+
#define TX2_PMU_DMC_CHANNELS 8
#define TX2_PMU_L3_TILES 16
#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
-#define GET_EVENTID(ev) ((ev->hw.config) & 0x1f)
-#define GET_COUNTERID(ev) ((ev->hw.idx) & 0x3)
+#define GET_EVENTID(ev, mask) ((ev->hw.config) & mask)
+#define GET_COUNTERID(ev, mask) ((ev->hw.idx) & mask)
/* 1 byte per counter (4 counters).
* Event id is encoded in bits [5:1] of a byte.
*/
#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
+/* Counters are selected with bits [3:0] and are indexed from 8 to 15. */
+#define CCPI2_COUNTER_OFFSET 8
+
#define L3C_COUNTER_CTL 0xA8
#define L3C_COUNTER_DATA 0xAC
#define DMC_COUNTER_CTL 0x234
#define DMC_COUNTER_DATA 0x240
+#define CCPI2_PERF_CTL 0x108
+#define CCPI2_COUNTER_CTL 0x10C
+#define CCPI2_COUNTER_SEL 0x12c
+#define CCPI2_COUNTER_DATA_L 0x130
+#define CCPI2_COUNTER_DATA_H 0x134
+
/* L3C event IDs */
#define L3_EVENT_READ_REQ 0xD
#define L3_EVENT_WRITEBACK_REQ 0xE
@@ -51,15 +64,28 @@
#define DMC_EVENT_READ_TXNS 0xF
#define DMC_EVENT_MAX 0x10
+#define CCPI2_EVENT_REQ_PKT_SENT 0x3D
+#define CCPI2_EVENT_SNOOP_PKT_SENT 0x65
+#define CCPI2_EVENT_DATA_PKT_SENT 0x105
+#define CCPI2_EVENT_GIC_PKT_SENT 0x12D
+#define CCPI2_EVENT_MAX 0x200
+
+#define CCPI2_PERF_CTL_ENABLE BIT(0)
+#define CCPI2_PERF_CTL_START BIT(1)
+#define CCPI2_PERF_CTL_RESET BIT(4)
+#define CCPI2_EVENT_LEVEL_RISING_EDGE BIT(10)
+#define CCPI2_EVENT_TYPE_EDGE_SENSITIVE BIT(11)
+
enum tx2_uncore_type {
PMU_TYPE_L3C,
PMU_TYPE_DMC,
+ PMU_TYPE_CCPI2,
PMU_TYPE_INVALID,
};
/*
- * pmu on each socket has 2 uncore devices(dmc and l3c),
- * each device has 4 counters.
+ * Each socket has 3 uncore devices associated with a PMU. The DMC and
+ * L3C have 4 32-bit counters and the CCPI2 has 8 64-bit counters.
*/
struct tx2_uncore_pmu {
struct hlist_node hpnode;
@@ -69,8 +95,10 @@ struct tx2_uncore_pmu {
int node;
int cpu;
u32 max_counters;
+ u32 counters_mask;
u32 prorate_factor;
u32 max_events;
+ u32 events_mask;
u64 hrtimer_interval;
void __iomem *base;
DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
@@ -79,6 +107,7 @@ struct tx2_uncore_pmu {
struct hrtimer hrtimer;
const struct attribute_group **attr_groups;
enum tx2_uncore_type type;
+ enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
void (*init_cntr_base)(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu);
void (*stop_event)(struct perf_event *event);
@@ -92,7 +121,21 @@ static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
return container_of(pmu, struct tx2_uncore_pmu, pmu);
}
-PMU_FORMAT_ATTR(event, "config:0-4");
+#define TX2_PMU_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t \
+__tx2_pmu_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+ \
+static struct device_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __tx2_pmu_##_var##_show, NULL)
+
+TX2_PMU_FORMAT_ATTR(event, event, "config:0-4");
+TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9");
static struct attribute *l3c_pmu_format_attrs[] = {
&format_attr_event.attr,
@@ -104,6 +147,11 @@ static struct attribute *dmc_pmu_format_attrs[] = {
NULL,
};
+static struct attribute *ccpi2_pmu_format_attrs[] = {
+ &format_attr_event_ccpi2.attr,
+ NULL,
+};
+
static const struct attribute_group l3c_pmu_format_attr_group = {
.name = "format",
.attrs = l3c_pmu_format_attrs,
@@ -114,6 +162,11 @@ static const struct attribute_group dmc_pmu_format_attr_group = {
.attrs = dmc_pmu_format_attrs,
};
+static const struct attribute_group ccpi2_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = ccpi2_pmu_format_attrs,
+};
+
/*
* sysfs event attributes
*/
@@ -164,6 +217,19 @@ static struct attribute *dmc_pmu_events_attrs[] = {
NULL,
};
+TX2_EVENT_ATTR(req_pktsent, CCPI2_EVENT_REQ_PKT_SENT);
+TX2_EVENT_ATTR(snoop_pktsent, CCPI2_EVENT_SNOOP_PKT_SENT);
+TX2_EVENT_ATTR(data_pktsent, CCPI2_EVENT_DATA_PKT_SENT);
+TX2_EVENT_ATTR(gic_pktsent, CCPI2_EVENT_GIC_PKT_SENT);
+
+static struct attribute *ccpi2_pmu_events_attrs[] = {
+ &tx2_pmu_event_attr_req_pktsent.attr.attr,
+ &tx2_pmu_event_attr_snoop_pktsent.attr.attr,
+ &tx2_pmu_event_attr_data_pktsent.attr.attr,
+ &tx2_pmu_event_attr_gic_pktsent.attr.attr,
+ NULL,
+};
+
static const struct attribute_group l3c_pmu_events_attr_group = {
.name = "events",
.attrs = l3c_pmu_events_attrs,
@@ -174,6 +240,11 @@ static const struct attribute_group dmc_pmu_events_attr_group = {
.attrs = dmc_pmu_events_attrs,
};
+static const struct attribute_group ccpi2_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ccpi2_pmu_events_attrs,
+};
+
/*
* sysfs cpumask attributes
*/
@@ -213,6 +284,13 @@ static const struct attribute_group *dmc_pmu_attr_groups[] = {
NULL
};
+static const struct attribute_group *ccpi2_pmu_attr_groups[] = {
+ &ccpi2_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &ccpi2_pmu_events_attr_group,
+ NULL
+};
+
static inline u32 reg_readl(unsigned long addr)
{
return readl((void __iomem *)addr);
@@ -245,33 +323,58 @@ static void init_cntr_base_l3c(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
/* counter ctrl/data reg offset at 8 */
hwc->config_base = (unsigned long)tx2_pmu->base
- + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
+ + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
hwc->event_base = (unsigned long)tx2_pmu->base
- + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
+ + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
}
static void init_cntr_base_dmc(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
hwc->config_base = (unsigned long)tx2_pmu->base
+ DMC_COUNTER_CTL;
/* counter data reg offset at 0xc */
hwc->event_base = (unsigned long)tx2_pmu->base
- + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
+ + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask));
+}
+
+static void init_cntr_base_ccpi2(struct perf_event *event,
+ struct tx2_uncore_pmu *tx2_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+
+ cmask = tx2_pmu->counters_mask;
+
+ hwc->config_base = (unsigned long)tx2_pmu->base
+ + CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask));
+ hwc->event_base = (unsigned long)tx2_pmu->base;
}
static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
- u32 val;
+ u32 val, emask;
struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ emask = tx2_pmu->events_mask;
/* event id encoded in bits [07:03] */
- val = GET_EVENTID(event) << 3;
+ val = GET_EVENTID(event, emask) << 3;
reg_writel(val, hwc->config_base);
local64_set(&hwc->prev_count, 0);
reg_writel(0, hwc->event_base);
@@ -284,10 +387,17 @@ static inline void uncore_stop_event_l3c(struct perf_event *event)
static void uncore_start_event_dmc(struct perf_event *event, int flags)
{
- u32 val;
+ u32 val, cmask, emask;
struct hw_perf_event *hwc = &event->hw;
- int idx = GET_COUNTERID(event);
- int event_id = GET_EVENTID(event);
+ struct tx2_uncore_pmu *tx2_pmu;
+ int idx, event_id;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
+ emask = tx2_pmu->events_mask;
+
+ idx = GET_COUNTERID(event, cmask);
+ event_id = GET_EVENTID(event, emask);
/* Enable and start counters.
* 8 bits for each counter; bits [05:01] of a counter select the event type.
@@ -302,9 +412,14 @@ static void uncore_start_event_dmc(struct perf_event *event, int flags)
static void uncore_stop_event_dmc(struct perf_event *event)
{
- u32 val;
+ u32 val, cmask;
struct hw_perf_event *hwc = &event->hw;
- int idx = GET_COUNTERID(event);
+ struct tx2_uncore_pmu *tx2_pmu;
+ int idx;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
+ idx = GET_COUNTERID(event, cmask);
/* clear event type(bits[05:01]) to stop counter */
val = reg_readl(hwc->config_base);
@@ -312,27 +427,72 @@ static void uncore_stop_event_dmc(struct perf_event *event)
reg_writel(val, hwc->config_base);
}
+static void uncore_start_event_ccpi2(struct perf_event *event, int flags)
+{
+ u32 emask;
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ emask = tx2_pmu->events_mask;
+
+ /* Bits [09:00] set the event id.
+ * Bit [10] sets the level to rising edge.
+ * Bit [11] sets the type to edge sensitive.
+ */
+ reg_writel((CCPI2_EVENT_TYPE_EDGE_SENSITIVE |
+ CCPI2_EVENT_LEVEL_RISING_EDGE |
+ GET_EVENTID(event, emask)), hwc->config_base);
+
+ /* reset[4], enable[0] and start[1] counters */
+ reg_writel(CCPI2_PERF_CTL_RESET |
+ CCPI2_PERF_CTL_START |
+ CCPI2_PERF_CTL_ENABLE,
+ hwc->event_base + CCPI2_PERF_CTL);
+ local64_set(&event->hw.prev_count, 0ULL);
+}
+
+static void uncore_stop_event_ccpi2(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* disable and stop counter */
+ reg_writel(0, hwc->event_base + CCPI2_PERF_CTL);
+}
+
static void tx2_uncore_event_update(struct perf_event *event)
{
- s64 prev, delta, new = 0;
+ u64 prev, delta, new = 0;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
enum tx2_uncore_type type;
u32 prorate_factor;
+ u32 cmask, emask;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
type = tx2_pmu->type;
+ cmask = tx2_pmu->counters_mask;
+ emask = tx2_pmu->events_mask;
prorate_factor = tx2_pmu->prorate_factor;
-
- new = reg_readl(hwc->event_base);
- prev = local64_xchg(&hwc->prev_count, new);
-
- /* handles rollover of 32 bit counter */
- delta = (u32)(((1UL << 32) - prev) + new);
+ if (type == PMU_TYPE_CCPI2) {
+ reg_writel(CCPI2_COUNTER_OFFSET +
+ GET_COUNTERID(event, cmask),
+ hwc->event_base + CCPI2_COUNTER_SEL);
+ new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
+ new = (new << 32) +
+ reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
+ prev = local64_xchg(&hwc->prev_count, new);
+ delta = new - prev;
+ } else {
+ new = reg_readl(hwc->event_base);
+ prev = local64_xchg(&hwc->prev_count, new);
+ /* handles rollover of 32 bit counter */
+ delta = (u32)(((1UL << 32) - prev) + new);
+ }
/* The DMC data_transfers event counts in 16-byte units; convert to 64-byte units */
if (type == PMU_TYPE_DMC &&
- GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
+ GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS)
delta = delta/4;
/* L3C and DMC have 16 and 8 interleave channels respectively.
@@ -351,6 +511,7 @@ static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
} devices[] = {
{"CAV901D", PMU_TYPE_L3C},
{"CAV901F", PMU_TYPE_DMC},
+ {"CAV901E", PMU_TYPE_CCPI2},
{"", PMU_TYPE_INVALID}
};
@@ -380,7 +541,8 @@ static bool tx2_uncore_validate_event(struct pmu *pmu,
* Make sure the group of events can be scheduled at once
* on the PMU.
*/
-static bool tx2_uncore_validate_event_group(struct perf_event *event)
+static bool tx2_uncore_validate_event_group(struct perf_event *event,
+ int max_counters)
{
struct perf_event *sibling, *leader = event->group_leader;
int counters = 0;
@@ -403,7 +565,7 @@ static bool tx2_uncore_validate_event_group(struct perf_event *event)
* If the group requires more counters than the HW has,
* it cannot ever be scheduled.
*/
- return counters <= TX2_PMU_MAX_COUNTERS;
+ return counters <= max_counters;
}
@@ -439,7 +601,7 @@ static int tx2_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config;
/* Validate the group */
- if (!tx2_uncore_validate_event_group(event))
+ if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
return -EINVAL;
return 0;
@@ -456,6 +618,10 @@ static void tx2_uncore_event_start(struct perf_event *event, int flags)
tx2_pmu->start_event(event, flags);
perf_event_update_userpage(event);
+ /* No hrtimer needed for CCPI2, 64-bit counters */
+ if (!tx2_pmu->hrtimer_callback)
+ return;
+
/* Start timer for first event */
if (bitmap_weight(tx2_pmu->active_counters,
tx2_pmu->max_counters) == 1) {
@@ -510,15 +676,23 @@ static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+ cmask = tx2_pmu->counters_mask;
tx2_uncore_event_stop(event, PERF_EF_UPDATE);
/* clear the assigned counter */
- free_counter(tx2_pmu, GET_COUNTERID(event));
+ free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
perf_event_update_userpage(event);
tx2_pmu->events[hwc->idx] = NULL;
hwc->idx = -1;
+
+ if (!tx2_pmu->hrtimer_callback)
+ return;
+
+ if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
+ hrtimer_cancel(&tx2_pmu->hrtimer);
}
static void tx2_uncore_event_read(struct perf_event *event)
@@ -580,8 +754,12 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
cpu_online_mask);
tx2_pmu->cpu = cpu;
- hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_hrtimer_callback;
+
+ if (tx2_pmu->hrtimer_callback) {
+ hrtimer_init(&tx2_pmu->hrtimer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ }
ret = tx2_uncore_pmu_register(tx2_pmu);
if (ret) {
@@ -653,10 +831,13 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
switch (tx2_pmu->type) {
case PMU_TYPE_L3C:
- tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+ tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
+ tx2_pmu->counters_mask = 0x3;
tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
tx2_pmu->max_events = L3_EVENT_MAX;
+ tx2_pmu->events_mask = 0x1f;
tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+ tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
tx2_pmu->attr_groups = l3c_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_l3c_%d", tx2_pmu->node);
@@ -665,10 +846,13 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
tx2_pmu->stop_event = uncore_stop_event_l3c;
break;
case PMU_TYPE_DMC:
- tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+ tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
+ tx2_pmu->counters_mask = 0x3;
tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
tx2_pmu->max_events = DMC_EVENT_MAX;
+ tx2_pmu->events_mask = 0x1f;
tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+ tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
tx2_pmu->attr_groups = dmc_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_dmc_%d", tx2_pmu->node);
@@ -676,6 +860,21 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
tx2_pmu->start_event = uncore_start_event_dmc;
tx2_pmu->stop_event = uncore_stop_event_dmc;
break;
+ case PMU_TYPE_CCPI2:
+ /* CCPI2 has 8 counters */
+ tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
+ tx2_pmu->counters_mask = 0x7;
+ tx2_pmu->prorate_factor = 1;
+ tx2_pmu->max_events = CCPI2_EVENT_MAX;
+ tx2_pmu->events_mask = 0x1ff;
+ tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
+ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
+ "uncore_ccpi2_%d", tx2_pmu->node);
+ tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
+ tx2_pmu->start_event = uncore_start_event_ccpi2;
+ tx2_pmu->stop_event = uncore_stop_event_ccpi2;
+ tx2_pmu->hrtimer_callback = NULL;
+ break;
case PMU_TYPE_INVALID:
devm_kfree(dev, tx2_pmu);
return NULL;
@@ -744,7 +943,9 @@ static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
if (cpu != tx2_pmu->cpu)
return 0;
- hrtimer_cancel(&tx2_pmu->hrtimer);
+ if (tx2_pmu->hrtimer_callback)
+ hrtimer_cancel(&tx2_pmu->hrtimer);
+
cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
new_cpu = cpumask_any_and(
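Two counter-width details in this driver are easy to miss. First, the DMC/L3C rollover arithmetic in tx2_uncore_event_update() is just (new - prev) modulo 2^32: with prev = 0xfffffff0 and new = 0x10 it yields 0x20. Second, the CCPI2 counters are 64-bit, so they need no sampling hrtimer; a read selects the counter and then combines two 32-bit halves. A sketch of both, using the register names defined above (helper names illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* 32-bit rollover: equivalent to (new - prev) mod 2^32. */
static u32 example_delta32(u32 prev, u32 new)
{
	return (u32)(((1ULL << 32) - prev) + new);
}

/* 64-bit CCPI2 read: select the counter, then read high and low halves. */
static u64 example_ccpi2_read(void __iomem *base, int counter)
{
	u64 val;

	writel(CCPI2_COUNTER_OFFSET + counter, base + CCPI2_COUNTER_SEL);
	val = readl(base + CCPI2_COUNTER_DATA_H);
	return (val << 32) | readl(base + CCPI2_COUNTER_DATA_L);
}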
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 7e328d6385c3..46ee6807d533 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1282,25 +1282,21 @@ static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
- struct resource *res;
unsigned int reg;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ csw_csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(csw_csr)) {
dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
return PTR_ERR(csw_csr);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- mcba_csr = devm_ioremap_resource(&pdev->dev, res);
+ mcba_csr = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(mcba_csr)) {
dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
return PTR_ERR(mcba_csr);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
+ mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(mcbb_csr)) {
dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
return PTR_ERR(mcbb_csr);
@@ -1332,13 +1328,11 @@ static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
void __iomem *csw_csr;
- struct resource *res;
unsigned int reg;
u32 mcb0routing;
u32 mcb1routing;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ csw_csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(csw_csr)) {
dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
return PTR_ERR(csw_csr);
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 215425296c77..3dab79e9d52b 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -45,3 +45,14 @@ config PHY_SUN9I_USB
sun9i SoCs.
This driver controls each individual USB 2 host PHY.
+
+config PHY_SUN50I_USB3
+ tristate "Allwinner H6 SoC USB3 PHY driver"
+ depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on RESET_CONTROLLER
+ select GENERIC_PHY
+ help
+ Enable this to support the USB3.0-capable transceiver that is
+ part of the Allwinner H6 SoC.
+
+ This driver controls each individual USB 2+3 host PHY combo.
diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile
index 799a65c0b58d..bd74901a1255 100644
--- a/drivers/phy/allwinner/Makefile
+++ b/drivers/phy/allwinner/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o
+obj-$(CONFIG_PHY_SUN50I_USB3) += phy-sun50i-usb3.o
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
new file mode 100644
index 000000000000..1169f3e83a6f
--- /dev/null
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sun50i(H6) USB 3.0 phy driver
+ *
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on phy-sun9i-usb.c, which is:
+ *
+ * Copyright (C) 2014-2015 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on code from Allwinner BSP, which is:
+ *
+ * Copyright (c) 2010-2015 Allwinner Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Interface Status and Control Registers */
+#define SUNXI_ISCR 0x00
+#define SUNXI_PIPE_CLOCK_CONTROL 0x14
+#define SUNXI_PHY_TUNE_LOW 0x18
+#define SUNXI_PHY_TUNE_HIGH 0x1c
+#define SUNXI_PHY_EXTERNAL_CONTROL 0x20
+
+/* USB2.0 Interface Status and Control Register */
+#define SUNXI_ISCR_FORCE_VBUS (3 << 12)
+
+/* PIPE Clock Control Register */
+#define SUNXI_PCC_PIPE_CLK_OPEN (1 << 6)
+
+/* PHY External Control Register */
+#define SUNXI_PEC_EXTERN_VBUS (3 << 1)
+#define SUNXI_PEC_SSC_EN (1 << 24)
+#define SUNXI_PEC_REF_SSP_EN (1 << 26)
+
+/* PHY Tune High Register */
+#define SUNXI_TX_DEEMPH_3P5DB(n) ((n) << 19)
+#define SUNXI_TX_DEEMPH_3P5DB_MASK GENMASK(24, 19)
+#define SUNXI_TX_DEEMPH_6DB(n) ((n) << 13)
+#define SUNXI_TX_DEEMPH_6DB_MASK GENMASK(18, 13)
+#define SUNXI_TX_SWING_FULL(n) ((n) << 6)
+#define SUNXI_TX_SWING_FULL_MASK GENMASK(12, 6)
+#define SUNXI_LOS_BIAS(n) ((n) << 3)
+#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
+#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
+#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
+
+struct sun50i_usb3_phy {
+ struct phy *phy;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct clk *clk;
+};
+
+static void sun50i_usb3_phy_open(struct sun50i_usb3_phy *phy)
+{
+ u32 val;
+
+ val = readl(phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+ val |= SUNXI_PEC_EXTERN_VBUS;
+ val |= SUNXI_PEC_SSC_EN | SUNXI_PEC_REF_SSP_EN;
+ writel(val, phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+
+ val = readl(phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+ val |= SUNXI_PCC_PIPE_CLK_OPEN;
+ writel(val, phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+
+ val = readl(phy->regs + SUNXI_ISCR);
+ val |= SUNXI_ISCR_FORCE_VBUS;
+ writel(val, phy->regs + SUNXI_ISCR);
+
+ /*
+ * All the magic numbers written to the PHY_TUNE_{LOW,HIGH}
+ * registers are taken directly from the BSP USB3 driver from
+ * Allwinner.
+ */
+ writel(0x0047fc87, phy->regs + SUNXI_PHY_TUNE_LOW);
+
+ val = readl(phy->regs + SUNXI_PHY_TUNE_HIGH);
+ val &= ~(SUNXI_TXVBOOSTLVL_MASK | SUNXI_LOS_BIAS_MASK |
+ SUNXI_TX_SWING_FULL_MASK | SUNXI_TX_DEEMPH_6DB_MASK |
+ SUNXI_TX_DEEMPH_3P5DB_MASK);
+ val |= SUNXI_TXVBOOSTLVL(0x7);
+ val |= SUNXI_LOS_BIAS(0x7);
+ val |= SUNXI_TX_SWING_FULL(0x55);
+ val |= SUNXI_TX_DEEMPH_6DB(0x20);
+ val |= SUNXI_TX_DEEMPH_3P5DB(0x15);
+ writel(val, phy->regs + SUNXI_PHY_TUNE_HIGH);
+}
+
+static int sun50i_usb3_phy_init(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ ret = clk_prepare_enable(phy->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(phy->reset);
+ if (ret) {
+ clk_disable_unprepare(phy->clk);
+ return ret;
+ }
+
+ sun50i_usb3_phy_open(phy);
+ return 0;
+}
+
+static int sun50i_usb3_phy_exit(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops sun50i_usb3_phy_ops = {
+ .init = sun50i_usb3_phy_init,
+ .exit = sun50i_usb3_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int sun50i_usb3_phy_probe(struct platform_device *pdev)
+{
+ struct sun50i_usb3_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(phy->clk)) {
+ if (PTR_ERR(phy->clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get phy clock\n");
+ return PTR_ERR(phy->clk);
+ }
+
+ phy->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(phy->reset)) {
+ dev_err(dev, "failed to get reset control\n");
+ return PTR_ERR(phy->reset);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->phy = devm_phy_create(dev, NULL, &sun50i_usb3_phy_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy->phy);
+ }
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id sun50i_usb3_phy_of_match[] = {
+ { .compatible = "allwinner,sun50i-h6-usb3-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun50i_usb3_phy_of_match);
+
+static struct platform_driver sun50i_usb3_phy_driver = {
+ .probe = sun50i_usb3_phy_probe,
+ .driver = {
+ .of_match_table = sun50i_usb3_phy_of_match,
+ .name = "sun50i-usb3-phy",
+ }
+};
+module_platform_driver(sun50i_usb3_phy_driver);
+
+MODULE_DESCRIPTION("Allwinner H6 USB 3.0 phy driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL");
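sun50i_usb3_phy_open() is a string of read-modify-write updates; a hedged helper (not in the driver, name illustrative) that captures the repeated pattern:

#include <linux/io.h>

/* Set 'bits' in the PHY register at 'offset' without disturbing the rest. */
static void example_phy_set_bits(struct sun50i_usb3_phy *phy,
				 unsigned long offset, u32 bits)
{
	u32 val = readl(phy->regs + offset);

	writel(val | bits, phy->regs + offset);
}

With it, e.g. the PIPE-clock step above collapses to example_phy_set_bits(phy, SUNXI_PIPE_CLOCK_CONTROL, SUNXI_PCC_PIPE_CLK_OPEN).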
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 3c53625f8bc2..91b5b09589d6 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -126,8 +126,8 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
#define USB_CTRL_MASK(reg, field) \
USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
@@ -416,7 +416,7 @@ void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -428,7 +428,7 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -707,7 +707,7 @@ static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
void __iomem *xhci_ec_base = params->xhci_ec_regs;
u32 val;
- if (params->family_id != 0x74371000 || xhci_ec_base == 0)
+ if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
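The __iomem annotations added here change nothing at run time; they exist so sparse can track pointer address spaces and flag MMIO/plain-pointer mixing. A minimal illustration (function name illustrative):

#include <linux/io.h>

static void example_iomem_check(void __iomem *mmio)
{
	u32 v = readl(mmio);		/* fine: accessors take __iomem */
	void *plain = (void *)mmio;	/* sparse: cast removes address space */

	(void)v;
	(void)plain;
}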
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 9b16f13b5ab2..34a6a9a1ceb2 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -114,7 +114,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
- struct resource *res;
int i = 0;
int ret;
@@ -122,8 +121,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index 62d10ef20296..f1cb3e4d2add 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -195,7 +195,6 @@ static int histb_combphy_probe(struct platform_device *pdev)
struct histb_combphy_priv *priv;
struct device_node *np = dev->of_node;
struct histb_combphy_mode *mode;
- struct resource *res;
u32 vals[3];
int ret;
@@ -203,8 +202,7 @@ static int histb_combphy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 544d64a84cc0..6e457967653e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
goto err_disable_pdi_clk;
/* Check if we are in "startup ready" status */
- if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
+ ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
+ if (ret)
goto err_disable_phy_clk;
ltq_vrx200_pcie_phy_apply_workarounds(phy);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index ded900b06f5a..23bc3bf5c4c0 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -216,20 +216,13 @@ static int mvebu_a3700_utmi_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mvebu_a3700_utmi *utmi;
struct phy_provider *provider;
- struct resource *res;
utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
if (!utmi)
return -ENOMEM;
/* Get UTMI memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "Missing UTMI PHY memory resource\n");
- return -ENODEV;
- }
-
- utmi->regs = devm_ioremap_resource(dev, res);
+ utmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(utmi->regs))
return PTR_ERR(utmi->regs);
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 3c9189473407..7a33ec12f71b 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1342,7 +1342,7 @@ static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
{
int i;
- struct {
+ static const struct {
u32 reg;
u32 val;
} serdes_reg[] = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 39e8deb8001e..091e20303a14 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -165,6 +165,11 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
[QPHY_PCS_READY_STATUS] = 0x160,
};
+static const unsigned int sm8150_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x180,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -879,6 +884,93 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -1276,6 +1368,31 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8150_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
+ .tx_tbl = sm8150_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
+ .rx_tbl = sm8150_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
+ .pcs_tbl = sm8150_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .is_dual_lane_phy = true,
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1998,6 +2115,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,msm8998-qmp-usb3-phy",
.data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
},
{ },
};
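All of the new sm8150 tables feed one table-driven programming loop; qcom_qmp_phy_configure() is essentially the walk below (a sketch consistent with the QMP_PHY_INIT_CFG entries, with lane handling and conditional entries omitted):

#include <linux/io.h>

/* Write each (offset, value) pair of an init table in order. */
static void example_qmp_configure(void __iomem *base,
				  const struct qmp_phy_init_tbl tbl[],
				  int num)
{
	int i;

	for (i = 0; i < num; i++)
		writel(tbl[i].val, base + tbl[i].offset);
}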
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 335ea5d7ef40..ab6ff9b45a32 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -313,4 +313,100 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
+/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_PLL_IVCO 0x058
+#define QSERDES_V4_COM_CMN_IPTRIM 0x060
+#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V4_COM_HSCLK_SEL 0x158
+#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+
+/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+
+/* Only for QMP V4 PHY - RX registers */
+#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V4_RX_SIGDET_LVL 0x120
+#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V4_RX_RX_BAND 0x128
+#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
+#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
+#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
+#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
+#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
+#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
+#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
+#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
+#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
+#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
+#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+
+/* Only for QMP V4 PHY - PCS registers */
+#define QPHY_V4_PHY_START 0x000
+#define QPHY_V4_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_SW_RESET 0x008
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PLL_CNTL 0x02c
+#define QPHY_V4_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_LINECFG_DISABLE 0x148
+#define QPHY_V4_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_READY_STATUS 0x180
+#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index b163b3a1558d..61054272a7c8 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -158,8 +158,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
/* setup initial state */
qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
uphy->vbus_edev);
- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
- EXTCON_USB, &uphy->vbus_notify);
+ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
if (ret)
goto err_ulpi;
}
@@ -180,6 +180,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
{
struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
+ if (uphy->vbus_edev)
+ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
regulator_disable(uphy->v3p3);
regulator_disable(uphy->v1p8);
clk_disable_unprepare(uphy->sleep_clk);
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 2926e4937301..2e279ac0fa4d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -71,6 +71,7 @@ struct rcar_gen2_phy_driver {
struct rcar_gen2_phy_data {
const struct phy_ops *gen2_phy_ops;
const u32 (*select_value)[PHYS_PER_CHANNEL];
+ const u32 num_channels;
};
static int rcar_gen2_phy_init(struct phy *p)
@@ -271,11 +272,13 @@ static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
.gen2_phy_ops = &rcar_gen2_phy_ops,
.select_value = pci_select_value,
+ .num_channels = ARRAY_SIZE(pci_select_value),
};
static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
.gen2_phy_ops = &rz_g1c_phy_ops,
.select_value = usb20_select_value,
+ .num_channels = ARRAY_SIZE(usb20_select_value),
};
static const struct of_device_id rcar_gen2_phy_match_table[] = {
@@ -389,7 +392,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
channel->selected_phy = -1;
error = of_property_read_u32(np, "reg", &channel_num);
- if (error || channel_num > 2) {
+ if (error || channel_num >= data->num_channels) {
dev_err(dev, "Invalid \"reg\" property\n");
of_node_put(np);
return error;
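
The check now derives its upper bound from the per-SoC select-value table via ARRAY_SIZE() instead of the hard-coded 2, so adding a table row automatically widens the accepted "reg" range. A minimal standalone sketch of the idiom (table contents are illustrative, not the real select values):

#include <stdio.h>

#define PHYS_PER_CHANNEL 2
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* stand-in for pci_select_value/usb20_select_value */
static const unsigned int select_value[][PHYS_PER_CHANNEL] = {
    { 0x04, 0x05 },
    { 0x00, 0x05 },
    { 0x04, 0x05 },
};

int main(void)
{
    for (unsigned int channel_num = 0; channel_num < 4; channel_num++) {
        /* ">=" rejects exactly the indices the table does not cover */
        if (channel_num >= ARRAY_SIZE(select_value))
            printf("channel %u: invalid \"reg\" property\n", channel_num);
        else
            printf("channel %u: ok\n", channel_num);
    }
    return 0;
}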
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index b7f6b1324395..bfb22f868857 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
@@ -320,9 +321,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode = PHY_MODE_USB_HOST;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
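
sysfs_streq() compares a sysfs input buffer against a token while tolerating the trailing newline that echo-style writes carry, and unlike the prefix-length strncmp() it also rejects longer strings that merely start with the token. A userspace demo (the helper is re-implemented here purely for illustration; in the kernel the real sysfs_streq() from <linux/string.h> is used):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* minimal re-implementation of sysfs_streq(), for illustration only */
static bool my_sysfs_streq(const char *s1, const char *s2)
{
    while (*s1 && *s1 == *s2) {
        s1++;
        s2++;
    }
    if (*s1 == *s2)
        return true;
    if (!*s1 && *s2 == '\n' && !s2[1])
        return true;
    if (*s1 == '\n' && !s1[1] && !*s2)
        return true;
    return false;
}

int main(void)
{
    /* strncmp() accepts anything that merely starts with "host" */
    printf("%d\n", !strncmp("hostile\n", "host", strlen("host"))); /* 1: bug */
    printf("%d\n", my_sysfs_streq("hostile\n", "host"));           /* 0 */
    printf("%d\n", my_sysfs_streq("host\n", "host"));              /* 1 */
    return 0;
}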
@@ -614,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->base);
/* call request_irq for OTG */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index c454c90cd99e..dbd2de4d28b1 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -35,6 +35,14 @@ config PHY_ROCKCHIP_INNO_USB2
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+config PHY_ROCKCHIP_INNO_DSIDPHY
+ tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
+ Innosilicon IP block.
+
config PHY_ROCKCHIP_PCIE
tristate "Rockchip PCIe PHY Driver"
depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index fd21cbaf40dd..9f59a81e4e0d 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
new file mode 100644
index 000000000000..fc729ecd3fe9
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Wyon Bi <bivvy.bi@rock-chips.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
+/*
+ * The register offset address[7:0] is split into two parts: bits [7:5] form
+ * the first address and bits [4:0] form the second address. Both must be set
+ * when configuring a register. The clock lane and the data lanes share the
+ * same registers with the same second address; only the first address
+ * differs.
+ */
+#define FIRST_ADDRESS(x) (((x) & 0x7) << 5)
+#define SECOND_ADDRESS(x) (((x) & 0x1f) << 0)
+#define PHY_REG(first, second) (FIRST_ADDRESS(first) | \
+ SECOND_ADDRESS(second))
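
To make the two-part addressing concrete, here is a small standalone sketch of how PHY_REG() composes a register index and how the driver turns it into an MMIO byte offset (the << 2 matches phy_update_bits() further down; part numbers follow the REGISTER_PART_* enum later in the file):

#include <stdio.h>

#define FIRST_ADDRESS(x)        (((x) & 0x7) << 5)
#define SECOND_ADDRESS(x)       (((x) & 0x1f) << 0)
#define PHY_REG(first, second)  (FIRST_ADDRESS(first) | SECOND_ADDRESS(second))

int main(void)
{
    /* analog part (first = 0), reg 0x03 -> index 0x03, MMIO offset 0x0c */
    unsigned int reg = PHY_REG(0, 0x03);

    printf("index=0x%02x mmio=0x%02x\n", reg, reg << 2);
    /* data lane 0 part (first = 3), same second address 0x05 */
    reg = PHY_REG(3, 0x05);
    printf("index=0x%02x mmio=0x%02x\n", reg, reg << 2);
    return 0;
}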
+
+/* Analog Register Part: reg00 */
+#define BANDGAP_POWER_MASK BIT(7)
+#define BANDGAP_POWER_DOWN BIT(7)
+#define BANDGAP_POWER_ON 0
+#define LANE_EN_MASK GENMASK(6, 2)
+#define LANE_EN_CK BIT(6)
+#define LANE_EN_3 BIT(5)
+#define LANE_EN_2 BIT(4)
+#define LANE_EN_1 BIT(3)
+#define LANE_EN_0 BIT(2)
+#define POWER_WORK_MASK GENMASK(1, 0)
+#define POWER_WORK_ENABLE UPDATE(1, 1, 0)
+#define POWER_WORK_DISABLE UPDATE(2, 1, 0)
+/* Analog Register Part: reg01 */
+#define REG_SYNCRST_MASK BIT(2)
+#define REG_SYNCRST_RESET BIT(2)
+#define REG_SYNCRST_NORMAL 0
+#define REG_LDOPD_MASK BIT(1)
+#define REG_LDOPD_POWER_DOWN BIT(1)
+#define REG_LDOPD_POWER_ON 0
+#define REG_PLLPD_MASK BIT(0)
+#define REG_PLLPD_POWER_DOWN BIT(0)
+#define REG_PLLPD_POWER_ON 0
+/* Analog Register Part: reg03 */
+#define REG_FBDIV_HI_MASK BIT(5)
+#define REG_FBDIV_HI(x) UPDATE(((x) >> 8), 5, 5)
+#define REG_PREDIV_MASK GENMASK(4, 0)
+#define REG_PREDIV(x) UPDATE(x, 4, 0)
+/* Analog Register Part: reg04 */
+#define REG_FBDIV_LO_MASK GENMASK(7, 0)
+#define REG_FBDIV_LO(x) UPDATE(x, 7, 0)
+/* Analog Register Part: reg05 */
+#define SAMPLE_CLOCK_PHASE_MASK GENMASK(6, 4)
+#define SAMPLE_CLOCK_PHASE(x) UPDATE(x, 6, 4)
+#define CLOCK_LANE_SKEW_PHASE_MASK GENMASK(2, 0)
+#define CLOCK_LANE_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg06 */
+#define DATA_LANE_3_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_3_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_2_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_2_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg07 */
+#define DATA_LANE_1_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_1_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_0_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_0_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg08 */
+#define SAMPLE_CLOCK_DIRECTION_MASK BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_REVERSE BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_FORWARD 0
+/* Digital Register Part: reg00 */
+#define REG_DIG_RSTN_MASK BIT(0)
+#define REG_DIG_RSTN_NORMAL BIT(0)
+#define REG_DIG_RSTN_RESET 0
+/* Digital Register Part: reg01 */
+#define INVERT_TXCLKESC_MASK BIT(1)
+#define INVERT_TXCLKESC_ENABLE BIT(1)
+#define INVERT_TXCLKESC_DISABLE 0
+#define INVERT_TXBYTECLKHS_MASK BIT(0)
+#define INVERT_TXBYTECLKHS_ENABLE BIT(0)
+#define INVERT_TXBYTECLKHS_DISABLE 0
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg05 */
+#define T_LPX_CNT_MASK GENMASK(5, 0)
+#define T_LPX_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
+#define T_HS_PREPARE_CNT_MASK GENMASK(6, 0)
+#define T_HS_PREPARE_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
+#define T_HS_ZERO_CNT_MASK GENMASK(5, 0)
+#define T_HS_ZERO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
+#define T_HS_TRAIL_CNT_MASK GENMASK(6, 0)
+#define T_HS_TRAIL_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
+#define T_HS_EXIT_CNT_MASK GENMASK(4, 0)
+#define T_HS_EXIT_CNT(x) UPDATE(x, 4, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
+#define T_CLK_POST_CNT_MASK GENMASK(3, 0)
+#define T_CLK_POST_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
+#define LPDT_TX_PPI_SYNC_MASK BIT(2)
+#define LPDT_TX_PPI_SYNC_ENABLE BIT(2)
+#define LPDT_TX_PPI_SYNC_DISABLE 0
+#define T_WAKEUP_CNT_HI_MASK GENMASK(1, 0)
+#define T_WAKEUP_CNT_HI(x) UPDATE(x, 1, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0d */
+#define T_WAKEUP_CNT_LO_MASK GENMASK(7, 0)
+#define T_WAKEUP_CNT_LO(x) UPDATE(x, 7, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0e */
+#define T_CLK_PRE_CNT_MASK GENMASK(3, 0)
+#define T_CLK_PRE_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
+#define T_TA_GO_CNT_MASK GENMASK(5, 0)
+#define T_TA_GO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
+#define T_TA_SURE_CNT_MASK GENMASK(5, 0)
+#define T_TA_SURE_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
+#define T_TA_WAIT_CNT_MASK GENMASK(5, 0)
+#define T_TA_WAIT_CNT(x) UPDATE(x, 5, 0)
+/* LVDS Register Part: reg00 */
+#define LVDS_DIGITAL_INTERNAL_RESET_MASK BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_DISABLE BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_ENABLE 0
+/* LVDS Register Part: reg01 */
+#define LVDS_DIGITAL_INTERNAL_ENABLE_MASK BIT(7)
+#define LVDS_DIGITAL_INTERNAL_ENABLE BIT(7)
+#define LVDS_DIGITAL_INTERNAL_DISABLE 0
+/* LVDS Register Part: reg03 */
+#define MODE_ENABLE_MASK GENMASK(2, 0)
+#define TTL_MODE_ENABLE BIT(2)
+#define LVDS_MODE_ENABLE BIT(1)
+#define MIPI_MODE_ENABLE BIT(0)
+/* LVDS Register Part: reg0b */
+#define LVDS_LANE_EN_MASK GENMASK(7, 3)
+#define LVDS_DATA_LANE0_EN BIT(7)
+#define LVDS_DATA_LANE1_EN BIT(6)
+#define LVDS_DATA_LANE2_EN BIT(5)
+#define LVDS_DATA_LANE3_EN BIT(4)
+#define LVDS_CLK_LANE_EN BIT(3)
+#define LVDS_PLL_POWER_MASK BIT(2)
+#define LVDS_PLL_POWER_OFF BIT(2)
+#define LVDS_PLL_POWER_ON 0
+#define LVDS_BANDGAP_POWER_MASK BIT(0)
+#define LVDS_BANDGAP_POWER_DOWN BIT(0)
+#define LVDS_BANDGAP_POWER_ON 0
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_ENABLECLK BIT(2)
+#define DSI_PHY_STATUS 0xb0
+#define PHY_LOCK BIT(0)
+
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+struct inno_dsidphy {
+ struct device *dev;
+ struct clk *ref_clk;
+ struct clk *pclk_phy;
+ struct clk *pclk_host;
+ void __iomem *phy_base;
+ void __iomem *host_base;
+ struct reset_control *rst;
+ enum phy_mode mode;
+
+ struct {
+ struct clk_hw hw;
+ u8 prediv;
+ u16 fbdiv;
+ unsigned long rate;
+ } pll;
+};
+
+enum {
+ REGISTER_PART_ANALOG,
+ REGISTER_PART_DIGITAL,
+ REGISTER_PART_CLOCK_LANE,
+ REGISTER_PART_DATA0_LANE,
+ REGISTER_PART_DATA1_LANE,
+ REGISTER_PART_DATA2_LANE,
+ REGISTER_PART_DATA3_LANE,
+ REGISTER_PART_LVDS,
+};
+
+static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_dsidphy, pll.hw);
+}
+
+static void phy_update_bits(struct inno_dsidphy *inno,
+ u8 first, u8 second, u8 mask, u8 val)
+{
+ u32 reg = PHY_REG(first, second) << 2;
+ unsigned int tmp, orig;
+
+ orig = readl(inno->phy_base + reg);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ writel(tmp, inno->phy_base + reg);
+}
+
+static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ /* Global Operation Timing Parameters */
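+ /* All values below are in picoseconds; "period" is the bit period (UI) in ps */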
+ timing->clkmiss = 0;
+ timing->clkpost = 70000 + 52 * period;
+ timing->clkpre = 8 * period;
+ timing->clkprepare = 65000;
+ timing->clksettle = 95000;
+ timing->clktermen = 0;
+ timing->clktrail = 80000;
+ timing->clkzero = 260000;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120000;
+ timing->hsprepare = 65000 + 4 * period;
+ timing->hszero = 145000 + 6 * period;
+ timing->hssettle = 85000 + 6 * period;
+ timing->hsskip = 40000;
+ timing->hstrail = max(8 * period, 60000 + 4 * period);
+ timing->init = 100000000;
+ timing->lpx = 60000;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000000;
+}
+
+static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
+{
+ struct mipi_dphy_timing gotp;
+ const struct {
+ unsigned long rate;
+ u8 hs_prepare;
+ u8 clk_lane_hs_zero;
+ u8 data_lane_hs_zero;
+ u8 hs_trail;
+ } timings[] = {
+ { 110000000, 0x20, 0x16, 0x02, 0x22},
+ { 150000000, 0x06, 0x16, 0x03, 0x45},
+ { 200000000, 0x18, 0x17, 0x04, 0x0b},
+ { 250000000, 0x05, 0x17, 0x05, 0x16},
+ { 300000000, 0x51, 0x18, 0x06, 0x2c},
+ { 400000000, 0x64, 0x19, 0x07, 0x33},
+ { 500000000, 0x20, 0x1b, 0x07, 0x4e},
+ { 600000000, 0x6a, 0x1d, 0x08, 0x3a},
+ { 700000000, 0x3e, 0x1e, 0x08, 0x6a},
+ { 800000000, 0x21, 0x1f, 0x09, 0x29},
+ {1000000000, 0x09, 0x20, 0x09, 0x27},
+ };
+ u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 txbyteclkhs, txclkesc, esc_clk_div;
+ u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
+ u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
+ unsigned int i;
+
+ /* Select MIPI mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(inno->pll.prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
+ /* Enable PLL and LDO */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_ON | REG_PLLPD_POWER_ON);
+ /* Reset analog */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_NORMAL);
+ /* Reset digital */
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_NORMAL);
+
+ txbyteclkhs = inno->pll.rate / 8;
+ t_txbyteclkhs = div_u64(PSEC_PER_SEC, txbyteclkhs);
+
+ esc_clk_div = DIV_ROUND_UP(txbyteclkhs, 20000000);
+ txclkesc = txbyteclkhs / esc_clk_div;
+ t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
+
+ ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
+
+ memset(&gotp, 0, sizeof(gotp));
+ mipi_dphy_timing_get_default(&gotp, ui);
+
+ /*
+ * The value of counter for HS Ths-exit
+ * Ths-exit = Tpin_txbyteclkhs * value
+ */
+ hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-post
+ * Tclk-post = Tpin_txbyteclkhs * value
+ */
+ clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-pre
+ * Tclk-pre = Tpin_txbyteclkhs * value
+ */
+ clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+
+ /*
+ * The value of counter for HS Tlpx Time
+ * Tlpx = Tpin_txbyteclkhs * (2 + value)
+ */
+ lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ if (lpx >= 2)
+ lpx -= 2;
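
A quick sanity check of the Tlpx arithmetic, assuming a 1 GHz PLL rate (all figures in picoseconds, matching the driver's units):

#include <stdio.h>

#define PSEC_PER_SEC 1000000000000ULL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long long rate = 1000000000ULL;                    /* assumed PLL rate */
    unsigned long long txbyteclkhs = rate / 8;                  /* 125 MHz */
    unsigned long long t_txbyteclkhs = PSEC_PER_SEC / txbyteclkhs; /* 8000 ps */
    unsigned long long lpx = DIV_ROUND_UP(60000, t_txbyteclkhs);   /* 8 */

    if (lpx >= 2)
        lpx -= 2;   /* the counter hardware adds 2 cycles of its own */
    /* 6 -> Tlpx = (2 + 6) * 8 ns = 64 ns >= the 60 ns target */
    printf("T_LPX_CNT = %llu\n", lpx);
    return 0;
}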
+
+ /*
+ * The value of counter for HS Tta-go
+ * Tta-go for turnaround
+ * Tta-go = Ttxclkesc * value
+ */
+ ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-sure
+ * Tta-sure for turnaround
+ * Tta-sure = Ttxclkesc * value
+ */
+ ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-wait
+ * Tta-wait for turnaround
+ * Tta-wait = Ttxclkesc * value
+ */
+ ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+
+ for (i = 0; i < ARRAY_SIZE(timings); i++)
+ if (inno->pll.rate <= timings[i].rate)
+ break;
+
+ if (i == ARRAY_SIZE(timings))
+ --i;
+
+ hs_prepare = timings[i].hs_prepare;
+ hs_trail = timings[i].hs_trail;
+ clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
+ data_lane_hs_zero = timings[i].data_lane_hs_zero;
+ wakeup = 0x3ff;
+
+ for (i = REGISTER_PART_CLOCK_LANE; i <= REGISTER_PART_DATA3_LANE; i++) {
+ if (i == REGISTER_PART_CLOCK_LANE)
+ hs_zero = clk_lane_hs_zero;
+ else
+ hs_zero = data_lane_hs_zero;
+
+ phy_update_bits(inno, i, 0x05, T_LPX_CNT_MASK,
+ T_LPX_CNT(lpx));
+ phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
+ T_HS_PREPARE_CNT(hs_prepare));
+ phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
+ T_HS_ZERO_CNT(hs_zero));
+ phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
+ T_HS_TRAIL_CNT(hs_trail));
+ phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
+ T_HS_EXIT_CNT(hs_exit));
+ phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
+ T_CLK_POST_CNT(clk_post));
+ phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
+ T_CLK_PRE_CNT(clk_pre));
+ phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
+ T_WAKEUP_CNT_HI(wakeup >> 8));
+ phy_update_bits(inno, i, 0x0d, T_WAKEUP_CNT_LO_MASK,
+ T_WAKEUP_CNT_LO(wakeup));
+ phy_update_bits(inno, i, 0x10, T_TA_GO_CNT_MASK,
+ T_TA_GO_CNT(ta_go));
+ phy_update_bits(inno, i, 0x11, T_TA_SURE_CNT_MASK,
+ T_TA_SURE_CNT(ta_sure));
+ phy_update_bits(inno, i, 0x12, T_TA_WAIT_CNT_MASK,
+ T_TA_WAIT_CNT(ta_wait));
+ }
+
+ /* Enable all lanes on analog part */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ LANE_EN_MASK, LANE_EN_CK | LANE_EN_3 | LANE_EN_2 |
+ LANE_EN_1 | LANE_EN_0);
+}
+
+static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
+{
+ u8 prediv = 2;
+ u16 fbdiv = 28;
+
+ /* Sample clock reverse direction */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
+ SAMPLE_CLOCK_DIRECTION_MASK,
+ SAMPLE_CLOCK_DIRECTION_REVERSE);
+
+ /* Select LVDS mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, LVDS_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x08, 0xff, 0xfc);
+ /* Enable PLL and Bandgap */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_ON | LVDS_BANDGAP_POWER_ON);
+
+ msleep(20);
+
+ /* Reset LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_ENABLE);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_DISABLE);
+ /* Enable LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_ENABLE);
+ /* Enable LVDS analog driver */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_LANE_EN_MASK, LVDS_CLK_LANE_EN |
+ LVDS_DATA_LANE0_EN | LVDS_DATA_LANE1_EN |
+ LVDS_DATA_LANE2_EN | LVDS_DATA_LANE3_EN);
+}
+
+static int inno_dsidphy_power_on(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ clk_prepare_enable(inno->pclk_phy);
+ pm_runtime_get_sync(inno->dev);
+
+ /* Bandgap power on */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_ON);
+ /* Enable power work */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_ENABLE);
+
+ switch (inno->mode) {
+ case PHY_MODE_MIPI_DPHY:
+ inno_dsidphy_mipi_mode_enable(inno);
+ break;
+ case PHY_MODE_LVDS:
+ inno_dsidphy_lvds_mode_enable(inno);
+ break;
+ default:
+ pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->pclk_phy);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int inno_dsidphy_power_off(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00, LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_DOWN | REG_PLLPD_POWER_DOWN);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_DOWN);
+
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b, LVDS_LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
+
+ pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->pclk_phy);
+
+ return 0;
+}
+
+static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ switch (mode) {
+ case PHY_MODE_MIPI_DPHY:
+ case PHY_MODE_LVDS:
+ inno->mode = mode;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct phy_ops inno_dsidphy_ops = {
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
+ unsigned long prate,
+ unsigned long rate,
+ u8 *prediv, u16 *fbdiv)
+{
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency is given by:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+ * where the PLL output frequency equals twice the DDR clock frequency.
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+ /* 5 MHz < Fref / prediv < 40 MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The valid feedback divider settings are 12, 13, 14 and
+ * 16 to 511; 15 is not supported.
+ */
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ *prediv = best_prediv;
+ *fbdiv = best_fbdiv;
+ }
+
+ return best_freq;
+}
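
Worked example of the divider search, transcribed into standalone C under an assumed 24 MHz reference clock and a 500 MHz target (same constraints as above: fbdiv in {12, 13, 14, 16..511}, 5 MHz < fref/prediv < 40 MHz):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long prate = 24000000UL, rate = 500000000UL;
    unsigned long fref = prate / 2;                             /* 12 MHz */
    unsigned long best = 0, min_delta = ~0UL;
    unsigned int min_prediv = DIV_ROUND_UP(fref, 40000000UL);   /* 1 */
    unsigned int max_prediv = fref / 5000000UL;                 /* 2 */
    unsigned int best_p = 1, best_f = 1;

    for (unsigned int p = min_prediv; p <= max_prediv; p++) {
        unsigned long long f = (unsigned long long)rate * p / fref;
        unsigned long out, delta;

        if (f == 15 || f < 12 || f > 511)
            continue;
        out = (unsigned long)(f * fref / p);
        delta = rate > out ? rate - out : out - rate;
        if (delta < min_delta) {
            min_delta = delta;
            best = out;
            best_p = p;
            best_f = (unsigned int)f;
        }
    }
    /* prints: fout=498000000 prediv=2 fbdiv=83 */
    printf("fout=%lu prediv=%u fbdiv=%u\n", best, best_p, best_f);
    return 0;
}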
+
+static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
+ &prediv, &fbdiv);
+
+ return fout;
+}
+
+static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
+ &prediv, &fbdiv);
+
+ dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
+ parent_rate, fout, prediv, fbdiv);
+
+ inno->pll.prediv = prediv;
+ inno->pll.fbdiv = fbdiv;
+ inno->pll.rate = fout;
+
+ return 0;
+}
+
+static unsigned long
+inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+
+ /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
+ return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
+}
+
+static const struct clk_ops inno_dsidphy_pll_clk_ops = {
+ .round_rate = inno_dsidphy_pll_clk_round_rate,
+ .set_rate = inno_dsidphy_pll_clk_set_rate,
+ .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+};
+
+static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
+{
+ struct device *dev = inno->dev;
+ struct clk *clk;
+ const char *parent_name;
+ struct clk_init_data init;
+ int ret;
+
+ parent_name = __clk_get_name(inno->ref_clk);
+
+ init.name = "mipi_dphy_pll";
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &init.name);
+ if (ret < 0)
+ dev_dbg(dev, "phy should set clock-output-names property\n");
+
+ init.ops = &inno_dsidphy_pll_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ inno->pll.hw.init = &init;
+ clk = devm_clk_register(dev, &inno->pll.hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &inno->pll.hw);
+}
+
+static int inno_dsidphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct inno_dsidphy *inno;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int ret;
+
+ inno = devm_kzalloc(dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = dev;
+ platform_set_drvdata(pdev, inno);
+
+ inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(inno->phy_base))
+ return PTR_ERR(inno->phy_base);
+
+ inno->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(inno->ref_clk)) {
+ ret = PTR_ERR(inno->ref_clk);
+ dev_err(dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->pclk_phy = devm_clk_get(dev, "pclk");
+ if (IS_ERR(inno->pclk_phy)) {
+ ret = PTR_ERR(inno->pclk_phy);
+ dev_err(dev, "failed to get phy pclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(inno->rst)) {
+ ret = PTR_ERR(inno->rst);
+ dev_err(dev, "failed to get system reset control: %d\n", ret);
+ return ret;
+ }
+
+ phy = devm_phy_create(dev, NULL, &inno_dsidphy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ dev_err(dev, "failed to create phy: %d\n", ret);
+ return ret;
+ }
+
+ phy_set_drvdata(phy, inno);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register phy provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = inno_dsidphy_pll_register(inno);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int inno_dsidphy_remove(struct platform_device *pdev)
+{
+ struct inno_dsidphy *inno = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(inno->dev);
+
+ return 0;
+}
+
+static const struct of_device_id inno_dsidphy_of_match[] = {
+ { .compatible = "rockchip,px30-dsi-dphy", },
+ { .compatible = "rockchip,rk3128-dsi-dphy", },
+ { .compatible = "rockchip,rk3368-dsi-dphy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
+
+static struct platform_driver inno_dsidphy_driver = {
+ .driver = {
+ .name = "inno-dsidphy",
+ .of_match_table = of_match_ptr(inno_dsidphy_of_match),
+ },
+ .probe = inno_dsidphy_probe,
+ .remove = inno_dsidphy_remove,
+};
+module_platform_driver(inno_dsidphy_driver);
+
+MODULE_AUTHOR("Wyon Bi <bivvy.bi@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilicon MIPI/LVDS/TTL Video Combo PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eae865ff312c..680cc0c8825c 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1423,6 +1423,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
};
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+ { .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 6f3afaf9398f..84c27394c181 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -857,9 +857,32 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
+static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+
+ if (status) {
+ value |= VBUS_OVERRIDE;
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ } else {
+ value &= ~VBUS_OVERRIDE;
+ }
+
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ return 0;
+}
+
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
+ .vbus_override = tegra186_xusb_padctl_vbus_override,
};
static const char * const tegra186_xusb_padctl_supply_names[] = {
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index 0c0df6897a3b..394913bb2f20 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -39,7 +39,10 @@
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(x) (0x0 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(x) (0x2 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP 0x014
@@ -47,6 +50,7 @@
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
+#define XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED 0x7
#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
@@ -61,9 +65,14 @@
#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(x) (0x080 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP (1 << 18)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN (1 << 22)
+
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL 0x1
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
@@ -222,6 +231,12 @@
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
+#define XUSB_PADCTL_USB2_VBUS_ID 0xc60
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON (1 << 14)
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
+
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
u32 hs_term_range_adj;
@@ -940,6 +955,34 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
priv = to_tegra210_xusb_padctl(padctl);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(
+ port->usb3_port_fake);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(
+ port->usb3_port_fake, index);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+ }
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
@@ -957,7 +1000,14 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
- value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ if (port->mode == USB_DR_MODE_UNKNOWN)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(index);
+ else if (port->mode == USB_DR_MODE_PERIPHERAL)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(index);
+ else if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ else if (port->mode == USB_DR_MODE_OTG)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(index);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -989,7 +1039,12 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
- value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ else
+ value |=
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL <<
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT;
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
@@ -1062,6 +1117,32 @@ static int tegra210_usb2_phy_power_off(struct phy *phy)
mutex_lock(&padctl->lock);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->usb3_port_fake,
+ XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+ }
+
if (WARN_ON(pad->enable == 0))
goto out;
@@ -1225,13 +1306,10 @@ static int tegra210_hsic_phy_power_on(struct phy *phy)
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- struct tegra210_xusb_padctl *priv;
unsigned int index = lane->index;
u32 value;
int err;
- priv = to_tegra210_xusb_padctl(padctl);
-
err = regulator_enable(pad->supply);
if (err)
return err;
@@ -1945,6 +2023,52 @@ static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
.map = tegra210_usb3_port_map,
};
+static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+
+ if (status) {
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ } else {
+ value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+
+ return 0;
+}
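
For readers unfamiliar with the open-coded shift/mask dance, the ID-override update above is equivalent to this kernel-context sketch using the <linux/bitfield.h> helpers (field layout taken from the XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_{SHIFT,MASK} defines earlier in the hunk; the helper name is hypothetical):

#include <linux/bitfield.h>
#include <linux/types.h>

/* same field as shift 18 / mask 0xf above, i.e. bits 21:18 */
#define USB2_VBUS_ID_OVERRIDE           GENMASK(21, 18)
#define USB2_VBUS_ID_OVERRIDE_FLOATING  8

static u32 usb2_vbus_id_set_floating(u32 value)  /* hypothetical helper */
{
    value &= ~USB2_VBUS_ID_OVERRIDE;
    value |= FIELD_PREP(USB2_VBUS_ID_OVERRIDE, USB2_VBUS_ID_OVERRIDE_FLOATING);
    return value;
}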
+
+static int tegra210_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl;
+ struct tegra_xusb_lane *lane;
+ u32 value;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
+
+ value = padctl_readl(padctl,
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(lane->index));
+
+ if ((value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP) ||
+ (value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN)) {
+ tegra210_xusb_padctl_vbus_override(padctl, false);
+ tegra210_xusb_padctl_vbus_override(padctl, true);
+ return 1;
+ }
+
+ return 0;
+}
+
static int
tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
{
@@ -2007,6 +2131,8 @@ static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
.remove = tegra210_xusb_padctl_remove,
.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
.hsic_set_idle = tegra210_hsic_set_idle,
+ .vbus_override = tegra210_xusb_padctl_vbus_override,
+ .utmi_port_reset = tegra210_utmi_port_reset,
};
static const char * const tegra210_xusb_padctl_supply_names[] = {
@@ -2036,6 +2162,7 @@ const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
.ops = &tegra210_xusb_padctl_ops,
.supply_names = tegra210_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra210_xusb_padctl_supply_names),
+ .need_fake_usb3_port = true,
};
EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 2ea8497af82a..f98ec3922c02 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -800,9 +800,62 @@ static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
}
}
+static int tegra_xusb_find_unused_usb3_port(struct tegra_xusb_padctl *padctl)
+{
+ struct device_node *np;
+ unsigned int i;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ np = tegra_xusb_find_port_node(padctl, "usb3", i);
+ if (!np || !of_device_is_available(np))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static bool tegra_xusb_port_is_companion(struct tegra_xusb_usb2_port *usb2)
+{
+ unsigned int i;
+ struct tegra_xusb_usb3_port *usb3;
+ struct tegra_xusb_padctl *padctl = usb2->base.padctl;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ usb3 = tegra_xusb_find_usb3_port(padctl, i);
+ if (usb3 && usb3->port == usb2->base.index)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_xusb_update_usb3_fake_port(struct tegra_xusb_usb2_port *usb2)
+{
+ int fake;
+
+ /* Disable usb3_port_fake usage by default and assign if needed */
+ usb2->usb3_port_fake = -1;
+
+ if ((usb2->mode == USB_DR_MODE_OTG ||
+ usb2->mode == USB_DR_MODE_PERIPHERAL) &&
+ !tegra_xusb_port_is_companion(usb2)) {
+ fake = tegra_xusb_find_unused_usb3_port(usb2->base.padctl);
+ if (fake < 0) {
+ dev_err(&usb2->base.dev, "no unused USB3 ports available\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&usb2->base.dev, "Found unused usb3 port: %d\n", fake);
+ usb2->usb3_port_fake = fake;
+ }
+
+ return 0;
+}
+
static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port;
+ struct tegra_xusb_usb2_port *usb2;
unsigned int i;
int err = 0;
@@ -832,6 +885,18 @@ static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
goto remove_ports;
}
+ if (padctl->soc->need_fake_usb3_port) {
+ for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
+ usb2 = tegra_xusb_find_usb2_port(padctl, i);
+ if (!usb2)
+ continue;
+
+ err = tegra_xusb_update_usb3_fake_port(usb2);
+ if (err < 0)
+ goto remove_ports;
+ }
+ }
+
list_for_each_entry(port, &padctl->ports, list) {
err = port->ops->enable(port);
if (err < 0)
@@ -862,7 +927,6 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
struct resource *res;
- unsigned int i;
int err;
/* for backwards compatibility with old device trees */
@@ -907,8 +971,9 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto remove;
}
- for (i = 0; i < padctl->soc->num_supplies; i++)
- padctl->supplies[i].supply = padctl->soc->supply_names[i];
+ regulator_bulk_set_supply_names(padctl->supplies,
+ padctl->soc->supply_names,
+ padctl->soc->num_supplies);
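
regulator_bulk_set_supply_names() simply performs the name-assignment loop it replaces here; a standalone sketch of the equivalent behaviour (struct layout and supply names are illustrative):

#include <stdio.h>

struct bulk_data {
    const char *supply;   /* mirrors regulator_bulk_data::supply */
};

static void bulk_set_supply_names(struct bulk_data *consumers,
                                  const char *const *names, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++)
        consumers[i].supply = names[i];
}

int main(void)
{
    static const char *const names[] = { "avdd-usb", "vclamp-usb" }; /* illustrative */
    struct bulk_data supplies[2];

    bulk_set_supply_names(supplies, names, 2);
    printf("%s %s\n", supplies[0].supply, supplies[1].supply);
    return 0;
}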
err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
padctl->supplies);
@@ -1056,6 +1121,28 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val)
+{
+ if (padctl->soc->ops->vbus_override)
+ return padctl->soc->ops->vbus_override(padctl, val);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_set_vbus_override);
+
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_port_reset)
+ return padctl->soc->ops->utmi_port_reset(phy);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
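
Both exports follow the optional-op pattern: a SoC-specific ops table may leave a hook NULL, and callers get -ENOTSUPP rather than a crash. A standalone sketch of the pattern (ENOTSUPP is the kernel-internal errno value, defined here only so the demo compiles in userspace):

#include <stdio.h>

#define ENOTSUPP 524    /* kernel-internal errno, not in userspace headers */

struct padctl_ops {
    int (*vbus_override)(int set);  /* may be NULL on older SoCs */
};

static int set_vbus_override(const struct padctl_ops *ops, int set)
{
    if (ops->vbus_override)
        return ops->vbus_override(set);
    return -ENOTSUPP;
}

static int tegra210_override(int set)
{
    printf("vbus override %s\n", set ? "set" : "clear");
    return 0;
}

int main(void)
{
    const struct padctl_ops t210 = { .vbus_override = tegra210_override };
    const struct padctl_ops legacy = { 0 };

    set_vbus_override(&t210, 1);                    /* dispatches to the SoC op */
    printf("%d\n", set_vbus_override(&legacy, 1));  /* -524: not supported */
    return 0;
}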
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 093076ca27fd..da94fcce6307 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -291,6 +291,7 @@ struct tegra_xusb_usb2_port {
struct regulator *supply;
enum usb_dr_mode mode;
bool internal;
+ int usb3_port_fake;
};
static inline struct tegra_xusb_usb2_port *
@@ -372,6 +373,8 @@ struct tegra_xusb_padctl_ops {
unsigned int index, bool idle);
int (*usb3_set_lfps_detect)(struct tegra_xusb_padctl *padctl,
unsigned int index, bool enable);
+ int (*vbus_override)(struct tegra_xusb_padctl *padctl, bool set);
+ int (*utmi_port_reset)(struct phy *phy);
};
struct tegra_xusb_padctl_soc {
@@ -389,6 +392,7 @@ struct tegra_xusb_padctl_soc {
const char * const *supply_names;
unsigned int num_supplies;
+ bool need_fake_usb3_port;
};
struct tegra_xusb_padctl {
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index c3fa1840f8de..174888609779 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -90,8 +90,8 @@ config TWL4030_USB
config PHY_TI_GMII_SEL
tristate
- default y if TI_CPSW=y
- depends on TI_CPSW || COMPILE_TEST
+ default y if TI_CPSW=y || TI_CPSW_SWITCHDEV=y
+ depends on TI_CPSW || TI_CPSW_SWITCHDEV || COMPILE_TEST
select GENERIC_PHY
select REGMAP
default m
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index cbcce7cf0028..26f194779064 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -189,7 +189,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct usb_otg *otg;
const struct of_device_id *of_id;
- const struct usb_phy_data *phy_data;
int error;
of_id = of_match_device(of_match_ptr(dm816x_usb_phy_id_table),
@@ -220,8 +219,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
if (phy->usbphy_ctrl == 0x2c)
phy->instance = 1;
- phy_data = of_id->data;
-
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index a52c5bb35033..a28bd15297f5 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -69,11 +69,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
rgmii_id = 1;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b372419d61f2..3bfbf2ff6e2b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,15 +32,15 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_ARTPEC6
- bool "Axis ARTPEC-6 pin controller driver"
- depends on MACH_ARTPEC6
- select PINMUX
- select GENERIC_PINCONF
- help
- This is the driver for the Axis ARTPEC-6 pin controller. This driver
- supports pin function multiplexing as well as pin bias and drive
- strength configuration. Device tree integration instructions can be
- found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+ bool "Axis ARTPEC-6 pin controller driver"
+ depends on MACH_ARTPEC6
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This is the driver for the Axis ARTPEC-6 pin controller. This driver
+ supports pin function multiplexing as well as pin bias and drive
+ strength configuration. Device tree integration instructions can be
+ found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
config PINCTRL_AS3722
tristate "Pinctrl and GPIO driver for ams AS3722 PMIC"
@@ -420,4 +420,22 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
+config PINCTRL_EQUILIBRIUM
+ tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ help
+ The Equilibrium driver is a pinctrl and GPIO driver for the Intel
+ Lightning Mountain network processor SoC. It supports both the Linux
+ GPIO and pin control frameworks, providing interfaces to set up pinmux,
+ assign the desired pin functions and configure GPIO attributes for LGM
+ SoC pins. Pinmux and pinconf settings are retrieved from the device
+ tree.
+
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index ac537fdbc998..879f312bfb75 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 5dfe7188a5f8..5a0c8e87aa7c 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -915,7 +915,6 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
int owl_pinctrl_probe(struct platform_device *pdev,
struct owl_pinctrl_soc_data *soc_data)
{
- struct resource *res;
struct owl_pinctrl *pctrl;
int ret, i;
@@ -923,8 +922,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
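
This and the following conversions rely on devm_platform_ioremap_resource(), which folds the platform_get_resource()/devm_ioremap_resource() pair into a single call; behaviourally it matches the kernel-context sketch below (the my_ prefix marks it as an illustration; the real helper lives in drivers/base/platform.c):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *
my_devm_platform_ioremap_resource(struct platform_device *pdev,
                                  unsigned int index)
{
    struct resource *res;

    /* look up the memory resource, then do a managed ioremap of it */
    res = platform_get_resource(pdev, IORESOURCE_MEM, index);
    return devm_ioremap_resource(&pdev->dev, res);
}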
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index bc3b232a727a..f690fc5cd688 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1400,12 +1400,10 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = {
static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
{
struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
- struct resource *res;
struct pinctrl_dev *pctl;
/* So far We can assume there is only 1 bank of registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
return -ENODEV;
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index dcab2204c60c..4344c5732400 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -940,7 +940,6 @@ static int cygnus_mux_log_init(struct cygnus_pinctrl *pinctrl)
static int cygnus_pinmux_probe(struct platform_device *pdev)
{
struct cygnus_pinctrl *pinctrl;
- struct resource *res;
int i, ret;
struct pinctrl_pin_desc *pins;
unsigned num_pins = ARRAY_SIZE(cygnus_pins);
@@ -953,15 +952,13 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base0);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pinctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pinctrl->base1)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base1);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 42f7ab383ad9..831a9318c384 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -795,8 +795,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
@@ -850,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
irqc = &chip->irqchip;
- irqc->name = "bcm-iproc-gpio";
+ irqc->name = dev_name(dev);
irqc->irq_ack = iproc_gpio_irq_ack;
irqc->irq_mask = iproc_gpio_irq_mask;
irqc->irq_unmask = iproc_gpio_irq_unmask;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 9fabc451550e..32f268f173d1 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1042,8 +1042,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -1057,8 +1056,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->pinconf_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->pinconf_base))
return PTR_ERR(pinctrl->pinconf_base);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index e67ae52023ad..bed0124388c0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -64,17 +64,16 @@
* @gc: GPIO chip
* @pctl: pointer to pinctrl_dev
* @pctldesc: pinctrl descriptor
- * @irq_domain: pointer to irq domain
* @lock: lock to protect access to I/O registers
*/
struct nsp_gpio {
struct device *dev;
void __iomem *base;
void __iomem *io_ctrl;
+ struct irq_chip irqchip;
struct gpio_chip gc;
struct pinctrl_dev *pctl;
struct pinctrl_desc pctldesc;
- struct irq_domain *irq_domain;
raw_spinlock_t lock;
};
@@ -136,8 +135,8 @@ static inline bool nsp_get_bit(struct nsp_gpio *chip, enum base_type address,
static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
{
- struct nsp_gpio *chip = (struct nsp_gpio *)data;
- struct gpio_chip gc = chip->gc;
+ struct gpio_chip *gc = (struct gpio_chip *)data;
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
int bit;
unsigned long int_bits = 0;
u32 int_status;
@@ -155,14 +154,14 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
level &= readl(chip->base + NSP_GPIO_INT_MASK);
int_bits = level | event;
- for_each_set_bit(bit, &int_bits, gc.ngpio) {
+ for_each_set_bit(bit, &int_bits, gc->ngpio) {
/*
* Clear the interrupt before invoking the
* handler, so we do not leave any window
*/
writel(BIT(bit), chip->base + NSP_GPIO_EVENT);
generic_handle_irq(
- irq_linear_revmap(chip->irq_domain, bit));
+ irq_linear_revmap(gc->irq.domain, bit));
}
}
@@ -171,7 +170,8 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
static void nsp_gpio_irq_ack(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 val = BIT(gpio);
u32 trigger_type;
@@ -189,7 +189,8 @@ static void nsp_gpio_irq_ack(struct irq_data *d)
*/
static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 trigger_type;
@@ -202,7 +203,8 @@ static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
static void nsp_gpio_irq_mask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -212,7 +214,8 @@ static void nsp_gpio_irq_mask(struct irq_data *d)
static void nsp_gpio_irq_unmask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -222,7 +225,8 @@ static void nsp_gpio_irq_unmask(struct irq_data *d)
static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
bool level_low;
bool falling;
@@ -265,16 +269,6 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip nsp_gpio_irq_chip = {
- .name = "gpio-a",
- .irq_enable = nsp_gpio_irq_unmask,
- .irq_disable = nsp_gpio_irq_mask,
- .irq_ack = nsp_gpio_irq_ack,
- .irq_mask = nsp_gpio_irq_mask,
- .irq_unmask = nsp_gpio_irq_unmask,
- .irq_set_type = nsp_gpio_irq_set_type,
-};
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -303,30 +297,36 @@ static int nsp_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
return 0;
}
-static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
+ int val;
raw_spin_lock_irqsave(&chip->lock, flags);
- nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ val = nsp_get_bit(chip, REG, NSP_GPIO_OUT_EN, gpio);
raw_spin_unlock_irqrestore(&chip->lock, flags);
- dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+ return !val;
}
-static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
+static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
+ unsigned long flags;
- return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
+ raw_spin_lock_irqsave(&chip->lock, flags);
+ nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
}
-static int nsp_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
- return irq_linear_revmap(chip->irq_domain, offset);
+ return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
}
static int nsp_get_groups_count(struct pinctrl_dev *pctldev)
@@ -613,10 +613,9 @@ static const struct of_device_id nsp_gpio_of_match[] = {
static int nsp_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct nsp_gpio *chip;
struct gpio_chip *gc;
- u32 val, count;
+ u32 val;
int irq, ret;
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &val)) {
@@ -631,15 +630,13 @@ static int nsp_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- chip->io_ctrl = devm_ioremap_resource(dev, res);
+ chip->io_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->io_ctrl)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->io_ctrl);
@@ -657,46 +654,47 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
+ gc->get_direction = nsp_gpio_get_direction;
gc->set = nsp_gpio_set;
gc->get = nsp_gpio_get;
- gc->to_irq = nsp_gpio_to_irq;
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- /* Create irq domain so that each pin can be assigned an IRQ.*/
- chip->irq_domain = irq_domain_add_linear(gc->of_node, gc->ngpio,
- &irq_domain_simple_ops,
- chip);
- if (!chip->irq_domain) {
- dev_err(&pdev->dev, "Couldn't allocate IRQ domain\n");
- return -ENXIO;
- }
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
- /* Map each gpio to an IRQ and set the handler for gpiolib. */
- for (count = 0; count < gc->ngpio; count++) {
- int irq = irq_create_mapping(chip->irq_domain, count);
+ irqc = &chip->irqchip;
+ irqc->name = "gpio-a";
+ irqc->irq_ack = nsp_gpio_irq_ack;
+ irqc->irq_mask = nsp_gpio_irq_mask;
+ irqc->irq_unmask = nsp_gpio_irq_unmask;
+ irqc->irq_set_type = nsp_gpio_irq_set_type;
- irq_set_chip_and_handler(irq, &nsp_gpio_irq_chip,
- handle_simple_irq);
- irq_set_chip_data(irq, chip);
- }
+ val = readl(chip->base + NSP_CHIP_A_INT_MASK);
+ val = val | NSP_CHIP_A_GPIO_INT_BIT;
+ writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
/* Install ISR for this GPIO controller. */
- ret = devm_request_irq(&pdev->dev, irq, nsp_gpio_irq_handler,
- IRQF_SHARED, "gpio-a", chip);
+ ret = devm_request_irq(dev, irq, nsp_gpio_irq_handler,
+ IRQF_SHARED, "gpio-a", &chip->gc);
if (ret) {
dev_err(&pdev->dev, "Unable to request IRQ%d: %d\n",
irq, ret);
- goto err_rm_gpiochip;
+ return ret;
}
- val = readl(chip->base + NSP_CHIP_A_INT_MASK);
- val = val | NSP_CHIP_A_GPIO_INT_BIT;
- writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
+ girq = &chip->gc.irq;
+ girq->chip = irqc;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
}
- ret = gpiochip_add_data(gc, chip);
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret < 0) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -705,15 +703,10 @@ static int nsp_gpio_probe(struct platform_device *pdev)
ret = nsp_gpio_register_pinconf(chip);
if (ret) {
dev_err(dev, "unable to register pinconf\n");
- goto err_rm_gpiochip;
+ return ret;
}
return 0;
-
-err_rm_gpiochip:
- gpiochip_remove(gc);
-
- return ret;
}
static struct platform_driver nsp_gpio_driver = {
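
The probe rework above replaces the driver's hand-rolled irq_domain (and the per-pin irq_create_mapping() loop) with gpiolib's gpio_irq_chip: the irq_chip now lives inside struct nsp_gpio, the girq fields are filled in before registration, and devm_gpiochip_add_data() creates the domain and mappings itself, which is also why the custom to_irq callback and the gpiochip_remove() error path can go away. A minimal sketch of that pattern, with hypothetical names (my_chip, my_irq_ack, my_probe) standing in for the driver's:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_chip {
	struct irq_chip irqchip;	/* lives inside the driver state */
	struct gpio_chip gc;
};

static void my_irq_ack(struct irq_data *d)
{
	/* gpiolib now passes the gpio_chip as irq chip data */
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct my_chip *chip = gpiochip_get_data(gc);

	/* ... ack d->hwirq in the controller's event register via chip ... */
	(void)chip;
}

static int my_probe(struct platform_device *pdev)
{
	struct my_chip *chip;
	struct gpio_irq_chip *girq;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->gc.label = "my-gpio";	/* ngpio, get/set ops etc. omitted */
	chip->gc.parent = &pdev->dev;

	chip->irqchip.name = "my-gpio";
	chip->irqchip.irq_ack = my_irq_ack;
	/* irq_mask/irq_unmask/irq_set_type are wired up the same way */

	girq = &chip->gc.irq;
	girq->chip = &chip->irqchip;
	girq->parent_handler = NULL;	/* the driver requests the parent IRQ itself */
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_simple_irq;

	/* registration creates the irq domain and the per-pin mappings */
	return devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
}
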
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 87618a4e90e4..3756fc9d5826 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -571,8 +571,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -586,8 +585,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->base2 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base2 = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->base2))
return PTR_ERR(pinctrl->base2);
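
The hunks above (and the matching ones in pinctrl-nsp-gpio.c) are the same mechanical conversion: platform_get_resource() plus devm_ioremap_resource() collapse into devm_platform_ioremap_resource(), which also lets the local struct resource disappear. A hypothetical probe showing the equivalence (demo_probe is illustrative, not from this series):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * One call replaces platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource(&pdev->dev, res), with the same
	 * ERR_PTR() error reporting.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
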
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 5d6d8b1e9062..674920daac26 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -29,6 +29,13 @@ struct pinctrl_dt_map {
static void dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ int i;
+
+ for (i = 0; i < num_maps; ++i) {
+ kfree_const(map[i].dev_name);
+ map[i].dev_name = NULL;
+ }
+
if (pctldev) {
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
if (ops->dt_free_map)
@@ -63,7 +70,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Initialize common mapping table entry fields */
for (i = 0; i < num_maps; i++) {
- map[i].dev_name = dev_name(p->dev);
+ const char *devname;
+
+ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
+ if (!devname)
+ goto err_free_map;
+
+ map[i].dev_name = devname;
map[i].name = statename;
if (pctldev)
map[i].ctrl_dev_name = dev_name(pctldev->dev);
@@ -71,10 +84,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Remember the converted mapping table entries */
dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
- if (!dt_map) {
- dt_free_map(pctldev, map, num_maps);
- return -ENOMEM;
- }
+ if (!dt_map)
+ goto err_free_map;
dt_map->pctldev = pctldev;
dt_map->map = map;
@@ -82,6 +93,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
list_add_tail(&dt_map->node, &p->dt_maps);
return pinctrl_register_map(map, num_maps, false);
+
+err_free_map:
+ dt_free_map(pctldev, map, num_maps);
+ return -ENOMEM;
}
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
@@ -147,6 +162,16 @@ static int dt_to_map_one_config(struct pinctrl *p,
ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps);
if (ret < 0)
return ret;
+ else if (num_maps == 0) {
+ /*
+	 * If we have no valid maps (maybe caused by an empty pinctrl node
+	 * or a typo), there is no need to remember this, so just return.
+	 */
+	dev_info(p->dev,
+		 "no valid maps for state %s\n", statename);
+ return 0;
+ }
/* Stash the mapping table chunk away for later use */
return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
@@ -166,21 +191,6 @@ static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
return dt_remember_or_free_map(p, statename, NULL, map, 1);
}
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- struct device_node *np;
- struct property *prop;
- int size;
-
- np = pctldev->dev->of_node;
- if (!np)
- return false;
-
- prop = of_find_property(np, "pinctrl-0", &size);
-
- return prop ? true : false;
-}
-
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
{
struct device_node *np = p->dev->of_node;
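
The devicetree.c change above guards against map[i].dev_name outliving the device name string it used to borrow: the map table can stay registered after the device is gone, so the name is now duplicated with kstrdup_const() and released with kfree_const() in dt_free_map(). The const-aware pair is cheap because a name in .rodata is not copied at all. A toy pairing of the two helpers (remember_name/forget_name are illustrative names; only the kstrdup_const()/kfree_const() calls are real kernel API):

#include <linux/slab.h>
#include <linux/string.h>

static const char *remember_name(const char *name)
{
	/*
	 * Returns @name itself when it points into .rodata, and a
	 * kmalloc'd copy otherwise; must be released with kfree_const().
	 */
	return kstrdup_const(name, GFP_KERNEL);
}

static void forget_name(const char *name)
{
	kfree_const(name);	/* frees only if it was really duplicated */
}
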
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index 00e645d7fac7..efa80779de4f 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -9,8 +9,6 @@ struct of_phandle_args;
#ifdef CONFIG_OF
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev);
-
void pinctrl_dt_free_maps(struct pinctrl *p);
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
@@ -23,11 +21,6 @@ int pinctrl_parse_index_with_args(const struct device_node *np,
#else
-static inline bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- return false;
-}
-
static inline int pinctrl_dt_to_map(struct pinctrl *p,
struct pinctrl_dev *pctldev)
{
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 5f4058033ec6..3ea9ce3e0cd9 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -39,12 +39,12 @@ config PINCTRL_IMX27
config PINCTRL_IMX25
- bool "IMX25 pinctrl driver"
- depends on OF
- depends on SOC_IMX25
- select PINCTRL_IMX
- help
- Say Y here to enable the imx25 pinctrl driver
+ bool "IMX25 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX25
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx25 pinctrl driver
config PINCTRL_IMX35
bool "IMX35 pinctrl driver"
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 452a14f78707..6091947a8f51 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -115,4 +115,11 @@ config PINCTRL_SUNRISEPOINT
provides an interface that allows configuring of PCH pins and
using them as GPIOs.
+config PINCTRL_TIGERLAKE
+ tristate "Intel Tiger Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Tiger Lake PCH pins and using them as GPIOs.
endif
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index cb491e655749..7e620b471ef6 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
+obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2c419fa5d1c1..582fa8a75559 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -165,7 +165,7 @@ struct chv_pinctrl {
struct gpio_chip chip;
struct irq_chip irqchip;
void __iomem *regs;
- unsigned intr_lines[16];
+ unsigned int intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
struct chv_pin_context *saved_pin_context;
@@ -379,7 +379,7 @@ static const struct chv_community southwest_community = {
.gpio_ranges = southwest_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
/*
- * Southwest community can benerate GPIO interrupts only for the
+ * Southwest community can generate GPIO interrupts only for the
* first 8 interrupts. The upper half (8-15) can only be used to
* trigger GPEs.
*/
@@ -1480,7 +1480,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
pending = readl(pctrl->regs + CHV_INTSTAT);
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
- unsigned irq, offset;
+ unsigned int irq, offset;
offset = pctrl->intr_lines[intr_line];
irq = irq_find_mapping(gc->irq.domain, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 83981ad66a71..4860bc9a4e48 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1131,7 +1131,7 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
pending &= enabled;
for_each_set_bit(gpp_offset, &pending, padgrp->size) {
- unsigned irq;
+ unsigned int irq;
irq = irq_find_mapping(gc->irq.domain,
padgrp->gpio_base + gpp_offset);
@@ -1181,7 +1181,7 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
-static unsigned intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
+static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
unsigned int ngpio = 0;
@@ -1595,16 +1595,65 @@ intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size)
return requested;
}
-static u32
-intel_gpio_update_pad_mode(void __iomem *hostown, u32 mask, u32 value)
+static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
- curr = readl(hostown);
+ curr = readl(reg);
+
updated = (curr & ~mask) | (value & mask);
- writel(updated, hostown);
+ if (curr == updated)
+ return false;
+
+ writel(updated, reg);
+ return true;
+}
+
+static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ const struct intel_community *community = &pctrl->communities[c];
+ const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ struct device *dev = pctrl->dev;
+ u32 requested;
+
+ if (padgrp->gpio_base < 0)
+ return;
- return curr;
+ requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
+ if (!intel_gpio_update_reg(base + gpp * 4, requested, saved))
+ return;
+
+ dev_dbg(dev, "restored hostown %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_intmask(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ struct device *dev = pctrl->dev;
+
+ if (!intel_gpio_update_reg(base + gpp * 4, ~0U, saved))
+ return;
+
+ dev_dbg(dev, "restored mask %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
+ unsigned int reg, u32 saved)
+{
+ u32 mask = (reg == PADCFG0) ? PADCFG0_GPIORXSTATE : 0;
+ unsigned int n = reg / sizeof(u32);
+ struct device *dev = pctrl->dev;
+ void __iomem *padcfg;
+
+ padcfg = intel_get_padcfg(pctrl, pin, reg);
+ if (!padcfg)
+ return;
+
+ if (!intel_gpio_update_reg(padcfg, ~mask, saved))
+ return;
+
+ dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
}
int intel_pinctrl_resume_noirq(struct device *dev)
@@ -1620,37 +1669,13 @@ int intel_pinctrl_resume_noirq(struct device *dev)
pads = pctrl->context.pads;
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- void __iomem *padcfg;
- u32 val;
if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
- val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
- if (val != pads[i].padcfg0) {
- writel(pads[i].padcfg0, padcfg);
- dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
- val = readl(padcfg);
- if (val != pads[i].padcfg1) {
- writel(pads[i].padcfg1, padcfg);
- dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
- if (padcfg) {
- val = readl(padcfg);
- if (val != pads[i].padcfg2) {
- writel(pads[i].padcfg2, padcfg);
- dev_dbg(dev, "restored pin %u padcfg2 %#08x\n",
- desc->number, readl(padcfg));
- }
- }
+ intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG1, pads[i].padcfg1);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG2, pads[i].padcfg2);
}
communities = pctrl->context.communities;
@@ -1660,30 +1685,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
unsigned int gpp;
base = community->regs + community->ie_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- writel(communities[i].intmask[gpp], base + gpp * 4);
- dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
- readl(base + gpp * 4));
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_intmask(pctrl, i, base, gpp, communities[i].intmask[gpp]);
base = community->regs + community->hostown_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- const struct intel_padgroup *padgrp = &community->gpps[gpp];
- u32 requested = 0, value = 0;
- u32 saved = communities[i].hostown[gpp];
-
- if (padgrp->gpio_base < 0)
- continue;
-
- requested = intel_gpio_is_requested(&pctrl->chip,
- padgrp->gpio_base, padgrp->size);
- value = intel_gpio_update_pad_mode(base + gpp * 4,
- requested, saved);
- if ((value ^ saved) & requested) {
- dev_warn(dev, "restore hostown %d/%u %#8x->%#8x\n",
- i, gpp, value, saved);
- }
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_hostown(pctrl, i, base, gpp, communities[i].hostown[gpp]);
}
return 0;
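
The resume-path refactor above funnels every restore through intel_gpio_update_reg(), a masked read-modify-write that reports whether the register actually changed, so the per-pad and per-group helpers log only when they really rewrote something. The core of that helper, shown standalone as a sketch:

#include <linux/io.h>
#include <linux/types.h>

/* Only bits inside @mask are taken from @value; the write is skipped
 * (and false returned) when nothing would change. */
static bool update_reg(void __iomem *reg, u32 mask, u32 value)
{
	u32 curr = readl(reg);
	u32 updated = (curr & ~mask) | (value & mask);

	if (curr == updated)
		return false;

	writel(updated, reg);
	return true;
}

/* e.g. the PADCFG0 restore keeps the live RX state bit out of the
 * comparison: update_reg(padcfg, ~PADCFG0_GPIORXSTATE, saved); */
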
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 2e06fb1464ab..7fdf4257df1e 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -33,6 +33,7 @@
.npins = ((e) - (s) + 1), \
}
+/* Lewisburg */
static const struct pinctrl_pin_desc lbg_pins[] = {
/* GPP_A */
PINCTRL_PIN(0, "RCINB"),
@@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(33, "SRCCLKREQB_4"),
PINCTRL_PIN(34, "SRCCLKREQB_5"),
PINCTRL_PIN(35, "GPP_B_11"),
- PINCTRL_PIN(36, "GLB_RST_WARN_N"),
+ PINCTRL_PIN(36, "SLP_S0B"),
PINCTRL_PIN(37, "PLTRSTB"),
PINCTRL_PIN(38, "SPKR"),
PINCTRL_PIN(39, "GPP_B_15"),
@@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(141, "GBE_PCI_DIS"),
PINCTRL_PIN(142, "GBE_LAN_DIS"),
PINCTRL_PIN(143, "GPP_I_10"),
- PINCTRL_PIN(144, "GPIO_RCOMP_3P3"),
/* GPP_J */
- PINCTRL_PIN(145, "GBE_LED_0_0"),
- PINCTRL_PIN(146, "GBE_LED_0_1"),
- PINCTRL_PIN(147, "GBE_LED_1_0"),
- PINCTRL_PIN(148, "GBE_LED_1_1"),
- PINCTRL_PIN(149, "GBE_LED_2_0"),
- PINCTRL_PIN(150, "GBE_LED_2_1"),
- PINCTRL_PIN(151, "GBE_LED_3_0"),
- PINCTRL_PIN(152, "GBE_LED_3_1"),
- PINCTRL_PIN(153, "GBE_SCL_0"),
- PINCTRL_PIN(154, "GBE_SDA_0"),
- PINCTRL_PIN(155, "GBE_SCL_1"),
- PINCTRL_PIN(156, "GBE_SDA_1"),
- PINCTRL_PIN(157, "GBE_SCL_2"),
- PINCTRL_PIN(158, "GBE_SDA_2"),
- PINCTRL_PIN(159, "GBE_SCL_3"),
- PINCTRL_PIN(160, "GBE_SDA_3"),
- PINCTRL_PIN(161, "GBE_SDP_0_0"),
- PINCTRL_PIN(162, "GBE_SDP_0_1"),
- PINCTRL_PIN(163, "GBE_SDP_1_0"),
- PINCTRL_PIN(164, "GBE_SDP_1_1"),
- PINCTRL_PIN(165, "GBE_SDP_2_0"),
- PINCTRL_PIN(166, "GBE_SDP_2_1"),
- PINCTRL_PIN(167, "GBE_SDP_3_0"),
- PINCTRL_PIN(168, "GBE_SDP_3_1"),
+ PINCTRL_PIN(144, "GBE_LED_0_0"),
+ PINCTRL_PIN(145, "GBE_LED_0_1"),
+ PINCTRL_PIN(146, "GBE_LED_1_0"),
+ PINCTRL_PIN(147, "GBE_LED_1_1"),
+ PINCTRL_PIN(148, "GBE_LED_2_0"),
+ PINCTRL_PIN(149, "GBE_LED_2_1"),
+ PINCTRL_PIN(150, "GBE_LED_3_0"),
+ PINCTRL_PIN(151, "GBE_LED_3_1"),
+ PINCTRL_PIN(152, "GBE_SCL_0"),
+ PINCTRL_PIN(153, "GBE_SDA_0"),
+ PINCTRL_PIN(154, "GBE_SCL_1"),
+ PINCTRL_PIN(155, "GBE_SDA_1"),
+ PINCTRL_PIN(156, "GBE_SCL_2"),
+ PINCTRL_PIN(157, "GBE_SDA_2"),
+ PINCTRL_PIN(158, "GBE_SCL_3"),
+ PINCTRL_PIN(159, "GBE_SDA_3"),
+ PINCTRL_PIN(160, "GBE_SDP_0_0"),
+ PINCTRL_PIN(161, "GBE_SDP_0_1"),
+ PINCTRL_PIN(162, "GBE_SDP_1_0"),
+ PINCTRL_PIN(163, "GBE_SDP_1_1"),
+ PINCTRL_PIN(164, "GBE_SDP_2_0"),
+ PINCTRL_PIN(165, "GBE_SDP_2_1"),
+ PINCTRL_PIN(166, "GBE_SDP_3_0"),
+ PINCTRL_PIN(167, "GBE_SDP_3_1"),
/* GPP_K */
- PINCTRL_PIN(169, "GBE_RMIICLK"),
- PINCTRL_PIN(170, "GBE_RMII_TXD_0"),
- PINCTRL_PIN(171, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(168, "GBE_RMIICLK"),
+ PINCTRL_PIN(169, "GBE_RMII_RXD_0"),
+ PINCTRL_PIN(170, "GBE_RMII_RXD_1"),
+ PINCTRL_PIN(171, "GBE_RMII_CRS_DV"),
PINCTRL_PIN(172, "GBE_RMII_TX_EN"),
- PINCTRL_PIN(173, "GBE_RMII_CRS_DV"),
- PINCTRL_PIN(174, "GBE_RMII_RXD_0"),
- PINCTRL_PIN(175, "GBE_RMII_RXD_1"),
- PINCTRL_PIN(176, "GBE_RMII_RX_ER"),
- PINCTRL_PIN(177, "GBE_RMII_ARBIN"),
- PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"),
- PINCTRL_PIN(179, "PE_RST_N"),
- PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"),
+ PINCTRL_PIN(173, "GBE_RMII_TXD_0"),
+ PINCTRL_PIN(174, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(175, "GBE_RMII_RX_ER"),
+ PINCTRL_PIN(176, "GBE_RMII_ARBIN"),
+ PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"),
+ PINCTRL_PIN(178, "PE_RST_N"),
/* GPP_G */
- PINCTRL_PIN(181, "FAN_TACH_0"),
- PINCTRL_PIN(182, "FAN_TACH_1"),
- PINCTRL_PIN(183, "FAN_TACH_2"),
- PINCTRL_PIN(184, "FAN_TACH_3"),
- PINCTRL_PIN(185, "FAN_TACH_4"),
- PINCTRL_PIN(186, "FAN_TACH_5"),
- PINCTRL_PIN(187, "FAN_TACH_6"),
- PINCTRL_PIN(188, "FAN_TACH_7"),
- PINCTRL_PIN(189, "FAN_PWM_0"),
- PINCTRL_PIN(190, "FAN_PWM_1"),
- PINCTRL_PIN(191, "FAN_PWM_2"),
- PINCTRL_PIN(192, "FAN_PWM_3"),
- PINCTRL_PIN(193, "GSXDOUT"),
- PINCTRL_PIN(194, "GSXSLOAD"),
- PINCTRL_PIN(195, "GSXDIN"),
- PINCTRL_PIN(196, "GSXSRESETB"),
- PINCTRL_PIN(197, "GSXCLK"),
- PINCTRL_PIN(198, "ADR_COMPLETE"),
- PINCTRL_PIN(199, "NMIB"),
- PINCTRL_PIN(200, "SMIB"),
- PINCTRL_PIN(201, "SSATA_DEVSLP_0"),
- PINCTRL_PIN(202, "SSATA_DEVSLP_1"),
- PINCTRL_PIN(203, "SSATA_DEVSLP_2"),
- PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"),
+ PINCTRL_PIN(179, "FAN_TACH_0"),
+ PINCTRL_PIN(180, "FAN_TACH_1"),
+ PINCTRL_PIN(181, "FAN_TACH_2"),
+ PINCTRL_PIN(182, "FAN_TACH_3"),
+ PINCTRL_PIN(183, "FAN_TACH_4"),
+ PINCTRL_PIN(184, "FAN_TACH_5"),
+ PINCTRL_PIN(185, "FAN_TACH_6"),
+ PINCTRL_PIN(186, "FAN_TACH_7"),
+ PINCTRL_PIN(187, "FAN_PWM_0"),
+ PINCTRL_PIN(188, "FAN_PWM_1"),
+ PINCTRL_PIN(189, "FAN_PWM_2"),
+ PINCTRL_PIN(190, "FAN_PWM_3"),
+ PINCTRL_PIN(191, "GSXDOUT"),
+ PINCTRL_PIN(192, "GSXSLOAD"),
+ PINCTRL_PIN(193, "GSXDIN"),
+ PINCTRL_PIN(194, "GSXSRESETB"),
+ PINCTRL_PIN(195, "GSXCLK"),
+ PINCTRL_PIN(196, "ADR_COMPLETE"),
+ PINCTRL_PIN(197, "NMIB"),
+ PINCTRL_PIN(198, "SMIB"),
+ PINCTRL_PIN(199, "SSATA_DEVSLP_0"),
+ PINCTRL_PIN(200, "SSATA_DEVSLP_1"),
+ PINCTRL_PIN(201, "SSATA_DEVSLP_2"),
+ PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"),
/* GPP_H */
- PINCTRL_PIN(205, "SRCCLKREQB_6"),
- PINCTRL_PIN(206, "SRCCLKREQB_7"),
- PINCTRL_PIN(207, "SRCCLKREQB_8"),
- PINCTRL_PIN(208, "SRCCLKREQB_9"),
- PINCTRL_PIN(209, "SRCCLKREQB_10"),
- PINCTRL_PIN(210, "SRCCLKREQB_11"),
- PINCTRL_PIN(211, "SRCCLKREQB_12"),
- PINCTRL_PIN(212, "SRCCLKREQB_13"),
- PINCTRL_PIN(213, "SRCCLKREQB_14"),
- PINCTRL_PIN(214, "SRCCLKREQB_15"),
- PINCTRL_PIN(215, "SML2CLK"),
- PINCTRL_PIN(216, "SML2DATA"),
- PINCTRL_PIN(217, "SML2ALERTB"),
- PINCTRL_PIN(218, "SML3CLK"),
- PINCTRL_PIN(219, "SML3DATA"),
- PINCTRL_PIN(220, "SML3ALERTB"),
- PINCTRL_PIN(221, "SML4CLK"),
- PINCTRL_PIN(222, "SML4DATA"),
- PINCTRL_PIN(223, "SML4ALERTB"),
- PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"),
- PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"),
- PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"),
- PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"),
- PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"),
+ PINCTRL_PIN(203, "SRCCLKREQB_6"),
+ PINCTRL_PIN(204, "SRCCLKREQB_7"),
+ PINCTRL_PIN(205, "SRCCLKREQB_8"),
+ PINCTRL_PIN(206, "SRCCLKREQB_9"),
+ PINCTRL_PIN(207, "SRCCLKREQB_10"),
+ PINCTRL_PIN(208, "SRCCLKREQB_11"),
+ PINCTRL_PIN(209, "SRCCLKREQB_12"),
+ PINCTRL_PIN(210, "SRCCLKREQB_13"),
+ PINCTRL_PIN(211, "SRCCLKREQB_14"),
+ PINCTRL_PIN(212, "SRCCLKREQB_15"),
+ PINCTRL_PIN(213, "SML2CLK"),
+ PINCTRL_PIN(214, "SML2DATA"),
+ PINCTRL_PIN(215, "SML2ALERTB"),
+ PINCTRL_PIN(216, "SML3CLK"),
+ PINCTRL_PIN(217, "SML3DATA"),
+ PINCTRL_PIN(218, "SML3ALERTB"),
+ PINCTRL_PIN(219, "SML4CLK"),
+ PINCTRL_PIN(220, "SML4DATA"),
+ PINCTRL_PIN(221, "SML4ALERTB"),
+ PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"),
+ PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"),
+ PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"),
+ PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"),
+ PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"),
/* GPP_L */
+ PINCTRL_PIN(227, "GPP_L_0"),
+ PINCTRL_PIN(228, "EC_CSME_INTR_OUT"),
PINCTRL_PIN(229, "VISA2CH0_D0"),
PINCTRL_PIN(230, "VISA2CH0_D1"),
PINCTRL_PIN(231, "VISA2CH0_D2"),
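
The Lewisburg renumbering above is forced by dropping the two RCOMP pads (old pins 144 and 180): the pin numbers in these tables are array indices, so every entry after a removed pad shifts down by one to stay contiguous. For example:

/* after removing PINCTRL_PIN(144, "GPIO_RCOMP_3P3"): */
PINCTRL_PIN(143, "GPP_I_10"),
PINCTRL_PIN(144, "GBE_LED_0_0"),	/* previously 145 */
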
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
new file mode 100644
index 000000000000..58572b15b3ce
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Tiger Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define TGL_PAD_OWN 0x020
+#define TGL_PADCFGLOCK 0x080
+#define TGL_HOSTSW_OWN 0x0b0
+#define TGL_GPI_IS 0x100
+#define TGL_GPI_IE 0x120
+
+#define TGL_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define TGL_COMMUNITY(s, e, g) \
+ { \
+ .padown_offset = TGL_PAD_OWN, \
+ .padcfglock_offset = TGL_PADCFGLOCK, \
+ .hostown_offset = TGL_HOSTSW_OWN, \
+ .is_offset = TGL_GPI_IS, \
+ .ie_offset = TGL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Tiger Lake-LP */
+static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+ /* GPP_B */
+ PINCTRL_PIN(0, "CORE_VID_0"),
+ PINCTRL_PIN(1, "CORE_VID_1"),
+ PINCTRL_PIN(2, "VRALERTB"),
+ PINCTRL_PIN(3, "CPU_GP_2"),
+ PINCTRL_PIN(4, "CPU_GP_3"),
+ PINCTRL_PIN(5, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(6, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(7, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(8, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(9, "I2C5_SDA"),
+ PINCTRL_PIN(10, "I2C5_SCL"),
+ PINCTRL_PIN(11, "PMCALERTB"),
+ PINCTRL_PIN(12, "SLP_S0B"),
+ PINCTRL_PIN(13, "PLTRSTB"),
+ PINCTRL_PIN(14, "SPKR"),
+ PINCTRL_PIN(15, "GSPI0_CS0B"),
+ PINCTRL_PIN(16, "GSPI0_CLK"),
+ PINCTRL_PIN(17, "GSPI0_MISO"),
+ PINCTRL_PIN(18, "GSPI0_MOSI"),
+ PINCTRL_PIN(19, "GSPI1_CS0B"),
+ PINCTRL_PIN(20, "GSPI1_CLK"),
+ PINCTRL_PIN(21, "GSPI1_MISO"),
+ PINCTRL_PIN(22, "GSPI1_MOSI"),
+ PINCTRL_PIN(23, "SML1ALERTB"),
+ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"),
+ /* GPP_T */
+ PINCTRL_PIN(26, "I2C6_SDA"),
+ PINCTRL_PIN(27, "I2C6_SCL"),
+ PINCTRL_PIN(28, "I2C7_SDA"),
+ PINCTRL_PIN(29, "I2C7_SCL"),
+ PINCTRL_PIN(30, "UART4_RXD"),
+ PINCTRL_PIN(31, "UART4_TXD"),
+ PINCTRL_PIN(32, "UART4_RTSB"),
+ PINCTRL_PIN(33, "UART4_CTSB"),
+ PINCTRL_PIN(34, "UART5_RXD"),
+ PINCTRL_PIN(35, "UART5_TXD"),
+ PINCTRL_PIN(36, "UART5_RTSB"),
+ PINCTRL_PIN(37, "UART5_CTSB"),
+ PINCTRL_PIN(38, "UART6_RXD"),
+ PINCTRL_PIN(39, "UART6_TXD"),
+ PINCTRL_PIN(40, "UART6_RTSB"),
+ PINCTRL_PIN(41, "UART6_CTSB"),
+ /* GPP_A */
+ PINCTRL_PIN(42, "ESPI_IO_0"),
+ PINCTRL_PIN(43, "ESPI_IO_1"),
+ PINCTRL_PIN(44, "ESPI_IO_2"),
+ PINCTRL_PIN(45, "ESPI_IO_3"),
+ PINCTRL_PIN(46, "ESPI_CSB"),
+ PINCTRL_PIN(47, "ESPI_CLK"),
+ PINCTRL_PIN(48, "ESPI_RESETB"),
+ PINCTRL_PIN(49, "I2S2_SCLK"),
+ PINCTRL_PIN(50, "I2S2_SFRM"),
+ PINCTRL_PIN(51, "I2S2_TXD"),
+ PINCTRL_PIN(52, "I2S2_RXD"),
+ PINCTRL_PIN(53, "PMC_I2C_SDA"),
+ PINCTRL_PIN(54, "SATAXPCIE_1"),
+ PINCTRL_PIN(55, "PMC_I2C_SCL"),
+ PINCTRL_PIN(56, "USB2_OCB_1"),
+ PINCTRL_PIN(57, "USB2_OCB_2"),
+ PINCTRL_PIN(58, "USB2_OCB_3"),
+ PINCTRL_PIN(59, "DDSP_HPD_C"),
+ PINCTRL_PIN(60, "DDSP_HPD_B"),
+ PINCTRL_PIN(61, "DDSP_HPD_1"),
+ PINCTRL_PIN(62, "DDSP_HPD_2"),
+ PINCTRL_PIN(63, "GPPC_A_21"),
+ PINCTRL_PIN(64, "GPPC_A_22"),
+ PINCTRL_PIN(65, "I2S1_SCLK"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25), /* GPP_B */
+ TGL_GPP(1, 26, 41), /* GPP_T */
+ TGL_GPP(2, 42, 66), /* GPP_A */
+};
+
+static const struct intel_community tgllp_community0[] = {
+ TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
+ .uid = "0",
+ .pins = tgllp_community0_pins,
+ .npins = ARRAY_SIZE(tgllp_community0_pins),
+ .communities = tgllp_community0,
+ .ncommunities = ARRAY_SIZE(tgllp_community0),
+};
+
+static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
+ /* GPP_S */
+ PINCTRL_PIN(0, "SNDW0_CLK"),
+ PINCTRL_PIN(1, "SNDW0_DATA"),
+ PINCTRL_PIN(2, "SNDW1_CLK"),
+ PINCTRL_PIN(3, "SNDW1_DATA"),
+ PINCTRL_PIN(4, "SNDW2_CLK"),
+ PINCTRL_PIN(5, "SNDW2_DATA"),
+ PINCTRL_PIN(6, "SNDW3_CLK"),
+ PINCTRL_PIN(7, "SNDW3_DATA"),
+ /* GPP_H */
+ PINCTRL_PIN(8, "GPPC_H_0"),
+ PINCTRL_PIN(9, "GPPC_H_1"),
+ PINCTRL_PIN(10, "GPPC_H_2"),
+ PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(12, "I2C2_SDA"),
+ PINCTRL_PIN(13, "I2C2_SCL"),
+ PINCTRL_PIN(14, "I2C3_SDA"),
+ PINCTRL_PIN(15, "I2C3_SCL"),
+ PINCTRL_PIN(16, "I2C4_SDA"),
+ PINCTRL_PIN(17, "I2C4_SCL"),
+ PINCTRL_PIN(18, "SRCCLKREQB_4"),
+ PINCTRL_PIN(19, "SRCCLKREQB_5"),
+ PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(24, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(25, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(26, "CPU_C10_GATEB"),
+ PINCTRL_PIN(27, "TIME_SYNC_0"),
+ PINCTRL_PIN(28, "IMGCLKOUT_1"),
+ PINCTRL_PIN(29, "IMGCLKOUT_2"),
+ PINCTRL_PIN(30, "IMGCLKOUT_3"),
+ PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ /* GPP_D */
+ PINCTRL_PIN(32, "ISH_GP_0"),
+ PINCTRL_PIN(33, "ISH_GP_1"),
+ PINCTRL_PIN(34, "ISH_GP_2"),
+ PINCTRL_PIN(35, "ISH_GP_3"),
+ PINCTRL_PIN(36, "IMGCLKOUT_0"),
+ PINCTRL_PIN(37, "SRCCLKREQB_0"),
+ PINCTRL_PIN(38, "SRCCLKREQB_1"),
+ PINCTRL_PIN(39, "SRCCLKREQB_2"),
+ PINCTRL_PIN(40, "SRCCLKREQB_3"),
+ PINCTRL_PIN(41, "ISH_SPI_CSB"),
+ PINCTRL_PIN(42, "ISH_SPI_CLK"),
+ PINCTRL_PIN(43, "ISH_SPI_MISO"),
+ PINCTRL_PIN(44, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(45, "ISH_UART0_RXD"),
+ PINCTRL_PIN(46, "ISH_UART0_TXD"),
+ PINCTRL_PIN(47, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(48, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(49, "ISH_GP_4"),
+ PINCTRL_PIN(50, "ISH_GP_5"),
+ PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ /* GPP_U */
+ PINCTRL_PIN(53, "UART3_RXD"),
+ PINCTRL_PIN(54, "UART3_TXD"),
+ PINCTRL_PIN(55, "UART3_RTSB"),
+ PINCTRL_PIN(56, "UART3_CTSB"),
+ PINCTRL_PIN(57, "GSPI3_CS0B"),
+ PINCTRL_PIN(58, "GSPI3_CLK"),
+ PINCTRL_PIN(59, "GSPI3_MISO"),
+ PINCTRL_PIN(60, "GSPI3_MOSI"),
+ PINCTRL_PIN(61, "GSPI4_CS0B"),
+ PINCTRL_PIN(62, "GSPI4_CLK"),
+ PINCTRL_PIN(63, "GSPI4_MISO"),
+ PINCTRL_PIN(64, "GSPI4_MOSI"),
+ PINCTRL_PIN(65, "GSPI5_CS0B"),
+ PINCTRL_PIN(66, "GSPI5_CLK"),
+ PINCTRL_PIN(67, "GSPI5_MISO"),
+ PINCTRL_PIN(68, "GSPI5_MOSI"),
+ PINCTRL_PIN(69, "GSPI6_CS0B"),
+ PINCTRL_PIN(70, "GSPI6_CLK"),
+ PINCTRL_PIN(71, "GSPI6_MISO"),
+ PINCTRL_PIN(72, "GSPI6_MOSI"),
+ PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(77, "CNV_BTEN"),
+ PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(88, "vUART0_TXD"),
+ PINCTRL_PIN(89, "vUART0_RXD"),
+ PINCTRL_PIN(90, "vUART0_CTS_B"),
+ PINCTRL_PIN(91, "vUART0_RTS_B"),
+ PINCTRL_PIN(92, "vISH_UART0_TXD"),
+ PINCTRL_PIN(93, "vISH_UART0_RXD"),
+ PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(100, "vI2S2_SCLK"),
+ PINCTRL_PIN(101, "vI2S2_SFRM"),
+ PINCTRL_PIN(102, "vI2S2_TXD"),
+ PINCTRL_PIN(103, "vI2S2_RXD"),
+};
+
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_S */
+ TGL_GPP(1, 8, 31), /* GPP_H */
+ TGL_GPP(2, 32, 52), /* GPP_D */
+ TGL_GPP(3, 53, 76), /* GPP_U */
+ TGL_GPP(4, 77, 103), /* vGPIO */
+};
+
+static const struct intel_community tgllp_community1[] = {
+ TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
+ .uid = "1",
+ .pins = tgllp_community1_pins,
+ .npins = ARRAY_SIZE(tgllp_community1_pins),
+ .communities = tgllp_community1,
+ .ncommunities = ARRAY_SIZE(tgllp_community1),
+};
+
+static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ /* GPP_C */
+ PINCTRL_PIN(0, "SMBCLK"),
+ PINCTRL_PIN(1, "SMBDATA"),
+ PINCTRL_PIN(2, "SMBALERTB"),
+ PINCTRL_PIN(3, "SML0CLK"),
+ PINCTRL_PIN(4, "SML0DATA"),
+ PINCTRL_PIN(5, "SML0ALERTB"),
+ PINCTRL_PIN(6, "SML1CLK"),
+ PINCTRL_PIN(7, "SML1DATA"),
+ PINCTRL_PIN(8, "UART0_RXD"),
+ PINCTRL_PIN(9, "UART0_TXD"),
+ PINCTRL_PIN(10, "UART0_RTSB"),
+ PINCTRL_PIN(11, "UART0_CTSB"),
+ PINCTRL_PIN(12, "UART1_RXD"),
+ PINCTRL_PIN(13, "UART1_TXD"),
+ PINCTRL_PIN(14, "UART1_RTSB"),
+ PINCTRL_PIN(15, "UART1_CTSB"),
+ PINCTRL_PIN(16, "I2C0_SDA"),
+ PINCTRL_PIN(17, "I2C0_SCL"),
+ PINCTRL_PIN(18, "I2C1_SDA"),
+ PINCTRL_PIN(19, "I2C1_SCL"),
+ PINCTRL_PIN(20, "UART2_RXD"),
+ PINCTRL_PIN(21, "UART2_TXD"),
+ PINCTRL_PIN(22, "UART2_RTSB"),
+ PINCTRL_PIN(23, "UART2_CTSB"),
+ /* GPP_F */
+ PINCTRL_PIN(24, "CNV_BRI_DT"),
+ PINCTRL_PIN(25, "CNV_BRI_RSP"),
+ PINCTRL_PIN(26, "CNV_RGI_DT"),
+ PINCTRL_PIN(27, "CNV_RGI_RSP"),
+ PINCTRL_PIN(28, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(29, "GPPC_F_5"),
+ PINCTRL_PIN(30, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(31, "GPPC_F_7"),
+ PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(33, "BOOTMPC"),
+ PINCTRL_PIN(34, "GPPC_F_10"),
+ PINCTRL_PIN(35, "GPPC_F_11"),
+ PINCTRL_PIN(36, "GSXDOUT"),
+ PINCTRL_PIN(37, "GSXSLOAD"),
+ PINCTRL_PIN(38, "GSXDIN"),
+ PINCTRL_PIN(39, "GSXSRESETB"),
+ PINCTRL_PIN(40, "GSXCLK"),
+ PINCTRL_PIN(41, "GMII_MDC"),
+ PINCTRL_PIN(42, "GMII_MDIO"),
+ PINCTRL_PIN(43, "SRCCLKREQB_6"),
+ PINCTRL_PIN(44, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(46, "VNN_CTRL"),
+ PINCTRL_PIN(47, "V1P05_CTRL"),
+ PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ /* HVCMOS */
+ PINCTRL_PIN(49, "L_BKLTEN"),
+ PINCTRL_PIN(50, "L_BKLTCTL"),
+ PINCTRL_PIN(51, "L_VDDEN"),
+ PINCTRL_PIN(52, "SYS_PWROK"),
+ PINCTRL_PIN(53, "SYS_RESETB"),
+ PINCTRL_PIN(54, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(55, "SATAXPCIE_0"),
+ PINCTRL_PIN(56, "SPI1_IO_2"),
+ PINCTRL_PIN(57, "SPI1_IO_3"),
+ PINCTRL_PIN(58, "CPU_GP_0"),
+ PINCTRL_PIN(59, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(60, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(61, "GPPC_E_6"),
+ PINCTRL_PIN(62, "CPU_GP_1"),
+ PINCTRL_PIN(63, "SPI1_CS1B"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "SPI1_CSB"),
+ PINCTRL_PIN(66, "SPI1_CLK"),
+ PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(69, "DDSP_HPD_A"),
+ PINCTRL_PIN(70, "ISH_GP_6"),
+ PINCTRL_PIN(71, "ISH_GP_7"),
+ PINCTRL_PIN(72, "GPPC_E_17"),
+ PINCTRL_PIN(73, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(74, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(75, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(76, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(77, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(78, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ /* JTAG */
+ PINCTRL_PIN(80, "JTAG_TDO"),
+ PINCTRL_PIN(81, "JTAGX"),
+ PINCTRL_PIN(82, "PRDYB"),
+ PINCTRL_PIN(83, "PREQB"),
+ PINCTRL_PIN(84, "CPU_TRSTB"),
+ PINCTRL_PIN(85, "JTAG_TDI"),
+ PINCTRL_PIN(86, "JTAG_TMS"),
+ PINCTRL_PIN(87, "JTAG_TCK"),
+ PINCTRL_PIN(88, "DBG_PMODE"),
+};
+
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 0, 23), /* GPP_C */
+ TGL_GPP(1, 24, 48), /* GPP_F */
+ TGL_GPP(2, 49, 54), /* HVCMOS */
+ TGL_GPP(3, 55, 79), /* GPP_E */
+ TGL_GPP(4, 80, 88), /* JTAG */
+};
+
+static const struct intel_community tgllp_community4[] = {
+ TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
+ .uid = "4",
+ .pins = tgllp_community4_pins,
+ .npins = ARRAY_SIZE(tgllp_community4_pins),
+ .communities = tgllp_community4,
+ .ncommunities = ARRAY_SIZE(tgllp_community4),
+};
+
+static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
+ /* GPP_R */
+ PINCTRL_PIN(0, "HDA_BCLK"),
+ PINCTRL_PIN(1, "HDA_SYNC"),
+ PINCTRL_PIN(2, "HDA_SDO"),
+ PINCTRL_PIN(3, "HDA_SDI_0"),
+ PINCTRL_PIN(4, "HDA_RSTB"),
+ PINCTRL_PIN(5, "HDA_SDI_1"),
+ PINCTRL_PIN(6, "GPP_R_6"),
+ PINCTRL_PIN(7, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(8, "SPI0_IO_2"),
+ PINCTRL_PIN(9, "SPI0_IO_3"),
+ PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(12, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(15, "SPI0_CLK"),
+ PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community5_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_R */
+ TGL_GPP(1, 8, 16), /* SPI */
+};
+
+static const struct intel_community tgllp_community5[] = {
+ TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
+ .uid = "5",
+ .pins = tgllp_community5_pins,
+ .npins = ARRAY_SIZE(tgllp_community5_pins),
+ .communities = tgllp_community5,
+ .ncommunities = ARRAY_SIZE(tgllp_community5),
+};
+
+static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
+ &tgllp_community0_soc_data,
+ &tgllp_community1_soc_data,
+ &tgllp_community4_soc_data,
+ &tgllp_community5_soc_data,
+ NULL
+};
+
+static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
+ { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
+
+static struct platform_driver tgl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_uid,
+ .driver = {
+ .name = "tigerlake-pinctrl",
+ .acpi_match_table = tgl_pinctrl_acpi_match,
+ .pm = &tgl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(tgl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Tiger Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
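
For reference, the TGL_GPP()/TGL_COMMUNITY() macros in this file simply fill intel_padgroup/intel_community initializers; expanding one entry makes the pin bookkeeping explicit (values taken from tgllp_community0_gpps above):

/* TGL_GPP(1, 26, 41) expands to: */
{
	.reg_num = 1,		/* register group within the community */
	.base    = 26,		/* first pin of GPP_T */
	.size    = 41 - 26 + 1,	/* 16 pins */
}
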
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 53f52b9a0acd..67f8444f7a0c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -982,7 +982,6 @@ static const struct mtk_eint_xt mtk_eint_xt = {
static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
if (!of_property_read_bool(np, "interrupt-controller"))
return -ENODEV;
@@ -991,8 +990,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
if (!pctl->eint)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->eint->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->eint->base))
return PTR_ERR(pctl->eint->base);
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index df55f617aa98..3cb119105ddb 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -54,4 +54,10 @@ config PINCTRL_MESON_G12A
select PINCTRL_MESON_AXG_PMX
default y
+config PINCTRL_MESON_A1
+	bool "Meson A1 SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON_AXG_PMX
+ default y
+
endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index a69c565f2f13..1a5bffe953f9 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
obj-$(CONFIG_PINCTRL_MESON_AXG_PMX) += pinctrl-meson-axg-pmx.o
obj-$(CONFIG_PINCTRL_MESON_AXG) += pinctrl-meson-axg.o
obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
+obj-$(CONFIG_PINCTRL_MESON_A1) += pinctrl-meson-a1.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
new file mode 100644
index 000000000000..0bcec03f344a
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Pin controller and GPIO driver for Amlogic Meson A1 SoC.
+ *
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#include <dt-bindings/gpio/meson-a1-gpio.h>
+#include "pinctrl-meson.h"
+#include "pinctrl-meson-axg-pmx.h"
+
+static const struct pinctrl_pin_desc meson_a1_periphs_pins[] = {
+ MESON_PIN(GPIOP_0),
+ MESON_PIN(GPIOP_1),
+ MESON_PIN(GPIOP_2),
+ MESON_PIN(GPIOP_3),
+ MESON_PIN(GPIOP_4),
+ MESON_PIN(GPIOP_5),
+ MESON_PIN(GPIOP_6),
+ MESON_PIN(GPIOP_7),
+ MESON_PIN(GPIOP_8),
+ MESON_PIN(GPIOP_9),
+ MESON_PIN(GPIOP_10),
+ MESON_PIN(GPIOP_11),
+ MESON_PIN(GPIOP_12),
+ MESON_PIN(GPIOB_0),
+ MESON_PIN(GPIOB_1),
+ MESON_PIN(GPIOB_2),
+ MESON_PIN(GPIOB_3),
+ MESON_PIN(GPIOB_4),
+ MESON_PIN(GPIOB_5),
+ MESON_PIN(GPIOB_6),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOF_0),
+ MESON_PIN(GPIOF_1),
+ MESON_PIN(GPIOF_2),
+ MESON_PIN(GPIOF_3),
+ MESON_PIN(GPIOF_4),
+ MESON_PIN(GPIOF_5),
+ MESON_PIN(GPIOF_6),
+ MESON_PIN(GPIOF_7),
+ MESON_PIN(GPIOF_8),
+ MESON_PIN(GPIOF_9),
+ MESON_PIN(GPIOF_10),
+ MESON_PIN(GPIOF_11),
+ MESON_PIN(GPIOF_12),
+ MESON_PIN(GPIOA_0),
+ MESON_PIN(GPIOA_1),
+ MESON_PIN(GPIOA_2),
+ MESON_PIN(GPIOA_3),
+ MESON_PIN(GPIOA_4),
+ MESON_PIN(GPIOA_5),
+ MESON_PIN(GPIOA_6),
+ MESON_PIN(GPIOA_7),
+ MESON_PIN(GPIOA_8),
+ MESON_PIN(GPIOA_9),
+ MESON_PIN(GPIOA_10),
+ MESON_PIN(GPIOA_11),
+};
+
+/* psram */
+static const unsigned int psram_clkn_pins[] = { GPIOP_0 };
+static const unsigned int psram_clkp_pins[] = { GPIOP_1 };
+static const unsigned int psram_ce_n_pins[] = { GPIOP_2 };
+static const unsigned int psram_rst_n_pins[] = { GPIOP_3 };
+static const unsigned int psram_adq0_pins[] = { GPIOP_4 };
+static const unsigned int psram_adq1_pins[] = { GPIOP_5 };
+static const unsigned int psram_adq2_pins[] = { GPIOP_6 };
+static const unsigned int psram_adq3_pins[] = { GPIOP_7 };
+static const unsigned int psram_adq4_pins[] = { GPIOP_8 };
+static const unsigned int psram_adq5_pins[] = { GPIOP_9 };
+static const unsigned int psram_adq6_pins[] = { GPIOP_10 };
+static const unsigned int psram_adq7_pins[] = { GPIOP_11 };
+static const unsigned int psram_dqs_dm_pins[] = { GPIOP_12 };
+
+/* sdcard */
+static const unsigned int sdcard_d0_b_pins[] = { GPIOB_0 };
+static const unsigned int sdcard_d1_b_pins[] = { GPIOB_1 };
+static const unsigned int sdcard_d2_b_pins[] = { GPIOB_2 };
+static const unsigned int sdcard_d3_b_pins[] = { GPIOB_3 };
+static const unsigned int sdcard_clk_b_pins[] = { GPIOB_4 };
+static const unsigned int sdcard_cmd_b_pins[] = { GPIOB_5 };
+
+static const unsigned int sdcard_d0_x_pins[] = { GPIOX_0 };
+static const unsigned int sdcard_d1_x_pins[] = { GPIOX_1 };
+static const unsigned int sdcard_d2_x_pins[] = { GPIOX_2 };
+static const unsigned int sdcard_d3_x_pins[] = { GPIOX_3 };
+static const unsigned int sdcard_clk_x_pins[] = { GPIOX_4 };
+static const unsigned int sdcard_cmd_x_pins[] = { GPIOX_5 };
+
+/* spif */
+static const unsigned int spif_mo_pins[] = { GPIOB_0 };
+static const unsigned int spif_mi_pins[] = { GPIOB_1 };
+static const unsigned int spif_wp_n_pins[] = { GPIOB_2 };
+static const unsigned int spif_hold_n_pins[] = { GPIOB_3 };
+static const unsigned int spif_clk_pins[] = { GPIOB_4 };
+static const unsigned int spif_cs_pins[] = { GPIOB_5 };
+
+/* i2c0 */
+static const unsigned int i2c0_sck_f9_pins[] = { GPIOF_9 };
+static const unsigned int i2c0_sda_f10_pins[] = { GPIOF_10 };
+static const unsigned int i2c0_sck_f11_pins[] = { GPIOF_11 };
+static const unsigned int i2c0_sda_f12_pins[] = { GPIOF_12 };
+
+/* i2c1 */
+static const unsigned int i2c1_sda_x_pins[] = { GPIOX_9 };
+static const unsigned int i2c1_sck_x_pins[] = { GPIOX_10 };
+static const unsigned int i2c1_sda_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c1_sck_a_pins[] = { GPIOA_11 };
+
+/* i2c2 */
+static const unsigned int i2c2_sck_x0_pins[] = { GPIOX_0 };
+static const unsigned int i2c2_sda_x1_pins[] = { GPIOX_1 };
+static const unsigned int i2c2_sck_x15_pins[] = { GPIOX_15 };
+static const unsigned int i2c2_sda_x16_pins[] = { GPIOX_16 };
+static const unsigned int i2c2_sck_a4_pins[] = { GPIOA_4 };
+static const unsigned int i2c2_sda_a5_pins[] = { GPIOA_5 };
+static const unsigned int i2c2_sck_a8_pins[] = { GPIOA_8 };
+static const unsigned int i2c2_sda_a9_pins[] = { GPIOA_9 };
+
+/* i2c3 */
+static const unsigned int i2c3_sck_f_pins[] = { GPIOF_4 };
+static const unsigned int i2c3_sda_f_pins[] = { GPIOF_5 };
+static const unsigned int i2c3_sck_x_pins[] = { GPIOX_11 };
+static const unsigned int i2c3_sda_x_pins[] = { GPIOX_12 };
+
+/* i2c slave */
+static const unsigned int i2c_slave_sck_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c_slave_sda_a_pins[] = { GPIOA_11 };
+static const unsigned int i2c_slave_sck_f_pins[] = { GPIOF_11 };
+static const unsigned int i2c_slave_sda_f_pins[] = { GPIOF_12 };
+
+/* uart_a */
+static const unsigned int uart_a_tx_pins[] = { GPIOX_11 };
+static const unsigned int uart_a_rx_pins[] = { GPIOX_12 };
+static const unsigned int uart_a_cts_pins[] = { GPIOX_13 };
+static const unsigned int uart_a_rts_pins[] = { GPIOX_14 };
+
+/* uart_b */
+static const unsigned int uart_b_tx_x_pins[] = { GPIOX_7 };
+static const unsigned int uart_b_rx_x_pins[] = { GPIOX_8 };
+static const unsigned int uart_b_tx_f_pins[] = { GPIOF_0 };
+static const unsigned int uart_b_rx_f_pins[] = { GPIOF_1 };
+
+/* uart_c */
+static const unsigned int uart_c_tx_x0_pins[] = { GPIOX_0 };
+static const unsigned int uart_c_rx_x1_pins[] = { GPIOX_1 };
+static const unsigned int uart_c_cts_pins[] = { GPIOX_2 };
+static const unsigned int uart_c_rts_pins[] = { GPIOX_3 };
+static const unsigned int uart_c_tx_x15_pins[] = { GPIOX_15 };
+static const unsigned int uart_c_rx_x16_pins[] = { GPIOX_16 };
+
+/* pwm_a */
+static const unsigned int pwm_a_x6_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_x7_pins[] = { GPIOX_7 };
+static const unsigned int pwm_a_f6_pins[] = { GPIOF_6 };
+static const unsigned int pwm_a_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_a_pins[] = { GPIOA_5 };
+
+/* pwm_b */
+static const unsigned int pwm_b_x_pins[] = { GPIOX_8 };
+static const unsigned int pwm_b_f_pins[] = { GPIOF_7 };
+static const unsigned int pwm_b_a_pins[] = { GPIOA_11 };
+
+/* pwm_c */
+static const unsigned int pwm_c_x_pins[] = { GPIOX_9 };
+static const unsigned int pwm_c_f3_pins[] = { GPIOF_3 };
+static const unsigned int pwm_c_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_c_a_pins[] = { GPIOA_10 };
+
+/* pwm_d */
+static const unsigned int pwm_d_x10_pins[] = { GPIOX_10 };
+static const unsigned int pwm_d_x13_pins[] = { GPIOX_13 };
+static const unsigned int pwm_d_x15_pins[] = { GPIOX_15 };
+static const unsigned int pwm_d_f_pins[] = { GPIOF_11 };
+
+/* pwm_e */
+static const unsigned int pwm_e_p_pins[] = { GPIOP_3 };
+static const unsigned int pwm_e_x2_pins[] = { GPIOX_2 };
+static const unsigned int pwm_e_x14_pins[] = { GPIOX_14 };
+static const unsigned int pwm_e_x16_pins[] = { GPIOX_16 };
+static const unsigned int pwm_e_f_pins[] = { GPIOF_3 };
+static const unsigned int pwm_e_a_pins[] = { GPIOA_0 };
+
+/* pwm_f */
+static const unsigned int pwm_f_b_pins[] = { GPIOB_6 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_3 };
+static const unsigned int pwm_f_f4_pins[] = { GPIOF_4 };
+static const unsigned int pwm_f_f12_pins[] = { GPIOF_12 };
+
+/* pwm_a_hiz */
+static const unsigned int pwm_a_hiz_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_a_hiz_f10_pins[] = { GPIOF_10 };
+static const unsigned int pmw_a_hiz_f6_pins[] = { GPIOF_6 };
+
+/* pwm_b_hiz */
+static const unsigned int pwm_b_hiz_pins[] = { GPIOF_7 };
+
+/* pwm_c_hiz */
+static const unsigned int pwm_c_hiz_pins[] = { GPIOF_8 };
+
+/* tdm_a */
+static const unsigned int tdm_a_dout1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_dout0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_sclk_pins[] = { GPIOX_10 };
+static const unsigned int tdm_a_din1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_din0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_slv_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_slv_sclk_pins[] = { GPIOX_10 };
+
+/* spi_a */
+static const unsigned int spi_a_mosi_x2_pins[] = { GPIOX_2 };
+static const unsigned int spi_a_ss0_x3_pins[] = { GPIOX_3 };
+static const unsigned int spi_a_sclk_x4_pins[] = { GPIOX_4 };
+static const unsigned int spi_a_miso_x5_pins[] = { GPIOX_5 };
+static const unsigned int spi_a_mosi_x7_pins[] = { GPIOX_7 };
+static const unsigned int spi_a_miso_x8_pins[] = { GPIOX_8 };
+static const unsigned int spi_a_ss0_x9_pins[] = { GPIOX_9 };
+static const unsigned int spi_a_sclk_x10_pins[] = { GPIOX_10 };
+
+static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_6 };
+static const unsigned int spi_a_miso_a_pins[] = { GPIOA_7 };
+static const unsigned int spi_a_ss0_a_pins[] = { GPIOA_8 };
+static const unsigned int spi_a_sclk_a_pins[] = { GPIOA_9 };
+
+/* pdm */
+static const unsigned int pdm_din0_x_pins[] = { GPIOX_7 };
+static const unsigned int pdm_din1_x_pins[] = { GPIOX_8 };
+static const unsigned int pdm_din2_x_pins[] = { GPIOX_9 };
+static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
+
+static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
+static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
+static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
+static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+
+/* gen_clk */
+static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
+static const unsigned int gen_clk_f8_pins[] = { GPIOF_8 };
+static const unsigned int gen_clk_f10_pins[] = { GPIOF_10 };
+static const unsigned int gen_clk_a_pins[] = { GPIOA_11 };
+
+/* jtag_a */
+static const unsigned int jtag_a_clk_pins[] = { GPIOF_4 };
+static const unsigned int jtag_a_tms_pins[] = { GPIOF_5 };
+static const unsigned int jtag_a_tdi_pins[] = { GPIOF_6 };
+static const unsigned int jtag_a_tdo_pins[] = { GPIOF_7 };
+
+/* clk_32k_in */
+static const unsigned int clk_32k_in_pins[] = { GPIOF_2 };
+
+/* ir in */
+static const unsigned int remote_input_f_pins[] = { GPIOF_3 };
+static const unsigned int remote_input_a_pins[] = { GPIOA_11 };
+
+/* ir out */
+static const unsigned int remote_out_pins[] = { GPIOF_5 };
+
+/* spdif */
+static const unsigned int spdif_in_f6_pins[] = { GPIOF_6 };
+static const unsigned int spdif_in_f7_pins[] = { GPIOF_7 };
+
+/* sw */
+static const unsigned int swclk_pins[] = { GPIOF_4 };
+static const unsigned int swdio_pins[] = { GPIOF_5 };
+
+/* clk_25 */
+static const unsigned int clk25_pins[] = { GPIOF_10 };
+
+/* cec_a */
+static const unsigned int cec_a_pins[] = { GPIOF_2 };
+
+/* cec_b */
+static const unsigned int cec_b_pins[] = { GPIOF_2 };
+
+/* clk12_24 */
+static const unsigned int clk12_24_pins[] = { GPIOF_10 };
+
+/* mclk_0 */
+static const unsigned int mclk_0_pins[] = { GPIOA_0 };
+
+/* tdm_b */
+static const unsigned int tdm_b_sclk_pins[] = { GPIOA_1 };
+static const unsigned int tdm_b_fs_pins[] = { GPIOA_2 };
+static const unsigned int tdm_b_dout0_pins[] = { GPIOA_3 };
+static const unsigned int tdm_b_dout1_pins[] = { GPIOA_4 };
+static const unsigned int tdm_b_dout2_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_dout3_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_dout4_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_dout5_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_slv_sclk_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_slv_fs_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_din0_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_din1_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_din2_pins[] = { GPIOA_9 };
+
+/* mclk_vad */
+static const unsigned int mclk_vad_pins[] = { GPIOA_0 };
+
+/* tdm_vad */
+static const unsigned int tdm_vad_sclk_a1_pins[] = { GPIOA_1 };
+static const unsigned int tdm_vad_fs_a2_pins[] = { GPIOA_2 };
+static const unsigned int tdm_vad_sclk_a5_pins[] = { GPIOA_5 };
+static const unsigned int tdm_vad_fs_a6_pins[] = { GPIOA_6 };
+
+/* tst_out */
+static const unsigned int tst_out0_pins[] = { GPIOA_0 };
+static const unsigned int tst_out1_pins[] = { GPIOA_1 };
+static const unsigned int tst_out2_pins[] = { GPIOA_2 };
+static const unsigned int tst_out3_pins[] = { GPIOA_3 };
+static const unsigned int tst_out4_pins[] = { GPIOA_4 };
+static const unsigned int tst_out5_pins[] = { GPIOA_5 };
+static const unsigned int tst_out6_pins[] = { GPIOA_6 };
+static const unsigned int tst_out7_pins[] = { GPIOA_7 };
+static const unsigned int tst_out8_pins[] = { GPIOA_8 };
+static const unsigned int tst_out9_pins[] = { GPIOA_9 };
+static const unsigned int tst_out10_pins[] = { GPIOA_10 };
+static const unsigned int tst_out11_pins[] = { GPIOA_11 };
+
+/* mute */
+static const unsigned int mute_key_pins[] = { GPIOA_4 };
+static const unsigned int mute_en_pins[] = { GPIOA_5 };
+
+static struct meson_pmx_group meson_a1_periphs_groups[] = {
+ GPIO_GROUP(GPIOP_0),
+ GPIO_GROUP(GPIOP_1),
+ GPIO_GROUP(GPIOP_2),
+ GPIO_GROUP(GPIOP_3),
+ GPIO_GROUP(GPIOP_4),
+ GPIO_GROUP(GPIOP_5),
+ GPIO_GROUP(GPIOP_6),
+ GPIO_GROUP(GPIOP_7),
+ GPIO_GROUP(GPIOP_8),
+ GPIO_GROUP(GPIOP_9),
+ GPIO_GROUP(GPIOP_10),
+ GPIO_GROUP(GPIOP_11),
+ GPIO_GROUP(GPIOP_12),
+ GPIO_GROUP(GPIOB_0),
+ GPIO_GROUP(GPIOB_1),
+ GPIO_GROUP(GPIOB_2),
+ GPIO_GROUP(GPIOB_3),
+ GPIO_GROUP(GPIOB_4),
+ GPIO_GROUP(GPIOB_5),
+ GPIO_GROUP(GPIOB_6),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOF_0),
+ GPIO_GROUP(GPIOF_1),
+ GPIO_GROUP(GPIOF_2),
+ GPIO_GROUP(GPIOF_3),
+ GPIO_GROUP(GPIOF_4),
+ GPIO_GROUP(GPIOF_5),
+ GPIO_GROUP(GPIOF_6),
+ GPIO_GROUP(GPIOF_7),
+ GPIO_GROUP(GPIOF_8),
+ GPIO_GROUP(GPIOF_9),
+ GPIO_GROUP(GPIOF_10),
+ GPIO_GROUP(GPIOF_11),
+ GPIO_GROUP(GPIOF_12),
+ GPIO_GROUP(GPIOA_0),
+ GPIO_GROUP(GPIOA_1),
+ GPIO_GROUP(GPIOA_2),
+ GPIO_GROUP(GPIOA_3),
+ GPIO_GROUP(GPIOA_4),
+ GPIO_GROUP(GPIOA_5),
+ GPIO_GROUP(GPIOA_6),
+ GPIO_GROUP(GPIOA_7),
+ GPIO_GROUP(GPIOA_8),
+ GPIO_GROUP(GPIOA_9),
+ GPIO_GROUP(GPIOA_10),
+ GPIO_GROUP(GPIOA_11),
+
+ /* bank P func1 */
+ GROUP(psram_clkn, 1),
+ GROUP(psram_clkp, 1),
+ GROUP(psram_ce_n, 1),
+ GROUP(psram_rst_n, 1),
+ GROUP(psram_adq0, 1),
+ GROUP(psram_adq1, 1),
+ GROUP(psram_adq2, 1),
+ GROUP(psram_adq3, 1),
+ GROUP(psram_adq4, 1),
+ GROUP(psram_adq5, 1),
+ GROUP(psram_adq6, 1),
+ GROUP(psram_adq7, 1),
+ GROUP(psram_dqs_dm, 1),
+
+ /*bank P func2 */
+ GROUP(pwm_e_p, 2),
+
+ /*bank B func1 */
+ GROUP(spif_mo, 1),
+ GROUP(spif_mi, 1),
+ GROUP(spif_wp_n, 1),
+ GROUP(spif_hold_n, 1),
+ GROUP(spif_clk, 1),
+ GROUP(spif_cs, 1),
+ GROUP(pwm_f_b, 1),
+
+ /*bank B func2 */
+ GROUP(sdcard_d0_b, 2),
+ GROUP(sdcard_d1_b, 2),
+ GROUP(sdcard_d2_b, 2),
+ GROUP(sdcard_d3_b, 2),
+ GROUP(sdcard_clk_b, 2),
+ GROUP(sdcard_cmd_b, 2),
+
+ /*bank X func1 */
+ GROUP(sdcard_d0_x, 1),
+ GROUP(sdcard_d1_x, 1),
+ GROUP(sdcard_d2_x, 1),
+ GROUP(sdcard_d3_x, 1),
+ GROUP(sdcard_clk_x, 1),
+ GROUP(sdcard_cmd_x, 1),
+ GROUP(pwm_a_x6, 1),
+ GROUP(tdm_a_dout1, 1),
+ GROUP(tdm_a_dout0, 1),
+ GROUP(tdm_a_fs, 1),
+ GROUP(tdm_a_sclk, 1),
+ GROUP(uart_a_tx, 1),
+ GROUP(uart_a_rx, 1),
+ GROUP(uart_a_cts, 1),
+ GROUP(uart_a_rts, 1),
+ GROUP(pwm_d_x15, 1),
+ GROUP(pwm_e_x16, 1),
+
+ /* bank X func2 */
+ GROUP(i2c2_sck_x0, 2),
+ GROUP(i2c2_sda_x1, 2),
+ GROUP(spi_a_mosi_x2, 2),
+ GROUP(spi_a_ss0_x3, 2),
+ GROUP(spi_a_sclk_x4, 2),
+ GROUP(spi_a_miso_x5, 2),
+ GROUP(tdm_a_din1, 2),
+ GROUP(tdm_a_din0, 2),
+ GROUP(tdm_a_slv_fs, 2),
+ GROUP(tdm_a_slv_sclk, 2),
+ GROUP(i2c3_sck_x, 2),
+ GROUP(i2c3_sda_x, 2),
+ GROUP(pwm_d_x13, 2),
+ GROUP(pwm_e_x14, 2),
+ GROUP(i2c2_sck_x15, 2),
+ GROUP(i2c2_sda_x16, 2),
+
+ /* bank X func3 */
+ GROUP(uart_c_tx_x0, 3),
+ GROUP(uart_c_rx_x1, 3),
+ GROUP(uart_c_cts, 3),
+ GROUP(uart_c_rts, 3),
+ GROUP(pdm_din0_x, 3),
+ GROUP(pdm_din1_x, 3),
+ GROUP(pdm_din2_x, 3),
+ GROUP(pdm_dclk_x, 3),
+ GROUP(uart_c_tx_x15, 3),
+ GROUP(uart_c_rx_x16, 3),
+
+ /* bank X func4 */
+ GROUP(pwm_e_x2, 4),
+ GROUP(pwm_f_x, 4),
+ GROUP(spi_a_mosi_x7, 4),
+ GROUP(spi_a_miso_x8, 4),
+ GROUP(spi_a_ss0_x9, 4),
+ GROUP(spi_a_sclk_x10, 4),
+
+ /* bank X func5 */
+ GROUP(uart_b_tx_x, 5),
+ GROUP(uart_b_rx_x, 5),
+ GROUP(i2c1_sda_x, 5),
+ GROUP(i2c1_sck_x, 5),
+
+ /* bank X func6 */
+ GROUP(pwm_a_x7, 6),
+ GROUP(pwm_b_x, 6),
+ GROUP(pwm_c_x, 6),
+ GROUP(pwm_d_x10, 6),
+
+ /* bank X func7 */
+ GROUP(gen_clk_x, 7),
+
+ /* bank F func1 */
+ GROUP(uart_b_tx_f, 1),
+ GROUP(uart_b_rx_f, 1),
+ GROUP(remote_input_f, 1),
+ GROUP(jtag_a_clk, 1),
+ GROUP(jtag_a_tms, 1),
+ GROUP(jtag_a_tdi, 1),
+ GROUP(jtag_a_tdo, 1),
+ GROUP(gen_clk_f8, 1),
+ GROUP(pwm_a_f10, 1),
+ GROUP(i2c0_sck_f11, 1),
+ GROUP(i2c0_sda_f12, 1),
+
+ /* bank F func2 */
+ GROUP(clk_32k_in, 2),
+ GROUP(pwm_e_f, 2),
+ GROUP(pwm_f_f4, 2),
+ GROUP(remote_out, 2),
+ GROUP(spdif_in_f6, 2),
+ GROUP(spdif_in_f7, 2),
+ GROUP(pwm_a_hiz_f8, 2),
+ GROUP(pwm_a_hiz_f10, 2),
+ GROUP(pwm_d_f, 2),
+ GROUP(pwm_f_f12, 2),
+
+ /* bank F func3 */
+ GROUP(pwm_c_f3, 3),
+ GROUP(swclk, 3),
+ GROUP(swdio, 3),
+ GROUP(pwm_a_f6, 3),
+ GROUP(pwm_b_f, 3),
+ GROUP(pwm_c_f8, 3),
+ GROUP(clk25, 3),
+ GROUP(i2c_slave_sck_f, 3),
+ GROUP(i2c_slave_sda_f, 3),
+
+ /* bank F func4 */
+ GROUP(cec_a, 4),
+ GROUP(i2c3_sck_f, 4),
+ GROUP(i2c3_sda_f, 4),
+ GROUP(pwm_a_hiz_f6, 4),
+ GROUP(pwm_b_hiz, 4),
+ GROUP(pwm_c_hiz, 4),
+ GROUP(i2c0_sck_f9, 4),
+ GROUP(i2c0_sda_f10, 4),
+
+ /* bank F func5 */
+ GROUP(cec_b, 5),
+ GROUP(clk12_24, 5),
+
+ /* bank F func7 */
+ GROUP(gen_clk_f10, 7),
+
+ /* bank A func1 */
+ GROUP(mclk_0, 1),
+ GROUP(tdm_b_sclk, 1),
+ GROUP(tdm_b_fs, 1),
+ GROUP(tdm_b_dout0, 1),
+ GROUP(tdm_b_dout1, 1),
+ GROUP(tdm_b_dout2, 1),
+ GROUP(tdm_b_dout3, 1),
+ GROUP(tdm_b_dout4, 1),
+ GROUP(tdm_b_dout5, 1),
+ GROUP(remote_input_a, 1),
+
+ /* bank A func2 */
+ GROUP(pwm_e_a, 2),
+ GROUP(tdm_b_slv_sclk, 2),
+ GROUP(tdm_b_slv_fs, 2),
+ GROUP(tdm_b_din0, 2),
+ GROUP(tdm_b_din1, 2),
+ GROUP(tdm_b_din2, 2),
+ GROUP(i2c1_sda_a, 2),
+ GROUP(i2c1_sck_a, 2),
+
+ /* bank A func3 */
+ GROUP(i2c2_sck_a4, 3),
+ GROUP(i2c2_sda_a5, 3),
+ GROUP(pdm_din2_a, 3),
+ GROUP(pdm_din1_a, 3),
+ GROUP(pdm_din0_a, 3),
+ GROUP(pdm_dclk, 3),
+ GROUP(pwm_c_a, 3),
+ GROUP(pwm_b_a, 3),
+
+ /* bank A func4 */
+ GROUP(pwm_a_a, 4),
+ GROUP(spi_a_mosi_a, 4),
+ GROUP(spi_a_miso_a, 4),
+ GROUP(spi_a_ss0_a, 4),
+ GROUP(spi_a_sclk_a, 4),
+ GROUP(i2c_slave_sck_a, 4),
+ GROUP(i2c_slave_sda_a, 4),
+
+ /* bank A func5 */
+ GROUP(mclk_vad, 5),
+ GROUP(tdm_vad_sclk_a1, 5),
+ GROUP(tdm_vad_fs_a2, 5),
+ GROUP(tdm_vad_sclk_a5, 5),
+ GROUP(tdm_vad_fs_a6, 5),
+ GROUP(i2c2_sck_a8, 5),
+ GROUP(i2c2_sda_a9, 5),
+
+ /* bank A func6 */
+ GROUP(tst_out0, 6),
+ GROUP(tst_out1, 6),
+ GROUP(tst_out2, 6),
+ GROUP(tst_out3, 6),
+ GROUP(tst_out4, 6),
+ GROUP(tst_out5, 6),
+ GROUP(tst_out6, 6),
+ GROUP(tst_out7, 6),
+ GROUP(tst_out8, 6),
+ GROUP(tst_out9, 6),
+ GROUP(tst_out10, 6),
+ GROUP(tst_out11, 6),
+
+ /* bank A func7 */
+ GROUP(mute_key, 7),
+ GROUP(mute_en, 7),
+ GROUP(gen_clk_a, 7),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOP_0", "GPIOP_1", "GPIOP_2", "GPIOP_3", "GPIOP_4",
+ "GPIOP_5", "GPIOP_6", "GPIOP_7", "GPIOP_8", "GPIOP_9",
+ "GPIOP_10", "GPIOP_11", "GPIOP_12",
+
+ "GPIOB_0", "GPIOB_1", "GPIOB_2", "GPIOB_3", "GPIOB_4",
+ "GPIOB_5", "GPIOB_6",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16",
+
+ "GPIOF_0", "GPIOF_1", "GPIOF_2", "GPIOF_3", "GPIOF_4",
+ "GPIOF_5", "GPIOF_6", "GPIOF_7", "GPIOF_8", "GPIOF_9",
+ "GPIOF_10", "GPIOF_11", "GPIOF_12",
+
+ "GPIOA_0", "GPIOA_1", "GPIOA_2", "GPIOA_3", "GPIOA_4",
+ "GPIOA_5", "GPIOA_6", "GPIOA_7", "GPIOA_8", "GPIOA_9",
+ "GPIOA_10", "GPIOA_11",
+};
+
+static const char * const psram_groups[] = {
+ "psram_clkn", "psram_clkp", "psram_ce_n", "psram_rst_n", "psram_adq0",
+ "psram_adq1", "psram_adq2", "psram_adq3", "psram_adq4", "psram_adq5",
+ "psram_adq6", "psram_adq7", "psram_dqs_dm",
+};
+
+static const char * const pwm_a_groups[] = {
+ "pwm_a_x6", "pwm_a_x7", "pwm_a_f10", "pwm_a_f6", "pwm_a_a",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b_x", "pwm_b_f", "pwm_b_a",
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c_x", "pwm_c_f3", "pwm_c_f8", "pwm_c_a",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d_x15", "pwm_d_x13", "pwm_d_x10", "pwm_d_f",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e_p", "pwm_e_x16", "pwm_e_x14", "pwm_e_x2", "pwm_e_f",
+ "pwm_e_a",
+};
+
+static const char * const pwm_f_groups[] = {
+ "pwm_f_b", "pwm_f_x", "pwm_f_f4", "pwm_f_f12",
+};
+
+static const char * const pwm_a_hiz_groups[] = {
+ "pwm_a_hiz_f8", "pwm_a_hiz_f10", "pwm_a_hiz_f6",
+};
+
+static const char * const pwm_b_hiz_groups[] = {
+ "pwm_b_hiz",
+};
+
+static const char * const pwm_c_hiz_groups[] = {
+ "pwm_c_hiz",
+};
+
+static const char * const spif_groups[] = {
+ "spif_mo", "spif_mi", "spif_wp_n", "spif_hold_n", "spif_clk",
+ "spif_cs",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0_b", "sdcard_d1_b", "sdcard_d2_b", "sdcard_d3_b",
+ "sdcard_clk_b", "sdcard_cmd_b",
+
+ "sdcard_d0_x", "sdcard_d1_x", "sdcard_d2_x", "sdcard_d3_x",
+ "sdcard_clk_x", "sdcard_cmd_x",
+};
+
+static const char * const tdm_a_groups[] = {
+ "tdm_a_din0", "tdm_a_din1", "tdm_a_fs", "tdm_a_sclk",
+ "tdm_a_slv_fs", "tdm_a_slv_sclk", "tdm_a_dout0", "tdm_a_dout1",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_a_tx", "uart_a_rx", "uart_a_cts", "uart_a_rts",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_b_tx_x", "uart_b_rx_x", "uart_b_tx_f", "uart_b_rx_f",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_c_tx_x0", "uart_c_rx_x1", "uart_c_cts", "uart_c_rts",
+ "uart_c_tx_x15", "uart_c_rx_x16",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_sck_f11", "i2c0_sda_f12", "i2c0_sck_f9", "i2c0_sda_f10",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_sda_x", "i2c1_sck_x", "i2c1_sda_a", "i2c1_sck_a",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_sck_x0", "i2c2_sda_x1", "i2c2_sck_x15", "i2c2_sda_x16",
+ "i2c2_sck_a4", "i2c2_sda_a5", "i2c2_sck_a8", "i2c2_sda_a9",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_sck_x", "i2c3_sda_x", "i2c3_sck_f", "i2c3_sda_f",
+};
+
+static const char * const i2c_slave_groups[] = {
+ "i2c_slave_sda_a", "i2c_slave_sck_a",
+ "i2c_slave_sda_f", "i2c_slave_sck_f",
+};
+
+static const char * const spi_a_groups[] = {
+ "spi_a_mosi_x2", "spi_a_ss0_x3", "spi_a_sclk_x4", "spi_a_miso_x5",
+ "spi_a_mosi_x7", "spi_a_miso_x8", "spi_a_ss0_x9", "spi_a_sclk_x10",
+
+ "spi_a_mosi_a", "spi_a_miso_a", "spi_a_ss0_a", "spi_a_sclk_a",
+};
+
+static const char * const pdm_groups[] = {
+ "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+};
+
+static const char * const gen_clk_groups[] = {
+ "gen_clk_x", "gen_clk_f8", "gen_clk_f10", "gen_clk_a",
+};
+
+static const char * const remote_input_groups[] = {
+ "remote_input_f",
+ "remote_input_a",
+};
+
+static const char * const jtag_a_groups[] = {
+ "jtag_a_clk", "jtag_a_tms", "jtag_a_tdi", "jtag_a_tdo",
+};
+
+static const char * const clk_32k_in_groups[] = {
+ "clk_32k_in",
+};
+
+static const char * const remote_out_groups[] = {
+ "remote_out",
+};
+
+static const char * const spdif_in_groups[] = {
+ "spdif_in_f6", "spdif_in_f7",
+};
+
+static const char * const sw_groups[] = {
+ "swclk", "swdio",
+};
+
+static const char * const clk25_groups[] = {
+ "clk_25",
+};
+
+static const char * const cec_a_groups[] = {
+ "cec_a",
+};
+
+static const char * const cec_b_groups[] = {
+ "cec_b",
+};
+
+static const char * const clk12_24_groups[] = {
+ "clk12_24",
+};
+
+static const char * const mclk_0_groups[] = {
+ "mclk_0",
+};
+
+static const char * const tdm_b_groups[] = {
+ "tdm_b_din0", "tdm_b_din1", "tdm_b_din2",
+ "tdm_b_sclk", "tdm_b_fs", "tdm_b_dout0", "tdm_b_dout1",
+ "tdm_b_dout2", "tdm_b_dout3", "tdm_b_dout4", "tdm_b_dout5",
+ "tdm_b_slv_sclk", "tdm_b_slv_fs",
+};
+
+static const char * const mclk_vad_groups[] = {
+ "mclk_vad",
+};
+
+static const char * const tdm_vad_groups[] = {
+ "tdm_vad_sclk_a1", "tdm_vad_fs_a2", "tdm_vad_sclk_a5", "tdm_vad_fs_a6",
+};
+
+static const char * const tst_out_groups[] = {
+ "tst_out0", "tst_out1", "tst_out2", "tst_out3",
+ "tst_out4", "tst_out5", "tst_out6", "tst_out7",
+ "tst_out8", "tst_out9", "tst_out10", "tst_out11",
+};
+
+static const char * const mute_groups[] = {
+ "mute_key", "mute_en",
+};
+
+static struct meson_pmx_func meson_a1_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(psram),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f),
+ FUNCTION(pwm_a_hiz),
+ FUNCTION(pwm_b_hiz),
+ FUNCTION(pwm_c_hiz),
+ FUNCTION(spif),
+ FUNCTION(sdcard),
+ FUNCTION(tdm_a),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(i2c0),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(spi_a),
+ FUNCTION(pdm),
+ FUNCTION(gen_clk),
+ FUNCTION(remote_input),
+ FUNCTION(jtag_a),
+ FUNCTION(clk_32k_in),
+ FUNCTION(remote_out),
+ FUNCTION(spdif_in),
+ FUNCTION(sw),
+ FUNCTION(clk25),
+ FUNCTION(cec_a),
+ FUNCTION(cec_b),
+ FUNCTION(clk12_24),
+ FUNCTION(mclk_0),
+ FUNCTION(tdm_b),
+ FUNCTION(mclk_vad),
+ FUNCTION(tdm_vad),
+ FUNCTION(tst_out),
+ FUNCTION(mute),
+};
+
+static struct meson_bank meson_a1_periphs_banks[] = {
+ /* name first last irq pullen pull dir out in ds */
+ BANK_DS("P", GPIOP_0, GPIOP_12, 0, 12, 0x3, 0, 0x4, 0,
+ 0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0),
+ BANK_DS("B", GPIOB_0, GPIOB_6, 13, 19, 0x13, 0, 0x14, 0,
+ 0x12, 0, 0x11, 0, 0x10, 0, 0x15, 0),
+ BANK_DS("X", GPIOX_0, GPIOX_16, 20, 36, 0x23, 0, 0x24, 0,
+ 0x22, 0, 0x21, 0, 0x20, 0, 0x25, 0),
+ BANK_DS("F", GPIOF_0, GPIOF_12, 37, 49, 0x33, 0, 0x34, 0,
+ 0x32, 0, 0x31, 0, 0x30, 0, 0x35, 0),
+ BANK_DS("A", GPIOA_0, GPIOA_11, 50, 61, 0x43, 0, 0x44, 0,
+ 0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0),
+};
+
+static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
+ /* name first last reg offset */
+ BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0),
+ BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0),
+ BANK_PMX("X", GPIOX_0, GPIOX_16, 0x3, 0),
+ BANK_PMX("F", GPIOF_0, GPIOF_12, 0x6, 0),
+ BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0),
+};
+
+static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
+ .pmx_banks = meson_a1_periphs_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks),
+};
+
+static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pins = meson_a1_periphs_pins,
+ .groups = meson_a1_periphs_groups,
+ .funcs = meson_a1_periphs_functions,
+ .banks = meson_a1_periphs_banks,
+ .num_pins = ARRAY_SIZE(meson_a1_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_a1_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_a1_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_a1_periphs_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &meson_a1_periphs_pmx_banks_data,
+ .parse_dt = &meson_a1_parse_dt_extra,
+};
+
+static const struct of_device_id meson_a1_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-a1-periphs-pinctrl",
+ .data = &meson_a1_periphs_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_a1_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-a1-pinctrl",
+ .of_match_table = meson_a1_pinctrl_dt_match,
+ },
+};
+
+builtin_platform_driver(meson_a1_pinctrl_driver);
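
The new A1 driver is almost pure table data: every GROUP()/FUNCTION() entry is matched by name against a *_pins array and a *_groups string list, so a single misspelled identifier silently breaks the function-to-group lookup at runtime. A minimal sketch of the macro conventions, assuming bodies along the lines of the other AXG-style Meson drivers (the exact fields are an assumption, not copied from this patch):

/*
 * Sketch only; the real macros live in the Meson pinctrl headers
 * (pinctrl-meson-axg-pmx.h and friends).
 */
#define GROUP(grp, f)						\
	{							\
		.name = #grp,					\
		.pins = grp ## _pins,				\
		.num_pins = ARRAY_SIZE(grp ## _pins),		\
		/* plus per-group mux data carrying 'f' */	\
	}

#define FUNCTION(fn)						\
	{							\
		.name = #fn,					\
		.groups = fn ## _groups,			\
		.num_groups = ARRAY_SIZE(fn ## _groups),	\
	}

The stringified group name produced by GROUP() is exactly what the *_groups lists must contain, which is why the pwm_a_hiz_f6 and clk25 spellings above have to agree on both sides.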
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index ad502eda4afa..072765db93d7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -1066,6 +1066,7 @@ static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_axg_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_axg_aobus_pmx_banks_data,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_axg_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 582665fd362a..41850e3c0091 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -1362,6 +1362,14 @@ static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
.num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
};
+static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+
+ return 0;
+}
+
static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_g12a_periphs_pins,
@@ -1388,6 +1396,7 @@ static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_g12a_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_g12a_aobus_pmx_banks_data,
+ .parse_dt = meson_g12a_aobus_parse_dt_extra,
};
static const struct of_device_id meson_g12a_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 5bfa56f3847e..926b9997159a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -851,6 +851,7 @@ static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 72c5373c8dc1..1b6e8646700f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -820,6 +820,7 @@ static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 8bba9d053d9f..3c80828a5e50 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -625,7 +625,7 @@ static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
i = of_property_match_string(node, "reg-names", name);
if (of_address_to_resource(node, i, &res))
- return ERR_PTR(-ENOENT);
+ return NULL;
base = devm_ioremap_resource(pc->dev, &res);
if (IS_ERR(base))
@@ -665,26 +665,24 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->of_node = gpio_np;
pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
- if (IS_ERR(pc->reg_mux)) {
+ if (IS_ERR_OR_NULL(pc->reg_mux)) {
dev_err(pc->dev, "mux registers not found\n");
- return PTR_ERR(pc->reg_mux);
+ return pc->reg_mux ? PTR_ERR(pc->reg_mux) : -ENOENT;
}
pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
- if (IS_ERR(pc->reg_gpio)) {
+ if (IS_ERR_OR_NULL(pc->reg_gpio)) {
dev_err(pc->dev, "gpio registers not found\n");
- return PTR_ERR(pc->reg_gpio);
+ return pc->reg_gpio ? PTR_ERR(pc->reg_gpio) : -ENOENT;
}
pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
- /* Use gpio region if pull one is not present */
if (IS_ERR(pc->reg_pull))
- pc->reg_pull = pc->reg_gpio;
+ pc->reg_pull = NULL;
pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
- /* Use pull region if pull-enable one is not present */
if (IS_ERR(pc->reg_pullen))
- pc->reg_pullen = pc->reg_pull;
+ pc->reg_pullen = NULL;
pc->reg_ds = meson_map_resource(pc, gpio_np, "ds");
if (IS_ERR(pc->reg_ds)) {
@@ -692,6 +690,28 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->reg_ds = NULL;
}
+ if (pc->data->parse_dt)
+ return pc->data->parse_dt(pc);
+
+ return 0;
+}
+
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ if (!pc->reg_pull)
+ return -EINVAL;
+
+ pc->reg_pullen = pc->reg_pull;
+
+ return 0;
+}
+
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+ pc->reg_ds = pc->reg_gpio;
+
return 0;
}
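
With this change the common parse path no longer hardwires the "missing pull region falls back to gpio" policy: meson_map_resource() now returns NULL for an absent region, and each SoC family expresses its own aliasing through the new data->parse_dt hook. A hypothetical SoC-specific hook would look like this (names and register aliasing invented for illustration, not part of the patch):

static int meson_hypo_parse_dt_extra(struct meson_pinctrl *pc)
{
	/* Say this SoC keeps pull and pull-enable bits in the mux region. */
	if (!pc->reg_mux)
		return -EINVAL;

	pc->reg_pull = pc->reg_mux;
	pc->reg_pullen = pc->reg_mux;

	return 0;
}

static struct meson_pinctrl_data meson_hypo_pinctrl_data = {
	/* ...pins, groups, funcs, banks as usual... */
	.parse_dt = meson_hypo_parse_dt_extra,
};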
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index c696f3241a36..f8b0ff9d419a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -11,6 +11,8 @@
#include <linux/regmap.h>
#include <linux/types.h>
+struct meson_pinctrl;
+
/**
* struct meson_pmx_group - a pinmux group
*
@@ -114,6 +116,7 @@ struct meson_pinctrl_data {
unsigned int num_banks;
const struct pinmux_ops *pmx_ops;
void *pmx_data;
+ int (*parse_dt)(struct meson_pinctrl *pc);
};
struct meson_pinctrl {
@@ -171,3 +174,7 @@ int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
/* Common probe function */
int meson_pinctrl_probe(struct platform_device *pdev);
+/* Common ao groups extra dt parse function for SoCs before g12a */
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc);
+/* Common extra dt parse function for SoCs like A1 */
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 0b97befa6335..dd17100efdcf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1103,6 +1103,7 @@ static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index a7de388388e6..2d5339edd0b7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -962,6 +962,7 @@ static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8b_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index d69c25798871..0d12894d3ee1 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -46,8 +46,8 @@ config PINCTRL_ORION
select PINCTRL_MVEBU
config PINCTRL_ARMADA_37XX
- bool
- select GENERIC_PINCONF
- select MFD_SYSCON
- select PINCONF
- select PINMUX
+ bool
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+ select PINCONF
+ select PINMUX
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f2f5fcd9a237..aa9dcde0f069 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -595,10 +595,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
regmap_read(info->regmap, in_reg, &in_val);
/* Set initial polarity based on current input level. */
- if (in_val & d->mask)
- val |= d->mask; /* falling */
+ if (in_val & BIT(d->hwirq % GPIO_PER_REG))
+ val |= BIT(d->hwirq % GPIO_PER_REG); /* falling */
else
- val &= ~d->mask; /* rising */
+ val &= ~(BIT(d->hwirq % GPIO_PER_REG)); /* rising */
break;
}
default:
@@ -722,6 +722,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip;
+ struct gpio_irq_chip *girq = &gc->irq;
+ struct device *dev = &pdev->dev;
struct resource res;
int ret = -ENODEV, i, nr_irq_parent;
@@ -732,19 +734,21 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
break;
}
};
- if (ret)
+ if (ret) {
+ dev_err(dev, "no gpio-controller child node\n");
return ret;
+ }
nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
- dev_err(&pdev->dev, "Invalid or no IRQ\n");
+ dev_err(dev, "invalid or no IRQ\n");
return 0;
}
if (of_address_to_resource(info->dev->of_node, 1, &res)) {
- dev_err(info->dev, "cannot find IO resource\n");
+ dev_err(dev, "cannot find IO resource\n");
return -ENOENT;
}
@@ -759,27 +763,27 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
irqchip->irq_set_type = armada_37xx_irq_set_type;
irqchip->irq_startup = armada_37xx_irq_startup;
irqchip->name = info->data->name;
- ret = gpiochip_irqchip_add(gc, irqchip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_info(&pdev->dev, "could not add irqchip\n");
- return ret;
- }
-
+ girq->chip = irqchip;
+ girq->parent_handler = armada_37xx_irq_handler;
/*
* Many interrupts are connected to the parent interrupt
* controller. But we do not take advantage of this and use
* the chained irq with all of them.
*/
+ girq->num_parents = nr_irq_parent;
+ girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
if (irq < 0)
continue;
-
- gpiochip_set_chained_irqchip(gc, irqchip, irq,
- armada_37xx_irq_handler);
+ girq->parents[i] = irq;
}
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
return 0;
}
@@ -809,10 +813,10 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc->of_node = np;
gc->label = info->data->name;
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
+ ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
- ret = armada_37xx_irqchip_register(pdev, info);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
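
This is the recurring conversion in this series: instead of calling gpiochip_irqchip_add() and gpiochip_set_chained_irqchip() after registration, the driver fills in struct gpio_irq_chip before devm_gpiochip_add_data(), which then instantiates the irqchip itself; that is also why the registration order is swapped above. The pattern, condensed into a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>

/* Sketch of the gpio_irq_chip setup pattern (hypothetical driver). */
static int hypo_gpio_register(struct device *dev, struct gpio_chip *gc,
			      struct irq_chip *ic, unsigned int parent_irq,
			      irq_flow_handler_t parent_handler)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = ic;
	girq->parent_handler = parent_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_edge_irq;

	/* gpiochip_add_data() now sets up the irqchip as a side effect. */
	return devm_gpiochip_add_data(dev, gc, NULL);
}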
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 00cfaf2c9d4a..a1f93859e7ca 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -759,12 +759,10 @@ int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
- struct resource *res;
void __iomem *base;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
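
devm_platform_ioremap_resource() keeps appearing in the probe cleanups below; it simply folds the familiar two-call idiom into one helper. This is close to the actual definition in drivers/base/platform.c, but treat it as a paraphrase:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}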
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 29bb9d8cbbb5..cc97d270be61 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -220,17 +220,14 @@ static int orion_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(orion_pinctrl_of_match, &pdev->dev);
- struct resource *res;
pdev->dev.platform_data = (void*)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ mpp_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mpp_base))
return PTR_ERR(mpp_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- high_mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ high_mpp_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(high_mpp_base))
return PTR_ERR(high_mpp_base);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 726c0b5501fa..b9246e0b4fe2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -391,6 +391,15 @@ static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, /* MC0_CMDDIR */
DB8500_PIN_AA2, /* MC0_DAT2 */
DB8500_PIN_AA1 /* MC0_DAT3 */
};
+/* MMC/SD card 0 interface without CMD/DAT0/DAT2 direction control */
+static const unsigned mc0_a_2_pins[] = { DB8500_PIN_AA3, /* MC0_FBCLK */
+ DB8500_PIN_AA4, /* MC0_CLK */
+ DB8500_PIN_AB2, /* MC0_CMD */
+ DB8500_PIN_Y4, /* MC0_DAT0 */
+ DB8500_PIN_Y2, /* MC0_DAT1 */
+ DB8500_PIN_AA2, /* MC0_DAT2 */
+ DB8500_PIN_AA1 /* MC0_DAT3 */
+};
/* Often only 4 bits are used, then these are not needed (only used for MMC) */
static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, /* MC0_DAT4 */
DB8500_PIN_W3, /* MC0_DAT5 */
@@ -670,6 +679,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
@@ -828,7 +838,7 @@ DB8500_FUNC_GROUPS(ipi2c, "ipi2c_a_1", "ipi2c_a_2");
*/
DB8500_FUNC_GROUPS(msp0, "msp0txrx_a_1", "msp0tfstck_a_1", "msp0rfstck_a_1",
"msp0txrx_b_1", "msp0sck_b_1");
-DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_dat47_a_1", "mc0dat31dir_a_1");
+DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_a_2", "mc0_dat47_a_1", "mc0dat31dir_a_1");
/* MSP1 can swap RX/TX like MSP0 but has no SCK pin available */
DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 2a8190b11d10..95f864dfdef4 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -248,9 +248,6 @@ struct nmk_gpio_chip {
void __iomem *addr;
struct clk *clk;
unsigned int bank;
- unsigned int parent_irq;
- int latent_parent_irq;
- u32 (*get_latent_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
spinlock_t lock;
bool sleepmode;
@@ -802,13 +799,19 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
clk_disable(nmk_chip->clk);
}
-static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
+static void nmk_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *host_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ u32 status;
chained_irq_enter(host_chip, desc);
+ clk_enable(nmk_chip->clk);
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+ clk_disable(nmk_chip->clk);
+
while (status) {
int bit = __ffs(status);
@@ -819,28 +822,6 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
chained_irq_exit(host_chip, desc);
}
-static void nmk_gpio_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status;
-
- clk_enable(nmk_chip->clk);
- status = readl(nmk_chip->addr + NMK_GPIO_IS);
- clk_disable(nmk_chip->clk);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
-static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
/* I/O Functions */
static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
@@ -1103,8 +1084,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
struct irq_chip *irqchip;
- int latent_irq;
bool supports_sleepmode;
int irq;
int ret;
@@ -1125,15 +1106,10 @@ static int nmk_gpio_probe(struct platform_device *dev)
if (irq < 0)
return irq;
- /* It's OK for this IRQ not to be present */
- latent_irq = platform_get_irq(dev, 1);
-
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->parent_irq = irq;
- nmk_chip->latent_parent_irq = latent_irq;
nmk_chip->sleepmode = supports_sleepmode;
spin_lock_init(&nmk_chip->lock);
@@ -1163,6 +1139,19 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->base,
chip->base + chip->ngpio - 1);
+ girq = &chip->irq;
+ girq->chip = irqchip;
+ girq->parent_handler = nmk_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&dev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
@@ -1174,33 +1163,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- /*
- * Let the generic code handle this edge IRQ, the the chained
- * handler will perform the actual work of handling the parent
- * interrupt.
- */
- ret = gpiochip_irqchip_add(chip,
- irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&dev->dev, "could not add irqchip\n");
- gpiochip_remove(&nmk_chip->chip);
- return -ENODEV;
- }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->parent_irq,
- nmk_gpio_irq_handler);
- if (nmk_chip->latent_parent_irq > 0)
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->latent_parent_irq,
- nmk_gpio_latent_irq_handler);
-
- dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
+ dev_info(&dev->dev, "chip registered\n");
return 0;
}
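
With the latent-IRQ plumbing gone, nmk_gpio_irq_handler() is a textbook chained handler: it brackets itself with chained_irq_enter()/exit(), reads the hardware status itself, and demultiplexes each set bit into the gpiochip's irqdomain. The general shape, reduced to a sketch (the status read is hypothetical):

#include <linux/gpio/driver.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

static unsigned long hypo_read_pending(struct gpio_chip *gc); /* hypothetical */

static void hypo_gpio_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *parent = irq_desc_get_chip(desc);
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	unsigned long status;
	int bit;

	chained_irq_enter(parent, desc);

	status = hypo_read_pending(gc);	/* e.g. an interrupt-status register */
	for_each_set_bit(bit, &status, gc->ngpio)
		generic_handle_irq(irq_find_mapping(gc->irq.domain, bit));

	chained_irq_exit(parent, desc);
}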
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 17f909d8b63a..22077cbe6880 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1954,6 +1954,22 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
int ret, id;
for (id = 0 ; id < pctrl->bank_num ; id++) {
+ struct gpio_irq_chip *girq;
+
+ girq = &pctrl->gpio_bank[id].gc.irq;
+ girq->chip = &pctrl->gpio_bank[id].irq_chip;
+ girq->parent_handler = npcmgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctrl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
+ goto err_register;
+ }
+ girq->parents[0] = pctrl->gpio_bank[id].irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
ret = devm_gpiochip_add_data(pctrl->dev,
&pctrl->gpio_bank[id].gc,
&pctrl->gpio_bank[id]);
@@ -1972,22 +1988,6 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
gpiochip_remove(&pctrl->gpio_bank[id].gc);
goto err_register;
}
-
- ret = gpiochip_irqchip_add(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(pctrl->dev,
- "Failed to add IRQ chip %u\n", id);
- gpiochip_remove(&pctrl->gpio_bank[id].gc);
- goto err_register;
- }
-
- gpiochip_set_chained_irqchip(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- pctrl->gpio_bank[id].irq,
- npcmgpio_irq_handler);
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 2c61141519f8..eab078244a4c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -540,7 +540,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
- u32 *regs, regval;
+ u32 __iomem *regs;
+ u32 regval;
u64 status, mask;
/* Read the wake status */
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index e3239cf926f9..986e04ac6b5b 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -936,7 +936,6 @@ static void artpec6_pmx_reset(struct artpec6_pmx *pmx)
static int artpec6_pmx_probe(struct platform_device *pdev)
{
struct artpec6_pmx *pmx;
- struct resource *res;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx)
@@ -944,8 +943,7 @@ static int artpec6_pmx_probe(struct platform_device *pdev)
pmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->base = devm_ioremap_resource(&pdev->dev, res);
+ pmx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->base))
return PTR_ERR(pmx->base);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index d6de4d360cd4..694912409fd9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -328,6 +328,33 @@ static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & BIT(pin->line));
}
+static int atmel_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ bitmap_zero(bits, atmel_pioctrl->npins);
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int word = bank;
+ unsigned int offset = 0;
+ unsigned int reg;
+
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+ offset = bank * ATMEL_PIO_NPINS_PER_BANK % BITS_PER_LONG;
+#endif
+ if (!mask[word])
+ continue;
+
+ reg = atmel_gpio_read(atmel_pioctrl, bank, ATMEL_PIO_PDSR);
+ bits[word] |= mask[word] & (reg << offset);
+ }
+
+ return 0;
+}
+
static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
@@ -358,11 +385,46 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(pin->line));
}
+static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int bitmask;
+ unsigned int word = bank;
+
+/*
+ * On a 64-bit platform, BITS_PER_LONG is 64, so it is necessary to iterate
+ * over two 32-bit words to handle the whole bitmask.
+ */
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+#endif
+ if (!mask[word])
+ continue;
+
+ bitmask = mask[word] & bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_SODR, bitmask);
+
+ bitmask = mask[word] & ~bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_CODR, bitmask);
+
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ mask[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+ bits[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+#endif
+ }
+}
+
static struct gpio_chip atmel_gpio_chip = {
.direction_input = atmel_gpio_direction_input,
.get = atmel_gpio_get,
+ .get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
.set = atmel_gpio_set,
+ .set_multiple = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -955,8 +1017,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
+ atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
return -EINVAL;
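
The #if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG branches above exist because on a 64-bit kernel two 32-pin banks share one unsigned long of the gpiolib bitmap. The word/offset arithmetic can be sanity-checked in isolation with a small standalone program (userspace C, same math):

#include <stdio.h>

#define NPINS_PER_BANK	32	/* mirrors ATMEL_PIO_NPINS_PER_BANK */
#define LONG_BITS	(8 * (int)sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / LONG_BITS)

int main(void)
{
	unsigned int bank;

	for (bank = 0; bank < 4; bank++)
		printf("bank %u -> word %u, bit offset %u\n", bank,
		       (unsigned int)BIT_WORD(bank * NPINS_PER_BANK),
		       bank * NPINS_PER_BANK % LONG_BITS);

	/* On a 64-bit build: banks 0,1 land in word 0 at offsets 0,32;
	 * banks 2,3 land in word 1 at offsets 0,32. */
	return 0;
}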
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d6e7e9f0ddec..207f266e9cf2 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -85,8 +85,8 @@ enum drive_strength_bit {
DRIVE_STRENGTH_SHIFT)
enum slewrate_bit {
- SLEWRATE_BIT_DIS,
SLEWRATE_BIT_ENA,
+ SLEWRATE_BIT_DIS,
};
#define SLEWRATE_BIT_MSK(name) (SLEWRATE_BIT_##name << SLEWRATE_SHIFT)
@@ -669,7 +669,7 @@ static void at91_mux_sam9x60_set_slewrate(void __iomem *pio, unsigned pin,
{
unsigned int tmp;
- if (setting < SLEWRATE_BIT_DIS || setting > SLEWRATE_BIT_ENA)
+ if (setting < SLEWRATE_BIT_ENA || setting > SLEWRATE_BIT_DIS)
return;
tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR);
@@ -1723,9 +1723,11 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
struct irq_chip *gpio_irqchip;
- int ret, i;
+ struct gpio_irq_chip *girq;
+ int i;
- gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL);
+ gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip),
+ GFP_KERNEL);
if (!gpio_irqchip)
return -ENOMEM;
@@ -1747,33 +1749,30 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
* handler will perform the actual work of handling the parent
* interrupt.
*/
- ret = gpiochip_irqchip_add(&at91_gpio->chip,
- gpio_irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n",
- at91_gpio->pioc_idx);
- return ret;
- }
+ girq = &at91_gpio->chip.irq;
+ girq->chip = gpio_irqchip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- /* The top level handler handles one bank of GPIOs, except
+ /*
+ * The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
if (!gpiochip_prev) {
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ girq->parent_handler = gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = at91_gpio->pioc_virq;
return 0;
}
prev = gpiochip_get_data(gpiochip_prev);
-
/* we can only have 2 banks before */
for (i = 0; i < 2; i++) {
if (prev->next) {
@@ -1812,7 +1811,6 @@ static const struct of_device_id at91_gpio_of_match[] = {
static int at91_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct at91_gpio_chip *at91_chip = NULL;
struct gpio_chip *chip;
struct pinctrl_gpio_range *range;
@@ -1840,8 +1838,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_chip->regbase = devm_ioremap_resource(&pdev->dev, res);
+ at91_chip->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(at91_chip->regbase)) {
ret = PTR_ERR(at91_chip->regbase);
goto err;
@@ -1903,6 +1900,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
range->npins = chip->ngpio;
range->gc = chip;
+ ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ if (ret)
+ goto gpiochip_add_err;
+
ret = gpiochip_add_data(chip, at91_chip);
if (ret)
goto gpiochip_add_err;
@@ -1910,16 +1911,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- ret = at91_gpio_of_irq_setup(pdev, at91_chip);
- if (ret)
- goto irq_setup_err;
-
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
return 0;
-irq_setup_err:
- gpiochip_remove(chip);
gpiochip_add_err:
clk_enable_err:
clk_disable_unprepare(at91_chip->clock);
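
The at91 reorder is a consequence of the same gpio_irq_chip model: the irq description must be complete before gpiochip_add_data() runs, and a failure during setup no longer needs gpiochip_remove() because nothing has been registered yet. Schematically, with hypothetical helper names:

static int hypo_describe_irqchip(struct gpio_chip *chip);	/* hypothetical */

static int hypo_probe(struct gpio_chip *chip, void *priv)
{
	int ret;

	ret = hypo_describe_irqchip(chip);	/* fills chip->irq first */
	if (ret)
		return ret;	/* nothing registered yet, nothing to undo */

	return gpiochip_add_data(chip, priv);	/* instantiates the irqchip */
}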
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index 63b130cb1ffb..f7dff4f14101 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -1308,15 +1308,13 @@ static struct pinctrl_desc bm1880_desc = {
static int bm1880_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
struct bm1880_pinctrl *pctrl;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 08b9e909e917..2905348ff430 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -615,7 +615,7 @@ static struct coh901_pinpair coh901_pintable[] = {
static int __init u300_gpio_probe(struct platform_device *pdev)
{
struct u300_gpio *gpio;
- struct resource *memres;
+ struct gpio_irq_chip *girq;
int err = 0;
int portno;
u32 val;
@@ -632,8 +632,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->chip.base = 0;
gpio->dev = &pdev->dev;
- memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, memres);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
@@ -672,26 +671,17 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->base + U300_GPIO_CR);
u300_gpio_init_coh901571(gpio);
-#ifdef CONFIG_OF_GPIO
- gpio->chip.of_node = pdev->dev.of_node;
-#endif
- err = gpiochip_add_data(&gpio->chip, gpio);
- if (err) {
- dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
- goto err_no_chip;
- }
-
- err = gpiochip_irqchip_add(&gpio->chip,
- &u300_gpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_EDGE_FALLING);
- if (err) {
- dev_err(gpio->dev, "no GPIO irqchip\n");
- goto err_no_irqchip;
+ girq = &gpio->chip.irq;
+ girq->chip = &u300_gpio_irqchip;
+ girq->parent_handler = u300_gpio_irq_handler;
+ girq->num_parents = U300_GPIO_NUM_PORTS;
+ girq->parents = devm_kcalloc(gpio->dev, U300_GPIO_NUM_PORTS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ err = -ENOMEM;
+ goto err_dis_clk;
}
-
- /* Add each port with its IRQ separately */
for (portno = 0 ; portno < U300_GPIO_NUM_PORTS; portno++) {
struct u300_gpio_port *port = &gpio->ports[portno];
@@ -700,16 +690,21 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
port->gpio = gpio;
port->irq = platform_get_irq(pdev, portno);
-
- gpiochip_set_chained_irqchip(&gpio->chip,
- &u300_gpio_irqchip,
- port->irq,
- u300_gpio_irq_handler);
+ girq->parents[portno] = port->irq;
/* Turns off irq force (test register) for this port */
writel(0x0, gpio->base + portno * gpio->stride + ifr);
}
- dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
+ girq->default_type = IRQ_TYPE_EDGE_FALLING;
+ girq->handler = handle_simple_irq;
+#ifdef CONFIG_OF_GPIO
+ gpio->chip.of_node = pdev->dev.of_node;
+#endif
+ err = gpiochip_add_data(&gpio->chip, gpio);
+ if (err) {
+ dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+ goto err_dis_clk;
+ }
/*
* Add pinctrl pin ranges, the pin controller must be registered
@@ -729,9 +724,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
return 0;
err_no_range:
-err_no_irqchip:
gpiochip_remove(&gpio->chip);
-err_no_chip:
+err_dis_clk:
clk_disable_unprepare(gpio->clk);
dev_err(&pdev->dev, "module ERROR:%d\n", err);
return err;
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index d06f13a79740..5a0a1f20c843 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -146,14 +146,12 @@ static int da850_pupd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da850_pupd_data *data;
- struct resource *res;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base)) {
dev_err(dev, "Could not map resource\n");
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 7e1ceee5895b..ff702cfbaa28 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -270,7 +270,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
static int dc_pinctrl_probe(struct platform_device *pdev)
{
struct dc_pinmap *pmap;
- struct resource *r;
struct pinctrl_pin_desc *pins;
struct pinctrl_desc *pctl_desc;
char *pin_names;
@@ -281,8 +280,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
if (!pmap)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+ pmap->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmap->regs))
return PTR_ERR(pmap->regs);
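
The new pinctrl-equilibrium driver added below round-trips configs through the generic pinconf helpers (pinconf_to_config_param(), pinconf_to_config_argument(), pinconf_to_config_packed()). Their packed layout, as defined by pinconf-generic at the time of this series, keeps the parameter in the low 8 bits with the argument above it; a standalone demonstration of the same math (layout assumed, check include/linux/pinctrl/pinconf-generic.h in your tree):

#include <stdio.h>

/* Mirrors the PIN_CONF_PACKED() layout: 8-bit param, argument above it. */
static unsigned long pack(unsigned int param, unsigned int arg)
{
	return ((unsigned long)arg << 8) | (param & 0xffUL);
}

static unsigned int to_param(unsigned long config) { return config & 0xff; }
static unsigned int to_arg(unsigned long config)   { return config >> 8; }

int main(void)
{
	unsigned long c = pack(5, 1);	/* e.g. a bias parameter, argument 1 */

	printf("param=%u arg=%u\n", to_param(c), to_arg(c));
	return 0;
}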
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
new file mode 100644
index 000000000000..36c9072c5ece
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Intel Corporation */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinmux.h"
+#include "pinctrl-equilibrium.h"
+
+#define PIN_NAME_FMT "io-%d"
+#define PIN_NAME_LEN 10
+#define PAD_REG_OFF 0x100
+
+static void eqbr_gpio_disable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_enable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ gc->direction_input(gc, offset);
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNRNSET);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_ack_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
+{
+ eqbr_gpio_disable_irq(d);
+ eqbr_gpio_ack_irq(d);
+}
+
+static inline void eqbr_cfg_bit(void __iomem *addr,
+ unsigned int offset, unsigned int set)
+{
+ if (set)
+ writel(readl(addr) | BIT(offset), addr);
+ else
+ writel(readl(addr) & ~BIT(offset), addr);
+}
+
+static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ struct eqbr_gpio_ctrl *gctrl,
+ unsigned int offset)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ eqbr_cfg_bit(gctrl->membase + GPIO_IRNCFG, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR1, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR0, offset, type->logic_type);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+
+ return 0;
+}
+
+static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ struct gpio_irq_type it;
+
+ memset(&it, 0, sizeof(it));
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE)
+ return 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_BOTH_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ eqbr_irq_type_cfg(&it, gctrl, offset);
+ if (it.trig_type == GPIO_EDGE_TRIG)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ return 0;
+}
+
+static void eqbr_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ unsigned long pins, offset;
+
+ chained_irq_enter(ic, desc);
+ pins = readl(gctrl->membase + GPIO_IRNCR);
+
+ for_each_set_bit(offset, &pins, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+
+ chained_irq_exit(ic, desc);
+}
+
+static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
+{
+ struct gpio_irq_chip *girq;
+ struct gpio_chip *gc;
+
+ gc = &gctrl->chip;
+ gc->label = gctrl->name;
+#if defined(CONFIG_OF_GPIO)
+ gc->of_node = gctrl->node;
+#endif
+
+ if (!of_property_read_bool(gctrl->node, "interrupt-controller")) {
+ dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n",
+ gctrl->name);
+ return 0;
+ }
+
+ gctrl->ic.name = "gpio_irq";
+ gctrl->ic.irq_mask = eqbr_gpio_disable_irq;
+ gctrl->ic.irq_unmask = eqbr_gpio_enable_irq;
+ gctrl->ic.irq_ack = eqbr_gpio_ack_irq;
+ gctrl->ic.irq_mask_ack = eqbr_gpio_mask_ack_irq;
+ gctrl->ic.irq_set_type = eqbr_gpio_set_irq_type;
+
+ girq = &gctrl->chip.irq;
+ girq->chip = &gctrl->ic;
+ girq->parent_handler = eqbr_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = gctrl->virq;
+
+ return 0;
+}
+
+static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_gpio_ctrl *gctrl;
+ struct device_node *np;
+ struct resource res;
+ int i, ret;
+
+ for (i = 0; i < drvdata->nr_gpio_ctrls; i++) {
+ gctrl = drvdata->gpio_ctrls + i;
+ np = gctrl->node;
+
+ gctrl->name = devm_kasprintf(dev, GFP_KERNEL, "gpiochip%d", i);
+ if (!gctrl->name)
+ return -ENOMEM;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(dev, "Failed to get GPIO register address\n");
+ return -ENXIO;
+ }
+
+ gctrl->membase = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(gctrl->membase))
+ return PTR_ERR(gctrl->membase);
+
+ gctrl->virq = irq_of_parse_and_map(np, 0);
+ if (!gctrl->virq) {
+ dev_err(dev, "%s: failed to parse and map irq\n",
+ gctrl->name);
+ return -ENXIO;
+ }
+ raw_spin_lock_init(&gctrl->lock);
+
+ ret = bgpio_init(&gctrl->chip, dev, gctrl->bank->nr_pins / 8,
+ gctrl->membase + GPIO_IN,
+ gctrl->membase + GPIO_OUTSET,
+ gctrl->membase + GPIO_OUTCLR,
+ gctrl->membase + GPIO_DIR,
+ NULL, 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ ret = gpiochip_setup(dev, gctrl);
+ if (ret)
+ return ret;
+
+ ret = devm_gpiochip_add_data(dev, &gctrl->chip, gctrl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct eqbr_pin_bank
+*find_pinbank_via_pin(struct eqbr_pinctrl_drv_data *pctl, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ int i;
+
+ for (i = 0; i < pctl->nr_banks; i++) {
+ bank = &pctl->pin_banks[i];
+ if (pin >= bank->pin_base &&
+ (pin - bank->pin_base) < bank->nr_pins)
+ return bank;
+ }
+
+ return NULL;
+}
+
+static const struct pinctrl_ops eqbr_pctl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int eqbr_set_pin_mux(struct eqbr_pinctrl_drv_data *pctl,
+ unsigned int pmx, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ writel(pmx, mem + (offset * 4));
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return 0;
+}
+
+static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ unsigned int *pinmux;
+ int i;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ pinmux = grp->data;
+ for (i = 0; i < grp->num_pins; i++)
+ eqbr_set_pin_mux(pctl, pinmux[i], grp->pins[i]);
+
+ return 0;
+}
+
+static int eqbr_pinmux_gpio_request(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return eqbr_set_pin_mux(pctl, EQBR_GPIO_MODE, pin);
+}
+
+static const struct pinmux_ops eqbr_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = eqbr_pinmux_set_mux,
+ .gpio_request_enable = eqbr_pinmux_gpio_request,
+ .strict = true,
+};
+
+static int get_drv_cur(void __iomem *mem, unsigned int offset)
+{
+ unsigned int idx = offset / DRV_CUR_PINS; /* 0-15, 16-31 per register */
+ unsigned int pin_offset = offset % DRV_CUR_PINS;
+
+ return PARSE_DRV_CURRENT(readl(mem + REG_DRCC(idx)), pin_offset);
+}
+
+static struct eqbr_gpio_ctrl
+*get_gpio_ctrls_via_bank(struct eqbr_pinctrl_drv_data *pctl,
+ struct eqbr_pin_bank *bank)
+{
+ int i;
+
+ for (i = 0; i < pctl->nr_gpio_ctrls; i++) {
+ if (pctl->gpio_ctrls[i].bank == bank)
+ return &pctl->gpio_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int eqbr_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct eqbr_gpio_ctrl *gctrl;
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+ u32 val;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = !!(readl(mem + REG_PUEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = !!(readl(mem + REG_PDEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ val = !!(readl(mem + REG_OD) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val = get_drv_cur(mem, offset);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ val = !!(readl(mem + REG_SRC) & BIT(offset));
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENODEV;
+ }
+ val = !!(readl(gctrl->membase + GPIO_DIR) & BIT(offset));
+ break;
+ default:
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENOTSUPP;
+ }
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ *config = pinconf_to_config_packed(param, val);
+ return 0;
+}
+
+static int eqbr_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct eqbr_gpio_ctrl *gctrl;
+ enum pin_config_param param;
+ struct eqbr_pin_bank *bank;
+ unsigned int val, offset;
+ struct gpio_chip *gc;
+ unsigned long flags;
+ void __iomem *mem;
+ u32 regval, mask;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ val = pinconf_to_config_argument(configs[i]);
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev,
+ "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mem += REG_PUEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mem += REG_PDEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mem += REG_OD;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
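+ /* 2 bits per pin: rebase the offset and mask into the DRCC register */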
+ mem += REG_DRCC(offset / DRV_CUR_PINS);
+ offset = (offset % DRV_CUR_PINS) * 2;
+ mask = GENMASK(1, 0) << offset;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ mem += REG_SRC;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ return -ENODEV;
+ }
+ gc = &gctrl->chip;
+ gc->direction_output(gc, offset, 0);
+ continue;
+ default:
+ return -ENOTSUPP;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ regval = readl(mem);
+ regval = (regval & ~mask) | ((val << offset) & mask);
+ writel(regval, mem);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ }
+
+ return 0;
+}
+
+static int eqbr_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ unsigned int i, npins, old = 0;
+ const unsigned int *pins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ if (eqbr_pinconf_get(pctldev, pins[i], config))
+ return -ENOTSUPP;
+
+ if (i && old != *config)
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+ return 0;
+}
+
+static int eqbr_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = eqbr_pinconf_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct pinconf_ops eqbr_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eqbr_pinconf_get,
+ .pin_config_set = eqbr_pinconf_set,
+ .pin_config_group_get = eqbr_pinconf_group_get,
+ .pin_config_group_set = eqbr_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static bool is_func_exist(struct eqbr_pmx_func *funcs, const char *name,
+ unsigned int nr_funcs, unsigned int *idx)
+{
+ int i;
+
+ if (!funcs)
+ return false;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (funcs[i].name && !strcmp(funcs[i].name, name)) {
+ *idx = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
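+/*
+ * One walker over the DT child nodes, reused in four passes: count the
+ * distinct functions, record their names, count the groups per function,
+ * and finally attach each group name to its function.
+ */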
+static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
+ unsigned int *nr_funcs, funcs_util_ops op)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *np;
+ struct property *prop;
+ const char *fn_name;
+ unsigned int fid;
+ int i, j;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ if (of_property_read_string(np, "function", &fn_name)) {
+ /* Some groups may not have a function; that's fine */
+ dev_dbg(dev, "Group %s: no function bound\n",
+ (char *)prop->value);
+ continue;
+ }
+
+ switch (op) {
+ case OP_COUNT_NR_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ *nr_funcs = *nr_funcs + 1;
+ break;
+
+ case OP_ADD_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[i].name = fn_name;
+ break;
+
+ case OP_COUNT_NR_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[fid].nr_groups++;
+ break;
+
+ case OP_ADD_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid)) {
+ for (j = 0; j < funcs[fid].nr_groups; j++)
+ if (!funcs[fid].groups[j])
+ break;
+ funcs[fid].groups[j] = prop->value;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_pmx_func *funcs = NULL;
+ unsigned int nr_funcs = 0;
+ int i, ret;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNCS);
+ if (ret)
+ return ret;
+
+ funcs = devm_kcalloc(dev, nr_funcs, sizeof(*funcs), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNCS);
+ if (ret)
+ return ret;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (!funcs[i].nr_groups)
+ continue;
+ funcs[i].groups = devm_kcalloc(dev, funcs[i].nr_groups,
+ sizeof(*(funcs[i].groups)),
+ GFP_KERNEL);
+ if (!funcs[i].groups)
+ return -ENOMEM;
+ }
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ ret = pinmux_generic_add_function(drvdata->pctl_dev,
+ funcs[i].name,
+ funcs[i].groups,
+ funcs[i].nr_groups,
+ drvdata);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register function %s\n",
+ funcs[i].name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
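+/*
+ * Every DT child node with a "groups" property describes one pin group;
+ * its "pins" and "pinmux" arrays are read in lockstep, one mux value
+ * per pin.
+ */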
+static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *node = dev->of_node;
+ unsigned int *pinmux, pin_id, pinmux_id;
+ struct group_desc group;
+ struct device_node *np;
+ struct property *prop;
+ int j, err;
+
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ group.num_pins = of_property_count_u32_elems(np, "pins");
+ if (group.num_pins < 0) {
+ dev_err(dev, "No pins in the group: %s\n", prop->name);
+ return -EINVAL;
+ }
+ group.name = prop->value;
+ group.pins = devm_kcalloc(dev, group.num_pins,
+ sizeof(*(group.pins)), GFP_KERNEL);
+ if (!group.pins)
+ return -ENOMEM;
+
+ pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
+ GFP_KERNEL);
+ if (!pinmux)
+ return -ENOMEM;
+
+ for (j = 0; j < group.num_pins; j++) {
+ if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
+ dev_err(dev, "Group %s: Read intel pins id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ if (pin_id >= drvdata->pctl_desc.npins) {
+ dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
+ group.name, j, pin_id);
+ return -EINVAL;
+ }
+ group.pins[j] = pin_id;
+ if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
+ dev_err(dev, "Group %s: Read intel pinmux id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ pinmux[j] = pinmux_id;
+ }
+
+ err = pinctrl_generic_add_group(drvdata->pctl_dev, group.name,
+ group.pins, group.num_pins,
+ pinmux);
+ if (err < 0) {
+ dev_err(dev, "Failed to register group %s\n", group.name);
+ return err;
+ }
+ memset(&group, 0, sizeof(group));
+ pinmux = NULL;
+ }
+
+ return 0;
+}
+
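+/*
+ * Register the pin controller: generate a descriptor entry and a name
+ * for every pin across all banks, then build the groups and functions
+ * from the device tree before enabling the controller.
+ */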
+static int pinctrl_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct pinctrl_desc *pctl_desc;
+ struct pinctrl_pin_desc *pdesc;
+ struct device *dev;
+ unsigned int nr_pins;
+ char *pin_names;
+ int i, ret;
+
+ dev = drvdata->dev;
+ pctl_desc = &drvdata->pctl_desc;
+ pctl_desc->name = "eqbr-pinctrl";
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &eqbr_pctl_ops;
+ pctl_desc->pmxops = &eqbr_pinmux_ops;
+ pctl_desc->confops = &eqbr_pinconf_ops;
+ raw_spin_lock_init(&drvdata->lock);
+
+ for (i = 0, nr_pins = 0; i < drvdata->nr_banks; i++)
+ nr_pins += drvdata->pin_banks[i].nr_pins;
+
+ pdesc = devm_kcalloc(dev, nr_pins, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+ pin_names = devm_kcalloc(dev, nr_pins, PIN_NAME_LEN, GFP_KERNEL);
+ if (!pin_names)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pins; i++) {
+ sprintf(pin_names, PIN_NAME_FMT, i);
+ pdesc[i].number = i;
+ pdesc[i].name = pin_names;
+ pin_names += PIN_NAME_LEN;
+ }
+ pctl_desc->pins = pdesc;
+ pctl_desc->npins = nr_pins;
+ dev_dbg(dev, "pinctrl total pin number: %u\n", nr_pins);
+
+ ret = devm_pinctrl_register_and_init(dev, pctl_desc, drvdata,
+ &drvdata->pctl_dev);
+ if (ret)
+ return ret;
+
+ ret = eqbr_build_groups(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ ret = eqbr_build_functions(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ return pinctrl_enable(drvdata->pctl_dev);
+}
+
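+/*
+ * A bank's pad registers sit at a fixed stride from the controller base
+ * (id * PAD_REG_OFF); its pin range comes from the second and third
+ * cells of the "gpio-ranges" phandle arguments.
+ */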
+static int pinbank_init(struct device_node *np,
+ struct eqbr_pinctrl_drv_data *drvdata,
+ struct eqbr_pin_bank *bank, unsigned int id)
+{
+ struct device *dev = drvdata->dev;
+ struct of_phandle_args spec;
+ int ret;
+
+ bank->membase = drvdata->membase + id * PAD_REG_OFF;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &spec);
+ if (ret) {
+ dev_err(dev, "gpio-range not available!\n");
+ return ret;
+ }
+
+ bank->pin_base = spec.args[1];
+ bank->nr_pins = spec.args[2];
+
+ bank->aval_pinmap = readl(bank->membase + REG_AVAIL);
+ bank->id = id;
+
+ dev_dbg(dev, "pinbank id: %d, reg: %px, pinbase: %u, pin number: %u, pinmap: 0x%x\n",
+ id, bank->membase, bank->pin_base,
+ bank->nr_pins, bank->aval_pinmap);
+
+ return ret;
+}
+
+static int pinbank_probe(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *np_gpio;
+ struct eqbr_gpio_ctrl *gctrls;
+ struct eqbr_pin_bank *banks;
+ int i, nr_gpio;
+
+ /* Count gpio bank number */
+ nr_gpio = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (of_device_is_available(np_gpio))
+ nr_gpio++;
+ }
+
+ if (!nr_gpio) {
+ dev_err(dev, "NO pin bank available!\n");
+ return -ENODEV;
+ }
+
+ /* Allocate pin bank and gpio controller descriptors */
+ banks = devm_kcalloc(dev, nr_gpio, sizeof(*banks), GFP_KERNEL);
+ if (!banks)
+ return -ENOMEM;
+
+ gctrls = devm_kcalloc(dev, nr_gpio, sizeof(*gctrls), GFP_KERNEL);
+ if (!gctrls)
+ return -ENOMEM;
+
+ dev_dbg(dev, "found %d gpio controller!\n", nr_gpio);
+
+ /* Initialize Pin bank */
+ i = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (!of_device_is_available(np_gpio))
+ continue;
+
+ pinbank_init(np_gpio, drvdata, banks + i, i);
+
+ gctrls[i].node = np_gpio;
+ gctrls[i].bank = banks + i;
+ i++;
+ }
+
+ drvdata->pin_banks = banks;
+ drvdata->nr_banks = nr_gpio;
+ drvdata->gpio_ctrls = gctrls;
+ drvdata->nr_gpio_ctrls = nr_gpio;
+
+ return 0;
+}
+
+static int eqbr_pinctrl_probe(struct platform_device *pdev)
+{
+ struct eqbr_pinctrl_drv_data *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ drvdata->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->membase))
+ return PTR_ERR(drvdata->membase);
+
+ ret = pinbank_probe(drvdata);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_reg(drvdata);
+ if (ret)
+ return ret;
+
+ ret = gpiolib_reg(drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drvdata);
+ return 0;
+}
+
+static const struct of_device_id eqbr_pinctrl_dt_match[] = {
+ { .compatible = "intel,lgm-io" },
+ {}
+};
+
+static struct platform_driver eqbr_pinctrl_driver = {
+ .probe = eqbr_pinctrl_probe,
+ .driver = {
+ .name = "eqbr-pinctrl",
+ .of_match_table = eqbr_pinctrl_dt_match,
+ },
+};
+
+module_platform_driver(eqbr_pinctrl_driver);
+
+MODULE_AUTHOR("Zhu Yixin <yixin.zhu@intel.com>, Rahul Tanwar <rahul.tanwar@intel.com>");
+MODULE_DESCRIPTION("Pinctrl Driver for LGM SoC (Equilibrium)");
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
new file mode 100644
index 000000000000..83cb7dafc657
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef __PINCTRL_EQUILIBRIUM_H
+#define __PINCTRL_EQUILIBRIUM_H
+
+/* PINPAD register offset */
+#define REG_PMX_BASE 0x0 /* Port Multiplexer Control Register */
+#define REG_PUEN 0x80 /* PULL UP Enable Register */
+#define REG_PDEN 0x84 /* PULL DOWN Enable Register */
+#define REG_SRC 0x88 /* Slew Rate Control Register */
+#define REG_DCC0 0x8C /* Drive Current Control Register 0 */
+#define REG_DCC1 0x90 /* Drive Current Control Register 1 */
+#define REG_OD 0x94 /* Open Drain Enable Register */
+#define REG_AVAIL 0x98 /* Pad Control Availability Register */
+#define DRV_CUR_PINS 16 /* Drive Current pin number per register */
+#define REG_DRCC(x) (REG_DCC0 + (x) * 4) /* Driver current macro */
+
+/* GPIO register offset */
+#define GPIO_OUT 0x0 /* Data Output Register */
+#define GPIO_IN 0x4 /* Data Input Register */
+#define GPIO_DIR 0x8 /* Direction Register */
+#define GPIO_EXINTCR0 0x18 /* External Interrupt Control Register 0 */
+#define GPIO_EXINTCR1 0x1C /* External Interrupt Control Register 1 */
+#define GPIO_IRNCR 0x20 /* IRN Capture Register */
+#define GPIO_IRNICR 0x24 /* IRN Interrupt Control Register */
+#define GPIO_IRNEN 0x28 /* IRN Interrupt Enable Register */
+#define GPIO_IRNCFG 0x2C /* IRN Interrupt Configuration Register */
+#define GPIO_IRNRNSET 0x30 /* IRN Interrupt Enable Set Register */
+#define GPIO_IRNENCLR 0x34 /* IRN Interrupt Enable Clear Register */
+#define GPIO_OUTSET 0x40 /* Output Set Register */
+#define GPIO_OUTCLR 0x44 /* Output Clear Register */
+#define GPIO_DIRSET 0x48 /* Direction Set Register */
+#define GPIO_DIRCLR 0x4C /* Direction Clear Register */
+
+/* parse given pin's driver current value */
+#define PARSE_DRV_CURRENT(val, pin) (((val) >> ((pin) * 2)) & 0x3)
+
+#define GPIO_EDGE_TRIG 0
+#define GPIO_LEVEL_TRIG 1
+#define GPIO_SINGLE_EDGE 0
+#define GPIO_BOTH_EDGE 1
+#define GPIO_POSITIVE_TRIG 0
+#define GPIO_NEGATIVE_TRIG 1
+
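+/* Mux value 0 selects the GPIO function on a pad */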
+#define EQBR_GPIO_MODE 0
+
+typedef enum {
+ OP_COUNT_NR_FUNCS,
+ OP_ADD_FUNCS,
+ OP_COUNT_NR_FUNC_GRPS,
+ OP_ADD_FUNC_GRPS,
+ OP_NONE,
+} funcs_util_ops;
+
+/**
+ * struct gpio_irq_type: gpio irq configuration
+ * @trig_type: level trigger or edge trigger
+ * @edge_type: single edge or both edges
+ * @logic_type: positive trigger or negative trigger
+ */
+struct gpio_irq_type {
+ unsigned int trig_type;
+ unsigned int edge_type;
+ unsigned int logic_type;
+};
+
+/**
+ * struct eqbr_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @nr_groups: number of groups included in @groups.
+ */
+struct eqbr_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned int nr_groups;
+};
+
+/**
+ * struct eqbr_pin_bank: represent a pin bank.
+ * @membase: base address of the pin bank register.
+ * @id: bank id, to identify the unique bank.
+ * @pin_base: starting pin number of the pin bank.
+ * @nr_pins: number of the pins of the pin bank.
+ * @aval_pinmap: available pin bitmap of the pin bank.
+ */
+struct eqbr_pin_bank {
+ void __iomem *membase;
+ unsigned int id;
+ unsigned int pin_base;
+ unsigned int nr_pins;
+ u32 aval_pinmap;
+};
+
+/**
+ * struct eqbr_gpio_ctrl: represent a gpio controller.
+ * @node: device node of gpio controller.
+ * @bank: pointer to corresponding pin bank.
+ * @membase: base address of the gpio controller.
+ * @chip: gpio chip.
+ * @ic: irq chip.
+ * @name: gpio chip name.
+ * @virq: irq number of the gpio chip in the parent irq domain.
+ * @lock: spin lock to protect gpio register write.
+ */
+struct eqbr_gpio_ctrl {
+ struct device_node *node;
+ struct eqbr_pin_bank *bank;
+ void __iomem *membase;
+ struct gpio_chip chip;
+ struct irq_chip ic;
+ const char *name;
+ unsigned int virq;
+ raw_spinlock_t lock; /* protect gpio register */
+};
+
+/**
+ * struct eqbr_pinctrl_drv_data:
+ * @dev: device instance representing the controller.
+ * @pctl_desc: pin controller descriptor.
+ * @pctl_dev: pin control class device.
+ * @membase: base address of pin controller.
+ * @pin_banks: list of pin banks of the driver.
+ * @nr_banks: number of pin banks.
+ * @gpio_ctrls: list of gpio controllers.
+ * @nr_gpio_ctrls: number of gpio controllers.
+ * @lock: protect pinctrl register write.
+ */
+struct eqbr_pinctrl_drv_data {
+ struct device *dev;
+ struct pinctrl_desc pctl_desc;
+ struct pinctrl_dev *pctl_dev;
+ void __iomem *membase;
+ struct eqbr_pin_bank *pin_banks;
+ unsigned int nr_banks;
+ struct eqbr_gpio_ctrl *gpio_ctrls;
+ unsigned int nr_gpio_ctrls;
+ raw_spinlock_t lock; /* protect pinpad register */
+};
+
+#endif /* __PINCTRL_EQUILIBRIUM_H */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6e2683016c1f..24e0e2ef47a4 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -686,6 +686,7 @@ static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
+static int jz4770_otg_pins[] = { 0x8a, };
static int jz4770_uart0_data_funcs[] = { 0, 0, };
static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
@@ -744,6 +745,7 @@ static int jz4770_pwm_pwm6_funcs[] = { 0, };
static int jz4770_pwm_pwm7_funcs[] = { 0, };
static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
static int jz4770_mac_mii_funcs[] = { 0, 0, };
+static int jz4770_otg_funcs[] = { 0, };
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -799,6 +801,7 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii),
INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii),
+ INGENIC_PIN_GROUP("otg-vbus", jz4770_otg),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -841,6 +844,7 @@ static const char *jz4770_pwm5_groups[] = { "pwm5", };
static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
+static const char *jz4770_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4770_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -871,6 +875,7 @@ static const struct function_desc jz4770_functions[] = {
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
{ "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
+ { "otg", jz4770_otg_groups, ARRAY_SIZE(jz4770_otg_groups), },
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -1801,19 +1806,30 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
}
+static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool high)
+{
+ if (jzpc->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+}
+
static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- unsigned int cfg;
+ unsigned int cfg, arg;
+ int ret;
for (cfg = 0; cfg < num_configs; cfg++) {
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_OUTPUT:
continue;
default:
return -ENOTSUPP;
@@ -1821,6 +1837,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
}
for (cfg = 0; cfg < num_configs; cfg++) {
+ arg = pinconf_to_config_argument(configs[cfg]);
+
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
@@ -1844,6 +1862,14 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_bias(jzpc, pin, true);
break;
+ case PIN_CONFIG_OUTPUT:
+ ret = pinctrl_gpio_direction_output(pin);
+ if (ret)
+ return ret;
+
+ ingenic_set_output_level(jzpc, pin, arg);
+ break;
+
default:
unreachable();
}
@@ -1940,6 +1966,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
{
struct ingenic_gpio_chip *jzgc;
struct device *dev = jzpc->dev;
+ struct gpio_irq_chip *girq;
unsigned int bank;
int err;
@@ -1982,10 +2009,6 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.free = gpiochip_generic_free;
}
- err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
- if (err)
- return err;
-
jzgc->irq = irq_of_parse_and_map(node, 0);
if (!jzgc->irq)
return -EINVAL;
@@ -2000,13 +2023,22 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
- handle_level_irq, IRQ_TYPE_NONE);
+ girq = &jzgc->gc.irq;
+ girq->chip = &jzgc->irq_chip;
+ girq->parent_handler = ingenic_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = jzgc->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
if (err)
return err;
- gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
- jzgc->irq, ingenic_gpio_irq_handler);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 06be55dab341..e4677546aec4 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1324,15 +1324,13 @@ static int lpc18xx_create_group_func_map(struct device *dev,
static int lpc18xx_scu_probe(struct platform_device *pdev)
{
struct lpc18xx_scu_data *scu;
- struct resource *res;
int ret;
scu = devm_kzalloc(&pdev->dev, sizeof(*scu), GFP_KERNEL);
if (!scu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scu->base = devm_ioremap_resource(&pdev->dev, res);
+ scu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scu->base))
return PTR_ERR(scu->base);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index fb76fb2e9ea5..eb3dd0d46d6c 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -736,6 +736,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
struct ocelot_pinctrl *info)
{
struct gpio_chip *gc;
+ struct gpio_irq_chip *girq;
int ret, irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -747,22 +748,26 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc->of_node = info->dev->of_node;
gc->label = "ocelot-gpio";
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
-
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq <= 0)
return irq;
- ret = gpiochip_irqchip_add(gc, &ocelot_irqchip, 0, handle_edge_irq,
- IRQ_TYPE_NONE);
+ girq = &gc->irq;
+ girq->chip = &ocelot_irqchip;
+ girq->parent_handler = ocelot_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
- gpiochip_set_chained_irqchip(gc, &ocelot_irqchip, irq,
- ocelot_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 55488ca246f1..674b7b5919df 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1196,7 +1196,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
struct oxnas_gpio_bank *bank;
unsigned int id, ngpios;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_parse_phandle_with_fixed_args(np, "gpio-ranges",
3, 0, &pinspec)) {
@@ -1219,8 +1219,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank = &oxnas_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -1232,6 +1231,18 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
bank->gpio_chip.ngpio = ngpios;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = oxnas_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
@@ -1239,18 +1250,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, oxnas_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index e7f6dd5ab578..e5d6d3f9753e 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2202,7 +2202,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
struct pic32_gpio_bank *bank;
u32 id;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_property_read_u32(np, "microchip,gpio-bank", &id)) {
dev_err(&pdev->dev, "microchip,gpio-bank property not found\n");
@@ -2216,8 +2216,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank = &pic32_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -2240,25 +2239,23 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pic32_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->parents[0] = irq;
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
id, ret);
return ret;
}
-
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pic32_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 379e9a6a6d89..fa370c171cad 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1352,6 +1352,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
for (i = 0; i < pctl->nbanks; i++) {
char child_name[sizeof("gpioXX")];
struct device_node *child;
+ struct gpio_irq_chip *girq;
snprintf(child_name, sizeof(child_name), "gpio%d", i);
child = of_get_child_by_name(node, child_name);
@@ -1383,23 +1384,28 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
bank->gpio_chip.parent = pctl->dev;
bank->gpio_chip.of_node = child;
- ret = gpiochip_add_data(&bank->gpio_chip, bank);
- if (ret < 0) {
- dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
- i, ret);
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pistachio_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
goto err;
}
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
- dev_err(pctl->dev, "Failed to add IRQ chip %u: %d\n",
+ dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
i, ret);
- gpiochip_remove(&bank->gpio_chip);
goto err;
}
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pistachio_gpio_irq_handler);
ret = gpiochip_add_pin_range(&bank->gpio_chip,
dev_name(pctl->dev), 0,
@@ -1429,7 +1435,6 @@ static const struct of_device_id pistachio_pinctrl_of_match[] = {
static int pistachio_pinctrl_probe(struct platform_device *pdev)
{
struct pistachio_pinctrl *pctl;
- struct resource *res;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1437,8 +1442,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
pctl->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, pctl);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->base))
return PTR_ERR(pctl->base);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index dc0bbf198cbc..fc9a2a9959d9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -58,6 +58,7 @@ enum rockchip_pinctrl_type {
RK3128,
RK3188,
RK3288,
+ RK3308,
RK3368,
RK3399,
};
@@ -70,6 +71,7 @@ enum rockchip_pinctrl_type {
#define IOMUX_SOURCE_PMU BIT(2)
#define IOMUX_UNROUTED BIT(3)
#define IOMUX_WIDTH_3BIT BIT(4)
+#define IOMUX_WIDTH_2BIT BIT(5)
/**
* @type: iomux variant using IOMUX_* constants
@@ -656,6 +658,100 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
},
};
+static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+ {
+ .num = 1,
+ .pin = 14,
+ .reg = 0x28,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 15,
+ .reg = 0x2c,
+ .bit = 0,
+ .mask = 0x3
+ }, {
+ .num = 1,
+ .pin = 18,
+ .reg = 0x30,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 19,
+ .reg = 0x30,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 20,
+ .reg = 0x30,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 21,
+ .reg = 0x34,
+ .bit = 0,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 22,
+ .reg = 0x34,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 23,
+ .reg = 0x34,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 12,
+ .reg = 0x68,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 13,
+ .reg = 0x68,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 2,
+ .pin = 2,
+ .reg = 0x608,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 3,
+ .reg = 0x608,
+ .bit = 4,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 16,
+ .reg = 0x610,
+ .bit = 8,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 10,
+ .reg = 0x610,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 11,
+ .reg = 0x610,
+ .bit = 4,
+ .mask = 0x7
+ },
+};
+
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
.num = 2,
@@ -982,6 +1078,192 @@ static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ {
+ /* rtc_clk */
+ .bank_num = 0,
+ .pin = 19,
+ .func = 1,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 0) | BIT(0),
+ }, {
+ /* uart2_rxm0 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3),
+ }, {
+ /* uart2_rxm1 */
+ .bank_num = 4,
+ .pin = 26,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2),
+ }, {
+ /* i2c3_sdam0 */
+ .bank_num = 0,
+ .pin = 15,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9),
+ }, {
+ /* i2c3_sdam1 */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8),
+ }, {
+ /* i2c3_sdam2 */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 3,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9),
+ }, {
+ /* i2s-8ch-1-sclktxm0 */
+ .bank_num = 1,
+ .pin = 3,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclkrxm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclktxm1 */
+ .bank_num = 1,
+ .pin = 13,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* i2s-8ch-1-sclkrxm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* pdm-clkm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* pdm-clkm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 4,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* pdm-clkm2 */
+ .bank_num = 2,
+ .pin = 6,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* pdm-clkm-m2 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x600,
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* spi1_miso */
+ .bank_num = 3,
+ .pin = 10,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9),
+ }, {
+ /* spi1_miso_m1 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9) | BIT(9),
+ }, {
+ /* owire_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11),
+ }, {
+ /* owire_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 7,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
+ }, {
+ /* owire_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
+ }, {
+ /* can_rxd_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* can_rxd_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* can_rxd_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* mac_rxd0_m0 */
+ .bank_num = 1,
+ .pin = 20,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14),
+ }, {
+ /* mac_rxd0_m1 */
+ .bank_num = 4,
+ .pin = 2,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14) | BIT(14),
+ }, {
+ /* uart3_rx */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15),
+ }, {
+ /* uart3_rx_m1 */
+ .bank_num = 0,
+ .pin = 17,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15) | BIT(15),
+ },
+};
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -1475,6 +1757,26 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3308_SCHMITT_PINS_PER_REG 8
+#define RK3308_SCHMITT_BANK_STRIDE 16
+#define RK3308_SCHMITT_GRF_OFFSET 0x1a0
+
+static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_SCHMITT_GRF_OFFSET;
+
+ *reg += bank->bank_num * RK3308_SCHMITT_BANK_STRIDE;
+ *reg += ((pin_num / RK3308_SCHMITT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3308_SCHMITT_PINS_PER_REG;
+
+ return 0;
+}
+
#define RK2928_PULL_OFFSET 0x118
#define RK2928_PULL_PINS_PER_REG 16
#define RK2928_PULL_BANK_STRIDE 8
@@ -1646,6 +1948,40 @@ static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit *= RK3288_DRV_BITS_PER_PIN;
}
+#define RK3308_PULL_OFFSET 0xa0
+
+static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_PULL_OFFSET;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+}
+
+#define RK3308_DRV_GRF_OFFSET 0x100
+
+static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_DRV_GRF_OFFSET;
+ *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3288_DRV_PINS_PER_REG);
+ *bit *= RK3288_DRV_BITS_PER_PIN;
+}
+
#define RK3368_PULL_GRF_OFFSET 0x100
#define RK3368_PULL_PMU_OFFSET 0x10
@@ -1986,6 +2322,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2030,6 +2367,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2293,6 +2631,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
@@ -3303,7 +3642,8 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
* 4bit iomux'es are spread over two registers.
*/
inc = (iom->type & (IOMUX_WIDTH_4BIT |
- IOMUX_WIDTH_3BIT)) ? 8 : 4;
+ IOMUX_WIDTH_3BIT |
+ IOMUX_WIDTH_2BIT)) ? 8 : 4;
if (iom->type & IOMUX_SOURCE_PMU)
pmu_offs += inc;
else
@@ -3709,6 +4049,44 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3308_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+};
+
+static struct rockchip_pin_ctrl rk3308_pin_ctrl = {
+ .pin_banks = rk3308_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3308_pin_banks),
+ .label = "RK3308-GPIO",
+ .type = RK3308,
+ .grf_mux_offset = 0x0,
+ .iomux_recalced = rk3308_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rk3308_mux_recalced_data),
+ .iomux_routes = rk3308_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3308_mux_route_data),
+ .pull_calc_reg = rk3308_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3308_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3308_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
@@ -3849,6 +4227,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3228_pin_ctrl },
{ .compatible = "rockchip,rk3288-pinctrl",
.data = &rk3288_pin_ctrl },
+ { .compatible = "rockchip,rk3308-pinctrl",
+ .data = &rk3308_pin_ctrl },
{ .compatible = "rockchip,rk3328-pinctrl",
.data = &rk3328_pin_ctrl },
{ .compatible = "rockchip,rk3368-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 017fc6b3e27e..215db220d795 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -617,12 +617,6 @@ static void rza1_pin_reset(struct rza1_port *port, unsigned int pin)
spin_unlock_irqrestore(&port->lock, irqflags);
}
-static inline int rza1_pin_get_direction(struct rza1_port *port,
- unsigned int pin)
-{
- return !!rza1_get_bit(port, RZA1_PM_REG, pin);
-}
-
/**
* rza1_pin_set_direction() - set I/O direction on a pin in port mode
*
@@ -783,7 +777,7 @@ static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct rza1_port *port = gpiochip_get_data(chip);
- return rza1_pin_get_direction(port, gpio);
+ return !!rza1_get_bit(port, RZA1_PM_REG, gpio);
}
static int rza1_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index 3be1d833bf25..a205964e839b 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -213,8 +213,8 @@ static const char * const rza2_gpio_names[] = {
"PC_0", "PC_1", "PC_2", "PC_3", "PC_4", "PC_5", "PC_6", "PC_7",
"PD_0", "PD_1", "PD_2", "PD_3", "PD_4", "PD_5", "PD_6", "PD_7",
"PE_0", "PE_1", "PE_2", "PE_3", "PE_4", "PE_5", "PE_6", "PE_7",
- "PF_0", "PF_1", "PF_2", "PF_3", "P0_4", "PF_5", "PF_6", "PF_7",
- "PG_0", "PG_1", "PG_2", "P0_3", "PG_4", "PG_5", "PG_6", "PG_7",
+ "PF_0", "PF_1", "PF_2", "PF_3", "PF_4", "PF_5", "PF_6", "PF_7",
+ "PG_0", "PG_1", "PG_2", "PG_3", "PG_4", "PG_5", "PG_6", "PG_7",
"PH_0", "PH_1", "PH_2", "PH_3", "PH_4", "PH_5", "PH_6", "PH_7",
/* port I does not exist */
"PJ_0", "PJ_1", "PJ_2", "PJ_3", "PJ_4", "PJ_5", "PJ_6", "PJ_7",
@@ -462,7 +462,6 @@ static const struct pinmux_ops rza2_pinmux_ops = {
static int rza2_pinctrl_probe(struct platform_device *pdev)
{
struct rza2_pinctrl_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -471,8 +470,7 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/pinctrl/pinctrl-rzn1.c b/drivers/pinctrl/pinctrl-rzn1.c
index 0f6f8a10a53a..39538d40dbf3 100644
--- a/drivers/pinctrl/pinctrl-rzn1.c
+++ b/drivers/pinctrl/pinctrl-rzn1.c
@@ -487,7 +487,7 @@ static int rzn1_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- const u32 reg_drive[4] = { 4, 6, 8, 12 };
+ static const u32 reg_drive[4] = { 4, 6, 8, 12 };
u32 pull, drive, l1mux;
u32 l1, l2, arg = 0;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 00db8b9efb2c..4f39a7945d01 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1477,7 +1477,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
struct device *dev = info->dev;
int bank_num = of_alias_get_id(np, "gpio");
struct resource res, irq_res;
- int gpio_irq = 0, err;
+ int err;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
@@ -1500,12 +1500,6 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
range->pin_base = range->base = range->id * ST_GPIO_PINS_PER_BANK;
range->npins = bank->gpio_chip.ngpio;
range->gc = &bank->gpio_chip;
- err = gpiochip_add_data(&bank->gpio_chip, bank);
- if (err) {
- dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
- return err;
- }
- dev_info(dev, "%s bank added.\n", range->name);
/**
* GPIO bank can have one of the two possible types of
@@ -1527,23 +1521,40 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
*/
if (of_irq_to_resource(np, 0, &irq_res) > 0) {
- gpio_irq = irq_res.start;
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &st_gpio_irqchip,
- gpio_irq, st_gpio_irq_handler);
- }
+ struct gpio_irq_chip *girq;
+ int gpio_irq = irq_res.start;
- if (info->irqmux_base || gpio_irq > 0) {
- err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
- 0, handle_simple_irq,
- IRQ_TYPE_NONE);
- if (err) {
- gpiochip_remove(&bank->gpio_chip);
- dev_info(dev, "could not add irqchip\n");
- return err;
+ /* This is not a valid IRQ */
+ if (gpio_irq <= 0) {
+ dev_err(dev, "invalid IRQ for %pOF bank\n", np);
+ goto skip_irq;
}
- } else {
- dev_info(dev, "No IRQ support for %pOF bank\n", np);
+ /* We need to have a mux as well */
+ if (!info->irqmux_base) {
+ dev_err(dev, "no irqmux for %pOF bank\n", np);
+ goto skip_irq;
+ }
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &st_gpio_irqchip;
+ girq->parent_handler = st_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = gpio_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
+
+skip_irq:
+ err = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (err) {
+ dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
+ return err;
}
+ dev_info(dev, "%s bank added.\n", range->name);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index ccdf0bb21414..16723797fa7c 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -505,6 +505,25 @@ static void stmfx_pinctrl_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&pctl->lock);
}
+static int stmfx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = stmfx_gpio_direction_input(gpio_chip, data->hwirq);
+ if (ret)
+ return ret;
+
+ return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+}
+
+static void stmfx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+
+ return gpiochip_relres_irq(gpio_chip, data->hwirq);
+}
+
static void stmfx_pinctrl_irq_toggle_trigger(struct stmfx_pinctrl *pctl,
unsigned int offset)
{
@@ -664,6 +683,8 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
+ pctl->irq_chip.irq_request_resources = stmfx_gpio_irq_request_resources;
+ pctl->irq_chip.irq_release_resources = stmfx_gpio_irq_release_resources;
ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
0, handle_bad_irq, IRQ_TYPE_NONE);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 1f64e2e7efd9..ab49bd708969 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -747,7 +747,6 @@ static struct pinctrl_desc tb10x_pindesc = {
static int tb10x_pinctrl_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *child;
@@ -768,8 +767,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, state);
mutex_init(&state->mutex);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->base = devm_ioremap_resource(dev, mem);
+ state->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->base)) {
ret = PTR_ERR(state->base);
goto fail;
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 348423bb39dd..cc306448259e 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1055,7 +1055,6 @@ static struct pinctrl_desc u300_pmx_desc = {
static int u300_pmx_probe(struct platform_device *pdev)
{
struct u300_pmx *upmx;
- struct resource *res;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1064,8 +1063,7 @@ static int u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- upmx->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ upmx->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(upmx->virtbase))
return PTR_ERR(upmx->virtbase);
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 913d38f29b73..5e3f31b55eb7 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1705,12 +1705,10 @@ static int pinmux_xway_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct pinctrl_xway_soc *xway_soc;
- struct resource *res;
int ret, i;
/* get and remap our register range */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
+ xway_info.membase[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xway_info.membase[0]))
return PTR_ERR(xway_info.membase[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa25x.c b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
index 8d1247078ae5..95640698422f 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa25x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
@@ -216,25 +216,20 @@ static int pxa25x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa27x.c b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
index 64943e819af6..48ccfb50b23e 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa27x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
@@ -508,25 +508,20 @@ static int pxa27x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 32fc2458b8eb..811af2f81c39 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -90,6 +90,16 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8976
+ tristate "Qualcomm 8976 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8976 platform.
+ The Qualcomm MSM8956, APQ8056, APQ8076 platforms are also
+ supported by this driver.
+
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on GPIOLIB && OF
@@ -132,32 +142,33 @@ config PINCTRL_QDF2XXX
Qualcomm Technologies QDF2xxx SOCs.
config PINCTRL_QCOM_SPMI_PMIC
- tristate "Qualcomm SPMI PMIC pin controller driver"
- depends on GPIOLIB && OF && SPMI
- select REGMAP_SPMI
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SPMI for communication with SoC. Example PMIC's
- devices are pm8841, pm8941 and pma8084.
+ tristate "Qualcomm SPMI PMIC pin controller driver"
+ depends on GPIOLIB && OF && SPMI
+ select REGMAP_SPMI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips,
+ which use SPMI to communicate with the SoC. Example PMIC
+ devices are pm8841, pm8941 and pma8084.
config PINCTRL_QCOM_SSBI_PMIC
- tristate "Qualcomm SSBI PMIC pin controller driver"
- depends on GPIOLIB && OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SSBI for communication with SoC. Example PMIC's
- devices are pm8058 and pm8921.
+ tristate "Qualcomm SSBI PMIC pin controller driver"
+ depends on GPIOLIB && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips,
+ which use SSBI to communicate with the SoC. Example PMIC
+ devices are pm8058 and pm8921.
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
@@ -169,30 +180,30 @@ config PINCTRL_SC7180
Technologies Inc SC7180 platform.
config PINCTRL_SDM660
- tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM660 platform.
+ tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM660 platform.
config PINCTRL_SDM845
- tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
- depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM845 platform.
+ tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
+ depends on GPIOLIB && (OF || ACPI)
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM845 platform.
config PINCTRL_SM8150
- tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SM8150 platform.
+ tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8150 platform.
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index f8bb0c265381..c2c2f9ad6827 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 763da0be10d6..62fcae9f05ae 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1150,8 +1150,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->regs[i]);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
new file mode 100644
index 000000000000..e1259ce27396
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2016, AngeloGioacchino Del Regno <kholk11@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
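+/* Build one msm_function entry, indexed by its msm_mux_* enumerator */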
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
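+/*
+ * Each TLMM pin owns a 0x1000-byte register window: the mux/pull/drive
+ * control register sits at the window base, the I/O register at +0x4,
+ * the interrupt configuration at +0x8 and the interrupt status at +0xc.
+ * The interrupt target field lives in the same +0x8 register as the
+ * interrupt configuration bits.
+ */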
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
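+
+/*
+ * SDC and QDSD pads are configuration-only: they expose pull and drive
+ * strength controls but no function mux, I/O or interrupt logic, so the
+ * corresponding fields above carry -1 sentinels.
+ */
+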
+static const struct pinctrl_pin_desc msm8976_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "SDC1_CLK"),
+ PINCTRL_PIN(146, "SDC1_CMD"),
+ PINCTRL_PIN(147, "SDC1_DATA"),
+ PINCTRL_PIN(148, "SDC1_RCLK"),
+ PINCTRL_PIN(149, "SDC2_CLK"),
+ PINCTRL_PIN(150, "SDC2_CMD"),
+ PINCTRL_PIN(151, "SDC2_DATA"),
+ PINCTRL_PIN(152, "QDSD_CLK"),
+ PINCTRL_PIN(153, "QDSD_CMD"),
+ PINCTRL_PIN(154, "QDSD_DATA0"),
+ PINCTRL_PIN(155, "QDSD_DATA1"),
+ PINCTRL_PIN(156, "QDSD_DATA2"),
+ PINCTRL_PIN(157, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+
+static const unsigned int sdc1_clk_pins[] = { 145 };
+static const unsigned int sdc1_cmd_pins[] = { 146 };
+static const unsigned int sdc1_data_pins[] = { 147 };
+static const unsigned int sdc1_rclk_pins[] = { 148 };
+static const unsigned int sdc2_clk_pins[] = { 149 };
+static const unsigned int sdc2_cmd_pins[] = { 150 };
+static const unsigned int sdc2_data_pins[] = { 151 };
+static const unsigned int qdsd_clk_pins[] = { 152 };
+static const unsigned int qdsd_cmd_pins[] = { 153 };
+static const unsigned int qdsd_data0_pins[] = { 154 };
+static const unsigned int qdsd_data1_pins[] = { 155 };
+static const unsigned int qdsd_data2_pins[] = { 156 };
+static const unsigned int qdsd_data3_pins[] = { 157 };
+
+enum msm8976_functions {
+ msm_mux_gpio,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_spi1,
+ msm_mux_smb_int,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_i2c2,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_blsp_spi3,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_blsp_spi4,
+ msm_mux_cap_int,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_uart5,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_m_voc,
+ msm_mux_blsp_i2c5,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart6,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_blsp_i2c6,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_mdp_vsync,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_cam_mclk,
+ msm_mux_cci0_i2c,
+ msm_mux_cci1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp3_spi,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_uim_batt,
+ msm_mux_sd_write,
+ msm_mux_uim1_data,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_present,
+ msm_mux_uim2_data,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_reset,
+ msm_mux_uim2_present,
+ msm_mux_ts_xvdd,
+ msm_mux_mipi_dsi0,
+ msm_mux_us_euro,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_pri_mi2s,
+ msm_mux_codec_reset,
+ msm_mux_cdc_pdm0,
+ msm_mux_us_emitter,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_mclk_c,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_wcss_bt,
+ msm_mux_sdc3,
+ msm_mux_wcss_wlan2,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_fm,
+ msm_mux_key_volp,
+ msm_mux_key_snapshot,
+ msm_mux_key_focus,
+ msm_mux_key_home,
+ msm_mux_pwr_down,
+ msm_mux_dmic0_clk,
+ msm_mux_hdmi_int,
+ msm_mux_dmic0_data,
+ msm_mux_wsa_vi,
+ msm_mux_wsa_en,
+ msm_mux_blsp_spi8,
+ msm_mux_wsa_irq,
+ msm_mux_blsp_i2c8,
+ msm_mux_pa_indicator,
+ msm_mux_modem_tsync,
+ msm_mux_ssbi_wtr1,
+ msm_mux_gsm1_tx,
+ msm_mux_gsm0_tx,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_ss_switch,
+ msm_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const smb_int_groups[] = {
+ "gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio105",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30",
+ "gpio31", "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio38",
+ "gpio116", "gpio126", "gpio128", "gpio129",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio12",
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const cap_int_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio46",
+};
+static const char * const m_voc_groups[] = {
+ "gpio123", "gpio124",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio136", "gpio137",
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio45",
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio47", "gpio48", "gpio62", "gpio69", "gpio120",
+ "gpio121", "gpio130", "gpio131",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio5",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio126",
+};
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio62",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+static const char * const cci0_i2c_groups[] = {
+ "gpio30", "gpio29",
+};
+static const char * const cci1_i2c_groups[] = {
+ "gpio104", "gpio103",
+};
+static const char * const blsp1_spi_groups[] = {
+ "gpio101",
+};
+static const char * const blsp3_spi_groups[] = {
+ "gpio106", "gpio107",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio49",
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio50",
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio51",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio61",
+};
+static const char * const sd_write_groups[] = {
+ "gpio50",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio51",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio52",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio53",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio54",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio55",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio56",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio57",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio58",
+};
+static const char * const ts_xvdd_groups[] = {
+ "gpio60",
+};
+static const char * const mipi_dsi0_groups[] = {
+ "gpio61",
+};
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio122", "gpio123", "gpio124", "gpio125", "gpio127",
+};
+static const char * const codec_reset_groups[] = {
+ "gpio67",
+};
+static const char * const cdc_pdm0_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+};
+static const char * const us_emitter_groups[] = {
+ "gpio68",
+};
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio62",
+};
+static const char * const pri_mi2s_mclk_c_groups[] = {
+ "gpio116",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio117",
+};
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio118",
+};
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio119",
+};
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+static const char * const wcss_bt_groups[] = {
+ "gpio39", "gpio47", "gpio88",
+};
+static const char * const sdc3_groups[] = {
+ "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44",
+};
+static const char * const wcss_wlan2_groups[] = {
+ "gpio40",
+};
+static const char * const wcss_wlan1_groups[] = {
+ "gpio41",
+};
+static const char * const wcss_wlan0_groups[] = {
+ "gpio42",
+};
+static const char * const wcss_wlan_groups[] = {
+ "gpio43", "gpio44",
+};
+static const char * const wcss_fm_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const key_volp_groups[] = {
+ "gpio85",
+};
+static const char * const key_snapshot_groups[] = {
+ "gpio86",
+};
+static const char * const key_focus_groups[] = {
+ "gpio87",
+};
+static const char * const key_home_groups[] = {
+ "gpio88",
+};
+static const char * const pwr_down_groups[] = {
+ "gpio89",
+};
+static const char * const dmic0_clk_groups[] = {
+ "gpio66",
+};
+static const char * const hdmi_int_groups[] = {
+ "gpio90",
+};
+static const char * const dmic0_data_groups[] = {
+ "gpio67",
+};
+static const char * const wsa_vi_groups[] = {
+ "gpio108", "gpio109",
+};
+static const char * const wsa_en_groups[] = {
+ "gpio96",
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const wsa_irq_groups[] = {
+ "gpio97",
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio92",
+};
+static const char * const modem_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const nav_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio79", "gpio94",
+};
+static const char * const gsm1_tx_groups[] = {
+ "gpio95",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio99",
+};
+static const char * const sdcard_det_groups[] = {
+ "gpio133",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio102", "gpio105", "gpio134", "gpio135",
+};
+
+static const char * const ss_switch_groups[] = {
+ "gpio139",
+};
+
+static const struct msm_function msm8976_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(blsp_spi1),
+ FUNCTION(smb_int),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(blsp_spi3),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(blsp_spi4),
+ FUNCTION(cap_int),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uart5),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(m_voc),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart6),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(mdp_vsync),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(sec_mi2s_mclk_a),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci0_i2c),
+ FUNCTION(cci1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp3_spi),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(uim_batt),
+ FUNCTION(sd_write),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_present),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim2_present),
+ FUNCTION(ts_xvdd),
+ FUNCTION(mipi_dsi0),
+ FUNCTION(us_euro),
+ FUNCTION(ts_resout),
+ FUNCTION(ts_sample),
+ FUNCTION(sec_mi2s_mclk_b),
+ FUNCTION(pri_mi2s),
+ FUNCTION(codec_reset),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(us_emitter),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_mclk_c),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(lpass_slimbus0),
+ FUNCTION(lpass_slimbus1),
+ FUNCTION(codec_int1),
+ FUNCTION(codec_int2),
+ FUNCTION(wcss_bt),
+ FUNCTION(sdc3),
+ FUNCTION(wcss_wlan2),
+ FUNCTION(wcss_wlan1),
+ FUNCTION(wcss_wlan0),
+ FUNCTION(wcss_wlan),
+ FUNCTION(wcss_fm),
+ FUNCTION(key_volp),
+ FUNCTION(key_snapshot),
+ FUNCTION(key_focus),
+ FUNCTION(key_home),
+ FUNCTION(pwr_down),
+ FUNCTION(dmic0_clk),
+ FUNCTION(hdmi_int),
+ FUNCTION(dmic0_data),
+ FUNCTION(wsa_vi),
+ FUNCTION(wsa_en),
+ FUNCTION(blsp_spi8),
+ FUNCTION(wsa_irq),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(pa_indicator),
+ FUNCTION(modem_tsync),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(gsm1_tx),
+ FUNCTION(gsm0_tx),
+ FUNCTION(sdcard_det),
+ FUNCTION(sec_mi2s),
+ FUNCTION(ss_switch),
+};
+
+static const struct msm_pingroup msm8976_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_tracectl_b, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_traceclk_b, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, NA, NA, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, NA, blsp_i2c3, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, NA, blsp_i2c3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, blsp_spi4, NA, gcc_gp2_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi4, NA, gcc_gp3_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(27, cam_mclk, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(28, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(29, cci0_i2c, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(30, cci0_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, wcss_fm, NA, NA, qdss_traceclk_a, NA, NA, NA, NA, NA),
+ PINGROUP(47, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, gcc_gp1_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, sd_write, gcc_gp2_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, uim2_data, gcc_gp3_clk_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, sec_mi2s_mclk_a, pri_mi2s_mclk_b, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, dmic0_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, dmic0_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, modem_tsync, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, gsm0_tx, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, blsp1_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, sec_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, pri_mi2s_mclk_c, cdc_pdm0, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(117, lpass_slimbus, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, lpass_slimbus0, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, lpass_slimbus1, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(121, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(122, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, pri_mi2s_mclk_a, sec_mi2s_mclk_b, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(127, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(129, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data msm8976_pinctrl = {
+ .pins = msm8976_pins,
+ .npins = ARRAY_SIZE(msm8976_pins),
+ .functions = msm8976_functions,
+ .nfunctions = ARRAY_SIZE(msm8976_functions),
+ .groups = msm8976_groups,
+ .ngroups = ARRAY_SIZE(msm8976_groups),
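+	/* Pins 145-157 (the SDC/QDSD pads) are not GPIO-capable */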
+ .ngpios = 145,
+};
+
+static int msm8976_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8976_pinctrl);
+}
+
+static const struct of_device_id msm8976_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8976-pinctrl", },
+ { },
+};
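+
+/*
+ * An illustrative (hypothetical) devicetree node for this driver; the
+ * unit address, register size and interrupt specifier below are
+ * examples only, not taken from MSM8976 documentation:
+ *
+ *	tlmm: pinctrl@1000000 {
+ *		compatible = "qcom,msm8976-pinctrl";
+ *		reg = <0x1000000 0x300000>;
+ *		interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ *		gpio-controller;
+ *		#gpio-cells = <2>;
+ *		interrupt-controller;
+ *		#interrupt-cells = <2>;
+ *	};
+ */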
+
+static struct platform_driver msm8976_pinctrl_driver = {
+ .driver = {
+ .name = "msm8976-pinctrl",
+ .of_match_table = msm8976_pinctrl_of_match,
+ },
+ .probe = msm8976_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
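+/*
+ * Pin controllers are registered at arch_initcall time so that their
+ * pins are available before dependent devices probe.
+ */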
+static int __init msm8976_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8976_pinctrl_driver);
+}
+arch_initcall(msm8976_pinctrl_init);
+
+static void __exit msm8976_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8976_pinctrl_driver);
+}
+module_exit(msm8976_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8976 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8976_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6399c8a2bc22..d6cfad7417b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -77,6 +77,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -102,6 +103,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = 3, \
.drv_bit = 0, \
@@ -1087,14 +1089,14 @@ static const struct msm_pingroup sc7180_groups[] = {
[116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
- [119] = UFS_RESET(ufs_reset, 0x97f000),
- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x97a000, 15, 0),
- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x97a000, 13, 6),
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x97a000, 11, 3),
- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x97a000, 9, 0),
- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x97b000, 14, 6),
- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x97b000, 11, 3),
- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x97b000, 9, 0),
+ [119] = UFS_RESET(ufs_reset, 0x7f000),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x7a000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x7a000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x7a000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x7a000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x7b000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x7b000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index f1fece5b9c06..653d1095bfea 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1108,6 +1108,9 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
+ /* pm8950 has 8 GPIOs with holes on 3 */
+ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
@@ -1121,6 +1124,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 91407b024cf3..48602dba4967 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -915,6 +915,8 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
+ { .compatible = "qcom,pm8950-mpp" }, /* 4 MPP's */
+ { .compatible = "qcom,pmi8950-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8994-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,spmi-mpp" }, /* Generic */
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c1f7d0799ebe..dca86886b1f9 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -56,7 +56,6 @@
/**
* struct pm8xxx_pin_data - dynamic configuration for a pin
* @reg: address of the control register
- * @irq: IRQ from the PMIC interrupt controller
* @power_source: logical selected voltage source, mapping in static data
* is used translate to register values
* @mode: operating mode for the pin (input/output)
@@ -72,7 +71,6 @@
*/
struct pm8xxx_pin_data {
unsigned reg;
- int irq;
u8 power_source;
u8 mode;
bool open_drain;
@@ -93,9 +91,6 @@ struct pm8xxx_gpio {
struct pinctrl_desc desc;
unsigned npins;
-
- struct fwnode_handle *fwnode;
- struct irq_domain *domain;
};
static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
@@ -491,13 +486,16 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ int ret, irq;
bool state;
- int ret;
- if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
- ret = pin->output_value;
- } else if (pin->irq >= 0) {
- ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT)
+ return pin->output_value;
+
+ irq = chip->to_irq(chip, offset);
+ if (irq >= 0) {
+ ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+ &state);
if (!ret)
ret = !!state;
} else
@@ -535,37 +533,6 @@ static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
}
-static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
- struct irq_fwspec fwspec;
- int ret;
-
- fwspec.fwnode = pctrl->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-
- ret = irq_create_fwspec_mapping(&fwspec);
-
- /*
- * Cache the IRQ since pm8xxx_gpio_get() needs this to get determine the
- * line level.
- */
- pin->irq = ret;
-
- return ret;
-}
-
-static void pm8xxx_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
-
- pin->irq = -1;
-}
-
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
@@ -624,13 +591,11 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#endif
static const struct gpio_chip pm8xxx_gpio_template = {
- .free = pm8xxx_gpio_free,
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
.set = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
- .to_irq = pm8xxx_gpio_to_irq,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
};
@@ -712,43 +677,24 @@ static int pm8xxx_domain_translate(struct irq_domain *domain,
return 0;
}
-static int pm8xxx_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
+static unsigned int pm8xxx_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
- struct pm8xxx_gpio *pctrl = container_of(domain->host_data,
- struct pm8xxx_gpio, chip);
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq;
- unsigned int type;
- int ret, i;
-
- ret = pm8xxx_domain_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
- &pm8xxx_irq_chip, pctrl, handle_level_irq,
- NULL, NULL);
+ return offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
+}
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = hwirq + 0xc0;
- parent_fwspec.param[1] = fwspec->param[1];
+static int pm8xxx_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
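+	/* GPIO hwirqs start at 0xc0 in the parent PMIC interrupt space */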
+ *parent_hwirq = child_hwirq + 0xc0;
+ *parent_type = child_type;
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
+ return 0;
}
-static const struct irq_domain_ops pm8xxx_domain_ops = {
- .activate = gpiochip_irq_domain_activate,
- .alloc = pm8xxx_domain_alloc,
- .deactivate = gpiochip_irq_domain_deactivate,
- .free = irq_domain_free_irqs_common,
- .translate = pm8xxx_domain_translate,
-};
-
static const struct of_device_id pm8xxx_gpio_of_match[] = {
{ .compatible = "qcom,pm8018-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pm8038-gpio", .data = (void *) 12 },
@@ -765,6 +711,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent_domain;
struct device_node *parent_node;
struct pinctrl_pin_desc *pins;
+ struct gpio_irq_chip *girq;
struct pm8xxx_gpio *pctrl;
int ret, i;
@@ -800,7 +747,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
for (i = 0; i < pctrl->desc.npins; i++) {
pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
- pin_data[i].irq = -1;
ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
if (ret)
@@ -841,19 +787,21 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- pctrl->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
- pctrl->domain = irq_domain_create_hierarchy(parent_domain, 0,
- pctrl->chip.ngpio,
- pctrl->fwnode,
- &pm8xxx_domain_ops,
- &pctrl->chip);
- if (!pctrl->domain)
- return -ENODEV;
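+	/*
+	 * Describe the IRQ hierarchy and let gpiochip_add_data() build
+	 * the irq domain from it, replacing the hand-rolled domain.
+	 */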
+ girq = &pctrl->chip.irq;
+ girq->chip = &pm8xxx_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
+ girq->parent_domain = parent_domain;
+ girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
+	girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_twocell;
+ girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
+ girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(&pdev->dev, "failed register gpiochip\n");
- goto err_chip_add_data;
+ return ret;
}
/*
@@ -883,8 +831,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
unregister_gpiochip:
gpiochip_remove(&pctrl->chip);
-err_chip_add_data:
- irq_domain_remove(pctrl->domain);
return ret;
}
@@ -894,7 +840,6 @@ static int pm8xxx_gpio_remove(struct platform_device *pdev)
struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- irq_domain_remove(pctrl->domain);
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index ebc27b06718c..0599f5127b01 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -486,8 +486,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
if (match) {
irq_chip = kmemdup(match->data,
sizeof(*irq_chip), GFP_KERNEL);
- if (!irq_chip)
+ if (!irq_chip) {
+ of_node_put(np);
return -ENOMEM;
+ }
wkup_np = np;
break;
}
@@ -504,6 +506,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
+ of_node_put(wkup_np);
return -ENXIO;
}
@@ -518,8 +521,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kcalloc(dev,
bank->nr_pins, sizeof(*weint_data),
GFP_KERNEL);
- if (!weint_data)
+ if (!weint_data) {
+ of_node_put(wkup_np);
return -ENOMEM;
+ }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(bank->of_node, idx);
@@ -536,10 +541,13 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
}
- if (!muxed_banks)
+ if (!muxed_banks) {
+ of_node_put(wkup_np);
return 0;
+ }
irq = irq_of_parse_and_map(wkup_np, 0);
+ of_node_put(wkup_np);
if (!irq) {
dev_err(dev, "irq number for muxed EINTs not found\n");
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 7e824e4d20f4..9bd0a3de101d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -490,8 +490,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
- if (!eint_data)
+ if (!eint_data) {
+ of_node_put(eint_np);
return -ENOMEM;
+ }
eint_data->drvdata = d;
@@ -503,12 +505,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint_np);
return -ENXIO;
}
eint_data->parents[i] = irq;
irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
}
+ of_node_put(eint_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index c399f0932af5..f97f8179f2b1 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -704,8 +704,10 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ of_node_put(eint0_np);
return -ENOMEM;
+ }
data->drvdata = d;
for (i = 0; i < NUM_EINT0_IRQ; ++i) {
@@ -714,6 +716,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint0_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint0_np);
return -ENXIO;
}
@@ -721,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
s3c64xx_eint0_handlers[i],
data);
}
+ of_node_put(eint0_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index de0477bb469d..f26574ef234a 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -272,6 +272,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
+ of_node_put(np);
return ret;
}
}
@@ -785,8 +786,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
@@ -797,8 +800,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
for_each_child_of_node(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(func_np);
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 2dd716b016a3..28d66e7cb098 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -17,6 +17,7 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
select PINCTRL_PFC_R8A77470 if ARCH_R8A77470
select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1
+ select PINCTRL_PFC_R8A774B1 if ARCH_R8A774B1
select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0
select PINCTRL_PFC_R8A7778 if ARCH_R8A7778
select PINCTRL_PFC_R8A7779 if ARCH_R8A7779
@@ -26,7 +27,8 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A7796 if ARCH_R8A7796
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
select PINCTRL_PFC_R8A77980 if ARCH_R8A77980
@@ -86,6 +88,9 @@ config PINCTRL_PFC_R8A77470
config PINCTRL_PFC_R8A774A1
bool "RZ/G2M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774B1
+ bool "RZ/G2N pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A774C0
bool "RZ/G2E pin control support" if COMPILE_TEST
@@ -113,9 +118,12 @@ config PINCTRL_PFC_R8A7794
config PINCTRL_PFC_R8A7795
bool "R-Car H3 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7796
+config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77961
+ bool "R-Car M3-W+ pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A77965
bool "R-Car M3-N pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 8c95abcfcc00..3bc05666e1a6 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
obj-$(CONFIG_PINCTRL_PFC_R8A774A1) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774B1) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A774C0) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
@@ -19,7 +20,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index b8640ad41bef..65e52688f091 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,12 +29,12 @@
static int sh_pfc_map_resources(struct sh_pfc *pfc,
struct platform_device *pdev)
{
- unsigned int num_windows, num_irqs;
struct sh_pfc_window *windows;
unsigned int *irqs = NULL;
+ unsigned int num_windows;
struct resource *res;
unsigned int i;
- int irq;
+ int num_irqs;
/* Count the MEM and IRQ resources. */
for (num_windows = 0;; num_windows++) {
@@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
if (!res)
break;
}
- for (num_irqs = 0;; num_irqs++) {
- irq = platform_get_irq(pdev, num_irqs);
- if (irq == -EPROBE_DEFER)
- return irq;
- if (irq < 0)
- break;
- }
-
if (num_windows == 0)
return -EINVAL;
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0)
+ return num_irqs;
+
/* Allocate memory windows and IRQs arrays. */
windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows),
GFP_KERNEL);
@@ -518,6 +514,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a774a1_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+ {
+ .compatible = "renesas,pfc-r8a774b1",
+ .data = &r8a774b1_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A774C0
{
.compatible = "renesas,pfc-r8a774c0",
@@ -579,10 +581,16 @@ static const struct of_device_id sh_pfc_of_table[] = {
},
#endif /* DEBUG */
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
.compatible = "renesas,pfc-r8a7796",
- .data = &r8a7796_pinmux_info,
+ .data = &r8a77960_pinmux_info,
+ },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+ {
+ .compatible = "renesas,pfc-r8a77961",
+ .data = &r8a77961_pinmux_info,
},
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77965
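The core.c hunk above swaps the open-coded "probe indices until failure" IRQ loop for platform_irq_count(). A minimal sketch of the resulting pattern -- example_map_irqs() is an illustrative name, not part of the patch -- assuming only <linux/platform_device.h>:

	static int example_map_irqs(struct platform_device *pdev)
	{
		unsigned int *irqs;
		int num_irqs;

		/*
		 * platform_irq_count() returns the number of IRQ resources,
		 * or a negative errno (including -EPROBE_DEFER) that can be
		 * propagated to the caller unchanged.
		 */
		num_irqs = platform_irq_count(pdev);
		if (num_irqs < 0)
			return num_irqs;

		irqs = devm_kcalloc(&pdev->dev, num_irqs, sizeof(*irqs),
				    GFP_KERNEL);
		if (!irqs)
			return -ENOMEM;

		return num_irqs;
	}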
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95f9aae3bfba..ad05da8f6516 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -718,7 +718,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_MSEL(IP1_27_24, A20, I2C_SEL_3_0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 7df010f757b1..d3145aa135d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -726,7 +726,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 61db7c7a35ec..a2496baca85d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7796 processor support - PFC hardware block.
+ * R8A7796 (R-Car M3-W/W+) support - PFC hardware block.
*
* Copyright (C) 2016-2019 Renesas Electronics Corp.
*
@@ -729,7 +729,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -6210,8 +6210,8 @@ const struct sh_pfc_soc_info r8a774a1_pinmux_info = {
};
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
-const struct sh_pfc_soc_info r8a7796_pinmux_info = {
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
+const struct sh_pfc_soc_info r8a77960_pinmux_info = {
.name = "r8a77960_pfc",
.ops = &r8a7796_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
@@ -6236,3 +6236,30 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+const struct sh_pfc_soc_info r8a77961_pinmux_info = {
+ .name = "r8a77961_pfc",
+ .ops = &r8a7796_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
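The new r8a77961_pinmux_info reuses every table in this file and differs from the M3-W variant only in .name; core.c (above) binds it through the new "renesas,pfc-r8a77961" compatible. A reduced sketch of how the per-SoC data is looked up at probe time -- generic OF matching with an illustrative table name, not the literal sh-pfc code -- assuming <linux/of_device.h>:

	static const struct of_device_id example_of_table[] = {
		{ .compatible = "renesas,pfc-r8a7796",
		  .data = &r8a77960_pinmux_info },
		{ .compatible = "renesas,pfc-r8a77961",
		  .data = &r8a77961_pinmux_info },
		{ /* sentinel */ }
	};

	/* In probe: the matched entry's .data becomes the SoC info. */
	const struct sh_pfc_soc_info *info =
		of_device_get_match_data(&pdev->dev);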
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 697c77a4ea95..8bdf33c807f6 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -732,7 +732,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -4378,355 +4378,362 @@ static const unsigned int vin5_clk_mux[] = {
VI5_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a_a),
- SH_PFC_PIN_GROUP(audio_clk_a_b),
- SH_PFC_PIN_GROUP(audio_clk_a_c),
- SH_PFC_PIN_GROUP(audio_clk_b_a),
- SH_PFC_PIN_GROUP(audio_clk_b_b),
- SH_PFC_PIN_GROUP(audio_clk_c_a),
- SH_PFC_PIN_GROUP(audio_clk_c_b),
- SH_PFC_PIN_GROUP(audio_clkout_a),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(audio_clkout1_a),
- SH_PFC_PIN_GROUP(audio_clkout1_b),
- SH_PFC_PIN_GROUP(audio_clkout2_a),
- SH_PFC_PIN_GROUP(audio_clkout2_b),
- SH_PFC_PIN_GROUP(audio_clkout3_a),
- SH_PFC_PIN_GROUP(audio_clkout3_b),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
- SH_PFC_PIN_GROUP(avb_avtp_match_b),
- SH_PFC_PIN_GROUP(avb_avtp_capture_b),
- SH_PFC_PIN_GROUP(can0_data_a),
- SH_PFC_PIN_GROUP(can0_data_b),
- SH_PFC_PIN_GROUP(can1_data),
- SH_PFC_PIN_GROUP(can_clk),
- SH_PFC_PIN_GROUP(canfd0_data_a),
- SH_PFC_PIN_GROUP(canfd0_data_b),
- SH_PFC_PIN_GROUP(canfd1_data),
- SH_PFC_PIN_GROUP(drif0_ctrl_a),
- SH_PFC_PIN_GROUP(drif0_data0_a),
- SH_PFC_PIN_GROUP(drif0_data1_a),
- SH_PFC_PIN_GROUP(drif0_ctrl_b),
- SH_PFC_PIN_GROUP(drif0_data0_b),
- SH_PFC_PIN_GROUP(drif0_data1_b),
- SH_PFC_PIN_GROUP(drif0_ctrl_c),
- SH_PFC_PIN_GROUP(drif0_data0_c),
- SH_PFC_PIN_GROUP(drif0_data1_c),
- SH_PFC_PIN_GROUP(drif1_ctrl_a),
- SH_PFC_PIN_GROUP(drif1_data0_a),
- SH_PFC_PIN_GROUP(drif1_data1_a),
- SH_PFC_PIN_GROUP(drif1_ctrl_b),
- SH_PFC_PIN_GROUP(drif1_data0_b),
- SH_PFC_PIN_GROUP(drif1_data1_b),
- SH_PFC_PIN_GROUP(drif1_ctrl_c),
- SH_PFC_PIN_GROUP(drif1_data0_c),
- SH_PFC_PIN_GROUP(drif1_data1_c),
- SH_PFC_PIN_GROUP(drif2_ctrl_a),
- SH_PFC_PIN_GROUP(drif2_data0_a),
- SH_PFC_PIN_GROUP(drif2_data1_a),
- SH_PFC_PIN_GROUP(drif2_ctrl_b),
- SH_PFC_PIN_GROUP(drif2_data0_b),
- SH_PFC_PIN_GROUP(drif2_data1_b),
- SH_PFC_PIN_GROUP(drif3_ctrl_a),
- SH_PFC_PIN_GROUP(drif3_data0_a),
- SH_PFC_PIN_GROUP(drif3_data1_a),
- SH_PFC_PIN_GROUP(drif3_ctrl_b),
- SH_PFC_PIN_GROUP(drif3_data0_b),
- SH_PFC_PIN_GROUP(drif3_data1_b),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync),
- SH_PFC_PIN_GROUP(du_oddf),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_a),
- SH_PFC_PIN_GROUP(hscif1_clk_a),
- SH_PFC_PIN_GROUP(hscif1_ctrl_a),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_a),
- SH_PFC_PIN_GROUP(hscif2_clk_a),
- SH_PFC_PIN_GROUP(hscif2_ctrl_a),
- SH_PFC_PIN_GROUP(hscif2_data_b),
- SH_PFC_PIN_GROUP(hscif2_clk_b),
- SH_PFC_PIN_GROUP(hscif2_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_c),
- SH_PFC_PIN_GROUP(hscif2_clk_c),
- SH_PFC_PIN_GROUP(hscif2_ctrl_c),
- SH_PFC_PIN_GROUP(hscif3_data_a),
- SH_PFC_PIN_GROUP(hscif3_clk),
- SH_PFC_PIN_GROUP(hscif3_ctrl),
- SH_PFC_PIN_GROUP(hscif3_data_b),
- SH_PFC_PIN_GROUP(hscif3_data_c),
- SH_PFC_PIN_GROUP(hscif3_data_d),
- SH_PFC_PIN_GROUP(hscif4_data_a),
- SH_PFC_PIN_GROUP(hscif4_clk),
- SH_PFC_PIN_GROUP(hscif4_ctrl),
- SH_PFC_PIN_GROUP(hscif4_data_b),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c1_a),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c2_a),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(i2c5),
- SH_PFC_PIN_GROUP(i2c6_a),
- SH_PFC_PIN_GROUP(i2c6_b),
- SH_PFC_PIN_GROUP(i2c6_c),
- SH_PFC_PIN_GROUP(intc_ex_irq0),
- SH_PFC_PIN_GROUP(intc_ex_irq1),
- SH_PFC_PIN_GROUP(intc_ex_irq2),
- SH_PFC_PIN_GROUP(intc_ex_irq3),
- SH_PFC_PIN_GROUP(intc_ex_irq4),
- SH_PFC_PIN_GROUP(intc_ex_irq5),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_txd),
- SH_PFC_PIN_GROUP(msiof0_rxd),
- SH_PFC_PIN_GROUP(msiof1_clk_a),
- SH_PFC_PIN_GROUP(msiof1_sync_a),
- SH_PFC_PIN_GROUP(msiof1_ss1_a),
- SH_PFC_PIN_GROUP(msiof1_ss2_a),
- SH_PFC_PIN_GROUP(msiof1_txd_a),
- SH_PFC_PIN_GROUP(msiof1_rxd_a),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_sync_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_txd_b),
- SH_PFC_PIN_GROUP(msiof1_rxd_b),
- SH_PFC_PIN_GROUP(msiof1_clk_c),
- SH_PFC_PIN_GROUP(msiof1_sync_c),
- SH_PFC_PIN_GROUP(msiof1_ss1_c),
- SH_PFC_PIN_GROUP(msiof1_ss2_c),
- SH_PFC_PIN_GROUP(msiof1_txd_c),
- SH_PFC_PIN_GROUP(msiof1_rxd_c),
- SH_PFC_PIN_GROUP(msiof1_clk_d),
- SH_PFC_PIN_GROUP(msiof1_sync_d),
- SH_PFC_PIN_GROUP(msiof1_ss1_d),
- SH_PFC_PIN_GROUP(msiof1_ss2_d),
- SH_PFC_PIN_GROUP(msiof1_txd_d),
- SH_PFC_PIN_GROUP(msiof1_rxd_d),
- SH_PFC_PIN_GROUP(msiof1_clk_e),
- SH_PFC_PIN_GROUP(msiof1_sync_e),
- SH_PFC_PIN_GROUP(msiof1_ss1_e),
- SH_PFC_PIN_GROUP(msiof1_ss2_e),
- SH_PFC_PIN_GROUP(msiof1_txd_e),
- SH_PFC_PIN_GROUP(msiof1_rxd_e),
- SH_PFC_PIN_GROUP(msiof1_clk_f),
- SH_PFC_PIN_GROUP(msiof1_sync_f),
- SH_PFC_PIN_GROUP(msiof1_ss1_f),
- SH_PFC_PIN_GROUP(msiof1_ss2_f),
- SH_PFC_PIN_GROUP(msiof1_txd_f),
- SH_PFC_PIN_GROUP(msiof1_rxd_f),
- SH_PFC_PIN_GROUP(msiof1_clk_g),
- SH_PFC_PIN_GROUP(msiof1_sync_g),
- SH_PFC_PIN_GROUP(msiof1_ss1_g),
- SH_PFC_PIN_GROUP(msiof1_ss2_g),
- SH_PFC_PIN_GROUP(msiof1_txd_g),
- SH_PFC_PIN_GROUP(msiof1_rxd_g),
- SH_PFC_PIN_GROUP(msiof2_clk_a),
- SH_PFC_PIN_GROUP(msiof2_sync_a),
- SH_PFC_PIN_GROUP(msiof2_ss1_a),
- SH_PFC_PIN_GROUP(msiof2_ss2_a),
- SH_PFC_PIN_GROUP(msiof2_txd_a),
- SH_PFC_PIN_GROUP(msiof2_rxd_a),
- SH_PFC_PIN_GROUP(msiof2_clk_b),
- SH_PFC_PIN_GROUP(msiof2_sync_b),
- SH_PFC_PIN_GROUP(msiof2_ss1_b),
- SH_PFC_PIN_GROUP(msiof2_ss2_b),
- SH_PFC_PIN_GROUP(msiof2_txd_b),
- SH_PFC_PIN_GROUP(msiof2_rxd_b),
- SH_PFC_PIN_GROUP(msiof2_clk_c),
- SH_PFC_PIN_GROUP(msiof2_sync_c),
- SH_PFC_PIN_GROUP(msiof2_ss1_c),
- SH_PFC_PIN_GROUP(msiof2_ss2_c),
- SH_PFC_PIN_GROUP(msiof2_txd_c),
- SH_PFC_PIN_GROUP(msiof2_rxd_c),
- SH_PFC_PIN_GROUP(msiof2_clk_d),
- SH_PFC_PIN_GROUP(msiof2_sync_d),
- SH_PFC_PIN_GROUP(msiof2_ss1_d),
- SH_PFC_PIN_GROUP(msiof2_ss2_d),
- SH_PFC_PIN_GROUP(msiof2_txd_d),
- SH_PFC_PIN_GROUP(msiof2_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_a),
- SH_PFC_PIN_GROUP(msiof3_sync_a),
- SH_PFC_PIN_GROUP(msiof3_ss1_a),
- SH_PFC_PIN_GROUP(msiof3_ss2_a),
- SH_PFC_PIN_GROUP(msiof3_txd_a),
- SH_PFC_PIN_GROUP(msiof3_rxd_a),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_ss1_b),
- SH_PFC_PIN_GROUP(msiof3_ss2_b),
- SH_PFC_PIN_GROUP(msiof3_txd_b),
- SH_PFC_PIN_GROUP(msiof3_rxd_b),
- SH_PFC_PIN_GROUP(msiof3_clk_c),
- SH_PFC_PIN_GROUP(msiof3_sync_c),
- SH_PFC_PIN_GROUP(msiof3_txd_c),
- SH_PFC_PIN_GROUP(msiof3_rxd_c),
- SH_PFC_PIN_GROUP(msiof3_clk_d),
- SH_PFC_PIN_GROUP(msiof3_sync_d),
- SH_PFC_PIN_GROUP(msiof3_ss1_d),
- SH_PFC_PIN_GROUP(msiof3_txd_d),
- SH_PFC_PIN_GROUP(msiof3_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_e),
- SH_PFC_PIN_GROUP(msiof3_sync_e),
- SH_PFC_PIN_GROUP(msiof3_ss1_e),
- SH_PFC_PIN_GROUP(msiof3_ss2_e),
- SH_PFC_PIN_GROUP(msiof3_txd_e),
- SH_PFC_PIN_GROUP(msiof3_rxd_e),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm1_a),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2_a),
- SH_PFC_PIN_GROUP(pwm2_b),
- SH_PFC_PIN_GROUP(pwm3_a),
- SH_PFC_PIN_GROUP(pwm3_b),
- SH_PFC_PIN_GROUP(pwm4_a),
- SH_PFC_PIN_GROUP(pwm4_b),
- SH_PFC_PIN_GROUP(pwm5_a),
- SH_PFC_PIN_GROUP(pwm5_b),
- SH_PFC_PIN_GROUP(pwm6_a),
- SH_PFC_PIN_GROUP(pwm6_b),
- SH_PFC_PIN_GROUP(sata0_devslp_a),
- SH_PFC_PIN_GROUP(sata0_devslp_b),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_a),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif2_data_a),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif3_data_a),
- SH_PFC_PIN_GROUP(scif3_clk),
- SH_PFC_PIN_GROUP(scif3_ctrl),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif4_data_a),
- SH_PFC_PIN_GROUP(scif4_clk_a),
- SH_PFC_PIN_GROUP(scif4_ctrl_a),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_clk_b),
- SH_PFC_PIN_GROUP(scif4_ctrl_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif4_clk_c),
- SH_PFC_PIN_GROUP(scif4_ctrl_c),
- SH_PFC_PIN_GROUP(scif5_data_a),
- SH_PFC_PIN_GROUP(scif5_clk_a),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scif5_clk_b),
- SH_PFC_PIN_GROUP(scif_clk_a),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_data8),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd_a),
- SH_PFC_PIN_GROUP(sdhi2_wp_a),
- SH_PFC_PIN_GROUP(sdhi2_cd_b),
- SH_PFC_PIN_GROUP(sdhi2_wp_b),
- SH_PFC_PIN_GROUP(sdhi2_ds),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_data8),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(sdhi3_ds),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi01239_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data_a),
- SH_PFC_PIN_GROUP(ssi1_data_b),
- SH_PFC_PIN_GROUP(ssi1_ctrl_a),
- SH_PFC_PIN_GROUP(ssi1_ctrl_b),
- SH_PFC_PIN_GROUP(ssi2_data_a),
- SH_PFC_PIN_GROUP(ssi2_data_b),
- SH_PFC_PIN_GROUP(ssi2_ctrl_a),
- SH_PFC_PIN_GROUP(ssi2_ctrl_b),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi349_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5_data),
- SH_PFC_PIN_GROUP(ssi5_ctrl),
- SH_PFC_PIN_GROUP(ssi6_data),
- SH_PFC_PIN_GROUP(ssi6_ctrl),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi9_data_a),
- SH_PFC_PIN_GROUP(ssi9_data_b),
- SH_PFC_PIN_GROUP(ssi9_ctrl_a),
- SH_PFC_PIN_GROUP(ssi9_ctrl_b),
- SH_PFC_PIN_GROUP(tmu_tclk1_a),
- SH_PFC_PIN_GROUP(tmu_tclk1_b),
- SH_PFC_PIN_GROUP(tmu_tclk2_a),
- SH_PFC_PIN_GROUP(tmu_tclk2_b),
- SH_PFC_PIN_GROUP(tpu_to0),
- SH_PFC_PIN_GROUP(tpu_to1),
- SH_PFC_PIN_GROUP(tpu_to2),
- SH_PFC_PIN_GROUP(tpu_to3),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb30),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
- SH_PFC_PIN_GROUP(vin4_data18_a),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
- SH_PFC_PIN_GROUP(vin4_data18_b),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
- SH_PFC_PIN_GROUP(vin4_sync),
- SH_PFC_PIN_GROUP(vin4_field),
- SH_PFC_PIN_GROUP(vin4_clkenb),
- SH_PFC_PIN_GROUP(vin4_clk),
- VIN_DATA_PIN_GROUP(vin5_data, 8),
- VIN_DATA_PIN_GROUP(vin5_data, 10),
- VIN_DATA_PIN_GROUP(vin5_data, 12),
- VIN_DATA_PIN_GROUP(vin5_data, 16),
- SH_PFC_PIN_GROUP(vin5_sync),
- SH_PFC_PIN_GROUP(vin5_field),
- SH_PFC_PIN_GROUP(vin5_clkenb),
- SH_PFC_PIN_GROUP(vin5_clk),
+static const struct {
+ struct sh_pfc_pin_group common[318];
+ struct sh_pfc_pin_group automotive[30];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(sata0_devslp_a),
+ SH_PFC_PIN_GROUP(sata0_devslp_b),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_clk_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
+ SH_PFC_PIN_GROUP(tpu_to0),
+ SH_PFC_PIN_GROUP(tpu_to1),
+ SH_PFC_PIN_GROUP(tpu_to2),
+ SH_PFC_PIN_GROUP(tpu_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb30),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
+ SH_PFC_PIN_GROUP(vin4_data18_a),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
+ SH_PFC_PIN_GROUP(vin4_data18_b),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -5241,62 +5248,69 @@ static const char * const vin5_groups[] = {
"vin5_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(can0),
- SH_PFC_FUNCTION(can1),
- SH_PFC_FUNCTION(can_clk),
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
- SH_PFC_FUNCTION(drif0),
- SH_PFC_FUNCTION(drif1),
- SH_PFC_FUNCTION(drif2),
- SH_PFC_FUNCTION(drif3),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(hscif2),
- SH_PFC_FUNCTION(hscif3),
- SH_PFC_FUNCTION(hscif4),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(i2c5),
- SH_PFC_FUNCTION(i2c6),
- SH_PFC_FUNCTION(intc_ex),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(sata0),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tmu),
- SH_PFC_FUNCTION(tpu),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb30),
- SH_PFC_FUNCTION(vin4),
- SH_PFC_FUNCTION(vin5),
+static const struct {
+ struct sh_pfc_function common[51];
+ struct sh_pfc_function automotive[4];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(sata0),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
+ SH_PFC_FUNCTION(tpu),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6425,6 +6439,32 @@ static const struct sh_pfc_soc_operations r8a77965_pinmux_ops = {
.set_bias = r8a77965_pinmux_set_bias,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+const struct sh_pfc_soc_info r8a774b1_pinmux_info = {
+ .name = "r8a774b1_pfc",
+ .ops = &r8a77965_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.name = "r8a77965_pfc",
.ops = &r8a77965_pinmux_ops,
@@ -6434,10 +6474,12 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
@@ -6447,3 +6489,4 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
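The pinmux_groups/pinmux_functions rework above relies on the two fixed-size arrays being adjacent members of one struct: the full set is walked from .common with a summed length, while the new RZ/G2N entry, which lacks the automotive-only DRIF IP, passes ARRAY_SIZE() of .common alone. A reduced sketch of the idea with made-up names ("uart"/"drif" and struct item are illustrative), assuming the kernel's ARRAY_SIZE():

	struct item { const char *name; };

	static const struct {
		struct item common[2];
		struct item automotive[1];
	} table = {
		.common     = { { "uart" }, { "i2c" } },
		.automotive = { { "drif" } },
	};

	/* Full SoC: base pointer plus summed length spans both arrays. */
	static const struct item *all = table.common;
	static const size_t nr_all = ARRAY_SIZE(table.common) +
				     ARRAY_SIZE(table.automotive);

	/* Reduced SoC: same base, ARRAY_SIZE(table.common) entries only. */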
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index 2dfb8d9cfda1..c926a59dd21c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -232,8 +232,8 @@
#define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -448,6 +448,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
+#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
@@ -468,7 +470,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define PINMUX_MOD_SELS \
\
-MOD_SEL0_30_29 \
+ MOD_SEL1_31 \
+MOD_SEL0_30_29 MOD_SEL1_30 \
MOD_SEL1_29 \
MOD_SEL0_28 MOD_SEL1_28 \
MOD_SEL0_27_26 \
@@ -634,7 +637,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N),
PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A),
+ PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH),
PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N),
PINMUX_IPSR_GPSR(IP2_23_20, TX5_B),
PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2),
@@ -642,7 +645,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0),
PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A),
+ PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE),
PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N),
PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0),
@@ -1058,7 +1061,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B),
+ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
@@ -1067,7 +1070,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
/* IPSR11 */
@@ -1085,13 +1088,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
- PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
- PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
@@ -1196,7 +1199,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A),
+ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
@@ -1264,7 +1267,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
- PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B),
+ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
@@ -1524,22 +1527,22 @@ static const unsigned int avb_avtp_pps_mux[] = {
AVB_AVTP_PPS_MARK,
};
-static const unsigned int avb_avtp_match_a_pins[] = {
- /* AVB_AVTP_MATCH_A */
+static const unsigned int avb_avtp_match_pins[] = {
+ /* AVB_AVTP_MATCH */
RCAR_GP_PIN(2, 24),
};
-static const unsigned int avb_avtp_match_a_mux[] = {
- AVB_AVTP_MATCH_A_MARK,
+static const unsigned int avb_avtp_match_mux[] = {
+ AVB_AVTP_MATCH_MARK,
};
-static const unsigned int avb_avtp_capture_a_pins[] = {
- /* AVB_AVTP_CAPTURE_A */
+static const unsigned int avb_avtp_capture_pins[] = {
+ /* AVB_AVTP_CAPTURE */
RCAR_GP_PIN(2, 25),
};
-static const unsigned int avb_avtp_capture_a_mux[] = {
- AVB_AVTP_CAPTURE_A_MARK,
+static const unsigned int avb_avtp_capture_mux[] = {
+ AVB_AVTP_CAPTURE_MARK,
};
/* - CAN ------------------------------------------------------------------ */
@@ -3784,8 +3787,8 @@ static const struct {
SH_PFC_PIN_GROUP(avb_phy_int),
SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match),
+ SH_PFC_PIN_GROUP(avb_avtp_capture),
SH_PFC_PIN_GROUP(can0_data),
SH_PFC_PIN_GROUP(can1_data),
SH_PFC_PIN_GROUP(can_clk),
@@ -4061,8 +4064,8 @@ static const char * const avb_groups[] = {
"avb_phy_int",
"avb_mii",
"avb_avtp_pps",
- "avb_avtp_match_a",
- "avb_avtp_capture_a",
+ "avb_avtp_match",
+ "avb_avtp_capture",
};
static const char * const can0_groups[] = {
@@ -4957,11 +4960,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1,
- 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, 4),
GROUP(
- /* RESERVED 31, 30 */
- 0, 0, 0, 0,
+ MOD_SEL1_31
+ MOD_SEL1_30
MOD_SEL1_29
MOD_SEL1_28
/* RESERVED 27 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 5dfd991ffdaa..dbc36079c381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1450,7 +1450,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(ET0_ETXD2_A),
GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
GPIO_FN(ET0_ETXD3_A),
- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_20 [1] */
FN_EX_WAIT0, FN_TCLK1_B,
/* IP3_19_18 [2] */
- FN_RD_WR, FN_TCLK1_B, 0, 0,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
/* IP3_17_15 [3] */
FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
FN_ET0_ETXD3_A, 0, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 835148fc0f28..640d2a4cb838 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -309,6 +309,7 @@ extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
extern const struct sh_pfc_soc_info r8a774a1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774b1_pinmux_info;
extern const struct sh_pfc_soc_info r8a774c0_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
@@ -319,7 +320,8 @@ extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
@@ -422,12 +424,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
/*
* Describe a pinmux configuration in which a pin is physically multiplexed
* with other pins.
- * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - ipsr: IPSR field
* - fn: Function name
* - psel: Physical multiplexing selector
*/
#define PINMUX_IPSR_PHYS(ipsr, fn, psel) \
- PINMUX_DATA(fn##_MARK, FN_##psel)
+ PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr)
/*
* Describe a pinmux configuration for a single-function pin with GPIO
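This sh_pfc.h change is why the IP0_23_20/IP1_23_20 typo corrected in the four SoC tables above now matters: the ipsr argument used to be discarded, so a wrong field name was harmless. Expansion sketch for the SCL3 entry, following the macro definition above (illustrative, not part of the patch):

	/*
	 * Before: PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1)
	 *	-> PINMUX_DATA(SCL3_MARK, FN_I2C_SEL_3_1)
	 *
	 * After:  PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1)
	 *	-> PINMUX_DATA(SCL3_MARK, FN_I2C_SEL_3_1, FN_IP1_23_20)
	 *
	 * so the IPSR field is now programmed along with the physical
	 * multiplexing selector.
	 */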
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 924080362bf7..b1a9611f46b3 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5996,6 +5996,7 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
struct gpio_chip *chip;
u32 nbank;
int ret, idx;
+ struct gpio_irq_chip *girq;
ret = of_property_read_u32(np, "gpio-banks", &nbank);
if (ret) {
@@ -6048,24 +6049,15 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
chip->of_gpio_n_cells = 2;
chip->parent = &pdev->dev;
- /* Add gpio chip to system */
- ret = gpiochip_add_data(chip, a7gc);
- if (ret) {
- dev_err(&pdev->dev,
- "%pOF: error in probe function with status %d\n",
- np, ret);
- goto failed;
- }
-
- /* Add gpio chip to irq subsystem */
- ret = gpiochip_irqchip_add(chip, &atlas7_gpio_irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto failed;
- }
-
+ girq = &chip->irq;
+ girq->chip = &atlas7_gpio_irq_chip;
+ girq->parent_handler = atlas7_gpio_handle_irq;
+ girq->num_parents = nbank;
+ girq->parents = devm_kcalloc(&pdev->dev, nbank,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (idx = 0; idx < nbank; idx++) {
struct atlas7_gpio_bank *bank;
@@ -6084,9 +6076,18 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
goto failed;
}
bank->irq = ret;
+ girq->parents[idx] = ret;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(chip, &atlas7_gpio_irq_chip,
- bank->irq, atlas7_gpio_handle_irq);
+ /* Add gpio chip to system */
+ ret = gpiochip_add_data(chip, a7gc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%pOF: error in probe function with status %d\n",
+ np, ret);
+ goto failed;
}
platform_set_drvdata(pdev, a7gc);
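The atlas7 conversion above -- and the sirf and plgpio ones below -- follow the same recipe: fill in struct gpio_irq_chip before registration and let gpiochip_add_data() set up the irqchip, instead of calling gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() afterwards. A condensed single-parent sketch with illustrative names, assuming <linux/gpio/driver.h> and <linux/irq.h>:

	static int example_add_chip(struct platform_device *pdev,
				    struct gpio_chip *chip,
				    struct irq_chip *irqchip,
				    irq_flow_handler_t parent_handler,
				    unsigned int parent_irq, void *data)
	{
		struct gpio_irq_chip *girq = &chip->irq;

		girq->chip = irqchip;
		girq->parent_handler = parent_handler;
		girq->num_parents = 1;
		girq->parents = devm_kcalloc(&pdev->dev, 1,
					     sizeof(*girq->parents),
					     GFP_KERNEL);
		if (!girq->parents)
			return -ENOMEM;
		girq->parents[0] = parent_irq;
		girq->default_type = IRQ_TYPE_NONE;
		girq->handler = handle_level_irq;

		/* Registers the irqchip as part of adding the chip. */
		return gpiochip_add_data(chip, data);
	}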
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 780c31bb4009..1ebcb957c654 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -785,6 +785,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
+ struct gpio_irq_chip *girq;
u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
@@ -816,36 +817,33 @@ static int sirfsoc_gpio_probe(struct device_node *np)
sgpio->chip.gc.parent = &pdev->dev;
sgpio->chip.regs = regs;
- err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
- if (err) {
- dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
- np, err);
- goto out;
- }
-
- err = gpiochip_irqchip_add(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (err) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto out_banks;
- }
-
+ girq = &sgpio->chip.gc.irq;
+ girq->chip = &sirfsoc_irq_chip;
+ girq->parent_handler = sirfsoc_gpio_handle_irq;
+ girq->num_parents = SIRFSOC_GPIO_NO_OF_BANKS;
+ girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio->sgpio_bank[i];
spin_lock_init(&bank->lock);
bank->parent_irq = platform_get_irq(pdev, i);
if (bank->parent_irq < 0) {
err = bank->parent_irq;
- goto out_banks;
+ goto out;
}
+ girq->parents[i] = bank->parent_irq;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- bank->parent_irq,
- sirfsoc_gpio_handle_irq);
+ err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
+ if (err) {
+ dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
+ np, err);
+ goto out;
}
err = gpiochip_add_pin_range(&sgpio->chip.gc, dev_name(&pdev->dev),
@@ -867,7 +865,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
return 0;
out_no_range:
-out_banks:
gpiochip_remove(&sgpio->chip.gc);
out:
iounmap(regs);
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 9d906474f3e4..1ebbc49b16f1 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -515,15 +515,13 @@ end:
static int plgpio_probe(struct platform_device *pdev)
{
struct plgpio *plgpio;
- struct resource *res;
int ret, irq;
plgpio = devm_kzalloc(&pdev->dev, sizeof(*plgpio), GFP_KERNEL);
if (!plgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- plgpio->base = devm_ioremap_resource(&pdev->dev, res);
+ plgpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(plgpio->base))
return PTR_ERR(plgpio->base);
@@ -569,40 +567,35 @@ static int plgpio_probe(struct platform_device *pdev)
}
}
- ret = gpiochip_add_data(&plgpio->chip, plgpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpio chip\n");
- goto unprepare_clk;
- }
-
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_info(&pdev->dev, "PLGPIO registered without IRQs\n");
- return 0;
+ if (irq > 0) {
+ struct gpio_irq_chip *girq;
+
+ girq = &plgpio->chip.irq;
+ girq->chip = &plgpio_irqchip;
+ girq->parent_handler = plgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ dev_info(&pdev->dev, "PLGPIO registering with IRQs\n");
+ } else {
+ dev_info(&pdev->dev, "PLGPIO registering without IRQs\n");
}
- ret = gpiochip_irqchip_add(&plgpio->chip,
- &plgpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&plgpio->chip, plgpio);
if (ret) {
- dev_err(&pdev->dev, "failed to add irqchip to gpiochip\n");
- goto remove_gpiochip;
+ dev_err(&pdev->dev, "unable to add gpio chip\n");
+ goto unprepare_clk;
}
- gpiochip_set_chained_irqchip(&plgpio->chip,
- &plgpio_irqchip,
- irq,
- plgpio_irq_handler);
-
- dev_info(&pdev->dev, "PLGPIO registered with IRQs\n");
-
return 0;
-remove_gpiochip:
- dev_info(&pdev->dev, "Remove gpiochip\n");
- gpiochip_remove(&plgpio->chip);
unprepare_clk:
if (!IS_ERR(plgpio->clk))
clk_unprepare(plgpio->clk);
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 7ec19c73f870..948f56abb9ae 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -358,7 +358,6 @@ int spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct spear_pmx *pmx;
if (!machdata)
@@ -368,8 +367,7 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!pmx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->vbase = devm_ioremap_resource(&pdev->dev, res);
+ pmx->vbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->vbase))
return PTR_ERR(pmx->vbase);
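Several files in this series (spear, plgpio, sunxi, tegra, vt8500, zte) make the same substitution. devm_platform_ioremap_resource() is simply the two-step idiom folded into one call; the open-coded equivalent, with an illustrative wrapper name:

	static void __iomem *example_ioremap(struct platform_device *pdev,
					     unsigned int index)
	{
		struct resource *res;

		/* What devm_platform_ioremap_resource(pdev, index) does: */
		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}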
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 7b95bf5a82a9..157712ab05a8 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -41,7 +41,8 @@
#define PUBCP_SLEEP_MODE BIT(14)
#define TGLDSP_SLEEP_MODE BIT(15)
#define AGDSP_SLEEP_MODE BIT(16)
-#define SLEEP_MODE_MASK GENMASK(3, 0)
+#define CM4_SLEEP_MODE BIT(17)
+#define SLEEP_MODE_MASK GENMASK(5, 0)
#define SLEEP_MODE_SHIFT 13
#define SLEEP_INPUT BIT(1)
@@ -81,6 +82,7 @@ enum pin_sleep_mode {
PUBCP_SLEEP = BIT(1),
TGLDSP_SLEEP = BIT(2),
AGDSP_SLEEP = BIT(3),
+ CM4_SLEEP = BIT(4),
};
enum pin_func_sel {
@@ -484,6 +486,13 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
SLEEP_PULL_UP_MASK) << 16;
arg |= (reg >> PULL_UP_SHIFT) & PULL_UP_MASK;
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if ((reg & (SLEEP_PULL_DOWN | SLEEP_PULL_UP)) ||
+ (reg & (PULL_DOWN | PULL_UP_4_7K | PULL_UP_20K)))
+ return -EINVAL;
+
+ arg = 1;
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
arg = 0;
break;
@@ -609,6 +618,8 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
val |= TGLDSP_SLEEP_MODE;
if (arg & AGDSP_SLEEP)
val |= AGDSP_SLEEP_MODE;
+ if (arg & CM4_SLEEP)
+ val |= CM4_SLEEP_MODE;
mask = SLEEP_MODE_MASK;
shift = SLEEP_MODE_SHIFT;
@@ -674,6 +685,16 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = PULL_UP_SHIFT;
}
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (is_sleep_config) {
+ val = shift = 0;
+ mask = SLEEP_PULL_DOWN | SLEEP_PULL_UP;
+ } else {
+ val = shift = 0;
+ mask = PULL_DOWN | PULL_UP_20K |
+ PULL_UP_4_7K;
+ }
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
continue;
default:
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0cbca30b75dc..b35c3245ab3f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1385,7 +1385,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
struct pinmux_ops *pmxops;
- struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1396,8 +1395,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
raw_spin_lock_init(&pctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->membase = devm_ioremap_resource(&pdev->dev, res);
+ pctl->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->membase))
return PTR_ERR(pctl->membase);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 95002e3ecaff..6f7b3767f453 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -873,7 +873,6 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
- struct resource *res;
struct phy *phy;
int err;
@@ -885,11 +884,16 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
mutex_init(&padctl->lock);
padctl->dev = &pdev->dev;
+ /*
+ * Note that we can't replace this by of_device_get_match_data()
+ * because we need the separate matching table for this legacy code on
+ * Tegra124. of_device_get_match_data() would attempt to use the table
+ * from the updated driver and fail.
+ */
match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
padctl->soc = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+ padctl->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(padctl->regs))
return PTR_ERR(padctl->regs);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index e9a7cbb9aa33..692d8b3e2a20 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -781,8 +781,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
for (i = 0; i < pmx->nbanks; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ pmx->regs[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(pmx->regs[i]))
return PTR_ERR(pmx->regs[i]);
}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index e5e7f1f22813..b522ca010332 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
return -EINVAL;
rows = pinctrl_count_index_with_args(np, name);
- if (rows == -EINVAL)
+ if (rows < 0)
return rows;
*map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 4d5cd7d8c760..ea910a18b4d7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -553,10 +553,8 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
struct wmt_pinctrl_data *data)
{
int err;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 9512045420ec..786bf89487d6 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -387,7 +387,6 @@ int zx_pinctrl_init(struct platform_device *pdev,
struct pinctrl_desc *pctldesc;
struct zx_pinctrl *zpctl;
struct device_node *np;
- struct resource *res;
int ret;
zpctl = devm_kzalloc(&pdev->dev, sizeof(*zpctl), GFP_KERNEL);
@@ -396,8 +395,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
spin_lock_init(&zpctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- zpctl->base = devm_ioremap_resource(&pdev->dev, res);
+ zpctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(zpctl->base))
return PTR_ERR(zpctl->base);
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 6f80ff4532ae..5af1d66d9eca 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -98,7 +98,10 @@
TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
- TRACE_SYMBOL(EC_CMD_CODEC_I2S), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
TRACE_SYMBOL(EC_CMD_ACPI_READ), \
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 77b35df3a801..f3d09b1631e3 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
- depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 62ea1934fb6a..f4d0a86c00d0 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -17,8 +17,8 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES
config CPU_HWMON
- tristate "Loongson CPU HWMon Driver"
- depends on LOONGSON_MACH3X
+ tristate "Loongson-3 CPU HWMon Driver"
+ depends on MACH_LOONGSON64
select HWMON
default y
help
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index a7f184bb47e0..0d27cb7a9e3c 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -9,6 +9,9 @@
#include <loongson.h>
#include <boot_param.h>
#include <loongson_hwmon.h>
+#include <loongson_regs.h>
+
+static int csr_temp_enable;
/*
* Loongson-3 series cpu has two sensors inside,
@@ -20,8 +23,14 @@ int loongson3_cpu_temp(int cpu)
{
u32 reg, prid_rev;
+ if (csr_temp_enable) {
+ reg = (csr_readl(LOONGSON_CSR_CPUTEMP) & 0xff);
+ goto out;
+ }
+
reg = LOONGSON_CHIPTEMP(cpu);
prid_rev = read_c0_prid() & PRID_REV_MASK;
+
switch (prid_rev) {
case PRID_REV_LOONGSON3A_R1:
reg = (reg >> 8) & 0xff;
@@ -34,9 +43,12 @@ int loongson3_cpu_temp(int cpu)
break;
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
+ default:
reg = (reg & 0xffff)*731/0x4000 - 273;
break;
}
+
+out:
return (int)reg * 1000;
}
@@ -159,9 +171,12 @@ static int __init loongson_hwmon_init(void)
pr_info("Loongson Hwmon Enter...\n");
+ if (cpu_has_csr())
+ csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_TEMP;
+
cpu_hwmon_dev = hwmon_device_register(NULL);
if (IS_ERR(cpu_hwmon_dev)) {
- ret = -ENOMEM;
+ ret = PTR_ERR(cpu_hwmon_dev);
pr_err("hwmon_device_register fail!\n");
goto fail_hwmon_device_register;
}
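
Two small techniques in the cpu_hwmon change are worth calling out: the CSR feature is detected once at init and then short-circuits the per-revision register decode, and the error path now propagates the errno encoded in the ERR_PTR instead of assuming -ENOMEM. The latter is the standard idiom, sketched here:

cpu_hwmon_dev = hwmon_device_register(NULL);
if (IS_ERR(cpu_hwmon_dev))
	return PTR_ERR(cpu_hwmon_dev);	/* the real cause, e.g. -EINVAL */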
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 472af7edf0af..ca65e1039f92 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1148,7 +1148,7 @@ static void asus_als_switch(struct asus_laptop *asus, int value)
ret = write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value);
}
if (ret)
- pr_warning("Error setting light sensor switch\n");
+ pr_warn("Error setting light sensor switch\n");
asus->light_switch = value;
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index f3f74a9c109e..776868d5e458 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,7 +578,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
port = acpi_get_pci_dev(handle);
if (!port) {
- pr_warning("Unable to find port\n");
+ pr_warn("Unable to find port\n");
goto out_unlock;
}
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 3c0438ba385e..1a09a75bd16d 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -243,7 +243,7 @@ static int oaktrail_backlight_init(void)
if (IS_ERR(bd)) {
oaktrail_bl_device = NULL;
- pr_warning("Unable to register backlight device\n");
+ pr_warn("Unable to register backlight device\n");
return PTR_ERR(bd);
}
@@ -313,20 +313,20 @@ static int __init oaktrail_init(void)
ret = platform_driver_register(&oaktrail_driver);
if (ret) {
- pr_warning("Unable to register platform driver\n");
+ pr_warn("Unable to register platform driver\n");
goto err_driver_reg;
}
oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
if (!oaktrail_device) {
- pr_warning("Unable to allocate platform device\n");
+ pr_warn("Unable to allocate platform device\n");
ret = -ENOMEM;
goto err_device_alloc;
}
ret = platform_device_add(oaktrail_device);
if (ret) {
- pr_warning("Unable to add platform device\n");
+ pr_warn("Unable to add platform device\n");
goto err_device_add;
}
@@ -338,7 +338,7 @@ static int __init oaktrail_init(void)
ret = oaktrail_rfkill_init();
if (ret) {
- pr_warning("Setup rfkill failed\n");
+ pr_warn("Setup rfkill failed\n");
goto err_rfkill;
}
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 4684e7df833a..5376f3d22f31 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -905,7 +905,7 @@ static int omap_sr_probe(struct platform_device *pdev)
sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
- (void *)sr_info, &pm_sr_fops);
+ sr_info, &pm_sr_fops);
debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_weight);
debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 44ca983a49a1..d94e3267c3b6 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -131,7 +131,7 @@ static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST),
+ writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
at91_rstc_base);
return NOTIFY_DONE;
@@ -140,9 +140,7 @@ static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
static int samx7_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PROCRST),
- at91_rstc_base);
-
+ writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base);
return NOTIFY_DONE;
}
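
Dropping cpu_to_le32() here is a correctness fix, not just cleanup: writel() already performs the little-endian conversion for MMIO, so wrapping the value swapped it twice on big-endian kernels. Roughly, the asm-generic definition (barriers omitted) is:

static inline void writel(u32 value, volatile void __iomem *addr)
{
	/* the accessor owns the endianness conversion */
	__raw_writel((u32 __force)cpu_to_le32(value), addr);
}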
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e341cc5c0ea6..1c18f465a245 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -269,6 +269,12 @@ static const struct of_device_id at91_shdwc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at91_shdwc_of_match);
+static const struct of_device_id at91_pmc_ids[] = {
+ { .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "microchip,sam9x60-pmc" },
+ { /* Sentinel. */ }
+};
+
static int __init at91_shdwc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -313,7 +319,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ np = of_find_matching_node(NULL, at91_pmc_ids);
if (!np) {
ret = -ENODEV;
goto clk_disable;
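
Switching from of_find_compatible_node() to of_find_matching_node() lets the shutdown controller find either PMC compatible with a single lookup. The table-based form scans until the empty sentinel entry; an illustrative shape (the real table is at91_pmc_ids above, the compatible strings below are hypothetical):

static const struct of_device_id ids[] = {
	{ .compatible = "vendor,first-pmc" },
	{ .compatible = "vendor,second-pmc" },
	{ /* sentinel terminates the scan */ }
};

np = of_find_matching_node(NULL, ids);	/* NULL: search from the start */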
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index c84a7b1caeb6..27164a1d3c7c 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -629,7 +629,7 @@ config BATTERY_GAUGE_LTC2941
config AB8500_BM
bool "AB8500 Battery Management Driver"
- depends on AB8500_CORE && AB8500_GPADC
+ depends on AB8500_CORE && AB8500_GPADC && (IIO = y)
help
Say Y to include support for AB8500 battery management.
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 8fe81259bfd9..909f0242bacb 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -26,7 +26,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/iio/consumer.h>
#define VTVOUT_V 1800
@@ -79,7 +79,8 @@ struct ab8500_btemp_ranges {
* @bat_temp: Dispatched battery temperature in degree Celsius
* @prev_bat_temp Last measured battery temperature in degree Celsius
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @btemp_ball: ADC channel for the battery ball temperature
+ * @bat_ctrl: ADC channel for the battery control
* @fg: Pointer to the struct fg
* @bm: Platform specific battery management information
* @btemp_psy: Structure for BTEMP specific battery properties
@@ -96,7 +97,8 @@ struct ab8500_btemp {
int bat_temp;
int prev_bat_temp;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *btemp_ball;
+ struct iio_channel *bat_ctrl;
struct ab8500_fg *fg;
struct abx500_bm_data *bm;
struct power_supply *btemp_psy;
@@ -177,13 +179,13 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
*/
static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
{
- int vbtemp;
+ int vbtemp, ret;
static int prev;
- vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
- if (vbtemp < 0) {
+ ret = iio_read_channel_processed(di->bat_ctrl, &vbtemp);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed, using previous value",
+ "%s ADC conversion failed, using previous value",
__func__);
return prev;
}
@@ -455,7 +457,7 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
*/
static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
{
- int temp;
+ int temp, ret;
static int prev;
int rbat, rntc, vntc;
u8 id;
@@ -480,10 +482,10 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
di->bm->bat_type[id].r_to_t_tbl,
di->bm->bat_type[id].n_temp_tbl_elements, rbat);
} else {
- vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
- if (vntc < 0) {
+ ret = iio_read_channel_processed(di->btemp_ball, &vntc);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed,"
+ "%s ADC conversion failed,"
" using previous value\n", __func__);
return prev;
}
@@ -1024,7 +1026,22 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* Get ADC channels */
+ di->btemp_ball = devm_iio_channel_get(&pdev->dev, "btemp_ball");
+ if (IS_ERR(di->btemp_ball)) {
+ if (PTR_ERR(di->btemp_ball) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get BTEMP BALL ADC channel\n");
+ return PTR_ERR(di->btemp_ball);
+ }
+ di->bat_ctrl = devm_iio_channel_get(&pdev->dev, "bat_ctrl");
+ if (IS_ERR(di->bat_ctrl)) {
+ if (PTR_ERR(di->bat_ctrl) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get BAT CTRL ADC channel\n");
+ return PTR_ERR(di->bat_ctrl);
+ }
di->initialized = false;
@@ -1082,6 +1099,11 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_btemp_irq[i].name, di);
@@ -1104,13 +1126,13 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->btemp_psy);
-
/* We also have to free all successfully registered irqs */
for (i = i - 1; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
free_irq(irq, di);
}
+
+ power_supply_unregister(di->btemp_psy);
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
return ret;
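
All three ab8500 conversions repeat the same channel-acquisition shape, so it may help to see it factored out. A hypothetical helper, not part of the patch, assuming only devm_iio_channel_get() semantics: -ENODEV usually means the ADC provider has not probed yet, which is why it is translated into -EPROBE_DEFER.

static struct iio_channel *get_channel(struct device *dev, const char *name)
{
	struct iio_channel *ch = devm_iio_channel_get(dev, name);

	if (IS_ERR(ch)) {
		if (PTR_ERR(ch) == -ENODEV)
			return ERR_PTR(-EPROBE_DEFER);	/* retry later */
		dev_err(dev, "failed to get %s ADC channel\n", name);
	}
	return ch;
}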
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index e51d0e72beea..8a0f9d769690 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -29,10 +29,10 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/usb/otg.h>
#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
/* Charger constants */
#define NO_PW_CONN 0
@@ -233,7 +233,10 @@ struct ab8500_charger_max_usb_in_curr {
* @current_stepping_sessions:
* Counter for current stepping sessions
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @adc_main_charger_v: ADC channel for main charger voltage
+ * @adc_main_charger_c: ADC channel for main charger current
+ * @adc_vbus_v: ADC channel for USB charger voltage
+ * @adc_usb_charger_c: ADC channel for USB charger current
* @bm: Platform specific battery management information
* @flags: Structure for information about events triggered
* @usb_state: Structure for usb stack information
@@ -283,7 +286,10 @@ struct ab8500_charger {
int is_aca_rid;
atomic_t current_stepping_sessions;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *adc_main_charger_v;
+ struct iio_channel *adc_main_charger_c;
+ struct iio_channel *adc_vbus_v;
+ struct iio_channel *adc_usb_charger_c;
struct abx500_bm_data *bm;
struct ab8500_charger_event_flags flags;
struct ab8500_charger_usb_state usb_state;
@@ -459,13 +465,13 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
*/
static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
{
- int vch;
+ int vch, ret;
/* Only measure voltage if the charger is connected */
if (di->ac.charger_connected) {
- vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
- if (vch < 0)
- dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
+ ret = iio_read_channel_processed(di->adc_main_charger_v, &vch);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
vch = 0;
}
@@ -510,13 +516,13 @@ static int ab8500_charger_ac_cv(struct ab8500_charger *di)
*/
static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
{
- int vch;
+ int vch, ret;
/* Only measure voltage if the charger is connected */
if (di->usb.charger_connected) {
- vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
- if (vch < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_vbus_v, &vch);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
vch = 0;
}
@@ -532,13 +538,13 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
*/
static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
{
- int ich;
+ int ich, ret;
/* Only measure current if the charger is online */
if (di->usb.charger_online) {
- ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
- if (ich < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_usb_charger_c, &ich);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
ich = 0;
}
@@ -554,13 +560,13 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
*/
static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
{
- int ich;
+ int ich, ret;
/* Only measure current if the charger is online */
if (di->ac.charger_online) {
- ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
- if (ich < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_main_charger_c, &ich);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
ich = 0;
}
@@ -3371,7 +3377,39 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* Get ADC channels */
+ di->adc_main_charger_v = devm_iio_channel_get(&pdev->dev,
+ "main_charger_v");
+ if (IS_ERR(di->adc_main_charger_v)) {
+ if (PTR_ERR(di->adc_main_charger_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC main charger voltage\n");
+ return PTR_ERR(di->adc_main_charger_v);
+ }
+ di->adc_main_charger_c = devm_iio_channel_get(&pdev->dev,
+ "main_charger_c");
+ if (IS_ERR(di->adc_main_charger_c)) {
+ if (PTR_ERR(di->adc_main_charger_c) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC main charger current\n");
+ return PTR_ERR(di->adc_main_charger_c);
+ }
+ di->adc_vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ if (IS_ERR(di->adc_vbus_v)) {
+ if (PTR_ERR(di->adc_vbus_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC USB charger voltage\n");
+ return PTR_ERR(di->adc_vbus_v);
+ }
+ di->adc_usb_charger_c = devm_iio_channel_get(&pdev->dev,
+ "usb_charger_c");
+ if (IS_ERR(di->adc_usb_charger_c)) {
+ if (PTR_ERR(di->adc_usb_charger_c) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC USB charger current\n");
+ return PTR_ERR(di->adc_usb_charger_c);
+ }
/* initialize lock */
spin_lock_init(&di->usb_state.usb_lock);
@@ -3556,6 +3594,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_charger_irq[i].name, di);
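
The new irq guard matters because platform_get_irq_byname() returns a negative errno on failure; before this change that negative value was handed straight to request_threaded_irq(). The pattern restated generically (irq name illustrative):

irq = platform_get_irq_byname(pdev, "MAIN_EXT_CH_NOT_OK");
if (irq < 0)
	return irq;	/* may be -ENXIO or -EPROBE_DEFER */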
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 6fc4bc30644c..c3912ee9eb99 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -32,7 +32,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/iio/consumer.h>
#include <linux/kernel.h>
#define MILLI_TO_MICRO 1000
@@ -182,7 +182,7 @@ struct inst_curr_result_list {
* @bat_cap: Structure for battery capacity specific parameters
* @avg_cap: Average capacity filter
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @main_bat_v: ADC channel for the main battery voltage
* @bm: Platform specific battery management information
* @fg_psy: Structure that holds the FG specific battery properties
* @fg_wq: Work queue for running the FG algorithm
@@ -224,7 +224,7 @@ struct ab8500_fg {
struct ab8500_fg_battery_capacity bat_cap;
struct ab8500_fg_avg_cap avg_cap;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *main_bat_v;
struct abx500_bm_data *bm;
struct power_supply *fg_psy;
struct workqueue_struct *fg_wq;
@@ -829,13 +829,13 @@ exit:
*/
static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
{
- int vbat;
+ int vbat, ret;
static int prev;
- vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
- if (vbat < 0) {
+ ret = iio_read_channel_processed(di->main_bat_v, &vbat);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed, using previous value\n",
+ "%s ADC conversion failed, using previous value\n",
__func__);
return prev;
}
@@ -3066,7 +3066,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ di->main_bat_v = devm_iio_channel_get(&pdev->dev, "main_bat_v");
+ if (IS_ERR(di->main_bat_v)) {
+ if (PTR_ERR(di->main_bat_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get main battery ADC channel\n");
+ return PTR_ERR(di->main_bat_v);
+ }
psy_cfg.supplied_to = supply_interface;
psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
@@ -3151,6 +3158,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_irq(irq, ab8500_fg_irq_th[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_fg_irq_th[i].name, di);
@@ -3158,7 +3170,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
@@ -3166,6 +3178,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register threaded interrupt handler */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_fg_irq_bh[0].isr,
IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_fg_irq_bh[0].name, di);
@@ -3173,7 +3190,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
@@ -3212,15 +3229,17 @@ static int ab8500_fg_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->fg_psy);
-
/* We also have to free all registered irqs */
- for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ free_irq(irq, di);
+free_irq_th:
+ while (--i >= 0) {
+ /* Last assignment of i from primary interrupt handlers */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
free_irq(irq, di);
}
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- free_irq(irq, di);
+
+ power_supply_unregister(di->fg_psy);
free_inst_curr_wq:
destroy_workqueue(di->fg_wq);
return ret;
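
The relabelled cleanup above encodes the usual unwind rule: release in reverse order of acquisition, and only what actually succeeded (the bottom-half IRQ was requested last, so it is freed first, and power_supply_unregister() moves after the IRQ teardown). A minimal sketch of the rule with hypothetical helpers:

static int example_probe(void)
{
	int ret;

	ret = grab_first();
	if (ret)
		return ret;

	ret = grab_second();
	if (ret)
		goto release_first;	/* the second was never held */

	return 0;

release_first:
	put_first();
	return ret;
}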
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 23757fb10479..e6e37d4f20e4 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -354,13 +354,13 @@ static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
if (di->chg_info.charger_type & USB_CHG) {
return di->usb_chg->ops.check_enable(di->usb_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
} else if ((di->chg_info.charger_type & AC_CHG) &&
!(di->ac_chg->external)) {
return di->ac_chg->ops.check_enable(di->ac_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
}
return 0;
}
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index dc4c316eff81..5f0a5722b19e 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -48,6 +48,8 @@
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+#define AXP813_BC_EN BIT(0)
+
/*
* Note do not raise the debounce time, we must report Vusb high within
* 100ms otherwise we get Vbus errors in musb.
@@ -495,6 +497,12 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (power->axp20x_id == AXP813_ID) {
+ /* Enable USB Battery Charging specification detection */
+ regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
+ AXP813_BC_EN, AXP813_BC_EN);
+ }
+
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
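
regmap_update_bits() is a read-modify-write: only bits set in the mask argument are touched, so passing the same bit as both mask and value sets it without disturbing the rest of the register. Sketch (the clear call is illustration only, not part of the patch):

regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
		   AXP813_BC_EN, AXP813_BC_EN);	/* set the bit */
regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
		   AXP813_BC_EN, 0);		/* clear it */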
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
index 1bb32b7226d7..b8e1ec106627 100644
--- a/drivers/power/supply/bd70528-charger.c
+++ b/drivers/power/supply/bd70528-charger.c
@@ -741,3 +741,4 @@ module_platform_driver(bd70528_power);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 power-supply driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-power");
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 61d6447d1966..6e9392901b0a 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -33,8 +33,6 @@
#include <linux/iio/types.h>
#include <linux/mfd/motorola-cpcap.h>
-#include <asm/div64.h>
-
/*
* Register bit defines for CPCAP_REG_BPEOL. Some of these seem to
* map to MC13783UG.pdf "Table 5-19. Register 13, Power Control 0"
@@ -52,6 +50,26 @@
#define CPCAP_REG_BPEOL_BIT_BATTDETEN BIT(1) /* Enable battery detect */
#define CPCAP_REG_BPEOL_BIT_EOLSEL BIT(0) /* BPDET = 0, EOL = 1 */
+/*
+ * Register bit defines for CPCAP_REG_CCC1. These seem similar to the twl6030
+ * coulomb counter registers rather than the mc13892 registers. Both twl6030
+ * and mc13892 set bits 2 and 1 to reset and clear registers. But mc13892
+ * sets bit 0 to start the coulomb counter while twl6030 sets bit 0 to stop
+ * the coulomb counter like cpcap does. So for now, we use the twl6030 style
+ * naming for the registers.
+ */
+#define CPCAP_REG_CCC1_ACTIVE_MODE1 BIT(4) /* Update rate */
+#define CPCAP_REG_CCC1_ACTIVE_MODE0 BIT(3) /* Update rate */
+#define CPCAP_REG_CCC1_AUTOCLEAR BIT(2) /* Resets sample registers */
+#define CPCAP_REG_CCC1_CAL_EN BIT(1) /* Clears after write in 1s */
+#define CPCAP_REG_CCC1_PAUSE BIT(0) /* Stop counters, allow write */
+#define CPCAP_REG_CCC1_RESET_MASK (CPCAP_REG_CCC1_AUTOCLEAR | \
+ CPCAP_REG_CCC1_CAL_EN)
+
+#define CPCAP_REG_CCCC2_RATE1 BIT(5)
+#define CPCAP_REG_CCCC2_RATE0 BIT(4)
+#define CPCAP_REG_CCCC2_ENABLE BIT(3)
+
#define CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS 250
enum {
@@ -64,6 +82,7 @@ enum {
enum cpcap_battery_irq_action {
CPCAP_BATTERY_IRQ_ACTION_NONE,
+ CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE,
CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW,
CPCAP_BATTERY_IRQ_ACTION_POWEROFF,
};
@@ -76,15 +95,16 @@ struct cpcap_interrupt_desc {
};
struct cpcap_battery_config {
- int ccm;
int cd_factor;
struct power_supply_info info;
+ struct power_supply_battery_info bat;
};
struct cpcap_coulomb_counter_data {
s32 sample; /* 24 or 32 bits */
s32 accumulator;
s16 offset; /* 9 bits */
+ s16 integrator; /* 13 or 16 bits */
};
enum cpcap_battery_state {
@@ -110,6 +130,7 @@ struct cpcap_battery_ddata {
struct power_supply *psy;
struct cpcap_battery_config config;
struct cpcap_battery_state_data state[CPCAP_BATTERY_STATE_NR];
+ u32 cc_lsb; /* μAms per LSB */
atomic_t active;
int status;
u16 vendor;
@@ -217,41 +238,17 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
s16 offset, u32 divider)
{
s64 acc;
- u64 tmp;
- int avg_current;
- u32 cc_lsb;
if (!divider)
return 0;
- switch (ddata->vendor) {
- case CPCAP_VENDOR_ST:
- cc_lsb = 95374; /* μAms per LSB */
- break;
- case CPCAP_VENDOR_TI:
- cc_lsb = 91501; /* μAms per LSB */
- break;
- default:
- return -EINVAL;
- }
-
acc = accumulator;
- acc = acc - ((s64)sample * offset);
- cc_lsb = (cc_lsb * ddata->config.cd_factor) / 1000;
+ acc -= (s64)sample * offset;
+ acc *= ddata->cc_lsb;
+ acc *= -1;
+ acc = div_s64(acc, divider);
- if (acc >= 0)
- tmp = acc;
- else
- tmp = acc * -1;
-
- tmp = tmp * cc_lsb;
- do_div(tmp, divider);
- avg_current = tmp;
-
- if (acc >= 0)
- return -avg_current;
- else
- return avg_current;
+ return acc;
}
/* 3600000μAms = 1μAh */
@@ -293,12 +290,13 @@ static int
cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
struct cpcap_coulomb_counter_data *ccd)
{
- u16 buf[7]; /* CPCAP_REG_CC1 to CCI */
+ u16 buf[7]; /* CPCAP_REG_CCS1 to CCI */
int error;
ccd->sample = 0;
ccd->accumulator = 0;
ccd->offset = 0;
+ ccd->integrator = 0;
/* Read coulomb counter register range */
error = regmap_bulk_read(ddata->reg, CPCAP_REG_CCS1,
@@ -323,6 +321,12 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
ccd->offset = buf[4];
ccd->offset = sign_extend32(ccd->offset, 9);
+ /* Integrator register CPCAP_REG_CCI */
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+ ccd->integrator = sign_extend32(buf[6], 13);
+ else
+ ccd->integrator = (s16)buf[6];
+
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
ccd->accumulator,
@@ -336,31 +340,28 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
static int cpcap_battery_cc_get_avg_current(struct cpcap_battery_ddata *ddata)
{
int value, acc, error;
- s32 sample = 1;
+ s32 sample;
s16 offset;
- if (ddata->vendor == CPCAP_VENDOR_ST)
- sample = 4;
-
/* Coulomb counter integrator */
error = regmap_read(ddata->reg, CPCAP_REG_CCI, &value);
if (error)
return error;
- if ((ddata->vendor == CPCAP_VENDOR_TI) && (value > 0x2000))
- value = value | 0xc000;
-
- acc = (s16)value;
+ if (ddata->vendor == CPCAP_VENDOR_TI) {
+ acc = sign_extend32(value, 13);
+ sample = 1;
+ } else {
+ acc = (s16)value;
+ sample = 4;
+ }
- /* Coulomb counter sample time */
+ /* Coulomb counter calibration offset */
error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
if (error)
return error;
- if (value < 0x200)
- offset = value;
- else
- offset = value | 0xfc00;
+ offset = sign_extend32(value, 9);
return cpcap_battery_cc_to_ua(ddata, sample, acc, offset);
}
@@ -369,8 +370,8 @@ static bool cpcap_battery_full(struct cpcap_battery_ddata *ddata)
{
struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
- /* Basically anything that measures above 4347000 is full */
- if (state->voltage >= (ddata->config.info.voltage_max_design - 4000))
+ if (state->voltage >=
+ (ddata->config.bat.constant_charge_voltage_max_uv - 18000))
return true;
return false;
@@ -417,6 +418,7 @@ static enum power_supply_property cpcap_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
@@ -475,6 +477,9 @@ static int cpcap_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = ddata->config.info.voltage_min_design;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->config.bat.constant_charge_voltage_max_uv;
+ break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
sample = latest->cc.sample - previous->cc.sample;
if (!sample) {
@@ -540,6 +545,69 @@ static int cpcap_battery_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
+ int const_charge_voltage)
+{
+ union power_supply_propval prop;
+ union power_supply_propval val;
+ struct power_supply *charger;
+ int error;
+
+ charger = power_supply_get_by_name("usb");
+ if (!charger)
+ return -ENODEV;
+
+ error = power_supply_get_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (error)
+ return error;
+
+ /* Allow charger const voltage lower than battery const voltage */
+ if (const_charge_voltage > prop.intval)
+ return 0;
+
+ val.intval = const_charge_voltage;
+
+ return power_supply_set_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &val);
+}
+
+static int cpcap_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_battery_ddata *ddata = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ if (val->intval < ddata->config.info.voltage_min_design)
+ return -EINVAL;
+ if (val->intval > ddata->config.info.voltage_max_design)
+ return -EINVAL;
+
+ ddata->config.bat.constant_charge_voltage_max_uv = val->intval;
+
+ return cpcap_battery_update_charger(ddata, val->intval);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
{
struct cpcap_battery_ddata *ddata = data;
@@ -560,14 +628,19 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
latest = cpcap_battery_latest(ddata);
switch (d->action) {
+ case CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE:
+ dev_info(ddata->dev, "Coulomb counter calibration done\n");
+ break;
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
if (latest->current_ua >= 0)
- dev_warn(ddata->dev, "Battery low at 3.3V!\n");
+ dev_warn(ddata->dev, "Battery low at %imV!\n",
+ latest->voltage / 1000);
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->current_ua >= 0) {
+ if (latest->current_ua >= 0 && latest->voltage <= 3200000) {
dev_emerg(ddata->dev,
- "Battery empty at 3.1V, powering off\n");
+ "Battery empty at %imV, powering off\n",
+ latest->voltage / 1000);
orderly_poweroff(true);
}
break;
@@ -609,7 +682,9 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
d->name = name;
d->irq = irq;
- if (!strncmp(name, "lowbph", 6))
+ if (!strncmp(name, "cccal", 5))
+ d->action = CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE;
+ else if (!strncmp(name, "lowbph", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW;
else if (!strncmp(name, "lowbpl", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_POWEROFF;
@@ -635,6 +710,9 @@ static int cpcap_battery_init_interrupts(struct platform_device *pdev,
return error;
}
+ /* Enable calibration interrupt if already available in dts */
+ cpcap_battery_init_irq(pdev, ddata, "cccal");
+
/* Enable low battery interrupts for 3.3V high and 3.1V low */
error = regmap_update_bits(ddata->reg, CPCAP_REG_BPEOL,
0xffff,
@@ -676,6 +754,60 @@ out_err:
return error;
}
+/* Calibrate coulomb counter */
+static int cpcap_battery_calibrate(struct cpcap_battery_ddata *ddata)
+{
+ int error, ccc1, value;
+ unsigned long timeout;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &ccc1);
+ if (error)
+ return error;
+
+ timeout = jiffies + msecs_to_jiffies(6000);
+
+ /* Start calibration */
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff,
+ CPCAP_REG_CCC1_CAL_EN);
+ if (error)
+ goto restore;
+
+ while (time_before(jiffies, timeout)) {
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &value);
+ if (error)
+ goto restore;
+
+ if (!(value & CPCAP_REG_CCC1_CAL_EN))
+ break;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ msleep(300);
+ }
+
+ /* Read calibration offset from CCM */
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ dev_info(ddata->dev, "calibration done: 0x%04x\n", value);
+
+restore:
+ if (error)
+ dev_err(ddata->dev, "%s: error %i\n", __func__, error);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff, ccc1);
+ if (error)
+ dev_err(ddata->dev, "%s: restore error %i\n",
+ __func__, error);
+
+ return error;
+}
+
/*
* Based on the values from Motorola mapphone Linux kernel. In the
* Motorola mapphone Linux kernel tree the value for pm_cd_factor
@@ -687,12 +819,12 @@ out_err:
* at 3078000. The device will die around 2743000.
*/
static const struct cpcap_battery_config cpcap_battery_default_data = {
- .ccm = 0x3ff,
.cd_factor = 0x3cc,
.info.technology = POWER_SUPPLY_TECHNOLOGY_LION,
.info.voltage_max_design = 4351000,
.info.voltage_min_design = 3100000,
.info.charge_full_design = 1740000,
+ .bat.constant_charge_voltage_max_uv = 4200000,
};
#ifdef CONFIG_OF
@@ -741,12 +873,19 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, ddata);
+ switch (ddata->vendor) {
+ case CPCAP_VENDOR_ST:
+ ddata->cc_lsb = 95374; /* μAms per LSB */
+ break;
+ case CPCAP_VENDOR_TI:
+ ddata->cc_lsb = 91501; /* μAms per LSB */
+ break;
+ default:
+ return -EINVAL;
+ }
+ ddata->cc_lsb = (ddata->cc_lsb * ddata->config.cd_factor) / 1000;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_CCM,
- 0xffff, ddata->config.ccm);
- if (error)
- return error;
+ platform_set_drvdata(pdev, ddata);
error = cpcap_battery_init_interrupts(pdev, ddata);
if (error)
@@ -760,11 +899,13 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (!psy_desc)
return -ENOMEM;
- psy_desc->name = "battery",
- psy_desc->type = POWER_SUPPLY_TYPE_BATTERY,
- psy_desc->properties = cpcap_battery_props,
- psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props),
- psy_desc->get_property = cpcap_battery_get_property,
+ psy_desc->name = "battery";
+ psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ psy_desc->properties = cpcap_battery_props;
+ psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props);
+ psy_desc->get_property = cpcap_battery_get_property;
+ psy_desc->set_property = cpcap_battery_set_property;
+ psy_desc->property_is_writeable = cpcap_battery_property_is_writeable;
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
@@ -779,6 +920,10 @@ static int cpcap_battery_probe(struct platform_device *pdev)
atomic_set(&ddata->active, 1);
+ error = cpcap_battery_calibrate(ddata);
+ if (error)
+ return error;
+
return 0;
}
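
The coulomb-counter rewrite above moves the per-vendor LSB scaling to probe time and replaces the hand-rolled sign handling around do_div() (which is unsigned-only) with div_s64() and sign_extend32(). A worked example with the patch's own constants, assuming the ST vendor and the default cd_factor of 0x3cc (972):

/* cc_lsb = 95374 * 972 / 1000 = 92703 µAms per LSB (integer division).
 * With accumulator = -100, sample = 4, offset = 0, divider = 4:
 *   acc = (-100 - 4 * 0) * 92703 * -1 = 9270300
 *   div_s64(9270300, 4)  = 2317575
 * sign_extend32(v, 13) treats bit 13 as the sign bit of a 14-bit
 * two's-complement value, e.g.: */
s32 cci = sign_extend32(0x2001, 13);	/* == -8191 */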
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 74258c7fe17d..c0d452e3dc8b 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -120,6 +120,13 @@ enum {
CPCAP_CHARGER_IIO_NR,
};
+enum {
+ CPCAP_CHARGER_DISCONNECTED,
+ CPCAP_CHARGER_DETECTING,
+ CPCAP_CHARGER_CHARGING,
+ CPCAP_CHARGER_DONE,
+};
+
struct cpcap_charger_ddata {
struct device *dev;
struct regmap *reg;
@@ -138,6 +145,8 @@ struct cpcap_charger_ddata {
atomic_t active;
int status;
+ int state;
+ int voltage;
};
struct cpcap_interrupt_desc {
@@ -153,6 +162,7 @@ struct cpcap_charger_ints_state {
bool chrg_se1b;
bool rvrs_mode;
+ bool chrgcurr2;
bool chrgcurr1;
bool vbusvld;
@@ -162,24 +172,26 @@ struct cpcap_charger_ints_state {
static enum power_supply_property cpcap_charger_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
+/* No battery always shows temperature of -40000 */
static bool cpcap_charger_battery_found(struct cpcap_charger_ddata *ddata)
{
struct iio_channel *channel;
- int error, value;
+ int error, temperature;
channel = ddata->channels[CPCAP_CHARGER_IIO_BATTDET];
- error = iio_read_channel_raw(channel, &value);
+ error = iio_read_channel_processed(channel, &temperature);
if (error < 0) {
dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
return false;
}
- return value == 1;
+ return temperature > -20000 && temperature < 60000;
}
static int cpcap_charger_get_charge_voltage(struct cpcap_charger_ddata *ddata)
@@ -224,6 +236,9 @@ static int cpcap_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
val->intval = ddata->status;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->voltage;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (ddata->status == POWER_SUPPLY_STATUS_CHARGING)
val->intval = cpcap_charger_get_charge_voltage(ddata) *
@@ -248,6 +263,83 @@ static int cpcap_charger_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_charger_match_voltage(int voltage)
+{
+ switch (voltage) {
+ case 0 ... 4100000 - 1: return 3800000;
+ case 4100000 ... 4120000 - 1: return 4100000;
+ case 4120000 ... 4150000 - 1: return 4120000;
+ case 4150000 ... 4170000 - 1: return 4150000;
+ case 4170000 ... 4200000 - 1: return 4170000;
+ case 4200000 ... 4230000 - 1: return 4200000;
+ case 4230000 ... 4250000 - 1: return 4230000;
+ case 4250000 ... 4270000 - 1: return 4250000;
+ case 4270000 ... 4300000 - 1: return 4270000;
+ case 4300000 ... 4330000 - 1: return 4300000;
+ case 4330000 ... 4350000 - 1: return 4330000;
+ case 4350000 ... 4380000 - 1: return 4350000;
+ case 4380000 ... 4400000 - 1: return 4380000;
+ case 4400000 ... 4420000 - 1: return 4400000;
+ case 4420000 ... 4440000 - 1: return 4420000;
+ case 4440000: return 4440000;
+ default: return 0;
+ }
+}
+
+static int
+cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
+{
+ union power_supply_propval prop;
+ struct power_supply *battery;
+ int voltage = ddata->voltage;
+ int error;
+
+ battery = power_supply_get_by_name("battery");
+ if (battery) {
+ error = power_supply_get_property(battery,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (!error)
+ voltage = prop.intval;
+ }
+
+ return voltage;
+}
+
+static int cpcap_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_charger_ddata *ddata = dev_get_drvdata(psy->dev.parent);
+ int voltage, batvolt;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ voltage = cpcap_charger_match_voltage(val->intval);
+ batvolt = cpcap_charger_get_bat_const_charge_voltage(ddata);
+ if (voltage > batvolt)
+ voltage = batvolt;
+ ddata->voltage = voltage;
+ schedule_delayed_work(&ddata->detect_work, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static void cpcap_charger_set_cable_path(struct cpcap_charger_ddata *ddata,
bool enabled)
{
@@ -422,6 +514,7 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
s->chrg_se1b = val & BIT(13);
s->rvrs_mode = val & BIT(6);
+ s->chrgcurr2 = val & BIT(5);
s->chrgcurr1 = val & BIT(4);
s->vbusvld = val & BIT(3);
@@ -434,6 +527,79 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
return 0;
}
+static void cpcap_charger_update_state(struct cpcap_charger_ddata *ddata,
+ int state)
+{
+ const char *status;
+
+ if (state > CPCAP_CHARGER_DONE) {
+ dev_warn(ddata->dev, "unknown state: %i\n", state);
+
+ return;
+ }
+
+ ddata->state = state;
+
+ switch (state) {
+ case CPCAP_CHARGER_DISCONNECTED:
+ status = "DISCONNECTED";
+ break;
+ case CPCAP_CHARGER_DETECTING:
+ status = "DETECTING";
+ break;
+ case CPCAP_CHARGER_CHARGING:
+ status = "CHARGING";
+ break;
+ case CPCAP_CHARGER_DONE:
+ status = "DONE";
+ break;
+ default:
+ return;
+ }
+
+ dev_dbg(ddata->dev, "state: %s\n", status);
+}
+
+static int cpcap_charger_voltage_to_regval(int voltage)
+{
+ int offset;
+
+ switch (voltage) {
+ case 0 ... 4100000 - 1:
+ return 0;
+ case 4100000 ... 4200000 - 1:
+ offset = 1;
+ break;
+ case 4200000 ... 4300000 - 1:
+ offset = 0;
+ break;
+ case 4300000 ... 4380000 - 1:
+ offset = -1;
+ break;
+ case 4380000 ... 4440000:
+ offset = -2;
+ break;
+ default:
+ return 0;
+ }
+
+ return ((voltage - 4100000) / 20000) + offset;
+}
+
+static void cpcap_charger_disconnect(struct cpcap_charger_ddata *ddata,
+ int state, unsigned long delay)
+{
+ int error;
+
+ error = cpcap_charger_set_state(ddata, 0, 0, 0);
+ if (error)
+ return;
+
+ cpcap_charger_update_state(ddata, state);
+ power_supply_changed(ddata->usb);
+ schedule_delayed_work(&ddata->detect_work, delay);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_charger_ddata *ddata;
@@ -447,24 +613,67 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
return;
+ /* Just init the state if a charger is connected with no chrg_det set */
+ if (!s.chrg_det && s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DETECTING);
+
+ return;
+ }
+
+ /*
+ * If battery voltage is higher than charge voltage, it may have been
+ * charged to 4.35V by Android. Try again in 10 minutes.
+ */
+ if (cpcap_charger_get_charge_voltage(ddata) > ddata->voltage) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 60 * 10);
+
+ return;
+ }
+
+ /* Throttle chrgcurr2 interrupt for charger done and retry */
+ switch (ddata->state) {
+ case CPCAP_CHARGER_CHARGING:
+ if (s.chrgcurr2)
+ break;
+ if (s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DONE,
+ HZ * 5);
+ return;
+ }
+ break;
+ case CPCAP_CHARGER_DONE:
+ if (!s.chrgcurr2)
+ break;
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 5);
+ return;
+ default:
+ break;
+ }
+
if (!ddata->feeding_vbus && cpcap_charger_vbus_valid(ddata) &&
s.chrgcurr1) {
int max_current;
+ int vchrg;
if (cpcap_charger_battery_found(ddata))
max_current = CPCAP_REG_CRM_ICHRG_1A596;
else
max_current = CPCAP_REG_CRM_ICHRG_0A532;
+ vchrg = cpcap_charger_voltage_to_regval(ddata->voltage);
error = cpcap_charger_set_state(ddata,
- CPCAP_REG_CRM_VCHRG_4V35,
+ CPCAP_REG_CRM_VCHRG(vchrg),
max_current, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_CHARGING);
} else {
error = cpcap_charger_set_state(ddata, 0, 0, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DISCONNECTED);
}
power_supply_changed(ddata->usb);
@@ -524,7 +733,7 @@ static const char * const cpcap_charger_irqs[] = {
"chrg_det", "rvrs_chrg",
/* REG_INT1 */
- "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr1", "vbusvld",
+ "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
/* REG_INT_3 */
"battdetb",
@@ -596,6 +805,8 @@ static const struct power_supply_desc cpcap_charger_usb_desc = {
.properties = cpcap_charger_props,
.num_properties = ARRAY_SIZE(cpcap_charger_props),
.get_property = cpcap_charger_get_property,
+ .set_property = cpcap_charger_set_property,
+ .property_is_writeable = cpcap_charger_property_is_writeable,
};
#ifdef CONFIG_OF
@@ -625,6 +836,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
return -ENOMEM;
ddata->dev = &pdev->dev;
+ ddata->voltage = 4200000;
ddata->reg = dev_get_regmap(ddata->dev->parent, NULL);
if (!ddata->reg)
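
cpcap_charger_match_voltage() clamps a requested constant-charge voltage down to the nearest supported step, and cpcap_charger_voltage_to_regval() then converts that step into the CRM VCHRG field. Working the arithmetic for two values (note that "case 0 ... 4100000 - 1:" is the GNU C case-range extension the kernel uses elsewhere):

/* probe default 4200000 µV: range 4200000..4299999, offset 0
 *   -> ((4200000 - 4100000) / 20000) + 0 = 5
 * 4350000 µV: range 4300000..4379999, offset -1
 *   -> ((4350000 - 4100000) / 20000) - 1 = 11
 */
vchrg = cpcap_charger_voltage_to_regval(4200000);	/* == 5 */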
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index c3cad2b6daba..65c23ef6408d 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -33,6 +33,8 @@ static int battery_present = 1; /* true */
static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
static int battery_capacity = 50;
static int battery_voltage = 3300;
+static int battery_charge_counter = -1000;
+static int battery_current = 1600;
static bool module_initialized;
@@ -100,6 +102,9 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_NOW:
val->intval = battery_capacity;
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = battery_charge_counter;
+ break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CHARGE_FULL:
val->intval = 100;
@@ -114,6 +119,10 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = battery_voltage;
break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battery_current;
+ break;
default:
pr_info("%s: some properties deliberately report errors.\n",
__func__);
@@ -135,6 +144,7 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -144,6 +154,8 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
};
static char *test_power_ac_supplied_to[] = {
@@ -447,6 +459,36 @@ static int param_set_battery_voltage(const char *key,
#define param_get_battery_voltage param_get_int
+static int param_set_battery_charge_counter(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_charge_counter = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_charge_counter param_get_int
+
+static int param_set_battery_current(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_current = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_current param_get_int
+
static const struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
@@ -487,6 +529,16 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
.get = param_get_battery_voltage,
};
+static const struct kernel_param_ops param_ops_battery_charge_counter = {
+ .set = param_set_battery_charge_counter,
+ .get = param_get_battery_charge_counter,
+};
+
+static const struct kernel_param_ops param_ops_battery_current = {
+ .set = param_set_battery_current,
+ .get = param_get_battery_current,
+};
+
#define param_check_ac_online(name, p) __param_check(name, p, void);
#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
@@ -495,6 +547,8 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
#define param_check_battery_health(name, p) __param_check(name, p, void);
#define param_check_battery_capacity(name, p) __param_check(name, p, void);
#define param_check_battery_voltage(name, p) __param_check(name, p, void);
+#define param_check_battery_charge_counter(name, p) __param_check(name, p, void);
+#define param_check_battery_current(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
@@ -525,6 +579,13 @@ MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
module_param(battery_voltage, battery_voltage, 0644);
MODULE_PARM_DESC(battery_voltage, "battery voltage (millivolts)");
+module_param(battery_charge_counter, battery_charge_counter, 0644);
+MODULE_PARM_DESC(battery_charge_counter,
+ "battery charge counter (microampere-hours)");
+
+module_param(battery_current, battery_current, 0644);
+MODULE_PARM_DESC(battery_current, "battery current (milliamperes)");
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 94ddd7d659c8..a67701ed93e8 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -978,6 +978,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 0517272a268e..b45d2b86d8ca 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -119,4 +119,16 @@ config PTP_1588_CLOCK_KVM
To compile this driver as a module, choose M here: the module
will be called ptp_kvm.
+config PTP_1588_CLOCK_IDTCM
+ tristate "IDT CLOCKMATRIX as PTP clock"
+ depends on PTP_1588_CLOCK
+ default n
+ help
+ This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_clockmatrix.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 677d1d178a3e..69a06f86a450 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
\ No newline at end of file
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
new file mode 100644
index 000000000000..9263bc33b8f4
--- /dev/null
+++ b/drivers/ptp/idt8a340_reg.h
@@ -0,0 +1,659 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt8a340_reg.h
+ *
+ * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
+ * https://github.com/richardcochran/regen
+ *
+ * Hand modified to include some HW registers.
+ * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+
+#define STATUS 0xc03c
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+
+#define INPUT_1 0xc1c0
+
+#define INPUT_2 0xc1d0
+
+#define INPUT_3 0xc200
+
+#define INPUT_4 0xc210
+
+#define INPUT_5 0xc220
+
+#define INPUT_6 0xc230
+
+#define INPUT_7 0xc240
+
+#define INPUT_8 0xc250
+
+#define INPUT_9 0xc260
+
+#define INPUT_10 0xc280
+
+#define INPUT_11 0xc290
+
+#define INPUT_12 0xc2a0
+
+#define INPUT_13 0xc2b0
+
+#define INPUT_14 0xc2c0
+
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+
+#define REF_MON_1 0xc2ec
+
+#define REF_MON_2 0xc300
+
+#define REF_MON_3 0xc30c
+
+#define REF_MON_4 0xc318
+
+#define REF_MON_5 0xc324
+
+#define REF_MON_6 0xc330
+
+#define REF_MON_7 0xc33c
+
+#define REF_MON_8 0xc348
+
+#define REF_MON_9 0xc354
+
+#define REF_MON_10 0xc360
+
+#define REF_MON_11 0xc36c
+
+#define REF_MON_12 0xc380
+
+#define REF_MON_13 0xc38c
+
+#define REF_MON_14 0xc398
+
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+
+#define DPLL_1 0xc400
+
+#define DPLL_2 0xc438
+
+#define DPLL_3 0xc480
+
+#define DPLL_4 0xc4b8
+
+#define DPLL_5 0xc500
+
+#define DPLL_6 0xc538
+
+#define DPLL_7 0xc580
+
+#define SYS_DPLL 0xc5b8
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+
+#define DPLL_CTRL_1 0xc63c
+
+#define DPLL_CTRL_2 0xc680
+
+#define DPLL_CTRL_3 0xc6bc
+
+#define DPLL_CTRL_4 0xc700
+
+#define DPLL_CTRL_5 0xc73c
+
+#define DPLL_CTRL_6 0xc780
+
+#define DPLL_CTRL_7 0xc7bc
+
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+
+#define DPLL_PHASE_1 0xc81c
+
+#define DPLL_PHASE_2 0xc820
+
+#define DPLL_PHASE_3 0xc824
+
+#define DPLL_PHASE_4 0xc828
+
+#define DPLL_PHASE_5 0xc82c
+
+#define DPLL_PHASE_6 0xc830
+
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+
+#define DPLL_FREQ_1 0xc840
+
+#define DPLL_FREQ_2 0xc848
+
+#define DPLL_FREQ_3 0xc850
+
+#define DPLL_FREQ_4 0xc858
+
+#define DPLL_FREQ_5 0xc860
+
+#define DPLL_FREQ_6 0xc868
+
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+
+#define DPLL_PHASE_PULL_IN_1 0xc888
+
+#define DPLL_PHASE_PULL_IN_2 0xc890
+
+#define DPLL_PHASE_PULL_IN_3 0xc898
+
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+
+#define GPIO_1 0xc8d4
+
+#define GPIO_2 0xc8e6
+
+#define GPIO_3 0xc900
+
+#define GPIO_4 0xc912
+
+#define GPIO_5 0xc924
+
+#define GPIO_6 0xc936
+
+#define GPIO_7 0xc948
+
+#define GPIO_8 0xc95a
+
+#define GPIO_9 0xc980
+
+#define GPIO_10 0xc992
+
+#define GPIO_11 0xc9a4
+
+#define GPIO_12 0xc9b6
+
+#define GPIO_13 0xc9c8
+
+#define GPIO_14 0xc9da
+
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+
+#define OUTPUT_0 0xca14
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+
+#define OUTPUT_1 0xca24
+
+#define OUTPUT_2 0xca34
+
+#define OUTPUT_3 0xca44
+
+#define OUTPUT_4 0xca54
+
+#define OUTPUT_5 0xca64
+
+#define OUTPUT_6 0xca80
+
+#define OUTPUT_7 0xca90
+
+#define OUTPUT_8 0xcaa0
+
+#define OUTPUT_9 0xcab0
+
+#define OUTPUT_10 0xcac0
+
+#define OUTPUT_11 0xcad0
+
+#define SERIAL 0xcae0
+
+#define PWM_ENCODER_0 0xcb00
+
+#define PWM_ENCODER_1 0xcb08
+
+#define PWM_ENCODER_2 0xcb10
+
+#define PWM_ENCODER_3 0xcb18
+
+#define PWM_ENCODER_4 0xcb20
+
+#define PWM_ENCODER_5 0xcb28
+
+#define PWM_ENCODER_6 0xcb30
+
+#define PWM_ENCODER_7 0xcb38
+
+#define PWM_DECODER_0 0xcb40
+
+#define PWM_DECODER_1 0xcb48
+
+#define PWM_DECODER_2 0xcb50
+
+#define PWM_DECODER_3 0xcb58
+
+#define PWM_DECODER_4 0xcb60
+
+#define PWM_DECODER_5 0xcb68
+
+#define PWM_DECODER_6 0xcb70
+
+#define PWM_DECODER_7 0xcb80
+
+#define PWM_DECODER_8 0xcb88
+
+#define PWM_DECODER_9 0xcb90
+
+#define PWM_DECODER_10 0xcb98
+
+#define PWM_DECODER_11 0xcba0
+
+#define PWM_DECODER_12 0xcba8
+
+#define PWM_DECODER_13 0xcbb0
+
+#define PWM_DECODER_14 0xcbb8
+
+#define PWM_DECODER_15 0xcbc0
+
+#define PWM_USER_DATA 0xcbc8
+
+#define TOD_0 0xcbcc
+
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+
+#define TOD_1 0xcbce
+
+#define TOD_2 0xcbd0
+
+#define TOD_3 0xcbd2
+
+
+#define TOD_WRITE_0 0xcc00
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+
+#define TOD_WRITE_1 0xcc10
+
+#define TOD_WRITE_2 0xcc20
+
+#define TOD_WRITE_3 0xcc30
+
+#define TOD_READ_PRIMARY_0 0xcc40
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY 0x0000
+/* Counter increments after TOD read is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+
+#define TOD_READ_PRIMARY_1 0xcc50
+
+#define TOD_READ_PRIMARY_2 0xcc60
+
+#define TOD_READ_PRIMARY_3 0xcc80
+
+#define TOD_READ_SECONDARY_0 0xcc90
+
+#define TOD_READ_SECONDARY_1 0xcca0
+
+#define TOD_READ_SECONDARY_2 0xccb0
+
+#define TOD_READ_SECONDARY_3 0xccc0
+
+#define OUTPUT_TDC_CFG 0xccd0
+
+#define OUTPUT_TDC_0 0xcd00
+
+#define OUTPUT_TDC_1 0xcd08
+
+#define OUTPUT_TDC_2 0xcd10
+
+#define OUTPUT_TDC_3 0xcd18
+
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+
+#define EEPROM 0xcf68
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+#endif
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 67d0199840fd..9d72ab593f13 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
}
- if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
- req.extts.rsv[0] || req.extts.rsv[1]) &&
- cmd == PTP_EXTTS_REQUEST2) {
- err = -EINVAL;
- break;
+ if (cmd == PTP_EXTTS_REQUEST2) {
+ /* Tell the drivers to check the flags carefully. */
+ req.extts.flags |= PTP_STRICT_FLAGS;
+ /* Make sure no reserved bit is set. */
+ if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+ req.extts.rsv[0] || req.extts.rsv[1]) {
+ err = -EINVAL;
+ break;
+ }
+ /* Ensure one of the rising/falling edge bits is set. */
+ if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+ (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+ err = -EINVAL;
+ break;
+ }
} else if (cmd == PTP_EXTTS_REQUEST) {
req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
req.extts.rsv[0] = 0;
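
The PTP_EXTTS_REQUEST2 path above is stricter than v1: the reserved words must be zero and, when enabling, at least one of the edge flags has to be set. A minimal userspace sketch of a conforming call follows; the device node /dev/ptp0 and channel index 0 are illustrative, not mandated by the patch.

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int request_extts(void)
    {
        struct ptp_extts_request req;
        int fd = open("/dev/ptp0", O_RDWR);   /* assumed device node */

        if (fd < 0)
            return -1;

        memset(&req, 0, sizeof(req));         /* rsv[] must stay zero */
        req.index = 0;                        /* illustrative channel */
        req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

        /* EINVAL if a reserved bit is set or no edge bit is chosen */
        return ioctl(fd, PTP_EXTTS_REQUEST2, &req);
    }
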
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
new file mode 100644
index 000000000000..a5110b7b4ece
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+
+#include "ptp_private.h"
+#include "ptp_clockmatrix.h"
+
+MODULE_DESCRIPTION("Driver for IDT ClockMatrix(TM) family");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+#define SETTIME_CORRECTION (0)
+
+static int char_array_to_timespec(u8 *buf,
+ u8 count,
+ struct timespec64 *ts)
+{
+ u8 i;
+ u64 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return -EINVAL;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ nsec = buf[4];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[3 - i];
+ }
+
+ sec = buf[10];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[9 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
+static int timespec_to_char_array(struct timespec64 const *ts,
+ u8 *buf,
+ u8 count)
+{
+ u8 i;
+ s32 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return -EINVAL;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ buf[0] = 0;
+ for (i = 1; i < 5; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 5; i < TOD_BYTE_COUNT; i++) {
+
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+
+ return 0;
+}
+
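
Both helpers above serialize the device's 11-byte time-of-day word: byte 0 carries sub-nanoseconds, bytes 1-4 the 32-bit nanoseconds, and bytes 5-10 the 48-bit seconds, all least-significant byte first. A standalone sketch of the same packing, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define TOD_BYTE_COUNT 11

    static void pack_tod(int64_t sec, int32_t nsec, uint8_t buf[TOD_BYTE_COUNT])
    {
        int i;

        buf[0] = 0;                              /* sub-nanoseconds */
        for (i = 1; i < 5; i++, nsec >>= 8)
            buf[i] = nsec & 0xff;                /* 32-bit ns, LSB first */
        for (i = 5; i < TOD_BYTE_COUNT; i++, sec >>= 8)
            buf[i] = sec & 0xff;                 /* 48-bit seconds, LSB first */
    }

    int main(void)
    {
        uint8_t buf[TOD_BYTE_COUNT];
        int i;

        pack_tod(1, 5, buf);                     /* 1 s + 5 ns */
        for (i = 0; i < TOD_BYTE_COUNT; i++)
            printf("%02x ", buf[i]);             /* 00 05 00 00 00 01 00 ... */
        printf("\n");
        return 0;
    }
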
+static int idtcm_xfer(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ struct i2c_client *client = idtcm->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
+{
+ u8 buf[4];
+ int err;
+
+ if (idtcm->page_offset == val)
+ return 0;
+
+ buf[0] = 0x0;
+ buf[1] = val;
+ buf[2] = 0x10;
+ buf[3] = 0x20;
+
+ err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+
+ if (err)
+ dev_err(&idtcm->client->dev, "failed to set page offset\n");
+ else
+ idtcm->page_offset = val;
+
+ return err;
+}
+
+static int _idtcm_rdwr(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ u8 hi;
+ u8 lo;
+ int err;
+
+ hi = (regaddr >> 8) & 0xff;
+ lo = regaddr & 0xff;
+
+ err = idtcm_page_offset(idtcm, hi);
+
+ if (err)
+ goto out;
+
+ err = idtcm_xfer(idtcm, lo, buf, count, write);
+out:
+ return err;
+}
+
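
_idtcm_rdwr() above funnels the 16-bit register map through an 8-bit I2C offset: the high byte of the address selects a page via PAGE_ADDR, and the low byte is the in-page offset. A worked example of the split, using GENERAL_STATUS + HW_REV_ID from the register header (values only, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t regaddr = 0xc014 + 0x000a;  /* GENERAL_STATUS + HW_REV_ID */
        uint8_t hi = (regaddr >> 8) & 0xff;  /* 0xc0, written to PAGE_ADDR */
        uint8_t lo = regaddr & 0xff;         /* 0x1e, the in-page I2C offset */

        printf("page 0x%02x, offset 0x%02x\n", hi, lo);
        return 0;
    }
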
+static int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
+}
+
+static int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+}
+
+static int _idtcm_gettime(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
+ trigger |= TOD_READ_TRIGGER_MODE;
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idtcm->calculate_overhead_flag)
+ idtcm->start_time = ktime_get_raw();
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = char_array_to_timespec(buf, sizeof(buf), ts);
+
+ return err;
+}
+
+static int _sync_pll_output(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 sync_ctrl0;
+ u16 sync_ctrl1;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ sync_ctrl0 = HW_Q0_Q1_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q0_Q1_CH_SYNC_CTRL_1;
+ break;
+ case 1:
+ sync_ctrl0 = HW_Q2_Q3_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q2_Q3_CH_SYNC_CTRL_1;
+ break;
+ case 2:
+ sync_ctrl0 = HW_Q4_Q5_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q4_Q5_CH_SYNC_CTRL_1;
+ break;
+ case 3:
+ sync_ctrl0 = HW_Q6_Q7_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q6_Q7_CH_SYNC_CTRL_1;
+ break;
+ case 4:
+ sync_ctrl0 = HW_Q8_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q8_CH_SYNC_CTRL_1;
+ break;
+ case 5:
+ sync_ctrl0 = HW_Q9_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q9_CH_SYNC_CTRL_1;
+ break;
+ case 6:
+ sync_ctrl0 = HW_Q10_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q10_CH_SYNC_CTRL_1;
+ break;
+ case 7:
+ sync_ctrl0 = HW_Q11_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q11_CH_SYNC_CTRL_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = SYNCTRL1_MASTER_SYNC_RST;
+
+ /* Place master sync in reset */
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl0, &sync_src, sizeof(sync_src));
+ if (err)
+ return err;
+
+ /* Set sync trigger mask */
+ val |= SYNCTRL1_FBDIV_FRAME_SYNC_TRIG | SYNCTRL1_FBDIV_SYNC_TRIG;
+
+ if (qn)
+ val |= SYNCTRL1_Q0_DIV_SYNC_TRIG;
+
+ if (qn_plus_1)
+ val |= SYNCTRL1_Q1_DIV_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ /* Place master sync out of reset */
+ val &= ~(SYNCTRL1_MASTER_SYNC_RST);
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+
+ return err;
+}
+
+static int idtcm_sync_pps_output(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
+ int err = 0;
+
+ u16 output_mask = channel->output_mask;
+
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
+ break;
+ case DPLL_1:
+ sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
+ break;
+ case DPLL_2:
+ sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
+ break;
+ case DPLL_3:
+ sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (pll = 0; pll < 8; pll++) {
+
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+
+ if (pll < 4) {
+ /* The first four PLLs have two outputs each */
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else {
+ qn_plus_1 = 0;
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _sync_pll_output(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
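
idtcm_sync_pps_output() walks output_mask two bits at a time for PLLs 0-3 (each owns a Q/Q+1 output pair, Q0..Q7) and one bit each for PLLs 4-7 (Q8..Q11). A standalone sketch of the decode, fed with the header's default PLL1 mask (0x00c) purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t output_mask = 0x00c;   /* DEFAULT_OUTPUT_MASK_PLL1: Q2 + Q3 */
        uint8_t pll, qn, qn_plus_1;

        for (pll = 0; pll < 8; pll++) {
            qn = output_mask & 0x1;
            output_mask >>= 1;
            if (pll < 4) {              /* PLLs 0-3 own an output pair */
                qn_plus_1 = output_mask & 0x1;
                output_mask >>= 1;
            } else {
                qn_plus_1 = 0;
            }
            if (qn || qn_plus_1)
                printf("PLL%u: qn=%u qn+1=%u\n", pll, qn, qn_plus_1);
        }
        return 0;                       /* prints: PLL1: qn=1 qn+1=1 */
    }
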
+static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[TOD_BYTE_COUNT];
+ u8 cmd;
+ int err;
+ struct timespec64 local_ts = *ts;
+ s64 total_overhead_ns;
+
+ /* Configure HW TOD write trigger. */
+ err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ cmd &= ~(0x0f);
+ cmd |= wr_trig | 0x08;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+ }
+
+ /* ARM HW TOD write trigger. */
+ cmd &= ~(0x08);
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
+
+ if (idtcm->calculate_overhead_flag) {
+ total_overhead_ns = ktime_to_ns(ktime_get_raw()
+ - idtcm->start_time)
+ + idtcm->tod_write_overhead_ns
+ + SETTIME_CORRECTION;
+
+ timespec64_add_ns(&local_ts, total_overhead_ns);
+
+ idtcm->calculate_overhead_flag = 0;
+ }
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+ }
+
+ return err;
+}
+
+static int _idtcm_settime(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ s32 retval;
+ int err;
+ int i;
+ u8 trig_sel;
+
+ err = _idtcm_set_dpll_tod(channel, ts, wr_trig);
+
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ for (i = 0; i < 10000; i++) {
+ err = idtcm_read(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_CTRL_1, &trig_sel,
+ sizeof(trig_sel));
+
+ if (err)
+ return err;
+
+ if (trig_sel == 0x4a)
+ break;
+
+ err = -ETIMEDOUT;
+ }
+
+ if (err)
+ return err;
+
+ retval = idtcm_sync_pps_output(channel);
+
+ return retval;
+}
+
+static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
+ s32 offset_ns)
+{
+ int err;
+ int i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[4];
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = 0xff & (offset_ns);
+ offset_ns >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in, PULL_IN_OFFSET,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
+ u32 max_ffo_ppb)
+{
+ int err;
+ u8 i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[3];
+
+ if (max_ffo_ppb & 0xff000000)
+ max_ffo_ppb = 0;
+
+ for (i = 0; i < 3; i++) {
+ buf[i] = 0xff & (max_ffo_ppb);
+ max_ffo_ppb >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_SLOPE_LIMIT, buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf;
+
+ err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
+ &buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ if (buf == 0) {
+ buf = 0x01;
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_CTRL, &buf, sizeof(buf));
+ } else {
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
+{
+ int err;
+
+ err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
+
+ if (err)
+ return err;
+
+ err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
+
+ if (err)
+ return err;
+
+ err = idtcm_start_phase_pull_in(channel);
+
+ return err;
+}
+
+static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ s64 now;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ } else {
+ idtcm->calculate_overhead_flag = 1;
+
+ err = _idtcm_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now = timespec64_to_ns(&ts);
+ now += delta;
+
+ ts = ns_to_timespec64(now);
+
+ err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ }
+
+ return err;
+}
+
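
_idtcm_adjtime() above picks between two mechanisms: offsets under PHASE_PULL_IN_THRESHOLD_NS are slewed in hardware via the phase pull-in block, while larger ones are applied by reading the TOD, adding the delta, and writing it back with overhead compensation. A toy illustration of the dispatch threshold (sample deltas are made up):

    #include <stdio.h>
    #include <stdlib.h>

    #define PHASE_PULL_IN_THRESHOLD_NS 150000

    int main(void)
    {
        long long deltas[] = { 40000, -120000, 2000000000LL };
        int i;

        for (i = 0; i < 3; i++)
            printf("%lld ns -> %s\n", deltas[i],
                   llabs(deltas[i]) < PHASE_PULL_IN_THRESHOLD_NS ?
                   "phase pull-in" : "TOD read/modify/write");
        return 0;
    }
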
+static int idtcm_state_machine_reset(struct idtcm *idtcm)
+{
+ int err;
+ u8 byte = SM_RESET_CMD;
+
+ err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+
+ if (!err)
+ msleep_interruptible(POST_SM_RESET_DELAY_MS);
+
+ return err;
+}
+
+static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HW_REV_ID,
+ hw_rev_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ BOND_ID,
+ bond_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
+
+ *hw_csr_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
+
+ *hw_irq_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, PRODUCT_ID, buf, sizeof(buf));
+
+ *product_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_major_release(struct idtcm *idtcm, u8 *major)
+{
+ int err;
+ u8 buf = 0;
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, MAJ_REL, &buf, sizeof(buf));
+
+ *major = buf >> 1;
+
+ return err;
+}
+
+static int idtcm_read_minor_release(struct idtcm *idtcm, u8 *minor)
+{
+ return idtcm_read(idtcm, GENERAL_STATUS, MIN_REL, minor, sizeof(u8));
+}
+
+static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HOTFIX_REL,
+ hotfix,
+ sizeof(u8));
+}
+
+static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_read(idtcm,
+ GENERAL_STATUS,
+ PIPELINE_ID,
+ &buf[0],
+ sizeof(buf));
+
+ *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
+{
+ int err = 0;
+
+ if (addr == PLL_MASK_ADDR) {
+ if ((val & 0xf0) || !(val & 0xf)) {
+ dev_err(&idtcm->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ return -EINVAL;
+ }
+ *mask = val;
+ }
+
+ return err;
+}
+
+static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
+{
+ int err = 0;
+
+ switch (addr) {
+ case OUTPUT_MASK_PLL0_ADDR:
+ SET_U16_LSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL0_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR:
+ SET_U16_LSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR:
+ SET_U16_LSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR:
+ SET_U16_LSB(idtcm->channel[3].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[3].output_mask, val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int check_and_set_masks(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 val)
+{
+ int err = 0;
+
+ if (set_pll_output_mask(idtcm, regaddr, val)) {
+ /* Not an output mask, check for pll mask */
+ err = process_pll_mask(idtcm, regaddr, val, &idtcm->pll_mask);
+ }
+
+ return err;
+}
+
+static void display_pll_and_output_masks(struct idtcm *idtcm)
+{
+ u8 i;
+ u8 mask;
+
+ dev_dbg(&idtcm->client->dev, "pllmask = 0x%02x\n", idtcm->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idtcm->pll_mask)
+ dev_dbg(&idtcm->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idtcm->channel[i].output_mask);
+ }
+}
+
+static int idtcm_load_firmware(struct idtcm *idtcm,
+ struct device *dev)
+{
+ const struct firmware *fw;
+ struct idtcm_fwrc *rec;
+ u32 regaddr;
+ int err;
+ s32 len;
+ u8 val;
+ u8 loaddr;
+
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtcm_fwrc *) fw->data;
+
+ if (fw->size > 0)
+ idtcm_state_machine_reset(idtcm);
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idtcm->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
+
+ val = rec->value;
+ loaddr = rec->loaddr;
+
+ rec++;
+
+ err = check_and_set_masks(idtcm, regaddr, val);
+ }
+
+ if (err == 0) {
+ /* Registers below GPIO_USER_CONTROL and from SCRATCH up are read-only */
+ if ((regaddr < GPIO_USER_CONTROL)
+ || (regaddr >= SCRATCH))
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ continue;
+
+ err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ display_pll_and_output_masks(idtcm);
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
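
The firmware blob consumed above is a flat stream of 4-byte records (see struct idtcm_fwrc in ptp_clockmatrix.h): high address byte, low address byte, value, and a reserved byte that must be zero. A standalone sketch of decoding one record; the sample bytes are made up, not taken from a real idtcm.bin:

    #include <stdint.h>
    #include <stdio.h>

    struct fwrc {                /* mirrors struct idtcm_fwrc */
        uint8_t hiaddr;
        uint8_t loaddr;
        uint8_t value;
        uint8_t reserved;        /* non-zero makes the loader reject the blob */
    };

    int main(void)
    {
        const uint8_t blob[] = { 0xc1, 0x60, 0x01, 0x00 };  /* made-up record */
        const struct fwrc *rec = (const struct fwrc *)blob;
        uint16_t regaddr = (rec->hiaddr << 8) | rec->loaddr;

        /* 0xc160 (GPIO_USER_CONTROL) falls in the writable window */
        printf("write 0x%02x to register 0x%04x\n", rec->value, regaddr);
        return 0;
    }
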
+static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u32 module;
+ u8 val;
+ int err;
+
+ /*
+ * This assumes that the 1-PPS is on the second of the two
+ * outputs. But is this always true?
+ */
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ module = OUTPUT_1;
+ break;
+ case DPLL_1:
+ module = OUTPUT_3;
+ break;
+ case DPLL_2:
+ module = OUTPUT_5;
+ break;
+ case DPLL_3:
+ module = OUTPUT_7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = idtcm_read(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= SQUELCH_DISABLE;
+ else
+ val &= ~SQUELCH_DISABLE;
+
+ err = idtcm_write(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int idtcm_set_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode pll_mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 dpll_mode;
+
+ err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
+
+ channel->pll_mode = pll_mode;
+
+ err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* PTP Hardware Clock interface */
+
+static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ u8 i;
+ bool neg_adj = false;
+ int err;
+ u8 buf[6] = {0};
+ s64 fcw;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Frequency Control Word unit is: 1.11 * 10^-10 ppm
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 111
+ *
+ * adjfine:
+ * ppm_16 * 5^12
+ * FCW = -------------
+ * 111 * 2^4
+ */
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+
+ /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
+ fcw = ppb * 1000000000000ULL;
+
+ fcw = div_u64(fcw, 111022);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 6; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
+ buf, sizeof(buf));
+
+ mutex_unlock(&idtcm->reg_lock);
+ return err;
+}
+
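
To make the comment block in idtcm_adjfreq() concrete: one FCW step is 2^-53 of nominal frequency (about 1.11e-16 fractional), so the code scales ppb by 10^12/111022. A worked example for a +10 ppb request, illustration only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t ppb = 10;       /* +10 ppb requested */
        int64_t fcw = (ppb * 1000000000000LL) / 111022;

        /* ~9.007e7 steps of 2^-53 each, i.e. ~1e-8 fractional offset */
        printf("fcw = %lld\n", (long long)fcw);
        return 0;
    }
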
+static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_gettime(channel, ts);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjtime(channel, delta);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ return idtcm_pps_enable(channel, false);
+
+ /* Only accept a 1-PPS aligned to the second. */
+ if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ return -ERANGE;
+
+ return idtcm_pps_enable(channel, true);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int idtcm_enable_tod(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts = {0, 0};
+ u8 cfg;
+ int err;
+
+ err = idtcm_pps_enable(channel, false);
+ if (err)
+ return err;
+
+ /*
+ * Start the TOD clock ticking.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg |= TOD_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+}
+
+static void idtcm_display_version_info(struct idtcm *idtcm)
+{
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u32 pipeline;
+ u16 product_id;
+ u16 csr_id;
+ u16 irq_id;
+ u8 hw_rev_id;
+ u8 bond_id;
+
+ idtcm_read_major_release(idtcm, &major);
+ idtcm_read_minor_release(idtcm, &minor);
+ idtcm_read_hotfix_release(idtcm, &hotfix);
+ idtcm_read_pipeline(idtcm, &pipeline);
+
+ idtcm_read_product_id(idtcm, &product_id);
+ idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
+ idtcm_read_bond_id(idtcm, &bond_id);
+ idtcm_read_hw_csr_id(idtcm, &csr_id);
+ idtcm_read_hw_irq_id(idtcm, &irq_id);
+
+ dev_info(&idtcm->client->dev, "Version: %d.%d.%d, Pipeline %u, "
+ "Product 0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
+ major, minor, hotfix, pipeline,
+ product_id, hw_rev_id, bond_id, csr_id, irq_id);
+}
+
+static struct ptp_clock_info idtcm_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 1,
+ .adjfreq = &idtcm_adjfreq,
+ .adjtime = &idtcm_adjtime,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime,
+ .enable = &idtcm_enable,
+};
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (index >= MAX_PHC_PLL)
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ switch (index) {
+ case 0:
+ channel->dpll_freq = DPLL_FREQ_0;
+ channel->dpll_n = DPLL_0;
+ channel->tod_read_primary = TOD_READ_PRIMARY_0;
+ channel->tod_write = TOD_WRITE_0;
+ channel->tod_n = TOD_0;
+ channel->hw_dpll_n = HW_DPLL_0;
+ channel->dpll_phase = DPLL_PHASE_0;
+ channel->dpll_ctrl_n = DPLL_CTRL_0;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_0;
+ break;
+ case 1:
+ channel->dpll_freq = DPLL_FREQ_1;
+ channel->dpll_n = DPLL_1;
+ channel->tod_read_primary = TOD_READ_PRIMARY_1;
+ channel->tod_write = TOD_WRITE_1;
+ channel->tod_n = TOD_1;
+ channel->hw_dpll_n = HW_DPLL_1;
+ channel->dpll_phase = DPLL_PHASE_1;
+ channel->dpll_ctrl_n = DPLL_CTRL_1;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_1;
+ break;
+ case 2:
+ channel->dpll_freq = DPLL_FREQ_2;
+ channel->dpll_n = DPLL_2;
+ channel->tod_read_primary = TOD_READ_PRIMARY_2;
+ channel->tod_write = TOD_WRITE_2;
+ channel->tod_n = TOD_2;
+ channel->hw_dpll_n = HW_DPLL_2;
+ channel->dpll_phase = DPLL_PHASE_2;
+ channel->dpll_ctrl_n = DPLL_CTRL_2;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_2;
+ break;
+ case 3:
+ channel->dpll_freq = DPLL_FREQ_3;
+ channel->dpll_n = DPLL_3;
+ channel->tod_read_primary = TOD_READ_PRIMARY_3;
+ channel->tod_write = TOD_WRITE_3;
+ channel->tod_n = TOD_3;
+ channel->hw_dpll_n = HW_DPLL_3;
+ channel->dpll_phase = DPLL_PHASE_3;
+ channel->dpll_ctrl_n = DPLL_CTRL_3;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ channel->idtcm = idtcm;
+
+ channel->caps = idtcm_caps;
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT CM PLL%u", index);
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+
+ err = idtcm_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static void ptp_clock_unregister_all(struct idtcm *idtcm)
+{
+ u8 i;
+ struct idtcm_channel *channel;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idtcm->channel[i];
+
+ if (channel->ptp_clock)
+ ptp_clock_unregister(channel->ptp_clock);
+ }
+}
+
+static void set_default_masks(struct idtcm *idtcm)
+{
+ idtcm->pll_mask = DEFAULT_PLL_MASK;
+
+ idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idtcm->channel[2].output_mask = DEFAULT_OUTPUT_MASK_PLL2;
+ idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
+}
+
+static int set_tod_write_overhead(struct idtcm *idtcm)
+{
+ int err;
+ u8 i;
+
+ s64 total_ns = 0;
+
+ ktime_t start;
+ ktime_t stop;
+
+ u8 buf[TOD_BYTE_COUNT] = {0};
+
+ struct idtcm_channel *channel = &idtcm->channel[2];
+
+ /* Prime the page register so the timed writes below exclude page-switch cost */
+ idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
+ buf, sizeof(buf));
+
+ for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
+
+ start = ktime_get_raw();
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop - start);
+ }
+
+ idtcm->tod_write_overhead_ns = div_s64(total_ns,
+ TOD_WRITE_OVERHEAD_COUNT_MAX);
+
+ return err;
+}
+
+static int idtcm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idtcm *idtcm;
+ int err;
+ u8 i;
+
+ /* Unused for now */
+ (void)id;
+
+ idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+
+ if (!idtcm)
+ return -ENOMEM;
+
+ idtcm->client = client;
+ idtcm->page_offset = 0xff;
+ idtcm->calculate_overhead_flag = 0;
+
+ set_default_masks(idtcm);
+
+ mutex_init(&idtcm->reg_lock);
+ mutex_lock(&idtcm->reg_lock);
+
+ idtcm_display_version_info(idtcm);
+
+ err = set_tod_write_overhead(idtcm);
+
+ if (err) {
+ mutex_unlock(&idtcm->reg_lock);
+ return err;
+ }
+
+ err = idtcm_load_firmware(idtcm, &client->dev);
+
+ if (err)
+ dev_warn(&idtcm->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idtcm->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idtcm->pll_mask & (1 << i)) {
+ err = idtcm_enable_channel(idtcm, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idtcm->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ if (err) {
+ ptp_clock_unregister_all(idtcm);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idtcm);
+
+ return 0;
+}
+
+static int idtcm_remove(struct i2c_client *client)
+{
+ struct idtcm *idtcm = i2c_get_clientdata(client);
+
+ ptp_clock_unregister_all(idtcm);
+
+ mutex_destroy(&idtcm->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idtcm_dt_id[] = {
+ { .compatible = "idt,8a34000" },
+ { .compatible = "idt,8a34001" },
+ { .compatible = "idt,8a34002" },
+ { .compatible = "idt,8a34003" },
+ { .compatible = "idt,8a34004" },
+ { .compatible = "idt,8a34005" },
+ { .compatible = "idt,8a34006" },
+ { .compatible = "idt,8a34007" },
+ { .compatible = "idt,8a34008" },
+ { .compatible = "idt,8a34009" },
+ { .compatible = "idt,8a34010" },
+ { .compatible = "idt,8a34011" },
+ { .compatible = "idt,8a34012" },
+ { .compatible = "idt,8a34013" },
+ { .compatible = "idt,8a34014" },
+ { .compatible = "idt,8a34015" },
+ { .compatible = "idt,8a34016" },
+ { .compatible = "idt,8a34017" },
+ { .compatible = "idt,8a34018" },
+ { .compatible = "idt,8a34019" },
+ { .compatible = "idt,8a34040" },
+ { .compatible = "idt,8a34041" },
+ { .compatible = "idt,8a34042" },
+ { .compatible = "idt,8a34043" },
+ { .compatible = "idt,8a34044" },
+ { .compatible = "idt,8a34045" },
+ { .compatible = "idt,8a34046" },
+ { .compatible = "idt,8a34047" },
+ { .compatible = "idt,8a34048" },
+ { .compatible = "idt,8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idtcm_dt_id);
+#endif
+
+static const struct i2c_device_id idtcm_i2c_id[] = {
+ { "8a34000" },
+ { "8a34001" },
+ { "8a34002" },
+ { "8a34003" },
+ { "8a34004" },
+ { "8a34005" },
+ { "8a34006" },
+ { "8a34007" },
+ { "8a34008" },
+ { "8a34009" },
+ { "8a34010" },
+ { "8a34011" },
+ { "8a34012" },
+ { "8a34013" },
+ { "8a34014" },
+ { "8a34015" },
+ { "8a34016" },
+ { "8a34017" },
+ { "8a34018" },
+ { "8a34019" },
+ { "8a34040" },
+ { "8a34041" },
+ { "8a34042" },
+ { "8a34043" },
+ { "8a34044" },
+ { "8a34045" },
+ { "8a34046" },
+ { "8a34047" },
+ { "8a34048" },
+ { "8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
+
+static struct i2c_driver idtcm_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idtcm_dt_id),
+ .name = "idtcm",
+ },
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
+ .id_table = idtcm_i2c_id,
+};
+
+module_i2c_driver(idtcm_driver);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
new file mode 100644
index 000000000000..6c1f93ab46f3
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTCLOCKMATRIX_H
+#define PTP_IDTCLOCKMATRIX_H
+
+#include <linux/ktime.h>
+
+#include "idt8a340_reg.h"
+
+#define FW_FILENAME "idtcm.bin"
+#define MAX_PHC_PLL 4
+
+#define PLL_MASK_ADDR (0xFFA5)
+#define DEFAULT_PLL_MASK (0x04)
+
+#define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
+#define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | ((val8) << 8))
+
+#define OUTPUT_MASK_PLL0_ADDR (0xFFB0)
+#define OUTPUT_MASK_PLL1_ADDR (0xFFB2)
+#define OUTPUT_MASK_PLL2_ADDR (0xFFB4)
+#define OUTPUT_MASK_PLL3_ADDR (0xFFB6)
+
+#define DEFAULT_OUTPUT_MASK_PLL0 (0x003)
+#define DEFAULT_OUTPUT_MASK_PLL1 (0x00c)
+#define DEFAULT_OUTPUT_MASK_PLL2 (0x030)
+#define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0)
+
+#define POST_SM_RESET_DELAY_MS (3000)
+#define PHASE_PULL_IN_THRESHOLD_NS (150000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
+#define TOD_BYTE_COUNT (11)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_MAX = PLL_MODE_PHASE_MEASUREMENT,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+struct idtcm;
+
+struct idtcm_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idtcm *idtcm;
+ u16 dpll_phase;
+ u16 dpll_freq;
+ u16 dpll_n;
+ u16 dpll_ctrl_n;
+ u16 dpll_phase_pull_in;
+ u16 tod_read_primary;
+ u16 tod_write;
+ u16 tod_n;
+ u16 hw_dpll_n;
+ enum pll_mode pll_mode;
+ u16 output_mask;
+};
+
+struct idtcm {
+ struct idtcm_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+
+ /* Overhead calculation for adjtime */
+ u8 calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ ktime_t start_time;
+
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+struct idtcm_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+#endif /* PTP_IDTCLOCKMATRIX_H */
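
The SET_U16_LSB/SET_U16_MSB helpers exist because firmware records deliver each 16-bit output mask one byte at a time, low-address byte first. A quick standalone check of how two byte writes assemble a mask (illustration only, macros copied from the header above):

    #include <stdint.h>
    #include <stdio.h>

    #define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
    #define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | ((val8) << 8))

    int main(void)
    {
        uint16_t mask = 0;

        SET_U16_LSB(mask, 0x03);    /* byte at OUTPUT_MASK_PLL0_ADDR */
        SET_U16_MSB(mask, 0x00);    /* byte at OUTPUT_MASK_PLL0_ADDR + 1 */
        printf("output_mask = 0x%04x\n", mask);  /* 0x0003, the PLL0 default */
        return 0;
    }
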
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 0dcfdc806f57..82d31ba32690 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -240,14 +240,12 @@ static int ptp_dte_probe(struct platform_device *pdev)
{
struct ptp_dte *ptp_dte;
struct device *dev = &pdev->dev;
- struct resource *res;
ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
if (!ptp_dte)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ptp_dte->regs = devm_ioremap_resource(dev, res);
+ ptp_dte->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ptp_dte->regs))
return PTR_ERR(ptp_dte->regs);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e3a2518503ed..bd21655c37a6 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -508,15 +508,6 @@ config PWM_TIEHRPWM
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
-config PWM_TIPWMSS
- bool
- default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM)
- help
- PWM Subsystem driver support for AM33xx SOC.
-
- PWM submodules require PWM config space access from submodule
- drivers and require common parent driver support.
-
config PWM_TWL
tristate "TWL4030/6030 PWM support"
depends on TWL4030_CORE
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 26326adf71d7..9a475073dafc 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
-obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 3ee63531f6d5..74eb5af7295f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -841,10 +841,10 @@ config REGULATOR_SKY81452
will be called sky81452-regulator.
config REGULATOR_SLG51000
- tristate "Dialog Semiconductor SLG51000 regulators"
- depends on I2C
- select REGMAP_I2C
- help
+ tristate "Dialog Semiconductor SLG51000 regulators"
+ depends on I2C
+ select REGMAP_I2C
+ help
Say y here to support for the Dialog Semiconductor SLG51000.
The SLG51000 is seven compact and customizable low dropout
regulators.
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index efb2f01a9101..f60e1b26c2d2 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -953,23 +953,6 @@ static struct ab8500_regulator_info
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
- [AB8505_LDO_USB] = {
- .desc = {
- .name = "LDO-USB",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8505_LDO_USB,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_3300000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x82,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- },
[AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index 0248a61f1006..ec764022621f 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -286,3 +286,4 @@ module_platform_driver(bd70528_regulator);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 voltage regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-pmic");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index bdab46a5c461..13a43eee2e46 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1293,3 +1293,4 @@ module_platform_driver(bd718xx_regulator);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD71837/BD71847 voltage regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd718xx-pmic");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a46be221dbdc..679ad3d2ed23 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1403,7 +1403,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
rdev_err(rdev, "failed to enable\n");
return ret;
}
- rdev->use_count++;
+
+ if (rdev->constraints->always_on)
+ rdev->use_count++;
}
print_constraints(rdev);
@@ -1844,6 +1846,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator;
const char *devname = dev ? dev_name(dev) : "deviceless";
+ struct device_link *link;
int ret;
if (get_type >= MAX_GET_TYPE) {
@@ -1951,7 +1954,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
rdev->use_count = 0;
}
- device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
+ link = device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
+ if (!IS_ERR_OR_NULL(link))
+ regulator->device_link = true;
return regulator;
}
@@ -2046,7 +2051,8 @@ static void _regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
if (regulator->dev) {
- device_link_remove(regulator->dev, &rdev->dev);
+ if (regulator->device_link)
+ device_link_remove(regulator->dev, &rdev->dev);
/* remove any sysfs entries */
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
@@ -4963,6 +4969,12 @@ static int generic_coupler_attach(struct regulator_coupler *coupler,
return -EPERM;
}
+ if (!rdev->constraints->always_on) {
+ rdev_err(rdev,
+ "Coupling of a non always-on regulator is unimplemented\n");
+ return -ENOTSUPP;
+ }
+
return 0;
}
@@ -5198,6 +5210,7 @@ unset_supplies:
regulator_remove_coupling(rdev);
mutex_unlock(&regulator_list_mutex);
wash:
+ kfree(rdev->coupling_desc.coupled_rdevs);
kfree(rdev->constraints);
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 710e67081d53..d3ce0278bfbe 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -16,6 +16,7 @@
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/da9062/core.h>
#include <linux/mfd/da9062/registers.h>
+#include <dt-bindings/regulator/dlg,da9063-regulator.h>
/* Regulator IDs */
enum {
@@ -75,14 +76,6 @@ struct da9062_regulators {
struct da9062_regulator regulator[0];
};
-/* BUCK modes */
-enum {
- BUCK_MODE_MANUAL, /* 0 */
- BUCK_MODE_SLEEP, /* 1 */
- BUCK_MODE_SYNC, /* 2 */
- BUCK_MODE_AUTO /* 3 */
-};
-
/* Regulator operations */
/* Current limits array (in uA)
@@ -105,6 +98,20 @@ static const unsigned int da9062_buck_b_limits[] = {
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
+static unsigned int da9062_map_buck_mode(unsigned int mode)
+{
+ switch (mode) {
+ case DA9063_BUCK_MODE_SLEEP:
+ return REGULATOR_MODE_STANDBY;
+ case DA9063_BUCK_MODE_SYNC:
+ return REGULATOR_MODE_FAST;
+ case DA9063_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
@@ -112,13 +119,13 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- val = BUCK_MODE_SYNC;
+ val = DA9063_BUCK_MODE_SYNC;
break;
case REGULATOR_MODE_NORMAL:
- val = BUCK_MODE_AUTO;
+ val = DA9063_BUCK_MODE_AUTO;
break;
case REGULATOR_MODE_STANDBY:
- val = BUCK_MODE_SLEEP;
+ val = DA9063_BUCK_MODE_SLEEP;
break;
default:
return -EINVAL;
@@ -136,7 +143,7 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- unsigned int val, mode = 0;
+ unsigned int val;
int ret;
ret = regmap_field_read(regl->mode, &val);
@@ -145,15 +152,13 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
switch (val) {
default:
- case BUCK_MODE_MANUAL:
- mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY;
/* Sleep flag bit decides the mode */
break;
- case BUCK_MODE_SLEEP:
+ case DA9063_BUCK_MODE_SLEEP:
return REGULATOR_MODE_STANDBY;
- case BUCK_MODE_SYNC:
+ case DA9063_BUCK_MODE_SYNC:
return REGULATOR_MODE_FAST;
- case BUCK_MODE_AUTO:
+ case DA9063_BUCK_MODE_AUTO:
return REGULATOR_MODE_NORMAL;
}
@@ -162,11 +167,9 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
return 0;
if (val)
- mode &= REGULATOR_MODE_STANDBY;
+ return REGULATOR_MODE_STANDBY;
else
- mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
-
- return mode;
+ return REGULATOR_MODE_FAST;
}
/*
@@ -282,13 +285,13 @@ static int da9062_buck_set_suspend_mode(struct regulator_dev *rdev,
switch (mode) {
case REGULATOR_MODE_FAST:
- val = BUCK_MODE_SYNC;
+ val = DA9063_BUCK_MODE_SYNC;
break;
case REGULATOR_MODE_NORMAL:
- val = BUCK_MODE_AUTO;
+ val = DA9063_BUCK_MODE_AUTO;
break;
case REGULATOR_MODE_STANDBY:
- val = BUCK_MODE_SLEEP;
+ val = DA9063_BUCK_MODE_SLEEP;
break;
default:
return -EINVAL;
@@ -371,6 +374,7 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK1_A,
.desc.vsel_mask = DA9062AA_VBUCK1_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK1_A,
__builtin_ffs((int)DA9062AA_BUCK1_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -407,6 +411,7 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK3_A,
.desc.vsel_mask = DA9062AA_VBUCK3_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK3_A,
__builtin_ffs((int)DA9062AA_BUCK3_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -443,6 +448,7 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK4_A,
.desc.vsel_mask = DA9062AA_VBUCK4_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK4_A,
__builtin_ffs((int)DA9062AA_BUCK4_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -615,6 +621,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK1_A,
.desc.vsel_mask = DA9062AA_VBUCK1_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK1_A,
__builtin_ffs((int)DA9062AA_BUCK1_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -651,6 +658,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK2_A,
.desc.vsel_mask = DA9062AA_VBUCK2_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK2_A,
__builtin_ffs((int)DA9062AA_BUCK2_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -687,6 +695,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK3_A,
.desc.vsel_mask = DA9062AA_VBUCK3_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK3_A,
__builtin_ffs((int)DA9062AA_BUCK3_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -723,6 +732,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK4_A,
.desc.vsel_mask = DA9062AA_VBUCK4_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK4_A,
__builtin_ffs((int)DA9062AA_BUCK4_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -942,8 +952,7 @@ static int da9062_regulator_probe(struct platform_device *pdev)
regulators->n_regulators = max_regulators;
platform_set_drvdata(pdev, regulators);
- n = 0;
- while (n < regulators->n_regulators) {
+ for (n = 0; n < regulators->n_regulators; n++) {
/* Initialise regulator structure */
regl = &regulators->regulator[n];
regl->hw = chip;
@@ -1002,8 +1011,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
regl->desc.name);
return PTR_ERR(regl->rdev);
}
-
- n++;
}
/* LDOs overcurrent event support */
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 28b1b20f45bd..2aceb3b7afc2 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -225,7 +225,7 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
struct regmap_field *field;
- unsigned int val, mode = 0;
+ unsigned int val;
int ret;
ret = regmap_field_read(regl->mode, &val);
@@ -235,7 +235,6 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
switch (val) {
default:
case BUCK_MODE_MANUAL:
- mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY;
/* Sleep flag bit decides the mode */
break;
case BUCK_MODE_SLEEP:
@@ -262,11 +261,9 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
return 0;
if (val)
- mode &= REGULATOR_MODE_STANDBY;
+ return REGULATOR_MODE_STANDBY;
else
- mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
-
- return mode;
+ return REGULATOR_MODE_FAST;
}
/*
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index bf80748f1ccc..523dc1b95826 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -283,12 +283,12 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
pdata->init_data[n] = da9211_matches[i].init_data;
pdata->reg_node[n] = da9211_matches[i].of_node;
- pdata->gpiod_ren[n] = devm_gpiod_get_from_of_node(dev,
- da9211_matches[i].of_node,
- "enable-gpios",
- 0,
- GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
- "da9211-enable");
+ pdata->gpiod_ren[n] = devm_fwnode_gpiod_get(dev,
+ of_fwnode_handle(pdata->reg_node[n]),
+ "enable",
+ GPIOD_OUT_HIGH |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "da9211-enable");
if (IS_ERR(pdata->gpiod_ren[n]))
pdata->gpiod_ren[n] = NULL;
n++;
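The same conversion from devm_gpiod_get_from_of_node() to the fwnode-based API recurs throughout this series (max77686, s2mps11, s5m8767, slg51000, tps65090 and tps65132 below). The general shape of the replacement, sketched with placeholder dev/np/label arguments:

	/* old: OF-specific, takes the full property name plus an index */
	gpiod = devm_gpiod_get_from_of_node(dev, np, "enable-gpios", 0,
					    GPIOD_OUT_HIGH, "label");

	/* new: fwnode-based, takes a connection ID; gpiolib appends the
	 * "-gpios" suffix itself when looking up the property */
	gpiod = devm_fwnode_gpiod_get(dev, of_fwnode_handle(np), "enable",
				      GPIOD_OUT_HIGH, "label");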
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index dbe477da4e55..00c83492f774 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -83,6 +83,7 @@ enum {
enum {
SILERGY_SYR82X = 8,
+ SILERGY_SYR83X = 9,
};
struct fan53555_device_info {
@@ -302,6 +303,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
/* Init voltage range and step */
switch (di->chip_id) {
case SILERGY_SYR82X:
+ case SILERGY_SYR83X:
di->vsel_min = 712500;
di->vsel_step = 12500;
break;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f81533070058..bc0bbd99e98d 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -123,6 +123,7 @@ of_get_fixed_voltage_config(struct device *dev,
config->enabled_at_boot = true;
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
+ of_property_read_u32(np, "off-on-delay-us", &config->off_on_delay);
if (of_find_property(np, "vin-supply", NULL))
config->input_supply = "vin";
@@ -189,6 +190,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->desc.enable_time = config->startup_delay;
+ drvdata->desc.off_on_delay = config->off_on_delay;
if (config->input_supply) {
drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 83ae442f515b..2391b565ef11 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -36,6 +36,7 @@ struct regulator {
struct list_head list;
unsigned int always_on:1;
unsigned int bypass:1;
+ unsigned int device_link:1;
int uA_load;
unsigned int enable_count;
unsigned int deferred_disables;
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index c8e579e99316..9089ec608fcc 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -256,8 +256,9 @@ static int max77686_of_parse_cb(struct device_node *np,
case MAX77686_BUCK8:
case MAX77686_BUCK9:
case MAX77686_LDO20 ... MAX77686_LDO22:
- config->ena_gpiod = gpiod_get_from_of_node(np,
- "maxim,ena-gpios",
+ config->ena_gpiod = fwnode_gpiod_get_index(
+ of_fwnode_handle(np),
+ "maxim,ena",
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"max77686-regulator");
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 76152aaa330b..96dc0eea7659 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -296,7 +296,10 @@ static int max8907_regulator_probe(struct platform_device *pdev)
memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
/* Backwards compatibility with MAX8907B; SD1 uses different voltages */
- regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_II2RR_VERSION_MASK) ==
MAX8907_II2RR_VERSION_REV_B) {
pmic->desc[MAX8907_SD1].min_uV = 637500;
@@ -333,14 +336,20 @@ static int max8907_regulator_probe(struct platform_device *pdev)
}
if (pmic->desc[i].ops == &max8907_ldo_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_MASK_LDO_SEQ) !=
MAX8907_MASK_LDO_SEQ)
pmic->desc[i].ops = &max8907_ldo_hwctl_ops;
} else if (pmic->desc[i].ops == &max8907_out5v_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & (MAX8907_MASK_OUT5V_VINEN |
MAX8907_MASK_OUT5V_ENSRC)) !=
MAX8907_MASK_OUT5V_ENSRC)
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 92b41a6a4dc2..bfc15dd3f730 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -38,15 +38,6 @@ struct pbias_reg_info {
int n_voltages;
};
-struct pbias_regulator_data {
- struct regulator_desc desc;
- void __iomem *pbias_addr;
- struct regulator_dev *dev;
- struct regmap *syscon;
- const struct pbias_reg_info *info;
- int voltage;
-};
-
struct pbias_of_data {
unsigned int offset;
};
@@ -157,14 +148,13 @@ MODULE_DEVICE_TABLE(of, pbias_of_match);
static int pbias_regulator_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct pbias_regulator_data *drvdata;
struct resource *res;
struct regulator_config cfg = { };
+ struct regulator_desc *desc;
+ struct regulator_dev *rdev;
struct regmap *syscon;
const struct pbias_reg_info *info;
- int ret = 0;
- int count, idx, data_idx = 0;
- const struct of_device_id *match;
+ int ret, count, idx;
const struct pbias_of_data *data;
unsigned int offset;
@@ -173,19 +163,16 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (count < 0)
return count;
- drvdata = devm_kcalloc(&pdev->dev,
- count, sizeof(struct pbias_regulator_data),
- GFP_KERNEL);
- if (!drvdata)
+ desc = devm_kcalloc(&pdev->dev, count, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(syscon))
return PTR_ERR(syscon);
- match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
- if (match && match->data) {
- data = match->data;
+ data = of_device_get_match_data(&pdev->dev);
+ if (data) {
offset = data->offset;
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -200,7 +187,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
cfg.regmap = syscon;
cfg.dev = &pdev->dev;
- for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
+ for (idx = 0; idx < PBIAS_NUM_REGS && count; idx++) {
if (!pbias_matches[idx].init_data ||
!pbias_matches[idx].of_node)
continue;
@@ -209,41 +196,35 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (!info)
return -ENODEV;
- drvdata[data_idx].syscon = syscon;
- drvdata[data_idx].info = info;
- drvdata[data_idx].desc.name = info->name;
- drvdata[data_idx].desc.owner = THIS_MODULE;
- drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
- drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
- drvdata[data_idx].desc.volt_table = info->pbias_volt_table;
- drvdata[data_idx].desc.n_voltages = info->n_voltages;
- drvdata[data_idx].desc.enable_time = info->enable_time;
- drvdata[data_idx].desc.vsel_reg = offset;
- drvdata[data_idx].desc.vsel_mask = info->vmode;
- drvdata[data_idx].desc.enable_reg = offset;
- drvdata[data_idx].desc.enable_mask = info->enable_mask;
- drvdata[data_idx].desc.enable_val = info->enable;
- drvdata[data_idx].desc.disable_val = info->disable_val;
+ desc->name = info->name;
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->ops = &pbias_regulator_voltage_ops;
+ desc->volt_table = info->pbias_volt_table;
+ desc->n_voltages = info->n_voltages;
+ desc->enable_time = info->enable_time;
+ desc->vsel_reg = offset;
+ desc->vsel_mask = info->vmode;
+ desc->enable_reg = offset;
+ desc->enable_mask = info->enable_mask;
+ desc->enable_val = info->enable;
+ desc->disable_val = info->disable_val;
cfg.init_data = pbias_matches[idx].init_data;
- cfg.driver_data = &drvdata[data_idx];
cfg.of_node = pbias_matches[idx].of_node;
- drvdata[data_idx].dev = devm_regulator_register(&pdev->dev,
- &drvdata[data_idx].desc, &cfg);
- if (IS_ERR(drvdata[data_idx].dev)) {
- ret = PTR_ERR(drvdata[data_idx].dev);
+ rdev = devm_regulator_register(&pdev->dev, desc, &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(&pdev->dev,
"Failed to register regulator: %d\n", ret);
- goto err_regulator;
+ return ret;
}
- data_idx++;
+ desc++;
+ count--;
}
- platform_set_drvdata(pdev, drvdata);
-
-err_regulator:
- return ret;
+ return 0;
}
static struct platform_driver pbias_regulator_driver = {
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index c2469263db95..0345f38f6f78 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -86,10 +86,6 @@ static const unsigned int SW1_table[] = {
#define SW2_table SW1_table
-static const unsigned int SW3_table[] = {
- 4000000, 4500000, 5000000, 5500000,
-};
-
struct pcap_regulator {
const u8 reg;
const u8 en;
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 0246b6f99fb5..c86ad40015ce 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -878,6 +878,58 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
{},
};
+static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4-l7-l8"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo, "vdd-l4-l7-l8"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l4-l7-l8"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo_lv, "vdd-l10-l14-l15"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo_lv, "vdd-l11-l12-l13"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l11-l12-l13"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo_lv, "vdd-l11-l12-l13"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l10-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l10-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_hfsmps510, "vdd-s8"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_pldo_lv, "vdd-l1-l8"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo, "vdd-l1-l8"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
+ RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
+ {},
+};
+
static int rpmh_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -940,6 +992,14 @@ static const struct of_device_id rpmh_regulator_match_table[] = {
.compatible = "qcom,pmi8998-rpmh-regulators",
.data = pmi8998_vreg_data,
},
+ {
+ .compatible = "qcom,pm6150-rpmh-regulators",
+ .data = pm6150_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm6150l-rpmh-regulators",
+ .data = pm6150l_vreg_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
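For context, the per-PMIC tables registered above reach the driver through the standard OF match-data lookup; a sketch of the existing probe flow (not new code in this patch):

	const struct rpmh_vreg_init_data *vreg_data;

	vreg_data = of_device_get_match_data(dev);
	if (!vreg_data)
		return -ENODEV;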
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 3b0828c79e2b..fff8d5fdef6a 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -338,6 +338,63 @@ static const struct regulator_desc pm8916_buck_hvo_smps = {
.ops = &rpm_smps_ldo_ops,
};
+static const struct regulator_desc pm8950_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 96, 127, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_ftsmps2p5 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(80000, 0, 255, 5000),
+ REGULATOR_LINEAR_RANGE(160000, 256, 460, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 461,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_ult_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 202, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 203,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_ult_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_pldo_lv = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1500000, 0, 16, 25000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 17,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(975000, 0, 164, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 165,
+ .ops = &rpm_smps_ldo_ops,
+};
+
static const struct regulator_desc pm8994_hfsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
@@ -638,6 +695,40 @@ static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8950_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8950_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8950_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_hfsmps, "vdd_s6" },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8950_ult_nldo, "vdd_l1_l19" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8950_ult_nldo, "vdd_l2_l23" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8950_ult_nldo, "vdd_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_nldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16"},
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l19", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l1_l19"},
+ { "l20", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l20"},
+ { "l21", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l21"},
+ { "l22", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l23", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l2_l23"},
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
@@ -767,6 +858,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 7f51c5fc8194..95737e4dd6bb 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -1869,6 +1869,39 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8950_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l19", },
+ { "l2", 0x4100, "vdd_l2_l23", },
+ { "l3", 0x4200, "vdd_l3", },
+ { "l4", 0x4300, "vdd_l4_l5_l6_l7_l16", },
+ { "l5", 0x4400, "vdd_l4_l5_l6_l7_l16", },
+ { "l6", 0x4500, "vdd_l4_l5_l6_l7_l16", },
+ { "l7", 0x4600, "vdd_l4_l5_l6_l7_l16", },
+ { "l8", 0x4700, "vdd_l8_l11_l12_l17_l22", },
+ { "l9", 0x4800, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l10", 0x4900, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l11", 0x4a00, "vdd_l8_l11_l12_l17_l22", },
+ { "l12", 0x4b00, "vdd_l8_l11_l12_l17_l22", },
+ { "l13", 0x4c00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l14", 0x4d00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l15", 0x4e00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l16", 0x4f00, "vdd_l4_l5_l6_l7_l16", },
+ { "l17", 0x5000, "vdd_l8_l11_l12_l17_l22", },
+ { "l18", 0x5100, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l19", 0x5200, "vdd_l1_l19", },
+ { "l20", 0x5300, "vdd_l20", },
+ { "l21", 0x5400, "vdd_l21", },
+ { "l22", 0x5500, "vdd_l8_l11_l12_l17_l22", },
+ { "l23", 0x5600, "vdd_l2_l23", },
+ { }
+};
+
static const struct spmi_regulator_data pm8994_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -1927,6 +1960,12 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8004_regulators[] = {
+ { "s2", 0x1700, "vdd_s2", },
+ { "s5", 0x2000, "vdd_s5", },
+ { }
+};
+
static const struct spmi_regulator_data pm8005_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -1941,10 +1980,12 @@ static const struct spmi_regulator_data pms405_regulators[] = {
};
static const struct of_device_id qcom_spmi_regulator_match[] = {
+ { .compatible = "qcom,pm8004-regulators", .data = &pm8004_regulators },
{ .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
+ { .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 61bd5ef0806c..5b4003226484 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -388,7 +388,7 @@ static int rk817_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
break;
default:
dev_warn(&rdev->dev,
- "%s ramp_delay: %d not supported, setting 10000\n",
+ "%s ramp_delay: %d not supported, setting 25000\n",
rdev->desc->name, ramp_delay);
}
@@ -411,21 +411,6 @@ static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
sel);
}
-static int rk817_set_suspend_voltage(struct regulator_dev *rdev, int uv)
-{
- unsigned int reg;
- int sel = regulator_map_voltage_linear(rdev, uv, uv);
- /* only ldo1~ldo9 */
- if (sel < 0)
- return -EINVAL;
-
- reg = rdev->desc->vsel_reg + RK808_SLP_REG_OFFSET;
-
- return regmap_update_bits(rdev->regmap, reg,
- rdev->desc->vsel_mask,
- sel);
-}
-
static int rk808_set_suspend_voltage_range(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
@@ -686,7 +671,7 @@ static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0),
};
-static struct regulator_ops rk809_buck5_ops_range = {
+static const struct regulator_ops rk809_buck5_ops_range = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -700,7 +685,7 @@ static struct regulator_ops rk809_buck5_ops_range = {
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_reg_ops = {
+static const struct regulator_ops rk817_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -708,12 +693,12 @@ static struct regulator_ops rk817_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = rk8xx_is_enabled_wmsk_regmap,
- .set_suspend_voltage = rk817_set_suspend_voltage,
+ .set_suspend_voltage = rk808_set_suspend_voltage,
.set_suspend_enable = rk817_set_suspend_enable,
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_boost_ops = {
+static const struct regulator_ops rk817_boost_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -725,7 +710,7 @@ static struct regulator_ops rk817_boost_ops = {
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_buck_ops_range = {
+static const struct regulator_ops rk817_buck_ops_range = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -743,7 +728,7 @@ static struct regulator_ops rk817_buck_ops_range = {
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_switch_ops = {
+static const struct regulator_ops rk817_switch_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = rk8xx_is_enabled_wmsk_regmap,
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index eb807a059479..4a91be0ad5ae 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -90,7 +90,7 @@ static const struct regulator_desc rc5t619_regulators[] = {
REG(LDO7, LDOEN1, BIT(6), LDO7DAC, 0x7f, 900000, 3500000, 25000),
REG(LDO8, LDOEN1, BIT(7), LDO8DAC, 0x7f, 900000, 3500000, 25000),
REG(LDO9, LDOEN2, BIT(0), LDO9DAC, 0x7f, 900000, 3500000, 25000),
- REG(LDO10, LDOEN2, BIT(0), LDO10DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO10, LDOEN2, BIT(1), LDO10DAC, 0x7f, 900000, 3500000, 25000),
/* LDO RTC */
REG(LDORTC1, LDOEN2, BIT(4), LDORTCDAC, 0x7f, 1700000, 3500000, 25000),
REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 5bc00884cf51..4f2dc5ebffdc 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -844,10 +844,9 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
if (!rdata[reg].init_data || !rdata[reg].of_node)
continue;
- gpio[reg] = devm_gpiod_get_from_of_node(&pdev->dev,
- rdata[reg].of_node,
- "samsung,ext-control-gpios",
- 0,
+ gpio[reg] = devm_fwnode_gpiod_get(&pdev->dev,
+ of_fwnode_handle(rdata[reg].of_node),
+ "samsung,ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s2mps11-regulator");
if (PTR_ERR(gpio[reg]) == -ENOENT)
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 6ca27e9d5ef7..bdc07739e9a2 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -567,11 +567,10 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
continue;
}
- rdata->ext_control_gpiod = devm_gpiod_get_from_of_node(
+ rdata->ext_control_gpiod = devm_fwnode_gpiod_get(
&pdev->dev,
- reg_np,
- "s5m8767,pmic-ext-control-gpios",
- 0,
+ of_fwnode_handle(reg_np),
+ "s5m8767,pmic-ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s5m8767");
if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index a0565daecace..bf1a3508ebc4 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -198,17 +198,14 @@ static int slg51000_of_parse_cb(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *config)
{
- struct slg51000 *chip = config->driver_data;
struct gpio_desc *ena_gpiod;
- enum gpiod_flags gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
- ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
- "enable-gpios", 0,
- gflags, "gpio-en-ldo");
- if (!IS_ERR(ena_gpiod)) {
+ ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
+ GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "gpio-en-ldo");
+ if (!IS_ERR(ena_gpiod))
config->ena_gpiod = ena_gpiod;
- devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
- }
return 0;
}
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 8919a5130bec..bdfaf7edb75a 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -181,7 +181,6 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
static int stm32_vrefbuf_probe(struct platform_device *pdev)
{
- struct resource *res;
struct stm32_vrefbuf *priv;
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -192,8 +191,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
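devm_platform_ioremap_resource() is a straight fusion of the two calls it replaces here (and in the uniphier driver below); approximately:

	/* equivalent open-coded form */
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	base = devm_ioremap_resource(&pdev->dev, res);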
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index f09061473613..f3d7d007ecbb 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -54,6 +54,8 @@ enum {
/* Enable time worst case is 5000mV/(2250uV/uS) */
#define PMIC_ENABLE_TIME_US 2200
+/* Ramp delay worst case is (2250uV/uS) */
+#define PMIC_RAMP_DELAY 2200
static const struct regulator_linear_range buck1_ranges[] = {
REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
@@ -208,6 +210,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.supply_name = #base, \
}
@@ -227,6 +230,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.bypass_reg = LDO3_ACTIVE_CR, \
.bypass_mask = LDO_BYPASS_MASK, \
.bypass_val_on = LDO_BYPASS_MASK, \
@@ -248,6 +252,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.supply_name = #base, \
}
@@ -267,6 +272,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.of_map_mode = stpmic1_map_mode, \
.pull_down_reg = ids##_PULL_DOWN_REG, \
.pull_down_mask = ids##_PULL_DOWN_MASK, \
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 06059a94f7c6..f8939af0bd2c 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -37,6 +37,7 @@ static struct regulator_ops tps6105x_regulator_ops = {
static const struct regulator_desc tps6105x_regulator_desc = {
.name = "tps6105x-boost",
+ .of_match = of_match_ptr("regulator"),
.ops = &tps6105x_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = 0,
@@ -71,6 +72,7 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
config.dev = &tps6105x->client->dev;
config.init_data = pdata->regulator_data;
config.driver_data = tps6105x;
+ config.of_node = pdev->dev.parent->of_node;
config.regmap = tps6105x->regmap;
/* Register regulator with framework */
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 10ea4b5a0f55..f0b660e9f15f 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -346,16 +346,20 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
for (idx = 0; idx < ARRAY_SIZE(tps65090_matches); idx++) {
struct regulator_init_data *ri_data;
struct tps65090_regulator_plat_data *rpdata;
+ struct device_node *np;
rpdata = &reg_pdata[idx];
ri_data = tps65090_matches[idx].init_data;
- if (!ri_data || !tps65090_matches[idx].of_node)
+ if (!ri_data)
+ continue;
+
+ np = tps65090_matches[idx].of_node;
+ if (!np)
continue;
rpdata->reg_init_data = ri_data;
- rpdata->enable_ext_control = of_property_read_bool(
- tps65090_matches[idx].of_node,
- "ti,enable-ext-control");
+ rpdata->enable_ext_control = of_property_read_bool(np,
+ "ti,enable-ext-control");
if (rpdata->enable_ext_control) {
enum gpiod_flags gflags;
@@ -366,11 +370,12 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
gflags = GPIOD_OUT_LOW;
gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
- rpdata->gpiod = devm_gpiod_get_from_of_node(&pdev->dev,
- tps65090_matches[idx].of_node,
- "dcdc-ext-control-gpios", 0,
- gflags,
- "tps65090");
+ rpdata->gpiod = devm_fwnode_gpiod_get(
+ &pdev->dev,
+ of_fwnode_handle(np),
+ "dcdc-ext-control",
+ gflags,
+ "tps65090");
if (PTR_ERR(rpdata->gpiod) == -ENOENT) {
dev_err(&pdev->dev,
"could not find DCDC external control GPIO\n");
@@ -379,8 +384,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
return ERR_CAST(rpdata->gpiod);
}
- if (of_property_read_u32(tps65090_matches[idx].of_node,
- "ti,overcurrent-wait",
+ if (of_property_read_u32(np, "ti,overcurrent-wait",
&rpdata->overcurrent_wait) == 0)
rpdata->overcurrent_wait_valid = true;
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index e302bd01a084..7b0e38f8d627 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -136,9 +136,10 @@ static int tps65132_of_parse_cb(struct device_node *np,
struct tps65132_reg_pdata *rpdata = &tps->reg_pdata[desc->id];
int ret;
- rpdata->en_gpiod = devm_fwnode_get_index_gpiod_from_child(tps->dev,
- "enable", 0, &np->fwnode, 0, "enable");
- if (IS_ERR_OR_NULL(rpdata->en_gpiod)) {
+ rpdata->en_gpiod = devm_fwnode_gpiod_get(tps->dev, of_fwnode_handle(np),
+ "enable", GPIOD_ASIS,
+ "enable");
+ if (IS_ERR(rpdata->en_gpiod)) {
ret = PTR_ERR(rpdata->en_gpiod);
/* Ignore the error other than probe defer */
@@ -147,10 +148,12 @@ static int tps65132_of_parse_cb(struct device_node *np,
return 0;
}
- rpdata->act_dis_gpiod = devm_fwnode_get_index_gpiod_from_child(
- tps->dev, "active-discharge", 0,
- &np->fwnode, 0, "active-discharge");
- if (IS_ERR_OR_NULL(rpdata->act_dis_gpiod)) {
+ rpdata->act_dis_gpiod = devm_fwnode_gpiod_get(tps->dev,
+ of_fwnode_handle(np),
+ "active-discharge",
+ GPIOD_ASIS,
+ "active-discharge");
+ if (IS_ERR(rpdata->act_dis_gpiod)) {
ret = PTR_ERR(rpdata->act_dis_gpiod);
/* Ignore the error other than probe defer */
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 2311924c3103..2e02e26b516c 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -45,7 +45,6 @@ static int uniphier_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev *rdev;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
const char *name;
int i, ret, nr;
@@ -58,8 +57,7 @@ static int uniphier_regulator_probe(struct platform_device *pdev)
if (WARN_ON(!priv->data))
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/regulator/vexpress-regulator.c b/drivers/regulator/vexpress-regulator.c
index 1235f46e633e..5d39663efcaa 100644
--- a/drivers/regulator/vexpress-regulator.c
+++ b/drivers/regulator/vexpress-regulator.c
@@ -75,10 +75,7 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
config.of_node = pdev->dev.of_node;
rdev = devm_regulator_register(&pdev->dev, desc, &config);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
-
- return 0;
+ return PTR_ERR_OR_ZERO(rdev);
}
static const struct of_device_id vexpress_regulator_of_match[] = {
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5542d9eadfe0..7d079154f849 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -116,7 +116,9 @@ int dasd_scan_partitions(struct dasd_block *block)
return -ENODEV;
}
- rc = blkdev_reread_part(bdev);
+ mutex_lock(&bdev->bd_mutex);
+ rc = bdev_disk_changed(bdev, false);
+ mutex_unlock(&bdev->bd_mutex);
if (rc)
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, rc %d", rc);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f6a8db04177c..23eae4188876 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,7 +5,7 @@
# The following is required for define_trace.h to find ./trace.h
CFLAGS_trace.o := -I$(src)
-CFLAGS_vfio_ccw_fsm.o := -I$(src)
+CFLAGS_vfio_ccw_trace.o := -I$(src)
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
@@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
- vfio_ccw_async.o
+ vfio_ccw_async.o vfio_ccw_trace.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a58b45df95d7..4b0798472643 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -82,6 +82,7 @@ enum qdio_irq_states {
#define QDIO_SIGA_WRITE 0x00
#define QDIO_SIGA_READ 0x01
#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEM 0x03
#define QDIO_SIGA_WRITEQ 0x04
#define QDIO_SIGA_QEBSM_FLAG 0x80
@@ -252,9 +253,6 @@ struct qdio_q {
/* input or output queue */
int is_input_q;
- /* list of thinint input queues */
- struct list_head entry;
-
/* upper-layer program handler */
qdio_handler_t (*handler);
@@ -272,6 +270,7 @@ struct qdio_irq {
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
+ struct list_head entry; /* list of thinint devices */
struct dentry *debugfs_dev;
struct dentry *debugfs_perf;
@@ -317,13 +316,15 @@ struct qdio_irq {
#define qperf(__qdev, __attr) ((__qdev)->perf_stat.__attr)
-#define qperf_inc(__q, __attr) \
+#define QDIO_PERF_STAT_INC(__irq, __attr) \
({ \
- struct qdio_irq *qdev = (__q)->irq_ptr; \
+ struct qdio_irq *qdev = __irq; \
if (qdev->perf_stat_enabled) \
(qdev->perf_stat.__attr)++; \
})
+#define qperf_inc(__q, __attr) QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr)
+
static inline void account_sbals_error(struct qdio_q *q, int count)
{
q->q_stats.nr_sbal_error += count;
@@ -355,14 +356,10 @@ static inline int multicast_outbound(struct qdio_q *q)
for (i = 0; i < irq_ptr->nr_output_qs && \
({ q = irq_ptr->output_qs[i]; 1; }); i++)
-#define prev_buf(bufnr) \
- ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
-#define next_buf(bufnr) \
- ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
-#define add_buf(bufnr, inc) \
- ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
-#define sub_buf(bufnr, dec) \
- ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc) QDIO_BUFNR((bufnr) + (inc))
+#define next_buf(bufnr) add_buf(bufnr, 1)
+#define sub_buf(bufnr, dec) QDIO_BUFNR((bufnr) - (dec))
+#define prev_buf(bufnr) sub_buf(bufnr, 1)
#define queue_irqs_enabled(q) \
(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
@@ -375,8 +372,8 @@ extern u64 last_ai_time;
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_add_device(struct qdio_irq *irq_ptr);
+void tiqdio_remove_device(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
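The reworked buffer arithmetic above leans on the QDIO_BUFNR() wrap-around helper; from memory (so treat the exact location as an assumption), it lives alongside the other queue constants in arch/s390/include/asm/qdio.h:

	#define QDIO_MAX_BUFFERS_PER_Q		128
	#define QDIO_MAX_BUFFERS_MASK		(QDIO_MAX_BUFFERS_PER_Q - 1)
	#define QDIO_BUFNR(num)			((num) & QDIO_MAX_BUFFERS_MASK)

So next_buf(127) wraps to 0 and sub_buf(0, 1) yields 127, relying on the queue size being a power of two.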
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5b63c505a2f7..f8b897b7e78b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -131,7 +131,7 @@ again:
case 96:
/* not all buffers processed */
qperf_inc(q, eqbs_partial);
- DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
tmp_count);
return count - tmp_count;
case 97:
@@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
- unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+ unsigned int *busy_bit, unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
- unsigned long laob = 0;
- if (aob) {
- fc = QDIO_SIGA_WRITEQ;
- laob = aob;
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+ if (count > 1)
+ fc = QDIO_SIGA_WRITEM;
+ else if (aob)
+ fc = QDIO_SIGA_WRITEQ;
}
if (is_qebsm(q)) {
@@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
@@ -423,9 +424,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
static void process_buffer_error(struct qdio_q *q, unsigned int start,
int count)
{
- unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
- SLSB_P_OUTPUT_NOT_INIT;
-
q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
@@ -433,7 +431,7 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
- goto set;
+ return;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -442,13 +440,6 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
DBF_ERROR("F14:%2x F15:%2x",
q->sbal[start]->element[14].sflags,
q->sbal[start]->element[15].sflags);
-
-set:
- /*
- * Interrupts may be avoided as long as the error is present
- * so change the buffer state immediately to avoid starvation.
- */
- set_buf_states(q, start, state, count);
}
static inline void inbound_primed(struct qdio_q *q, unsigned int start,
@@ -530,6 +521,11 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
return count;
case SLSB_P_INPUT_ERROR:
process_buffer_error(q, start, count);
+ /*
+ * Interrupts may be avoided as long as the error is present
+ * so change the buffer state immediately to avoid starvation.
+ */
+ set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
@@ -781,7 +777,8 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
return count;
}
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+ unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -793,7 +790,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
retry:
qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit, aob);
+ cc = qdio_siga_output(q, count, &busy_bit, aob);
switch (cc) {
case 0:
break;
@@ -963,7 +960,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state)) {
- qperf_inc(q, int_discarded);
+ QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
continue;
}
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
@@ -1162,7 +1159,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
- tiqdio_remove_input_queues(irq_ptr);
+ tiqdio_remove_device(irq_ptr);
qdio_shutdown_queues(cdev);
qdio_shutdown_debug_entries(irq_ptr);
@@ -1284,6 +1281,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
init_data->no_output_qs))
goto out_rel;
+ INIT_LIST_HEAD(&irq_ptr->entry);
init_data->cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
@@ -1428,7 +1426,7 @@ int qdio_activate(struct ccw_device *cdev)
}
if (is_thinint_irq(irq_ptr))
- tiqdio_add_input_queues(irq_ptr);
+ tiqdio_add_device(irq_ptr);
/* wait for subchannel to become active */
msleep(5);
@@ -1526,7 +1524,7 @@ set:
* @count: how many buffers are filled
*/
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+ unsigned int bufnr, unsigned int count)
{
const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
@@ -1549,13 +1547,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = 0;
- /* One SIGA-W per buffer required for unicast HSI */
- WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
-
- if (q->u.out.use_cq)
+ if (q->u.out.use_cq && count == 1)
phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
- rc = qdio_kick_outbound_q(q, phys_aob);
+ rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1564,7 +1559,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
/* The previous buffer is not processed yet, tack on. */
qperf_inc(q, fast_requeue);
} else {
- rc = qdio_kick_outbound_q(q, 0);
+ rc = qdio_kick_outbound_q(q, count, 0);
}
/* Let drivers implement their own completion scanning: */
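Distilled, the SIGA opcode selection that qdio_siga_output() now performs for outbound queues reads as follows (a summary sketch of the hunk above, not a drop-in function): on a unicast HiperSockets queue, a multi-buffer request picks the new multi-write opcode, a single buffer with an asynchronous operation block picks the AOB variant, and everything else stays a plain write.

	unsigned int fc = QDIO_SIGA_WRITE;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;	/* one SIGA, many buffers */
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;	/* single buffer with AOB */
	}

This is also why the one-SIGA-per-buffer WARN_ON_ONCE in handle_outbound() can be dropped: count > 1 is now legal on unicast HiperSockets queues.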
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index cd164886132f..dc430bd86ade 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,7 +150,6 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
- INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -179,7 +178,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
- INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 93ee067c10ca..7c4e4ec08a12 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -39,14 +39,6 @@ struct indicator_t {
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);
-/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
-
-static struct airq_struct tiqdio_airq = {
- .handler = tiqdio_thinint_handler,
- .isc = QDIO_AIRQ_ISC,
-};
-
static struct indicator_t *q_indicators;
u64 last_ai_time;
@@ -74,26 +66,20 @@ static void put_indicator(u32 *addr)
atomic_dec(&ind->count);
}
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_add_device(struct qdio_irq *irq_ptr)
{
mutex_lock(&tiq_list_lock);
- list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+ list_add_rcu(&irq_ptr->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
}
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_remove_device(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
-
- q = irq_ptr->input_qs[0];
- if (!q)
- return;
-
mutex_lock(&tiq_list_lock);
- list_del_rcu(&q->entry);
+ list_del_rcu(&irq_ptr->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
- INIT_LIST_HEAD(&q->entry);
+ INIT_LIST_HEAD(&irq_ptr->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -154,7 +140,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state)) {
- qperf_inc(q, int_discarded);
+ QDIO_PERF_STAT_INC(irq, int_discarded);
continue;
}
@@ -182,7 +168,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
u32 si_used = clear_shared_ind();
- struct qdio_q *q;
+ struct qdio_irq *irq;
last_ai_time = S390_lowcore.int_clock;
inc_irq_stat(IRQIO_QAI);
@@ -190,12 +176,8 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
- /* check for work on all inbound thinint queues */
- list_for_each_entry_rcu(q, &tiq_list, entry) {
- struct qdio_irq *irq;
-
+ list_for_each_entry_rcu(irq, &tiq_list, entry) {
/* only process queues from changed sets */
- irq = q->irq_ptr;
if (unlikely(references_shared_dsci(irq))) {
if (!si_used)
continue;
@@ -204,11 +186,16 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
tiqdio_call_inq_handlers(irq);
- qperf_inc(q, adapter_int);
+ QDIO_PERF_STAT_INC(irq, adapter_int);
}
rcu_read_unlock();
}
+static struct airq_struct tiqdio_airq = {
+ .handler = tiqdio_thinint_handler,
+ .isc = QDIO_AIRQ_ISC,
+};
+
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 7cdc38049033..ba31240ce965 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -15,6 +15,7 @@
#include <asm/scsw.h>
#include "orb.h"
+#include "vfio_ccw_trace.h"
/*
* Max length for ccw chain.
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 4a1e727c62d9..23e61aa638e4 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -15,9 +15,6 @@
#include "ioasm.h"
#include "vfio_ccw_private.h"
-#define CREATE_TRACE_POINTS
-#include "vfio_ccw_trace.h"
-
static int fsm_io_helper(struct vfio_ccw_private *private)
{
struct subchannel *sch;
@@ -321,8 +318,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
err_out:
- trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
- io_region->ret_code, errstr);
+ trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+ io_region->ret_code, errstr);
}
/*
@@ -344,6 +341,10 @@ static void fsm_async_request(struct vfio_ccw_private *private,
/* should not happen? */
cmd_region->ret_code = -EINVAL;
}
+
+ trace_vfio_ccw_fsm_async_request(get_schid(private),
+ cmd_region->command,
+ cmd_region->ret_code);
}
/*
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index bbe9babf767b..9b9bb4982972 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -135,6 +135,7 @@ extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
int event)
{
+ trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);
}
diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
new file mode 100644
index 000000000000..8c671d2519f6
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Eric Farman <farman@linux.ibm.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index b1da53ddec1f..30162a318a8a 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -7,6 +7,8 @@
* Halil Pasic <pasic@linux.vnet.ibm.com>
*/
+#include "cio.h"
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM vfio_ccw
@@ -15,28 +17,88 @@
#include <linux/tracepoint.h>
-TRACE_EVENT(vfio_ccw_io_fctl,
+TRACE_EVENT(vfio_ccw_fsm_async_request,
+ TP_PROTO(struct subchannel_id schid,
+ int command,
+ int errno),
+ TP_ARGS(schid, command, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, command)
+ __field(int, errno)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->command = command;
+ __entry->errno = errno;
+ ),
+
+ TP_printk("schid=%x.%x.%04x command=0x%x errno=%d",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->command,
+ __entry->errno)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_event,
+ TP_PROTO(struct subchannel_id schid, int state, int event),
+ TP_ARGS(schid, state, event),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(int, state)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->state = state;
+ __entry->event = event;
+ ),
+
+ TP_printk("schid=%x.%x.%04x state=%d event=%d",
+ __entry->cssid, __entry->ssid, __entry->schno,
+ __entry->state,
+ __entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_io_request,
TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
TP_ARGS(fctl, schid, errno, errstr),
TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
__field(int, fctl)
- __field_struct(struct subchannel_id, schid)
__field(int, errno)
__field(char*, errstr)
),
TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
__entry->fctl = fctl;
- __entry->schid = schid;
__entry->errno = errno;
__entry->errstr = errstr;
),
- TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
- __entry->schid.cssid,
- __entry->schid.ssid,
- __entry->schid.sch_no,
+ TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
__entry->fctl,
__entry->errno,
__entry->errstr)
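Moving CREATE_TRACE_POINTS into a dedicated vfio_ccw_trace.c follows the usual ftrace convention: exactly one translation unit per module defines the tracepoint bodies, every other file merely includes the header for the declarations, and EXPORT_TRACEPOINT_SYMBOL makes the static keys reachable from the other objects linked into vfio_ccw.o. Schematically:

	/* vfio_ccw_trace.c - the single definer */
	#define CREATE_TRACE_POINTS
	#include "vfio_ccw_trace.h"

	/* vfio_ccw_fsm.c, vfio_ccw_private.h, ... - users only */
	#include "vfio_ccw_trace.h"
	...
	trace_vfio_ccw_fsm_event(schid, state, event);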
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 9de3d46b3253..d78d77686d7b 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -715,36 +715,18 @@ out:
static void *_copy_key_from_user(void __user *ukey, size_t keylen)
{
- void *kkey;
-
if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
return ERR_PTR(-EINVAL);
- kkey = kmalloc(keylen, GFP_KERNEL);
- if (!kkey)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(kkey, ukey, keylen)) {
- kfree(kkey);
- return ERR_PTR(-EFAULT);
- }
- return kkey;
+ return memdup_user(ukey, keylen);
}
static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
{
- void *kapqns = NULL;
- size_t nbytes;
-
- if (uapqns && nr_apqns > 0) {
- nbytes = nr_apqns * sizeof(struct pkey_apqn);
- kapqns = kmalloc(nbytes, GFP_KERNEL);
- if (!kapqns)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(kapqns, uapqns, nbytes))
- return ERR_PTR(-EFAULT);
- }
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
- return kapqns;
+ return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
}
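memdup_user() collapses the kmalloc/copy_from_user/kfree dance into a single call and, on failure, returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) rather than NULL, so both helpers keep their existing IS_ERR() handling in the ioctl paths. The pattern in miniature:

	void *kbuf = memdup_user(ubuf, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */
	/* ... use kbuf ... */
	kfree(kbuf);			/* caller owns the copy */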
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 66eac2b9704d..1901e9c80ed8 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -32,8 +32,6 @@
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
-#define ISM_ERROR 0xFFFF
-
struct ism_req_hdr {
u32 cmd;
u16 : 16;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e4b55f9aa062..293dd99b7fef 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -368,6 +368,7 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_L3_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
+ QETH_HEADER_MASK_INVAL = 0x80,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -477,12 +478,16 @@ struct qeth_card_stats {
u64 rx_sg_frags;
u64 rx_sg_alloc_page;
+ u64 rx_dropped_nomem;
+ u64 rx_dropped_notsupp;
+
/* rtnl_link_stats64 */
u64 rx_packets;
u64 rx_bytes;
- u64 rx_errors;
- u64 rx_dropped;
u64 rx_multicast;
+ u64 rx_length_errors;
+ u64 rx_frame_errors;
+ u64 rx_fifo_errors;
};
struct qeth_out_q_stats {
@@ -532,6 +537,8 @@ struct qeth_qdio_out_q {
struct timer_list timer;
struct qeth_hdr *prev_hdr;
u8 bulk_start;
+ u8 bulk_count;
+ u8 bulk_max;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -817,7 +824,6 @@ struct qeth_card {
struct workqueue_struct *event_wq;
struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
struct mutex ip_lock;
@@ -839,6 +845,7 @@ struct qeth_card {
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
debug_info_t *debug;
+ struct mutex sbp_lock;
struct mutex conf_mutex;
struct mutex discipline_mutex;
struct napi_struct napi;
@@ -878,6 +885,13 @@ static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
return txq;
}
+static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
+ QETH_IQD_MCAST_TXQ;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
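The qeth_card_stats change above splits the old catch-all rx_errors/rx_dropped counters into per-cause counters; the rtnl-visible aggregates are then derived at query time (see qeth_get_stats64() further down in this patch). A small sketch of that derivation, with field names copied from the new struct:

#include <stdint.h>

struct qeth_rx_counters {	/* names mirror the new qeth_card_stats fields */
	uint64_t rx_dropped_nomem;
	uint64_t rx_dropped_notsupp;
	uint64_t rx_length_errors;
	uint64_t rx_frame_errors;
	uint64_t rx_fifo_errors;
};

/* rtnl_link_stats64-style aggregates, computed on demand instead of
 * being counted separately (matches qeth_get_stats64() below).
 */
static uint64_t rx_errors(const struct qeth_rx_counters *c)
{
	return c->rx_length_errors + c->rx_frame_errors + c->rx_fifo_errors;
}

static uint64_t rx_dropped(const struct qeth_rx_counters *c)
{
	return c->rx_dropped_nomem + c->rx_dropped_notsupp;
}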
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dda274351c21..efcbe60220d1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -901,30 +901,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
- return 1;
+ return -EIO;
}
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
QETH_CARD_TEXT(card, 2, "REVIND");
- return 1;
+ return -EIO;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
QETH_CARD_TEXT(card, 2, "CMDREJi");
- return 1;
+ return -EIO;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
QETH_CARD_TEXT(card, 2, "AFFE");
- return 1;
+ return -EIO;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
QETH_CARD_TEXT(card, 2, "DGENCHK");
- return 1;
+ return -EIO;
}
return 0;
}
@@ -1513,7 +1513,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
- card->state = CARD_STATE_DOWN;
return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
@@ -1957,6 +1956,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
+ port |= QETH_IDX_ACT_INVAL_FRAME;
memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -2634,6 +2634,18 @@ static int qeth_init_input_buffer(struct qeth_card *card,
return 0;
}
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ if (!IS_IQD(card) ||
+ qeth_iqd_is_mcast_queue(card, queue) ||
+ card->options.cq == QETH_CQ_ENABLED ||
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
+ return 1;
+
+ return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
+}
+
int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
@@ -2673,6 +2685,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
queue->do_pack = 0;
queue->prev_hdr = NULL;
queue->bulk_start = 0;
+ queue->bulk_count = 0;
+ queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3080,7 +3094,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
- QETH_CARD_STAT_INC(card, rx_dropped);
+ QETH_CARD_STAT_INC(card, rx_fifo_errors);
return 0;
} else
return 1;
@@ -3107,7 +3121,7 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
for (i = queue->next_buf_to_init;
i < queue->next_buf_to_init + count; ++i) {
if (qeth_init_input_buffer(card,
- &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+ &queue->bufs[QDIO_BUFNR(i)])) {
break;
} else {
newcount++;
@@ -3149,8 +3163,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
if (rc) {
QETH_CARD_TEXT(card, 2, "qinberr");
}
- queue->next_buf_to_init = (queue->next_buf_to_init + count) %
- QDIO_MAX_BUFFERS_PER_Q;
+ queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
+ count);
}
}
@@ -3198,7 +3212,7 @@ static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
return 1;
}
return 0;
@@ -3252,7 +3266,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ unsigned int bidx = QDIO_BUFNR(i);
+
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
@@ -3318,10 +3333,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
- qeth_flush_buffers(queue, queue->bulk_start, 1);
+ qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
- queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
queue->prev_hdr = NULL;
+ queue->bulk_count = 0;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3419,8 +3435,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
}
for (i = first_element; i < first_element + count; ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
+ struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
int e = 0;
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
@@ -3441,8 +3456,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
"QDIO reported an error, rc=%i\n", rc);
QETH_CARD_TEXT(card, 2, "qcqherr");
}
- card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
- + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+ cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3468,7 +3483,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
- struct qeth_qdio_out_buffer *buffer;
struct net_device *dev = card->dev;
struct netdev_queue *txq;
int i;
@@ -3482,10 +3496,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
for (i = first_element; i < (first_element + count); ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- buffer = queue->bufs[bidx];
- qeth_handle_send_error(card, buffer, qdio_error);
- qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
+ struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
+
+ qeth_handle_send_error(card, buf, qdio_error);
+ qeth_clear_output_buffer(queue, buf, qdio_error, 0);
}
atomic_sub(count, &queue->used_buffers);
@@ -3680,10 +3694,10 @@ check_layout:
}
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buffer,
struct sk_buff *curr_skb,
struct qeth_hdr *curr_hdr)
{
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
struct qeth_hdr *prev_hdr = queue->prev_hdr;
if (!prev_hdr)
@@ -3803,13 +3817,14 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len)
{
- struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
unsigned int bytes = qdisc_pkt_len(skb);
+ struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
bool flush;
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* Just a sanity check, the wake/stop logic should ensure that we always
@@ -3818,11 +3833,23 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
- !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
- atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
- buffer = queue->bufs[queue->bulk_start];
+ flush = !qeth_iqd_may_bulk(queue, skb, hdr);
+
+ if (flush ||
+ (buffer->next_element_to_fill + elements > queue->max_elements)) {
+ if (buffer->next_element_to_fill > 0) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ queue->bulk_count++;
+ }
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
+
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
+ queue->bulk_count)];
/* Sanity-check again: */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
@@ -3848,7 +3875,13 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (flush || next_element >= queue->max_elements) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
+ queue->bulk_count++;
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
}
if (stopped && !qeth_out_queue_is_full(queue))
@@ -3898,8 +3931,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
buffer = queue->bufs[queue->next_buf_to_fill];
/* We stepped forward, so sanity-check again: */
@@ -3932,8 +3964,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
flush_count++;
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ queue->next_buf_to_fill =
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
}
if (flush_count)
@@ -4261,7 +4293,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
}
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
void qeth_tx_timeout(struct net_device *dev)
{
@@ -4316,7 +4347,9 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_NWAYTEST: /* N-way auto-neg test register */
break;
case MII_RERRCOUNTER: /* rx error counter */
- rc = card->stats.rx_errors;
+ rc = card->stats.rx_length_errors +
+ card->stats.rx_frame_errors +
+ card->stats.rx_fifo_errors;
break;
case MII_SREVISION: /* silicon revision */
break;
@@ -4822,7 +4855,6 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->data);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
@@ -4977,6 +5009,15 @@ retriable:
goto out;
}
}
+
+ if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
+ (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
+ card->info.hwtrap = 0;
+
+ rc = qeth_set_access_ctrl_online(card, 0);
+ if (rc)
+ goto out;
+
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5023,13 +5064,14 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
int offset = *__offset;
+ bool use_rx_sg = false;
+ unsigned int headroom;
struct sk_buff *skb;
int skb_len = 0;
void *data_ptr;
int data_len;
- int headroom = 0;
- int use_rx_sg = 0;
+next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
@@ -5043,27 +5085,45 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
switch ((*hdr)->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb_len = (*hdr)->hdr.l2.pkt_length;
+ headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
skb_len = (*hdr)->hdr.l3.length;
+ if (!IS_LAYER3(card)) {
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+ skb = NULL;
+ goto walk_packet;
+ }
+
headroom = ETH_HLEN;
break;
case QETH_HEADER_TYPE_OSN:
skb_len = (*hdr)->hdr.osn.pdu_length;
+ if (!IS_OSN(card)) {
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+ skb = NULL;
+ goto walk_packet;
+ }
+
headroom = sizeof(struct qeth_hdr);
break;
default:
- break;
+ if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+ QETH_CARD_STAT_INC(card, rx_frame_errors);
+ else
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+
+ /* Can't determine packet length, drop the whole buffer. */
+ return NULL;
}
if (!skb_len)
return NULL;
- if (((skb_len >= card->options.rx_sg_cb) &&
- !IS_OSN(card) &&
- (!atomic_read(&card->force_alloc_skb))) ||
- (card->options.cq == QETH_CQ_ENABLED))
- use_rx_sg = 1;
+ use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
+ ((skb_len >= card->options.rx_sg_cb) &&
+ !atomic_read(&card->force_alloc_skb) &&
+ !IS_OSN(card));
if (use_rx_sg && qethbuffer->rx_skb) {
/* QETH_CQ_ENABLED only: */
@@ -5074,15 +5134,18 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb = napi_alloc_skb(&card->napi, linear + headroom);
}
+
if (!skb)
- goto no_mem;
- if (headroom)
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+ else if (headroom)
skb_reserve(skb, headroom);
+walk_packet:
data_ptr = element->addr + offset;
while (skb_len) {
data_len = min(skb_len, (int)(element->length - offset));
- if (data_len) {
+
+ if (skb && data_len) {
if (use_rx_sg)
qeth_create_skb_frag(element, skb, offset,
data_len);
@@ -5094,8 +5157,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
if (qeth_is_last_sbale(element)) {
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
- dev_kfree_skb_any(skb);
- QETH_CARD_STAT_INC(card, rx_errors);
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ QETH_CARD_STAT_INC(card,
+ rx_length_errors);
+ }
return NULL;
}
element++;
@@ -5105,6 +5171,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
offset += data_len;
}
}
+
+ /* This packet was skipped, go get another one: */
+ if (!skb)
+ goto next_packet;
+
*__element = element;
*__offset = offset;
if (use_rx_sg) {
@@ -5113,12 +5184,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb_shinfo(skb)->nr_frags);
}
return skb;
-no_mem:
- if (net_ratelimit()) {
- QETH_CARD_TEXT(card, 2, "noskbmem");
- }
- QETH_CARD_STAT_INC(card, rx_dropped);
- return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
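The qeth_core_get_next_skb() rework above turns per-packet failures into skips: when allocation fails or the header type is unsupported, the walk_packet path still advances element/offset past the payload (with skb == NULL) and next_packet retries, rather than discarding the rest of the buffer; only an unknown header, whose length field can't be trusted, still aborts the whole buffer. A toy illustration of the skip-and-continue parsing:

#include <stdio.h>

/* Toy version of the new rx walk: on an unsupported record we still
 * advance past its payload (the skb == NULL case) so the following
 * records can be parsed, instead of throwing the rest away.
 */
struct rec { int type; int len; };

static void walk(const struct rec *r, int n)
{
	for (int i = 0; i < n; i++) {
		int supported = (r[i].type == 1);

		if (!supported) {
			printf("skip %d payload bytes\n", r[i].len);
			continue;	/* the "goto next_packet" case */
		}
		printf("deliver %d bytes\n", r[i].len);
	}
}

int main(void)
{
	const struct rec buf[] = { {1, 64}, {9, 128}, {1, 32} };

	walk(buf, 3);
	return 0;
}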
@@ -5165,8 +5230,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
card->rx.b_count--;
if (card->rx.b_count) {
card->rx.b_index =
- (card->rx.b_index + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(card->rx.b_index + 1);
card->rx.b_element =
&card->qdio.in_q
->bufs[card->rx.b_index]
@@ -5182,9 +5246,9 @@ int qeth_poll(struct napi_struct *napi, int budget)
}
}
- napi_complete_done(napi, work_done);
- if (qdio_start_irq(card->data.ccwdev, 0))
- napi_schedule(&card->napi);
+ if (napi_complete_done(napi, work_done) &&
+ qdio_start_irq(CARD_DDEV(card), 0))
+ napi_schedule(napi);
out:
return work_done;
}
@@ -5703,6 +5767,8 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
+ qeth_free_qdio_queues(card);
+
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
@@ -6198,9 +6264,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_packets = card->stats.rx_packets;
stats->rx_bytes = card->stats.rx_bytes;
- stats->rx_errors = card->stats.rx_errors;
- stats->rx_dropped = card->stats.rx_dropped;
+ stats->rx_errors = card->stats.rx_length_errors +
+ card->stats.rx_frame_errors +
+ card->stats.rx_fifo_errors;
+ stats->rx_dropped = card->stats.rx_dropped_nomem +
+ card->stats.rx_dropped_notsupp;
stats->multicast = card->stats.rx_multicast;
+ stats->rx_length_errors = card->stats.rx_length_errors;
+ stats->rx_frame_errors = card->stats.rx_frame_errors;
+ stats->rx_fifo_errors = card->stats.rx_fifo_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
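Two patterns recur throughout qeth_core_main.c above. First, ring-index arithmetic moves from `% QDIO_MAX_BUFFERS_PER_Q` to QDIO_BUFNR(); assuming the ring size stays a power of two (128 in current kernels), that wrap is a plain mask. Second, the IQD TX path now batches up to bulk_max primed buffers (the window [bulk_start, bulk_start + bulk_count)) before each flush instead of flushing one buffer at a time. A standalone toy combining both, with RING_SIZE and the threshold as assumptions:

#include <stdio.h>

#define RING_SIZE	128	/* assumption: QDIO_MAX_BUFFERS_PER_Q, a power of two */
#define RING_IDX(i)	((i) & (RING_SIZE - 1))	/* what QDIO_BUFNR() boils down to */

struct toy_queue {
	unsigned int bulk_start;	/* first primed-but-unflushed buffer */
	unsigned int bulk_count;	/* number of primed buffers */
	unsigned int bulk_max;		/* flush threshold */
};

/* Mirrors qeth_flush_queue(): hand the contiguous run of primed
 * buffers to the hardware in one go, then advance the window.
 */
static void toy_flush(struct toy_queue *q)
{
	printf("flush %u buffers starting at %u\n", q->bulk_count, q->bulk_start);
	q->bulk_start = RING_IDX(q->bulk_start + q->bulk_count);
	q->bulk_count = 0;
}

static void toy_prime_one(struct toy_queue *q)
{
	q->bulk_count++;
	if (q->bulk_count >= q->bulk_max)
		toy_flush(q);
}

int main(void)
{
	struct toy_queue q = { .bulk_start = 126, .bulk_max = 3 };

	for (int i = 0; i < 7; i++)
		toy_prime_one(&q);	/* shows wrap-around at the ring end */
	return 0;
}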
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6420b58cf42b..53fcf6641154 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -11,6 +11,7 @@
#include <asm/qeth.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in6.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -365,8 +366,7 @@ struct qeth_ipacmd_setdelip6 {
struct qeth_ipacmd_setdelipm {
__u8 mac[6];
__u8 padding[2];
- __u8 ip6[12];
- __u8 ip4[4];
+ struct in6_addr ip;
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelmac {
@@ -900,6 +900,7 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define IDX_ACTIVATE_SIZE 0x22
#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
+#define QETH_IDX_ACT_INVAL_FRAME 0x40
#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
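The setdelipm change merges the separate ip6[12]/ip4[4] arrays into a single struct in6_addr, with an IPv4 address parked in the final 32-bit word (s6_addr32[3] in the kernel code) and the multicast MAC derived on the fly via ip_eth_mc_map()/ipv6_eth_mc_map() rather than cached per address. A userspace sketch of the v4-in-the-last-word layout, using the portable s6_addr byte view:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct in6_addr ip;
	struct in_addr v4;

	inet_pton(AF_INET, "224.0.0.1", &v4);

	/* Same trick as the new qeth_ipacmd_setdelipm layout: the IPv4
	 * address occupies the last 32-bit word of the in6_addr, and
	 * the leading 96 bits stay zero.
	 */
	memset(&ip, 0, sizeof(ip));
	memcpy(&ip.s6_addr[12], &v4, sizeof(v4));

	printf("%02x%02x%02x%02x in s6_addr[12..15]\n",
	       ip.s6_addr[12], ip.s6_addr[13], ip.s6_addr[14], ip.s6_addr[15]);
	return 0;
}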
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9f392497d570..e81170ab6d9a 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -20,8 +20,6 @@ static ssize_t qeth_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
switch (card->state) {
case CARD_STATE_DOWN:
@@ -45,8 +43,6 @@ static ssize_t qeth_dev_chpid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%02X\n", card->info.chpid);
}
@@ -57,8 +53,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
+
return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
}
@@ -68,8 +63,6 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
}
@@ -94,8 +87,6 @@ static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
}
@@ -106,8 +97,6 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%i\n", card->dev->dev_port);
}
@@ -120,9 +109,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
unsigned int portno, limit;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -171,9 +157,6 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
return sprintf(buf, "%s\n", "by precedence");
@@ -195,9 +178,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (IS_IQD(card))
return -EOPNOTSUPP;
@@ -262,9 +242,6 @@ static ssize_t qeth_dev_bufcnt_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
@@ -276,9 +253,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
int cnt, old_cnt;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -307,9 +281,6 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
char *tmp;
int i;
- if (!card)
- return -EINVAL;
-
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
@@ -325,11 +296,6 @@ static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "1\n");
}
@@ -342,9 +308,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
bool reset;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtobool(buf, &reset);
if (rc)
return rc;
@@ -370,9 +333,6 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.layer);
}
@@ -385,9 +345,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
int i, rc = 0;
enum qeth_discipline_id newdis;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->discipline_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -453,9 +410,6 @@ static ssize_t qeth_dev_isolation_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
switch (card->options.isolation) {
case ISOLATION_MODE_NONE:
return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
@@ -475,9 +429,6 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
enum qeth_ipa_isolation_modes isolation;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
@@ -522,9 +473,6 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
struct qeth_switch_info sw_info;
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (!qeth_card_hw_is_reachable(card))
return sprintf(buf, "n/a\n");
@@ -555,8 +503,6 @@ static ssize_t qeth_hw_trap_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
if (card->info.hwtrap)
return snprintf(buf, 5, "arm\n");
else
@@ -570,9 +516,6 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
int rc = 0;
int state = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card))
state = 1;
@@ -607,24 +550,12 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
qeth_hw_trap_store);
-static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
-{
-
- if (!card)
- return -EINVAL;
-
- return sprintf(buf, "%i\n", value);
-}
-
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
int i, rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -645,7 +576,7 @@ static ssize_t qeth_dev_blkt_total_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+ return sprintf(buf, "%i\n", card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
@@ -657,8 +588,6 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
&card->info.blkt.time_total, 5000);
}
-
-
static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
qeth_dev_blkt_total_store);
@@ -667,7 +596,7 @@ static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+ return sprintf(buf, "%i\n", card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
@@ -687,8 +616,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card,
- card->info.blkt.inter_packet_jumbo);
+ return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 096698df3886..f7485c6dea25 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -49,6 +49,8 @@ static const struct qeth_stats card_stats[] = {
QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
+ QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem),
+ QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp),
};
#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bd8143e51747..989935d67b31 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -315,29 +315,19 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
*done = 1;
break;
}
- switch (hdr->hdr.l2.id) {
- case QETH_HEADER_TYPE_LAYER2:
+
+ if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
skb->protocol = eth_type_trans(skb, skb->dev);
qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
len = skb->len;
napi_gro_receive(&card->napi, skb);
- break;
- case QETH_HEADER_TYPE_OSN:
- if (IS_OSN(card)) {
- skb_push(skb, sizeof(struct qeth_hdr));
- skb_copy_to_linear_data(skb, hdr,
- sizeof(struct qeth_hdr));
- len = skb->len;
- card->osn_info.data_cb(skb);
- break;
- }
- /* Else, fall through */
- default:
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
- continue;
+ } else {
+ skb_push(skb, sizeof(*hdr));
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+ len = skb->len;
+ card->osn_info.data_cb(skb);
}
+
work_done++;
budget--;
QETH_CARD_STAT_INC(card, rx_packets);
@@ -467,10 +457,14 @@ static void qeth_l2_set_promisc_mode(struct qeth_card *card)
if (card->info.promisc_mode == enable)
return;
- if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
qeth_setadp_promisc_mode(card, enable);
- else if (card->options.sbp.reflect_promisc)
- qeth_l2_promisc_to_bridge(card, enable);
+ } else {
+ mutex_lock(&card->sbp_lock);
+ if (card->options.sbp.reflect_promisc)
+ qeth_l2_promisc_to_bridge(card, enable);
+ mutex_unlock(&card->sbp_lock);
+ }
}
/* New MAC address is added to the hash table and marked to be written on card
@@ -631,6 +625,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
int rc;
qeth_l2_vnicc_set_defaults(card);
+ mutex_init(&card->sbp_lock);
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
@@ -759,14 +754,6 @@ add_napi:
return rc;
}
-static int qeth_l2_start_ipassists(struct qeth_card *card)
-{
- /* configure isolation level */
- if (qeth_set_access_ctrl_online(card, 0))
- return -ENODEV;
- return 0;
-}
-
static void qeth_l2_trace_features(struct qeth_card *card)
{
/* Set BridgePort features */
@@ -797,17 +784,12 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
- if (card->info.hwtrap &&
- qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
- card->info.hwtrap = 0;
- } else
- card->info.hwtrap = 0;
-
+ mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
if (card->options.sbp.supported_funcs)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
+ mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
@@ -825,12 +807,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
- if (IS_OSD(card) || IS_OSX(card)) {
- rc = qeth_l2_start_ipassists(card);
- if (rc)
- goto out_remove;
- }
-
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
@@ -1162,9 +1138,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
/* Role should not change by itself, but if it did, */
/* information from the hardware is authoritative. */
- mutex_lock(&data->card->conf_mutex);
+ mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.role = entry->role;
- mutex_unlock(&data->card->conf_mutex);
+ mutex_unlock(&data->card->sbp_lock);
snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
snprintf(env_role, sizeof(env_role), "ROLE=%s",
@@ -1230,9 +1206,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
: (data->hostevs.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
- mutex_lock(&data->card->conf_mutex);
+ mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.hostnotification = 0;
- mutex_unlock(&data->card->conf_mutex);
+ mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
0, NULL, NULL);
} else
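The new card->sbp_lock above carves the Bridge Port (sbp) state out from under the coarse conf_mutex: sysfs stores take conf_mutex and then sbp_lock, while the bridgeport event workers take only sbp_lock. A pthread sketch of that two-lock discipline; lock names mirror the diff, and the fixed ordering is the assumption to preserve:

#include <pthread.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sbp_lock = PTHREAD_MUTEX_INITIALIZER;
static int sbp_role;

/* sysfs-store style path: configuration change, takes both locks in
 * the fixed order conf_mutex -> sbp_lock (matches qeth_l2_sys.c).
 */
static void store_role(int role)
{
	pthread_mutex_lock(&conf_mutex);
	pthread_mutex_lock(&sbp_lock);
	sbp_role = role;
	pthread_mutex_unlock(&sbp_lock);
	pthread_mutex_unlock(&conf_mutex);
}

/* event-worker style path: only needs the narrow lock, so it no
 * longer serializes against unrelated configuration work.
 */
static void worker_update_role(int role)
{
	pthread_mutex_lock(&sbp_lock);
	sbp_role = role;
	pthread_mutex_unlock(&sbp_lock);
}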
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index f2c3b127b1e4..f70c7aac2dcc 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,12 +18,10 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
int rc = 0;
char *word;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
+ mutex_lock(&card->sbp_lock);
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
@@ -57,6 +55,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
else
rc = sprintf(buf, "%s\n", word);
}
+ mutex_unlock(&card->sbp_lock);
return rc;
}
@@ -79,8 +78,6 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
int rc = 0;
enum qeth_sbp_roles role;
- if (!card)
- return -EINVAL;
if (sysfs_streq(buf, "primary"))
role = QETH_SBP_ROLE_PRIMARY;
else if (sysfs_streq(buf, "secondary"))
@@ -91,6 +88,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -104,6 +102,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
} else
card->options.sbp.role = role;
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -132,9 +131,6 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -150,14 +146,12 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
bool enable;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtobool(buf, &enable);
if (rc)
return rc;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -168,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
} else
card->options.sbp.hostnotification = enable;
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -183,9 +178,6 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -207,9 +199,6 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
int enable, primary;
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (sysfs_streq(buf, "none")) {
enable = 0;
primary = 0;
@@ -223,6 +212,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -234,6 +224,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
rc = 0;
}
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -269,6 +260,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
return;
if (!card->options.sbp.supported_funcs)
return;
+
+ mutex_lock(&card->sbp_lock);
if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
/* Conditional to avoid spurious error messages */
qeth_bridgeport_setrole(card, card->options.sbp.role);
@@ -280,8 +273,10 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
rc = qeth_bridgeport_an_set(card, 1);
if (rc)
card->options.sbp.hostnotification = 0;
- } else
+ } else {
qeth_bridgeport_an_set(card, 0);
+ }
+ mutex_unlock(&card->sbp_lock);
}
/* VNIC CHARS support */
@@ -315,9 +310,6 @@ static ssize_t qeth_vnicc_timeout_show(struct device *dev,
u32 timeout;
int rc;
- if (!card)
- return -EINVAL;
-
rc = qeth_l2_vnicc_get_timeout(card, &timeout);
if (rc == -EBUSY)
return sprintf(buf, "n/a (BridgePort)\n");
@@ -335,9 +327,6 @@ static ssize_t qeth_vnicc_timeout_store(struct device *dev,
u32 timeout;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtou32(buf, 10, &timeout);
if (rc)
return rc;
@@ -357,9 +346,6 @@ static ssize_t qeth_vnicc_char_show(struct device *dev,
u32 vnicc;
int rc;
- if (!card)
- return -EINVAL;
-
vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
@@ -380,9 +366,6 @@ static ssize_t qeth_vnicc_char_store(struct device *dev,
u32 vnicc;
int rc;
- if (!card)
- return -EINVAL;
-
if (kstrtobool(buf, &state))
return -EINVAL;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 87659cfc9066..5db04fe472c0 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,8 +13,6 @@
#include "qeth_core.h"
#include <linux/hashtable.h>
-#define QETH_SNIFF_AVAIL 0x0008
-
enum qeth_ip_types {
QETH_IP_TYPE_NORMAL,
QETH_IP_TYPE_VIPA,
@@ -24,7 +22,6 @@ enum qeth_ip_types {
struct qeth_ipaddr {
struct hlist_node hnode;
enum qeth_ip_types type;
- unsigned char mac[ETH_ALEN];
u8 is_multicast:1;
u8 in_progress:1;
u8 disp_flag:2;
@@ -37,7 +34,7 @@ struct qeth_ipaddr {
enum qeth_prot_versions proto;
union {
struct {
- unsigned int addr;
+ __be32 addr;
unsigned int mask;
} a4;
struct {
@@ -55,6 +52,7 @@ static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr,
addr->type = type;
addr->proto = proto;
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ addr->ref_counter = 1;
}
static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
@@ -74,12 +72,10 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
* so 'proto' and 'addr' match for sure.
*
* For ucast:
- * - 'mac' is always 0.
* - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
* values are required to avoid mixups in takeover eligibility.
*
* For mcast,
- * - 'mac' is mapped from the IP, and thus always matches.
* - 'mask'/'pfxlen' is always 0.
*/
if (a1->type != a2->type)
@@ -89,21 +85,12 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
return a1->u.a4.mask == a2->u.a4.mask;
}
-static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
- u64 ret = 0;
- u8 *point;
-
- if (addr->proto == QETH_PROT_IPV6) {
- point = (u8 *) &addr->u.a6.addr;
- ret = get_unaligned((u64 *)point) ^
- get_unaligned((u64 *) (point + 8));
- }
- if (addr->proto == QETH_PROT_IPV4) {
- point = (u8 *) &addr->u.a4.addr;
- ret = get_unaligned((u32 *) point);
- }
- return ret;
+ if (addr->proto == QETH_PROT_IPV6)
+ return ipv6_addr_hash(&addr->u.a6.addr);
+ else
+ return ipv4_addr_hash(addr->u.a4.addr);
}
struct qeth_ipato_entry {
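qeth_l3_ipaddr_hash() above drops the hand-rolled u64 fold in favor of the kernel's ipv6_addr_hash()/ipv4_addr_hash(), returning u32. A rough userspace approximation of the fold; the in-kernel helpers may differ per architecture, so treat this as an analogue:

#include <stdint.h>
#include <netinet/in.h>
#include <string.h>

/* Approximation of the helpers the patch switches to: XOR-fold the
 * four 32-bit words of an IPv6 address; an IPv4 address hashes to
 * itself. The exact kernel implementations may vary.
 */
static uint32_t ipv6_hash(const struct in6_addr *a)
{
	uint32_t w[4];

	memcpy(w, a, sizeof(w));
	return w[0] ^ w[1] ^ w[2] ^ w[3];
}

static uint32_t ipv4_hash(uint32_t be_addr)
{
	return be_addr;
}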
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d7bfc7a0e4c0..e7ce73b9f016 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -39,7 +39,6 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
-static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
@@ -64,19 +63,10 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
qeth_l3_ipaddr6_to_string(addr, buf);
}
-static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
-{
- struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
-
- if (addr)
- qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
- return addr;
-}
-
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
- u64 key = qeth_l3_ipaddr_hash(query);
+ u32 key = qeth_l3_ipaddr_hash(query);
struct qeth_ipaddr *addr;
if (query->is_multicast) {
@@ -217,13 +207,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
"Registering IP address %s failed\n", buf);
return -EADDRINUSE;
} else {
- addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+ addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
- memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
- addr->ref_counter = 1;
-
if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->ipato = 1;
@@ -381,12 +368,13 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
- if (addr->proto == QETH_PROT_IPV6)
- memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
- sizeof(struct in6_addr));
- else
- memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+ if (addr->proto == QETH_PROT_IPV6) {
+ cmd->data.setdelipm.ip = addr->u.a6.addr;
+ ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
+ } else {
+ cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
+ ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
+ }
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
@@ -953,8 +941,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
- if (qeth_set_access_ctrl_online(card, 0))
- return -EIO;
qeth_l3_start_ipa_arp_processing(card); /* go on*/
qeth_l3_start_ipa_source_mac(card); /* go on*/
qeth_l3_start_ipa_vlan(card); /* go on*/
@@ -1115,176 +1101,83 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
-static void
-qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
+static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
{
+ struct qeth_card *card = arg;
+ struct inet6_dev *in6_dev;
+ struct in_device *in4_dev;
+ struct qeth_ipaddr *ipm;
+ struct qeth_ipaddr tmp;
struct ip_mc_list *im4;
- struct qeth_ipaddr *tmp, *ipm;
+ struct ifmcaddr6 *im6;
QETH_CARD_TEXT(card, 4, "addmc");
- tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!tmp)
- return;
+ if (!dev || !(dev->flags & IFF_UP))
+ goto out;
+
+ in4_dev = __in_dev_get_rtnl(dev);
+ if (!in4_dev)
+ goto walk_ipv6;
- for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
- im4 = rcu_dereference(im4->next_rcu)) {
- ip_eth_mc_map(im4->multiaddr, tmp->mac);
- tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
- tmp->is_multicast = 1;
+ qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
+ tmp.disp_flag = QETH_DISP_ADDR_ADD;
+ tmp.is_multicast = 1;
- ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
+ im4 = rtnl_dereference(im4->next_rcu)) {
+ tmp.u.a4.addr = im4->multiaddr;
+
+ ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- } else {
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!ipm)
- continue;
- ether_addr_copy(ipm->mac, tmp->mac);
- ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
- ipm->is_multicast = 1;
- ipm->disp_flag = QETH_DISP_ADDR_ADD;
- hash_add(card->ip_mc_htable,
- &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+ continue;
}
- }
-
- kfree(tmp);
-}
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc(struct qeth_card *card)
-{
- struct in_device *in_dev;
- u16 vid;
-
- QETH_CARD_TEXT(card, 4, "addmcvl");
-
- if (!qeth_is_supported(card, IPA_FULL_VLAN))
- return;
-
- for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
- struct net_device *netdev;
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
- vid);
- if (netdev == NULL ||
- !(netdev->flags & IFF_UP))
- continue;
- in_dev = __in_dev_get_rcu(netdev);
- if (!in_dev)
+ ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
+ if (!ipm)
continue;
- qeth_l3_add_mc_to_hash(card, in_dev);
- }
-}
-static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
-{
- struct in_device *in4_dev;
-
- QETH_CARD_TEXT(card, 4, "chkmcv4");
-
- rcu_read_lock();
- in4_dev = __in_dev_get_rcu(card->dev);
- if (in4_dev == NULL)
- goto unlock;
- qeth_l3_add_mc_to_hash(card, in4_dev);
- qeth_l3_add_vlan_mc(card);
-unlock:
- rcu_read_unlock();
-}
+ hash_add(card->ip_mc_htable, &ipm->hnode,
+ qeth_l3_ipaddr_hash(ipm));
+ }
-static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
- struct inet6_dev *in6_dev)
-{
- struct qeth_ipaddr *ipm;
- struct ifmcaddr6 *im6;
- struct qeth_ipaddr *tmp;
+walk_ipv6:
+ if (!qeth_is_supported(card, IPA_IPV6))
+ goto out;
- QETH_CARD_TEXT(card, 4, "addmc6");
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+ goto out;
- tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!tmp)
- return;
+ qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
+ tmp.disp_flag = QETH_DISP_ADDR_ADD;
+ tmp.is_multicast = 1;
+ read_lock_bh(&in6_dev->lock);
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
- ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
- memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
- sizeof(struct in6_addr));
- tmp->is_multicast = 1;
+ tmp.u.a6.addr = im6->mca_addr;
- ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
if (!ipm)
continue;
- ether_addr_copy(ipm->mac, tmp->mac);
- memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
- sizeof(struct in6_addr));
- ipm->is_multicast = 1;
- ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
}
- kfree(tmp);
-}
-
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
-{
- struct inet6_dev *in_dev;
- u16 vid;
-
- QETH_CARD_TEXT(card, 4, "admc6vl");
-
- if (!qeth_is_supported(card, IPA_FULL_VLAN))
- return;
-
- for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
- struct net_device *netdev;
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
- vid);
- if (netdev == NULL ||
- !(netdev->flags & IFF_UP))
- continue;
- in_dev = in6_dev_get(netdev);
- if (!in_dev)
- continue;
- read_lock_bh(&in_dev->lock);
- qeth_l3_add_mc6_to_hash(card, in_dev);
- read_unlock_bh(&in_dev->lock);
- in6_dev_put(in_dev);
- }
-}
-
-static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
-{
- struct inet6_dev *in6_dev;
-
- QETH_CARD_TEXT(card, 4, "chkmcv6");
-
- if (!qeth_is_supported(card, IPA_IPV6))
- return ;
- in6_dev = in6_dev_get(card->dev);
- if (!in6_dev)
- return;
-
- rcu_read_lock();
- read_lock_bh(&in6_dev->lock);
- qeth_l3_add_mc6_to_hash(card, in6_dev);
- qeth_l3_add_vlan_mc6(card);
read_unlock_bh(&in6_dev->lock);
- rcu_read_unlock();
- in6_dev_put(in6_dev);
+
+out:
+ return 0;
}
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
@@ -1292,7 +1185,7 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- set_bit(vid, card->active_vlans);
+ QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
return 0;
}
@@ -1302,9 +1195,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
-
- clear_bit(vid, card->active_vlans);
- qeth_l3_set_rx_mode(dev);
return 0;
}
@@ -1372,7 +1262,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
- unsigned int len;
*done = 0;
WARN_ON_ONCE(!budget);
@@ -1384,25 +1273,17 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
*done = 1;
break;
}
- switch (hdr->hdr.l3.id) {
- case QETH_HEADER_TYPE_LAYER3:
+
+ if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
qeth_l3_rebuild_skb(card, skb, hdr);
- /* fall through */
- case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
- skb->protocol = eth_type_trans(skb, skb->dev);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- break;
- default:
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
- continue;
- }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+
+ napi_gro_receive(&card->napi, skb);
work_done++;
budget--;
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
@@ -1468,8 +1349,11 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
- qeth_l3_add_multicast_ipv4(card);
- qeth_l3_add_multicast_ipv6(card);
+ rtnl_lock();
+ qeth_l3_add_mcast_rtnl(card->dev, 0, card);
+ if (qeth_is_supported(card, IPA_FULL_VLAN))
+ vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
+ rtnl_unlock();
hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
switch (addr->disp_flag) {
@@ -2313,13 +2197,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
- if (card->info.hwtrap &&
- qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
- card->info.hwtrap = 0;
- } else
- card->info.hwtrap = 0;
-
card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
@@ -2557,7 +2434,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
QETH_CARD_TEXT(card, 3, "ipevent");
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
- addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
+ addr.u.a4.addr = ifa->ifa_address;
addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
return qeth_l3_handle_ip_event(card, &addr, event);
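The rx-mode path above now collects multicast state under the RTNL lock: qeth_l3_add_mcast_rtnl() runs once for the base device and, via vlan_for_each(), once per VLAN upper device, replacing the card-private active_vlans bitmap and the RCU-based VLAN walks. A toy of the callback-iteration shape; the types here are hypothetical stand-ins, though the kernel callback signature matches the `(dev, vid, arg)` form visible in the diff:

#include <stddef.h>

/* Hypothetical stand-ins for net_device and the vlan_for_each()
 * contract: the callback runs once per device, and a nonzero return
 * aborts the walk (mirrors the shape of the kernel API).
 */
struct toy_dev { const char *name; };

typedef int (*dev_cb)(struct toy_dev *dev, int vid, void *arg);

static int for_each_vlan(struct toy_dev *vlans, size_t n, dev_cb cb, void *arg)
{
	for (size_t i = 0; i < n; i++) {
		int rc = cb(&vlans[i], (int)i + 1, arg);

		if (rc)
			return rc;
	}
	return 0;
}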
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 2f73b33c9347..f9067ed6c7d3 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -60,9 +60,6 @@ static ssize_t qeth_l3_dev_route4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_show(card, &card->options.route4, buf);
}
@@ -109,9 +106,6 @@ static ssize_t qeth_l3_dev_route4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_store(card, &card->options.route4,
QETH_PROT_IPV4, buf, count);
}
@@ -124,9 +118,6 @@ static ssize_t qeth_l3_dev_route6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_show(card, &card->options.route6, buf);
}
@@ -135,9 +126,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_store(card, &card->options.route6,
QETH_PROT_IPV6, buf, count);
}
@@ -150,9 +138,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
}
@@ -163,9 +148,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
char *tmp;
int i, rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -190,9 +172,6 @@ static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
}
@@ -203,9 +182,6 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
int rc = 0;
unsigned long i;
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
@@ -228,7 +204,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
break;
case 1:
qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
- if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+ if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
card->options.sniffer = i;
if (card->qdio.init_pool.buf_count !=
QETH_IN_BUF_COUNT_MAX)
@@ -248,16 +224,12 @@ out:
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
qeth_l3_dev_sniffer_store);
-
static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char tmp_hsuid[9];
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
@@ -273,9 +245,6 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
char *tmp;
int rc;
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
if (card->state != CARD_STATE_DOWN)
@@ -336,9 +305,6 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
}
@@ -349,9 +315,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
bool enable;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -385,9 +348,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
}
@@ -399,9 +359,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
bool invert;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
@@ -460,9 +417,6 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
}
@@ -528,9 +482,6 @@ static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -558,9 +509,6 @@ static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -572,9 +520,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
}
@@ -585,9 +530,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
bool invert;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
@@ -617,9 +559,6 @@ static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
}
@@ -628,9 +567,6 @@ static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -643,9 +579,6 @@ static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -679,9 +612,6 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
- if (!card)
- return -EINVAL;
-
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
mutex_lock(&card->ip_lock);
@@ -741,9 +671,6 @@ static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -771,9 +698,6 @@ static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -793,9 +717,6 @@ static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -808,9 +729,6 @@ static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -884,9 +802,6 @@ static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -914,9 +829,6 @@ static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -936,9 +848,6 @@ static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -951,9 +860,6 @@ static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
}
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 222c77c9621f..b6a0432f305a 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -39,7 +39,7 @@ static irqreturn_t a3000_intr(int irq, void *data)
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
- pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
+ pr_warn("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
return IRQ_NONE;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 6afad68e5ba2..238240984bc1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -76,9 +76,11 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
- wait_event_timeout(vha->vref_waitq,
- atomic_read(&vha->vref_count), HZ);
+ for (i = 0; i < 10; i++) {
+ if (wait_event_timeout(vha->vref_waitq,
+ !atomic_read(&vha->vref_count), HZ) > 0)
+ break;
+ }
spin_lock_irqsave(&ha->vport_slock, flags);
if (atomic_read(&vha->vref_count)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 337162ac3a77..726ad4cbf4a6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1119,9 +1119,11 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
qla2x00_mark_all_devices_lost(vha, 0);
- for (i = 0; i < 10; i++)
- wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
- HZ);
+ for (i = 0; i < 10; i++) {
+ if (wait_event_timeout(vha->fcport_waitQ,
+ test_fcport_count(vha), HZ) > 0)
+ break;
+ }
flush_workqueue(vha->hw->wq);
}
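Both qla2xxx hunks repair the same wait loop: the qla_mid variant waited for vref_count to be nonzero (inverted polarity, so wait_event_timeout() returned at once and the 10 iterations did nothing useful), and the qla_os variant never broke out once test_fcport_count() succeeded. The fix checks wait_event_timeout()'s return value, which is greater than zero when the condition came true, and breaks early. A pthread analogue of the corrected drain loop:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ref_count;	/* stand-in for vha->vref_count */

/* Userspace analogue of the fixed loop: wait up to 10 one-second
 * slices (the HZ timeout) for the refcount to drain, and stop
 * retrying the moment it does.
 */
static bool wait_for_drain(void)
{
	bool drained = false;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < 10 && !drained; i++) {
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;
		while (ref_count && pthread_cond_timedwait(&cond, &lock, &ts) == 0)
			;	/* woken but not drained yet: keep waiting */
		drained = (ref_count == 0);
	}
	pthread_mutex_unlock(&lock);
	return drained;
}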
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f5b5c8a7f72..7a1b6c76f263 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -434,8 +434,8 @@ static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
return;
mutex_lock(&sdev->inquiry_mutex);
- rcu_swap_protected(*sdev_vpd_buf, vpd_buf,
- lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_buf)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5447738906ac..91c007d26c1e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1883,7 +1883,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
- sgl_size = scsi_mq_inline_sgl_size(shost);
+ sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+ scsi_mq_inline_sgl_size(shost));
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) +
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6d7362e7367e..cc51f4756077 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -466,10 +466,10 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
sdev->request_queue = NULL;
mutex_lock(&sdev->inquiry_mutex);
- rcu_swap_protected(sdev->vpd_pg80, vpd_pg80,
- lockdep_is_held(&sdev->inquiry_mutex));
- rcu_swap_protected(sdev->vpd_pg83, vpd_pg83,
- lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
+ lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg83)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ebb40160539f..470ee6dc3f7e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1291,9 +1291,17 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
- return sd_zbc_setup_reset_cmnd(cmd, false);
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+ false);
case REQ_OP_ZONE_RESET_ALL:
- return sd_zbc_setup_reset_cmnd(cmd, true);
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+ true);
+ case REQ_OP_ZONE_OPEN:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
+ case REQ_OP_ZONE_CLOSE:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
+ case REQ_OP_ZONE_FINISH:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
default:
WARN_ON_ONCE(1);
return BLK_STS_NOTSUPP;
@@ -1961,6 +1969,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
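
With REQ_OP_ZONE_OPEN/CLOSE/FINISH wired into sd_init_command() and sd_done(), upper layers can issue any ZBC zone management action through the block layer. A hedged sketch of a caller, assuming the 5.5-era blkdev_zone_mgmt() prototype from <linux/blkdev.h>:

        #include <linux/blkdev.h>

        /* Explicitly open one zone before filling it; zone_sectors would
         * normally come from blk_queue_zone_sectors(bdev_get_queue(bdev)).
         */
        static int open_one_zone(struct block_device *bdev, sector_t zstart,
                                 sector_t zone_sectors)
        {
                return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_OPEN, zstart,
                                        zone_sectors, GFP_KERNEL);
        }
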
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 1eab779f812b..42fd3f00e4a5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -209,11 +209,12 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all);
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op, bool all);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
-extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -225,8 +226,9 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd,
- bool all)
+static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op,
+ bool all)
{
return BLK_STS_TARGET;
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index de4019dc0f0b..0e5ede48f045 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -19,34 +19,27 @@
#include "sd.h"
-/**
- * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
- * @sdkp: The disk the report originated from
- * @buf: Address of the report zone descriptor
- * @zone: the destination zone structure
- *
- * All LBA sized values are converted to 512B sectors unit.
- */
-static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
- struct blk_zone *zone)
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+ unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
+ struct blk_zone zone = { 0 };
- memset(zone, 0, sizeof(struct blk_zone));
-
- zone->type = buf[0] & 0x0f;
- zone->cond = (buf[1] >> 4) & 0xf;
+ zone.type = buf[0] & 0x0f;
+ zone.cond = (buf[1] >> 4) & 0xf;
if (buf[1] & 0x01)
- zone->reset = 1;
+ zone.reset = 1;
if (buf[1] & 0x02)
- zone->non_seq = 1;
-
- zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
- zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone->type != ZBC_ZONE_TYPE_CONV &&
- zone->cond == ZBC_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
+ zone.non_seq = 1;
+
+ zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+ if (zone.type != ZBC_ZONE_TYPE_CONV &&
+ zone.cond == ZBC_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+
+ return cb(&zone, idx, data);
}
/**
@@ -104,11 +97,6 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
-/*
- * Maximum number of zones to get with one report zones command.
- */
-#define SD_ZBC_REPORT_MAX_ZONES 8192U
-
/**
* Allocate a buffer for report zones reply.
* @sdkp: The target disk
@@ -138,82 +126,94 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
- nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES);
- bufsize = roundup((nr_zones + 1) * 64, 512);
+ nr_zones = min(nr_zones, sdkp->nr_zones);
+ bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
- buf = vzalloc(bufsize);
- if (buf)
- *buflen = bufsize;
+ while (bufsize >= SECTOR_SIZE) {
+ buf = __vmalloc(bufsize,
+ GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
+ PAGE_KERNEL);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
- return buf;
+ return NULL;
}
/**
- * sd_zbc_report_zones - Disk report zones operation.
- * @disk: The target disk
- * @sector: Start 512B sector of the report
- * @zones: Array of zone descriptors
- * @nr_zones: Number of descriptors in the array
- *
- * Execute a report zones command on the target disk.
+ * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
+ * @sdkp: The target disk
*/
+static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
+{
+ return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+}
+
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
- unsigned int i, nrz = *nr_zones;
+ unsigned int nr, i;
unsigned char *buf;
- size_t buflen = 0, offset = 0;
- int ret = 0;
+ size_t offset, buflen = 0;
+ int zone_idx = 0;
+ int ret;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
return -EOPNOTSUPP;
- buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen);
+ buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
if (!buf)
return -ENOMEM;
- ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
- sectors_to_logical(sdkp->device, sector), true);
- if (ret)
- goto out;
+ while (zone_idx < nr_zones && sector < get_capacity(disk)) {
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+ goto out;
+
+ offset = 0;
+ nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
+ if (!nr)
+ break;
+
+ for (i = 0; i < nr && zone_idx < nr_zones; i++) {
+ offset += 64;
+ ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
+ cb, data);
+ if (ret)
+ goto out;
+ zone_idx++;
+ }
- nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
- for (i = 0; i < nrz; i++) {
- offset += 64;
- sd_zbc_parse_report(sdkp, buf + offset, zones);
- zones++;
+ sector += sd_zbc_zone_sectors(sdkp) * i;
}
- *nr_zones = nrz;
-
+ ret = zone_idx;
out:
kvfree(buf);
-
return ret;
}
/**
- * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
- * @sdkp: The target disk
- */
-static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
-{
- return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
-}
-
-/**
- * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
+ * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
+ * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
* @cmd: the command to setup
- * @all: Reset all zones control.
+ * @op: Operation to be performed
+ * @all: All zones control
*
- * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
+ * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
*/
-blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op, bool all)
{
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -234,7 +234,7 @@ blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
cmd->cmnd[0] = ZBC_OUT;
- cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+ cmd->cmnd[1] = op;
if (all)
cmd->cmnd[14] = 0x1;
else
@@ -263,25 +263,16 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
int result = cmd->result;
struct request *rq = cmd->request;
- switch (req_op(rq)) {
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_RESET_ALL:
-
- if (result &&
- sshdr->sense_key == ILLEGAL_REQUEST &&
- sshdr->asc == 0x24)
- /*
- * INVALID FIELD IN CDB error: reset of a conventional
- * zone was attempted. Nothing to worry about, so be
- * quiet about the error.
- */
- rq->rq_flags |= RQF_QUIET;
- break;
-
- case REQ_OP_WRITE:
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
- break;
+ if (op_is_zone_mgmt(req_op(rq)) &&
+ result &&
+ sshdr->sense_key == ILLEGAL_REQUEST &&
+ sshdr->asc == 0x24) {
+ /*
+ * INVALID FIELD IN CDB error: a zone management command was
+ * attempted on a conventional zone. Nothing to worry about,
+ * so be quiet about the error.
+ */
+ rq->rq_flags |= RQF_QUIET;
}
}
@@ -344,32 +335,18 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
* Returns the zone size in number of blocks upon success or an error code
* upon failure.
*/
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ u32 *zblocks)
{
- size_t bufsize, buflen;
- unsigned int noio_flag;
u64 zone_blocks = 0;
- sector_t max_lba, block = 0;
- unsigned char *buf;
+ sector_t max_lba;
unsigned char *rec;
int ret;
- u8 same;
-
- /* Do all memory allocations as if GFP_NOIO was specified */
- noio_flag = memalloc_noio_save();
-
- /* Get a buffer */
- buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES,
- &bufsize);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
- /* Do a report zone to get max_lba and the same field */
- ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false);
+ /* Do a REPORT ZONES to get max_lba and the size of the first zone */
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
if (ret)
- goto out_free;
+ return ret;
if (sdkp->rc_basis == 0) {
/* The max_lba field is the capacity of this device */
@@ -384,82 +361,27 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
}
}
- /*
- * Check same field: for any value other than 0, we know that all zones
- * have the same size.
- */
- same = buf[4] & 0x0f;
- if (same > 0) {
- rec = &buf[64];
- zone_blocks = get_unaligned_be64(&rec[8]);
- goto out;
- }
-
- /*
- * Check the size of all zones: all zones must be of
- * equal size, except the last zone which can be smaller
- * than other zones.
- */
- do {
-
- /* Parse REPORT ZONES header */
- buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64,
- bufsize);
- rec = buf + 64;
-
- /* Parse zone descriptors */
- while (rec < buf + buflen) {
- u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
-
- if (zone_blocks == 0) {
- zone_blocks = this_zone_blocks;
- } else if (this_zone_blocks != zone_blocks &&
- (block + this_zone_blocks < sdkp->capacity
- || this_zone_blocks > zone_blocks)) {
- zone_blocks = 0;
- goto out;
- }
- block += this_zone_blocks;
- rec += 64;
- }
-
- if (block < sdkp->capacity) {
- ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
- true);
- if (ret)
- goto out_free;
- }
-
- } while (block < sdkp->capacity);
-
-out:
- if (!zone_blocks) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Devices with non constant zone "
- "size are not supported\n");
- ret = -ENODEV;
- } else if (!is_power_of_2(zone_blocks)) {
+ /* Parse REPORT ZONES header */
+ rec = buf + 64;
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (!zone_blocks || !is_power_of_2(zone_blocks)) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Devices with non power of 2 zone "
"size are not supported\n");
- ret = -ENODEV;
- } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ return -ENODEV;
+ }
+
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- ret = -EFBIG;
- } else {
- *zblocks = zone_blocks;
- ret = 0;
+ return -EFBIG;
}
-out_free:
- memalloc_noio_restore(noio_flag);
- kvfree(buf);
+ *zblocks = zone_blocks;
- return ret;
+ return 0;
}
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
@@ -485,7 +407,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* a possible last runt zone) that is a power of 2 are supported.
*/
- ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+ ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
if (ret != 0)
goto err;
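
The reworked sd_zbc_report_zones() no longer fills a caller-supplied array; it invokes a report_zones_cb once per parsed descriptor and returns the number of zones processed. A sketch of a matching callback, assuming the 5.5-era typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); the struct and helper names are illustrative:

        #include <linux/blkdev.h>

        struct zone_array {
                struct blk_zone *zones;
                unsigned int max;
        };

        /* Called once per zone descriptor; a non-zero return aborts the walk. */
        static int copy_zone_cb(struct blk_zone *zone, unsigned int idx,
                                void *data)
        {
                struct zone_array *za = data;

                if (idx >= za->max)
                        return -ENOBUFS;
                za->zones[idx] = *zone;
                return 0;
        }

        /* Usage: ret = blkdev_report_zones(bdev, 0, za.max, copy_zone_cb, &za); */
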
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 46f0f322d4d8..8485e812d9b2 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -100,8 +100,8 @@ static void __init intc_register_irq(struct intc_desc *desc,
primary = 1;
if (!data[0] && !data[1])
- pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
- irq, irq2evt(irq));
+ pr_warn("missing unique irq mask for irq %d (vect 0x%04x)\n",
+ irq, irq2evt(irq));
data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1);
data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index bf68d86d80ee..1e164e03410a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1749,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu)
}
EXPORT_SYMBOL(qman_get_affine_portal);
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+ return (!device_link_add(dev, p->config->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
return __poll_portal_fast(p, limit);
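
qman_start_using_portal() leans on the driver core's device links: device_link_add() returns NULL on failure, and DL_FLAG_AUTOREMOVE_CONSUMER makes the core drop the link automatically when the consumer unbinds, so no explicit teardown call is needed. The same idiom as a minimal sketch (names illustrative):

        #include <linux/device.h>

        /* Make @consumer depend on @supplier for PM and unbind ordering. */
        static int link_to_supplier(struct device *consumer,
                                    struct device *supplier)
        {
                if (!device_link_add(consumer, supplier,
                                     DL_FLAG_AUTOREMOVE_CONSUMER))
                        return -EINVAL;
                return 0;
        }
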
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c8c80df090d1..c725d0a8b288 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -24,7 +24,7 @@ config SOUNDWIRE_CADENCE
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- depends on X86 && ACPI && SND_SOC
+ depends on ACPI && SND_SOC
help
SoundWire Intel Master driver.
If you have an Intel platform which has a SoundWire Master then
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index fc53dbe57f85..be5d437058ed 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -422,10 +422,11 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
- if (slave->id.unique_id != id.unique_id ||
- slave->id.mfg_id != id.mfg_id ||
+ if (slave->id.mfg_id != id.mfg_id ||
slave->id.part_id != id.part_id ||
- slave->id.class_id != id.class_id)
+ slave->id.class_id != id.class_id ||
+ (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
+ slave->id.unique_id != id.unique_id))
return -ENODEV;
return 0;
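
The reordered comparison turns SDW_IGNORED_UNIQUE_ID into a wildcard: mfg/part/class must still match exactly, but a Slave enumerated with the sentinel unique ID accepts any reported unique ID. Restated as a standalone predicate (a sketch, not the driver's code):

        #include <linux/soundwire/sdw.h>

        static bool sdw_id_matches(const struct sdw_slave_id *have,
                                   const struct sdw_slave_id *want)
        {
                if (have->mfg_id != want->mfg_id ||
                    have->part_id != want->part_id ||
                    have->class_id != want->class_id)
                        return false;

                /* sentinel set at enumeration time matches any unique ID */
                return have->unique_id == SDW_IGNORED_UNIQUE_ID ||
                       have->unique_id == want->unique_id;
        }
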
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 502ed4ec8f07..fed21e2b2277 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -183,9 +183,6 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_DEFAULT_SSP_INTERVAL 0x18
#define CDNS_TX_TIMEOUT 2000
-#define CDNS_PCM_PDI_OFFSET 0x2
-#define CDNS_PDM_PDI_OFFSET 0x6
-
#define CDNS_SCP_RX_FIFOLEVEL 0x2
/*
@@ -232,6 +229,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
}
/*
+ * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
+ * need to be confirmed with a write to MCP_CONFIG_UPDATE
+ */
+static int cdns_update_config(struct sdw_cdns *cdns)
+{
+ int ret;
+
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ dev_err(cdns->dev, "Config update timed out\n");
+
+ return ret;
+}
+
+/*
* debugfs
*/
#ifdef CONFIG_DEBUG_FS
@@ -279,11 +292,7 @@ static int cdns_reg_show(struct seq_file *s, void *data)
ret += scnprintf(buf + ret, RD_BUF - ret,
"\nDPn B0 Registers\n");
- /*
- * in sdw_cdns_pdi_init() we filter out the Bulk PDIs,
- * so the indices need to be corrected again
- */
- num_ports = cdns->num_ports + CDNS_PCM_PDI_OFFSET;
+ num_ports = cdns->num_ports;
for (i = 0; i < num_ports; i++) {
ret += scnprintf(buf + ret, RD_BUF - ret,
@@ -324,6 +333,26 @@ static int cdns_reg_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(cdns_reg);
+static int cdns_hw_reset(void *data, u64 value)
+{
+ struct sdw_cdns *cdns = data;
+ int ret;
+
+ if (value != 1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ ret = sdw_cdns_exit_reset(cdns);
+
+ dev_dbg(cdns->dev, "link hw_reset done: %d\n", ret);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");
+
/**
* sdw_cdns_debugfs_init() - Cadence debugfs init
* @cdns: Cadence instance
@@ -332,6 +361,9 @@ DEFINE_SHOW_ATTRIBUTE(cdns_reg);
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
{
debugfs_create_file("cdns-registers", 0400, root, cdns, &cdns_reg_fops);
+
+ debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
+ &cdns_hw_reset_fops);
}
EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);
@@ -752,14 +784,48 @@ EXPORT_SYMBOL(sdw_cdns_thread);
/*
* init routines
*/
-static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
+
+/**
+ * sdw_cdns_exit_reset() - Program reset parameters and start bus operations
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
{
- u32 mask;
+ /* program maximum length reset to be safe */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_RST_DELAY,
+ CDNS_MCP_CONTROL_RST_DELAY);
+
+ /* use hardware generated reset */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_HW_RST,
+ CDNS_MCP_CONTROL_HW_RST);
+
+ /* enable bus operations with clock and data */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG,
+ CDNS_MCP_CONFIG_OP,
+ CDNS_MCP_CONFIG_OP_NORMAL);
+
+ /* commit changes */
+ return cdns_update_config(cdns);
+}
+EXPORT_SYMBOL(sdw_cdns_exit_reset);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
- CDNS_MCP_SLAVE_INTMASK0_MASK);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
- CDNS_MCP_SLAVE_INTMASK1_MASK);
+/**
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
+{
+ u32 slave_intmask0 = 0;
+ u32 slave_intmask1 = 0;
+ u32 mask = 0;
+
+ if (!state)
+ goto update_masks;
+
+ slave_intmask0 = CDNS_MCP_SLAVE_INTMASK0_MASK;
+ slave_intmask1 = CDNS_MCP_SLAVE_INTMASK1_MASK;
/* enable detection of all slave state changes */
mask = CDNS_MCP_INT_SLAVE_MASK;
@@ -782,26 +848,13 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
if (interrupt_mask) /* parameter override */
mask = interrupt_mask;
+update_masks:
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- return 0;
-}
-
-/**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
- * @cdns: Cadence instance
- */
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
-{
- int ret;
-
- _cdns_enable_interrupt(cdns);
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT);
- if (ret < 0)
- dev_err(cdns->dev, "Config update timedout\n");
-
- return ret;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -821,7 +874,6 @@ static int cdns_allocate_pdi(struct sdw_cdns *cdns,
for (i = 0; i < num; i++) {
pdi[i].num = i + pdi_offset;
- pdi[i].assigned = false;
}
*stream = pdi;
@@ -838,7 +890,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config)
{
struct sdw_cdns_streams *stream;
- int offset, i, ret;
+ int offset;
+ int ret;
cdns->pcm.num_bd = config.pcm_bd;
cdns->pcm.num_in = config.pcm_in;
@@ -850,11 +903,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PCMs */
stream = &cdns->pcm;
- /* First two PDIs are reserved for bulk transfers */
- if (stream->num_bd < CDNS_PCM_PDI_OFFSET)
- return -EINVAL;
- stream->num_bd -= CDNS_PCM_PDI_OFFSET;
- offset = CDNS_PCM_PDI_OFFSET;
+ /* we allocate PDI0 and PDI1 which are used for Bulk */
+ offset = 0;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
@@ -881,7 +931,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PDMs */
stream = &cdns->pdm;
- offset = CDNS_PDM_PDI_OFFSET;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
if (ret)
@@ -898,6 +947,9 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
ret = cdns_allocate_pdi(cdns, &stream->out,
stream->num_out, offset);
+
+ offset += stream->num_out;
+
if (ret)
return ret;
@@ -905,18 +957,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
cdns->num_ports += stream->num_pdi;
- cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports,
- sizeof(*cdns->ports), GFP_KERNEL);
- if (!cdns->ports) {
- ret = -ENOMEM;
- return ret;
- }
-
- for (i = 0; i < cdns->num_ports; i++) {
- cdns->ports[i].assigned = false;
- cdns->ports[i].num = i + 1; /* Port 0 reserved for bulk */
- }
-
return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);
@@ -939,7 +979,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
* sdw_cdns_init() - Cadence initialization
* @cdns: Cadence instance
*/
-int sdw_cdns_init(struct sdw_cdns *cdns)
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
@@ -947,12 +987,13 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
int divider;
int ret;
- /* Exit clock stop */
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
- if (ret < 0) {
- dev_err(cdns->dev, "Couldn't exit from clock stop\n");
- return ret;
+ if (clock_stop_exit) {
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
}
/* Set clock divider */
@@ -975,6 +1016,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ /* flush command FIFOs */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
+ CDNS_MCP_CONTROL_CMD_RST);
+
/* Set cmd accept mode */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
CDNS_MCP_CONTROL_CMD_ACCEPT);
@@ -997,13 +1042,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
/* Set cmd mode for Tx and Rx cmds */
val &= ~CDNS_MCP_CONFIG_CMD;
- /* Set operation to normal */
- val &= ~CDNS_MCP_CONFIG_OP;
- val |= CDNS_MCP_CONFIG_OP_NORMAL;
-
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- return 0;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_init);
@@ -1185,20 +1227,20 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* @num: Number of PDIs
* @pdi: PDI instances
*
- * Find and return a free PDI for a given PDI array
+ * Find a PDI for a given PDI array. The PDI num and dai_id are
+ * expected to match; NULL is returned otherwise.
*/
static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
+ unsigned int offset,
unsigned int num,
- struct sdw_cdns_pdi *pdi)
+ struct sdw_cdns_pdi *pdi,
+ int dai_id)
{
int i;
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
- pdi[i].assigned = true;
- return &pdi[i];
- }
+ for (i = offset; i < offset + num; i++)
+ if (pdi[i].num == dai_id)
+ return &pdi[i];
return NULL;
}
@@ -1207,13 +1249,11 @@ static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
* sdw_cdns_config_stream: Configure a stream
*
* @cdns: Cadence instance
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
* @pdi: PDI to be used
*/
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_port *port,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
u32 offset, val = 0;
@@ -1221,113 +1261,51 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
if (dir == SDW_DATA_DIR_RX)
val = CDNS_PORTCTRL_DIRN;
- offset = CDNS_PORTCTRL + port->num * CDNS_PORT_OFFSET;
+ offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
- val = port->num;
+ val = pdi->num;
val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);
/**
- * cdns_get_num_pdi() - Get number of PDIs required
- *
- * @cdns: Cadence instance
- * @pdi: PDI to be used
- * @num: Number of PDIs
- * @ch_count: Channel count
- */
-static int cdns_get_num_pdi(struct sdw_cdns *cdns,
- struct sdw_cdns_pdi *pdi,
- unsigned int num, u32 ch_count)
-{
- int i, pdis = 0;
-
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
-
- if (pdi[i].ch_count < ch_count)
- ch_count -= pdi[i].ch_count;
- else
- ch_count = 0;
-
- pdis++;
-
- if (!ch_count)
- break;
- }
-
- if (ch_count)
- return 0;
-
- return pdis;
-}
-
-/**
- * sdw_cdns_get_stream() - Get stream information
- *
- * @cdns: Cadence instance
- * @stream: Stream to be allocated
- * @ch: Channel count
- * @dir: Data direction
- */
-int sdw_cdns_get_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- u32 ch, u32 dir)
-{
- int pdis = 0;
-
- if (dir == SDW_DATA_DIR_RX)
- pdis = cdns_get_num_pdi(cdns, stream->in, stream->num_in, ch);
- else
- pdis = cdns_get_num_pdi(cdns, stream->out, stream->num_out, ch);
-
- /* check if we found PDI, else find in bi-directional */
- if (!pdis)
- pdis = cdns_get_num_pdi(cdns, stream->bd, stream->num_bd, ch);
-
- return pdis;
-}
-EXPORT_SYMBOL(sdw_cdns_get_stream);
-
-/**
- * sdw_cdns_alloc_stream() - Allocate a stream
+ * sdw_cdns_alloc_pdi() - Allocate a PDI
*
* @cdns: Cadence instance
* @stream: Stream to be allocated
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
*/
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir)
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id)
{
struct sdw_cdns_pdi *pdi = NULL;
if (dir == SDW_DATA_DIR_RX)
- pdi = cdns_find_pdi(cdns, stream->num_in, stream->in);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_in, stream->in,
+ dai_id);
else
- pdi = cdns_find_pdi(cdns, stream->num_out, stream->out);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_out, stream->out,
+ dai_id);
/* check if we found a PDI, else find in bi-directional */
if (!pdi)
- pdi = cdns_find_pdi(cdns, stream->num_bd, stream->bd);
-
- if (!pdi)
- return -EIO;
-
- port->pdi = pdi;
- pdi->l_ch_num = 0;
- pdi->h_ch_num = ch - 1;
- pdi->dir = dir;
- pdi->ch_count = ch;
+ pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
+ dai_id);
+
+ if (pdi) {
+ pdi->l_ch_num = 0;
+ pdi->h_ch_num = ch - 1;
+ pdi->dir = dir;
+ pdi->ch_count = ch;
+ }
- return 0;
+ return pdi;
}
-EXPORT_SYMBOL(sdw_cdns_alloc_stream);
+EXPORT_SYMBOL(sdw_cdns_alloc_pdi);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cadence Soundwire Library");
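
The new cdns_update_config() centralizes the IP's shadow-register discipline: writes to MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL only take effect once MCP_CONFIG_UPDATE is written and the update bit self-clears. The resulting stage-then-commit flow, restated as a sketch on top of the driver's own helpers:

        /* Stage one or more register changes, then commit them in one shot. */
        static int cdns_set_normal_op(struct sdw_cdns *cdns)
        {
                cdns_updatel(cdns, CDNS_MCP_CONFIG,
                             CDNS_MCP_CONFIG_OP, CDNS_MCP_CONFIG_OP_NORMAL);

                /* polls CDNS_MCP_CONFIG_UPDATE_BIT until the IP latches it */
                return cdns_update_config(cdns);
        }
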
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 0b72b7094735..001457cbe5ad 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -8,7 +8,6 @@
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
- * @assigned: pdi assigned
* @num: pdi number
* @intel_alh_id: link identifier
* @l_ch_num: low channel for PDI
@@ -18,7 +17,6 @@
* @type: stream type, PDM or PCM
*/
struct sdw_cdns_pdi {
- bool assigned;
int num;
int intel_alh_id;
int l_ch_num;
@@ -29,23 +27,6 @@ struct sdw_cdns_pdi {
};
/**
- * struct sdw_cdns_port: Cadence port structure
- *
- * @num: port number
- * @assigned: port assigned
- * @ch: channel count
- * @direction: data port direction
- * @pdi: pdi for this port
- */
-struct sdw_cdns_port {
- unsigned int num;
- bool assigned;
- unsigned int ch;
- enum sdw_data_direction direction;
- struct sdw_cdns_pdi *pdi;
-};
-
-/**
* struct sdw_cdns_streams: Cadence stream data structure
*
* @num_bd: number of bidirectional streams
@@ -95,8 +76,8 @@ struct sdw_cdns_stream_config {
* struct sdw_cdns_dma_data: Cadence DMA data
*
* @name: SoundWire stream name
- * @nr_ports: Number of ports
- * @port: Ports
+ * @stream: stream runtime
+ * @pdi: PDI used for this dai
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
@@ -104,8 +85,7 @@ struct sdw_cdns_stream_config {
struct sdw_cdns_dma_data {
char *name;
struct sdw_stream_runtime *stream;
- int nr_ports;
- struct sdw_cdns_port **port;
+ struct sdw_cdns_pdi *pdi;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
@@ -158,10 +138,11 @@ extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
-int sdw_cdns_init(struct sdw_cdns *cdns);
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config);
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns);
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state);
#ifdef CONFIG_DEBUG_FS
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
@@ -170,10 +151,10 @@ void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
int sdw_cdns_get_stream(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir);
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir);
-void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port,
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id);
+void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 13c54eac0cc3..99dc61021211 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -479,7 +480,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
int pdi_conf = 0;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/*
* Program stream parameters to stream SHIM register
@@ -508,7 +512,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
unsigned int conf;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/* Program Stream config ALH register */
conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
@@ -603,66 +610,6 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
-static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw,
- u32 ch, u32 dir, bool pcm)
-{
- struct sdw_cdns *cdns = &sdw->cdns;
- struct sdw_cdns_port *port = NULL;
- int i, ret = 0;
-
- for (i = 0; i < cdns->num_ports; i++) {
- if (cdns->ports[i].assigned)
- continue;
-
- port = &cdns->ports[i];
- port->assigned = true;
- port->direction = dir;
- port->ch = ch;
- break;
- }
-
- if (!port) {
- dev_err(cdns->dev, "Unable to find a free port\n");
- return NULL;
- }
-
- if (pcm) {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pcm, port, ch, dir);
- if (ret)
- goto out;
-
- intel_pdi_shim_configure(sdw, port->pdi);
- sdw_cdns_config_stream(cdns, port, ch, dir, port->pdi);
-
- intel_pdi_alh_configure(sdw, port->pdi);
-
- } else {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pdm, port, ch, dir);
- }
-
-out:
- if (ret) {
- port->assigned = false;
- port = NULL;
- }
-
- return port;
-}
-
-static void intel_port_cleanup(struct sdw_cdns_dma_data *dma)
-{
- int i;
-
- for (i = 0; i < dma->nr_ports; i++) {
- if (dma->port[i]) {
- dma->port[i]->pdi->assigned = false;
- dma->port[i]->pdi = NULL;
- dma->port[i]->assigned = false;
- dma->port[i] = NULL;
- }
- }
-}
-
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -670,9 +617,11 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
struct sdw_port_config *pconfig;
- int ret, i, ch, dir;
+ int ch, dir;
+ int ret;
bool pcm = true;
dma = snd_soc_dai_get_dma_data(dai, substream);
@@ -685,38 +634,30 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
else
dir = SDW_DATA_DIR_TX;
- if (dma->stream_type == SDW_STREAM_PDM) {
- /* TODO: Check whether PDM decimator is already in use */
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pdm, ch, dir);
+ if (dma->stream_type == SDW_STREAM_PDM)
pcm = false;
- } else {
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pcm, ch, dir);
- }
- if (!dma->nr_ports) {
- dev_err(dai->dev, "ports/resources not available\n");
- return -EINVAL;
+ if (pcm)
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
+ else
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
+
+ if (!pdi) {
+ ret = -EINVAL;
+ goto error;
}
- dma->port = kcalloc(dma->nr_ports, sizeof(*dma->port), GFP_KERNEL);
- if (!dma->port)
- return -ENOMEM;
+ /* do run-time configurations for SHIM, ALH and PDI/PORT */
+ intel_pdi_shim_configure(sdw, pdi);
+ intel_pdi_alh_configure(sdw, pdi);
+ sdw_cdns_config_stream(cdns, ch, dir, pdi);
- for (i = 0; i < dma->nr_ports; i++) {
- dma->port[i] = intel_alloc_port(sdw, ch, dir, pcm);
- if (!dma->port[i]) {
- ret = -EINVAL;
- goto port_error;
- }
- }
/* Inform DSP about PDI stream number */
- for (i = 0; i < dma->nr_ports; i++) {
- ret = intel_config_stream(sdw, substream, dai, params,
- dma->port[i]->pdi->intel_alh_id);
- if (ret)
- goto port_error;
- }
+ ret = intel_config_stream(sdw, substream, dai, params,
+ pdi->intel_alh_id);
+ if (ret)
+ goto error;
sconfig.direction = dir;
sconfig.ch_count = ch;
@@ -731,32 +672,22 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
/* Port configuration */
- pconfig = kcalloc(dma->nr_ports, sizeof(*pconfig), GFP_KERNEL);
+ pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto port_error;
+ goto error;
}
- for (i = 0; i < dma->nr_ports; i++) {
- pconfig[i].num = dma->port[i]->num;
- pconfig[i].ch_mask = (1 << ch) - 1;
- }
+ pconfig->num = pdi->num;
+ pconfig->ch_mask = (1 << ch) - 1;
ret = sdw_stream_add_master(&cdns->bus, &sconfig,
- pconfig, dma->nr_ports, dma->stream);
- if (ret) {
+ pconfig, 1, dma->stream);
+ if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
- goto stream_error;
- }
kfree(pconfig);
- return ret;
-
-stream_error:
- kfree(pconfig);
-port_error:
- intel_port_cleanup(dma);
- kfree(dma->port);
+error:
return ret;
}
@@ -776,8 +707,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
dma->stream->name, ret);
- intel_port_cleanup(dma);
- kfree(dma->port);
return ret;
}
@@ -842,14 +771,6 @@ static int intel_create_dai(struct sdw_cdns *cdns,
return -ENOMEM;
if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
- dais[i].playback.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Tx%d",
- cdns->instance, i);
- if (!dais[i].playback.stream_name) {
- kfree(dais[i].name);
- return -ENOMEM;
- }
-
dais[i].playback.channels_min = 1;
dais[i].playback.channels_max = max_ch;
dais[i].playback.rates = SNDRV_PCM_RATE_48000;
@@ -857,23 +778,12 @@ static int intel_create_dai(struct sdw_cdns *cdns,
}
if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
- dais[i].capture.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Rx%d",
- cdns->instance, i);
- if (!dais[i].capture.stream_name) {
- kfree(dais[i].name);
- kfree(dais[i].playback.stream_name);
- return -ENOMEM;
- }
-
dais[i].capture.channels_min = 1;
dais[i].capture.channels_max = max_ch;
dais[i].capture.rates = SNDRV_PCM_RATE_48000;
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
- dais[i].id = SDW_DAI_ID_RANGE_START + i;
-
if (pcm)
dais[i].ops = &intel_pcm_dai_ops;
else
@@ -993,6 +903,15 @@ static struct sdw_master_ops sdw_intel_ops = {
.post_bank_switch = intel_post_bank_switch,
};
+static int intel_init(struct sdw_intel *sdw)
+{
+ /* Initialize shim and controller */
+ intel_link_power_up(sdw);
+ intel_shim_init(sdw);
+
+ return sdw_cdns_init(&sdw->cdns, false);
+}
+
/*
* probe and init
*/
@@ -1026,7 +945,7 @@ static int intel_probe(struct platform_device *pdev)
ret = sdw_add_bus_master(&sdw->cdns.bus);
if (ret) {
dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
- goto err_master_reg;
+ return ret;
}
if (sdw->cdns.bus.prop.hw_disabled) {
@@ -1035,16 +954,11 @@ static int intel_probe(struct platform_device *pdev)
return 0;
}
- /* Initialize shim and controller */
- intel_link_power_up(sdw);
- intel_shim_init(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
+ /* Initialize shim, controller and Cadence IP */
+ ret = intel_init(sdw);
if (ret)
goto err_init;
- ret = sdw_cdns_enable_interrupt(&sdw->cdns);
-
/* Read the PDI config and initialize cadence PDI */
intel_pdi_init(sdw, &config);
ret = sdw_cdns_pdi_init(&sdw->cdns, config);
@@ -1062,23 +976,35 @@ static int intel_probe(struct platform_device *pdev)
goto err_init;
}
+ ret = sdw_cdns_enable_interrupt(&sdw->cdns, true);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "cannot enable interrupts\n");
+ goto err_init;
+ }
+
+ ret = sdw_cdns_exit_reset(&sdw->cdns);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n");
+ goto err_interrupt;
+ }
+
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
snd_soc_unregister_component(sdw->cdns.dev);
- goto err_dai;
+ goto err_interrupt;
}
intel_debugfs_init(sdw);
return 0;
-err_dai:
+err_interrupt:
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
err_init:
sdw_delete_bus_master(&sdw->cdns.bus);
-err_master_reg:
return ret;
}
@@ -1090,6 +1016,7 @@ static int intel_remove(struct platform_device *pdev)
if (!sdw->cdns.bus.prop.hw_disabled) {
intel_debugfs_exit(sdw);
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
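
The new ALH mapping in intel.c is easiest to see with link_id == 0: PDI0 and PDI1 (Bulk) land on ALH streams 3 and 4, then the "+ 2" gap puts PDI2 on 7, PDI3 on 8, and so on. PCM PDIs therefore end up exactly where the old unconditional "+ 5" placed them, while the Bulk PDIs now get their own slots. As a worked helper (illustrative only):

        /* PDI number -> ALH stream ID, per the two hunks above. */
        static unsigned int pdi_to_alh_id(unsigned int link_id, int pdi_num)
        {
                unsigned int id = link_id * 16 + pdi_num + 3;

                if (pdi_num >= 2)       /* PCM PDIs sit past the two Bulk slots */
                        id += 2;

                return id;      /* link 0: PDI0->3, PDI1->4, PDI2->7, PDI3->8 */
        }
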
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index b74c2f144962..2a2b4d8df462 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 6473fa602f82..19919975bb6d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -29,10 +29,17 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.parent = bus->dev;
slave->dev.fwnode = fwnode;
- /* name shall be sdw:link:mfg:part:class:unique */
- dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
- bus->link_id, id->mfg_id, id->part_id,
- id->class_id, id->unique_id);
+ if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
+ /* name shall be sdw:link:mfg:part:class */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id);
+ } else {
+ /* name shall be sdw:link:mfg:part:class:unique */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+ }
slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
@@ -64,6 +71,36 @@ static int sdw_slave_add(struct sdw_bus *bus,
}
#if IS_ENABLED(CONFIG_ACPI)
+
+static bool find_slave(struct sdw_bus *bus,
+ struct acpi_device *adev,
+ struct sdw_slave_id *id)
+{
+ unsigned long long addr;
+ unsigned int link_id;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle,
+ METHOD_NAME__ADR, NULL, &addr);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(bus->dev, "_ADR resolution failed: %x\n",
+ status);
+ return false;
+ }
+
+ /* Extract link id from ADR, bits 51 to 48 (inclusive) */
+ link_id = (addr >> 48) & GENMASK(3, 0);
+
+ /* Check for link_id match */
+ if (link_id != bus->link_id)
+ return false;
+
+ sdw_extract_slave_id(bus, addr, id);
+
+ return true;
+}
+
/*
* sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
* @bus: SDW bus instance
@@ -73,6 +110,7 @@ static int sdw_slave_add(struct sdw_bus *bus,
int sdw_acpi_find_slaves(struct sdw_bus *bus)
{
struct acpi_device *adev, *parent;
+ struct acpi_device *adev2, *parent2;
parent = ACPI_COMPANION(bus->dev);
if (!parent) {
@@ -81,28 +119,46 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
}
list_for_each_entry(adev, &parent->children, node) {
- unsigned long long addr;
struct sdw_slave_id id;
- unsigned int link_id;
- acpi_status status;
+ struct sdw_slave_id id2;
+ bool ignore_unique_id = true;
- status = acpi_evaluate_integer(adev->handle,
- METHOD_NAME__ADR, NULL, &addr);
+ if (!find_slave(bus, adev, &id))
+ continue;
- if (ACPI_FAILURE(status)) {
- dev_err(bus->dev, "_ADR resolution failed: %x\n",
- status);
- return status;
+ /* brute-force O(N^2) search for duplicates */
+ parent2 = parent;
+ list_for_each_entry(adev2, &parent2->children, node) {
+
+ if (adev == adev2)
+ continue;
+
+ if (!find_slave(bus, adev2, &id2))
+ continue;
+
+ if (id.sdw_version != id2.sdw_version ||
+ id.mfg_id != id2.mfg_id ||
+ id.part_id != id2.part_id ||
+ id.class_id != id2.class_id)
+ continue;
+
+ if (id.unique_id != id2.unique_id) {
+ dev_dbg(bus->dev,
+ "Valid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ ignore_unique_id = false;
+ } else {
+ dev_err(bus->dev,
+ "Invalid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ return -ENODEV;
+ }
}
- /* Extract link id from ADR, Bit 51 to 48 (included) */
- link_id = (addr >> 48) & GENMASK(3, 0);
-
- /* Check for link_id match */
- if (link_id != bus->link_id)
- continue;
-
- sdw_extract_slave_id(bus, addr, &id);
+ if (ignore_unique_id)
+ id.unique_id = SDW_IGNORED_UNIQUE_ID;
/*
* don't error check for sdw_slave_add as we want to continue
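
find_slave() keys everything off the ACPI _ADR value: bits 51:48 select the link, and the remaining fields are decoded by sdw_extract_slave_id(). The extraction itself is a plain shift-and-mask; for instance, a made-up _ADR of 0x000320025D070000 carries link ID 3:

        #include <linux/bits.h>

        /* Bits 51:48 of _ADR carry the SoundWire link ID. */
        static unsigned int adr_to_link_id(unsigned long long addr)
        {
                return (addr >> 48) & GENMASK(3, 0);
        }
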
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 6f7fdcbb9151..870f7797b56b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,7 @@ config SPI_ARMADA_3700
config SPI_ATMEL
tristate "Atmel SPI Controller"
depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
help
This selects a driver for the Atmel SPI Controller, present on
many AT91 ARM chips.
@@ -143,7 +144,7 @@ config SPI_BCM63XX
tristate "Broadcom BCM63xx SPI controller"
depends on BCM63XX || COMPILE_TEST
help
- Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+ Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
@@ -234,11 +235,11 @@ config SPI_DLN2
tristate "Diolan DLN-2 USB SPI adapter"
depends on MFD_DLN2
help
- If you say yes to this option, support will be included for Diolan
- DLN2, a USB to SPI interface.
+ If you say yes to this option, support will be included for Diolan
+ DLN2, a USB to SPI interface.
- This driver can also be built as a module. If so, the module
- will be called spi-dln2.
+ This driver can also be built as a module. If so, the module
+ will be called spi-dln2.
config SPI_EFM32
tristate "EFM32 SPI controller"
@@ -747,10 +748,10 @@ config SPI_SYNQUACER
It also supports the new dual-bit and quad-bit SPI protocol.
config SPI_MXIC
- tristate "Macronix MX25F0A SPI controller"
- depends on SPI_MASTER
- help
- This selects the Macronix MX25F0A SPI controller driver.
+ tristate "Macronix MX25F0A SPI controller"
+ depends on SPI_MASTER
+ help
+ This selects the Macronix MX25F0A SPI controller driver.
config SPI_MXS
tristate "Freescale MXS SPI controller"
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index a40bb2ef89dc..88033422a42a 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -132,7 +132,7 @@ static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
if (IS_ERR(ctlr->dma_tx)) {
err = PTR_ERR(ctlr->dma_tx);
@@ -145,7 +145,7 @@ static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
goto at91_usart_spi_error_clear;
}
- ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
if (IS_ERR(ctlr->dma_rx)) {
err = PTR_ERR(ctlr->dma_rx);
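
dma_request_slave_channel_reason() was only ever a #define around dma_request_chan(), so the two hunks above are a pure rename. The useful property of both is that they return ERR_PTR() codes rather than NULL, which lets probe paths forward -EPROBE_DEFER and degrade to PIO on anything else. A sketch of that shape (the helper name is illustrative):

        #include <linux/dmaengine.h>

        static int request_tx_dma(struct device *dev, struct dma_chan **chan)
        {
                *chan = dma_request_chan(dev, "tx");
                if (IS_ERR(*chan)) {
                        int err = PTR_ERR(*chan);

                        *chan = NULL;
                        if (err == -EPROBE_DEFER)
                                return err;     /* DMA provider not ready yet */
                        dev_warn(dev, "no TX DMA channel, falling back to PIO\n");
                }
                return 0;
        }
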
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index acf318e7330c..56f0ca361deb 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -222,37 +222,13 @@
| SPI_BF(name, value))
/* Register access macros */
-#ifdef CONFIG_AVR32
-#define spi_readl(port, reg) \
- __raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port, reg, value) \
- __raw_writel((value), (port)->regs + SPI_##reg)
-
-#define spi_readw(port, reg) \
- __raw_readw((port)->regs + SPI_##reg)
-#define spi_writew(port, reg, value) \
- __raw_writew((value), (port)->regs + SPI_##reg)
-
-#define spi_readb(port, reg) \
- __raw_readb((port)->regs + SPI_##reg)
-#define spi_writeb(port, reg, value) \
- __raw_writeb((value), (port)->regs + SPI_##reg)
-#else
#define spi_readl(port, reg) \
readl_relaxed((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
writel_relaxed((value), (port)->regs + SPI_##reg)
-
-#define spi_readw(port, reg) \
- readw_relaxed((port)->regs + SPI_##reg)
#define spi_writew(port, reg, value) \
writew_relaxed((value), (port)->regs + SPI_##reg)
-#define spi_readb(port, reg) \
- readb_relaxed((port)->regs + SPI_##reg)
-#define spi_writeb(port, reg, value) \
- writeb_relaxed((value), (port)->regs + SPI_##reg)
-#endif
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
@@ -299,17 +275,16 @@ struct atmel_spi {
bool use_dma;
bool use_pdc;
- bool use_cs_gpios;
bool keep_cs;
- bool cs_active;
u32 fifo_size;
+ u8 native_cs_free;
+ u8 native_cs_for_gpio;
};
/* Controller-specific per-slave state */
struct atmel_spi_device {
- struct gpio_desc *npcs_pin;
u32 csr;
};
@@ -336,11 +311,9 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
* transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
* controllers have CSAAT and friends.
*
- * Since the CSAAT functionality is a bit weird on newer controllers as
- * well, we use GPIO to control nCSx pins on all controllers, updating
- * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
- * support active-high chipselects despite the controller's belief that
- * only active-low devices/systems exists.
+ * Even on controllers newer than at91rm9200, using GPIOs can make
+ * sense as it lets us support active-high chipselects despite the
+ * controller's belief that only active-low devices/systems exist.
*
* However, at91rm9200 has a second erratum whereby nCS0 doesn't work
* right when driven with GPIO. ("Mode Fault does not allow more than one
@@ -352,30 +325,36 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
+ int chip_select;
u32 mr;
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
if (atmel_spi_is_v2(as)) {
- spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
+ spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
/* For the low SPI version, there is an issue that PDC transfer
* on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
*/
spi_writel(as, CSR0, asd->csr);
if (as->caps.has_wdrbt) {
spi_writel(as, MR,
- SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ SPI_BF(PCS, ~(0x01 << chip_select))
| SPI_BIT(WDRBT)
| SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
} else {
spi_writel(as, MR,
- SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ SPI_BF(PCS, ~(0x01 << chip_select))
| SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
}
mr = spi_readl(as, MR);
- if (as->use_cs_gpios)
- gpiod_set_value(asd->npcs_pin, 1);
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -390,9 +369,9 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
}
mr = spi_readl(as, MR);
- mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
- if (as->use_cs_gpios && spi->chip_select != 0)
- gpiod_set_value(asd->npcs_pin, 1);
+ mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
spi_writel(as, MR, mr);
}
@@ -401,24 +380,29 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
- struct atmel_spi_device *asd = spi->controller_state;
+ int chip_select;
u32 mr;
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
/* only deactivate *this* device; sometimes transfers to
* another device may be active when this routine is called.
*/
mr = spi_readl(as, MR);
- if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
+ if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
mr = SPI_BFINS(PCS, 0xf, mr);
spi_writel(as, MR, mr);
}
dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
- if (!as->use_cs_gpios)
+ if (!spi->cs_gpiod)
spi_writel(as, CR, SPI_BIT(LASTXFER));
- else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
- gpiod_set_value(asd->npcs_pin, 0);
+ else
+ gpiod_set_value(spi->cs_gpiod, 0);
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -527,7 +511,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
err = PTR_ERR(master->dma_tx);
if (err == -EPROBE_DEFER) {
@@ -844,6 +828,12 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
{
u32 scbr, csr;
unsigned long bus_hz;
+ int chip_select;
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
/* v1 chips start out at half the peripheral bus speed. */
bus_hz = as->spi_clk;
@@ -872,9 +862,9 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
xfer->speed_hz, scbr, bus_hz);
return -EINVAL;
}
- csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+ csr = spi_readl(as, CSR0 + 4 * chip_select);
csr = SPI_BFINS(SCBR, scbr, csr);
- spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
return 0;
}
@@ -1173,40 +1163,105 @@ atmel_spi_pdc_interrupt(int irq, void *dev_id)
return ret;
}
+static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
+{
+ struct spi_delay *delay = &spi->word_delay;
+ u32 value = delay->value;
+
+ switch (delay->unit) {
+ case SPI_DELAY_UNIT_NSECS:
+ value /= 1000;
+ break;
+ case SPI_DELAY_UNIT_USECS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (as->spi_clk / 1000000 * value) >> 5;
+}
+
+static void initialize_native_cs_for_gpio(struct atmel_spi *as)
+{
+ int i;
+ struct spi_master *master = platform_get_drvdata(as->pdev);
+
+ if (!as->native_cs_free)
+ return; /* already initialized */
+
+ if (!master->cs_gpiods)
+ return; /* No CS GPIO */
+
+ /*
+ * On the first version of the controller (AT91RM9200), CS0
+ * can't be associated with a GPIO
+ */
+ if (atmel_spi_is_v2(as))
+ i = 0;
+ else
+ i = 1;
+
+ for (; i < 4; i++)
+ if (master->cs_gpiods[i])
+ as->native_cs_free |= BIT(i);
+
+ if (as->native_cs_free)
+ as->native_cs_for_gpio = ffs(as->native_cs_free);
+}
+
static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
struct atmel_spi_device *asd;
u32 csr;
unsigned int bits = spi->bits_per_word;
+ int chip_select;
+ int word_delay_csr;
as = spi_master_get_devdata(spi->master);
/* see notes above re chipselect */
- if (!atmel_spi_is_v2(as)
- && spi->chip_select == 0
- && (spi->mode & SPI_CS_HIGH)) {
- dev_dbg(&spi->dev, "setup: can't be active-high\n");
+ if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
return -EINVAL;
}
+ /* setup() is called during spi_register_controller() (aka
+ * spi_register_master()) but after all members of the cs_gpiod
+ * array have been filled, so we can look for which native
+ * CS will be free for use with a GPIO
+ */
+ initialize_native_cs_for_gpio(as);
+
+ if (spi->cs_gpiod && as->native_cs_free) {
+ dev_err(&spi->dev,
+ "No native CS available to support this GPIO CS\n");
+ return -EBUSY;
+ }
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
csr = SPI_BF(BITS, bits - 8);
if (spi->mode & SPI_CPOL)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
csr |= SPI_BIT(NCPHA);
- if (!as->use_cs_gpios)
- csr |= SPI_BIT(CSAAT);
- /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
- */
+ if (!spi->cs_gpiod)
+ csr |= SPI_BIT(CSAAT);
csr |= SPI_BF(DLYBS, 0);
+ word_delay_csr = atmel_word_delay_csr(spi, as);
+ if (word_delay_csr < 0)
+ return word_delay_csr;
+
/* DLYBCT adds delays between words. This is useful for slow devices
* that need a bit of time to setup the next transfer.
*/
- csr |= SPI_BF(DLYBCT,
- (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5);
+ csr |= SPI_BF(DLYBCT, word_delay_csr);
asd = spi->controller_state;
if (!asd) {
@@ -1214,21 +1269,6 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- /*
- * If use_cs_gpios is true this means that we have "cs-gpios"
- * defined in the device tree node so we should have
- * gotten the GPIO lines from the device tree inside the
- * SPI core. Warn if this is not the case but continue since
- * CS GPIOs are after all optional.
- */
- if (as->use_cs_gpios) {
- if (!spi->cs_gpiod) {
- dev_err(&spi->dev,
- "host claims to use CS GPIOs but no CS found in DT by the SPI core\n");
- }
- asd->npcs_pin = spi->cs_gpiod;
- }
-
spi->controller_state = asd;
}
@@ -1239,7 +1279,7 @@ static int atmel_spi_setup(struct spi_device *spi)
bits, spi->mode, spi->chip_select, csr);
if (!atmel_spi_is_v2(as))
- spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
return 0;
}
@@ -1368,19 +1408,16 @@ static int atmel_spi_one_transfer(struct spi_master *master,
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
as->keep_cs = true;
} else {
- as->cs_active = !as->cs_active;
- if (as->cs_active)
- cs_activate(as, msg->spi);
- else
- cs_deactivate(as, msg->spi);
+ cs_deactivate(as, msg->spi);
+ udelay(10);
+ cs_activate(as, msg->spi);
}
}
@@ -1403,7 +1440,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
atmel_spi_lock(as);
cs_activate(as, spi);
- as->cs_active = true;
as->keep_cs = false;
msg->status = 0;
@@ -1527,7 +1563,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
- master->num_chipselect = master->dev.of_node ? 0 : 4;
+ master->num_chipselect = 4;
master->setup = atmel_spi_setup;
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
master->transfer_one_message = atmel_spi_transfer_one_message;
@@ -1555,19 +1591,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
atmel_get_caps(as);
- /*
- * If there are chip selects in the device tree, those will be
- * discovered by the SPI core when registering the SPI master
- * and assigned to each SPI device.
- */
- as->use_cs_gpios = true;
- if (atmel_spi_is_v2(as) &&
- pdev->dev.of_node &&
- !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
- as->use_cs_gpios = false;
- master->num_chipselect = 4;
- }
-
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
@@ -1775,20 +1798,18 @@ static const struct dev_pm_ops atmel_spi_pm_ops = {
#define ATMEL_SPI_PM_OPS NULL
#endif
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_spi_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-spi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
-#endif
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.pm = ATMEL_SPI_PM_OPS,
- .of_match_table = of_match_ptr(atmel_spi_dt_ids),
+ .of_match_table = atmel_spi_dt_ids,
},
.probe = atmel_spi_probe,
.remove = atmel_spi_remove,
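
The native-CS bookkeeping above boils down to a small bitmask pattern: every chip select that is driven by a GPIO leaves its native CS line free, and ffs() picks one free line whose CSR register the controller can program for GPIO-CS transfers. A minimal userspace sketch of that pairing, with illustrative names that are not the driver's own:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NCS 4

/* cs_gpiods[i] != 0 means chip select i is driven by a GPIO */
static int pick_native_cs_for_gpio(const int cs_gpiods[NCS], int is_v2)
{
	unsigned int free_mask = 0;
	int i;

	/* On AT91RM9200 (v1), CS0 cannot be paired with a GPIO */
	for (i = is_v2 ? 0 : 1; i < NCS; i++)
		if (cs_gpiods[i])
			free_mask |= 1u << i;

	if (!free_mask)
		return -1;	/* no native CS left to borrow */

	return ffs(free_mask) - 1;	/* ffs() is 1-based */
}

int main(void)
{
	const int cs_gpiods[NCS] = { 0, 1, 0, 0 };	/* GPIO on CS1 only */

	printf("native CS for GPIO: %d\n",
	       pick_native_cs_for_gpio(cs_gpiods, 1));	/* prints 1 */
	return 0;
}
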
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 74842f6019ed..eb9b78a90dcf 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -163,10 +163,21 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
}
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
- struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
+ struct spi_engine *spi_engine, unsigned int clk_div,
+ struct spi_transfer *xfer)
{
unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
unsigned int t;
+ int delay;
+
+ if (xfer->delay_usecs) {
+ delay = xfer->delay_usecs;
+ } else {
+ delay = spi_delay_to_ns(&xfer->delay, xfer);
+ if (delay < 0)
+ return;
+ delay /= 1000;
+ }
if (delay == 0)
return;
@@ -218,8 +229,7 @@ static int spi_engine_compile_message(struct spi_engine *spi_engine,
spi_engine_gen_cs(p, dry, spi, true);
spi_engine_gen_xfer(p, dry, xfer);
- spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
- xfer->delay_usecs);
+ spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
cs_change = xfer->cs_change;
if (list_is_last(&xfer->transfer_list, &msg->transfers))
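
spi_engine_gen_sleep() now receives the whole spi_transfer and derives the pause from either the legacy delay_usecs field or the new { value, unit } pair. A standalone sketch of that unit conversion, assuming only the units this series actually touches (names are stand-ins, not the core's):

#include <stdio.h>

enum delay_unit { UNIT_USECS, UNIT_NSECS, UNIT_SCK };

struct xfer_delay {
	unsigned short value;
	unsigned char unit;
};

/* Returns the delay in nanoseconds, or -1 for an unsupported unit */
static long long delay_to_ns(struct xfer_delay d, unsigned long sck_hz)
{
	switch (d.unit) {
	case UNIT_USECS:
		return (long long)d.value * 1000;
	case UNIT_NSECS:
		return d.value;
	case UNIT_SCK:	/* clock cycles -> ns, needs the SCK rate */
		return sck_hz ? (long long)d.value * 1000000000LL / sck_hz : -1;
	default:
		return -1;
	}
}

int main(void)
{
	struct xfer_delay d = { .value = 10, .unit = UNIT_USECS };

	printf("%lld ns\n", delay_to_ns(d, 0));	/* 10000 ns */
	return 0;
}
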
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 7a3531856491..85bad70f59e3 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -803,7 +803,8 @@ static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
return -EIO;
from = op->addr.val;
- bcm_qspi_chip_select(qspi, spi->chip_select);
+ if (!spi->cs_gpiod)
+ bcm_qspi_chip_select(qspi, spi->chip_select);
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
/*
@@ -882,7 +883,8 @@ static int bcm_qspi_transfer_one(struct spi_master *master,
int slots;
unsigned long timeo = msecs_to_jiffies(100);
- bcm_qspi_chip_select(qspi, spi->chip_select);
+ if (!spi->cs_gpiod)
+ bcm_qspi_chip_select(qspi, spi->chip_select);
qspi->trans_pos.trans = trans;
qspi->trans_pos.byte = 0;
@@ -1234,6 +1236,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
master->cleanup = bcm_qspi_cleanup;
master->dev.of_node = dev->of_node;
master->num_chipselect = NUM_CHIPSELECT;
+ master->use_gpio_descriptors = true;
qspi->big_endian = of_device_is_big_endian(dev->of_node);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index b4070c0de3df..fb61a620effc 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1248,7 +1248,7 @@ static int bcm2835_spi_setup(struct spi_device *spi)
/*
* Retrieve the corresponding GPIO line used for CS.
* The inversion semantics will be handled by the GPIO core
- * code, so we pass GPIOS_OUT_LOW for "unasserted" and
+ * code, so we pass GPIOD_OUT_LOW for "unasserted" and
* the correct flag for inversion semantics. The SPI_CS_HIGH
* on spi->mode cannot be checked for polarity in this case
* as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index c6836a931dbf..7327309ea3d5 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -291,8 +291,7 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
msg->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (t->cs_change)
bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index fdd7eaa0b8ed..0f1b10a4ef0c 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -368,7 +368,7 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
}
/* CS will be deasserted directly after transfer */
- if (t->delay_usecs) {
+ if (t->delay_usecs || t->delay.value) {
dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
status = -EINVAL;
goto exit;
diff --git a/drivers/spi/spi-cavium.c b/drivers/spi/spi-cavium.c
index 5aaf21582cb5..6854c3ce423b 100644
--- a/drivers/spi/spi-cavium.c
+++ b/drivers/spi/spi-cavium.c
@@ -119,8 +119,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
*rx_buf++ = (u8)v;
}
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
return xfer->len;
}
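
This cavium hunk is the mechanical change the series repeats across drivers: the open-coded "if (t->delay_usecs) udelay(...)" is folded into one core helper that also understands the structured delay. A hedged userspace model of what such a helper reduces to, not the core's exact code:

#include <stdio.h>
#include <time.h>

struct transfer {
	unsigned short delay_usecs;	/* legacy field */
	struct { unsigned short value; int is_nsecs; } delay;
};

/* One place to execute the per-transfer delay, legacy field first */
static void transfer_delay_exec(const struct transfer *t)
{
	long ns;

	if (t->delay_usecs)		/* legacy path wins if set */
		ns = (long)t->delay_usecs * 1000;
	else
		ns = t->delay.is_nsecs ? t->delay.value
				       : (long)t->delay.value * 1000;
	if (!ns)
		return;

	struct timespec ts = { .tv_sec = ns / 1000000000L,
			       .tv_nsec = ns % 1000000000L };
	nanosleep(&ts, NULL);
}

int main(void)
{
	struct transfer t = { .delay_usecs = 100 };

	transfer_delay_exec(&t);	/* sleeps ~100 us */
	puts("done");
	return 0;
}
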
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index bd46fca3f094..384a3ab6dc2d 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
@@ -193,6 +194,8 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
goto out;
}
+ pm_runtime_enable(&pdev->dev);
+
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto out;
@@ -201,6 +204,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
return 0;
out:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
out_clk:
clk_disable_unprepare(dwsmmio->clk);
@@ -212,6 +216,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsmmio->dws);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
@@ -223,6 +228,7 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
{ .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
+ { .compatible = "renesas,rzn1-spi", },
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 140644913e6c..12c131b5fb4e 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
@@ -35,7 +36,7 @@ static struct spi_pci_desc spi_pci_mid_desc_2 = {
};
static struct spi_pci_desc spi_pci_ehl_desc = {
- .num_cs = 1,
+ .num_cs = 2,
.bus_num = -1,
.max_freq = 100000000,
};
@@ -57,13 +58,18 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
+ pci_set_master(pdev);
ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev));
if (ret)
return ret;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
dws->regs = pcim_iomap_table(pdev)[pci_bar];
- dws->irq = pdev->irq;
+ dws->irq = pci_irq_vector(pdev, 0);
/*
* Specific handling for platforms, like dma setup,
@@ -80,12 +86,15 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
}
} else {
+ pci_free_irq_vectors(pdev);
return -ENODEV;
}
ret = dw_spi_add_host(&pdev->dev, dws);
- if (ret)
+ if (ret) {
+ pci_free_irq_vectors(pdev);
return ret;
+ }
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dws);
@@ -93,6 +102,11 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
return 0;
}
@@ -100,7 +114,11 @@ static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi *dws = pci_get_drvdata(pdev);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
dw_spi_remove_host(dws);
+ pci_free_irq_vectors(pdev);
}
#ifdef CONFIG_PM_SLEEP
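
The dw-pci changes move from the legacy pdev->irq to managed IRQ vectors and add runtime PM; the part worth internalizing is that every failure path after the allocation must release the vectors again. The driver inlines the frees in each branch; the sketch below expresses the same unwind with a goto ladder and stand-in functions:

#include <stdio.h>

/* Stand-ins for pci_alloc_irq_vectors()/pci_free_irq_vectors() */
static int alloc_vectors(void) { return 0; }
static void free_vectors(void) { puts("vectors freed"); }
static int add_host(void) { return -1; }	/* simulate failure */

static int probe(void)
{
	int ret;

	ret = alloc_vectors();
	if (ret < 0)
		return ret;

	ret = add_host();
	if (ret)
		goto err_free_vectors;	/* undo in reverse order */

	return 0;

err_free_vectors:
	free_vectors();
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}
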
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 9a49e073e8b7..a92aa5cd4fbe 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -308,7 +308,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
- (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
+ (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
+ (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
| (chip->tmode << SPI_TMOD_OFFSET);
/*
@@ -493,6 +494,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->dev.of_node = dev->of_node;
master->dev.fwnode = dev->fwnode;
master->flags = SPI_MASTER_GPIO_SS;
+ master->auto_runtime_pm = true;
if (dws->set_cs)
master->set_cs = dws->set_cs;
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index c9c15881e982..38c7de1f0aa9 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -4,7 +4,6 @@
#include <linux/io.h>
#include <linux/scatterlist.h>
-#include <linux/gpio.h>
/* Register offsets */
#define DW_SPI_CTRL0 0x00
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 00f46c816a56..d3336a63f462 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -377,7 +377,7 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
m->actual_length += t->len;
- WARN_ON(t->delay_usecs || t->cs_change);
+ WARN_ON(t->delay_usecs || t->delay.value || t->cs_change);
spi_flags = 0;
}
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 858f0544289e..54ad0ac121e5 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -392,7 +392,8 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
- cpm_muram_free(cpm_muram_offset(mspi->pram));
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index bec758e978fb..442cff71a0d2 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -129,6 +129,7 @@ enum dspi_trans_mode {
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
+ bool ptp_sts_supported;
bool xspi_mode;
};
@@ -140,12 +141,14 @@ static const struct fsl_dspi_devtype_data vf610_data = {
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
.trans_mode = DSPI_TCFQ_MODE,
.max_clock_factor = 8,
+ .ptp_sts_supported = true,
.xspi_mode = true,
};
static const struct fsl_dspi_devtype_data ls2085a_data = {
.trans_mode = DSPI_TCFQ_MODE,
.max_clock_factor = 8,
+ .ptp_sts_supported = true,
};
static const struct fsl_dspi_devtype_data coldfire_data = {
@@ -654,6 +657,9 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
u16 spi_tcnt;
u32 spi_tcr;
+ spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
+ dspi->tx - dspi->bytes_per_word, !dspi->irq);
+
/* Get transfer counter (in number of SPI transfers). It was
* reset to 0 when transfer(s) were started.
*/
@@ -672,6 +678,9 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
/* Success! */
return 0;
+ spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
+ dspi->tx, !dspi->irq);
+
if (trans_mode == DSPI_EOQ_MODE)
dspi_eoq_write(dspi);
else if (trans_mode == DSPI_TCFQ_MODE)
@@ -707,7 +716,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)))
+ if (!(spi_sr & SPI_SR_EOQF))
return IRQ_NONE;
if (dspi_rxtx(dspi) == 0) {
@@ -779,6 +788,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_FRAME_EBITS(transfer->bits_per_word) |
SPI_CTARE_DTCP(1));
+ spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
+ dspi->tx, !dspi->irq);
+
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
case DSPI_EOQ_MODE:
@@ -815,8 +827,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dev_err(&dspi->pdev->dev,
"Waiting for transfer to complete failed!\n");
- if (transfer->delay_usecs)
- udelay(transfer->delay_usecs);
+ spi_transfer_delay_exec(transfer);
}
out:
@@ -1006,6 +1017,25 @@ static void dspi_init(struct fsl_dspi *dspi)
SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}
+static int dspi_slave_abort(struct spi_master *master)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /*
+ * Terminate all pending DMA transactions for the SPI working
+ * in SLAVE mode.
+ */
+ dmaengine_terminate_sync(dspi->dma->chan_rx);
+ dmaengine_terminate_sync(dspi->dma->chan_tx);
+
+ /* Clear the internal DSPI RX and TX FIFO buffers */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+
+ return 0;
+}
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1030,6 +1060,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
+ ctlr->slave_abort = dspi_slave_abort;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
pdata = dev_get_platdata(&pdev->dev);
@@ -1114,6 +1145,9 @@ static int dspi_probe(struct platform_device *pdev)
dspi_init(dspi);
+ if (dspi->devtype_data->trans_mode == DSPI_TCFQ_MODE)
+ goto poll_mode;
+
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
dev_info(&pdev->dev,
@@ -1132,6 +1166,7 @@ static int dspi_probe(struct platform_device *pdev)
init_waitqueue_head(&dspi->waitq);
poll_mode:
+
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
@@ -1143,6 +1178,8 @@ poll_mode:
ctlr->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+ ctlr->ptp_sts_supported = dspi->devtype_data->ptp_sts_supported;
+
platform_set_drvdata(pdev, ctlr);
ret = spi_register_controller(ctlr);
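
The pre/post timestamping hooks bracket the word whose departure the PTP system timestamp should describe: one snapshot right before the FIFO is fed, one right after the transfer counter confirms completion. A toy model of that bracketing, under the assumption that the core merely stores two clock readings:

#include <stdio.h>
#include <time.h>

struct ptp_sts { struct timespec pre, post; };

/* Snapshot taken just before the watched word is pushed out */
static void take_timestamp_pre(struct ptp_sts *s)
{
	clock_gettime(CLOCK_MONOTONIC, &s->pre);
}

/* ...and just after it completed: the truth lies in between */
static void take_timestamp_post(struct ptp_sts *s)
{
	clock_gettime(CLOCK_MONOTONIC, &s->post);
}

int main(void)
{
	struct ptp_sts s;

	take_timestamp_pre(&s);
	/* the word would be clocked out here */
	take_timestamp_post(&s);

	printf("window: %ld ns\n",
	       (long)((s.post.tv_sec - s.pre.tv_sec) * 1000000000L +
		      (s.post.tv_nsec - s.pre.tv_nsec)));
	return 0;
}
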
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index f20326714b9d..e60581283a24 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -427,8 +427,7 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
ret = fsl_espi_bufs(spi, trans);
- if (trans->delay_usecs)
- udelay(trans->delay_usecs);
+ spi_transfer_delay_exec(trans);
return ret;
}
@@ -437,6 +436,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
unsigned int delay_usecs = 0, rx_nbits = 0;
+ unsigned int delay_nsecs = 0, delay_nsecs1 = 0;
struct spi_transfer *t, trans = {};
int ret;
@@ -445,8 +445,16 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
goto out;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->delay_usecs > delay_usecs)
- delay_usecs = t->delay_usecs;
+ if (t->delay_usecs) {
+ if (t->delay_usecs > delay_usecs) {
+ delay_usecs = t->delay_usecs;
+ delay_nsecs = delay_usecs * 1000;
+ }
+ } else {
+ delay_nsecs1 = spi_delay_to_ns(&t->delay, t);
+ if (delay_nsecs1 > delay_nsecs)
+ delay_nsecs = delay_nsecs1;
+ }
if (t->rx_nbits > rx_nbits)
rx_nbits = t->rx_nbits;
}
@@ -457,7 +465,8 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
trans.len = m->frame_length;
trans.speed_hz = t->speed_hz;
trans.bits_per_word = t->bits_per_word;
- trans.delay_usecs = delay_usecs;
+ trans.delay.value = delay_nsecs;
+ trans.delay.unit = SPI_DELAY_UNIT_NSECS;
trans.rx_nbits = rx_nbits;
if (trans.len)
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index d08e9324140e..2cc0ddb4a988 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -675,7 +675,7 @@ static int fsl_lpspi_dma_init(struct device *dev,
int ret;
/* Prepare for TX DMA: */
- controller->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
@@ -684,7 +684,7 @@ static int fsl_lpspi_dma_init(struct device *dev,
}
/* Prepare for RX DMA: */
- controller->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
@@ -779,7 +779,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
- complete(&fsl_lpspi->xfer_done);
+ complete(&fsl_lpspi->xfer_done);
return IRQ_HANDLED;
}
@@ -938,7 +938,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
- return ret;
+ goto out_controller_put;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index c02e24c01136..79b1558b74b8 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -63,6 +63,16 @@
#define QUADSPI_IPCR 0x08
#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+#define QUADSPI_FLSHCR 0x0c
+#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
+#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
+#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
#define QUADSPI_BUF3CR 0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
@@ -95,6 +105,9 @@
#define QUADSPI_FR 0x160
#define QUADSPI_FR_TFF_MASK BIT(0)
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE BIT(0)
+
#define QUADSPI_SPTRCLR 0x16c
#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
@@ -112,9 +125,6 @@
#define QUADSPI_LCKER_LOCK BIT(0)
#define QUADSPI_LCKER_UNLOCK BIT(1)
-#define QUADSPI_RSER 0x164
-#define QUADSPI_RSER_TFIE BIT(0)
-
#define QUADSPI_LUT_BASE 0x310
#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
@@ -181,9 +191,16 @@
*/
#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+/*
+ * Controller uses TDH bits in register QUADSPI_FLSHCR.
+ * They need to be set in accordance with the DDR/SDR mode.
+ */
+#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
+
struct fsl_qspi_devtype_data {
unsigned int rxfifo;
unsigned int txfifo;
+ int invalid_mstrid;
unsigned int ahb_buf_size;
unsigned int quirks;
bool little_endian;
@@ -192,6 +209,7 @@ struct fsl_qspi_devtype_data {
static const struct fsl_qspi_devtype_data vybrid_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
.little_endian = true,
@@ -200,6 +218,7 @@ static const struct fsl_qspi_devtype_data vybrid_data = {
static const struct fsl_qspi_devtype_data imx6sx_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
.little_endian = true,
@@ -208,22 +227,27 @@ static const struct fsl_qspi_devtype_data imx6sx_data = {
static const struct fsl_qspi_devtype_data imx7d_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
- .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx6ul_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
- .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data ls1021a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = 0,
.little_endian = false,
@@ -233,6 +257,7 @@ static const struct fsl_qspi_devtype_data ls2080a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.ahb_buf_size = SZ_1K,
+ .invalid_mstrid = 0x0,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
.little_endian = true,
};
@@ -275,6 +300,11 @@ static inline int needs_amba_base_offset(struct fsl_qspi *q)
return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}
+static inline int needs_tdh_setting(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
+}
+
/*
* An IC bug makes it necessary to rearrange the 32-bit data.
* Later chips, such as IMX6SLX, have fixed this bug.
@@ -615,6 +645,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
void __iomem *base = q->iobase;
u32 addr_offset = 0;
int err = 0;
+ int invalid_mstrid = q->devtype_data->invalid_mstrid;
mutex_lock(&q->lock);
@@ -638,6 +669,10 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
base + QUADSPI_SPTRCLR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);
+
fsl_qspi_prepare_lut(q, op);
/*
@@ -710,6 +745,16 @@ static int fsl_qspi_default_setup(struct fsl_qspi *q)
qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
base + QUADSPI_MCR);
+ /*
+ * Previous boot stages (BootROM, bootloader) might have used DDR
+ * mode and left the TDH bits set. As we currently use SDR mode
+ * only, clear the TDH bits if necessary.
+ */
+ if (needs_tdh_setting(q))
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
+ ~QUADSPI_FLSHCR_TDH_MASK,
+ base + QUADSPI_FLSHCR);
+
reg = qspi_readl(q, base + QUADSPI_SMPR);
qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
| QUADSPI_SMPR_FSPHS_MASK
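
Clearing the TDH field is a plain read-modify-write on QUADSPI_FLSHCR that leaves the neighbouring TCSS/TCSH fields untouched. The same masking, reduced to a self-contained demo whose mask values are copied from the defines above:

#include <stdio.h>
#include <stdint.h>

#define FLSHCR_TCSS_MASK 0x0000000fu	/* GENMASK(3, 0)   */
#define FLSHCR_TCSH_MASK 0x00000f00u	/* GENMASK(11, 8)  */
#define FLSHCR_TDH_MASK  0x00030000u	/* GENMASK(17, 16) */

int main(void)
{
	uint32_t flshcr = 0x00030303u;	/* TDH left set by the BootROM */

	flshcr &= ~FLSHCR_TDH_MASK;	/* SDR mode: TDH must be 0 */

	printf("FLSHCR = 0x%08x\n", flshcr);	/* 0x00000303 */
	return 0;
}
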
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 4b80ace1d137..114801a32371 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -416,8 +416,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
}
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change) {
ndelay(nsecs);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 1d3e23ec20a6..7ceb0ba27b75 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -362,19 +362,18 @@ static int spi_gpio_probe(struct platform_device *pdev)
struct spi_gpio *spi_gpio;
struct device *dev = &pdev->dev;
struct spi_bitbang *bb;
- const struct of_device_id *of_id;
-
- of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev);
master = spi_alloc_master(dev, sizeof(*spi_gpio));
if (!master)
return -ENOMEM;
status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
- if (status)
+ if (status) {
+ spi_master_put(master);
return status;
+ }
- if (of_id)
+ if (pdev->dev.of_node)
status = spi_gpio_probe_dt(pdev, master);
else
status = spi_gpio_probe_pdata(pdev, master);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 439b01e4a2c8..f4a8f470aecc 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -673,6 +673,8 @@ static int img_spfi_probe(struct platform_device *pdev)
dma_release_channel(spfi->tx_ch);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
+ spfi->tx_ch = NULL;
+ spfi->rx_ch = NULL;
dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
} else {
master->dma_tx = spfi->tx_ch;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 09c9a1edb2c6..49f0099db0cb 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1272,7 +1272,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
/* Prepare for TX DMA: */
- master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
ret = PTR_ERR(master->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
@@ -1281,7 +1281,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
}
/* Prepare for RX : */
- master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ master->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(master->dma_rx)) {
ret = PTR_ERR(master->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index 9dfe8b04e688..1fd7ee53d451 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -797,7 +797,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_master *master;
- struct resource *res;
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
const struct of_device_id *match;
@@ -812,12 +811,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
}
hwcfg = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get resources\n");
- return -ENXIO;
- }
-
rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
if (rx_irq < 0)
return -ENXIO;
@@ -839,8 +832,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
spi->dev = dev;
spi->hwcfg = hwcfg;
platform_set_drvdata(pdev, spi);
-
- spi->regbase = devm_ioremap_resource(dev, res);
+ spi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regbase)) {
err = PTR_ERR(spi->regbase);
goto err_master_put;
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 6f18d4952767..b6d79cd156fb 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -298,12 +298,18 @@ static struct spi_test spi_tests[] = {
{
.tx_buf = TX(0),
.rx_buf = RX(0),
- .delay_usecs = 1000,
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
},
{
.tx_buf = TX(0),
.rx_buf = RX(0),
- .delay_usecs = 1000,
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
},
},
},
@@ -537,7 +543,7 @@ static int spi_test_check_elapsed_time(struct spi_device *spi,
unsigned long long nbits = (unsigned long long)BITS_PER_BYTE *
xfer->len;
- delay_usecs += xfer->delay_usecs;
+ delay_usecs += xfer->delay.value;
if (!xfer->speed_hz)
continue;
estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz);
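
The loopback tests show the new idiom for a transfer delay: instead of a bare microsecond count, each transfer carries an explicit { value, unit } pair. A small initializer-style example of the same shape, with the struct stripped down to the fields used here (the _ex names are illustrative):

#include <stdio.h>

enum { DELAY_UNIT_USECS, DELAY_UNIT_NSECS };

struct spi_delay_ex { unsigned short value; unsigned char unit; };

struct spi_transfer_ex {
	const void *tx_buf;
	void *rx_buf;
	struct spi_delay_ex delay;
};

int main(void)
{
	static char tx[8], rx[8];
	/* 1000 us after the transfer, expressed with an explicit unit */
	struct spi_transfer_ex t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.delay = { .value = 1000, .unit = DELAY_UNIT_USECS },
	};

	printf("delay: %u (unit %u)\n", t.delay.value, t.delay.unit);
	return 0;
}
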
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 9f0fa9f3116d..e5a46f0eb93b 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -286,7 +286,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
- if (ctlr->mem_ops) {
+ if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index a337b842ae8c..ea1b07953d38 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -311,8 +311,7 @@ static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
break;
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change)
mpc512x_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index c7e478b9b586..17935e71b02f 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -234,8 +234,7 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
break;
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change)
mpc52xx_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 6888a4dcff6d..6783e12c40c2 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -139,7 +139,6 @@ static const struct mtk_spi_compatible mt8183_compat = {
* supplies it.
*/
static const struct mtk_chip_config mtk_default_chip_info = {
- .cs_pol = 0,
.sample_sel = 0,
};
@@ -230,10 +229,12 @@ static int mtk_spi_prepare_message(struct spi_master *master,
#endif
if (mdata->dev_comp->enhance_timing) {
- if (chip_config->cs_pol)
+ /* set CS polarity */
+ if (spi->mode & SPI_CS_HIGH)
reg_val |= SPI_CMD_CS_POL;
else
reg_val &= ~SPI_CMD_CS_POL;
+
if (chip_config->sample_sel)
reg_val |= SPI_CMD_SAMPLE_SEL;
else
@@ -264,6 +265,9 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
u32 reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
reg_val = readl(mdata->base + SPI_CMD_REG);
if (!enable) {
reg_val |= SPI_CMD_PAUSE_EN;
@@ -619,7 +623,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct mtk_spi *mdata;
const struct of_device_id *of_id;
- struct resource *res;
int i, irq, ret, addr_bits;
master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
@@ -647,6 +650,10 @@ static int mtk_spi_probe(struct platform_device *pdev)
mdata = spi_master_get_devdata(master);
mdata->dev_comp = of_id->data;
+
+ if (mdata->dev_comp->enhance_timing)
+ master->mode_bits |= SPI_CS_HIGH;
+
if (mdata->dev_comp->must_tx)
master->flags = SPI_MASTER_MUST_TX;
@@ -682,15 +689,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, master);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "failed to determine base address\n");
- goto err_put_master;
- }
-
- mdata->base = devm_ioremap_resource(&pdev->dev, res);
+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdata->base)) {
ret = PTR_ERR(mdata->base);
goto err_put_master;
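
Since the controller now advertises SPI_CS_HIGH, mtk_spi_set_cs() must map the core's logical enable onto the wire level, which for an active-high chip select means flipping the flag before touching the pause bit. The inversion is a single conditional negation, as this standalone sketch with illustrative names shows:

#include <stdbool.h>
#include <stdio.h>

#define MODE_CS_HIGH 0x04	/* active-high chip select */

/* Map the core's logical enable onto the physical CS level */
static bool cs_level(unsigned int mode, bool enable)
{
	if (mode & MODE_CS_HIGH)
		enable = !enable;	/* invert for active-high parts */
	return enable;
}

int main(void)
{
	printf("active-low,  assert -> %d\n", cs_level(0, true));
	printf("active-high, assert -> %d\n", cs_level(MODE_CS_HIGH, true));
	return 0;
}
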
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index f48563c09b97..69491f3a515d 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -145,8 +145,8 @@
#define LWR_SUSP_CTRL_EN BIT(31)
#define DMAS_CTRL 0x9c
-#define DMAS_CTRL_DIR_READ BIT(31)
-#define DMAS_CTRL_EN BIT(30)
+#define DMAS_CTRL_EN BIT(31)
+#define DMAS_CTRL_DIR_READ BIT(30)
#define DATA_STROB 0xa0
#define DATA_STROB_EDO_EN BIT(2)
@@ -275,7 +275,7 @@ static void mxic_spi_hw_init(struct mxic_spi *mxic)
writel(0, mxic->regs + HC_EN);
writel(0, mxic->regs + LRD_CFG);
writel(0, mxic->regs + LRD_CTRL);
- writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NAND) |
+ writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NOR) |
HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
mxic->regs + HC_CFG);
}
@@ -346,7 +346,7 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
if (op->addr.nbytes > 7)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index b191d57d1dc0..fe624731c74c 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -293,7 +293,6 @@ static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
{
struct npcm_pspi *priv = dev_id;
- u16 val;
u8 stat;
stat = ioread8(priv->base + NPCM_PSPI_STAT);
@@ -303,7 +302,7 @@ static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
if (priv->tx_buf) {
if (stat & NPCM_PSPI_STAT_RBF) {
- val = ioread8(NPCM_PSPI_DATA + priv->base);
+ ioread8(NPCM_PSPI_DATA + priv->base);
if (priv->tx_bytes == 0) {
npcm_pspi_disable(priv);
complete(&priv->xfer_done);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 501b923f2c27..c36bb1bb464e 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1027,7 +1027,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
- ret = spi_register_controller(ctlr);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret)
goto err_destroy_mutex;
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index b955ca8796d2..5c704ba6d8ea 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -128,7 +128,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
static int spi100k_read_data(struct spi_master *master, int len)
{
- int dataH, dataL;
+ int dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
@@ -146,7 +146,7 @@ static int spi100k_read_data(struct spi_master *master, int len)
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
- dataH = readw(spi100k->base + SPI_RX_MSB);
+ readw(spi100k->base + SPI_RX_MSB);
spi100k_disable_clock(master);
return dataL;
@@ -321,8 +321,7 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
}
}
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
/* ignore the "leave it on after last xfer" hint */
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 848e03e5f42d..7e2292c11d12 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -397,30 +397,26 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
+ struct dma_async_tx_descriptor *tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- if (mcspi_dma->dma_tx) {
- struct dma_async_tx_descriptor *tx;
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
- dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
- xfer->tx_sg.nents,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (tx) {
- tx->callback = omap2_mcspi_tx_callback;
- tx->callback_param = spi;
- dmaengine_submit(tx);
- } else {
- /* FIXME: fall back to PIO? */
- }
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
}
dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
-
}
static unsigned
@@ -439,6 +435,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
int word_len, element_count;
struct omap2_mcspi_cs *cs = spi->controller_state;
void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ struct dma_async_tx_descriptor *tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -462,55 +459,47 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
else /* word_len <= 32 */
element_count = count >> 2;
- if (mcspi_dma->dma_rx) {
- struct dma_async_tx_descriptor *tx;
- dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+ /*
+ * Reduce DMA transfer length by one more if McSPI is
+ * configured in turbo mode.
+ */
+ if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
+ transfer_reduction += es;
+
+ if (transfer_reduction) {
+ /* Split sgl into two. The second sgl won't be used. */
+ sizes[0] = count - transfer_reduction;
+ sizes[1] = transfer_reduction;
+ nb_sizes = 2;
+ } else {
/*
- * Reduce DMA transfer length by one more if McSPI is
- * configured in turbo mode.
+ * Don't bother splitting the sgl. This essentially
+ * clones the original sgl.
*/
- if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
- transfer_reduction += es;
-
- if (transfer_reduction) {
- /* Split sgl into two. The second sgl won't be used. */
- sizes[0] = count - transfer_reduction;
- sizes[1] = transfer_reduction;
- nb_sizes = 2;
- } else {
- /*
- * Don't bother splitting the sgl. This essentially
- * clones the original sgl.
- */
- sizes[0] = count;
- nb_sizes = 1;
- }
+ sizes[0] = count;
+ nb_sizes = 1;
+ }
- ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents,
- 0, nb_sizes,
- sizes,
- sg_out, out_mapped_nents,
- GFP_KERNEL);
+ ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
+ sizes, sg_out, out_mapped_nents, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&spi->dev, "sg_split failed\n");
- return 0;
- }
+ if (ret < 0) {
+ dev_err(&spi->dev, "sg_split failed\n");
+ return 0;
+ }
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx,
- sg_out[0],
- out_mapped_nents[0],
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (tx) {
- tx->callback = omap2_mcspi_rx_callback;
- tx->callback_param = spi;
- dmaengine_submit(tx);
- } else {
- /* FIXME: fall back to PIO? */
- }
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
+ out_mapped_nents[0], DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
}
dma_async_issue_pending(mcspi_dma->dma_rx);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 6643ccdc2508..c7266ef295fd 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -467,8 +467,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
goto out;
count--;
- if (xfer->word_delay_usecs)
- udelay(xfer->word_delay_usecs);
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (count);
} else if (word_len == 16) {
const u16 *tx = xfer->tx_buf;
@@ -478,8 +477,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
goto out;
count -= 2;
- if (xfer->word_delay_usecs)
- udelay(xfer->word_delay_usecs);
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (count);
}
@@ -772,9 +770,6 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status < 0)
goto out_rel_pm;
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
-
master->dev.of_node = pdev->dev.of_node;
status = spi_register_master(master);
if (status < 0)
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 69f517ec59c6..156961b4ca86 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -606,25 +606,30 @@ static void pic32_spi_cleanup(struct spi_device *spi)
gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
}
-static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
+static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
struct spi_master *master = pic32s->master;
- dma_cap_mask_t mask;
+ int ret = 0;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ master->dma_rx = dma_request_chan(dev, "spi-rx");
+ if (IS_ERR(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ dev_warn(dev, "RX channel not found.\n");
- master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL,
- dev, "spi-rx");
- if (!master->dma_rx) {
- dev_warn(dev, "RX channel not found.\n");
+ master->dma_rx = NULL;
goto out_err;
}
- master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL,
- dev, "spi-tx");
- if (!master->dma_tx) {
- dev_warn(dev, "TX channel not found.\n");
+ master->dma_tx = dma_request_chan(dev, "spi-tx");
+ if (IS_ERR(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ dev_warn(dev, "TX channel not found.\n");
+
+ master->dma_tx = NULL;
goto out_err;
}
@@ -634,14 +639,20 @@ static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
/* DMA channels allocated and prepared */
set_bit(PIC32F_DMA_PREP, &pic32s->flags);
- return;
+ return 0;
out_err:
- if (master->dma_rx)
+ if (master->dma_rx) {
dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
- if (master->dma_tx)
+ if (master->dma_tx) {
dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
+ return ret;
}
static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
@@ -776,7 +787,10 @@ static int pic32_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
/* optional DMA support */
- pic32_spi_dma_prep(pic32s, &pdev->dev);
+ ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
+ if (ret)
+ goto err_bailout;
+
if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
master->can_dma = pic32_spi_can_dma;
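
The pic32 conversion illustrates why dma_request_chan() replaces the old compat helper: it returns an ERR_PTR, so -EPROBE_DEFER can be distinguished from a genuinely absent channel and propagated instead of silently falling back to PIO. A standalone model of that three-way decision; the error encoding and names are stand-ins:

#include <stdio.h>

#define EPROBE_DEFER 517	/* same value the kernel uses */
#define ENODEV       19

/* Stand-in for dma_request_chan(): <0 encodes an error */
static long request_chan(int simulate_err)
{
	return simulate_err ? -simulate_err : 0x1000 /* fake handle */;
}

static int dma_prep(int simulate_err)
{
	long chan = request_chan(simulate_err);

	if (chan < 0) {
		if (chan == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry probe later */
		fprintf(stderr, "channel not found, using PIO\n");
		return 0;			/* optional: not fatal */
	}
	/* ...configure the channel... */
	return 0;
}

int main(void)
{
	printf("%d\n", dma_prep(EPROBE_DEFER));	/* -517 */
	printf("%d\n", dma_prep(ENODEV));	/* 0, warns */
	return 0;
}
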
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 7fedea67159c..66028ebbc336 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -485,12 +485,11 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
- if (last_transfer->delay_usecs)
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- udelay(last_transfer->delay_usecs);
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ spi_transfer_delay_exec(last_transfer);
if (!last_transfer->cs_change) {
struct spi_message *next_msg;
@@ -1159,7 +1158,7 @@ static int pl022_dma_autoprobe(struct pl022 *pl022)
int err;
/* automatically configure DMA channels from platform, normally using DT */
- chan = dma_request_slave_channel_reason(dev, "rx");
+ chan = dma_request_chan(dev, "rx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_no_rxchan;
@@ -1167,7 +1166,7 @@ static int pl022_dma_autoprobe(struct pl022 *pl022)
pl022->dma_rx_channel = chan;
- chan = dma_request_slave_channel_reason(dev, "tx");
+ chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_no_txchan;
@@ -1401,12 +1400,11 @@ static void pump_transfers(unsigned long data)
previous = list_entry(transfer->transfer_list.prev,
struct spi_transfer,
transfer_list);
- if (previous->delay_usecs)
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- udelay(previous->delay_usecs);
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ spi_transfer_delay_exec(previous);
/* Reselect chip select only if cs_change was requested */
if (previous->cs_change)
@@ -1520,8 +1518,7 @@ static void do_polling_transfer(struct pl022 *pl022)
previous =
list_entry(transfer->transfer_list.prev,
struct spi_transfer, transfer_list);
- if (previous->delay_usecs)
- udelay(previous->delay_usecs);
+ spi_transfer_delay_exec(previous);
if (previous->cs_change)
pl022_cs_control(pl022, SSP_CHIP_SELECT);
} else {
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index bb6a14d1ab0f..16b6b2ad4e7c 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -4,27 +4,29 @@
* Copyright (C) 2013, Intel Corporation
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <linux/acpi.h>
-#include <linux/of_device.h>
#include "spi-pxa2xx.h"
@@ -1457,6 +1459,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
+ /* CML-H */
+ { PCI_VDEVICE(INTEL, 0x06aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x06ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x06fb), LPSS_CNL_SSP },
/* TGL-LP */
{ PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
@@ -1476,11 +1482,13 @@ MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
#ifdef CONFIG_ACPI
-static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+static int pxa2xx_spi_get_port_id(struct device *dev)
{
+ struct acpi_device *adev;
unsigned int devid;
int port_id = -1;
+ adev = ACPI_COMPANION(dev);
if (adev && adev->pnp.unique_id &&
!kstrtouint(adev->pnp.unique_id, 0, &devid))
port_id = devid;
@@ -1489,7 +1497,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
#else /* !CONFIG_ACPI */
-static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+static int pxa2xx_spi_get_port_id(struct device *dev)
{
return -1;
}
@@ -1510,34 +1518,22 @@ static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_controller *pdata;
- struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
- const struct acpi_device_id *adev_id = NULL;
+ struct device *parent = pdev->dev.parent;
+ struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL;
const struct pci_device_id *pcidev_id = NULL;
- const struct of_device_id *of_id = NULL;
enum pxa_ssp_type type;
+ const void *match;
- adev = ACPI_COMPANION(&pdev->dev);
-
- if (pdev->dev.of_node)
- of_id = of_match_device(pdev->dev.driver->of_match_table,
- &pdev->dev);
- else if (dev_is_pci(pdev->dev.parent))
- pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
- to_pci_dev(pdev->dev.parent));
- else if (adev)
- adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
- &pdev->dev);
- else
- return NULL;
+ if (pcidev)
+ pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev);
- if (adev_id)
- type = (enum pxa_ssp_type)adev_id->driver_data;
+ match = device_get_match_data(&pdev->dev);
+ if (match)
+ type = (enum pxa_ssp_type)match;
else if (pcidev_id)
type = (enum pxa_ssp_type)pcidev_id->driver_data;
- else if (of_id)
- type = (enum pxa_ssp_type)of_id->data;
else
return NULL;
@@ -1545,32 +1541,36 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
if (!pdata)
return NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return NULL;
-
ssp = &pdata->ssp;
- ssp->phys_base = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ssp->mmio_base))
return NULL;
+ ssp->phys_base = res->start;
+
#ifdef CONFIG_PCI
if (pcidev_id) {
- pdata->tx_param = pdev->dev.parent;
- pdata->rx_param = pdev->dev.parent;
+ pdata->tx_param = parent;
+ pdata->rx_param = parent;
pdata->dma_filter = pxa2xx_spi_idma_filter;
}
#endif
ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return NULL;
+
ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return NULL;
+
ssp->type = type;
- ssp->pdev = pdev;
- ssp->port_id = pxa2xx_spi_get_port_id(adev);
+ ssp->dev = &pdev->dev;
+ ssp->port_id = pxa2xx_spi_get_port_id(&pdev->dev);
- pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
+ pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
pdata->dma_burst_size = 1;
@@ -1602,6 +1602,11 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
return cs;
}
+static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
+{
+ return MAX_DMA_LEN;
+}
+
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1707,6 +1712,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
} else {
controller->can_dma = pxa2xx_spi_can_dma;
controller->max_dma_len = MAX_DMA_LEN;
+ controller->max_transfer_size =
+ pxa2xx_spi_max_dma_transfer_size;
}
}
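
pxa2xx_spi_init_pdata() used to open-code three match lookups (OF table, ACPI table, PCI id); device_get_match_data() collapses the first two into one firmware-agnostic call, leaving only the PCI compound device as a special case. A sketch of the same fallback order with stubbed lookups:

#include <stdio.h>

enum ssp_type { SSP_UNKNOWN, LPSS_CNL_SSP = 7 };

/* Stand-ins: one generic firmware lookup, one PCI-id lookup */
static const void *get_match_data(int has_fw_node)
{
	return has_fw_node ? (const void *)(long)LPSS_CNL_SSP : NULL;
}
static long pci_id_data(int is_pci) { return is_pci ? LPSS_CNL_SSP : 0; }

static enum ssp_type resolve_type(int has_fw_node, int is_pci)
{
	const void *match = get_match_data(has_fw_node);

	if (match)			/* OF or ACPI matched */
		return (enum ssp_type)(long)match;
	if (pci_id_data(is_pci))	/* PCI compound device */
		return (enum ssp_type)pci_id_data(is_pci);
	return SSP_UNKNOWN;
}

int main(void)
{
	printf("%d %d %d\n", resolve_type(1, 0), resolve_type(0, 1),
	       resolve_type(0, 0));	/* 7 7 0 */
	return 0;
}
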
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 2f559e531100..dd3434a407ea 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -932,11 +932,11 @@ static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
int ret;
/* allocate dma resources, if available */
- master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ master->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(master->dma_rx))
return PTR_ERR(master->dma_rx);
- master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
ret = PTR_ERR(master->dma_tx);
goto err_tx;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 15f5723d9f95..7222c7689c3c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1257,9 +1257,9 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
- ret = platform_get_irq_byname(pdev, "rx");
+ ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
- ret = platform_get_irq_byname(pdev, "mux");
+ ret = platform_get_irq_byname_optional(pdev, "mux");
if (ret < 0)
ret = platform_get_irq(pdev, 0);
if (ret >= 0)
@@ -1270,10 +1270,6 @@ static int rspi_probe(struct platform_device *pdev)
if (ret >= 0)
rspi->tx_irq = ret;
}
- if (ret < 0) {
- dev_err(&pdev->dev, "platform_get_irq error\n");
- goto error2;
- }
if (rspi->rx_irq == rspi->tx_irq) {
/* Single multiplexed interrupt */
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7b7151ec14c8..cf67ea60dc0e 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1154,15 +1154,13 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (!is_polling(sdd)) {
/* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
- "rx");
+ sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR(sdd->rx_dma.ch)) {
dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
ret = PTR_ERR(sdd->rx_dma.ch);
goto err_disable_io_clk;
}
- sdd->tx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
- "tx");
+ sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(sdd->tx_dma.ch)) {
dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
ret = PTR_ERR(sdd->tx_dma.ch);
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 11acddc83304..5497eeb3bf3e 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -211,8 +211,7 @@ static int sc18is602_transfer_one(struct spi_master *master,
}
status = 0;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
}
m->status = status;
spi_finalize_current_message(master);
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 7f73f91d412a..a62034e2a7cb 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -190,8 +190,7 @@ static int hspi_transfer_one_message(struct spi_controller *ctlr,
msg->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change) {
ndelay(nsecs);
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
index 35254bdc42c4..f7c1e20432e0 100644
--- a/drivers/spi/spi-sifive.c
+++ b/drivers/spi/spi-sifive.c
@@ -357,14 +357,14 @@ static int sifive_spi_probe(struct platform_device *pdev)
if (!cs_bits) {
dev_err(&pdev->dev, "Could not auto probe CS lines\n");
ret = -EINVAL;
- goto put_master;
+ goto disable_clk;
}
num_cs = ilog2(cs_bits) + 1;
if (num_cs > SIFIVE_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
ret = -EINVAL;
- goto put_master;
+ goto disable_clk;
}
/* Define our master */
@@ -393,7 +393,7 @@ static int sifive_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), spi);
if (ret) {
dev_err(&pdev->dev, "Unable to bind to interrupt\n");
- goto put_master;
+ goto disable_clk;
}
dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
@@ -402,11 +402,13 @@ static int sifive_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master failed\n");
- goto put_master;
+ goto disable_clk;
}
return 0;
+disable_clk:
+ clk_disable_unprepare(spi->clk);
put_master:
spi_master_put(master);
@@ -420,6 +422,7 @@ static int sifive_spi_remove(struct platform_device *pdev)
/* Disable all the interrupts just in case */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+ clk_disable_unprepare(spi->clk);
return 0;
}
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
index 61bc43b0fe57..44edaa360405 100644
--- a/drivers/spi/spi-slave-mt27xx.c
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -368,7 +368,6 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_spi_slave *mdata;
- struct resource *res;
int irq, ret;
ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
@@ -392,17 +391,8 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctlr);
init_completion(&mdata->xfer_done);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "failed to determine base address\n");
- goto err_put_ctlr;
- }
-
mdata->dev = &pdev->dev;
-
- mdata->base = devm_ioremap_resource(&pdev->dev, res);
+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdata->base)) {
ret = PTR_ERR(mdata->base);
goto err_put_ctlr;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 9a051286f120..87dadb6b8ebf 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -77,6 +77,7 @@
/* Bits definitions for register REG_WDG_CTRL */
#define BIT_WDG_RUN BIT(1)
+#define BIT_WDG_NEW BIT(2)
#define BIT_WDG_RST BIT(3)
/* Register definitions for PMIC */
@@ -383,6 +384,10 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
/* Unlock the watchdog */
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
+ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+ val |= BIT_WDG_NEW;
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+
/* Load the watchdog timeout value, 50ms is always enough. */
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
WDG_LOAD_VAL & WDG_LOAD_MASK);
@@ -393,6 +398,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
val |= BIT_WDG_RUN | BIT_WDG_RST;
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+ /* Lock the watchdog */
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
+
mdelay(1000);
dev_emerg(sadi->dev, "Unable to restart system\n");
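
The restart handler is a fixed register protocol: unlock the watchdog, switch it to the new counting mode, load a roughly 50 ms timeout, start it with reset enabled, and lock it again so nothing disturbs the countdown. Spelled out as stubbed register writes; the register indices and unlock key are placeholders, not the PMIC's real map:

#include <stdio.h>
#include <stdint.h>

#define BIT_WDG_RUN (1u << 1)
#define BIT_WDG_NEW (1u << 2)
#define BIT_WDG_RST (1u << 3)

static uint32_t regs[8];	/* fake register file */
enum { WDG_LOCK, WDG_CTRL, WDG_LOAD_LOW };
#define WDG_UNLOCK_KEY 0xe551u	/* placeholder key */

static void wr(int r, uint32_t v)
{
	regs[r] = v;
	printf("reg%d <= 0x%x\n", r, v);
}
static uint32_t rd(int r) { return regs[r]; }

int main(void)
{
	wr(WDG_LOCK, WDG_UNLOCK_KEY);			  /* 1. unlock        */
	wr(WDG_CTRL, rd(WDG_CTRL) | BIT_WDG_NEW);	  /* 2. new count mode */
	wr(WDG_LOAD_LOW, 50);				  /* 3. ~50 ms timeout */
	wr(WDG_CTRL, rd(WDG_CTRL) | BIT_WDG_RUN | BIT_WDG_RST); /* 4. run+rst */
	wr(WDG_LOCK, (uint16_t)~WDG_UNLOCK_KEY);	  /* 5. lock again    */
	return 0;
}
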
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 8c9021b7f7a9..2ee1feb41681 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -669,11 +669,15 @@ static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
}
-static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
+static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
{
+ struct spi_delay *d = &t->word_delay;
u16 word_delay, interval;
u32 val;
+ if (d->unit != SPI_DELAY_UNIT_SCK)
+ return -EINVAL;
+
val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
/* Set default chip selection, clock phase and clock polarity */
@@ -686,7 +690,7 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
* formula as below per datasheet:
* interval time (source clock cycles) = interval * 4 + 10.
*/
- word_delay = clamp_t(u16, t->word_delay, SPRD_SPI_MIN_DELAY_CYCLE,
+ word_delay = clamp_t(u16, d->value, SPRD_SPI_MIN_DELAY_CYCLE,
SPRD_SPI_MAX_DELAY_CYCLE);
interval = DIV_ROUND_UP(word_delay - 10, 4);
ss->word_delay = interval * 4 + 10;
@@ -711,6 +715,8 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
val &= ~SPRD_SPI_DATA_LINE2_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
+
+ return 0;
}
static int sprd_spi_setup_transfer(struct spi_device *sdev,
@@ -719,13 +725,16 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev,
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u8 bits_per_word = t->bits_per_word;
u32 val, mode = 0;
+ int ret;
ss->len = t->len;
ss->tx_buf = t->tx_buf;
ss->rx_buf = t->rx_buf;
ss->hw_mode = sdev->mode;
- sprd_spi_init_hw(ss, t);
+ ret = sprd_spi_init_hw(ss, t);
+ if (ret)
+ return ret;
/* Set transfer speed and valid bits */
sprd_spi_set_speed(ss, t->speed_hz);
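Since sprd_spi_init_hw() now rejects any unit other than SPI_DELAY_UNIT_SCK, a client of this controller has to express its word delay in clock cycles. A hypothetical caller-side fragment:

#include <linux/spi/spi.h>

static int demo_xfer(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer t = {
		.tx_buf = buf,
		.len = len,
		.word_delay = {
			.value = 16,			/* 16 SCK cycles */
			.unit = SPI_DELAY_UNIT_SCK,	/* anything else: -EINVAL */
		},
	};

	return spi_sync_transfer(spi, &t, 1);
}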
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 0c24c494f386..77d26d64541a 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -381,6 +381,7 @@ static int spi_st_probe(struct platform_device *pdev)
return 0;
clk_disable:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
@@ -392,6 +393,8 @@ static int spi_st_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
+ pm_runtime_disable(&pdev->dev);
+
clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 9ac6f9fe13cf..4e726929bb4f 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -528,7 +528,6 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
clk_disable_unprepare(qspi->clk);
- spi_master_put(qspi->ctrl);
}
static int stm32_qspi_probe(struct platform_device *pdev)
@@ -626,6 +625,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
err:
stm32_qspi_release(qspi);
+ spi_master_put(qspi->ctrl);
+
return ret;
}
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 39374c2edcf3..fc40ab146c86 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -666,8 +666,7 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
dma_addr_t dma_phys;
int ret;
- dma_chan = dma_request_slave_channel_reason(tspi->dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
if (ret != -EPROBE_DEFER)
@@ -723,15 +722,31 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
-static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
- u8 hold_dly, u8 inactive_dly)
+static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
+ struct spi_delay *setup,
+ struct spi_delay *hold,
+ struct spi_delay *inactive)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u8 setup_dly, hold_dly, inactive_dly;
u32 setup_hold;
u32 spi_cs_timing;
u32 inactive_cycles;
u8 cs_state;
+ if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
+ SPI_DELAY_UNIT_SCK);
+ return -EINVAL;
+ }
+
+ setup_dly = setup ? setup->value : 0;
+ hold_dly = hold ? hold->value : 0;
+ inactive_dly = inactive ? inactive->value : 0;
+
setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
if (setup_dly && hold_dly) {
@@ -758,6 +773,8 @@ static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
tspi->spi_cs_timing2 = spi_cs_timing;
tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
}
+
+ return 0;
}
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
@@ -984,17 +1001,6 @@ static int tegra_spi_setup(struct spi_device *spi)
return 0;
}
-static void tegra_spi_transfer_delay(int delay)
-{
- if (!delay)
- return;
-
- if (delay >= 1000)
- mdelay(delay / 1000);
-
- udelay(delay % 1000);
-}
-
static void tegra_spi_transfer_end(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
@@ -1098,7 +1104,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
complete_xfer:
if (ret < 0 || skip) {
tegra_spi_transfer_end(spi);
- tegra_spi_transfer_delay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
goto exit;
} else if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
@@ -1106,11 +1112,11 @@ complete_xfer:
tspi->cs_control = spi;
else {
tegra_spi_transfer_end(spi);
- tegra_spi_transfer_delay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
}
} else if (xfer->cs_change) {
tegra_spi_transfer_end(spi);
- tegra_spi_transfer_delay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
}
}
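With tegra_spi_set_hw_cs_timing() converted, CS timings reach the driver as spi_delay structs and unsupported units are refused up front. A sketch of a hypothetical caller (this controller accepts only SPI_DELAY_UNIT_SCK; NULL means "leave that delay unset"):

#include <linux/spi/spi.h>

static int demo_cs_timing(struct spi_device *spi)
{
	struct spi_delay setup = { .value = 4, .unit = SPI_DELAY_UNIT_SCK };
	struct spi_delay hold = { .value = 2, .unit = SPI_DELAY_UNIT_SCK };

	return spi_set_cs_timing(spi, &setup, &hold, NULL);
}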
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index a841a7250d14..514429379206 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -341,10 +341,11 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
goto exit;
}
msg->actual_length += xfer->len;
- if (xfer->cs_change && xfer->delay_usecs) {
+ if (xfer->cs_change &&
+ (xfer->delay_usecs || xfer->delay.value)) {
tegra_sflash_writel(tsd, tsd->def_command_reg,
SPI_COMMAND);
- udelay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
}
}
ret = 0;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 111fffc91435..7f4d932dade7 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -599,8 +599,7 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
int ret;
struct dma_slave_config dma_sconfig;
- dma_chan = dma_request_slave_channel_reason(tspi->dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
if (ret != -EPROBE_DEFER)
@@ -1073,7 +1072,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
ret = clk_enable(tspi->clk);
if (ret < 0) {
dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
- goto exit_free_master;
+ goto exit_clk_unprepare;
}
spi_irq = platform_get_irq(pdev, 0);
@@ -1146,6 +1145,8 @@ exit_free_irq:
free_irq(spi_irq, tspi);
exit_clk_disable:
clk_disable(tspi->clk);
+exit_clk_unprepare:
+ clk_unprepare(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1159,6 +1160,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
clk_disable(tspi->clk);
+ clk_unprepare(tspi->clk);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
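The slink fix restores the invariant of the split clk API: every clk_prepare() must be matched by clk_unprepare() on every exit path, including the one where only clk_enable() failed. A minimal sketch (demo_clk_on is a hypothetical helper):

#include <linux/clk.h>

static int demo_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;

	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);	/* prepared but never enabled */

	return ret;
}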
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index f88cbb94ce12..223353fa2d8a 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1229,12 +1229,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
- /* check for delay */
- if (data->cur_trans->delay_usecs) {
- dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
- __func__, data->cur_trans->delay_usecs);
- udelay(data->cur_trans->delay_usecs);
- }
+ spi_transfer_delay_exec(data->cur_trans);
spin_lock(&data->lock);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 51759d3fd45f..3606232f190f 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -26,7 +26,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#define SPI_FIFO_SIZE 4
@@ -79,7 +80,7 @@ struct txx9spi {
void __iomem *membase;
int baseclk;
struct clk *clk;
- int last_chipselect;
+ struct gpio_desc *last_chipselect;
int last_chipselect_val;
};
@@ -95,20 +96,22 @@ static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
int on, unsigned int cs_delay)
{
- int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
-
+ /*
+ * The GPIO descriptor will track polarity inversion inside
+ * gpiolib.
+ */
if (on) {
/* deselect the chip with cs_change hint in last transfer */
- if (c->last_chipselect >= 0)
- gpio_set_value(c->last_chipselect,
+ if (c->last_chipselect)
+ gpiod_set_value(c->last_chipselect,
!c->last_chipselect_val);
- c->last_chipselect = spi->chip_select;
- c->last_chipselect_val = val;
+ c->last_chipselect = spi->cs_gpiod;
+ c->last_chipselect_val = on;
} else {
- c->last_chipselect = -1;
+ c->last_chipselect = NULL;
ndelay(cs_delay); /* CS Hold Time */
}
- gpio_set_value(spi->chip_select, val);
+ gpiod_set_value(spi->cs_gpiod, on);
ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
}
@@ -119,12 +122,6 @@ static int txx9spi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- if (gpio_direction_output(spi->chip_select,
- !(spi->mode & SPI_CS_HIGH))) {
- dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
- return -EINVAL;
- }
-
/* deselect chip */
spin_lock(&c->lock);
txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
@@ -248,8 +245,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
len -= count * wsize;
}
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (!cs_change)
continue;
@@ -320,6 +316,47 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
+/*
+ * Chip select uses GPIO only; further, the driver uses the chip select
+ * number (from the device tree "reg" property, which can only come from the
+ * device tree since this is MIPS and there is no way to pass platform data)
+ * as the GPIO number. As the platform has only one GPIO controller (the txx9
+ * GPIO chip), it thus uses the chip select number as an offset into that
+ * chip. This chip has a maximum of 16 GPIOs (0..15), and this is what all
+ * platforms register.
+ *
+ * We modernized this behaviour by explicitly converting that offset to an
+ * offset on the GPIO chip using a GPIO descriptor machine table of the same
+ * size as the txx9 GPIO chip with a 1-to-1 mapping of chip select to GPIO
+ * offset.
+ *
+ * This is admittedly a hack, but it is countering the hack of using "reg" to
+ * contain a GPIO offset when it should be using "cs-gpios" as the SPI bindings
+ * state.
+ */
+static struct gpiod_lookup_table txx9spi_cs_gpio_table = {
+ .dev_id = "spi0",
+ .table = {
+ GPIO_LOOKUP_IDX("TXx9", 0, "cs", 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 1, "cs", 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 2, "cs", 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 3, "cs", 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 4, "cs", 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 5, "cs", 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 6, "cs", 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 7, "cs", 7, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 8, "cs", 8, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 9, "cs", 9, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 10, "cs", 10, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 11, "cs", 11, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 12, "cs", 12, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 13, "cs", 13, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 14, "cs", 14, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 15, "cs", 15, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
static int txx9spi_probe(struct platform_device *dev)
{
struct spi_master *master;
@@ -373,12 +410,14 @@ static int txx9spi_probe(struct platform_device *dev)
if (ret)
goto exit;
- c->last_chipselect = -1;
+ c->last_chipselect = NULL;
dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
(unsigned long long)res->start, irq,
(c->baseclk + 500000) / 1000000);
+ gpiod_add_lookup_table(&txx9spi_cs_gpio_table);
+
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
@@ -387,6 +426,7 @@ static int txx9spi_probe(struct platform_device *dev)
master->transfer = txx9spi_transfer;
master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->use_gpio_descriptors = true;
ret = devm_spi_register_master(&dev->dev, master);
if (ret)
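Condensed, the machine-table technique works as below: a board file registers a lookup table binding the "cs" function of device "spi0" to offsets of the "TXx9" gpiochip, after which descriptors come back with polarity handled inside gpiolib. In the driver above the SPI core performs the lookup itself via use_gpio_descriptors; the explicit consumer call in this sketch is only an illustrative assumption:

#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>

static struct gpiod_lookup_table demo_cs_table = {
	.dev_id = "spi0",
	.table = {
		GPIO_LOOKUP_IDX("TXx9", 0, "cs", 0, GPIO_ACTIVE_LOW),
		{ },
	},
};

static struct gpio_desc *demo_get_cs(struct device *dev)
{
	/* normally done once from board setup code */
	gpiod_add_lookup_table(&demo_cs_table);

	/* dev must be the "spi0" device for the table to match */
	return gpiod_get_index(dev, "cs", 0, GPIOD_OUT_LOW);
}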
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index a3496c46cc1b..1d9b3f03d986 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -188,8 +188,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
}
status = 0;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
is_first = false;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index d5f9d5fbb3e8..8dd2bb99cb4d 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -391,7 +391,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
struct xilinx_spi *xspi;
struct xspi_platform_data *pdata;
struct resource *res;
- int ret, num_cs = 0, bits_per_word = 8;
+ int ret, num_cs = 0, bits_per_word;
struct spi_master *master;
u32 tmp;
u8 i;
@@ -403,6 +403,11 @@ static int xilinx_spi_probe(struct platform_device *pdev)
} else {
of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
&num_cs);
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,num-transfer-bits",
+ &bits_per_word);
+ if (ret)
+ bits_per_word = 8;
}
if (!num_cs) {
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 86516eb1e143..fc2b5eb7d614 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -80,7 +80,6 @@ static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
static int xtfpga_spi_probe(struct platform_device *pdev)
{
struct xtfpga_spi *xspi;
- struct resource *mem;
int ret;
struct spi_master *master;
@@ -97,14 +96,7 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
xspi->bitbang.master = master;
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No memory resource\n");
- ret = -ENODEV;
- goto err;
- }
- xspi->regs = devm_ioremap_resource(&pdev->dev, mem);
+ xspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->regs)) {
ret = PTR_ERR(xspi->regs);
goto err;
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 5cf6993ddce5..17641157354d 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -51,7 +50,6 @@
#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
-#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK BIT(10) /* Slave Select Mask */
#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
@@ -61,9 +59,9 @@
* These are the values used in the calculation of baud rate divisor and
* setting the slave select.
*/
-#define ZYNQ_QSPI_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
-#define ZYNQ_QSPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
-#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
+#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
+#define ZYNQ_QSPI_CONFIG_PCS BIT(10) /* Peripheral Chip Select */
/*
* QSPI Interrupt Registers bit Masks
@@ -99,9 +97,9 @@
* It is named Linear Configuration but it controls other modes when not in
* linear mode also.
*/
-#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK BIT(30) /* LQSPI Two memories Mask */
-#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK BIT(29) /* LQSPI Separate bus Mask */
-#define ZYNQ_QSPI_LCFG_U_PAGE_MASK BIT(28) /* LQSPI Upper Page Mask */
+#define ZYNQ_QSPI_LCFG_TWO_MEM BIT(30) /* LQSPI Two memories */
+#define ZYNQ_QSPI_LCFG_SEP_BUS BIT(29) /* LQSPI Separate bus */
+#define ZYNQ_QSPI_LCFG_U_PAGE BIT(28) /* LQSPI Upper Page */
#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
@@ -116,8 +114,8 @@
*/
#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
-/* Default number of chip selects */
-#define ZYNQ_QSPI_DEFAULT_NUM_CS 1
+/* Maximum number of chip selects */
+#define ZYNQ_QSPI_MAX_NUM_CS 2
/**
* struct zynq_qspi - Defines qspi driver instance
@@ -161,6 +159,7 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
/**
* zynq_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynq_qspi structure
+ * @num_cs: Number of connected CS (to enable dual memories if needed)
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
@@ -178,7 +177,7 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
* - Set the little endian mode of TX FIFO and
* - Enable the QSPI controller
*/
-static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
+static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
{
u32 config_reg;
@@ -186,7 +185,12 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
/* Disable linear mode as the boot loader may have used it */
- zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, 0);
+ config_reg = 0;
+ /* At the same time, enable dual mode if more than 1 CS is available */
+ if (num_cs > 1)
+ config_reg |= ZYNQ_QSPI_LCFG_TWO_MEM;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
/* Clear the RX FIFO */
while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
@@ -284,21 +288,28 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
*/
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
- struct spi_controller *ctrl = spi->master;
- struct zynq_qspi *xqspi = spi_controller_get_devdata(ctrl);
+ struct spi_controller *ctlr = spi->master;
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
u32 config_reg;
- config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
- if (assert) {
- /* Select the slave */
- config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
- config_reg |= (((~(BIT(spi->chip_select))) <<
- ZYNQ_QSPI_SS_SHIFT) &
- ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
- } else {
- config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ /* Select the lower (CS0) or upper (CS1) memory */
+ if (ctlr->num_chipselect > 1) {
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
+ if (!spi->chip_select)
+ config_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
+ else
+ config_reg |= ZYNQ_QSPI_LCFG_U_PAGE;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
}
+ /* Ground the line to assert the CS */
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ if (assert)
+ config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
+ else
+ config_reg |= ZYNQ_QSPI_CONFIG_PCS;
+
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
@@ -332,7 +343,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
* ----------------
* 111 - divide by 256
*/
- while ((baud_rate_val < ZYNQ_QSPI_BAUD_DIV_MAX) &&
+ while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
spi->max_speed_hz)
baud_rate_val++;
@@ -348,7 +359,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
- config_reg |= (baud_rate_val << ZYNQ_QSPI_BAUD_DIV_SHIFT);
+ config_reg |= (baud_rate_val << ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT);
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
return 0;
@@ -365,10 +376,10 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
- struct zynq_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct spi_controller *ctlr = spi->master;
+ struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
- if (ctrl->busy)
+ if (ctlr->busy)
return -EBUSY;
clk_enable(qspi->refclk);
@@ -663,9 +674,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
goto clk_dis_pclk;
}
- /* QSPI controller initializations */
- zynq_qspi_init_hw(xqspi);
-
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
@@ -681,10 +689,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "num-cs",
&num_cs);
- if (ret < 0)
- ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
- else
+ if (ret < 0) {
+ ctlr->num_chipselect = 1;
+ } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+ dev_err(&pdev->dev, "only 2 chip selects are available\n");
+ goto remove_master;
+ } else {
ctlr->num_chipselect = num_cs;
+ }
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
@@ -692,6 +704,10 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
+
+ /* QSPI controller initializations */
+ zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);
+
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f9502dbbb5c1..5e4c4532f7f3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -92,7 +92,7 @@ static ssize_t driver_override_store(struct device *dev,
if (len) {
spi->driver_override = driver_override;
} else {
- /* Emptry string, disable driver override */
+ /* Empty string, disable driver override */
spi->driver_override = NULL;
kfree(driver_override);
}
@@ -469,7 +469,7 @@ static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);
/*
- * Used to protect add/del opertion for board_info list and
+ * Used to protect add/del operation for board_info list and
* spi_controller list, and their matching process
* also used to protect object of type struct idr
*/
@@ -775,6 +775,15 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
static void spi_set_cs(struct spi_device *spi, bool enable)
{
+ bool enable1 = enable;
+
+ if (!spi->controller->set_cs_timing) {
+ if (enable1)
+ spi_delay_exec(&spi->controller->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->controller->cs_hold, NULL);
+ }
+
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
@@ -800,6 +809,11 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (!spi->controller->set_cs_timing) {
+ if (!enable1)
+ spi_delay_exec(&spi->controller->cs_inactive, NULL);
+ }
}
#ifdef CONFIG_HAS_DMA
@@ -1106,42 +1120,79 @@ static void _spi_transfer_delay_ns(u32 ns)
}
}
-static void _spi_transfer_cs_change_delay(struct spi_message *msg,
- struct spi_transfer *xfer)
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
- u32 delay = xfer->cs_change_delay;
- u32 unit = xfer->cs_change_delay_unit;
+ u32 delay = _delay->value;
+ u32 unit = _delay->unit;
u32 hz;
- /* return early on "fast" mode - for everything but USECS */
- if (!delay && unit != SPI_DELAY_UNIT_USECS)
- return;
+ if (!delay)
+ return 0;
switch (unit) {
case SPI_DELAY_UNIT_USECS:
- /* for compatibility use default of 10us */
- if (!delay)
- delay = 10000;
- else
- delay *= 1000;
+ delay *= 1000;
break;
case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
+ /* clock cycles need to be obtained from spi_transfer */
+ if (!xfer)
+ return -EINVAL;
/* if there is no effective speed known, then approximate
* by underestimating with half the requested hz
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ if (!hz)
+ return -EINVAL;
delay *= DIV_ROUND_UP(1000000000, hz);
break;
default:
+ return -EINVAL;
+ }
+
+ return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ int delay;
+
+ if (!_delay)
+ return -EINVAL;
+
+ delay = spi_delay_to_ns(_delay, xfer);
+ if (delay < 0)
+ return delay;
+
+ _spi_transfer_delay_ns(delay);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
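Driver-side use of the two new helpers, as a hedged sketch: spi_delay_to_ns() converts a delay to nanoseconds (the transfer is needed to derive the clock period for SCK-denominated delays), while spi_delay_exec() converts and performs the delay in one call.

#include <linux/spi/spi.h>

static int demo_delay(struct spi_transfer *xfer)
{
	int ns;

	ns = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (ns < 0)
		return ns;	/* unknown unit, or no usable clock rate */

	/* or simply convert and perform the delay in one step: */
	return spi_delay_exec(&xfer->word_delay, xfer);
}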
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 delay = xfer->cs_change_delay.value;
+ u32 unit = xfer->cs_change_delay.unit;
+ int ret;
+
+ /* return early on "fast" mode - for everything but USECS */
+ if (!delay) {
+ if (unit == SPI_DELAY_UNIT_USECS)
+ _spi_transfer_delay_ns(10000);
+ return;
+ }
+
+ ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+ if (ret) {
dev_err_once(&msg->spi->dev,
"Use of unsupported delay unit %i, using default of 10us\n",
- xfer->cs_change_delay_unit);
- delay = 10000;
+ unit);
+ _spi_transfer_delay_ns(10000);
}
- /* now sleep for the requested amount of time */
- _spi_transfer_delay_ns(delay);
}
/*
@@ -1171,6 +1222,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&ctlr->xfer_completion);
@@ -1197,13 +1253,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
xfer->len);
}
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs)
- _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
@@ -1265,6 +1325,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
+ struct spi_transfer *xfer;
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
@@ -1391,6 +1452,13 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
goto out;
}
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
@@ -1419,6 +1487,99 @@ static void spi_pump_messages(struct kthread_work *work)
}
/**
+ * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. The frequency with which this function
+ * must be called (once per word, once for the whole
+ * transfer, once per batch of words etc.) is arbitrary
+ * as long as the @tx buffer offset is greater than or
+ * equal to the requested byte at the time of the
+ * call. The timestamp is only taken once, at the
+ * first such call. It is assumed that the driver
+ * advances its @tx buffer pointer monotonically.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
+ * preparing to transmit right now.
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, must follow up with
+ * spi_take_timestamp_post, otherwise the system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off)
+{
+ u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped_pre)
+ return;
+
+ if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+ return;
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+
+ xfer->timestamped_pre = true;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper for drivers to collect the end of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds
+ * or is equal to the requested word will be
+ * timestamped.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
+ * just transmitted.
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off)
+{
+ u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped_post)
+ return;
+
+ if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+
+ xfer->timestamped_post = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
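A sketch of how a PIO driver might instrument its transmit loop with these helpers, assuming 8-bit words and a hypothetical fifo_write() stub; each helper latches its timestamp only once, when the tx pointer reaches the word requested in ptp_sts_word_pre/ptp_sts_word_post:

#include <linux/spi/spi.h>

static void fifo_write(u8 byte)
{
	/* push one byte to the hardware FIFO (stub) */
}

static void demo_tx(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	unsigned int i;

	for (i = 0; i < xfer->len; i++) {
		spi_take_timestamp_pre(ctlr, xfer, tx, false);
		fifo_write(*tx++);
		spi_take_timestamp_post(ctlr, xfer, tx, false);
	}
}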
+
+/**
* spi_set_thread_rt - set the controller to pump at realtime priority
* @ctlr: controller to boost priority of
*
@@ -1503,6 +1664,7 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
+ struct spi_transfer *xfer;
struct spi_message *mesg;
unsigned long flags;
int ret;
@@ -1511,6 +1673,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
mesg = ctlr->cur_msg;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -1711,15 +1880,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
-
- /*
- * For descriptors associated with the device, polarity inversion is
- * handled in the gpiolib, so all chip selects are "active high" in
- * the logical sense, the gpiolib will invert the line if need be.
- */
- if (ctlr->use_gpio_descriptors)
- spi->mode |= SPI_CS_HIGH;
- else if (of_property_read_bool(nc, "spi-cs-high"))
+ if (of_property_read_bool(nc, "spi-cs-high"))
spi->mode |= SPI_CS_HIGH;
/* Device DUAL/QUAD mode */
@@ -1783,6 +1944,15 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
spi->chip_select = value;
+ /*
+ * For descriptors associated with the device, polarity inversion is
+ * handled in the gpiolib, so all gpio chip selects are "active high"
+ * in the logical sense, the gpiolib will invert the line if need be.
+ */
+ if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ spi->mode |= SPI_CS_HIGH;
+
/* Device speed */
rc = of_property_read_u32(nc, "spi-max-frequency", &value);
if (rc) {
@@ -2871,10 +3041,11 @@ struct spi_replaced_transfers *spi_replace_transfers(
/* add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
- /* clear cs_change and delay_usecs for all but the last */
+ /* clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
xfer->delay_usecs = 0;
+ xfer->delay.value = 0;
}
}
@@ -3091,7 +3262,29 @@ int spi_setup(struct spi_device *spi)
if (spi->controller->setup)
status = spi->controller->setup(spi);
- spi_set_cs(spi, false);
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ status = pm_runtime_get_sync(spi->controller->dev.parent);
+ if (status < 0) {
+ pm_runtime_put_noidle(spi->controller->dev.parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ /*
+ * We do not want to return positive value from pm_runtime_get,
+ * there are many instances of devices calling spi_setup() and
+ * checking for a non-zero return value instead of a negative
+ * return value.
+ */
+ status = 0;
+
+ spi_set_cs(spi, false);
+ pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ } else {
+ spi_set_cs(spi, false);
+ }
if (spi->rt && !spi->controller->rt) {
spi->controller->rt = true;
@@ -3114,18 +3307,71 @@ EXPORT_SYMBOL_GPL(spi_setup);
/**
* spi_set_cs_timing - configure CS setup, hold, and inactive delays
* @spi: the device that requires specific CS timing configuration
- * @setup: CS setup time in terms of clock count
- * @hold: CS hold time in terms of clock count
- * @inactive_dly: CS inactive delay between transfers in terms of clock count
+ * @setup: CS setup time specified via @spi_delay
+ * @hold: CS hold time specified via @spi_delay
+ * @inactive: CS inactive delay between transfers specified via @spi_delay
+ *
+ * Return: zero on success, else a negative error code.
*/
-void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
- u8 inactive_dly)
+int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+ struct spi_delay *hold, struct spi_delay *inactive)
{
+ size_t len;
+
if (spi->controller->set_cs_timing)
- spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
+ return spi->controller->set_cs_timing(spi, setup, hold,
+ inactive);
+
+ if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Clock-cycle delays for CS not supported in SW mode\n");
+ return -ENOTSUPP;
+ }
+
+ len = sizeof(struct spi_delay);
+
+ /* copy delays to controller */
+ if (setup)
+ memcpy(&spi->controller->cs_setup, setup, len);
+ else
+ memset(&spi->controller->cs_setup, 0, len);
+
+ if (hold)
+ memcpy(&spi->controller->cs_hold, hold, len);
+ else
+ memset(&spi->controller->cs_hold, 0, len);
+
+ if (inactive)
+ memcpy(&spi->controller->cs_inactive, inactive, len);
+ else
+ memset(&spi->controller->cs_inactive, 0, len);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+ struct spi_device *spi)
+{
+ int delay1, delay2;
+
+ delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (delay1 < 0)
+ return delay1;
+
+ delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (delay2 < 0)
+ return delay2;
+
+ if (delay1 < delay2)
+ memcpy(&xfer->word_delay, &spi->word_delay,
+ sizeof(xfer->word_delay));
+
+ return 0;
+}
+
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -3261,8 +3507,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
}
- if (xfer->word_delay_usecs < spi->word_delay_usecs)
- xfer->word_delay_usecs = spi->word_delay_usecs;
+ if (_spi_xfer_word_delay_update(xfer, spi))
+ return -EINVAL;
}
message->status = -EINPROGRESS;
@@ -3273,6 +3519,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
@@ -3288,6 +3535,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
trace_spi_message_submit(message);
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
return ctlr->transfer(spi, message);
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 255786f2e844..1e217e3e9486 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -265,9 +265,11 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->tx_nbits = u_tmp->tx_nbits;
k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
- k_tmp->delay_usecs = u_tmp->delay_usecs;
+ k_tmp->delay.value = u_tmp->delay_usecs;
+ k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
k_tmp->speed_hz = u_tmp->speed_hz;
- k_tmp->word_delay_usecs = u_tmp->word_delay_usecs;
+ k_tmp->word_delay.value = u_tmp->word_delay_usecs;
+ k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
if (!k_tmp->speed_hz)
k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
@@ -627,6 +629,9 @@ static int spidev_release(struct inode *inode, struct file *filp)
if (dofree)
kfree(spidev);
}
+#ifdef CONFIG_SPI_SLAVE
+ spi_slave_abort(spidev->spi);
+#endif
mutex_unlock(&device_list_lock);
return 0;
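The spidev ABI itself is untouched: userspace still fills delay_usecs and word_delay_usecs, and the driver repackages both as struct spi_delay values in SPI_DELAY_UNIT_USECS. A hypothetical userspace fragment:

#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int demo_spidev_xfer(int fd, const unsigned char *tx, unsigned int len)
{
	struct spi_ioc_transfer tr = {
		.tx_buf = (unsigned long)tx,
		.len = len,
		.delay_usecs = 10,	/* becomes { .value = 10, USECS } */
		.word_delay_usecs = 2,	/* becomes { .value = 2, USECS } */
	};

	return ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
}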
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 927d29eb92c6..eaf753b70ec5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,6 +125,8 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
-source "drivers/staging/vboxsf/Kconfig"
+source "drivers/staging/hp/Kconfig"
+
+source "drivers/staging/wfx/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index f01f04199073..0a4396c9067b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -53,4 +53,5 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
-obj-$(CONFIG_VBOXSF_FS) += vboxsf/
+obj-$(CONFIG_NET_VENDOR_HP) += hp/
+obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 805437fa249a..39e6c59df1e9 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -125,7 +125,6 @@ MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out;
struct axis_fifo {
int irq; /* interrupt */
- struct resource *mem; /* physical memory */
void __iomem *base_addr; /* kernel space memory */
unsigned int rx_fifo_depth; /* max words in the receive fifo */
@@ -701,6 +700,68 @@ static int get_dts_property(struct axis_fifo *fifo,
return 0;
}
+static int axis_fifo_parse_dt(struct axis_fifo *fifo)
+{
+ int ret;
+ unsigned int value;
+
+ ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
+ &fifo->rx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
+ &fifo->tx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ /* IP sets TDFV to fifo depth - 4 so we will do the same */
+ fifo->tx_fifo_depth -= 4;
+
+ ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
static int axis_fifo_probe(struct platform_device *pdev)
{
struct resource *r_irq; /* interrupt resources */
@@ -712,34 +773,6 @@ static int axis_fifo_probe(struct platform_device *pdev)
int rc = 0; /* error return value */
- /* IP properties from device tree */
- unsigned int rxd_tdata_width;
- unsigned int txc_tdata_width;
- unsigned int txd_tdata_width;
- unsigned int tdest_width;
- unsigned int tid_width;
- unsigned int tuser_width;
- unsigned int data_interface_type;
- unsigned int has_tdest;
- unsigned int has_tid;
- unsigned int has_tkeep;
- unsigned int has_tstrb;
- unsigned int has_tuser;
- unsigned int rx_fifo_depth;
- unsigned int rx_programmable_empty_threshold;
- unsigned int rx_programmable_full_threshold;
- unsigned int axi_id_width;
- unsigned int axi4_data_width;
- unsigned int select_xpm;
- unsigned int tx_fifo_depth;
- unsigned int tx_programmable_empty_threshold;
- unsigned int tx_programmable_full_threshold;
- unsigned int use_rx_cut_through;
- unsigned int use_rx_data;
- unsigned int use_tx_control;
- unsigned int use_tx_cut_through;
- unsigned int use_tx_data;
-
/* ----------------------------
* init wrapper device
* ----------------------------
@@ -772,32 +805,19 @@ static int axis_fifo_probe(struct platform_device *pdev)
goto err_initial;
}
- fifo->mem = r_mem;
-
/* request physical memory */
- if (!request_mem_region(fifo->mem->start, resource_size(fifo->mem),
- DRIVER_NAME)) {
- dev_err(fifo->dt_device,
- "couldn't lock memory region at 0x%pa\n",
- &fifo->mem->start);
- rc = -EBUSY;
+ fifo->base_addr = devm_ioremap_resource(fifo->dt_device, r_mem);
+ if (IS_ERR(fifo->base_addr)) {
+ rc = PTR_ERR(fifo->base_addr);
+ dev_err(fifo->dt_device, "can't remap IO resource (%d)\n", rc);
goto err_initial;
}
- dev_dbg(fifo->dt_device, "got memory location [0x%pa - 0x%pa]\n",
- &fifo->mem->start, &fifo->mem->end);
-
- /* map physical memory to kernel virtual address space */
- fifo->base_addr = ioremap(fifo->mem->start, resource_size(fifo->mem));
- if (!fifo->base_addr) {
- dev_err(fifo->dt_device, "couldn't map physical memory\n");
- rc = -ENOMEM;
- goto err_mem;
- }
+
dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
/* create unique device name */
snprintf(device_name, sizeof(device_name), "%s_%pa",
- DRIVER_NAME, &fifo->mem->start);
+ DRIVER_NAME, &r_mem->start);
dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
@@ -806,164 +826,9 @@ static int axis_fifo_probe(struct platform_device *pdev)
* ----------------------------
*/
- /* retrieve device tree properties */
- rc = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width",
- &rxd_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axi-str-txc-tdata-width",
- &txc_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width",
- &txd_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tdest-width", &tdest_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tid-width", &tid_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tuser-width", &tuser_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,data-interface-type",
- &data_interface_type);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tdest", &has_tdest);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tid", &has_tid);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tkeep", &has_tkeep);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tstrb", &has_tstrb);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tuser", &has_tuser);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-depth", &rx_fifo_depth);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-pe-threshold",
- &rx_programmable_empty_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-pf-threshold",
- &rx_programmable_full_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,s-axi-id-width", &axi_id_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,s-axi4-data-width", &axi4_data_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,select-xpm", &select_xpm);
+ rc = axis_fifo_parse_dt(fifo);
if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-depth", &tx_fifo_depth);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-pe-threshold",
- &tx_programmable_empty_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-pf-threshold",
- &tx_programmable_full_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-rx-cut-through",
- &use_rx_cut_through);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-rx-data", &use_rx_data);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-ctrl", &use_tx_control);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-cut-through",
- &use_tx_cut_through);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-data", &use_tx_data);
- if (rc)
- goto err_unmap;
-
- /* check validity of device tree properties */
- if (rxd_tdata_width != 32) {
- dev_err(fifo->dt_device,
- "rxd_tdata_width width [%u] unsupported\n",
- rxd_tdata_width);
- rc = -EIO;
- goto err_unmap;
- }
- if (txd_tdata_width != 32) {
- dev_err(fifo->dt_device,
- "txd_tdata_width width [%u] unsupported\n",
- txd_tdata_width);
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tdest) {
- dev_err(fifo->dt_device, "tdest not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tid) {
- dev_err(fifo->dt_device, "tid not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tkeep) {
- dev_err(fifo->dt_device, "tkeep not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tstrb) {
- dev_err(fifo->dt_device, "tstrb not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tuser) {
- dev_err(fifo->dt_device, "tuser not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_rx_cut_through) {
- dev_err(fifo->dt_device, "rx cut-through not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_tx_cut_through) {
- dev_err(fifo->dt_device, "tx cut-through not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_tx_control) {
- dev_err(fifo->dt_device, "tx control not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
-
- /* TODO
- * these exist in the device tree but it's unclear what they do
- * - select-xpm
- * - data-interface-type
- */
-
- /* set device wrapper properties based on IP config */
- fifo->rx_fifo_depth = rx_fifo_depth;
- /* IP sets TDFV to fifo depth - 4 so we will do the same */
- fifo->tx_fifo_depth = tx_fifo_depth - 4;
- fifo->has_rx_fifo = use_rx_data;
- fifo->has_tx_fifo = use_tx_data;
+ goto err_initial;
reset_ip_core(fifo);
@@ -976,18 +841,19 @@ static int axis_fifo_probe(struct platform_device *pdev)
r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r_irq) {
dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
- &fifo->mem->start);
+ &r_mem->start);
rc = -EIO;
- goto err_unmap;
+ goto err_initial;
}
/* request IRQ */
fifo->irq = r_irq->start;
- rc = request_irq(fifo->irq, &axis_fifo_irq, 0, DRIVER_NAME, fifo);
+ rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
+ DRIVER_NAME, fifo);
if (rc) {
dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
fifo->irq);
- goto err_unmap;
+ goto err_initial;
}
/* ----------------------------
@@ -998,7 +864,7 @@ static int axis_fifo_probe(struct platform_device *pdev)
/* allocate device number */
rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME);
if (rc < 0)
- goto err_irq;
+ goto err_initial;
dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n",
MAJOR(fifo->devt), MINOR(fifo->devt));
@@ -1022,14 +888,14 @@ static int axis_fifo_probe(struct platform_device *pdev)
}
/* create sysfs entries */
- rc = sysfs_create_group(&fifo->device->kobj, &axis_fifo_attrs_group);
+ rc = devm_device_add_group(fifo->device, &axis_fifo_attrs_group);
if (rc < 0) {
dev_err(fifo->dt_device, "couldn't register sysfs group\n");
goto err_cdev;
}
dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n",
- &fifo->mem->start, &fifo->base_addr, fifo->irq,
+ &r_mem->start, &fifo->base_addr, fifo->irq,
MAJOR(fifo->devt), MINOR(fifo->devt));
return 0;
@@ -1040,12 +906,6 @@ err_dev:
device_destroy(axis_fifo_driver_class, fifo->devt);
err_chrdev_region:
unregister_chrdev_region(fifo->devt, 1);
-err_irq:
- free_irq(fifo->irq, fifo);
-err_unmap:
- iounmap(fifo->base_addr);
-err_mem:
- release_mem_region(fifo->mem->start, resource_size(fifo->mem));
err_initial:
dev_set_drvdata(dev, NULL);
return rc;
@@ -1056,15 +916,12 @@ static int axis_fifo_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct axis_fifo *fifo = dev_get_drvdata(dev);
- sysfs_remove_group(&fifo->device->kobj, &axis_fifo_attrs_group);
cdev_del(&fifo->char_device);
dev_set_drvdata(fifo->device, NULL);
device_destroy(axis_fifo_driver_class, fifo->devt);
unregister_chrdev_region(fifo->devt, 1);
- free_irq(fifo->irq, fifo);
- iounmap(fifo->base_addr);
- release_mem_region(fifo->mem->start, resource_size(fifo->mem));
dev_set_drvdata(dev, NULL);
+
return 0;
}
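Because devm_*-acquired resources are released automatically, in reverse order of acquisition, when the device unbinds, the converted driver's error paths collapse to err_initial and its remove() keeps only the non-devm steps (cdev and chrdev region). A hedged skeleton of the probe shape this conversion produces, with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* no manual unwind needed: base is already devm-managed */
	return devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo", NULL);
}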
diff --git a/drivers/staging/axis-fifo/axis-fifo.txt b/drivers/staging/axis-fifo/axis-fifo.txt
index 85d88c010e72..5828e1b8e822 100644
--- a/drivers/staging/axis-fifo/axis-fifo.txt
+++ b/drivers/staging/axis-fifo/axis-fifo.txt
@@ -25,10 +25,10 @@ Required properties:
- xlnx,axi-str-txc-tdata-width: Should be <0x20>
- xlnx,axi-str-txd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
- xlnx,axi-str-txd-tdata-width: Should be <0x20>
-- xlnx,axis-tdest-width: AXI-Stream TDEST width
-- xlnx,axis-tid-width: AXI-Stream TID width
-- xlnx,axis-tuser-width: AXI-Stream TUSER width
-- xlnx,data-interface-type: Should be <0x0>
+- xlnx,axis-tdest-width: AXI-Stream TDEST width (ignored by the driver)
+- xlnx,axis-tid-width: AXI-Stream TID width (ignored by the driver)
+- xlnx,axis-tuser-width: AXI-Stream TUSER width (ignored by the driver)
+- xlnx,data-interface-type: Should be <0x0> (ignored by the driver)
- xlnx,has-axis-tdest: Should be <0x0> (this feature isn't supported)
- xlnx,has-axis-tid: Should be <0x0> (this feature isn't supported)
- xlnx,has-axis-tkeep: Should be <0x0> (this feature isn't supported)
@@ -36,13 +36,17 @@ Required properties:
- xlnx,has-axis-tuser: Should be <0x0> (this feature isn't supported)
- xlnx,rx-fifo-depth: Depth of RX FIFO in words
- xlnx,rx-fifo-pe-threshold: RX programmable empty interrupt threshold
+ (ignored by the driver)
- xlnx,rx-fifo-pf-threshold: RX programmable full interrupt threshold
-- xlnx,s-axi-id-width: Should be <0x4>
-- xlnx,s-axi4-data-width: Should be <0x20>
-- xlnx,select-xpm: Should be <0x0>
+ (ignored by the driver)
+- xlnx,s-axi-id-width: Should be <0x4> (ignored by the driver)
+- xlnx,s-axi4-data-width: Should be <0x20> (ignored by the driver)
+- xlnx,select-xpm: Should be <0x0> (ignored by the driver)
- xlnx,tx-fifo-depth: Depth of TX FIFO in words
- xlnx,tx-fifo-pe-threshold: TX programmable empty interrupt threshold
+ (ignored by the driver)
- xlnx,tx-fifo-pf-threshold: TX programmable full interrupt threshold
+ (ignored by the driver)
- xlnx,use-rx-cut-through: Should be <0x0> (this feature isn't supported)
- xlnx,use-rx-data: <0x1> if RX FIFO is enabled, <0x0> otherwise
- xlnx,use-tx-ctrl: Should be <0x0> (this feature isn't supported)
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 962cc0c79988..0225234dd7aa 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -50,16 +50,8 @@ static struct sh_mobile_lcdc_info lcdc0_info = {
};
static struct resource lcdc0_resources[] = {
- [0] = {
- .name = "LCD0",
- .start = 0xfe940000,
- .end = 0xfe943fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 177 + 32,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0"),
+ DEFINE_RES_IRQ(177 + 32),
};
static struct platform_device lcdc0_device = {
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 15b7a82f4b1e..e52a64be93f3 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -135,7 +135,6 @@ static int clk_wzrd_probe(struct platform_device *pdev)
unsigned long rate;
const char *clk_name;
struct clk_wzrd *clk_wzrd;
- struct resource *mem;
struct device_node *np = pdev->dev.of_node;
clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
@@ -143,8 +142,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, clk_wzrd);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+ clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clk_wzrd->base))
return PTR_ERR(clk_wzrd->base);
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index caf4d4df4bd3..f7c365b70106 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -508,12 +508,11 @@ static int dt3k_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
int i;
- unsigned int chan, gain, aref;
+ unsigned int chan, gain;
chan = CR_CHAN(insn->chanspec);
gain = CR_RANGE(insn->chanspec);
/* XXX docs don't explain how to select aref */
- aref = CR_AREF(insn->chanspec);
for (i = 0; i < insn->n; i++)
data[i] = dt3k_readsingle(dev, DPR_SUBSYS_AI, chan, gain);
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index eb61494dc2bd..673d732dcb8f 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -49,8 +49,6 @@
/* Helper for accessing data. */
#define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
-static const size_t route_table_size = NI_NUM_NAMES * NI_NUM_NAMES;
-
/*
* Find the proper route_values and ni_device_routes tables for this particular
* device.
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 04bc488385e6..4af012968cb6 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
+ * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
*/
/*
@@ -8,7 +8,7 @@
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: [ITL] USB-DUX-FAST (usbduxfast)
* Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
+ * Updated: 16 Nov 2019
* Status: stable
*/
@@ -22,6 +22,7 @@
*
*
* Revision history:
+ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
* 0.9: Dropping the first data packet which seems to be from the last transfer.
* Buffer overflows in the FX2 are handed over to comedi.
* 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
int err = 0;
+ int err2 = 0;
unsigned int steps;
unsigned int arg;
@@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
*/
steps = (cmd->convert_arg * 30) / 1000;
if (cmd->chanlist_len != 1)
- err |= comedi_check_trigger_arg_min(&steps,
- MIN_SAMPLING_PERIOD);
- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
- arg = (steps * 1000) / 30;
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ err2 |= comedi_check_trigger_arg_min(&steps,
+ MIN_SAMPLING_PERIOD);
+ else
+ err2 |= comedi_check_trigger_arg_min(&steps, 1);
+ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
+ if (err2) {
+ err |= err2;
+ arg = (steps * 1000) / 30;
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ }
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
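
/*
 * Editorial note: a worked example of the 30 MHz timing math above (a
 * sketch; MIN_/MAX_SAMPLING_PERIOD are the driver's own tick limits).
 * convert_arg is in nanoseconds, steps in 30 MHz ticks:
 *
 *	convert_arg = 345 ns
 *	steps = 345 * 30 / 1000 = 10    (integer division truncates 10.35)
 *	arg   = 10 * 1000 / 30  = 333   (nearest value the clock can do)
 *
 * The old code unconditionally rewrote convert_arg to 'arg', so any
 * request that was not an exact multiple of a tick was flagged as an
 * error; after the patch the rewrite only happens when one of the
 * min/max checks (err2) actually fired.
 */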
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 147481bf680c..03929b9d3a8b 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -1072,9 +1072,8 @@ static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
if (i_word_length > 0) {
for (i = 0; i < i_word_length; i++) {
_nbu2ss_writel(
- &preg->EP_REGS[ep->epnum - 1].EP_WRITE
- , p_buf_32->dw
- );
+ &preg->EP_REGS[ep->epnum - 1].EP_WRITE,
+ p_buf_32->dw);
p_buf_32++;
}
@@ -2661,20 +2660,18 @@ static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req)
- break;
- }
- if (&req->req != _req) {
- spin_unlock_irqrestore(&udc->lock, flags);
- pr_debug("%s no queue(EINVAL)\n", __func__);
- return -EINVAL;
+ if (&req->req == _req) {
+ _nbu2ss_ep_done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+ }
}
- _nbu2ss_ep_done(ep, req, -ECONNRESET);
-
spin_unlock_irqrestore(&udc->lock, flags);
- return 0;
+ pr_debug("%s no queue(EINVAL)\n", __func__);
+
+ return -EINVAL;
}
/*-------------------------------------------------------------------------*/
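
/*
 * Editorial note on the nbu2ss_ep_dequeue() rewrite above: the old code
 * tested 'req' after list_for_each_entry() finished, at which point
 * 'req' is container_of() applied to the list head itself - computed
 * but never dereferenced, so legal, just easy to misread. The new shape
 * handles the found case inside the loop and falls through to the
 * -EINVAL path, which also keeps the unlock on both paths obvious.
 */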
@@ -3078,7 +3075,6 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
{
int status = -ENODEV;
struct nbu2ss_udc *udc;
- struct resource *r;
int irq;
void __iomem *mmio_base;
@@ -3088,8 +3084,7 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, udc);
/* require I/O memory and IRQ to be provided as resources */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base))
return PTR_ERR(mmio_base);
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index ce32dfe33bec..0130019cbec2 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -6,15 +6,6 @@ config EXFAT_FS
help
This adds support for the exFAT file system.
-config EXFAT_DONT_MOUNT_VFAT
- bool "Prohibit mounting of fat/vfat filesystems by exFAT"
- depends on EXFAT_FS
- default y
- help
- By default, the exFAT driver will only mount exFAT filesystems, and refuse
- to mount fat/vfat filesystems. Set this to 'n' to allow the exFAT driver
- to mount these filesystems.
-
config EXFAT_DISCARD
bool "enable discard support"
depends on EXFAT_FS
diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO
index a3eb282f9efc..a283ce534cf4 100644
--- a/drivers/staging/exfat/TODO
+++ b/drivers/staging/exfat/TODO
@@ -1,8 +1,22 @@
+A laundry list of things that need looking at, most of which will
+require more work than the average checkpatch cleanup...
+
+Note that some of these entries may not be bugs - they're things
+that need to be looked at, and *possibly* fixed.
+
+Clean up the ffsCamelCase function names.
+
+Fix (thing)->flags to not use magic numbers - multiple offenders
+
+Sort out all the s32/u32/u8 nonsense - most of these should be plain int.
+
exfat_core.c - ffsReadFile - the goto err_out seems to leak a brelse().
same for ffsWriteFile.
-exfat_core.c - fs_sync(sb,0) all over the place looks fishy as hell.
-There's only one place that calls it with a non-zero argument.
+All the calls to fs_sync() need to be looked at, particularly in the
+context of EXFAT_DELAYED_SYNC. Currently, if that's defined, we only
+flush to disk when sync() gets called. We should be doing at least
+metadata flushes at appropriate times.
ffsTruncateFile - if (old_size <= new_size) {
That doesn't look right. How did it ever work? Are they relying on lazy
@@ -10,3 +24,46 @@ block allocation when actual writes happen? If nothing else, it never
does the 'fid->size = new_size' and do the inode update....
ffsSetAttr() is just dangling in the breeze, not wired up at all...
+
+Convert global mutexes to a per-superblock mutex (a sketch follows at
+the end of this TODO diff).
+
+Right now, we load exactly one UTF-8 table. Check to see
+if that plays nice with different codepage and iocharset values
+for simultaneous mounts of different devices.
+
+exfat_rmdir() checks for -EBUSY but ffsRemoveDir() doesn't return it.
+In fact, there's a complete lack of -EBUSY testing anywhere.
+
+There are probably a few missing checks for -EEXIST.
+
+check return codes of sync_dirty_buffer()
+
+Why is remove_file doing a num_entries++??
+
+Double check a lot of can't-happen parameter checks (for null pointers for
+things that have only one call site and can't pass a null, etc).
+
+All the DEBUG stuff can probably be tossed, including the ioctl(). Either
+that, or convert to a proper fault-injection system.
+
+exfat_remount does exactly one thing. Fix to actually deal with remount
+options, particularly handling R/O correctly. For that matter, allow
+R/O mounts in the first place.
+
+Figure out why the VFAT code used multi_sector_(read|write) but the
+exfat code doesn't use it. The difference matters on SSDs with wear leveling.
+
+exfat_fat_sync(), exfat_buf_sync(), and sync_alloc_bitmap()
+aren't called anyplace....
+
+Create a helper function for exfat_set_entry_time() and exfat_set_entry_type(),
+because it's sort of ugly to have some code calling the same function directly
+and other code calling through the fs_func struct pointers...
+
+clean up the remaining vol_type checks, which are of two types:
+some are ?: operators with magic numbers, and the rest are places
+where we're doing stuff with '.' and '..'.
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Valdis Kletnieks <valdis.kletnieks@vt.edu>
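
A sketch for the "convert global mutexes" item above (hypothetical
names; today f_mutex and b_mutex in exfat_cache.c are file-scope
globals, so the FAT and buffer caches of *all* mounted exFAT volumes
serialize on the same locks):

	struct fs_info_t {
		/* ... existing fields ... */
		struct mutex f_mutex;	/* per-sb FAT cache lock */
		struct mutex b_mutex;	/* per-sb buffer cache lock */
	};

	int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
	{
		struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
		s32 ret;

		mutex_lock(&p_fs->f_mutex);
		ret = __exfat_fat_read(sb, loc, content);
		mutex_unlock(&p_fs->f_mutex);
		return ret;
	}

(with mutex_init() added in the mount path; this series already
converts the locking calls themselves from down()/up() to
mutex_lock()/mutex_unlock() in exfat_cache.c.)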
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 3abab33e932c..2aac1e000977 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -30,6 +30,8 @@
#undef DEBUG
#endif
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
#define DENTRY_SIZE 32 /* dir entry size */
#define DENTRY_SIZE_BITS 5
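
/*
 * Editorial note: reusing EUCLEAN ("Structure needs cleaning") as
 * EFSCORRUPTED follows the convention ext4 and xfs already use; it
 * gives load_alloc_bitmap() later in this series a real errno to
 * return for a structurally bad volume instead of the old
 * FFS_FORMATERR code.
 */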
@@ -206,28 +208,6 @@ static inline u16 get_row_index(u16 i)
#define FM_REGULAR 0x00
#define FM_SYMLINK 0x40
-/* return values */
-#define FFS_SUCCESS 0
-#define FFS_MEDIAERR 1
-#define FFS_FORMATERR 2
-#define FFS_MOUNTED 3
-#define FFS_NOTMOUNTED 4
-#define FFS_ALIGNMENTERR 5
-#define FFS_SEMAPHOREERR 6
-#define FFS_INVALIDPATH 7
-#define FFS_INVALIDFID 8
-#define FFS_NOTFOUND 9
-#define FFS_FILEEXIST 10
-#define FFS_PERMISSIONERR 11
-#define FFS_NOTOPENED 12
-#define FFS_MAXOPENED 13
-#define FFS_FULL 14
-#define FFS_EOF 15
-#define FFS_DIRBUSY 16
-#define FFS_MEMORYERR 17
-#define FFS_NAMETOOLONG 18
-#define FFS_ERROR 19
-
#define NUM_UPCASE 2918
#define DOS_CUR_DIR_NAME ". "
@@ -618,7 +598,7 @@ struct fs_info_t {
u32 dev_ejected; /* block device operation error flag */
struct fs_func *fs_func;
- struct semaphore v_sem;
+ struct mutex v_mutex;
/* FAT cache */
struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE];
@@ -749,14 +729,7 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
/* NLS management function */
u16 nls_upper(struct super_block *sb, u16 a);
-int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b);
int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b);
-void nls_uniname_to_dosname(struct super_block *sb,
- struct dos_name_t *p_dosname,
- struct uni_name_t *p_uniname, bool *p_lossy);
-void nls_dosname_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
struct uni_name_t *p_uniname);
void nls_cstring_to_uniname(struct super_block *sb,
@@ -764,48 +737,33 @@ void nls_cstring_to_uniname(struct super_block *sb,
bool *p_lossy);
/* buffer cache management */
-void buf_init(struct super_block *sb);
-void buf_shutdown(struct super_block *sb);
-int FAT_read(struct super_block *sb, u32 loc, u32 *content);
-s32 FAT_write(struct super_block *sb, u32 loc, u32 content);
-u8 *FAT_getblk(struct super_block *sb, sector_t sec);
-void FAT_modify(struct super_block *sb, sector_t sec);
-void FAT_release_all(struct super_block *sb);
-void FAT_sync(struct super_block *sb);
-u8 *buf_getblk(struct super_block *sb, sector_t sec);
-void buf_modify(struct super_block *sb, sector_t sec);
-void buf_lock(struct super_block *sb, sector_t sec);
-void buf_unlock(struct super_block *sb, sector_t sec);
-void buf_release(struct super_block *sb, sector_t sec);
-void buf_release_all(struct super_block *sb);
-void buf_sync(struct super_block *sb);
+void exfat_buf_init(struct super_block *sb);
+void exfat_buf_shutdown(struct super_block *sb);
+int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content);
+s32 exfat_fat_write(struct super_block *sb, u32 loc, u32 content);
+u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec);
+void exfat_fat_modify(struct super_block *sb, sector_t sec);
+void exfat_fat_release_all(struct super_block *sb);
+void exfat_fat_sync(struct super_block *sb);
+u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec);
+void exfat_buf_modify(struct super_block *sb, sector_t sec);
+void exfat_buf_lock(struct super_block *sb, sector_t sec);
+void exfat_buf_unlock(struct super_block *sb, sector_t sec);
+void exfat_buf_release(struct super_block *sb, sector_t sec);
+void exfat_buf_release_all(struct super_block *sb);
+void exfat_buf_sync(struct super_block *sb);
/* fs management functions */
void fs_set_vol_flags(struct super_block *sb, u32 new_flag);
void fs_error(struct super_block *sb);
/* cluster management functions */
-s32 clear_cluster(struct super_block *sb, u32 clu);
-s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain);
s32 count_num_clusters(struct super_block *sb, struct chain_t *dir);
-s32 fat_count_used_clusters(struct super_block *sb);
-s32 exfat_count_used_clusters(struct super_block *sb);
void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len);
/* allocation bitmap management functions */
s32 load_alloc_bitmap(struct super_block *sb);
void free_alloc_bitmap(struct super_block *sb);
-s32 set_alloc_bitmap(struct super_block *sb, u32 clu);
-s32 clr_alloc_bitmap(struct super_block *sb, u32 clu);
-u32 test_alloc_bitmap(struct super_block *sb, u32 clu);
void sync_alloc_bitmap(struct super_block *sb);
/* upcase table management functions */
@@ -813,63 +771,8 @@ s32 load_upcase_table(struct super_block *sb);
void free_upcase_table(struct super_block *sb);
/* dir entry management functions */
-u32 fat_get_entry_type(struct dentry_t *p_entry);
-u32 exfat_get_entry_type(struct dentry_t *p_entry);
-void fat_set_entry_type(struct dentry_t *p_entry, u32 type);
-void exfat_set_entry_type(struct dentry_t *p_entry, u32 type);
-u32 fat_get_entry_attr(struct dentry_t *p_entry);
-u32 exfat_get_entry_attr(struct dentry_t *p_entry);
-void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-u8 fat_get_entry_flag(struct dentry_t *p_entry);
-u8 exfat_get_entry_flag(struct dentry_t *p_entry);
-void fat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
-u32 fat_get_entry_clu0(struct dentry_t *p_entry);
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
-void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-u64 fat_get_entry_size(struct dentry_t *p_entry);
-u64 exfat_get_entry_size(struct dentry_t *p_entry);
-void fat_set_entry_size(struct dentry_t *p_entry, u64 size);
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
struct timestamp_t *tm_current(struct timestamp_t *tm);
-void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- u32 type, u32 start_clu, u64 size);
-s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size);
-s32 fat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
-s32 exfat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
-void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu);
-void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum,
- u16 *uniname);
-void init_file_entry(struct file_dentry_t *ep, u32 type);
-void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu,
- u64 size);
-void init_name_entry(struct name_dentry_t *ep, u16 *uniname);
-void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-
-s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- sector_t *sector, s32 *offset);
-struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
- s32 offset);
+
struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
s32 entry, sector_t *sector);
struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
@@ -877,24 +780,6 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
u32 type,
struct dentry_t **file_ep);
void release_entry_set(struct entry_set_cache_t *es);
-s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es);
-s32 write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- struct dentry_t *ep, u32 count);
-s32 search_deleted_or_unused_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 num_entries);
-s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir,
- s32 num_entries);
-s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
u32 type);
void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
@@ -907,36 +792,13 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir);
s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 *entries,
struct dos_name_t *p_dosname);
-void get_uni_name_from_dos_entry(struct super_block *sb,
- struct dos_dentry_t *ep,
- struct uni_name_t *p_uniname, u8 mode);
-void fat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep,
- u16 *uniname, s32 order);
-s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep,
- u16 *uniname, s32 order);
-s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct dos_name_t *p_dosname);
-void fat_attach_count_to_dos_name(u8 *dosname, s32 count);
-s32 fat_calc_num_entries(struct uni_name_t *p_uniname);
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
-u8 calc_checksum_1byte(void *data, s32 len, u8 chksum);
u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type);
-u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type);
/* name resolution functions */
s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
struct uni_name_t *p_uniname);
-s32 resolve_name(u8 *name, u8 **arg);
/* file operation functions */
-s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
-s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
s32 create_dir(struct inode *inode, struct chain_t *p_dir,
struct uni_name_t *p_uniname, struct file_id_t *fid);
@@ -959,13 +821,13 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
int multi_sector_write(struct super_block *sb, sector_t sec,
struct buffer_head *bh, s32 num_secs, bool sync);
-void bdev_open(struct super_block *sb);
-void bdev_close(struct super_block *sb);
-int bdev_read(struct super_block *sb, sector_t secno,
+void exfat_bdev_open(struct super_block *sb);
+void exfat_bdev_close(struct super_block *sb);
+int exfat_bdev_read(struct super_block *sb, sector_t secno,
struct buffer_head **bh, u32 num_secs, bool read);
-int bdev_write(struct super_block *sb, sector_t secno,
+int exfat_bdev_write(struct super_block *sb, sector_t secno,
struct buffer_head *bh, u32 num_secs, bool sync);
-int bdev_sync(struct super_block *sb);
+int exfat_bdev_sync(struct super_block *sb);
extern const u8 uni_upcase[];
#endif /* _EXFAT_H */
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index 81d20e6241c6..7bcd98b13109 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -8,7 +8,7 @@
#include <linux/fs.h>
#include "exfat.h"
-void bdev_open(struct super_block *sb)
+void exfat_bdev_open(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -23,14 +23,14 @@ void bdev_open(struct super_block *sb)
p_bd->opened = true;
}
-void bdev_close(struct super_block *sb)
+void exfat_bdev_close(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
p_bd->opened = false;
}
-int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
+int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
u32 num_secs, bool read)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -40,11 +40,11 @@ int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
if (*bh)
__brelse(*bh);
@@ -62,10 +62,10 @@ int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
WARN(!p_fs->dev_ejected,
"[EXFAT] No bh, device seems wrong or to be ejected.\n");
- return FFS_MEDIAERR;
+ return -EIO;
}
-int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
+int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
u32 num_secs, bool sync)
{
s32 count;
@@ -77,11 +77,11 @@ int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
if (secno == bh->b_blocknr) {
lock_buffer(bh);
@@ -89,7 +89,7 @@ int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sync && (sync_dirty_buffer(bh) != 0))
- return FFS_MEDIAERR;
+ return -EIO;
} else {
count = num_secs << p_bd->sector_size_bits;
@@ -115,10 +115,10 @@ no_bh:
WARN(!p_fs->dev_ejected,
"[EXFAT] No bh, device seems wrong or to be ejected.\n");
- return FFS_MEDIAERR;
+ return -EIO;
}
-int bdev_sync(struct super_block *sb)
+int exfat_bdev_sync(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
@@ -126,11 +126,11 @@ int bdev_sync(struct super_block *sb)
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
return sync_blockdev(sb->s_bdev);
}
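
/*
 * Editorial note: the FFS_MEDIAERR returns above are split by cause
 * rather than mapped one-to-one. The convention this file now follows
 * (standard errnos from <linux/errno.h>) is, roughly:
 *
 *	device not opened              -> -ENODEV
 *	buffer-head read/write failure -> -EIO
 *	injected debug error           -> -EIO
 *
 * so callers can propagate the value straight to the VFS instead of
 * translating a driver-private FFS_* code.
 */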
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
index e1b001718709..3fd5604058a9 100644
--- a/drivers/staging/exfat/exfat_cache.c
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -12,8 +12,8 @@
#define DIRTYBIT 0x02
/* Local variables */
-static DEFINE_SEMAPHORE(f_sem);
-static DEFINE_SEMAPHORE(b_sem);
+static DEFINE_MUTEX(f_mutex);
+static DEFINE_MUTEX(b_mutex);
static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec)
{
@@ -128,7 +128,7 @@ static void buf_cache_remove_hash(struct buf_cache_t *bp)
(bp->hash_next)->hash_prev = bp->hash_prev;
}
-void buf_init(struct super_block *sb)
+void exfat_buf_init(struct super_block *sb)
{
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -189,11 +189,11 @@ void buf_init(struct super_block *sb)
buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]);
}
-void buf_shutdown(struct super_block *sb)
+void exfat_buf_shutdown(struct super_block *sb)
{
}
-static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
+static int __exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
{
s32 off;
u32 _content;
@@ -202,107 +202,22 @@ static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_fs->vol_type == FAT12) {
- sec = p_fs->FAT1_start_sector +
- ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
- off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
- if (off == (p_bd->sector_size - 1)) {
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
+ fat_sector = exfat_fat_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
- _content = (u32)fat_sector[off];
+ fat_entry = &fat_sector[off];
+ _content = GET32_A(fat_entry);
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
-
- _content |= (u32)fat_sector[0] << 8;
- } else {
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET16(fat_entry);
- }
-
- if (loc & 1)
- _content >>= 4;
-
- _content &= 0x00000FFF;
-
- if (_content >= CLUSTER_16(0x0FF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == FAT16) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 1));
- off = (loc << 1) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- _content = GET16_A(fat_entry);
-
- _content &= 0x0000FFFF;
-
- if (_content >= CLUSTER_16(0xFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == FAT32) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- _content = GET32_A(fat_entry);
-
- _content &= 0x0FFFFFFF;
-
- if (_content >= CLUSTER_32(0x0FFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == EXFAT) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET32_A(fat_entry);
-
- if (_content >= CLUSTER_32(0xFFFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
+ if (_content >= CLUSTER_32(0xFFFFFFF8)) {
+ *content = CLUSTER_32(~0);
return 0;
}
-
- /* Unknown volume type, throw in the towel and go home */
- *content = CLUSTER_32(~0);
+ *content = CLUSTER_32(_content);
return 0;
}
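
/*
 * Editorial note: a worked example of the exFAT-only lookup math above.
 * Every exFAT FAT entry is 4 bytes, so with 512-byte sectors
 * (sector_size_bits = 9, sector_size_mask = 511) and loc = 300:
 *
 *	sec = FAT1_start_sector + (300 >> (9 - 2)) = FAT1_start_sector + 2
 *	off = (300 << 2) & 511 = 1200 & 511 = 176
 *
 * i.e. entry 300 sits 176 bytes into the third FAT sector. The deleted
 * branches handled the FAT12/16/32 entry widths (1.5, 2 and 4 bytes,
 * each with its own end-of-chain marker), which this exFAT-only driver
 * no longer needs.
 */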
@@ -311,18 +226,18 @@ static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
* returns 0 on success
* -1 on error
*/
-int FAT_read(struct super_block *sb, u32 loc, u32 *content)
+int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
{
s32 ret;
- down(&f_sem);
- ret = __FAT_read(sb, loc, content);
- up(&f_sem);
+ mutex_lock(&f_mutex);
+ ret = __exfat_fat_read(sb, loc, content);
+ mutex_unlock(&f_mutex);
return ret;
}
-static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
+static s32 __exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
{
s32 off;
sector_t sec;
@@ -330,118 +245,34 @@ static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_fs->vol_type == FAT12) {
- content &= 0x00000FFF;
-
- sec = p_fs->FAT1_start_sector +
- ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
- off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- if (loc & 1) { /* odd */
- content <<= 4;
-
- if (off == (p_bd->sector_size - 1)) {
- fat_sector[off] = (u8)(content |
- (fat_sector[off] &
- 0x0F));
- FAT_modify(sb, sec);
-
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
-
- fat_sector[0] = (u8)(content >> 8);
- } else {
- fat_entry = &fat_sector[off];
- content |= GET16(fat_entry) & 0x000F;
-
- SET16(fat_entry, content);
- }
- } else { /* even */
- fat_sector[off] = (u8)(content);
-
- if (off == (p_bd->sector_size - 1)) {
- fat_sector[off] = (u8)(content);
- FAT_modify(sb, sec);
-
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
- fat_sector[0] = (u8)((fat_sector[0] & 0xF0) |
- (content >> 8));
- } else {
- fat_entry = &fat_sector[off];
- content |= GET16(fat_entry) & 0xF000;
-
- SET16(fat_entry, content);
- }
- }
- }
-
- else if (p_fs->vol_type == FAT16) {
- content &= 0x0000FFFF;
-
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 1));
- off = (loc << 1) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET16_A(fat_entry, content);
- } else if (p_fs->vol_type == FAT32) {
- content &= 0x0FFFFFFF;
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
+ fat_sector = exfat_fat_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
+ fat_entry = &fat_sector[off];
- fat_entry = &fat_sector[off];
-
- content |= GET32_A(fat_entry) & 0xF0000000;
-
- SET32_A(fat_entry, content);
- } else { /* p_fs->vol_type == EXFAT */
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET32_A(fat_entry, content);
- }
+ SET32_A(fat_entry, content);
- FAT_modify(sb, sec);
+ exfat_fat_modify(sb, sec);
return 0;
}
-int FAT_write(struct super_block *sb, u32 loc, u32 content)
+int exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
{
s32 ret;
- down(&f_sem);
- ret = __FAT_write(sb, loc, content);
- up(&f_sem);
+ mutex_lock(&f_mutex);
+ ret = __exfat_fat_write(sb, loc, content);
+ mutex_unlock(&f_mutex);
return ret;
}
-u8 *FAT_getblk(struct super_block *sb, sector_t sec)
+u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -462,7 +293,7 @@ u8 *FAT_getblk(struct super_block *sb, sector_t sec)
FAT_cache_insert_hash(sb, bp);
- if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
FAT_cache_remove_hash(bp);
bp->drv = -1;
bp->sec = ~0;
@@ -476,7 +307,7 @@ u8 *FAT_getblk(struct super_block *sb, sector_t sec)
return bp->buf_bh->b_data;
}
-void FAT_modify(struct super_block *sb, sector_t sec)
+void exfat_fat_modify(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
@@ -485,12 +316,12 @@ void FAT_modify(struct super_block *sb, sector_t sec)
sector_write(sb, sec, bp->buf_bh, 0);
}
-void FAT_release_all(struct super_block *sb)
+void exfat_fat_release_all(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&f_sem);
+ mutex_lock(&f_mutex);
bp = p_fs->FAT_cache_lru_list.next;
while (bp != &p_fs->FAT_cache_lru_list) {
@@ -507,15 +338,15 @@ void FAT_release_all(struct super_block *sb)
bp = bp->next;
}
- up(&f_sem);
+ mutex_unlock(&f_mutex);
}
-void FAT_sync(struct super_block *sb)
+void exfat_fat_sync(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&f_sem);
+ mutex_lock(&f_mutex);
bp = p_fs->FAT_cache_lru_list.next;
while (bp != &p_fs->FAT_cache_lru_list) {
@@ -526,7 +357,7 @@ void FAT_sync(struct super_block *sb)
bp = bp->next;
}
- up(&f_sem);
+ mutex_unlock(&f_mutex);
}
static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec)
@@ -561,7 +392,7 @@ static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec)
return bp;
}
-static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
+static u8 *__exfat_buf_getblk(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -582,7 +413,7 @@ static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
buf_cache_insert_hash(sb, bp);
- if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
buf_cache_remove_hash(bp);
bp->drv = -1;
bp->sec = ~0;
@@ -596,22 +427,22 @@ static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
return bp->buf_bh->b_data;
}
-u8 *buf_getblk(struct super_block *sb, sector_t sec)
+u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec)
{
u8 *buf;
- down(&b_sem);
- buf = __buf_getblk(sb, sec);
- up(&b_sem);
+ mutex_lock(&b_mutex);
+ buf = __exfat_buf_getblk(sb, sec);
+ mutex_unlock(&b_mutex);
return buf;
}
-void buf_modify(struct super_block *sb, sector_t sec)
+void exfat_buf_modify(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -620,14 +451,14 @@ void buf_modify(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_lock(struct super_block *sb, sector_t sec)
+void exfat_buf_lock(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -636,14 +467,14 @@ void buf_lock(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_unlock(struct super_block *sb, sector_t sec)
+void exfat_buf_unlock(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -652,15 +483,15 @@ void buf_unlock(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_release(struct super_block *sb, sector_t sec)
+void exfat_buf_release(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp)) {
@@ -676,15 +507,15 @@ void buf_release(struct super_block *sb, sector_t sec)
move_to_lru(bp, &p_fs->buf_cache_lru_list);
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_release_all(struct super_block *sb)
+void exfat_buf_release_all(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = p_fs->buf_cache_lru_list.next;
while (bp != &p_fs->buf_cache_lru_list) {
@@ -701,15 +532,15 @@ void buf_release_all(struct super_block *sb)
bp = bp->next;
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_sync(struct super_block *sb)
+void exfat_buf_sync(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = p_fs->buf_cache_lru_list.next;
while (bp != &p_fs->buf_cache_lru_list) {
@@ -720,5 +551,5 @@ void buf_sync(struct super_block *sb)
bp = bp->next;
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index 79174e5c4145..d2d3447083c7 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -20,15 +20,6 @@ static void __set_sb_dirty(struct super_block *sb)
static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE];
-static char *reserved_names[] = {
- "AUX ", "CON ", "NUL ", "PRN ",
- "COM1 ", "COM2 ", "COM3 ", "COM4 ",
- "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ",
- "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
- "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ",
- NULL
-};
-
static u8 free_bit[] = {
0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */
@@ -99,25 +90,23 @@ void fs_set_vol_flags(struct super_block *sb, u32 new_flag)
p_fs->vol_flag = new_flag;
- if (p_fs->vol_type == EXFAT) {
- if (!p_fs->pbr_bh) {
- if (sector_read(sb, p_fs->PBR_sector,
- &p_fs->pbr_bh, 1) != FFS_SUCCESS)
- return;
- }
+ if (!p_fs->pbr_bh) {
+ if (sector_read(sb, p_fs->PBR_sector,
+ &p_fs->pbr_bh, 1) != 0)
+ return;
+ }
- p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
- p_bpb = (struct bpbex_t *)p_pbr->bpb;
- SET16(p_bpb->vol_flags, (u16)new_flag);
+ p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
+ p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ SET16(p_bpb->vol_flags, (u16)new_flag);
- /* XXX duyoung
- * what can we do here? (cuz fs_set_vol_flags() is void)
- */
- if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
- else
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
- }
+ /* XXX duyoung
+ * what can we do here? (cuz fs_set_vol_flags() is void)
+ */
+ if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
+ else
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
}
void fs_error(struct super_block *sb)
@@ -136,10 +125,10 @@ void fs_error(struct super_block *sb)
* Cluster Management Functions
*/
-s32 clear_cluster(struct super_block *sb, u32 clu)
+static s32 clear_cluster(struct super_block *sb, u32 clu)
{
sector_t s, n;
- s32 ret = FFS_SUCCESS;
+ s32 ret = 0;
struct buffer_head *tmp_bh = NULL;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -154,12 +143,12 @@ s32 clear_cluster(struct super_block *sb, u32 clu)
for (; s < n; s++) {
ret = sector_read(sb, s, &tmp_bh, 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size);
ret = sector_write(sb, s, tmp_bh, 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
break;
}
@@ -167,61 +156,98 @@ s32 clear_cluster(struct super_block *sb, u32 clu)
return ret;
}
-s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain)
+static s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
{
- int i, num_clusters = 0;
- u32 new_clu, last_clu = CLUSTER_32(~0), read_clu;
+ int i, b;
+ sector_t sector;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- new_clu = p_chain->dir;
- if (new_clu == CLUSTER_32(~0))
- new_clu = p_fs->clu_srch_ptr;
- else if (new_clu >= p_fs->num_clusters)
- new_clu = 2;
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
- __set_sb_dirty(sb);
+ sector = START_SECTOR(p_fs->map_clu) + i;
- p_chain->dir = CLUSTER_32(~0);
+ exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
- for (i = 2; i < p_fs->num_clusters; i++) {
- if (FAT_read(sb, new_clu, &read_clu) != 0)
- return -1;
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+}
- if (read_clu == CLUSTER_32(0)) {
- if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -1;
- num_clusters++;
+static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+#ifdef CONFIG_EXFAT_DISCARD
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+ int ret;
+#endif /* CONFIG_EXFAT_DISCARD */
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_chain->dir == CLUSTER_32(~0)) {
- p_chain->dir = new_clu;
- } else {
- if (FAT_write(sb, last_clu, new_clu) < 0)
- return -1;
- }
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
- last_clu = new_clu;
+ sector = START_SECTOR(p_fs->map_clu) + i;
- if ((--num_alloc) == 0) {
- p_fs->clu_srch_ptr = new_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
+ exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
- return num_clusters;
- }
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard) {
+ ret = sb_issue_discard(sb, START_SECTOR(clu),
+ (1 << p_fs->sectors_per_clu_bits),
+ GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ pr_warn("discard not supported by device, disabling");
+ opts->discard = 0;
}
- if ((++new_clu) >= p_fs->num_clusters)
- new_clu = 2;
}
+#endif /* CONFIG_EXFAT_DISCARD */
+}
- p_fs->clu_srch_ptr = new_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
+static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, map_i, map_b;
+ u32 clu_base, clu_free;
+ u8 k, clu_mask;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- return num_clusters;
+ clu_base = (clu & ~(0x7)) + 2;
+ clu_mask = (1 << (clu - clu_base + 2)) - 1;
+
+ map_i = clu >> (p_bd->sector_size_bits + 3);
+ map_b = (clu >> 3) & p_bd->sector_size_mask;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < p_fs->num_clusters)
+ return clu_free;
+ }
+ clu_base += 8;
+
+ if (((++map_b) >= p_bd->sector_size) ||
+ (clu_base >= p_fs->num_clusters)) {
+ if ((++map_i) >= p_fs->map_sectors) {
+ clu_base = 2;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return CLUSTER_32(~0);
}
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
struct chain_t *p_chain)
{
s32 num_clusters = 0;
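
/*
 * Editorial note: the three bitmap helpers above move here unchanged so
 * they can become static. A worked example of their index math, with
 * 512-byte sectors (one allocation-bitmap bit per cluster):
 *
 *	i = clu >> (9 + 3)           which bitmap sector (4096 bits each)
 *	b = clu & ((512 << 3) - 1)   bit index within that sector
 *
 * e.g. clu = 5000 gives i = 1, b = 904: bit 904 of the second bitmap
 * buffer is set or cleared and that sector is written back. Note that
 * the CONFIG_EXFAT_DISCARD block in clr_alloc_bitmap() sits after an
 * unconditional return, so it is unreachable; the same dead code exists
 * in the version being deleted below, so the move merely carries a
 * pre-existing bug along.
 */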
@@ -251,22 +277,22 @@ s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
}
}
- if (set_alloc_bitmap(sb, new_clu - 2) != FFS_SUCCESS)
- return -1;
+ if (set_alloc_bitmap(sb, new_clu - 2) != 0)
+ return -EIO;
num_clusters++;
if (p_chain->flags == 0x01) {
- if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -1;
+ if (exfat_fat_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -EIO;
}
if (p_chain->dir == CLUSTER_32(~0)) {
p_chain->dir = new_clu;
} else {
if (p_chain->flags == 0x01) {
- if (FAT_write(sb, last_clu, new_clu) < 0)
- return -1;
+ if (exfat_fat_write(sb, last_clu, new_clu) < 0)
+ return -EIO;
}
}
last_clu = new_clu;
@@ -300,48 +326,7 @@ s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
return num_clusters;
}
-void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse)
-{
- s32 num_clusters = 0;
- u32 clu, prev;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int i;
- sector_t sector;
-
- if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
- return;
- __set_sb_dirty(sb);
- clu = p_chain->dir;
-
- if (p_chain->size <= 0)
- return;
-
- do {
- if (p_fs->dev_ejected)
- break;
-
- if (do_relse) {
- sector = START_SECTOR(clu);
- for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
- }
-
- prev = clu;
- if (FAT_read(sb, clu, &clu) == -1)
- break;
-
- if (FAT_write(sb, prev, CLUSTER_32(0)) < 0)
- break;
- num_clusters++;
-
- } while (clu != CLUSTER_32(~0));
-
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters -= num_clusters;
-}
-
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+static void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
s32 do_relse)
{
s32 num_clusters = 0;
@@ -367,10 +352,10 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
if (do_relse) {
sector = START_SECTOR(clu);
for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
+ exfat_buf_release(sb, sector + i);
}
- if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ if (clr_alloc_bitmap(sb, clu - 2) != 0)
break;
clu++;
@@ -384,13 +369,13 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
if (do_relse) {
sector = START_SECTOR(clu);
for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
+ exfat_buf_release(sb, sector + i);
}
- if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ if (clr_alloc_bitmap(sb, clu - 2) != 0)
break;
- if (FAT_read(sb, clu, &clu) == -1)
+ if (exfat_fat_read(sb, clu, &clu) == -1)
break;
num_clusters++;
} while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0)));
@@ -400,7 +385,7 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
p_fs->used_clusters -= num_clusters;
}
-u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
+static u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
{
u32 clu, next;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -410,7 +395,7 @@ u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
if (p_chain->flags == 0x03) {
clu += p_chain->size - 1;
} else {
- while ((FAT_read(sb, clu, &next) == 0) &&
+ while ((exfat_fat_read(sb, clu, &next) == 0) &&
(next != CLUSTER_32(~0))) {
if (p_fs->dev_ejected)
break;
@@ -437,7 +422,7 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
} else {
for (i = 2; i < p_fs->num_clusters; i++) {
count++;
- if (FAT_read(sb, clu, &clu) != 0)
+ if (exfat_fat_read(sb, clu, &clu) != 0)
return 0;
if (clu == CLUSTER_32(~0))
break;
@@ -447,30 +432,15 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
return count;
}
-s32 fat_count_used_clusters(struct super_block *sb)
-{
- int i, count = 0;
- u32 clu;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (i = 2; i < p_fs->num_clusters; i++) {
- if (FAT_read(sb, i, &clu) != 0)
- break;
- if (clu != CLUSTER_32(0))
- count++;
- }
-
- return count;
-}
-
-s32 exfat_count_used_clusters(struct super_block *sb)
+static s32 exfat_count_used_clusters(struct super_block *sb)
{
int i, map_i, map_b, count = 0;
u8 k;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- map_i = map_b = 0;
+ map_i = 0;
+ map_b = 0;
for (i = 2; i < p_fs->num_clusters; i += 8) {
k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
@@ -491,12 +461,12 @@ void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len)
return;
while (len > 1) {
- if (FAT_write(sb, chain, chain + 1) < 0)
+ if (exfat_fat_write(sb, chain, chain + 1) < 0)
break;
chain++;
len--;
}
- FAT_write(sb, chain, CLUSTER_32(~0));
+ exfat_fat_write(sb, chain, CLUSTER_32(~0));
}
/*
@@ -525,7 +495,7 @@ s32 load_alloc_bitmap(struct super_block *sb)
ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu,
i, NULL);
if (!ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
@@ -544,14 +514,14 @@ s32 load_alloc_bitmap(struct super_block *sb)
sizeof(struct buffer_head *),
GFP_KERNEL);
if (!p_fs->vol_amap)
- return FFS_MEMORYERR;
+ return -ENOMEM;
sector = START_SECTOR(p_fs->map_clu);
for (j = 0; j < p_fs->map_sectors; j++) {
p_fs->vol_amap[j] = NULL;
- ret = sector_read(sb, sector + j, &(p_fs->vol_amap[j]), 1);
- if (ret != FFS_SUCCESS) {
+ ret = sector_read(sb, sector + j, &p_fs->vol_amap[j], 1);
+ if (ret != 0) {
/* release all buffers and free vol_amap */
i = 0;
while (i < j)
@@ -564,15 +534,15 @@ s32 load_alloc_bitmap(struct super_block *sb)
}
p_fs->pbr_bh = NULL;
- return FFS_SUCCESS;
+ return 0;
}
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
- return FFS_FORMATERR;
+ return -EFSCORRUPTED;
}
void free_alloc_bitmap(struct super_block *sb)
@@ -589,97 +559,6 @@ void free_alloc_bitmap(struct super_block *sb)
p_fs->vol_amap = NULL;
}
-s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-}
-
-s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
-#ifdef CONFIG_EXFAT_DISCARD
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
- int ret;
-#endif /* CONFIG_EXFAT_DISCARD */
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-
-#ifdef CONFIG_EXFAT_DISCARD
- if (opts->discard) {
- ret = sb_issue_discard(sb, START_SECTOR(clu),
- (1 << p_fs->sectors_per_clu_bits),
- GFP_NOFS, 0);
- if (ret == -EOPNOTSUPP) {
- pr_warn("discard not supported by device, disabling");
- opts->discard = 0;
- }
- }
-#endif /* CONFIG_EXFAT_DISCARD */
-}
-
-u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, map_i, map_b;
- u32 clu_base, clu_free;
- u8 k, clu_mask;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- clu_base = (clu & ~(0x7)) + 2;
- clu_mask = (1 << (clu - clu_base + 2)) - 1;
-
- map_i = clu >> (p_bd->sector_size_bits + 3);
- map_b = (clu >> 3) & p_bd->sector_size_mask;
-
- for (i = 2; i < p_fs->num_clusters; i += 8) {
- k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
- if (clu_mask > 0) {
- k |= clu_mask;
- clu_mask = 0;
- }
- if (k < 0xFF) {
- clu_free = clu_base + free_bit[k];
- if (clu_free < p_fs->num_clusters)
- return clu_free;
- }
- clu_base += 8;
-
- if (((++map_b) >= p_bd->sector_size) ||
- (clu_base >= p_fs->num_clusters)) {
- if ((++map_i) >= p_fs->map_sectors) {
- clu_base = 2;
- map_i = 0;
- }
- map_b = 0;
- }
- }
-
- return CLUSTER_32(~0);
-}
-
void sync_alloc_bitmap(struct super_block *sb)
{
int i;
@@ -698,7 +577,7 @@ void sync_alloc_bitmap(struct super_block *sb)
static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
u32 num_sectors, u32 utbl_checksum)
{
- int i, ret = FFS_ERROR;
+ int i, ret = -EINVAL;
u32 j;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -712,15 +591,15 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
u32 checksum = 0;
- upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
- GFP_KERNEL);
+ upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
+ p_fs->vol_utbl = upcase_table;
if (!upcase_table)
- return FFS_MEMORYERR;
+ return -ENOMEM;
memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
while (sector < end_sector) {
ret = sector_read(sb, sector, &tmp_bh, 1);
- if (ret != FFS_SUCCESS) {
+ if (ret != 0) {
pr_debug("sector read (0x%llX)fail\n",
(unsigned long long)sector);
goto error;
@@ -755,7 +634,7 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
sizeof(u16), GFP_KERNEL);
if (!upcase_table[col_index]) {
- ret = FFS_MEMORYERR;
+ ret = -ENOMEM;
goto error;
}
@@ -771,9 +650,9 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
if (index >= 0xFFFF && utbl_checksum == checksum) {
if (tmp_bh)
brelse(tmp_bh);
- return FFS_SUCCESS;
+ return 0;
}
- ret = FFS_ERROR;
+ ret = -EINVAL;
error:
if (tmp_bh)
brelse(tmp_bh);
@@ -783,7 +662,7 @@ error:
static s32 __load_default_upcase_table(struct super_block *sb)
{
- int i, ret = FFS_ERROR;
+ int i, ret = -EINVAL;
u32 j;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -792,10 +671,10 @@ static s32 __load_default_upcase_table(struct super_block *sb)
u16 uni = 0;
u16 **upcase_table;
- upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
- GFP_KERNEL);
+ upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
+ p_fs->vol_utbl = upcase_table;
if (!upcase_table)
- return FFS_MEMORYERR;
+ return -ENOMEM;
memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
for (i = 0; index <= 0xFFFF && i < NUM_UPCASE * 2; i += 2) {
@@ -818,7 +697,7 @@ static s32 __load_default_upcase_table(struct super_block *sb)
sizeof(u16),
GFP_KERNEL);
if (!upcase_table[col_index]) {
- ret = FFS_MEMORYERR;
+ ret = -ENOMEM;
goto error;
}
@@ -832,7 +711,7 @@ static s32 __load_default_upcase_table(struct super_block *sb)
}
if (index >= 0xFFFF)
- return FFS_SUCCESS;
+ return 0;
error:
/* FATAL error: default upcase table has error */
@@ -855,14 +734,14 @@ s32 load_upcase_table(struct super_block *sb)
clu.flags = 0x01;
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
+ return -EIO;
while (clu.dir != CLUSTER_32(~0)) {
for (i = 0; i < p_fs->dentries_per_clu; i++) {
ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu,
i, NULL);
if (!ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
@@ -877,12 +756,12 @@ s32 load_upcase_table(struct super_block *sb)
sector = START_SECTOR(tbl_clu);
num_sectors = ((tbl_size - 1) >> p_bd->sector_size_bits) + 1;
if (__load_upcase_table(sb, sector, num_sectors,
- GET32_A(ep->checksum)) != FFS_SUCCESS)
+ GET32_A(ep->checksum)) != 0)
break;
- return FFS_SUCCESS;
+ return 0;
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
/* load default upcase table */
return __load_default_upcase_table(sb);
@@ -906,29 +785,7 @@ void free_upcase_table(struct super_block *sb)
* Directory Entry Management Functions
*/
-u32 fat_get_entry_type(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- if (*(ep->name) == 0x0)
- return TYPE_UNUSED;
-
- else if (*(ep->name) == 0xE5)
- return TYPE_DELETED;
-
- else if (ep->attr == ATTR_EXTEND)
- return TYPE_EXTEND;
-
- else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_VOLUME)
- return TYPE_VOLUME;
-
- else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_SUBDIR)
- return TYPE_DIR;
-
- return TYPE_FILE;
-}
-
-u32 exfat_get_entry_type(struct dentry_t *p_entry)
+static u32 exfat_get_entry_type(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -973,30 +830,7 @@ u32 exfat_get_entry_type(struct dentry_t *p_entry)
return TYPE_BENIGN_SEC;
}
-void fat_set_entry_type(struct dentry_t *p_entry, u32 type)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- if (type == TYPE_UNUSED)
- *(ep->name) = 0x0;
-
- else if (type == TYPE_DELETED)
- *(ep->name) = 0xE5;
-
- else if (type == TYPE_EXTEND)
- ep->attr = ATTR_EXTEND;
-
- else if (type == TYPE_DIR)
- ep->attr = ATTR_SUBDIR;
-
- else if (type == TYPE_FILE)
- ep->attr = ATTR_ARCHIVE;
-
- else if (type == TYPE_SYMLINK)
- ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK;
-}
-
-void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
+static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -1026,109 +860,56 @@ void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
}
}
-u32 fat_get_entry_attr(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return (u32)ep->attr;
-}
-
-u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+static u32 exfat_get_entry_attr(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
return (u32)GET16_A(ep->attr);
}
-void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- ep->attr = (u8)attr;
-}
-
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+static void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
SET16_A(ep->attr, (u16)attr);
}
-u8 fat_get_entry_flag(struct dentry_t *p_entry)
-{
- return 0x01;
-}
-
-u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+static u8 exfat_get_entry_flag(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return ep->flags;
}
-void fat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
-{
-}
-
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+static void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
ep->flags = flags;
}
-u32 fat_get_entry_clu0(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return ((u32)GET16_A(ep->start_clu_hi) << 16) |
- GET16_A(ep->start_clu_lo);
-}
-
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+static u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET32_A(ep->start_clu);
}
-void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
- SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
-}
-
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+static void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
SET32_A(ep->start_clu, start_clu);
}
-u64 fat_get_entry_size(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return (u64)GET32_A(ep->size);
-}
-
-u64 exfat_get_entry_size(struct dentry_t *p_entry)
+static u64 exfat_get_entry_size(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET64_A(ep->valid_size);
}
-void fat_set_entry_size(struct dentry_t *p_entry, u64 size)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- SET32_A(ep->size, (u32)size);
-}
-
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
@@ -1136,32 +917,7 @@ void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
SET64_A(ep->size, size);
}
-void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t = 0x00, d = 0x21;
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- switch (mode) {
- case TM_CREATE:
- t = GET16_A(ep->create_time);
- d = GET16_A(ep->create_date);
- break;
- case TM_MODIFY:
- t = GET16_A(ep->modify_time);
- d = GET16_A(ep->modify_date);
- break;
- }
-
- tp->sec = (t & 0x001F) << 1;
- tp->min = (t >> 5) & 0x003F;
- tp->hour = (t >> 11);
- tp->day = (d & 0x001F);
- tp->mon = (d >> 5) & 0x000F;
- tp->year = (d >> 9);
-}
-
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t = 0x00, d = 0x21;
@@ -1190,28 +946,7 @@ void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
tp->year = (d >> 9);
}
-void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t, d;
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
- d = (tp->year << 9) | (tp->mon << 5) | tp->day;
-
- switch (mode) {
- case TM_CREATE:
- SET16_A(ep->create_time, t);
- SET16_A(ep->create_date, d);
- break;
- case TM_MODIFY:
- SET16_A(ep->modify_time, t);
- SET16_A(ep->modify_date, d);
- break;
- }
-}
-
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+static void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t, d;
@@ -1236,24 +971,46 @@ void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
}
}
-s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- u32 type, u32 start_clu, u64 size)
+static void init_file_entry(struct file_dentry_t *ep, u32 type)
{
- sector_t sector;
- struct dos_dentry_t *dos_ep;
+ struct timestamp_t tm, *tp;
- dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!dos_ep)
- return FFS_MEDIAERR;
+ exfat_set_entry_type((struct dentry_t *)ep, type);
- init_dos_entry(dos_ep, type, start_clu);
- buf_modify(sb, sector);
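+ /* stamp create/modify/access times with the current time; the ms fields start at zero */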
+ tp = tm_current(&tm);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
+ ep->create_time_ms = 0;
+ ep->modify_time_ms = 0;
+ ep->access_time_ms = 0;
+}
- return FFS_SUCCESS;
+static void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
+{
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
+ ep->flags = flags;
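+ /* a freshly initialized stream extension entry starts with valid_size equal to size */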
+ SET32_A(ep->start_clu, start_clu);
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
}
-s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
+{
+ int i;
+
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
+ ep->flags = 0x0;
+
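+ /* copy up to 15 UTF-16 code units (30 bytes); the double increment advances i by 2 per unit */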
+ for (i = 0; i < 30; i++, i++) {
+ SET16_A(ep->unicode_0_14 + i, *uniname);
+ if (*uniname == 0x0)
+ break;
+ uniname++;
+ }
+}
+
+static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
s32 entry, u32 type, u32 start_clu, u64 size)
{
sector_t sector;
@@ -1267,71 +1024,20 @@ s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
&sector);
if (!file_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
&sector);
if (!strm_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
init_file_entry(file_ep, type);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
init_strm_entry(strm_ep, flags, start_clu, size);
- buf_modify(sb, sector);
-
- return FFS_SUCCESS;
-}
-
-static s32 fat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i;
- sector_t sector;
- u8 chksum;
- u16 *uniname = p_uniname->name;
- struct dos_dentry_t *dos_ep;
- struct ext_dentry_t *ext_ep;
-
- dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!dos_ep)
- return FFS_MEDIAERR;
-
- dos_ep->lcase = p_dosname->name_case;
- memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH);
- buf_modify(sb, sector);
-
- if ((--num_entries) > 0) {
- chksum = calc_checksum_1byte((void *)dos_ep->name,
- DOS_NAME_LENGTH, 0);
-
- for (i = 1; i < num_entries; i++) {
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb,
- p_dir,
- entry - i,
- &sector);
- if (!ext_ep)
- return FFS_MEDIAERR;
-
- init_ext_entry(ext_ep, i, chksum, uniname);
- buf_modify(sb, sector);
- uniname += 13;
- }
-
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry - i,
- &sector);
- if (!ext_ep)
- return FFS_MEDIAERR;
+ exfat_buf_modify(sb, sector);
- init_ext_entry(ext_ep, i + 0x40, chksum, uniname);
- buf_modify(sb, sector);
- }
-
- return FFS_SUCCESS;
+ return 0;
}
static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
@@ -1349,160 +1055,39 @@ static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
&sector);
if (!file_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
file_ep->num_ext = (u8)(num_entries - 1);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
&sector);
if (!strm_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
strm_ep->name_len = p_uniname->name_len;
SET16_A(strm_ep->name_hash, p_uniname->name_hash);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
for (i = 2; i < num_entries; i++) {
name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
entry + i,
&sector);
if (!name_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
init_name_entry(name_ep, uniname);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
uniname += 15;
}
update_dir_checksum(sb, p_dir, entry);
- return FFS_SUCCESS;
-}
-
-void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu)
-{
- struct timestamp_t tm, *tp;
-
- fat_set_entry_type((struct dentry_t *)ep, type);
- SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
- SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
- SET32_A(ep->size, 0);
-
- tp = tm_current(&tm);
- fat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- fat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- SET16_A(ep->access_date, 0);
- ep->create_time_ms = 0;
-}
-
-void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname)
-{
- int i;
- bool end = false;
-
- fat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->order = (u8)order;
- ep->sysid = 0;
- ep->checksum = chksum;
- SET16_A(ep->start_clu, 0);
-
- for (i = 0; i < 10; i += 2) {
- if (!end) {
- SET16(ep->unicode_0_4 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16(ep->unicode_0_4 + i, 0xFFFF);
- }
- }
-
- for (i = 0; i < 12; i += 2) {
- if (!end) {
- SET16_A(ep->unicode_5_10 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16_A(ep->unicode_5_10 + i, 0xFFFF);
- }
- }
-
- for (i = 0; i < 4; i += 2) {
- if (!end) {
- SET16_A(ep->unicode_11_12 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16_A(ep->unicode_11_12 + i, 0xFFFF);
- }
- }
-}
-
-void init_file_entry(struct file_dentry_t *ep, u32 type)
-{
- struct timestamp_t tm, *tp;
-
- exfat_set_entry_type((struct dentry_t *)ep, type);
-
- tp = tm_current(&tm);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
- ep->create_time_ms = 0;
- ep->modify_time_ms = 0;
- ep->access_time_ms = 0;
-}
-
-void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
-{
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
- ep->flags = flags;
- SET32_A(ep->start_clu, start_clu);
- SET64_A(ep->valid_size, size);
- SET64_A(ep->size, size);
-}
-
-void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
-{
- int i;
-
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->flags = 0x0;
-
- for (i = 0; i < 30; i++, i++) {
- SET16_A(ep->unicode_0_14 + i, *uniname);
- if (*uniname == 0x0)
- break;
- uniname++;
- }
-}
-
-void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
-{
- int i;
- sector_t sector;
- struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (i = num_entries - 1; i >= order; i--) {
- ep = get_entry_in_dir(sb, p_dir, entry - i, &sector);
- if (!ep)
- return;
-
- p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
- buf_modify(sb, sector);
- }
+ return 0;
}
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
+static void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
{
int i;
sector_t sector;
@@ -1515,7 +1100,7 @@ void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return;
p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
}
}
@@ -1533,7 +1118,7 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
if (!file_ep)
return;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
num_entries = (s32)file_ep->num_ext + 1;
chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
@@ -1542,7 +1127,7 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
for (i = 1; i < num_entries; i++) {
ep = get_entry_in_dir(sb, p_dir, entry + i, NULL);
if (!ep) {
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
return;
}
@@ -1551,8 +1136,75 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
}
SET16_A(file_ep->checksum, chksum);
- buf_modify(sb, sector);
- buf_unlock(sb, sector);
+ exfat_buf_modify(sb, sector);
+ exfat_buf_unlock(sb, sector);
+}
+
+static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ sector_t sec, s32 off, u32 count)
+{
+ s32 num_entries, buf_off = (off - es->offset);
+ u32 remaining_byte_in_sector, copy_entries;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ u32 clu;
+ u8 *buf, *esbuf = (u8 *)&es->__buf;
+
+ pr_debug("%s entered es %p sec %llu off %d count %d\n",
+ __func__, es, (unsigned long long)sec, off, count);
+ num_entries = count;
+
+ while (num_entries) {
+ /* write on a per-sector basis */
+ remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
+ copy_entries = min_t(s32,
+ remaining_byte_in_sector >> DENTRY_SIZE_BITS,
+ num_entries);
+ buf = exfat_buf_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
+ pr_debug("copying %d entries from %p to sector %llu\n",
+ copy_entries, (esbuf + buf_off),
+ (unsigned long long)sec);
+ memcpy(buf + off, esbuf + buf_off,
+ copy_entries << DENTRY_SIZE_BITS);
+ exfat_buf_modify(sb, sec);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ /* get next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ clu = GET_CLUSTER_FROM_SECTOR(sec);
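+ /* alloc_flag 0x03 marks a contiguous (no-FAT-chain) allocation, so the next cluster is clu + 1 */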
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (exfat_fat_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ off = 0;
+ buf_off += copy_entries << DENTRY_SIZE_BITS;
+ }
+ }
+
+ pr_debug("%s exited successfully\n", __func__);
+ return 0;
+err_out:
+ pr_debug("%s failed\n", __func__);
+ return -EINVAL;
+}
+
+/* write back all entries in the entry set */
+static s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
+{
+ return __write_partial_entries_in_entry_set(sb, es, es->sector,
+ es->offset,
+ es->num_entries);
}
void update_dir_checksum_with_entry_set(struct super_block *sb,
@@ -1562,7 +1214,7 @@ void update_dir_checksum_with_entry_set(struct super_block *sb,
u16 chksum = 0;
s32 chksum_type = CS_DIR_ENTRY, i;
- ep = (struct dentry_t *)&(es->__buf);
+ ep = (struct dentry_t *)&es->__buf;
for (i = 0; i < es->num_entries; i++) {
pr_debug("%s ep %p\n", __func__, ep);
chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
@@ -1571,7 +1223,7 @@ void update_dir_checksum_with_entry_set(struct super_block *sb,
chksum_type = CS_DEFAULT;
}
- ep = (struct dentry_t *)&(es->__buf);
+ ep = (struct dentry_t *)&es->__buf;
SET16_A(((struct file_dentry_t *)ep)->checksum, chksum);
write_whole_entry_set(sb, es);
}
@@ -1590,18 +1242,18 @@ static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
cur_clu += clu_offset;
} else {
while (clu_offset > 0) {
- if (FAT_read(sb, cur_clu, &cur_clu) == -1)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, cur_clu, &cur_clu) == -1)
+ return -EIO;
clu_offset--;
}
}
if (clu)
*clu = cur_clu;
- return FFS_SUCCESS;
+ return 0;
}
-s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
sector_t *sector, s32 *offset)
{
s32 off, ret;
@@ -1617,7 +1269,7 @@ s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
*sector += p_fs->root_start_sector;
} else {
ret = _walk_fat_chain(sb, p_dir, off, &clu);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
/* byte offset in cluster */
@@ -1630,20 +1282,7 @@ s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
*sector = off >> p_bd->sector_size_bits;
*sector += START_SECTOR(clu);
}
- return FFS_SUCCESS;
-}
-
-struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
- s32 offset)
-{
- u8 *buf;
-
- buf = buf_getblk(sb, sector);
-
- if (!buf)
- return NULL;
-
- return (struct dentry_t *)(buf + offset);
+ return 0;
}
struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
@@ -1653,10 +1292,10 @@ struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
sector_t sec;
u8 *buf;
- if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS)
+ if (find_location(sb, p_dir, entry, &sec, &off) != 0)
return NULL;
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
return NULL;
@@ -1703,11 +1342,11 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
size_t bufsize;
pr_debug("%s entered p_dir dir %u flags %x size %d\n",
- __func__, p_dir->dir, p_dir->flags, p_dir->size);
+ __func__, p_dir->dir, p_dir->flags, p_dir->size);
byte_offset = entry << DENTRY_SIZE_BITS;
ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return NULL;
/* byte offset in cluster */
@@ -1720,15 +1359,14 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
sec = byte_offset >> p_bd->sector_size_bits;
sec += START_SECTOR(clu);
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
goto err_out;
ep = (struct dentry_t *)(buf + off);
entry_type = p_fs->fs_func->get_entry_type(ep);
- if ((entry_type != TYPE_FILE)
- && (entry_type != TYPE_DIR))
+ if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
goto err_out;
if (type == ES_ALL_ENTRIES)
@@ -1812,14 +1450,14 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
if (es->alloc_flag == 0x03) {
clu++;
} else {
- if (FAT_read(sb, clu, &clu) == -1)
+ if (exfat_fat_read(sb, clu, &clu) == -1)
goto err_out;
}
sec = START_SECTOR(clu);
} else {
sec++;
}
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
goto err_out;
off = 0;
@@ -1832,11 +1470,11 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
}
if (file_ep)
- *file_ep = (struct dentry_t *)&(es->__buf);
+ *file_ep = (struct dentry_t *)&es->__buf;
pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n",
- __func__, es, (unsigned long long)es->sector, es->offset,
- es->alloc_flag, es->num_entries, &es->__buf);
+ __func__, es, (unsigned long long)es->sector, es->offset,
+ es->alloc_flag, es->num_entries, &es->__buf);
return es;
err_out:
pr_debug("%s exited NULL (es %p)\n", __func__, es);
@@ -1850,114 +1488,8 @@ void release_entry_set(struct entry_set_cache_t *es)
kfree(es);
}
-static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- sector_t sec, s32 off, u32 count)
-{
- s32 num_entries, buf_off = (off - es->offset);
- u32 remaining_byte_in_sector, copy_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- u32 clu;
- u8 *buf, *esbuf = (u8 *)&(es->__buf);
-
- pr_debug("%s entered es %p sec %llu off %d count %d\n",
- __func__, es, (unsigned long long)sec, off, count);
- num_entries = count;
-
- while (num_entries) {
- /* white per sector base */
- remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
- copy_entries = min_t(s32,
- remaining_byte_in_sector >> DENTRY_SIZE_BITS,
- num_entries);
- buf = buf_getblk(sb, sec);
- if (!buf)
- goto err_out;
- pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
- pr_debug("copying %d entries from %p to sector %llu\n",
- copy_entries, (esbuf + buf_off),
- (unsigned long long)sec);
- memcpy(buf + off, esbuf + buf_off,
- copy_entries << DENTRY_SIZE_BITS);
- buf_modify(sb, sec);
- num_entries -= copy_entries;
-
- if (num_entries) {
- /* get next sector */
- if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
- clu = GET_CLUSTER_FROM_SECTOR(sec);
- if (es->alloc_flag == 0x03) {
- clu++;
- } else {
- if (FAT_read(sb, clu, &clu) == -1)
- goto err_out;
- }
- sec = START_SECTOR(clu);
- } else {
- sec++;
- }
- off = 0;
- buf_off += copy_entries << DENTRY_SIZE_BITS;
- }
- }
-
- pr_debug("%s exited successfully\n", __func__);
- return FFS_SUCCESS;
-err_out:
- pr_debug("%s failed\n", __func__);
- return FFS_ERROR;
-}
-
-/* write back all entries in entry set */
-s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
-{
- return __write_partial_entries_in_entry_set(sb, es, es->sector,
- es->offset,
- es->num_entries);
-}
-
-/* write back some entries in entry set */
-s32 write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es, struct dentry_t *ep, u32 count)
-{
- s32 ret, byte_offset, off;
- u32 clu = 0;
- sector_t sec;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct chain_t dir;
-
- /* vaidity check */
- if (ep + count > ((struct dentry_t *)&(es->__buf)) + es->num_entries)
- return FFS_ERROR;
-
- dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector);
- dir.flags = es->alloc_flag;
- dir.size = 0xffffffff; /* XXX */
-
- byte_offset = (es->sector - START_SECTOR(dir.dir)) <<
- p_bd->sector_size_bits;
- byte_offset += ((void **)ep - &(es->__buf)) + es->offset;
-
- ret = _walk_fat_chain(sb, &dir, byte_offset, &clu);
- if (ret != FFS_SUCCESS)
- return ret;
-
- /* byte offset in cluster */
- byte_offset &= p_fs->cluster_size - 1;
-
- /* byte offset in sector */
- off = byte_offset & p_bd->sector_size_mask;
-
- /* sector offset in cluster */
- sec = byte_offset >> p_bd->sector_size_bits;
- sec += START_SECTOR(clu);
- return __write_partial_entries_in_entry_set(sb, es, sec, off, count);
-}
-
/* search EMPTY CONTINUOUS "num_entries" entries */
-s32 search_deleted_or_unused_entry(struct super_block *sb,
+static s32 search_deleted_or_unused_entry(struct super_block *sb,
struct chain_t *p_dir, s32 num_entries)
{
int i, dentry, num_empty = 0;
@@ -2043,7 +1575,7 @@ s32 search_deleted_or_unused_entry(struct super_block *sb,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
return -1;
}
}
@@ -2051,7 +1583,7 @@ s32 search_deleted_or_unused_entry(struct super_block *sb,
return -1;
}
-s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
+static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
{
s32 ret, dentry;
u32 last_clu;
@@ -2070,10 +1602,8 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
if (p_fs->dev_ejected)
break;
- if (p_fs->vol_type == EXFAT) {
- if (p_dir->dir != p_fs->root_dir)
- size = i_size_read(inode);
- }
+ if (p_dir->dir != p_fs->root_dir)
+ size = i_size_read(inode);
last_clu = find_last_cluster(sb, p_dir);
clu.dir = last_clu + 1;
@@ -2083,10 +1613,10 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
/* (1) allocate a cluster */
ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
if (ret < 1)
- return -1;
+ return -EIO;
- if (clear_cluster(sb, clu.dir) != FFS_SUCCESS)
- return -1;
+ if (clear_cluster(sb, clu.dir) != 0)
+ return -EIO;
/* (2) append to the FAT chain */
if (clu.flags != p_dir->flags) {
@@ -2095,8 +1625,8 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
p_fs->hint_uentry.clu.flags = 0x01;
}
if (clu.flags == 0x01)
- if (FAT_write(sb, last_clu, clu.dir) < 0)
- return -1;
+ if (exfat_fat_write(sb, last_clu, clu.dir) < 0)
+ return -EIO;
if (p_fs->hint_uentry.entry == -1) {
p_fs->hint_uentry.dir = p_dir->dir;
@@ -2110,21 +1640,19 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
p_dir->size++;
/* (3) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- if (p_dir->dir != p_fs->root_dir) {
- size += p_fs->cluster_size;
-
- ep = get_entry_in_dir(sb, &fid->dir,
- fid->entry + 1, &sector);
- if (!ep)
- return -1;
- p_fs->fs_func->set_entry_size(ep, size);
- p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
- buf_modify(sb, sector);
-
- update_dir_checksum(sb, &(fid->dir),
- fid->entry);
- }
+ if (p_dir->dir != p_fs->root_dir) {
+ size += p_fs->cluster_size;
+
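+ /* grow the parent directory's stream entry (fid->entry + 1) to cover the new cluster */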
+ ep = get_entry_in_dir(sb, &fid->dir,
+ fid->entry + 1, &sector);
+ if (!ep)
+ return -ENOENT;
+ p_fs->fs_func->set_entry_size(ep, size);
+ p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ exfat_buf_modify(sb, sector);
+
+ update_dir_checksum(sb, &fid->dir,
+ fid->entry);
}
i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
@@ -2137,102 +1665,21 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
return dentry;
}
-/* return values of fat_find_dir_entry()
- * >= 0 : return dir entiry position with the name in dir
- * -1 : (root dir, ".") it is the root dir itself
- * -2 : entry with the name does not exist
- */
-s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type)
+static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
+ s32 order)
{
- int i, dentry = 0, len;
- s32 order = 0;
- bool is_feasible_entry = true, has_ext_entry = false;
- s32 dentries_per_clu;
- u32 entry_type;
- u16 entry_uniname[14], *uniname = NULL, unichar;
- struct chain_t clu;
- struct dentry_t *ep;
- struct dos_dentry_t *dos_ep;
- struct ext_dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == p_fs->root_dir) {
- if ((!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_CUR_DIR_NAME)) ||
- (!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_PAR_DIR_NAME)))
- return -1; // special case, root directory itself
- }
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++, dentry++) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- return -2;
-
- entry_type = p_fs->fs_func->get_entry_type(ep);
-
- if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
- if ((type == TYPE_ALL) || (type == entry_type)) {
- if (is_feasible_entry && has_ext_entry)
- return dentry;
-
- dos_ep = (struct dos_dentry_t *)ep;
- if (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name))
- return dentry;
- }
- is_feasible_entry = true;
- has_ext_entry = false;
- } else if (entry_type == TYPE_EXTEND) {
- if (is_feasible_entry) {
- ext_ep = (struct ext_dentry_t *)ep;
- if (ext_ep->order > 0x40) {
- order = (s32)(ext_ep->order - 0x40);
- uniname = p_uniname->name + 13 * (order - 1);
- } else {
- order = (s32)ext_ep->order;
- uniname -= 13;
- }
-
- len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
-
- unichar = *(uniname + len);
- *(uniname + len) = 0x0;
-
- if (nls_uniname_cmp(sb, uniname, entry_uniname))
- is_feasible_entry = false;
-
- *(uniname + len) = unichar;
- }
- has_ext_entry = true;
- } else if (entry_type == TYPE_UNUSED) {
- return -2;
- }
- is_feasible_entry = true;
- has_ext_entry = false;
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
+ int i, len = 0;
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return -2;
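+ /* a name entry stores up to 15 UTF-16 code units (30 bytes) in unicode_0_14 */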
+ for (i = 0; i < 30; i += 2) {
+ *uniname = GET16_A(ep->unicode_0_14 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
}
- return -2;
+ *uniname = 0x0;
+ return len;
}
/* return values of exfat_find_dir_entry()
@@ -2240,7 +1687,7 @@ s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
* -1 : (root dir, ".") it is the root dir itself
* -2 : entry with the name does not exist
*/
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 num_entries,
struct dos_name_t *p_dosname, u32 type)
{
@@ -2375,7 +1822,7 @@ s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
return -2;
}
}
@@ -2383,37 +1830,7 @@ s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return -2;
}
-s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry)
-{
- s32 count = 0;
- u8 chksum;
- struct dos_dentry_t *dos_ep = (struct dos_dentry_t *)p_entry;
- struct ext_dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- chksum = calc_checksum_1byte((void *)dos_ep->name, DOS_NAME_LENGTH, 0);
-
- for (entry--; entry >= 0; entry--) {
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry, NULL);
- if (!ext_ep)
- return -1;
-
- if ((p_fs->fs_func->get_entry_type((struct dentry_t *)ext_ep) ==
- TYPE_EXTEND) && (ext_ep->checksum == chksum)) {
- count++;
- if (ext_ep->order > 0x40)
- return count;
- } else {
- return count;
- }
- }
-
- return count;
-}
-
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+static s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
s32 entry, struct dentry_t *p_entry)
{
int i, count = 0;
@@ -2463,7 +1880,7 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
for (i = 0; i < dentries_per_clu; i++) {
ep = get_entry_in_dir(sb, &clu, i, NULL);
if (!ep)
- return -1;
+ return -ENOENT;
entry_type = p_fs->fs_func->get_entry_type(ep);
@@ -2486,8 +1903,8 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return -1;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
}
@@ -2546,7 +1963,7 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
else
clu.dir = CLUSTER_32(~0);
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
break;
}
@@ -2564,84 +1981,19 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 *entries,
struct dos_name_t *p_dosname)
{
- s32 ret, num_entries;
- bool lossy = false;
- char **r;
+ s32 num_entries;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
if (num_entries == 0)
- return FFS_INVALIDPATH;
-
- if (p_fs->vol_type != EXFAT) {
- nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy);
-
- if (lossy) {
- ret = fat_generate_dos_name(sb, p_dir, p_dosname);
- if (ret)
- return ret;
- } else {
- for (r = reserved_names; *r; r++) {
- if (!strncmp((void *)p_dosname->name, *r, 8))
- return FFS_INVALIDPATH;
- }
-
- if (p_dosname->name_case != 0xFF)
- num_entries = 1;
- }
-
- if (num_entries > 1)
- p_dosname->name_case = 0x0;
- }
+ return -EINVAL;
*entries = num_entries;
- return FFS_SUCCESS;
-}
-
-void get_uni_name_from_dos_entry(struct super_block *sb,
- struct dos_dentry_t *ep,
- struct uni_name_t *p_uniname, u8 mode)
-{
- struct dos_name_t dos_name;
-
- if (mode == 0x0)
- dos_name.name_case = 0x0;
- else
- dos_name.name_case = ep->lcase;
-
- memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
- nls_dosname_to_uniname(sb, p_uniname, &dos_name);
-}
-
-void fat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname)
-{
- int i;
- struct ext_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (entry--, i = 1; entry >= 0; entry--, i++) {
- ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- NULL);
- if (!ep)
- return;
-
- if (p_fs->fs_func->get_entry_type((struct dentry_t *)ep) ==
- TYPE_EXTEND) {
- extract_uni_name_from_ext_entry(ep, uniname, i);
- if (ep->order > 0x40)
- return;
- } else {
- return;
- }
-
- uniname += 13;
- }
+ return 0;
}
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
struct chain_t *p_dir, s32 entry,
u16 *uniname)
{
@@ -2678,203 +2030,7 @@ out:
release_entry_set(es);
}
-s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 10; i += 2) {
- *uniname = GET16(ep->unicode_0_4 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- if (order < 20) {
- for (i = 0; i < 12; i += 2) {
- *uniname = GET16_A(ep->unicode_5_10 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
- } else {
- for (i = 0; i < 8; i += 2) {
- *uniname = GET16_A(ep->unicode_5_10 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
- *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */
- return len;
- }
-
- for (i = 0; i < 4; i += 2) {
- *uniname = GET16_A(ep->unicode_11_12 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 30; i += 2) {
- *uniname = GET16_A(ep->unicode_0_14 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct dos_name_t *p_dosname)
-{
- int i, j, count = 0;
- bool count_begin = false;
- s32 dentries_per_clu;
- u32 type;
- u8 bmap[128/* 1 ~ 1023 */];
- struct chain_t clu;
- struct dos_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- memset(bmap, 0, sizeof(bmap));
- exfat_bitmap_set(bmap, 0);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++) {
- ep = (struct dos_dentry_t *)get_entry_in_dir(sb, &clu,
- i, NULL);
- if (!ep)
- return FFS_MEDIAERR;
-
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)
- ep);
-
- if (type == TYPE_UNUSED)
- break;
- if ((type != TYPE_FILE) && (type != TYPE_DIR))
- continue;
-
- count = 0;
- count_begin = false;
-
- for (j = 0; j < 8; j++) {
- if (ep->name[j] == ' ')
- break;
-
- if (ep->name[j] == '~') {
- count_begin = true;
- } else if (count_begin) {
- if ((ep->name[j] >= '0') &&
- (ep->name[j] <= '9')) {
- count = count * 10 +
- (ep->name[j] - '0');
- } else {
- count = 0;
- count_begin = false;
- }
- }
- }
-
- if ((count > 0) && (count < 1024))
- exfat_bitmap_set(bmap, count);
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
- }
-
- count = 0;
- for (i = 0; i < 128; i++) {
- if (bmap[i] != 0xFF) {
- for (j = 0; j < 8; j++) {
- if (exfat_bitmap_test(&bmap[i], j) == 0) {
- count = (i << 3) + j;
- break;
- }
- }
- if (count != 0)
- break;
- }
- }
-
- if ((count == 0) || (count >= 1024))
- return FFS_FILEEXIST;
- fat_attach_count_to_dos_name(p_dosname->name, count);
-
- /* Now dos_name has DOS~????.EXT */
- return FFS_SUCCESS;
-}
-
-void fat_attach_count_to_dos_name(u8 *dosname, s32 count)
-{
- int i, j, length;
- char str_count[6];
-
- snprintf(str_count, sizeof(str_count), "~%d", count);
- length = strlen(str_count);
-
- i = 0;
- j = 0;
- while (j <= (8 - length)) {
- i = j;
- if (dosname[j] == ' ')
- break;
- if (dosname[j] & 0x80)
- j += 2;
- else
- j++;
- }
-
- for (j = 0; j < length; i++, j++)
- dosname[i] = (u8)str_count[j];
-
- if (i == 7)
- dosname[7] = ' ';
-}
-
-s32 fat_calc_num_entries(struct uni_name_t *p_uniname)
-{
- s32 len;
-
- len = p_uniname->name_len;
- if (len == 0)
- return 0;
-
- /* 1 dos name entry + extended entries */
- return (len - 1) / 13 + 2;
-}
-
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+static s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
{
s32 len;
@@ -2886,17 +2042,6 @@ s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
return (len - 1) / 15 + 3;
}
-u8 calc_checksum_1byte(void *data, s32 len, u8 chksum)
-{
- int i;
- u8 *c = (u8 *)data;
-
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
-
- return chksum;
-}
-
u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
{
int i;
@@ -2921,30 +2066,6 @@ u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
return chksum;
}
-u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type)
-{
- int i;
- u8 *c = (u8 *)data;
-
- switch (type) {
- case CS_PBR_SECTOR:
- for (i = 0; i < len; i++, c++) {
- if ((i == 106) || (i == 107) || (i == 112))
- continue;
- chksum = (((chksum & 1) << 31) |
- ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
- }
- break;
- default
- :
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 31) |
- ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
- }
-
- return chksum;
-}
-
/*
* Name Resolution Functions
*/
@@ -2962,11 +2083,11 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
if (strscpy(name_buf, path, sizeof(name_buf)) < 0)
- return FFS_INVALIDPATH;
+ return -EINVAL;
nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
if (lossy)
- return FFS_INVALIDPATH;
+ return -EINVAL;
fid->size = i_size_read(inode);
@@ -2974,154 +2095,12 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
p_dir->flags = fid->flags;
- return FFS_SUCCESS;
+ return 0;
}
/*
* File Operation Functions
*/
-static struct fs_func fat_fs_func = {
- .alloc_cluster = fat_alloc_cluster,
- .free_cluster = fat_free_cluster,
- .count_used_clusters = fat_count_used_clusters,
-
- .init_dir_entry = fat_init_dir_entry,
- .init_ext_entry = fat_init_ext_entry,
- .find_dir_entry = fat_find_dir_entry,
- .delete_dir_entry = fat_delete_dir_entry,
- .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry,
- .count_ext_entries = fat_count_ext_entries,
- .calc_num_entries = fat_calc_num_entries,
-
- .get_entry_type = fat_get_entry_type,
- .set_entry_type = fat_set_entry_type,
- .get_entry_attr = fat_get_entry_attr,
- .set_entry_attr = fat_set_entry_attr,
- .get_entry_flag = fat_get_entry_flag,
- .set_entry_flag = fat_set_entry_flag,
- .get_entry_clu0 = fat_get_entry_clu0,
- .set_entry_clu0 = fat_set_entry_clu0,
- .get_entry_size = fat_get_entry_size,
- .set_entry_size = fat_set_entry_size,
- .get_entry_time = fat_get_entry_time,
- .set_entry_time = fat_set_entry_time,
-};
-
-s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- s32 num_reserved, num_root_sectors;
- struct bpb16_t *p_bpb = (struct bpb16_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
-
- num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS;
- num_root_sectors = ((num_root_sectors - 1) >>
- p_bd->sector_size_bits) + 1;
-
- p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
- p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->FAT2_start_sector +
- p_fs->num_FAT_sectors;
- p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors;
-
- p_fs->num_sectors = GET16(p_bpb->num_sectors);
- if (p_fs->num_sectors == 0)
- p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
-
- num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
- p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
- p_fs->sectors_per_clu_bits) + 2;
- /* because the cluster index starts with 2 */
-
- if (p_fs->num_clusters < FAT12_THRESHOLD)
- p_fs->vol_type = FAT12;
- else
- p_fs->vol_type = FAT16;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = 0;
- p_fs->dentries_in_root = GET16(p_bpb->num_root_entries);
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = VOL_CLEAN;
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- p_fs->fs_func = &fat_fs_func;
-
- return FFS_SUCCESS;
-}
-
-s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- s32 num_reserved;
- struct bpb32_t *p_bpb = (struct bpb32_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
-
- p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
- p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->FAT2_start_sector +
- p_fs->num_FAT_sectors;
- p_fs->data_start_sector = p_fs->root_start_sector;
-
- p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
- num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
-
- p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
- p_fs->sectors_per_clu_bits) + 2;
- /* because the cluster index starts with 2 */
-
- p_fs->vol_type = FAT32;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = GET32(p_bpb->root_cluster);
- p_fs->dentries_in_root = 0;
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = VOL_CLEAN;
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- p_fs->fs_func = &fat_fs_func;
-
- return FFS_SUCCESS;
-}
-
static struct fs_func exfat_fs_func = {
.alloc_cluster = exfat_alloc_cluster,
.free_cluster = exfat_free_cluster,
@@ -3156,7 +2135,7 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
+ return -EFSCORRUPTED;
p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits;
p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits;
@@ -3194,7 +2173,7 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
p_fs->fs_func = &exfat_fs_func;
- return FFS_SUCCESS;
+ return 0;
}
s32 create_dir(struct inode *inode, struct chain_t *p_dir,
@@ -3203,7 +2182,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
s32 ret, dentry, num_entries;
u64 size;
struct chain_t clu;
- struct dos_name_t dos_name, dot_name;
+ struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct fs_func *fs_func = p_fs->fs_func;
@@ -3216,7 +2195,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* find_empty_entry must be called before alloc_cluster */
dentry = find_empty_entry(inode, p_dir, num_entries);
if (dentry < 0)
- return FFS_FULL;
+ return -ENOSPC;
clu.dir = CLUSTER_32(~0);
clu.size = 0;
@@ -3225,64 +2204,26 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* (1) allocate a cluster */
ret = fs_func->alloc_cluster(sb, 1, &clu);
if (ret < 0)
- return FFS_MEDIAERR;
+ return ret;
else if (ret == 0)
- return FFS_FULL;
+ return -ENOSPC;
ret = clear_cluster(sb, clu.dir);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
- if (p_fs->vol_type == EXFAT) {
- size = p_fs->cluster_size;
- } else {
- size = 0;
-
- /* initialize the . and .. entry
- * Information for . points to itself
- * Information for .. points to parent dir
- */
-
- dot_name.name_case = 0x0;
- memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH);
-
- ret = fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir,
- 0);
- if (ret != FFS_SUCCESS)
- return ret;
-
- ret = fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name);
- if (ret != FFS_SUCCESS)
- return ret;
-
- memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH);
-
- if (p_dir->dir == p_fs->root_dir)
- ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
- CLUSTER_32(0), 0);
- else
- ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
- p_dir->dir, 0);
-
- if (ret != FFS_SUCCESS)
- return ret;
-
- ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL,
- &dot_name);
- if (ret != FFS_SUCCESS)
- return ret;
- }
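+ /* exFAT keeps no "." or ".." entries, so a new directory is just one cleared cluster */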
+ size = p_fs->cluster_size;
/* (2) update the directory entry */
/* make sub-dir entry in parent directory */
ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
size);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fid->dir.dir = p_dir->dir;
@@ -3299,7 +2240,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
fid->rwoffset = 0;
fid->hint_last_off = -1;
- return FFS_SUCCESS;
+ return 0;
}
s32 create_file(struct inode *inode, struct chain_t *p_dir,
@@ -3319,7 +2260,7 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
/* find_empty_entry must be called before alloc_cluster() */
dentry = find_empty_entry(inode, p_dir, num_entries);
if (dentry < 0)
- return FFS_FULL;
+ return -ENOSPC;
/* (1) update the directory entry */
/* fill the dos name directory entry information of the created file.
@@ -3327,12 +2268,12 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
*/
ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
CLUSTER_32(0), 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fid->dir.dir = p_dir->dir;
@@ -3349,7 +2290,7 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
fid->rwoffset = 0;
fid->hint_last_off = -1;
- return FFS_SUCCESS;
+ return 0;
}
void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
@@ -3365,17 +2306,17 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
if (!ep)
return;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
if (num_entries < 0) {
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
return;
}
num_entries++;
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
/* (1) update the directory entry */
fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
@@ -3394,37 +2335,37 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
if (!epold)
- return FFS_MEDIAERR;
+ return -ENOENT;
- buf_lock(sb, sector_old);
+ exfat_buf_lock(sb, sector_old);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
epold);
if (num_old_entries < 0) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
num_old_entries++;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
&num_new_entries, &dos_name);
if (ret) {
- buf_unlock(sb, sector_old);
+ exfat_buf_unlock(sb, sector_old);
return ret;
}
if (num_old_entries < num_new_entries) {
newentry = find_empty_entry(inode, p_dir, num_new_entries);
if (newentry < 0) {
- buf_unlock(sb, sector_old);
- return FFS_FULL;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOSPC;
}
epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
if (!epnew) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
@@ -3434,30 +2375,28 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_old);
-
- if (p_fs->vol_type == EXFAT) {
- epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
- &sector_old);
- buf_lock(sb, sector_old);
- epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
- &sector_new);
-
- if (!epold || !epnew) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
- }
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_old);
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_old);
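+ /* the stream extension entry at oldentry + 1 must be copied to the new slot as well */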
+ epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
+ &sector_old);
+ exfat_buf_lock(sb, sector_old);
+ epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
+ &sector_new);
+
+ if (!epold || !epnew) {
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_old);
+
ret = fs_func->init_ext_entry(sb, p_dir, newentry,
num_new_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
@@ -3470,20 +2409,20 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_old);
- buf_unlock(sb, sector_old);
+ exfat_buf_modify(sb, sector_old);
+ exfat_buf_unlock(sb, sector_old);
ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
num_new_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
num_old_entries);
}
- return FFS_SUCCESS;
+ return 0;
}
s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
@@ -3492,7 +2431,6 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
{
s32 ret, newentry, num_new_entries, num_old_entries;
sector_t sector_mov, sector_new;
- struct chain_t clu;
struct dos_name_t dos_name;
struct dentry_t *epmov, *epnew;
struct super_block *sb = inode->i_sb;
@@ -3501,41 +2439,41 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
if (!epmov)
- return FFS_MEDIAERR;
+ return -ENOENT;
/* check if the source and target directory is the same */
if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
fs_func->get_entry_clu0(epmov) == p_newdir->dir)
- return FFS_INVALIDPATH;
+ return -EINVAL;
- buf_lock(sb, sector_mov);
+ exfat_buf_lock(sb, sector_mov);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
epmov);
if (num_old_entries < 0) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
num_old_entries++;
ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
&num_new_entries, &dos_name);
if (ret) {
- buf_unlock(sb, sector_mov);
+ exfat_buf_unlock(sb, sector_mov);
return ret;
}
newentry = find_empty_entry(inode, p_newdir, num_new_entries);
if (newentry < 0) {
- buf_unlock(sb, sector_mov);
- return FFS_FULL;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOSPC;
}
epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
if (!epnew) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
@@ -3544,42 +2482,26 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_mov);
-
- if (p_fs->vol_type == EXFAT) {
- epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
- &sector_mov);
- buf_lock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
- &sector_new);
- if (!epmov || !epnew) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_mov);
- } else if (fs_func->get_entry_type(epnew) == TYPE_DIR) {
- /* change ".." pointer to new parent dir */
- clu.dir = fs_func->get_entry_clu0(epnew);
- clu.flags = 0x01;
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, &clu, 1, &sector_new);
- if (!epnew)
- return FFS_MEDIAERR;
-
- if (p_newdir->dir == p_fs->root_dir)
- fs_func->set_entry_clu0(epnew, CLUSTER_32(0));
- else
- fs_func->set_entry_clu0(epnew, p_newdir->dir);
- buf_modify(sb, sector_new);
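+ /* also move the stream extension entry that follows the file entry */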
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
+ &sector_mov);
+ exfat_buf_lock(sb, sector_mov);
+ epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
+ &sector_new);
+ if (!epmov || !epnew) {
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_mov);
+
ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
p_uniname, &dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
@@ -3590,7 +2512,7 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
fid->entry = newentry;
- return FFS_SUCCESS;
+ return 0;
}
/*
@@ -3600,7 +2522,7 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
bool read)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
@@ -3612,8 +2534,8 @@ int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
}
if (!p_fs->dev_ejected) {
- ret = bdev_read(sb, sec, bh, 1, read);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_read(sb, sec, bh, 1, read);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3623,7 +2545,7 @@ int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
bool sync)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
@@ -3641,8 +2563,8 @@ int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
}
if (!p_fs->dev_ejected) {
- ret = bdev_write(sb, sec, bh, 1, sync);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_write(sb, sec, bh, 1, sync);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3652,7 +2574,7 @@ int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
int multi_sector_read(struct super_block *sb, sector_t sec,
struct buffer_head **bh, s32 num_secs, bool read)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
@@ -3664,8 +2586,8 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
}
if (!p_fs->dev_ejected) {
- ret = bdev_read(sb, sec, bh, num_secs, read);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_read(sb, sec, bh, num_secs, read);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3675,7 +2597,7 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
int multi_sector_write(struct super_block *sb, sector_t sec,
struct buffer_head *bh, s32 num_secs, bool sync)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
@@ -3692,8 +2614,8 @@ int multi_sector_write(struct super_block *sb, sector_t sec,
}
if (!p_fs->dev_ejected) {
- ret = bdev_write(sb, sec, bh, num_secs, sync);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_write(sb, sec, bh, num_secs, sync);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
index a5c4b68925fb..91e8b0c4dce7 100644
--- a/drivers/staging/exfat/exfat_nls.c
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -7,13 +7,6 @@
#include <linux/nls.h>
#include "exfat.h"
-static u16 bad_dos_chars[] = {
- /* + , ; = [ ] */
- 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
- 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
- 0
-};
-
static u16 bad_uni_chars[] = {
/* " * / : < > ? \ | */
0x0022, 0x002A, 0x002F, 0x003A,
@@ -96,11 +89,6 @@ static u16 *nls_wstrchr(u16 *str, u16 wchar)
return NULL;
}
-int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b)
-{
- return strncmp(a, b, DOS_NAME_LENGTH);
-}
-
int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
{
int i;
@@ -114,186 +102,6 @@ int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
return 0;
}
-void nls_uniname_to_dosname(struct super_block *sb,
- struct dos_name_t *p_dosname,
- struct uni_name_t *p_uniname, bool *p_lossy)
-{
- int i, j, len;
- bool lossy = false;
- u8 buf[MAX_CHARSET_SIZE];
- u8 lower = 0, upper = 0;
- u8 *dosname = p_dosname->name;
- u16 *uniname = p_uniname->name;
- u16 *p, *last_period;
- struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
-
- for (i = 0; i < DOS_NAME_LENGTH; i++)
- *(dosname + i) = ' ';
-
- if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_CUR_DIR_NAME)) {
- *(dosname) = '.';
- p_dosname->name_case = 0x0;
- if (p_lossy)
- *p_lossy = false;
- return;
- }
-
- if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_PAR_DIR_NAME)) {
- *(dosname) = '.';
- *(dosname + 1) = '.';
- p_dosname->name_case = 0x0;
- if (p_lossy)
- *p_lossy = false;
- return;
- }
-
- /* search for the last embedded period */
- last_period = NULL;
- for (p = uniname; *p; p++) {
- if (*p == (u16)'.')
- last_period = p;
- }
-
- i = 0;
- while (i < DOS_NAME_LENGTH) {
- if (i == 8) {
- if (!last_period)
- break;
-
- if (uniname <= last_period) {
- if (uniname < last_period)
- lossy = true;
- uniname = last_period + 1;
- }
- }
-
- if (*uniname == (u16)'\0') {
- break;
- } else if (*uniname == (u16)' ') {
- lossy = true;
- } else if (*uniname == (u16)'.') {
- if (uniname < last_period)
- lossy = true;
- else
- i = 8;
- } else if (nls_wstrchr(bad_dos_chars, *uniname)) {
- lossy = true;
- *(dosname + i) = '_';
- i++;
- } else {
- len = convert_uni_to_ch(nls, buf, *uniname, &lossy);
-
- if (len > 1) {
- if ((i >= 8) && ((i + len) > DOS_NAME_LENGTH))
- break;
-
- if ((i < 8) && ((i + len) > 8)) {
- i = 8;
- continue;
- }
-
- lower = 0xFF;
-
- for (j = 0; j < len; j++, i++)
- *(dosname + i) = *(buf + j);
- } else { /* len == 1 */
- if ((*buf >= 'a') && (*buf <= 'z')) {
- *(dosname + i) = *buf - ('a' - 'A');
-
- if (i < 8)
- lower |= 0x08;
- else
- lower |= 0x10;
- } else if ((*buf >= 'A') && (*buf <= 'Z')) {
- *(dosname + i) = *buf;
-
- if (i < 8)
- upper |= 0x08;
- else
- upper |= 0x10;
- } else {
- *(dosname + i) = *buf;
- }
- i++;
- }
- }
-
- uniname++;
- }
-
- if (*dosname == 0xE5)
- *dosname = 0x05;
-
- if (*uniname != 0x0)
- lossy = true;
-
- if (upper & lower)
- p_dosname->name_case = 0xFF;
- else
- p_dosname->name_case = lower;
-
- if (p_lossy)
- *p_lossy = lossy;
-}
-
-void nls_dosname_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i = 0, j, n = 0;
- u8 buf[DOS_NAME_LENGTH + 2];
- u8 *dosname = p_dosname->name;
- u16 *uniname = p_uniname->name;
- struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
-
- if (*dosname == 0x05) {
- *buf = 0xE5;
- i++;
- n++;
- }
-
- for (; i < 8; i++, n++) {
- if (*(dosname + i) == ' ')
- break;
-
- if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
- (p_dosname->name_case & 0x08))
- *(buf + n) = *(dosname + i) + ('a' - 'A');
- else
- *(buf + n) = *(dosname + i);
- }
- if (*(dosname + 8) != ' ') {
- *(buf + n) = '.';
- n++;
- }
-
- for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
- if (*(dosname + i) == ' ')
- break;
-
- if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
- (p_dosname->name_case & 0x10))
- *(buf + n) = *(dosname + i) + ('a' - 'A');
- else
- *(buf + n) = *(dosname + i);
- }
- *(buf + n) = '\0';
-
- i = 0;
- j = 0;
- while (j < (MAX_NAME_LENGTH - 1)) {
- if (*(buf + i) == '\0')
- break;
-
- i += convert_ch_to_uni(nls, uniname, (buf + i), NULL);
-
- uniname++;
- j++;
- }
-
- *uniname = (u16)'\0';
-}
-
void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
struct uni_name_t *p_uniname)
{
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 3b2b0ceb7297..6e481908c59f 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -26,7 +26,7 @@
#include <linux/sched.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
-
+#include <linux/random.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/mutex.h>
@@ -284,12 +284,12 @@ static const struct dentry_operations exfat_dentry_ops = {
.d_compare = exfat_cmp,
};
-static DEFINE_SEMAPHORE(z_sem);
+static DEFINE_MUTEX(z_mutex);
static inline void fs_sync(struct super_block *sb, bool do_sync)
{
if (do_sync)
- bdev_sync(sb);
+ exfat_bdev_sync(sb);
}
/*
@@ -353,26 +353,28 @@ static int ffsMountVol(struct super_block *sb)
pr_info("[EXFAT] trying to mount...\n");
- down(&z_sem);
+ mutex_lock(&z_mutex);
- buf_init(sb);
+ exfat_buf_init(sb);
- sema_init(&p_fs->v_sem, 1);
+ mutex_init(&p_fs->v_mutex);
p_fs->dev_ejected = 0;
/* open the block device */
- bdev_open(sb);
+ exfat_bdev_open(sb);
if (p_bd->sector_size < sb->s_blocksize) {
- ret = FFS_MEDIAERR;
+ printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n",
+ p_bd->sector_size, sb->s_blocksize);
+ ret = -EINVAL;
goto out;
}
if (p_bd->sector_size > sb->s_blocksize)
sb_set_blocksize(sb, p_bd->sector_size);
/* read Sector 0 */
- if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) {
- ret = FFS_MEDIAERR;
+ if (sector_read(sb, 0, &tmp_bh, 1) != 0) {
+ ret = -EIO;
goto out;
}
@@ -383,8 +385,8 @@ static int ffsMountVol(struct super_block *sb)
/* check the validity of PBR */
if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
brelse(tmp_bh);
- bdev_close(sb);
- ret = FFS_FORMATERR;
+ exfat_bdev_close(sb);
+ ret = -EFSCORRUPTED;
goto out;
}
@@ -394,16 +396,10 @@ static int ffsMountVol(struct super_block *sb)
break;
if (i < 53) {
-#ifdef CONFIG_EXFAT_DONT_MOUNT_VFAT
+ /* Not sure how we'd get here, but complain if it happens */
ret = -EINVAL;
- printk(KERN_INFO "EXFAT: Attempted to mount VFAT filesystem\n");
+ pr_info("EXFAT: Attempted to mount VFAT filesystem\n");
goto out;
-#else
- if (GET16(p_pbr->bpb + 11)) /* num_fat_sectors */
- ret = fat16_mount(sb, p_pbr);
- else
- ret = fat32_mount(sb, p_pbr);
-#endif
} else {
ret = exfat_mount(sb, p_pbr);
}
@@ -411,38 +407,34 @@ static int ffsMountVol(struct super_block *sb)
brelse(tmp_bh);
if (ret) {
- bdev_close(sb);
+ exfat_bdev_close(sb);
goto out;
}
- if (p_fs->vol_type == EXFAT) {
- ret = load_alloc_bitmap(sb);
- if (ret) {
- bdev_close(sb);
- goto out;
- }
- ret = load_upcase_table(sb);
- if (ret) {
- free_alloc_bitmap(sb);
- bdev_close(sb);
- goto out;
- }
+ ret = load_alloc_bitmap(sb);
+ if (ret) {
+ exfat_bdev_close(sb);
+ goto out;
+ }
+ ret = load_upcase_table(sb);
+ if (ret) {
+ free_alloc_bitmap(sb);
+ exfat_bdev_close(sb);
+ goto out;
}
if (p_fs->dev_ejected) {
- if (p_fs->vol_type == EXFAT) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- }
- bdev_close(sb);
- ret = FFS_MEDIAERR;
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ exfat_bdev_close(sb);
+ ret = -EIO;
goto out;
}
pr_info("[EXFAT] mounted successfully\n");
out:
- up(&z_sem);
+ mutex_unlock(&z_mutex);
return ret;
}
@@ -450,39 +442,37 @@ out:
static int ffsUmountVol(struct super_block *sb)
{
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int err = FFS_SUCCESS;
+ int err = 0;
pr_info("[EXFAT] trying to unmount...\n");
- down(&z_sem);
+ mutex_lock(&z_mutex);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
- fs_sync(sb, false);
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
- if (p_fs->vol_type == EXFAT) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- }
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
- FAT_release_all(sb);
- buf_release_all(sb);
+ exfat_fat_release_all(sb);
+ exfat_buf_release_all(sb);
/* close the block device */
- bdev_close(sb);
+ exfat_bdev_close(sb);
if (p_fs->dev_ejected) {
pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n");
- err = FFS_MEDIAERR;
+ err = -EIO;
}
- buf_shutdown(sb);
+ exfat_buf_shutdown(sb);
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
- up(&z_sem);
+ mutex_unlock(&p_fs->v_mutex);
+ mutex_unlock(&z_mutex);
pr_info("[EXFAT] unmounted successfully\n");
@@ -491,15 +481,15 @@ static int ffsUmountVol(struct super_block *sb)
static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
{
- int err = FFS_SUCCESS;
+ int err = 0;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* check the validity of pointer parameters */
if (!info)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (p_fs->used_clusters == UINT_MAX)
p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
@@ -511,31 +501,31 @@ static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
info->FreeClusters = info->NumClusters - info->UsedClusters;
if (p_fs->dev_ejected)
- err = FFS_MEDIAERR;
+ err = -EIO;
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return err;
}
static int ffsSyncVol(struct super_block *sb, bool do_sync)
{
- int err = FFS_SUCCESS;
+ int err = 0;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* synchronize the file system */
fs_sync(sb, do_sync);
fs_set_vol_flags(sb, VOL_CLEAN);
if (p_fs->dev_ejected)
- err = FFS_MEDIAERR;
+ err = -EIO;
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return err;
}
@@ -559,10 +549,10 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -578,7 +568,7 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
&dos_name, TYPE_ALL);
if (dentry < -1) {
- ret = FFS_NOTFOUND;
+ ret = -ENOENT;
goto out;
}
@@ -597,22 +587,13 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
fid->size = 0;
fid->start_clu = p_fs->root_dir;
} else {
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &dir, dentry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &dir, dentry, NULL);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
+ es = get_entry_set_in_dir(sb, &dir, dentry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
fid->type = p_fs->fs_func->get_entry_type(ep);
fid->rwoffset = 0;
@@ -628,15 +609,14 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
}
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
}
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -648,14 +628,14 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
struct uni_name_t uni_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int ret;
+ int ret = 0;
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -667,17 +647,17 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
/* create a new file */
ret = create_file(inode, &dir, &uni_name, mode, fid);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -697,18 +677,18 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!buffer)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -721,7 +701,7 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
if (count == 0) {
if (rcount)
*rcount = 0;
- ret = FFS_EOF;
+ ret = 0;
goto out;
}
@@ -742,9 +722,11 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
}
while (clu_offset > 0) {
- /* clu = FAT_read(sb, clu); */
- if (FAT_read(sb, clu, &clu) == -1)
- return FFS_MEDIAERR;
+ /* clu = exfat_fat_read(sb, clu); */
+ if (exfat_fat_read(sb, clu, &clu) == -1) {
+ ret = -EIO;
+ goto out;
+ }
clu_offset--;
}
@@ -772,13 +754,13 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)buffer + read_bytes,
(char *)tmp_bh->b_data, (s32)oneblkread);
} else {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)buffer + read_bytes,
(char *)tmp_bh->b_data + offset,
@@ -797,11 +779,11 @@ err_out:
*rcount = read_bytes;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -814,7 +796,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
s32 num_clusters, num_alloc, num_alloced = (s32)~0;
int ret = 0;
u32 clu, last_clu;
- sector_t LogSector, sector = 0;
+ sector_t LogSector;
u64 oneblkwrite, write_bytes;
struct chain_t new_clu;
struct timestamp_t tm;
@@ -827,18 +809,18 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!buffer)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -848,7 +830,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (count == 0) {
if (wcount)
*wcount = 0;
- ret = FFS_SUCCESS;
+ ret = 0;
goto out;
}
@@ -864,7 +846,8 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
while (count > 0) {
clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
- clu = last_clu = fid->start_clu;
+ clu = fid->start_clu;
+ last_clu = fid->start_clu;
if (fid->flags == 0x03) {
if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
@@ -885,9 +868,9 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
last_clu = clu;
- /* clu = FAT_read(sb, clu); */
- if (FAT_read(sb, clu, &clu) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu = exfat_fat_read(sb, clu); */
+ if (exfat_fat_read(sb, clu, &clu) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -909,7 +892,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (num_alloced == 0)
break;
if (num_alloced < 0) {
- ret = FFS_MEDIAERR;
+ ret = num_alloced;
goto out;
}
@@ -928,7 +911,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
modified = true;
}
if (new_clu.flags == 0x01)
- FAT_write(sb, last_clu, new_clu.dir);
+ exfat_fat_write(sb, last_clu, new_clu.dir);
}
num_clusters += num_alloced;
@@ -957,12 +940,12 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
if (sector_read(sb, LogSector, &tmp_bh, 0) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)tmp_bh->b_data,
(char *)buffer + write_bytes, (s32)oneblkwrite);
if (sector_write(sb, LogSector, tmp_bh, 0) !=
- FFS_SUCCESS) {
+ 0) {
brelse(tmp_bh);
goto err_out;
}
@@ -970,18 +953,18 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if ((offset > 0) ||
((fid->rwoffset + oneblkwrite) < fid->size)) {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
} else {
if (sector_read(sb, LogSector, &tmp_bh, 0) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
}
memcpy((char *)tmp_bh->b_data + offset,
(char *)buffer + write_bytes, (s32)oneblkwrite);
if (sector_write(sb, LogSector, tmp_bh, 0) !=
- FFS_SUCCESS) {
+ 0) {
brelse(tmp_bh);
goto err_out;
}
@@ -1002,25 +985,15 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
brelse(tmp_bh);
/* (3) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es)
- goto err_out;
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep)
- goto err_out;
- ep2 = ep;
- }
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es)
+ goto err_out;
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
p_fs->fs_func->set_entry_attr(ep, fid->attr);
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
-
if (modified) {
if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
p_fs->fs_func->set_entry_flag(ep2, fid->flags);
@@ -1030,18 +1003,13 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
-
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
}
- if (p_fs->vol_type == EXFAT) {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1051,14 +1019,14 @@ err_out:
*wcount = write_bytes;
if (num_alloced == 0)
- ret = FFS_FULL;
+ ret = -ENOSPC;
else if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1068,7 +1036,6 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
s32 num_clusters;
u32 last_clu = CLUSTER_32(0);
int ret = 0;
- sector_t sector = 0;
struct chain_t clu;
struct timestamp_t tm;
struct dentry_t *ep, *ep2;
@@ -1081,11 +1048,11 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
new_size);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -1095,7 +1062,7 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
if (old_size <= new_size) {
- ret = FFS_SUCCESS;
+ ret = 0;
goto out;
}
@@ -1114,8 +1081,8 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
} else {
while (num_clusters > 0) {
last_clu = clu.dir;
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
num_clusters--;
@@ -1133,22 +1100,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
/* (1) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
- ep2 = ep;
- }
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
p_fs->fs_func->set_entry_attr(ep, fid->attr);
@@ -1159,17 +1117,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
}
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
/* (2) cut off from the FAT chain */
if (last_clu != CLUSTER_32(0)) {
if (fid->flags == 0x01)
- FAT_write(sb, last_clu, CLUSTER_32(~0));
+ exfat_fat_write(sb, last_clu, CLUSTER_32(~0));
}
/* (3) free the clusters */
@@ -1180,18 +1134,18 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
if (fid->rwoffset > fid->size)
fid->rwoffset = fid->size;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
pr_debug("%s exited (%d)\n", __func__, ret);
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1232,14 +1186,14 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!new_path || (*new_path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
update_parent_info(fid, old_parent_inode);
@@ -1252,19 +1206,19 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
/* check if the old file is "." or ".." */
if (p_fs->vol_type != EXFAT) {
if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out2;
}
}
ep = get_entry_in_dir(sb, &olddir, dentry, NULL);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out2;
}
if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out2;
}
@@ -1272,12 +1226,12 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (new_inode) {
u32 entry_type;
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
new_fid = &EXFAT_I(new_inode)->fid;
update_parent_info(new_fid, new_parent_inode);
- p_dir = &(new_fid->dir);
+ p_dir = &new_fid->dir;
new_entry = new_fid->entry;
ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
if (!ep)
@@ -1294,7 +1248,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
new_clu.flags = new_fid->flags;
if (!is_dir_empty(sb, &new_clu)) {
- ret = FFS_FILEEXIST;
+ ret = -EEXIST;
goto out;
}
}
@@ -1314,7 +1268,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
&uni_name, fid);
- if ((ret == FFS_SUCCESS) && new_inode) {
+ if ((ret == 0) && new_inode) {
/* delete entries of new_dir */
ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
if (!ep)
@@ -1328,16 +1282,16 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
num_entries + 1);
}
out:
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out2:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1345,7 +1299,7 @@ out2:
static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
{
s32 dentry;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir, clu_to_free;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
@@ -1353,10 +1307,10 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
dir.dir = fid->dir.dir;
dir.size = fid->dir.size;
@@ -1366,12 +1320,12 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
ep = get_entry_in_dir(sb, &dir, dentry, NULL);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out;
}
if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
fs_set_vol_flags(sb, VOL_DIRTY);
@@ -1390,16 +1344,16 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1409,7 +1363,7 @@ out:
static int ffsSetAttr(struct inode *inode, u32 attr)
{
u32 type;
- int ret = FFS_SUCCESS;
+ int ret = 0;
sector_t sector = 0;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
@@ -1420,36 +1374,28 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
if (fid->attr == attr) {
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
- return FFS_SUCCESS;
+ return -EIO;
+ return 0;
}
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
(fid->entry == -1)) {
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
- return FFS_SUCCESS;
+ return -EIO;
+ return 0;
}
}
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* get the directory entry of given file */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
type = p_fs->fs_func->get_entry_type(ep);
@@ -1457,12 +1403,11 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
else
- ret = FFS_ERROR;
+ ret = -EINVAL;
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
goto out;
}
@@ -1472,23 +1417,19 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
fid->attr = attr;
p_fs->fs_func->set_entry_attr(ep, attr);
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1496,9 +1437,8 @@ out:
static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
{
- sector_t sector = 0;
s32 count;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir;
struct uni_name_t uni_name;
struct timestamp_t tm;
@@ -1512,7 +1452,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
pr_debug("%s entered\n", __func__);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
@@ -1541,35 +1481,25 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = FFS_MEDIAERR;
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs = count;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
}
}
/* get the directory entry of given file or directory */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
- buf_lock(sb, sector);
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
/* set FILE_INFO structure using the acquired struct dentry_t */
info->Attr = p_fs->fs_func->get_entry_attr(ep);
@@ -1594,31 +1524,19 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
memset((char *)&info->AccessTimestamp, 0, sizeof(struct date_time_t));
- *(uni_name.name) = 0x0;
+ *uni_name.name = 0x0;
/* XXX this is very bad for exfat because the name is already included in es.
* API should be revised
*/
- p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry,
+ p_fs->fs_func->get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
uni_name.name);
- if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
- get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
- &uni_name, 0x1);
nls_uniname_to_cstring(sb, info->Name, &uni_name);
- if (p_fs->vol_type == EXFAT) {
- info->NumSubdirs = 2;
- } else {
- buf_unlock(sb, sector);
- get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
- &uni_name, 0x0);
- nls_uniname_to_cstring(sb, info->ShortName, &uni_name);
- info->NumSubdirs = 0;
- }
+ info->NumSubdirs = 2;
info->Size = p_fs->fs_func->get_entry_size(ep2);
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
if (is_dir) {
dir.dir = fid->start_clu;
@@ -1630,18 +1548,18 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = FFS_MEDIAERR;
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs += count;
}
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
pr_debug("%s exited successfully\n", __func__);
return ret;
@@ -1649,8 +1567,7 @@ out:
static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
{
- sector_t sector = 0;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct timestamp_t tm;
struct dentry_t *ep, *ep2;
struct entry_set_cache_t *es = NULL;
@@ -1662,14 +1579,14 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
pr_debug("%s entered (inode %p info %p\n", __func__, inode, info);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
(fid->entry == -1)) {
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
- ret = FFS_SUCCESS;
+ ret = -EIO;
+ ret = 0;
goto out;
}
}
@@ -1677,23 +1594,13 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
fs_set_vol_flags(sb, VOL_DIRTY);
/* get the directory entry of given file or directory */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- /* for other than exfat */
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_attr(ep, info->Attr);
@@ -1716,19 +1623,15 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
p_fs->fs_func->set_entry_size(ep2, info->Size);
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
pr_debug("%s exited (%d)\n", __func__, ret);
@@ -1740,8 +1643,7 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
s32 num_clusters, num_alloced;
bool modified = false;
u32 last_clu;
- int ret = FFS_SUCCESS;
- sector_t sector = 0;
+ int ret = 0;
struct chain_t new_clu;
struct dentry_t *ep;
struct entry_set_cache_t *es = NULL;
@@ -1751,10 +1653,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* check the validity of pointer parameters */
if (!clu)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
@@ -1785,8 +1687,8 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
last_clu = *clu;
- if (FAT_read(sb, *clu, clu) == -1) {
- ret = FFS_MEDIAERR;
+ if (exfat_fat_read(sb, *clu, clu) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -1804,10 +1706,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* (1) allocate a cluster */
num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
if (num_alloced < 0) {
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
} else if (num_alloced == 0) {
- ret = FFS_FULL;
+ ret = -ENOSPC;
goto out;
}
@@ -1825,34 +1727,23 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
modified = true;
}
if (new_clu.flags == 0x01)
- FAT_write(sb, last_clu, new_clu.dir);
+ exfat_fat_write(sb, last_clu, new_clu.dir);
}
num_clusters += num_alloced;
*clu = new_clu.dir;
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- /* get stream entry */
- ep++;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ /* get stream entry */
+ ep++;
/* (3) update directory entry */
if (modified) {
- if (p_fs->vol_type != EXFAT) {
- ep = get_entry_in_dir(sb, &(fid->dir),
- fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- }
-
if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
p_fs->fs_func->set_entry_flag(ep, fid->flags);
@@ -1860,14 +1751,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
p_fs->fs_func->set_entry_clu0(ep,
fid->start_clu);
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
}
- if (p_fs->vol_type == EXFAT) {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
/* add number of new blocks to inode */
inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
@@ -1878,11 +1765,11 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
fid->hint_last_clu = *clu;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1893,7 +1780,7 @@ out:
static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
{
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir;
struct uni_name_t uni_name;
struct super_block *sb = inode->i_sb;
@@ -1903,10 +1790,10 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given old pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -1917,16 +1804,16 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
ret = create_dir(inode, &dir, &uni_name, fid);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1934,7 +1821,7 @@ out:
static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
{
int i, dentry, clu_offset;
- int ret = FFS_SUCCESS;
+ int ret = 0;
s32 dentries_per_clu, dentries_per_clu_bits = 0;
u32 type;
sector_t sector;
@@ -1949,14 +1836,14 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
/* check the validity of pointer parameters */
if (!dir_entry)
- return FFS_ERROR;
+ return -EINVAL;
/* check if the given file ID is opened */
if (fid->type != TYPE_DIR)
- return FFS_PERMISSIONERR;
+ return -ENOTDIR;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (fid->entry == -1) {
dir.dir = p_fs->root_dir;
@@ -2001,9 +1888,9 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
}
while (clu_offset > 0) {
- /* clu.dir = FAT_read(sb, clu.dir); */
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu.dir = exfat_fat_read(sb, clu.dir); */
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -2023,7 +1910,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
for ( ; i < dentries_per_clu; i++, dentry++) {
ep = get_entry_in_dir(sb, &clu, i, &sector);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out;
}
type = fs_func->get_entry_type(ep);
@@ -2034,7 +1921,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
if ((type != TYPE_FILE) && (type != TYPE_DIR))
continue;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
dir_entry->Attr = fs_func->get_entry_attr(ep);
fs_func->get_entry_time(ep, &tm, TM_CREATE);
@@ -2058,28 +1945,16 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
memset((char *)&dir_entry->AccessTimestamp, 0,
sizeof(struct date_time_t));
- *(uni_name.name) = 0x0;
+ *uni_name.name = 0x0;
fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
uni_name.name);
- if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
- get_uni_name_from_dos_entry(sb,
- (struct dos_dentry_t *)ep,
- &uni_name, 0x1);
nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
- if (p_fs->vol_type == EXFAT) {
- ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- } else {
- get_uni_name_from_dos_entry(sb,
- (struct dos_dentry_t *)ep,
- &uni_name, 0x0);
- nls_uniname_to_cstring(sb, dir_entry->ShortName,
- &uni_name);
+ ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
+ if (!ep) {
+ ret = -ENOENT;
+ goto out;
}
dir_entry->Size = fs_func->get_entry_size(ep);
@@ -2095,7 +1970,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
fid->rwoffset = (s64)(++dentry);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
}
@@ -2108,24 +1983,24 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
else
clu.dir = CLUSTER_32(~0);
} else {
- /* clu.dir = FAT_read(sb, clu.dir); */
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu.dir = exfat_fat_read(sb, clu.dir); */
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
}
}
- *(dir_entry->Name) = '\0';
+ *dir_entry->Name = '\0';
fid->rwoffset = (s64)(++dentry);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -2133,14 +2008,14 @@ out:
static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
{
s32 dentry;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir, clu_to_free;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
dir.dir = fid->dir.dir;
dir.size = fid->dir.size;
@@ -2151,18 +2026,18 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
/* check if the file is "." or ".." */
if (p_fs->vol_type != EXFAT) {
if ((dir.dir != p_fs->root_dir) && (dentry < 2))
- return FFS_PERMISSIONERR;
+ return -EPERM;
}
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
clu_to_free.dir = fid->start_clu;
clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
clu_to_free.flags = fid->flags;
if (!is_dir_empty(sb, &clu_to_free)) {
- ret = FFS_FILEEXIST;
+ ret = -ENOTEMPTY;
goto out;
}
@@ -2178,17 +2053,17 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -2202,7 +2077,7 @@ static int exfat_readdir(struct file *filp, struct dir_context *ctx)
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct dir_entry_t de;
unsigned long inum;
@@ -2244,12 +2119,11 @@ get_new:
/* at least we tried to read a sector
* move cpos to next sector position (should be aligned)
*/
- if (err == FFS_MEDIAERR) {
+ if (err == -EIO) {
cpos += 1 << p_bd->sector_size_bits;
cpos &= ~((1 << p_bd->sector_size_bits) - 1);
}
- err = -EIO;
goto end_of_dir;
}
@@ -2293,7 +2167,7 @@ static int exfat_ioctl_volume_id(struct inode *dir)
{
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
return p_fs->vol_id;
}
@@ -2301,7 +2175,7 @@ static int exfat_ioctl_volume_id(struct inode *dir)
static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
-struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = filp->f_path.dentry->d_inode;
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
unsigned int flags;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
@@ -2351,6 +2225,7 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2361,21 +2236,14 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
pr_debug("%s entered\n", __func__);
err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_REGULAR, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else if (err == FFS_NAMETOOLONG)
- err = -ENAMETOOLONG;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2389,7 +2257,10 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/*
* timestamp is already written, so mark_inode_dirty() is unnecessary.
*/
@@ -2522,6 +2393,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2531,22 +2403,22 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
EXFAT_I(inode)->fid.size = i_size_read(inode);
err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid));
- if (err) {
- if (err == FFS_PERMISSIONERR)
- err = -EPERM;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
exfat_detach(inode);
remove_inode_hash(inode);
@@ -2560,6 +2432,7 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
const char *target)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2572,32 +2445,22 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
pr_debug("%s entered\n", __func__);
err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_SYMLINK, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
err = ffsWriteFile(dir, &fid, (char *)target, len, &ret);
if (err) {
ffsRemoveFile(dir, &fid);
-
- if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
goto out;
}
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2611,7 +2474,10 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
EXFAT_I(inode)->target = kmemdup(target, len + 1, GFP_KERNEL);
@@ -2632,6 +2498,7 @@ out:
static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2642,21 +2509,14 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
pr_debug("%s entered\n", __func__);
err = ffsCreateDir(dir, (u8 *)dentry->d_name.name, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else if (err == FFS_NAMETOOLONG)
- err = -ENAMETOOLONG;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2671,7 +2531,10 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
@@ -2687,6 +2550,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2696,21 +2560,13 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
EXFAT_I(inode)->fid.size = i_size_read(inode);
err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid));
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -ENOTEMPTY;
- else if (err == FFS_NOTFOUND)
- err = -ENOENT;
- else if (err == FFS_DIRBUSY)
- err = -EBUSY;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2718,7 +2574,9 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
exfat_detach(inode);
remove_inode_hash(inode);
@@ -2734,6 +2592,7 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
{
struct inode *old_inode, *new_inode;
struct super_block *sb = old_dir->i_sb;
+ struct timespec64 curtime;
loff_t i_pos;
int err;
@@ -2751,24 +2610,15 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir,
new_dentry);
- if (err) {
- if (err == FFS_PERMISSIONERR)
- err = -EPERM;
- else if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_NOTFOUND)
- err = -ENOENT;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(new_dir);
- new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
- current_time(new_dir);
+ curtime = current_time(new_dir);
+ new_dir->i_ctime = curtime;
+ new_dir->i_mtime = curtime;
+ new_dir->i_atime = curtime;
+
if (IS_DIRSYNC(new_dir))
(void)exfat_sync_inode(new_dir);
else
@@ -2790,7 +2640,9 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
inc_nlink(new_dir);
}
INC_IVERSION(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ curtime = current_time(old_dir);
+ old_dir->i_ctime = curtime;
+ old_dir->i_mtime = curtime;
if (IS_DIRSYNC(old_dir))
(void)exfat_sync_inode(old_dir);
else
@@ -2814,13 +2666,16 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ struct timespec64 curtime;
int err, err2;
err = generic_cont_expand_simple(inode, size);
if (err != 0)
return err;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_ctime = curtime;
+ inode->i_mtime = curtime;
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
@@ -2895,7 +2750,8 @@ static void exfat_truncate(struct inode *inode, loff_t old_size)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2914,7 +2770,9 @@ static void exfat_truncate(struct inode *inode, loff_t old_size)
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_ctime = curtime;
+ inode->i_mtime = curtime;
if (IS_DIRSYNC(inode))
(void)exfat_sync_inode(inode);
else
@@ -2936,8 +2794,8 @@ static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
pr_debug("%s entered\n", __func__);
- if ((attr->ia_valid & ATTR_SIZE)
- && (attr->ia_size > i_size_read(inode))) {
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
if (error || attr->ia_valid == ATTR_SIZE)
return error;
@@ -2946,8 +2804,8 @@ static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
ia_valid = attr->ia_valid;
- if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET))
- && exfat_allow_set_time(sbi, inode)) {
+ if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
+ exfat_allow_set_time(sbi, inode)) {
attr->ia_valid &= ~(ATTR_MTIME_SET |
ATTR_ATIME_SET |
ATTR_TIMES_SET);
@@ -3073,8 +2931,7 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
- struct bd_info_t *p_bd = &(sbi->bd_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
const unsigned long blocksize = sb->s_blocksize;
const unsigned char blocksize_bits = sb->s_blocksize_bits;
sector_t last_block;
@@ -3084,18 +2941,6 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
*phys = 0;
*mapped_blocks = 0;
- if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) {
- if (inode->i_ino == EXFAT_ROOT_INO) {
- if (sector <
- (p_fs->dentries_in_root >>
- (p_bd->sector_size_bits - DENTRY_SIZE_BITS))) {
- *phys = sector + p_fs->root_start_sector;
- *mapped_blocks = 1;
- }
- return 0;
- }
- }
-
last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
if (sector >= last_block) {
if (*create == 0)
@@ -3114,12 +2959,7 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
err = ffsMapCluster(inode, clu_offset, &cluster);
- if (err) {
- if (err == FFS_FULL)
- return -ENOSPC;
- else
- return -EIO;
- } else if (cluster != CLUSTER_32(~0)) {
+ if (!err && (cluster != CLUSTER_32(~0))) {
*phys = START_SECTOR(cluster) + sec_offset;
*mapped_blocks = p_fs->sectors_per_clu - sec_offset;
}
@@ -3215,6 +3055,7 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct timespec64 curtime;
int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
@@ -3223,7 +3064,9 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
exfat_write_failed(mapping, pos + len);
if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_ctime = curtime;
fid->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -3302,7 +3145,7 @@ static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
{
struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
struct dir_entry_t info;
memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t));
@@ -3314,7 +3157,7 @@ static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
INC_IVERSION(inode);
- inode->i_generation = get_seconds();
+ inode->i_generation = prandom_u32();
if (info.Attr & ATTR_SUBDIR) { /* directory */
inode->i_generation &= ~1;
@@ -3501,7 +3344,7 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct vol_info_t info;
if (p_fs->used_clusters == UINT_MAX) {
- if (ffsGetVolInfo(sb, &info) == FFS_MEDIAERR)
+ if (ffsGetVolInfo(sb, &info) == -EIO)
return -EIO;
} else {
@@ -3674,7 +3517,8 @@ static int parse_options(char *options, int silent, int *debug,
opts->fs_uid = current_uid();
opts->fs_gid = current_gid();
- opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+ opts->fs_fmask = current->fs->umask;
+ opts->fs_dmask = current->fs->umask;
opts->allow_utime = U16_MAX;
opts->codepage = exfat_default_codepage;
opts->iocharset = exfat_default_iocharset;
@@ -3787,7 +3631,8 @@ static int exfat_read_root(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
+ struct timespec64 curtime;
struct dir_entry_t info;
EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
@@ -3818,7 +3663,10 @@ static int exfat_read_root(struct inode *inode)
EXFAT_I(inode)->mmu_private = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
set_nlink(inode, info.NumSubdirs + 2);
return 0;
@@ -3838,7 +3686,6 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
struct exfat_sb_info *sbi;
int debug, ret;
long error;
- char buf[50];
/*
* GFP_KERNEL is ok here, because while we do hold the
@@ -3885,17 +3732,6 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
* if (FAT_FIRST_ENT(sb, media) != first)
*/
- /* codepage is not meaningful in exfat */
- if (sbi->fs_info.vol_type != EXFAT) {
- error = -EINVAL;
- sprintf(buf, "cp%d", sbi->options.codepage);
- sbi->nls_disk = load_nls(buf);
- if (!sbi->nls_disk) {
- pr_err("[EXFAT] Codepage %s not found\n", buf);
- goto out_fail2;
- }
- }
-
sbi->nls_io = load_nls(sbi->options.iocharset);
error = -ENOMEM;
@@ -3984,10 +3820,10 @@ static void exfat_debug_kill_sb(struct super_block *sb)
* invalidate_bdev drops all device cache include
* dirty. We use this to simulate device removal.
*/
- down(&p_fs->v_sem);
- FAT_release_all(sb);
- buf_release_all(sb);
- up(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
+ exfat_fat_release_all(sb);
+ exfat_buf_release_all(sb);
+ mutex_unlock(&p_fs->v_mutex);
invalidate_bdev(bdev);
}
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index cb61c2a772bd..dad1ddcd7b0c 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
- depends on FB && SPI && OF
+ depends on FB && SPI
depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -95,8 +95,8 @@ config FB_TFT_PCD8544
Generic Framebuffer support for PCD8544
config FB_TFT_RA8875
- tristate "FB driver for the RA8875 LCD Controller"
- depends on FB_TFT
+ tristate "FB driver for the RA8875 LCD Controller"
+ depends on FB_TFT
help
Generic Framebuffer support for RA8875
@@ -112,6 +112,13 @@ config FB_TFT_S6D1121
help
Generic Framebuffer support for S6D1121
+config FB_TFT_SEPS525
+ tristate "FB driver for the SEPS525 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for SEPS525
+ Say Y if you have such a display that utilizes this controller.
+
config FB_TFT_SH1106
tristate "FB driver for the SH1106 OLED Controller"
depends on FB_TFT
@@ -125,10 +132,10 @@ config FB_TFT_SSD1289
Framebuffer support for SSD1289
config FB_TFT_SSD1305
- tristate "FB driver for the SSD1305 OLED Controller"
- depends on FB_TFT
- help
- Framebuffer support for SSD1305
+ tristate "FB driver for the SSD1305 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1305
config FB_TFT_SSD1306
tristate "FB driver for the SSD1306 OLED Controller"
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 27af43f32f81..e87193f7df14 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FB_TFT_PCD8544) += fb_pcd8544.o
obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
+obj-$(CONFIG_FB_TFT_SEPS525) += fb_seps525.o
obj-$(CONFIG_FB_TFT_SH1106) += fb_sh1106.o
obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o
diff --git a/drivers/staging/fbtft/fb_seps525.c b/drivers/staging/fbtft/fb_seps525.c
new file mode 100644
index 000000000000..05882e2cde7f
--- /dev/null
+++ b/drivers/staging/fbtft/fb_seps525.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FB driver for the NHD-1.69-160128UGC3 (Newhaven Display International, Inc.)
+ * using the SEPS525 (Syncoam) LCD Controller
+ *
+ * Copyright (C) 2016 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_seps525"
+#define WIDTH 160
+#define HEIGHT 128
+
+#define SEPS525_INDEX 0x00
+#define SEPS525_STATUS_RD 0x01
+#define SEPS525_OSC_CTL 0x02
+#define SEPS525_IREF 0x80
+#define SEPS525_CLOCK_DIV 0x03
+#define SEPS525_REDUCE_CURRENT 0x04
+#define SEPS525_SOFT_RST 0x05
+#define SEPS525_DISP_ONOFF 0x06
+#define SEPS525_PRECHARGE_TIME_R 0x08
+#define SEPS525_PRECHARGE_TIME_G 0x09
+#define SEPS525_PRECHARGE_TIME_B 0x0A
+#define SEPS525_PRECHARGE_CURRENT_R 0x0B
+#define SEPS525_PRECHARGE_CURRENT_G 0x0C
+#define SEPS525_PRECHARGE_CURRENT_B 0x0D
+#define SEPS525_DRIVING_CURRENT_R 0x10
+#define SEPS525_DRIVING_CURRENT_G 0x11
+#define SEPS525_DRIVING_CURRENT_B 0x12
+#define SEPS525_DISPLAYMODE_SET 0x13
+#define SEPS525_RGBIF 0x14
+#define SEPS525_RGB_POL 0x15
+#define SEPS525_MEMORY_WRITEMODE 0x16
+#define SEPS525_MX1_ADDR 0x17
+#define SEPS525_MX2_ADDR 0x18
+#define SEPS525_MY1_ADDR 0x19
+#define SEPS525_MY2_ADDR 0x1A
+#define SEPS525_MEMORY_ACCESS_POINTER_X 0x20
+#define SEPS525_MEMORY_ACCESS_POINTER_Y 0x21
+#define SEPS525_DDRAM_DATA_ACCESS_PORT 0x22
+#define SEPS525_GRAY_SCALE_TABLE_INDEX 0x50
+#define SEPS525_GRAY_SCALE_TABLE_DATA 0x51
+#define SEPS525_DUTY 0x28
+#define SEPS525_DSL 0x29
+#define SEPS525_D1_DDRAM_FAC 0x2E
+#define SEPS525_D1_DDRAM_FAR 0x2F
+#define SEPS525_D2_DDRAM_SAC 0x31
+#define SEPS525_D2_DDRAM_SAR 0x32
+#define SEPS525_SCR1_FX1 0x33
+#define SEPS525_SCR1_FX2 0x34
+#define SEPS525_SCR1_FY1 0x35
+#define SEPS525_SCR1_FY2 0x36
+#define SEPS525_SCR2_SX1 0x37
+#define SEPS525_SCR2_SX2 0x38
+#define SEPS525_SCR2_SY1 0x39
+#define SEPS525_SCR2_SY2 0x3A
+#define SEPS525_SCREEN_SAVER_CONTROL	0x3B
+#define SEPS525_SS_SLEEP_TIMER 0x3C
+#define SEPS525_SCREEN_SAVER_MODE 0x3D
+#define SEPS525_SS_SCR1_FU 0x3E
+#define SEPS525_SS_SCR1_MXY 0x3F
+#define SEPS525_SS_SCR2_FU 0x40
+#define SEPS525_SS_SCR2_MXY 0x41
+#define SEPS525_MOVING_DIRECTION 0x42
+#define SEPS525_SS_SCR2_SX1 0x47
+#define SEPS525_SS_SCR2_SX2 0x48
+#define SEPS525_SS_SCR2_SY1 0x49
+#define SEPS525_SS_SCR2_SY2 0x4A
+
+/* SEPS525_DISPLAYMODE_SET */
+#define MODE_SWAP_BGR BIT(7)
+#define MODE_SM BIT(6)
+#define MODE_RD BIT(5)
+#define MODE_CD BIT(4)
+
+#define seps525_use_window 0 /* FBTFT doesn't really use it today */
+
+/* Init sequence taken from: Arduino Library for the Adafruit 2.2" display */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ usleep_range(1000, 5000);
+
+ /* Disable Oscillator Power Down */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x03);
+ usleep_range(1000, 5000);
+ /* Set Normal Driving Current */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x00);
+ usleep_range(1000, 5000);
+
+ write_reg(par, SEPS525_SCREEN_SAVER_CONTROL, 0x00);
+ /* Set EXPORT1 Pin at Internal Clock */
+ write_reg(par, SEPS525_OSC_CTL, 0x01);
+ /* Set Clock as 120 Frames/Sec */
+ write_reg(par, SEPS525_CLOCK_DIV, 0x90);
+ /* Set Reference Voltage Controlled by External Resistor */
+ write_reg(par, SEPS525_IREF, 0x01);
+
+ /* precharge time R G B */
+ write_reg(par, SEPS525_PRECHARGE_TIME_R, 0x04);
+ write_reg(par, SEPS525_PRECHARGE_TIME_G, 0x05);
+ write_reg(par, SEPS525_PRECHARGE_TIME_B, 0x05);
+
+ /* precharge current R G B (uA) */
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_R, 0x9D);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_G, 0x8C);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_B, 0x57);
+
+ /* driving current R G B (uA) */
+ write_reg(par, SEPS525_DRIVING_CURRENT_R, 0x56);
+ write_reg(par, SEPS525_DRIVING_CURRENT_G, 0x4D);
+ write_reg(par, SEPS525_DRIVING_CURRENT_B, 0x46);
+ /* Set Color Sequence */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, 0xA0);
+ write_reg(par, SEPS525_RGBIF, 0x01); /* Set MCU Interface Mode */
+ /* Set Memory Write Mode */
+ write_reg(par, SEPS525_MEMORY_WRITEMODE, 0x66);
+ write_reg(par, SEPS525_DUTY, 0x7F); /* 1/128 Duty (0x0F~0x7F) */
+ /* Set Mapping RAM Display Start Line (0x00~0x7F) */
+ write_reg(par, SEPS525_DSL, 0x00);
+ write_reg(par, SEPS525_DISP_ONOFF, 0x01); /* Display On (0x00/0x01) */
+ /* Set All Internal Register Value as Normal Mode */
+ write_reg(par, SEPS525_SOFT_RST, 0x00);
+ /* Set RGB Interface Polarity as Active Low */
+ write_reg(par, SEPS525_RGB_POL, 0x00);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ if (seps525_use_window) {
+ /* Set Window Xs,Ys Xe,Ye*/
+ write_reg(par, SEPS525_MX1_ADDR, xs);
+ write_reg(par, SEPS525_MX2_ADDR, xe);
+ write_reg(par, SEPS525_MY1_ADDR, ys);
+ write_reg(par, SEPS525_MY2_ADDR, ye);
+ }
+ /* start position X,Y */
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_X, xs);
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_Y, ys);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ u8 val;
+
+ switch (par->info->var.rotate) {
+ case 0:
+ val = 0;
+ break;
+ case 180:
+ val = MODE_RD | MODE_CD;
+ break;
+ case 90:
+ case 270:
+
+ default:
+ return -EINVAL;
+ }
+ /* Memory Access Control */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, val |
+ (par->bgr ? MODE_SWAP_BGR : 0));
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "syncoam,seps525", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:seps525");
+MODULE_ALIAS("platform:seps525");
+
+MODULE_DESCRIPTION("FB driver for the SEPS525 LCD Controller");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 65681d0fe200..e763205e9e4f 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -91,7 +91,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x2C | (pump & 0x03));
/* Set inverse display */
- write_reg(par, 0xA6 | (0x01 & 0x01));
+ write_reg(par, 0xA6 | 0x01);
/* Set 4-bit grayscale mode */
write_reg(par, 0xD0 | (0x02 & 0x03));
@@ -157,8 +157,8 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
@@ -171,11 +171,11 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
+ | (0x1 << 2) /* Mirror Y ON */
| (0x0 & 0x1) << 1 /* Mirror X OFF */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
@@ -183,13 +183,13 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 2) /* Mirror Y ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
default:
@@ -197,12 +197,12 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
| (0x0 & 0x1) << 2 /* Mirror Y OFF */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
}
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index a0a67aa517f0..ffb84987dd86 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -22,8 +22,9 @@
#include <linux/uaccess.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
-#include <linux/of.h>
+
#include <video/mipi_display.h>
#include "fbtft.h"
@@ -70,7 +71,6 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
}
EXPORT_SYMBOL(fbtft_dbg_hex);
-#ifdef CONFIG_OF
static int fbtft_request_one_gpio(struct fbtft_par *par,
const char *name, int index,
struct gpio_desc **gpiop)
@@ -92,14 +92,11 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
return ret;
}
-static int fbtft_request_gpios_dt(struct fbtft_par *par)
+static int fbtft_request_gpios(struct fbtft_par *par)
{
int i;
int ret;
- if (!par->info->device->of_node)
- return -EINVAL;
-
ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
if (ret)
return ret;
@@ -135,7 +132,6 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
return 0;
}
-#endif
#ifdef CONFIG_FB_BACKLIGHT
static int fbtft_backlight_update_status(struct backlight_device *bd)
@@ -317,7 +313,7 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
/* special case, needed ? */
if (y == -1) {
y = 0;
- height = info->var.yres - 1;
+ height = info->var.yres;
}
/* Mark display lines/area as dirty */
@@ -529,6 +525,7 @@ static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
*
* @display: pointer to structure describing the display
* @dev: pointer to the device for this fb, this can be NULL
+ * @pdata: platform data for the display in use
*
* Creates a new frame buffer info structure.
*
@@ -666,7 +663,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
- strncpy(info->fix.id, dev->driver->name, 16);
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.xpanstep = 0;
@@ -897,46 +894,54 @@ int fbtft_unregister_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(fbtft_unregister_framebuffer);
-#ifdef CONFIG_OF
/**
- * fbtft_init_display_dt() - Device Tree init_display() function
+ * fbtft_init_display_from_property() - Device Tree init_display() function
* @par: Driver data
*
* Return: 0 if successful, negative if error
*/
-static int fbtft_init_display_dt(struct fbtft_par *par)
+static int fbtft_init_display_from_property(struct fbtft_par *par)
{
- struct device_node *node = par->info->device->of_node;
- struct property *prop;
- const __be32 *p;
+ struct device *dev = par->info->device;
+ int buf[64], count, index, i, j, ret;
+ u32 *values;
u32 val;
- int buf[64], i, j;
- if (!node)
+ count = device_property_count_u32(dev, "init");
+ if (count < 0)
+ return count;
+ if (count == 0)
return -EINVAL;
- prop = of_find_property(node, "init", NULL);
- p = of_prop_next_u32(prop, NULL, &val);
- if (!p)
- return -EINVAL;
+ values = kmalloc_array(count, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(dev, "init", values, count);
+ if (ret)
+ goto out_free;
par->fbtftops.reset(par);
if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
- while (p) {
+ index = -1;
+ while (index < count) {
+ val = values[++index];
+
if (val & FBTFT_OF_INIT_CMD) {
val &= 0xFFFF;
i = 0;
- while (p && !(val & 0xFFFF0000)) {
+ while ((index < count) && !(val & 0xFFFF0000)) {
if (i > 63) {
- dev_err(par->info->device,
+ dev_err(dev,
"%s: Maximum register values exceeded\n",
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
buf[i++] = val;
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
}
/* make debug message */
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
@@ -966,17 +971,18 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"init: msleep(%u)\n", val & 0xFFFF);
msleep(val & 0xFFFF);
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
} else {
- dev_err(par->info->device, "illegal init value 0x%X\n",
- val);
- return -EINVAL;
+ dev_err(dev, "illegal init value 0x%X\n", val);
+ ret = -EINVAL;
+ goto out_free;
}
}
- return 0;
+out_free:
+ kfree(values);
+ return ret;
}
-#endif
/**
* fbtft_init_display() - Generic init_display() function
@@ -1137,27 +1143,25 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
return 0;
}
-#ifdef CONFIG_OF
/* returns 0 if the property is not present */
-static u32 fbtft_of_value(struct device_node *node, const char *propname)
+static u32 fbtft_property_value(struct device *dev, const char *propname)
{
int ret;
u32 val = 0;
- ret = of_property_read_u32(node, propname, &val);
+ ret = device_property_read_u32(dev, propname, &val);
if (ret == 0)
- pr_info("%s: %s = %u\n", __func__, propname, val);
+ dev_info(dev, "%s: %s = %u\n", __func__, propname, val);
return val;
}
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
+static struct fbtft_platform_data *fbtft_properties_read(struct device *dev)
{
- struct device_node *node = dev->of_node;
struct fbtft_platform_data *pdata;
- if (!node) {
- dev_err(dev, "Missing platform data or DT\n");
+ if (!dev_fwnode(dev)) {
+ dev_err(dev, "Missing platform data or properties\n");
return ERR_PTR(-EINVAL);
}
@@ -1165,35 +1169,28 @@ static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->display.width = fbtft_of_value(node, "width");
- pdata->display.height = fbtft_of_value(node, "height");
- pdata->display.regwidth = fbtft_of_value(node, "regwidth");
- pdata->display.buswidth = fbtft_of_value(node, "buswidth");
- pdata->display.backlight = fbtft_of_value(node, "backlight");
- pdata->display.bpp = fbtft_of_value(node, "bpp");
- pdata->display.debug = fbtft_of_value(node, "debug");
- pdata->rotate = fbtft_of_value(node, "rotate");
- pdata->bgr = of_property_read_bool(node, "bgr");
- pdata->fps = fbtft_of_value(node, "fps");
- pdata->txbuflen = fbtft_of_value(node, "txbuflen");
- pdata->startbyte = fbtft_of_value(node, "startbyte");
- of_property_read_string(node, "gamma", (const char **)&pdata->gamma);
-
- if (of_find_property(node, "led-gpios", NULL))
+ pdata->display.width = fbtft_property_value(dev, "width");
+ pdata->display.height = fbtft_property_value(dev, "height");
+ pdata->display.regwidth = fbtft_property_value(dev, "regwidth");
+ pdata->display.buswidth = fbtft_property_value(dev, "buswidth");
+ pdata->display.backlight = fbtft_property_value(dev, "backlight");
+ pdata->display.bpp = fbtft_property_value(dev, "bpp");
+ pdata->display.debug = fbtft_property_value(dev, "debug");
+ pdata->rotate = fbtft_property_value(dev, "rotate");
+ pdata->bgr = device_property_read_bool(dev, "bgr");
+ pdata->fps = fbtft_property_value(dev, "fps");
+ pdata->txbuflen = fbtft_property_value(dev, "txbuflen");
+ pdata->startbyte = fbtft_property_value(dev, "startbyte");
+ device_property_read_string(dev, "gamma", (const char **)&pdata->gamma);
+
+ if (device_property_present(dev, "led-gpios"))
pdata->display.backlight = 1;
- if (of_find_property(node, "init", NULL))
- pdata->display.fbtftops.init_display = fbtft_init_display_dt;
- pdata->display.fbtftops.request_gpios = fbtft_request_gpios_dt;
+ if (device_property_present(dev, "init"))
+ pdata->display.fbtftops.init_display = fbtft_init_display_from_property;
+ pdata->display.fbtftops.request_gpios = fbtft_request_gpios;
return pdata;
}
-#else
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
-{
- dev_err(dev, "Missing platform data\n");
- return ERR_PTR(-EINVAL);
-}
-#endif
/**
* fbtft_probe_common() - Generic device probe() helper function
@@ -1227,7 +1224,7 @@ int fbtft_probe_common(struct fbtft_display *display,
pdata = dev->platform_data;
if (!pdata) {
- pdata = fbtft_probe_dt(dev);
+ pdata = fbtft_properties_read(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 9b6bdb62093d..5f782da51959 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -309,7 +309,7 @@ MODULE_DEVICE_TABLE(of, dt_ids); \
static struct spi_driver fbtft_driver_spi_driver = { \
.driver = { \
.name = _name, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_spi, \
.remove = fbtft_driver_remove_spi, \
@@ -319,7 +319,7 @@ static struct platform_driver fbtft_driver_platform_driver = { \
.driver = { \
.name = _name, \
.owner = THIS_MODULE, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_pdev, \
.remove = fbtft_driver_remove_pdev, \
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-client.h b/drivers/staging/fieldbus/anybuss/anybuss-client.h
index 0c4b6a1ffe10..8ee1f1baccf1 100644
--- a/drivers/staging/fieldbus/anybuss/anybuss-client.h
+++ b/drivers/staging/fieldbus/anybuss/anybuss-client.h
@@ -12,6 +12,9 @@
#include <linux/types.h>
#include <linux/poll.h>
+/* move to <linux/fieldbus_dev.h> when taking this out of staging */
+#include "../fieldbus_dev.h"
+
struct anybuss_host;
struct anybuss_client {
@@ -61,12 +64,6 @@ anybuss_set_drvdata(struct anybuss_client *client, void *data)
int anybuss_set_power(struct anybuss_client *client, bool power_on);
-enum anybuss_offl_mode {
- AB_OFFL_MODE_CLEAR = 0,
- AB_OFFL_MODE_FREEZE,
- AB_OFFL_MODE_SET
-};
-
struct anybuss_memcfg {
u16 input_io;
u16 input_dpram;
@@ -76,7 +73,7 @@ struct anybuss_memcfg {
u16 output_dpram;
u16 output_total;
- enum anybuss_offl_mode offl_mode;
+ enum fieldbus_dev_offl_mode offl_mode;
};
int anybuss_start_init(struct anybuss_client *client,
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 2ecffa42e561..5b8d0bae9ff3 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -127,12 +127,10 @@ static const struct regmap_config arcx_regmap_cfg = {
static struct regmap *create_parallel_regmap(struct platform_device *pdev,
int idx)
{
- struct resource *res;
void __iomem *base;
struct device *dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, idx + 1);
if (IS_ERR(base))
return ERR_CAST(base);
return devm_regmap_init_mmio(dev, base, &arcx_regmap_cfg);
@@ -230,7 +228,6 @@ static int controller_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev *regulator;
int err, id;
- struct resource *res;
struct anybuss_host *host;
u8 status1, cap;
@@ -244,8 +241,7 @@ static int controller_probe(struct platform_device *pdev)
return PTR_ERR(cd->reset_gpiod);
/* CPLD control memory, sits at index 0 */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cd->cpld_base = devm_ioremap_resource(dev, res);
+ cd->cpld_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cd->cpld_base)) {
dev_err(dev,
"failed to map cpld base address\n");
diff --git a/drivers/staging/fieldbus/anybuss/hms-profinet.c b/drivers/staging/fieldbus/anybuss/hms-profinet.c
index 5446843e35f4..31c43a0a5776 100644
--- a/drivers/staging/fieldbus/anybuss/hms-profinet.c
+++ b/drivers/staging/fieldbus/anybuss/hms-profinet.c
@@ -96,7 +96,7 @@ static int __profi_enable(struct profi_priv *priv)
.output_io = 220,
.output_dpram = PROFI_DPRAM_SIZE,
.output_total = PROFI_DPRAM_SIZE,
- .offl_mode = AB_OFFL_MODE_CLEAR,
+ .offl_mode = FIELDBUS_DEV_OFFL_MODE_CLEAR,
};
/*
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index f69dc4930457..549cb7d51af8 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1022,13 +1022,13 @@ int anybuss_start_init(struct anybuss_client *client,
};
switch (cfg->offl_mode) {
- case AB_OFFL_MODE_CLEAR:
+ case FIELDBUS_DEV_OFFL_MODE_CLEAR:
op_mode = 0;
break;
- case AB_OFFL_MODE_FREEZE:
+ case FIELDBUS_DEV_OFFL_MODE_FREEZE:
op_mode = OP_MODE_FBFC;
break;
- case AB_OFFL_MODE_SET:
+ case FIELDBUS_DEV_OFFL_MODE_SET:
op_mode = OP_MODE_FBS;
break;
default:
diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c
index f6f5b92ba914..1ba0234cc60d 100644
--- a/drivers/staging/fieldbus/dev_core.c
+++ b/drivers/staging/fieldbus/dev_core.c
@@ -23,9 +23,6 @@ static dev_t fieldbus_devt;
static DEFINE_IDA(fieldbus_ida);
static DEFINE_MUTEX(fieldbus_mtx);
-static const char ctrl_enabled[] = "enabled";
-static const char ctrl_disabled[] = "disabled";
-
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/staging/fieldbus/fieldbus_dev.h b/drivers/staging/fieldbus/fieldbus_dev.h
index a10fc3b446dc..301dca3b8d71 100644
--- a/drivers/staging/fieldbus/fieldbus_dev.h
+++ b/drivers/staging/fieldbus/fieldbus_dev.h
@@ -15,6 +15,12 @@ enum fieldbus_dev_type {
FIELDBUS_DEV_TYPE_PROFINET,
};
+enum fieldbus_dev_offl_mode {
+ FIELDBUS_DEV_OFFL_MODE_CLEAR = 0,
+ FIELDBUS_DEV_OFFL_MODE_FREEZE,
+ FIELDBUS_DEV_OFFL_MODE_SET
+};
+
/**
* struct fieldbus_dev - Fieldbus device
* @read_area: [DRIVER] function to read the process data area of the
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 14a9eebf687e..39c0fe347188 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -18,8 +18,6 @@
#include "ethsw.h"
-static struct workqueue_struct *ethsw_owq;
-
/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR 8
#define DPSW_MIN_VER_MINOR 1
@@ -1174,10 +1172,6 @@ static int port_netdevice_event(struct notifier_block *unused,
return notifier_from_errno(err);
}
-static struct notifier_block port_nb __read_mostly = {
- .notifier_call = port_netdevice_event,
-};
-
struct ethsw_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
@@ -1233,8 +1227,10 @@ static int port_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
struct ethsw_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
if (!ethsw_port_dev_check(dev))
return NOTIFY_DONE;
@@ -1270,7 +1266,7 @@ static int port_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
- queue_work(ethsw_owq, &switchdev_work->work);
+ queue_work(ethsw->workqueue, &switchdev_work->work);
return NOTIFY_DONE;
@@ -1318,31 +1314,27 @@ static int port_switchdev_blocking_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct notifier_block port_switchdev_nb = {
- .notifier_call = port_switchdev_event,
-};
-
-static struct notifier_block port_switchdev_blocking_nb = {
- .notifier_call = port_switchdev_blocking_event,
-};
-
static int ethsw_register_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
- err = register_netdevice_notifier(&port_nb);
+ ethsw->port_nb.notifier_call = port_netdevice_event;
+ err = register_netdevice_notifier(&ethsw->port_nb);
if (err) {
dev_err(dev, "Failed to register netdev notifier\n");
return err;
}
- err = register_switchdev_notifier(&port_switchdev_nb);
+ ethsw->port_switchdev_nb.notifier_call = port_switchdev_event;
+ err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err) {
dev_err(dev, "Failed to register switchdev notifier\n");
goto err_switchdev_nb;
}
- err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
+ ethsw->port_switchdevb_nb.notifier_call = port_switchdev_blocking_event;
+ err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
if (err) {
dev_err(dev, "Failed to register switchdev blocking notifier\n");
goto err_switchdev_blocking_nb;
@@ -1351,9 +1343,9 @@ static int ethsw_register_notifier(struct device *dev)
return 0;
err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&port_switchdev_nb);
+ unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
err_switchdev_nb:
- unregister_netdevice_notifier(&port_nb);
+ unregister_netdevice_notifier(&ethsw->port_nb);
return err;
}
@@ -1435,9 +1427,10 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
}
}
- ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
- "ethsw");
- if (!ethsw_owq) {
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
err = -ENOMEM;
goto err_close;
}
@@ -1449,7 +1442,7 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
return 0;
err_destroy_ordered_workqueue:
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
err_close:
dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -1491,21 +1484,22 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
static void ethsw_unregister_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct notifier_block *nb;
int err;
- nb = &port_switchdev_blocking_nb;
+ nb = &ethsw->port_switchdevb_nb;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev blocking notifier (%d)\n", err);
- err = unregister_switchdev_notifier(&port_switchdev_nb);
+ err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev notifier (%d)\n", err);
- err = unregister_netdevice_notifier(&port_nb);
+ err = unregister_netdevice_notifier(&ethsw->port_nb);
if (err)
dev_err(dev,
"Failed to unregister netdev notifier (%d)\n", err);
@@ -1536,7 +1530,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
ethsw_teardown_irqs(sw_dev);
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index 3ea8a0ad8c10..a0244f7d5003 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -66,6 +66,11 @@ struct ethsw_core {
u8 vlans[VLAN_VID_MASK + 1];
bool learning;
+
+ struct notifier_block port_nb;
+ struct notifier_block port_switchdev_nb;
+ struct notifier_block port_switchdevb_nb;
+ struct workqueue_struct *workqueue;
};
#endif /* __ETHSW_H */
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index 9543f8454af9..6964aac2a7ed 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config FIREWIRE_SERIAL
- tristate "TTY over Firewire"
- depends on FIREWIRE && TTY
- help
- This enables TTY over IEEE 1394, providing high-speed serial
+ tristate "TTY over Firewire"
+ depends on FIREWIRE && TTY
+ help
+ This enables TTY over IEEE 1394, providing high-speed serial
connectivity to cabled peers. This driver implements an
ad-hoc transport protocol and is currently limited to
Linux-to-Linux communication.
@@ -14,18 +14,18 @@ config FIREWIRE_SERIAL
if FIREWIRE_SERIAL
config FWTTY_MAX_TOTAL_PORTS
- int "Maximum number of serial ports supported"
- default "64"
- help
- Set this to the maximum number of serial ports you want the
+ int "Maximum number of serial ports supported"
+ default "64"
+ help
+ Set this to the maximum number of serial ports you want the
firewire-serial driver to support.
config FWTTY_MAX_CARD_PORTS
- int "Maximum number of serial ports supported per adapter"
- range 0 FWTTY_MAX_TOTAL_PORTS
- default "32"
- help
- Set this to the maximum number of serial ports each firewire
+ int "Maximum number of serial ports supported per adapter"
+ range 0 FWTTY_MAX_TOTAL_PORTS
+ default "32"
+ help
+ Set this to the maximum number of serial ports each firewire
adapter supports. The actual number of serial ports registered
is set with the module parameter "ttys".
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
index 240f9bb10b71..e3047d36d8db 100644
--- a/drivers/staging/gasket/gasket_ioctl.c
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -34,8 +34,8 @@ static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
- return gasket_interrupt_set_eventfd(
- gasket_dev->interrupt_data, die.interrupt, die.event_fd);
+ return gasket_interrupt_set_eventfd(gasket_dev->interrupt_data,
+ die.interrupt, die.event_fd);
}
/* Read the size of the page table. */
@@ -54,9 +54,9 @@ static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
ibuf.size = gasket_page_table_num_entries(
gasket_dev->page_table[ibuf.page_table_index]);
- trace_gasket_ioctl_page_table_data(
- ibuf.page_table_index, ibuf.size, ibuf.host_address,
- ibuf.device_address);
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
@@ -101,9 +101,9 @@ static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
- trace_gasket_ioctl_page_table_data(
- ibuf.page_table_index, ibuf.size, ibuf.host_address,
- ibuf.device_address);
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/staging/hp/Kconfig
index fb395cfe6b92..fb395cfe6b92 100644
--- a/drivers/net/ethernet/hp/Kconfig
+++ b/drivers/staging/hp/Kconfig
diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/staging/hp/Makefile
index 5ed723bb11e2..5ed723bb11e2 100644
--- a/drivers/net/ethernet/hp/Makefile
+++ b/drivers/staging/hp/Makefile
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/staging/hp/hp100.c
index 6ec78f5c602f..6ec78f5c602f 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/staging/hp/hp100.c
diff --git a/drivers/net/ethernet/hp/hp100.h b/drivers/staging/hp/hp100.h
index 7239b94c9de5..7239b94c9de5 100644
--- a/drivers/net/ethernet/hp/hp100.h
+++ b/drivers/staging/hp/hp100.h
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 82099db4bf0c..a480409090c0 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index e6b660489165..bf3e2a9cc07f 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -155,6 +155,11 @@
* The DOUT/RDY output must also be wired to an interrupt-capable GPIO.
*/
+enum {
+ AD7192_SYSCALIB_ZERO_SCALE,
+ AD7192_SYSCALIB_FULL_SCALE,
+};
+
struct ad7192_state {
struct regulator *avdd;
struct regulator *dvdd;
@@ -169,10 +174,80 @@ struct ad7192_state {
u8 devid;
u8 clock_sel;
struct mutex lock; /* protect sensor state */
+ u8 syscalib_mode[8];
struct ad_sigma_delta sd;
};
+static const char * const ad7192_syscalib_modes[] = {
+ [AD7192_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7192_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7192_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ st->syscalib_mode[chan->channel] = mode;
+
+ return 0;
+}
+
+static int ad7192_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ return st->syscalib_mode[chan->channel];
+}
+
+static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret, temp;
+
+ ret = strtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ temp = st->syscalib_mode[chan->channel];
+ if (sys_calib) {
+ if (temp == AD7192_SYSCALIB_ZERO_SCALE)
+ ret = ad_sd_calibrate(&st->sd, AD7192_MODE_CAL_SYS_ZERO,
+ chan->address);
+ else
+ ret = ad_sd_calibrate(&st->sd, AD7192_MODE_CAL_SYS_FULL,
+ chan->address);
+ }
+
+ return ret ? ret : len;
+}
+
+static const struct iio_enum ad7192_syscalib_mode_enum = {
+ .items = ad7192_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7192_syscalib_modes),
+ .set = ad7192_set_syscalib_mode,
+ .get = ad7192_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7192_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7192_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", &ad7192_syscalib_mode_enum),
+ {}
+};
+
static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd)
{
return container_of(sd, struct ad7192_state, sd);
@@ -770,9 +845,11 @@ static int ad7192_channels_config(struct iio_dev *indio_dev)
*chan = channels[i];
chan->info_mask_shared_by_all |=
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY);
- if (chan->type != IIO_TEMP)
+ if (chan->type != IIO_TEMP) {
chan->info_mask_shared_by_type_available |=
BIT(IIO_CHAN_INFO_SCALE);
+ chan->ext_info = ad7192_calibsys_ext_info;
+ }
chan++;
}
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 038d6732c3fd..23026978a5a5 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -417,6 +417,10 @@ static int ad9834_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->mclk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(st->mclk)) {
+ ret = PTR_ERR(st->mclk);
+ goto error_disable_reg;
+ }
ret = clk_prepare_enable(st->mclk);
if (ret) {
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
index 40ca1e8fa09f..32ec8cf31fd0 100644
--- a/drivers/staging/isdn/avm/b1.c
+++ b/drivers/staging/isdn/avm/b1.c
@@ -261,9 +261,10 @@ int b1_loaded(avmcard *card)
b1_put_byte(base, SEND_POLL);
for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
if (b1_rx_full(base)) {
- if ((ans = b1_get_byte(base)) == RECEIVE_POLL) {
+ ans = b1_get_byte(base);
+ if (ans == RECEIVE_POLL)
return 1;
- }
+
printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
card->name, ans);
return 0;
@@ -284,8 +285,9 @@ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
int retval;
b1_reset(port);
+ retval = b1_load_t4file(card, &data->firmware);
- if ((retval = b1_load_t4file(card, &data->firmware))) {
+ if (retval) {
b1_reset(port);
printk(KERN_ERR "%s: failed to load t4file!!\n",
card->name);
@@ -295,7 +297,8 @@ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
b1_disable_irq(port);
if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
+ retval = b1_load_config(card, &data->configuration);
+ if (retval) {
b1_reset(port);
printk(KERN_ERR "%s: failed to load config!!\n",
card->name);
@@ -525,7 +528,9 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
MsgLen = 30;
CAPIMSG_SETLEN(card->msgbuf, 30);
}
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+
+ skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC);
+ if (!skb) {
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
@@ -539,7 +544,9 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
ApplId = (unsigned) b1_get_word(card->port);
MsgLen = b1_get_slice(card->port, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ skb = alloc_skb(MsgLen, GFP_ATOMIC);
+
+ if (!skb) {
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
spin_unlock_irqrestore(&card->lock, flags);
@@ -663,11 +670,17 @@ int b1_proc_show(struct seq_file *m, void *v)
seq_printf(m, "%-16s %s\n", "type", s);
if (card->cardtype == avm_t1isa)
seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
+
+ s = cinfo->version[VER_DRIVER];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+
+ s = cinfo->version[VER_CARDTYPE];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
+
+ s = cinfo->version[VER_SERIAL];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
@@ -784,13 +797,15 @@ static int __init b1_init(void)
char *p;
char rev[32];
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ p = strchr(revision, ':');
+ if (p && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
+ p = strchr(rev, '$');
+ if (p && p > rev)
*(p - 1) = 0;
- } else
+ } else {
strcpy(rev, "1.0");
-
+ }
printk(KERN_INFO "b1: revision %s\n", rev);
return 0;
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
index 17fa615a8c68..9ddadd07e707 100644
--- a/drivers/staging/isdn/gigaset/interface.c
+++ b/drivers/staging/isdn/gigaset/interface.c
@@ -518,7 +518,7 @@ void gigaset_if_init(struct cardstate *cs)
if (!IS_ERR(cs->tty_dev))
dev_set_drvdata(cs->tty_dev, cs);
else {
- pr_warning("could not register device to the tty subsystem\n");
+ pr_warn("could not register device to the tty subsystem\n");
cs->tty_dev = NULL;
}
mutex_unlock(&cs->mutex);
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index bc02534d8dc3..5460bf973c9c 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -99,7 +99,8 @@ struct i2c_device {
#define SMBHSTSTS_INTR 0x02
#define SMBHSTSTS_HOST_BUSY 0x01
-#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
+#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | \
+ SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
@@ -136,17 +137,18 @@ static int i801_check_pre(struct i2c_device *priv)
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&priv->adapter.dev, "SMBus is busy, can't use it! (status=%x)\n", status);
+ dev_err(&priv->adapter.dev,
+ "SMBus is busy, can't use it! (status=%x)\n", status);
return -EBUSY;
}
status &= STATUS_FLAGS;
if (status) {
- //dev_dbg(&priv->adapter.dev, "Clearing status flags (%02x)\n", status);
outb_p(status, SMBHSTSTS(priv));
status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
if (status) {
- dev_err(&priv->adapter.dev, "Failed clearing status flags (%02x)\n", status);
+ dev_err(&priv->adapter.dev,
+ "Failed clearing status flags (%02x)\n", status);
return -EBUSY;
}
}
@@ -162,15 +164,20 @@ static int i801_check_post(struct i2c_device *priv, int status, int timeout)
if (timeout) {
dev_err(&priv->adapter.dev, "Transaction timeout\n");
/* try to stop the current command */
- dev_dbg(&priv->adapter.dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, SMBHSTCNT(priv));
+ dev_dbg(&priv->adapter.dev,
+ "Terminating the current operation\n");
+ outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+ SMBHSTCNT(priv));
usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), SMBHSTCNT(priv));
+ outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+ SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));
- if ((status & SMBHSTSTS_HOST_BUSY) || !(status & SMBHSTSTS_FAILED))
- dev_err(&priv->adapter.dev, "Failed terminating the transaction\n");
+ if ((status & SMBHSTSTS_HOST_BUSY) ||
+ !(status & SMBHSTSTS_FAILED))
+ dev_err(&priv->adapter.dev,
+ "Failed terminating the transaction\n");
outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
return -ETIMEDOUT;
}
@@ -244,7 +251,9 @@ static void i801_wait_hwpec(struct i2c_device *priv)
outb_p(status, SMBHSTSTS(priv));
}
-static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int hwpec)
+static int i801_block_transaction_by_block(struct i2c_device *priv,
+ union i2c_smbus_data *data,
+ char read_write, int hwpec)
{
int i, len;
int status;
@@ -259,7 +268,8 @@ static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_sm
outb_p(data->block[i + 1], SMBBLKDAT(priv));
}
- status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
+ status = i801_transaction(priv,
+ I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
if (status)
return status;
@@ -275,7 +285,10 @@ static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_sm
return 0;
}
-static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
+ union i2c_smbus_data *data,
+ char read_write, int command,
+ int hwpec)
{
int i, len;
int smbcmd;
@@ -301,7 +314,8 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2
else
smbcmd = I801_BLOCK_LAST;
} else {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_READ)
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_READ)
smbcmd = I801_I2C_BLOCK_DATA;
else
smbcmd = I801_BLOCK_DATA;
@@ -309,25 +323,33 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2
outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | I801_START, SMBHSTCNT(priv));
+ outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+ SMBHSTCNT(priv));
/* We will always wait for a fraction of a second! */
timeout = 0;
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while ((!(status & SMBHSTSTS_BYTE_DONE)) && (timeout++ < MAX_RETRIES));
+ } while (!(status & SMBHSTSTS_BYTE_DONE) &&
+ (timeout++ < MAX_RETRIES));
result = i801_check_post(priv, status, timeout > MAX_RETRIES);
if (result < 0)
return result;
- if (i == 1 && read_write == I2C_SMBUS_READ && command != I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (i == 1 && read_write == I2C_SMBUS_READ &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA) {
len = inb_p(SMBHSTDAT0(priv));
if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&priv->adapter.dev, "Illegal SMBus block read size %d\n", len);
+ dev_err(&priv->adapter.dev,
+ "Illegal SMBus block read size %d\n",
+ len);
/* Recover */
- while (inb_p(SMBHSTSTS(priv)) & SMBHSTSTS_HOST_BUSY)
- outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ while (inb_p(SMBHSTSTS(priv)) &
+ SMBHSTSTS_HOST_BUSY)
+ outb_p(SMBHSTSTS_BYTE_DONE,
+ SMBHSTSTS(priv));
+ outb_p(SMBHSTSTS_INTR,
+ SMBHSTSTS(priv));
return -EPROTO;
}
data->block[0] = len;
@@ -354,7 +376,9 @@ static int i801_set_block_buffer_mode(struct i2c_device *priv)
}
/* Block transaction function */
-static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+static int i801_block_transaction(struct i2c_device *priv,
+ union i2c_smbus_data *data, char read_write,
+ int command, int hwpec)
{
int result = 0;
//unsigned char hostc;
@@ -366,12 +390,14 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
//pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
//pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN);
} else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
- dev_err(&priv->adapter.dev, "I2C block read is unsupported!\n");
+ dev_err(&priv->adapter.dev,
+ "I2C block read is unsupported!\n");
return -EOPNOTSUPP;
}
}
- if (read_write == I2C_SMBUS_WRITE || command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (read_write == I2C_SMBUS_WRITE ||
+ command == I2C_SMBUS_I2C_BLOCK_DATA) {
if (data->block[0] < 1)
data->block[0] = 1;
if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
@@ -384,13 +410,21 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
* SMBus (not I2C) block transactions, even though the datasheet
* doesn't mention this limitation.
*/
- if ((priv->features & FEATURE_BLOCK_BUFFER) && command != I2C_SMBUS_I2C_BLOCK_DATA && i801_set_block_buffer_mode(priv) == 0)
- result = i801_block_transaction_by_block(priv, data, read_write, hwpec);
- else
- result = i801_block_transaction_byte_by_byte(priv, data, read_write, command, hwpec);
+ if ((priv->features & FEATURE_BLOCK_BUFFER) &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA &&
+ i801_set_block_buffer_mode(priv) == 0) {
+ result = i801_block_transaction_by_block(priv, data,
+ read_write, hwpec);
+ } else {
+ result = i801_block_transaction_byte_by_byte(priv, data,
+ read_write,
+ command, hwpec);
+ }
+
if (result == 0 && hwpec)
i801_wait_hwpec(priv);
- if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_WRITE) {
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_WRITE) {
/* restore saved configuration register value */
//TODO: Figure out the right thing to do here...
//pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
@@ -399,32 +433,41 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data)
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
int ret, xact = 0;
struct i2c_device *priv = i2c_get_adapdata(adap);
- hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
+ hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
+ (flags & I2C_CLIENT_PEC) &&
+ size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
switch (size) {
case I2C_SMBUS_QUICK:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_QUICK\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
xact = I801_QUICK;
break;
case I2C_SMBUS_BYTE:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_WRITE)
outb_p(command, SMBHSTCMD(priv));
xact = I801_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE)
outb_p(data->byte, SMBHSTDAT0(priv));
@@ -432,7 +475,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_WORD_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_WORD_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE) {
outb_p(data->word & 0xff, SMBHSTDAT0(priv));
@@ -442,7 +487,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_BLOCK_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BLOCK_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
block = 1;
break;
@@ -463,7 +510,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
block = 1;
break;
default:
- dev_dbg(&priv->adapter.dev, " [acc] Unsupported transaction %d\n", size);
+ dev_dbg(&priv->adapter.dev,
+ " [acc] Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
@@ -472,13 +520,14 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
} else {
dev_dbg(&priv->adapter.dev, " [acc] hwpec: no\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
+ outb_p(inb_p(SMBAUXCTL(priv)) &
+ (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
}
if (block) {
- //ret = 0;
dev_dbg(&priv->adapter.dev, " [acc] block: yes\n");
- ret = i801_block_transaction(priv, data, read_write, size, hwpec);
+ ret = i801_block_transaction(priv, data, read_write, size,
+ hwpec);
} else {
dev_dbg(&priv->adapter.dev, " [acc] block: no\n");
ret = i801_transaction(priv, xact | ENABLE_INT9);
@@ -490,7 +539,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
*/
if (hwpec || block) {
dev_dbg(&priv->adapter.dev, " [acc] hwpec || block\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC |
+ SMBAUXCTL_E32B), SMBAUXCTL(priv));
}
if (block) {
dev_dbg(&priv->adapter.dev, " [acc] block\n");
@@ -501,19 +551,22 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
return ret;
}
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK)) {
- dev_dbg(&priv->adapter.dev, " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
+ dev_dbg(&priv->adapter.dev,
+ " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
return 0;
}
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
case I801_BYTE_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] I801_BYTE or I801_BYTE_DATA\n");
+ dev_dbg(&priv->adapter.dev,
+ " [acc] I801_BYTE or I801_BYTE_DATA\n");
data->byte = inb_p(SMBHSTDAT0(priv));
break;
case I801_WORD_DATA:
dev_dbg(&priv->adapter.dev, " [acc] I801_WORD_DATA\n");
- data->word = inb_p(SMBHSTDAT0(priv)) + (inb_p(SMBHSTDAT1(priv)) << 8);
+ data->word = inb_p(SMBHSTDAT0(priv)) +
+ (inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
return 0;
@@ -535,30 +588,47 @@ static u32 i801_func(struct i2c_adapter *adapter)
// http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
u32 f =
- I2C_FUNC_I2C | /* 0x00000001 (I enabled this one) */
- !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
- !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
- ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
- !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
- I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
- !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
- !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
- !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
- !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
- !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
- !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
- !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
- ((priv->features & FEATURE_I2C_BLOCK_READ) ? I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
+ I2C_FUNC_I2C | /* 0x00000001 (I enabled this
+ * one)
+ */
+ !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
+ !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
+ ((priv->features & FEATURE_SMBUS_PEC) ?
+ I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
+ !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
+ I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
+ !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
+ !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
+ !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
+ !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
+ !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
+ !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
+ !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
+ ((priv->features & FEATURE_I2C_BLOCK_READ) ?
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
I2C_FUNC_SMBUS_BYTE | /* _READ_BYTE _WRITE_BYTE */
- I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA _WRITE_BYTE_DATA */
- I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA _WRITE_WORD_DATA */
- I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA _WRITE_BLOCK_DATA */
- !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK _WRITE_I2C_BLOCK */
- !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE _BYTE_DATA _WORD_DATA _PROC_CALL _WRITE_BLOCK_DATA _I2C_BLOCK _PEC */
+ I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA
+ * _WRITE_BYTE_DATA
+ */
+ I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA
+ * _WRITE_WORD_DATA
+ */
+ I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA
+ * _WRITE_BLOCK_DATA
+ */
+ !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK
+ * _WRITE_I2C_BLOCK
+ */
+ !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE
+ * _BYTE_DATA _WORD_DATA
+ * _PROC_CALL
+ * _WRITE_BLOCK_DATA
+ * _I2C_BLOCK _PEC
+ */
return f;
}
@@ -610,8 +680,8 @@ static int pi2c_probe(struct platform_device *pldev)
/* Retry up to 3 times on lost arbitration */
priv->adapter.retries = 3;
- //snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter at %04lx", priv->smba);
- snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter");
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "Fake SMBus I801 adapter");
err = i2c_add_adapter(&priv->adapter);
if (err) {
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 3be33c450cab..8becf972af9c 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -50,6 +50,7 @@ static struct flash_platform_data p2kr0_spi0_pdata = {
.nr_parts = ARRAY_SIZE(p2kr0_spi0_parts),
.parts = p2kr0_spi0_parts,
};
+
static struct flash_platform_data p2kr0_spi1_pdata = {
.name = "SPI1",
.nr_parts = ARRAY_SIZE(p2kr0_spi1_parts),
@@ -162,14 +163,12 @@ union kp_spi_ffctrl {
kp_spi_read_reg(struct kp_spi_controller_state *cs, int idx)
{
u64 __iomem *addr = cs->base;
- u64 val;
addr += idx;
if ((idx == KP_SPI_REG_CONFIG) && (cs->conf_cache >= 0))
return cs->conf_cache;
- val = readq(addr);
- return val;
+ return readq(addr);
}
static inline void
@@ -227,8 +226,7 @@ kp_spi_txrx_pio(struct spi_device *spidev, struct spi_transfer *transfer)
kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, val);
processed++;
}
- }
- else if (rx) {
+ } else if (rx) {
for (i = 0 ; i < c ; i++) {
char test = 0;
@@ -315,19 +313,19 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
if (transfer->speed_hz > KP_SPI_CLK ||
(len && !(rx_buf || tx_buf))) {
dev_dbg(kpspi->dev, " transfer: %d Hz, %d %s%s, %d bpw\n",
- transfer->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- transfer->bits_per_word);
+ transfer->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ transfer->bits_per_word);
dev_dbg(kpspi->dev, " transfer -EINVAL\n");
return -EINVAL;
}
if (transfer->speed_hz &&
transfer->speed_hz < (KP_SPI_CLK >> 15)) {
dev_dbg(kpspi->dev, "speed_hz %d below minimum %d Hz\n",
- transfer->speed_hz,
- KP_SPI_CLK >> 15);
+ transfer->speed_hz,
+ KP_SPI_CLK >> 15);
dev_dbg(kpspi->dev, " speed_hz -EINVAL\n");
return -EINVAL;
}
@@ -478,7 +476,7 @@ kp_spi_probe(struct platform_device *pldev)
/* register the slave boards */
#define NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(table) \
for (i = 0 ; i < ARRAY_SIZE(table) ; i++) { \
- spi_new_device(master, &(table[i])); \
+ spi_new_device(master, &table[i]); \
}
switch ((drvdata->card_id & 0xFFFF0000) >> 16) {
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.c b/drivers/staging/media/allegro-dvt/nal-h264.c
index 4e14b77851e1..bd48b8883572 100644
--- a/drivers/staging/media/allegro-dvt/nal-h264.c
+++ b/drivers/staging/media/allegro-dvt/nal-h264.c
@@ -235,7 +235,7 @@ static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
rbsp->pos++;
- if (value == 1 ||
+ if (value ||
(rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
rbsp->num_consecutive_zeros = 0;
} else {
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index f670bbde4159..deb90ae37859 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -26,21 +26,9 @@
#include "hantro_hw.h"
-#define VP8_MB_DIM 16
-#define VP8_MB_WIDTH(w) DIV_ROUND_UP(w, VP8_MB_DIM)
-#define VP8_MB_HEIGHT(h) DIV_ROUND_UP(h, VP8_MB_DIM)
-
-#define H264_MB_DIM 16
-#define H264_MB_WIDTH(w) DIV_ROUND_UP(w, H264_MB_DIM)
-#define H264_MB_HEIGHT(h) DIV_ROUND_UP(h, H264_MB_DIM)
-
-#define MPEG2_MB_DIM 16
-#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
-#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
-
-#define JPEG_MB_DIM 16
-#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
-#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
+#define MB_DIM 16
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
struct hantro_ctx;
struct hantro_codec_ops;
@@ -379,7 +367,7 @@ static inline void hantro_reg_write(struct hantro_dev *vpu,
bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts);
static inline struct vb2_v4l2_buffer *
hantro_get_src_buf(struct hantro_ctx *ctx)
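
The per-codec macroblock macros collapse into one set because every codec handled here uses 16x16 macroblocks. A standalone check of the rounding behavior (example dimensions assumed):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
        #define MB_DIM                  16
        #define MB_WIDTH(w)             DIV_ROUND_UP(w, MB_DIM)
        #define MB_HEIGHT(h)            DIV_ROUND_UP(h, MB_DIM)

        int main(void)
        {
                /* 1920 divides evenly into 120 MBs; 1080 rounds up to 68 */
                printf("%d x %d\n", MB_WIDTH(1920), MB_HEIGHT(1080));
                return 0;
        }
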
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 6d9d41170832..26108c96b674 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -43,8 +43,9 @@ void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
return ctrl ? ctrl->p_cur.p : NULL;
}
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts)
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
+ struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
struct vb2_buffer *buf;
int index;
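
The hunk cuts off before the lookup itself; presumably the rest of the function resolves the timestamp to a capture buffer and returns 0 when nothing matches, roughly as sketched below. vb2_find_timestamp(), vb2_get_buffer() and vb2_dma_contig_plane_dma_addr() are the standard vb2 helpers for this, but the exact tail shown here is an assumption:

        index = vb2_find_timestamp(q, ts, 0);
        if (index < 0)
                return 0;
        buf = vb2_get_buffer(q, index);
        return vb2_dma_contig_plane_dma_addr(buf, 0);
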
@@ -413,20 +414,18 @@ static int hantro_open(struct file *filp)
if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
ctx->buf_finish = hantro_enc_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
ctx->buf_finish = hantro_dec_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else {
- ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_ctx_free;
}
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
- kfree(ctx);
- return ret;
+ goto err_ctx_free;
}
v4l2_fh_init(&ctx->fh, vdev);
@@ -447,6 +446,7 @@ static int hantro_open(struct file *filp)
err_fh_free:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+err_ctx_free:
kfree(ctx);
return ret;
}
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 7ab534936843..3cd40a8f0daa 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -34,9 +34,11 @@ static void set_params(struct hantro_ctx *ctx)
reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
- reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
- if (dec_param->nal_ref_idc)
- reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ if (sps->profile_idc > 66) {
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ }
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
@@ -49,8 +51,8 @@ static void set_params(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Decoder control register 1. */
- reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(sps->pic_width_in_mbs_minus1 + 1) |
- G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(sps->pic_height_in_map_units_minus1 + 1) |
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
@@ -61,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx)
/* always use the matrix sent from userspace */
reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
- if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
@@ -79,7 +81,7 @@ static void set_params(struct hantro_ctx *ctx)
reg |= G1_REG_DEC_CTRL4_CABAC_E;
if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
- if (sps->chroma_format_idc == 0)
+ if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
@@ -220,10 +222,9 @@ static void set_ref(struct hantro_ctx *ctx)
/* Set up addresses of DPB buffers. */
for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
- struct vb2_buffer *buf = hantro_h264_get_ref_buf(ctx, i);
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
- vdpu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(buf, 0),
- G1_REG_ADDR_REF(i));
+ vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
}
}
@@ -233,6 +234,7 @@ static void set_buffers(struct hantro_ctx *ctx)
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
@@ -243,18 +245,30 @@ static void set_buffers(struct hantro_ctx *ctx)
/* Destination (decoded frame) buffer. */
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+ /* Adjust dma addr to start at second line for bottom field */
+ if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
/* Higher profiles require DMV buffer appended to reference frames. */
- if (ctrls->sps->profile_idc > 66) {
- size_t pic_size = ctx->h264_dec.pic_size;
- size_t mv_offset = round_up(pic_size, 8);
-
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
+ unsigned int bytes_per_mb = 384;
+
+ /* DMV buffer for monochrome starts directly after the Y-plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+ /*
+ * The DMV buffer is split in two for field-encoded frames;
+ * adjust the offset for the bottom field.
+ */
if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
- mv_offset += 32 * H264_MB_WIDTH(ctx->dst_fmt.width);
-
- vdpu_write_relaxed(vpu, dst_dma + mv_offset,
- G1_REG_ADDR_DIR_MV);
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
}
/* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
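
For scale, take an assumed 1920x1088 frame, i.e. 120 x 68 = 8160 macroblocks: the DMV area then starts at 384 * 8160 = 3133440 bytes (256 luma plus 128 chroma bytes of NV12 image data per macroblock), and a bottom field is offset a further 32 * 8160 = 261120 bytes into it, half of the 64-bytes-per-macroblock DMV area:

        #include <stdio.h>

        int main(void)
        {
                unsigned int mbs = (1920 / 16) * ((1088 + 15) / 16); /* 8160 */

                printf("DMV offset:       %u\n", 384 * mbs); /* 3133440 */
                printf("bottom-field add: %u\n", 32 * mbs);  /* 261120 */
                return 0;
        }
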
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index 80f0e94f8afa..f3bf67d8a289 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -105,17 +105,14 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -207,8 +204,8 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
G1_REG_DEC_AXI_WR_ID(0);
vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
- reg = G1_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- G1_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
G1_REG_ALT_SCAN_E(picture->alternate_scan) |
G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 6d99c2be01cf..cad18094fee0 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -370,19 +370,18 @@ static void cfg_tap(struct hantro_ctx *ctx,
static void cfg_ref(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame_header *hdr)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
dma_addr_t ref;
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -390,7 +389,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= G1_REG_ADDR_REF_TOPC_E;
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -470,8 +469,8 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index ecd34a7db190..938b48d4d3d9 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -116,8 +116,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
/* Make sure that all registers are written at this point. */
vepu_write(vpu, reg, H1_REG_AXI_CTRL);
- reg = H1_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | H1_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| H1_REG_ENC_CTRL_ENC_MODE_JPEG
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 0d758e0c0f99..568640eab3a6 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -20,9 +20,9 @@
/* Size with u32 units. */
#define CABAC_INIT_BUFFER_SIZE (460 * 2)
#define POC_BUFFER_SIZE 34
-#define SCALING_LIST_SIZE (6 * 16 + 6 * 64)
+#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
-#define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1)
+#define HANTRO_CMP(a, b) ((a) < (b) ? -1 : 1)
/* Data structure describing auxiliary buffer format. */
struct hantro_h264_dec_priv_tbl {
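
HANTRO_CMP replaces the subtraction idiom in the sort callbacks below: with 32-bit POC and picture numbers, `a - b` can overflow and report the wrong sign, while a relational compare cannot. A standalone illustration (the wraparound is made explicit through unsigned arithmetic, since signed overflow itself is undefined behavior):

        #include <stdint.h>
        #include <stdio.h>

        #define HANTRO_CMP(a, b) ((a) < (b) ? -1 : 1)

        int main(void)
        {
                int32_t a = INT32_MIN + 1, b = 2;

                /* subtraction wraps to a large positive value: wrong sign */
                printf("a - b      -> %d\n",
                       (int32_t)((uint32_t)a - (uint32_t)b));
                printf("HANTRO_CMP -> %d\n", HANTRO_CMP(a, b)); /* -1 */
                return 0;
        }
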
@@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = {
0x1f0c2517, 0x1f261440
};
-/*
- * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process
- * to get the values in matrix order. In addition, the hardware requires bytes
- * swapped within each subsequent 4 bytes. Both arrays below include both
- * transformations.
- */
-static const u32 zig_zag_4x4[] = {
- 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12
-};
-
-static const u32 zig_zag_8x8[] = {
- 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6,
- 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31,
- 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48,
- 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60
-};
-
static void
reorder_scaling_list(struct hantro_ctx *ctx)
{
@@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx)
const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
- const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
- u8 *dst = tbl->scaling_list;
- const u8 *src;
+ u32 *dst = (u32 *)tbl->scaling_list;
+ const u32 *src;
int i, j;
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
- BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) !=
- num_list_4x4 * list_len_4x4 +
- num_list_8x8 * list_len_8x8);
-
- src = &scaling->scaling_list_4x4[0][0];
- for (i = 0; i < num_list_4x4; ++i) {
- for (j = 0; j < list_len_4x4; ++j)
- dst[zig_zag_4x4[j]] = src[j];
- src += list_len_4x4;
- dst += list_len_4x4;
+ for (i = 0; i < num_list_4x4; i++) {
+ src = (u32 *)&scaling->scaling_list_4x4[i];
+ for (j = 0; j < list_len_4x4 / 4; j++)
+ *dst++ = swab32(src[j]);
}
- src = &scaling->scaling_list_8x8[0][0];
- for (i = 0; i < num_list_8x8; ++i) {
- for (j = 0; j < list_len_8x8; ++j)
- dst[zig_zag_8x8[j]] = src[j];
- src += list_len_8x8;
- dst += list_len_8x8;
+ /* Only Intra/Inter Y lists */
+ for (i = 0; i < 2; i++) {
+ src = (u32 *)&scaling->scaling_list_8x8[i];
+ for (j = 0; j < list_len_8x8 / 4; j++)
+ *dst++ = swab32(src[j]);
}
}
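
The rewritten loops rely on swab32() for the "bytes swapped within each subsequent 4 bytes" transformation that the deleted comment described; the zig-zag reorder is dropped and the lists are copied in the order userspace supplies them. What swab32() does to each word, with a userspace stand-in for the kernel helper:

        #include <stdint.h>
        #include <stdio.h>

        /* same semantics as the kernel's swab32() */
        static uint32_t swab32(uint32_t x)
        {
                return (x >> 24) | ((x >> 8) & 0x0000ff00) |
                       ((x << 8) & 0x00ff0000) | (x << 24);
        }

        int main(void)
        {
                printf("0x%08x\n", swab32(0x01020304)); /* 0x04030201 */
                return 0;
        }
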
@@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder {
const struct v4l2_h264_dpb_entry *dpb;
s32 pocs[HANTRO_H264_DPB_SIZE];
u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
+ int frame_nums[HANTRO_H264_DPB_SIZE];
s32 curpoc;
u8 num_valid;
};
@@ -294,13 +268,20 @@ static void
init_reflist_builder(struct hantro_ctx *ctx,
struct hantro_h264_reflist_builder *b)
{
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
const struct v4l2_ctrl_h264_decode_params *dec_param;
+ const struct v4l2_ctrl_h264_sps *sps;
struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ int cur_frame_num, max_frame_num;
unsigned int i;
dec_param = ctx->h264_dec.ctrls.decode;
+ slice_params = &ctx->h264_dec.ctrls.slices[0];
+ sps = ctx->h264_dec.ctrls.sps;
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = slice_params->frame_num;
memset(b, 0, sizeof(*b));
b->dpb = dpb;
@@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx,
continue;
buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
+ * TODO: This logic will have to be adjusted when we start
+ * supporting interlaced content.
+ */
+ if (dpb[i].frame_num > cur_frame_num)
+ b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num;
+ else
+ b->frame_nums[i] = dpb[i].frame_num;
+
b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
dpb[i].bottom_field_order_cnt);
b->unordered_reflist[b->num_valid] = i;
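
A concrete wraparound case for the new frame_nums[] bookkeeping (values assumed for illustration): with log2_max_frame_num_minus4 = 0, max_frame_num is 16; if the current slice carries frame_num 2, a DPB entry with frame_num 14 was coded before the wrap, so it is stored as 14 - 16 = -2 and sorts below the current picture as intended:

        #include <stdio.h>

        int main(void)
        {
                int max_frame_num = 1 << (0 + 4); /* 16 */
                int cur = 2, dpb = 14;

                printf("%d\n", dpb > cur ? dpb - max_frame_num : dpb); /* -2 */
                return 0;
        }
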
@@ -353,9 +346,10 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* ascending order.
*/
if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return b->frame_num - a->frame_num;
+ return HANTRO_CMP(builder->frame_nums[idxb],
+ builder->frame_nums[idxa]);
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
}
static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -381,7 +375,7 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
@@ -392,11 +386,11 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -422,22 +416,22 @@ static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
/*
* Short term pics with POC > cur POC first in POC ascending order
- * followed by short term pics with POC > cur POC in POC descending
+ * followed by short term pics with POC < cur POC in POC descending
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static void
@@ -537,22 +531,18 @@ static void update_dpb(struct hantro_ctx *ctx)
}
}
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx)
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
- struct vb2_buffer *buf;
- int buf_idx = -1;
+ dma_addr_t dma_addr = 0;
if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
- if (buf_idx >= 0) {
- buf = vb2_get_buffer(cap_q, buf_idx);
- } else {
+ if (!dma_addr) {
struct vb2_v4l2_buffer *dst_buf;
+ struct vb2_buffer *buf;
/*
* If a DPB entry is unused or invalid, address of current
@@ -560,9 +550,10 @@ struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
*/
dst_buf = hantro_get_dst_buf(ctx);
buf = &dst_buf->vb2_buf;
+ dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
}
- return buf;
+ return dma_addr;
}
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
@@ -627,7 +618,6 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
struct hantro_aux_buf *priv = &h264_dec->priv;
struct hantro_h264_dec_priv_tbl *tbl;
- struct v4l2_pix_format_mplane pix_mp;
priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
GFP_KERNEL);
@@ -638,9 +628,5 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
tbl = priv->cpu;
memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
- v4l2_fill_pixfmt_mp(&pix_mp, ctx->dst_fmt.pixelformat,
- ctx->dst_fmt.width, ctx->dst_fmt.height);
- h264_dec->pic_size = pix_mp.plane_fmt[0].sizeimage;
-
return 0;
}
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 2fab655bf098..fa91dd1848b7 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -80,15 +80,12 @@ struct hantro_h264_dec_reflists {
* @dpb: DPB
* @reflists: P/B0/B1 reflists
* @ctrls: V4L2 controls attached to a run
- * @pic_size: Size in bytes of decoded picture, this is needed
- * to pass the location of motion vectors.
*/
struct hantro_h264_dec_hw_ctx {
struct hantro_aux_buf priv;
struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
struct hantro_h264_dec_reflists reflists;
struct hantro_h264_dec_ctrls ctrls;
- size_t pic_size;
};
/**
@@ -158,8 +155,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx);
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
int hantro_h264_dec_init(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 3dae52abb96c..1dae76f20034 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -240,14 +240,30 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
/*
+ * A decoded 8-bit 4:2:0 NV12 frame may need up to 448 bytes
+ * of memory per macroblock, plus an additional 32 bytes on
+ * multi-core variants.
+ *
* The H264 decoder needs extra space on the output buffers
* to store motion vectors. This is needed for reference
* frames.
+ *
+ * The memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
*/
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
pix_mp->plane_fmt[0].sizeimage +=
- 128 * DIV_ROUND_UP(pix_mp->width, 16) *
- DIV_ROUND_UP(pix_mp->height, 16);
+ 64 * MB_WIDTH(pix_mp->width) *
+ MB_HEIGHT(pix_mp->height) + 32;
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -367,20 +383,27 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
const struct hantro_fmt *formats;
unsigned int num_fmts;
- struct vb2_queue *vq;
int ret;
- /* Change not allowed if queue is busy. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_busy(vq))
- return -EBUSY;
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
if (!hantro_is_encoder_ctx(ctx)) {
struct vb2_queue *peer_vq;
/*
+ * In order to support dynamic resolution change, the
+ * decoder admits a resolution change as long as the
+ * pixelformat remains the same. This can't be done while
+ * streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ pix_mp->pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
* Since format change on the OUTPUT queue will reset
* the CAPTURE queue, we can't allow doing so
* when the CAPTURE queue has buffers allocated.
@@ -389,12 +412,15 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(peer_vq))
return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't admit a format change if
+ * there are OUTPUT buffers allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
}
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
- if (ret)
- return ret;
-
formats = hantro_get_formats(ctx, &num_fmts);
ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
pix_mp->pixelformat);
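
The constants in the sizeimage comment earlier in this file are self-consistent: 256 (Y-plane) + 128 (UV-plane) + 64 (MV) = 448 bytes per macroblock, plus the flat 32-byte multi-core sync word, and the 256 + 128 = 384 image bytes per macroblock match the bytes_per_mb figure used for the motion-vector offset in hantro_g1_h264_dec.c. Since v4l2_fill_pixfmt_mp() already sizes the image planes, the extra space requested for H264 is just the MV-plus-sync tail:

        sizeimage += 64 * MB_WIDTH(width) * MB_HEIGHT(height) + 32;
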
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index 6bfcc47d1e58..f8db6fcaad73 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -48,10 +48,10 @@ static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,11 +67,11 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.max_depth = 2,
.frmsize = {
.min_width = 48,
- .max_width = 3840,
- .step_width = H264_MB_DIM,
+ .max_width = 4096,
+ .step_width = MB_DIM,
.min_height = 48,
- .max_height = 2160,
- .step_height = H264_MB_DIM,
+ .max_height = 2304,
+ .step_height = MB_DIM,
},
},
{
@@ -81,10 +81,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -94,10 +94,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
index 14d14bc6b12b..9ac1f2cb6a16 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -47,10 +47,10 @@ static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,10 +67,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -80,10 +80,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 06162f569b5e..067892345b5d 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -149,8 +149,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
- reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| VEPU_REG_FRAME_TYPE_INTRA
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index e7ba5c0441cc..b40d2cdf832f 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -107,17 +107,14 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -223,8 +220,8 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
VDPU_REG_DEC_CLK_GATE_E(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
- reg = VDPU_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- VDPU_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index f17e32620b08..76d7ed3fd69a 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -449,18 +449,16 @@ static void cfg_ref(struct hantro_ctx *ctx,
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
- struct vb2_queue *cap_q;
dma_addr_t ref;
- cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -468,7 +466,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -563,8 +561,8 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 35e60a120dc1..2a4f77e83ed3 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -428,32 +428,19 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
- int i, ret;
u32 code;
- for (i = 0; i < PRP_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRP_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
-
/* init default frame interval */
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
/* set a default mbus format */
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
- ret = imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
- V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, PRP_NUM_PADS, priv->pad);
+ return imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
+ V4L2_FIELD_NONE, NULL);
}
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
@@ -487,6 +474,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -496,7 +484,12 @@ static int prp_init(struct imx_ic_priv *ic_priv)
ic_priv->task_priv = priv;
priv->ic_priv = ic_priv;
- return 0;
+ for (i = 0; i < PRP_NUM_PADS; i++)
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
+ priv->pad);
}
static void prp_remove(struct imx_ic_priv *ic_priv)
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 67ffa46a8e96..09c4e3f33807 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1240,21 +1240,16 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
int i, ret;
u32 code;
+ /* set a default mbus format */
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
- /* set a default mbus format */
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
640, 480, code, V4L2_FIELD_NONE,
&priv->cc[i]);
@@ -1266,22 +1261,26 @@ static int prp_registered(struct v4l2_subdev *sd)
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
- ret = media_entity_pads_init(&sd->entity, PRPENCVF_NUM_PADS,
- priv->pad);
- if (ret)
- return ret;
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
+ PRPENCVF_SRC_PAD);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- return ret;
+ goto remove_vdev;
ret = prp_init_controls(priv);
if (ret)
- goto unreg;
+ goto unreg_vdev;
return 0;
-unreg:
+
+unreg_vdev:
imx_media_capture_device_unregister(priv->vdev);
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -1290,6 +1289,8 @@ static void prp_unregistered(struct v4l2_subdev *sd)
struct prp_priv *priv = sd_to_priv(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
@@ -1325,6 +1326,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i, ret;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1336,15 +1338,19 @@ static int prp_init(struct imx_ic_priv *ic_priv)
spin_lock_init(&priv->irqlock);
timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
- priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
- &ic_priv->sd,
- PRPENCVF_SRC_PAD);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
-
mutex_init(&priv->lock);
- return 0;
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ mutex_destroy(&priv->lock);
+
+ return ret;
}
static void prp_remove(struct imx_ic_priv *ic_priv)
@@ -1352,7 +1358,6 @@ static void prp_remove(struct imx_ic_priv *ic_priv)
struct prp_priv *priv = ic_priv->task_priv;
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
}
struct imx_ic_ops imx_ic_prpencvf_ops = {
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b33a07bc9105..7712e7be8625 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -26,6 +26,8 @@
#include <media/imx.h>
#include "imx-media.h"
+#define IMX_CAPTURE_NAME "imx-capture"
+
struct capture_priv {
struct imx_media_video_dev vdev;
@@ -69,8 +71,8 @@ static int vidioc_querycap(struct file *file, void *fh,
{
struct capture_priv *priv = video_drvdata(file);
- strscpy(cap->driver, "imx-media-capture", sizeof(cap->driver));
- strscpy(cap->card, "imx-media-capture", sizeof(cap->card));
+ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", priv->src_sd->name);
@@ -765,13 +767,6 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
INIT_LIST_HEAD(&priv->ready_q);
- priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
- if (ret) {
- v4l2_err(sd, "failed to init dev pad\n");
- goto unreg;
- }
-
/* create the link from the src_sd devnode pad to device node */
ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
&vfd->entity, 0, 0);
@@ -834,6 +829,7 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
{
struct capture_priv *priv;
struct video_device *vfd;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -858,6 +854,13 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
vfd->queue = &priv->q;
priv->vdev.vfd = vfd;
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
INIT_LIST_HEAD(&priv->vdev.list);
video_set_drvdata(vfd, priv);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 367e39f5b382..b60ed4f22f6d 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -627,8 +627,8 @@ static int csi_idmac_start(struct csi_priv *priv)
}
priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
- priv->idmac_ch,
- IPU_IRQ_NFB4EOF);
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
csi_idmac_nfb4eof_interrupt, 0,
"imx-smfc-nfb4eof", priv);
@@ -1472,7 +1472,7 @@ static void csi_try_fmt(struct csi_priv *priv,
imx_media_enum_mbus_format(&code, 0,
CS_SEL_ANY, false);
*cc = imx_media_find_mbus_format(code,
- CS_SEL_ANY, false);
+ CS_SEL_ANY, false);
sdformat->format.code = (*cc)->codes[0];
}
@@ -1740,9 +1740,6 @@ static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_unsubscribe(fh, sub);
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi_registered(struct v4l2_subdev *sd)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1759,9 +1756,6 @@ static int csi_registered(struct v4l2_subdev *sd)
priv->csi = csi;
for (i = 0; i < CSI_NUM_PADS; i++) {
- priv->pad[i].flags = (i == CSI_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
code = 0;
if (i != CSI_SINK_PAD)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -1793,16 +1787,22 @@ static int csi_registered(struct v4l2_subdev *sd)
goto put_csi;
}
- ret = media_entity_pads_init(&sd->entity, CSI_NUM_PADS, priv->pad);
- if (ret)
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev,
+ &priv->sd,
+ CSI_SRC_PAD_IDMAC);
+ if (IS_ERR(priv->vdev)) {
+ ret = PTR_ERR(priv->vdev);
goto free_fim;
+ }
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- goto free_fim;
+ goto remove_vdev;
return 0;
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
free_fim:
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1816,6 +1816,7 @@ static void csi_unregistered(struct v4l2_subdev *sd)
struct csi_priv *priv = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1923,7 +1924,7 @@ static int imx_csi_probe(struct platform_device *pdev)
struct ipu_client_platformdata *pdata;
struct pinctrl *pinctrl;
struct csi_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1963,10 +1964,14 @@ static int imx_csi_probe(struct platform_device *pdev)
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
- priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
- CSI_SRC_PAD_IDMAC);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
+ for (i = 0; i < CSI_NUM_PADS; i++)
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
mutex_init(&priv->lock);
@@ -1997,7 +2002,6 @@ static int imx_csi_probe(struct platform_device *pdev)
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -2008,7 +2012,6 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 4cc6a7462ae2..0788a1874557 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -184,7 +184,15 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 24,
}, {
- .fourcc = V4L2_PIX_FMT_BGR32,
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 32,
},
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index cfad65a16917..0d83c2c41606 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -841,9 +841,6 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int vdic_registered(struct v4l2_subdev *sd)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -851,9 +848,6 @@ static int vdic_registered(struct v4l2_subdev *sd)
u32 code;
for (i = 0; i < VDIC_NUM_PADS; i++) {
- priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
-
code = 0;
if (i != VDIC_SINK_PAD_IDMAC)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -874,15 +868,7 @@ static int vdic_registered(struct v4l2_subdev *sd)
priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
- ret = vdic_init_controls(priv);
- if (ret)
- return ret;
-
- ret = media_entity_pads_init(&sd->entity, VDIC_NUM_PADS, priv->pad);
- if (ret)
- v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
-
- return ret;
+ return vdic_init_controls(priv);
}
static void vdic_unregistered(struct v4l2_subdev *sd)
@@ -927,7 +913,7 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
u32 grp_id)
{
struct vdic_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -949,6 +935,15 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
mutex_init(&priv->lock);
+ for (i = 0; i < VDIC_NUM_PADS; i++)
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
+ priv->pad);
+ if (ret)
+ goto free;
+
ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
if (ret)
goto free;
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index bfa4b254c4e4..cd3dd6e33ef0 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -497,26 +497,13 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi2_registered(struct v4l2_subdev *sd)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
- int i, ret;
-
- for (i = 0; i < CSI2_NUM_PADS; i++) {
- csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* set a default mbus format */
- ret = imx_media_init_mbus_fmt(&csi2->format_mbus,
+ return imx_media_init_mbus_fmt(&csi2->format_mbus,
640, 480, 0, V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, CSI2_NUM_PADS, csi2->pad);
}
static const struct media_entity_operations csi2_entity_ops = {
@@ -573,7 +560,7 @@ static int csi2_probe(struct platform_device *pdev)
unsigned int sink_port = 0;
struct csi2_dev *csi2;
struct resource *res;
- int ret;
+ int i, ret;
csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
if (!csi2)
@@ -592,6 +579,16 @@ static int csi2_probe(struct platform_device *pdev)
csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
+ csi2->pad);
+ if (ret)
+ return ret;
+
csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(csi2->pllref_clk)) {
v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index bfd6b5fbf484..db30e2c70f2f 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1100,9 +1100,6 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
int i;
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
/* set a default mbus format */
ret = imx_media_init_mbus_fmt(&csi->format_mbus[i],
800, 600, 0, V4L2_FIELD_NONE,
@@ -1115,11 +1112,16 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
csi->frame_interval[i].denominator = 30;
}
- ret = media_entity_pads_init(&sd->entity, IMX7_CSI_PADS_NUM, csi->pad);
- if (ret < 0)
- return ret;
+ csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
+ IMX7_CSI_PAD_SRC);
+ if (IS_ERR(csi->vdev))
+ return PTR_ERR(csi->vdev);
+
+ ret = imx_media_capture_device_register(csi->vdev);
+ if (ret)
+ imx_media_capture_device_remove(csi->vdev);
- return imx_media_capture_device_register(csi->vdev);
+ return ret;
}
static void imx7_csi_unregistered(struct v4l2_subdev *sd)
@@ -1127,6 +1129,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd)
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(csi->vdev);
+ imx_media_capture_device_remove(csi->vdev);
}
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
@@ -1189,7 +1192,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- int ret;
+ int i, ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
@@ -1251,14 +1254,18 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
- csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
- IMX7_CSI_PAD_SRC);
- if (IS_ERR(csi->vdev))
- return PTR_ERR(csi->vdev);
-
v4l2_ctrl_handler_init(&csi->ctrl_hdlr, 0);
csi->sd.ctrl_handler = &csi->ctrl_hdlr;
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
+ csi->pad);
+ if (ret < 0)
+ goto free;
+
ret = v4l2_async_register_fwnode_subdev(&csi->sd,
sizeof(struct v4l2_async_subdev),
NULL, 0,
@@ -1269,8 +1276,6 @@ static int imx7_csi_probe(struct platform_device *pdev)
return 0;
free:
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
cleanup:
@@ -1298,9 +1303,6 @@ static int imx7_csi_remove(struct platform_device *pdev)
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
-
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 73d8354e618c..99166afca071 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -293,7 +293,7 @@ static int mipi_csis_dump_regs(struct csi_state *state)
struct device *dev = &state->pdev->dev;
unsigned int i;
u32 cfg;
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
@@ -350,6 +350,8 @@ static void mipi_csis_sw_reset(struct csi_state *state)
static int mipi_csis_phy_init(struct csi_state *state)
{
state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
+ if (IS_ERR(state->mipi_phy_regulator))
+ return PTR_ERR(state->mipi_phy_regulator);
return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
1000000);
@@ -780,17 +782,6 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mipi_csis_registered(struct v4l2_subdev *mipi_sd)
-{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
-
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- return media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
- state->pads);
-}
-
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
@@ -816,10 +807,6 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
-static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
- .registered = mipi_csis_registered,
-};
-
static int mipi_csis_parse_dt(struct platform_device *pdev,
struct csi_state *state)
{
@@ -880,7 +867,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
mipi_sd->entity.ops = &mipi_csis_entity_ops;
- mipi_sd->internal_ops = &mipi_csis_internal_ops;
mipi_sd->dev = &pdev->dev;
@@ -892,6 +878,13 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
v4l2_set_subdevdata(mipi_sd, &pdev->dev);
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
+ state->pads);
+ if (ret)
+ return ret;
+
ret = v4l2_async_register_fwnode_subdev(mipi_sd,
sizeof(struct v4l2_async_subdev),
&sink_port, 1,
@@ -947,7 +940,6 @@ static void mipi_csis_debugfs_exit(struct csi_state *state)
static int mipi_csis_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csi_state *state;
int ret;
@@ -966,11 +958,13 @@ static int mipi_csis_probe(struct platform_device *pdev)
return ret;
}
- mipi_csis_phy_init(state);
+ ret = mipi_csis_phy_init(state);
+ if (ret < 0)
+ return ret;
+
mipi_csis_phy_reset(state);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
index cc288ae6d5f2..9def80ef28f3 100644
--- a/drivers/staging/media/ipu3/Makefile
+++ b/drivers/staging/media/ipu3/Makefile
@@ -10,9 +10,3 @@ ipu3-imgu-objs += \
ipu3-css.o ipu3-v4l2.o ipu3.o
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, packed-not-aligned)
-ccflags-y += $(call cc-disable-warning, type-limits)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index 5e55baeaea1a..b44bb4a72ca7 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -9,12 +9,9 @@ staging directory.
relevant. (Sakari)
- IPU3 driver documentation (Laurent)
- Add diagram in driver rst to describe output capability.
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
- uAPI documentation:
- Further clarification on some ambiguities such as data type conversion of
- IEFD CU inputs. (Sakari)
Move acronyms to doc-rst file. (Mauro)
- Switch to yavta from v4l2n in driver docs.
@@ -27,5 +24,3 @@ staging directory.
- Document different operation modes, and which buffer queues are relevant
in each mode. To process an image, which queues require a buffer and in
which ones is it optional?
-
-- Make sure it builds fine with no warnings with W=1
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index c7cd27efac8a..08eaa0bad0de 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -1217,6 +1217,11 @@ struct ipu3_uapi_shd_config {
*
* All CU inputs are unsigned, they will be converted to signed when written
* to register, i.e. a01 will be written to 9 bit register in s4.4 format.
+ * The data precision s4.4 means 4 bits for the integer part and 4 bits for
+ * the fractional part; the leading bit indicates a positive or negative
+ * value. For userspace software (commonly the imaging library), the
+ * computation of the CU slope values should be based on the slope
+ * resolution of 1/16 (binary 0.0001, the minimal interval value); the slope
+ * value range is [-256, +255].
* This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
* &ipu3_uapi_iefd_cux2_1, &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
*/
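
A quick decode of the s4.4 convention (illustrative userspace helper, not part of the uAPI; two's-complement encoding of the 9-bit register value is assumed here, consistent with the quoted raw range [-256, +255]):

        #include <stdio.h>

        /* real value = raw / 16, i.e. steps of the 1/16 slope resolution */
        static double s44_to_double(unsigned int reg)
        {
                int raw = reg & 0x1ff;

                if (raw & 0x100)        /* sign-extend bit 8 */
                        raw -= 0x200;
                return raw / 16.0;
        }

        int main(void)
        {
                printf("%g\n", s44_to_double(0x018)); /* 24/16  =  1.5 */
                printf("%g\n", s44_to_double(0x1e8)); /* -24/16 = -1.5 */
                return 0;
        }
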
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 1a966cb2f3a6..6fb60b58447a 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -908,11 +908,7 @@ static int iss_map_mem_resource(struct platform_device *pdev,
struct iss_device *iss,
enum iss_mem_resources res)
{
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
-
- iss->regs[res] = devm_ioremap_resource(iss->dev, mem);
+ iss->regs[res] = devm_platform_ioremap_resource(pdev, res);
return PTR_ERR_OR_ZERO(iss->regs[res]);
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 54144dc9f509..673aa3a5f2bd 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -671,7 +671,7 @@ iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
/*
@@ -726,7 +726,7 @@ iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
sdsel.pad = pad;
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index c85ac6db0302..1bce49d3e7e2 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
- cedrus_mpeg2.o cedrus_h264.o
+ cedrus_mpeg2.o cedrus_h264.o cedrus_h265.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 2d3ea8b74dfd..c6ddd46eff82 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -95,6 +95,45 @@ static const struct cedrus_control cedrus_controls[] = {
.codec = CEDRUS_CODEC_H264,
.required = false,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
@@ -241,6 +280,16 @@ static int cedrus_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_ctrls;
}
+ ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ cedrus_prepare_format(&ctx->dst_fmt);
+ ctx->src_fmt.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
+ /*
+ * TILED_NV12 has stricter requirements, so copy the width and
+ * height to src_fmt to ensure that it matches the dst_fmt resolution.
+ */
+ ctx->src_fmt.width = ctx->dst_fmt.width;
+ ctx->src_fmt.height = ctx->dst_fmt.height;
+ cedrus_prepare_format(&ctx->src_fmt);
v4l2_fh_add(&ctx->fh);
@@ -330,6 +379,7 @@ static int cedrus_probe(struct platform_device *pdev)
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
dev->dec_ops[CEDRUS_CODEC_H264] = &cedrus_dec_ops_h264;
+ dev->dec_ops[CEDRUS_CODEC_H265] = &cedrus_dec_ops_h265;
mutex_init(&dev->dev_mutex);
@@ -357,6 +407,8 @@ static int cedrus_probe(struct platform_device *pdev)
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &cedrus_m2m_media_ops;
@@ -438,22 +490,26 @@ static const struct cedrus_variant sun8i_a33_cedrus_variant = {
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 2f017a651848..96765555ab8a 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -27,12 +27,14 @@
#define CEDRUS_NAME "cedrus"
#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+#define CEDRUS_CAPABILITY_H265_DEC BIT(1)
#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
+ CEDRUS_CODEC_H265,
CEDRUS_CODEC_LAST,
};
@@ -67,6 +69,12 @@ struct cedrus_mpeg2_run {
const struct v4l2_ctrl_mpeg2_quantization *quantization;
};
+struct cedrus_h265_run {
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+};
+
struct cedrus_run {
struct vb2_v4l2_buffer *src;
struct vb2_v4l2_buffer *dst;
@@ -74,6 +82,7 @@ struct cedrus_run {
union {
struct cedrus_h264_run h264;
struct cedrus_mpeg2_run mpeg2;
+ struct cedrus_h265_run h265;
};
};
@@ -107,9 +116,24 @@ struct cedrus_ctx {
ssize_t mv_col_buf_size;
void *pic_info_buf;
dma_addr_t pic_info_buf_dma;
+ ssize_t pic_info_buf_size;
void *neighbor_info_buf;
dma_addr_t neighbor_info_buf_dma;
+ void *deblk_buf;
+ dma_addr_t deblk_buf_dma;
+ ssize_t deblk_buf_size;
+ void *intra_pred_buf;
+ dma_addr_t intra_pred_buf_dma;
+ ssize_t intra_pred_buf_size;
} h264;
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_addr;
+ ssize_t mv_col_buf_size;
+ ssize_t mv_col_buf_unit_size;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_addr;
+ } h265;
} codec;
};
@@ -155,6 +179,7 @@ struct cedrus_dev {
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
extern struct cedrus_dec_ops cedrus_dec_ops_h264;
+extern struct cedrus_dec_ops cedrus_dec_ops_h265;
static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
{
@@ -179,12 +204,16 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
int index, unsigned int plane)
{
- struct vb2_buffer *buf;
+ struct vb2_buffer *buf = NULL;
+ struct vb2_queue *vq;
if (index < 0)
return 0;
- buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index];
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vq)
+ buf = vb2_get_buffer(vq, index);
+
return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
}
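cedrus_dst_buf_addr() above now resolves the capture queue through v4l2_m2m_get_vq() and looks the buffer up with vb2_get_buffer() instead of dereferencing m2m_ctx internals. A self-contained sketch of that lookup, assuming a single-plane dma-contig queue; the function name is hypothetical:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>

static dma_addr_t capture_buf_dma_addr(struct v4l2_m2m_ctx *m2m_ctx, int index)
{
        struct vb2_queue *vq;
        struct vb2_buffer *buf;

        if (index < 0)
                return 0;

        vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
        if (!vq)
                return 0;

        /* vb2_get_buffer() returns NULL for an out-of-range index. */
        buf = vb2_get_buffer(vq, index);
        if (!buf)
                return 0;

        return vb2_dma_contig_plane_dma_addr(buf, 0);
}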
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 56ca4c9ad01c..4a2fc33a1d79 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -59,6 +59,15 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_H264_SPS);
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ run.h265.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ run.h265.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ run.h265.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
+ break;
+
default:
break;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index d6a782703c9b..bfb4a4820a67 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -6,6 +6,7 @@
* Copyright (c) 2018 Bootlin
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
@@ -38,7 +39,7 @@ struct cedrus_h264_sram_ref_pic {
#define CEDRUS_H264_FRAME_NUM 18
#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
-#define CEDRUS_PIC_INFO_BUF_SIZE (128 * SZ_1K)
+#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
enum cedrus_h264_sram_off off,
@@ -96,7 +97,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_buffer *output_buf;
struct cedrus_dev *dev = ctx->dev;
unsigned long used_dpbs = 0;
@@ -104,6 +105,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
unsigned int output = 0;
unsigned int i;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(pic_list, 0, sizeof(pic_list));
for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
@@ -167,12 +170,14 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
enum cedrus_h264_sram_off sram)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_dev *dev = ctx->dev;
u8 sram_array[CEDRUS_MAX_REF_IDX];
unsigned int i;
size_t size;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(sram_array, 0, sizeof(sram_array));
for (i = 0; i < num_ref; i++) {
@@ -240,8 +245,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
sizeof(scaling->scaling_list_8x8[0]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
- scaling->scaling_list_8x8[3],
- sizeof(scaling->scaling_list_8x8[3]));
+ scaling->scaling_list_8x8[1],
+ sizeof(scaling->scaling_list_8x8[1]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
scaling->scaling_list_4x4,
@@ -289,6 +294,28 @@ static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
}
}
+/*
+ * It turns out that using VE_H264_VLD_OFFSET to skip bits is not reliable. In
+ * rare cases the frame is not decoded correctly. However, setting the offset
+ * to 0 and skipping the appropriate number of bits with the flush-bits
+ * trigger always works.
+ */
+static void cedrus_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_FLUSH_BITS |
+ VE_H264_TRIGGER_TYPE_N_BITS(tmp));
+ while (cedrus_read(dev, VE_H264_STATUS) & VE_H264_STATUS_VLD_BUSY)
+ udelay(1);
+
+ count += tmp;
+ }
+}
+
static void cedrus_set_params(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -299,12 +326,13 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t src_buf_addr;
- u32 offset = slice->header_bit_size;
- u32 len = (slice->size * 8) - offset;
+ u32 len = slice->size * 8;
+ unsigned int pic_width_in_mbs;
+ bool mbaff_pic;
u32 reg;
cedrus_write(dev, VE_H264_VLD_LEN, len);
- cedrus_write(dev, VE_H264_VLD_OFFSET, offset);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
cedrus_write(dev, VE_H264_VLD_END,
@@ -314,6 +342,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
VE_H264_VLD_ADDR_LAST);
+ if (ctx->src_fmt.width > 2048) {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_MIXED_RAM |
+ VE_BUF_CTRL_DBLK_MIXED_RAM);
+ cedrus_write(dev, VE_DBLK_DRAM_BUF_ADDR,
+ ctx->codec.h264.deblk_buf_dma);
+ cedrus_write(dev, VE_INTRAPRED_DRAM_BUF_ADDR,
+ ctx->codec.h264.intra_pred_buf_dma);
+ } else {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_INT_SRAM |
+ VE_BUF_CTRL_DBLK_INT_SRAM);
+ }
+
/*
* FIXME: Since the bitstream parsing is done in software, and
* in userspace, this shouldn't be needed anymore. But it
@@ -323,6 +365,8 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+ cedrus_skip_bits(dev, slice->header_bit_size);
+
if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
(slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
@@ -370,12 +414,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
cedrus_write(dev, VE_H264_SPS, reg);
+ mbaff_pic = !(slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
+
// slice parameters
reg = 0;
+ reg |= ((slice->first_mb_in_slice % pic_width_in_mbs) & 0xff) << 24;
+ reg |= (((slice->first_mb_in_slice / pic_width_in_mbs) *
+ (mbaff_pic + 1)) & 0xff) << 16;
reg |= decode->nal_ref_idc ? BIT(12) : 0;
reg |= (slice->slice_type & 0xf) << 8;
reg |= slice->cabac_init_idc & 0x3;
- reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (ctx->fh.m2m_ctx->new_frame)
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
reg |= VE_H264_SHS_FIELD_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
@@ -447,7 +499,7 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
{
struct cedrus_dev *dev = ctx->dev;
- cedrus_engine_enable(dev, CEDRUS_CODEC_H264);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H264);
cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
@@ -464,18 +516,30 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
static int cedrus_h264_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
+ unsigned int pic_info_size;
unsigned int field_size;
unsigned int mv_col_size;
int ret;
+ /* The formula for the picture buffer size is taken from the CedarX source. */
+
+ if (ctx->src_fmt.width > 2048)
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x4000;
+ else
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x1000;
+
/*
- * FIXME: It seems that the H6 cedarX code is using a formula
- * here based on the size of the frame, while all the older
- * code is using a fixed size, so that might need to be
- * changed at some point.
+ * FIXME: If V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is set,
+ * there is no need to multiply by 2.
*/
+ pic_info_size += ctx->src_fmt.height * 2 * 64;
+
+ if (pic_info_size < CEDRUS_MIN_PIC_INFO_BUF_SIZE)
+ pic_info_size = CEDRUS_MIN_PIC_INFO_BUF_SIZE;
+
+ ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_alloc_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
&ctx->codec.h264.pic_info_buf_dma,
GFP_KERNEL);
if (!ctx->codec.h264.pic_info_buf)
@@ -528,15 +592,56 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
goto err_neighbor_buf;
}
+ if (ctx->src_fmt.width > 2048) {
+ /*
+ * The formulas for the deblock and intra prediction buffer
+ * sizes are taken from the CedarX source.
+ */
+
+ ctx->codec.h264.deblk_buf_size =
+ ALIGN(ctx->src_fmt.width, 32) * 12;
+ ctx->codec.h264.deblk_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.deblk_buf) {
+ ret = -ENOMEM;
+ goto err_mv_col_buf;
+ }
+
+ ctx->codec.h264.intra_pred_buf_size =
+ ALIGN(ctx->src_fmt.width, 64) * 5;
+ ctx->codec.h264.intra_pred_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.intra_pred_buf) {
+ ret = -ENOMEM;
+ goto err_deblk_buf;
+ }
+ }
+
return 0;
+err_deblk_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+
+err_mv_col_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma);
+
err_neighbor_buf:
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
err_pic_buf:
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
return ret;
@@ -552,9 +657,17 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
+ if (ctx->codec.h264.deblk_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+ if (ctx->codec.h264.intra_pred_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
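As a sanity check of the sizing logic above (illustrative numbers, not from the patch): a 4096x2160 stream gets pic_info_size = 18 * 0x4000 + 2160 * 2 * 64 = 294912 + 276480 = 571392 bytes, comfortably above the 130 KiB floor, while a 1280x720 stream gets 18 * 0x1000 + 720 * 2 * 64 = 165888 bytes (162 KiB); the CEDRUS_MIN_PIC_INFO_BUF_SIZE fallback therefore only kicks in for small frames.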
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
new file mode 100644
index 000000000000..6945dc74e1d7
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/*
+ * These are the sizes for side buffers required by the hardware for storing
+ * internal decoding metadata. They match the values used by the early BSP
+ * implementations, which were initially exposed in libvdpau-sunxi.
+ * Subsequent BSP implementations seem to double the neighbor info buffer size
+ * for the H6 SoC, which may be related to 10-bit H265 support.
+ */
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
+#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
+#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
+
+struct cedrus_h265_sram_frame_info {
+ __le32 top_pic_order_cnt;
+ __le32 bottom_pic_order_cnt;
+ __le32 top_mv_col_buf_addr;
+ __le32 bottom_mv_col_buf_addr;
+ __le32 luma_addr;
+ __le32 chroma_addr;
+} __packed;
+
+struct cedrus_h265_sram_pred_weight {
+ __s8 delta_weight;
+ __s8 offset;
+} __packed;
+
+static enum cedrus_irq_status cedrus_h265_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_H265_STATUS);
+ reg &= VE_DEC_H265_STATUS_CHECK_MASK;
+
+ if (reg & VE_DEC_H265_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_H265_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_h265_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);
+}
+
+static void cedrus_h265_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_H265_CTRL);
+
+ reg &= ~VE_DEC_H265_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_H265_CTRL, reg);
+}
+
+static void cedrus_h265_sram_write_offset(struct cedrus_dev *dev, u32 offset)
+{
+ cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
+}
+
+static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
+ unsigned int size)
+{
+ u32 *word = data;
+
+ while (size >= sizeof(u32)) {
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *word++);
+ size -= sizeof(u32);
+ }
+}
+
+static inline dma_addr_t
+cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int index, unsigned int field)
+{
+ return ctx->codec.h265.mv_col_buf_addr + index *
+ ctx->codec.h265.mv_col_buf_unit_size +
+ field * ctx->codec.h265.mv_col_buf_unit_size / 2;
+}
+
+static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+ unsigned int index,
+ bool field_pic,
+ u32 pic_order_cnt[],
+ int buffer_index)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 0);
+ dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 1);
+ dma_addr_t mv_col_buf_addr[2] = {
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index, 0),
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index,
+ field_pic ? 1 : 0)
+ };
+ u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
+ VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
+ struct cedrus_h265_sram_frame_info frame_info = {
+ .top_pic_order_cnt = cpu_to_le32(pic_order_cnt[0]),
+ .bottom_pic_order_cnt = cpu_to_le32(field_pic ?
+ pic_order_cnt[1] :
+ pic_order_cnt[0]),
+ .top_mv_col_buf_addr =
+ cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .bottom_mv_col_buf_addr = cpu_to_le32(field_pic ?
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[1]) :
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .luma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_luma_addr)),
+ .chroma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_chroma_addr)),
+ };
+
+ cedrus_h265_sram_write_offset(dev, offset);
+ cedrus_h265_sram_write_data(dev, &frame_info, sizeof(frame_info));
+}
+
+static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ u8 num_active_dpb_entries)
+{
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int i;
+
+ for (i = 0; i < num_active_dpb_entries; i++) {
+ int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0);
+ u32 pic_order_cnt[2] = {
+ dpb[i].pic_order_cnt[0],
+ dpb[i].pic_order_cnt[1]
+ };
+
+ cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
+ pic_order_cnt,
+ buffer_index);
+ }
+}
+
+static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ const u8 list[],
+ u8 num_ref_idx_active,
+ u32 sram_offset)
+{
+ unsigned int i;
+ u32 word = 0;
+
+ cedrus_h265_sram_write_offset(dev, sram_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int shift = (i % 4) * 8;
+ unsigned int index = list[i];
+ u8 value = list[i];
+
+ if (dpb[index].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
+
+ /* Each SRAM word gathers up to 4 references. */
+ word |= value << shift;
+
+ /* Write the word to SRAM and clear it for the next batch. */
+ if ((i % 4) == 3 || i == (num_ref_idx_active - 1)) {
+ cedrus_h265_sram_write_data(dev, &word, sizeof(word));
+ word = 0;
+ }
+ }
+}
+
+static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
+ const s8 delta_luma_weight[],
+ const s8 luma_offset[],
+ const s8 delta_chroma_weight[][2],
+ const s8 chroma_offset[][2],
+ u8 num_ref_idx_active,
+ u32 sram_luma_offset,
+ u32 sram_chroma_offset)
+{
+ struct cedrus_h265_sram_pred_weight pred_weight[2] = { { 0 } };
+ unsigned int i, j;
+
+ cedrus_h265_sram_write_offset(dev, sram_luma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int index = i % 2;
+
+ pred_weight[index].delta_weight = delta_luma_weight[i];
+ pred_weight[index].offset = luma_offset[i];
+
+ if (index == 1 || i == (num_ref_idx_active - 1))
+ cedrus_h265_sram_write_data(dev, (u32 *)&pred_weight,
+ sizeof(pred_weight));
+ }
+
+ cedrus_h265_sram_write_offset(dev, sram_chroma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ for (j = 0; j < 2; j++) {
+ pred_weight[j].delta_weight = delta_chroma_weight[i][j];
+ pred_weight[j].offset = chroma_offset[i][j];
+ }
+
+ cedrus_h265_sram_write_data(dev, &pred_weight,
+ sizeof(pred_weight));
+ }
+}
+
+static void cedrus_h265_setup(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_hevc_pred_weight_table *pred_weight_table;
+ dma_addr_t src_buf_addr;
+ dma_addr_t src_buf_end_addr;
+ u32 chroma_log2_weight_denom;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
+ u32 reg;
+
+ sps = run->h265.sps;
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ pred_weight_table = &slice_params->pred_weight_table;
+
+ /* MV column buffer size and allocation. */
+ if (!ctx->codec.h265.mv_col_buf_size) {
+ unsigned int num_buffers =
+ run->dst->vb2_buf.vb2_queue->num_buffers;
+ unsigned int log2_max_luma_coding_block_size =
+ sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ unsigned int ctb_size_luma =
+ 1UL << log2_max_luma_coding_block_size;
+
+ /*
+ * Each CTB requires an MV col buffer with a specific unit size.
+ * Since the address is given with the LSBs missing, 1 KiB is
+ * added to each buffer to ensure proper alignment.
+ */
+ ctx->codec.h265.mv_col_buf_unit_size =
+ DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
+ DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
+ CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
+
+ ctx->codec.h265.mv_col_buf_size = num_buffers *
+ ctx->codec.h265.mv_col_buf_unit_size;
+
+ ctx->codec.h265.mv_col_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h265.mv_col_buf_size,
+ &ctx->codec.h265.mv_col_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.mv_col_buf) {
+ ctx->codec.h265.mv_col_buf_size = 0;
+ // TODO: Abort the process here.
+ return;
+ }
+ }
+
+ /* Activate H265 engine. */
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H265);
+
+ /* Source offset and length in bits. */
+
+ reg = slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, reg);
+
+ reg = slice_params->bit_size - slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_H265_BITS_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA;
+
+ cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+
+ src_buf_end_addr = src_buf_addr +
+ DIV_ROUND_UP(slice_params->bit_size, 8);
+
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
+ cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+
+ /* Coding tree block address: start at the beginning. */
+ reg = VE_DEC_H265_DEC_CTB_ADDR_X(0) | VE_DEC_H265_DEC_CTB_ADDR_Y(0);
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
+
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+
+ /* Clear the number of correctly-decoded coding tree blocks. */
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_NUM, 0);
+
+ /* Initialize bitstream access. */
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
+
+ /* Bitstream parameters. */
+
+ reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
+ VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(slice_params->nuh_temporal_id_plus1);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_NAL_HDR, reg);
+
+ /* SPS. */
+
+ reg = VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(sps->max_transform_hierarchy_depth_intra) |
+ VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(sps->max_transform_hierarchy_depth_inter) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(sps->log2_diff_max_min_luma_transform_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(sps->log2_min_luma_transform_block_size_minus2) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
+ V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE,
+ V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SPS_HDR, reg);
+
+ reg = VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_pcm_luma_coding_block_size) |
+ VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_pcm_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(sps->pcm_sample_bit_depth_chroma_minus1) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(sps->pcm_sample_bit_depth_luma_minus1);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PCM_CTRL, reg);
+
+ /* PPS. */
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(pps->pps_cr_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(pps->pps_cb_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(pps->init_qp_minus26) |
+ VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(pps->diff_cu_qp_delta_depth);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED,
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED,
+ V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED,
+ V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED,
+ pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL0, reg);
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(pps->log2_parallel_merge_level_minus2);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ pps->flags);
+
+ /* TODO: VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED */
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED, pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED, pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL1, reg);
+
+ /* Slice Parameters. */
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(slice_params->pic_struct) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(slice_params->five_minus_max_num_merge_cand) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(slice_params->num_ref_idx_l1_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(slice_params->num_ref_idx_l0_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(slice_params->collocated_ref_idx) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(slice_params->colour_plane_id) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(slice_params->slice_type);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
+ V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
+ pps->flags);
+
+ /* FIXME: For multi-slice support. */
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO0, reg);
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(slice_params->num_rps_poc_st_curr_after == 0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ slice_params->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
+
+ chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
+ pred_weight_table->delta_chroma_log2_weight_denom;
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
+
+ /* Decoded picture size. */
+
+ reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
+ VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PIC_SIZE, reg);
+
+ /* Scaling list. */
+
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
+
+ /* Neighbor information address. */
+ reg = VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(ctx->codec.h265.neighbor_info_buf_addr);
+ cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
+
+ /* Write decoded picture buffer in pic list. */
+ cedrus_h265_frame_info_write_dpb(ctx, slice_params->dpb,
+ slice_params->num_active_dpb_entries);
+
+ /* Output frame. */
+
+ output_pic_list_index = V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
+ pic_order_cnt[0] = slice_params->slice_pic_order_cnt;
+ pic_order_cnt[1] = slice_params->slice_pic_order_cnt;
+
+ cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
+ slice_params->pic_struct != 0,
+ pic_order_cnt,
+ run->dst->vb2_buf.index);
+
+ cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
+
+ /* Reference picture list 0 (for P/B frames). */
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
+
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED))
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l0,
+ pred_weight_table->luma_offset_l0,
+ pred_weight_table->delta_chroma_weight_l0,
+ pred_weight_table->chroma_offset_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0);
+ }
+
+ /* Reference picture list 1 (for B frames). */
+ if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED)
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l1,
+ pred_weight_table->luma_offset_l1,
+ pred_weight_table->delta_chroma_weight_l1,
+ pred_weight_table->chroma_offset_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1);
+ }
+
+ /* Enable the appropriate interrupts. */
+ cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
+}
+
+static int cedrus_h265_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ /* The buffer size is calculated at setup time. */
+ ctx->codec.h265.mv_col_buf_size = 0;
+
+ ctx->codec.h265.neighbor_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.neighbor_info_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cedrus_h265_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (ctx->codec.h265.mv_col_buf_size > 0) {
+ dma_free_coherent(dev->dev, ctx->codec.h265.mv_col_buf_size,
+ ctx->codec.h265.mv_col_buf,
+ ctx->codec.h265.mv_col_buf_addr);
+
+ ctx->codec.h265.mv_col_buf_size = 0;
+ }
+
+ dma_free_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr);
+}
+
+static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_DEC_SLICE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h265 = {
+ .irq_clear = cedrus_h265_irq_clear,
+ .irq_disable = cedrus_h265_irq_disable,
+ .irq_status = cedrus_h265_irq_status,
+ .setup = cedrus_h265_setup,
+ .start = cedrus_h265_start,
+ .stop = cedrus_h265_stop,
+ .trigger = cedrus_h265_trigger,
+};
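To make the MV column buffer sizing in cedrus_h265_setup() concrete (an illustrative calculation, not taken from the patch): for a 1920x1088 stream with a 64-pixel CTB (log2_min_luma_coding_block_size_minus3 = 0, log2_diff_max_min_luma_coding_block_size = 3), DIV_ROUND_UP(1920, 64) * DIV_ROUND_UP(1088, 64) = 30 * 17 = 510 CTBs, so mv_col_buf_unit_size = 510 * 160 + 1024 = 82624 bytes; with 8 capture buffers the one-off allocation comes to 660992 bytes (about 645 KiB).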
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index a942cd9bed57..daf5f244f93b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -30,7 +30,7 @@
#include "cedrus_hw.h"
#include "cedrus_regs.h"
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec)
{
u32 reg = 0;
@@ -50,11 +50,20 @@ int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
reg |= VE_MODE_DEC_H264;
break;
+ case CEDRUS_CODEC_H265:
+ reg |= VE_MODE_DEC_H265;
+ break;
+
default:
return -EINVAL;
}
- cedrus_write(dev, VE_MODE, reg);
+ if (ctx->src_fmt.width == 4096)
+ reg |= VE_MODE_PIC_WIDTH_IS_4096;
+ if (ctx->src_fmt.width > 2048)
+ reg |= VE_MODE_PIC_WIDTH_MORE_2048;
+
+ cedrus_write(ctx->dev, VE_MODE, reg);
return 0;
}
@@ -103,7 +112,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
{
struct cedrus_dev *dev = data;
struct cedrus_ctx *ctx;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state state;
enum cedrus_irq_status status;
@@ -121,24 +129,13 @@ static irqreturn_t cedrus_irq(int irq, void *data)
dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- if (!src_buf || !dst_buf) {
- v4l2_err(&dev->v4l2_dev,
- "Missing source and/or destination buffers\n");
- return IRQ_HANDLED;
- }
-
if (status == CEDRUS_IRQ_ERROR)
state = VB2_BUF_STATE_ERROR;
else
state = VB2_BUF_STATE_DONE;
- v4l2_m2m_buf_done(src_buf, state);
- v4l2_m2m_buf_done(dst_buf, state);
-
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ state);
return IRQ_HANDLED;
}
@@ -146,7 +143,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
int cedrus_hw_probe(struct cedrus_dev *dev)
{
const struct cedrus_variant *variant;
- struct resource *res;
int irq_dec;
int ret;
@@ -225,8 +221,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
goto err_sram;
}
- res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(dev->dev, res);
+ dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
if (IS_ERR(dev->base)) {
dev_err(dev->dev, "Failed to map registers\n");
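The cedrus_irq() change above folds the buffer dequeue, the two v4l2_m2m_buf_done() calls and v4l2_m2m_job_finish() into a single helper. A minimal sketch of the resulting handler shape; the wrapper name is hypothetical:

#include <media/v4l2-mem2mem.h>

/*
 * v4l2_m2m_buf_done_and_job_finish() removes the current source and
 * destination buffers, marks them done with the given state and lets
 * the m2m core schedule the next job, so the handler no longer pairs
 * v4l2_m2m_{src,dst}_buf_remove() with v4l2_m2m_buf_done().
 */
static void finish_job(struct v4l2_m2m_dev *m2m_dev,
                       struct v4l2_m2m_ctx *m2m_ctx, bool error)
{
        enum vb2_buffer_state state = error ? VB2_BUF_STATE_ERROR
                                            : VB2_BUF_STATE_DONE;

        v4l2_m2m_buf_done_and_job_finish(m2m_dev, m2m_ctx, state);
}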
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index 27d0882397aa..604ff932fbf5 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -16,7 +16,7 @@
#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);
void cedrus_dst_format_set(struct cedrus_dev *dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 13c34927bad5..8bcd6b8f9e2d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -96,7 +96,7 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
quantization = run->mpeg2.quantization;
/* Activate MPEG engine. */
- cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_MPEG2);
/* Set intra quantization matrix. */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index ddd29788d685..7beb03d3bb39 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -10,6 +10,9 @@
#ifndef _CEDRUS_REGS_H_
#define _CEDRUS_REGS_H_
+#define SHIFT_AND_MASK_BITS(v, h, l) \
+ (((unsigned long)(v) << (l)) & GENMASK(h, l))
+
/*
* Common acronyms and contractions used in register descriptions:
* * VLD : Variable-Length Decoder
@@ -18,13 +21,22 @@
* * MC: Motion Compensation
* * STCD: Start Code Detect
* * SDRT: Scale Down and Rotate
+ * * WB: Writeback
+ * * BITS/BS: Bitstream
+ * * MB: Macroblock
+ * * CTU: Coding Tree Unit
+ * * CTB: Coding Tree Block
+ * * IDX: Index
*/
#define VE_ENGINE_DEC_MPEG 0x100
#define VE_ENGINE_DEC_H264 0x200
+#define VE_ENGINE_DEC_H265 0x500
#define VE_MODE 0x00
+#define VE_MODE_PIC_WIDTH_IS_4096 BIT(22)
+#define VE_MODE_PIC_WIDTH_MORE_2048 BIT(21)
#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
@@ -34,11 +46,22 @@
#define VE_MODE_DEC_H264 (0x01 << 0)
#define VE_MODE_DEC_MPEG (0x00 << 0)
+#define VE_BUF_CTRL 0x50
+
+#define VE_BUF_CTRL_INTRAPRED_EXT_RAM (0x02 << 2)
+#define VE_BUF_CTRL_INTRAPRED_MIXED_RAM (0x01 << 2)
+#define VE_BUF_CTRL_INTRAPRED_INT_SRAM (0x00 << 2)
+#define VE_BUF_CTRL_DBLK_EXT_RAM (0x02 << 0)
+#define VE_BUF_CTRL_DBLK_MIXED_RAM (0x01 << 0)
+#define VE_BUF_CTRL_DBLK_INT_SRAM (0x00 << 0)
+
+#define VE_DBLK_DRAM_BUF_ADDR 0x54
+#define VE_INTRAPRED_DRAM_BUF_ADDR 0x58
#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
-#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
-#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16)
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0)
#define VE_CHROMA_BUF_LEN 0xe8
@@ -46,7 +69,7 @@
#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
-#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
+#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0)
#define VE_PRIMARY_OUT_FMT 0xec
@@ -69,15 +92,15 @@
#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
-#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28)
#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
- (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+ (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
- (((p) << 10) & GENMASK(11, 10))
+ SHIFT_AND_MASK_BITS(p, 11, 10)
#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
- (((s) << 8) & GENMASK(9, 8))
+ SHIFT_AND_MASK_BITS(s, 9, 8)
#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
((v) ? BIT(7) : 0)
#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
@@ -98,19 +121,19 @@
#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
- ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(w, 16), 15, 8)
#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
- ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(h, 16), 7, 0)
#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
-#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
-#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
-#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
-#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
+#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8)
+#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0)
#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
@@ -225,13 +248,277 @@
#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
- (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
+ (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0))
#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+#define VE_DEC_H265_DEC_NAL_HDR (VE_ENGINE_DEC_H265 + 0x00)
+
+#define VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 0)
+
+#define VE_DEC_H265_FLAG(reg_flag, ctrl_flag, flags) \
+ (((flags) & (ctrl_flag)) ? reg_flag : 0)
+
+#define VE_DEC_H265_DEC_SPS_HDR (VE_ENGINE_DEC_H265 + 0x04)
+
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE BIT(26)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED BIT(25)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED BIT(24)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED BIT(23)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE BIT(2)
+
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(v) \
+ SHIFT_AND_MASK_BITS(v, 22, 20)
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 17)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 16, 15)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 13)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 11)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 9)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 3)
+#define VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(v) \
+ SHIFT_AND_MASK_BITS(v, 1, 0)
+
+#define VE_DEC_H265_DEC_PIC_SIZE (VE_ENGINE_DEC_H265 + 0x08)
+
+#define VE_DEC_H265_DEC_PIC_SIZE_WIDTH(w) (((w) << 0) & GENMASK(13, 0))
+#define VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(h) (((h) << 16) & GENMASK(29, 16))
+
+#define VE_DEC_H265_DEC_PCM_CTRL (VE_ENGINE_DEC_H265 + 0x0c)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED BIT(15)
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED BIT(14)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 11, 10)
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 9, 8)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 7, 4)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0 (VE_ENGINE_DEC_H265 + 0x10)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 24)
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 16)
+#define VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 8)
+#define VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1 (VE_ENGINE_DEC_H265 + 0x14)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(6)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED BIT(5)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED BIT(4)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 8)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0 (VE_ENGINE_DEC_H265 + 0x18)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED BIT(31)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_SRAM (0 << 30)
+#define VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT (1 << 30)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0 (VE_ENGINE_DEC_H265 + 0x20)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0 BIT(11)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT BIT(10)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO BIT(9)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA BIT(8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA BIT(7)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE BIT(6)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT BIT(1)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC BIT(0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(v) \
+ SHIFT_AND_MASK_BITS(v, 26, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 23, 20)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(v) \
+ SHIFT_AND_MASK_BITS(v, 15, 12)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 2)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1 (VE_ENGINE_DEC_H265 + 0x24)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 31, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 27, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(v) \
+ ((v) ? BIT(21) : 0)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 20, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2 (VE_ENGINE_DEC_H265 + 0x28)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 2, 0)
+
+#define VE_DEC_H265_DEC_CTB_ADDR (VE_ENGINE_DEC_H265 + 0x2c)
+
+#define VE_DEC_H265_DEC_CTB_ADDR_Y(y) SHIFT_AND_MASK_BITS(y, 25, 16)
+#define VE_DEC_H265_DEC_CTB_ADDR_X(x) SHIFT_AND_MASK_BITS(x, 9, 0)
+
+#define VE_DEC_H265_CTRL (VE_ENGINE_DEC_H265 + 0x30)
+
+#define VE_DEC_H265_CTRL_DDR_CONSISTENCY_EN BIT(31)
+#define VE_DEC_H265_CTRL_STCD_EN BIT(25)
+#define VE_DEC_H265_CTRL_EPTB_DEC_BYPASS_EN BIT(24)
+#define VE_DEC_H265_CTRL_TQ_BYPASS_EN BIT(12)
+#define VE_DEC_H265_CTRL_VLD_BYPASS_EN BIT(11)
+#define VE_DEC_H265_CTRL_NCRI_CACHE_DISABLE BIT(10)
+#define VE_DEC_H265_CTRL_ROTATE_SCALE_OUT_EN BIT(9)
+#define VE_DEC_H265_CTRL_MC_NO_WRITEBACK BIT(8)
+#define VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN BIT(2)
+#define VE_DEC_H265_CTRL_ERROR_IRQ_EN BIT(1)
+#define VE_DEC_H265_CTRL_FINISH_IRQ_EN BIT(0)
+#define VE_DEC_H265_CTRL_IRQ_MASK \
+ (VE_DEC_H265_CTRL_FINISH_IRQ_EN | VE_DEC_H265_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_H265_TRIGGER (VE_ENGINE_DEC_H265 + 0x34)
+
+#define VE_DEC_H265_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_AVS (0x01 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_HEVC (0x00 << 4)
+#define VE_DEC_H265_TRIGGER_DEC_SLICE (0x08 << 0)
+#define VE_DEC_H265_TRIGGER_INIT_SWDEC (0x07 << 0)
+#define VE_DEC_H265_TRIGGER_BYTE_ALIGN (0x06 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCUE (0x05 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCSE (0x04 << 0)
+#define VE_DEC_H265_TRIGGER_FLUSH_BITS (0x03 << 0)
+#define VE_DEC_H265_TRIGGER_GET_BITS (0x02 << 0)
+#define VE_DEC_H265_TRIGGER_SHOW_BITS (0x01 << 0)
+
+#define VE_DEC_H265_STATUS (VE_ENGINE_DEC_H265 + 0x38)
+
+#define VE_DEC_H265_STATUS_STCD BIT(24)
+#define VE_DEC_H265_STATUS_STCD_BUSY BIT(21)
+#define VE_DEC_H265_STATUS_WB_BUSY BIT(20)
+#define VE_DEC_H265_STATUS_BS_DMA_BUSY BIT(19)
+#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(18)
+#define VE_DEC_H265_STATUS_INTER_BUSY BIT(17)
+#define VE_DEC_H265_STATUS_MORE_DATA BIT(16)
+#define VE_DEC_H265_STATUS_VLD_BUSY BIT(14)
+#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY BIT(13)
+#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY BIT(12)
+#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(11)
+#define VE_DEC_H265_STATUS_SAO_BUSY BIT(10)
+#define VE_DEC_H265_STATUS_MVP_BUSY BIT(9)
+#define VE_DEC_H265_STATUS_SWDEC_BUSY BIT(8)
+#define VE_DEC_H265_STATUS_OVER_TIME BIT(3)
+#define VE_DEC_H265_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_H265_STATUS_ERROR BIT(1)
+#define VE_DEC_H265_STATUS_SUCCESS BIT(0)
+#define VE_DEC_H265_STATUS_STCD_TYPE_MASK GENMASK(23, 22)
+#define VE_DEC_H265_STATUS_CHECK_MASK \
+ (VE_DEC_H265_STATUS_SUCCESS | VE_DEC_H265_STATUS_ERROR | \
+ VE_DEC_H265_STATUS_VLD_DATA_REQ)
+#define VE_DEC_H265_STATUS_CHECK_ERROR \
+ (VE_DEC_H265_STATUS_ERROR | VE_DEC_H265_STATUS_VLD_DATA_REQ)
+
+#define VE_DEC_H265_DEC_CTB_NUM (VE_ENGINE_DEC_H265 + 0x3c)
+
+#define VE_DEC_H265_BITS_ADDR (VE_ENGINE_DEC_H265 + 0x40)
+
+#define VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA BIT(30)
+#define VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA BIT(29)
+#define VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA BIT(28)
+#define VE_DEC_H265_BITS_ADDR_BASE(a) (((a) >> 8) & GENMASK(27, 0))
+
+#define VE_DEC_H265_BITS_OFFSET (VE_ENGINE_DEC_H265 + 0x44)
+#define VE_DEC_H265_BITS_LEN (VE_ENGINE_DEC_H265 + 0x48)
+
+#define VE_DEC_H265_BITS_END_ADDR (VE_ENGINE_DEC_H265 + 0x4c)
+
+#define VE_DEC_H265_BITS_END_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_SDRT_CTRL (VE_ENGINE_DEC_H265 + 0x50)
+#define VE_DEC_H265_SDRT_LUMA_ADDR (VE_ENGINE_DEC_H265 + 0x54)
+#define VE_DEC_H265_SDRT_CHROMA_ADDR (VE_ENGINE_DEC_H265 + 0x58)
+
+#define VE_DEC_H265_OUTPUT_FRAME_IDX (VE_ENGINE_DEC_H265 + 0x5c)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR (VE_ENGINE_DEC_H265 + 0x60)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
+#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
+#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+
+#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
+
+#define VE_DEC_H265_LOW_ADDR_PRIMARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 31, 24)
+#define VE_DEC_H265_LOW_ADDR_SECONDARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 23, 16)
+#define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ SHIFT_AND_MASK_BITS(a, 7, 0)
+
+#define VE_DEC_H265_SRAM_OFFSET (VE_ENGINE_DEC_H265 + 0xe0)
+
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0 0x00
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0 0x20
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1 0x60
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1 0x80
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO 0x400
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT 0x20
+#define VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS 0x800
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0 0xc00
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1 0xc10
+
+#define VE_DEC_H265_SRAM_DATA (VE_ENGINE_DEC_H265 + 0xe4)
+
+#define VE_DEC_H265_SRAM_DATA_ADDR_BASE(a) ((a) >> 8)
+#define VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF BIT(7)
+
#define VE_H264_SPS 0x200
#define VE_H264_SPS_MBS_ONLY BIT(18)
#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
@@ -267,13 +554,16 @@
VE_H264_CTRL_SLICE_DECODE_INT)
#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+#define VE_H264_TRIGGER_TYPE_FLUSH_BITS (3 << 0)
#define VE_H264_STATUS 0x228
#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+#define VE_H264_STATUS_VLD_BUSY BIT(8)
#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
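
The field helpers above shift a value into place and clamp it to the named bit range. A minimal sketch of their intended use follows, assuming SHIFT_AND_MASK_BITS(v, h, l) expands to (((unsigned long)(v) << (l)) & GENMASK(h, l)) as defined earlier in this header, and that the VE_DEC_H265_DEC_SLICE_HDR_INFO1 register offset is defined alongside its field macros; the sketch_* functions are illustrative, not driver code:

	static void sketch_write_slice_info1(void __iomem *base, s32 qp_delta,
					     s32 tc_offset_div2)
	{
		u32 reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(qp_delta) |
			  VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(tc_offset_div2);

		writel(reg, base + VE_DEC_H265_DEC_SLICE_HDR_INFO1);
	}

	static int sketch_irq_status(void __iomem *base)
	{
		u32 reg = readl(base + VE_DEC_H265_STATUS) &
			  VE_DEC_H265_STATUS_CHECK_MASK;

		/* decode error, VLD data starvation, or no completion flag */
		if ((reg & VE_DEC_H265_STATUS_CHECK_ERROR) ||
		    !(reg & VE_DEC_H265_STATUS_SUCCESS))
			return -EIO;

		return 0;
	}
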
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index eeee3efd247b..15cf1f10221b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -29,8 +29,8 @@
#define CEDRUS_MIN_WIDTH 16U
#define CEDRUS_MIN_HEIGHT 16U
-#define CEDRUS_MAX_WIDTH 3840U
-#define CEDRUS_MAX_HEIGHT 2160U
+#define CEDRUS_MAX_WIDTH 4096U
+#define CEDRUS_MAX_HEIGHT 2304U
static struct cedrus_format cedrus_formats[] = {
{
@@ -42,6 +42,11 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
+ .pixelformat = V4L2_PIX_FMT_HEVC_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
.directions = CEDRUS_DECODE_DST,
},
@@ -62,34 +67,31 @@ static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions,
unsigned int capabilities)
{
+ struct cedrus_format *first_valid_fmt = NULL;
struct cedrus_format *fmt;
unsigned int i;
for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
fmt = &cedrus_formats[i];
- if (fmt->capabilities && (fmt->capabilities & capabilities) !=
- fmt->capabilities)
+ if ((fmt->capabilities & capabilities) != fmt->capabilities ||
+ !(fmt->directions & directions))
continue;
- if (fmt->pixelformat == pixelformat &&
- (fmt->directions & directions) != 0)
+ if (fmt->pixelformat == pixelformat)
break;
+
+ if (!first_valid_fmt)
+ first_valid_fmt = fmt;
}
if (i == CEDRUS_FORMATS_COUNT)
- return NULL;
+ return first_valid_fmt;
return &cedrus_formats[i];
}
-static bool cedrus_check_format(u32 pixelformat, u32 directions,
- unsigned int capabilities)
-{
- return cedrus_find_format(pixelformat, directions, capabilities);
-}
-
-static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
{
unsigned int width = pix_fmt->width;
unsigned int height = pix_fmt->height;
@@ -105,9 +107,11 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
-
+ /* Choose some minimum size since this can't be 0 */
+ sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
case V4L2_PIX_FMT_SUNXI_TILED_NV12:
@@ -214,16 +218,7 @@ static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to dummy default by lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->dst_fmt;
-
return 0;
}
@@ -232,17 +227,7 @@ static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to dummy default by lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
- f->fmt.pix.sizeimage = SZ_1K;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->src_fmt;
-
return 0;
}
@@ -252,11 +237,14 @@ static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
- dev->capabilities))
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -268,15 +256,14 @@ static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
- dev->capabilities))
- return -EINVAL;
-
- /* Source image size has to be provided by userspace. */
- if (pix_fmt->sizeimage == 0)
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -322,6 +309,17 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
ctx->src_fmt = f->fmt.pix;
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ vq->subsystem_flags |=
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ vq->subsystem_flags &=
+ ~VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ }
+
/* Propagate colorspace information to capture. */
ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
@@ -355,6 +353,9 @@ const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -364,21 +365,12 @@ static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
struct device *alloc_devs[])
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
- struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt;
- u32 directions;
- if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
- directions = CEDRUS_DECODE_SRC;
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
- } else {
- directions = CEDRUS_DECODE_DST;
+ else
pix_fmt = &ctx->dst_fmt;
- }
-
- if (!cedrus_check_format(pix_fmt->pixelformat, directions,
- dev->capabilities))
- return -EINVAL;
if (*nplanes) {
if (sizes[0] < pix_fmt->sizeimage)
@@ -453,6 +445,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_H264;
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ ctx->current_codec = CEDRUS_CODEC_H265;
+ break;
+
default:
return -EINVAL;
}
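
The new VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF flag and the stateless decoder_cmd hooks above enable decoding several slices of one frame into a single capture buffer. A hedged userspace sketch of the standard V4L2 usage (the file descriptor handling and buffer indices are assumptions, not part of the patch):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int queue_slice(int fd, unsigned int index, int last_slice)
	{
		struct v4l2_buffer buf;

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = index;
		if (!last_slice)
			/* hold the capture buffer for the next slice */
			buf.flags = V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;

		return ioctl(fd, VIDIOC_QBUF, &buf);
	}

Queueing the last slice without the flag completes the frame; alternatively, V4L2_DEC_CMD_FLUSH issued through the new vidioc_decoder_cmd hook finishes a held capture buffer.
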
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
index 0e4f7a8cccf2..05050c0a0921 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -26,5 +26,6 @@ extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt);
#endif
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 8948d5246409..6262eb25c80b 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig MOST
- tristate "MOST support"
+ tristate "MOST support"
depends on HAS_DMA && CONFIGFS_FS
- default n
- help
+ default n
+ help
Say Y here if you want to enable MOST support.
This driver needs at least one additional component to enable the
desired access from userspace (e.g. character devices) and one that
@@ -12,7 +12,7 @@ menuconfig MOST
To compile this driver as a module, choose M here: the
module will be called most_core.
- If in doubt, say N here.
+ If in doubt, say N here.
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 724d098aeef0..f880147c82fd 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -494,6 +494,7 @@ err_remove_ida:
static struct cdev_component comp = {
.cc = {
+ .mod = THIS_MODULE,
.name = "cdev",
.probe_channel = comp_probe,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
index 025495657b68..34a9fb53985c 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/staging/most/configfs.c
@@ -164,6 +164,7 @@ static ssize_t mdev_link_direction_store(struct config_item *item,
!sysfs_streq(page, "dir_tx") && !sysfs_streq(page, "tx"))
return -EINVAL;
strcpy(mdev_link->direction, page);
+ strim(mdev_link->direction);
return count;
}
@@ -182,6 +183,7 @@ static ssize_t mdev_link_datatype_store(struct config_item *item,
!sysfs_streq(page, "isoc_avp"))
return -EINVAL;
strcpy(mdev_link->datatype, page);
+ strim(mdev_link->datatype);
return count;
}
@@ -196,6 +198,7 @@ static ssize_t mdev_link_device_store(struct config_item *item,
struct mdev_link *mdev_link = to_mdev_link(item);
strcpy(mdev_link->device, page);
+ strim(mdev_link->device);
return count;
}
@@ -210,6 +213,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item,
struct mdev_link *mdev_link = to_mdev_link(item);
strcpy(mdev_link->channel, page);
+ strim(mdev_link->channel);
return count;
}
@@ -391,22 +395,29 @@ static const struct config_item_type mdev_link_type = {
struct most_common {
struct config_group group;
+ struct module *mod;
+ struct configfs_subsystem subsys;
};
-static struct most_common *to_most_common(struct config_item *item)
+static struct most_common *to_most_common(struct configfs_subsystem *subsys)
{
- return container_of(to_config_group(item), struct most_common, group);
+ return container_of(subsys, struct most_common, subsys);
}
static struct config_item *most_common_make_item(struct config_group *group,
const char *name)
{
struct mdev_link *mdev_link;
+ struct most_common *mc = to_most_common(group->cg_subsys);
mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
if (!mdev_link)
return ERR_PTR(-ENOMEM);
+ if (!try_module_get(mc->mod)) {
+ kfree(mdev_link);
+ return ERR_PTR(-ENOLCK);
+ }
config_item_init_type_name(&mdev_link->item, name,
&mdev_link_type);
@@ -422,15 +433,26 @@ static struct config_item *most_common_make_item(struct config_group *group,
static void most_common_release(struct config_item *item)
{
- kfree(to_most_common(item));
+ struct config_group *group = to_config_group(item);
+
+ kfree(to_most_common(group->cg_subsys));
}
static struct configfs_item_operations most_common_item_ops = {
.release = most_common_release,
};
+static void most_common_disconnect(struct config_group *group,
+ struct config_item *item)
+{
+ struct most_common *mc = to_most_common(group->cg_subsys);
+
+ module_put(mc->mod);
+}
+
static struct configfs_group_operations most_common_group_ops = {
.make_item = most_common_make_item,
+ .disconnect_notify = most_common_disconnect,
};
static const struct config_item_type most_common_type = {
@@ -439,29 +461,35 @@ static const struct config_item_type most_common_type = {
.ct_owner = THIS_MODULE,
};
-static struct configfs_subsystem most_cdev_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_cdev",
- .ci_type = &most_common_type,
+static struct most_common most_cdev = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_cdev",
+ .ci_type = &most_common_type,
+ },
},
},
};
-static struct configfs_subsystem most_net_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_net",
- .ci_type = &most_common_type,
+static struct most_common most_net = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_net",
+ .ci_type = &most_common_type,
+ },
},
},
};
-static struct configfs_subsystem most_video_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_video",
- .ci_type = &most_common_type,
+static struct most_common most_video = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_video",
+ .ci_type = &most_common_type,
+ },
},
},
};
@@ -487,7 +515,7 @@ static struct config_item *most_snd_grp_make_item(struct config_group *group,
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&mdev_link->item, name, &mdev_link_type);
- mdev_link->create_link = 0;
+ mdev_link->create_link = false;
strcpy(mdev_link->name, name);
strcpy(mdev_link->comp, "sound");
return &mdev_link->item;
@@ -545,13 +573,14 @@ static const struct config_item_type most_snd_grp_type = {
struct most_sound {
struct configfs_subsystem subsys;
struct list_head soundcard_list;
+ struct module *mod;
};
static struct config_group *most_sound_make_group(struct config_group *group,
const char *name)
{
struct most_snd_grp *most;
- struct most_sound *ms = container_of(to_configfs_subsystem(group),
+ struct most_sound *ms = container_of(group->cg_subsys,
struct most_sound, subsys);
list_for_each_entry(most, &ms->soundcard_list, list) {
@@ -560,17 +589,29 @@ static struct config_group *most_sound_make_group(struct config_group *group,
return ERR_PTR(-EPROTO);
}
}
+ if (!try_module_get(ms->mod))
+ return ERR_PTR(-ENOLCK);
most = kzalloc(sizeof(*most), GFP_KERNEL);
- if (!most)
+ if (!most) {
+ module_put(ms->mod);
return ERR_PTR(-ENOMEM);
-
+ }
config_group_init_type_name(&most->group, name, &most_snd_grp_type);
list_add_tail(&most->list, &ms->soundcard_list);
return &most->group;
}
+static void most_sound_disconnect(struct config_group *group,
+ struct config_item *item)
+{
+ struct most_sound *ms = container_of(group->cg_subsys,
+ struct most_sound, subsys);
+ module_put(ms->mod);
+}
+
static struct configfs_group_operations most_sound_group_ops = {
.make_group = most_sound_make_group,
+ .disconnect_notify = most_sound_disconnect,
};
static const struct config_item_type most_sound_type = {
@@ -593,16 +634,21 @@ int most_register_configfs_subsys(struct core_component *c)
{
int ret;
- if (!strcmp(c->name, "cdev"))
- ret = configfs_register_subsystem(&most_cdev_subsys);
- else if (!strcmp(c->name, "net"))
- ret = configfs_register_subsystem(&most_net_subsys);
- else if (!strcmp(c->name, "video"))
- ret = configfs_register_subsystem(&most_video_subsys);
- else if (!strcmp(c->name, "sound"))
+ if (!strcmp(c->name, "cdev")) {
+ most_cdev.mod = c->mod;
+ ret = configfs_register_subsystem(&most_cdev.subsys);
+ } else if (!strcmp(c->name, "net")) {
+ most_net.mod = c->mod;
+ ret = configfs_register_subsystem(&most_net.subsys);
+ } else if (!strcmp(c->name, "video")) {
+ most_video.mod = c->mod;
+ ret = configfs_register_subsystem(&most_video.subsys);
+ } else if (!strcmp(c->name, "sound")) {
+ most_sound_subsys.mod = c->mod;
ret = configfs_register_subsystem(&most_sound_subsys.subsys);
- else
+ } else {
return -ENODEV;
+ }
if (ret) {
pr_err("Error %d while registering subsystem %s\n",
@@ -631,11 +677,11 @@ void most_interface_register_notify(const char *mdev)
void most_deregister_configfs_subsys(struct core_component *c)
{
if (!strcmp(c->name, "cdev"))
- configfs_unregister_subsystem(&most_cdev_subsys);
+ configfs_unregister_subsystem(&most_cdev.subsys);
else if (!strcmp(c->name, "net"))
- configfs_unregister_subsystem(&most_net_subsys);
+ configfs_unregister_subsystem(&most_net.subsys);
else if (!strcmp(c->name, "video"))
- configfs_unregister_subsystem(&most_video_subsys);
+ configfs_unregister_subsystem(&most_video.subsys);
else if (!strcmp(c->name, "sound"))
configfs_unregister_subsystem(&most_sound_subsys.subsys);
}
@@ -643,14 +689,14 @@ EXPORT_SYMBOL_GPL(most_deregister_configfs_subsys);
int __init configfs_init(void)
{
- config_group_init(&most_cdev_subsys.su_group);
- mutex_init(&most_cdev_subsys.su_mutex);
+ config_group_init(&most_cdev.subsys.su_group);
+ mutex_init(&most_cdev.subsys.su_mutex);
- config_group_init(&most_net_subsys.su_group);
- mutex_init(&most_net_subsys.su_mutex);
+ config_group_init(&most_net.subsys.su_group);
+ mutex_init(&most_net.subsys.su_mutex);
- config_group_init(&most_video_subsys.su_group);
- mutex_init(&most_video_subsys.su_mutex);
+ config_group_init(&most_video.subsys.su_group);
+ mutex_init(&most_video.subsys.su_mutex);
config_group_init(&most_sound_subsys.subsys.su_group);
mutex_init(&most_sound_subsys.subsys.su_mutex);
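
The .mod plumbing above ties configfs item lifetime to the owning component module: every item created by userspace pins the module, and the pin is dropped from ->disconnect_notify() when the item is removed. A condensed sketch of the pairing, using names from the patch (the sketch_* wrappers are hypothetical):

	static struct config_item *sketch_make_item(struct config_group *group,
						    const char *name)
	{
		struct most_common *mc = to_most_common(group->cg_subsys);
		struct mdev_link *link = kzalloc(sizeof(*link), GFP_KERNEL);

		if (!link)
			return ERR_PTR(-ENOMEM);
		if (!try_module_get(mc->mod)) {		/* component is unloading */
			kfree(link);
			return ERR_PTR(-ENOLCK);
		}
		config_item_init_type_name(&link->item, name, &mdev_link_type);
		return &link->item;
	}

	/* called by configfs before the item is released */
	static void sketch_disconnect(struct config_group *group,
				      struct config_item *item)
	{
		module_put(to_most_common(group->cg_subsys)->mod);
	}
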
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 8e9a0b67c6ed..51a6b41d5b82 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -52,7 +52,7 @@ struct most_channel {
u16 channel_id;
char name[STRING_SIZE];
bool is_poisoned;
- struct mutex start_mutex;
+ struct mutex start_mutex; /* channel activation synchronization */
struct mutex nq_mutex; /* nq thread synchronization */
int is_starving;
struct most_interface *iface;
@@ -60,7 +60,7 @@ struct most_channel {
bool keep_mbo;
bool enqueue_halt;
struct list_head fifo;
- spinlock_t fifo_lock;
+ spinlock_t fifo_lock; /* fifo access synchronization */
struct list_head halt_fifo;
struct list_head list;
struct pipe pipe0;
@@ -84,11 +84,11 @@ static const struct {
int most_ch_data_type;
const char *name;
} ch_data_type[] = {
- { MOST_CH_CONTROL, "control\n" },
- { MOST_CH_ASYNC, "async\n" },
- { MOST_CH_SYNC, "sync\n" },
- { MOST_CH_ISOC, "isoc\n"},
- { MOST_CH_ISOC, "isoc_avp\n"},
+ { MOST_CH_CONTROL, "control" },
+ { MOST_CH_ASYNC, "async" },
+ { MOST_CH_SYNC, "sync" },
+ { MOST_CH_ISOC, "isoc"},
+ { MOST_CH_ISOC, "isoc_avp"},
};
/**
@@ -521,48 +521,6 @@ static ssize_t components_show(struct device_driver *drv, char *buf)
}
/**
- * split_string - parses buf and extracts ':' separated substrings.
- *
- * @buf: complete string from attribute 'add_channel'
- * @a: storage for 1st substring (=interface name)
- * @b: storage for 2nd substring (=channel name)
- * @c: storage for 3rd substring (=component name)
- * @d: storage optional 4th substring (=user defined name)
- *
- * Examples:
- *
- * Input: "mdev0:ch6:cdev:my_channel\n" or
- * "mdev0:ch6:cdev:my_channel"
- *
- * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
- *
- * Input: "mdev1:ep81:cdev\n"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
- *
- * Input: "mdev1:ep81"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
- */
-static int split_string(char *buf, char **a, char **b, char **c, char **d)
-{
- *a = strsep(&buf, ":");
- if (!*a)
- return -EIO;
-
- *b = strsep(&buf, ":\n");
- if (!*b)
- return -EIO;
-
- *c = strsep(&buf, ":\n");
- if (!*c)
- return -EIO;
-
- if (d)
- *d = strsep(&buf, ":\n");
-
- return 0;
-}
-
-/**
* get_channel - get pointer to channel
* @mdev: name of the device interface
* @mdev_ch: name of channel
@@ -675,13 +633,13 @@ int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
if (!c)
return -ENODEV;
- if (!strcmp(buf, "dir_rx\n")) {
+ if (!strcmp(buf, "dir_rx")) {
c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "rx\n")) {
+ } else if (!strcmp(buf, "rx")) {
c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "dir_tx\n")) {
+ } else if (!strcmp(buf, "dir_tx")) {
c->cfg.direction = MOST_CH_TX;
- } else if (!strcmp(buf, "tx\n")) {
+ } else if (!strcmp(buf, "tx")) {
c->cfg.direction = MOST_CH_TX;
} else {
pr_info("Invalid direction\n");
@@ -723,48 +681,6 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
return link_channel_to_component(c, comp, link_name, comp_param);
}
-/**
- * remove_link_store - store function for remove_link attribute
- * @drv: device driver
- * @buf: buffer
- * @len: buffer length
- *
- * Example:
- * echo "mdev0:ep81" >remove_link
- */
-static ssize_t remove_link_store(struct device_driver *drv,
- const char *buf,
- size_t len)
-{
- struct most_channel *c;
- struct core_component *comp;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- char *comp_name;
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
- ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
- if (ret)
- return ret;
- comp = match_component(comp_name);
- if (!comp)
- return -ENODEV;
- c = get_channel(mdev, mdev_ch);
- if (!c)
- return -ENODEV;
-
- if (comp->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
- if (c->pipe0.comp == comp)
- c->pipe0.comp = NULL;
- if (c->pipe1.comp == comp)
- c->pipe1.comp = NULL;
- return len;
-}
-
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
struct most_channel *c;
@@ -790,12 +706,10 @@ int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
-static DRIVER_ATTR_WO(remove_link);
static struct attribute *mc_attrs[] = {
DRV_ATTR(links),
DRV_ATTR(components),
- DRV_ATTR(remove_link),
NULL,
};
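
The trailing '\n' in the ch_data_type table and the direction strings could be dropped because the input now arrives through configfs store handlers, which strim() it before it reaches these comparisons. A sketch of the flow, with field names following the driver:

	/* configfs store handler (see configfs.c above) */
	strcpy(mdev_link->datatype, page);	/* page may carry a trailing '\n' */
	strim(mdev_link->datatype);

	/* core side: exact, newline-free matches now suffice */
	if (!strcmp(buf, "dir_rx") || !strcmp(buf, "rx"))
		c->cfg.direction = MOST_CH_RX;
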
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
index 652aaa771029..49859aef98df 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/core.h
@@ -265,6 +265,7 @@ struct most_interface {
struct core_component {
struct list_head list;
const char *name;
+ struct module *mod;
int (*probe_channel)(struct most_interface *iface, int channel_idx,
struct most_channel_config *cfg, char *name,
char *param);
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 26a31854c636..6cab1bb8956e 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -498,6 +498,7 @@ put_nd:
}
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = "net",
.probe_channel = comp_probe_channel,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 79817061fcfa..723d0bd1cc21 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -344,8 +344,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
pr_err("Requested number of channels not supported.\n");
return -EINVAL;
}
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/**
@@ -359,7 +358,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
*/
static int pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/**
@@ -469,7 +468,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int split_arg_list(char *buf, u16 *ch_num, char **sample_res)
@@ -663,6 +661,8 @@ skip_adpt_alloc:
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
@@ -782,6 +782,7 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
* Initialization of the struct core_component
*/
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
.disconnect_channel = audio_disconnect_channel,
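
The buffer handling switch above moves the driver off the dedicated vmalloc helpers (being removed from ALSA around this time) onto the generic page-allocation API; with SNDRV_DMA_TYPE_VMALLOC the core supplies the page mapping itself, which is why the explicit .page callback can go. The resulting sequence, as used in the hunks above:

	/* once, after snd_pcm_new()/snd_pcm_set_ops() */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
					      NULL, 0, 0);

	/* in .hw_params / .hw_free */
	snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
	snd_pcm_lib_free_pages(substream);
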
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 250af9fb704d..10c1ef7e3a3e 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -528,6 +528,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
}
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = "video",
.probe_channel = comp_probe_channel,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d964642d95a3..20b898954416 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -208,8 +208,8 @@ static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
{
- dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
- "tctx %08x, tdtx: %08x, rbase %08x, " \
+ dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, "
+ "tctx %08x, tdtx: %08x, rbase %08x, "
"rcnt %08x, rctx %08x, rdtx %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
@@ -220,7 +220,7 @@ static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
- dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
+ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, "
"intr_stat %08x, intr_mask %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
@@ -243,9 +243,9 @@ static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
tx_desc = &chan->tx_ring[i];
rx_desc = &chan->rx_ring[i];
- dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
+ dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, "
"tx addr1: %08x, rx addr0 %08x, flags %08x\n",
- i, tx_desc->addr0, tx_desc->flags, \
+ i, tx_desc->addr0, tx_desc->flags,
tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
}
}
@@ -548,7 +548,8 @@ static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
int i;
chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+ 2 * HSDMA_DESCS_NUM *
+ sizeof(*chan->tx_ring),
&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
if (!chan->tx_ring)
goto no_mem;
@@ -569,8 +570,8 @@ static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
{
if (chan->tx_ring) {
dma_free_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
- chan->tx_ring, chan->desc_addr);
+ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+ chan->tx_ring, chan->desc_addr);
chan->tx_ring = NULL;
chan->rx_ring = NULL;
}
@@ -650,7 +651,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_chan *chan;
struct mtk_hsdam_engine *hsdma;
struct dma_device *dd;
- struct resource *res;
int ret;
int irq;
void __iomem *base;
@@ -667,8 +667,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (!hsdma)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
hsdma->base = base + HSDMA_BASE_OFFSET;
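
devm_platform_ioremap_resource() (also used in the xlr_net.c hunk further down) is a direct wrapper for the removed two-call sequence:

	/* what devm_platform_ioremap_resource(pdev, 0) does internally */
	static void __iomem *sketch_map(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);
	}
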
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index af928b75a940..ce58042f2f21 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -2,7 +2,6 @@
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
- depends on PCI
select PCI_DRIVERS_GENERIC
help
This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 6b98827da57f..3633c924848e 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -29,15 +29,14 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/sys_soc.h>
#include <mt7621.h>
#include <ralink_regs.h>
#include "../../pci/pci.h"
/* sysctl */
-#define MT7621_CHIP_REV_ID 0x0c
#define MT7621_GPIO_MODE 0x60
-#define CHIP_REV_MT7621_E2 0x0101
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM 0x70c
@@ -126,6 +125,8 @@ struct mt7621_pcie_port {
* @ports: pointer to PCIe port information
* @perst: gpio reset
* @rst: pointer to pcie reset
+ * @resets_inverted: whether the PCIe reset lines are inverted
+ * (depends on the chip revision)
*/
struct mt7621_pcie {
void __iomem *base;
@@ -140,6 +141,7 @@ struct mt7621_pcie {
struct list_head ports;
struct gpio_desc *perst;
struct reset_control *rst;
+ bool resets_inverted;
};
static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
@@ -229,9 +231,9 @@ static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
+ struct mt7621_pcie *pcie = port->pcie;
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (pcie->resets_inverted)
reset_control_assert(port->pcie_rst);
else
reset_control_deassert(port->pcie_rst);
@@ -239,9 +241,9 @@ static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
+ struct mt7621_pcie *pcie = port->pcie;
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (pcie->resets_inverted)
reset_control_deassert(port->pcie_rst);
else
reset_control_assert(port->pcie_rst);
@@ -641,9 +643,14 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host,
return pci_host_probe(host);
}
+static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
+ { .soc_id = "mt7621", .revision = "E2" },
+ { /* sentinel */ }
+};
+
static int mt7621_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
struct mt7621_pcie *pcie;
struct pci_host_bridge *bridge;
int err;
@@ -661,6 +668,10 @@ static int mt7621_pci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
INIT_LIST_HEAD(&pcie->ports);
+ attr = soc_device_match(mt7621_pci_quirks_match);
+ if (attr)
+ pcie->resets_inverted = true;
+
err = mt7621_pcie_parse_dt(pcie);
if (err) {
dev_err(dev, "Parsing DT failed\n");
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
index 8f172b017b94..20e22ecb9903 100644
--- a/drivers/staging/netlogic/TODO
+++ b/drivers/staging/netlogic/TODO
@@ -1,6 +1,6 @@
* Implementing 64bit stat counter in software
* All memory allocation should be changed to DMA allocations
-* Changing comments in to linux standred format
+* Changing comments into linux standard format
Please send patches
To:
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 05079f7be841..204fcdfc022f 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -976,8 +976,7 @@ static int xlr_net_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->port_id = (pdev->id * 4) + port;
priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, port);
- priv->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->base_addr = devm_platform_ioremap_resource(pdev, port);
if (IS_ERR(priv->base_addr)) {
err = PTR_ERR(priv->base_addr);
goto err_gmac;
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 5c12cacf75e1..9fa98c16f1d9 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -8,7 +8,7 @@ config MFD_NVEC
controller.
To compile this driver as a module, say M here: the module will be
- called mfd-nvec
+ called mfd-nvec
config KEYBOARD_NVEC
tristate "Keyboard on nVidia compliant EC"
@@ -18,7 +18,7 @@ config KEYBOARD_NVEC
a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called keyboard-nvec
+ called keyboard-nvec
config SERIO_NVEC_PS2
tristate "PS2 on nVidia EC"
@@ -28,7 +28,7 @@ config SERIO_NVEC_PS2
to a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called serio-nvec-ps2
+ called serio-nvec-ps2
config NVEC_POWER
@@ -39,7 +39,7 @@ config NVEC_POWER
nVidia compliant embedded controllers.
To compile this driver as a module, say M here: the module will be
- called nvec-power
+ called nvec-power
config NVEC_PAZ00
@@ -50,5 +50,5 @@ config NVEC_PAZ00
devices, e.g. Toshiba AC100 and Dynabook AZ netbooks.
To compile this driver as a module, say M here: the module will be
- called nvec-paz00
+ called nvec-paz00
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index a5321cc692c5..582c9187559d 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -1836,8 +1836,7 @@ static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
*
* Returns: Pipe or NULL if none are ready
*/
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
- struct octeon_hcd *usb,
+static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
enum cvmx_usb_transfer xfer_type)
{
struct list_head *list = usb->active_pipes + xfer_type;
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index ffac0c4b3f5c..c798672d61b2 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -65,7 +65,7 @@ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li)
+ union cvmx_helper_link_info li)
{
if (li.s.link_up) {
pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
@@ -81,7 +81,7 @@ void cvm_oct_note_carrier(struct octeon_ethernet *priv,
void cvm_oct_adjust_link(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info.u64 = 0;
link_info.s.link_up = dev->phydev->link ? 1 : 0;
@@ -106,7 +106,7 @@ int cvm_oct_common_stop(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int index = INDEX(priv->port);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d91fd5ce9e68..0c4fac31540a 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -53,7 +53,7 @@ static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
static void cvm_oct_check_preamble_errors(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
unsigned long flags;
link_info.u64 = priv->link_info;
@@ -103,7 +103,7 @@ static void cvm_oct_check_preamble_errors(struct net_device *dev)
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
bool status_change;
link_info = cvmx_helper_link_get(priv->port);
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0e65955c746b..2c16230f993c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -60,7 +60,7 @@ static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
*
* Returns Non-zero if the packet can be dropped, zero otherwise.
*/
-static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
int port;
@@ -135,7 +135,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
return 0;
}
-static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
+static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
{
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -215,7 +215,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
struct sk_buff *skb = NULL;
struct sk_buff **pskb = NULL;
int skb_in_hw;
- cvmx_wqe_t *work;
+ struct cvmx_wqe *work;
int port;
if (USE_ASYNC_IOBDMA && did_work_request)
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 83469061a542..b334cf89794e 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -127,7 +127,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
*/
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
- cvmx_pko_command_word0_t pko_command;
+ union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr hw_buffer;
u64 old_scratch;
u64 old_scratch2;
@@ -514,7 +514,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
void *copy_location;
/* Get a work queue entry */
- cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+ struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(!work)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
@@ -598,7 +598,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
#endif
work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
(ip_hdr(skb)->frag_off ==
- 1 << 14));
+ cpu_to_be16(1 << 14)));
#if 0
/* Assume Linux is sending a good packet */
work->word2.s.IP_exc = 0;
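
The frag_off hunk above is an endianness fix: the field is big-endian, so the bare 1 << 14 constant (the IP "don't fragment" flag) only matched on big-endian hosts. With IP_DF from <net/ip.h>, the corrected test is equivalent to:

	/* fragmented unless no frag fields are set, or only DF is set */
	work->word2.s.is_frag = !(ip_hdr(skb)->frag_off == 0 ||
				  ip_hdr(skb)->frag_off == htons(IP_DF));
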
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index cf8e9a23ebf9..f42c3816ce49 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -172,7 +172,7 @@ static void cvm_oct_configure_common_hw(void)
*/
int cvm_oct_free_work(void *work_queue_entry)
{
- cvmx_wqe_t *work = work_queue_entry;
+ struct cvmx_wqe *work = work_queue_entry;
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -460,7 +460,7 @@ int cvm_oct_common_open(struct net_device *dev,
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
int rv;
rv = cvm_oct_phy_setup_device(dev);
@@ -496,7 +496,7 @@ int cvm_oct_common_open(struct net_device *dev,
void cvm_oct_link_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info)
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index a8a864b40913..a6140705706f 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -14,7 +14,7 @@
#include <linux/of.h>
#include <linux/phy.h>
-#ifdef CONFIG_MIPS
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
@@ -91,7 +91,7 @@ int cvm_oct_common_stop(struct net_device *dev);
int cvm_oct_common_open(struct net_device *dev,
void (*link_poll)(struct net_device *));
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li);
+ union cvmx_helper_link_info li);
void cvm_oct_link_poll(struct net_device *dev);
extern int always_use_pow;
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index b78ce9eaab85..79213c045504 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1,5 +1,8 @@
#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
-#define XKPHYS_TO_PHYS(p) (p)
+
+#ifndef XKPHYS_TO_PHYS
+# define XKPHYS_TO_PHYS(p) (p)
+#endif
#define OCTEON_IRQ_WORKQ0 0
#define OCTEON_IRQ_RML 0
@@ -38,7 +41,7 @@
#define CVMX_NPI_RSL_INT_BLOCKS 0
#define CVMX_POW_WQ_INT_PC 0
-typedef union {
+union cvmx_pip_wqe_word2 {
uint64_t u64;
struct {
uint64_t bufs:8;
@@ -114,13 +117,13 @@ typedef union {
uint64_t err_code:8;
} snoip;
-} cvmx_pip_wqe_word2;
+};
union cvmx_pip_wqe_word0 {
struct {
uint64_t next_ptr:40;
uint8_t unused;
- uint16_t hw_chksum;
+ __wsum hw_chksum;
} cn38xx;
struct {
uint64_t pknd:6; /* 0..5 */
@@ -180,15 +183,15 @@ union cvmx_buf_ptr {
} s;
};
-typedef struct {
+struct cvmx_wqe {
union cvmx_wqe_word0 word0;
union cvmx_wqe_word1 word1;
- cvmx_pip_wqe_word2 word2;
+ union cvmx_pip_wqe_word2 word2;
union cvmx_buf_ptr packet_ptr;
uint8_t packet_data[96];
-} cvmx_wqe_t;
+};
-typedef union {
+union cvmx_helper_link_info {
uint64_t u64;
struct {
uint64_t reserved_20_63:44;
@@ -196,18 +199,18 @@ typedef union {
uint64_t full_duplex:1; /**< 1 if the link is full duplex */
uint64_t speed:18; /**< Speed of the link in Mbps */
} s;
-} cvmx_helper_link_info_t;
+};
-typedef enum {
+enum cvmx_fau_reg_32 {
CVMX_FAU_REG_32_START = 0,
-} cvmx_fau_reg_32_t;
+};
-typedef enum {
+enum cvmx_fau_op_size {
CVMX_FAU_OP_SIZE_8 = 0,
CVMX_FAU_OP_SIZE_16 = 1,
CVMX_FAU_OP_SIZE_32 = 2,
CVMX_FAU_OP_SIZE_64 = 3
-} cvmx_fau_op_size_t;
+};
typedef enum {
CVMX_SPI_MODE_UNKNOWN = 0,
@@ -1134,27 +1137,27 @@ union cvmx_npi_rsl_int_blocks {
} cn50xx;
};
-typedef union {
+union cvmx_pko_command_word0 {
uint64_t u64;
struct {
- uint64_t total_bytes:16;
- uint64_t segs:6;
- uint64_t dontfree:1;
- uint64_t ignore_i:1;
- uint64_t ipoffp1:7;
- uint64_t gather:1;
- uint64_t rsp:1;
- uint64_t wqp:1;
- uint64_t n2:1;
- uint64_t le:1;
- uint64_t reg0:11;
- uint64_t subone0:1;
- uint64_t reg1:11;
- uint64_t subone1:1;
- uint64_t size0:2;
- uint64_t size1:2;
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
} s;
-} cvmx_pko_command_word0_t;
+};
union cvmx_ciu_timx {
uint64_t u64;
@@ -1175,16 +1178,18 @@ union cvmx_gmxx_rxx_rx_inbnd {
} s;
};
-static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
+static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
int32_t value)
{
return value;
}
-static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
{ }
-static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
{ }
static inline uint64_t cvmx_scratch_read64(uint64_t address)
@@ -1195,7 +1200,7 @@ static inline uint64_t cvmx_scratch_read64(uint64_t address)
static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
{ }
-static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
{
return 0;
}
@@ -1264,15 +1269,15 @@ static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
return 0;
}
-static inline cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
{
- cvmx_helper_link_info_t ret = { .u64 = 0 };
+ union cvmx_helper_link_info ret = { .u64 = 0 };
return ret;
}
static inline int cvmx_helper_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
+ union cvmx_helper_link_info link_info)
{
return 0;
}
@@ -1342,14 +1347,14 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
cvmx_pow_wait_t wait)
{ }
-static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
{
- cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr;
+ struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
return wqe;
}
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
{
return (void *)(unsigned long)wait;
}
@@ -1361,7 +1366,7 @@ static inline int cvmx_spi_restart_interface(int interface,
}
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
+ enum cvmx_fau_reg_32 reg,
int32_t value)
{ }
@@ -1370,6 +1375,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
int port)
{
union cvmx_gmxx_rxx_rx_inbnd r;
+
r.u64 = 0;
return r;
}
@@ -1379,29 +1385,27 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
{ }
static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
- uint64_t queue, cvmx_pko_command_word0_t pko_command,
+ uint64_t queue, union cvmx_pko_command_word0 pko_command,
union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
{
- cvmx_pko_status_t ret = 0;
-
- return ret;
+ return 0;
}
-static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
{ }
-static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
{ }
-static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
{
return 0;
}
-static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
{ }
-static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
enum cvmx_pow_tag_type tag_type,
uint64_t qos, uint64_t grp)
{ }
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index f5c716bb3413..d1a0dea09ef0 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -3,7 +3,7 @@ config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
depends on I2C
- depends on (GPIO_CS5535 || GPIO_CS5535=n)
+ depends on GPIO_CS5535 && ACPI
select BACKLIGHT_CLASS_DEVICE
help
In order to support very low power operation, the XO laptop uses a
@@ -15,22 +15,3 @@ config FB_OLPC_DCON
This controller is only available on OLPC platforms. Unless you have
one of these platforms, you will want to say 'N'.
-config FB_OLPC_DCON_1
- bool "OLPC XO-1 DCON support"
- depends on FB_OLPC_DCON && GPIO_CS5535
- default y
- help
- Enable support for the DCON in XO-1 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1 (or if you're unsure what model you have), you should
- say 'Y'.
-
-config FB_OLPC_DCON_1_5
- bool "OLPC XO-1.5 DCON support"
- depends on FB_OLPC_DCON && ACPI
- default y
- help
- Enable support for the DCON in XO-1.5 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1.5 (or if you're unsure what model you have), you
- should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
index cb1248c5c162..734b2ce26066 100644
--- a/drivers/staging/olpc_dcon/Makefile
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-olpc-dcon-objs += olpc_dcon.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
+olpc-dcon-objs += olpc_dcon.o olpc_dcon_xo_1.o olpc_dcon_xo_1_5.o
obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index d8296f2ae872..7c263358b44a 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -8,7 +8,6 @@ TODO:
internals, but isn't properly integrated, is not the correct solution.
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- - allow simultaneous XO-1 and XO-1.5 support
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index a254238be181..a0d6d90f4cc8 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -790,15 +790,11 @@ static struct i2c_driver dcon_driver = {
static int __init olpc_dcon_init(void)
{
-#ifdef CONFIG_FB_OLPC_DCON_1_5
/* XO-1.5 */
if (olpc_board_at_least(olpc_board(0xd0)))
pdata = &dcon_pdata_xo_1_5;
-#endif
-#ifdef CONFIG_FB_OLPC_DCON_1
- if (!pdata)
+ else
pdata = &dcon_pdata_xo_1;
-#endif
return i2c_add_driver(&dcon_driver);
}
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 22d976a09785..41bd1360b56e 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -106,12 +106,7 @@ struct dcon_gpio {
irqreturn_t dcon_interrupt(int irq, void *id);
-#ifdef CONFIG_FB_OLPC_DCON_1
extern struct dcon_platform_data dcon_pdata_xo_1;
-#endif
-
-#ifdef CONFIG_FB_OLPC_DCON_1_5
extern struct dcon_platform_data dcon_pdata_xo_1_5;
-#endif
#endif
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig
index 8acde0814206..dd9e4709d1a8 100644
--- a/drivers/staging/pi433/Kconfig
+++ b/drivers/staging/pi433/Kconfig
@@ -1,17 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
config PI433
- tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
- depends on SPI
- help
- This option allows you to enable support for the radio module Pi433.
+ tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
+ depends on SPI
+ help
+ This option allows you to enable support for the radio module Pi433.
- Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
- or compatible. It extends the Raspberry Pi with the option, to
- send and receive data in the 433MHz ISM band - for example to
- communicate between two systems without using ethernet or bluetooth
- or for control or read sockets, actors, sensors, widely available
- for low price.
+ Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
+ or compatible. It extends the Raspberry Pi with the ability to
+ send and receive data in the 433MHz ISM band - for example, to
+ communicate between two systems without using ethernet or bluetooth,
+ or to control or read sockets, actuators and sensors, which are
+ widely available at low cost.
- For details or the option to buy, please visit https://pi433.de/en.html
+ For details or the option to buy, please visit https://pi433.de/en.html
- If in doubt, say N here, but saying yes most probably won't hurt
+ If in doubt, say N here, but saying yes most probably won't hurt.
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
index 51c509084e80..f93f7428f5d5 100644
--- a/drivers/staging/qlge/TODO
+++ b/drivers/staging/qlge/TODO
@@ -1,6 +1,3 @@
-* reception stalls permanently (until admin intervention) if the rx buffer
- queues become empty because of allocation failures (ex. under memory
- pressure)
* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1)
introduced dead code in the receive routines, which should be rewritten
anyways by the admission of the author himself, see the comment above
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index ad7c5eb8a3b6..6ec7e3ce3863 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -34,8 +34,13 @@
#define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
+/* Use the same len for sbq and lbq. Note that it seems like the device might
+ * support different sizes.
+ */
+#define QLGE_BQ_SHIFT 9
+#define QLGE_BQ_LEN BIT(QLGE_BQ_SHIFT)
+#define QLGE_BQ_SIZE (QLGE_BQ_LEN * sizeof(__le64))
+
#define DB_PAGE_SIZE 4096
/* Calculate the number of (4k) pages required to
@@ -46,8 +51,8 @@
(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64))
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
@@ -77,6 +82,11 @@
#define LSD(x) ((u32)((u64)(x)))
#define MSD(x) ((u32)((((u64)(x)) >> 32)))
+/* In some cases, the device interprets a value of 0x0000 as 65536. These
+ * cases are marked using the following macro.
+ */
+#define QLGE_FIT16(value) ((u16)(value))
+
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
@@ -1358,25 +1368,6 @@ struct tx_ring_desc {
struct tx_ring_desc *next;
};
-struct page_chunk {
- struct page *page; /* master page */
- char *va; /* virt addr for this chunk */
- u64 map; /* mapping for master */
- unsigned int offset; /* offset for this chunk */
- unsigned int last_flag; /* flag set for last chunk in page */
-};
-
-struct bq_desc {
- union {
- struct page_chunk pg_chunk;
- struct sk_buff *skb;
- } p;
- __le64 *addr;
- u32 index;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
struct tx_ring {
@@ -1406,15 +1397,68 @@ struct tx_ring {
u64 tx_errors;
};
-/*
- * Type of inbound queue.
- */
-enum {
- DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
- TX_Q = 3, /* Handles outbound completions. */
- RX_Q = 4, /* Handles inbound completions. */
+struct qlge_page_chunk {
+ struct page *page;
+ void *va; /* virt addr including offset */
+ unsigned int offset;
};
+struct qlge_bq_desc {
+ union {
+ /* for large buffers */
+ struct qlge_page_chunk pg_chunk;
+ /* for small buffers */
+ struct sk_buff *skb;
+ } p;
+ dma_addr_t dma_addr;
+ /* address in ring where the buffer address is written for the device */
+ __le64 *buf_ptr;
+ u32 index;
+};
+
+/* buffer queue */
+struct qlge_bq {
+ __le64 *base;
+ dma_addr_t base_dma;
+ __le64 *base_indirect;
+ dma_addr_t base_indirect_dma;
+ struct qlge_bq_desc *queue;
+ /* prod_idx is the index of the first buffer that may NOT be used by
+ * hw, i.e. one after the last. Advanced by sw.
+ */
+ void __iomem *prod_idx_db_reg;
+ /* next index where sw should refill a buffer for hw */
+ u16 next_to_use;
+ /* next index where sw expects to find a buffer filled by hw */
+ u16 next_to_clean;
+ enum {
+ QLGE_SB, /* small buffer */
+ QLGE_LB, /* large buffer */
+ } type;
+};
+
+#define QLGE_BQ_CONTAINER(bq) \
+({ \
+ typeof(bq) _bq = bq; \
+ (struct rx_ring *)((char *)_bq - (_bq->type == QLGE_SB ? \
+ offsetof(struct rx_ring, sbq) : \
+ offsetof(struct rx_ring, lbq))); \
+})
+
+/* Experience shows that the device ignores the low 4 bits of the tail index.
+ * Refill up to a multiple of 16.
+ */
+#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16)
+
+#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
+
+#define QLGE_BQ_HW_OWNED(bq) \
+({ \
+ typeof(bq) _bq = bq; \
+ QLGE_BQ_WRAP(QLGE_BQ_ALIGN((_bq)->next_to_use) - \
+ (_bq)->next_to_clean); \
+})
+
struct rx_ring {
struct cqicb cqicb; /* The chip's completion queue init control block. */
@@ -1432,40 +1476,17 @@ struct rx_ring {
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */
- u32 lbq_len; /* entry count */
- u32 lbq_size; /* size in bytes of queue */
- u32 lbq_buf_size;
- void *lbq_base;
- dma_addr_t lbq_base_dma;
- void *lbq_base_indirect;
- dma_addr_t lbq_base_indirect_dma;
- struct page_chunk pg_chunk; /* current page for chunks */
- struct bq_desc *lbq; /* array of control blocks */
- void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
- u32 lbq_prod_idx; /* current sw prod idx */
- u32 lbq_curr_idx; /* next entry we expect */
- u32 lbq_clean_idx; /* beginning of new descs */
- u32 lbq_free_cnt; /* free buffer desc cnt */
+ struct qlge_bq lbq;
+ struct qlge_page_chunk master_chunk;
+ dma_addr_t chunk_dma_addr;
/* Small buffer queue elements. */
- u32 sbq_len; /* entry count */
- u32 sbq_size; /* size in bytes of queue */
- u32 sbq_buf_size;
- void *sbq_base;
- dma_addr_t sbq_base_dma;
- void *sbq_base_indirect;
- dma_addr_t sbq_base_indirect_dma;
- struct bq_desc *sbq; /* array of control blocks */
- void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
- u32 sbq_prod_idx; /* current sw prod idx */
- u32 sbq_curr_idx; /* next entry we expect */
- u32 sbq_clean_idx; /* beginning of new descs */
- u32 sbq_free_cnt; /* free buffer desc cnt */
+ struct qlge_bq sbq;
/* Misc. handler elements. */
- u32 type; /* Type of queue, tx, rx. */
u32 irq; /* Which vector this ring is assigned. */
u32 cpu; /* Which CPU this should run on. */
+ struct delayed_work refill_work;
char name[IFNAMSIZ + 5];
struct napi_struct napi;
u8 reserved;
@@ -1982,11 +2003,6 @@ struct intr_context {
u32 intr_dis_mask; /* value/mask used to disable this intr */
u32 intr_read_mask; /* value/mask used to read this intr */
char name[IFNAMSIZ * 2];
- atomic_t irq_cnt; /* irq_cnt is used in single vector
- * environment. It's incremented for each
- * irq handler that is scheduled. When each
- * handler finishes it decrements irq_cnt and
- * enables interrupts if it's zero. */
irq_handler_t handler;
};
@@ -2074,7 +2090,6 @@ struct ql_adapter {
u32 port; /* Port number this adapter */
spinlock_t adapter_lock;
- spinlock_t hw_lock;
spinlock_t stats_lock;
/* PCI Bus Relative Register Addresses */
@@ -2115,6 +2130,7 @@ struct ql_adapter {
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
unsigned int lbq_buf_order;
+ u32 lbq_buf_size;
int rx_csum;
u32 default_rx_queue;
@@ -2235,7 +2251,6 @@ void ql_mpi_reset_work(struct work_struct *work);
void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
void ql_mpi_idc_work(struct work_struct *work);
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 31389ab8bdf7..83f34ca43aa4 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -7,7 +7,7 @@
/* Read a NIC register from the alternate function. */
static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
- u32 reg)
+ u32 reg)
{
u32 register_to_read;
u32 reg_val;
@@ -26,7 +26,7 @@ static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
/* Write a NIC register from the alternate function. */
static int ql_write_other_func_reg(struct ql_adapter *qdev,
- u32 reg, u32 reg_val)
+ u32 reg, u32 reg_val)
{
u32 register_to_read;
int status = 0;
@@ -41,7 +41,7 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev,
}
static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
+ u32 bit, u32 err_bit)
{
u32 temp;
int count = 10;
@@ -61,22 +61,22 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
}
static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+ u32 *data)
{
int status;
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* set up for reg read */
- ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
@@ -111,8 +111,8 @@ exit:
}
static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- unsigned int direct_valid, unsigned int indirect_valid)
+ u32 *direct_ptr, u32 *indirect_ptr,
+ bool direct_valid, bool indirect_valid)
{
unsigned int status;
@@ -133,16 +133,15 @@ static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
}
static int ql_get_serdes_regs(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump)
+ struct ql_mpi_coredump *mpi_coredump)
{
int status;
- unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
- unsigned int xaui_indirect_valid, i;
+ bool xfi_direct_valid = false, xfi_indirect_valid = false;
+ bool xaui_direct_valid = true, xaui_indirect_valid = true;
+ unsigned int i;
u32 *direct_ptr, temp;
u32 *indirect_ptr;
- xfi_direct_valid = xfi_indirect_valid = 0;
- xaui_direct_valid = xaui_indirect_valid = 1;
/* The XAUI needs to be read out per port */
status = ql_read_other_func_serdes_reg(qdev,
@@ -152,7 +151,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_indirect_valid = 0;
+ xaui_indirect_valid = false;
status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
@@ -161,7 +160,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_direct_valid = 0;
+ xaui_direct_valid = false;
/*
* XFI register is shared so only need to read one
@@ -176,18 +175,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
/* now see if I'm NIC 1 or NIC 2 */
if (qdev->func & 1)
/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_indirect_valid = 1;
+ xfi_indirect_valid = true;
else
- xfi_direct_valid = 1;
+ xfi_direct_valid = true;
}
if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
XG_SERDES_ADDR_XFI2_PWR_UP) {
/* now see if I'm NIC 1 or NIC 2 */
if (qdev->func & 1)
/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_direct_valid = 1;
+ xfi_direct_valid = true;
else
- xfi_indirect_valid = 1;
+ xfi_indirect_valid = true;
}
/* Get XAUI_AN register block. */
@@ -203,7 +202,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -220,7 +219,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_XFI_AN register block. */
if (qdev->func & 1) {
@@ -233,7 +232,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_TRAIN register block. */
if (qdev->func & 1) {
@@ -248,7 +247,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -265,7 +264,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_TX register block. */
if (qdev->func & 1) {
@@ -280,7 +279,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_RX register block. */
if (qdev->func & 1) {
@@ -296,7 +295,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PLL register block. */
@@ -313,18 +312,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
return 0;
}
static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+ u32 *data)
{
int status = 0;
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
@@ -333,7 +332,7 @@ static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
@@ -347,17 +346,17 @@ exit:
* skipping unused locations.
*/
static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
- unsigned int other_function)
+ unsigned int other_function)
{
int status = 0;
int i;
for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
/* We're reading 400 xgmac registers, but we filter out
- * serveral locations that are non-responsive to reads.
+ * several locations that are non-responsive to reads.
*/
if ((i == 0x00000114) ||
- (i == 0x00000118) ||
+ (i == 0x00000118) ||
(i == 0x0000013c) ||
(i == 0x00000140) ||
(i > 0x00000150 && i < 0x000001fc) ||
@@ -389,7 +388,6 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
- int status = 0;
int i;
for (i = 0; i < 8; i++, buf++) {
@@ -402,7 +400,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
*buf = ql_read32(qdev, CNA_ETS);
}
- return status;
+ return 0;
}
static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
@@ -411,7 +409,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
ql_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
+ qdev->intr_context[i].intr_read_mask);
*buf = ql_read32(qdev, INTR_EN);
}
}
@@ -427,7 +425,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
for (i = 0; i < 16; i++) {
status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -439,7 +437,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
}
for (i = 0; i < 32; i++) {
status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -498,7 +496,7 @@ end:
/* Read the MPI Processor core registers */
static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
+ u32 offset, u32 count)
{
int i, status = 0;
for (i = 0; i < count; i++, buf++) {
@@ -511,7 +509,7 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
/* Read the ASIC probe dump */
static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
+ u32 valid, u32 *buf)
{
u32 module, mux_sel, probe, lo_val, hi_val;
@@ -546,13 +544,13 @@ static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
/* First we have to enable the probe mux */
ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
return 0;
}
@@ -667,7 +665,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
result_index = 0;
while ((result_index & MAC_ADDR_MR) == 0) {
result_index = ql_read32(qdev,
- MAC_ADDR_IDX);
+ MAC_ADDR_IDX);
}
result_data = ql_read32(qdev, MAC_ADDR_DATA);
*buf = result_index;
@@ -741,7 +739,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Insert the global header */
memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
+ sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.headerSize =
sizeof(struct mpi_coredump_global_header);
@@ -752,23 +750,23 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get generic NIC reg dump */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
+ NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
+ NIC2_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
/* Get XGMac registers. (Segment 18, Rev C. step 21) */
ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
+ NIC1_XGMAC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
+ NIC2_XGMAC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
@@ -799,97 +797,97 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Rev C. Step 20a */
ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
+ XAUI_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xaui_an),
"XAUI AN Registers");
/* Rev C. Step 20b */
ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
+ XAUI_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xaui_hss_pcs),
"XAUI HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
+ sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_an),
"XFI AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
+ XFI_TRAIN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_train),
"XFI TRAIN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
+ XFI_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_pcs),
"XFI HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
+ XFI_HSS_TX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_tx),
"XFI HSS TX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
+ XFI_HSS_RX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_rx),
"XFI HSS RX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
+ XFI_HSS_PLL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_pll),
"XFI HSS PLL Registers");
ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
+ XAUI2_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xaui_an),
"XAUI2 AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
+ XAUI2_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
"XAUI2 HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
+ XFI2_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_an),
"XFI2 AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
+ XFI2_TRAIN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_train),
"XFI2 TRAIN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
+ XFI2_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
"XFI2 HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
+ XFI2_HSS_TX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_tx),
"XFI2 HSS TX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
+ XFI2_HSS_RX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_rx),
"XFI2 HSS RX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
+ XFI2_HSS_PLL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_pll),
"XFI2 HSS PLL Registers");
@@ -903,7 +901,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
+ CORE_SEG_NUM,
sizeof(mpi_coredump->core_regs_seg_hdr) +
sizeof(mpi_coredump->mpi_core_regs) +
sizeof(mpi_coredump->mpi_core_sh_regs),
@@ -922,7 +920,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the Test Logic Registers */
ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
+ TEST_LOGIC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->test_logic_regs),
"Test Logic Regs");
@@ -933,7 +931,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the RMII Registers */
ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
+ RMII_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->rmii_regs),
"RMII Registers");
@@ -944,7 +942,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FCMAC1 Registers */
ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
+ FCMAC1_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fcmac1_regs),
"FCMAC1 Registers");
@@ -956,7 +954,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FCMAC2 Registers */
ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
+ FCMAC2_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fcmac2_regs),
"FCMAC2 Registers");
@@ -968,7 +966,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FC1 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
+ FC1_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fc1_mbx_regs),
"FC1 MBox Regs");
@@ -979,7 +977,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the IDE Registers */
ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
+ IDE_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ide_regs),
"IDE Registers");
@@ -990,7 +988,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the NIC1 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
+ NIC1_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic1_mbx_regs),
"NIC1 MBox Regs");
@@ -1001,7 +999,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the SMBus Registers */
ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
+ SMBUS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->smbus_regs),
"SMBus Registers");
@@ -1012,7 +1010,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FC2 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
+ FC2_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fc2_mbx_regs),
"FC2 MBox Regs");
@@ -1023,7 +1021,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the NIC2 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
+ NIC2_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic2_mbx_regs),
"NIC2 MBox Regs");
@@ -1034,7 +1032,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the I2C Registers */
ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
+ I2C_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->i2c_regs),
"I2C Registers");
@@ -1045,7 +1043,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the MEMC Registers */
ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
+ MEMC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->memc_regs),
"MEMC Registers");
@@ -1056,7 +1054,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the PBus Registers */
ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
+ PBUS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->pbus_regs),
"PBUS Registers");
@@ -1067,7 +1065,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the MDE Registers */
ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
+ MDE_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->mde_regs),
"MDE Registers");
@@ -1077,7 +1075,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
+ MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
@@ -1089,14 +1087,14 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Segment 31 */
/* Get indexed register values. */
ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
+ INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
+ CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
@@ -1105,18 +1103,18 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
+ ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
goto err;
/* Segment 34 (Rev C. step 23) */
ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
+ ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
@@ -1125,24 +1123,24 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
+ PROBE_DUMP_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->probe_dump),
"Probe Dump");
ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
+ ROUTING_INDEX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->routing_regs),
"Routing Regs");
status = ql_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
+ &mpi_coredump->routing_regs[0]);
if (status)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
+ MAC_PROTOCOL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->mac_prot_regs),
"MAC Prot Regs");
@@ -1150,7 +1148,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the semaphore registers for all 5 functions */
ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
+ SEM_REGS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->sem_regs), "Sem Registers");
@@ -1176,12 +1174,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
+ WCS_RAM_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->code_ram),
"WCS RAM");
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
+ CODE_RAM_ADDR, CODE_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of CODE RAM. Status = 0x%.08x\n",
@@ -1191,12 +1189,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Insert the segment header */
ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
+ MEMC_RAM_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->memc_ram),
"MEMC RAM");
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of MEMC RAM. Status = 0x%.08x\n",
@@ -1231,7 +1229,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
+ sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.headerSize =
sizeof(struct mpi_coredump_global_header);
@@ -1243,7 +1241,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* segment 16 */
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
+ MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
@@ -1254,7 +1252,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* Segment 16, Rev C. Step 18 */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
+ NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_regs),
"NIC Registers");
@@ -1265,14 +1263,14 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* Segment 31 */
/* Get indexed register values. */
ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
+ INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
+ CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
@@ -1281,18 +1279,18 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
return;
ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
+ ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
return;
/* Segment 34 (Rev C. step 23) */
ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
+ ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
@@ -1630,6 +1628,7 @@ void ql_dump_qdev(struct ql_adapter *qdev)
DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
+ DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
}
#endif
@@ -1650,7 +1649,7 @@ void ql_dump_wqicb(struct wqicb *wqicb)
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
- if (tx_ring == NULL)
+ if (!tx_ring)
return;
pr_err("===================== Dumping tx_ring %d ===============\n",
tx_ring->wq_id);
@@ -1730,16 +1729,24 @@ void ql_dump_cqicb(struct cqicb *cqicb)
le16_to_cpu(cqicb->sbq_len));
}
+static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+
+ if (rx_ring->cq_id < qdev->rss_ring_count)
+ return "RX COMPLETION";
+ else
+ return "TX COMPLETION";
+}
+
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
- if (rx_ring == NULL)
+ if (!rx_ring)
return;
pr_err("===================== Dumping rx_ring %d ===============\n",
rx_ring->cq_id);
- pr_err("Dumping rx_ring %d, type = %s%s%s\n",
- rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
- rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
- rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+ pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
+ qlge_rx_ring_type_name(rx_ring));
pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
pr_err("rx_ring->cq_base_dma = %llx\n",
@@ -1758,41 +1765,33 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
- pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
- pr_err("rx_ring->lbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_dma);
- pr_err("rx_ring->lbq_base_indirect = %p\n",
- rx_ring->lbq_base_indirect);
- pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_indirect_dma);
- pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
- pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
- pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
- pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
- rx_ring->lbq_prod_idx_db_reg);
- pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
- pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+ pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
+ pr_err("rx_ring->lbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_dma);
+ pr_err("rx_ring->lbq.base_indirect = %p\n",
+ rx_ring->lbq.base_indirect);
+ pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_indirect_dma);
+ pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
+ pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
+ rx_ring->lbq.prod_idx_db_reg);
+ pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
+ pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
- pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
-
- pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
- pr_err("rx_ring->sbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_dma);
- pr_err("rx_ring->sbq_base_indirect = %p\n",
- rx_ring->sbq_base_indirect);
- pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_indirect_dma);
- pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
- pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
- pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
- pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
- rx_ring->sbq_prod_idx_db_reg);
- pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
- pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
- pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
- pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
- pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+
+ pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
+ pr_err("rx_ring->sbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_dma);
+ pr_err("rx_ring->sbq.base_indirect = %p\n",
+ rx_ring->sbq.base_indirect);
+ pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_indirect_dma);
+ pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
+ pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
+ rx_ring->sbq.prod_idx_db_reg);
+ pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
+ pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
pr_err("rx_ring->irq = %d\n", rx_ring->irq);
pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
@@ -1806,7 +1805,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
pr_err("%s: Enter\n", __func__);
ptr = kmalloc(size, GFP_ATOMIC);
- if (ptr == NULL)
+ if (!ptr)
return;
if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
@@ -1992,7 +1991,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
le16_to_cpu(ib_mac_rsp->vlan_id));
pr_err("flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6cae33072496..6ad4515311f7 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -167,9 +167,9 @@ void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
u32 temp;
- int count = UDELAY_COUNT;
+ int count;
- while (count) {
+ for (count = 0; count < UDELAY_COUNT; count++) {
temp = ql_read32(qdev, reg);
/* check for errors */
@@ -181,7 +181,6 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
- count--;
}
netif_alert(qdev, probe, qdev->ndev,
"Timed out waiting for reg %x to come ready.\n", reg);
@@ -193,17 +192,16 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
*/
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
- int count = UDELAY_COUNT;
+ int count;
u32 temp;
- while (count) {
+ for (count = 0; count < UDELAY_COUNT; count++) {
temp = ql_read32(qdev, CFG);
if (temp & CFG_LE)
return -EIO;
if (!(temp & bit))
return 0;
udelay(UDELAY_DELAY);
- count--;
}
return -ETIMEDOUT;
}
@@ -625,75 +623,26 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
-/* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes. Once it hits zero we enable the interrupt.
- */
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- u32 var = 0;
- unsigned long hw_flags = 0;
- struct intr_context *ctx = qdev->intr_context + intr;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
- /* Always enable if we're MSIX multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- return var;
- }
+ struct intr_context *ctx = &qdev->intr_context[intr];
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- if (atomic_dec_and_test(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- }
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return var;
+ ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}
-static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- u32 var = 0;
- struct intr_context *ctx;
-
- /* HW disables for us if we're MSIX multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
- return 0;
+ struct intr_context *ctx = &qdev->intr_context[intr];
- ctx = qdev->intr_context + intr;
- spin_lock(&qdev->hw_lock);
- if (!atomic_read(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_dis_mask);
- var = ql_read32(qdev, STS);
- }
- atomic_inc(&ctx->irq_cnt);
- spin_unlock(&qdev->hw_lock);
- return var;
+ ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
int i;
- for (i = 0; i < qdev->intr_count; i++) {
- /* The enable call does a atomic_dec_and_test
- * and enables only if the result is zero.
- * So we precharge it here.
- */
- if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
- i == 0))
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
- ql_enable_completion_interrupt(qdev, i);
- }
+ for (i = 0; i < qdev->intr_count; i++)
+ ql_enable_completion_interrupt(qdev, i);
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
@@ -1027,48 +976,32 @@ static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
return PAGE_SIZE << qdev->lbq_buf_order;
}
-/* Get the next large buffer. */
-static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
- struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
- rx_ring->lbq_curr_idx++;
- if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_free_cnt++;
- return lbq_desc;
+ struct qlge_bq_desc *bq_desc;
+
+ bq_desc = &bq->queue[bq->next_to_clean];
+ bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
+
+ return bq_desc;
}
-static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
{
- struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+ struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(lbq_desc, mapaddr),
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
- /* If it's the last chunk of our master page then
- * we unmap it.
- */
- if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
- == ql_lbq_block_size(qdev))
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- return lbq_desc;
-}
+ if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
+ ql_lbq_block_size(qdev)) {
+ /* last chunk of the master page */
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ }
-/* Get the next small buffer. */
-static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
- rx_ring->sbq_curr_idx++;
- if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_free_cnt++;
- return sbq_desc;
+ return lbq_desc;
}
/* Update an rx ring index. */
@@ -1087,178 +1020,192 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
-static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
- struct bq_desc *lbq_desc)
+static const char * const bq_type_name[] = {
+ [QLGE_SB] = "sbq",
+ [QLGE_LB] = "lbq",
+};
+
+/* return 0 or negative error */
+static int qlge_refill_sb(struct rx_ring *rx_ring,
+ struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
- if (!rx_ring->pg_chunk.page) {
- u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
- qdev->lbq_buf_order);
- if (unlikely(!rx_ring->pg_chunk.page)) {
- netif_err(qdev, drv, qdev->ndev,
- "page allocation failed.\n");
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct sk_buff *skb;
+
+ if (sbq_desc->p.skb)
+ return 0;
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u sbq: getting new skb for index %d.\n",
+ rx_ring->cq_id, sbq_desc->index);
+
+ skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, QLGE_SB_PAD);
+
+ sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+ SMALL_BUF_MAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+ netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+ *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
+
+ sbq_desc->p.skb = skb;
+ return 0;
+}
+
+/* return 0 or negative error */
+static int qlge_refill_lb(struct rx_ring *rx_ring,
+ struct qlge_bq_desc *lbq_desc, gfp_t gfp)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
+
+ if (!master_chunk->page) {
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
+ if (unlikely(!page))
return -ENOMEM;
- }
- rx_ring->pg_chunk.offset = 0;
- map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
- 0, ql_lbq_block_size(qdev),
+ dma_addr = pci_map_page(qdev->pdev, page, 0,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- __free_pages(rx_ring->pg_chunk.page,
- qdev->lbq_buf_order);
- rx_ring->pg_chunk.page = NULL;
+ if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+ __free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
- return -ENOMEM;
+ return -EIO;
}
- rx_ring->pg_chunk.map = map;
- rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+ master_chunk->page = page;
+ master_chunk->va = page_address(page);
+ master_chunk->offset = 0;
+ rx_ring->chunk_dma_addr = dma_addr;
}
- /* Copy the current master pg_chunk info
- * to the current descriptor.
- */
- lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+ lbq_desc->p.pg_chunk = *master_chunk;
+ lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
+ *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
+ lbq_desc->p.pg_chunk.offset);
/* Adjust the master page chunk for next
* buffer get.
*/
- rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
- if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
- rx_ring->pg_chunk.page = NULL;
- lbq_desc->p.pg_chunk.last_flag = 1;
+ master_chunk->offset += qdev->lbq_buf_size;
+ if (master_chunk->offset == ql_lbq_block_size(qdev)) {
+ master_chunk->page = NULL;
} else {
- rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
- get_page(rx_ring->pg_chunk.page);
- lbq_desc->p.pg_chunk.last_flag = 0;
+ master_chunk->va += qdev->lbq_buf_size;
+ get_page(master_chunk->page);
}
+
return 0;
}
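The chunk accounting in qlge_refill_lb() can be summarized as follows (a reading of the code above, not additional patch code):

/* One master page of ql_lbq_block_size() bytes is carved into
 * qdev->lbq_buf_size chunks. Every chunk except the last takes an
 * extra reference via get_page(), so each rx buffer pins the page
 * independently; the DMA mapping is torn down only when the last
 * chunk is consumed (see ql_get_curr_lchunk()).
 */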
-/* Process (refill) a large buffer queue. */
-static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+
+/* return 0 or negative error */
+static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
- u32 clean_idx = rx_ring->lbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *lbq_desc;
- u64 map;
+ struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_bq_desc *bq_desc;
+ int refill_count;
+ int retval;
int i;
- while (rx_ring->lbq_free_cnt > 32) {
- for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- lbq_desc = &rx_ring->lbq[clean_idx];
- if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- rx_ring->lbq_clean_idx = clean_idx;
- netif_err(qdev, ifup, qdev->ndev,
- "Could not get a page chunk, i=%d, clean_idx =%d .\n",
- i, clean_idx);
- return;
- }
+ refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
+ bq->next_to_use);
+ if (!refill_count)
+ return 0;
+
+ i = bq->next_to_use;
+ bq_desc = &bq->queue[i];
+ i -= QLGE_BQ_LEN;
+ do {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u %s: try cleaning idx %d\n",
+ rx_ring->cq_id, bq_type_name[bq->type], i);
- map = lbq_desc->p.pg_chunk.map +
- lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
- dma_unmap_len_set(lbq_desc, maplen,
- rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
-
- pci_dma_sync_single_for_device(qdev->pdev, map,
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- clean_idx++;
- if (clean_idx == rx_ring->lbq_len)
- clean_idx = 0;
+ if (bq->type == QLGE_SB)
+ retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
+ else
+ retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
+ if (retval < 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "ring %u %s: Could not get a page chunk, idx %d\n",
+ rx_ring->cq_id, bq_type_name[bq->type], i);
+ break;
}
- rx_ring->lbq_clean_idx = clean_idx;
- rx_ring->lbq_prod_idx += 16;
- if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_free_cnt -= 16;
- }
+ bq_desc++;
+ i++;
+ if (unlikely(!i)) {
+ bq_desc = &bq->queue[0];
+ i -= QLGE_BQ_LEN;
+ }
+ refill_count--;
+ } while (refill_count);
+ i += QLGE_BQ_LEN;
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
+ if (bq->next_to_use != i) {
+ if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u %s: updating prod idx = %d.\n",
+ rx_ring->cq_id, bq_type_name[bq->type],
+ i);
+ ql_write_db_reg(i, bq->prod_idx_db_reg);
+ }
+ bq->next_to_use = i;
}
+
+ return retval;
}
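To see what refill_count computes, plug hypothetical indices into the expression at the top of qlge_refill_bq() (illustration only):

/* Assume next_to_clean = 100, next_to_use = 90:
 *   QLGE_BQ_ALIGN(100 - 1) = ALIGN_DOWN(99, 16) = 96
 *   QLGE_BQ_WRAP(96 - 90)  = 6
 * so up to 6 buffers are refilled, stopping one aligned step short of
 * next_to_clean so the producer index never overtakes the consumer.
 */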
-/* Process (refill) a small buffer queue. */
-static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
+ unsigned long delay)
{
- u32 clean_idx = rx_ring->sbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *sbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->sbq_free_cnt > 16) {
- for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
- sbq_desc = &rx_ring->sbq[clean_idx];
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- if (sbq_desc->p.skb == NULL) {
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
- sbq_desc->p.skb =
- netdev_alloc_skb(qdev->ndev,
- SMALL_BUFFER_SIZE);
- if (sbq_desc->p.skb == NULL) {
- rx_ring->sbq_clean_idx = clean_idx;
- return;
- }
- skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
- map = pci_map_single(qdev->pdev,
- sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev,
- "PCI mapping failed.\n");
- rx_ring->sbq_clean_idx = clean_idx;
- dev_kfree_skb_any(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- return;
- }
- dma_unmap_addr_set(sbq_desc, mapaddr, map);
- dma_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size);
- *sbq_desc->addr = cpu_to_le64(map);
- }
+ bool sbq_fail, lbq_fail;
- clean_idx++;
- if (clean_idx == rx_ring->sbq_len)
- clean_idx = 0;
- }
- rx_ring->sbq_clean_idx = clean_idx;
- rx_ring->sbq_prod_idx += 16;
- if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_free_cnt -= 16;
- }
+ sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
+ lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
- }
+ /* Minimum number of buffers needed to be able to receive at least one
+ * frame of any format:
+ * sbq: 1 for header + 1 for data
+	 * lbq: MTU 9000 / large buffer size
+ * Below this, the queue might stall.
+ */
+ if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
+ (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
+ DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
+		/* Allocations can take a long time in certain cases (e.g.
+		 * reclaim). Therefore, use a workqueue for long-running
+		 * work items.
+		 */
+ queue_delayed_work_on(smp_processor_id(), system_long_wq,
+ &rx_ring->refill_work, delay);
}
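The stall threshold above works out as follows for the large buffer queue, using LARGE_BUFFER_MAX_SIZE = 8192 defined in qlge.h above (illustration only):

/* DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE) = DIV_ROUND_UP(9000, 8192)
 *                                           = 2
 * i.e. at least two large buffers must remain posted to hw to be able
 * to receive a single 9000-byte jumbo frame.
 */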
-static void ql_update_buffer_queues(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static void qlge_slow_refill(struct work_struct *work)
{
- ql_update_sbq(qdev, rx_ring);
- ql_update_lbq(qdev, rx_ring);
+ struct rx_ring *rx_ring = container_of(work, struct rx_ring,
+ refill_work.work);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi_disable(napi);
+ ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
+ napi_enable(napi);
+
+ local_bh_disable();
+ /* napi_disable() might have prevented incomplete napi work from being
+ * rescheduled.
+ */
+ napi_schedule(napi);
+ /* trigger softirq processing */
+ local_bh_enable();
}
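One note on the bracketing above (a reading of the code, not patch text): napi_schedule() from process context only marks the softirq as pending; the following local_bh_enable() is what actually runs it.

/* napi_schedule() raises NET_RX_SOFTIRQ but does not run it from
 * process context; local_bh_enable() then processes the pending
 * softirq on this CPU, so the ring is polled promptly after refill.
 */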
/* Unmaps tx buffers. Can be called from send() if a pci mapping
@@ -1495,7 +1442,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
u16 vlan_id)
{
struct sk_buff *skb;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
/* Frame error, so drop the packet. */
@@ -1544,7 +1491,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
void *addr;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
size_t hlen = ETH_HLEN;
@@ -1634,31 +1581,24 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
u32 length,
u16 vlan_id)
{
+ struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- struct sk_buff *new_skb = NULL;
- struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+ struct sk_buff *skb, *new_skb;
skb = sbq_desc->p.skb;
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (new_skb == NULL) {
+ if (!new_skb) {
rx_ring->rx_dropped++;
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
skb_put_data(new_skb, skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */
@@ -1759,11 +1699,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
- struct bq_desc *lbq_desc;
- struct bq_desc *sbq_desc;
- struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ struct qlge_bq_desc *lbq_desc, *sbq_desc;
+ struct sk_buff *skb = NULL;
size_t hlen = ETH_HLEN;
/*
@@ -1776,11 +1715,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
/*
* Headers fit nicely into a small buffer.
*/
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
@@ -1808,35 +1745,22 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* from the "data" small buffer to the "header" small
* buffer.
*/
- sbq_desc = ql_get_curr_sbuf(rx_ring);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr
- (sbq_desc, mapaddr),
- dma_unmap_len
- (sbq_desc, maplen),
+ sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
skb_put_data(skb, sbq_desc->p.skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr
- (sbq_desc,
- mapaddr),
- dma_unmap_len
- (sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes in a single small buffer.\n",
length);
- sbq_desc = ql_get_curr_sbuf(rx_ring);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc,
- mapaddr),
- dma_unmap_len(sbq_desc,
- maplen),
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
}
@@ -1868,15 +1792,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
- if (skb == NULL) {
+ if (!skb) {
netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
"No skb available, drop the packet.\n");
return NULL;
}
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(lbq_desc,
- mapaddr),
- dma_unmap_len(lbq_desc, maplen),
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size,
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1907,11 +1829,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* eventually be in trouble.
*/
int size, i = 0;
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
* This is an non TCP/UDP IP frame, so
@@ -1931,8 +1851,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
do {
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- size = (length < rx_ring->lbq_buf_size) ? length :
- rx_ring->lbq_buf_size;
+ size = min(length, qdev->lbq_buf_size);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Adding page %d to skb for %d bytes.\n",
@@ -2286,7 +2205,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
if (count == budget)
break;
}
- ql_update_buffer_queues(qdev, rx_ring);
+ ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
ql_write_cq_idx(rx_ring);
return count;
}
@@ -2500,21 +2419,22 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
u32 var;
int work_done = 0;
- spin_lock(&qdev->hw_lock);
- if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "Shared Interrupt, Not ours!\n");
- spin_unlock(&qdev->hw_lock);
- return IRQ_NONE;
- }
- spin_unlock(&qdev->hw_lock);
+ /* Experience shows that when using INTx interrupts, interrupts must
+ * be masked manually.
+ * When using MSI mode, INTR_EN_EN must be explicitly disabled
+ * (even though it is auto-masked), otherwise a later command to
+ * enable it is not effective.
+ */
+ if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
+ ql_disable_completion_interrupt(qdev, 0);
- var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+ var = ql_read32(qdev, STS);
/*
* Check for fatal error.
*/
if (var & STS_FE) {
+ ql_disable_completion_interrupt(qdev, 0);
ql_queue_asic_error(qdev);
netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
@@ -2534,7 +2454,6 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
netif_err(qdev, intr, qdev->ndev,
"Got MPI processor interrupt.\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
qdev->workqueue, &qdev->mpi_work, 0);
@@ -2550,11 +2469,18 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
if (var & intr_context->irq_mask) {
netif_info(qdev, intr, qdev->ndev,
"Waking handler for rx_ring[0].\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
+ } else {
+ /* Experience shows that the device sometimes signals an
+ * interrupt but no work is scheduled from this function.
+ * Nevertheless, the interrupt is auto-masked. Therefore, we
+ * systematically re-enable the interrupt if we didn't
+ * schedule napi.
+ */
+ ql_enable_completion_interrupt(qdev, 0);
}
- ql_enable_completion_interrupt(qdev, intr_context->intr);
+
return work_done ? IRQ_HANDLED : IRQ_NONE;
}
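The reworked handler above is the standard pattern for a device on a shared INTx line: claim the interrupt only when the status register shows work, hand the ring to NAPI with the source still masked, and re-enable from the poll loop. A minimal sketch, with hypothetical mydev_* helpers standing in for the driver's own:

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;

	if (!mydev_has_work(dev))
		return IRQ_NONE;		/* shared line: not ours */

	mydev_ack_irq(dev);			/* source stays masked here */
	napi_schedule(&dev->napi);		/* re-enabled from the poll loop */
	return IRQ_HANDLED;
}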
@@ -2737,7 +2663,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev->rx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->rx_ring_shadow_reg_dma);
- if (qdev->rx_ring_shadow_reg_area == NULL) {
+ if (!qdev->rx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
return -ENOMEM;
@@ -2746,7 +2672,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev->tx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
- if (qdev->tx_ring_shadow_reg_area == NULL) {
+ if (!qdev->tx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
@@ -2798,14 +2724,14 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
&tx_ring->wq_base_dma);
- if ((tx_ring->wq_base == NULL) ||
+ if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
goto pci_alloc_err;
tx_ring->q =
kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
GFP_KERNEL);
- if (tx_ring->q == NULL)
+ if (!tx_ring->q)
goto err;
return 0;
@@ -2820,54 +2746,46 @@ pci_alloc_err:
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
- struct bq_desc *lbq_desc;
+ struct qlge_bq *lbq = &rx_ring->lbq;
+ unsigned int last_offset;
- uint32_t curr_idx, clean_idx;
+ last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
+ while (lbq->next_to_clean != lbq->next_to_use) {
+ struct qlge_bq_desc *lbq_desc =
+ &lbq->queue[lbq->next_to_clean];
- curr_idx = rx_ring->lbq_curr_idx;
- clean_idx = rx_ring->lbq_clean_idx;
- while (curr_idx != clean_idx) {
- lbq_desc = &rx_ring->lbq[curr_idx];
-
- if (lbq_desc->p.pg_chunk.last_flag) {
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
+ if (lbq_desc->p.pg_chunk.offset == last_offset)
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
-
put_page(lbq_desc->p.pg_chunk.page);
- lbq_desc->p.pg_chunk.page = NULL;
-
- if (++curr_idx == rx_ring->lbq_len)
- curr_idx = 0;
+ lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
}
- if (rx_ring->pg_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
- put_page(rx_ring->pg_chunk.page);
- rx_ring->pg_chunk.page = NULL;
+
+ if (rx_ring->master_chunk.page) {
+ pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ put_page(rx_ring->master_chunk.page);
+ rx_ring->master_chunk.page = NULL;
}
}
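QLGE_BQ_WRAP above replaces the explicit "if (++idx == len) idx = 0" wrap of the old code. Assuming QLGE_BQ_LEN is a power of two, the macro is presumably a mask, along these lines:

#define MY_BQ_LEN	1024			/* hypothetical; must be a power of two */
#define MY_BQ_WRAP(i)	((i) & (MY_BQ_LEN - 1))	/* cheap modulo for 2^n lengths */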
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
int i;
- struct bq_desc *sbq_desc;
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- if (sbq_desc == NULL) {
+ for (i = 0; i < QLGE_BQ_LEN; i++) {
+ struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
+
+ if (!sbq_desc) {
netif_err(qdev, ifup, qdev->ndev,
"sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -2881,89 +2799,83 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
int i;
- struct rx_ring *rx_ring;
for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->lbq)
+ struct rx_ring *rx_ring = &qdev->rx_ring[i];
+
+ if (rx_ring->lbq.queue)
ql_free_lbq_buffers(qdev, rx_ring);
- if (rx_ring->sbq)
+ if (rx_ring->sbq.queue)
ql_free_sbq_buffers(qdev, rx_ring);
}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
- struct rx_ring *rx_ring;
int i;
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->type != TX_Q)
- ql_update_buffer_queues(qdev, rx_ring);
- }
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
+ HZ / 2);
}
-static void ql_init_lbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static int qlge_init_bq(struct qlge_bq *bq)
{
+ struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_bq_desc *bq_desc;
+ __le64 *buf_ptr;
int i;
- struct bq_desc *lbq_desc;
- __le64 *bq = rx_ring->lbq_base;
- memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- memset(lbq_desc, 0, sizeof(*lbq_desc));
- lbq_desc->index = i;
- lbq_desc->addr = bq;
- bq++;
+ bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ &bq->base_dma);
+ if (!bq->base) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "ring %u %s allocation failed.\n", rx_ring->cq_id,
+ bq_type_name[bq->type]);
+ return -ENOMEM;
}
-}
-static void ql_init_sbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
- __le64 *bq = rx_ring->sbq_base;
+ bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
+ GFP_KERNEL);
+ if (!bq->queue)
+ return -ENOMEM;
- memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- memset(sbq_desc, 0, sizeof(*sbq_desc));
- sbq_desc->index = i;
- sbq_desc->addr = bq;
- bq++;
+ buf_ptr = bq->base;
+ bq_desc = &bq->queue[0];
+ for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
+ bq_desc->p.skb = NULL;
+ bq_desc->index = i;
+ bq_desc->buf_ptr = buf_ptr;
}
+
+ return 0;
}
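qlge_init_bq() walks the host-side descriptor array and the DMA-visible array of __le64 buffer pointers in lockstep. The pairing idiom, reduced to a sketch with illustrative names:

struct my_desc { int index; __le64 *buf_ptr; };

static void my_init_bq(struct my_desc *queue, __le64 *base, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		queue[i].index = i;		/* host bookkeeping */
		queue[i].buf_ptr = base + i;	/* slot the device reads */
	}
}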
static void ql_free_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
/* Free the small buffer queue. */
- if (rx_ring->sbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->sbq_size,
- rx_ring->sbq_base, rx_ring->sbq_base_dma);
- rx_ring->sbq_base = NULL;
+ if (rx_ring->sbq.base) {
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ rx_ring->sbq.base, rx_ring->sbq.base_dma);
+ rx_ring->sbq.base = NULL;
}
/* Free the small buffer queue control blocks. */
- kfree(rx_ring->sbq);
- rx_ring->sbq = NULL;
+ kfree(rx_ring->sbq.queue);
+ rx_ring->sbq.queue = NULL;
/* Free the large buffer queue. */
- if (rx_ring->lbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->lbq_size,
- rx_ring->lbq_base, rx_ring->lbq_base_dma);
- rx_ring->lbq_base = NULL;
+ if (rx_ring->lbq.base) {
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ rx_ring->lbq.base, rx_ring->lbq.base_dma);
+ rx_ring->lbq.base = NULL;
}
/* Free the large buffer queue control blocks. */
- kfree(rx_ring->lbq);
- rx_ring->lbq = NULL;
+ kfree(rx_ring->lbq.queue);
+ rx_ring->lbq.queue = NULL;
/* Free the rx queue. */
if (rx_ring->cq_base) {
@@ -2987,67 +2899,18 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
&rx_ring->cq_base_dma);
- if (rx_ring->cq_base == NULL) {
+ if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
- if (rx_ring->sbq_len) {
- /*
- * Allocate small buffer queue.
- */
- rx_ring->sbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
- &rx_ring->sbq_base_dma);
-
- if (rx_ring->sbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue allocation failed.\n");
- goto err_mem;
- }
-
- /*
- * Allocate small buffer queue control blocks.
- */
- rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL)
- goto err_mem;
-
- ql_init_sbq_ring(qdev, rx_ring);
- }
-
- if (rx_ring->lbq_len) {
- /*
- * Allocate large buffer queue.
- */
- rx_ring->lbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
- &rx_ring->lbq_base_dma);
-
- if (rx_ring->lbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue allocation failed.\n");
- goto err_mem;
- }
- /*
- * Allocate large buffer queue control blocks.
- */
- rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL)
- goto err_mem;
-
- ql_init_lbq_ring(qdev, rx_ring);
+ if (rx_ring->cq_id < qdev->rss_ring_count &&
+ (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
+ ql_free_rx_resources(qdev, rx_ring);
+ return -ENOMEM;
}
return 0;
-
-err_mem:
- ql_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
@@ -3133,7 +2996,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
- u16 bq_len;
u64 tmp;
__le64 *base_indirect_ptr;
int page_entries;
@@ -3144,12 +3006,12 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
*rx_ring->prod_idx_sh_reg = 0;
shadow_reg += sizeof(u64);
shadow_reg_dma += sizeof(u64);
- rx_ring->lbq_base_indirect = shadow_reg;
- rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- rx_ring->sbq_base_indirect = shadow_reg;
- rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+ rx_ring->lbq.base_indirect = shadow_reg;
+ rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ rx_ring->sbq.base_indirect = shadow_reg;
+ rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
@@ -3160,16 +3022,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->valid_db_reg = doorbell_area + 0x04;
/* PCI doorbell mem area + 0x18 for large buffer consumer */
- rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+ rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
/* PCI doorbell mem area + 0x1c */
- rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+ rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
memset((void *)cqicb, 0, sizeof(struct cqicb));
cqicb->msix_vect = rx_ring->irq;
- bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
- cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+ cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
+ LEN_CPP_CONT);
cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
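QLGE_FIT16 encodes ring lengths for 16-bit hardware fields in which a full 65536 entries is represented as 0, exactly what plain truncation yields; the macro is presumably just that cast:

#define MY_FIT16(v)	((u16)(v))	/* 65536 -> 0; hardware reads 0 as 64K */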
@@ -3181,59 +3043,42 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->flags = FLAGS_LC | /* Load queue base address */
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
- if (rx_ring->lbq_len) {
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = rx_ring->lbq_base_indirect;
+ tmp = (u64)rx_ring->lbq.base_dma;
+ base_indirect_ptr = rx_ring->lbq.base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- cqicb->lbq_addr =
- cpu_to_le64(rx_ring->lbq_base_indirect_dma);
- bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
- (u16) rx_ring->lbq_buf_size;
- cqicb->lbq_buf_size = cpu_to_le16(bq_len);
- bq_len = (rx_ring->lbq_len == 65536) ? 0 :
- (u16) rx_ring->lbq_len;
- cqicb->lbq_len = cpu_to_le16(bq_len);
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_clean_idx = 0;
- rx_ring->lbq_free_cnt = rx_ring->lbq_len;
- }
- if (rx_ring->sbq_len) {
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
+ cqicb->lbq_buf_size =
+ cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
+ cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
+ rx_ring->lbq.next_to_use = 0;
+ rx_ring->lbq.next_to_clean = 0;
+
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = rx_ring->sbq_base_indirect;
+ tmp = (u64)rx_ring->sbq.base_dma;
+ base_indirect_ptr = rx_ring->sbq.base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq_base_indirect_dma);
- cqicb->sbq_buf_size =
- cpu_to_le16((u16)(rx_ring->sbq_buf_size));
- bq_len = (rx_ring->sbq_len == 65536) ? 0 :
- (u16) rx_ring->sbq_len;
- cqicb->sbq_len = cpu_to_le16(bq_len);
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_clean_idx = 0;
- rx_ring->sbq_free_cnt = rx_ring->sbq_len;
- }
- switch (rx_ring->type) {
- case TX_Q:
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
- break;
- case RX_Q:
+ cpu_to_le64(rx_ring->sbq.base_indirect_dma);
+ cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
+ cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
+ rx_ring->sbq.next_to_use = 0;
+ rx_ring->sbq.next_to_clean = 0;
+ }
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
@@ -3241,10 +3086,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
- break;
- default:
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Invalid rx_ring->type = %d.\n", rx_ring->type);
+ } else {
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
}
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
@@ -3366,6 +3210,7 @@ msi:
}
}
qlge_irq_type = LEG_IRQ;
+ set_bit(QL_LEGACY_ENABLED, &qdev->flags);
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Running with legacy interrupts.\n");
}
@@ -3509,6 +3354,16 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE;
+ if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
+ /* Experience shows that when using INTx interrupts,
+ * the device does not always auto-mask INTR_EN_EN.
+ * Moreover, masking INTR_EN_EN manually does not
+ * immediately prevent interrupt generation.
+ */
+ intr_context->intr_en_mask |= INTR_EN_EI << 16 |
+ INTR_EN_EI;
+ intr_context->intr_dis_mask |= INTR_EN_EI << 16;
+ }
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
/*
@@ -3557,7 +3412,6 @@ static int ql_request_irq(struct ql_adapter *qdev)
ql_resolve_queues_to_irqs(qdev);
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- atomic_set(&intr_context->irq_cnt, 0);
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
status = request_irq(qdev->msi_x_entry[i].vector,
intr_context->handler,
@@ -3591,12 +3445,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
goto err_irq;
netif_err(qdev, ifup, qdev->ndev,
- "Hooked intr %d, queue type %s, with name %s.\n",
- i,
- qdev->rx_ring[0].type == DEFAULT_Q ?
- "DEFAULT_Q" :
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ "Hooked intr 0, queue type RX_Q, with name %s.\n",
intr_context->name);
}
intr_context->hooked = 1;
@@ -4072,6 +3921,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ int i;
/* If we hit pci_channel_io_perm_failure
* failure condition, then we already
@@ -4089,21 +3939,31 @@ static int qlge_close(struct net_device *ndev)
*/
while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
msleep(1);
+
+ /* Make sure refill_work doesn't re-enable napi */
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
+
ql_adapter_down(qdev);
ql_release_adapter_resources(qdev);
return 0;
}
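The cancel_delayed_work_sync() added above pins down the teardown ordering: the refill worker must be fully stopped before NAPI is disabled, or it could race and re-enable it. The general shape, under assumed field names:

cancel_delayed_work_sync(&ring->refill_work);	/* worker finished, won't requeue */
napi_disable(&ring->napi);			/* now safe to tear the ring down */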
+static void qlge_set_lb_size(struct ql_adapter *qdev)
+{
+ if (qdev->ndev->mtu <= 1500)
+ qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
+ else
+ qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
+ qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
+}
+
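qlge_set_lb_size() pairs the byte size with its page-allocation order. get_order() rounds a size up to whole pages and returns the exponent, e.g. on 4 KiB pages:

unsigned int order = get_order(16384);			/* 4 pages -> order 2 */
struct page *pages = alloc_pages(GFP_KERNEL, order);	/* one 2^order-page chunk */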
static int ql_configure_rings(struct ql_adapter *qdev)
{
int i;
struct rx_ring *rx_ring;
struct tx_ring *tx_ring;
int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
- unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
-
- qdev->lbq_buf_order = get_order(lbq_buf_len);
/* In a perfect world we have one RSS ring for each CPU
* and each has its own vector. To do that we ask for
@@ -4148,15 +4008,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->cq_len = qdev->rx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = NUM_LARGE_BUFFERS;
- rx_ring->lbq_size =
- rx_ring->lbq_len * sizeof(__le64);
- rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- rx_ring->sbq_len = NUM_SMALL_BUFFERS;
- rx_ring->sbq_size =
- rx_ring->sbq_len * sizeof(__le64);
- rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
- rx_ring->type = RX_Q;
+ rx_ring->lbq.type = QLGE_LB;
+ rx_ring->sbq.type = QLGE_SB;
+ INIT_DELAYED_WORK(&rx_ring->refill_work,
+ &qlge_slow_refill);
} else {
/*
* Outbound queue handles outbound completions only.
@@ -4165,13 +4020,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->cq_len = qdev->tx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = 0;
- rx_ring->lbq_size = 0;
- rx_ring->lbq_buf_size = 0;
- rx_ring->sbq_len = 0;
- rx_ring->sbq_size = 0;
- rx_ring->sbq_buf_size = 0;
- rx_ring->type = TX_Q;
}
}
return 0;
@@ -4186,6 +4034,7 @@ static int qlge_open(struct net_device *ndev)
if (err)
return err;
+ qlge_set_lb_size(qdev);
err = ql_configure_rings(qdev);
if (err)
return err;
@@ -4207,9 +4056,7 @@ error_up:
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
- struct rx_ring *rx_ring;
- int i, status;
- u32 lbq_buf_len;
+ int status;
/* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
@@ -4232,16 +4079,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (status)
goto error;
- /* Get the new rx buffer size. */
- lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- for (i = 0; i < qdev->rss_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- /* Set the new size. */
- rx_ring->lbq_buf_size = lbq_buf_len;
- }
+ qlge_set_lb_size(qdev);
status = ql_adapter_up(qdev);
if (status)
@@ -4642,13 +4480,12 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
if (qlge_mpi_coredump) {
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
- if (qdev->mpi_coredump == NULL) {
+ if (!qdev->mpi_coredump) {
err = -ENOMEM;
goto err_out2;
}
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 957c72985a06..9e422bbbb6ab 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -1257,7 +1257,6 @@ void ql_mpi_work(struct work_struct *work)
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
- ql_enable_completion_interrupt(qdev, 0);
}
void ql_mpi_reset_work(struct work_struct *work)
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 900424db9b97..eabf1093328e 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -796,7 +796,6 @@ static int gdma_dma_probe(struct platform_device *pdev)
struct gdma_dma_dev *dma_dev;
struct dma_device *dd;
unsigned int i;
- struct resource *res;
int ret;
int irq;
void __iomem *base;
@@ -818,8 +817,7 @@ static int gdma_dma_probe(struct platform_device *pdev)
return -EINVAL;
dma_dev->data = data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
dma_dev->base = base;
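devm_platform_ioremap_resource() simply folds the two calls it replaces into one helper; it behaves like:

static void __iomem *my_ioremap(struct platform_device *pdev, unsigned int idx)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, idx);

	return devm_ioremap_resource(&pdev->dev, res);
}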
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 51a5b71f8c25..88e42cc1d837 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -440,15 +440,9 @@ static void update_bmc_sta(struct adapter *padapter)
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
}
- if (pcur_network->Configuration.DSConfig > 14) {
- /* force to A mode. 5G doesn't support CCK rates */
- network_type = WIRELESS_11A;
- tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
- } else {
- /* force to b mode */
- network_type = WIRELESS_11B;
- tx_ra_bitmap = 0xf;
- }
+ /* force to b mode */
+ network_type = WIRELESS_11B;
+ tx_ra_bitmap = 0xf;
raid = networktype_to_raid(network_type);
init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
@@ -560,29 +554,24 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
static void update_hw_ht_param(struct adapter *padapter)
{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 max_ampdu_len;
+ u8 min_mpdu_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
DBG_88E("%s\n", __func__);
- /* handle A-MPDU parameter field */
- /*
- ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- ampdu_params_info [4:2]:Min MPDU Start Spacing
- */
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
-
- min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+ /* handle A-MPDU parameter field
+ * ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ * ampdu_params_info [4:2]:Min MPDU Start Spacing
+ */
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ min_mpdu_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, &min_mpdu_spacing);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, &max_ampdu_len);
- /* */
- /* Config SM Power Save setting */
- /* */
+ /* Config SM Power Save setting */
pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
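The two masks follow the A-MPDU parameter layout given in the comment: bits [1:0] carry the max A-MPDU length code, bits [4:2] the min MPDU start spacing. Worked out on a sample value:

u8 info = 0x17;				/* example ampdu_params_info */
u8 max_len = info & 0x03;		/* 0x17 & 0x03 == 3 -> 64k */
u8 min_spc = (info & 0x1c) >> 2;	/* (0x17 & 0x1c) >> 2 == 5 */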
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 02c476f45b33..d9b0f9e6235c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -25,7 +25,7 @@ enum{
* When we want to enable write operation, we should change to pwr on state.
* When we stop write, we should switch to 500k mode and disable LDO 2.5V.
*/
-void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
+static void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
{
u8 tempval;
u16 tmpv16;
@@ -615,10 +615,9 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
{
u16 efuse_addr = *pAddr;
- u8 badworden = 0;
+ u8 badworden;
u32 PgWriteSuccess = 0;
- badworden = 0x0f;
badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data);
if (badworden == 0x0F) {
/* write ok */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 1ec3b237212e..e764436e120f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -2045,9 +2045,9 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
while (1) {
do_join_r = rtw_do_join(padapter);
- if (do_join_r == _SUCCESS) {
+ if (do_join_r == _SUCCESS)
break;
- }
+
DBG_88E("roaming do_join return %d\n", do_join_r);
pmlmepriv->to_roaming--;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 18dc9fc1c04a..e984b4605e91 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -507,7 +507,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
/* insert & update wps_probe_resp_ie */
- if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
uint wps_offset, remainder_ielen;
u8 *premainder_ie;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 7b16632048b7..03dc7e5fcc38 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -514,7 +514,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -621,7 +621,7 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
}
} else {
ret = -EINVAL;
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 91a30142c567..73f2cb5ebaa6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -83,7 +83,8 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
- list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+ list_add_tail(&psta->list,
+ get_list_head(&pstapriv->free_sta_queue));
psta++;
}
@@ -134,31 +135,30 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
struct recv_reorder_ctrl *preorder_ctrl;
int index;
- if (pstapriv) {
- /* delete all reordering_ctrl_timer */
- spin_lock_bh(&pstapriv->sta_hash_lock);
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- plist = phead->next;
+ if (!pstapriv)
+ return _SUCCESS;
- while (phead != plist) {
- int i;
+ /* delete all reordering_ctrl_timer */
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &pstapriv->sta_hash[index];
+ plist = phead->next;
- psta = container_of(plist, struct sta_info,
- hash_list);
- plist = plist->next;
+ while (phead != plist) {
+ int i;
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
- }
+ psta = container_of(plist, struct sta_info, hash_list);
+ plist = plist->next;
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
}
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- /*===============================*/
-
- vfree(pstapriv->pallocated_stainfo_buf);
}
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ vfree(pstapriv->pallocated_stainfo_buf);
return _SUCCESS;
}
@@ -167,7 +167,7 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
s32 index;
struct list_head *phash_list;
- struct sta_info *psta;
+ struct sta_info *psta;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
int i = 0;
@@ -180,65 +180,70 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
struct sta_info, list);
if (!psta) {
spin_unlock_bh(&pfree_sta_queue->lock);
- } else {
- list_del_init(&psta->list);
- spin_unlock_bh(&pfree_sta_queue->lock);
- _rtw_init_stainfo(psta);
- memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
- index = wifi_mac_hash(hwaddr);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("%s: index=%x", __func__, index));
- if (index >= NUM_STA) {
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => %s: index >= NUM_STA", __func__));
- psta = NULL;
- goto exit;
- }
- phash_list = &pstapriv->sta_hash[index];
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- list_add_tail(&psta->hash_list, phash_list);
- pstapriv->asoc_sta_count++;
- spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return NULL;
+ }
-/* Commented by Albert 2009/08/13 */
-/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
-/* In this case, this packet will be dropped by recv_decache function if we use the 0x00 as the default value for tid_rxseq variable. */
-/* So, we initialize the tid_rxseq variable as the 0xffff. */
+ list_del_init(&psta->list);
+ spin_unlock_bh(&pfree_sta_queue->lock);
+ _rtw_init_stainfo(psta);
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+ index = wifi_mac_hash(hwaddr);
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("%s: index=%x", __func__, index));
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("ERROR => %s: index >= NUM_STA", __func__));
+ return NULL;
+ }
+ phash_list = &pstapriv->sta_hash[index];
- for (i = 0; i < 16; i++)
- memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ list_add_tail(&psta->hash_list, phash_list);
+ pstapriv->asoc_sta_count++;
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- ("alloc number_%d stainfo with hwaddr = %pM\n",
- pstapriv->asoc_sta_count, hwaddr));
+ /* Commented by Albert 2009/08/13
+ * For the SMC router, the sequence number of first packet of
+ * WPS handshake will be 0. In this case, this packet will be
+ * dropped by recv_decache function if we use the 0x00 as the
+ * default value for tid_rxseq variable. So, we initialize the
+ * tid_rxseq variable as the 0xffff.
+ */
- init_addba_retry_timer(pstapriv->padapter, psta);
+ for (i = 0; i < 16; i++)
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
+ &wRxSeqInitialValue, 2);
- /* for A-MPDU Rx reordering buffer control */
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("alloc number_%d stainfo with hwaddr = %pM\n",
+ pstapriv->asoc_sta_count, hwaddr));
- preorder_ctrl->padapter = pstapriv->padapter;
+ init_addba_retry_timer(pstapriv->padapter, psta);
- preorder_ctrl->enable = false;
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- preorder_ctrl->wsize_b = 64;/* 64; */
+ preorder_ctrl->padapter = pstapriv->padapter;
- _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+ preorder_ctrl->enable = false;
- rtw_init_recv_timer(preorder_ctrl);
- }
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* 64; */
- /* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
- psta->rssi_stat.UndecoratedSmoothedCCK = -1;
+ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
- /* init for the sequence number of received management frame */
- psta->RxMgmtFrameSeqNum = 0xffff;
+ rtw_init_recv_timer(preorder_ctrl);
}
-exit:
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
+ psta->rssi_stat.UndecoratedSmoothedCCK = -1;
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+
return psta;
}
@@ -296,7 +301,9 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
del_timer_sync(&psta->addba_retry_timer);
- /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+ /* for A-MPDU Rx reordering buffer control, cancel
+ * reordering_ctrl_timer
+ */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
struct recv_frame *prframe;
@@ -311,7 +318,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&ppending_recvframe_queue->lock);
- phead = get_list_head(ppending_recvframe_queue);
+ phead = get_list_head(ppending_recvframe_queue);
plist = phead->next;
while (!list_empty(phead)) {
@@ -444,23 +451,21 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
struct sta_info *psta;
- u32 res = _SUCCESS;
- unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
- psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
+ psta = rtw_alloc_stainfo(pstapriv, bc_addr);
if (!psta) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
- goto exit;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("rtw_alloc_stainfo fail"));
+ return _FAIL;
}
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
-exit:
- return res;
+ return _SUCCESS;
}
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
@@ -471,13 +476,13 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
return rtw_get_stainfo(pstapriv, bc_addr);
}
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
+bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
- u8 res = true;
+ bool res = true;
#ifdef CONFIG_88EU_AP_MODE
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
- u8 match = false;
+ bool match = false;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -499,9 +504,9 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
- res = (match) ? false : true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match) ? true : false;
+ res = match;
else
res = true;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index c985b1468d41..af8a79ce8736 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -667,7 +667,7 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var
void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
{
unsigned int i;
- u8 max_AMPDU_len, min_MPDU_spacing;
+ u8 max_ampdu_len, min_mpdu_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -689,16 +689,16 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
} else {
/* modify from fw by Thomas 2010/11/17 */
if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x3) > (pIE->data[i] & 0x3))
- max_AMPDU_len = pIE->data[i] & 0x3;
+ max_ampdu_len = pIE->data[i] & 0x3;
else
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) > (pIE->data[i] & 0x1c))
- min_MPDU_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
+ min_mpdu_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
else
- min_MPDU_spacing = pIE->data[i] & 0x1c;
+ min_mpdu_spacing = pIE->data[i] & 0x1c;
- pmlmeinfo->HT_caps.ampdu_params_info = max_AMPDU_len | min_MPDU_spacing;
+ pmlmeinfo->HT_caps.ampdu_params_info = max_ampdu_len | min_mpdu_spacing;
}
}
@@ -729,8 +729,8 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
void HTOnAssocRsp(struct adapter *padapter)
{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
+ u8 max_ampdu_len;
+ u8 min_mpdu_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -748,13 +748,11 @@ void HTOnAssocRsp(struct adapter *padapter)
* AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
* AMPDU_para [4:2]:Min MPDU Start Spacing
*/
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ min_mpdu_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
- min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, &min_mpdu_spacing);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, &max_ampdu_len);
}
void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 952f2ab51347..c37591657bac 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -776,7 +776,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -784,7 +784,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 47352f210c0b..7646167a0b36 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -47,8 +47,6 @@ static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
******************************************/
static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
{
- u8 bcmd_down = false;
- s32 retry_cnts = 100;
u8 h2c_box_num;
u32 msgbox_addr;
u32 msgbox_ex_addr;
@@ -71,39 +69,34 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
goto exit;
/* Pay attention to whether a race condition happens in H2C cmd setting. */
- do {
- h2c_box_num = adapt->HalData->LastHMEBoxNum;
-
- if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
- DBG_88E(" fw read cmd failed...\n");
- goto exit;
- }
-
- *(u8 *)(&h2c_cmd) = ElementID;
-
- if (CmdLen <= 3) {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
- } else {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, 3);
- ext_cmd_len = CmdLen-3;
- memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer+3, ext_cmd_len);
+ h2c_box_num = adapt->HalData->LastHMEBoxNum;
- /* Write Ext command */
- msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++)
- usb_write8(adapt, msgbox_ex_addr+cmd_idx, *((u8 *)(&h2c_cmd_ex)+cmd_idx));
- }
- /* Write command */
- msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++)
- usb_write8(adapt, msgbox_addr+cmd_idx, *((u8 *)(&h2c_cmd)+cmd_idx));
+ if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
+ DBG_88E(" fw read cmd failed...\n");
+ goto exit;
+ }
- bcmd_down = true;
+ *(u8 *)(&h2c_cmd) = ElementID;
- adapt->HalData->LastHMEBoxNum =
- (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
+ if (CmdLen <= 3) {
+ memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, CmdLen);
+ } else {
+ memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, 3);
+ ext_cmd_len = CmdLen - 3;
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer + 3, ext_cmd_len);
+
+ /* Write Ext command */
+ msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++)
+ usb_write8(adapt, msgbox_ex_addr + cmd_idx, *((u8 *)(&h2c_cmd_ex) + cmd_idx));
+ }
+ /* Write command */
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++)
+ usb_write8(adapt, msgbox_addr + cmd_idx, *((u8 *)(&h2c_cmd) + cmd_idx));
- } while ((!bcmd_down) && (retry_cnts--));
+ adapt->HalData->LastHMEBoxNum =
+ (h2c_box_num + 1) % RTL88E_MAX_H2C_BOX_NUMS;
ret = _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 086f98d38cba..57ae0e83dd3e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -552,7 +552,6 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
} else {
pHalData->AntDivCfg = 0;
- pHalData->TRxAntDivType = pHalData->TRxAntDivType; /* The value in the driver setting of device manager. */
}
DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index c0d51ba70a75..1cf8cff9a2a4 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -22,8 +22,7 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
int i, res = _SUCCESS;
struct recv_buf *precvbuf;
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8188eu_recv_tasklet,
+ tasklet_init(&precvpriv->recv_tasklet, rtl8188eu_recv_tasklet,
(unsigned long)padapter);
/* init recv_buf */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index ab94ad9d608a..2808f2b119bf 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -17,8 +17,7 @@ s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
+ tasklet_init(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet,
(unsigned long)adapt);
return _SUCCESS;
}
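Both conversions above bring the callbacks in line with the old-style tasklet prototype, avoiding the undefined-behavior cast through void (*)(unsigned long). The canonical shape, with an illustrative context struct:

static void my_tasklet_fn(unsigned long data)
{
	struct my_ctx *ctx = (struct my_ctx *)data;	/* round-trip the pointer */

	/* ... bottom-half work on ctx ... */
}

tasklet_init(&ctx->tasklet, my_tasklet_fn, (unsigned long)ctx);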
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index c2c7ef974dc5..23251ffa8404 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -43,7 +43,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_tasklet(void *priv);
+void rtl8188eu_recv_tasklet(unsigned long priv);
void rtl8188e_process_phy_info(struct adapter *padapter,
struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 421e9f45306f..c6c2ad20d9cf 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -148,7 +148,7 @@ void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
#define hal_xmit_handler rtl8188eu_xmit_buf_handler
-void rtl8188eu_xmit_tasklet(void *priv);
+void rtl8188eu_xmit_tasklet(unsigned long priv);
bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 3ec53761e9fd..7a9c8ff0daa9 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -82,7 +82,6 @@ u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data);
void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset,
u16 _size_byte, u8 *pbuf);
-void efuse_power_switch(struct adapter *adapt, u8 write, u8 pwrstate);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data);
bool Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data);
void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index dc685a14aeb8..6165adafc451 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -354,6 +354,6 @@ void rtw_free_all_stainfo(struct adapter *adapt);
struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index ec5835d1aa8c..710c33fd4965 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -148,17 +148,10 @@ static char *translate_scan(struct adapter *padapter,
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
} else {
- if (pnetwork->network.Configuration.DSConfig > 14) {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
- } else {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
- }
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
}
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
@@ -650,17 +643,10 @@ static int rtw_wx_get_name(struct net_device *dev,
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
} else {
- if (pcur_bss->Configuration.DSConfig > 14) {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
- } else {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
- }
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
}
} else {
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index aaab0d577453..3cd6da1f843d 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -773,10 +773,10 @@ void usb_write_port_cancel(struct adapter *padapter)
}
}
-void rtl8188eu_recv_tasklet(void *priv)
+void rtl8188eu_recv_tasklet(unsigned long priv)
{
struct sk_buff *pskb;
- struct adapter *adapt = priv;
+ struct adapter *adapt = (struct adapter *)priv;
struct recv_priv *precvpriv = &adapt->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
@@ -792,9 +792,9 @@ void rtl8188eu_recv_tasklet(void *priv)
}
}
-void rtl8188eu_xmit_tasklet(void *priv)
+void rtl8188eu_xmit_tasklet(unsigned long priv)
{
- struct adapter *adapt = priv;
+ struct adapter *adapt = (struct adapter *)priv;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index ef92ce957466..980b850d729a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1686,11 +1686,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
static u32 last_beacon_adc_pwdb;
struct rtllib_hdr_3addr *hdr;
u16 sc;
- unsigned int frag, seq;
+ unsigned int seq;
hdr = (struct rtllib_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
- frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
curr_st->Seq_Num = seq;
if (!prev_st->bIsAMPDU)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index f932cb15e4e5..dace81a7d1ba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -715,8 +715,8 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
if ((wireless_mode == WIRELESS_MODE_N_24G) ||
(wireless_mode == WIRELESS_MODE_N_5G)) {
priv->rtllib->pHTInfo->bEnableHT = 1;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
- __func__, wireless_mode);
+ RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
+ __func__, wireless_mode);
} else {
priv->rtllib->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
@@ -1616,14 +1616,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb_push(skb, priv->rtllib->tx_headroom);
ret = _rtl92e_tx(dev, skb);
- if (ret != 0)
- kfree_skb(skb);
if (queue_index != MGNT_QUEUE) {
priv->rtllib->stats.tx_bytes += (skb->len -
priv->rtllib->tx_headroom);
priv->rtllib->stats.tx_packets++;
}
+
+ if (ret != 0)
+ kfree_skb(skb);
}
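Moving kfree_skb() below the stats update fixes a use-after-free: on a failed transmit the old order freed the skb and then read skb->len. A hedged reduction of the bug and the fix:

int ret = my_tx(dev, skb);
unsigned int len = skb->len;	/* read while the skb is still alive */

if (ret != 0)
	kfree_skb(skb);		/* free strictly last */
stats->tx_bytes += len;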
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index f2f7529e7c80..6e2f620afd14 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2044,8 +2044,9 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
-static inline void rtllib_sta_ps(struct rtllib_device *ieee)
+static inline void rtllib_sta_ps(unsigned long data)
{
+ struct rtllib_device *ieee = (struct rtllib_device *)data;
u64 time;
short sleep;
unsigned long flags, flags2;
@@ -3027,9 +3028,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) rtllib_sta_ps,
- (unsigned long)ieee);
+ tasklet_init(&ieee->ps_task, rtllib_sta_ps, (unsigned long)ieee);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 5c33bcb0db2e..00fea127bdc3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1620,7 +1620,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
#endif
@@ -1647,7 +1647,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 33a6af7aad22..90692db81b71 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1683,8 +1683,9 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 1;
}
-static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(unsigned long data)
{
+ struct ieee80211_device *ieee = (struct ieee80211_device *)data;
u32 th, tl;
short sleep;
@@ -2331,7 +2332,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
/* ensure no-one starts an associating process (thus setting
* the ieee->state to ieee80211_ASSOCIATING) while we
- * have just cheked it and we are going to enable scan.
+ * have just checked it and we are going to enable scan.
* The ieee80211_new_net function is always called with
* lock held (from both ieee80211_softmac_check_all_nets and
* the rx path), so we cannot be in the middle of such function
@@ -2593,9 +2594,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) ieee80211_sta_ps,
- (unsigned long)ieee);
+ tasklet_init(&ieee->ps_task, ieee80211_sta_ps, (unsigned long)ieee);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 9dd5c04181ea..33c596f9ec96 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -109,7 +109,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
/* Add basic and extended rates */
max_rate = 0;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len; ) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
@@ -119,12 +119,12 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
@@ -214,7 +214,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index b169460b9f26..63e0f7b1b852 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -43,8 +43,8 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
if (priv->card_8192_version == VERSION_819XU_A ||
- priv->card_8192_version ==
- VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
+ priv->card_8192_version == VERSION_819XU_B) {
+ /* 8256 D-cut, E-cut, xiong: consider it later! */
rtl8192_phy_SetRFReg(dev,
(enum rf90_radio_path_e)eRFPath,
0x0b, bMask12Bits, 0x100); /* phy para:1ba */
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 2821411878ce..7e2cabd16e88 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -98,8 +98,6 @@ static char *ifname = "wlan%d";
static int hwwep = 1; /* default use hw. set 0 to use software security */
static int channels = 0x3fff;
-
-
module_param(ifname, charp, 0644);
module_param(hwwep, int, 0644);
module_param(channels, int, 0644);
@@ -112,7 +110,6 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void rtl8192_usb_disconnect(struct usb_interface *intf);
-
static struct usb_driver rtl8192_usb_driver = {
.name = RTL819XU_MODULE_NAME, /* Driver name */
.id_table = rtl8192_usb_id_tbl, /* PCI_ID table */
@@ -122,7 +119,6 @@ static struct usb_driver rtl8192_usb_driver = {
.resume = NULL, /* PM resume fn */
};
-
struct CHANNEL_LIST {
u8 Channel[32];
u8 Len;
@@ -207,9 +203,6 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
}
}
-
-
-
static void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
@@ -297,7 +290,6 @@ int write_nic_byte(struct net_device *dev, int indx, u8 data)
return 0;
}
-
int write_nic_word(struct net_device *dev, int indx, u16 data)
{
int status;
@@ -324,7 +316,6 @@ int write_nic_word(struct net_device *dev, int indx, u16 data)
return 0;
}
-
int write_nic_dword(struct net_device *dev, int indx, u32 data)
{
int status;
@@ -343,7 +334,6 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
usbdata, 4, HZ / 2);
kfree(usbdata);
-
if (status < 0) {
netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
@@ -352,8 +342,6 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
return 0;
}
-
-
int read_nic_byte(struct net_device *dev, int indx, u8 *data)
{
int status;
@@ -379,8 +367,6 @@ int read_nic_byte(struct net_device *dev, int indx, u8 *data)
return 0;
}
-
-
int read_nic_word(struct net_device *dev, int indx, u16 *data)
{
int status;
@@ -628,13 +614,13 @@ static void rtl8192_proc_init_one(struct net_device *dev)
return;
proc_create_single("stats-rx", S_IFREG | S_IRUGO, dir,
- proc_get_stats_rx);
+ proc_get_stats_rx);
proc_create_single("stats-tx", S_IFREG | S_IRUGO, dir,
- proc_get_stats_tx);
+ proc_get_stats_tx);
proc_create_single("stats-ap", S_IFREG | S_IRUGO, dir,
- proc_get_stats_ap);
+ proc_get_stats_ap);
proc_create_single("registers", S_IFREG | S_IRUGO, dir,
- proc_get_registers);
+ proc_get_registers);
}
static void rtl8192_proc_remove_one(struct net_device *dev)
@@ -788,7 +774,6 @@ void rtl8192_set_rxconf(struct net_device *dev)
rxconf = rxconf | RCR_CBSSID;
}
-
if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
rxconf = rxconf | RCR_AICV;
rxconf = rxconf | RCR_APWRMGT;
@@ -797,7 +782,6 @@ void rtl8192_set_rxconf(struct net_device *dev)
if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
rxconf = rxconf | RCR_ACRC32;
-
rxconf = rxconf & ~RX_FIFO_THRESHOLD_MASK;
rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE << RX_FIFO_THRESHOLD_SHIFT);
rxconf = rxconf & ~MAX_RX_DMA_MASK;
@@ -901,13 +885,11 @@ static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
return status;
}
-
static void rtl8192_data_hard_stop(struct net_device *dev)
{
/* FIXME !! */
}
-
static void rtl8192_data_hard_resume(struct net_device *dev)
{
/* FIXME !! */
@@ -951,7 +933,6 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
-
spin_lock_irqsave(&priv->tx_lock, flags);
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
@@ -1123,7 +1104,6 @@ static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
}
}
-
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
@@ -1188,7 +1168,6 @@ static void rtl8192_net_update(struct net_device *dev)
*/
void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
-
}
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
@@ -1232,6 +1211,8 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
return 0;
DMESGE("Error TX CMD URB, error %d", status);
+ dev_kfree_skb(skb);
+ usb_free_urb(tx_urb);
return -1;
}
@@ -1389,7 +1370,6 @@ static u8 MRateToHwRate8190Pci(u8 rate)
return ret;
}
-
static u8 QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
{
u8 tmp_Short;
@@ -1422,7 +1402,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
(struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
- int status;
+ int status, rt = -1;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
unsigned int idx_pipe;
@@ -1566,8 +1546,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
if (bSend0Byte) {
tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero)
- return -ENOMEM;
+ if (!tx_urb_zero) {
+ rt = -ENOMEM;
+ goto error;
+ }
usb_fill_bulk_urb(tx_urb_zero, udev,
usb_sndbulkpipe(udev, idx_pipe),
&zero, 0, tx_zero_isr, dev);
@@ -1577,7 +1559,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
"Error TX URB for zero byte %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+ goto error;
}
}
netif_trans_update(dev);
@@ -1588,7 +1570,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+
+error:
+ dev_kfree_skb_any(skb);
+ usb_free_urb(tx_urb);
+ usb_free_urb(tx_urb_zero);
+ return rt;
}
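Together with the rtl819xU_tx_cmd() hunk above, this turns the scattered failure returns into a single cleanup label, so the skb and both URBs are released exactly once on any error. The pattern leans on two documented guarantees: usb_free_urb() ignores a NULL argument, and dev_kfree_skb_any() is safe from both process and softirq context. A condensed sketch of the shape (names are illustrative and the fill/submit step is elided):

#include <linux/usb.h>
#include <linux/skbuff.h>

static int tx_sketch(struct sk_buff *skb)
{
	struct urb *urb = NULL, *urb_zero = NULL;
	int rt = -1;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		rt = -ENOMEM;
		goto error;
	}
	/* usb_fill_bulk_urb() + usb_submit_urb() elided; on a submit
	 * failure, fall through to the error label.
	 */
	return 0;	/* on success the USB core owns the URB until completion */

error:
	dev_kfree_skb_any(skb);		/* safe in any context */
	usb_free_urb(urb);		/* NULL-safe */
	usb_free_urb(urb_zero);		/* NULL-safe */
	return rt;
}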
static short rtl8192_usb_initendpoints(struct net_device *dev)
@@ -1742,7 +1729,6 @@ static const struct ieee80211_qos_parameters def_qos_parameters = {
{0, 0, 0, 0} /* tx_op_limit */
};
-
static void rtl8192_update_beacon(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv,
@@ -1913,15 +1899,12 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
-
return 0;
}
-
-static int rtl8192_handle_assoc_response(
- struct net_device *dev,
- struct ieee80211_assoc_response_frame *resp,
- struct ieee80211_network *network)
+static int rtl8192_handle_assoc_response(struct net_device *dev,
+ struct ieee80211_assoc_response_frame *resp,
+ struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1929,7 +1912,6 @@ static int rtl8192_handle_assoc_response(
return 0;
}
-
static void rtl8192_update_ratr_table(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2211,14 +2193,13 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
static void rtl819x_watchdog_wqcallback(struct work_struct *work);
-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+static void rtl8192_irq_rx_tasklet(unsigned long data);
/* init tasklet and wait_queue here. only 2.6 above kernel is considered */
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-
INIT_WORK(&priv->reset_wq, rtl8192_restart);
INIT_DELAYED_WORK(&priv->watch_dog_wq,
@@ -2233,8 +2214,7 @@ static void rtl8192_init_priv_task(struct net_device *dev)
InitialGainOperateWorkItemCallBack);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
- tasklet_init(&priv->irq_rx_tasklet,
- (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
+ tasklet_init(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet,
(unsigned long)priv);
}
@@ -2515,7 +2495,6 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
break;
}
-
if (priv->rf_type == RF_1T2R)
RT_TRACE(COMP_EPROM, "\n1T2R config\n");
else
@@ -2668,7 +2647,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
/* Set Auto Rate fallback control */
}
-
/* InitializeAdapter and PhyCfg */
static bool rtl8192_adapter_start(struct net_device *dev)
{
@@ -2803,14 +2781,12 @@ static bool rtl8192_adapter_start(struct net_device *dev)
RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __func__);
}
-
if (priv->ieee80211->FwRWRF)
/* We can force firmware to do RF-R/W */
priv->Rf_Mode = RF_OP_By_FW;
else
priv->Rf_Mode = RF_OP_By_SW_3wire;
-
rtl8192_phy_updateInitGain(dev);
/*--set CCK and OFDM Block "ON"--*/
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
@@ -2865,7 +2841,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
}
write_nic_byte(dev, 0x87, 0x0);
-
return init_status;
}
@@ -2996,7 +2971,6 @@ static RESET_TYPE RxCheckStuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-
/**
* This function is called by Checkforhang to check whether we should
* ask OS to reset driver
@@ -3052,8 +3026,6 @@ static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl8192_up(struct net_device *dev);
static int rtl8192_close(struct net_device *dev);
-
-
static void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
@@ -3070,7 +3042,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
RT_TRACE(COMP_SEC, "%s:\n", __func__);
-
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
for (EntryId = 0; EntryId < 4; EntryId++) {
@@ -3096,8 +3067,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
MacAddr, 0, NULL);
}
-
-
if (priv->ieee80211->group_key_type == KEY_TYPE_TKIP) {
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
@@ -3134,7 +3103,6 @@ static void rtl819x_ifsilentreset(struct net_device *dev)
int reset_status = 0;
struct ieee80211_device *ieee = priv->ieee80211;
-
/* If we need to check CCK stop, please uncomment this line. */
/* bStuck = Adapter->HalFunc.CheckHWStopHandler(Adapter); */
@@ -3258,7 +3226,6 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
}
}
-
static void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -3369,7 +3336,6 @@ static int _rtl8192_up(struct net_device *dev)
return 0;
}
-
static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3381,7 +3347,6 @@ static int rtl8192_open(struct net_device *dev)
return ret;
}
-
int rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3392,7 +3357,6 @@ int rtl8192_up(struct net_device *dev)
return _rtl8192_up(dev);
}
-
static int rtl8192_close(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3440,7 +3404,6 @@ int rtl8192_down(struct net_device *dev)
deinit_hal_dm(dev);
del_timer_sync(&priv->watch_dog_timer);
-
ieee80211_softmac_stop_protocol(priv->ieee80211);
memset(&priv->ieee80211->current_network, 0,
offsetof(struct ieee80211_network, list));
@@ -3449,7 +3412,6 @@ int rtl8192_down(struct net_device *dev)
return 0;
}
-
void rtl8192_commit(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3495,7 +3457,6 @@ static void r8192_set_multicast(struct net_device *dev)
priv->promisc = promisc;
}
-
static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3525,7 +3486,6 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
mutex_lock(&priv->wx_mutex);
-
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
ret = -EINVAL;
goto out;
@@ -3778,7 +3738,6 @@ static long rtl819x_translate_todbm(u8 signal_strength_index)
return signal_power;
}
-
/* We can not declare RSSI/EVM total value of sliding window to
* be a local static. Otherwise, it may increase when we return from S3/S4. The
* value will be kept in memory or disk. Declare the value in the adaptor
@@ -3841,7 +3800,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
-
/* only rtl8190 supported
* rtl8190_process_cck_rxpathsel(priv,pprevious_stats);
*/
@@ -3851,17 +3809,15 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
/* record the general signal strength to the sliding window. */
-
/* <2> Showed on UI for engineering
* hardware does not provide rssi information for each rf path in CCK
*/
if (!pprevious_stats->bIsCCK &&
(pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA)) {
for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(
- priv->ieee80211->dev, rfpath))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev,
+ rfpath))
continue;
-
if (priv->stats.rx_rssi_percentage[rfpath] == 0)
priv->stats.rx_rssi_percentage[rfpath] =
pprevious_stats->RxMIMOSignalStrength[rfpath];
@@ -3881,7 +3837,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
}
}
-
/* Check PWDB. */
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
pprevious_stats->bIsCCK ? "CCK" : "OFDM",
@@ -3908,7 +3863,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
pprevious_stats->bIsCCK ? "CCK" : "OFDM",
pprevious_stats->RxPWDBAll);
-
if (pprevious_stats->bPacketToSelf ||
pprevious_stats->bPacketBeacon ||
pprevious_stats->bToSelfBA) {
@@ -4083,7 +4037,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
u8 rf_rx_num = 0;
u8 sq;
-
priv->stats.numqry_phystatus++;
is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
@@ -4192,8 +4145,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
else
continue;
- if (!rtl8192_phy_CheckIsLegalRFPath(
- priv->ieee80211->dev, i))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
continue;
rx_pwr[i] =
@@ -4214,7 +4166,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
precord_stats->RxMIMOSignalStrength[i] = (u8)RSSI;
}
-
/* (2)PWDB, Average PWDB calculated by hardware
* (for rate adaptive)
*/
@@ -4259,7 +4210,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
evm & 0xff;
}
-
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
@@ -4288,16 +4238,14 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
}
} /* QueryRxPhyStatus8190Pci */
-static void rtl8192_record_rxdesc_forlateruse(
- struct ieee80211_rx_stats *psrc_stats,
- struct ieee80211_rx_stats *ptarget_stats)
+static void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
+ struct ieee80211_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
ptarget_stats->Seq_Num = psrc_stats->Seq_Num;
}
-
static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
struct ieee80211_rx_stats *pstats,
struct rx_drvinfo_819x_usb *pdrvinfo)
@@ -4341,8 +4289,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
bToSelfBA = true;
}
-
-
if (bpacket_match_bssid)
priv->stats.numpacket_matchbssid++;
if (bpacket_toself)
@@ -4383,7 +4329,6 @@ UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
/* 1: short preamble/GI, 0: long preamble/GI */
u32 preamble_guardinterval;
-
if (stats->bCRC)
rcvType = 2;
else if (stats->bICV)
@@ -4491,7 +4436,6 @@ UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
-
static void query_rxdesc_status(struct sk_buff *skb,
struct ieee80211_rx_stats *stats,
bool bIsRxAggrSubframe)
@@ -4526,8 +4470,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
* Driver info are written to the RxBuffer following rx desc
*/
if (stats->RxDrvInfoSize != 0) {
- driver_info = (struct rx_drvinfo_819x_usb *)(
- skb->data
+ driver_info = (struct rx_drvinfo_819x_usb *)(skb->data
+ sizeof(struct rx_desc_819x_usb)
+ stats->RxBufShift
);
@@ -4556,7 +4499,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
stats->bShortPreamble = driver_info->SPLCP;
-
UpdateReceivedRateHistogramStatistics8190(dev, stats);
stats->bIsAMPDU = (driver_info->PartAggr == 1);
@@ -4569,7 +4511,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
/* Rx A-MPDU */
if (driver_info->FirstAGGR == 1 || driver_info->PartAggr == 1)
RT_TRACE(COMP_RXDESC,
- "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
+ "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
}
@@ -4636,9 +4578,8 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
}
}
-static void rtl819xusb_process_received_packet(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+static void rtl819xusb_process_received_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -4671,8 +4612,6 @@ static void rtl819xusb_process_received_packet(
#ifdef SW_CRC_CHECK
SwCrcCheck();
#endif
-
-
}
static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
@@ -4691,7 +4630,6 @@ static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
stats->ntotalfrag = 1;
}
-
static void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
@@ -4716,8 +4654,9 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
}
}
-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_rx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
struct sk_buff *skb;
struct rtl8192_rx_info *info;
@@ -4759,7 +4698,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_start_xmit = ieee80211_xmit,
};
-
/****************************************************************************
* ---------------------------- USB_STUFF---------------------------
*****************************************************************************/
@@ -4815,7 +4753,6 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
rtl8192_proc_init_one(dev);
-
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -4843,7 +4780,6 @@ static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
cancel_work_sync(&priv->qos_activate);
}
-
static void rtl8192_usb_disconnect(struct usb_interface *intf)
{
struct net_device *dev = usb_get_intfdata(intf);
@@ -4907,7 +4843,6 @@ static int __init rtl8192_usb_module_init(void)
return usb_register(&rtl8192_usb_driver);
}
-
static void __exit rtl8192_usb_module_exit(void)
{
usb_deregister(&rtl8192_usb_driver);
@@ -4949,7 +4884,6 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
write_nic_byte(dev, SECR, SECR_value);
}
-
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
{
@@ -4970,7 +4904,6 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
else
usConfig |= BIT(15) | (KeyType << 2) | KeyIndex;
-
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
TargetCommand |= BIT(31) | BIT(16);
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index e064f43fd8b6..bc98cdaf61ec 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -169,19 +169,20 @@ static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
- /* 87B have to S/W beacon for DTM encryption_cmn. */
- if (priv->ieee80211->current_network.mode == IEEE_A ||
- priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G &&
- (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
- tx_rate = 60;
- DMESG("send beacon frame tx rate is 6Mbpm\n");
- } else {
- tx_rate = 10;
- DMESG("send beacon frame tx rate is 1Mbpm\n");
- }
- rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
+ /* 87B have to S/W beacon for DTM encryption_cmn. */
+ if (priv->ieee80211->current_network.mode == IEEE_A ||
+ priv->ieee80211->current_network.mode == IEEE_N_5G ||
+ (priv->ieee80211->current_network.mode == IEEE_N_24G &&
+ (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
+ tx_rate = 60;
+		DMESG("send beacon frame tx rate is 6Mbps\n");
+	} else {
+		tx_rate = 10;
+		DMESG("send beacon frame tx rate is 1Mbps\n");
+ }
+
+ rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
}
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index db99129d3169..5901026949f2 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -75,7 +75,7 @@ static void BlinkWorkItemCallback(struct work_struct *work);
* Initialize an LED_871x object.
*/
static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
- enum LED_PIN_871x LedPin)
+ enum LED_PIN_871x LedPin)
{
pLed->padapter = padapter;
pLed->LedPin = LedPin;
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 9901815604f4..00ea0beb12c9 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -33,7 +33,7 @@ static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-static void recv_tasklet(void *priv);
+static void recv_tasklet(unsigned long priv);
void r8712_init_recv_priv(struct recv_priv *precvpriv,
struct _adapter *padapter)
@@ -61,13 +61,12 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
precvbuf->ref_cnt = 0;
precvbuf->adapter = padapter;
list_add_tail(&precvbuf->list,
- &(precvpriv->free_recv_buf_queue.queue));
+ &(precvpriv->free_recv_buf_queue.queue));
precvbuf++;
}
precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))recv_tasklet,
- (unsigned long)padapter);
+ tasklet_init(&precvpriv->recv_tasklet, recv_tasklet,
+ (unsigned long)padapter);
skb_queue_head_init(&precvpriv->rx_skb_queue);
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
@@ -119,7 +118,7 @@ void r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
}
void r8712_free_recvframe(union recv_frame *precvframe,
- struct __queue *pfree_recv_queue)
+ struct __queue *pfree_recv_queue)
{
unsigned long irqL;
struct _adapter *padapter = precvframe->u.hdr.adapter;
@@ -140,7 +139,7 @@ void r8712_free_recvframe(union recv_frame *precvframe,
}
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
- struct recv_stat *prxstat)
+ struct recv_stat *prxstat)
{
u16 drvinfo_sz;
@@ -177,7 +176,7 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
/*perform defrag*/
static union recv_frame *recvframe_defrag(struct _adapter *adapter,
- struct __queue *defrag_q)
+ struct __queue *defrag_q)
{
struct list_head *plist, *phead;
u8 wlanhdr_offset;
@@ -289,7 +288,6 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
r8712_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
}
-
}
if ((ismfrag == 0) && (fragnum != 0)) {
/* the last fragment frame
@@ -379,26 +377,26 @@ static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
/* convert hdr + possible LLC headers into Ethernet header */
eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7];
if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
- eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType
*/
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
- ETH_ALEN);
+ ETH_ALEN);
} else {
__be16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_skb->len);
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
- ETH_ALEN);
+ ETH_ALEN);
}
/* Indicate the packets to upper layer */
if (sub_skb) {
@@ -438,7 +436,6 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
r8712_event_handle(padapter, (__le32 *)poffset);
poffset += (cmd_len + 8);/*8 bytes alignment*/
} while (le32_to_cpu(voffset) & BIT(31));
-
}
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
@@ -472,7 +469,7 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
}
static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
struct list_head *phead, *plist;
union recv_frame *pnextrframe;
@@ -499,8 +496,8 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
}
int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
- struct recv_reorder_ctrl *preorder_ctrl,
- int bforced)
+ struct recv_reorder_ctrl *preorder_ctrl,
+ int bforced)
{
struct list_head *phead, *plist;
union recv_frame *prframe;
@@ -530,7 +527,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
plist = plist->next;
list_del_init(&(prframe->u.hdr.list));
if (SN_EQUAL(preorder_ctrl->indicate_seq,
- pattrib->seq_num))
+ pattrib->seq_num))
preorder_ctrl->indicate_seq =
(preorder_ctrl->indicate_seq + 1) % 4096;
/*indicate this recv_frame*/
@@ -555,7 +552,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
}
static int recv_indicatepkt_reorder(struct _adapter *padapter,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
unsigned long irql;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
@@ -624,7 +621,7 @@ void r8712_reordering_ctrl_timeout_handler(void *pcontext)
}
static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
int retval = _SUCCESS;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1080,10 +1077,10 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
} while ((transfer_len > 0) && pkt_cnt > 0);
}
-static void recv_tasklet(void *priv)
+static void recv_tasklet(unsigned long priv)
{
struct sk_buff *pskb;
- struct _adapter *padapter = priv;
+ struct _adapter *padapter = (struct _adapter *)priv;
struct recv_priv *precvpriv = &padapter->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 944336e0d2e2..363b82e3e7c6 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -142,9 +142,9 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "wpa_ie=");
for (i = 0; i < wpa_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ n += scnprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", wpa_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
+			if (n == MAX_WPA_IE_LEN - 1)
break;
}
memset(iwe, 0, sizeof(*iwe));
@@ -162,9 +162,9 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "rsn_ie=");
for (i = 0; i < rsn_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ n += scnprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", rsn_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
+			if (n == MAX_WPA_IE_LEN - 1)
break;
}
memset(iwe, 0, sizeof(*iwe));
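The break condition has to change along with the return-value semantics. With snprintf(), n accumulated would-be lengths and could legitimately reach or exceed MAX_WPA_IE_LEN. With scnprintf(), n counts stored bytes, and a truncated write fills the buffer to exactly its capacity minus the NUL, so n saturates at MAX_WPA_IE_LEN - 1 and the old n >= MAX_WPA_IE_LEN test became unreachable; the loop would just spin through the remaining bytes writing nothing. Testing for the saturation value restores the early exit. A sketch with an illustrative size:

#include <linux/kernel.h>
#include <linux/types.h>

#define SZ 32	/* illustrative, stands in for MAX_WPA_IE_LEN */

static int dump_ie(const u8 *ie, int len)
{
	char buf[SZ];
	int n, i;

	n = scnprintf(buf, SZ, "hdr=");
	for (i = 0; i < len; i++) {
		n += scnprintf(buf + n, SZ - n, "%02x", ie[i]);
		if (n == SZ - 1)	/* a truncated write lands exactly here */
			break;
	}
	return n;
}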
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index aa8f8500cbb2..29b85330815f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -231,8 +231,7 @@ end_of_mp_stop_test:
return _SUCCESS;
}
-uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -283,17 +282,15 @@ uint oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
if (mp_stop_test(Adapter) == _FAIL)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -328,8 +325,7 @@ uint oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_tx_power_control_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -347,71 +343,61 @@ uint oid_rt_pro_set_tx_power_control_hdl(
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_tx_packet_sent_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.tx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_rx_packet_received_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.rx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_rx_packet_crc32_error_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.rx_crcerrpktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
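The three query handlers above share one template, and the rewrite is the standard early-return refactor: the status local that was assigned and then returned from a single exit point disappears, each precondition failure returns its code on the spot, and the success path reads straight down. Roughly the resulting shape, as a hypothetical helper over the same driver types and RNDIS codes:

/* Sketch of the common shape of the query handlers after the refactor. */
static uint query_u32(struct oid_par_priv *priv, u32 value)
{
	if (priv->type_of_oid != QUERY_OID)
		return RNDIS_STATUS_NOT_ACCEPTED;
	if (priv->information_buf_len != sizeof(u32))
		return RNDIS_STATUS_INVALID_LENGTH;

	*(u32 *)priv->information_buf = value;
	*priv->bytes_rw = priv->information_buf_len;
	return RNDIS_STATUS_SUCCESS;
}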
-uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -422,10 +408,8 @@ uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -435,13 +419,12 @@ uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
Adapter->mppriv.rx_pktcount = 0;
Adapter->mppriv.rx_crcerrpktcount = 0;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -452,8 +435,7 @@ uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -468,8 +450,7 @@ uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -484,8 +465,7 @@ uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -497,8 +477,7 @@ uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -511,8 +490,7 @@ uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -525,8 +503,7 @@ uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -539,8 +516,7 @@ uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -553,8 +529,7 @@ uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_read_register_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -735,8 +710,7 @@ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
}
/*----------------------------------------------------------------------*/
-uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -829,8 +803,7 @@ uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index cc5809e49e35..f0b85338b567 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -143,9 +143,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
INIT_WORK(&padapter->wk_filter_rx_ff0, r8712_SetFilter);
alloc_hwxmits(padapter);
init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(unsigned long))r8712_xmit_bh,
- (unsigned long)padapter);
+ tasklet_init(&pxmitpriv->xmit_tasklet, r8712_xmit_bh,
+ (unsigned long)padapter);
return 0;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index b14da38bf652..f227828094bf 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -277,7 +277,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe);
int r8712_xmit_enqueue(struct _adapter *padapter,
struct xmit_frame *pxmitframe);
void r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
-void r8712_xmit_bh(void *priv);
+void r8712_xmit_bh(unsigned long priv);
void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
struct xmit_buf *pxmitbuf);
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 9d290bc2fdb7..0045da3bb69a 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -308,10 +308,10 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter)
}
}
-void r8712_xmit_bh(void *priv)
+void r8712_xmit_bh(unsigned long priv)
{
int ret = false;
- struct _adapter *padapter = priv;
+ struct _adapter *padapter = (struct _adapter *)priv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
if (padapter->driver_stopped ||
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 6d18d23acdc0..7117d16a30f9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -216,8 +216,9 @@ void expire_timeout_chk(struct adapter *padapter)
/* check auth_queue */
#ifdef DBG_EXPIRATION_CHK
if (phead != plist) {
- DBG_871X(FUNC_NDEV_FMT" auth_list, cnt:%u\n"
- , FUNC_NDEV_ARG(padapter->pnetdev), pstapriv->auth_list_cnt);
+ DBG_871X(FUNC_NDEV_FMT " auth_list, cnt:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ pstapriv->auth_list_cnt);
}
#endif
while (phead != plist) {
@@ -1446,14 +1447,14 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1496,7 +1497,7 @@ static int rtw_ap_set_key(
}
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
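kfree() takes a const void * argument, so the (u8 *) and (unsigned char *) casts removed here (and in rtw_cmd.c below) were pure noise; the hunk also switches ph2c == NULL to the idiomatic !ph2c. In miniature, with a generic allocation standing in for the driver's own helpers:

#include <linux/slab.h>

static int alloc_example(void)
{
	struct cmd_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)		/* preferred over obj == NULL */
		return -ENOMEM;
	/* ... use obj ... */
	kfree(obj);		/* no cast needed: kfree() takes const void * */
	return 0;
}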
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 8d93c2f26890..13a9b54b4561 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -372,13 +372,13 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
if ((pcmd->cmdcode != _JoinBss_CMD_) &&
(pcmd->cmdcode != _CreateBss_CMD_)) {
/* free parmbuf in cmd_obj */
- kfree((unsigned char *)pcmd->parmbuf);
+ kfree(pcmd->parmbuf);
}
if (pcmd->rsp != NULL) {
if (pcmd->rspsz != 0) {
/* free rsp in cmd_obj */
- kfree((unsigned char *)pcmd->rsp);
+ kfree(pcmd->rsp);
}
}
@@ -507,19 +507,9 @@ post_process:
cmd_process_time = jiffies_to_msecs(jiffies - cmd_start_time);
if (cmd_process_time > 1000) {
- if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
- DBG_871X(ADPT_FMT" cmd =%d process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- } else if (pcmd->cmdcode == GEN_CMD_CODE(_Set_MLME_EVT)) {
- DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- } else {
- DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- }
+		DBG_871X(ADPT_FMT " cmd=%d process_time=%lu > 1 sec\n",
+ ADPT_ARG(pcmd->padapter), pcmd->cmdcode,
+ cmd_process_time);
}
/* call callback function for post-processed */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 34adf5789c98..71fcb466019a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -19,7 +19,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
int i;
u8 *pbuf;
struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int res = _SUCCESS;
pmlmepriv->nic_hdl = (u8 *)padapter;
@@ -40,7 +40,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
pbuf = vzalloc(array_size(MAX_BSS_CNT, sizeof(struct wlan_network)));
- if (pbuf == NULL) {
+ if (!pbuf) {
res = _FAIL;
goto exit;
}
@@ -112,9 +112,8 @@ void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
if (pmlmepriv) {
rtw_free_mlme_priv_ie_data(pmlmepriv);
- if (pmlmepriv->free_bss_buf) {
+ if (pmlmepriv->free_bss_buf)
vfree(pmlmepriv->free_bss_buf);
- }
}
}
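The brace removal is fine, but the guard it leaves behind is itself redundant: vfree(), like kfree(), is documented to accept NULL and do nothing, so a follow-up could shrink this to a single unconditional call:

	vfree(pmlmepriv->free_bss_buf);	/* vfree(NULL) is a no-op */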
@@ -185,10 +184,10 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
/* _irqL irqL; */
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
- if (pnetwork->fixed == true)
+ if (pnetwork->fixed)
return;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
@@ -209,7 +208,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
pmlmepriv->num_of_scanned--;
-
/* DBG_871X("_rtw_free_network:SSID =%s\n", pnetwork->network.Ssid.Ssid); */
spin_unlock_bh(&free_queue->lock);
@@ -220,10 +218,10 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
- if (pnetwork->fixed == true)
+ if (pnetwork->fixed)
return;
/* spin_lock_irqsave(&free_queue->lock, irqL); */
@@ -301,12 +299,8 @@ void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
spin_unlock_bh(&scanned_queue->lock);
}
-
-
-
sint rtw_if_up(struct adapter *padapter)
{
-
sint res;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
@@ -318,7 +312,6 @@ sint rtw_if_up(struct adapter *padapter)
return res;
}
-
void rtw_generate_random_ibss(u8 *pibss)
{
unsigned long curtime = jiffies;
@@ -329,7 +322,6 @@ void rtw_generate_random_ibss(u8 *pibss)
pibss[3] = (u8)(curtime & 0xff) ;/* p[0]; */
pibss[4] = (u8)((curtime>>8) & 0xff) ;/* p[1]; */
pibss[5] = (u8)((curtime>>16) & 0xff) ;/* p[2]; */
- return;
}
u8 *rtw_get_capability_from_ie(u8 *ie)
@@ -337,7 +329,6 @@ u8 *rtw_get_capability_from_ie(u8 *ie)
return ie + 8 + 2;
}
-
u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
@@ -425,7 +416,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea
memcpy((u8 *)&tmps, rtw_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&tmpd, rtw_get_capability_from_ie(dst->IEs), 2);
-
s_cap = le16_to_cpu(tmps);
d_cap = le16_to_cpu(tmpd);
@@ -467,7 +457,6 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
{
struct list_head *plist, *phead;
-
struct wlan_network *pwlan = NULL;
struct wlan_network *oldest = NULL;
@@ -482,7 +471,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
- if (pwlan->fixed != true) {
+ if (!pwlan->fixed) {
if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
@@ -579,12 +568,8 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
}
}
-
/*
-
Caller must hold pmlmepriv->lock first.
-
-
*/
void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
{
@@ -625,7 +610,6 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
}
-
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information */
/* if (phead == plist) { */
@@ -634,7 +618,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* If there are no more slots, expire the oldest */
/* list_del_init(&oldest->list); */
pnetwork = oldest;
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -655,7 +639,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -739,7 +723,7 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
privacy = pnetwork->network.Privacy;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
return true;
else
return false;
@@ -754,15 +738,13 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPA2PSK) {
p = rtw_get_ie(pnetwork->network.IEs + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pnetwork->network.IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0) {
+ if (p && ie_len > 0)
bselected = true;
- } else {
+ else
bselected = false;
- }
}
}
-
if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
DBG_871X("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
bselected = false;
@@ -773,7 +755,6 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
bselected = false;
}
-
return bselected;
}
@@ -783,7 +764,6 @@ void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_event\n"));
}
-
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
u32 len;
@@ -800,7 +780,6 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
return;
}
-
spin_lock_bh(&pmlmepriv->lock);
/* update IBSS_network 's timestamp */
@@ -823,21 +802,16 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
/* lock pmlmepriv->lock when you accessing network_q */
if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) {
- if (pnetwork->Ssid.Ssid[0] == 0) {
+ if (pnetwork->Ssid.Ssid[0] == 0)
pnetwork->Ssid.SsidLength = 0;
- }
rtw_add_network(adapter, pnetwork);
}
exit:
spin_unlock_bh(&pmlmepriv->lock);
-
- return;
}
-
-
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
u8 timer_cancelled = false;
@@ -868,12 +842,11 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
if (timer_cancelled)
_cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
-
spin_lock_bh(&pmlmepriv->lock);
rtw_set_signal_stat_timer(&adapter->recvpriv);
- if (pmlmepriv->to_join == true) {
+ if (pmlmepriv->to_join) {
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
@@ -896,9 +869,8 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
- if (rtw_createbss_cmd(adapter) != _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
- }
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
pmlmepriv->to_join = false;
}
@@ -1009,7 +981,6 @@ static void find_network(struct adapter *adapter)
else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources : pwlan == NULL\n\n"));
-
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) &&
(adapter->stapriv.asoc_sta_count == 1))
rtw_free_network_nolock(adapter, pwlan);
@@ -1169,9 +1140,8 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
- if (psta == NULL) {
+ if (!psta)
psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
- }
if (psta) { /* update ptarget_sta */
@@ -1189,7 +1159,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta->wireless_mode = pmlmeext->cur_wireless_mode;
psta->raid = networktype_to_raid_ex(padapter, psta);
-
/* sta mode */
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
@@ -1221,7 +1190,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
padapter->securitypriv.wps_ie_len = 0;
}
-
/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
/* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
/* todo: check if AP can send A-MPDU packets */
@@ -1238,7 +1206,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
preorder_ctrl->wsize_b = 64;/* max_ampdu_sz;ex. 32(kbytes) -> wsize_b =32 */
}
-
bmc_sta = rtw_get_bcmc_stainfo(padapter);
if (bmc_sta) {
for (i = 0; i < 16 ; i++) {
@@ -1272,7 +1239,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nfw_state:%x, BSSID:"MAC_FMT"\n"
, get_fwstate(pmlmepriv), MAC_ARG(pnetwork->network.MacAddress)));
-
/* why not use ptarget_wlan?? */
memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
/* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
@@ -1281,7 +1247,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
cur_network->aid = pnetwork->join_res;
-
rtw_set_signal_stat_timer(&padapter->recvpriv);
padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
@@ -1349,12 +1314,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
-
- if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- }
the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
@@ -1377,7 +1340,7 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr == true) {
+ if (the_same_macaddr) {
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
} else {
pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
@@ -1412,11 +1375,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
goto ignore_joinbss_callback;
}
-
/* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
- if (ptarget_sta == NULL) {
+ if (!ptarget_sta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
goto ignore_joinbss_callback;
@@ -1432,7 +1394,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
}
-
/* s5. Cancel assoc_timer */
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
@@ -1506,7 +1467,7 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta, u3
{
u16 media_status_rpt;
- if (psta == NULL)
+ if (!psta)
return;
media_status_rpt = (u16)((psta->mac_id<<8)|mstatus); /* MACID|OPMODE:1 connect */
@@ -1564,7 +1525,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
/* for AD-HOC mode */
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta != NULL) {
+ if (psta) {
/* the sta have been in sta_info_queue => do nothing */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
@@ -1573,7 +1534,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
}
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta == NULL) {
+ if (!psta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
return;
}
@@ -1591,7 +1552,6 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
-
psta->ieee8021x_blocked = false;
spin_lock_bh(&pmlmepriv->lock);
@@ -1612,7 +1572,6 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
spin_unlock_bh(&pmlmepriv->lock);
-
mlmeext_sta_add_event_callback(adapter, psta);
}
@@ -1648,7 +1607,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
return;
-
mlmeext_sta_del_event_callback(adapter);
spin_lock_bh(&pmlmepriv->lock);
@@ -1726,13 +1684,8 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
}
- if (rtw_createbss_cmd(adapter) != _SUCCESS) {
-
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
-
- }
-
-
}
}
@@ -1750,7 +1703,6 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
cpwm_int_hdl(padapter, preportpwrstate);
}
-
void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
{
WMMOnAssocRsp(padapter);
@@ -1840,8 +1792,6 @@ void rtw_mlme_reset_auto_scan_int(struct adapter *adapter)
mlme->auto_scan_int_ms = mlme->roam_scan_int_ms;
} else
mlme->auto_scan_int_ms = 0; /* disabled */
-
- return;
}
static void rtw_auto_scan_handler(struct adapter *padapter)
@@ -1859,7 +1809,7 @@ static void rtw_auto_scan_handler(struct adapter *padapter)
goto exit;
}
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
DBG_871X(FUNC_ADPT_FMT" exit BusyTraffic\n", FUNC_ADPT_ARG(padapter));
goto exit;
}
@@ -1879,20 +1829,20 @@ void rtw_dynamic_check_timer_handler(struct adapter *adapter)
if (!adapter)
return;
- if (adapter->hw_init_completed == false)
+ if (!adapter->hw_init_completed)
return;
- if ((adapter->bDriverStopped == true) || (adapter->bSurpriseRemoved == true))
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- if (adapter->net_closed == true)
+ if (adapter->net_closed)
return;
if (is_primary_adapter(adapter))
DBG_871X("IsBtDisabled =%d, IsBtControlLps =%d\n", hal_btcoex_IsBtDisabled(adapter), hal_btcoex_IsBtControlLps(adapter));
- if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode == true)
- && (hal_btcoex_IsBtControlLps(adapter) == false)
+ if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ && !(hal_btcoex_IsBtControlLps(adapter))
) {
u8 bEnterPS;
@@ -1907,16 +1857,14 @@ void rtw_dynamic_check_timer_handler(struct adapter *adapter)
}
} else {
- if (is_primary_adapter(adapter)) {
+ if (is_primary_adapter(adapter))
rtw_dynamic_chk_wk_cmd(adapter);
- }
}
/* auto site survey */
rtw_auto_scan_handler(adapter);
}
-
inline bool rtw_is_scan_deny(struct adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
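The rtw_dynamic_check_timer_handler() hunk removes "== true" / "== false" comparisons on the driver's flags; checkpatch flags these as error prone. A stand-alone sketch of the direct-test style, with invented flag names:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool driver_stopped = false;
	bool surprise_removed = false;

	/* Test flags directly; "flag == true" is redundant and can
	 * even misfire when a non-bool flag holds a value other than
	 * 0 or 1.
	 */
	if (driver_stopped || surprise_removed)
		return 0;

	printf("adapter still running\n");
	return 0;
}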
@@ -1994,26 +1942,24 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
{
int ret = _FAIL;
struct list_head *phead;
- struct adapter *adapter;
struct __queue *queue = &(mlme->scanned_queue);
struct wlan_network *pnetwork = NULL;
struct wlan_network *candidate = NULL;
- if (mlme->cur_network_scanned == NULL) {
+ if (!mlme->cur_network_scanned) {
rtw_warn_on(1);
return ret;
}
spin_lock_bh(&(mlme->scanned_queue.lock));
phead = get_list_head(queue);
- adapter = (struct adapter *)mlme->nic_hdl;
mlme->pscanned = get_next(phead);
while (phead != mlme->pscanned) {
pnetwork = LIST_CONTAINOR(mlme->pscanned, struct wlan_network, list);
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
ret = _FAIL;
goto exit;
@@ -2031,7 +1977,7 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
ret = _FAIL;
goto exit;
@@ -2064,9 +2010,8 @@ static int rtw_check_join_candidate(struct mlme_priv *mlme
int updated = false;
struct adapter *adapter = container_of(mlme, struct adapter, mlmepriv);
-
/* check bssid, if needed */
- if (mlme->assoc_by_bssid == true) {
+ if (mlme->assoc_by_bssid) {
if (memcmp(competitor->network.MacAddress, mlme->assoc_bssid, ETH_ALEN))
goto exit;
}
@@ -2115,12 +2060,8 @@ exit:
/*
Calling context:
The caller of the sub-routine will be in critical section...
-
The caller must hold the following spinlock
-
pmlmepriv->lock
-
-
*/
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
@@ -2148,7 +2089,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
while (phead != pmlmepriv->pscanned) {
pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
ret = _FAIL;
goto exit;
@@ -2166,7 +2107,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
#ifdef CONFIG_WOWLAN
_clr_fwstate_(pmlmepriv, _FW_LINKED|_FW_UNDER_LINKING);
@@ -2207,14 +2148,14 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
sint res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
psetauthparm = rtw_zmalloc(sizeof(struct setauth_parm));
- if (psetauthparm == NULL) {
- kfree((unsigned char *)pcmd);
+ if (!psetauthparm) {
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
@@ -2227,7 +2168,6 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
pcmd->rsp = NULL;
pcmd->rspsz = 0;
-
INIT_LIST_HEAD(&pcmd->list);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("after enqueue set_auth_cmd, auth_mode =%x\n", psecuritypriv->dot11AuthAlgrthm));
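rtw_set_auth()'s error path above also turns kfree((unsigned char *)pcmd) into kfree(pcmd): the kernel's kfree() takes a const void pointer, so the cast is noise. The same holds for free() in user space, as this sketch (hypothetical struct) shows:

#include <stdlib.h>

struct cmd_obj { int code; };

int main(void)
{
	struct cmd_obj *pcmd = calloc(1, sizeof(*pcmd));

	if (!pcmd)
		return 1;

	/* free(), like the kernel's kfree(), accepts a void pointer,
	 * so casting the argument to (unsigned char *) adds nothing.
	 */
	free(pcmd);
	return 0;
}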
@@ -2247,7 +2187,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
sint res = _SUCCESS;
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
res = _FAIL;
goto exit;
}
@@ -2291,15 +2231,14 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
default:
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm = %x (must be 1 or 2 or 4 or 5)\n", psecuritypriv->dot11PrivacyAlgrthm));
res = _FAIL;
- kfree((unsigned char *)psetkeyparm);
+ kfree(psetkeyparm);
goto exit;
}
-
if (enqueue) {
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
- kfree((unsigned char *)psetkeyparm);
+ if (!pcmd) {
+ kfree(psetkeyparm);
res = _FAIL; /* try again */
goto exit;
}
@@ -2315,7 +2254,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
} else {
setkey_hdl(adapter, (u8 *)psetkeyparm);
- kfree((u8 *) psetkeyparm);
+ kfree(psetkeyparm);
}
exit:
return res;
@@ -2350,7 +2289,6 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
}
-
/* */
/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
/* Added by Annie, 2006-05-07. */
@@ -2679,7 +2617,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SGI_20);
/* Get HT BW */
- if (in_ie == NULL) {
+ if (!in_ie) {
/* TDLS: TODO 20/40 issue */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
operation_bw = padapter->mlmeextpriv.cur_bwmode;
@@ -2794,7 +2732,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
phtpriv->ht_option = true;
- if (in_ie != NULL) {
+ if (in_ie) {
p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
out_len = *pout_len;
@@ -2824,7 +2762,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 cbw40_enable = 0;
-
if (!phtpriv->ht_option)
return;
@@ -2834,7 +2771,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
DBG_871X("+rtw_update_ht_cap()\n");
/* maybe needs check if ap supports rx ampdu. */
- if ((phtpriv->ampdu_enable == false) && (pregistrypriv->ampdu_enable == 1)) {
+ if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) {
if (pregistrypriv->wifi_spec == 1) {
/* remove this part because testbed AP should disable RX AMPDU */
/* phtpriv->ampdu_enable = false; */
@@ -2847,7 +2784,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
/* phtpriv->ampdu_enable = true; */
}
-
/* check Max Rx A-MPDU Size */
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
@@ -2861,7 +2797,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
}
-
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
if (p && len > 0) {
@@ -2961,7 +2896,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
return;
}
- if (psta == NULL) {
+ if (!psta) {
DBG_871X("%s, psta ==NUL\n", __func__);
return;
}
@@ -2971,10 +2906,9 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
return;
}
-
phtpriv = &psta->htpriv;
- if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ if (phtpriv->ht_option && phtpriv->ampdu_enable) {
issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
@@ -2994,10 +2928,8 @@ void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len
u8 cap_content[8] = {0};
u8 *pframe;
-
- if (phtpriv->bss_coexist) {
+ if (phtpriv->bss_coexist)
SET_EXT_CAPABILITY_ELE_BSS_COEXIST(cap_content, 1);
- }
pframe = rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 2128886c9924..5e687f6d2c3e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -344,7 +344,7 @@ static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel
struct p2p_channels *channel_list)
{
- struct p2p_oper_class_map op_class[] = {
+ static const struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
{ IEEE80211G, 82, 14, 14, 1, BW20 },
{ IEEE80211A, 115, 36, 48, 4, BW20 },
@@ -363,7 +363,7 @@ static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel
for (op = 0; op_class[op].op_class; op++) {
u8 ch;
- struct p2p_oper_class_map *o = &op_class[op];
+ const struct p2p_oper_class_map *o = &op_class[op];
struct p2p_reg_class *reg = NULL;
for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
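Making the operating-class table "static const" above keeps it in read-only data instead of rebuilding it on the stack every time init_channel_list() runs, and the iterator pointer becomes const to match. A cut-down, compilable sketch of the pattern; the struct fields here are a hypothetical subset of the driver's:

#include <stdio.h>

struct op_class_map {
	int op_class;
	int min_chan;
	int max_chan;
	int inc;
};

/* "static const": emitted once into .rodata rather than copied onto
 * the stack on each call, and walked through a const pointer.
 */
static const struct op_class_map op_class[] = {
	{ 81, 1, 13, 1 },
	{ 115, 36, 48, 4 },
	{ 0, 0, 0, 0 },		/* terminator */
};

int main(void)
{
	const struct op_class_map *o;

	for (o = op_class; o->op_class; o++)
		for (int ch = o->min_chan; ch <= o->max_chan; ch += o->inc)
			printf("class %d ch %d\n", o->op_class, ch);
	return 0;
}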
@@ -2922,7 +2922,8 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
int i = 0;
do {
- ret = _issue_probereq(padapter, pssid, da, ch, append_wps, wait_ms > 0?true:false);
+ ret = _issue_probereq(padapter, pssid, da, ch, append_wps,
+ wait_ms > 0);
i++;
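The issue_probereq_ex() change above (and the matching nulldata/deauth hunks below) drops a redundant ternary: a comparison already yields a boolean, so "wait_ms > 0 ? true : false" collapses to "wait_ms > 0". Sketch:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int wait_ms = 50;

	/* The comparison evaluates to 0 or 1 by itself; the
	 * "cond ? true : false" ternary is redundant.
	 */
	bool wait_ack = wait_ms > 0;

	printf("wait_ack = %d\n", wait_ack);
	return 0;
}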
@@ -3086,8 +3087,6 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
DBG_871X("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
@@ -3405,8 +3404,6 @@ exit:
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */
@@ -3513,7 +3510,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
}
do {
- ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0?true:false);
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
i++;
@@ -3661,7 +3658,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
da = get_my_bssid(&(pmlmeinfo->network));
do {
- ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0?true:false);
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
i++;
@@ -3769,7 +3766,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
int i = 0;
do {
- ret = _issue_deauth(padapter, da, reason, wait_ms > 0?true:false);
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
i++;
@@ -5260,8 +5257,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
DBG_871X("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -5306,8 +5301,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
DBG_871X("report_add_sta_event: add STA\n");
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
/****************************************************************************
@@ -5869,8 +5862,6 @@ void link_timer_hdl(struct timer_list *t)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
-
- return;
}
void addba_timer_hdl(struct timer_list *t)
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 4075de07e0a9..30137f0bd984 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -190,7 +190,6 @@ void rtw_ps_processor(struct adapter *padapter)
}
exit:
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(struct timer_list *t)
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 687ff3c6f09f..7fa8c84cf5f4 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -1400,10 +1400,8 @@ static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *prec
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
u8 *ptr = precv_frame->u.hdr.rx_data;
- u8 type;
u8 subtype;
- type = GetFrameType(ptr);
subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
/* only support station mode */
@@ -1412,9 +1410,8 @@ static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *prec
/* unicast management frame decrypt */
if (pattrib->privacy && !(IS_MCAST(GetAddr1Ptr(ptr))) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_ACTION)) {
- u8 *ppp, *mgmt_DATA;
+ u8 *mgmt_DATA;
u32 data_len = 0;
- ppp = GetAddr2Ptr(ptr);
pattrib->bdecrypted = 0;
pattrib->encrypt = _AES_;
@@ -1709,7 +1706,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
- u8 *data, wlanhdr_offset;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr, *pnfhdr;
union recv_frame *prframe, *pnextrframe;
@@ -1739,8 +1736,6 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
plist = get_next(plist);
- data = get_recvframe_data(prframe);
-
while (phead != plist) {
pnextrframe = (union recv_frame *)plist;
pnfhdr = &pnextrframe->u.hdr;
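The rtw_recv.c hunks above delete locals (type, ppp, data) that were assigned and never read. A tiny illustration of why such code is pure overhead; the frame-control byte and the deleted assignment shown in the comment are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned char fc = 0xb0;	/* fake 802.11 frame-control byte */

	/* Before the cleanup, locals like
	 *     type = GetFrameType(ptr);
	 * were set and never read again; GCC reports them via
	 * -Wunused-but-set-variable, and the fix is plain deletion.
	 */
	printf("subtype bits: 0x%x\n", (fc >> 4) & 0x0f);
	return 0;
}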
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 57cfe06d7d73..9c4607114cea 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -303,13 +303,18 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
*((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
- crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
+ __func__,
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
}
WEP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
}
- return;
}
/* 3 =====TKIP related ===== */
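Throughout rtw_security.c the commit also swaps hardcoded function names in RT_TRACE()/DBG_871X() strings for "%s", __func__, so messages stay correct across renames. A user-space sketch; pr_err() here is a hypothetical stand-in for the driver's logging macros:

#include <stdio.h>

#define pr_err(fmt, ...) fprintf(stderr, fmt, __VA_ARGS__)

static void wep_decrypt_demo(void)
{
	/* "%s", __func__ tracks renames automatically instead of
	 * baking the function name into the format string.
	 */
	pr_err("%s: icv error\n", __func__);
}

int main(void)
{
	wep_decrypt_demo();
	return 0;
}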
@@ -657,11 +662,9 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 hw_hdr_offset = 0;
struct arc4context mycontext;
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
- /* struct sta_info *stainfo; */
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -676,36 +679,14 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _TKIP_) {
-/*
- if (pattrib->psta)
- {
- stainfo = pattrib->psta;
- }
- else
{
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
- }
-*/
- /* if (stainfo!= NULL) */
- {
-/*
- if (!(stainfo->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-*/
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
- /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
prwskey = pattrib->dot118021x_UncstKey.skey;
- prwskeylen = 16;
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
iv = pframe+pattrib->hdrlen;
payload = pframe+pattrib->iv_len+pattrib->hdrlen;
@@ -742,13 +723,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
TKIP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
}
-/*
- else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo == NULL!!!\n"));
- DBG_871X("%s, psta ==NUL\n", __func__);
- res = _FAIL;
- }
-*/
}
return res;
@@ -765,14 +739,12 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
u8 crc[4];
struct arc4context mycontext;
sint length;
- u32 prwskeylen;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-/* struct recv_priv *precvpriv =&padapter->recvpriv; */
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -817,13 +789,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
- /* DBG_871X("rx bc/mc packets, to perform sw rtw_tkip_decrypt\n"); */
- /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- prwskeylen = 16;
} else {
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- prwskeylen = 16;
}
iv = pframe+prxattrib->hdrlen;
@@ -846,15 +814,19 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
*((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
- crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
res = _FAIL;
}
TKIP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo == NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
@@ -1426,7 +1398,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
aes128k128d(key, chain_buffer, aes_out);
for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
payload_index += 16;
aes128k128d(key, chain_buffer, aes_out);
@@ -1437,7 +1409,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++) {
- padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
+ padded_buffer[j] = pframe[payload_index++];
}
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
@@ -1449,7 +1421,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
/* Insert MIC into payload */
for (j = 0; j < 8; j++)
- pframe[payload_index+j] = mic[j]; /* message[payload_index+j] = mic[j]; */
+ pframe[payload_index+j] = mic[j];
payload_index = hdrlen + 8;
for (i = 0; i < num_blocks; i++) {
@@ -1463,9 +1435,9 @@ static sint aes_cipher(u8 *key, uint hdrlen,
frtype
); /* add for CONFIG_IEEE80211W, none 11w also can use */
aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<16;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) {
@@ -1484,12 +1456,12 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index+j];/* padded_buffer[j] = message[payload_index+j]; */
+ padded_buffer[j] = pframe[payload_index+j];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<payload_remainder;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
}
/* Encrypt the MIC */
@@ -1506,12 +1478,12 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < 8; j++)
- padded_buffer[j] = pframe[j+hdrlen+8+plen];/* padded_buffer[j] = message[j+hdrlen+8+plen]; */
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<8;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
return _SUCCESS;
}
@@ -1525,15 +1497,12 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* Intermediate Buffers */
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *prwskey; /* *payload,*iv */
u8 hw_hdr_offset = 0;
- /* struct sta_info *stainfo = NULL; */
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-/* uint offset = 0; */
u32 res = _SUCCESS;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
@@ -1544,16 +1513,13 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _AES_) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
- /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
prwskey = pattrib->dot118021x_UncstKey.skey;
- prwskeylen = 16;
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
@@ -1574,10 +1540,10 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
}
static sint aes_decipher(u8 *key, uint hdrlen,
- u8 *pframe, uint plen)
+ u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
- uint qc_exists, a4_exists, i, j, payload_remainder,
+ uint qc_exists, a4_exists, i, j, payload_remainder,
num_blocks, payload_index;
sint res = _SUCCESS;
u8 pn_vector[6];
@@ -1593,9 +1559,8 @@ static sint aes_decipher(u8 *key, uint hdrlen,
u8 mic[8];
-/* uint offset = 0; */
- uint frtype = GetFrameType(pframe);
- uint frsubtype = GetFrameSubType(pframe);
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
frsubtype = frsubtype>>4;
@@ -1615,11 +1580,11 @@ static sint aes_decipher(u8 *key, uint hdrlen,
payload_remainder = (plen-8) % 16;
pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen+1];
- pn_vector[2] = pframe[hdrlen+4];
- pn_vector[3] = pframe[hdrlen+5];
- pn_vector[4] = pframe[hdrlen+6];
- pn_vector[5] = pframe[hdrlen+7];
+ pn_vector[1] = pframe[hdrlen + 1];
+ pn_vector[2] = pframe[hdrlen + 4];
+ pn_vector[3] = pframe[hdrlen + 5];
+ pn_vector[4] = pframe[hdrlen + 6];
+ pn_vector[5] = pframe[hdrlen + 7];
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
@@ -1651,22 +1616,17 @@ static sint aes_decipher(u8 *key, uint hdrlen,
payload_index = hdrlen + 8; /* 8 is for extiv */
for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(
- ctr_preload,
- a4_exists,
- qc_exists,
- pframe,
- pn_vector,
- i+1,
- frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
- );
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
-
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
+ construct_ctr_preload(ctr_preload, a4_exists,
+ qc_exists, pframe,
+ pn_vector, i + 1,
+ frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
if (payload_remainder > 0) {
/* If there is a short final block, then pad it,*/
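The re-indented loop above is aes_decipher()'s counter-mode pass: per 16-byte block it builds a counter preload, encrypts it, and XORs the result into the payload. A toy sketch of that shape only; the fake keystream below stands in for real AES-128 and must not be read as the cipher itself:

#include <stdio.h>

#define BLK 16

/* Stand-in for aes128k128d(): illustration only, not AES. */
static void fake_keystream(const unsigned char *ctr, unsigned char *out)
{
	for (int i = 0; i < BLK; i++)
		out[i] = ctr[i] ^ 0x5a;
}

int main(void)
{
	unsigned char ctr[BLK] = {0}, ks[BLK];
	unsigned char payload[2 * BLK] = "counter mode demo payload!!";

	/* Same shape as the loop above: per 16-byte block, set the
	 * block counter, generate keystream, XOR it into the payload.
	 */
	for (int i = 0; i < 2; i++) {
		ctr[BLK - 1] = (unsigned char)(i + 1);
		fake_keystream(ctr, ks);
		for (int j = 0; j < BLK; j++)
			payload[i * BLK + j] ^= ks[j];
	}
	printf("first byte after XOR: 0x%02x\n", payload[0]);
	return 0;
}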
@@ -1835,10 +1795,18 @@ static sint aes_decipher(u8 *key, uint hdrlen,
/* compare the mic */
for (i = 0; i < 8; i++) {
if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
- DBG_871X("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ __func__,
+ i,
+ pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]));
+ DBG_871X("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ __func__,
+ i,
+ pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]);
res = _FAIL;
}
}
@@ -1861,7 +1829,6 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-/* struct recv_priv *precvpriv =&padapter->recvpriv; */
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -1869,15 +1836,15 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo != NULL) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(prxattrib->ra)) {
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- /* DBG_871X("rx bc/mc packets, to perform sw rtw_aes_decrypt\n"); */
- /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
if (psecuritypriv->binstallGrpkey == false) {
res = _FAIL;
@@ -1927,7 +1894,9 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
AES_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo == NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index bdc52d8d5625..09d2ca30d653 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -187,7 +187,6 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
/* struct sta_info *rtw_alloc_stainfo(_queue *pfree_sta_queue, unsigned char *hwaddr) */
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- uint tmp_aid;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -211,8 +210,6 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
/* spin_unlock_bh(&(pfree_sta_queue->lock)); */
- tmp_aid = psta->aid;
-
_rtw_init_stainfo(psta);
psta->padapter = pstapriv->padapter;
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index ea3ea2a6b314..9590e6f351c1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -606,19 +606,6 @@ inline void clear_cam_entry(struct adapter *adapter, u8 id)
clear_cam_cache(adapter, id);
}
-inline void write_cam_from_cache(struct adapter *adapter, u8 id)
-{
- struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
- struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
- struct cam_entry_cache cache;
-
- spin_lock_bh(&cam_ctl->lock);
- memcpy(&cache, &dvobj->cam_cache[id], sizeof(struct cam_entry_cache));
- spin_unlock_bh(&cam_ctl->lock);
-
- _write_cam(adapter, id, cache.ctrl, cache.mac, cache.key);
-}
-
void write_cam_cache(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
{
struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
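write_cam_from_cache() is removed above because nothing calls it. For a plain file-local helper the compiler can spot this itself, as in this sketch (building it with gcc -Wunused-function reports the helper, which is the point):

/* A static helper with no callers is dead code; the cleanup above
 * deletes such functions outright rather than keeping them around
 * "just in case".
 */
static void unused_helper(void)
{
}

int main(void)
{
	return 0;
}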
@@ -1170,8 +1157,6 @@ void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
pmlmeinfo->HT_info_enable = 1;
memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->Length);
-
- return;
}
void HTOnAssocRsp(struct adapter *padapter)
@@ -1481,11 +1466,11 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
}
- kfree((u8 *)bssid);
+ kfree(bssid);
return _SUCCESS;
_mismatch:
- kfree((u8 *)bssid);
+ kfree(bssid);
if (pmlmepriv->NumOfBcnInfoChkFail == 0)
pmlmepriv->timeBcnInfoChkStart = jiffies;
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index b5dcb78fb4f4..fdb585ff5925 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -25,9 +25,6 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
spin_lock_init(&psta_xmitpriv->lock);
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _init_txservq(&(psta_xmitpriv->blk_q[i])); */
-
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
_init_txservq(&psta_xmitpriv->vi_q);
@@ -54,18 +51,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->adapter = padapter;
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _rtw_init_queue(&pxmitpriv->blk_strms[i]); */
-
_rtw_init_queue(&pxmitpriv->be_pending);
_rtw_init_queue(&pxmitpriv->bk_pending);
_rtw_init_queue(&pxmitpriv->vi_pending);
_rtw_init_queue(&pxmitpriv->vo_pending);
_rtw_init_queue(&pxmitpriv->bm_pending);
- /* _rtw_init_queue(&pxmitpriv->legacy_dz_queue); */
- /* _rtw_init_queue(&pxmitpriv->apsd_queue); */
-
_rtw_init_queue(&pxmitpriv->free_xmit_queue);
/*
@@ -83,13 +74,11 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
- /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
- /* ((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3); */
pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
for (i = 0; i < NR_XMITFRAME; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -99,7 +88,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xmit_queue.queue);
pxframe++;
}
@@ -108,7 +98,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
-
/* init xmit_buf */
_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
@@ -122,8 +111,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
}
pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
- /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
- /* ((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3); */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
@@ -150,13 +137,13 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->flags = XMIT_VO_QUEUE;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmitbuf_queue.queue);
#ifdef DBG_XMIT_BUF
pxmitbuf->no = i;
#endif
pxmitbuf++;
-
}
pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
@@ -176,7 +163,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -188,7 +175,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->ext_tag = 1;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xframe_ext_queue.queue);
pxframe++;
}
@@ -227,12 +215,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->len = 0;
pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmit_extbuf_queue.queue);
#ifdef DBG_XMIT_BUF_EXT
pxmitbuf->no = i;
#endif
pxmitbuf++;
-
}
pxmitpriv->free_xmit_extbuf_cnt = NR_XMIT_EXTBUFF;
@@ -265,9 +253,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++)
pxmitpriv->wmm_para_seq[i] = i;
- }
pxmitpriv->ack_tx = false;
mutex_init(&pxmitpriv->ack_tx_mutex);
@@ -306,7 +293,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
if (pxmitpriv->pallocated_frame_buf)
vfree(pxmitpriv->pallocated_frame_buf);
-
if (pxmitpriv->pallocated_xmitbuf)
vfree(pxmitpriv->pallocated_xmitbuf);
@@ -329,9 +315,8 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf++;
}
- if (pxmitpriv->pallocated_xmit_extbuf) {
+ if (pxmitpriv->pallocated_xmit_extbuf)
vfree(pxmitpriv->pallocated_xmit_extbuf);
- }
for (i = 0; i < CMDBUF_MAX; i++) {
pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
@@ -372,8 +357,8 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
u32 sz;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
/* struct sta_info *psta = pattrib->psta; */
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pattrib->nr_frags != 1)
sz = padapter->xmitpriv.frag_len;
@@ -404,7 +389,6 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
break;
}
-
/* check ERP protection */
if (pattrib->rtsen || pattrib->cts2self) {
if (pattrib->rtsen)
@@ -485,20 +469,12 @@ static void update_attrib_phy_info(struct adapter *padapter, struct pkt_attrib *
else
pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing;
- /* if (pattrib->ht_en && psta->htpriv.ampdu_enable) */
- /* */
- /* if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) */
- /* pattrib->ampdu_en = true; */
- /* */
-
-
pattrib->retry_ctrl = false;
#ifdef CONFIG_AUTO_AP_MODE
if (psta->isrc && psta->pid > 0)
pattrib->pctrl = true;
#endif
-
}
static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
@@ -548,7 +524,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
/* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
if (((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) && (pattrib->ether_type == 0x888e))
pattrib->encrypt = _NO_PRIVACY_;
-
}
switch (pattrib->encrypt) {
@@ -576,7 +551,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
else
TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
-
memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);
break;
@@ -620,7 +594,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
exit:
return res;
-
}
u8 qos_acm(u8 acm_mask, u8 priority)
@@ -658,14 +631,12 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
struct iphdr ip_hdr;
s32 UserPriority = 0;
-
_rtw_open_pktfile(ppktfile->pkt, ppktfile);
_rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
/* get UserPriority from IP hdr */
if (pattrib->ether_type == 0x0800) {
_rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
-/* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
UserPriority = ip_hdr.tos >> 5;
}
pattrib->priority = UserPriority;
@@ -675,7 +646,6 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib)
{
- uint i;
struct pkt_file pktfile;
struct sta_info *psta = NULL;
struct ethhdr etherhdr;
@@ -689,15 +659,13 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);
_rtw_open_pktfile(pkt, &pktfile);
- i = _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+ _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
-
memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
-
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
@@ -748,8 +716,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
}
}
-
-
} else if (0x888e == pattrib->ether_type) {
DBG_871X_LEVEL(_drv_always_, "send eapol packet\n");
}
@@ -804,8 +770,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
return _FAIL;
}
-
-
/* TODO:_lock */
if (update_attrib_sec_info(padapter, pattrib, psta) == _FAIL) {
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
@@ -815,8 +779,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
update_attrib_phy_info(padapter, pattrib, psta);
- /* DBG_8192C("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
-
pattrib->psta = psta;
/* TODO:_unlock */
@@ -839,7 +801,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
if (pmlmepriv->acm_mask != 0)
pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
-
}
}
@@ -854,7 +815,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
sint curfragnum, length;
u8 *pframe, *payload, mic[8];
struct mic_data micdata;
- /* struct sta_info *stainfo; */
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -862,54 +822,23 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
u8 hw_hdr_offset = 0;
sint bmcst = IS_MCAST(pattrib->ra);
-/*
- if (pattrib->psta)
- {
- stainfo = pattrib->psta;
- }
- else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
- }
-
- if (stainfo == NULL)
- {
- DBG_871X("%s, psta ==NUL\n", __func__);
- return _FAIL;
- }
-
- if (!(stainfo->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-*/
-
hw_hdr_offset = TXDESC_OFFSET;
- if (pattrib->encrypt == _TKIP_) { /* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
+ if (pattrib->encrypt == _TKIP_) {
/* encode mic code */
- /* if (stainfo!= NULL) */
{
u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
pframe = pxmitframe->buf_addr + hw_hdr_offset;
if (bmcst) {
- if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) {
- /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* msleep(10); */
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
return _FAIL;
- }
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
} else {
- if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16)) {
- /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* msleep(10); */
+ if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16))
return _FAIL;
- }
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
}
@@ -926,14 +855,11 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
rtw_secmicappend(&micdata, &pframe[16], 6);
else
rtw_secmicappend(&micdata, &pframe[10], 6);
-
}
- /* if (pqospriv->qos_option == 1) */
if (pattrib->qos_en)
priority[0] = (u8)pxmitframe->attrib.priority;
-
rtw_secmicappend(&micdata, &priority[0], 4);
payload = pframe;
@@ -956,7 +882,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum =%d length =%d pattrib->icv_len =%d", curfragnum, length, pattrib->icv_len));
}
}
- rtw_secgetmic(&micdata, &(mic[0]));
+ rtw_secgetmic(&micdata, &mic[0]);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz =%d!!!\n", pattrib->last_txcmdsz));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]= 0x%.2x , mic[1]= 0x%.2x , mic[2]= 0x%.2x , mic[3]= 0x%.2x\n\
@@ -964,7 +890,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
/* add mic code and add the mic code length in last_txcmdsz */
- memcpy(payload, &(mic[0]), 8);
+ memcpy(payload, &mic[0], 8);
pattrib->last_txcmdsz += 8;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ========last pkt ========\n"));
@@ -975,9 +901,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
*(payload+curfragnum+4), *(payload+curfragnum+5), *(payload+curfragnum+6), *(payload+curfragnum+7)));
}
/*
- else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo == NULL!!!\n"));
- }
*/
}
return _SUCCESS;
@@ -985,13 +908,9 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
-
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- /* struct security_priv *psecuritypriv =&padapter->securitypriv; */
- /* if ((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) */
if (pattrib->bswenc) {
- /* DBG_871X("start xmitframe_swencrypt\n"); */
RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
switch (pattrib->encrypt) {
case _WEP40_:
@@ -1007,7 +926,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
default:
break;
}
-
} else
RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
@@ -1030,7 +948,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
{
@@ -1044,8 +962,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (pqospriv->qos_option)
qos_option = true;
-
- } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)) {
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
/* to_ds = 0, fr_ds = 1; */
SetFrDs(fctrl);
memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
@@ -1106,7 +1023,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
return _FAIL;
}
-
if (psta) {
psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
@@ -1119,7 +1035,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
pattrib->ampdu_en = true;
-
/* re-check if enable ampdu by BA_starting_seqctrl */
if (pattrib->ampdu_en == true) {
u16 tx_seq;
@@ -1128,24 +1043,19 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
/* check BA_starting_seqctrl */
if (SN_LESS(pattrib->seqnum, tx_seq)) {
- /* DBG_871X("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
pattrib->ampdu_en = false;/* AGG BK */
} else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
pattrib->ampdu_en = true;/* AGG EN */
} else {
- /* DBG_871X("tx ampdu over run\n"); */
psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
pattrib->ampdu_en = true;/* AGG EN */
}
-
}
}
}
-
} else {
-
}
exit:
@@ -1203,9 +1113,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
u8 *pframe, *mem_start;
u8 hw_hdr_offset;
- /* struct sta_info *psta; */
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
- /* struct mlme_priv *pmlmepriv = &padapter->mlmepriv; */
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
@@ -1215,30 +1122,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
s32 bmcst = IS_MCAST(pattrib->ra);
s32 res = _SUCCESS;
-/*
- if (pattrib->psta)
- {
- psta = pattrib->psta;
- } else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
- }
-
- if (psta == NULL)
- {
-
- DBG_871X("%s, psta ==NUL\n", __func__);
- return _FAIL;
- }
-
-
- if (!(psta->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
- return _FAIL;
- }
-*/
if (!pxmitframe->buf_addr) {
DBG_8192C("==> %s buf_addr == NULL\n", __func__);
return _FAIL;
@@ -1293,10 +1176,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mpdu_len -= llc_sz;
}
- if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc))
mpdu_len -= pattrib->icv_len;
- }
-
if (bmcst) {
/* don't do fragment to broadcat/multicast packets */
@@ -1330,7 +1211,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
-
}
if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
@@ -1410,7 +1290,8 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
pmlmeext->mgnt_80211w_IPN++;
/* add MME IE with MIC all zero, MME string doesn't include element id and length */
- pframe = rtw_set_ie(pframe, _MME_IE_, 16, MME, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _MME_IE_, 16,
+ MME, &pattrib->pktlen);
pattrib->last_txcmdsz = pattrib->pktlen;
/* total frame length - header length */
frame_body_len = pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr);
@@ -1441,7 +1322,6 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (!psta) {
-
DBG_871X("%s, psta ==NUL\n", __func__);
goto xmitframe_coalesce_fail;
}
@@ -1451,7 +1331,6 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
goto xmitframe_coalesce_fail;
}
- /* DBG_871X("%s, action frame category =%d\n", __func__, pframe[WLAN_HDR_A3_LEN]); */
/* according 802.11-2012 standard, these five types are not robust types */
if (subtype == WIFI_ACTION &&
(pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_PUBLIC ||
@@ -1550,7 +1429,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
{
-
uint protection;
u8 *perp;
sint erp_len;
@@ -1582,7 +1460,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
}
break;
-
}
}
@@ -1666,7 +1543,6 @@ struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
pxmitbuf->priv_data = pcmdframe;
return pcmdframe;
-
}
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
@@ -1681,14 +1557,13 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
if (list_empty(&pfree_queue->queue)) {
pxmitbuf = NULL;
} else {
-
phead = get_list_head(pfree_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
+ list_del_init(&pxmitbuf->list);
}
if (pxmitbuf) {
@@ -1697,7 +1572,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
DBG_871X("DBG_XMIT_BUF_EXT ALLOC no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
#endif
-
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
@@ -1708,7 +1582,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
-
}
spin_unlock_irqrestore(&pfree_queue->lock, irqL);
@@ -1728,7 +1601,7 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
#ifdef DBG_XMIT_BUF_EXT
DBG_871X("DBG_XMIT_BUF_EXT FREE no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
@@ -1746,21 +1619,18 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- /* DBG_871X("+rtw_alloc_xmitbuf\n"); */
-
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
if (list_empty(&pfree_xmitbuf_queue->queue)) {
pxmitbuf = NULL;
} else {
-
phead = get_list_head(pfree_xmitbuf_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
+ list_del_init(&pxmitbuf->list);
}
if (pxmitbuf) {
@@ -1768,7 +1638,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
#ifdef DBG_XMIT_BUF
DBG_871X("DBG_XMIT_BUF ALLOC no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
#endif
- /* DBG_871X("alloc, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
pxmitbuf->priv_data = NULL;
@@ -1797,8 +1666,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
_irqL irqL;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- /* DBG_871X("+rtw_free_xmitbuf\n"); */
-
if (!pxmitbuf)
return _FAIL;
@@ -1815,10 +1682,10 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+ list_add_tail(&pxmitbuf->list,
+ get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
- /* DBG_871X("FREE, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
#ifdef DBG_XMIT_BUF
DBG_871X("DBG_XMIT_BUF FREE no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
#endif
@@ -1834,7 +1701,6 @@ static void rtw_init_xmitframe(struct xmit_frame *pxframe)
pxframe->pxmitbuf = NULL;
memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
- /* pxframe->attrib.psta = NULL; */
pxframe->frame_tag = DATA_FRAMETAG;
@@ -1879,7 +1745,7 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
+ list_del_init(&pxframe->list);
pxmitpriv->free_xmitframe_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
}
@@ -1906,7 +1772,7 @@ struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv)
plist = get_next(phead);
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
+ list_del_init(&pxframe->list);
pxmitpriv->free_xframe_ext_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe_ext():free_xmitframe_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
}
@@ -1974,7 +1840,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
else if (pxmitframe->ext_tag == 1)
queue = &pxmitpriv->free_xframe_ext_queue;
else {
-
}
spin_lock_bh(&queue->lock);
@@ -2006,21 +1871,19 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
- spin_lock_bh(&(pframequeue->lock));
+ spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
plist = get_next(phead);
while (phead != plist) {
-
pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
plist = get_next(plist);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
}
- spin_unlock_bh(&(pframequeue->lock));
+ spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -2029,7 +1892,6 @@ s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitfram
if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n"));
-/* pxmitframe->pkt = NULL; */
return _FAIL;
}
@@ -2043,21 +1905,21 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
switch (up) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
*(ac) = 3;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
*(ac) = 1;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
*(ac) = 0;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
break;
@@ -2065,11 +1927,10 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
*(ac) = 2;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
break;
-
}
return ptxservq;
@@ -2081,7 +1942,6 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
*/
s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- /* _irqL irqL0; */
u8 ac_index;
struct sta_info *psta;
struct tx_servq *ptxservq;
@@ -2091,15 +1951,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class);
-/*
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta = rtw_get_stainfo(pstapriv, pattrib->ra);
- }
-*/
-
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (pattrib->psta != psta) {
DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta);
@@ -2123,22 +1974,13 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
- /* spin_lock_irqsave(&pstapending->lock, irqL0); */
-
- if (list_empty(&ptxservq->tx_pending)) {
+ if (list_empty(&ptxservq->tx_pending))
list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
- }
-
- /* spin_lock_irqsave(&ptxservq->sta_pending.lock, irqL1); */
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
- /* spin_unlock_irqrestore(&ptxservq->sta_pending.lock, irqL1); */
-
- /* spin_unlock_irqrestore(&pstapending->lock, irqL0); */
-
exit:
return res;
@@ -2161,45 +2003,24 @@ s32 rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits = pxmitpriv->hwxmits;
if (pxmitpriv->hwxmit_entry == 5) {
- /* pxmitpriv->bmc_txqueue.head = 0; */
- /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
-
} else if (pxmitpriv->hwxmit_entry == 4) {
-
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
} else {
-
}
return _SUCCESS;
@@ -2207,24 +2028,17 @@ s32 rtw_alloc_hwxmits(struct adapter *padapter)
void rtw_free_hwxmits(struct adapter *padapter)
{
- struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- hwxmits = pxmitpriv->hwxmits;
- if (hwxmits)
- kfree((u8 *)hwxmits);
+ kfree(pxmitpriv->hwxmits);
}
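The hunk above can drop the NULL check because kfree(NULL) is defined to be a no-op, so a guard before kfree() is redundant; the (u8 *) cast in the removed code was likewise unnecessary, since kfree() takes a const void *. A minimal sketch of the pattern (free_hwxmits_sketch is a hypothetical name):

	/* kfree() tolerates NULL, so no guard is needed. */
	static void free_hwxmits_sketch(struct xmit_priv *pxmitpriv)
	{
		kfree(pxmitpriv->hwxmits);	/* safe even if hwxmits == NULL */
		pxmitpriv->hwxmits = NULL;	/* optional: avoid a dangling pointer */
	}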
void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry)
{
sint i;
- for (i = 0; i < entry; i++, phwxmit++) {
- /* spin_lock_init(&phwxmit->xmit_lock); */
- /* INIT_LIST_HEAD(&phwxmit->pending); */
- /* phwxmit->txcmdcnt = 0; */
+ for (i = 0; i < entry; i++, phwxmit++)
phwxmit->accnt = 0;
- }
}
u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
@@ -2259,11 +2073,9 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
default:
addr = MGT_QUEUE_INX;
break;
-
}
return addr;
-
}
static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
@@ -2310,7 +2122,7 @@ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
if (!pxmitframe) {
drop_cnt++;
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n"));
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: no more pxmitframe\n", __func__));
DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe);
return -1;
}
@@ -2318,7 +2130,7 @@ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
if (res == _FAIL) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n"));
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: update attrib fail\n", __func__));
#ifdef DBG_TX_DROP_FRAME
DBG_871X("DBG_TX_DROP_FRAME %s update attrib fail\n", __func__);
#endif
@@ -2355,7 +2167,6 @@ inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe)
struct registry_priv *registry = &adapter->registrypriv;
if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) {
-
struct pkt_attrib *attrib = &xmitframe->attrib;
if (attrib->ether_type == 0x0806
@@ -2389,17 +2200,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
return ret;
}
-/*
- if (pattrib->psta)
- {
- psta = pattrib->psta;
- }
- else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta =rtw_get_stainfo(pstapriv, pattrib->ra);
- }
-*/
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (pattrib->psta != psta) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta);
@@ -2421,16 +2221,13 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (pattrib->triggered == 1) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger);
- /* DBG_871X("directly xmit pspoll_triggered packet\n"); */
- /* pattrib->triggered = 0; */
if (bmcst && xmitframe_hiq_filter(pxmitframe))
pattrib->qsel = 0x11;/* HIQ */
return ret;
}
-
if (bmcst) {
spin_lock_bh(&psta->sleep_q.lock);
@@ -2439,8 +2236,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
list_del_init(&pxmitframe->list);
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
psta->sleepq_len++;
@@ -2448,32 +2243,24 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (!(pstapriv->tim_bitmap & BIT(0)))
update_tim = true;
- pstapriv->tim_bitmap |= BIT(0);/* */
+ pstapriv->tim_bitmap |= BIT(0);
pstapriv->sta_dz_bitmap |= BIT(0);
- /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
- if (update_tim) {
+ if (update_tim)
update_beacon(padapter, _TIM_IE_, NULL, true);
- } else {
+ else
chk_bmc_sleepq_cmd(padapter);
- }
-
- /* spin_unlock_bh(&psta->sleep_q.lock); */
ret = true;
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast);
-
}
spin_unlock_bh(&psta->sleep_q.lock);
return ret;
-
}
-
spin_lock_bh(&psta->sleep_q.lock);
if (psta->state&WIFI_SLEEP_STATE) {
@@ -2482,8 +2269,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (pstapriv->sta_dz_bitmap & BIT(psta->aid)) {
list_del_init(&pxmitframe->list);
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
psta->sleepq_len++;
@@ -2517,32 +2302,20 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
pstapriv->tim_bitmap |= BIT(psta->aid);
- /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
if (update_tim)
- /* DBG_871X("sleepq_len == 1, update BCNTIM\n"); */
 /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, true);
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
-
- /* if (psta->sleepq_len > (NR_XMITFRAME>>3)) */
- /* */
- /* wakeup_sta_to_xmit(padapter, psta); */
- /* */
-
ret = true;
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast);
}
-
}
spin_unlock_bh(&psta->sleep_q.lock);
return ret;
-
}
static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
@@ -2575,11 +2348,8 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
ptxservq->qcnt--;
phwxmits[ac_index].accnt--;
} else {
- /* DBG_871X("xmitframe_enqueue_for_sleeping_sta return false\n"); */
}
-
}
-
}
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
@@ -2594,34 +2364,28 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
spin_lock_bh(&pxmitpriv->lock);
psta->state |= WIFI_SLEEP_STATE;
pstapriv->sta_dz_bitmap |= BIT(psta->aid);
-
-
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
/* for BC/MC Frames */
pstaxmitpriv = &psta_bmc->sta_xmitpriv;
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2637,8 +2401,6 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
@@ -2690,26 +2452,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
-/*
- spin_unlock_bh(&psta->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe) == true)
- {
- rtw_os_xmit_complete(padapter, pxmitframe);
- }
- spin_lock_bh(&psta->sleep_q.lock);
-*/
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
-
}
if (psta->sleepq_len == 0) {
- if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
+ if (pstapriv->tim_bitmap & BIT(psta->aid))
update_mask = BIT(0);
- }
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -2746,44 +2494,25 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
else
pxmitframe->attrib.mdata = 0;
-
pxmitframe->attrib.triggered = 1;
-/*
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe) == true)
- {
- rtw_os_xmit_complete(padapter, pxmitframe);
- }
- spin_lock_bh(&psta_bmc->sleep_q.lock);
-
-*/
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
}
if (psta_bmc->sleepq_len == 0) {
- if (pstapriv->tim_bitmap & BIT(0)) {
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
+ if (pstapriv->tim_bitmap & BIT(0))
update_mask |= BIT(1);
- }
+
pstapriv->tim_bitmap &= ~BIT(0);
pstapriv->sta_dz_bitmap &= ~BIT(0);
}
-
}
_exit:
- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
if (update_mask)
- /* update_BCNTIM(padapter); */
- /* printk("%s => call update_beacon\n", __func__); */
update_beacon(padapter, _TIM_IE_, NULL, true);
-
}
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
@@ -2794,8 +2523,6 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
@@ -2848,16 +2575,10 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, true);
- /* update_mask = BIT(0); */
}
-
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2875,7 +2596,7 @@ void enqueue_pending_xmitbuf(
list_add_tail(&pxmitbuf->list, get_list_head(pqueue));
spin_unlock_bh(&pqueue->lock);
- complete(&(pri_adapter->xmitpriv.xmit_comp));
+ complete(&pri_adapter->xmitpriv.xmit_comp);
}
void enqueue_pending_xmitbuf_to_head(
@@ -2898,7 +2619,6 @@ struct xmit_buf *dequeue_pending_xmitbuf(
struct xmit_buf *pxmitbuf;
struct __queue *pqueue;
-
pxmitbuf = NULL;
pqueue = &pxmitpriv->pending_xmitbuf_queue;
@@ -2924,7 +2644,6 @@ struct xmit_buf *dequeue_pending_xmitbuf_under_survey(
struct xmit_buf *pxmitbuf;
struct __queue *pqueue;
-
pxmitbuf = NULL;
pqueue = &pxmitpriv->pending_xmitbuf_queue;
@@ -2983,7 +2702,6 @@ int rtw_xmit_thread(void *context)
s32 err;
struct adapter *padapter;
-
err = _SUCCESS;
padapter = context;
@@ -2992,7 +2710,7 @@ int rtw_xmit_thread(void *context)
do {
err = rtw_hal_xmit_thread_handler(padapter);
flush_signals_thread();
- } while (_SUCCESS == err);
+ } while (err == _SUCCESS);
complete(&padapter->xmitpriv.terminate_xmitthread_comp);
@@ -3022,9 +2740,8 @@ int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
status = sctx->status;
}
- if (status == RTW_SCTX_DONE_SUCCESS) {
+ if (status == RTW_SCTX_DONE_SUCCESS)
ret = _SUCCESS;
- }
return ret;
}
@@ -3075,9 +2792,8 @@ void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
- if (pxmitpriv->ack_tx) {
+ if (pxmitpriv->ack_tx)
rtw_sctx_done_err(&pack_tx_ops, status);
- } else {
+ else
DBG_871X("%s ack_tx not set\n", __func__);
- }
}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 3239d37087c8..1ca9063a269f 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -402,8 +402,6 @@ static void GetDeltaSwingTable_8723B(
*TemperatureUP_B = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
*TemperatureDOWN_B = (u8 *)DeltaSwingTableIdx_2GA_N_8188E;
}
-
- return;
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 6e4a1fcb8790..d5793e4614bf 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1315,7 +1315,7 @@ void EXhalbtcoutsrc_DisplayBtCoexInfo(PBTC_COEXIST pBtCoexist)
/*
* Description:
- *Run BT-Coexist mechansim or not
+ *Run BT-Coexist mechanism or not
*
*/
void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index eddd56abbb2d..109bd85b0cd8 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -30,7 +30,6 @@ void rtw_hal_data_deinit(struct adapter *padapter)
{
if (is_primary_adapter(padapter)) { /* if (padapter->isprimary) */
if (padapter->HalData) {
- phy_free_filebuf(padapter);
vfree(padapter->HalData);
padapter->HalData = NULL;
padapter->hal_data_sz = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 6539bee9b5ba..eb7de3617d83 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -2202,1079 +2202,3 @@ void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
break;
}
}
-
-
-static char file_path_bs[PATH_MAX];
-
-#define GetLineFromBuffer(buffer) strsep(&buffer, "\n")
-
-int phy_ConfigMACWithParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_MAC_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->mac_reg_len == 0) && !pHalData->mac_reg) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->mac_reg = vzalloc(rlen);
- if (pHalData->mac_reg) {
- memcpy(pHalData->mac_reg, pHalData->para_file_buf, rlen);
- pHalData->mac_reg_len = rlen;
- } else
- DBG_871X("%s mac_reg alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->mac_reg_len != 0) && (pHalData->mac_reg != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->mac_reg, pHalData->mac_reg_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove))
- rtw_write8(Adapter, u4bRegOffset, (u8)u4bRegValue);
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-int phy_ConfigBBWithParaFile(
- struct adapter *Adapter, char *pFileName, u32 ConfigType
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
- char *pBuf = NULL;
- u32 *pBufLen = NULL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PARA_FILE))
- return rtStatus;
-
- switch (ConfigType) {
- case CONFIG_BB_PHY_REG:
- pBuf = pHalData->bb_phy_reg;
- pBufLen = &pHalData->bb_phy_reg_len;
- break;
- case CONFIG_BB_AGC_TAB:
- pBuf = pHalData->bb_agc_tab;
- pBufLen = &pHalData->bb_agc_tab_len;
- break;
- default:
- DBG_871X("Unknown ConfigType!! %d\r\n", ConfigType);
- break;
- }
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pBuf = vzalloc(rlen);
- if (pBuf) {
- memcpy(pBuf, pHalData->para_file_buf, rlen);
- *pBufLen = rlen;
-
- switch (ConfigType) {
- case CONFIG_BB_PHY_REG:
- pHalData->bb_phy_reg = pBuf;
- break;
- case CONFIG_BB_AGC_TAB:
- pHalData->bb_agc_tab = pBuf;
- break;
- }
- } else
- DBG_871X("%s(): ConfigType %d alloc fail !\n", __func__, ConfigType);
- }
- }
- } else {
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
- else if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe)
- msleep(50);
- else if (u4bRegOffset == 0xfd)
- mdelay(5);
- else if (u4bRegOffset == 0xfc)
- mdelay(1);
- else if (u4bRegOffset == 0xfb)
- udelay(50);
- else if (u4bRegOffset == 0xfa)
- udelay(5);
- else if (u4bRegOffset == 0xf9)
- udelay(1);
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- /* DBG_871X("[BB-ADDR]%03lX =%08lX\n", u4bRegOffset, u4bRegValue); */
- PHY_SetBBReg(Adapter, u4bRegOffset, bMaskDWord, u4bRegValue);
-
- if (u4bRegOffset == 0xa24)
- pHalData->odmpriv.RFCalibrateInfo.RegA24 = u4bRegValue;
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static void phy_DecryptBBPgParaFile(struct adapter *Adapter, char *buffer)
-{
- u32 i = 0, j = 0;
- u8 map[95] = {0};
- u8 currentChar;
- char *BufOfLines, *ptmp;
-
- /* DBG_871X("=====>phy_DecryptBBPgParaFile()\n"); */
- /* 32 the ascii code of the first visable char, 126 the last one */
- for (i = 0; i < 95; ++i)
- map[i] = (u8) (94 - i);
-
- ptmp = buffer;
- i = 0;
- for (BufOfLines = GetLineFromBuffer(ptmp); BufOfLines != NULL; BufOfLines = GetLineFromBuffer(ptmp)) {
- /* DBG_871X("Encrypted Line: %s\n", BufOfLines); */
-
- for (j = 0; j < strlen(BufOfLines); ++j) {
- currentChar = BufOfLines[j];
-
- if (currentChar == '\0')
- break;
-
- currentChar -= (u8) ((((i + j) * 3) % 128));
-
- BufOfLines[j] = map[currentChar - 32] + 32;
- }
- /* DBG_871X("Decrypted Line: %s\n", BufOfLines); */
- if (strlen(BufOfLines) != 0)
- i++;
- BufOfLines[strlen(BufOfLines)] = '\n';
- }
-}
-
-static int phy_ParseBBPgParaFile(struct adapter *Adapter, char *buffer)
-{
- int rtStatus = _SUCCESS;
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegMask, u4bRegValue;
- u32 u4bMove;
- bool firstLine = true;
- u8 tx_num = 0;
- u8 band = 0, rf_path = 0;
-
- /* DBG_871X("=====>phy_ParseBBPgParaFile()\n"); */
-
- if (Adapter->registrypriv.RegDecryptCustomFile == 1)
- phy_DecryptBBPgParaFile(Adapter, buffer);
-
- ptmp = buffer;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- if (isAllSpaceOrTab(szLine, sizeof(*szLine)))
- continue;
-
- /* Get header info (relative value or exact value) */
- if (firstLine) {
- if (eqNByte(szLine, (u8 *)("#[v1]"), 5)) {
-
- pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
- /* DBG_871X("This is a new format PHY_REG_PG.txt\n"); */
- } else if (eqNByte(szLine, (u8 *)("#[v0]"), 5)) {
- pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
- /* DBG_871X("This is a old format PHY_REG_PG.txt ok\n"); */
- } else {
- DBG_871X("The format in PHY_REG_PG are invalid %s\n", szLine);
- return _FAIL;
- }
-
- if (eqNByte(szLine + 5, (u8 *)("[Exact]#"), 8)) {
- pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
- /* DBG_871X("The values in PHY_REG_PG are exact values ok\n"); */
- firstLine = false;
- continue;
- } else if (eqNByte(szLine + 5, (u8 *)("[Relative]#"), 11)) {
- pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_RELATIVE_VALUE;
- /* DBG_871X("The values in PHY_REG_PG are relative values ok\n"); */
- firstLine = false;
- continue;
- } else {
- DBG_871X("The values in PHY_REG_PG are invalid %s\n", szLine);
- return _FAIL;
- }
- }
-
- if (pHalData->odmpriv.PhyRegPgVersion == 0) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- szLine += u4bMove;
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
-
- /* Get 2nd hex value as register mask. */
- if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
- /* Get 3rd hex value as register value. */
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, u4bRegValue);
- /* DBG_871X("[ADDR] %03X =%08X Mask =%08x\n", u4bRegOffset, u4bRegValue, u4bRegMask); */
- } else
- return _FAIL;
- } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
- u32 combineValue = 0;
- u8 integer = 0, fraction = 0;
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
- PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, combineValue);
-
- /* DBG_871X("[ADDR] 0x%3x = 0x%4x\n", u4bRegOffset, combineValue); */
- }
- }
- } else if (pHalData->odmpriv.PhyRegPgVersion > 0) {
- u32 index = 0;
-
- if (eqNByte(szLine, "0xffff", 6))
- break;
-
- if (!eqNByte("#[END]#", szLine, 7)) {
- /* load the table label info */
- if (szLine[0] == '#') {
- index = 0;
- if (eqNByte(szLine, "#[2.4G]", 7)) {
- band = BAND_ON_2_4G;
- index += 8;
- } else if (eqNByte(szLine, "#[5G]", 5)) {
- band = BAND_ON_5G;
- index += 6;
- } else {
- DBG_871X("Invalid band %s in PHY_REG_PG.txt\n", szLine);
- return _FAIL;
- }
-
- rf_path = szLine[index] - 'A';
- /* DBG_871X(" Table label Band %d, RfPath %d\n", band, rf_path); */
- } else { /* load rows of tables */
- if (szLine[1] == '1')
- tx_num = RF_1TX;
- else if (szLine[1] == '2')
- tx_num = RF_2TX;
- else if (szLine[1] == '3')
- tx_num = RF_3TX;
- else if (szLine[1] == '4')
- tx_num = RF_4TX;
- else {
- DBG_871X("Invalid row in PHY_REG_PG.txt %c\n", szLine[1]);
- return _FAIL;
- }
-
- while (szLine[index] != ']')
- ++index;
- ++index;/* skip ] */
-
- /* Get 2nd hex value as register offset. */
- szLine += index;
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- /* Get 2nd hex value as register mask. */
- if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
- /* Get 3rd hex value as register value. */
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, u4bRegValue);
- /* DBG_871X("[ADDR] %03X (tx_num %d) =%08X Mask =%08x\n", u4bRegOffset, tx_num, u4bRegValue, u4bRegMask); */
- } else
- return _FAIL;
- } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
- u32 combineValue = 0;
- u8 integer = 0, fraction = 0;
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
- PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, combineValue);
-
- /* DBG_871X("[ADDR] 0x%3x (tx_num %d) = 0x%4x\n", u4bRegOffset, tx_num, combineValue); */
- }
- }
- }
- }
- }
- }
- /* DBG_871X("<=====phy_ParseBBPgParaFile()\n"); */
- return rtStatus;
-}
-
-int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PG_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->bb_phy_reg_pg_len == 0) && !pHalData->bb_phy_reg_pg) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->bb_phy_reg_pg = vzalloc(rlen);
- if (pHalData->bb_phy_reg_pg) {
- memcpy(pHalData->bb_phy_reg_pg, pHalData->para_file_buf, rlen);
- pHalData->bb_phy_reg_pg_len = rlen;
- } else
- DBG_871X("%s bb_phy_reg_pg alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->bb_phy_reg_pg_len != 0) && (pHalData->bb_phy_reg_pg != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->bb_phy_reg_pg, pHalData->bb_phy_reg_pg_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("phy_ConfigBBWithPgParaFile(): read %s ok\n", pFileName); */
- phy_ParseBBPgParaFile(Adapter, pHalData->para_file_buf);
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-int PHY_ConfigRFWithParaFile(
- struct adapter *Adapter, char *pFileName, u8 eRFPath
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
- u16 i;
- char *pBuf = NULL;
- u32 *pBufLen = NULL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_PARA_FILE))
- return rtStatus;
-
- switch (eRFPath) {
- case ODM_RF_PATH_A:
- pBuf = pHalData->rf_radio_a;
- pBufLen = &pHalData->rf_radio_a_len;
- break;
- case ODM_RF_PATH_B:
- pBuf = pHalData->rf_radio_b;
- pBufLen = &pHalData->rf_radio_b_len;
- break;
- default:
- DBG_871X("Unknown RF path!! %d\r\n", eRFPath);
- break;
- }
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pBuf = vzalloc(rlen);
- if (pBuf) {
- memcpy(pBuf, pHalData->para_file_buf, rlen);
- *pBufLen = rlen;
-
- switch (eRFPath) {
- case ODM_RF_PATH_A:
- pHalData->rf_radio_a = pBuf;
- break;
- case ODM_RF_PATH_B:
- pHalData->rf_radio_b = pBuf;
- break;
- }
- } else
- DBG_871X("%s(): eRFPath =%d alloc fail !\n", __func__, eRFPath);
- }
- }
- } else {
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
-
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe) /* Deay specific ms. Only RF configuration require delay. */
- msleep(50);
- else if (u4bRegOffset == 0xfd) {
- /* mdelay(5); */
- for (i = 0; i < 100; i++)
- udelay(MAX_STALL_TIME);
- } else if (u4bRegOffset == 0xfc) {
- /* mdelay(1); */
- for (i = 0; i < 20; i++)
- udelay(MAX_STALL_TIME);
- } else if (u4bRegOffset == 0xfb)
- udelay(50);
- else if (u4bRegOffset == 0xfa)
- udelay(5);
- else if (u4bRegOffset == 0xf9)
- udelay(1);
- else if (u4bRegOffset == 0xffff)
- break;
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_SetRFReg(Adapter, eRFPath, u4bRegOffset, bRFRegOffsetMask, u4bRegValue);
-
- /* Temp add, for frequency lock, if no delay, that may cause */
- /* frequency shift, ex: 2412MHz => 2417MHz */
- /* If frequency shift, the following action may works. */
- /* Fractional-N table in radio_a.txt */
- /* 0x2a 0x00001 channel 1 */
- /* 0x2b 0x00808 frequency divider. */
- /* 0x2b 0x53333 */
- /* 0x2c 0x0000c */
- udelay(1);
- }
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static void initDeltaSwingIndexTables(
- struct adapter *Adapter,
- char *Band,
- char *Path,
- char *Sign,
- char *Channel,
- char *Rate,
- char *Data
-)
-{
- #define STR_EQUAL_5G(_band, _path, _sign, _rate, _chnl) \
- ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
- (strcmp(Rate, _rate) == 0) && (strcmp(Channel, _chnl) == 0)\
- )
- #define STR_EQUAL_2G(_band, _path, _sign, _rate) \
- ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
- (strcmp(Rate, _rate) == 0)\
- )
-
- #define STORE_SWING_TABLE(_array, _iteratedIdx) \
- for (token = strsep(&Data, delim); token != NULL; token = strsep(&Data, delim)) {\
- sscanf(token, "%d", &idx);\
- _array[_iteratedIdx++] = (u8)idx;\
- } \
-
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
- PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
- u32 j = 0;
- char *token;
- char delim[] = ",";
- u32 idx = 0;
-
- /* DBG_871X("===>initDeltaSwingIndexTables(): Band: %s;\nPath: %s;\nSign: %s;\nChannel: %s;\nRate: %s;\n, Data: %s;\n", */
- /* Band, Path, Sign, Channel, Rate, Data); */
-
- if (STR_EQUAL_2G("2G", "A", "+", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_P, j);
- } else if (STR_EQUAL_2G("2G", "A", "-", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_N, j);
- } else if (STR_EQUAL_2G("2G", "B", "+", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_P, j);
- } else if (STR_EQUAL_2G("2G", "B", "-", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_N, j);
- } else if (STR_EQUAL_2G("2G", "A", "+", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P, j);
- } else if (STR_EQUAL_2G("2G", "A", "-", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_N, j);
- } else if (STR_EQUAL_2G("2G", "B", "+", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P, j);
- } else if (STR_EQUAL_2G("2G", "B", "-", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N, j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[0], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[0], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[0], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[0], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[1], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[1], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[1], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[1], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[2], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[2], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[2], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[2], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[3], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[3], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[3], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[3], j);
- } else
- DBG_871X("===>initDeltaSwingIndexTables(): The input is invalid!!\n");
-}
-
-int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_TRACK_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->rf_tx_pwr_track_len == 0) && !pHalData->rf_tx_pwr_track) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->rf_tx_pwr_track = vzalloc(rlen);
- if (pHalData->rf_tx_pwr_track) {
- memcpy(pHalData->rf_tx_pwr_track, pHalData->para_file_buf, rlen);
- pHalData->rf_tx_pwr_track_len = rlen;
- } else
- DBG_871X("%s rf_tx_pwr_track alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->rf_tx_pwr_track_len != 0) && (pHalData->rf_tx_pwr_track != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_track, pHalData->rf_tx_pwr_track_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
-
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- char band[5] = "", path[5] = "", sign[5] = "";
- char chnl[5] = "", rate[10] = "";
- char data[300] = ""; /* 100 is too small */
- const int len = strlen(szLine);
- int i;
-
- if (len < 10 || szLine[0] != '[')
- continue;
-
- strncpy(band, szLine+1, 2);
- strncpy(path, szLine+5, 1);
- strncpy(sign, szLine+8, 1);
-
- i = 10; /* szLine+10 */
- if (!ParseQualifiedString(szLine, &i, rate, '[', ']')) {
- /* DBG_871X("Fail to parse rate!\n"); */
- }
- if (!ParseQualifiedString(szLine, &i, chnl, '[', ']')) {
- /* DBG_871X("Fail to parse channel group!\n"); */
- }
- while (i < len && szLine[i] != '{')
- i++;
- if (!ParseQualifiedString(szLine, &i, data, '{', '}')) {
- /* DBG_871X("Fail to parse data!\n"); */
- }
-
- initDeltaSwingIndexTables(Adapter, band, path, sign, chnl, rate, data);
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static int phy_ParsePowerLimitTableFile(struct adapter *Adapter, char *buffer)
-{
- u32 i = 0, forCnt = 0;
- u8 loadingStage = 0, limitValue = 0, fraction = 0;
- char *szLine, *ptmp;
- int rtStatus = _SUCCESS;
- char band[10], bandwidth[10], rateSection[10],
- regulation[TXPWR_LMT_MAX_REGULATION_NUM][10], rfPath[10], colNumBuf[10];
- u8 colNum = 0;
-
- DBG_871X("===>phy_ParsePowerLimitTableFile()\n");
-
- if (Adapter->registrypriv.RegDecryptCustomFile == 1)
- phy_DecryptBBPgParaFile(Adapter, buffer);
-
- ptmp = buffer;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- /* skip comment */
- if (IsCommentString(szLine)) {
- continue;
- }
-
- if (loadingStage == 0) {
- for (forCnt = 0; forCnt < TXPWR_LMT_MAX_REGULATION_NUM; ++forCnt)
- memset((void *) regulation[forCnt], 0, 10);
-
- memset((void *) band, 0, 10);
- memset((void *) bandwidth, 0, 10);
- memset((void *) rateSection, 0, 10);
- memset((void *) rfPath, 0, 10);
- memset((void *) colNumBuf, 0, 10);
-
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- szLine[--i] = ' '; /* return the space in front of the regulation info */
-
- /* Parse the label of the table */
- if (!ParseQualifiedString(szLine, &i, band, ' ', ',')) {
- DBG_871X("Fail to parse band!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, bandwidth, ' ', ',')) {
- DBG_871X("Fail to parse bandwidth!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, rfPath, ' ', ',')) {
- DBG_871X("Fail to parse rf path!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, rateSection, ' ', ',')) {
- DBG_871X("Fail to parse rate!\n");
- return _FAIL;
- }
-
- loadingStage = 1;
- } else if (loadingStage == 1) {
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (!eqNByte((u8 *)(szLine + i), (u8 *)("START"), 5)) {
- DBG_871X("Lost \"## START\" label\n");
- return _FAIL;
- }
-
- loadingStage = 2;
- } else if (loadingStage == 2) {
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (!ParseQualifiedString(szLine, &i, colNumBuf, '#', '#')) {
- DBG_871X("Fail to parse column number!\n");
- return _FAIL;
- }
-
- if (!GetU1ByteIntegerFromStringInDecimal(colNumBuf, &colNum))
- return _FAIL;
-
- if (colNum > TXPWR_LMT_MAX_REGULATION_NUM) {
- DBG_871X(
- "invalid col number %d (greater than max %d)\n",
- colNum, TXPWR_LMT_MAX_REGULATION_NUM
- );
- return _FAIL;
- }
-
- for (forCnt = 0; forCnt < colNum; ++forCnt) {
- u8 regulation_name_cnt = 0;
-
- /* skip the space */
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- while (szLine[i] != ' ' && szLine[i] != '\t' && szLine[i] != '\0')
- regulation[forCnt][regulation_name_cnt++] = szLine[i++];
- /* DBG_871X("regulation %s!\n", regulation[forCnt]); */
-
- if (regulation_name_cnt == 0) {
- DBG_871X("invalid number of regulation!\n");
- return _FAIL;
- }
- }
-
- loadingStage = 3;
- } else if (loadingStage == 3) {
- char channel[10] = {0}, powerLimit[10] = {0};
- u8 cnt = 0;
-
- /* the table ends */
- if (szLine[0] == '#' && szLine[1] == '#') {
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (eqNByte((u8 *)(szLine + i), (u8 *)("END"), 3)) {
- loadingStage = 0;
- continue;
- } else {
- DBG_871X("Wrong format\n");
- DBG_871X("<===== phy_ParsePowerLimitTableFile()\n");
- return _FAIL;
- }
- }
-
- if ((szLine[0] != 'c' && szLine[0] != 'C') ||
- (szLine[1] != 'h' && szLine[1] != 'H')) {
- DBG_871X("Meet wrong channel => power limt pair\n");
- continue;
- }
- i = 2;/* move to the location behind 'h' */
-
- /* load the channel number */
- cnt = 0;
- while (szLine[i] >= '0' && szLine[i] <= '9') {
- channel[cnt] = szLine[i];
- ++cnt;
- ++i;
- }
- /* DBG_871X("chnl %s!\n", channel); */
-
- for (forCnt = 0; forCnt < colNum; ++forCnt) {
- /* skip the space between channel number and the power limit value */
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- /* load the power limit value */
- cnt = 0;
- fraction = 0;
- memset((void *) powerLimit, 0, 10);
- while ((szLine[i] >= '0' && szLine[i] <= '9') || szLine[i] == '.') {
- if (szLine[i] == '.') {
- if ((szLine[i+1] >= '0' && szLine[i+1] <= '9')) {
- fraction = szLine[i+1];
- i += 2;
- } else {
- DBG_871X("Wrong fraction in TXPWR_LMT.txt\n");
- return _FAIL;
- }
-
- break;
- }
-
- powerLimit[cnt] = szLine[i];
- ++cnt;
- ++i;
- }
-
- if (powerLimit[0] == '\0') {
- powerLimit[0] = '6';
- powerLimit[1] = '3';
- i += 2;
- } else {
- if (!GetU1ByteIntegerFromStringInDecimal(powerLimit, &limitValue))
- return _FAIL;
-
- limitValue *= 2;
- cnt = 0;
- if (fraction == '5')
- ++limitValue;
-
- /* the value is greater or equal to 100 */
- if (limitValue >= 100) {
- powerLimit[cnt++] = limitValue/100 + '0';
- limitValue %= 100;
-
- if (limitValue >= 10) {
- powerLimit[cnt++] = limitValue/10 + '0';
- limitValue %= 10;
- } else
- powerLimit[cnt++] = '0';
-
- powerLimit[cnt++] = limitValue + '0';
- } else if (limitValue >= 10) { /* the value is greater or equal to 10 */
- powerLimit[cnt++] = limitValue/10 + '0';
- limitValue %= 10;
- powerLimit[cnt++] = limitValue + '0';
- }
- /* the value is less than 10 */
- else
- powerLimit[cnt++] = limitValue + '0';
-
- powerLimit[cnt] = '\0';
- }
-
- /* DBG_871X("ch%s => %s\n", channel, powerLimit); */
-
- /* store the power limit value */
- PHY_SetTxPowerLimit(Adapter, (u8 *)regulation[forCnt], (u8 *)band,
- (u8 *)bandwidth, (u8 *)rateSection, (u8 *)rfPath, (u8 *)channel, (u8 *)powerLimit);
-
- }
- } else {
- DBG_871X("Abnormal loading stage in phy_ParsePowerLimitTableFile()!\n");
- rtStatus = _FAIL;
- break;
- }
- }
-
- DBG_871X("<===phy_ParsePowerLimitTableFile()\n");
- return rtStatus;
-}
-
-int PHY_ConfigRFWithPowerLimitTableParaFile(
- struct adapter *Adapter, char *pFileName
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_LMT_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->rf_tx_pwr_lmt_len == 0) && !pHalData->rf_tx_pwr_lmt) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->rf_tx_pwr_lmt = vzalloc(rlen);
- if (pHalData->rf_tx_pwr_lmt) {
- memcpy(pHalData->rf_tx_pwr_lmt, pHalData->para_file_buf, rlen);
- pHalData->rf_tx_pwr_lmt_len = rlen;
- } else
- DBG_871X("%s rf_tx_pwr_lmt alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->rf_tx_pwr_lmt_len != 0) && (pHalData->rf_tx_pwr_lmt != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_lmt, pHalData->rf_tx_pwr_lmt_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s ok\n", __func__, pFileName); */
- rtStatus = phy_ParsePowerLimitTableFile(Adapter, pHalData->para_file_buf);
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-void phy_free_filebuf(struct adapter *padapter)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->mac_reg)
- vfree(pHalData->mac_reg);
- if (pHalData->bb_phy_reg)
- vfree(pHalData->bb_phy_reg);
- if (pHalData->bb_agc_tab)
- vfree(pHalData->bb_agc_tab);
- if (pHalData->bb_phy_reg_pg)
- vfree(pHalData->bb_phy_reg_pg);
- if (pHalData->bb_phy_reg_mp)
- vfree(pHalData->bb_phy_reg_mp);
- if (pHalData->rf_radio_a)
- vfree(pHalData->rf_radio_a);
- if (pHalData->rf_radio_b)
- vfree(pHalData->rf_radio_b);
- if (pHalData->rf_tx_pwr_track)
- vfree(pHalData->rf_tx_pwr_track);
- if (pHalData->rf_tx_pwr_lmt)
- vfree(pHalData->rf_tx_pwr_lmt);
-
-}
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 70d98c58ca97..40fe43c62c45 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -1074,7 +1074,6 @@ void odm_FAThresholdCheck(
dm_FA_thres[1] = 4000;
dm_FA_thres[2] = 5000;
}
- return;
}
u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 7760fd0eb6c9..71b5a50b6ef6 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -2071,8 +2071,6 @@ static void ConstructBtNullFunctionData(
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 pktlen;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
u8 bssid[ETH_ALEN];
@@ -2080,8 +2078,6 @@ static void ConstructBtNullFunctionData(
FUNC_ADPT_ARG(padapter), bQoS, bEosp, bForcePowerSave);
pwlanhdr = (struct ieee80211_hdr *)pframe;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
if (!StaAddr) {
memcpy(bssid, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -2122,12 +2118,9 @@ static void ConstructBtNullFunctionData(
static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
struct xmit_frame *pcmdframe;
struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
u32 BeaconLength = 0;
u32 BTQosNullLength = 0;
u8 *ReservedPagePacket;
@@ -2140,10 +2133,7 @@ static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
/* DBG_8192C("+" FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter)); */
- pHalData = GET_HAL_DATA(padapter);
pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
TxDescLen = TXDESC_SIZE;
TxDescOffset = TXDESC_OFFSET;
PageSize = PAGE_SIZE_TX_8723B;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index c514cb735afd..650fbedd34e8 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -188,7 +188,8 @@ void rtl8723b_HalDmWatchDog(struct adapter *Adapter)
bBtDisabled = hal_btcoex_IsBtDisabled(Adapter);
- ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED, ((bBtDisabled == true)?false:true));
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED,
+ !bBtDisabled);
ODM_DMWatchdog(&pHalData->odmpriv);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index faeaf24fa833..66127f6c8e4d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -2234,12 +2234,8 @@ void rtl8723b_set_hal_ops(struct hal_ops *pHalFunc)
void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
u8 val;
-
- pHalData = GET_HAL_DATA(padapter);
-
val = rtw_read8(padapter, REG_LEDCFG2);
 /* Let 8051 take control of antenna setting */
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
@@ -3053,7 +3049,6 @@ static void rtl8723b_fill_default_txdesc(
{
struct adapter *padapter;
struct hal_com_data *pHalData;
- struct dm_priv *pdmpriv;
struct mlme_ext_priv *pmlmeext;
struct mlme_ext_info *pmlmeinfo;
struct pkt_attrib *pattrib;
@@ -3064,7 +3059,6 @@ static void rtl8723b_fill_default_txdesc(
padapter = pxmitframe->padapter;
pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
pmlmeext = &padapter->mlmeextpriv;
pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -3773,7 +3767,6 @@ void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length)
process_c2h_event(padapter, &C2hEvent, tmpBuf);
/* c2h_handler_8723b(padapter,&C2hEvent); */
- return;
}
void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
@@ -4157,9 +4150,8 @@ void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
break;
}
- /* The value of ((usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B) */
- /* is getting the upper integer. */
- usNavUpper = (usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B;
+ usNavUpper = DIV_ROUND_UP(usNavUpper,
+ HAL_NAV_UPPER_UNIT_8723B);
rtw_write8(padapter, REG_NAV_UPPER, (u8)usNavUpper);
}
break;
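The removed comment described open-coded ceiling division; DIV_ROUND_UP() from <linux/kernel.h> expands to the same expression, ((n) + (d) - 1) / (d), for positive integers. A short illustration of the equivalence:

	/* DIV_ROUND_UP(n, d) == ((n) + (d) - 1) / (d), e.g. with
	 * d = HAL_NAV_UPPER_UNIT_8723B:
	 *   DIV_ROUND_UP(0, d)     == 0
	 *   DIV_ROUND_UP(1, d)     == 1
	 *   DIV_ROUND_UP(d, d)     == 1
	 *   DIV_ROUND_UP(d + 1, d) == 2
	 */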
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 6df2b58bdc67..cf23414d7224 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -362,24 +362,10 @@ void PHY_SetRFReg_8723B(
*/
s32 PHY_MACConfig8723B(struct adapter *Adapter)
{
- int rtStatus = _SUCCESS;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- s8 *pszMACRegFile;
- s8 sz8723MACRegFile[] = RTL8723B_PHY_MACREG;
-
-
- pszMACRegFile = sz8723MACRegFile;
-
- /* */
- /* Config MAC */
- /* */
- rtStatus = phy_ConfigMACWithParaFile(Adapter, pszMACRegFile);
- if (rtStatus == _FAIL) {
- ODM_ReadAndConfig_MP_8723B_MAC_REG(&pHalData->odmpriv);
- rtStatus = _SUCCESS;
- }
- return rtStatus;
+ ODM_ReadAndConfig_MP_8723B_MAC_REG(&pHalData->odmpriv);
+ return _SUCCESS;
}
/**
@@ -427,17 +413,6 @@ static void phy_InitBBRFRegisterDefinition(struct adapter *Adapter)
static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rtStatus = _SUCCESS;
- u8 sz8723BBRegFile[] = RTL8723B_PHY_REG;
- u8 sz8723AGCTableFile[] = RTL8723B_AGC_TAB;
- u8 sz8723BBBRegPgFile[] = RTL8723B_PHY_REG_PG;
- u8 sz8723BRFTxPwrLmtFile[] = RTL8723B_TXPWR_LMT;
- u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL, *pszBBRegPgFile = NULL, *pszRFTxPwrLmtFile = NULL;
-
- pszBBRegFile = sz8723BBRegFile;
- pszAGCTableFile = sz8723AGCTableFile;
- pszBBRegPgFile = sz8723BBBRegPgFile;
- pszRFTxPwrLmtFile = sz8723BRFTxPwrLmtFile;
/* Read Tx Power Limit File */
PHY_InitTxPowerLimit(Adapter);
@@ -445,30 +420,14 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
Adapter->registrypriv.RegEnableTxPowerLimit == 1 ||
(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
) {
- if (PHY_ConfigRFWithPowerLimitTableParaFile(Adapter, pszRFTxPwrLmtFile) == _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_TXPWR_LMT, (ODM_RF_RADIO_PATH_E)0))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_871X("%s():Read Tx power limit fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
+ ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_RF_TXPWR_LMT, 0);
}
/* */
/* 1. Read PHY_REG.TXT BB INIT!! */
/* */
- if (phy_ConfigBBWithParaFile(Adapter, pszBBRegFile, CONFIG_BB_PHY_REG) ==
- _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():Write BB Reg Fail!!", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG);
 /* If EEPROM or EFUSE autoload OK, we must config by PHY_REG_PG.txt */
PHY_InitTxPowerByRate(Adapter);
@@ -476,11 +435,8 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
Adapter->registrypriv.RegEnableTxPowerByRate == 1 ||
(Adapter->registrypriv.RegEnableTxPowerByRate == 2 && pHalData->EEPROMRegulatory != 2)
) {
- if (phy_ConfigBBWithPgParaFile(Adapter, pszBBRegPgFile) ==
- _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
- rtStatus = _FAIL;
- }
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_BB_PHY_REG_PG);
if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE)
PHY_TxPowerByRateConfiguration(Adapter);
@@ -490,29 +446,14 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
)
PHY_ConvertTxPowerLimitToPowerIndex(Adapter);
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():BB_PG Reg Fail!!\n", __func__);
- }
}
/* */
/* 2. Read BB AGC table Initialization */
/* */
- if (phy_ConfigBBWithParaFile(Adapter, pszAGCTableFile,
- CONFIG_BB_AGC_TAB) == _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():AGC Table Fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
-
-phy_BB8190_Config_ParaFile_Fail:
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB);
- return rtStatus;
+ return _SUCCESS;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
index d0ffe0af5339..aafceaf9b139 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
@@ -85,19 +85,8 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
u32 u4RegValue = 0;
u8 eRFPath;
struct bb_register_def *pPhyReg;
-
- int rtStatus = _SUCCESS;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- static char sz8723RadioAFile[] = RTL8723B_PHY_RADIO_A;
- static char sz8723RadioBFile[] = RTL8723B_PHY_RADIO_B;
- static s8 sz8723BTxPwrTrackFile[] = RTL8723B_TXPWR_TRACK;
- char *pszRadioAFile, *pszRadioBFile, *pszTxPwrTrackFile;
-
- pszRadioAFile = sz8723RadioAFile;
- pszRadioBFile = sz8723RadioBFile;
- pszTxPwrTrackFile = sz8723BTxPwrTrackFile;
-
/* 3----------------------------------------------------------------- */
/* 3 <2> Initialize RF */
/* 3----------------------------------------------------------------- */
@@ -136,21 +125,11 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
 /*----Initialize RF from configuration file----*/
switch (eRFPath) {
case RF_PATH_A:
- if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile,
- eRFPath) == _FAIL) {
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
- rtStatus = _FAIL;
- }
- break;
case RF_PATH_B:
- if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile,
- eRFPath) == _FAIL) {
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
- rtStatus = _FAIL;
- }
+ ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_RF_RADIO, eRFPath);
break;
case RF_PATH_C:
- break;
case RF_PATH_D:
break;
}
@@ -166,28 +145,16 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
break;
}
-
- if (rtStatus != _SUCCESS) {
- /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
- goto phy_RF6052_Config_ParaFile_Fail;
- }
-
}
/* 3 ----------------------------------------------------------------- */
/* 3 Configuration of Tx Power Tracking */
/* 3 ----------------------------------------------------------------- */
- if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) ==
- _FAIL) {
- ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
- }
+ ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
/* RT_TRACE(COMP_INIT, DBG_LOUD, ("<---phy_RF6052_Config_ParaFile()\n")); */
- return rtStatus;
-
-phy_RF6052_Config_ParaFile_Fail:
- return rtStatus;
+ return _SUCCESS;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 0f3301091258..1e8b61443408 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -179,8 +179,6 @@ static void rtl8723bs_c2h_packet_handler(struct adapter *padapter,
kfree(tmp);
/* DBG_871X("-%s res(%d)\n", __func__, res); */
-
- return;
}
static inline union recv_frame *try_alloc_recvframe(struct recv_priv *precvpriv,
@@ -232,7 +230,7 @@ static inline bool pkt_exceeds_tail(struct recv_priv *precvpriv,
return false;
}
-static void rtl8723bs_recv_tasklet(void *priv)
+static void rtl8723bs_recv_tasklet(unsigned long priv)
{
struct adapter *padapter;
struct hal_com_data *p_hal_data;
@@ -246,7 +244,7 @@ static void rtl8723bs_recv_tasklet(void *priv)
_pkt *pkt_copy = NULL;
u8 shift_sz = 0, rx_report_sz = 0;
- padapter = priv;
+ padapter = (struct adapter *)priv;
p_hal_data = GET_HAL_DATA(padapter);
precvpriv = &padapter->recvpriv;
recv_buf_queue = &precvpriv->recv_buf_pending_queue;
@@ -464,11 +462,8 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
goto initbuferror;
/* 3 2. init tasklet */
- tasklet_init(
- &precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8723bs_recv_tasklet,
- (unsigned long)padapter
- );
+ tasklet_init(&precvpriv->recv_tasklet, rtl8723bs_recv_tasklet,
+ (unsigned long)padapter);
goto exit;
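The tasklet hunk above compiles cleanly because the classic tasklet API expects a callback of type void (*)(unsigned long); once the handler itself takes an unsigned long, the function-pointer cast at the call site can go. A sketch of that interface (pre-5.9 kernels, before tasklet_setup(); names are illustrative):

#include <linux/interrupt.h>

/* Bottom half: the cookie passed at init time arrives as data. */
static void demo_rx_bh(unsigned long data)
{
	struct adapter *padapter = (struct adapter *)data;
	/* ... deferred receive processing ... */
}

/* Init: the handler matches the expected type, so no cast is needed. */
tasklet_init(&precvpriv->recv_tasklet, demo_rx_bh,
	     (unsigned long)padapter);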
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 0f5dd4629e6f..e813382e78a6 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -570,14 +570,11 @@ static void HalRxAggr8723BSdio(struct adapter *padapter)
static void sdio_AggSettingRxUpdate(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
u8 valueDMA;
u8 valueRxAggCtrl = 0;
u8 aggBurstNum = 3; /* 0:1, 1:2, 2:3, 3:4 */
u8 aggBurstSize = 0; /* 0:1K, 1:512Byte, 2:256Byte... */
- pHalData = GET_HAL_DATA(padapter);
-
valueDMA = rtw_read8(padapter, REG_TRXDMA_CTRL);
valueDMA |= RXDMA_AGG_EN;
rtw_write8(padapter, REG_TRXDMA_CTRL, valueDMA);
@@ -713,13 +710,11 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
s32 ret;
struct hal_com_data *pHalData;
struct pwrctrl_priv *pwrctrlpriv;
- struct registry_priv *pregistrypriv;
u32 NavUpper = WiFiNavUpperUs;
u8 u1bTmp;
pHalData = GET_HAL_DATA(padapter);
pwrctrlpriv = adapter_to_pwrctl(padapter);
- pregistrypriv = &padapter->registrypriv;
if (
adapter_to_pwrctl(padapter)->bips_processing == true &&
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 301d327d0624..b6b4adb5a28a 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -15,7 +15,7 @@
/* */
/* Description: */
-/* The following mapping is for SDIO host local register space. */
+/* The following mapping is for SDIO host local register space. */
/* */
/* Created by Roger, 2011.01.31. */
/* */
@@ -61,7 +61,6 @@ static u8 get_deviceid(u32 addr)
u8 devide_id;
u16 pseudo_id;
-
pseudo_id = (u16)(addr >> 16);
switch (pseudo_id) {
case 0x1025:
@@ -72,10 +71,6 @@ static u8 get_deviceid(u32 addr)
devide_id = WLAN_IOREG_DEVICE_ID;
break;
-/* case 0x1027: */
-/* devide_id = SDIO_FIRMWARE_FIFO; */
-/* break; */
-
case 0x1031:
devide_id = WLAN_TX_HIQ_DEVICE_ID;
break;
@@ -93,7 +88,6 @@ static u8 get_deviceid(u32 addr)
break;
default:
-/* devide_id = (u8)((addr >> 13) & 0xF); */
devide_id = WLAN_IOREG_DEVICE_ID;
break;
}
@@ -111,7 +105,6 @@ static u32 _cvrt2ftaddr(const u32 addr, u8 *pdevice_id, u16 *poffset)
u16 offset;
u32 ftaddr;
-
device_id = get_deviceid(addr);
offset = 0;
@@ -427,20 +420,16 @@ static u32 sdio_read_port(
struct adapter *adapter;
struct sdio_data *psdio;
struct hal_com_data *hal;
- u32 oldcnt;
s32 err;
-
adapter = intfhdl->padapter;
psdio = &adapter_to_dvobj(adapter)->intf_data;
hal = GET_HAL_DATA(adapter);
HalSdioGetCmdAddr8723BSdio(adapter, addr, hal->SdioRxFIFOCnt++, &addr);
- oldcnt = cnt;
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
-/* cnt = sdio_align_size(cnt); */
err = _sd_read(intfhdl, addr, cnt, mem);
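The _RND() call above pads the transfer length to a multiple of the SDIO block size once it exceeds one block. Assuming _RND is the driver's usual round-up-to-multiple helper, it behaves like this sketch:

/* Round sz up to the next multiple of r (r > 0); e.g. with a 512-byte
 * block length, rnd_up(1000, 512) == 1024. */
static inline u32 rnd_up(u32 sz, u32 r)
{
	return ((sz + (r - 1)) / r) * r;
}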
@@ -490,7 +479,6 @@ static u32 sdio_write_port(
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
-/* cnt = sdio_align_size(cnt); */
err = sd_write(intfhdl, addr, cnt, xmitbuf->pdata);
@@ -538,7 +526,6 @@ static s32 _sdio_local_read(
u8 *tmpbuf;
u32 n;
-
intfhdl = &adapter->iopriv.intf;
HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
@@ -711,7 +698,6 @@ static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
u32 hisr, himr;
u8 val8, hisr_len;
-
if (!phisr)
return false;
@@ -737,73 +723,48 @@ static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
}
/* */
-/* Description: */
-/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
+/* Description: */
+/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void InitInterrupt8723BSdio(struct adapter *adapter)
{
struct hal_com_data *haldata;
-
haldata = GET_HAL_DATA(adapter);
- haldata->sdio_himr = (u32)( \
- SDIO_HIMR_RX_REQUEST_MSK |
- SDIO_HIMR_AVAL_MSK |
-/* SDIO_HIMR_TXERR_MSK | */
-/* SDIO_HIMR_RXERR_MSK | */
-/* SDIO_HIMR_TXFOVW_MSK | */
-/* SDIO_HIMR_RXFOVW_MSK | */
-/* SDIO_HIMR_TXBCNOK_MSK | */
-/* SDIO_HIMR_TXBCNERR_MSK | */
-/* SDIO_HIMR_BCNERLY_INT_MSK | */
-/* SDIO_HIMR_C2HCMD_MSK | */
-/* SDIO_HIMR_HSISR_IND_MSK | */
-/* SDIO_HIMR_GTINT3_IND_MSK | */
-/* SDIO_HIMR_GTINT4_IND_MSK | */
-/* SDIO_HIMR_PSTIMEOUT_MSK | */
-/* SDIO_HIMR_OCPINT_MSK | */
-/* SDIO_HIMR_ATIMEND_MSK | */
-/* SDIO_HIMR_ATIMEND_E_MSK | */
-/* SDIO_HIMR_CTWEND_MSK | */
- 0);
+ haldata->sdio_himr = (u32)(SDIO_HIMR_RX_REQUEST_MSK |
+ SDIO_HIMR_AVAL_MSK |
+ 0);
}
/* */
-/* Description: */
-/* Initialize System Host Interrupt Mask configuration variables for future use. */
+/* Description: */
+/* Initialize System Host Interrupt Mask configuration variables for future use. */
/* */
-/* Created by Roger, 2011.08.03. */
+/* Created by Roger, 2011.08.03. */
/* */
void InitSysInterrupt8723BSdio(struct adapter *adapter)
{
struct hal_com_data *haldata;
-
haldata = GET_HAL_DATA(adapter);
- haldata->SysIntrMask = ( \
-/* HSIMR_GPIO12_0_INT_EN | */
-/* HSIMR_SPS_OCP_INT_EN | */
-/* HSIMR_RON_INT_EN | */
-/* HSIMR_PDNINT_EN | */
-/* HSIMR_GPIO9_INT_EN | */
- 0);
+ haldata->SysIntrMask = (0);
}
/* */
-/* Description: */
-/* Enalbe SDIO Host Interrupt Mask configuration on SDIO local domain. */
+/* Description: */
+/* Enable SDIO Host Interrupt Mask configuration on SDIO local domain. */
/* */
-/* Assumption: */
-/* 1. Using SDIO Local register ONLY for configuration. */
-/* 2. PASSIVE LEVEL */
+/* Assumption: */
+/* 1. Using SDIO Local register ONLY for configuration. */
+/* 2. PASSIVE LEVEL */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void EnableInterrupt8723BSdio(struct adapter *adapter)
{
@@ -849,13 +810,13 @@ void EnableInterrupt8723BSdio(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
+/* Description: */
+/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void DisableInterrupt8723BSdio(struct adapter *adapter)
{
@@ -866,13 +827,13 @@ void DisableInterrupt8723BSdio(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Using 0x100 to check the power status of FW. */
+/* Description: */
+/* Using 0x100 to check the power status of FW. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Isaac, 2013.09.10. */
+/* Created by Isaac, 2013.09.10. */
/* */
u8 CheckIPSStatus(struct adapter *adapter)
{
@@ -896,7 +857,6 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
struct recv_priv *recv_priv;
struct recv_buf *recvbuf;
-
/* Patch for some SDIO Host 4 bytes issue */
/* ex. RK3188 */
readsize = RND4(size);
@@ -938,7 +898,6 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
return NULL;
}
-
/* 3 4. init recvbuf */
recvbuf->len = size;
recvbuf->phead = recvbuf->pskb->head;
@@ -972,7 +931,6 @@ void sd_int_dpc(struct adapter *adapter)
struct intf_hdl *intfhdl = &adapter->iopriv.intf;
struct pwrctrl_priv *pwrctl;
-
hal = GET_HAL_DATA(adapter);
dvobj = adapter_to_dvobj(adapter);
pwrctl = dvobj_to_pwrctl(dvobj);
@@ -992,7 +950,6 @@ void sd_int_dpc(struct adapter *adapter)
report.state = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B);
- /* cpwm_int_hdl(adapter, &report); */
_set_workitem(&(pwrctl->cpwm_event));
}
@@ -1029,7 +986,7 @@ void sd_int_dpc(struct adapter *adapter)
if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
/* Handle CCX report here */
rtw_hal_c2h_handler(adapter, (u8 *)c2h_evt);
- kfree((u8 *)c2h_evt);
+ kfree(c2h_evt);
} else {
rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
}
@@ -1049,13 +1006,11 @@ void sd_int_dpc(struct adapter *adapter)
if (hal->sdio_hisr & SDIO_HISR_RXERR)
DBG_8192C("%s: Rx Error\n", __func__);
-
if (hal->sdio_hisr & SDIO_HISR_RX_REQUEST) {
struct recv_buf *recvbuf;
int alloc_fail_time = 0;
u32 hisr;
-/* DBG_8192C("%s: RX Request, size =%d\n", __func__, hal->SdioRxFIFOSize); */
hal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
do {
hal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(adapter, SDIO_REG_RX0_REQ_LEN);
@@ -1090,7 +1045,6 @@ void sd_int_hdl(struct adapter *adapter)
{
struct hal_com_data *hal;
-
if (
(adapter->bDriverStopped) || (adapter->bSurpriseRemoved)
)
@@ -1120,27 +1074,24 @@ void sd_int_hdl(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Query SDIO Local register to query current the number of Free TxPacketBuffer page. */
+/* Description: */
+/* Query SDIO Local register for the current number of free TxPacketBuffer pages. */
/* */
-/* Assumption: */
-/* 1. Running at PASSIVE_LEVEL */
-/* 2. RT_TX_SPINLOCK is NOT acquired. */
+/* Assumption: */
+/* 1. Running at PASSIVE_LEVEL */
+/* 2. RT_TX_SPINLOCK is NOT acquired. */
/* */
-/* Created by Roger, 2011.01.28. */
+/* Created by Roger, 2011.01.28. */
/* */
u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
{
struct hal_com_data *hal;
u32 numof_free_page;
- /* _irql irql; */
-
hal = GET_HAL_DATA(adapter);
numof_free_page = SdioLocalCmd53Read4Byte(adapter, SDIO_REG_FREE_TXPG);
- /* spin_lock_bh(&phal->SdioTxFIFOFreePageLock); */
memcpy(hal->SdioTxFIFOFreePage, &numof_free_page, 4);
RT_TRACE(_module_hci_ops_c_, _drv_notice_,
("%s: Free page for HIQ(%#x), MIDQ(%#x), LOWQ(%#x), PUBQ(%#x)\n",
@@ -1149,14 +1100,13 @@ u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
hal->SdioTxFIFOFreePage[MID_QUEUE_IDX],
hal->SdioTxFIFOFreePage[LOW_QUEUE_IDX],
hal->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]));
- /* spin_unlock_bh(&hal->SdioTxFIFOFreePageLock); */
return true;
}
/* */
-/* Description: */
-/* Query SDIO Local register to get the current number of TX OQT Free Space. */
+/* Description: */
+/* Query SDIO Local register to get the current amount of TX OQT free space. */
/* */
void HalQueryTxOQTBufferStatus8723BSdio(struct adapter *adapter)
{
@@ -1190,7 +1140,6 @@ u8 RecvOnePkt(struct adapter *adapter, u32 size)
recvbuf = sd_recv_rxfifo(adapter, size);
if (recvbuf) {
- /* printk("Completed Recv One Pkt.\n"); */
sd_rxhandler(adapter, recvbuf);
res = true;
} else {
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 8d7fce1e39b7..6ec9087f2eb1 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -197,9 +197,6 @@ struct registry_priv
u8 RFE_Type;
u8 check_fw_ps;
- u8 load_phy_file;
- u8 RegDecryptCustomFile;
-
#ifdef CONFIG_MULTI_VIR_IFACES
u8 ext_iface_num;/* primary/secondary iface is excluded */
#endif
@@ -693,7 +690,6 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
void indicate_wx_scan_complete_event(struct adapter *padapter);
int rtw_change_ifname(struct adapter *padapter, const char *ifname);
-extern char *rtw_phy_file_path;
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
extern int rtw_ht_enable;
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index 9167f1e7827f..e9a3006a3e20 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -219,30 +219,4 @@ struct adapter * Adapter,
u16 ChannelPlan
);
-#define MAX_PARA_FILE_BUF_LEN 25600
-
-#define LOAD_MAC_PARA_FILE BIT0
-#define LOAD_BB_PARA_FILE BIT1
-#define LOAD_BB_PG_PARA_FILE BIT2
-#define LOAD_BB_MP_PARA_FILE BIT3
-#define LOAD_RF_PARA_FILE BIT4
-#define LOAD_RF_TXPWR_TRACK_PARA_FILE BIT5
-#define LOAD_RF_TXPWR_LMT_PARA_FILE BIT6
-
-int phy_ConfigMACWithParaFile(struct adapter *Adapter, char*pFileName);
-
-int phy_ConfigBBWithParaFile(struct adapter *Adapter, char*pFileName, u32 ConfigType);
-
-int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char*pFileName);
-
-int phy_ConfigBBWithMpParaFile(struct adapter *Adapter, char*pFileName);
-
-int PHY_ConfigRFWithParaFile(struct adapter *Adapter, char*pFileName, u8 eRFPath);
-
-int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char*pFileName);
-
-int PHY_ConfigRFWithPowerLimitTableParaFile(struct adapter *Adapter, char*pFileName);
-
-void phy_free_filebuf(struct adapter *padapter);
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index 7d782659a84f..e5e667df6154 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -440,27 +440,6 @@ struct hal_com_data {
u32 SysIntrStatus;
u32 SysIntrMask;
-
- char para_file_buf[MAX_PARA_FILE_BUF_LEN];
- char *mac_reg;
- u32 mac_reg_len;
- char *bb_phy_reg;
- u32 bb_phy_reg_len;
- char *bb_agc_tab;
- u32 bb_agc_tab_len;
- char *bb_phy_reg_pg;
- u32 bb_phy_reg_pg_len;
- char *bb_phy_reg_mp;
- u32 bb_phy_reg_mp_len;
- char *rf_radio_a;
- u32 rf_radio_a_len;
- char *rf_radio_b;
- u32 rf_radio_b_len;
- char *rf_tx_pwr_track;
- u32 rf_tx_pwr_track_len;
- char *rf_tx_pwr_lmt;
- u32 rf_tx_pwr_lmt_len;
-
#ifdef CONFIG_BACKGROUND_NOISE_MONITOR
s16 noise[ODM_MAX_CHANNEL_NUM];
#endif
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index 81a9c19ecc6a..a40cf7b60a69 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -171,10 +171,6 @@ extern void rtw_softap_lock_suspend(void);
extern void rtw_softap_unlock_suspend(void);
#endif
-/* File operation APIs, just for linux now */
-extern int rtw_is_file_readable(char *path);
-extern int rtw_retrive_from_file(char *path, u8 *buf, u32 sz);
-
extern void rtw_free_netdev(struct net_device * netdev);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index c582ede1ac12..a2d9de866c4b 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -127,13 +127,6 @@ static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
netif_tx_stop_all_queues(pnetdev);
}
-static inline void rtw_merge_string(char *dst, int dst_len, char *src1, char *src2)
-{
- int len = 0;
- len += snprintf(dst+len, dst_len - len, "%s", src1);
- len += snprintf(dst+len, dst_len - len, "%s", src2);
-}
-
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index 8f00ced1c697..f36516fa84c7 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -21,21 +21,6 @@
#include "hal_phy_cfg.h"
/* */
-/* RTL8723B From file */
-/* */
-#define RTL8723B_FW_IMG "rtl8723b/FW_NIC.bin"
-#define RTL8723B_FW_WW_IMG "rtl8723b/FW_WoWLAN.bin"
-#define RTL8723B_PHY_REG "rtl8723b/PHY_REG.txt"
-#define RTL8723B_PHY_RADIO_A "rtl8723b/RadioA.txt"
-#define RTL8723B_PHY_RADIO_B "rtl8723b/RadioB.txt"
-#define RTL8723B_TXPWR_TRACK "rtl8723b/TxPowerTrack.txt"
-#define RTL8723B_AGC_TAB "rtl8723b/AGC_TAB.txt"
-#define RTL8723B_PHY_MACREG "rtl8723b/MAC_REG.txt"
-#define RTL8723B_PHY_REG_PG "rtl8723b/PHY_REG_PG.txt"
-#define RTL8723B_PHY_REG_MP "rtl8723b/PHY_REG_MP.txt"
-#define RTL8723B_TXPWR_LMT "rtl8723b/TXPWR_LMT.txt"
-
-/* */
/* RTL8723B From header */
/* */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index fd3cf955c9f8..73e8ec09b6e1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -576,7 +576,6 @@ void read_cam(struct adapter *padapter , u8 entry, u8 *get_key);
/* modify HW only */
void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void _clear_cam_entry(struct adapter *padapter, u8 entry);
-void write_cam_from_cache(struct adapter *adapter, u8 id);
/* modify both HW and cache */
void write_cam(struct adapter *padapter, u8 id, u16 ctrl, u8 *mac, u8 *key);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index f819abb756dc..322cabb97b99 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -165,7 +165,7 @@ static void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
+ sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
+ sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
}
- kfree((u8 *)spt_band);
+ kfree(spt_band);
}
static const struct ieee80211_txrx_stypes
@@ -240,10 +240,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
u16 channel;
u32 freq;
u64 notify_timestamp;
- u16 notify_capability;
- u16 notify_interval;
- u8 *notify_ie;
- size_t notify_ielen;
s32 notify_signal;
u8 *buf = NULL, *pbuf;
size_t len, bssinf_len = 0;
@@ -324,12 +320,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
notify_timestamp = ktime_to_us(ktime_get_boottime());
- notify_interval = le16_to_cpu(*(__le16 *)rtw_get_beacon_interval_from_ie(pnetwork->network.IEs));
- notify_capability = le16_to_cpu(*(__le16 *)rtw_get_capability_from_ie(pnetwork->network.IEs));
-
- notify_ie = pnetwork->network.IEs+_FIXED_IE_LENGTH_;
- notify_ielen = pnetwork->network.IELength-_FIXED_IE_LENGTH_;
-
/* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) */
if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
@@ -1156,7 +1146,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
addkey_end:
- kfree((u8 *)param);
+ kfree(param);
return ret;
@@ -1305,7 +1295,6 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
struct wireless_dev *rtw_wdev = padapter->rtw_wdev;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
int ret = 0;
- u8 change = false;
DBG_871X(FUNC_NDEV_FMT" type =%d\n", FUNC_NDEV_ARG(ndev), type);
@@ -1336,7 +1325,6 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
if (old_type != type)
{
- change = true;
pmlmeext->action_public_rxseq = 0xffff;
pmlmeext->action_public_dialog_token = 0xff;
}
@@ -1410,19 +1398,19 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
struct wireless_dev *pwdev = padapter->rtw_wdev;
struct wiphy *wiphy = pwdev->wiphy;
struct cfg80211_bss *bss = NULL;
- struct wlan_bssid_ex select_network = pnetwork->network;
+ struct wlan_bssid_ex *select_network = &pnetwork->network;
bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
- select_network.MacAddress, select_network.Ssid.Ssid,
- select_network.Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
+ select_network->MacAddress, select_network->Ssid.Ssid,
+ select_network->Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
0/*WLAN_CAPABILITY_ESS*/);
if (bss) {
cfg80211_unlink_bss(wiphy, bss);
- DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__, select_network.Ssid.Ssid);
+ DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__,
+ select_network->Ssid.Ssid);
cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
}
- return;
}
void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
@@ -1513,7 +1501,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
int i;
u8 _status = false;
int ret = 0;
- struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct ndis_802_11_ssid *ssid = NULL;
struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
u8 survey_times =3;
u8 survey_times_for_one_ch =6;
@@ -1604,7 +1592,13 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+ ssid = kzalloc(RTW_SSID_SCAN_AMOUNT * sizeof(struct ndis_802_11_ssid),
+ GFP_KERNEL);
+ if (!ssid) {
+ ret = -ENOMEM;
+ goto check_need_indicate_scan_done;
+ }
+
/* parsing request ssids, n_ssids */
for (i = 0; i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
#ifdef DEBUG_CFG80211
@@ -1648,6 +1642,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
}
check_need_indicate_scan_done:
+ kfree(ssid);
if (need_indicate_scan_done)
{
rtw_cfg80211_surveydone_event_callback(padapter);
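The kzalloc() introduced above replaces what had been a sizable on-stack ssid array; kernel stacks are small, so scratch buffers of that size belong on the heap, freed once on the shared exit label. A generic sketch of the pattern (kcalloc() shown as the overflow-checked variant; names are illustrative):

#include <linux/slab.h>

static int demo_scan(size_t n_ssids)
{
	struct ndis_802_11_ssid *ssid;
	int ret = 0;

	/* kcalloc() zeroes the buffer and checks n * size for overflow. */
	ssid = kcalloc(n_ssids, sizeof(*ssid), GFP_KERNEL);
	if (!ssid)
		return -ENOMEM;

	/* ... fill and consume ssid[0..n_ssids-1] ... */

	kfree(ssid);	/* one exit path covers success and failure */
	return ret;
}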
@@ -1798,7 +1793,7 @@ static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv, u32 key
static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t ielen)
{
- u8 *buf = NULL, *pos = NULL;
+ u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
int wpa_ielen = 0;
@@ -1833,7 +1828,6 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
}
- pos = buf;
if (ielen < RSN_HEADER_LEN) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
ret = -1;
@@ -2194,7 +2188,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
ret = -EOPNOTSUPP ;
}
- kfree((u8 *)pwep);
+ kfree(pwep);
if (ret < 0)
goto exit;
@@ -2433,7 +2427,6 @@ void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter, unsigned char
static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
{
- int ret = 0;
int rtap_len;
int qos_len = 0;
int dot11_hdr_len = 24;
@@ -2499,9 +2492,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
DBG_8192C("should be eapol packet\n");
/* Use the real net device to transmit the packet */
- ret = _rtw_xmit_entry(skb, padapter->pnetdev);
-
- return ret;
+ return _rtw_xmit_entry(skb, padapter->pnetdev);
}
else if ((frame_control & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE))
@@ -2647,7 +2638,7 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
out:
if (ret && mon_wdev) {
- kfree((u8 *)mon_wdev);
+ kfree(mon_wdev);
mon_wdev = NULL;
}
@@ -2808,14 +2799,11 @@ static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_beacon_data *info)
{
- int ret = 0;
struct adapter *adapter = (struct adapter *)rtw_netdev_priv(ndev);
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
-
- return ret;
+ return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
}
static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
@@ -3503,7 +3491,7 @@ void rtw_wdev_free(struct wireless_dev *wdev)
wiphy_free(wdev->wiphy);
- kfree((u8 *)wdev);
+ kfree(wdev);
}
void rtw_wdev_unregister(struct wireless_dev *wdev)
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index d1b199e3e5bd..db6528a01229 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2425,19 +2425,13 @@ static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
static int rtw_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
- u32 cnt = 0, wpa_ielen;
+ int wpa_ielen;
+ u32 cnt = 0;
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
@@ -2793,7 +2787,7 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_871X("oper_ch =%d\n", rtw_get_oper_ch(padapter));
DBG_871X("oper_bw =%d\n", rtw_get_oper_bw(padapter));
- DBG_871X("oper_ch_offet =%d\n", rtw_get_oper_choffset(padapter));
+ DBG_871X("oper_ch_offset =%d\n", rtw_get_oper_choffset(padapter));
break;
case 0x05:
@@ -4458,43 +4452,6 @@ static int rtw_pm_set(struct net_device *dev,
return ret;
}
-static int rtw_mp_efuse_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- int err = 0;
- return err;
-}
-
-static int rtw_mp_efuse_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- int err = 0;
- return err;
-}
-
-static int rtw_tdls(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
-
-static int rtw_tdls_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
-
-
-
-
static int rtw_test(
struct net_device *dev,
struct iw_request_info *info,
@@ -4744,7 +4701,7 @@ static iw_handler rtw_private_handler[] = {
rtw_wx_write32, /* 0x00 */
rtw_wx_read32, /* 0x01 */
rtw_drvext_hdl, /* 0x02 */
- rtw_mp_ioctl_hdl, /* 0x03 */
+ NULL, /* 0x03 */
/* for MM DTV platform */
rtw_get_ap_info, /* 0x04 */
@@ -4771,15 +4728,15 @@ static iw_handler rtw_private_handler[] = {
NULL, /* 0x12 */
rtw_p2p_get2, /* 0x13 */
- rtw_tdls, /* 0x14 */
- rtw_tdls_get, /* 0x15 */
+ NULL, /* 0x14 */
+ NULL, /* 0x15 */
rtw_pm_set, /* 0x16 */
rtw_wx_priv_null, /* 0x17 */
rtw_rereg_nd_name, /* 0x18 */
rtw_wx_priv_null, /* 0x19 */
- rtw_mp_efuse_set, /* 0x1A */
- rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1A */
+ NULL, /* 0x1B */
NULL, /* 0x1C is reserved for hostapd */
rtw_test, /* 0x1D */
};
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ec3a75485233..47e984d5b7cb 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -201,24 +201,6 @@ MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable, "0:Disable, 1:Enable, 2: Depend on efuse
module_param(rtw_tx_pwr_by_rate, int, 0644);
MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse");
-char *rtw_phy_file_path = "";
-module_param(rtw_phy_file_path, charp, 0644);
-MODULE_PARM_DESC(rtw_phy_file_path, "The path of phy parameter");
-/* PHY FILE Bit Map */
-/* BIT0 - MAC, 0: non-support, 1: support */
-/* BIT1 - BB, 0: non-support, 1: support */
-/* BIT2 - BB_PG, 0: non-support, 1: support */
-/* BIT3 - BB_MP, 0: non-support, 1: support */
-/* BIT4 - RF, 0: non-support, 1: support */
-/* BIT5 - RF_TXPWR_TRACK, 0: non-support, 1: support */
-/* BIT6 - RF_TXPWR_LMT, 0: non-support, 1: support */
-static int rtw_load_phy_file = (BIT2 | BIT6);
-module_param(rtw_load_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_load_phy_file, "PHY File Bit Map");
-static int rtw_decrypt_phy_file;
-module_param(rtw_decrypt_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_decrypt_phy_file, "Enable Decrypt PHY File");
-
int _netdev_open(struct net_device *pnetdev);
int netdev_open (struct net_device *pnetdev);
static int netdev_close (struct net_device *pnetdev);
@@ -321,8 +303,6 @@ static void loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->bEn_RFE = 1;
registry_par->RFE_Type = 64;
- registry_par->load_phy_file = (u8)rtw_load_phy_file;
- registry_par->RegDecryptCustomFile = (u8)rtw_decrypt_phy_file;
registry_par->qos_opt_enable = (u8)rtw_qos_opt_enable;
registry_par->hiq_filter = (u8)rtw_hiq_filter;
@@ -1141,8 +1121,7 @@ void rtw_ndev_destructor(struct net_device *ndev)
{
DBG_871X(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
- if (ndev->ieee80211_ptr)
- kfree((u8 *)ndev->ieee80211_ptr);
+ kfree(ndev->ieee80211_ptr);
}
void rtw_dev_unload(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 25a80041ce87..f5614e56371e 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -65,142 +65,6 @@ void _rtw_init_queue(struct __queue *pqueue)
spin_lock_init(&(pqueue->lock));
}
-/*
-* Open a file with the specific @param path, @param flag, @param mode
-* @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
-* @param path the path of the file to open
-* @param flag file operation flags, please refer to linux document
-* @param mode please refer to linux document
-* @return Linux specific error code
-*/
-static int openFile(struct file **fpp, char *path, int flag, int mode)
-{
- struct file *fp;
-
- fp = filp_open(path, flag, mode);
- if (IS_ERR(fp)) {
- *fpp = NULL;
- return PTR_ERR(fp);
- }
- else {
- *fpp = fp;
- return 0;
- }
-}
-
-/*
-* Close the file with the specific @param fp
-* @param fp the pointer of struct file to close
-* @return always 0
-*/
-static int closeFile(struct file *fp)
-{
- filp_close(fp, NULL);
- return 0;
-}
-
-static int readFile(struct file *fp, char *buf, int len)
-{
- int rlen = 0, sum = 0;
-
- if (!fp->f_op || !fp->f_op->read)
- return -EPERM;
-
- while (sum < len) {
- rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
- if (rlen > 0)
- sum += rlen;
- else if (0 != rlen)
- return rlen;
- else
- break;
- }
-
- return sum;
-
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return Linux specific error code
-*/
-static int isFileReadable(char *path)
-{
- struct file *fp;
- int ret = 0;
- char buf;
-
- fp = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(fp))
- return PTR_ERR(fp);
-
- if (readFile(fp, &buf, 1) != 1)
- ret = -EINVAL;
-
- filp_close(fp, NULL);
- return ret;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read, or Linux specific error code
-*/
-static int retriveFromFile(char *path, u8 *buf, u32 sz)
-{
- int ret = -1;
- struct file *fp;
-
- if (path && buf) {
- ret = openFile(&fp, path, O_RDONLY, 0);
-
- if (ret == 0) {
- DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp);
-
- ret = readFile(fp, buf, sz);
- closeFile(fp);
-
- DBG_871X("%s readFile, ret:%d\n", __func__, ret);
-
- } else {
- DBG_871X("%s openFile path:%s Fail, ret:%d\n", __func__, path, ret);
- }
- } else {
- DBG_871X("%s NULL pointer\n", __func__);
- ret = -EINVAL;
- }
- return ret;
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return true or false
-*/
-int rtw_is_file_readable(char *path)
-{
- if (isFileReadable(path) == 0)
- return true;
- else
- return false;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read
-*/
-int rtw_retrive_from_file(char *path, u8 *buf, u32 sz)
-{
- int ret = retriveFromFile(path, buf, sz);
- return ret >= 0 ? ret : 0;
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
{
struct net_device *pnetdev;
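The filp_open()/kernel_read() helpers deleted above motivate this whole cleanup: reading files straight from kernel code is discouraged, and the sanctioned route for pulling in external parameter blobs is the firmware loader. A hedged sketch of that interface (the file name reuses one of the removed defines; not part of this patch):

#include <linux/firmware.h>

static int demo_load_phy_params(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "rtl8723b/PHY_REG.txt", dev);
	if (ret)
		return ret;

	/* fw->data / fw->size hold the blob; parse it, then release. */
	release_firmware(fw);
	return 0;
}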
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index d3784c44f6d0..859f4a0afb95 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -18,18 +18,13 @@
static const struct sdio_device_id sdio_ids[] =
{
{ SDIO_DEVICE(0x024c, 0x0523), },
+ { SDIO_DEVICE(0x024c, 0x0525), },
{ SDIO_DEVICE(0x024c, 0x0623), },
{ SDIO_DEVICE(0x024c, 0x0626), },
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
-static const struct acpi_device_id acpi_ids[] = {
- {"OBDA8723", 0x0000},
- {}
-};
-
MODULE_DEVICE_TABLE(sdio, sdio_ids);
-MODULE_DEVICE_TABLE(acpi, acpi_ids);
static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id);
static void rtw_dev_remove(struct sdio_func *func);
@@ -281,7 +276,6 @@ static void sdio_dvobj_deinit(struct sdio_func *func)
sdio_deinit(dvobj);
devobj_deinit(dvobj);
}
- return;
}
void rtw_set_hal_ops(struct adapter *padapter)
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index e853fa9cc950..d53dd138a356 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -590,7 +590,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
int retval, i;
u8 val;
- retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
+ retval = ms_set_rw_reg_addr(chip, PRO_STATUS_REG, 6, SYSTEM_PARAM, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -840,7 +840,7 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
ms_cleanup_work(chip);
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -885,7 +885,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
int found_sys_info = 0, found_model_name = 0;
#endif
- retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, PRO_INT_REG, 2, PRO_SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1232,7 +1232,7 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
int retval;
u8 val[2];
- retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
+ retval = ms_set_rw_reg_addr(chip, STATUS_REG0, 2, 0, 0);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1255,8 +1255,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
int retval, i;
u8 val, data[10];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1307,8 +1307,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm,
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM,
6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1339,8 +1339,8 @@ static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
if (!buf || (buf_len < MS_EXTRA_SIZE))
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6 + MS_EXTRA_SIZE);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6 + MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1392,8 +1392,8 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
int retval;
u8 val, data[6];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1465,8 +1465,8 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1519,8 +1519,8 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
int retval, i = 0;
u8 val, data[6];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1579,7 +1579,7 @@ static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
memset(extra, 0xFF, MS_EXTRA_SIZE);
- if (type == setPS_NG) {
+ if (type == set_PS_NG) {
/* set page status as 1:NG,and block status keep 1:OK */
extra[0] = 0xB8;
} else {
@@ -1670,8 +1670,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1725,7 +1725,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
return STATUS_FAIL;
if (uncorrect_flag) {
- ms_set_page_status(log_blk, setPS_NG,
+ ms_set_page_status(log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
if (i == 0)
@@ -1738,8 +1738,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
- ms_set_page_status(log_blk, setPS_Error,
- extra,
+ ms_set_page_status(log_blk,
+ set_PS_error, extra,
MS_EXTRA_SIZE);
ms_write_extra_data(chip, new_blk, i,
extra,
@@ -1767,8 +1767,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
ms_set_err_code(chip, MS_NO_ERROR);
@@ -1822,8 +1822,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
if (i == 0) {
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm,
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM,
7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1980,8 +1980,8 @@ RE_SEARCH:
for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
- rtsx_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
- rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_device_type, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_support, 0, 0);
retval = rtsx_send_cmd(chip, MS_CARD, 100);
if (retval < 0)
@@ -2057,7 +2057,7 @@ RE_SEARCH:
/* Switch I/F Mode */
if (ptr[15]) {
- retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, SYSTEM_PARAM, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -2887,7 +2887,7 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
+ retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, PRO_TPC_PARM, 0x01);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -2970,8 +2970,8 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3026,7 +3026,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (!(chip->card_wp & MS_CARD)) {
reset_ms(chip);
ms_set_page_status
- (log_blk, setPS_NG,
+ (log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
ms_write_extra_data
@@ -3131,8 +3131,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
u8 *ptr;
if (!start_page) {
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3165,8 +3165,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
return STATUS_FAIL;
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3773,9 +3773,9 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
u8 buf[6];
if (type == 0)
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_TPC_PARM, 1);
else
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -4154,7 +4154,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- goto SetICVFinish;
+ goto set_ICV_finish;
}
#ifdef MG_SET_ICV_SLOW
@@ -4195,7 +4195,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SENSE_TYPE_MG_WRITE_ERR);
}
retval = STATUS_FAIL;
- goto SetICVFinish;
+ goto set_ICV_finish;
}
}
#else
@@ -4214,11 +4214,11 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- goto SetICVFinish;
+ goto set_ICV_finish;
}
#endif
-SetICVFinish:
+set_ICV_finish:
kfree(buf);
return retval;
}
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index 952cc14dd079..33bda9ce36b6 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -92,37 +92,37 @@
#define PRO_FORMAT 0x10
#define PRO_SLEEP 0x11
-#define IntReg 0x01
-#define StatusReg0 0x02
-#define StatusReg1 0x03
-
-#define SystemParm 0x10
-#define BlockAdrs 0x11
-#define CMDParm 0x14
-#define PageAdrs 0x15
-
-#define OverwriteFlag 0x16
-#define ManagemenFlag 0x17
-#define LogicalAdrs 0x18
-#define ReserveArea 0x1A
-
-#define Pro_IntReg 0x01
-#define Pro_StatusReg 0x02
-#define Pro_TypeReg 0x04
-#define Pro_IFModeReg 0x05
-#define Pro_CatagoryReg 0x06
-#define Pro_ClassReg 0x07
-
-#define Pro_SystemParm 0x10
-#define Pro_DataCount1 0x11
-#define Pro_DataCount0 0x12
-#define Pro_DataAddr3 0x13
-#define Pro_DataAddr2 0x14
-#define Pro_DataAddr1 0x15
-#define Pro_DataAddr0 0x16
-
-#define Pro_TPCParm 0x17
-#define Pro_CMDParm 0x18
+#define INT_REG 0x01
+#define STATUS_REG0 0x02
+#define STATUS_REG1 0x03
+
+#define SYSTEM_PARAM 0x10
+#define BLOCK_ADRS 0x11
+#define CMD_PARM 0x14
+#define PAGE_ADRS 0x15
+
+#define OVERWRITE_FLAG 0x16
+#define MANAGEMEN_FLAG 0x17
+#define LOGICAL_ADRS 0x18
+#define RESERVE_AREA 0x1A
+
+#define PRO_INT_REG 0x01
+#define PRO_STATUS_REG 0x02
+#define PRO_TYPE_REG 0x04
+#define PRO_IF_MODE_REG 0x05
+#define PRO_CATEGORY_REG 0x06
+#define PRO_CLASS_REG 0x07
+
+#define PRO_SYSTEM_PARAM 0x10
+#define PRO_DATA_COUNT1 0x11
+#define PRO_DATA_COUNT0 0x12
+#define PRO_DATA_ADDR3 0x13
+#define PRO_DATA_ADDR2 0x14
+#define PRO_DATA_ADDR1 0x15
+#define PRO_DATA_ADDR0 0x16
+
+#define PRO_TPC_PARM 0x17
+#define PRO_CMD_PARM 0x18
#define INT_REG_CED 0x80
#define INT_REG_ERR 0x40
@@ -152,12 +152,12 @@
#define PAGE_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 8)
#define PAGE_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 9)
-#define MS_Device_Type (PPBUF_BASE2 + 0x1D8)
+#define MS_device_type (PPBUF_BASE2 + 0x1D8)
-#define MS_4bit_Support (PPBUF_BASE2 + 0x1D3)
+#define MS_4bit_support (PPBUF_BASE2 + 0x1D3)
-#define setPS_NG 1
-#define setPS_Error 0
+#define set_PS_NG 1
+#define set_PS_error 0
#define PARALLEL_8BIT_IF 0x40
#define PARALLEL_4BIT_IF 0x00
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index fa597953e9a0..cb95ad6fa4f9 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -873,7 +873,8 @@ static int rtsx_probe(struct pci_dev *pci,
(unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
- &dev->rtsx_resv_buf_addr, GFP_KERNEL);
+ &dev->rtsx_resv_buf_addr,
+ GFP_KERNEL);
if (!dev->rtsx_resv_buf) {
dev_err(&pci->dev, "alloc dma buffer fail\n");
err = -ENXIO;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 561851cc8780..5f1eefe80f1e 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -677,8 +677,8 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
spin_unlock_irq(&rtsx->reg_lock);
/* Wait for TRANS_OK_INT */
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index dc9e8cad7a74..f4ff62653b56 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -232,7 +232,7 @@
#define DCM_LOW_FREQUENCY_MODE 0x01
#define DCM_HIGH_FREQUENCY_MODE_SET 0x0C
-#define DCM_Low_FREQUENCY_MODE_SET 0x00
+#define DCM_LOW_FREQUENCY_MODE_SET 0x00
#define MULTIPLY_BY_1 0x00
#define MULTIPLY_BY_2 0x01
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index f3dc96a4c59d..0f369935fb6c 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -630,13 +630,13 @@ static int reset_xd(struct rtsx_chip *chip)
xd_card->zone_cnt = 32;
xd_card->capacity = 1024000;
break;
- case xD_1G_X8_512:
+ case XD_1G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 64;
xd_card->capacity = 2048000;
break;
- case xD_2G_X8_512:
+ case XD_2G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 128;
@@ -669,10 +669,10 @@ static int reset_xd(struct rtsx_chip *chip)
return STATUS_FAIL;
}
- retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
+ retval = xd_read_id(chip, READ_XD_ID, id_buf, 4);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- dev_dbg(rtsx_dev(chip), "READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ dev_dbg(rtsx_dev(chip), "READ_XD_ID: 0x%x 0x%x 0x%x 0x%x\n",
id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
if (id_buf[2] != XD_ID_CODE)
return STATUS_FAIL;
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 57b94129b26f..98c00f268e56 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -36,7 +36,7 @@
#define BLK_ERASE_1 0x60
#define BLK_ERASE_2 0xD0
#define READ_STS 0x70
-#define READ_xD_ID 0x9A
+#define READ_XD_ID 0x9A
#define COPY_BACK_512 0x8A
#define COPY_BACK_2K 0x85
#define READ1_1_2 0x30
@@ -72,8 +72,8 @@
#define XD_128M_X16_2048 0xC1
#define XD_4M_X8_512_1 0xE3
#define XD_4M_X8_512_2 0xE5
-#define xD_1G_X8_512 0xD3
-#define xD_2G_X8_512 0xD5
+#define XD_1G_X8_512 0xD3
+#define XD_2G_X8_512 0xD5
#define XD_ID_CODE 0xB5
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 5a317cc98a4b..02860d3ec365 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -56,7 +56,6 @@ static unsigned int get_mxclk_freq(void)
static void set_chip_clock(unsigned int frequency)
{
struct pll_value pll;
- unsigned int actual_mx_clk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
if (sm750_get_chip_type() == SM750LE)
@@ -66,8 +65,8 @@ static void set_chip_clock(unsigned int frequency)
/*
* Set up PLL structure to hold the value to be set in clocks.
*/
- pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
- pll.clockType = MXCLK_PLL;
+ pll.input_freq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
+ pll.clock_type = MXCLK_PLL;
/*
* Call sm750_calc_pll_value() to fill the other fields
@@ -76,7 +75,7 @@ static void set_chip_clock(unsigned int frequency)
* Return value of sm750_calc_pll_value gives the actual
* possible clock.
*/
- actual_mx_clk = sm750_calc_pll_value(frequency, &pll);
+ sm750_calc_pll_value(frequency, &pll);
/* Master Clock Control: MXCLK_PLL */
poke32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
@@ -211,13 +210,13 @@ unsigned int ddk750_get_vm_size(void)
return data;
}
-int ddk750_init_hw(struct initchip_param *pInitParam)
+int ddk750_init_hw(struct initchip_param *p_init_param)
{
unsigned int reg;
- if (pInitParam->powerMode != 0)
- pInitParam->powerMode = 0;
- sm750_set_power_mode(pInitParam->powerMode);
+ if (p_init_param->power_mode != 0)
+ p_init_param->power_mode = 0;
+ sm750_set_power_mode(p_init_param->power_mode);
/* Enable display power gate & LOCALMEM power gate*/
reg = peek32(CURRENT_GATE);
@@ -238,13 +237,13 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
}
/* Set the Main Chip Clock */
- set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
+ set_chip_clock(MHz((unsigned int)p_init_param->chip_clock));
/* Set up memory clock. */
- set_memory_clock(MHz(pInitParam->memClock));
+ set_memory_clock(MHz(p_init_param->mem_clock));
/* Set up master clock */
- set_master_clock(MHz(pInitParam->masterClock));
+ set_master_clock(MHz(p_init_param->master_clock));
/*
* Reset the memory controller.
@@ -252,7 +251,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
* the system might hang when sw accesses the memory.
* The memory should be reset after changing the MXCLK.
*/
- if (pInitParam->resetMemory == 1) {
+ if (p_init_param->reset_memory == 1) {
reg = peek32(MISC_CTRL);
reg &= ~MISC_CTRL_LOCALMEM_RESET;
poke32(MISC_CTRL, reg);
@@ -261,7 +260,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
poke32(MISC_CTRL, reg);
}
- if (pInitParam->setAllEngOff == 1) {
+ if (p_init_param->set_all_eng_off == 1) {
sm750_enable_2d_engine(0);
/* Disable Overlay, if a former application left it on */
@@ -337,13 +336,13 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
ret = 0;
mini_diff = ~0;
request = request_orig / 1000;
- input = pll->inputFreq / 1000;
+ input = pll->input_freq / 1000;
/*
* for MXCLK register,
* no POD provided, so need be treated differently
*/
- if (pll->clockType == MXCLK_PLL)
+ if (pll->clock_type == MXCLK_PLL)
max_d = 3;
for (N = 15; N > 1; N--) {
@@ -365,7 +364,7 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
if (M < 256 && M > 0) {
unsigned int diff;
- tmp_clock = pll->inputFreq * M / N / X;
+ tmp_clock = pll->input_freq * M / N / X;
diff = abs(tmp_clock - request_orig);
if (diff < mini_diff) {
pll->M = M;
@@ -383,14 +382,14 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
return ret;
}
-unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
+unsigned int sm750_format_pll_reg(struct pll_value *p_PLL)
{
#ifndef VALIDATION_CHIP
- unsigned int POD = pPLL->POD;
+ unsigned int POD = p_PLL->POD;
#endif
- unsigned int OD = pPLL->OD;
- unsigned int M = pPLL->M;
- unsigned int N = pPLL->N;
+ unsigned int OD = p_PLL->OD;
+ unsigned int M = p_PLL->M;
+ unsigned int N = p_PLL->N;
/*
* Note that all PLL's have the same format. Here, we just use
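For orientation, the sm750_calc_pll_value() code touched above brute-forces integer dividers so that input_freq * M / N / X lands as close as possible to the requested clock, recording the best (M, N) in the pll_value struct. A simplified sketch of that search (output divider omitted; names are illustrative):

/* Try every divider pair and keep the (M, N) minimizing
 * |input * M / N - request|. Frequencies in kHz to stay in 32 bits. */
static unsigned int demo_calc_pll(unsigned int request_khz,
				  unsigned int input_khz,
				  unsigned int *best_m,
				  unsigned int *best_n)
{
	unsigned int m, n, best = 0, mini_diff = ~0u;

	for (n = 2; n <= 15; n++) {
		for (m = 1; m < 256; m++) {
			unsigned int clk = input_khz * m / n;
			unsigned int diff = clk > request_khz ?
				clk - request_khz : request_khz - clk;

			if (diff < mini_diff) {
				mini_diff = diff;
				best = clk;
				*best_m = m;
				*best_n = n;
			}
		}
	}
	return best;	/* closest achievable clock, in kHz */
}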
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 3e92b3297160..ee2e9d90f7dd 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -40,8 +40,8 @@ enum clock_type {
};
struct pll_value {
- enum clock_type clockType;
- unsigned long inputFreq; /* Input clock frequency to the PLL */
+ enum clock_type clock_type;
+ unsigned long input_freq; /* Input clock frequency to the PLL */
/* Use this when clock_type = PANEL_PLL */
unsigned long M;
@@ -53,41 +53,41 @@ struct pll_value {
/* input struct to initChipParam() function */
struct initchip_param {
/* Use power mode 0 or 1 */
- unsigned short powerMode;
+ unsigned short power_mode;
/*
* Speed of main chip clock in MHz unit
* 0 = keep the current clock setting
* Others = the new main chip clock
*/
- unsigned short chipClock;
+ unsigned short chip_clock;
/*
* Speed of memory clock in MHz unit
* 0 = keep the current clock setting
* Others = the new memory clock
*/
- unsigned short memClock;
+ unsigned short mem_clock;
/*
* Speed of master clock in MHz unit
* 0 = keep the current clock setting
* Others = the new master clock
*/
- unsigned short masterClock;
+ unsigned short master_clock;
/*
* 0 = leave all engine state untouched.
* 1 = make sure they are off: 2D, Overlay,
* video alpha, alpha, hardware cursors
*/
- unsigned short setAllEngOff;
+ unsigned short set_all_eng_off;
/*
* 0 = Do not reset the memory controller
* 1 = Reset the memory controller
*/
- unsigned char resetMemory;
+ unsigned char reset_memory;
/* More initialization parameter can be added if needed */
};
@@ -95,7 +95,7 @@ struct initchip_param {
enum logical_chip_type sm750_get_chip_type(void);
void sm750_set_chip_type(unsigned short dev_id, u8 rev_id);
unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
-unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
+unsigned int sm750_format_pll_reg(struct pll_value *p_PLL);
unsigned int ddk750_get_vm_size(void);
int ddk750_init_hw(struct initchip_param *pinit_param);
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 887ea8aef43f..172624ff98b0 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -147,8 +147,8 @@ void ddk750_set_logical_disp_out(enum disp_output output)
if (output & PNL_SEQ_USAGE) {
/* set panel sequence */
- sw_panel_power_sequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET,
- 4);
+ sw_panel_power_sequence((output & PNL_SEQ_MASK) >>
+ PNL_SEQ_OFFSET, 4);
}
if (output & DAC_USAGE)
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 4dac691ad1b1..e00a6cb31947 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -13,8 +13,9 @@
* HW only supports 7 predefined pixel clocks, and clock select is
* in bit 29:27 of Display Control register.
*/
-static unsigned long displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
- unsigned long dispControl)
+static unsigned long
+displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
+ unsigned long dispControl)
{
unsigned long x, y;
@@ -81,7 +82,7 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
int cnt = 0;
unsigned int tmp, reg;
- if (pll->clockType == SECONDARY_PLL) {
+ if (pll->clock_type == SECONDARY_PLL) {
/* program secondary pixel clock */
poke32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
@@ -134,7 +135,7 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
poke32(CRT_DISPLAY_CTRL, tmp | reg);
}
- } else if (pll->clockType == PRIMARY_PLL) {
+ } else if (pll->clock_type == PRIMARY_PLL) {
unsigned int reserved;
poke32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
@@ -209,12 +210,11 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
int ddk750_setModeTiming(struct mode_parameter *parm, enum clock_type clock)
{
struct pll_value pll;
- unsigned int uiActualPixelClk;
- pll.inputFreq = DEFAULT_INPUT_CLOCK;
- pll.clockType = clock;
+ pll.input_freq = DEFAULT_INPUT_CLOCK;
+ pll.clock_type = clock;
- uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
+ sm750_calc_pll_value(parm->pixel_clock, &pll);
if (sm750_get_chip_type() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
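programModeRegisters() above keys off the renamed clock_type to pick which PLL control register receives the formatted value. A condensed sketch of that dispatch, reusing the helpers and register names visible in this hunk (error handling and the surrounding timing code omitted):

	static void program_pixel_clock(struct pll_value *pll)
	{
		unsigned int reg = sm750_format_pll_reg(pll);

		if (pll->clock_type == SECONDARY_PLL)
			poke32(CRT_PLL_CTRL, reg);	/* secondary pixel clock */
		else if (pll->clock_type == PRIMARY_PLL)
			poke32(PANEL_PLL_CTRL, reg);	/* primary pixel clock */
	}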
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index c8e856c13912..73e0e9f41ec5 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -39,8 +39,10 @@ unsigned short sii164GetVendorID(void)
{
unsigned short vendorID;
- vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
+ vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_VENDOR_ID_HIGH) << 8) |
+ (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_VENDOR_ID_LOW);
return vendorID;
}
@@ -56,13 +58,18 @@ unsigned short sii164GetDeviceID(void)
{
unsigned short deviceID;
- deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
+ deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_DEVICE_ID_HIGH) << 8) |
+ (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_DEVICE_ID_LOW);
return deviceID;
}
-/* DVI.C will handle all SiI164 chip stuffs and try it best to make code minimal and useful */
+/*
+ * DVI.C will handle all SiI164 chip functions and try its best to keep
+ * the code minimal and useful
+ */
/*
* sii164InitChip
@@ -133,7 +140,8 @@ long sii164InitChip(unsigned char edge_select,
#endif
/* Check if SII164 Chip exists */
- if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID)) {
+ if ((sii164GetVendorID() == SII164_VENDOR_ID) &&
+ (sii164GetDeviceID() == SII164_DEVICE_ID)) {
/*
* Initialize SII164 controller chip.
*/
@@ -254,7 +262,9 @@ void sii164ResetChip(void)
/*
* sii164GetChipString
- * This function returns a char string name of the current DVI Controller chip.
+ * This function returns a char string name of the current DVI Controller
+ * chip.
+ *
 * It's convenient for applications that need to display the chip name.
*/
char *sii164GetChipString(void)
@@ -330,8 +340,8 @@ void sii164EnableHotPlugDetection(unsigned char enableHotPlug)
detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
- /* Depending on each DVI controller, need to enable the hot plug based on each
- * individual chip design.
+	/* Depending on the DVI controller, hot plug detection needs to be
+	 * enabled based on the individual chip design.
*/
if (enableHotPlug != 0)
sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
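Both ID getters above assemble a 16-bit value from two consecutive 8-bit I2C registers, high byte first. A standalone illustration of the composition; read_reg() is a stand-in for i2cReadReg():

	#include <stdio.h>

	/* Stand-in for i2cReadReg(): returns a canned byte per register. */
	static unsigned char read_reg(unsigned char reg)
	{
		return reg == 0 ? 0x01 : 0x06;	/* high byte, then low byte */
	}

	int main(void)
	{
		unsigned short id = ((unsigned short)read_reg(0) << 8) |
				    (unsigned short)read_reg(1);

		printf("id = 0x%04x\n", id);	/* prints 0x0106 */
		return 0;
	}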
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 862e7bf27353..d940cb729066 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -6,10 +6,13 @@
/* Hot Plug detection mode structure */
enum sii164_hot_plug_mode {
- SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
- SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
- SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
- SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
+ SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit
+ * (always high).
+ */
+
+ SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
+ SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
+ SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
};
/* Silicon Image SiI164 chip prototype */
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index dbcbbd1055da..8faa601c700b 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -130,20 +130,28 @@ int sm750_hw_fillrect(struct lynx_accel *accel,
return 0;
}
-int sm750_hw_copyarea(
-struct lynx_accel *accel,
-unsigned int sBase, /* Address of source: offset in frame buffer */
-unsigned int sPitch, /* Pitch value of source surface in BYTE */
-unsigned int sx,
-unsigned int sy, /* Starting coordinate of source surface */
-unsigned int dBase, /* Address of destination: offset in frame buffer */
-unsigned int dPitch, /* Pitch value of destination surface in BYTE */
-unsigned int Bpp, /* Color depth of destination surface */
-unsigned int dx,
-unsigned int dy, /* Starting coordinate of destination surface */
-unsigned int width,
-unsigned int height, /* width and height of rectangle in pixel value */
-unsigned int rop2) /* ROP value */
+/**
+ * sm750_hw_copyarea
+ * @sBase: Address of source: offset in frame buffer
+ * @sPitch: Pitch value of source surface in BYTE
+ * @sx: Starting x coordinate of source surface
+ * @sy: Starting y coordinate of source surface
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @Bpp: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @rop2: ROP value
+ */
+int sm750_hw_copyarea(struct lynx_accel *accel,
+ unsigned int sBase, unsigned int sPitch,
+ unsigned int sx, unsigned int sy,
+ unsigned int dBase, unsigned int dPitch,
+ unsigned int Bpp, unsigned int dx, unsigned int dy,
+ unsigned int width, unsigned int height,
+ unsigned int rop2)
{
unsigned int nDirection, de_ctrl;
@@ -216,7 +224,7 @@ unsigned int rop2) /* ROP value */
/*
* Note:
- * DE_FOREGROUND are DE_BACKGROUND are don't care.
+ * DE_FOREGROUND and DE_BACKGROUND are don't care.
* DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS
 * are set by deSetTransparency().
*/
@@ -235,21 +243,21 @@ unsigned int rop2) /* ROP value */
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
- /*
- * Program pitch (distance between the 1st points of two adjacent lines).
- * Note that input pitch is BYTE value, but the 2D Pitch register uses
- * pixel values. Need Byte to pixel conversion.
- */
+ /*
+ * Program pitch (distance between the 1st points of two adjacent lines).
+ * Note that input pitch is BYTE value, but the 2D Pitch register uses
+ * pixel values. Need Byte to pixel conversion.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /*
- * Screen Window width in Pixels.
- * 2D engine uses this value to calculate the linear address in frame buffer
- * for a given point.
- */
+ /*
+ * Screen Window width in Pixels.
+ * 2D engine uses this value to calculate the linear address in frame buffer
+ * for a given point.
+ */
write_dpr(accel, DE_WINDOW_WIDTH,
((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
DE_WINDOW_WIDTH_DST_MASK) |
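The reindented comments above describe the byte-to-pixel conversion: callers pass pitches in bytes, the 2D engine wants pixels, and destination and source pitches land in one register. A standalone sketch of the packing; the shift/mask values are assumptions for illustration only:

	#include <stdio.h>

	#define DE_PITCH_DESTINATION_SHIFT	16		/* assumed layout */
	#define DE_PITCH_DESTINATION_MASK	0x1fff0000	/* assumed */
	#define DE_PITCH_SOURCE_MASK		0x00001fff	/* assumed */

	int main(void)
	{
		unsigned int dPitch = 4096, sPitch = 4096;	/* pitches in BYTEs */
		unsigned int Bpp = 4;				/* bytes per pixel */
		unsigned int dpr10 =
			((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
			 DE_PITCH_DESTINATION_MASK) |
			(sPitch / Bpp & DE_PITCH_SOURCE_MASK);

		printf("DE_PITCH = 0x%08x\n", dpr10);	/* 1024-pixel pitches */
		return 0;
	}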
@@ -288,20 +296,28 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
return de_ctrl;
}
-int sm750_hw_imageblit(struct lynx_accel *accel,
- const char *pSrcbuf, /* pointer to start of source buffer in system memory */
- u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
- u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
- u32 dBase, /* Address of destination: offset in frame buffer */
- u32 dPitch, /* Pitch value of destination surface in BYTE */
- u32 bytePerPixel, /* Color depth of destination surface */
- u32 dx,
- u32 dy, /* Starting coordinate of destination surface */
- u32 width,
- u32 height, /* width and height of rectangle in pixel value */
- u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */
- u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
- u32 rop2) /* ROP value */
+/**
+ * sm750_hw_imageblit
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
+ * and -ive means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should be
+ * 0 to 7
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bytePerPixel: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @fColor: Foreground color (corresponding to a 1 in the monochrome data)
+ * @bColor: Background color (corresponding to a 0 in the monochrome data)
+ * @rop2: ROP value
+ */
+int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf,
+ u32 srcDelta, u32 startBit, u32 dBase, u32 dPitch,
+ u32 bytePerPixel, u32 dx, u32 dy, u32 width,
+ u32 height, u32 fColor, u32 bColor, u32 rop2)
{
unsigned int ulBytesPerScan;
unsigned int ul4BytesPerScan;
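The two locals above size each monochrome scanline. A standalone sketch of the arithmetic they imply, assuming the usual split into whole 4-byte words plus a byte remainder (a reading of what startBit and width require, not the driver's verbatim code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int startBit = 3, width = 100;	/* in bits */
		/* bytes covering startBit .. startBit + width - 1 */
		unsigned int ulBytesPerScan = (startBit + width + 7) / 8;
		unsigned int ul4BytesPerScan = ulBytesPerScan & ~3u;	/* whole words */
		unsigned int remainder = ulBytesPerScan & 3u;		/* leftover bytes */

		printf("%u bytes/scan = %u aligned + %u remainder\n",
		       ulBytesPerScan, ul4BytesPerScan, remainder);
		return 0;
	}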
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index c4f42002a50f..2c79cb730a0a 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -190,37 +190,54 @@ void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
void sm750_hw_de_init(struct lynx_accel *accel);
int sm750_hw_fillrect(struct lynx_accel *accel,
- u32 base, u32 pitch, u32 Bpp,
- u32 x, u32 y, u32 width, u32 height,
- u32 color, u32 rop);
-
-int sm750_hw_copyarea(
-struct lynx_accel *accel,
-unsigned int sBase, /* Address of source: offset in frame buffer */
-unsigned int sPitch, /* Pitch value of source surface in BYTE */
-unsigned int sx,
-unsigned int sy, /* Starting coordinate of source surface */
-unsigned int dBase, /* Address of destination: offset in frame buffer */
-unsigned int dPitch, /* Pitch value of destination surface in BYTE */
-unsigned int bpp, /* Color depth of destination surface */
-unsigned int dx,
-unsigned int dy, /* Starting coordinate of destination surface */
-unsigned int width,
-unsigned int height, /* width and height of rectangle in pixel value */
-unsigned int rop2);
-
-int sm750_hw_imageblit(struct lynx_accel *accel,
- const char *pSrcbuf, /* pointer to start of source buffer in system memory */
- u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
- u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
- u32 dBase, /* Address of destination: offset in frame buffer */
- u32 dPitch, /* Pitch value of destination surface in BYTE */
- u32 bytePerPixel, /* Color depth of destination surface */
- u32 dx,
- u32 dy, /* Starting coordinate of destination surface */
- u32 width,
- u32 height, /* width and height of rectangle in pixel value */
- u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */
- u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
- u32 rop2);
+ u32 base, u32 pitch, u32 Bpp,
+ u32 x, u32 y, u32 width, u32 height,
+ u32 color, u32 rop);
+
+/**
+ * sm750_hw_copyarea
+ * @sBase: Address of source: offset in frame buffer
+ * @sPitch: Pitch value of source surface in BYTE
+ * @sx: Starting x coordinate of source surface
+ * @sy: Starting y coordinate of source surface
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @Bpp: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @rop2: ROP value
+ */
+int sm750_hw_copyarea(struct lynx_accel *accel,
+ unsigned int sBase, unsigned int sPitch,
+ unsigned int sx, unsigned int sy,
+ unsigned int dBase, unsigned int dPitch,
+ unsigned int Bpp, unsigned int dx, unsigned int dy,
+ unsigned int width, unsigned int height,
+ unsigned int rop2);
+
+/**
+ * sm750_hw_imageblit
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
+ *            and -ive means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should be
+ *            0 to 7
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bytePerPixel: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @fColor: Foreground color (corresponding to a 1 in the monochrome data)
+ * @bColor: Background color (corresponding to a 0 in the monochrome data)
+ * @rop2: ROP value
+ */
+int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf,
+ u32 srcDelta, u32 startBit, u32 dBase, u32 dPitch,
+ u32 bytePerPixel, u32 dx, u32 dy, u32 width,
+ u32 height, u32 fColor, u32 bColor, u32 rop2);
+
#endif
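With the prototype reflowed, a hypothetical screen-to-screen blit reads as follows; every value is invented for illustration, and 0xc is assumed to be the plain-copy ROP2 code:

	unsigned int bpp = 4;			/* 32-bit surface */
	unsigned int pitch = 1024 * bpp;	/* pitch in BYTEs, per the kernel-doc */

	sm750_hw_copyarea(accel,
			  0, pitch, 0, 0,	/* source: offset 0, top-left */
			  0, pitch, bpp,	/* destination surface, color depth */
			  100, 100,		/* destination x, y */
			  64, 64,		/* width, height in pixels */
			  0xc);			/* assumed plain-copy ROP2 */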
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 16ac07eb58d6..b59643dd61ed 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -5,14 +5,11 @@
/* hw_cursor_xxx works for voyager,718 and 750 */
void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
-void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
- int w, int h);
-void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
- int x, int y);
-void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
- u32 fg, u32 bg);
-void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
- u16 rop, const u8 *data, const u8 *mask);
-void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
- u16 rop, const u8 *data, const u8 *mask);
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h);
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y);
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg);
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
#endif
diff --git a/drivers/staging/uwb/rsv.c b/drivers/staging/uwb/rsv.c
index f45a04ff7275..d593a41c3d8d 100644
--- a/drivers/staging/uwb/rsv.c
+++ b/drivers/staging/uwb/rsv.c
@@ -614,7 +614,7 @@ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
struct uwb_rsv_move *mv;
int ret = 0;
- if (bow->can_reserve_extra_mases == false)
+ if (!bow->can_reserve_extra_mases)
return -EBUSY;
mv = &rsv->mv;
@@ -643,7 +643,7 @@ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
struct uwb_rsv *rsv;
struct uwb_mas_bm mas;
- if (bow->can_reserve_extra_mases == false)
+ if (!bow->can_reserve_extra_mases)
return;
list_for_each_entry(rsv, &rc->reservations, rc_node) {
diff --git a/drivers/staging/vboxsf/Kconfig b/drivers/staging/vboxsf/Kconfig
deleted file mode 100644
index b84586ae08b3..000000000000
--- a/drivers/staging/vboxsf/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config VBOXSF_FS
- tristate "VirtualBox guest shared folder (vboxsf) support"
- depends on X86 && VBOXGUEST
- select NLS
- help
- VirtualBox hosts can share folders with guests, this driver
- implements the Linux-guest side of this allowing folders exported
- by the host to be mounted under Linux.
-
- If you want to use shared folders in VirtualBox guests, answer Y or M.
diff --git a/drivers/staging/vboxsf/Makefile b/drivers/staging/vboxsf/Makefile
deleted file mode 100644
index 9e4328e79623..000000000000
--- a/drivers/staging/vboxsf/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
-
-vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o
diff --git a/drivers/staging/vboxsf/TODO b/drivers/staging/vboxsf/TODO
deleted file mode 100644
index 8b9193d0d4f0..000000000000
--- a/drivers/staging/vboxsf/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
-- Find a file-system developer to review this and give their Reviewed-By
-- Address any items coming up during review
-- Move to fs/vboxfs
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Hans de Goede <hdegoede@redhat.com>
diff --git a/drivers/staging/vboxsf/dir.c b/drivers/staging/vboxsf/dir.c
deleted file mode 100644
index f260b5cc1646..000000000000
--- a/drivers/staging/vboxsf/dir.c
+++ /dev/null
@@ -1,418 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Directory inode and file operations
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/namei.h>
-#include <linux/vbox_utils.h>
-#include "vfsmod.h"
-
-static int vboxsf_dir_open(struct inode *inode, struct file *file)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
- struct shfl_createparms params = {};
- struct vboxsf_dir_info *sf_d;
- int err;
-
- sf_d = vboxsf_dir_info_alloc();
- if (!sf_d)
- return -ENOMEM;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_DIRECTORY | SHFL_CF_ACT_OPEN_IF_EXISTS |
- SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
-
- err = vboxsf_create_at_dentry(file_dentry(file), &params);
- if (err)
- goto err_free_dir_info;
-
- if (params.result != SHFL_FILE_EXISTS) {
- err = -ENOENT;
- goto err_close;
- }
-
- err = vboxsf_dir_read_all(sbi, sf_d, params.handle);
- if (err)
- goto err_close;
-
- vboxsf_close(sbi->root, params.handle);
- file->private_data = sf_d;
- return 0;
-
-err_close:
- vboxsf_close(sbi->root, params.handle);
-err_free_dir_info:
- vboxsf_dir_info_free(sf_d);
- return err;
-}
-
-static int vboxsf_dir_release(struct inode *inode, struct file *file)
-{
- if (file->private_data)
- vboxsf_dir_info_free(file->private_data);
-
- return 0;
-}
-
-static unsigned int vboxsf_get_d_type(u32 mode)
-{
- unsigned int d_type;
-
- switch (mode & SHFL_TYPE_MASK) {
- case SHFL_TYPE_FIFO:
- d_type = DT_FIFO;
- break;
- case SHFL_TYPE_DEV_CHAR:
- d_type = DT_CHR;
- break;
- case SHFL_TYPE_DIRECTORY:
- d_type = DT_DIR;
- break;
- case SHFL_TYPE_DEV_BLOCK:
- d_type = DT_BLK;
- break;
- case SHFL_TYPE_FILE:
- d_type = DT_REG;
- break;
- case SHFL_TYPE_SYMLINK:
- d_type = DT_LNK;
- break;
- case SHFL_TYPE_SOCKET:
- d_type = DT_SOCK;
- break;
- case SHFL_TYPE_WHITEOUT:
- d_type = DT_WHT;
- break;
- default:
- d_type = DT_UNKNOWN;
- break;
- }
- return d_type;
-}
-
-static bool vboxsf_dir_emit(struct file *dir, struct dir_context *ctx)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(file_inode(dir)->i_sb);
- struct vboxsf_dir_info *sf_d = dir->private_data;
- struct shfl_dirinfo *info;
- struct vboxsf_dir_buf *b;
- unsigned int d_type;
- loff_t i, cur = 0;
- ino_t fake_ino;
- size_t size;
- int err;
-
- list_for_each_entry(b, &sf_d->info_list, head) {
-try_next_entry:
- if (ctx->pos >= cur + b->entries) {
- cur += b->entries;
- continue;
- }
-
- /*
- * Note the vboxsf_dir_info objects we are iterating over here
- * are variable sized, so the info pointer may end up being
- * unaligned. This is how we get the data from the host.
- * Since vboxsf is only supported on x86 machines this is not
- * a problem.
- */
- for (i = 0, info = b->buf; i < ctx->pos - cur; i++) {
- size = offsetof(struct shfl_dirinfo, name.string) +
- info->name.size;
- info = (struct shfl_dirinfo *)((uintptr_t)info + size);
- }
-
- /* Info now points to the right entry, emit it. */
- d_type = vboxsf_get_d_type(info->info.attr.mode);
-
- /*
- * On 32 bit systems pos is 64-bit signed, while ino is 32 bit
- * unsigned so fake_ino may overflow, check for this.
- */
- if ((ino_t)(ctx->pos + 1) != (u64)(ctx->pos + 1)) {
- vbg_err("vboxsf: fake ino overflow, truncating dir\n");
- return false;
- }
- fake_ino = ctx->pos + 1;
-
- if (sbi->nls) {
- char d_name[NAME_MAX];
-
- err = vboxsf_nlscpy(sbi, d_name, NAME_MAX,
- info->name.string.utf8,
- info->name.length);
- if (err) {
- /* skip erroneous entry and proceed */
- ctx->pos += 1;
- goto try_next_entry;
- }
-
- return dir_emit(ctx, d_name, strlen(d_name),
- fake_ino, d_type);
- }
-
- return dir_emit(ctx, info->name.string.utf8, info->name.length,
- fake_ino, d_type);
- }
-
- return false;
-}
-
-static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx)
-{
- bool keep_iterating;
-
- for (keep_iterating = true; keep_iterating; ctx->pos += 1)
- keep_iterating = vboxsf_dir_emit(dir, ctx);
-
- return 0;
-}
-
-const struct file_operations vboxsf_dir_fops = {
- .open = vboxsf_dir_open,
- .iterate = vboxsf_dir_iterate,
- .release = vboxsf_dir_release,
- .read = generic_read_dir,
- .llseek = generic_file_llseek,
-};
-
-/*
- * This is called during name resolution/lookup to check if the @dentry in
- * the cache is still valid. The job is handled by vboxsf_inode_revalidate.
- */
-static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags)
-{
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- if (d_really_is_positive(dentry))
- return vboxsf_inode_revalidate(dentry) == 0;
- else
- return vboxsf_stat_dentry(dentry, NULL) == -ENOENT;
-}
-
-const struct dentry_operations vboxsf_dentry_ops = {
- .d_revalidate = vboxsf_dentry_revalidate
-};
-
-/* iops */
-
-static struct dentry *vboxsf_dir_lookup(struct inode *parent,
- struct dentry *dentry,
- unsigned int flags)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct shfl_fsobjinfo fsinfo;
- struct inode *inode;
- int err;
-
- dentry->d_time = jiffies;
-
- err = vboxsf_stat_dentry(dentry, &fsinfo);
- if (err) {
- inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
- } else {
- inode = vboxsf_new_inode(parent->i_sb);
- if (!IS_ERR(inode))
- vboxsf_init_inode(sbi, inode, &fsinfo);
- }
-
- return d_splice_alias(inode, dentry);
-}
-
-static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
- struct shfl_fsobjinfo *info)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct vboxsf_inode *sf_i;
- struct inode *inode;
-
- inode = vboxsf_new_inode(parent->i_sb);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- sf_i = VBOXSF_I(inode);
-	/* The host may have given us different attrs than requested */
- sf_i->force_restat = 1;
- vboxsf_init_inode(sbi, inode, info);
-
- d_instantiate(dentry, inode);
-
- return 0;
-}
-
-static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
- umode_t mode, int is_dir)
-{
- struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct shfl_createparms params = {};
- int err;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
- SHFL_CF_ACT_FAIL_IF_EXISTS |
- SHFL_CF_ACCESS_READWRITE |
- (is_dir ? SHFL_CF_DIRECTORY : 0);
- params.info.attr.mode = (mode & 0777) |
- (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
- params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
-
- err = vboxsf_create_at_dentry(dentry, &params);
- if (err)
- return err;
-
- if (params.result != SHFL_FILE_CREATED)
- return -EPERM;
-
- vboxsf_close(sbi->root, params.handle);
-
- err = vboxsf_dir_instantiate(parent, dentry, &params.info);
- if (err)
- return err;
-
- /* parent directory access/change time changed */
- sf_parent_i->force_restat = 1;
-
- return 0;
-}
-
-static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry,
- umode_t mode, bool excl)
-{
- return vboxsf_dir_create(parent, dentry, mode, 0);
-}
-
-static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry,
- umode_t mode)
-{
- return vboxsf_dir_create(parent, dentry, mode, 1);
-}
-
-static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
- struct inode *inode = d_inode(dentry);
- struct shfl_string *path;
- u32 flags;
- int err;
-
- if (S_ISDIR(inode->i_mode))
- flags = SHFL_REMOVE_DIR;
- else
- flags = SHFL_REMOVE_FILE;
-
- if (S_ISLNK(inode->i_mode))
- flags |= SHFL_REMOVE_SYMLINK;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- err = vboxsf_remove(sbi->root, path, flags);
- __putname(path);
- if (err)
- return err;
-
- /* parent directory access/change time changed */
- sf_parent_i->force_restat = 1;
-
- return 0;
-}
-
-static int vboxsf_dir_rename(struct inode *old_parent,
- struct dentry *old_dentry,
- struct inode *new_parent,
- struct dentry *new_dentry,
- unsigned int flags)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(old_parent->i_sb);
- struct vboxsf_inode *sf_old_parent_i = VBOXSF_I(old_parent);
- struct vboxsf_inode *sf_new_parent_i = VBOXSF_I(new_parent);
- u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS;
- struct shfl_string *old_path, *new_path;
- int err;
-
- if (flags)
- return -EINVAL;
-
- old_path = vboxsf_path_from_dentry(sbi, old_dentry);
- if (IS_ERR(old_path))
- return PTR_ERR(old_path);
-
- new_path = vboxsf_path_from_dentry(sbi, new_dentry);
- if (IS_ERR(new_path)) {
- err = PTR_ERR(new_path);
- goto err_put_old_path;
- }
-
- if (d_inode(old_dentry)->i_mode & S_IFDIR)
- shfl_flags = 0;
-
- err = vboxsf_rename(sbi->root, old_path, new_path, shfl_flags);
- if (err == 0) {
- /* parent directories access/change time changed */
- sf_new_parent_i->force_restat = 1;
- sf_old_parent_i->force_restat = 1;
- }
-
- __putname(new_path);
-err_put_old_path:
- __putname(old_path);
- return err;
-}
-
-static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry,
- const char *symname)
-{
- struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- int symname_size = strlen(symname) + 1;
- struct shfl_string *path, *ssymname;
- struct shfl_fsobjinfo info;
- int err;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- ssymname = kmalloc(SHFLSTRING_HEADER_SIZE + symname_size, GFP_KERNEL);
- if (!ssymname) {
- __putname(path);
- return -ENOMEM;
- }
- ssymname->length = symname_size - 1;
- ssymname->size = symname_size;
- memcpy(ssymname->string.utf8, symname, symname_size);
-
- err = vboxsf_symlink(sbi->root, path, ssymname, &info);
- kfree(ssymname);
- __putname(path);
- if (err) {
-		/* -EROFS means symlinks are not supported -> -EPERM */
- return (err == -EROFS) ? -EPERM : err;
- }
-
- err = vboxsf_dir_instantiate(parent, dentry, &info);
- if (err)
- return err;
-
- /* parent directory access/change time changed */
- sf_parent_i->force_restat = 1;
- return 0;
-}
-
-const struct inode_operations vboxsf_dir_iops = {
- .lookup = vboxsf_dir_lookup,
- .create = vboxsf_dir_mkfile,
- .mkdir = vboxsf_dir_mkdir,
- .rmdir = vboxsf_dir_unlink,
- .unlink = vboxsf_dir_unlink,
- .rename = vboxsf_dir_rename,
- .symlink = vboxsf_dir_symlink,
- .getattr = vboxsf_getattr,
- .setattr = vboxsf_setattr,
-};
diff --git a/drivers/staging/vboxsf/file.c b/drivers/staging/vboxsf/file.c
deleted file mode 100644
index 4b61ccf83fca..000000000000
--- a/drivers/staging/vboxsf/file.c
+++ /dev/null
@@ -1,370 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Regular file inode and file ops.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <linux/sizes.h>
-#include "vfsmod.h"
-
-struct vboxsf_handle {
- u64 handle;
- u32 root;
- u32 access_flags;
- struct kref refcount;
- struct list_head head;
-};
-
-static int vboxsf_file_open(struct inode *inode, struct file *file)
-{
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct shfl_createparms params = {};
- struct vboxsf_handle *sf_handle;
- u32 access_flags = 0;
- int err;
-
- sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
- if (!sf_handle)
- return -ENOMEM;
-
- /*
- * We check the value of params.handle afterwards to find out if
- * the call succeeded or failed, as the API does not seem to cleanly
- * distinguish error and informational messages.
- *
- * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
- * make the shared folders host service use our mode parameter.
- */
- params.handle = SHFL_HANDLE_NIL;
- if (file->f_flags & O_CREAT) {
- params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
- /*
- * We ignore O_EXCL, as the Linux kernel seems to call create
- * beforehand itself, so O_EXCL should always fail.
- */
- if (file->f_flags & O_TRUNC)
- params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
- else
- params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
- } else {
- params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
- if (file->f_flags & O_TRUNC)
- params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
- }
-
- switch (file->f_flags & O_ACCMODE) {
- case O_RDONLY:
- access_flags |= SHFL_CF_ACCESS_READ;
- break;
-
- case O_WRONLY:
- access_flags |= SHFL_CF_ACCESS_WRITE;
- break;
-
- case O_RDWR:
- access_flags |= SHFL_CF_ACCESS_READWRITE;
- break;
-
- default:
- WARN_ON(1);
- }
-
- if (file->f_flags & O_APPEND)
- access_flags |= SHFL_CF_ACCESS_APPEND;
-
- params.create_flags |= access_flags;
- params.info.attr.mode = inode->i_mode;
-
- err = vboxsf_create_at_dentry(file_dentry(file), &params);
- if (err == 0 && params.handle == SHFL_HANDLE_NIL)
- err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
- if (err) {
- kfree(sf_handle);
- return err;
- }
-
-	/* the host may have given us different attrs than requested */
- sf_i->force_restat = 1;
-
- /* init our handle struct and add it to the inode's handles list */
- sf_handle->handle = params.handle;
- sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
- sf_handle->access_flags = access_flags;
- kref_init(&sf_handle->refcount);
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_add(&sf_handle->head, &sf_i->handle_list);
- mutex_unlock(&sf_i->handle_list_mutex);
-
- file->private_data = sf_handle;
- return 0;
-}
-
-static void vboxsf_handle_release(struct kref *refcount)
-{
- struct vboxsf_handle *sf_handle =
- container_of(refcount, struct vboxsf_handle, refcount);
-
- vboxsf_close(sf_handle->root, sf_handle->handle);
- kfree(sf_handle);
-}
-
-static int vboxsf_file_release(struct inode *inode, struct file *file)
-{
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct vboxsf_handle *sf_handle = file->private_data;
-
- /*
- * When a file is closed on our (the guest) side, we want any subsequent
- * accesses done on the host side to see all changes done from our side.
- */
- filemap_write_and_wait(inode->i_mapping);
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_del(&sf_handle->head);
- mutex_unlock(&sf_i->handle_list_mutex);
-
- kref_put(&sf_handle->refcount, vboxsf_handle_release);
- return 0;
-}
-
-/*
- * Write back dirty pages now, because there may not be any suitable
- * open files later
- */
-static void vboxsf_vma_close(struct vm_area_struct *vma)
-{
- filemap_write_and_wait(vma->vm_file->f_mapping);
-}
-
-static const struct vm_operations_struct vboxsf_file_vm_ops = {
- .close = vboxsf_vma_close,
- .fault = filemap_fault,
- .map_pages = filemap_map_pages,
-};
-
-static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int err;
-
- err = generic_file_mmap(file, vma);
- if (!err)
- vma->vm_ops = &vboxsf_file_vm_ops;
-
- return err;
-}
-
-/*
- * Note that since we are accessing files on the host's filesystem, files
- * may always be changed underneath us by the host!
- *
- * The vboxsf API between the guest and the host does not offer any functions
- * to deal with this. There is no inode-generation to check for changes, no
- * events / callback on changes and no way to lock files.
- *
- * To avoid returning stale data when a file gets *opened* on our (the guest)
- * side, we do a "stat" on the host side, then compare the mtime with the
- * last known mtime and invalidate the page-cache if they differ.
- * This is done from vboxsf_inode_revalidate().
- *
- * When reads are done through the read_iter fop, it is possible to do
- * further cache revalidation then; there are 3 options to deal with this:
- *
- * 1) Rely solely on the revalidation done at open time
- * 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf
- * host API does not allow stat on handles, so we would need to use
- * file->f_path.dentry and the stat will then fail if the file was unlinked
- * or renamed (and there is no such thing as NFS' silly-rename). So we get:
- * 2a) "stat" and compare mtime, on stat failure invalidate the cache
- * 2b) "stat" and compare mtime, on stat failure do nothing
- * 3) Simply always call invalidate_inode_pages2_range on the range of the read
- *
- * Currently we are keeping things KISS and using option 1. This allows
- * directly using generic_file_read_iter without wrapping it.
- *
- * This means that only data written on the host side before open() on
- * the guest side is guaranteed to be seen by the guest. If necessary
- * we may provide other read-cache strategies in the future and make this
- * configurable through a mount option.
- */
-const struct file_operations vboxsf_reg_fops = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .mmap = vboxsf_file_mmap,
- .open = vboxsf_file_open,
- .release = vboxsf_file_release,
- .fsync = noop_fsync,
- .splice_read = generic_file_splice_read,
-};
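The comment above settles on option 1; for contrast, a minimal sketch of what option 3 could have looked like, wrapping generic_file_read_iter behind an unconditional page-cache invalidation. Hypothetical code, never part of this driver:

	static ssize_t vboxsf_read_iter_opt3(struct kiocb *iocb, struct iov_iter *to)
	{
		struct address_space *mapping = iocb->ki_filp->f_mapping;
		pgoff_t first = iocb->ki_pos >> PAGE_SHIFT;
		pgoff_t last;

		if (!iov_iter_count(to))
			return 0;
		last = (iocb->ki_pos + iov_iter_count(to) - 1) >> PAGE_SHIFT;

		/* drop cached pages in the range so the read goes to the host */
		invalidate_inode_pages2_range(mapping, first, last);

		return generic_file_read_iter(iocb, to);
	}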
-
-const struct inode_operations vboxsf_reg_iops = {
- .getattr = vboxsf_getattr,
- .setattr = vboxsf_setattr
-};
-
-static int vboxsf_readpage(struct file *file, struct page *page)
-{
- struct vboxsf_handle *sf_handle = file->private_data;
- loff_t off = page_offset(page);
- u32 nread = PAGE_SIZE;
- u8 *buf;
- int err;
-
- buf = kmap(page);
-
- err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
- if (err == 0) {
- memset(&buf[nread], 0, PAGE_SIZE - nread);
- flush_dcache_page(page);
- SetPageUptodate(page);
- } else {
- SetPageError(page);
- }
-
- kunmap(page);
- unlock_page(page);
- return err;
-}
-
-static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
-{
- struct vboxsf_handle *h, *sf_handle = NULL;
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_for_each_entry(h, &sf_i->handle_list, head) {
- if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
- h->access_flags == SHFL_CF_ACCESS_READWRITE) {
- kref_get(&h->refcount);
- sf_handle = h;
- break;
- }
- }
- mutex_unlock(&sf_i->handle_list_mutex);
-
- return sf_handle;
-}
-
-static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct vboxsf_handle *sf_handle;
- loff_t off = page_offset(page);
- loff_t size = i_size_read(inode);
- u32 nwrite = PAGE_SIZE;
- u8 *buf;
- int err;
-
- if (off + PAGE_SIZE > size)
- nwrite = size & ~PAGE_MASK;
-
- sf_handle = vboxsf_get_write_handle(sf_i);
- if (!sf_handle)
- return -EBADF;
-
- buf = kmap(page);
- err = vboxsf_write(sf_handle->root, sf_handle->handle,
- off, &nwrite, buf);
- kunmap(page);
-
- kref_put(&sf_handle->refcount, vboxsf_handle_release);
-
- if (err == 0) {
- ClearPageError(page);
- /* mtime changed */
- sf_i->force_restat = 1;
- } else {
- ClearPageUptodate(page);
- }
-
- unlock_page(page);
- return err;
-}
-
-static int vboxsf_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = mapping->host;
- struct vboxsf_handle *sf_handle = file->private_data;
- unsigned int from = pos & ~PAGE_MASK;
- u32 nwritten = len;
- u8 *buf;
- int err;
-
- buf = kmap(page);
- err = vboxsf_write(sf_handle->root, sf_handle->handle,
- pos, &nwritten, buf + from);
- kunmap(page);
-
- if (err) {
- nwritten = 0;
- goto out;
- }
-
- /* mtime changed */
- VBOXSF_I(inode)->force_restat = 1;
-
- if (!PageUptodate(page) && nwritten == PAGE_SIZE)
- SetPageUptodate(page);
-
- pos += nwritten;
- if (pos > inode->i_size)
- i_size_write(inode, pos);
-
-out:
- unlock_page(page);
- put_page(page);
-
- return nwritten;
-}
-
-const struct address_space_operations vboxsf_reg_aops = {
- .readpage = vboxsf_readpage,
- .writepage = vboxsf_writepage,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .write_begin = simple_write_begin,
- .write_end = vboxsf_write_end,
-};
-
-static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *done)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
- struct shfl_string *path;
- char *link;
- int err;
-
- if (!dentry)
- return ERR_PTR(-ECHILD);
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return (char *)path;
-
- link = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!link) {
- __putname(path);
- return ERR_PTR(-ENOMEM);
- }
-
- err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
- __putname(path);
- if (err) {
- kfree(link);
- return ERR_PTR(err);
- }
-
- set_delayed_call(done, kfree_link, link);
- return link;
-}
-
-const struct inode_operations vboxsf_lnk_iops = {
- .get_link = vboxsf_get_link
-};
diff --git a/drivers/staging/vboxsf/shfl_hostintf.h b/drivers/staging/vboxsf/shfl_hostintf.h
deleted file mode 100644
index aca829062c12..000000000000
--- a/drivers/staging/vboxsf/shfl_hostintf.h
+++ /dev/null
@@ -1,901 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * VirtualBox Shared Folders: host interface definition.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#ifndef SHFL_HOSTINTF_H
-#define SHFL_HOSTINTF_H
-
-#include <linux/vbox_vmmdev_types.h>
-
-/* The max in/out buffer size for a FN_READ or FN_WRITE call */
-#define SHFL_MAX_RW_COUNT (16 * SZ_1M)
-
-/*
- * Structures shared between guest and the service
- * can be relocated and use offsets to point to variable
- * length parts.
- *
- * Shared folders protocol works with handles.
- * Before doing any action on a file system object,
- * one has to obtain the object handle via a SHFL_FN_CREATE
- * request. A handle must be closed with SHFL_FN_CLOSE.
- */
-
-enum {
- SHFL_FN_QUERY_MAPPINGS = 1, /* Query mappings changes. */
- SHFL_FN_QUERY_MAP_NAME = 2, /* Query map name. */
- SHFL_FN_CREATE = 3, /* Open/create object. */
- SHFL_FN_CLOSE = 4, /* Close object handle. */
- SHFL_FN_READ = 5, /* Read object content. */
- SHFL_FN_WRITE = 6, /* Write new object content. */
- SHFL_FN_LOCK = 7, /* Lock/unlock a range in the object. */
- SHFL_FN_LIST = 8, /* List object content. */
- SHFL_FN_INFORMATION = 9, /* Query/set object information. */
- /* Note function number 10 is not used! */
- SHFL_FN_REMOVE = 11, /* Remove object */
- SHFL_FN_MAP_FOLDER_OLD = 12, /* Map folder (legacy) */
- SHFL_FN_UNMAP_FOLDER = 13, /* Unmap folder */
- SHFL_FN_RENAME = 14, /* Rename object */
- SHFL_FN_FLUSH = 15, /* Flush file */
- SHFL_FN_SET_UTF8 = 16, /* Select UTF8 filename encoding */
- SHFL_FN_MAP_FOLDER = 17, /* Map folder */
- SHFL_FN_READLINK = 18, /* Read symlink dest (as of VBox 4.0) */
- SHFL_FN_SYMLINK = 19, /* Create symlink (as of VBox 4.0) */
- SHFL_FN_SET_SYMLINKS = 20, /* Ask host to show symlinks (4.0+) */
-};
-
-/* Root handles for a mapping are of type u32, Root handles are unique. */
-#define SHFL_ROOT_NIL UINT_MAX
-
-/* Shared folders handle for an opened object are of type u64. */
-#define SHFL_HANDLE_NIL ULLONG_MAX
-
-/* Hardcoded maximum length (in chars) of a shared folder name. */
-#define SHFL_MAX_LEN (256)
-/* Hardcoded maximum number of shared folder mapping available to the guest. */
-#define SHFL_MAX_MAPPINGS (64)
-
-/** Shared folder string buffer structure. */
-struct shfl_string {
- /** Allocated size of the string member in bytes. */
- u16 size;
-
- /** Length of string without trailing nul in bytes. */
- u16 length;
-
- /** UTF-8 or UTF-16 string. Nul terminated. */
- union {
- u8 utf8[2];
- u16 utf16[1];
- u16 ucs2[1]; /* misnomer, use utf16. */
- } string;
-};
-VMMDEV_ASSERT_SIZE(shfl_string, 6);
-
-/* The size of shfl_string w/o the string part. */
-#define SHFLSTRING_HEADER_SIZE 4
-
-/* Calculate size of the string. */
-static inline u32 shfl_string_buf_size(const struct shfl_string *string)
-{
- return string ? SHFLSTRING_HEADER_SIZE + string->size : 0;
-}
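shfl_string_buf_size() pairs with the allocation pattern used in dir.c above (see vboxsf_dir_symlink): length excludes the trailing nul, size includes it. A hypothetical helper following that convention; real callers must also check that len + 1 fits in the u16 size field:

	static struct shfl_string *shfl_string_from_cstr(const char *s)
	{
		size_t len = strlen(s);		/* bytes, without the nul */
		struct shfl_string *str;

		str = kmalloc(SHFLSTRING_HEADER_SIZE + len + 1, GFP_KERNEL);
		if (!str)
			return NULL;

		str->size = len + 1;		/* allocated string bytes */
		str->length = len;		/* length without trailing nul */
		memcpy(str->string.utf8, s, len + 1);	/* copy including nul */
		return str;
	}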
-
-/* Set user id on execution (S_ISUID). */
-#define SHFL_UNIX_ISUID 0004000U
-/* Set group id on execution (S_ISGID). */
-#define SHFL_UNIX_ISGID 0002000U
-/* Sticky bit (S_ISVTX / S_ISTXT). */
-#define SHFL_UNIX_ISTXT 0001000U
-
-/* Owner readable (S_IRUSR). */
-#define SHFL_UNIX_IRUSR 0000400U
-/* Owner writable (S_IWUSR). */
-#define SHFL_UNIX_IWUSR 0000200U
-/* Owner executable (S_IXUSR). */
-#define SHFL_UNIX_IXUSR 0000100U
-
-/* Group readable (S_IRGRP). */
-#define SHFL_UNIX_IRGRP 0000040U
-/* Group writable (S_IWGRP). */
-#define SHFL_UNIX_IWGRP 0000020U
-/* Group executable (S_IXGRP). */
-#define SHFL_UNIX_IXGRP 0000010U
-
-/* Other readable (S_IROTH). */
-#define SHFL_UNIX_IROTH 0000004U
-/* Other writable (S_IWOTH). */
-#define SHFL_UNIX_IWOTH 0000002U
-/* Other executable (S_IXOTH). */
-#define SHFL_UNIX_IXOTH 0000001U
-
-/* Named pipe (fifo) (S_IFIFO). */
-#define SHFL_TYPE_FIFO 0010000U
-/* Character device (S_IFCHR). */
-#define SHFL_TYPE_DEV_CHAR 0020000U
-/* Directory (S_IFDIR). */
-#define SHFL_TYPE_DIRECTORY 0040000U
-/* Block device (S_IFBLK). */
-#define SHFL_TYPE_DEV_BLOCK 0060000U
-/* Regular file (S_IFREG). */
-#define SHFL_TYPE_FILE 0100000U
-/* Symbolic link (S_IFLNK). */
-#define SHFL_TYPE_SYMLINK 0120000U
-/* Socket (S_IFSOCK). */
-#define SHFL_TYPE_SOCKET 0140000U
-/* Whiteout (S_IFWHT). */
-#define SHFL_TYPE_WHITEOUT 0160000U
-/* Type mask (S_IFMT). */
-#define SHFL_TYPE_MASK 0170000U
-
-/* Checks the mode flags indicate a directory (S_ISDIR). */
-#define SHFL_IS_DIRECTORY(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_DIRECTORY)
-/* Checks the mode flags indicate a symbolic link (S_ISLNK). */
-#define SHFL_IS_SYMLINK(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_SYMLINK)
-
-/** The available additional information in a shfl_fsobjattr object. */
-enum shfl_fsobjattr_add {
- /** No additional information is available / requested. */
- SHFLFSOBJATTRADD_NOTHING = 1,
- /**
- * The additional unix attributes (shfl_fsobjattr::u::unix_attr) are
- * available / requested.
- */
- SHFLFSOBJATTRADD_UNIX,
- /**
- * The additional extended attribute size (shfl_fsobjattr::u::size) is
- * available / requested.
- */
- SHFLFSOBJATTRADD_EASIZE,
- /**
- * The last valid item (inclusive).
- * The valid range is SHFLFSOBJATTRADD_NOTHING thru
- * SHFLFSOBJATTRADD_LAST.
- */
- SHFLFSOBJATTRADD_LAST = SHFLFSOBJATTRADD_EASIZE,
-
- /** The usual 32-bit hack. */
- SHFLFSOBJATTRADD_32BIT_SIZE_HACK = 0x7fffffff
-};
-
-/**
- * Additional unix Attributes, these are available when
- * shfl_fsobjattr.additional == SHFLFSOBJATTRADD_UNIX.
- */
-struct shfl_fsobjattr_unix {
- /**
- * The user owning the filesystem object (st_uid).
- * This field is ~0U if not supported.
- */
- u32 uid;
-
- /**
- * The group the filesystem object is assigned (st_gid).
- * This field is ~0U if not supported.
- */
- u32 gid;
-
- /**
- * Number of hard links to this filesystem object (st_nlink).
- * This field is 1 if the filesystem doesn't support hardlinking or
- * the information isn't available.
- */
- u32 hardlinks;
-
- /**
- * The device number of the device which this filesystem object resides
- * on (st_dev). This field is 0 if this information is not available.
- */
- u32 inode_id_device;
-
- /**
- * The unique identifier (within the filesystem) of this filesystem
- * object (st_ino). Together with inode_id_device, this field can be
- * used as an OS-wide unique id, when both their values are not 0.
- * This field is 0 if the information is not available.
- */
- u64 inode_id;
-
- /**
- * User flags (st_flags).
- * This field is 0 if this information is not available.
- */
- u32 flags;
-
- /**
- * The current generation number (st_gen).
- * This field is 0 if this information is not available.
- */
- u32 generation_id;
-
- /**
- * The device number of a char. or block device type object (st_rdev).
- * This field is 0 if the file isn't a char. or block device or when
- * the OS doesn't use the major+minor device identification scheme.
- */
- u32 device;
-} __packed;
-
-/** Extended attribute size. */
-struct shfl_fsobjattr_easize {
- /** Size of EAs. */
- s64 cb;
-} __packed;
-
-/** Shared folder filesystem object attributes. */
-struct shfl_fsobjattr {
- /** Mode flags (st_mode). SHFL_UNIX_*, SHFL_TYPE_*, and SHFL_DOS_*. */
- u32 mode;
-
- /** The additional attributes available. */
- enum shfl_fsobjattr_add additional;
-
- /**
- * Additional attributes.
- *
- * Unless explicitly specified to an API, the API can provide additional
- * data as it is provided by the underlying OS.
- */
- union {
- struct shfl_fsobjattr_unix unix_attr;
- struct shfl_fsobjattr_easize size;
- } __packed u;
-} __packed;
-VMMDEV_ASSERT_SIZE(shfl_fsobjattr, 44);
-
-struct shfl_timespec {
- s64 ns_relative_to_unix_epoch;
-};
-
-/** Filesystem object information structure. */
-struct shfl_fsobjinfo {
- /**
- * Logical size (st_size).
- * For normal files this is the size of the file.
- * For symbolic links, this is the length of the path name contained
- * in the symbolic link.
- * For other objects this field needs to be specified.
- */
- s64 size;
-
- /** Disk allocation size (st_blocks * DEV_BSIZE). */
- s64 allocated;
-
- /** Time of last access (st_atime). */
- struct shfl_timespec access_time;
-
- /** Time of last data modification (st_mtime). */
- struct shfl_timespec modification_time;
-
- /**
- * Time of last status change (st_ctime).
- * If not available this is set to modification_time.
- */
- struct shfl_timespec change_time;
-
- /**
- * Time of file birth (st_birthtime).
- * If not available this is set to change_time.
- */
- struct shfl_timespec birth_time;
-
- /** Attributes. */
- struct shfl_fsobjattr attr;
-
-} __packed;
-VMMDEV_ASSERT_SIZE(shfl_fsobjinfo, 92);
-
-/**
- * result of an open/create request.
- * Along with the handle value the result code
- * identifies what has happened while
- * trying to open the object.
- */
-enum shfl_create_result {
- SHFL_NO_RESULT,
- /** Specified path does not exist. */
- SHFL_PATH_NOT_FOUND,
- /** Path to file exists, but the last component does not. */
- SHFL_FILE_NOT_FOUND,
- /** File already exists and either has been opened or not. */
- SHFL_FILE_EXISTS,
- /** New file was created. */
- SHFL_FILE_CREATED,
- /** Existing file was replaced or overwritten. */
- SHFL_FILE_REPLACED
-};
-
-/* No flags. Initialization value. */
-#define SHFL_CF_NONE (0x00000000)
-
-/*
- * Only lookup the object, do not return a handle. When this is set all other
- * flags are ignored.
- */
-#define SHFL_CF_LOOKUP (0x00000001)
-
-/*
- * Open parent directory of specified object.
- * Useful for the corresponding Windows FSD flag
- * and for opening paths like \\dir\\*.* to search the 'dir'.
- */
-#define SHFL_CF_OPEN_TARGET_DIRECTORY (0x00000002)
-
-/* Create/open a directory. */
-#define SHFL_CF_DIRECTORY (0x00000004)
-
-/*
- * Open/create action to do if object exists
- * and if the object does not exist.
- * REPLACE file means atomically DELETE and CREATE.
- * OVERWRITE file means truncating the file to 0 and
- * setting new size.
- * When opening an existing directory REPLACE and OVERWRITE
- * actions are considered invalid, and cause returning
- * FILE_EXISTS with NIL handle.
- */
-#define SHFL_CF_ACT_MASK_IF_EXISTS (0x000000f0)
-#define SHFL_CF_ACT_MASK_IF_NEW (0x00000f00)
-
-/* What to do if object exists. */
-#define SHFL_CF_ACT_OPEN_IF_EXISTS (0x00000000)
-#define SHFL_CF_ACT_FAIL_IF_EXISTS (0x00000010)
-#define SHFL_CF_ACT_REPLACE_IF_EXISTS (0x00000020)
-#define SHFL_CF_ACT_OVERWRITE_IF_EXISTS (0x00000030)
-
-/* What to do if object does not exist. */
-#define SHFL_CF_ACT_CREATE_IF_NEW (0x00000000)
-#define SHFL_CF_ACT_FAIL_IF_NEW (0x00000100)
-
-/* Read/write requested access for the object. */
-#define SHFL_CF_ACCESS_MASK_RW (0x00003000)
-
-/* No access requested. */
-#define SHFL_CF_ACCESS_NONE (0x00000000)
-/* Read access requested. */
-#define SHFL_CF_ACCESS_READ (0x00001000)
-/* Write access requested. */
-#define SHFL_CF_ACCESS_WRITE (0x00002000)
-/* Read/Write access requested. */
-#define SHFL_CF_ACCESS_READWRITE (0x00003000)
-
-/* Requested share access for the object. */
-#define SHFL_CF_ACCESS_MASK_DENY (0x0000c000)
-
-/* Allow any access. */
-#define SHFL_CF_ACCESS_DENYNONE (0x00000000)
-/* Do not allow read. */
-#define SHFL_CF_ACCESS_DENYREAD (0x00004000)
-/* Do not allow write. */
-#define SHFL_CF_ACCESS_DENYWRITE (0x00008000)
-/* Do not allow access. */
-#define SHFL_CF_ACCESS_DENYALL (0x0000c000)
-
-/* Requested access to attributes of the object. */
-#define SHFL_CF_ACCESS_MASK_ATTR (0x00030000)
-
-/* No access requested. */
-#define SHFL_CF_ACCESS_ATTR_NONE (0x00000000)
-/* Read access requested. */
-#define SHFL_CF_ACCESS_ATTR_READ (0x00010000)
-/* Write access requested. */
-#define SHFL_CF_ACCESS_ATTR_WRITE (0x00020000)
-/* Read/Write access requested. */
-#define SHFL_CF_ACCESS_ATTR_READWRITE (0x00030000)
-
-/*
- * The file is opened in append mode.
- * Ignored if SHFL_CF_ACCESS_WRITE is not set.
- */
-#define SHFL_CF_ACCESS_APPEND (0x00040000)
-
-/** Create parameters buffer struct for SHFL_FN_CREATE call */
-struct shfl_createparms {
- /** Returned handle of opened object. */
- u64 handle;
-
- /** Returned result of the operation */
- enum shfl_create_result result;
-
- /** SHFL_CF_* */
- u32 create_flags;
-
- /**
- * Attributes of object to create and
- * returned actual attributes of opened/created object.
- */
- struct shfl_fsobjinfo info;
-} __packed;
-
-/** Shared Folder directory information */
-struct shfl_dirinfo {
- /** Full information about the object. */
- struct shfl_fsobjinfo info;
- /**
- * The length of the short field (number of UTF16 chars).
- * It is 16-bit for reasons of alignment.
- */
- u16 short_name_len;
- /**
- * The short name for 8.3 compatibility.
- * Empty string if not available.
- */
- u16 short_name[14];
- struct shfl_string name;
-};
-
-/** Shared folder filesystem properties. */
-struct shfl_fsproperties {
- /**
- * The maximum size of a filesystem object name.
- * This does not include the '\\0'.
- */
- u32 max_component_len;
-
- /**
- * True if the filesystem is remote.
- * False if the filesystem is local.
- */
- bool remote;
-
- /**
- * True if the filesystem is case sensitive.
- * False if the filesystem is case insensitive.
- */
- bool case_sensitive;
-
- /**
- * True if the filesystem is mounted read only.
- * False if the filesystem is mounted read write.
- */
- bool read_only;
-
- /**
- * True if the filesystem can encode unicode object names.
- * False if it can't.
- */
- bool supports_unicode;
-
- /**
- * True if the filesystem is compressed.
- * False if it isn't or we don't know.
- */
- bool compressed;
-
- /**
- * True if the filesystem supports compression of individual files.
- * False if it doesn't or we don't know.
- */
- bool file_compression;
-};
-VMMDEV_ASSERT_SIZE(shfl_fsproperties, 12);
-
-struct shfl_volinfo {
- s64 total_allocation_bytes;
- s64 available_allocation_bytes;
- u32 bytes_per_allocation_unit;
- u32 bytes_per_sector;
- u32 serial;
- struct shfl_fsproperties properties;
-};
-
-
-/** SHFL_FN_MAP_FOLDER Parameters structure. */
-struct shfl_map_folder {
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, out: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in: UTF16
- * Path delimiter
- */
- struct vmmdev_hgcm_function_parameter delimiter;
-
- /**
- * pointer, in: SHFLROOT (u32)
- * Case sensitive flag
- */
- struct vmmdev_hgcm_function_parameter case_sensitive;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_MAP_FOLDER (4)
-
-
-/** SHFL_FN_UNMAP_FOLDER Parameters structure. */
-struct shfl_unmap_folder {
- /**
- * pointer, in: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_UNMAP_FOLDER (1)
-
-
-/** SHFL_FN_CREATE Parameters structure. */
-struct shfl_create {
- /**
- * pointer, in: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, in/out:
- * Points to struct shfl_createparms buffer.
- */
- struct vmmdev_hgcm_function_parameter parms;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_CREATE (3)
-
-
-/** SHFL_FN_CLOSE Parameters structure. */
-struct shfl_close {
- /**
- * pointer, in: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to close.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_CLOSE (2)
-
-
-/** SHFL_FN_READ Parameters structure. */
-struct shfl_read {
- /**
- * pointer, in: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to read from.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value64, in:
- * Offset to read from.
- */
- struct vmmdev_hgcm_function_parameter offset;
-
- /**
- * value64, in/out:
- * Bytes to read/How many were read.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, out:
- * Buffer to place data to.
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_READ (5)
-
-
-/** SHFL_FN_WRITE Parameters structure. */
-struct shfl_write {
- /**
- * pointer, in: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to write to.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value64, in:
- * Offset to write to.
- */
- struct vmmdev_hgcm_function_parameter offset;
-
- /**
- * value64, in/out:
- * Bytes to write/How many were written.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, in:
- * Data to write.
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_WRITE (5)
-
-
-/*
- * SHFL_FN_LIST
- * Listing information includes variable length RTDIRENTRY[EX] structures.
- */
-
-#define SHFL_LIST_NONE 0
-#define SHFL_LIST_RETURN_ONE 1
-
-/** SHFL_FN_LIST Parameters structure. */
-struct shfl_list {
- /**
- * pointer, in: SHFLROOT (u32)
- * Root handle of the mapping which name is queried.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to be listed.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value32, in:
- * List flags SHFL_LIST_*.
- */
- struct vmmdev_hgcm_function_parameter flags;
-
- /**
- * value32, in/out:
- * Bytes to be used for listing information/How many bytes were used.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, in/optional
- * Points to struct shfl_string buffer that specifies a search path.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, out:
- * Buffer to place listing information to. (struct shfl_dirinfo)
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
- /**
- * value32, in/out:
- * Indicates a key where the listing must be resumed.
- * in: 0 means start from the beginning of the object.
- * out: 0 means listing completed.
- */
- struct vmmdev_hgcm_function_parameter resume_point;
-
- /**
- * pointer, out:
- * Number of files returned
- */
- struct vmmdev_hgcm_function_parameter file_count;
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_LIST (8)
-
-
-/** SHFL_FN_READLINK Parameters structure. */
-struct shfl_readLink {
- /**
- * pointer, in: SHFLROOT (u32)
-	 * Root handle of the mapping the path is relative to.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, out:
- * Buffer to place data to.
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_READLINK (3)
-
-
-/* SHFL_FN_INFORMATION */
-
-/* Mask of Set/Get bit. */
-#define SHFL_INFO_MODE_MASK (0x1)
-/* Get information */
-#define SHFL_INFO_GET (0x0)
-/* Set information */
-#define SHFL_INFO_SET (0x1)
-
-/* Get name of the object. */
-#define SHFL_INFO_NAME (0x2)
-/* Set size of object (extend/truncate); only applies to file objects */
-#define SHFL_INFO_SIZE (0x4)
-/* Get/Set file object info. */
-#define SHFL_INFO_FILE (0x8)
-/* Get volume information. */
-#define SHFL_INFO_VOLUME (0x10)
-
-/** SHFL_FN_INFORMATION Parameters structure. */
-struct shfl_information {
- /**
- * pointer, in: SHFLROOT (u32)
-	 * Root handle of the mapping the handle belongs to.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to be listed.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value32, in:
- * SHFL_INFO_*
- */
- struct vmmdev_hgcm_function_parameter flags;
-
- /**
- * value32, in/out:
- * Bytes to be used for information/How many bytes were used.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, in/out:
- * Information to be set/get (shfl_fsobjinfo or shfl_string). Do not
- * forget to set the shfl_fsobjinfo::attr::additional for a get
- * operation as well.
- */
- struct vmmdev_hgcm_function_parameter info;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_INFORMATION (5)
-
-
-/* SHFL_FN_REMOVE */
-
-#define SHFL_REMOVE_FILE (0x1)
-#define SHFL_REMOVE_DIR (0x2)
-#define SHFL_REMOVE_SYMLINK (0x4)
-
-/** SHFL_FN_REMOVE Parameters structure. */
-struct shfl_remove {
- /**
- * pointer, in: SHFLROOT (u32)
-	 * Root handle of the mapping the path is relative to.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * value32, in:
- * remove flags (file/directory)
- */
- struct vmmdev_hgcm_function_parameter flags;
-
-};
-
-#define SHFL_CPARMS_REMOVE (3)
-
-
-/* SHFL_FN_RENAME */
-
-#define SHFL_RENAME_FILE (0x1)
-#define SHFL_RENAME_DIR (0x2)
-#define SHFL_RENAME_REPLACE_IF_EXISTS (0x4)
-
-/** SHFL_FN_RENAME Parameters structure. */
-struct shfl_rename {
- /**
- * pointer, in: SHFLROOT (u32)
-	 * Root handle of the mapping the paths are relative to.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string src.
- */
- struct vmmdev_hgcm_function_parameter src;
-
- /**
- * pointer, in:
- * Points to struct shfl_string dest.
- */
- struct vmmdev_hgcm_function_parameter dest;
-
- /**
- * value32, in:
- * rename flags (file/directory)
- */
- struct vmmdev_hgcm_function_parameter flags;
-
-};
-
-#define SHFL_CPARMS_RENAME (4)
-
-
-/** SHFL_FN_SYMLINK Parameters structure. */
-struct shfl_symlink {
- /**
- * pointer, in: SHFLROOT (u32)
-	 * Root handle of the mapping the paths are relative to.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string of path for the new symlink.
- */
- struct vmmdev_hgcm_function_parameter new_path;
-
- /**
- * pointer, in:
- * Points to struct shfl_string of destination for symlink.
- */
- struct vmmdev_hgcm_function_parameter old_path;
-
- /**
- * pointer, out:
- * Information about created symlink.
- */
- struct vmmdev_hgcm_function_parameter info;
-
-};
-
-#define SHFL_CPARMS_SYMLINK (4)
-
-#endif
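
The SHFL_INFO_* constants combine a direction bit (SHFL_INFO_GET/SHFL_INFO_SET) with an object selector, and the cb member of struct shfl_information is in/out: buffer size in, bytes used out. A minimal sketch of the three flag combinations the driver code removed below relies on; the hex values follow directly from the defines above:

/* Hedged illustration, not driver code: SHFL_FN_INFORMATION flag words. */
u32 get_volume = SHFL_INFO_GET | SHFL_INFO_VOLUME;	/* 0x0 | 0x10 = 0x10, statfs   */
u32 set_attrs  = SHFL_INFO_SET | SHFL_INFO_FILE;	/* 0x1 | 0x08 = 0x09, setattr  */
u32 set_size   = SHFL_INFO_SET | SHFL_INFO_SIZE;	/* 0x1 | 0x04 = 0x05, truncate */
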
diff --git a/drivers/staging/vboxsf/super.c b/drivers/staging/vboxsf/super.c
deleted file mode 100644
index 0bf4d724aefd..000000000000
--- a/drivers/staging/vboxsf/super.c
+++ /dev/null
@@ -1,501 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Virtual File System.
- *
- * Module initialization/finalization
- * File system registration/deregistration
- * Superblock reading
- * Few utility functions
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/idr.h>
-#include <linux/fs_parser.h>
-#include <linux/magic.h>
-#include <linux/module.h>
-#include <linux/nls.h>
-#include <linux/statfs.h>
-#include <linux/vbox_utils.h>
-#include "vfsmod.h"
-
-#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
-
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
-
-static int follow_symlinks;
-module_param(follow_symlinks, int, 0444);
-MODULE_PARM_DESC(follow_symlinks,
- "Let host resolve symlinks rather than showing them");
-
-static DEFINE_IDA(vboxsf_bdi_ida);
-static DEFINE_MUTEX(vboxsf_setup_mutex);
-static bool vboxsf_setup_done;
-static struct super_operations vboxsf_super_ops; /* forward declaration */
-static struct kmem_cache *vboxsf_inode_cachep;
-
-static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;
-
-enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
- opt_dmask, opt_fmask };
-
-static const struct fs_parameter_spec vboxsf_param_specs[] = {
- fsparam_string ("nls", opt_nls),
- fsparam_u32 ("uid", opt_uid),
- fsparam_u32 ("gid", opt_gid),
- fsparam_u32 ("ttl", opt_ttl),
- fsparam_u32oct ("dmode", opt_dmode),
- fsparam_u32oct ("fmode", opt_fmode),
- fsparam_u32oct ("dmask", opt_dmask),
- fsparam_u32oct ("fmask", opt_fmask),
- {}
-};
-
-static const struct fs_parameter_description vboxsf_fs_parameters = {
- .name = "vboxsf",
- .specs = vboxsf_param_specs,
-};
-
-static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
-{
- struct vboxsf_fs_context *ctx = fc->fs_private;
- struct fs_parse_result result;
- kuid_t uid;
- kgid_t gid;
- int opt;
-
- opt = fs_parse(fc, &vboxsf_fs_parameters, param, &result);
- if (opt < 0)
- return opt;
-
- switch (opt) {
- case opt_nls:
- if (fc->purpose != FS_CONTEXT_FOR_MOUNT) {
- vbg_err("vboxsf: Cannot reconfigure nls option\n");
- return -EINVAL;
- }
- ctx->nls_name = param->string;
- param->string = NULL;
- break;
- case opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return -EINVAL;
- ctx->o.uid = uid;
- break;
- case opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return -EINVAL;
- ctx->o.gid = gid;
- break;
- case opt_ttl:
- ctx->o.ttl = msecs_to_jiffies(result.uint_32);
- break;
- case opt_dmode:
- if (result.uint_32 & ~0777)
- return -EINVAL;
- ctx->o.dmode = result.uint_32;
- ctx->o.dmode_set = true;
- break;
- case opt_fmode:
- if (result.uint_32 & ~0777)
- return -EINVAL;
- ctx->o.fmode = result.uint_32;
- ctx->o.fmode_set = true;
- break;
- case opt_dmask:
- if (result.uint_32 & ~07777)
- return -EINVAL;
- ctx->o.dmask = result.uint_32;
- break;
- case opt_fmask:
- if (result.uint_32 & ~07777)
- return -EINVAL;
- ctx->o.fmask = result.uint_32;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
-{
- struct vboxsf_fs_context *ctx = fc->fs_private;
- struct shfl_string *folder_name, root_path;
- struct vboxsf_sbi *sbi;
- struct dentry *droot;
- struct inode *iroot;
- char *nls_name;
- size_t size;
- int err;
-
- if (!fc->source)
- return -EINVAL;
-
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sbi->o = ctx->o;
- idr_init(&sbi->ino_idr);
- spin_lock_init(&sbi->ino_idr_lock);
- sbi->next_generation = 1;
- sbi->bdi_id = -1;
-
- /* Load nls if not utf8 */
- nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
- if (strcmp(nls_name, "utf8") != 0) {
- if (nls_name == vboxsf_default_nls)
- sbi->nls = load_nls_default();
- else
- sbi->nls = load_nls(nls_name);
-
- if (!sbi->nls) {
-			vbg_err("vboxsf: Could not load '%s' nls\n", nls_name);
- err = -EINVAL;
- goto fail_free;
- }
- }
-
- sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
- if (sbi->bdi_id < 0) {
- err = sbi->bdi_id;
- goto fail_free;
- }
-
- err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id);
- if (err)
- goto fail_free;
-
- /* Turn source into a shfl_string and map the folder */
- size = strlen(fc->source) + 1;
- folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
- if (!folder_name) {
- err = -ENOMEM;
- goto fail_free;
- }
- folder_name->size = size;
- folder_name->length = size - 1;
- strlcpy(folder_name->string.utf8, fc->source, size);
- err = vboxsf_map_folder(folder_name, &sbi->root);
- kfree(folder_name);
- if (err) {
- vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
- fc->source, err);
- goto fail_free;
- }
-
- root_path.length = 1;
- root_path.size = 2;
- root_path.string.utf8[0] = '/';
- root_path.string.utf8[1] = 0;
- err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
- if (err)
- goto fail_unmap;
-
- sb->s_magic = VBOXSF_SUPER_MAGIC;
- sb->s_blocksize = 1024;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_op = &vboxsf_super_ops;
- sb->s_d_op = &vboxsf_dentry_ops;
-
- iroot = iget_locked(sb, 0);
- if (!iroot) {
- err = -ENOMEM;
- goto fail_unmap;
- }
- vboxsf_init_inode(sbi, iroot, &sbi->root_info);
- unlock_new_inode(iroot);
-
- droot = d_make_root(iroot);
- if (!droot) {
- err = -ENOMEM;
- goto fail_unmap;
- }
-
- sb->s_root = droot;
- sb->s_fs_info = sbi;
- return 0;
-
-fail_unmap:
- vboxsf_unmap_folder(sbi->root);
-fail_free:
- if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
- if (sbi->nls)
- unload_nls(sbi->nls);
- idr_destroy(&sbi->ino_idr);
- kfree(sbi);
- return err;
-}
-
-static void vboxsf_inode_init_once(void *data)
-{
- struct vboxsf_inode *sf_i = data;
-
- mutex_init(&sf_i->handle_list_mutex);
- inode_init_once(&sf_i->vfs_inode);
-}
-
-static struct inode *vboxsf_alloc_inode(struct super_block *sb)
-{
- struct vboxsf_inode *sf_i;
-
- sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
- if (!sf_i)
- return NULL;
-
- sf_i->force_restat = 0;
- INIT_LIST_HEAD(&sf_i->handle_list);
-
- return &sf_i->vfs_inode;
-}
-
-static void vboxsf_free_inode(struct inode *inode)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
- unsigned long flags;
-
- spin_lock_irqsave(&sbi->ino_idr_lock, flags);
- idr_remove(&sbi->ino_idr, inode->i_ino);
- spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
- kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
-}
-
-static void vboxsf_put_super(struct super_block *sb)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
-
- vboxsf_unmap_folder(sbi->root);
- if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
- if (sbi->nls)
- unload_nls(sbi->nls);
-
- /*
- * vboxsf_free_inode uses the idr, make sure all delayed rcu free
- * inodes are flushed.
- */
- rcu_barrier();
- idr_destroy(&sbi->ino_idr);
- kfree(sbi);
-}
-
-static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
-{
- struct super_block *sb = dentry->d_sb;
- struct shfl_volinfo shfl_volinfo;
- struct vboxsf_sbi *sbi;
- u32 buf_len;
- int err;
-
- sbi = VBOXSF_SBI(sb);
- buf_len = sizeof(shfl_volinfo);
- err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
- &buf_len, &shfl_volinfo);
- if (err)
- return err;
-
- stat->f_type = VBOXSF_SUPER_MAGIC;
- stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;
-
- do_div(shfl_volinfo.total_allocation_bytes,
- shfl_volinfo.bytes_per_allocation_unit);
- stat->f_blocks = shfl_volinfo.total_allocation_bytes;
-
- do_div(shfl_volinfo.available_allocation_bytes,
- shfl_volinfo.bytes_per_allocation_unit);
- stat->f_bfree = shfl_volinfo.available_allocation_bytes;
- stat->f_bavail = shfl_volinfo.available_allocation_bytes;
-
- stat->f_files = 1000;
- /*
- * Don't return 0 here since the guest may then think that it is not
- * possible to create any more files.
- */
- stat->f_ffree = 1000000;
- stat->f_fsid.val[0] = 0;
- stat->f_fsid.val[1] = 0;
- stat->f_namelen = 255;
- return 0;
-}
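
The statfs conversion above divides 64-bit byte counts with do_div() because a plain 64-by-32 divide is not available on all 32-bit guests; do_div() divides its first argument in place and returns the remainder. A worked example with hypothetical host values:

/* Hypothetical numbers, for illustration only. */
u64 total = 1ULL << 30;		/* total_allocation_bytes: 1 GiB */
u32 unit = 4096;		/* bytes_per_allocation_unit     */

do_div(total, unit);		/* total now holds 262144, i.e. f_blocks */
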
-
-static struct super_operations vboxsf_super_ops = {
- .alloc_inode = vboxsf_alloc_inode,
- .free_inode = vboxsf_free_inode,
- .put_super = vboxsf_put_super,
- .statfs = vboxsf_statfs,
-};
-
-static int vboxsf_setup(void)
-{
- int err;
-
- mutex_lock(&vboxsf_setup_mutex);
-
- if (vboxsf_setup_done)
- goto success;
-
- vboxsf_inode_cachep =
- kmem_cache_create("vboxsf_inode_cache",
- sizeof(struct vboxsf_inode), 0,
- (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
- SLAB_ACCOUNT),
- vboxsf_inode_init_once);
- if (!vboxsf_inode_cachep) {
- err = -ENOMEM;
- goto fail_nomem;
- }
-
- err = vboxsf_connect();
- if (err) {
- vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
- vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
- vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
- goto fail_free_cache;
- }
-
- err = vboxsf_set_utf8();
- if (err) {
-		vbg_err("vboxsf_set_utf8 error %d\n", err);
- goto fail_disconnect;
- }
-
- if (!follow_symlinks) {
- err = vboxsf_set_symlinks();
- if (err)
- vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
- }
-
- vboxsf_setup_done = true;
-success:
- mutex_unlock(&vboxsf_setup_mutex);
- return 0;
-
-fail_disconnect:
- vboxsf_disconnect();
-fail_free_cache:
- kmem_cache_destroy(vboxsf_inode_cachep);
-fail_nomem:
- mutex_unlock(&vboxsf_setup_mutex);
- return err;
-}
-
-static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
-{
- char *options = data;
-
- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
- vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
- return -EINVAL;
- }
-
- return generic_parse_monolithic(fc, data);
-}
-
-static int vboxsf_get_tree(struct fs_context *fc)
-{
- int err;
-
- err = vboxsf_setup();
- if (err)
- return err;
-
- return vfs_get_super(fc, vfs_get_independent_super, vboxsf_fill_super);
-}
-
-static int vboxsf_reconfigure(struct fs_context *fc)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
- struct vboxsf_fs_context *ctx = fc->fs_private;
- struct inode *iroot;
-
- iroot = ilookup(fc->root->d_sb, 0);
- if (!iroot)
- return -ENOENT;
-
- /* Apply changed options to the root inode */
- sbi->o = ctx->o;
- vboxsf_init_inode(sbi, iroot, &sbi->root_info);
-
- return 0;
-}
-
-static void vboxsf_free_fc(struct fs_context *fc)
-{
- struct vboxsf_fs_context *ctx = fc->fs_private;
-
- kfree(ctx->nls_name);
- kfree(ctx);
-}
-
-static const struct fs_context_operations vboxsf_context_ops = {
- .free = vboxsf_free_fc,
- .parse_param = vboxsf_parse_param,
- .parse_monolithic = vboxsf_parse_monolithic,
- .get_tree = vboxsf_get_tree,
- .reconfigure = vboxsf_reconfigure,
-};
-
-static int vboxsf_init_fs_context(struct fs_context *fc)
-{
- struct vboxsf_fs_context *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- current_uid_gid(&ctx->o.uid, &ctx->o.gid);
-
- fc->fs_private = ctx;
- fc->ops = &vboxsf_context_ops;
- return 0;
-}
-
-static struct file_system_type vboxsf_fs_type = {
- .owner = THIS_MODULE,
- .name = "vboxsf",
- .init_fs_context = vboxsf_init_fs_context,
- .parameters = &vboxsf_fs_parameters,
- .kill_sb = kill_anon_super
-};
-
-/* Module initialization/finalization handlers */
-static int __init vboxsf_init(void)
-{
- return register_filesystem(&vboxsf_fs_type);
-}
-
-static void __exit vboxsf_fini(void)
-{
- unregister_filesystem(&vboxsf_fs_type);
-
- mutex_lock(&vboxsf_setup_mutex);
- if (vboxsf_setup_done) {
- vboxsf_disconnect();
- /*
- * Make sure all delayed rcu free inodes are flushed
- * before we destroy the cache.
- */
- rcu_barrier();
- kmem_cache_destroy(vboxsf_inode_cachep);
- }
- mutex_unlock(&vboxsf_setup_mutex);
-}
-
-module_init(vboxsf_init);
-module_exit(vboxsf_fini);
-
-MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
-MODULE_AUTHOR("Oracle Corporation");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_FS("vboxsf");
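
super.c is written against the (then new) mount API: the VFS calls .init_fs_context once per mount or reconfigure, feeds each option through .parse_param (or .parse_monolithic for legacy binary/option-string data, both slotted into the same ops table), and finally calls .get_tree to build the superblock. A hedged skeleton of that flow; the examplefs_* names and ctx struct are placeholders, not part of this driver:

struct examplefs_ctx {
	unsigned int example_opt;	/* hypothetical mount option */
};

static void examplefs_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	return -ENOSYS;		/* a real fs builds the root inode here */
}

static int examplefs_get_tree(struct fs_context *fc)
{
	/* vboxsf wants a fresh superblock per mount, hence vfs_get_super()
	 * with vfs_get_independent_super, as in vboxsf_get_tree() above. */
	return vfs_get_super(fc, vfs_get_independent_super,
			     examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.free		= examplefs_free_fc,
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->fs_private = kzalloc(sizeof(struct examplefs_ctx), GFP_KERNEL);
	if (!fc->fs_private)
		return -ENOMEM;

	fc->ops = &examplefs_context_ops;
	return 0;
}
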
diff --git a/drivers/staging/vboxsf/utils.c b/drivers/staging/vboxsf/utils.c
deleted file mode 100644
index 34a49e6f74fc..000000000000
--- a/drivers/staging/vboxsf/utils.c
+++ /dev/null
@@ -1,551 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Utility functions.
- * Mainly conversion from/to VirtualBox/Linux data structures.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/namei.h>
-#include <linux/nls.h>
-#include <linux/sizes.h>
-#include <linux/vfs.h>
-#include "vfsmod.h"
-
-struct inode *vboxsf_new_inode(struct super_block *sb)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
- struct inode *inode;
- unsigned long flags;
- int cursor, ret;
- u32 gen;
-
- inode = new_inode(sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&sbi->ino_idr_lock, flags);
- cursor = idr_get_cursor(&sbi->ino_idr);
- ret = idr_alloc_cyclic(&sbi->ino_idr, inode, 1, 0, GFP_ATOMIC);
- if (ret >= 0 && ret < cursor)
- sbi->next_generation++;
- gen = sbi->next_generation;
- spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
- idr_preload_end();
-
- if (ret < 0) {
- iput(inode);
- return ERR_PTR(ret);
- }
-
- inode->i_ino = ret;
- inode->i_generation = gen;
- return inode;
-}
-
-/* set [inode] attributes based on [info], uid/gid based on [sbi] */
-void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
- const struct shfl_fsobjinfo *info)
-{
- const struct shfl_fsobjattr *attr;
- s64 allocated;
- int mode;
-
- attr = &info->attr;
-
-#define mode_set(r) ((attr->mode & (SHFL_UNIX_##r)) ? (S_##r) : 0)
-
- mode = mode_set(IRUSR);
- mode |= mode_set(IWUSR);
- mode |= mode_set(IXUSR);
-
- mode |= mode_set(IRGRP);
- mode |= mode_set(IWGRP);
- mode |= mode_set(IXGRP);
-
- mode |= mode_set(IROTH);
- mode |= mode_set(IWOTH);
- mode |= mode_set(IXOTH);
-
-#undef mode_set
-
- /* We use the host-side values for these */
- inode->i_flags |= S_NOATIME | S_NOCMTIME;
- inode->i_mapping->a_ops = &vboxsf_reg_aops;
-
- if (SHFL_IS_DIRECTORY(attr->mode)) {
- inode->i_mode = sbi->o.dmode_set ? sbi->o.dmode : mode;
- inode->i_mode &= ~sbi->o.dmask;
- inode->i_mode |= S_IFDIR;
- inode->i_op = &vboxsf_dir_iops;
- inode->i_fop = &vboxsf_dir_fops;
- /*
- * XXX: this probably should be set to the number of entries
- * in the directory plus two (. ..)
- */
- set_nlink(inode, 1);
- } else if (SHFL_IS_SYMLINK(attr->mode)) {
- inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
- inode->i_mode &= ~sbi->o.fmask;
- inode->i_mode |= S_IFLNK;
- inode->i_op = &vboxsf_lnk_iops;
- set_nlink(inode, 1);
- } else {
- inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
- inode->i_mode &= ~sbi->o.fmask;
- inode->i_mode |= S_IFREG;
- inode->i_op = &vboxsf_reg_iops;
- inode->i_fop = &vboxsf_reg_fops;
- set_nlink(inode, 1);
- }
-
- inode->i_uid = sbi->o.uid;
- inode->i_gid = sbi->o.gid;
-
- inode->i_size = info->size;
- inode->i_blkbits = 12;
- /* i_blocks always in units of 512 bytes! */
- allocated = info->allocated + 511;
- do_div(allocated, 512);
- inode->i_blocks = allocated;
-
- inode->i_atime = ns_to_timespec64(
- info->access_time.ns_relative_to_unix_epoch);
- inode->i_ctime = ns_to_timespec64(
- info->change_time.ns_relative_to_unix_epoch);
- inode->i_mtime = ns_to_timespec64(
- info->modification_time.ns_relative_to_unix_epoch);
-}
-
-int vboxsf_create_at_dentry(struct dentry *dentry,
- struct shfl_createparms *params)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
- struct shfl_string *path;
- int err;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- err = vboxsf_create(sbi->root, path, params);
- __putname(path);
-
- return err;
-}
-
-int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
- struct shfl_fsobjinfo *info)
-{
- struct shfl_createparms params = {};
- int err;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
-
- err = vboxsf_create(sbi->root, path, &params);
- if (err)
- return err;
-
- if (params.result != SHFL_FILE_EXISTS)
- return -ENOENT;
-
- if (info)
- *info = params.info;
-
- return 0;
-}
-
-int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
- struct shfl_string *path;
- int err;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- err = vboxsf_stat(sbi, path, info);
- __putname(path);
- return err;
-}
-
-int vboxsf_inode_revalidate(struct dentry *dentry)
-{
- struct vboxsf_sbi *sbi;
- struct vboxsf_inode *sf_i;
- struct shfl_fsobjinfo info;
- struct timespec64 prev_mtime;
- struct inode *inode;
- int err;
-
- if (!dentry || !d_really_is_positive(dentry))
- return -EINVAL;
-
- inode = d_inode(dentry);
- prev_mtime = inode->i_mtime;
- sf_i = VBOXSF_I(inode);
- sbi = VBOXSF_SBI(dentry->d_sb);
- if (!sf_i->force_restat) {
- if (time_before(jiffies, dentry->d_time + sbi->o.ttl))
- return 0;
- }
-
- err = vboxsf_stat_dentry(dentry, &info);
- if (err)
- return err;
-
- dentry->d_time = jiffies;
- sf_i->force_restat = 0;
- vboxsf_init_inode(sbi, inode, &info);
-
- /*
- * If the file was changed on the host side we need to invalidate the
-	 * page-cache for it. Note this also gets triggered by our own writes;
-	 * this is unavoidable.
- */
- if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
- invalidate_inode_pages2(inode->i_mapping);
-
- return 0;
-}
-
-int vboxsf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int flags)
-{
- int err;
- struct dentry *dentry = path->dentry;
- struct inode *inode = d_inode(dentry);
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
-
- switch (flags & AT_STATX_SYNC_TYPE) {
- case AT_STATX_DONT_SYNC:
- err = 0;
- break;
- case AT_STATX_FORCE_SYNC:
- sf_i->force_restat = 1;
- /* fall-through */
- default:
- err = vboxsf_inode_revalidate(dentry);
- }
- if (err)
- return err;
-
- generic_fillattr(d_inode(dentry), kstat);
- return 0;
-}
-
-int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr)
-{
- struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry));
- struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
- struct shfl_createparms params = {};
- struct shfl_fsobjinfo info = {};
- u32 buf_len;
- int err;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
- SHFL_CF_ACT_FAIL_IF_NEW |
- SHFL_CF_ACCESS_ATTR_WRITE;
-
-	/* this is at least required for POSIX hosts */
- if (iattr->ia_valid & ATTR_SIZE)
- params.create_flags |= SHFL_CF_ACCESS_WRITE;
-
- err = vboxsf_create_at_dentry(dentry, &params);
- if (err || params.result != SHFL_FILE_EXISTS)
- return err ? err : -ENOENT;
-
-#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0)
-
- /*
-	 * Setting the file size and setting the other attributes have to
-	 * be handled separately.
- */
- if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
- if (iattr->ia_valid & ATTR_MODE) {
- info.attr.mode = mode_set(IRUSR);
- info.attr.mode |= mode_set(IWUSR);
- info.attr.mode |= mode_set(IXUSR);
- info.attr.mode |= mode_set(IRGRP);
- info.attr.mode |= mode_set(IWGRP);
- info.attr.mode |= mode_set(IXGRP);
- info.attr.mode |= mode_set(IROTH);
- info.attr.mode |= mode_set(IWOTH);
- info.attr.mode |= mode_set(IXOTH);
-
- if (iattr->ia_mode & S_IFDIR)
- info.attr.mode |= SHFL_TYPE_DIRECTORY;
- else
- info.attr.mode |= SHFL_TYPE_FILE;
- }
-
- if (iattr->ia_valid & ATTR_ATIME)
- info.access_time.ns_relative_to_unix_epoch =
- timespec64_to_ns(&iattr->ia_atime);
-
- if (iattr->ia_valid & ATTR_MTIME)
- info.modification_time.ns_relative_to_unix_epoch =
- timespec64_to_ns(&iattr->ia_mtime);
-
- /*
- * Ignore ctime (inode change time) as it can't be set
- * from userland anyway.
- */
-
- buf_len = sizeof(info);
- err = vboxsf_fsinfo(sbi->root, params.handle,
- SHFL_INFO_SET | SHFL_INFO_FILE, &buf_len,
- &info);
- if (err) {
- vboxsf_close(sbi->root, params.handle);
- return err;
- }
-
-		/* the host may have given us different attributes than requested */
- sf_i->force_restat = 1;
- }
-
-#undef mode_set
-
- if (iattr->ia_valid & ATTR_SIZE) {
- memset(&info, 0, sizeof(info));
- info.size = iattr->ia_size;
- buf_len = sizeof(info);
- err = vboxsf_fsinfo(sbi->root, params.handle,
- SHFL_INFO_SET | SHFL_INFO_SIZE, &buf_len,
- &info);
- if (err) {
- vboxsf_close(sbi->root, params.handle);
- return err;
- }
-
-		/* the host may have given us different attributes than requested */
- sf_i->force_restat = 1;
- }
-
- vboxsf_close(sbi->root, params.handle);
-
- /* Update the inode with what the host has actually given us. */
- if (sf_i->force_restat)
- vboxsf_inode_revalidate(dentry);
-
- return 0;
-}
-
-/*
- * [dentry] contains a string encoded in the coding system that corresponds
- * to [sbi]->nls; we must convert it to UTF-8 here.
- * Returns a shfl_string allocated through __getname (must be freed using
- * __putname), or an ERR_PTR on error.
- */
-struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
- struct dentry *dentry)
-{
- struct shfl_string *shfl_path;
- int path_len, out_len, nb;
- char *buf, *path;
- wchar_t uni;
- u8 *out;
-
- buf = __getname();
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- path = dentry_path_raw(dentry, buf, PATH_MAX);
- if (IS_ERR(path)) {
- __putname(buf);
- return (struct shfl_string *)path;
- }
- path_len = strlen(path);
-
- if (sbi->nls) {
- shfl_path = __getname();
- if (!shfl_path) {
- __putname(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- out = shfl_path->string.utf8;
- out_len = PATH_MAX - SHFLSTRING_HEADER_SIZE - 1;
-
- while (path_len) {
- nb = sbi->nls->char2uni(path, path_len, &uni);
- if (nb < 0) {
- __putname(shfl_path);
- __putname(buf);
- return ERR_PTR(-EINVAL);
- }
- path += nb;
- path_len -= nb;
-
- nb = utf32_to_utf8(uni, out, out_len);
- if (nb < 0) {
- __putname(shfl_path);
- __putname(buf);
- return ERR_PTR(-ENAMETOOLONG);
- }
- out += nb;
- out_len -= nb;
- }
- *out = 0;
- shfl_path->length = out - shfl_path->string.utf8;
- shfl_path->size = shfl_path->length + 1;
- __putname(buf);
- } else {
- if ((SHFLSTRING_HEADER_SIZE + path_len + 1) > PATH_MAX) {
- __putname(buf);
- return ERR_PTR(-ENAMETOOLONG);
- }
- /*
-		 * dentry_path_raw() stores the name at the end of buf, but the
-		 * shfl_string we return must be properly aligned.
- */
- shfl_path = (struct shfl_string *)buf;
- memmove(shfl_path->string.utf8, path, path_len);
- shfl_path->string.utf8[path_len] = 0;
- shfl_path->length = path_len;
- shfl_path->size = path_len + 1;
- }
-
- return shfl_path;
-}
-
-int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
- const unsigned char *utf8_name, size_t utf8_len)
-{
- const char *in;
- char *out;
- size_t out_len;
- size_t out_bound_len;
- size_t in_bound_len;
-
- in = utf8_name;
- in_bound_len = utf8_len;
-
- out = name;
- out_len = 0;
- /* Reserve space for terminating 0 */
- out_bound_len = name_bound_len - 1;
-
- while (in_bound_len) {
- int nb;
- unicode_t uni;
-
- nb = utf8_to_utf32(in, in_bound_len, &uni);
- if (nb < 0)
- return -EINVAL;
-
- in += nb;
- in_bound_len -= nb;
-
- nb = sbi->nls->uni2char(uni, out, out_bound_len);
- if (nb < 0)
- return nb;
-
- out += nb;
- out_bound_len -= nb;
- out_len += nb;
- }
-
- *out = 0;
-
- return 0;
-}
-
-static struct vboxsf_dir_buf *vboxsf_dir_buf_alloc(struct list_head *list)
-{
- struct vboxsf_dir_buf *b;
-
- b = kmalloc(sizeof(*b), GFP_KERNEL);
- if (!b)
- return NULL;
-
- b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
- if (!b->buf) {
- kfree(b);
- return NULL;
- }
-
- b->entries = 0;
- b->used = 0;
- b->free = DIR_BUFFER_SIZE;
- list_add(&b->head, list);
-
- return b;
-}
-
-static void vboxsf_dir_buf_free(struct vboxsf_dir_buf *b)
-{
- list_del(&b->head);
- kfree(b->buf);
- kfree(b);
-}
-
-struct vboxsf_dir_info *vboxsf_dir_info_alloc(void)
-{
- struct vboxsf_dir_info *p;
-
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return NULL;
-
- INIT_LIST_HEAD(&p->info_list);
- return p;
-}
-
-void vboxsf_dir_info_free(struct vboxsf_dir_info *p)
-{
- struct list_head *list, *pos, *tmp;
-
- list = &p->info_list;
- list_for_each_safe(pos, tmp, list) {
- struct vboxsf_dir_buf *b;
-
- b = list_entry(pos, struct vboxsf_dir_buf, head);
- vboxsf_dir_buf_free(b);
- }
- kfree(p);
-}
-
-int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
- u64 handle)
-{
- struct vboxsf_dir_buf *b;
- u32 entries, size;
- int err = 0;
- void *buf;
-
- /* vboxsf_dirinfo returns 1 on end of dir */
- while (err == 0) {
- b = vboxsf_dir_buf_alloc(&sf_d->info_list);
- if (!b) {
- err = -ENOMEM;
- break;
- }
-
- buf = b->buf;
- size = b->free;
-
- err = vboxsf_dirinfo(sbi->root, handle, NULL, 0, 0,
- &size, buf, &entries);
- if (err < 0)
- break;
-
- b->entries += entries;
- b->free -= size;
- b->used += size;
- }
-
- if (b && b->used == 0)
- vboxsf_dir_buf_free(b);
-
- /* -EILSEQ means the host could not translate a filename, ignore */
- if (err > 0 || err == -EILSEQ)
- err = 0;
-
- return err;
-}
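
The helpers above chain DIR_BUFFER_SIZE (16K) buffers so arbitrarily large host listings can be slurped in one pass. A hedged usage sketch; 'sbi' and 'handle' are assumed to come from the superblock and an SHFL directory open, and the open/close of that handle is elided:

/* Hedged sketch, not driver code. */
static int example_slurp_dir(struct vboxsf_sbi *sbi, u64 handle)
{
	struct vboxsf_dir_info *sf_d;
	struct vboxsf_dir_buf *b;
	int err;

	sf_d = vboxsf_dir_info_alloc();
	if (!sf_d)
		return -ENOMEM;

	err = vboxsf_dir_read_all(sbi, sf_d, handle);
	if (err)
		goto out;

	/* Each buffer holds b->entries shfl_dirinfo records packed back
	 * to back in b->buf, b->used bytes in total. */
	list_for_each_entry(b, &sf_d->info_list, head)
		pr_debug("buf: %zu entries, %zu bytes\n", b->entries, b->used);
out:
	vboxsf_dir_info_free(sf_d);
	return err;
}
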
diff --git a/drivers/staging/vboxsf/vboxsf_wrappers.c b/drivers/staging/vboxsf/vboxsf_wrappers.c
deleted file mode 100644
index bfc78a097dae..000000000000
--- a/drivers/staging/vboxsf/vboxsf_wrappers.c
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Wrapper functions for the shfl host calls.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vbox_err.h>
-#include <linux/vbox_utils.h>
-#include "vfsmod.h"
-
-#define SHFL_REQUEST \
- (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER | \
- VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
-
-static u32 vboxsf_client_id;
-
-int vboxsf_connect(void)
-{
- struct vbg_dev *gdev;
- struct vmmdev_hgcm_service_location loc;
- int err, vbox_status;
-
- loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING;
- strcpy(loc.u.localhost.service_name, "VBoxSharedFolders");
-
- gdev = vbg_get_gdev();
- if (IS_ERR(gdev))
- return -ENODEV; /* No guest-device */
-
- err = vbg_hgcm_connect(gdev, SHFL_REQUEST, &loc,
- &vboxsf_client_id, &vbox_status);
- vbg_put_gdev(gdev);
-
- return err ? err : vbg_status_code_to_errno(vbox_status);
-}
-
-void vboxsf_disconnect(void)
-{
- struct vbg_dev *gdev;
- int vbox_status;
-
- gdev = vbg_get_gdev();
- if (IS_ERR(gdev))
- return; /* guest-device is gone, already disconnected */
-
- vbg_hgcm_disconnect(gdev, SHFL_REQUEST, vboxsf_client_id, &vbox_status);
- vbg_put_gdev(gdev);
-}
-
-static int vboxsf_call(u32 function, void *parms, u32 parm_count, int *status)
-{
- struct vbg_dev *gdev;
- int err, vbox_status;
-
- gdev = vbg_get_gdev();
- if (IS_ERR(gdev))
- return -ESHUTDOWN; /* guest-dev removed underneath us */
-
- err = vbg_hgcm_call(gdev, SHFL_REQUEST, vboxsf_client_id, function,
- U32_MAX, parms, parm_count, &vbox_status);
- vbg_put_gdev(gdev);
-
- if (err < 0)
- return err;
-
- if (status)
- *status = vbox_status;
-
- return vbg_status_code_to_errno(vbox_status);
-}
-
-int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root)
-{
- struct shfl_map_folder parms;
- int err, status;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.path.u.pointer.size = shfl_string_buf_size(folder_name);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)folder_name;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = 0;
-
- parms.delimiter.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.delimiter.u.value32 = '/';
-
- parms.case_sensitive.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.case_sensitive.u.value32 = 1;
-
- err = vboxsf_call(SHFL_FN_MAP_FOLDER, &parms, SHFL_CPARMS_MAP_FOLDER,
- &status);
- if (err == -ENOSYS && status == VERR_NOT_IMPLEMENTED)
-		vbg_err("%s: Error: host is too old\n", __func__);
-
- *root = parms.root.u.value32;
- return err;
-}
-
-int vboxsf_unmap_folder(u32 root)
-{
- struct shfl_unmap_folder parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- return vboxsf_call(SHFL_FN_UNMAP_FOLDER, &parms,
- SHFL_CPARMS_UNMAP_FOLDER, NULL);
-}
-
-/**
- * vboxsf_create - Create a new file or folder
- * @root: Root of the shared folder in which to create the file
- * @parsed_path: The path of the file or folder relative to the shared folder
- * @create_parms: Parameters for file/folder creation.
- *
- * Create a new file or folder or open an existing one in a shared folder.
- * Note this function always returns 0 (success) unless an exceptional condition
- * occurs (out of memory, invalid arguments, etc.). If the file or folder could
- * not be opened or created, create_parms->handle will be set to
- * SHFL_HANDLE_NIL on return. In this case the value in create_parms->result
- * provides information as to why (e.g. SHFL_FILE_EXISTS); create_parms->result
- * is also set on success as additional information.
- *
- * Returns:
- * 0 or negative errno value.
- */
-int vboxsf_create(u32 root, struct shfl_string *parsed_path,
- struct shfl_createparms *create_parms)
-{
- struct shfl_create parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
-
- parms.parms.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.parms.u.pointer.size = sizeof(struct shfl_createparms);
- parms.parms.u.pointer.u.linear_addr = (uintptr_t)create_parms;
-
- return vboxsf_call(SHFL_FN_CREATE, &parms, SHFL_CPARMS_CREATE, NULL);
-}
-
-int vboxsf_close(u32 root, u64 handle)
-{
- struct shfl_close parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
-
- return vboxsf_call(SHFL_FN_CLOSE, &parms, SHFL_CPARMS_CLOSE, NULL);
-}
-
-int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags)
-{
- struct shfl_remove parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
-
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
-
- return vboxsf_call(SHFL_FN_REMOVE, &parms, SHFL_CPARMS_REMOVE, NULL);
-}
-
-int vboxsf_rename(u32 root, struct shfl_string *src_path,
- struct shfl_string *dest_path, u32 flags)
-{
- struct shfl_rename parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.src.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.src.u.pointer.size = shfl_string_buf_size(src_path);
- parms.src.u.pointer.u.linear_addr = (uintptr_t)src_path;
-
- parms.dest.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.dest.u.pointer.size = shfl_string_buf_size(dest_path);
- parms.dest.u.pointer.u.linear_addr = (uintptr_t)dest_path;
-
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
-
- return vboxsf_call(SHFL_FN_RENAME, &parms, SHFL_CPARMS_RENAME, NULL);
-}
-
-int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
-{
- struct shfl_read parms;
- int err;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.offset.u.value64 = offset;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.buffer.u.pointer.size = *buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- err = vboxsf_call(SHFL_FN_READ, &parms, SHFL_CPARMS_READ, NULL);
-
- *buf_len = parms.cb.u.value32;
- return err;
-}
-
-int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
-{
- struct shfl_write parms;
- int err;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.offset.u.value64 = offset;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.buffer.u.pointer.size = *buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- err = vboxsf_call(SHFL_FN_WRITE, &parms, SHFL_CPARMS_WRITE, NULL);
-
- *buf_len = parms.cb.u.value32;
- return err;
-}
-
-/* Returns 0 on success, 1 on end-of-dir, negative errno otherwise */
-int vboxsf_dirinfo(u32 root, u64 handle,
- struct shfl_string *parsed_path, u32 flags, u32 index,
- u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count)
-{
- struct shfl_list parms;
- int err, status;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- if (parsed_path) {
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
- } else {
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_IN;
- parms.path.u.pointer.size = 0;
- parms.path.u.pointer.u.linear_addr = 0;
- }
-
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.buffer.u.pointer.size = *buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- parms.resume_point.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.resume_point.u.value32 = index;
- parms.file_count.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.file_count.u.value32 = 0; /* out parameter only */
-
- err = vboxsf_call(SHFL_FN_LIST, &parms, SHFL_CPARMS_LIST, &status);
- if (err == -ENODATA && status == VERR_NO_MORE_FILES)
- err = 1;
-
- *buf_len = parms.cb.u.value32;
- *file_count = parms.file_count.u.value32;
- return err;
-}
-
-int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
- u32 *buf_len, void *buf)
-{
- struct shfl_information parms;
- int err;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.info.u.pointer.size = *buf_len;
- parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- err = vboxsf_call(SHFL_FN_INFORMATION, &parms, SHFL_CPARMS_INFORMATION,
- NULL);
-
- *buf_len = parms.cb.u.value32;
- return err;
-}
-
-int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
- u32 buf_len, u8 *buf)
-{
- struct shfl_readLink parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
-
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.buffer.u.pointer.size = buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- return vboxsf_call(SHFL_FN_READLINK, &parms, SHFL_CPARMS_READLINK,
- NULL);
-}
-
-int vboxsf_symlink(u32 root, struct shfl_string *new_path,
- struct shfl_string *old_path, struct shfl_fsobjinfo *buf)
-{
- struct shfl_symlink parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.new_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.new_path.u.pointer.size = shfl_string_buf_size(new_path);
- parms.new_path.u.pointer.u.linear_addr = (uintptr_t)new_path;
-
- parms.old_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.old_path.u.pointer.size = shfl_string_buf_size(old_path);
- parms.old_path.u.pointer.u.linear_addr = (uintptr_t)old_path;
-
- parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.info.u.pointer.size = sizeof(struct shfl_fsobjinfo);
- parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- return vboxsf_call(SHFL_FN_SYMLINK, &parms, SHFL_CPARMS_SYMLINK, NULL);
-}
-
-int vboxsf_set_utf8(void)
-{
- return vboxsf_call(SHFL_FN_SET_UTF8, NULL, 0, NULL);
-}
-
-int vboxsf_set_symlinks(void)
-{
- return vboxsf_call(SHFL_FN_SET_SYMLINKS, NULL, 0, NULL);
-}
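
Taken together the wrappers form a small session API. A hedged sketch of a complete round trip; 'folder_name' and 'path' are assumed to be shfl_strings prepared as in vboxsf_fill_super(), and SHFL_CF_ACCESS_READ is assumed from the full shfl_hostintf.h (only the write-side create flags appear in the hunks above):

/* Hedged sketch, not driver code: map a share, open a file, read, tear down. */
static int example_read_first_bytes(struct shfl_string *folder_name,
				    struct shfl_string *path,
				    u8 *data, u32 *len)
{
	struct shfl_createparms params = {};
	u32 root;
	int err;

	err = vboxsf_connect();
	if (err)
		return err;

	err = vboxsf_map_folder(folder_name, &root);
	if (err)
		goto out_disconnect;

	params.handle = SHFL_HANDLE_NIL;
	params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
			      SHFL_CF_ACT_FAIL_IF_NEW |
			      SHFL_CF_ACCESS_READ;	/* assumed flag, see above */
	err = vboxsf_create(root, path, &params);
	if (err || params.result != SHFL_FILE_EXISTS) {
		err = err ? err : -ENOENT;
		goto out_unmap;
	}

	err = vboxsf_read(root, params.handle, 0, len, data);	/* *len: in/out */
	vboxsf_close(root, params.handle);
out_unmap:
	vboxsf_unmap_folder(root);
out_disconnect:
	vboxsf_disconnect();
	return err;
}
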
diff --git a/drivers/staging/vboxsf/vfsmod.h b/drivers/staging/vboxsf/vfsmod.h
deleted file mode 100644
index 18f95b00fc33..000000000000
--- a/drivers/staging/vboxsf/vfsmod.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * VirtualBox Guest Shared Folders support: module header.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#ifndef VFSMOD_H
-#define VFSMOD_H
-
-#include <linux/backing-dev.h>
-#include <linux/idr.h>
-#include "shfl_hostintf.h"
-
-#define DIR_BUFFER_SIZE SZ_16K
-
-/* The cast is to prevent assignment of void * to pointers of arbitrary type */
-#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
-#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
-
-struct vboxsf_options {
- unsigned long ttl;
- kuid_t uid;
- kgid_t gid;
- bool dmode_set;
- bool fmode_set;
- umode_t dmode;
- umode_t fmode;
- umode_t dmask;
- umode_t fmask;
-};
-
-struct vboxsf_fs_context {
- struct vboxsf_options o;
- char *nls_name;
-};
-
-/* per-shared folder information */
-struct vboxsf_sbi {
- struct vboxsf_options o;
- struct shfl_fsobjinfo root_info;
- struct idr ino_idr;
- spinlock_t ino_idr_lock; /* This protects ino_idr */
- struct nls_table *nls;
- u32 next_generation;
- u32 root;
- int bdi_id;
-};
-
-/* per-inode information */
-struct vboxsf_inode {
- /* some information was changed, update data on next revalidate */
- int force_restat;
- /* list of open handles for this inode + lock protecting it */
- struct list_head handle_list;
- /* This mutex protects handle_list accesses */
- struct mutex handle_list_mutex;
- /* The VFS inode struct */
- struct inode vfs_inode;
-};
-
-struct vboxsf_dir_info {
- struct list_head info_list;
-};
-
-struct vboxsf_dir_buf {
- size_t entries;
- size_t free;
- size_t used;
- void *buf;
- struct list_head head;
-};
-
-/* globals */
-extern const struct inode_operations vboxsf_dir_iops;
-extern const struct inode_operations vboxsf_lnk_iops;
-extern const struct inode_operations vboxsf_reg_iops;
-extern const struct file_operations vboxsf_dir_fops;
-extern const struct file_operations vboxsf_reg_fops;
-extern const struct address_space_operations vboxsf_reg_aops;
-extern const struct dentry_operations vboxsf_dentry_ops;
-
-/* from utils.c */
-struct inode *vboxsf_new_inode(struct super_block *sb);
-void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
- const struct shfl_fsobjinfo *info);
-int vboxsf_create_at_dentry(struct dentry *dentry,
- struct shfl_createparms *params);
-int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
- struct shfl_fsobjinfo *info);
-int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
-int vboxsf_inode_revalidate(struct dentry *dentry);
-int vboxsf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int query_flags);
-int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
-struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
- struct dentry *dentry);
-int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
- const unsigned char *utf8_name, size_t utf8_len);
-struct vboxsf_dir_info *vboxsf_dir_info_alloc(void);
-void vboxsf_dir_info_free(struct vboxsf_dir_info *p);
-int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
- u64 handle);
-
-/* from vboxsf_wrappers.c */
-int vboxsf_connect(void);
-void vboxsf_disconnect(void);
-
-int vboxsf_create(u32 root, struct shfl_string *parsed_path,
- struct shfl_createparms *create_parms);
-
-int vboxsf_close(u32 root, u64 handle);
-int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags);
-int vboxsf_rename(u32 root, struct shfl_string *src_path,
- struct shfl_string *dest_path, u32 flags);
-
-int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
-int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
-
-int vboxsf_dirinfo(u32 root, u64 handle,
- struct shfl_string *parsed_path, u32 flags, u32 index,
- u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count);
-int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
- u32 *buf_len, void *buf);
-
-int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root);
-int vboxsf_unmap_folder(u32 root);
-
-int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
- u32 buf_len, u8 *buf);
-int vboxsf_symlink(u32 root, struct shfl_string *new_path,
- struct shfl_string *old_path, struct shfl_fsobjinfo *buf);
-
-int vboxsf_set_utf8(void);
-int vboxsf_set_symlinks(void);
-
-#endif
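
struct vboxsf_inode embeds the VFS inode, so VBOXSF_I() is a container_of() walk back from any inode the VFS hands in, and VBOXSF_SBI() recovers the per-mount state parked in sb->s_fs_info. A hedged illustration of the pattern:

/* Illustration only: the example_* name is hypothetical. */
static void example_mark_stale(struct inode *inode)
{
	struct vboxsf_inode *sf_i = VBOXSF_I(inode);

	/* Force a host re-stat on the next revalidate, as setattr does;
	 * sbi->o.ttl (via VBOXSF_SBI(inode->i_sb)) bounds how long the
	 * cached attributes are otherwise trusted. */
	sf_i->force_restat = 1;
}
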
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
index f66319512faf..d32ea348e846 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config SND_BCM2835
- tristate "BCM2835 Audio"
- depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
- select SND_PCM
- select BCM2835_VCHIQ
- help
- Say Y or M if you want to support BCM2835 built in audio
+ tristate "BCM2835 Audio"
+ depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
+ select SND_PCM
+ select BCM2835_VCHIQ
+ help
+	  Say Y or M if you want to support BCM2835 built-in audio
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index c6f9cf1913d2..73144f1ce45e 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -9,7 +9,7 @@
struct bcm2835_audio_instance {
struct device *dev;
- VCHI_SERVICE_HANDLE_T vchi_handle;
+ struct vchi_service_handle *vchi_handle;
struct completion msg_avail_comp;
struct mutex vchi_mutex;
struct bcm2835_alsa_stream *alsa_stream;
@@ -90,7 +90,7 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
}
static void audio_vchi_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
+ const enum vchi_callback_reason reason,
void *msg_handle)
{
struct bcm2835_audio_instance *instance = param;
@@ -103,6 +103,9 @@ static void audio_vchi_callback(void *param,
status = vchi_msg_dequeue(instance->vchi_handle,
&m, sizeof(m), &msg_len, VCHI_FLAGS_NONE);
+ if (status)
+ return;
+
if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
instance->result = m.result.success;
complete(&instance->msg_avail_comp);
@@ -119,7 +122,7 @@ static void audio_vchi_callback(void *param,
}
static int
-vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
+vc_vchi_audio_init(struct vchi_instance_handle *vchi_instance,
struct bcm2835_audio_instance *instance)
{
struct service_creation params = {
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index ed0feb34b6c8..d2fe8d36ab7d 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -44,7 +44,7 @@ enum snd_bcm2835_ctrl {
};
struct bcm2835_vchi_ctx {
- VCHI_INSTANCE_T vchi_instance;
+ struct vchi_instance_handle *vchi_instance;
};
/* definition of the chip-specific record */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index d4d1e44b16b2..beb6a0063bb8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1090,8 +1090,8 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
- if (!ret
- && camera_port ==
+ if (!ret &&
+ camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO]) {
bool overlay_enabled =
!!dev->component[COMP_PREVIEW]->enabled;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 1c180ead4a20..de03b90021a8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -155,7 +155,7 @@ struct mmal_msg_context {
};
struct vchiq_mmal_instance {
- VCHI_SERVICE_HANDLE_T handle;
+ struct vchi_service_handle *handle;
/* ensure serialised access to service */
struct mutex vchiq_mutex;
@@ -535,7 +535,7 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
/* incoming event service callback */
static void service_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
+ const enum vchi_callback_reason reason,
void *bulk_ctx)
{
struct vchiq_mmal_instance *instance = param;
@@ -1814,7 +1814,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
{
int status;
struct vchiq_mmal_instance *instance;
- static VCHI_INSTANCE_T vchi_instance;
+ static struct vchi_instance_handle *vchi_instance;
struct service_creation params = {
.version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
.service_id = VC_MMAL_SERVER_NAME,
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index f85562b9ba9e..56b1037d8e25 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -8,17 +8,17 @@
#include "interface/vchi/vchi_common.h"
/******************************************************************************
- Global defs
+ * Global defs
*****************************************************************************/
-#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
-#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
-#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
+#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x)) + VCHI_BULK_ALIGN - 1) & ~(VCHI_BULK_ALIGN - 1))
+#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN - 1))
+#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN - 1))))
#ifdef USE_VCHIQ_ARM
#define VCHI_BULK_ALIGNED(x) 1
#else
-#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
+#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN - 1)) == 0)
#endif
struct vchi_version {
@@ -45,18 +45,19 @@ struct vchi_held_msg {
struct service_creation {
struct vchi_version version;
int32_t service_id;
- VCHI_CALLBACK_T callback;
+ vchi_callback callback;
void *callback_param;
};
// Opaque handle for a VCHI instance
-typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
+struct vchi_instance_handle;
// Opaque handle for a server or client
-typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
+struct vchi_service_handle;
/******************************************************************************
- Global funcs - implementation is specific to which side you are on (local / remote)
+ * Global funcs - implementation is specific to which side you are on
+ * (local / remote)
*****************************************************************************/
#ifdef __cplusplus
@@ -64,98 +65,99 @@ extern "C" {
#endif
// Routine used to initialise the vchi on both local + remote connections
-extern int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle);
+extern int32_t vchi_initialise(struct vchi_instance_handle **instance_handle);
extern int32_t vchi_exit(void);
-extern int32_t vchi_connect(VCHI_INSTANCE_T instance_handle);
+extern int32_t vchi_connect(struct vchi_instance_handle *instance_handle);
// When this is called, ensure that all services have no data pending.
// Bulk transfers can remain 'queued'.
-extern int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle);
+extern int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle);
// helper functions
-extern void *vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
-extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
-extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
+extern void *vchi_allocate_buffer(struct vchi_service_handle *handle, uint32_t *length);
+extern void vchi_free_buffer(struct vchi_service_handle *handle, void *address);
+extern uint32_t vchi_current_time(struct vchi_instance_handle *instance_handle);
/******************************************************************************
- Global service API
+ * Global service API
*****************************************************************************/
// Routine to destroy a service
-extern int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_destroy(const struct vchi_service_handle *handle);
// Routine to open a named service
-extern int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+extern int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
- VCHI_SERVICE_HANDLE_T *handle);
+ struct vchi_service_handle **handle);
-extern int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_get_peer_version(const struct vchi_service_handle *handle,
short *peer_version);
// Routine to close a named service
-extern int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_close(const struct vchi_service_handle *handle);
// Routine to increment ref count on a named service
-extern int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_use(const struct vchi_service_handle *handle);
// Routine to decrement ref count on a named service
-extern int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_release(const struct vchi_service_handle *handle);
// Routine to set a control option for a named service
-extern int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
- VCHI_SERVICE_OPTION_T option,
- int value);
+extern int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
+ enum vchi_service_option option,
+ int value);
/* Routine to send a message from kernel memory across a service */
extern int
-vchi_queue_kernel_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size);
/* Routine to send a message from user memory across a service */
extern int
-vchi_queue_user_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_user_message(struct vchi_service_handle *handle,
void __user *data,
unsigned int size);
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
-extern int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_dequeue(struct vchi_service_handle *handle,
void *data,
uint32_t max_data_size_to_read,
uint32_t *actual_msg_size,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
// Routine to look at a message in place.
// The message is not dequeued, so a subsequent call to peek or dequeue
// will return the same message.
-extern int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_peek(struct vchi_service_handle *handle,
void **data,
uint32_t *msg_size,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
// Routine to remove a message after it has been read in place with peek
// The first message on the queue is dequeued.
-extern int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_msg_remove(struct vchi_service_handle *handle);
// Routine to hold a message in place.
// The message is dequeued, so the caller is left holding it; the descriptor is
// filled in and must be released when the user has finished with the message.
-extern int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_hold(struct vchi_service_handle *handle,
void **data, // } may be NULL, as info can be
uint32_t *msg_size, // } obtained from HELD_MSG_T
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
struct vchi_held_msg *message_descriptor);
// Initialise an iterator to look through messages in place
-extern int32_t vchi_msg_look_ahead(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_look_ahead(struct vchi_service_handle *handle,
struct vchi_msg_iter *iter,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
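With the opaque typedefs gone, callers name the struct and enum types directly. A minimal receive sketch against the prototypes above, assuming an already-opened service handle; illustrative only, not part of the patch:

/* Sketch only: dequeue = hold + copy into the client buffer + release. */
static int32_t drain_one_message(struct vchi_service_handle *handle)
{
	char buf[256];
	uint32_t actual = 0;

	return vchi_msg_dequeue(handle, buf, sizeof(buf), &actual,
				VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE);
}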
-/******************************************************************************
- Global service support API - operations on held messages and message iterators
- *****************************************************************************/
+/*******************************************************************************
+ * Global service support API - operations on held messages
+ * and message iterators
+ ******************************************************************************/
// Routine to get the address of a held message
extern void *vchi_held_msg_ptr(const struct vchi_held_msg *message);
@@ -196,42 +198,42 @@ extern int32_t vchi_msg_iter_hold_next(struct vchi_msg_iter *iter,
struct vchi_held_msg *message);
/******************************************************************************
- Global bulk API
+ * Global bulk API
*****************************************************************************/
// Routine to prepare interface for a transfer from the other side
-extern int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle,
void *data_dst,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
// Prepare interface for a transfer from the other side into relocatable memory.
-int32_t vchi_bulk_queue_receive_reloc(const VCHI_SERVICE_HANDLE_T handle,
+int32_t vchi_bulk_queue_receive_reloc(const struct vchi_service_handle *handle,
uint32_t offset,
uint32_t data_size,
- const VCHI_FLAGS_T flags,
+ const enum vchi_flags flags,
void * const bulk_handle);
// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
-extern int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
const void *data_src,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
/******************************************************************************
- Configuration plumbing
+ * Configuration plumbing
*****************************************************************************/
#ifdef __cplusplus
}
#endif
-extern int32_t vchi_bulk_queue_transmit_reloc(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_transmit_reloc(struct vchi_service_handle *handle,
uint32_t offset,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
#endif /* VCHI_H_ */
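The bulk prototypes change their type spellings, not their shape. A blocking transmit sketch, illustrative only and not part of the patch; passing NULL for the transfer handle is an assumption of this sketch:

/* Sketch only: queue a bulk transmit and block until it completes. */
static int32_t send_buffer(struct vchi_service_handle *handle,
			   const void *data, uint32_t size)
{
	return vchi_bulk_queue_transmit(handle, data, size,
					VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE,
					NULL);
}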
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
index 89aa4e6122cd..138c36151a22 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -4,13 +4,17 @@
#ifndef VCHI_CFG_H_
#define VCHI_CFG_H_
-/****************************************************************************************
- * Defines in this first section are part of the VCHI API and may be examined by VCHI
- * services.
- ***************************************************************************************/
-
-/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*******************************************************************************
+ * Defines in this first section are part of the VCHI API and may be examined by
+ * VCHI services.
+ ******************************************************************************/
+
+/*
+ * Required alignment of base addresses for bulk transfer, if unaligned
+ * transfers are not enabled.
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
#ifndef VCHI_BULK_ALIGN
# if __VCCOREVER__ >= 0x04000000
# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
@@ -19,9 +23,13 @@
# endif
#endif
-/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
-/* May be less than or greater than VCHI_BULK_ALIGN */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*
+ * Required length multiple for bulk transfers, if unaligned transfers are
+ * not enabled.
+ * May be less than or greater than VCHI_BULK_ALIGN.
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
#ifndef VCHI_BULK_GRANULARITY
# if __VCCOREVER__ >= 0x04000000
# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
@@ -39,19 +47,24 @@
# endif
#endif
-/******************************************************************************************
- * Defines below are system configuration options, and should not be used by VCHI services.
- *****************************************************************************************/
+/******************************************************************************
+ * Defines below are system configuration options, and should not be used by
+ * VCHI services.
+ ******************************************************************************/
-/* How many connections can we support? A localhost implementation uses 2 connections,
- * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
- * driver. */
+/*
+ * How many connections can we support? A localhost implementation uses
+ * 2 connections, 1 for host-app, 1 for VMCS, and these are hooked together
+ * by a loopback MPHI VCFW driver.
+ */
#ifndef VCHI_MAX_NUM_CONNECTIONS
# define VCHI_MAX_NUM_CONNECTIONS 3
#endif
-/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
- * amount of static memory. */
+/*
+ * How many services can we open per connection? Extending this doesn't cost
+ * processing time, just a small amount of static memory.
+ */
#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
# define VCHI_MAX_SERVICES_PER_CONNECTION 36
#endif
@@ -66,8 +79,10 @@
# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
#endif
-/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
- * receive queue space, less message headers. */
+/*
+ * How many receive slots do we use? This times VCHI_MAX_MSG_SIZE gives the
+ * effective receive queue space, less message headers.
+ */
#ifndef VCHI_NUM_READ_SLOTS
# if defined(VCHI_LOCAL_HOST_PORT)
# define VCHI_NUM_READ_SLOTS 4
@@ -76,112 +91,141 @@
# endif
#endif
-/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
- * performance. Only define on VideoCore end, talking to host.
+/*
+ * Do we utilise the overrun facility for receive message slots? Can aid peer
+ * transmit performance. Only define on VideoCore end, talking to host.
*/
//#define VCHI_MSG_RX_OVERRUN
-/* How many transmit slots do we use. Generally don't need many, as the hardware driver
- * underneath VCHI will usually have its own buffering. */
+/*
+ * How many transmit slots do we use? Generally don't need many,
+ * as the hardware driver underneath VCHI will usually have its own buffering.
+ */
#ifndef VCHI_NUM_WRITE_SLOTS
# define VCHI_NUM_WRITE_SLOTS 4
#endif
-/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
- * then it's taking up too much buffer space, and the peer service will be told to stop
- * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
- * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
- * is too high. */
+/*
+ * If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or
+ * more slots, then it's taking up too much buffer space, and the peer service
+ * will be told to stop transmitting with an XOFF message. For this to be
+ * effective, VCHI_NUM_READ_SLOTS needs to be considerably bigger than
+ * VCHI_NUM_WRITE_SLOTS, or the transmit latency is too high.
+ */
#ifndef VCHI_XOFF_THRESHOLD
# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
#endif
-/* After we've sent an XOFF, the peer will be told to resume transmission once the local
- * service has dequeued/released enough messages that it's now occupying
- * VCHI_XON_THRESHOLD slots or fewer. */
+/*
+ * After we've sent an XOFF, the peer will be told to resume transmission
+ * once the local service has dequeued/released enough messages that it's now
+ * occupying VCHI_XON_THRESHOLD slots or fewer.
+ */
#ifndef VCHI_XON_THRESHOLD
# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
#endif
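The XOFF/XON pair only gives hysteresis while XON sits strictly below XOFF, which the half/quarter defaults above satisfy for any plausible slot count. Illustrative only, not in the header: a guard in the same preprocessor style could encode that invariant:

#if VCHI_XON_THRESHOLD >= VCHI_XOFF_THRESHOLD
# error "VCHI_XON_THRESHOLD must be below VCHI_XOFF_THRESHOLD"
#endif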
-/* A size below which a bulk transfer omits the handshake completely and always goes
- * via the message channel, if bulk auxiliary is being sent on that service. (The user
- * can guarantee this by enabling unaligned transmits).
- * Not API. */
+/*
+ * A size below which a bulk transfer omits the handshake completely and always
+ * goes via the message channel, if bulk auxiliary is being sent on that
+ * service. (The user can guarantee this by enabling unaligned transmits).
+ * Not API.
+ */
#ifndef VCHI_MIN_BULK_SIZE
# define VCHI_MIN_BULK_SIZE (VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096)
#endif
-/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
- * speed and latency; the smaller the chunk size the better change of messages and other
- * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
- * break transmissions into chunks.
+/*
+ * Maximum size of bulk transmission chunks, for each interface type.
+ * A trade-off between speed and latency; the smaller the chunk size, the
+ * better the chance of messages and other bulk transmissions getting in when
+ * big bulk transfers are happening. Set to 0 to not break transmissions into
+ * chunks.
*/
#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
#endif
-/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
- * with multiple-line frames. Only use if the receiver can cope. */
+/*
+ * NB Chunked CCP2 transmissions violate the letter of the CCP2 spec
+ * by using "JPEG8" mode with multiple-line frames. Only use if the receiver
+ * can cope.
+ */
#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
#endif
-/* How many TX messages can we have pending in our transmit slots. Once exhausted,
- * vchi_msg_queue will be blocked. */
+/*
+ * How many TX messages can we have pending in our transmit slots?
+ * Once exhausted, vchi_msg_queue will be blocked.
+ */
#ifndef VCHI_TX_MSG_QUEUE_SIZE
# define VCHI_TX_MSG_QUEUE_SIZE 256
#endif
-/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
- * will be suspended until older messages are dequeued/released. */
+/*
+ * How many RX messages can we have parsed in the receive slots? Once exhausted,
+ * parsing will be suspended until older messages are dequeued/released.
+ */
#ifndef VCHI_RX_MSG_QUEUE_SIZE
# define VCHI_RX_MSG_QUEUE_SIZE 256
#endif
-/* Really should be able to cope if we run out of received message descriptors, by
- * suspending parsing as the comment above says, but we don't. This sweeps the issue
- * under the carpet. */
-#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+/*
+ * We really should be able to cope if we run out of received message
+ * descriptors, by suspending parsing as the comment above says, but we don't.
+ * This sweeps the issue under the carpet.
+ */
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS
# undef VCHI_RX_MSG_QUEUE_SIZE
-# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
+# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS)
#endif
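To make the clamp concrete with illustrative figures (VCHI_MAX_MSG_SIZE is defined elsewhere): for a hypothetical VCHI_MAX_MSG_SIZE of 4096 and 32 read slots, the floor is (4096 / 16 + 1) * 32 = 257 * 32 = 8224 descriptors, well above the 256 default, so the #undef/#define pair takes effect.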
-/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
- * will be blocked. */
+/*
+ * How many bulk transmits can we have pending? Once exhausted,
+ * vchi_bulk_queue_transmit will be blocked.
+ */
#ifndef VCHI_TX_BULK_QUEUE_SIZE
# define VCHI_TX_BULK_QUEUE_SIZE 64
#endif
-/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
- * will be blocked. */
+/*
+ * How many bulk receives can we have pending? Once exhausted,
+ * vchi_bulk_queue_receive will be blocked.
+ */
#ifndef VCHI_RX_BULK_QUEUE_SIZE
# define VCHI_RX_BULK_QUEUE_SIZE 64
#endif
-/* A limit on how many outstanding bulk requests we expect the peer to give us. If
- * the peer asks for more than this, VCHI will fail and assert. The number is determined
- * by the peer's hardware - it's the number of outstanding requests that can be queued
- * on all bulk channels. VC3's MPHI peripheral allows 16. */
+/*
+ * A limit on how many outstanding bulk requests we expect the peer to give us.
+ * If the peer asks for more than this, VCHI will fail and assert.
+ * The number is determined by the peer's hardware - it's the number of
+ * outstanding requests that can be queued on all bulk channels.
+ * VC3's MPHI peripheral allows 16.
+ */
#ifndef VCHI_MAX_PEER_BULK_REQUESTS
# define VCHI_MAX_PEER_BULK_REQUESTS 32
#endif
-/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
+/*
+ * Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
* transmitter on and off.
*/
/*#define VCHI_CCP2TX_MANUAL_POWER*/
#ifndef VCHI_CCP2TX_MANUAL_POWER
-/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
- * negative for no IDLE.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state.
+ * Set negative for no IDLE.
*/
# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
# define VCHI_CCP2TX_IDLE_TIMEOUT 5
# endif
-/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
- * negative for no OFF.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into OFF state.
+ * Set negative for no OFF.
*/
# ifndef VCHI_CCP2TX_OFF_TIMEOUT
# define VCHI_CCP2TX_OFF_TIMEOUT 1000
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index e7955cbaf26a..141af16ce031 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -5,7 +5,7 @@
#define VCHI_COMMON_H_
//flags used when sending messages (must be bitmapped)
-typedef enum {
+enum vchi_flags {
VCHI_FLAGS_NONE = 0x0,
VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
@@ -20,17 +20,17 @@ typedef enum {
VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
VCHI_FLAGS_INTERNAL = 0xFF0000
-} VCHI_FLAGS_T;
+};
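Dropping the typedef does not change that these values are bit flags; masking still works as usual. A sketch, not part of the patch, assuming <linux/types.h> for bool and using the internal mask defined just above:

/* Sketch only: internal flags occupy the 0xFF0000 mask. */
static inline bool vchi_flags_are_internal(enum vchi_flags flags)
{
	return (flags & VCHI_FLAGS_INTERNAL) != 0;
}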
// constants for vchi_crc_control()
-typedef enum {
+enum vchi_crc_control {
VCHI_CRC_NOTHING = -1,
VCHI_CRC_PER_SERVICE = 0,
VCHI_CRC_EVERYTHING = 1,
-} VCHI_CRC_CONTROL_T;
+};
//callback reasons when an event occurs on a service
-typedef enum {
+enum vchi_callback_reason {
VCHI_CALLBACK_REASON_MIN,
//This indicates that there is data available
@@ -73,22 +73,22 @@ typedef enum {
VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
VCHI_CALLBACK_REASON_MAX
-} VCHI_CALLBACK_REASON_T;
+};
// service control options
-typedef enum {
+enum vchi_service_option {
VCHI_SERVICE_OPTION_MIN,
VCHI_SERVICE_OPTION_TRACE,
VCHI_SERVICE_OPTION_SYNCHRONOUS,
VCHI_SERVICE_OPTION_MAX
-} VCHI_SERVICE_OPTION_T;
+};
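A sketch of the converted option enum in use, through the vchi_service_set_option() prototype updated earlier in this patch; illustrative only:

/* Sketch only: enable per-service tracing. */
static int32_t enable_trace(const struct vchi_service_handle *handle)
{
	return vchi_service_set_option(handle, VCHI_SERVICE_OPTION_TRACE, 1);
}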
//Callback used by all services / bulk transfers
-typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
- VCHI_CALLBACK_REASON_T reason,
- void *handle); //for transmitting msg's only
+typedef void (*vchi_callback)(void *callback_param, //my service local param
+ enum vchi_callback_reason reason,
+ void *handle); //for transmitting msg's only
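The callback typedef survives in lower-case form. A compatible client callback, illustrative only and not part of the patch; pr_warn() assumes the usual kernel printk headers:

/* Sketch only: matches the renamed vchi_callback type. */
static void example_callback(void *callback_param,
			     enum vchi_callback_reason reason,
			     void *handle)
{
	if (reason == VCHI_CALLBACK_BULK_TRANSMIT_ABORTED)
		pr_warn("vchi: bulk transmit aborted\n");
}

static const vchi_callback example_cb = example_callback;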
/*
* Define vector struct for scatter-gather (vector) operations
@@ -112,12 +112,6 @@ struct vchi_msg_vector {
int32_t vec_len;
};
-// Opaque type for a connection API
-typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
-
-// Opaque type for a message driver
-typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
-
// Iterator structure for reading ahead through received message queue. Allocated by client,
// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 8dc730cfe7a6..ca30bfd52919 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -81,7 +81,6 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
struct rpi_firmware *fw = drvdata->fw;
struct vchiq_slot_zero *vchiq_slot_zero;
- struct resource *res;
void *slot_mem;
dma_addr_t slot_phys;
u32 channelbase;
@@ -135,8 +134,7 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g_regs = devm_ioremap_resource(&pdev->dev, res);
+ g_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g_regs))
return PTR_ERR(g_regs);
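For reference, devm_platform_ioremap_resource(pdev, 0) is the upstream helper that folds the deleted two-step lookup-and-map into one call; it is equivalent to the removed pair:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);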
@@ -170,10 +168,10 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return 0;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_2835_state *platform_state;
state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
@@ -216,7 +214,7 @@ remote_event_signal(struct remote_event *event)
writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
int dir)
{
@@ -249,24 +247,23 @@ vchiq_complete_bulk(struct vchiq_bulk *bulk)
bulk->actual);
}
-void
-vchiq_dump_platform_state(void *dump_context)
+int vchiq_dump_platform_state(void *dump_context)
{
char buf[80];
int len;
len = snprintf(buf, sizeof(buf),
" Platform: 2835 (VC master)");
- vchiq_dump(dump_context, buf, len + 1);
+ return vchiq_dump(dump_context, buf, len + 1);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_suspend(struct vchiq_state *state)
{
return VCHIQ_ERROR;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_resume(struct vchiq_state *state)
{
return VCHIQ_SUCCESS;
@@ -526,11 +523,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
return NULL;
}
- WARN_ON(g_free_fragments == NULL);
+ WARN_ON(!g_free_fragments);
down(&g_free_fragments_mutex);
fragments = g_free_fragments;
- WARN_ON(fragments == NULL);
+ WARN_ON(!fragments);
g_free_fragments = *(char **) g_free_fragments;
up(&g_free_fragments_mutex);
pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index b1595b13dea8..02148a24818a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -84,7 +84,7 @@ static void suspend_timer_callback(struct timer_list *t);
struct user_service {
struct vchiq_service *service;
void *userdata;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
char is_vchi;
char dequeue_pending;
char close_pending;
@@ -103,7 +103,7 @@ struct bulk_waiter_node {
struct list_head list;
};
-struct vchiq_instance_struct {
+struct vchiq_instance {
struct vchiq_state *state;
struct vchiq_completion_data completions[MAX_COMPLETIONS];
int completion_insert;
@@ -172,16 +172,16 @@ static const char *const ioctl_names[] = {
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
(VCHIQ_IOC_MAX + 1));
-static VCHIQ_STATUS_T
-vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, VCHIQ_BULK_DIR_T dir);
+static enum vchiq_status
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir);
#define VCHIQ_INIT_RETRIES 10
-VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
+enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state;
- VCHIQ_INSTANCE_T instance = NULL;
+ struct vchiq_instance *instance = NULL;
int i;
vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
@@ -230,9 +230,9 @@ failed:
}
EXPORT_SYMBOL(vchiq_initialise);
-VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
+enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
vchiq_log_trace(vchiq_core_log_level,
@@ -267,14 +267,14 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
}
EXPORT_SYMBOL(vchiq_shutdown);
-static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
+static int vchiq_is_connected(struct vchiq_instance *instance)
{
return instance->connected;
}
-VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
+enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
vchiq_log_trace(vchiq_core_log_level,
@@ -301,12 +301,12 @@ failed:
}
EXPORT_SYMBOL(vchiq_connect);
-VCHIQ_STATUS_T vchiq_add_service(
- VCHIQ_INSTANCE_T instance,
+enum vchiq_status vchiq_add_service(
+ struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *phandle)
+ unsigned int *phandle)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
int srvstate;
@@ -340,12 +340,12 @@ VCHIQ_STATUS_T vchiq_add_service(
}
EXPORT_SYMBOL(vchiq_add_service);
-VCHIQ_STATUS_T vchiq_open_service(
- VCHIQ_INSTANCE_T instance,
+enum vchiq_status vchiq_open_service(
+ struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *phandle)
+ unsigned int *phandle)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
@@ -380,11 +380,11 @@ failed:
}
EXPORT_SYMBOL(vchiq_open_service);
-VCHIQ_STATUS_T
-vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
- unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+enum vchiq_status
+vchiq_bulk_transmit(unsigned int handle, const void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
@@ -405,11 +405,11 @@ vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
-VCHIQ_STATUS_T
-vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+enum vchiq_status
+vchiq_bulk_receive(unsigned int handle, void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
@@ -429,13 +429,13 @@ vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
}
EXPORT_SYMBOL(vchiq_bulk_receive);
-static VCHIQ_STATUS_T
-vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, VCHIQ_BULK_DIR_T dir)
+static enum vchiq_status
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir)
{
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
struct vchiq_service *service;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct bulk_waiter_node *waiter = NULL;
service = find_service_by_handle(handle);
@@ -515,8 +515,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
*
***************************************************************************/
-static VCHIQ_STATUS_T
-add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
+static enum vchiq_status
+add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_header *header, struct user_service *user_service,
void *bulk_userdata)
{
@@ -582,9 +582,9 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
*
***************************************************************************/
-static VCHIQ_STATUS_T
-service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
+static enum vchiq_status
+service_callback(enum vchiq_reason reason, struct vchiq_header *header,
+ unsigned int handle, void *bulk_userdata)
{
/* How do we ensure the callback goes to the right client?
** The service_user data points to a user_service record
@@ -593,7 +593,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
*/
struct user_service *user_service;
struct vchiq_service *service;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
bool skip_completion = false;
DEBUG_INITIALISE(g_state.local)
@@ -630,7 +630,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
*/
if ((user_service->message_available_pos -
instance->completion_remove) < 0) {
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
vchiq_log_info(vchiq_arm_log_level,
"Inserting extra MESSAGE_AVAILABLE");
@@ -772,8 +772,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
* vchiq_ioc_queue_message
*
**************************************************************************/
-static VCHIQ_STATUS_T
-vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+static enum vchiq_status
+vchiq_ioc_queue_message(unsigned int handle,
struct vchiq_element *elements,
unsigned long count)
{
@@ -804,8 +804,8 @@ vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- VCHIQ_INSTANCE_T instance = file->private_data;
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ struct vchiq_instance *instance = file->private_data;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_service *service = NULL;
long ret = 0;
int i, rc;
@@ -827,7 +827,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Remove all services */
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
status = vchiq_remove_service(service->handle);
unlock_service(service);
if (status != VCHIQ_SUCCESS)
@@ -907,7 +907,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
&args.params, srvstate,
instance, user_service_free);
- if (service != NULL) {
+ if (service) {
user_service->service = service;
user_service->userdata = userdata;
user_service->instance = instance;
@@ -952,7 +952,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_CLOSE_SERVICE:
case VCHIQ_IOC_REMOVE_SERVICE: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
struct user_service *user_service;
service = find_service_for_instance(instance, handle);
@@ -985,10 +985,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_USE_SERVICE:
case VCHIQ_IOC_RELEASE_SERVICE: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
service = find_service_for_instance(instance, handle);
- if (service != NULL) {
+ if (service) {
status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
vchiq_use_service_internal(service) :
vchiq_release_service_internal(service);
@@ -1021,7 +1021,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
service = find_service_for_instance(instance, args.handle);
- if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
+ if (service && (args.count <= MAX_ELEMENTS)) {
/* Copy elements into kernel space */
struct vchiq_element elements[MAX_ELEMENTS];
@@ -1042,7 +1042,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct vchiq_queue_bulk_transfer args;
struct bulk_waiter_node *waiter = NULL;
- VCHIQ_BULK_DIR_T dir =
+ enum vchiq_bulk_dir dir =
(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
@@ -1107,7 +1107,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
kfree(waiter);
} else {
- const VCHIQ_BULK_MODE_T mode_waiting =
+ const enum vchiq_bulk_mode mode_waiting =
VCHIQ_BULK_MODE_WAITING;
waiter->pid = current->pid;
mutex_lock(&instance->bulk_waiter_list_mutex);
@@ -1343,11 +1343,11 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_unlock(&msg_queue_spinlock);
complete(&user_service->remove_event);
- if (header == NULL)
+ if (!header)
ret = -ENOTCONN;
else if (header->size <= args.bufsize) {
/* Copy to user space if msgbuf is not NULL */
- if ((args.buf == NULL) ||
+ if (!args.buf ||
(copy_to_user((void __user *)args.buf,
header->data,
header->size) == 0)) {
@@ -1368,7 +1368,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} break;
case VCHIQ_IOC_GET_CLIENT_ID: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
ret = vchiq_get_client_id(handle);
} break;
@@ -1423,10 +1423,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} break;
case VCHIQ_IOC_CLOSE_DELIVERED: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
service = find_closed_service_for_instance(instance, handle);
- if (service != NULL) {
+ if (service) {
struct user_service *user_service =
(struct user_service *)service->base.userdata;
close_delivered(user_service);
@@ -1611,7 +1611,7 @@ struct vchiq_queue_bulk_transfer32 {
compat_uptr_t data;
unsigned int size;
compat_uptr_t userdata;
- VCHIQ_BULK_MODE_T mode;
+ enum vchiq_bulk_mode mode;
};
#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
@@ -1666,7 +1666,7 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
}
struct vchiq_completion_data32 {
- VCHIQ_REASON_T reason;
+ enum vchiq_reason reason;
compat_uptr_t header;
compat_uptr_t service_userdata;
compat_uptr_t bulk_userdata;
@@ -1919,7 +1919,7 @@ vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int vchiq_open(struct inode *inode, struct file *file)
{
struct vchiq_state *state = vchiq_get_state();
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
@@ -1951,7 +1951,7 @@ static int vchiq_open(struct inode *inode, struct file *file)
static int vchiq_release(struct inode *inode, struct file *file)
{
- VCHIQ_INSTANCE_T instance = file->private_data;
+ struct vchiq_instance *instance = file->private_data;
struct vchiq_state *state = vchiq_get_state();
struct vchiq_service *service;
int ret = 0;
@@ -2072,43 +2072,45 @@ out:
*
***************************************************************************/
-void
-vchiq_dump(void *dump_context, const char *str, int len)
+int vchiq_dump(void *dump_context, const char *str, int len)
{
struct dump_context *context = (struct dump_context *)dump_context;
+ int copy_bytes;
- if (context->actual < context->space) {
- int copy_bytes;
+ if (context->actual >= context->space)
+ return 0;
- if (context->offset > 0) {
- int skip_bytes = min(len, (int)context->offset);
+ if (context->offset > 0) {
+ int skip_bytes = min_t(int, len, context->offset);
- str += skip_bytes;
- len -= skip_bytes;
- context->offset -= skip_bytes;
- if (context->offset > 0)
- return;
- }
- copy_bytes = min(len, (int)(context->space - context->actual));
- if (copy_bytes == 0)
- return;
- if (copy_to_user(context->buf + context->actual, str,
- copy_bytes))
- context->actual = -EFAULT;
- context->actual += copy_bytes;
- len -= copy_bytes;
-
- /* If tne terminating NUL is included in the length, then it
- ** marks the end of a line and should be replaced with a
- ** carriage return. */
- if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
- char cr = '\n';
-
- if (copy_to_user(context->buf + context->actual - 1,
- &cr, 1))
- context->actual = -EFAULT;
- }
+ str += skip_bytes;
+ len -= skip_bytes;
+ context->offset -= skip_bytes;
+ if (context->offset > 0)
+ return 0;
+ }
+ copy_bytes = min_t(int, len, context->space - context->actual);
+ if (copy_bytes == 0)
+ return 0;
+ if (copy_to_user(context->buf + context->actual, str,
+ copy_bytes))
+ return -EFAULT;
+ context->actual += copy_bytes;
+ len -= copy_bytes;
+
+ /*
+ * If the terminating NUL is included in the length, then it
+ * marks the end of a line and should be replaced with a
+	 * newline.
+ */
+ if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
+ char cr = '\n';
+
+ if (copy_to_user(context->buf + context->actual - 1,
+ &cr, 1))
+ return -EFAULT;
}
+ return 0;
}
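Since vchiq_dump() now reports failure through its return value instead of latching -EFAULT in context->actual, callers converge on a single propagation idiom, as the hunks below show:

	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;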
/****************************************************************************
@@ -2117,8 +2119,7 @@ vchiq_dump(void *dump_context, const char *str, int len)
*
***************************************************************************/
-void
-vchiq_dump_platform_instances(void *dump_context)
+int vchiq_dump_platform_instances(void *dump_context)
{
struct vchiq_state *state = vchiq_get_state();
char buf[80];
@@ -2130,37 +2131,43 @@ vchiq_dump_platform_instances(void *dump_context)
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = state->services[i];
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
- if (service && (service->base.callback == service_callback)) {
- instance = service->instance;
- if (instance)
- instance->mark = 0;
- }
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (instance)
+ instance->mark = 0;
}
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = state->services[i];
- VCHIQ_INSTANCE_T instance;
-
- if (service && (service->base.callback == service_callback)) {
- instance = service->instance;
- if (instance && !instance->mark) {
- len = snprintf(buf, sizeof(buf),
- "Instance %pK: pid %d,%s completions %d/%d",
- instance, instance->pid,
- instance->connected ? " connected, " :
- "",
- instance->completion_insert -
- instance->completion_remove,
- MAX_COMPLETIONS);
-
- vchiq_dump(dump_context, buf, len + 1);
-
- instance->mark = 1;
- }
- }
+ struct vchiq_instance *instance;
+ int err;
+
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (!instance || instance->mark)
+ continue;
+
+ len = snprintf(buf, sizeof(buf),
+ "Instance %pK: pid %d,%s completions %d/%d",
+ instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
+
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
+ instance->mark = 1;
}
+ return 0;
}
/****************************************************************************
@@ -2169,9 +2176,8 @@ vchiq_dump_platform_instances(void *dump_context)
*
***************************************************************************/
-void
-vchiq_dump_platform_service_state(void *dump_context,
- struct vchiq_service *service)
+int vchiq_dump_platform_service_state(void *dump_context,
+ struct vchiq_service *service)
{
struct user_service *user_service =
(struct user_service *)service->base.userdata;
@@ -2192,7 +2198,7 @@ vchiq_dump_platform_service_state(void *dump_context,
" (dequeue pending)");
}
- vchiq_dump(dump_context, buf, len + 1);
+ return vchiq_dump(dump_context, buf, len + 1);
}
/****************************************************************************
@@ -2206,13 +2212,16 @@ vchiq_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct dump_context context;
+ int err;
context.buf = buf;
context.actual = 0;
context.space = count;
context.offset = *ppos;
- vchiq_dump_state(&context, &g_state);
+ err = vchiq_dump_state(&context, &g_state);
+ if (err)
+ return err;
*ppos += context.actual;
@@ -2223,13 +2232,13 @@ struct vchiq_state *
vchiq_get_state(void)
{
- if (g_state.remote == NULL)
+ if (!g_state.remote)
printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
else if (g_state.remote->initialised != 1)
printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
__func__, g_state.remote->initialised);
- return ((g_state.remote != NULL) &&
+ return (g_state.remote &&
(g_state.remote->initialised == 1)) ? &g_state : NULL;
}
@@ -2270,10 +2279,10 @@ vchiq_videocore_wanted(struct vchiq_state *state)
return 1;
}
-static VCHIQ_STATUS_T
-vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
+static enum vchiq_status
+vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T service_user,
+ unsigned int service_user,
void *bulk_user)
{
vchiq_log_error(vchiq_susp_log_level,
@@ -2287,9 +2296,9 @@ vchiq_keepalive_thread_func(void *v)
struct vchiq_state *state = (struct vchiq_state *)v;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T status;
- VCHIQ_INSTANCE_T instance;
- VCHIQ_SERVICE_HANDLE_T ka_handle;
+ enum vchiq_status status;
+ struct vchiq_instance *instance;
+ unsigned int ka_handle;
struct vchiq_service_params params = {
.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
@@ -2361,7 +2370,7 @@ exit:
return 0;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state)
{
@@ -2563,10 +2572,10 @@ unblock_resume(struct vchiq_arm_state *arm_state)
/* Initiate suspend via slot handler. Should be called with the write lock
* held */
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_arm_vcsuspend(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
if (!arm_state)
@@ -2684,12 +2693,12 @@ out:
return resume;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
int local_uc, local_entity_uc;
@@ -2798,7 +2807,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
if (ret == VCHIQ_SUCCESS) {
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
while (ack_cnt && (status == VCHIQ_SUCCESS)) {
@@ -2817,11 +2826,11 @@ out:
return ret;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
@@ -2898,33 +2907,33 @@ vchiq_on_remote_release(struct vchiq_state *state)
complete(&arm_state->ka_evt);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service)
{
return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service)
{
return vchiq_release_internal(service->state, service);
}
struct vchiq_debugfs_node *
-vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
return &instance->debugfs_node;
}
int
-vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
struct vchiq_service *service;
int use_count = 0, i;
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
use_count += service->service_use_count;
unlock_service(service);
}
@@ -2932,26 +2941,26 @@ vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
}
int
-vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_pid(struct vchiq_instance *instance)
{
return instance->pid;
}
int
-vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_trace(struct vchiq_instance *instance)
{
return instance->trace;
}
void
-vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
struct vchiq_service *service;
int i;
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
service->trace = trace;
unlock_service(service);
}
@@ -2969,10 +2978,10 @@ static void suspend_timer_callback(struct timer_list *t)
vchiq_check_suspend(state);
}
-VCHIQ_STATUS_T
-vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_use_service(unsigned int handle)
{
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
@@ -2983,10 +2992,10 @@ vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
return ret;
}
-VCHIQ_STATUS_T
-vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_release_service(unsigned int handle)
{
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
@@ -3088,11 +3097,11 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
vchiq_dump_platform_use_state(state);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state;
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
if (!service || !service->state)
goto out;
@@ -3128,35 +3137,36 @@ void vchiq_on_remote_use_active(struct vchiq_state *state)
}
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
- VCHIQ_CONNSTATE_T oldstate,
- VCHIQ_CONNSTATE_T newstate)
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ char threadname[16];
vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
get_conn_state_name(oldstate), get_conn_state_name(newstate));
- if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
- write_lock_bh(&arm_state->susp_res_lock);
- if (!arm_state->first_connect) {
- char threadname[16];
+ if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
+ return;
- arm_state->first_connect = 1;
- write_unlock_bh(&arm_state->susp_res_lock);
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- arm_state->ka_thread = kthread_create(
- &vchiq_keepalive_thread_func,
- (void *)state,
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->first_connect) {
+ write_unlock_bh(&arm_state->susp_res_lock);
+ return;
+ }
+
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (IS_ERR(arm_state->ka_thread)) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "vchiq: FATAL: couldn't create thread %s",
threadname);
- if (IS_ERR(arm_state->ka_thread)) {
- vchiq_log_error(vchiq_susp_log_level,
- "vchiq: FATAL: couldn't create thread %s",
- threadname);
- } else {
- wake_up_process(arm_state->ka_thread);
- }
- } else
- write_unlock_bh(&arm_state->susp_res_lock);
+ } else {
+ wake_up_process(arm_state->ka_thread);
}
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index b424323e9613..19d2a2eefb6a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -109,13 +109,13 @@ int vchiq_platform_init(struct platform_device *pdev,
extern struct vchiq_state *
vchiq_get_state(void);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_vcsuspend(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_vcresume(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state);
@@ -124,16 +124,16 @@ vchiq_check_resume(struct vchiq_state *state);
extern void
vchiq_check_suspend(struct vchiq_state *state);
-VCHIQ_STATUS_T
-vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
+enum vchiq_status
+vchiq_use_service(unsigned int handle);
-extern VCHIQ_STATUS_T
-vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
+extern enum vchiq_status
+vchiq_release_service(unsigned int handle);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_suspend(struct vchiq_state *state);
extern int
@@ -154,27 +154,27 @@ vchiq_platform_get_arm_state(struct vchiq_state *state);
extern int
vchiq_videocore_wanted(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_release_internal(struct vchiq_state *state,
struct vchiq_service *service);
extern struct vchiq_debugfs_node *
-vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_use_count(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_pid(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_trace(struct vchiq_instance *instance);
extern void
-vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace);
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace);
extern void
set_suspend_state(struct vchiq_arm_state *arm_state,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 56a23a297fa4..76351078affb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -132,7 +132,7 @@ vchiq_set_service_state(struct vchiq_service *service, int newstate)
}
struct vchiq_service *
-find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
+find_service_by_handle(unsigned int handle)
{
struct vchiq_service *service;
@@ -177,8 +177,8 @@ find_service_by_port(struct vchiq_state *state, int localport)
}
struct vchiq_service *
-find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle)
+find_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle)
{
struct vchiq_service *service;
@@ -201,8 +201,8 @@ find_service_for_instance(VCHIQ_INSTANCE_T instance,
}
struct vchiq_service *
-find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle)
+find_closed_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle)
{
struct vchiq_service *service;
@@ -227,7 +227,7 @@ find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
}
struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
+next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
int *pidx)
{
struct vchiq_service *service = NULL;
@@ -295,7 +295,7 @@ unlock:
}
int
-vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_client_id(unsigned int handle)
{
struct vchiq_service *service = find_service_by_handle(handle);
int id;
@@ -308,7 +308,7 @@ vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
}
void *
-vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_service_userdata(unsigned int handle)
{
struct vchiq_service *service = handle_to_service(handle);
@@ -316,7 +316,7 @@ vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
}
int
-vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_service_fourcc(unsigned int handle)
{
struct vchiq_service *service = handle_to_service(handle);
@@ -354,11 +354,11 @@ mark_service_closing(struct vchiq_service *service)
mark_service_closing_internal(service, 0);
}
-static inline VCHIQ_STATUS_T
-make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
+static inline enum vchiq_status
+make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
struct vchiq_header *header, void *bulk_userdata)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
@@ -375,9 +375,9 @@ make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
}
inline void
-vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate)
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
- VCHIQ_CONNSTATE_T oldstate = state->conn_state;
+ enum vchiq_connstate oldstate = state->conn_state;
vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
conn_state_names[oldstate],
@@ -542,7 +542,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
if (space > slot_space) {
struct vchiq_header *header;
/* Fill the remaining space with padding */
- WARN_ON(state->tx_data == NULL);
+ WARN_ON(!state->tx_data);
header = (struct vchiq_header *)
(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
header->msgid = VCHIQ_MSGID_PADDING;
@@ -779,7 +779,7 @@ copy_message_data(
}
/* Called by the slot handler and application threads */
-static VCHIQ_STATUS_T
+static enum vchiq_status
queue_message(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1027,7 +1027,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
}
/* Called by the slot handler and application threads */
-static VCHIQ_STATUS_T
+static enum vchiq_status
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1178,11 +1178,11 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
}
/* Called by the slot handler - don't hold the bulk mutex */
-static VCHIQ_STATUS_T
+static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
int retry_poll)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
vchiq_log_trace(vchiq_core_log_level,
"%d: nb:%d %cx - p=%x rn=%x r=%x",
@@ -1230,7 +1230,7 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
spin_unlock(&bulk_waiter_spinlock);
} else if (bulk->mode ==
VCHIQ_BULK_MODE_CALLBACK) {
- VCHIQ_REASON_T reason = (bulk->dir ==
+ enum vchiq_reason reason = (bulk->dir ==
VCHIQ_BULK_TRANSMIT) ?
((bulk->actual ==
VCHIQ_BULK_ACTUAL_ABORTED) ?
@@ -2078,7 +2078,7 @@ init_bulk_queue(struct vchiq_bulk_queue *queue)
}
inline const char *
-get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
+get_conn_state_name(enum vchiq_connstate conn_state)
{
return conn_state_names[conn_state];
}
@@ -2123,18 +2123,15 @@ vchiq_init_slots(void *mem_base, int mem_size)
return slot_zero;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
{
struct vchiq_shared_state *local;
struct vchiq_shared_state *remote;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
char threadname[16];
int i;
- vchiq_log_warning(vchiq_core_log_level,
- "%s: slot_zero = %pK", __func__, slot_zero);
-
if (vchiq_states[0]) {
pr_err("%s: VCHIQ state already initialized\n", __func__);
return VCHIQ_ERROR;
@@ -2283,8 +2280,8 @@ fail_free_handler_thread:
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params *params,
- int srvstate, VCHIQ_INSTANCE_T instance,
- VCHIQ_USERDATA_TERM_T userdata_term)
+ int srvstate, struct vchiq_instance *instance,
+ vchiq_userdata_term userdata_term)
{
struct vchiq_service *service;
struct vchiq_service **pservice = NULL;
@@ -2412,7 +2409,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
return service;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
{
struct vchiq_open_payload payload = {
@@ -2421,7 +2418,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
service->version,
service->version_min
};
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
service->client_id = client_id;
vchiq_use_service_internal(service);
@@ -2519,7 +2516,7 @@ release_service_messages(struct vchiq_service *service)
static int
do_abort_bulks(struct vchiq_service *service)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
/* Abort any outstanding bulk transfers */
if (mutex_lock_killable(&service->bulk_mutex))
@@ -2535,10 +2532,10 @@ do_abort_bulks(struct vchiq_service *service)
return (status == VCHIQ_SUCCESS);
}
-static VCHIQ_STATUS_T
+static enum vchiq_status
close_service_complete(struct vchiq_service *service, int failstate)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
int newstate;
@@ -2597,11 +2594,11 @@ close_service_complete(struct vchiq_service *service, int failstate)
}
/* Called by the slot handler */
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
struct vchiq_state *state = service->state;
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
@@ -2777,8 +2774,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
unlock_service(service);
}
-VCHIQ_STATUS_T
-vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+enum vchiq_status
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
@@ -2813,8 +2810,8 @@ vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
return VCHIQ_SUCCESS;
}
-VCHIQ_STATUS_T
-vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+enum vchiq_status
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
@@ -2830,12 +2827,12 @@ vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
return VCHIQ_SUCCESS;
}
-VCHIQ_STATUS_T
-vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_close_service(unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
return VCHIQ_ERROR;
@@ -2889,12 +2886,12 @@ vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
return status;
}
-VCHIQ_STATUS_T
-vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_remove_service(unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
return VCHIQ_ERROR;
@@ -2955,10 +2952,10 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
void *offset, int size, void *userdata,
- VCHIQ_BULK_MODE_T mode,
- VCHIQ_BULK_DIR_T dir)
+ enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(handle);
struct vchiq_bulk_queue *queue;
@@ -2968,7 +2965,7 @@ VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
int payload[2];
if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
@@ -3103,15 +3100,15 @@ error_exit:
return status;
}
-VCHIQ_STATUS_T
-vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+enum vchiq_status
+vchiq_queue_message(unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size)
{
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
if (!service ||
(vchiq_check_service(service) != VCHIQ_SUCCESS))
@@ -3156,7 +3153,7 @@ error_exit:
}
void
-vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle,
+vchiq_release_message(unsigned int handle,
struct vchiq_header *header)
{
struct vchiq_service *service = find_service_by_handle(handle);
@@ -3195,10 +3192,10 @@ release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
remote_event_signal(&state->remote->sync_release);
}
-VCHIQ_STATUS_T
-vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
+enum vchiq_status
+vchiq_get_peer_version(unsigned int handle, short *peer_version)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (!service ||
@@ -3224,12 +3221,12 @@ void vchiq_get_config(struct vchiq_config *config)
config->version_min = VCHIQ_VERSION_MIN;
}
-VCHIQ_STATUS_T
-vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
- VCHIQ_SERVICE_OPTION_T option, int value)
+enum vchiq_status
+vchiq_set_service_option(unsigned int handle,
+ enum vchiq_service_option option, int value)
{
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
if (service) {
switch (option) {
@@ -3301,7 +3298,7 @@ vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
return status;
}
-static void
+static int
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
struct vchiq_shared_state *shared, const char *label)
{
@@ -3321,16 +3318,21 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
int i;
char buf[80];
int len;
+ int err;
len = scnprintf(buf, sizeof(buf),
" %s: slots %d-%d tx_pos=%x recycle=%x",
label, shared->slot_first, shared->slot_last,
shared->tx_pos, shared->slot_queue_recycle);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Slots claimed:");
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
for (i = shared->slot_first; i <= shared->slot_last; i++) {
struct vchiq_slot_info slot_info =
@@ -3339,27 +3341,34 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
len = scnprintf(buf, sizeof(buf),
" %d: %d/%d", i, slot_info.use_count,
slot_info.release_count);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
}
for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
debug_names[i], shared->debug[i], shared->debug[i]);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
+ return 0;
}
-void
-vchiq_dump_state(void *dump_context, struct vchiq_state *state)
+int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
char buf[80];
int len;
int i;
+ int err;
len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
conn_state_names[state->conn_state]);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
@@ -3367,12 +3376,16 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
state->rx_pos,
state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Version: %d (min %d)",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
if (VCHIQ_ENABLE_STATS) {
len = scnprintf(buf, sizeof(buf),
@@ -3380,7 +3393,9 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
"error_count=%d",
state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
state->stats.error_count);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
len = scnprintf(buf, sizeof(buf),
@@ -3391,30 +3406,49 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->data_quota - state->data_use_count,
state->local->slot_queue_recycle - state->slot_queue_available,
state->stats.slot_stalls, state->stats.data_stalls);
- vchiq_dump(dump_context, buf, len + 1);
-
- vchiq_dump_platform_state(dump_context);
-
- vchiq_dump_shared_state(dump_context, state, state->local, "Local");
- vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
-
- vchiq_dump_platform_instances(dump_context);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
+
+ err = vchiq_dump_platform_state(dump_context);
+ if (err)
+ return err;
+
+ err = vchiq_dump_shared_state(dump_context,
+ state,
+ state->local,
+ "Local");
+ if (err)
+ return err;
+ err = vchiq_dump_shared_state(dump_context,
+ state,
+ state->remote,
+ "Remote");
+ if (err)
+ return err;
+
+ err = vchiq_dump_platform_instances(dump_context);
+ if (err)
+ return err;
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = find_service_by_port(state, i);
if (service) {
- vchiq_dump_service_state(dump_context, service);
+ err = vchiq_dump_service_state(dump_context, service);
unlock_service(service);
+ if (err)
+ return err;
}
}
+ return 0;
}
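The hunks above convert the dump helpers from void to int so that a failure from vchiq_dump() propagates instead of being silently dropped. A minimal sketch of the resulting calling pattern (the wrapper name is illustrative, not part of the patch):

	static int dump_state_checked(void *dump_context, struct vchiq_state *state)
	{
		int err;

		/* Each helper now returns 0 on success or a negative errno. */
		err = vchiq_dump_state(dump_context, state);
		if (err)
			return err;	/* e.g. the dump buffer ran out of space */

		return 0;
	}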
-void
-vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
+int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
char buf[80];
int len;
+ int err;
len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
service->localport, srvstate_names[service->srvstate],
@@ -3447,7 +3481,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service_quota->slot_use_count,
service_quota->slot_quota);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
tx_pending = service->bulk_tx.local_insert -
service->bulk_tx.remote_insert;
@@ -3466,7 +3502,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
BULK_INDEX(service->bulk_rx.remove)].size : 0);
if (VCHIQ_ENABLE_STATS) {
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Ctrl: tx_count=%d, tx_bytes=%llu, "
@@ -3475,7 +3513,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.ctrl_tx_bytes,
service->stats.ctrl_rx_count,
service->stats.ctrl_rx_bytes);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Bulk: tx_count=%d, tx_bytes=%llu, "
@@ -3484,7 +3524,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.bulk_tx_bytes,
service->stats.bulk_rx_count,
service->stats.bulk_rx_bytes);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" %d quota stalls, %d slot stalls, "
@@ -3497,10 +3539,13 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
}
}
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
if (service->srvstate != VCHIQ_SRVSTATE_FREE)
- vchiq_dump_platform_service_state(dump_context, service);
+ err = vchiq_dump_platform_service_state(dump_context, service);
+ return err;
}
void
@@ -3527,9 +3572,9 @@ vchiq_loud_error_footer(void)
"================");
}
-VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
+enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ enum vchiq_status status = VCHIQ_RETRY;
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
@@ -3538,9 +3583,9 @@ VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
return status;
}
-VCHIQ_STATUS_T vchiq_send_remote_use_active(struct vchiq_state *state)
+enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ enum vchiq_status status = VCHIQ_RETRY;
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
@@ -3578,7 +3623,7 @@ void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
}
*s++ = '\0';
- if ((label != NULL) && (*label != '\0'))
+ if (label && (*label != '\0'))
vchiq_log_trace(VCHIQ_LOG_TRACE,
"%s: %08x: %s", label, addr, line_buf);
else
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 63f71b2a492f..c31f953a9986 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -60,8 +60,8 @@ vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
-#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(struct vchiq_slot_zero) + \
- VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
+#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
+ VCHIQ_SLOT_SIZE)
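DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so this is a purely cosmetic replacement for the open-coded rounding it removes. A self-contained check, using an illustrative slot size rather than the real structure sizes:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* as in <linux/kernel.h> */

	_Static_assert(DIV_ROUND_UP(4096, 4096) == 1, "an exact fit needs one slot");
	_Static_assert(DIV_ROUND_UP(4097, 4096) == 2, "one extra byte spills into a second slot");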
#define VCHIQ_MSG_PADDING 0 /* - */
#define VCHIQ_MSG_CONNECT 1 /* - */
@@ -169,7 +169,7 @@ enum {
#endif /* VCHIQ_ENABLE_DEBUG */
-typedef enum {
+enum vchiq_connstate {
VCHIQ_CONNSTATE_DISCONNECTED,
VCHIQ_CONNSTATE_CONNECTING,
VCHIQ_CONNSTATE_CONNECTED,
@@ -179,7 +179,7 @@ typedef enum {
VCHIQ_CONNSTATE_RESUMING,
VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
VCHIQ_CONNSTATE_RESUME_TIMEOUT
-} VCHIQ_CONNSTATE_T;
+};
enum {
VCHIQ_SRVSTATE_FREE,
@@ -202,12 +202,12 @@ enum {
VCHIQ_POLL_COUNT
};
-typedef enum {
+enum vchiq_bulk_dir {
VCHIQ_BULK_TRANSMIT,
VCHIQ_BULK_RECEIVE
-} VCHIQ_BULK_DIR_T;
+};
-typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
+typedef void (*vchiq_userdata_term)(void *userdata);
struct vchiq_bulk {
short mode;
@@ -236,7 +236,7 @@ struct remote_event {
u32 __unused;
};
-typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
+struct opaque_platform_state;
struct vchiq_slot {
char data[VCHIQ_SLOT_SIZE];
@@ -250,10 +250,10 @@ struct vchiq_slot_info {
struct vchiq_service {
struct vchiq_service_base base;
- VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int handle;
unsigned int ref_count;
int srvstate;
- VCHIQ_USERDATA_TERM_T userdata_term;
+ vchiq_userdata_term userdata_term;
unsigned int localport;
unsigned int remoteport;
int public_fourcc;
@@ -268,7 +268,7 @@ struct vchiq_service {
short peer_version;
struct vchiq_state *state;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
int service_use_count;
@@ -367,7 +367,7 @@ struct vchiq_slot_zero {
struct vchiq_state {
int id;
int initialised;
- VCHIQ_CONNSTATE_T conn_state;
+ enum vchiq_connstate conn_state;
short version_common;
struct vchiq_shared_state *local;
@@ -382,7 +382,7 @@ struct vchiq_state {
/* Mutex protecting services */
struct mutex mutex;
- VCHIQ_INSTANCE_T *instance;
+ struct vchiq_instance **instance;
/* Processes incoming messages */
struct task_struct *slot_handler_thread;
@@ -468,7 +468,7 @@ struct vchiq_state {
struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];
- VCHIQ_PLATFORM_STATE_T platform_state;
+ struct opaque_platform_state *platform_state;
};
struct bulk_waiter {
@@ -486,27 +486,27 @@ extern int vchiq_sync_log_level;
extern struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
extern const char *
-get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
+get_conn_state_name(enum vchiq_connstate conn_state);
extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);
-extern VCHIQ_STATUS_T
-vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params *params,
- int srvstate, VCHIQ_INSTANCE_T instance,
- VCHIQ_USERDATA_TERM_T userdata_term);
+ int srvstate, struct vchiq_instance *instance,
+ vchiq_userdata_term userdata_term);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);
extern void
@@ -515,21 +515,21 @@ vchiq_terminate_service_internal(struct vchiq_service *service);
extern void
vchiq_free_service_internal(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
-vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern void
remote_event_pollall(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
-vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *offset, int size,
- void *userdata, VCHIQ_BULK_MODE_T mode,
- VCHIQ_BULK_DIR_T dir);
+extern enum vchiq_status
+vchiq_bulk_transfer(unsigned int handle, void *offset, int size,
+ void *userdata, enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir);
-extern void
+extern int
vchiq_dump_state(void *dump_context, struct vchiq_state *state);
-extern void
+extern int
vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);
extern void
@@ -543,7 +543,7 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service,
int poll_type);
static inline struct vchiq_service *
-handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
+handle_to_service(unsigned int handle)
{
struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
(VCHIQ_MAX_STATES - 1)];
@@ -554,21 +554,21 @@ handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
}
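handle_to_service() is also why a plain unsigned int can replace the old handle typedef: the handle packs a state index and a service slot into one integer. A sketch of the decomposition, with illustrative constants (the real values live earlier in vchiq_core.h):

	#define VCHIQ_MAX_SERVICES	4096	/* illustrative */
	#define VCHIQ_MAX_STATES	2	/* illustrative */

	static void decode_handle(unsigned int handle)
	{
		unsigned int state_idx = (handle / VCHIQ_MAX_SERVICES) &
					 (VCHIQ_MAX_STATES - 1);
		unsigned int srv_idx   = handle & (VCHIQ_MAX_SERVICES - 1);

		/* state_idx picks an entry of vchiq_states[]; srv_idx is then,
		 * presumably, the slot inside that state's service table. */
		(void)state_idx;
		(void)srv_idx;
	}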
extern struct vchiq_service *
-find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
+find_service_by_handle(unsigned int handle);
extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport);
extern struct vchiq_service *
-find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle);
+find_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle);
extern struct vchiq_service *
-find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle);
+find_closed_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle);
extern struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
+next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
int *pidx);
extern void
@@ -580,7 +580,7 @@ unlock_service(struct vchiq_service *service);
/* The following functions are called from vchiq_core, and external
** implementations must be provided. */
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
int dir);
@@ -596,29 +596,29 @@ vchiq_platform_check_suspend(struct vchiq_state *state);
extern void
vchiq_platform_paused(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_resume(struct vchiq_state *state);
extern void
vchiq_platform_resumed(struct vchiq_state *state);
-extern void
+extern int
vchiq_dump(void *dump_context, const char *str, int len);
-extern void
+extern int
vchiq_dump_platform_state(void *dump_context);
-extern void
+extern int
vchiq_dump_platform_instances(void *dump_context);
-extern void
+extern int
vchiq_dump_platform_service_state(void *dump_context,
struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service);
extern void
@@ -627,31 +627,31 @@ vchiq_on_remote_use(struct vchiq_state *state);
extern void
vchiq_on_remote_release(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
extern void
vchiq_on_remote_use_active(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_send_remote_use(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_send_remote_use_active(struct vchiq_state *state);
extern void
vchiq_platform_conn_state_changed(struct vchiq_state *state,
- VCHIQ_CONNSTATE_T oldstate,
- VCHIQ_CONNSTATE_T newstate);
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate);
extern void
vchiq_platform_handle_timeout(struct vchiq_state *state);
extern void
-vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate);
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
extern void
vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index f217b78d95a0..89cc52211de4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -117,7 +117,7 @@ static const struct file_operations debugfs_log_fops = {
static int debugfs_usecount_show(struct seq_file *f, void *offset)
{
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
int use_count;
use_count = vchiq_instance_get_use_count(instance);
@@ -129,7 +129,7 @@ DEFINE_SHOW_ATTRIBUTE(debugfs_usecount);
static int debugfs_trace_show(struct seq_file *f, void *offset)
{
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
int trace;
trace = vchiq_instance_get_trace(instance);
@@ -148,7 +148,7 @@ static ssize_t debugfs_trace_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *f = (struct seq_file *)file->private_data;
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
char firstchar;
if (copy_from_user(&firstchar, buffer, 1))
@@ -184,7 +184,7 @@ static const struct file_operations debugfs_trace_fops = {
};
/* add an instance (process) to the debugfs entries */
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
char pidstr[16];
struct dentry *top;
@@ -201,7 +201,7 @@ void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
vchiq_instance_get_debugfs_node(instance)->dentry = top;
}
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
struct vchiq_debugfs_node *node =
vchiq_instance_get_debugfs_node(instance);
@@ -242,11 +242,11 @@ void vchiq_debugfs_deinit(void)
{
}
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
}
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index 9b563d105fdb..ec2f033cdf32 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -14,8 +14,8 @@ void vchiq_debugfs_init(void);
void vchiq_debugfs_deinit(void);
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance);
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance);
#endif /* VCHIQ_DEBUGFS_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index c23bd105c40f..07c6a3db5ab6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -15,7 +15,7 @@
#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
-typedef enum {
+enum vchiq_reason {
VCHIQ_SERVICE_OPENED, /* service, -, - */
VCHIQ_SERVICE_CLOSED, /* service, -, - */
VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
@@ -23,28 +23,28 @@ typedef enum {
VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
-} VCHIQ_REASON_T;
+};
-typedef enum {
+enum vchiq_status {
VCHIQ_ERROR = -1,
VCHIQ_SUCCESS = 0,
VCHIQ_RETRY = 1
-} VCHIQ_STATUS_T;
+};
-typedef enum {
+enum vchiq_bulk_mode {
VCHIQ_BULK_MODE_CALLBACK,
VCHIQ_BULK_MODE_BLOCKING,
VCHIQ_BULK_MODE_NOCALLBACK,
VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
-} VCHIQ_BULK_MODE_T;
+};
-typedef enum {
+enum vchiq_service_option {
VCHIQ_SERVICE_OPTION_AUTOCLOSE,
VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
VCHIQ_SERVICE_OPTION_TRACE
-} VCHIQ_SERVICE_OPTION_T;
+};
struct vchiq_header {
/* The message identifier - opaque to applications. */
@@ -61,21 +61,19 @@ struct vchiq_element {
unsigned int size;
};
-typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
-
-typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T,
- struct vchiq_header *,
- VCHIQ_SERVICE_HANDLE_T, void *);
+typedef enum vchiq_status (*vchiq_callback)(enum vchiq_reason,
+ struct vchiq_header *,
+ unsigned int, void *);
struct vchiq_service_base {
int fourcc;
- VCHIQ_CALLBACK_T callback;
+ vchiq_callback callback;
void *userdata;
};
struct vchiq_service_params {
int fourcc;
- VCHIQ_CALLBACK_T callback;
+ vchiq_callback callback;
void *userdata;
short version; /* Increment for non-trivial changes */
short version_min; /* Update for incompatible changes */
@@ -92,57 +90,57 @@ struct vchiq_config {
short version_min; /* The minimum compatible version of VCHIQ */
};
-typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
-typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
+struct vchiq_instance;
+typedef void (*vchiq_remote_callback)(void *cb_arg);
-extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
-extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
-extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
-extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
+extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
+extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
+extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
+extern enum vchiq_status vchiq_add_service(struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *pservice);
-extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
+ unsigned int *pservice);
+extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *pservice);
-extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T
-vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ unsigned int *pservice);
+extern enum vchiq_status vchiq_close_service(unsigned int service);
+extern enum vchiq_status vchiq_remove_service(unsigned int service);
+extern enum vchiq_status vchiq_use_service(unsigned int service);
+extern enum vchiq_status vchiq_release_service(unsigned int service);
+extern enum vchiq_status
+vchiq_queue_message(unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size);
-extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
+extern void vchiq_release_message(unsigned int service,
struct vchiq_header *header);
-extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+extern enum vchiq_status vchiq_bulk_transmit(unsigned int service,
const void *data, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive(unsigned int service,
void *data, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_transmit_handle(unsigned int service,
const void *offset, unsigned int size,
- void *userdata, VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
+ void *userdata, enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive_handle(unsigned int service,
void *offset, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
-extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
-extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
+ enum vchiq_bulk_mode mode);
+extern int vchiq_get_client_id(unsigned int service);
+extern void *vchiq_get_service_userdata(unsigned int service);
+extern int vchiq_get_service_fourcc(unsigned int service);
extern void vchiq_get_config(struct vchiq_config *config);
-extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
- VCHIQ_SERVICE_OPTION_T option, int value);
+extern enum vchiq_status vchiq_set_service_option(unsigned int service,
+ enum vchiq_service_option option, int value);
-extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
- VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
-extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status vchiq_remote_use(struct vchiq_instance *instance,
+ vchiq_remote_callback callback, void *cb_arg);
+extern enum vchiq_status vchiq_remote_release(struct vchiq_instance *instance);
-extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
+extern enum vchiq_status vchiq_dump_phys_mem(unsigned int service,
void *ptr, size_t num_bytes);
-extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
+extern enum vchiq_status vchiq_get_peer_version(unsigned int handle,
short *peer_version);
#endif /* VCHIQ_IF_H */
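Since vchiq_if.h declared the camel-case typedefs, this file is the root of the rename that the rest of the patch follows. After the change a service callback is written against plain enums and an unsigned int handle; a sketch (the callback name and body are illustrative):

	static enum vchiq_status my_callback(enum vchiq_reason reason,
					     struct vchiq_header *header,
					     unsigned int handle, void *userdata)
	{
		switch (reason) {
		case VCHIQ_MESSAGE_AVAILABLE:
			/* process the message, then return the slot space */
			vchiq_release_message(handle, header);
			break;
		default:
			break;
		}
		return VCHIQ_SUCCESS;
	}

Such a function matches the new vchiq_callback typedef and could be installed through struct vchiq_service_params.callback.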
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 460ccea088bf..202889b3774f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -28,11 +28,11 @@ struct vchiq_queue_bulk_transfer {
void *data;
unsigned int size;
void *userdata;
- VCHIQ_BULK_MODE_T mode;
+ enum vchiq_bulk_mode mode;
};
struct vchiq_completion_data {
- VCHIQ_REASON_T reason;
+ enum vchiq_reason reason;
struct vchiq_header *header;
void *service_userdata;
void *bulk_userdata;
@@ -60,7 +60,7 @@ struct vchiq_get_config {
struct vchiq_set_service_option {
unsigned int handle;
- VCHIQ_SERVICE_OPTION_T option;
+ enum vchiq_service_option option;
int value;
};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 17a4f2c8d8b1..0ce3b08b3441 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -12,22 +12,22 @@
#define vchiq_status_to_vchi(status) ((int32_t)status)
struct shim_service {
- VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int handle;
struct vchiu_queue queue;
- VCHI_CALLBACK_T callback;
+ vchi_callback callback;
void *callback_param;
};
/***********************************************************
* Name: vchi_msg_peek
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void **data,
* uint32_t *msg_size,
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
*
* Description: Routine to return a pointer to the current message (to allow in
* place processing). The message can be removed using
@@ -36,10 +36,10 @@ struct shim_service {
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
- void **data,
- uint32_t *msg_size,
- VCHI_FLAGS_T flags)
+int32_t vchi_msg_peek(struct vchi_service_handle *handle,
+ void **data,
+ uint32_t *msg_size,
+ enum vchi_flags flags)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
/***********************************************************
* Name: vchi_msg_remove
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
*
* Description: Routine to remove a message (after it has been read with
* vchi_msg_peek)
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_msg_remove(struct vchi_service_handle *handle)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(vchi_msg_remove);
/***********************************************************
* Name: vchi_msg_queue
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* ssize_t (*copy_callback)(void *context, void *dest,
* size_t offset, size_t maxsize),
* void *context,
@@ -99,14 +99,14 @@ EXPORT_SYMBOL(vchi_msg_remove);
*
***********************************************************/
static
-int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+int32_t vchi_msg_queue(struct vchi_service_handle *handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
uint32_t data_size)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
while (1) {
status = vchiq_queue_message(service->handle,
@@ -139,7 +139,7 @@ vchi_queue_kernel_message_callback(void *context,
}
int
-vchi_queue_kernel_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size)
{
@@ -169,7 +169,7 @@ vchi_queue_user_message_callback(void *context,
}
int
-vchi_queue_user_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_user_message(struct vchi_service_handle *handle,
void __user *data,
unsigned int size)
{
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(vchi_queue_user_message);
* Arguments: VCHI_BULK_HANDLE_T handle,
* void *data_dst,
* const uint32_t data_size,
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
* void *bulk_handle
*
* Description: Routine to setup a rcv buffer
@@ -198,15 +198,13 @@ EXPORT_SYMBOL(vchi_queue_user_message);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
- void *data_dst,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *bulk_handle)
+int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_dst,
+ uint32_t data_size, enum vchi_flags flags,
+ void *bulk_handle)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_BULK_MODE_T mode;
- VCHIQ_STATUS_T status;
+ enum vchiq_bulk_mode mode;
+ enum vchiq_status status;
switch ((int)flags) {
case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
@@ -250,7 +248,7 @@ EXPORT_SYMBOL(vchi_bulk_queue_receive);
* Arguments: VCHI_BULK_HANDLE_T handle,
* const void *data_src,
* uint32_t data_size,
- * VCHI_FLAGS_T flags,
+ * enum vchi_flags flags,
* void *bulk_handle
*
* Description: Routine to transmit some data
@@ -258,15 +256,15 @@ EXPORT_SYMBOL(vchi_bulk_queue_receive);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
- const void *data_src,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *bulk_handle)
+int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
+ const void *data_src,
+ uint32_t data_size,
+ enum vchi_flags flags,
+ void *bulk_handle)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_BULK_MODE_T mode;
- VCHIQ_STATUS_T status;
+ enum vchiq_bulk_mode mode;
+ enum vchiq_status status;
switch ((int)flags) {
case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
@@ -309,22 +307,20 @@ EXPORT_SYMBOL(vchi_bulk_queue_transmit);
/***********************************************************
* Name: vchi_msg_dequeue
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void *data,
* uint32_t max_data_size_to_read,
* uint32_t *actual_msg_size
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
*
* Description: Routine to dequeue a message into the supplied buffer
*
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
- void *data,
- uint32_t max_data_size_to_read,
- uint32_t *actual_msg_size,
- VCHI_FLAGS_T flags)
+int32_t vchi_msg_dequeue(struct vchi_service_handle *handle, void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size, enum vchi_flags flags)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -364,13 +360,13 @@ int32_t vchi_held_msg_release(struct vchi_held_msg *message)
{
/*
* Convert the service field pointer back to an
- * VCHIQ_SERVICE_HANDLE_T which is an int.
+ * unsigned int handle.
* This pointer is opaque to everything except
* vchi_msg_hold which simply upcasted the int
* to a pointer.
*/
- vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
+ vchiq_release_message((unsigned int)(long)message->service,
(struct vchiq_header *)message->message);
return 0;
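Both comments in this hunk describe the same trick: the unsigned int handle makes a round trip through a pointer-sized integer so it can hide in the held message's opaque service field. Condensed, the two sides look roughly like this (the hold side is paraphrased from the comment, not quoted from the patch):

	/* vchi_msg_hold(): widen the handle and stash it as a pointer */
	message_handle->service = (void *)(long)service->handle;

	/* vchi_held_msg_release(): recover the handle and release the slot */
	vchiq_release_message((unsigned int)(long)message->service,
			      (struct vchiq_header *)message->message);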
@@ -380,10 +376,10 @@ EXPORT_SYMBOL(vchi_held_msg_release);
/***********************************************************
* Name: vchi_msg_hold
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void **data,
* uint32_t *msg_size,
- * VCHI_FLAGS_T flags,
+ * enum vchi_flags flags,
* struct vchi_held_msg *message_handle
*
* Description: Routine to return a pointer to the current message (to allow
@@ -394,11 +390,9 @@ EXPORT_SYMBOL(vchi_held_msg_release);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
- void **data,
- uint32_t *msg_size,
- VCHI_FLAGS_T flags,
- struct vchi_held_msg *message_handle)
+int32_t vchi_msg_hold(struct vchi_service_handle *handle, void **data,
+ uint32_t *msg_size, enum vchi_flags flags,
+ struct vchi_held_msg *message_handle)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -416,7 +410,7 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
*msg_size = header->size;
/*
- * upcast the VCHIQ_SERVICE_HANDLE_T which is an int
+ * upcast the unsigned int handle
* to a pointer and stuff it in the held message.
* This pointer is opaque to everything except
* vchi_held_msg_release which simply downcasts it back
@@ -434,7 +428,7 @@ EXPORT_SYMBOL(vchi_msg_hold);
/***********************************************************
* Name: vchi_initialise
*
- * Arguments: VCHI_INSTANCE_T *instance_handle
+ * Arguments: struct vchi_instance_handle **instance_handle
*
* Description: Initialises the hardware but does not transmit anything
* When run as a Host App this will be called twice hence the need
@@ -444,14 +438,14 @@ EXPORT_SYMBOL(vchi_msg_hold);
*
***********************************************************/
-int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
+int32_t vchi_initialise(struct vchi_instance_handle **instance_handle)
{
- VCHIQ_INSTANCE_T instance;
- VCHIQ_STATUS_T status;
+ struct vchiq_instance *instance;
+ enum vchiq_status status;
status = vchiq_initialise(&instance);
- *instance_handle = (VCHI_INSTANCE_T)instance;
+ *instance_handle = (struct vchi_instance_handle *)instance;
return vchiq_status_to_vchi(status);
}
@@ -460,7 +454,7 @@ EXPORT_SYMBOL(vchi_initialise);
/***********************************************************
* Name: vchi_connect
*
- * Arguments: VCHI_INSTANCE_T instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
*
* Description: Starts the command service on each connection,
* causing INIT messages to be pinged back and forth
@@ -468,9 +462,9 @@ EXPORT_SYMBOL(vchi_initialise);
* Returns: 0 if successful, failure otherwise
*
***********************************************************/
-int32_t vchi_connect(VCHI_INSTANCE_T instance_handle)
+int32_t vchi_connect(struct vchi_instance_handle *instance_handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
return vchiq_connect(instance);
}
@@ -479,7 +473,7 @@ EXPORT_SYMBOL(vchi_connect);
/***********************************************************
* Name: vchi_disconnect
*
- * Arguments: VCHI_INSTANCE_T instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
*
* Description: Stops the command service on each connection,
* causing DE-INIT messages to be pinged back and forth
@@ -487,9 +481,9 @@ EXPORT_SYMBOL(vchi_connect);
* Returns: 0 if successful, failure otherwise
*
***********************************************************/
-int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
+int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
return vchiq_status_to_vchi(vchiq_shutdown(instance));
}
@@ -499,9 +493,9 @@ EXPORT_SYMBOL(vchi_disconnect);
* Name: vchi_service_open
* Name: vchi_service_create
*
- * Arguments: VCHI_INSTANCE_T *instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
* struct service_creation *setup,
- * VCHI_SERVICE_HANDLE_T *handle
+ * struct vchi_service_handle **handle
*
* Description: Routine to open a service
*
@@ -509,9 +503,9 @@ EXPORT_SYMBOL(vchi_disconnect);
*
***********************************************************/
-static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
+static enum vchiq_status shim_callback(enum vchiq_reason reason,
struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T handle,
+ unsigned int handle,
void *bulk_user)
{
struct shim_service *service =
@@ -571,7 +565,7 @@ done:
return VCHIQ_SUCCESS;
}
-static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
+static struct shim_service *service_alloc(struct vchiq_instance *instance,
struct service_creation *setup)
{
struct shim_service *service = kzalloc(sizeof(struct shim_service), GFP_KERNEL);
@@ -579,7 +573,7 @@ static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
(void)instance;
if (service) {
- if (vchiu_queue_init(&service->queue, 64)) {
+ if (!vchiu_queue_init(&service->queue, 64)) {
service->callback = setup->callback;
service->callback_param = setup->callback_param;
} else {
@@ -599,18 +593,18 @@ static void service_free(struct shim_service *service)
}
}
-int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
- VCHI_SERVICE_HANDLE_T *handle)
+ struct vchi_service_handle **handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
struct shim_service *service = service_alloc(instance, setup);
- *handle = (VCHI_SERVICE_HANDLE_T)service;
+ *handle = (struct vchi_service_handle *)service;
if (service) {
struct vchiq_service_params params;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
memset(&params, 0, sizeof(params));
params.fourcc = setup->service_id;
@@ -628,17 +622,17 @@ int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
}
}
- return (service != NULL) ? 0 : -1;
+ return service ? 0 : -1;
}
EXPORT_SYMBOL(vchi_service_open);
-int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_close(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
+ enum vchiq_status status = vchiq_close_service(service->handle);
if (status == VCHIQ_SUCCESS)
service_free(service);
@@ -648,13 +642,13 @@ int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
}
EXPORT_SYMBOL(vchi_service_close);
-int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_destroy(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
+ enum vchiq_status status = vchiq_remove_service(service->handle);
if (status == VCHIQ_SUCCESS) {
service_free(service);
@@ -667,13 +661,13 @@ int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
}
EXPORT_SYMBOL(vchi_service_destroy);
-int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
- VCHI_SERVICE_OPTION_T option,
+int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
+ enum vchi_service_option option,
int value)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_SERVICE_OPTION_T vchiq_option;
+ enum vchiq_service_option vchiq_option;
switch (option) {
case VCHI_SERVICE_OPTION_TRACE:
@@ -687,7 +681,7 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
break;
}
if (service) {
- VCHIQ_STATUS_T status =
+ enum vchiq_status status =
vchiq_set_service_option(service->handle,
vchiq_option,
value);
@@ -698,13 +692,13 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
}
EXPORT_SYMBOL(vchi_service_set_option);
-int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
+int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *peer_version)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
status = vchiq_get_peer_version(service->handle, peer_version);
ret = vchiq_status_to_vchi(status);
@@ -716,14 +710,14 @@ EXPORT_SYMBOL(vchi_get_peer_version);
/***********************************************************
* Name: vchi_service_use
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ * Arguments: const struct vchi_service_handle *handle
*
* Description: Routine to increment refcount on a service
*
* Returns: void
*
***********************************************************/
-int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_use(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
@@ -737,14 +731,14 @@ EXPORT_SYMBOL(vchi_service_use);
/***********************************************************
* Name: vchi_service_release
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ * Arguments: const struct vchi_service_handle *handle
*
* Description: Routine to decrement refcount on a service
*
* Returns: void
*
***********************************************************/
-int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_release(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 5e6d3035dc05..644844d88fed 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -24,9 +24,9 @@ int vchiu_queue_init(struct vchiu_queue *queue, int size)
GFP_KERNEL);
if (!queue->storage) {
vchiu_queue_delete(queue);
- return 0;
+ return -ENOMEM;
}
- return 1;
+ return 0;
}
void vchiu_queue_delete(struct vchiu_queue *queue)
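vchiu_queue_init() now follows the usual kernel convention of returning 0 on success and a negative errno on failure, which is why the test in service_alloc() in vchiq_shim.c above gained its '!'. A sketch of the updated call-site pattern (the cleanup shown is illustrative):

	int err = vchiu_queue_init(&service->queue, 64);
	if (err) {
		/* err is -ENOMEM: the storage allocation failed */
		kfree(service);
		return NULL;
	}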
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index eba4ee0750dc..e65c9825ea5a 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -79,14 +79,10 @@ static void s_vCalculateOFDMRParameter(unsigned char byRate, u8 bb_type,
*
* Return Value: none
*/
-static
-void
-s_vCalculateOFDMRParameter(
- unsigned char byRate,
- u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime
-)
+static void s_vCalculateOFDMRParameter(unsigned char byRate,
+ u8 bb_type,
+ unsigned char *pbyTxRate,
+ unsigned char *pbyRsvTime)
{
switch (byRate) {
case RATE_6M:
@@ -736,8 +732,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_36M),
bb_type,
&byTxRate,
@@ -745,8 +740,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_48M),
bb_type,
&byTxRate,
@@ -754,8 +748,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
@@ -763,8 +756,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 887c1692e05b..25867cb9d13d 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -45,7 +45,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode);
+ unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 082302944c37..f69fc687d4c3 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -295,7 +295,8 @@ static void device_init_registers(struct vnt_private *priv)
/* Get Desire Power Value */
priv->byCurPwr = 0xFF;
priv->byCCKPwr = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_CCK);
- priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_OFDMG);
+ priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset,
+ EEP_OFS_PWR_OFDMG);
/* Load power Table */
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
@@ -373,7 +374,7 @@ static void device_init_registers(struct vnt_private *priv)
priv->bRadioOff = false;
priv->byRadioCtl = SROMbyReadEmbedded(priv->PortOffset,
- EEP_OFS_RADIOCTL);
+ EEP_OFS_RADIOCTL);
priv->bHWRadioOff = false;
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
@@ -659,12 +660,13 @@ static int device_init_td0_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD0Rings[(i+1) % priv->opts.tx_descs[0]]);
- desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
+ desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
+ desc->next_desc = cpu_to_le32(curr +
+ sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD0Rings[i-1].next_desc = cpu_to_le32(priv->td0_pool_dma);
+ priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
return 0;
@@ -704,7 +706,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
}
if (i > 0)
- priv->apTD1Rings[i-1].next_desc = cpu_to_le32(priv->td1_pool_dma);
+ priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
return 0;
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 9725de3bca6a..bfd598a93b04 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -97,10 +97,7 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
*
*/
-void
-PSvDisablePowerSaving(
- struct vnt_private *priv
-)
+void PSvDisablePowerSaving(struct vnt_private *priv)
{
/* disable power saving hw function */
MACbPSWakeup(priv);
@@ -126,10 +123,7 @@ PSvDisablePowerSaving(
*
*/
-bool
-PSbIsNextTBTTWakeUp(
- struct vnt_private *priv
-)
+bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &hw->conf;
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index e80fed69bafe..fb2855e686a7 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -436,7 +436,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -558,7 +558,8 @@ static bool RFbAL2230Init(struct vnt_private *priv)
MACvTimer0MicroSDelay(priv, 30);/* 30us */
ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
- ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv,
+ dwAL2230InitTable[CB_AL2230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 042ac67a9709..affb70eba10f 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -61,23 +61,14 @@
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
-bool RFbInit(
- struct vnt_private *priv
-);
+bool RFbInit(struct vnt_private *priv);
bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, u16 uChannel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
-bool RFbRawSetPower(
- struct vnt_private *priv,
- unsigned char byPwr,
- unsigned int rate
-);
+bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
+ unsigned int rate);
-void
-RFvRSSITodBm(
- struct vnt_private *priv,
- unsigned char byCurrRSSI,
- long *pldBm
-);
+void RFvRSSITodBm(struct vnt_private *priv, unsigned char byCurrRSSI,
+ long *pldBm);
/* {{ RobertYu: 20050104 */
bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv, u16 byOldChannel, u16 byNewChannel);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index a14908895b9e..37fcc42ed000 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1289,7 +1289,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
current_rate = rate->hw_value;
if (priv->wCurrentRate != current_rate &&
- !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+ !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
priv->wCurrentRate = current_rate;
RFbSetPower(priv, priv->wCurrentRate,
@@ -1396,7 +1396,8 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
tx_key = info->control.hw_key;
if (tx_key->keylen > 0)
vnt_fill_txkey(hdr, tx_buffer_head->tx_key,
- tx_key, skb, tx_body_size, td_info->mic_hdr);
+ tx_key, skb, tx_body_size,
+ td_info->mic_hdr);
}
return 0;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 856ba97aec4f..4ac85ecb0921 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -249,10 +249,10 @@ static int vnt_init_registers(struct vnt_private *priv)
} else {
priv->tx_antenna_mode = ANT_B;
- if (priv->tx_rx_ant_inv)
- priv->rx_antenna_mode = ANT_A;
- else
- priv->rx_antenna_mode = ANT_B;
+ if (priv->tx_rx_ant_inv)
+ priv->rx_antenna_mode = ANT_A;
+ else
+ priv->rx_antenna_mode = ANT_B;
}
}
@@ -362,7 +362,6 @@ static int vnt_init_registers(struct vnt_private *priv)
goto end;
}
-
ret = vnt_mac_set_led(priv, LEDSTS_TMLEN, 0x38);
if (ret)
goto end;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 4e9cfacf75f2..f9020a4f7bbf 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -112,11 +112,11 @@ static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
frame_length, rate);
if (pkt_type == PK_TYPE_11B)
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, (u16)priv->top_cck_basic_rate);
+ ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
+ (u16)priv->top_cck_basic_rate);
else
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, (u16)priv->top_ofdm_basic_rate);
+ ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
+ (u16)priv->top_ofdm_basic_rate);
if (need_ack)
return data_time + priv->sifs + ack_time;
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
new file mode 100644
index 000000000000..26de6762b942
--- /dev/null
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
@@ -0,0 +1,97 @@
+The WFxxx chip series can be connected via SPI or via SDIO.
+
+SPI
+---
+
+You have to declare the WFxxx chip in your device tree.
+
+Required properties:
+ - compatible: Should be "silabs,wfx-spi"
+ - reg: Chip select address of device
+ - spi-max-frequency: Maximum SPI clocking speed of device in Hz
+ - interrupts-extended: Should contain interrupt line (interrupt-parent +
+    interrupt can also be used). Trigger should be `IRQ_TYPE_EDGE_RISING`.
+
+Optional properties:
+  - reset-gpios: phandle of the gpio that will be used to reset the chip
+    during probe. Without this property, you may encounter issues with warm
+    boot.
+
+Please consult Documentation/devicetree/bindings/spi/spi-bus.txt for optional
+SPI connection related properties.
+
+Example:
+
+&spi1 {
+ wfx {
+ compatible = "silabs,wfx-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_irq &wfx_gpios>;
+ interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_HIGH>;
+ reg = <0>;
+ spi-max-frequency = <42000000>;
+ };
+};
+
+
+SDIO
+----
+
+The driver is able to detect a WFxxx chip on the SDIO bus by matching its
+Vendor ID and Product ID. However, the driver will only provide limited
+features in this case. Thus, declaring the WFxxx chip in the device tree is
+strongly recommended (and may become mandatory in the future).
+
+Required properties:
+ - compatible: Should be "silabs,wfx-sdio"
+ - reg: Should be 1
+
+In addition, it is recommended to declare an mmc-pwrseq on the SDIO host
+above the WFx. Without it, you may encounter issues with warm boot. The
+mmc-pwrseq should be compatible with mmc-pwrseq-simple. Please consult
+Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more
+information.
+
+Example:
+
+/ {
+ wfx_pwrseq: wfx_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_reset>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc1 {
+ mmc-pwrseq = <&wfx_pwrseq>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+ mmc@1 {
+ compatible = "silabs,wfx-sdio";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_wakeup>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+Note that #address-cells and #size-cells should already be defined in the
+mmc1 node, but it is rarely the case.
+
+Common properties
+-----------------
+
+Some properties are recognized by both the SPI and SDIO versions:
+ - wakeup-gpios: phandle of the gpio that will be used to wake up the chip.
+   Without this property, the driver will disable most power saving features.
+ - config-file: Use an alternative file as PDS. Default is `wf200.pds`. Only
+   necessary for development/debug purposes.
+ - slk_key: String representing the hexadecimal value of the secure link key
+   to use. Must contain 64 hexadecimal digits. Not supported in the current
+   version.
+
+The WFx driver also supports `mac-address` and `local-mac-address` as
+described in Documentation/devicetree/bindings/net/ethernet.txt.
+
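As an illustration (not part of this patch), a minimal sketch of how a driver can consume the optional properties above using the standard gpiod and device-property helpers; the function name is hypothetical:

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>
	#include <linux/property.h>

	static int wfx_parse_dt_sketch(struct device *dev)
	{
		struct gpio_desc *wakeup;
		const char *pds = "wf200.pds";	/* default PDS file */

		/* con_id "wakeup" matches the "wakeup-gpios" property */
		wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW);
		if (IS_ERR(wakeup))
			return PTR_ERR(wakeup);
		if (!wakeup)
			dev_warn(dev, "no wakeup GPIO, power saving will be limited\n");

		/* "config-file" overrides the default PDS when present */
		device_property_read_string(dev, "config-file", &pds);
		dev_dbg(dev, "using PDS file %s\n", pds);
		return 0;
	}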
diff --git a/drivers/staging/wfx/Kconfig b/drivers/staging/wfx/Kconfig
new file mode 100644
index 000000000000..83ee4d0ca8c6
--- /dev/null
+++ b/drivers/staging/wfx/Kconfig
@@ -0,0 +1,8 @@
+config WFX
+ tristate "Silicon Labs wireless chips WF200 and further"
+ depends on MAC80211
+ depends on MMC || !MMC # do not allow WFX=y if MMC=m
+ depends on (SPI || MMC)
+ help
+	  This is a driver for Silicon Labs WFxxx series (WF200 and further)
+ chipsets. This chip can be found on SPI or SDIO buses.
diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile
new file mode 100644
index 000000000000..0d9c1ed092f6
--- /dev/null
+++ b/drivers/staging/wfx/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Necessary for CREATE_TRACE_POINTS
+CFLAGS_debug.o = -I$(src)
+
+wfx-y := \
+ bh.o \
+ hwio.o \
+ fwio.o \
+ hif_tx.o \
+ hif_rx.o \
+ queue.o \
+ data_tx.o \
+ data_rx.o \
+ scan.o \
+ sta.o \
+ key.o \
+ main.o \
+ debug.o
+wfx-$(CONFIG_SPI) += bus_spi.o
+wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o
+
+obj-$(CONFIG_WFX) += wfx.o
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
new file mode 100644
index 000000000000..e44772289af8
--- /dev/null
+++ b/drivers/staging/wfx/TODO
@@ -0,0 +1,17 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+
+ - I have to make a decision about secure link support. I can:
+     - drop it completely
+     - keep it in an external patch (my preferred option)
+     - replace the call to mbedtls with the kernel crypto API (necessitates
+       a bunch of work)
+     - pull mbedtls into the kernel (not realistic)
+
+ - mac80211 interface does not (yet) have the expected quality to be placed
+   outside of staging:
+     - Some processing is redundant with mac80211's
+ - Many members from wfx_dev/wfx_vif can be retrieved from mac80211
+ structures
+ - Some functions are too complex
+ - ...
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
new file mode 100644
index 000000000000..2432ba95c2f5
--- /dev/null
+++ b/drivers/staging/wfx/bh.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/gpio/consumer.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "traces.h"
+#include "secure_link.h"
+#include "hif_rx.h"
+#include "hif_api_cmd.h"
+
+static void device_wakeup(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+ if (gpiod_get_value(wdev->pdata.gpio_wakeup))
+ return;
+
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ if (wfx_api_older_than(wdev, 1, 4)) {
+ if (!completion_done(&wdev->hif.ctrl_ready))
+ udelay(2000);
+ } else {
+		// completion.h does not provide any function to wait for a
+		// completion without consuming it (a kind of
+		// wait_for_completion_done_timeout()). So we have to emulate
+		// it.
+ if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
+ msecs_to_jiffies(2) + 1))
+ complete(&wdev->hif.ctrl_ready);
+ else
+			dev_err(wdev->dev, "timeout while waking up chip\n");
+ }
+}
+
+static void device_release(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+}
+
+static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif;
+ size_t alloc_len;
+ size_t computed_len;
+ int release_count;
+ int piggyback = 0;
+
+ WARN(read_len < 4, "corrupted read");
+	WARN(read_len > round_down(0xFFF, 2) * sizeof(u16),
+	     "%s: request exceeds WFx capability", __func__);
+
+ // Add 2 to take into account piggyback size
+ alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
+ skb = dev_alloc_skb(alloc_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (wfx_data_read(wdev, skb->data, alloc_len))
+ goto err;
+
+ piggyback = le16_to_cpup((u16 *)(skb->data + alloc_len - 2));
+ _trace_piggyback(piggyback, false);
+
+ hif = (struct hif_msg *)skb->data;
+ WARN(hif->encrypted & 0x1, "unsupported encryption type");
+ if (hif->encrypted == 0x2) {
+ if (wfx_sl_decode(wdev, (void *)hif)) {
+ dev_kfree_skb(skb);
+			// If the frame was a confirmation, expect trouble in
+			// the next exchange. However, it is harmless to fail
+			// to decode an indication frame, so try to continue.
+			// Anyway, the piggyback value is probably correct.
+ return piggyback;
+ }
+ le16_to_cpus(&hif->len);
+ computed_len = round_up(hif->len - sizeof(hif->len), 16)
+ + sizeof(struct hif_sl_msg)
+ + sizeof(struct hif_sl_tag);
+ } else {
+ le16_to_cpus(&hif->len);
+ computed_len = round_up(hif->len, 2);
+ }
+ if (computed_len != read_len) {
+ dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
+ computed_len, read_len);
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
+ hif, read_len, true);
+ goto err;
+ }
+
+ if (!(hif->id & HIF_ID_IS_INDICATION)) {
+ (*is_cnf)++;
+ if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
+ release_count = le32_to_cpu(((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs);
+ else
+ release_count = 1;
+ WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
+ wdev->hif.tx_buffers_used -= release_count;
+ if (!wdev->hif.tx_buffers_used)
+ wake_up(&wdev->hif.tx_buffers_empty);
+ }
+ _trace_hif_recv(hif, wdev->hif.tx_buffers_used);
+
+ if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
+ if (hif->seqnum != wdev->hif.rx_seqnum)
+ dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
+ hif->seqnum, wdev->hif.rx_seqnum);
+ wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
+ }
+
+ skb_put(skb, hif->len);
+	// wfx_handle_rx takes care of the SKB lifetime
+ wfx_handle_rx(wdev, skb);
+
+ return piggyback;
+
+err:
+ if (skb)
+ dev_kfree_skb(skb);
+ return -EIO;
+}
+
+static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
+{
+ size_t len;
+ int i;
+ int ctrl_reg, piggyback;
+
+ piggyback = 0;
+ for (i = 0; i < max_msg; i++) {
+ if (piggyback & CTRL_NEXT_LEN_MASK)
+ ctrl_reg = piggyback;
+ else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
+ else
+ ctrl_reg = 0;
+ if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
+ return i;
+		// ctrl_reg is expressed in units of 16-bit words
+ len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
+ piggyback = rx_helper(wdev, len, num_cnf);
+ if (piggyback < 0)
+ return i;
+ if (!(piggyback & CTRL_WLAN_READY))
+ dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
+ piggyback);
+ }
+ if (piggyback & CTRL_NEXT_LEN_MASK) {
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
+ complete(&wdev->hif.ctrl_ready);
+ if (ctrl_reg)
+ dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
+ ctrl_reg, piggyback);
+ }
+ return i;
+}
+
+static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
+{
+ int ret;
+ void *data;
+ bool is_encrypted = false;
+ size_t len = le16_to_cpu(hif->len);
+
+ WARN(len < sizeof(*hif), "try to send corrupted data");
+
+ hif->seqnum = wdev->hif.tx_seqnum;
+ wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
+
+ if (wfx_is_secure_command(wdev, hif->id)) {
+ len = round_up(len - sizeof(hif->len), 16) + sizeof(hif->len) +
+ sizeof(struct hif_sl_msg_hdr) +
+ sizeof(struct hif_sl_tag);
+		// AES supports in-place encryption. However, mac80211 accesses
+		// the 802.11 header after the frame was sent (to get the MAC
+		// addresses). So, keep the original buffer untouched.
+ data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ goto end;
+ is_encrypted = true;
+ ret = wfx_sl_encode(wdev, hif, data);
+ if (ret)
+ goto end;
+ } else {
+ data = hif;
+ }
+	WARN(len > wdev->hw_caps.size_inp_ch_buf,
+	     "%s: request exceeds WFx capability: %zu > %d\n", __func__,
+ len, wdev->hw_caps.size_inp_ch_buf);
+ len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
+ ret = wfx_data_write(wdev, data, len);
+ if (ret)
+ goto end;
+
+ wdev->hif.tx_buffers_used++;
+ _trace_hif_send(hif, wdev->hif.tx_buffers_used);
+end:
+ if (is_encrypted)
+ kfree(data);
+}
+
+static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
+{
+ struct hif_msg *hif;
+ int i;
+
+ for (i = 0; i < max_msg; i++) {
+ hif = NULL;
+ if (wdev->hif.tx_buffers_used < wdev->hw_caps.num_inp_ch_bufs) {
+ if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+ hif = wdev->hif_cmd.buf_send;
+ } else {
+ hif = wfx_tx_queues_get(wdev);
+ }
+ }
+ if (!hif)
+ return i;
+ tx_helper(wdev, hif);
+ }
+ return i;
+}
+
+/* In SDIO mode, it is necessary to make an access to a register to
+ * acknowledge the last received message. This acknowledgement could be
+ * restricted to SDIO mode and to the cases where the last operation was
+ * an rx.
+ */
+static void ack_sdio_data(struct wfx_dev *wdev)
+{
+ u32 cfg_reg;
+
+ config_reg_read(wdev, &cfg_reg);
+ if (cfg_reg & 0xFF) {
+ dev_warn(wdev->dev, "chip reports errors: %02x\n",
+ cfg_reg & 0xFF);
+ config_reg_write_bits(wdev, 0xFF, 0x00);
+ }
+}
+
+static void bh_work(struct work_struct *work)
+{
+ struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
+ int stats_req = 0, stats_cnf = 0, stats_ind = 0;
+ bool release_chip = false, last_op_is_rx = false;
+ int num_tx, num_rx;
+
+ device_wakeup(wdev);
+ do {
+ num_tx = bh_work_tx(wdev, 32);
+ stats_req += num_tx;
+ if (num_tx)
+ last_op_is_rx = false;
+ num_rx = bh_work_rx(wdev, 32, &stats_cnf);
+ stats_ind += num_rx;
+ if (num_rx)
+ last_op_is_rx = true;
+ } while (num_rx || num_tx);
+ stats_ind -= stats_cnf;
+
+ if (last_op_is_rx)
+ ack_sdio_data(wdev);
+ if (!wdev->hif.tx_buffers_used && !work_pending(work) &&
+ !atomic_read(&wdev->scan_in_progress)) {
+ device_release(wdev);
+ release_chip = true;
+ }
+ _trace_bh_stats(stats_ind, stats_req, stats_cnf,
+ wdev->hif.tx_buffers_used, release_chip);
+}
+
+/*
+ * An IRQ from the chip occurred
+ */
+void wfx_bh_request_rx(struct wfx_dev *wdev)
+{
+ u32 cur, prev;
+
+ control_reg_read(wdev, &cur);
+ prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
+ complete(&wdev->hif.ctrl_ready);
+ queue_work(system_highpri_wq, &wdev->hif.bh);
+
+ if (!(cur & CTRL_NEXT_LEN_MASK))
+ dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
+ cur);
+ if (prev != 0)
+ dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
+ prev, cur);
+}
+
+/*
+ * The driver wants to send data
+ */
+void wfx_bh_request_tx(struct wfx_dev *wdev)
+{
+ queue_work(system_highpri_wq, &wdev->hif.bh);
+}
+
+void wfx_bh_register(struct wfx_dev *wdev)
+{
+ INIT_WORK(&wdev->hif.bh, bh_work);
+ init_completion(&wdev->hif.ctrl_ready);
+ init_waitqueue_head(&wdev->hif.tx_buffers_empty);
+}
+
+void wfx_bh_unregister(struct wfx_dev *wdev)
+{
+ flush_work(&wdev->hif.bh);
+}
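The device_wakeup() comment above notes that completion.h has no way to wait for a completion without consuming it. A minimal sketch of the idiom used there, with a hypothetical helper name: on success the token is immediately given back, leaving the completion state unchanged.

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	static bool completion_peek_timeout(struct completion *c, unsigned long ms)
	{
		/* wait_for_completion_timeout() consumes the token... */
		if (wait_for_completion_timeout(c, msecs_to_jiffies(ms) + 1)) {
			complete(c);	/* ...so give it back on success */
			return true;
		}
		return false;
	}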
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
new file mode 100644
index 000000000000..93ca98424e0b
--- /dev/null
+++ b/drivers/staging/wfx/bh.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interrupt bottom half.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BH_H
+#define WFX_BH_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct wfx_dev;
+
+struct wfx_hif {
+ struct work_struct bh;
+ struct completion ctrl_ready;
+ wait_queue_head_t tx_buffers_empty;
+ atomic_t ctrl_reg;
+ int rx_seqnum;
+ int tx_seqnum;
+ int tx_buffers_used;
+};
+
+void wfx_bh_register(struct wfx_dev *wdev);
+void wfx_bh_unregister(struct wfx_dev *wdev);
+void wfx_bh_request_rx(struct wfx_dev *wdev);
+void wfx_bh_request_tx(struct wfx_dev *wdev);
+
+#endif /* WFX_BH_H */
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
new file mode 100644
index 000000000000..62d6ecabe4cb
--- /dev/null
+++ b/drivers/staging/wfx/bus.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common bus abstraction layer.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BUS_H
+#define WFX_BUS_H
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+
+#define WFX_REG_CONFIG 0x0
+#define WFX_REG_CONTROL 0x1
+#define WFX_REG_IN_OUT_QUEUE 0x2
+#define WFX_REG_AHB_DPORT 0x3
+#define WFX_REG_BASE_ADDR 0x4
+#define WFX_REG_SRAM_DPORT 0x5
+#define WFX_REG_SET_GEN_R_W 0x6
+#define WFX_REG_FRAME_OUT 0x7
+
+struct hwbus_ops {
+ int (*copy_from_io)(void *bus_priv, unsigned int addr,
+ void *dst, size_t count);
+ int (*copy_to_io)(void *bus_priv, unsigned int addr,
+ const void *src, size_t count);
+ void (*lock)(void *bus_priv);
+ void (*unlock)(void *bus_priv);
+ size_t (*align_size)(void *bus_priv, size_t size);
+};
+
+extern struct sdio_driver wfx_sdio_driver;
+extern struct spi_driver wfx_spi_driver;
+
+#endif
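A minimal sketch (illustrative, not part of the patch) of how the core is expected to use the hwbus_ops abstraction above: lock/unlock bracket every transfer, so the SDIO and SPI backends never need to know about the upper layers.

	#include <linux/types.h>

	static int wfx_read_reg_sketch(const struct hwbus_ops *ops, void *bus_priv,
				       unsigned int reg, u32 *val)
	{
		int ret;

		ops->lock(bus_priv);
		ret = ops->copy_from_io(bus_priv, reg, val, sizeof(*val));
		ops->unlock(bus_priv);
		return ret;
	}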
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
new file mode 100644
index 000000000000..f8901164c206
--- /dev/null
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SDIO interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static const struct wfx_platform_data wfx_sdio_pdata = {
+ .file_fw = "wfm_wf200",
+ .file_pds = "wf200.pds",
+};
+
+struct wfx_sdio_priv {
+ struct sdio_func *func;
+ struct wfx_dev *core;
+ u8 buf_id_tx;
+ u8 buf_id_rx;
+ int of_irq;
+};
+
+static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id,
+ void *dst, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+	WARN(reg_id > 7, "chip only has 8 registers");
+	WARN(((uintptr_t)dst) & 3, "unaligned buffer address");
+	WARN(count & 3, "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= (bus->buf_id_rx + 1) << 7;
+ ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_rx = (bus->buf_id_rx + 1) % 4;
+
+ return ret;
+}
+
+static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id,
+ const void *src, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+	WARN(reg_id > 7, "chip only has 8 registers");
+	WARN(((uintptr_t)src) & 3, "unaligned buffer address");
+	WARN(count & 3, "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= bus->buf_id_tx << 7;
+ // FIXME: discards 'const' qualifier for src
+ ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
+
+ return ret;
+}
+
+static void wfx_sdio_lock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_claim_host(bus->func);
+}
+
+static void wfx_sdio_unlock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_release_host(bus->func);
+}
+
+static void wfx_sdio_irq_handler(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ if (bus->core)
+ wfx_bh_request_rx(bus->core);
+ else
+ WARN(!bus->core, "race condition in driver init/deinit");
+}
+
+static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ if (!bus->core) {
+ WARN(!bus->core, "race condition in driver init/deinit");
+ return IRQ_NONE;
+ }
+ sdio_claim_host(bus->func);
+ wfx_bh_request_rx(bus->core);
+ sdio_release_host(bus->func);
+ return IRQ_HANDLED;
+}
+
+static int wfx_sdio_irq_subscribe(struct wfx_sdio_priv *bus)
+{
+ int ret;
+
+ if (bus->of_irq) {
+ ret = request_irq(bus->of_irq, wfx_sdio_irq_handler_ext,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ } else {
+ sdio_claim_host(bus->func);
+ ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
+ sdio_release_host(bus->func);
+ }
+ return ret;
+}
+
+static int wfx_sdio_irq_unsubscribe(struct wfx_sdio_priv *bus)
+{
+ int ret;
+
+ if (bus->of_irq) {
+ free_irq(bus->of_irq, bus);
+ ret = 0;
+ } else {
+ sdio_claim_host(bus->func);
+ ret = sdio_release_irq(bus->func);
+ sdio_release_host(bus->func);
+ }
+ return ret;
+}
+
+static size_t wfx_sdio_align_size(void *priv, size_t size)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ return sdio_align_size(bus->func, size);
+}
+
+static const struct hwbus_ops wfx_sdio_hwbus_ops = {
+ .copy_from_io = wfx_sdio_copy_from_io,
+ .copy_to_io = wfx_sdio_copy_to_io,
+ .lock = wfx_sdio_lock,
+ .unlock = wfx_sdio_unlock,
+ .align_size = wfx_sdio_align_size,
+};
+
+static const struct of_device_id wfx_sdio_of_match[];
+static int wfx_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct device_node *np = func->dev.of_node;
+ struct wfx_sdio_priv *bus;
+ int ret;
+
+ if (func->num != 1) {
+ dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", func->num);
+ return -ENODEV;
+ }
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ if (np) {
+ if (!of_match_node(wfx_sdio_of_match, np)) {
+ dev_warn(&func->dev, "no compatible device found in DT\n");
+ return -ENODEV;
+ }
+ bus->of_irq = irq_of_parse_and_map(np, 0);
+ } else {
+ dev_warn(&func->dev,
+ "device is not declared in DT, features will be limited\n");
+ // FIXME: ignore VID/PID and only rely on device tree
+ // return -ENODEV;
+ }
+
+ bus->func = func;
+ sdio_set_drvdata(func, bus);
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0 |
+ MMC_QUIRK_BLKSZ_FOR_BYTE_MODE |
+ MMC_QUIRK_BROKEN_BYTE_MODE_512;
+
+ sdio_claim_host(func);
+ ret = sdio_enable_func(func);
+	// Blocks of 64 bytes are more efficient than 512 B for frame sizes < 4k
+ sdio_set_block_size(func, 64);
+ sdio_release_host(func);
+ if (ret)
+ goto err0;
+
+ ret = wfx_sdio_irq_subscribe(bus);
+ if (ret)
+ goto err1;
+
+ bus->core = wfx_init_common(&func->dev, &wfx_sdio_pdata,
+ &wfx_sdio_hwbus_ops, bus);
+ if (!bus->core) {
+ ret = -EIO;
+ goto err2;
+ }
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ goto err3;
+
+ return 0;
+
+err3:
+ wfx_free_common(bus->core);
+err2:
+ wfx_sdio_irq_unsubscribe(bus);
+err1:
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+err0:
+ return ret;
+}
+
+static void wfx_sdio_remove(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ wfx_release(bus->core);
+ wfx_free_common(bus->core);
+ wfx_sdio_irq_unsubscribe(bus);
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+}
+
+#define SDIO_VENDOR_ID_SILABS 0x0000
+#define SDIO_DEVICE_ID_SILABS_WF200 0x1000
+static const struct sdio_device_id wfx_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_SILABS, SDIO_DEVICE_ID_SILABS_WF200) },
+ // FIXME: ignore VID/PID and only rely on device tree
+ // { SDIO_DEVICE(SDIO_ANY_ID, SDIO_ANY_ID) },
+ { },
+};
+MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_sdio_of_match[] = {
+ { .compatible = "silabs,wfx-sdio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+#endif
+
+struct sdio_driver wfx_sdio_driver = {
+ .name = "wfx-sdio",
+ .id_table = wfx_sdio_ids,
+ .probe = wfx_sdio_probe,
+ .remove = wfx_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(wfx_sdio_of_match),
+ }
+};
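A sketch of the queue-mode addressing used by the two copy helpers above, as inferred from the code (not from a datasheet): the register id occupies bits 2 and up, and for WFX_REG_IN_OUT_QUEUE a rotating buffer id is placed at bit 7 (rx ids cycle over 4 buffers, tx ids over 32).

	#include <linux/types.h>

	static unsigned int wfx_sdio_addr_sketch(unsigned int reg_id, u8 buf_id)
	{
		unsigned int sdio_addr = reg_id << 2;

		if (reg_id == WFX_REG_IN_OUT_QUEUE)
			sdio_addr |= buf_id << 7;
		return sdio_addr;
	}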
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
new file mode 100644
index 000000000000..ab0cda1e124f
--- /dev/null
+++ b/drivers/staging/wfx/bus_spi.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, Sagrad Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static int gpio_reset = -2;
+module_param(gpio_reset, int, 0644);
+MODULE_PARM_DESC(gpio_reset, "gpio number for reset. -1 for none.");
+
+#define SET_WRITE 0x7FFF /* usage: and operation */
+#define SET_READ 0x8000 /* usage: or operation */
+
+static const struct wfx_platform_data wfx_spi_pdata = {
+ .file_fw = "wfm_wf200",
+ .file_pds = "wf200.pds",
+ .use_rising_clk = true,
+};
+
+struct wfx_spi_priv {
+ struct spi_device *func;
+ struct wfx_dev *core;
+ struct gpio_desc *gpio_reset;
+ struct work_struct request_rx;
+ bool need_swab;
+};
+
+/*
+ * The WFx chip reads data 16 bits at a time and places them directly into a
+ * (little endian) CPU register. So, the chip expects a byte order like
+ * "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is "B3 B2 B1 B0").
+ *
+ * A little endian host with bits_per_word == 16 should do the right job
+ * natively. The code below adds support for big endian hosts and for the
+ * commonly used 8-bit SPI mode.
+ */
+static int wfx_spi_copy_from_io(void *priv, unsigned int addr,
+ void *dst, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2) | SET_READ;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .rx_buf = dst,
+ .len = count,
+ };
+ u16 *dst16 = dst;
+ int ret, i;
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+
+ cpu_to_le16s(&regaddr);
+ if (bus->need_swab)
+ swab16s(&regaddr);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&dst16[i]);
+ return ret;
+}
+
+static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
+ const void *src, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2);
+ // FIXME: use a bounce buffer
+ u16 *src16 = (void *)src;
+ int ret, i;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .tx_buf = src,
+ .len = count,
+ };
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+ WARN(regaddr & SET_READ, "bad addr or size overflow");
+
+ cpu_to_le16s(&regaddr);
+
+ if (bus->need_swab)
+ swab16s(&regaddr);
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+ return ret;
+}
+
+static void wfx_spi_lock(void *priv)
+{
+}
+
+static void wfx_spi_unlock(void *priv)
+{
+}
+
+static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ if (!bus->core) {
+ WARN(!bus->core, "race condition in driver init/deinit");
+ return IRQ_NONE;
+ }
+ queue_work(system_highpri_wq, &bus->request_rx);
+ return IRQ_HANDLED;
+}
+
+static void wfx_spi_request_rx(struct work_struct *work)
+{
+ struct wfx_spi_priv *bus =
+ container_of(work, struct wfx_spi_priv, request_rx);
+
+ wfx_bh_request_rx(bus->core);
+}
+
+static size_t wfx_spi_align_size(void *priv, size_t size)
+{
+	// Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned
+ return ALIGN(size, 4);
+}
+
+static const struct hwbus_ops wfx_spi_hwbus_ops = {
+ .copy_from_io = wfx_spi_copy_from_io,
+ .copy_to_io = wfx_spi_copy_to_io,
+ .lock = wfx_spi_lock,
+ .unlock = wfx_spi_unlock,
+ .align_size = wfx_spi_align_size,
+};
+
+static int wfx_spi_probe(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus;
+ int ret;
+
+ if (!func->bits_per_word)
+ func->bits_per_word = 16;
+ ret = spi_setup(func);
+ if (ret)
+ return ret;
+	// The trace below is also displayed by spi_setup() if compiled with DEBUG
+ dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n",
+ func->chip_select, func->mode, func->bits_per_word,
+ func->max_speed_hz);
+ if (func->bits_per_word != 16 && func->bits_per_word != 8)
+ dev_warn(&func->dev, "unusual bits/word value: %d\n",
+ func->bits_per_word);
+ if (func->max_speed_hz > 49000000)
+ dev_warn(&func->dev, "%dHz is a very high speed\n",
+ func->max_speed_hz);
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ bus->func = func;
+ if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ bus->need_swab = true;
+ spi_set_drvdata(func, bus);
+
+ bus->gpio_reset = wfx_get_gpio(&func->dev, gpio_reset, "reset");
+ if (!bus->gpio_reset) {
+ dev_warn(&func->dev, "try to load firmware anyway\n");
+ } else {
+ gpiod_set_value(bus->gpio_reset, 0);
+ udelay(100);
+ gpiod_set_value(bus->gpio_reset, 1);
+ udelay(2000);
+ }
+
+ ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&bus->request_rx, wfx_spi_request_rx);
+ bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata,
+ &wfx_spi_hwbus_ops, bus);
+ if (!bus->core)
+ return -EIO;
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ wfx_free_common(bus->core);
+
+ return ret;
+}
+
+/* Disconnect Function to be called by SPI stack when device is disconnected */
+static int wfx_spi_disconnect(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+ wfx_release(bus->core);
+ wfx_free_common(bus->core);
+	// A few IRQs may be sent during device release. Hopefully, no IRQ
+	// should happen after wdev/wvif are released.
+ devm_free_irq(&func->dev, func->irq, bus);
+ flush_work(&bus->request_rx);
+ return 0;
+}
+
+/*
+ * For dynamic driver binding, the kernel does not use OF to match the driver.
+ * It only uses the modalias, which is a copy of the DT 'compatible' property
+ * with the vendor prefix stripped.
+ */
+static const struct spi_device_id wfx_spi_id[] = {
+ { "wfx-spi", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, wfx_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_spi_of_match[] = {
+ { .compatible = "silabs,wfx-spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
+#endif
+
+struct spi_driver wfx_spi_driver = {
+ .driver = {
+ .name = "wfx-spi",
+ .of_match_table = of_match_ptr(wfx_spi_of_match),
+ },
+ .id_table = wfx_spi_id,
+ .probe = wfx_spi_probe,
+ .remove = wfx_spi_disconnect,
+};
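A worked example of the byte-order fixup described at the top of this file: the chip expects "B1 B0 B3 B2" on the wire, while an 8-bit SPI controller sends bytes in memory order ("B0 B1 B2 B3"), so every 16-bit word must be swapped before the transfer (and swapped back after a read of WFX_REG_CONFIG). A minimal sketch:

	#include <linux/swab.h>
	#include <linux/types.h>

	static void wfx_spi_swab_buf_sketch(u16 *buf, size_t nwords)
	{
		size_t i;

		for (i = 0; i < nwords; i++)
			swab16s(&buf[i]);	/* "B0 B1" becomes "B1 B0" */
	}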
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
new file mode 100644
index 000000000000..e7fcce8d0cc4
--- /dev/null
+++ b/drivers/staging/wfx/data_rx.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "data_rx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+
+static int wfx_handle_pspoll(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
+ int link_id = 0;
+ u32 pspoll_mask = 0;
+ int i;
+
+ if (wvif->state != WFX_STATE_AP)
+ return 1;
+ if (!ether_addr_equal(wvif->vif->addr, pspoll->bssid))
+ return 1;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, pspoll->ta);
+ if (sta)
+ link_id = ((struct wfx_sta_priv *)&sta->drv_priv)->link_id;
+ rcu_read_unlock();
+ if (link_id)
+ pspoll_mask = BIT(link_id);
+ else
+ return 1;
+
+ wvif->pspoll_mask |= pspoll_mask;
+	/* Do not report pspolls if data for the given link id is already queued. */
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ if (wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ pspoll_mask)) {
+ wfx_bh_request_tx(wvif->wdev);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
+ size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
+ size_t iv_len, icv_len;
+
+	/* Oops... There is no fast way to ask mac80211 about
+	 * IV/ICV lengths. Even the defines are not exposed.
+	 */
+ switch (arg->rx_flags.encryp) {
+ case HIF_RI_FLAGS_WEP_ENCRYPTED:
+ iv_len = 4 /* WEP_IV_LEN */;
+ icv_len = 4 /* WEP_ICV_LEN */;
+ break;
+ case HIF_RI_FLAGS_TKIP_ENCRYPTED:
+ iv_len = 8 /* TKIP_IV_LEN */;
+ icv_len = 4 /* TKIP_ICV_LEN */
+ + 8 /*MICHAEL_MIC_LEN*/;
+ break;
+ case HIF_RI_FLAGS_AES_ENCRYPTED:
+ iv_len = 8 /* CCMP_HDR_LEN */;
+ icv_len = 8 /* CCMP_MIC_LEN */;
+ break;
+ case HIF_RI_FLAGS_WAPI_ENCRYPTED:
+ iv_len = 18 /* WAPI_HDR_LEN */;
+ icv_len = 16 /* WAPI_MIC_LEN */;
+ break;
+ default:
+ dev_err(wdev->dev, "unknown encryption type %d\n",
+ arg->rx_flags.encryp);
+ return -EIO;
+ }
+
+ /* Firmware strips ICV in case of MIC failure. */
+ if (arg->status == HIF_STATUS_MICFAILURE)
+ icv_len = 0;
+
+ if (skb->len < hdrlen + iv_len + icv_len) {
+ dev_warn(wdev->dev, "malformed SDU received\n");
+ return -EIO;
+ }
+
+ /* Remove IV, ICV and MIC */
+ skb_trim(skb, skb->len - icv_len);
+ memmove(skb->data + iv_len, skb->data, hdrlen);
+ skb_pull(skb, iv_len);
+ return 0;
+
+}
+
+void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
+ struct sk_buff *skb)
+{
+ int link_id = arg->rx_flags.peer_sta_id;
+ struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct wfx_link_entry *entry = NULL;
+ bool early_data = false;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ // FIXME: Why do we drop these frames?
+ if (!arg->rcpi_rssi &&
+ (ieee80211_is_probe_resp(frame->frame_control) ||
+ ieee80211_is_beacon(frame->frame_control)))
+ goto drop;
+
+ if (link_id && link_id <= WFX_MAX_STA_IN_AP_MODE) {
+ entry = &wvif->link_id_db[link_id - 1];
+ entry->timestamp = jiffies;
+ if (entry->status == WFX_LINK_SOFT &&
+ ieee80211_is_data(frame->frame_control))
+ early_data = true;
+ }
+
+ if (arg->status == HIF_STATUS_MICFAILURE)
+ hdr->flag |= RX_FLAG_MMIC_ERROR;
+ else if (arg->status)
+ goto drop;
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ dev_warn(wvif->wdev->dev, "malformed SDU received\n");
+ goto drop;
+ }
+
+ if (ieee80211_is_pspoll(frame->frame_control))
+ if (wfx_handle_pspoll(wvif, skb))
+ goto drop;
+
+ hdr->band = NL80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(arg->channel_number,
+ hdr->band);
+
+ if (arg->rxed_rate >= 14) {
+ hdr->encoding = RX_ENC_HT;
+ hdr->rate_idx = arg->rxed_rate - 14;
+ } else if (arg->rxed_rate >= 4) {
+ hdr->rate_idx = arg->rxed_rate - 2;
+ } else {
+ hdr->rate_idx = arg->rxed_rate;
+ }
+
+ hdr->signal = arg->rcpi_rssi / 2 - 110;
+ hdr->antenna = 0;
+
+ if (arg->rx_flags.encryp) {
+ if (wfx_drop_encrypt_data(wvif->wdev, arg, skb))
+ goto drop;
+ hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+ if (arg->rx_flags.encryp == HIF_RI_FLAGS_TKIP_ENCRYPTED)
+ hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+
+ /* Filter block ACK negotiation: fully controlled by firmware */
+ if (ieee80211_is_action(frame->frame_control) &&
+ arg->rx_flags.match_uc_addr &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ goto drop;
+ if (ieee80211_is_beacon(frame->frame_control) &&
+ !arg->status && wvif->vif &&
+ ether_addr_equal(ieee80211_get_SA(frame),
+ wvif->vif->bss_conf.bssid)) {
+ const u8 *tim_ie;
+ u8 *ies = mgmt->u.beacon.variable;
+ size_t ies_len = skb->len - (ies - skb->data);
+
+ tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+ if (tim_ie) {
+ struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2];
+
+ if (wvif->dtim_period != tim->dtim_period) {
+ wvif->dtim_period = tim->dtim_period;
+ schedule_work(&wvif->set_beacon_wakeup_period_work);
+ }
+ }
+
+ /* Disable beacon filter once we're associated... */
+ if (wvif->disable_beacon_filter &&
+ (wvif->vif->bss_conf.assoc ||
+ wvif->vif->bss_conf.ibss_joined)) {
+ wvif->disable_beacon_filter = false;
+ schedule_work(&wvif->update_filtering_work);
+ }
+ }
+
+ if (early_data) {
+ spin_lock_bh(&wvif->ps_state_lock);
+ /* Double-check status with lock held */
+ if (entry->status == WFX_LINK_SOFT)
+ skb_queue_tail(&entry->rx_queue, skb);
+ else
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ } else {
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ }
+
+ return;
+
+drop:
+ dev_kfree_skb(skb);
+}
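The signal computation in wfx_rx_cb() above follows the usual IEEE 802.11 RCPI convention, dBm = RCPI / 2 - 110: an RCPI of 80, for example, maps to -70 dBm. As a standalone sketch:

	#include <linux/types.h>

	static int wfx_rcpi_to_dbm_sketch(u8 rcpi)
	{
		return rcpi / 2 - 110;	/* RCPI 80 -> -70 dBm */
	}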
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
new file mode 100644
index 000000000000..a50ce352bc5e
--- /dev/null
+++ b/drivers/staging/wfx/data_rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_RX_H
+#define WFX_DATA_RX_H
+
+#include "hif_api_cmd.h"
+
+struct wfx_vif;
+struct sk_buff;
+
+void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
+ struct sk_buff *skb);
+
+#endif /* WFX_DATA_RX_H */
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
new file mode 100644
index 000000000000..b722e9773232
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.c
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "data_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+#include "queue.h"
+#include "debug.h"
+#include "traces.h"
+#include "hif_tx_mib.h"
+
+#define WFX_INVALID_RATE_ID (0xFF)
+#define WFX_LINK_ID_NO_ASSOC 15
+#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+static int wfx_get_hw_rate(struct wfx_dev *wdev,
+ const struct ieee80211_tx_rate *rate)
+{
+ if (rate->idx < 0)
+ return -1;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ if (rate->idx > 7) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
+ return rate->idx + 14;
+ }
+	// WFx only supports 2GHz; otherwise the band information should be
+	// retrieved from ieee80211_tx_info
+ return wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates[rate->idx].hw_value;
+}
+
+/* TX policy cache implementation */
+
+static void wfx_tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
+ struct ieee80211_tx_rate *rates)
+{
+ int i;
+ size_t count;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ WARN(rates[0].idx < 0, "invalid rate policy");
+ memset(policy, 0, sizeof(*policy));
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ if (rates[i].idx < 0)
+ break;
+ count = i;
+
+	/* HACK!!! The device has problems (at least) switching from
+	 * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+	 * of time (100-200 ms), leading to a noticeable throughput drop.
+	 * As a workaround, additional g-rates are injected into the
+	 * policy.
+	 */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+ /* Fallback to 1 Mbps is a really bad thing,
+ * so let's try to increase probability of
+ * successful transmission on the lowest g rate
+ * even more
+ */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust amount of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust amount of rates defined */
+ count += 1;
+ }
+ }
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ int rateid;
+ u8 count;
+
+ if (rates[i].idx < 0)
+ break;
+ WARN_ON(rates[i].count > 15);
+ rateid = wfx_get_hw_rate(wdev, &rates[i]);
+ // Pack two values in each byte of policy->rates
+ count = rates[i].count;
+ if (rateid % 2)
+ count <<= 4;
+ policy->rates[rateid / 2] |= count;
+ }
+}
+
+static bool tx_policy_is_equal(const struct tx_policy *a,
+ const struct tx_policy *b)
+{
+ return !memcmp(a->rates, b->rates, sizeof(a->rates));
+}
+
+static int wfx_tx_policy_find(struct tx_policy_cache *cache,
+ struct tx_policy *wanted)
+{
+ struct tx_policy *it;
+
+ list_for_each_entry(it, &cache->used, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ list_for_each_entry(it, &cache->free, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ return -1;
+}
+
+static void wfx_tx_policy_use(struct tx_policy_cache *cache,
+ struct tx_policy *entry)
+{
+ ++entry->usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static int wfx_tx_policy_release(struct tx_policy_cache *cache,
+ struct tx_policy *entry)
+{
+ int ret = --entry->usage_count;
+
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+static int wfx_tx_policy_get(struct wfx_vif *wvif,
+ struct ieee80211_tx_rate *rates,
+ bool *renew)
+{
+ int idx;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct tx_policy wanted;
+
+ wfx_tx_policy_build(wvif, &wanted, rates);
+
+ spin_lock_bh(&cache->lock);
+ if (WARN_ON(list_empty(&cache->free))) {
+ spin_unlock_bh(&cache->lock);
+ return WFX_INVALID_RATE_ID;
+ }
+ idx = wfx_tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ *renew = false;
+ } else {
+ struct tx_policy *entry;
+ *renew = true;
+		/* If the policy is not found, create a new one
+		 * using the oldest entry in the "free" list
+		 */
+ entry = list_entry(cache->free.prev, struct tx_policy, link);
+ memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
+ entry->uploaded = 0;
+ entry->usage_count = 0;
+ idx = entry - cache->cache;
+ }
+ wfx_tx_policy_use(cache, &cache->cache[idx]);
+ if (list_empty(&cache->free)) {
+ /* Lock TX queues. */
+ wfx_tx_queues_lock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
+{
+ int usage, locked;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
+ if (locked && !usage) {
+ /* Unlock TX queues. */
+ wfx_tx_queues_unlock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+}
+
+static int wfx_tx_policy_upload(struct wfx_vif *wvif)
+{
+ int i;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct hif_mib_set_tx_rate_retry_policy *arg =
+ kzalloc(struct_size(arg,
+ tx_rate_retry_policy,
+ HIF_MIB_NUM_TX_RATE_RETRY_POLICIES),
+ GFP_KERNEL);
+ struct hif_mib_tx_rate_retry_policy *dst;
+
+ spin_lock_bh(&cache->lock);
+ /* Upload only modified entries. */
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
+ struct tx_policy *src = &cache->cache[i];
+
+ if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
+ dst = arg->tx_rate_retry_policy +
+ arg->num_tx_rate_policies;
+
+ dst->policy_index = i;
+ dst->short_retry_count = 255;
+ dst->long_retry_count = 255;
+ dst->first_rate_sel = 1;
+ dst->terminate = 1;
+ dst->count_init = 1;
+ memcpy(&dst->rates, src->rates, sizeof(src->rates));
+ src->uploaded = 1;
+ arg->num_tx_rate_policies++;
+ }
+ }
+ spin_unlock_bh(&cache->lock);
+ hif_set_tx_rate_retry_policy(wvif, arg);
+ kfree(arg);
+ return 0;
+}
+
+static void wfx_tx_policy_upload_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, tx_policy_upload_work);
+
+ wfx_tx_policy_upload(wvif);
+
+ wfx_tx_unlock(wvif->wdev);
+ wfx_tx_queues_unlock(wvif->wdev);
+}
+
+void wfx_tx_policy_init(struct wfx_vif *wvif)
+{
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+ INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+/* Link ID related functions */
+
+static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+ unsigned long max_inactivity = 0;
+ unsigned long now = jiffies;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (!wvif->link_id_db[i].status) {
+ ret = i + 1;
+ break;
+ } else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
+ !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
+ unsigned long inactivity =
+ now - wvif->link_id_db[i].timestamp;
+
+ if (inactivity < max_inactivity)
+ continue;
+ max_inactivity = inactivity;
+ ret = i + 1;
+ }
+ }
+
+ if (ret) {
+ struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
+
+ entry->status = WFX_LINK_RESERVE;
+ ether_addr_copy(entry->mac, mac);
+ memset(&entry->buffered, 0, WFX_MAX_TID);
+ skb_queue_head_init(&entry->rx_queue);
+ wfx_tx_lock(wvif->wdev);
+
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ dev_info(wvif->wdev->dev, "no more link-id available\n");
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
+ wvif->link_id_db[i].status) {
+ wvif->link_id_db[i].timestamp = jiffies;
+ ret = i + 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+static int wfx_map_link(struct wfx_vif *wvif,
+ struct wfx_link_entry *link_entry, int sta_id)
+{
+ int ret;
+
+ ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
+
+ if (ret == 0)
+ /* Save the MAC address currently associated with the peer
+ * for future unmap request
+ */
+ ether_addr_copy(link_entry->old_mac, link_entry->mac);
+
+ return ret;
+}
+
+int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
+{
+ u8 *mac_addr = NULL;
+
+ if (sta_id)
+ mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
+
+ return hif_map_link(wvif, mac_addr, 1, sta_id);
+}
+
+void wfx_link_id_gc_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_gc_work.work);
+ unsigned long now = jiffies;
+ unsigned long next_gc = -1;
+ long ttl;
+ u32 mask;
+ int i;
+
+ if (wvif->state != WFX_STATE_AP)
+ return;
+
+ wfx_tx_lock_flush(wvif->wdev);
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ bool need_reset = false;
+
+ mask = BIT(i + 1);
+ if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
+ (wvif->link_id_db[i].status == WFX_LINK_HARD &&
+ !(wvif->link_id_map & mask))) {
+ if (wvif->link_id_map & mask) {
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ need_reset = true;
+ }
+ wvif->link_id_map |= mask;
+ if (wvif->link_id_db[i].status != WFX_LINK_HARD)
+ wvif->link_id_db[i].status = WFX_LINK_SOFT;
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (need_reset)
+ wfx_unmap_link(wvif, i + 1);
+ wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
+ next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
+ ttl = wvif->link_id_db[i].timestamp - now +
+ WFX_LINK_ID_GC_TIMEOUT;
+ if (ttl <= 0) {
+ need_reset = true;
+ wvif->link_id_db[i].status = WFX_LINK_OFF;
+ wvif->link_id_map &= ~mask;
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ spin_unlock_bh(&wvif->ps_state_lock);
+ wfx_unmap_link(wvif, i + 1);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else {
+ next_gc = min_t(unsigned long, next_gc, ttl);
+ }
+ }
+ if (need_reset)
+ skb_queue_purge(&wvif->link_id_db[i].rx_queue);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (next_gc != -1)
+ schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+void wfx_link_id_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_work);
+
+ wfx_tx_flush(wvif->wdev);
+ wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+/* Tx implementation */
+
+static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+ return true;
+}
+
+static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
+ struct wfx_tx_priv *tx_priv,
+ struct ieee80211_sta *sta)
+{
+ u32 mask = ~BIT(tx_priv->raw_link_id);
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (ieee80211_is_auth(hdr->frame_control)) {
+ wvif->sta_asleep_mask &= mask;
+ wvif->pspoll_mask &= mask;
+ }
+
+ if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM &&
+ !wvif->mcast_buffered) {
+ wvif->mcast_buffered = true;
+ if (wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ }
+
+ if (tx_priv->raw_link_id) {
+ wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
+ if (tx_priv->tid < WFX_MAX_TID)
+ wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+}
+
+static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
+{
+ struct wfx_sta_priv *sta_priv =
+ sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
+ const u8 *da = ieee80211_get_DA(hdr);
+ int ret;
+
+ if (sta_priv && sta_priv->link_id)
+ return sta_priv->link_id;
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (is_multicast_ether_addr(da))
+ return 0;
+ ret = wfx_find_link_id(wvif, da);
+ if (!ret)
+ ret = wfx_alloc_link_id(wvif, da);
+ if (!ret) {
+ dev_err(wvif->wdev->dev, "no more link-id available\n");
+ return WFX_LINK_ID_NO_ASSOC;
+ }
+ return ret;
+}
+
+static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+{
+ int i;
+ bool finished;
+
+	// Firmware is not able to mix rates with different flags
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+ rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+ }
+
+ // Sort rates and remove duplicates
+ do {
+ finished = true;
+ for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+ if (rates[i + 1].idx == rates[i].idx &&
+ rates[i].idx != -1) {
+ rates[i].count =
+ max_t(int, rates[i].count,
+ rates[i + 1].count);
+ rates[i + 1].idx = -1;
+ rates[i + 1].count = 0;
+
+ finished = false;
+ }
+ if (rates[i + 1].idx > rates[i].idx) {
+ swap(rates[i + 1], rates[i]);
+ finished = false;
+ }
+ }
+ } while (!finished);
+ // All retries use long GI
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+}
+
+static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
+ struct ieee80211_tx_info *tx_info)
+{
+ bool tx_policy_renew = false;
+ u8 rate_id;
+
+ rate_id = wfx_tx_policy_get(wvif,
+ tx_info->driver_rates, &tx_policy_renew);
+ WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy");
+
+ if (tx_policy_renew) {
+ /* FIXME: It's not so optimal to stop TX queues every now and
+ * then. Better to reimplement task scheduling with a counter.
+ */
+ wfx_tx_lock(wvif->wdev);
+ wfx_tx_queues_lock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work)) {
+ wfx_tx_queues_unlock(wvif->wdev);
+ wfx_tx_unlock(wvif->wdev);
+ }
+ }
+ return rate_id;
+}
+
+static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info)
+{
+ struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
+ struct hif_ht_tx_parameters ret = { };
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ ret.frame_format = HIF_FRAME_FORMAT_NON_HT;
+ else if (!(rate->flags & IEEE80211_TX_RC_GREEN_FIELD))
+ ret.frame_format = HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
+ else
+ ret.frame_format = HIF_FRAME_FORMAT_GF_HT_11N;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ ret.short_gi = 1;
+ if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+ ret.stbc = 0; // FIXME: Not yet supported by firmware?
+ return ret;
+}
+
+static u8 wfx_tx_get_tid(struct ieee80211_hdr *hdr)
+{
+ // FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
+ if (!ieee80211_is_data(hdr->frame_control))
+ return WFX_MAX_TID;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ return ieee80211_get_tid(hdr);
+ else
+ return 0;
+}
+
+static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
+{
+ int mic_space;
+
+ if (!hw_key)
+ return 0;
+ mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
+ return hw_key->icv_len + mic_space;
+}
+
+static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct hif_msg *hif_msg;
+ struct hif_req_tx *req;
+ struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int queue_id = tx_info->hw_queue;
+ size_t offset = (size_t) skb->data & 3;
+ int wmsg_len = sizeof(struct hif_msg) +
+ sizeof(struct hif_req_tx) + offset;
+
+ WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
+ wfx_tx_fixup_rates(tx_info->driver_rates);
+
+	// From now on, tx_info->control is unusable
+ memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+ // Fill tx_priv
+ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
+ tx_priv->tid = wfx_tx_get_tid(hdr);
+ tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
+ tx_priv->link_id = tx_priv->raw_link_id;
+ if (ieee80211_has_protected(hdr->frame_control))
+ tx_priv->hw_key = hw_key;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ tx_priv->link_id = WFX_LINK_ID_AFTER_DTIM;
+ if (sta && (sta->uapsd_queues & BIT(queue_id)))
+ tx_priv->link_id = WFX_LINK_ID_UAPSD;
+
+ // Fill hif_msg
+ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ WARN(offset & 1, "attempt to transmit an unaligned frame");
+ skb_put(skb, wfx_tx_get_icv_len(tx_priv->hw_key));
+ skb_push(skb, wmsg_len);
+ memset(skb->data, 0, wmsg_len);
+ hif_msg = (struct hif_msg *)skb->data;
+ hif_msg->len = cpu_to_le16(skb->len);
+ hif_msg->id = HIF_REQ_ID_TX;
+ hif_msg->interface = wvif->id;
+ if (skb->len > wvif->wdev->hw_caps.size_inp_ch_buf) {
+ dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
+ skb->len, wvif->wdev->hw_caps.size_inp_ch_buf);
+ skb_pull(skb, wmsg_len);
+ return -EIO;
+ }
+
+ // Fill tx request
+ req = (struct hif_req_tx *)hif_msg->body;
+ req->packet_id = queue_id << 16 |
+ IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ req->data_flags.fc_offset = offset;
+ req->queue_id.peer_sta_id = tx_priv->raw_link_id;
+	// Queue indices are inverted between firmware and Linux
+ req->queue_id.queue_id = 3 - queue_id;
+ req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
+ req->tx_flags.retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+
+ // Auxiliary operations
+ wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
+ wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ wfx_bh_request_tx(wvif->wdev);
+ return 0;
+}
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+ struct ieee80211_sta *sta = control ? control->sta : NULL;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info,
+ rate_driver_data);
+
+ compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
+ "struct tx_priv is too large");
+ WARN(skb->next || skb->prev, "skb is already member of a list");
+ // control.vif can be NULL for injected frames
+ if (tx_info->control.vif)
+ wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
+ else
+ wvif = wvif_iterate(wdev, NULL);
+ if (WARN_ON(!wvif))
+ goto drop;
+ // FIXME: why?
+ if (ieee80211_is_action_back(hdr)) {
+ dev_info(wdev->dev, "drop BA action\n");
+ goto drop;
+ }
+ if (wfx_tx_inner(wvif, sta, skb))
+ goto drop;
+
+ return;
+
+drop:
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
+
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+{
+ int i;
+ int tx_count;
+ struct sk_buff *skb;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_tx_info *tx_info;
+ const struct wfx_tx_priv *tx_priv;
+
+ skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wvif->wdev->dev,
+ "received unknown packet_id (%#.8x) from chip\n",
+ arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ _trace_tx_stats(arg, skb,
+ wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+
+	// You can touch tx_priv, but don't touch tx_info->status.
+ tx_count = arg->ack_failures;
+ if (!arg->status || arg->ack_failures)
+ tx_count += 1; // Also report success
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ rate = &tx_info->status.rates[i];
+ if (rate->idx < 0)
+ break;
+ if (tx_count < rate->count && arg->status && arg->ack_failures)
+			dev_dbg(wvif->wdev->dev, "not all retries were consumed: %d != %d\n",
+ rate->count, tx_count);
+ if (tx_count <= rate->count && tx_count &&
+ arg->txed_rate != wfx_get_hw_rate(wvif->wdev, rate))
+ dev_dbg(wvif->wdev->dev,
+ "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate,
+ wfx_get_hw_rate(wvif->wdev, rate));
+ if (tx_count > rate->count) {
+ tx_count -= rate->count;
+ } else if (!tx_count) {
+ rate->count = 0;
+ rate->idx = -1;
+ } else {
+ rate->count = tx_count;
+ tx_count = 0;
+ }
+ }
+ if (tx_count)
+ dev_dbg(wvif->wdev->dev,
+ "%d more retries than expected\n", tx_count);
+ skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
+
+ // From now on, you can touch tx_info->status, but do not touch
+ // tx_priv anymore
+ // FIXME: use ieee80211_tx_info_clear_status()
+ memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
+ memset(tx_info->pad, 0, sizeof(tx_info->pad));
+
+ if (!arg->status) {
+ if (wvif->bss_loss_state &&
+ arg->packet_id == wvif->bss_loss_confirm_id)
+ wfx_cqm_bssloss_sm(wvif, 0, 1, 0);
+ tx_info->status.tx_time =
+ arg->media_delay - arg->tx_queue_delay;
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ } else if (arg->status == HIF_REQUEUE) {
+ /* "REQUEUE" means "implicit suspend" */
+ struct hif_ind_suspend_resume_tx suspend = {
+ .suspend_resume_flags.resume = 0,
+ .suspend_resume_flags.bc_mc_only = 1,
+ };
+
+ WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
+ wfx_suspend_resume(wvif, &suspend);
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ } else {
+ if (wvif->bss_loss_state &&
+ arg->packet_id == wvif->bss_loss_confirm_id)
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 1);
+ }
+ wfx_pending_remove(wvif->wdev, skb);
+}
+
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct hif_req_tx *req)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int tid = wfx_tx_get_tid(hdr);
+ int raw_link_id = req->queue_id.peer_sta_id;
+ u8 *buffered;
+
+ if (raw_link_id && tid < WFX_MAX_TID) {
+ buffered = wvif->link_id_db[raw_link_id - 1].buffered;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ WARN(!buffered[tid], "inconsistent notification");
+ buffered[tid]--;
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (!buffered[tid]) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tid, false);
+ rcu_read_unlock();
+ }
+ }
+}
+
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ unsigned int offset = sizeof(struct hif_req_tx) +
+ sizeof(struct hif_msg) +
+ req->data_flags.fc_offset;
+
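+ // Strip the HIF header and padding prepended on the tx path before
+ // handing the frame back to mac80211.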
+ WARN_ON(!wvif);
+ skb_pull(skb, offset);
+ wfx_notify_buffered_tx(wvif, skb, req);
+ wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
new file mode 100644
index 000000000000..29faa5640516
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_TX_H
+#define WFX_DATA_TX_H
+
+#include <linux/list.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+// FIXME: use IEEE80211_NUM_TIDS
+#define WFX_MAX_TID 8
+
+struct wfx_tx_priv;
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_link_status {
+ WFX_LINK_OFF,
+ WFX_LINK_RESERVE,
+ WFX_LINK_SOFT,
+ WFX_LINK_HARD,
+};
+
+struct wfx_link_entry {
+ unsigned long timestamp;
+ enum wfx_link_status status;
+ u8 mac[ETH_ALEN];
+ u8 old_mac[ETH_ALEN];
+ u8 buffered[WFX_MAX_TID];
+ struct sk_buff_head rx_queue;
+};
+
+struct tx_policy {
+ struct list_head link;
+ u8 rates[12];
+ u8 usage_count;
+ u8 uploaded;
+};
+
+struct tx_policy_cache {
+ struct tx_policy cache[HIF_MIB_NUM_TX_RATE_RETRY_POLICIES];
+ // FIXME: use a tree and drop the hash from tx_policy
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock;
+};
+
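+// Stored in tx_info->rate_driver_data; wfx_tx() checks at compile time that
+// it fits there.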
+struct wfx_tx_priv {
+ ktime_t xmit_timestamp;
+ struct ieee80211_key_conf *hw_key;
+ u8 link_id;
+ u8 raw_link_id;
+ u8 tid;
+} __packed;
+
+void wfx_tx_policy_init(struct wfx_vif *wvif);
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
+
+int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
+void wfx_link_id_work(struct work_struct *work);
+void wfx_link_id_gc_work(struct work_struct *work);
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
+
+static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return NULL;
+ tx_info = IEEE80211_SKB_CB(skb);
+ return (struct wfx_tx_priv *)tx_info->rate_driver_data;
+}
+
+static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *) hif->body;
+
+ return req;
+}
+
+#endif /* WFX_DATA_TX_H */
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
new file mode 100644
index 000000000000..d17a75242365
--- /dev/null
+++ b/drivers/staging/wfx/debug.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+
+#include "debug.h"
+#include "wfx.h"
+#include "sta.h"
+#include "main.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define CREATE_TRACE_POINTS
+#include "traces.h"
+
+static const struct trace_print_flags hif_msg_print_map[] = {
+ hif_msg_list,
+};
+
+static const struct trace_print_flags hif_mib_print_map[] = {
+ hif_mib_list,
+};
+
+static const struct trace_print_flags wfx_reg_print_map[] = {
+ wfx_reg_list,
+};
+
+static const char *get_symbol(unsigned long val,
+ const struct trace_print_flags *symbol_array)
+{
+ int i;
+
+ for (i = 0; symbol_array[i].mask != -1; i++) {
+ if (val == symbol_array[i].mask)
+ return symbol_array[i].name;
+ }
+
+ return "unknown";
+}
+
+const char *get_hif_name(unsigned long id)
+{
+ return get_symbol(id, hif_msg_print_map);
+}
+
+const char *get_mib_name(unsigned long id)
+{
+ return get_symbol(id, hif_mib_print_map);
+}
+
+const char *get_reg_name(unsigned long id)
+{
+ return get_symbol(id, wfx_reg_print_map);
+}
+
+static int wfx_counters_show(struct seq_file *seq, void *v)
+{
+ int ret;
+ struct wfx_dev *wdev = seq->private;
+ struct hif_mib_extended_count_table counters;
+
+ ret = hif_get_counters_table(wdev, &counters);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -EIO;
+
+#define PUT_COUNTER(name) \
+ seq_printf(seq, "%24s %d\n", #name ":",\
+ le32_to_cpu(counters.count_##name))
+
+ PUT_COUNTER(tx_packets);
+ PUT_COUNTER(tx_multicast_frames);
+ PUT_COUNTER(tx_frames_success);
+ PUT_COUNTER(tx_frame_failures);
+ PUT_COUNTER(tx_frames_retried);
+ PUT_COUNTER(tx_frames_multi_retried);
+
+ PUT_COUNTER(rts_success);
+ PUT_COUNTER(rts_failures);
+ PUT_COUNTER(ack_failures);
+
+ PUT_COUNTER(rx_packets);
+ PUT_COUNTER(rx_frames_success);
+ PUT_COUNTER(rx_packet_errors);
+ PUT_COUNTER(plcp_errors);
+ PUT_COUNTER(fcs_errors);
+ PUT_COUNTER(rx_decryption_failures);
+ PUT_COUNTER(rx_mic_failures);
+ PUT_COUNTER(rx_no_key_failures);
+ PUT_COUNTER(rx_frame_duplicates);
+ PUT_COUNTER(rx_multicast_frames);
+ PUT_COUNTER(rx_cmacicv_errors);
+ PUT_COUNTER(rx_cmac_replays);
+ PUT_COUNTER(rx_mgmt_ccmp_replays);
+
+ PUT_COUNTER(rx_beacon);
+ PUT_COUNTER(miss_beacon);
+
+#undef PUT_COUNTER
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_counters);
+
+static const char * const channel_names[] = {
+ [0] = "1M",
+ [1] = "2M",
+ [2] = "5.5M",
+ [3] = "11M",
+ /* Entries 4 and 5 do not exist */
+ [6] = "6M",
+ [7] = "9M",
+ [8] = "12M",
+ [9] = "18M",
+ [10] = "24M",
+ [11] = "36M",
+ [12] = "48M",
+ [13] = "54M",
+ [14] = "MCS0",
+ [15] = "MCS1",
+ [16] = "MCS2",
+ [17] = "MCS3",
+ [18] = "MCS4",
+ [19] = "MCS5",
+ [20] = "MCS6",
+ [21] = "MCS7",
+};
+
+static int wfx_rx_stats_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct hif_rx_stats *st = &wdev->rx_stats;
+ int i;
+
+ mutex_lock(&wdev->rx_stats_lock);
+ seq_printf(seq, "Timestamp: %dus\n", st->date);
+ seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
+ st->pwr_clk_freq,
+ st->is_ext_pwr_clk ? "yes" : "no");
+ seq_printf(seq,
+ "N. of frames: %d, PER (x10e4): %d, Throughput: %dKbps\n",
+ st->nb_rx_frame, st->per_total, st->throughput);
+ seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
+ seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
+ for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
+ if (channel_names[i])
+ seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
+ channel_names[i], st->nb_rx_by_rate[i],
+ st->per[i], st->rssi[i] / 100,
+ st->snr[i] / 100, st->cfo[i]);
+ }
+ mutex_unlock(&wdev->rx_stats_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
+
+static ssize_t wfx_send_pds_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+ char *buf;
+ int ret;
+
+ if (*ppos != 0) {
+ dev_dbg(wdev->dev, "PDS data must be written in one transaction\n");
+ return -EBUSY;
+ }
+ buf = memdup_user(user_buf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+ *ppos = *ppos + count;
+ ret = wfx_send_pds(wdev, buf, count);
+ kfree(buf);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static const struct file_operations wfx_send_pds_fops = {
+ .open = simple_open,
+ .write = wfx_send_pds_write,
+};
+
+static ssize_t wfx_burn_slk_key_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+
+ dev_info(wdev->dev, "this driver does not support secure link\n");
+ return -EINVAL;
+}
+
+static const struct file_operations wfx_burn_slk_key_fops = {
+ .open = simple_open,
+ .write = wfx_burn_slk_key_write,
+};
+
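+// Protocol of the "send_hif_msg" debugfs entry: one write() submits a raw
+// HIF command to the chip, then one read() retrieves the raw reply (see
+// wfx_send_hif_msg_write() and wfx_send_hif_msg_read() below).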
+struct dbgfs_hif_msg {
+ struct wfx_dev *wdev;
+ struct completion complete;
+ u8 reply[1024];
+ int ret;
+};
+
+static ssize_t wfx_send_hif_msg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ struct wfx_dev *wdev = context->wdev;
+ struct hif_msg *request;
+
+ if (completion_done(&context->complete)) {
+ dev_dbg(wdev->dev, "read the previous result before starting a new one\n");
+ return -EBUSY;
+ }
+ if (count < sizeof(struct hif_msg))
+ return -EINVAL;
+
+ // wfx_cmd_send() checks that the reply buffer is large enough, but
+ // does not return the precise length read. The user has to know how
+ // many bytes should be read. Filling the reply buffer with a memory
+ // pattern may help the user.
+ memset(context->reply, 0xFF, sizeof(context->reply));
+ request = memdup_user(user_buf, count);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
+ if (request->len != count) {
+ kfree(request);
+ return -EINVAL;
+ }
+ context->ret = wfx_cmd_send(wdev, request, context->reply,
+ sizeof(context->reply), false);
+
+ kfree(request);
+ complete(&context->complete);
+ return count;
+}
+
+static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ int ret;
+
+ if (count > sizeof(context->reply))
+ return -EINVAL;
+ ret = wait_for_completion_interruptible(&context->complete);
+ if (ret)
+ return ret;
+ if (context->ret < 0)
+ return context->ret;
+ // Be careful: write() expects a full message while read() only
+ // returns the payload
+ if (copy_to_user(user_buf, context->reply, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int wfx_send_hif_msg_open(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+ if (!context)
+ return -ENOMEM;
+ context->wdev = inode->i_private;
+ init_completion(&context->complete);
+ file->private_data = context;
+ return 0;
+}
+
+static int wfx_send_hif_msg_release(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+
+ kfree(context);
+ return 0;
+}
+
+static const struct file_operations wfx_send_hif_msg_fops = {
+ .open = wfx_send_hif_msg_open,
+ .release = wfx_send_hif_msg_release,
+ .write = wfx_send_hif_msg_write,
+ .read = wfx_send_hif_msg_read,
+};
+
+int wfx_debug_init(struct wfx_dev *wdev)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
+ debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
+ debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
+ debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
+ debugfs_create_file("burn_slk_key", 0200, d, wdev,
+ &wfx_burn_slk_key_fops);
+ debugfs_create_file("send_hif_msg", 0600, d, wdev,
+ &wfx_send_hif_msg_fops);
+
+ return 0;
+}
diff --git a/drivers/staging/wfx/debug.h b/drivers/staging/wfx/debug.h
new file mode 100644
index 000000000000..6f2f84d64c9e
--- /dev/null
+++ b/drivers/staging/wfx/debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, ST-Ericsson
+ */
+#ifndef WFX_DEBUG_H
+#define WFX_DEBUG_H
+
+struct wfx_dev;
+
+int wfx_debug_init(struct wfx_dev *wdev);
+
+const char *get_hif_name(unsigned long id);
+const char *get_mib_name(unsigned long id);
+const char *get_reg_name(unsigned long id);
+
+#endif /* WFX_DEBUG_H */
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
new file mode 100644
index 000000000000..dbf8bda71ff7
--- /dev/null
+++ b/drivers/staging/wfx/fwio.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitfield.h>
+
+#include "fwio.h"
+#include "wfx.h"
+#include "hwio.h"
+
+// Addresses below are in SRAM area
+#define WFX_DNLD_FIFO 0x09004000
+#define DNLD_BLOCK_SIZE 0x0400
+#define DNLD_FIFO_SIZE 0x8000 // (32 * DNLD_BLOCK_SIZE)
+// Download Control Area (DCA)
+#define WFX_DCA_IMAGE_SIZE 0x0900C000
+#define WFX_DCA_PUT 0x0900C004
+#define WFX_DCA_GET 0x0900C008
+#define WFX_DCA_HOST_STATUS 0x0900C00C
+#define HOST_READY 0x87654321
+#define HOST_INFO_READ 0xA753BD99
+#define HOST_UPLOAD_PENDING 0xABCDDCBA
+#define HOST_UPLOAD_COMPLETE 0xD4C64A99
+#define HOST_OK_TO_JUMP 0x174FC882
+#define WFX_DCA_NCP_STATUS 0x0900C010
+#define NCP_NOT_READY 0x12345678
+#define NCP_READY 0x87654321
+#define NCP_INFO_READY 0xBD53EF99
+#define NCP_DOWNLOAD_PENDING 0xABCDDCBA
+#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA
+#define NCP_AUTH_OK 0xD4C64A99
+#define NCP_AUTH_FAIL 0x174FC882
+#define NCP_PUB_KEY_RDY 0x7AB41D19
+#define WFX_DCA_FW_SIGNATURE 0x0900C014
+#define FW_SIGNATURE_SIZE 0x40
+#define WFX_DCA_FW_HASH 0x0900C054
+#define FW_HASH_SIZE 0x08
+#define WFX_DCA_FW_VERSION 0x0900C05C
+#define FW_VERSION_SIZE 0x04
+#define WFX_DCA_RESERVED 0x0900C060
+#define DCA_RESERVED_SIZE 0x20
+#define WFX_STATUS_INFO 0x0900C080
+#define WFX_BOOTLOADER_LABEL 0x0900C084
+#define BOOTLOADER_LABEL_SIZE 0x3C
+#define WFX_PTE_INFO 0x0900C0C0
+#define PTE_INFO_KEYSET_IDX 0x0D
+#define PTE_INFO_SIZE 0x10
+#define WFX_ERR_INFO 0x0900C0D0
+#define ERR_INVALID_SEC_TYPE 0x05
+#define ERR_SIG_VERIF_FAILED 0x0F
+#define ERR_AES_CTRL_KEY 0x10
+#define ERR_ECC_PUB_KEY 0x11
+#define ERR_MAC_KEY 0x18
+
+#define DCA_TIMEOUT 50 // milliseconds
+#define WAKEUP_TIMEOUT 200 // milliseconds
+
+static const char * const fwio_error_strings[] = {
+ [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
+ [ERR_SIG_VERIF_FAILED] = "Signature verification failed",
+ [ERR_AES_CTRL_KEY] = "AES control key not initialized",
+ [ERR_ECC_PUB_KEY] = "ECC public key not initialized",
+ [ERR_MAC_KEY] = "MAC key not initialized",
+};
+
+/*
+ * request_firmware() allocates its data with vmalloc(), which is not
+ * compatible with underlying hardware that uses DMA. The function below
+ * detects this case and allocates a bounce buffer if necessary.
+ *
+ * Note that, in case of doubt, you can enable CONFIG_DEBUG_SG to ask the
+ * kernel to detect this problem at runtime (otherwise, the kernel fails
+ * silently).
+ *
+ * NOTE: it may also be possible to use 'pages' from struct firmware and
+ * avoid the bounce buffer
+ */
+static int sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf,
+ size_t len)
+{
+ int ret;
+ const u8 *tmp;
+
+ if (!virt_addr_valid(buf)) {
+ tmp = kmemdup(buf, len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ } else {
+ tmp = buf;
+ }
+ ret = sram_buf_write(wdev, addr, tmp, len);
+ if (!virt_addr_valid(buf))
+ kfree(tmp);
+ return ret;
+}
+
+int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
+ const struct firmware **fw, int *file_offset)
+{
+ int keyset_file;
+ char filename[256];
+ const char *data;
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s_%02X.sec", wdev->pdata.file_fw,
+ keyset_chip);
+ ret = firmware_request_nowarn(fw, filename, wdev->dev);
+ if (ret) {
+ dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
+ filename, wdev->pdata.file_fw);
+ snprintf(filename, sizeof(filename), "%s.sec",
+ wdev->pdata.file_fw);
+ ret = request_firmware(fw, filename, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load %s\n", filename);
+ *fw = NULL;
+ return ret;
+ }
+ }
+
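+ // Firmware files may start with an 8 byte "KEYSETxx" header, where
+ // "xx" is the hexadecimal keyset the image has been signed for.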
+ data = (*fw)->data;
+ if (memcmp(data, "KEYSET", 6) != 0) {
+ // Legacy firmware format
+ *file_offset = 0;
+ keyset_file = 0x90;
+ } else {
+ *file_offset = 8;
+ keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]);
+ if (keyset_file < 0) {
+ dev_err(wdev->dev, "%s corrupted\n", filename);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -EINVAL;
+ }
+ }
+ if (keyset_file != keyset_chip) {
+ dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n",
+ keyset_file, keyset_chip);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENODEV;
+ }
+ wdev->keyset = keyset_file;
+ return 0;
+}
+
+static int wait_ncp_status(struct wfx_dev *wdev, u32 status)
+{
+ ktime_t now, start;
+ u32 reg;
+ int ret;
+
+ start = ktime_get();
+ for (;;) {
+ ret = sram_reg_read(wdev, WFX_DCA_NCP_STATUS, &reg);
+ if (ret < 0)
+ return -EIO;
+ now = ktime_get();
+ if (reg == status)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "chip answered after %lldus\n",
+ ktime_us_delta(now, start));
+ else
+ dev_dbg(wdev->dev, "chip answered immediately\n");
+ return 0;
+}
+
+static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
+{
+ int ret;
+ u32 offs, bytes_done;
+ ktime_t now, start;
+
+ if (len % DNLD_BLOCK_SIZE) {
+ dev_err(wdev->dev, "firmware size is not aligned. Buffer overrun will occur\n");
+ return -EIO;
+ }
+ offs = 0;
+ while (offs < len) {
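+ // Flow control: WFX_DCA_GET reports how much data the chip has
+ // consumed; wait until the circular download FIFO has room for one
+ // more block before pushing it.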
+ start = ktime_get();
+ for (;;) {
+ ret = sram_reg_read(wdev, WFX_DCA_GET, &bytes_done);
+ if (ret < 0)
+ return ret;
+ now = ktime_get();
+ if (offs +
+ DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "answered after %lldus\n",
+ ktime_us_delta(now, start));
+
+ ret = sram_write_dma_safe(wdev, WFX_DNLD_FIFO +
+ (offs % DNLD_FIFO_SIZE),
+ data + offs, DNLD_BLOCK_SIZE);
+ if (ret < 0)
+ return ret;
+
+ // The WFx does not seem to support writing 0 to this register
+ // during the first loop
+ offs += DNLD_BLOCK_SIZE;
+ ret = sram_reg_write(wdev, WFX_DCA_PUT, offs);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void print_boot_status(struct wfx_dev *wdev)
+{
+ u32 val32;
+
+ sram_reg_read(wdev, WFX_STATUS_INFO, &val32);
+ if (val32 == 0x12345678) {
+ dev_info(wdev->dev, "no error reported by secure boot\n");
+ } else {
+ sram_reg_read(wdev, WFX_ERR_INFO, &val32);
+ if (val32 < ARRAY_SIZE(fwio_error_strings) &&
+ fwio_error_strings[val32])
+ dev_info(wdev->dev, "secure boot error: %s\n",
+ fwio_error_strings[val32]);
+ else
+ dev_info(wdev->dev,
+ "secure boot error: Unknown (0x%02x)\n",
+ val32);
+ }
+}
+
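+/*
+ * Handshake with the Download Control Area (DCA), as implemented below: the
+ * host advertises its state through WFX_DCA_HOST_STATUS and polls
+ * WFX_DCA_NCP_STATUS until the chip acknowledges, streams the firmware
+ * through the download FIFO, then asks the chip to jump into the new image.
+ */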
+static int load_firmware_secure(struct wfx_dev *wdev)
+{
+ const struct firmware *fw = NULL;
+ int header_size;
+ int fw_offset;
+ ktime_t start;
+ u8 *buf;
+ int ret;
+
+ BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE);
+ buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY);
+ ret = wait_ncp_status(wdev, NCP_INFO_READY);
+ if (ret)
+ goto error;
+
+ sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE);
+ buf[BOOTLOADER_LABEL_SIZE] = 0;
+ dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf);
+
+ sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE);
+ ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset);
+ if (ret)
+ goto error;
+ header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE;
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ);
+ ret = wait_ncp_status(wdev, NCP_READY);
+ if (ret)
+ goto error;
+
+ sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); // Fifo init
+ sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00",
+ FW_VERSION_SIZE);
+ sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset,
+ FW_SIGNATURE_SIZE);
+ sram_write_dma_safe(wdev, WFX_DCA_FW_HASH,
+ fw->data + fw_offset + FW_SIGNATURE_SIZE,
+ FW_HASH_SIZE);
+ sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size);
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING);
+ ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING);
+ if (ret)
+ goto error;
+
+ start = ktime_get();
+ ret = upload_firmware(wdev, fw->data + header_size,
+ fw->size - header_size);
+ if (ret)
+ goto error;
+ dev_dbg(wdev->dev, "firmware load after %lldus\n",
+ ktime_us_delta(ktime_get(), start));
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE);
+ ret = wait_ncp_status(wdev, NCP_AUTH_OK);
+ // Legacy ROM support
+ if (ret < 0)
+ ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY);
+ if (ret < 0)
+ goto error;
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP);
+
+error:
+ kfree(buf);
+ if (fw)
+ release_firmware(fw);
+ if (ret)
+ print_boot_status(wdev);
+ return ret;
+}
+
+static int init_gpr(struct wfx_dev *wdev)
+{
+ int ret, i;
+ static const struct {
+ int index;
+ u32 value;
+ } gpr_init[] = {
+ { 0x07, 0x208775 },
+ { 0x08, 0x2EC020 },
+ { 0x09, 0x3C3C3C },
+ { 0x0B, 0x322C44 },
+ { 0x0C, 0xA06497 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(gpr_init); i++) {
+ ret = igpr_reg_write(wdev, gpr_init[i].index,
+ gpr_init[i].value);
+ if (ret < 0)
+ return ret;
+ dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index,
+ gpr_init[i].value);
+ }
+ return 0;
+}
+
+int wfx_init_device(struct wfx_dev *wdev)
+{
+ int ret;
+ int hw_revision, hw_type;
+ int wakeup_timeout = 50; // ms
+ ktime_t now, start;
+ u32 reg;
+
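+ // Probe the bus with a write/read cycle on the config register before
+ // doing anything else: a mute or all-ones answer means the bus (or
+ // the chip reset) is misconfigured.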
+ reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_WORD_MODE2;
+ if (wdev->pdata.use_rising_clk)
+ reg |= CFG_CLK_RISE_EDGE;
+ ret = config_reg_write(wdev, reg);
+ if (ret < 0) {
+ dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n");
+ return -EIO;
+ }
+
+ ret = config_reg_read(wdev, &reg);
+ if (ret < 0) {
+ dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n");
+ return -EIO;
+ }
+ if (reg == 0 || reg == ~0) {
+ dev_err(wdev->dev, "chip is mute. Bus configuration error or the chip wasn't reset?\n");
+ return -EIO;
+ }
+ dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
+
+ hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
+ if (hw_revision == 0 || hw_revision > 2) {
+ dev_err(wdev->dev, "bad hardware revision number: %d\n",
+ hw_revision);
+ return -ENODEV;
+ }
+ hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg);
+ if (hw_type == 1) {
+ dev_notice(wdev->dev, "development hardware detected\n");
+ wakeup_timeout = 2000;
+ }
+
+ ret = init_gpr(wdev);
+ if (ret < 0)
+ return ret;
+
+ ret = control_reg_write(wdev, CTRL_WLAN_WAKEUP);
+ if (ret < 0)
+ return -EIO;
+ start = ktime_get();
+ for (;;) {
+ ret = control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & CTRL_WLAN_READY)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) {
+ dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n");
+ return -ETIMEDOUT;
+ }
+ }
+ dev_dbg(wdev->dev, "chip wake up after %lldus\n",
+ ktime_us_delta(now, start));
+
+ ret = config_reg_write_bits(wdev, CFG_CPU_RESET, 0);
+ if (ret < 0)
+ return ret;
+ ret = load_firmware_secure(wdev);
+ if (ret < 0)
+ return ret;
+ ret = config_reg_write_bits(wdev,
+ CFG_DIRECT_ACCESS_MODE |
+ CFG_IRQ_ENABLE_DATA |
+ CFG_IRQ_ENABLE_WRDY,
+ CFG_IRQ_ENABLE_DATA);
+ return ret;
+}
diff --git a/drivers/staging/wfx/fwio.h b/drivers/staging/wfx/fwio.h
new file mode 100644
index 000000000000..6028f92503fe
--- /dev/null
+++ b/drivers/staging/wfx/fwio.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_FWIO_H
+#define WFX_FWIO_H
+
+struct wfx_dev;
+
+int wfx_init_device(struct wfx_dev *wdev);
+
+#endif /* WFX_FWIO_H */
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
new file mode 100644
index 000000000000..c15831de4ff4
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -0,0 +1,677 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_CMD_H
+#define WFX_HIF_API_CMD_H
+
+#include "hif_api_general.h"
+
+#define HIF_NUM_AC 4
+
+#define HIF_API_SSID_SIZE API_SSID_SIZE
+
+enum hif_requests_ids {
+ HIF_REQ_ID_RESET = 0x0a,
+ HIF_REQ_ID_READ_MIB = 0x05,
+ HIF_REQ_ID_WRITE_MIB = 0x06,
+ HIF_REQ_ID_START_SCAN = 0x07,
+ HIF_REQ_ID_STOP_SCAN = 0x08,
+ HIF_REQ_ID_TX = 0x04,
+ HIF_REQ_ID_JOIN = 0x0b,
+ HIF_REQ_ID_SET_PM_MODE = 0x10,
+ HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
+ HIF_REQ_ID_ADD_KEY = 0x0c,
+ HIF_REQ_ID_REMOVE_KEY = 0x0d,
+ HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_REQ_ID_START = 0x17,
+ HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
+ HIF_REQ_ID_UPDATE_IE = 0x1b,
+ HIF_REQ_ID_MAP_LINK = 0x1c,
+};
+
+enum hif_confirmations_ids {
+ HIF_CNF_ID_RESET = 0x0a,
+ HIF_CNF_ID_READ_MIB = 0x05,
+ HIF_CNF_ID_WRITE_MIB = 0x06,
+ HIF_CNF_ID_START_SCAN = 0x07,
+ HIF_CNF_ID_STOP_SCAN = 0x08,
+ HIF_CNF_ID_TX = 0x04,
+ HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
+ HIF_CNF_ID_JOIN = 0x0b,
+ HIF_CNF_ID_SET_PM_MODE = 0x10,
+ HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
+ HIF_CNF_ID_ADD_KEY = 0x0c,
+ HIF_CNF_ID_REMOVE_KEY = 0x0d,
+ HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_CNF_ID_START = 0x17,
+ HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
+ HIF_CNF_ID_UPDATE_IE = 0x1b,
+ HIF_CNF_ID_MAP_LINK = 0x1c,
+};
+
+enum hif_indications_ids {
+ HIF_IND_ID_RX = 0x84,
+ HIF_IND_ID_SCAN_CMPL = 0x86,
+ HIF_IND_ID_JOIN_COMPLETE = 0x8f,
+ HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
+ HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
+ HIF_IND_ID_EVENT = 0x85
+};
+
+union hif_commands_ids {
+ enum hif_requests_ids request;
+ enum hif_confirmations_ids confirmation;
+ enum hif_indications_ids indication;
+};
+
+enum hif_status {
+ HIF_STATUS_SUCCESS = 0x0,
+ HIF_STATUS_FAILURE = 0x1,
+ HIF_INVALID_PARAMETER = 0x2,
+ HIF_STATUS_WARNING = 0x3,
+ HIF_ERROR_UNSUPPORTED_MSG_ID = 0x4,
+ HIF_STATUS_DECRYPTFAILURE = 0x10,
+ HIF_STATUS_MICFAILURE = 0x11,
+ HIF_STATUS_NO_KEY_FOUND = 0x12,
+ HIF_STATUS_RETRY_EXCEEDED = 0x13,
+ HIF_STATUS_TX_LIFETIME_EXCEEDED = 0x14,
+ HIF_REQUEUE = 0x15,
+ HIF_STATUS_REFUSED = 0x16,
+ HIF_STATUS_BUSY = 0x17
+};
+
+struct hif_reset_flags {
+ u8 reset_stat:1;
+ u8 reset_all_int:1;
+ u8 reserved1:6;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_req_reset {
+ struct hif_reset_flags reset_flags;
+} __packed;
+
+struct hif_cnf_reset {
+ u32 status;
+} __packed;
+
+struct hif_req_read_mib {
+ u16 mib_id;
+ u16 reserved;
+} __packed;
+
+struct hif_cnf_read_mib {
+ u32 status;
+ u16 mib_id;
+ u16 length;
+ u8 mib_data[];
+} __packed;
+
+struct hif_req_write_mib {
+ u16 mib_id;
+ u16 length;
+ u8 mib_data[];
+} __packed;
+
+struct hif_cnf_write_mib {
+ u32 status;
+} __packed;
+
+struct hif_ie_flags {
+ u8 beacon:1;
+ u8 probe_resp:1;
+ u8 probe_req:1;
+ u8 reserved1:5;
+ u8 reserved2;
+} __packed;
+
+struct hif_ie_tlv {
+ u8 type;
+ u8 length;
+ u8 data[];
+} __packed;
+
+struct hif_req_update_ie {
+ struct hif_ie_flags ie_flags;
+ u16 num_i_es;
+ struct hif_ie_tlv ie[];
+} __packed;
+
+struct hif_cnf_update_ie {
+ u32 status;
+} __packed;
+
+struct hif_scan_type {
+ u8 type:1;
+ u8 mode:1;
+ u8 reserved:6;
+} __packed;
+
+struct hif_scan_flags {
+ u8 fbg:1;
+ u8 reserved1:1;
+ u8 pre:1;
+ u8 reserved2:5;
+} __packed;
+
+struct hif_auto_scan_param {
+ u16 interval;
+ u8 reserved;
+ s8 rssi_thr;
+} __packed;
+
+struct hif_ssid_def {
+ u32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+} __packed;
+
+#define HIF_API_MAX_NB_SSIDS 2
+#define HIF_API_MAX_NB_CHANNELS 14
+
+struct hif_req_start_scan {
+ u8 band;
+ struct hif_scan_type scan_type;
+ struct hif_scan_flags scan_flags;
+ u8 max_transmit_rate;
+ struct hif_auto_scan_param auto_scan_param;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssi_ds;
+ u8 num_of_channels;
+ u32 min_channel_time;
+ u32 max_channel_time;
+ s32 tx_power_level;
+ u8 ssid_and_channel_lists[];
+} __packed;
+
+struct hif_start_scan_req_cstnbssid_body {
+ u8 band;
+ struct hif_scan_type scan_type;
+ struct hif_scan_flags scan_flags;
+ u8 max_transmit_rate;
+ struct hif_auto_scan_param auto_scan_param;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssi_ds;
+ u8 num_of_channels;
+ u32 min_channel_time;
+ u32 max_channel_time;
+ s32 tx_power_level;
+ struct hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
+ u8 channel_list[];
+} __packed;
+
+struct hif_cnf_start_scan {
+ u32 status;
+} __packed;
+
+struct hif_cnf_stop_scan {
+ u32 status;
+} __packed;
+
+enum hif_pm_mode_status {
+ HIF_PM_MODE_ACTIVE = 0x0,
+ HIF_PM_MODE_PS = 0x1,
+ HIF_PM_MODE_UNDETERMINED = 0x2
+};
+
+struct hif_ind_scan_cmpl {
+ u32 status;
+ u8 pm_mode;
+ u8 num_channels_completed;
+ u16 reserved;
+} __packed;
+
+enum hif_queue_id {
+ HIF_QUEUE_ID_BACKGROUND = 0x0,
+ HIF_QUEUE_ID_BESTEFFORT = 0x1,
+ HIF_QUEUE_ID_VIDEO = 0x2,
+ HIF_QUEUE_ID_VOICE = 0x3
+};
+
+enum hif_frame_format {
+ HIF_FRAME_FORMAT_NON_HT = 0x0,
+ HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 0x1,
+ HIF_FRAME_FORMAT_GF_HT_11N = 0x2
+};
+
+enum hif_stbc {
+ HIF_STBC_NOT_ALLOWED = 0x0,
+ HIF_STBC_ALLOWED = 0x1
+};
+
+struct hif_queue {
+ u8 queue_id:2;
+ u8 peer_sta_id:4;
+ u8 reserved:2;
+} __packed;
+
+struct hif_data_flags {
+ u8 more:1;
+ u8 fc_offset:3;
+ u8 reserved:4;
+} __packed;
+
+struct hif_tx_flags {
+ u8 start_exp:1;
+ u8 reserved:3;
+ u8 retry_policy_index:4;
+} __packed;
+
+struct hif_ht_tx_parameters {
+ u8 frame_format:4;
+ u8 fec_coding:1;
+ u8 short_gi:1;
+ u8 reserved1:1;
+ u8 stbc:1;
+ u8 reserved2;
+ u8 aggregation:1;
+ u8 reserved3:7;
+ u8 reserved4;
+} __packed;
+
+struct hif_req_tx {
+ u32 packet_id;
+ u8 max_tx_rate;
+ struct hif_queue queue_id;
+ struct hif_data_flags data_flags;
+ struct hif_tx_flags tx_flags;
+ u32 reserved;
+ u32 expire_time;
+ struct hif_ht_tx_parameters ht_tx_parameters;
+ u8 frame[];
+} __packed;
+
+enum hif_qos_ackplcy {
+ HIF_QOS_ACKPLCY_NORMAL = 0x0,
+ HIF_QOS_ACKPLCY_TXNOACK = 0x1,
+ HIF_QOS_ACKPLCY_NOEXPACK = 0x2,
+ HIF_QOS_ACKPLCY_BLCKACK = 0x3
+};
+
+struct hif_tx_result_flags {
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
+} __packed;
+
+struct hif_cnf_tx {
+ u32 status;
+ u32 packet_id;
+ u8 txed_rate;
+ u8 ack_failures;
+ struct hif_tx_result_flags tx_result_flags;
+ u32 media_delay;
+ u32 tx_queue_delay;
+} __packed;
+
+struct hif_cnf_multi_transmit {
+ u32 num_tx_confs;
+ struct hif_cnf_tx tx_conf_payload[];
+} __packed;
+
+enum hif_ri_flags_encrypt {
+ HIF_RI_FLAGS_UNENCRYPTED = 0x0,
+ HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1,
+ HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2,
+ HIF_RI_FLAGS_AES_ENCRYPTED = 0x3,
+ HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4
+};
+
+struct hif_rx_flags {
+ u8 encryp:3;
+ u8 in_aggr:1;
+ u8 first_aggr:1;
+ u8 last_aggr:1;
+ u8 defrag:1;
+ u8 beacon:1;
+ u8 tim:1;
+ u8 bitmap:1;
+ u8 match_ssid:1;
+ u8 match_bssid:1;
+ u8 more:1;
+ u8 reserved1:1;
+ u8 ht:1;
+ u8 stbc:1;
+ u8 match_uc_addr:1;
+ u8 match_mc_addr:1;
+ u8 match_bc_addr:1;
+ u8 key_type:1;
+ u8 key_index:4;
+ u8 reserved2:1;
+ u8 peer_sta_id:4;
+ u8 reserved3:2;
+ u8 reserved4:1;
+} __packed;
+
+struct hif_ind_rx {
+ u32 status;
+ u16 channel_number;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
+ struct hif_rx_flags rx_flags;
+ u8 frame[];
+} __packed;
+
+struct hif_req_edca_queue_params {
+ u8 queue_id;
+ u8 reserved1;
+ u8 aifsn;
+ u8 reserved2;
+ u16 cw_min;
+ u16 cw_max;
+ u16 tx_op_limit;
+ u16 allowed_medium_time;
+ u32 reserved3;
+} __packed;
+
+struct hif_cnf_edca_queue_params {
+ u32 status;
+} __packed;
+
+enum hif_ap_mode {
+ HIF_MODE_IBSS = 0x0,
+ HIF_MODE_BSS = 0x1
+};
+
+enum hif_preamble {
+ HIF_PREAMBLE_LONG = 0x0,
+ HIF_PREAMBLE_SHORT = 0x1,
+ HIF_PREAMBLE_SHORT_LONG12 = 0x2
+};
+
+struct hif_join_flags {
+ u8 reserved1:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved2:4;
+} __packed;
+
+struct hif_req_join {
+ u8 mode;
+ u8 band;
+ u16 channel_number;
+ u8 bssid[ETH_ALEN];
+ u16 atim_window;
+ u8 preamble_type;
+ u8 probe_for_join;
+ u8 reserved;
+ struct hif_join_flags join_flags;
+ u32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ u32 beacon_interval;
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_cnf_join {
+ u32 status;
+} __packed;
+
+struct hif_ind_join_complete {
+ u32 status;
+} __packed;
+
+struct hif_bss_flags {
+ u8 lost_count_only:1;
+ u8 reserved:7;
+} __packed;
+
+struct hif_req_set_bss_params {
+ struct hif_bss_flags bss_flags;
+ u8 beacon_lost_count;
+ u16 aid;
+ u32 operational_rate_set;
+} __packed;
+
+struct hif_cnf_set_bss_params {
+ u32 status;
+} __packed;
+
+struct hif_pm_mode {
+ u8 enter_psm:1;
+ u8 reserved:6;
+ u8 fast_psm:1;
+} __packed;
+
+struct hif_req_set_pm_mode {
+ struct hif_pm_mode pm_mode;
+ u8 fast_psm_idle_period;
+ u8 ap_psm_change_period;
+ u8 min_auto_ps_poll_period;
+} __packed;
+
+struct hif_cnf_set_pm_mode {
+ u32 status;
+} __packed;
+
+struct hif_ind_set_pm_mode_cmpl {
+ u32 status;
+ u8 pm_mode;
+ u8 reserved[3];
+} __packed;
+
+struct hif_req_start {
+ u8 mode;
+ u8 band;
+ u16 channel_number;
+ u32 reserved1;
+ u32 beacon_interval;
+ u8 dtim_period;
+ u8 preamble_type;
+ u8 reserved2;
+ u8 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_cnf_start {
+ u32 status;
+} __packed;
+
+enum hif_beacon {
+ HIF_BEACON_STOP = 0x0,
+ HIF_BEACON_START = 0x1
+};
+
+struct hif_req_beacon_transmit {
+ u8 enable_beaconing;
+ u8 reserved[3];
+} __packed;
+
+struct hif_cnf_beacon_transmit {
+ u32 status;
+} __packed;
+
+enum hif_sta_map_direction {
+ HIF_STA_MAP = 0x0,
+ HIF_STA_UNMAP = 0x1
+};
+
+struct hif_map_link_flags {
+ u8 map_direction:1;
+ u8 mfpc:1;
+ u8 reserved:6;
+} __packed;
+
+struct hif_req_map_link {
+ u8 mac_addr[ETH_ALEN];
+ struct hif_map_link_flags map_link_flags;
+ u8 peer_sta_id;
+} __packed;
+
+struct hif_cnf_map_link {
+ u32 status;
+} __packed;
+
+struct hif_suspend_resume_flags {
+ u8 resume:1;
+ u8 reserved1:2;
+ u8 bc_mc_only:1;
+ u8 reserved2:4;
+ u8 reserved3;
+} __packed;
+
+struct hif_ind_suspend_resume_tx {
+ struct hif_suspend_resume_flags suspend_resume_flags;
+ u16 peer_sta_set;
+} __packed;
+
+#define MAX_KEY_ENTRIES 24
+#define HIF_API_WEP_KEY_DATA_SIZE 16
+#define HIF_API_TKIP_KEY_DATA_SIZE 16
+#define HIF_API_RX_MIC_KEY_SIZE 8
+#define HIF_API_TX_MIC_KEY_SIZE 8
+#define HIF_API_AES_KEY_DATA_SIZE 16
+#define HIF_API_WAPI_KEY_DATA_SIZE 16
+#define HIF_API_MIC_KEY_DATA_SIZE 16
+#define HIF_API_IGTK_KEY_DATA_SIZE 16
+#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8
+#define HIF_API_IPN_SIZE 8
+
+enum hif_key_type {
+ HIF_KEY_TYPE_WEP_DEFAULT = 0x0,
+ HIF_KEY_TYPE_WEP_PAIRWISE = 0x1,
+ HIF_KEY_TYPE_TKIP_GROUP = 0x2,
+ HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3,
+ HIF_KEY_TYPE_AES_GROUP = 0x4,
+ HIF_KEY_TYPE_AES_PAIRWISE = 0x5,
+ HIF_KEY_TYPE_WAPI_GROUP = 0x6,
+ HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7,
+ HIF_KEY_TYPE_IGTK_GROUP = 0x8,
+ HIF_KEY_TYPE_NONE = 0x9
+};
+
+struct hif_wep_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved;
+ u8 key_length;
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_wep_group_key {
+ u8 key_id;
+ u8 key_length;
+ u8 reserved[2];
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_tkip_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
+} __packed;
+
+struct hif_tkip_group_key {
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct hif_aes_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_aes_group_key {
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct hif_wapi_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 key_id;
+ u8 reserved;
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_wapi_group_key {
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+} __packed;
+
+struct hif_igtk_group_key {
+ u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 ipn[HIF_API_IPN_SIZE];
+} __packed;
+
+union hif_privacy_key_data {
+ struct hif_wep_pairwise_key wep_pairwise_key;
+ struct hif_wep_group_key wep_group_key;
+ struct hif_tkip_pairwise_key tkip_pairwise_key;
+ struct hif_tkip_group_key tkip_group_key;
+ struct hif_aes_pairwise_key aes_pairwise_key;
+ struct hif_aes_group_key aes_group_key;
+ struct hif_wapi_pairwise_key wapi_pairwise_key;
+ struct hif_wapi_group_key wapi_group_key;
+ struct hif_igtk_group_key igtk_group_key;
+};
+
+struct hif_req_add_key {
+ u8 type;
+ u8 entry_index;
+ u8 int_id:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ union hif_privacy_key_data key;
+} __packed;
+
+struct hif_cnf_add_key {
+ u32 status;
+} __packed;
+
+struct hif_req_remove_key {
+ u8 entry_index;
+ u8 reserved[3];
+} __packed;
+
+struct hif_cnf_remove_key {
+ u32 status;
+} __packed;
+
+enum hif_event_ind {
+ HIF_EVENT_IND_BSSLOST = 0x1,
+ HIF_EVENT_IND_BSSREGAINED = 0x2,
+ HIF_EVENT_IND_RCPI_RSSI = 0x3,
+ HIF_EVENT_IND_PS_MODE_ERROR = 0x4,
+ HIF_EVENT_IND_INACTIVITY = 0x5
+};
+
+enum hif_ps_mode_error {
+ HIF_PS_ERROR_NO_ERROR = 0,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2,
+ HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3,
+ HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4
+};
+
+union hif_event_data {
+ u8 rcpi_rssi;
+ u32 ps_mode_error;
+ u32 peer_sta_set;
+};
+
+struct hif_ind_event {
+ u32 event_id;
+ union hif_event_data event_data;
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
new file mode 100644
index 000000000000..a069c3a21b4d
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_GENERAL_H
+#define WFX_HIF_API_GENERAL_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#else
+#include <net/ethernet.h>
+#include <stdint.h>
+#define __packed __attribute__((__packed__))
+#endif
+
+#define API_SSID_SIZE 32
+
+#define HIF_ID_IS_INDICATION 0x80
+#define HIF_COUNTER_MAX 7
+
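+// Header common to every HIF message. Multi-byte fields are little-endian
+// (the driver fills len with cpu_to_le16(), for instance).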
+struct hif_msg {
+ u16 len;
+ u8 id;
+ u8 reserved:1;
+ u8 interface:2;
+ u8 seqnum:3;
+ u8 encrypted:2;
+ u8 body[];
+} __packed;
+
+enum hif_general_requests_ids {
+ HIF_REQ_ID_CONFIGURATION = 0x09,
+ HIF_REQ_ID_CONTROL_GPIO = 0x26,
+ HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_REQ_ID_SL_CONFIGURE = 0x29,
+ HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_REQ_ID_PTA_SETTINGS = 0x2b,
+ HIF_REQ_ID_PTA_PRIORITY = 0x2c,
+ HIF_REQ_ID_PTA_STATE = 0x2d,
+ HIF_REQ_ID_SHUT_DOWN = 0x32,
+};
+
+enum hif_general_confirmations_ids {
+ HIF_CNF_ID_CONFIGURATION = 0x09,
+ HIF_CNF_ID_CONTROL_GPIO = 0x26,
+ HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_CNF_ID_SL_CONFIGURE = 0x29,
+ HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_CNF_ID_PTA_SETTINGS = 0x2b,
+ HIF_CNF_ID_PTA_PRIORITY = 0x2c,
+ HIF_CNF_ID_PTA_STATE = 0x2d,
+ HIF_CNF_ID_SHUT_DOWN = 0x32,
+};
+
+enum hif_general_indications_ids {
+ HIF_IND_ID_EXCEPTION = 0xe0,
+ HIF_IND_ID_STARTUP = 0xe1,
+ HIF_IND_ID_WAKEUP = 0xe2,
+ HIF_IND_ID_GENERIC = 0xe3,
+ HIF_IND_ID_ERROR = 0xe4,
+ HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
+};
+
+enum hif_hi_status {
+ HI_STATUS_SUCCESS = 0x0000,
+ HI_STATUS_FAILURE = 0x0001,
+ HI_INVALID_PARAMETER = 0x0002,
+ HI_STATUS_GPIO_WARNING = 0x0003,
+ HI_ERROR_UNSUPPORTED_MSG_ID = 0x0004,
+ SL_MAC_KEY_STATUS_SUCCESS = 0x005A,
+ SL_MAC_KEY_STATUS_FAILED_KEY_ALREADY_BURNED = 0x006B,
+ SL_MAC_KEY_STATUS_FAILED_RAM_MODE_NOT_ALLOWED = 0x007C,
+ SL_MAC_KEY_STATUS_FAILED_UNKNOWN_MODE = 0x008D,
+ SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS = 0x009E,
+ SL_PUB_KEY_EXCHANGE_STATUS_FAILED = 0x00AF,
+ PREVENT_ROLLBACK_CNF_SUCCESS = 0x1234,
+ PREVENT_ROLLBACK_CNF_WRONG_MAGIC_WORD = 0x1256
+};
+
+enum hif_api_rate_index {
+ API_RATE_INDEX_B_1MBPS = 0,
+ API_RATE_INDEX_B_2MBPS = 1,
+ API_RATE_INDEX_B_5P5MBPS = 2,
+ API_RATE_INDEX_B_11MBPS = 3,
+ API_RATE_INDEX_PBCC_22MBPS = 4,
+ API_RATE_INDEX_PBCC_33MBPS = 5,
+ API_RATE_INDEX_G_6MBPS = 6,
+ API_RATE_INDEX_G_9MBPS = 7,
+ API_RATE_INDEX_G_12MBPS = 8,
+ API_RATE_INDEX_G_18MBPS = 9,
+ API_RATE_INDEX_G_24MBPS = 10,
+ API_RATE_INDEX_G_36MBPS = 11,
+ API_RATE_INDEX_G_48MBPS = 12,
+ API_RATE_INDEX_G_54MBPS = 13,
+ API_RATE_INDEX_N_6P5MBPS = 14,
+ API_RATE_INDEX_N_13MBPS = 15,
+ API_RATE_INDEX_N_19P5MBPS = 16,
+ API_RATE_INDEX_N_26MBPS = 17,
+ API_RATE_INDEX_N_39MBPS = 18,
+ API_RATE_INDEX_N_52MBPS = 19,
+ API_RATE_INDEX_N_58P5MBPS = 20,
+ API_RATE_INDEX_N_65MBPS = 21,
+ API_RATE_NUM_ENTRIES = 22
+};
+
+enum hif_fw_type {
+ HIF_FW_TYPE_ETF = 0x0,
+ HIF_FW_TYPE_WFM = 0x1,
+ HIF_FW_TYPE_WSM = 0x2
+};
+
+struct hif_capabilities {
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+} __packed;
+
+struct hif_otp_regul_sel_mode_info {
+ u8 region_sel_mode:4;
+ u8 reserved:4;
+} __packed;
+
+struct hif_otp_phy_info {
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
+} __packed;
+
+#define API_OPN_SIZE 14
+#define API_UID_SIZE 8
+#define API_DISABLED_CHANNEL_LIST_SIZE 2
+#define API_FIRMWARE_LABEL_SIZE 128
+
+struct hif_ind_startup {
+ u32 status;
+ u16 hardware_id;
+ u8 opn[API_OPN_SIZE];
+ u8 uid[API_UID_SIZE];
+ u16 num_inp_ch_bufs;
+ u16 size_inp_ch_buf;
+ u8 num_links_ap;
+ u8 num_interfaces;
+ u8 mac_addr[2][ETH_ALEN];
+ u8 api_version_minor;
+ u8 api_version_major;
+ struct hif_capabilities capabilities;
+ u8 firmware_build;
+ u8 firmware_minor;
+ u8 firmware_major;
+ u8 firmware_type;
+ u8 disabled_channel_list[API_DISABLED_CHANNEL_LIST_SIZE];
+ struct hif_otp_regul_sel_mode_info regul_sel_mode_info;
+ struct hif_otp_phy_info otp_phy_info;
+ u32 supported_rate_mask;
+ u8 firmware_label[API_FIRMWARE_LABEL_SIZE];
+} __packed;
+
+struct hif_ind_wakeup {
+} __packed;
+
+struct hif_req_configuration {
+ u16 length;
+ u8 pds_data[];
+} __packed;
+
+struct hif_cnf_configuration {
+ u32 status;
+} __packed;
+
+enum hif_gpio_mode {
+ HIF_GPIO_MODE_D0 = 0x0,
+ HIF_GPIO_MODE_D1 = 0x1,
+ HIF_GPIO_MODE_OD0 = 0x2,
+ HIF_GPIO_MODE_OD1 = 0x3,
+ HIF_GPIO_MODE_TRISTATE = 0x4,
+ HIF_GPIO_MODE_TOGGLE = 0x5,
+ HIF_GPIO_MODE_READ = 0x6
+};
+
+struct hif_req_control_gpio {
+ u8 gpio_label;
+ u8 gpio_mode;
+} __packed;
+
+enum hif_gpio_error {
+ HIF_GPIO_ERROR_0 = 0x0,
+ HIF_GPIO_ERROR_1 = 0x1,
+ HIF_GPIO_ERROR_2 = 0x2
+};
+
+struct hif_cnf_control_gpio {
+ u32 status;
+ u32 value;
+} __packed;
+
+enum hif_generic_indication_type {
+ HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
+ HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
+ HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2
+};
+
+struct hif_rx_stats {
+ u32 nb_rx_frame;
+ u32 nb_crc_frame;
+ u32 per_total;
+ u32 throughput;
+ u32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
+ u16 per[API_RATE_NUM_ENTRIES];
+ s16 snr[API_RATE_NUM_ENTRIES];
+ s16 rssi[API_RATE_NUM_ENTRIES];
+ s16 cfo[API_RATE_NUM_ENTRIES];
+ u32 date;
+ u32 pwr_clk_freq;
+ u8 is_ext_pwr_clk;
+ s8 current_temp;
+} __packed;
+
+union hif_indication_data {
+ struct hif_rx_stats rx_stats;
+ u8 raw_data[1];
+};
+
+struct hif_ind_generic {
+ u32 indication_type;
+ union hif_indication_data indication_data;
+} __packed;
+
+#define HIF_EXCEPTION_DATA_SIZE 124
+
+struct hif_ind_exception {
+ u8 data[HIF_EXCEPTION_DATA_SIZE];
+} __packed;
+
+enum hif_error {
+ HIF_ERROR_FIRMWARE_ROLLBACK = 0x0,
+ HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x1,
+ HIF_ERROR_OUTDATED_SESSION_KEY = 0x2,
+ HIF_ERROR_INVALID_SESSION_KEY = 0x3,
+ HIF_ERROR_OOR_VOLTAGE = 0x4,
+ HIF_ERROR_PDS_VERSION = 0x5,
+ HIF_ERROR_OOR_TEMPERATURE = 0x6,
+ HIF_ERROR_REQ_DURING_KEY_EXCHANGE = 0x7,
+ HIF_ERROR_MULTI_TX_CNF_SECURELINK = 0x8,
+ HIF_ERROR_SECURELINK_OVERFLOW = 0x9,
+ HIF_ERROR_SECURELINK_DECRYPTION = 0xa
+};
+
+struct hif_ind_error {
+ u32 type;
+ u8 data[];
+} __packed;
+
+enum hif_secure_link_state {
+ SEC_LINK_UNAVAILABLE = 0x0,
+ SEC_LINK_RESERVED = 0x1,
+ SEC_LINK_EVAL = 0x2,
+ SEC_LINK_ENFORCED = 0x3
+};
+
+enum hif_sl_encryption_type {
+ NO_ENCRYPTION = 0,
+ TX_ENCRYPTION = 1,
+ RX_ENCRYPTION = 2,
+ HP_ENCRYPTION = 3
+};
+
+struct hif_sl_msg_hdr {
+ u32 seqnum:30;
+ u32 encrypted:2;
+} __packed;
+
+struct hif_sl_msg {
+ struct hif_sl_msg_hdr hdr;
+ u16 len;
+ u8 payload[];
+} __packed;
+
+#define AES_CCM_TAG_SIZE 16
+
+struct hif_sl_tag {
+ u8 tag[AES_CCM_TAG_SIZE];
+} __packed;
+
+enum hif_sl_mac_key_dest {
+ SL_MAC_KEY_DEST_OTP = 0x78,
+ SL_MAC_KEY_DEST_RAM = 0x87
+};
+
+#define API_KEY_VALUE_SIZE 32
+
+struct hif_req_set_sl_mac_key {
+ u8 otp_or_ram;
+ u8 key_value[API_KEY_VALUE_SIZE];
+} __packed;
+
+struct hif_cnf_set_sl_mac_key {
+ u32 status;
+} __packed;
+
+#define API_HOST_PUB_KEY_SIZE 32
+#define API_HOST_PUB_KEY_MAC_SIZE 64
+
+enum hif_sl_session_key_alg {
+ HIF_SL_CURVE25519 = 0x01,
+ HIF_SL_KDF = 0x02
+};
+
+struct hif_req_sl_exchange_pub_keys {
+ u8 algorithm:2;
+ u8 reserved1:6;
+ u8 reserved2[3];
+ u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
+ u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
+} __packed;
+
+struct hif_cnf_sl_exchange_pub_keys {
+ u32 status;
+} __packed;
+
+#define API_NCP_PUB_KEY_SIZE 32
+#define API_NCP_PUB_KEY_MAC_SIZE 64
+
+struct hif_ind_sl_exchange_pub_keys {
+ u32 status;
+ u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
+ u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
+} __packed;
+
+#define API_ENCR_BMP_SIZE 32
+
+struct hif_req_sl_configure {
+ u8 encr_bmp[API_ENCR_BMP_SIZE];
+ u8 disable_session_key_protection:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_cnf_sl_configure {
+ u32 status;
+} __packed;
+
+struct hif_req_prevent_rollback {
+ u32 magic_word;
+} __packed;
+
+struct hif_cnf_prevent_rollback {
+ u32 status;
+} __packed;
+
+enum hif_pta_mode {
+ PTA_1W_WLAN_MASTER = 0,
+ PTA_1W_COEX_MASTER = 1,
+ PTA_2W = 2,
+ PTA_3W = 3,
+ PTA_4W = 4
+};
+
+enum hif_signal_level {
+ SIGNAL_LOW = 0,
+ SIGNAL_HIGH = 1
+};
+
+enum hif_coex_type {
+ COEX_TYPE_GENERIC = 0,
+ COEX_TYPE_BLE = 1
+};
+
+enum hif_grant_state {
+ NO_GRANT = 0,
+ GRANT = 1
+};
+
+struct hif_req_pta_settings {
+ u8 pta_mode;
+ u8 request_signal_active_level;
+ u8 priority_signal_active_level;
+ u8 freq_signal_active_level;
+ u8 grant_signal_active_level;
+ u8 coex_type;
+ u8 default_grant_state;
+ u8 simultaneous_rx_accesses;
+ u8 priority_sampling_time;
+ u8 tx_rx_sampling_time;
+ u8 freq_sampling_time;
+ u8 grant_valid_time;
+ u8 fem_control_time;
+ u8 first_slot_time;
+ u16 periodic_tx_rx_sampling_time;
+ u16 coex_quota;
+ u16 wlan_quota;
+} __packed;
+
+struct hif_cnf_pta_settings {
+ u32 status;
+} __packed;
+
+enum hif_pta_priority {
+ HIF_PTA_PRIORITY_COEX_MAXIMIZED = 0x00000562,
+ HIF_PTA_PRIORITY_COEX_HIGH = 0x00000462,
+ HIF_PTA_PRIORITY_BALANCED = 0x00001461,
+ HIF_PTA_PRIORITY_WLAN_HIGH = 0x00001851,
+ HIF_PTA_PRIORITY_WLAN_MAXIMIZED = 0x00001A51
+};
+
+struct hif_req_pta_priority {
+ u32 priority;
+} __packed;
+
+struct hif_cnf_pta_priority {
+ u32 status;
+} __packed;
+
+enum hif_pta_state {
+ PTA_OFF = 0,
+ PTA_ON = 1
+};
+
+struct hif_req_pta_state {
+ u32 pta_state;
+} __packed;
+
+struct hif_cnf_pta_state {
+ u32 status;
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
new file mode 100644
index 000000000000..94b789ceb4ff
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_MIB_H
+#define WFX_HIF_API_MIB_H
+
+#include "hif_api_general.h"
+
+#define HIF_API_IPV4_ADDRESS_SIZE 4
+#define HIF_API_IPV6_ADDRESS_SIZE 16
+
+enum hif_mib_ids {
+ HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
+ HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
+ HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
+ HIF_MIB_ID_CCA_CONFIG = 0x2003,
+ HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
+ HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
+ HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
+ HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
+ HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
+ HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
+ HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
+ HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
+ HIF_MIB_ID_RX_FILTER = 0x201B,
+ HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
+ HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
+ HIF_MIB_ID_TSF_COUNTER = 0x2031,
+ HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
+ HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
+ HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
+ HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
+ HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041,
+ HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
+ HIF_MIB_ID_SLOT_TIME = 0x2045,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
+ HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
+ HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
+ HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
+ HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
+ HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
+ HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
+ HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
+ HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
+ HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
+ HIF_MIB_ID_BEACON_STATS = 0x2056,
+};
+
+#define HIF_OP_POWER_MODE_MASK 0xf
+
+enum hif_op_power_mode {
+ HIF_OP_POWER_MODE_ACTIVE = 0x0,
+ HIF_OP_POWER_MODE_DOZE = 0x1,
+ HIF_OP_POWER_MODE_QUIESCENT = 0x2
+};
+
+struct hif_mib_gl_operational_power_mode {
+ u8 power_mode:4;
+ u8 reserved1:3;
+ u8 wup_ind_activation:1;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_gl_block_ack_info {
+ u8 rx_buffer_size;
+ u8 rx_max_num_agreements;
+ u8 tx_buffer_size;
+ u8 tx_max_num_agreements;
+} __packed;
+
+struct hif_mib_gl_set_multi_msg {
+ u8 enable_multi_tx_conf:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum hif_cca_thr_mode {
+ HIF_CCA_THR_MODE_RELATIVE = 0x0,
+ HIF_CCA_THR_MODE_ABSOLUTE = 0x1
+};
+
+struct hif_mib_gl_cca_config {
+ u8 cca_thr_mode;
+ u8 reserved[3];
+} __packed;
+
+#define MAX_NUMBER_DATA_FILTERS 0xA
+
+#define MAX_NUMBER_IPV4_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_IPV6_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_MAC_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_UC_MC_BC_CONDITIONS 0x4
+#define MAX_NUMBER_ETHER_TYPE_CONDITIONS 0x4
+#define MAX_NUMBER_PORT_CONDITIONS 0x4
+#define MAX_NUMBER_MAGIC_CONDITIONS 0x4
+#define MAX_NUMBER_ARP_CONDITIONS 0x2
+#define MAX_NUMBER_NS_CONDITIONS 0x2
+
+struct hif_mib_ethertype_data_frame_condition {
+ u8 condition_idx;
+ u8 reserved;
+ u16 ether_type;
+} __packed;
+
+enum hif_udp_tcp_protocol {
+ HIF_PROTOCOL_UDP = 0x0,
+ HIF_PROTOCOL_TCP = 0x1,
+ HIF_PROTOCOL_BOTH_UDP_TCP = 0x2
+};
+
+enum hif_which_port {
+ HIF_PORT_DST = 0x0,
+ HIF_PORT_SRC = 0x1,
+ HIF_PORT_SRC_OR_DST = 0x2
+};
+
+struct hif_mib_ports_data_frame_condition {
+ u8 condition_idx;
+ u8 protocol;
+ u8 which_port;
+ u8 reserved1;
+ u16 port_number;
+ u8 reserved2[2];
+} __packed;
+
+#define HIF_API_MAGIC_PATTERN_SIZE 32
+
+struct hif_mib_magic_data_frame_condition {
+ u8 condition_idx;
+ u8 offset;
+ u8 magic_pattern_length;
+ u8 reserved;
+ u8 magic_pattern[HIF_API_MAGIC_PATTERN_SIZE];
+} __packed;
+
+enum hif_mac_addr_type {
+ HIF_MAC_ADDR_A1 = 0x0,
+ HIF_MAC_ADDR_A2 = 0x1,
+ HIF_MAC_ADDR_A3 = 0x2
+};
+
+struct hif_mib_mac_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_type;
+ u8 mac_address[ETH_ALEN];
+} __packed;
+
+enum hif_ip_addr_mode {
+ HIF_IP_ADDR_SRC = 0x0,
+ HIF_IP_ADDR_DST = 0x1
+};
+
+struct hif_mib_ipv4_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_mode;
+ u8 reserved[2];
+ u8 i_pv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_ipv6_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_mode;
+ u8 reserved[2];
+ u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+} __packed;
+
+union hif_addr_type {
+ u8 value;
+ struct {
+ u8 type_unicast:1;
+ u8 type_multicast:1;
+ u8 type_broadcast:1;
+ u8 reserved:5;
+ } bits;
+};
+
+struct hif_mib_uc_mc_bc_data_frame_condition {
+ u8 condition_idx;
+ union hif_addr_type param;
+ u8 reserved[2];
+} __packed;
+
+struct hif_mib_config_data_filter {
+ u8 filter_idx;
+ u8 enable;
+ u8 reserved1[2];
+ u8 eth_type_cond;
+ u8 port_cond;
+ u8 magic_cond;
+ u8 mac_cond;
+ u8 ipv4_cond;
+ u8 ipv6_cond;
+ u8 uc_mc_bc_cond;
+ u8 reserved2;
+} __packed;
+
+struct hif_mib_set_data_filtering {
+ u8 default_filter;
+ u8 enable;
+ u8 reserved[2];
+} __packed;
+
+enum hif_arp_ns_frame_treatment {
+ HIF_ARP_NS_FILTERING_DISABLE = 0x0,
+ HIF_ARP_NS_FILTERING_ENABLE = 0x1,
+ HIF_ARP_NS_REPLY_ENABLE = 0x2
+};
+
+struct hif_mib_arp_ip_addr_table {
+ u8 condition_idx;
+ u8 arp_enable;
+ u8 reserved[2];
+ u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_ns_ip_addr_table {
+ u8 condition_idx;
+ u8 ns_enable;
+ u8 reserved[2];
+ u8 ipv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_rx_filter {
+ u8 reserved1:1;
+ u8 bssid_filter:1;
+ u8 reserved2:1;
+ u8 fwd_probe_req:1;
+ u8 keep_alive_filter:1;
+ u8 reserved3:3;
+ u8 reserved4[3];
+} __packed;
+
+#define HIF_API_OUI_SIZE 3
+#define HIF_API_MATCH_DATA_SIZE 3
+
+struct hif_ie_table_entry {
+ u8 ie_id;
+ u8 has_changed:1;
+ u8 no_longer:1;
+ u8 has_appeared:1;
+ u8 reserved:1;
+ u8 num_match_data:4;
+ u8 oui[HIF_API_OUI_SIZE];
+ u8 match_data[HIF_API_MATCH_DATA_SIZE];
+} __packed;
+
+struct hif_mib_bcn_filter_table {
+ u32 num_of_info_elmts;
+ struct hif_ie_table_entry ie_table[];
+} __packed;
+
+enum hif_beacon_filter {
+ HIF_BEACON_FILTER_DISABLE = 0x0,
+ HIF_BEACON_FILTER_ENABLE = 0x1,
+ HIF_BEACON_FILTER_AUTO_ERP = 0x2
+};
+
+struct hif_mib_bcn_filter_enable {
+ u32 enable;
+ u32 bcn_count;
+} __packed;
+
+struct hif_mib_group_seq_counter {
+ u32 bits4716;
+ u16 bits1500;
+ u16 reserved;
+} __packed;
+
+struct hif_mib_tsf_counter {
+ u32 tsf_counterlo;
+ u32 tsf_counterhi;
+} __packed;
+
+struct hif_mib_stats_table {
+ s16 latest_snr;
+ u8 latest_rcpi;
+ s8 latest_rssi;
+} __packed;
+
+struct hif_mib_extended_count_table {
+ u32 count_plcp_errors;
+ u32 count_fcs_errors;
+ u32 count_tx_packets;
+ u32 count_rx_packets;
+ u32 count_rx_packet_errors;
+ u32 count_rx_decryption_failures;
+ u32 count_rx_mic_failures;
+ u32 count_rx_no_key_failures;
+ u32 count_tx_multicast_frames;
+ u32 count_tx_frames_success;
+ u32 count_tx_frame_failures;
+ u32 count_tx_frames_retried;
+ u32 count_tx_frames_multi_retried;
+ u32 count_rx_frame_duplicates;
+ u32 count_rts_success;
+ u32 count_rts_failures;
+ u32 count_ack_failures;
+ u32 count_rx_multicast_frames;
+ u32 count_rx_frames_success;
+ u32 count_rx_cmacicv_errors;
+ u32 count_rx_cmac_replays;
+ u32 count_rx_mgmt_ccmp_replays;
+ u32 count_rx_bipmic_errors;
+ u32 count_rx_beacon;
+ u32 count_miss_beacon;
+ u32 reserved[15];
+} __packed;
+
+struct hif_mib_count_table {
+ u32 count_plcp_errors;
+ u32 count_fcs_errors;
+ u32 count_tx_packets;
+ u32 count_rx_packets;
+ u32 count_rx_packet_errors;
+ u32 count_rx_decryption_failures;
+ u32 count_rx_mic_failures;
+ u32 count_rx_no_key_failures;
+ u32 count_tx_multicast_frames;
+ u32 count_tx_frames_success;
+ u32 count_tx_frame_failures;
+ u32 count_tx_frames_retried;
+ u32 count_tx_frames_multi_retried;
+ u32 count_rx_frame_duplicates;
+ u32 count_rts_success;
+ u32 count_rts_failures;
+ u32 count_ack_failures;
+ u32 count_rx_multicast_frames;
+ u32 count_rx_frames_success;
+ u32 count_rx_cmacicv_errors;
+ u32 count_rx_cmac_replays;
+ u32 count_rx_mgmt_ccmp_replays;
+ u32 count_rx_bipmic_errors;
+} __packed;
+
+struct hif_mib_max_tx_power_level {
+ s32 max_tx_power_level_rf_port1;
+ s32 max_tx_power_level_rf_port2;
+} __packed;
+
+struct hif_mib_beacon_stats {
+ s32 latest_tbtt_diff;
+ u32 reserved[4];
+} __packed;
+
+struct hif_mib_mac_address {
+ u8 mac_addr[ETH_ALEN];
+ u16 reserved;
+} __packed;
+
+struct hif_mib_dot11_max_transmit_msdu_lifetime {
+ u32 max_life_time;
+} __packed;
+
+struct hif_mib_dot11_max_receive_lifetime {
+ u32 max_life_time;
+} __packed;
+
+struct hif_mib_wep_default_key_id {
+ u8 wep_default_key_id;
+ u8 reserved[3];
+} __packed;
+
+struct hif_mib_dot11_rts_threshold {
+ u32 threshold;
+} __packed;
+
+struct hif_mib_slot_time {
+ u32 slot_time;
+} __packed;
+
+struct hif_mib_current_tx_power_level {
+ s32 power_level;
+} __packed;
+
+struct hif_mib_non_erp_protection {
+ u8 use_cts_to_self:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum hif_tx_mode {
+ HIF_TX_MODE_MIXED = 0x0,
+ HIF_TX_MODE_GREENFIELD = 0x1
+};
+
+enum hif_tmplt {
+ HIF_TMPLT_PRBREQ = 0x0,
+ HIF_TMPLT_BCN = 0x1,
+ HIF_TMPLT_NULL = 0x2,
+ HIF_TMPLT_QOSNUL = 0x3,
+ HIF_TMPLT_PSPOLL = 0x4,
+ HIF_TMPLT_PRBRES = 0x5,
+ HIF_TMPLT_ARP = 0x6,
+ HIF_TMPLT_NA = 0x7
+};
+
+#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
+
+struct hif_mib_template_frame {
+ u8 frame_type;
+ u8 init_rate:7;
+ u8 mode:1;
+ u16 frame_length;
+ u8 frame[HIF_API_MAX_TEMPLATE_FRAME_SIZE];
+} __packed;
+
+struct hif_mib_beacon_wake_up_period {
+ u8 wakeup_period_min;
+ u8 receive_dtim:1;
+ u8 reserved1:7;
+ u8 wakeup_period_max;
+ u8 reserved2;
+} __packed;
+
+struct hif_mib_rcpi_rssi_threshold {
+ u8 detection:1;
+ u8 rcpi_rssi:1;
+ u8 upperthresh:1;
+ u8 lowerthresh:1;
+ u8 reserved:4;
+ u8 lower_threshold;
+ u8 upper_threshold;
+ u8 rolling_average_count;
+} __packed;
+
+#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16
+
+struct hif_mib_block_ack_policy {
+ u8 block_ack_tx_tid_policy;
+ u8 reserved1;
+ u8 block_ack_rx_tid_policy;
+ u8 block_ack_rx_max_buffer_size;
+} __packed;
+
+struct hif_mib_override_int_rate {
+ u8 internal_tx_rate;
+ u8 non_erp_internal_tx_rate;
+ u8 reserved[2];
+} __packed;
+
+enum hif_mpdu_start_spacing {
+ HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
+ HIF_MPDU_START_SPACING_QUARTER = 0x1,
+ HIF_MPDU_START_SPACING_HALF = 0x2,
+ HIF_MPDU_START_SPACING_ONE = 0x3,
+ HIF_MPDU_START_SPACING_TWO = 0x4,
+ HIF_MPDU_START_SPACING_FOUR = 0x5,
+ HIF_MPDU_START_SPACING_EIGHT = 0x6,
+ HIF_MPDU_START_SPACING_SIXTEEN = 0x7
+};
+
+struct hif_mib_set_association_mode {
+ u8 preambtype_use:1;
+ u8 mode:1;
+ u8 rateset:1;
+ u8 spacing:1;
+ u8 reserved:4;
+ u8 preamble_type;
+ u8 mixed_or_greenfield_type;
+ u8 mpdu_start_spacing;
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_mib_set_uapsd_information {
+ u8 trig_bckgrnd:1;
+ u8 trig_be:1;
+ u8 trig_video:1;
+ u8 trig_voice:1;
+ u8 reserved1:4;
+ u8 deliv_bckgrnd:1;
+ u8 deliv_be:1;
+ u8 deliv_video:1;
+ u8 deliv_voice:1;
+ u8 reserved2:4;
+ u16 min_auto_trigger_interval;
+ u16 max_auto_trigger_interval;
+ u16 auto_trigger_step;
+} __packed;
+
+struct hif_mib_tx_rate_retry_policy {
+ u8 policy_index;
+ u8 short_retry_count;
+ u8 long_retry_count;
+ u8 first_rate_sel:2;
+ u8 terminate:1;
+ u8 count_init:1;
+ u8 reserved1:4;
+ u8 rate_recovery_count;
+ u8 reserved2[3];
+ u8 rates[12];
+} __packed;
+
+#define HIF_MIB_NUM_TX_RATE_RETRY_POLICIES 15
+
+struct hif_mib_set_tx_rate_retry_policy {
+ u8 num_tx_rate_policies;
+ u8 reserved[3];
+ struct hif_mib_tx_rate_retry_policy tx_rate_retry_policy[];
+} __packed;
+
+struct hif_mib_protected_mgmt_policy {
+ u8 pmf_enable:1;
+ u8 unpmf_allowed:1;
+ u8 host_enc_auth_frames:1;
+ u8 reserved1:5;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_set_ht_protection {
+ u8 dual_cts_prot:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_keep_alive_period {
+ u16 keep_alive_period;
+ u8 reserved[2];
+} __packed;
+
+struct hif_mib_arp_keep_alive_period {
+ u16 arp_keep_alive_period;
+ u8 encr_type;
+ u8 reserved;
+ u8 sender_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+ u8 target_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_inactivity_timer {
+ u8 min_active_time;
+ u8 max_active_time;
+ u16 reserved;
+} __packed;
+
+struct hif_mib_interface_protection {
+ u8 use_cts_prot:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
new file mode 100644
index 000000000000..820de216be0c
--- /dev/null
+++ b/drivers/staging/wfx/hif_rx.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the chip-to-host events (aka indications) of the WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_rx.h"
+#include "wfx.h"
+#include "scan.h"
+#include "bh.h"
+#include "sta.h"
+#include "data_rx.h"
+#include "secure_link.h"
+#include "hif_api_cmd.h"
+
+static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ // All confirm messages start with status
+ int status = le32_to_cpu(*((__le32 *) buf));
+ int cmd = hif->id;
+ int len = hif->len - 4; // drop header
+
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+
+ if (!wdev->hif_cmd.buf_send) {
+ dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd);
+ return -EINVAL;
+ }
+
+ if (cmd != wdev->hif_cmd.buf_send->id) {
+ dev_warn(wdev->dev,
+ "chip response mismatch request: 0x%.2x vs 0x%.2x\n",
+ cmd, wdev->hif_cmd.buf_send->id);
+ return -EINVAL;
+ }
+
+ if (wdev->hif_cmd.buf_recv) {
+ if (wdev->hif_cmd.len_recv >= len)
+ memcpy(wdev->hif_cmd.buf_recv, buf, len);
+ else
+ status = -ENOMEM;
+ }
+ wdev->hif_cmd.ret = status;
+
+ if (!wdev->hif_cmd.async) {
+ complete(&wdev->hif_cmd.done);
+ } else {
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_unlock(&wdev->hif_cmd.key_renew_lock);
+ }
+ return status;
+}
+
+static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+{
+ struct hif_cnf_tx *body = buf;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ wfx_tx_confirm_cb(wvif, body);
+ return 0;
+}
+
+static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_cnf_multi_transmit *body = buf;
+ struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ int count = body->num_tx_confs;
+ int i;
+
+ WARN(count <= 0, "corrupted message");
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ for (i = 0; i < count; ++i) {
+ wfx_tx_confirm_cb(wvif, buf_loc);
+ buf_loc++;
+ }
+ return 0;
+}
+
+static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_startup *body = buf;
+
+ if (body->status || body->firmware_type > 4) {
+ dev_err(wdev->dev, "received invalid startup indication");
+ return -EINVAL;
+ }
+ memcpy(&wdev->hw_caps, body, sizeof(struct hif_ind_startup));
+ le32_to_cpus(&wdev->hw_caps.status);
+ le16_to_cpus(&wdev->hw_caps.hardware_id);
+ le16_to_cpus(&wdev->hw_caps.num_inp_ch_bufs);
+ le16_to_cpus(&wdev->hw_caps.size_inp_ch_buf);
+
+ complete(&wdev->firmware_ready);
+ return 0;
+}
+
+static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ if (!wdev->pdata.gpio_wakeup
+ || !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
+ dev_warn(wdev->dev, "unexpected wake-up indication\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int hif_keys_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_sl_exchange_pub_keys *body = buf;
+
+ // Compatibility with legacy secure link
+ if (body->status == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ body->status = 0;
+ if (body->status)
+ dev_warn(wdev->dev, "secure link negociation error\n");
+ wfx_sl_check_pubkey(wdev, body->ncp_pub_key, body->ncp_pub_key_mac);
+ return 0;
+}
+
+static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf, struct sk_buff *skb)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_rx *body = buf;
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
+ hif->interface);
+ return 0;
+ }
+ skb_pull(skb, sizeof(struct hif_msg) + sizeof(struct hif_ind_rx));
+ wfx_rx_cb(wvif, body, skb);
+
+ return 0;
+}
+
+static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_event *body = buf;
+ struct wfx_hif_event *event;
+ int first;
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return 0;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ memcpy(&event->evt, body, sizeof(struct hif_ind_event));
+ spin_lock(&wvif->event_queue_lock);
+ first = list_empty(&wvif->event_queue);
+ list_add_tail(&event->link, &wvif->event_queue);
+ spin_unlock(&wvif->event_queue_lock);
+
+ if (first)
+ schedule_work(&wvif->event_handler_work);
+
+ return 0;
+}
+
+static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ complete(&wvif->set_pm_mode_complete);
+
+ return 0;
+}
+
+static int hif_scan_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_scan_cmpl *body = buf;
+
+ WARN_ON(!wvif);
+ wfx_scan_complete_cb(wvif, body);
+
+ return 0;
+}
+
+static int hif_join_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ dev_warn(wdev->dev, "unattended JoinCompleteInd\n");
+
+ return 0;
+}
+
+static int hif_suspend_resume_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_suspend_resume_tx *body = buf;
+
+ WARN_ON(!wvif);
+ wfx_suspend_resume(wvif, body);
+
+ return 0;
+}
+
+static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_error *body = buf;
+ u8 *rollback = (u8 *) body->data;
+ u32 *status = (u32 *) body->data;
+
+ switch (body->type) {
+ case HIF_ERROR_FIRMWARE_ROLLBACK:
+ dev_err(wdev->dev,
+ "asynchronous error: firmware rollback error %d\n",
+ *rollback);
+ break;
+ case HIF_ERROR_FIRMWARE_DEBUG_ENABLED:
+ dev_err(wdev->dev, "asynchronous error: firmware debug feature enabled\n");
+ break;
+ case HIF_ERROR_OUTDATED_SESSION_KEY:
+ dev_err(wdev->dev, "asynchronous error: secure link outdated key: %#.8x\n",
+ *status);
+ break;
+ case HIF_ERROR_INVALID_SESSION_KEY:
+ dev_err(wdev->dev, "asynchronous error: invalid session key\n");
+ break;
+ case HIF_ERROR_OOR_VOLTAGE:
+ dev_err(wdev->dev, "asynchronous error: out-of-range overvoltage: %#.8x\n",
+ *status);
+ break;
+ case HIF_ERROR_PDS_VERSION:
+ dev_err(wdev->dev,
+ "asynchronous error: wrong PDS payload or version: %#.8x\n",
+ *status);
+ break;
+ default:
+ dev_err(wdev->dev, "asynchronous error: unknown (%d)\n",
+ body->type);
+ break;
+ }
+ return 0;
+}
+
+static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_generic *body = buf;
+
+ switch (body->indication_type) {
+ case HIF_GENERIC_INDICATION_TYPE_RAW:
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_STRING:
+ dev_info(wdev->dev, "firmware says: %s\n",
+ (char *) body->indication_data.raw_data);
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
+ mutex_lock(&wdev->rx_stats_lock);
+ // Older firmware sends a generic indication alongside the RxStats
+ if (!wfx_api_older_than(wdev, 1, 4))
+ dev_info(wdev->dev, "Rx test ongoing. Temperature: %d°C\n",
+ body->indication_data.rx_stats.current_temp);
+ memcpy(&wdev->rx_stats, &body->indication_data.rx_stats,
+ sizeof(wdev->rx_stats));
+ mutex_unlock(&wdev->rx_stats_lock);
+ return 0;
+ default:
+ dev_err(wdev->dev,
+ "generic_indication: unknown indication type: %#.8x\n",
+ body->indication_type);
+ return -EIO;
+ }
+}
+
+static int hif_exception_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ size_t len = hif->len - 4; // drop header
+
+ dev_err(wdev->dev, "firmware exception\n");
+ print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
+ wdev->chip_frozen = 1;
+
+ return -1;
+}
+
+static const struct {
+ int msg_id;
+ int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
+} hif_handlers[] = {
+ /* Confirmations */
+ { HIF_CNF_ID_TX, hif_tx_confirm },
+ { HIF_CNF_ID_MULTI_TRANSMIT, hif_multi_tx_confirm },
+ /* Indications */
+ { HIF_IND_ID_STARTUP, hif_startup_indication },
+ { HIF_IND_ID_WAKEUP, hif_wakeup_indication },
+ { HIF_IND_ID_JOIN_COMPLETE, hif_join_complete_indication },
+ { HIF_IND_ID_SET_PM_MODE_CMPL, hif_pm_mode_complete_indication },
+ { HIF_IND_ID_SCAN_CMPL, hif_scan_complete_indication },
+ { HIF_IND_ID_SUSPEND_RESUME_TX, hif_suspend_resume_indication },
+ { HIF_IND_ID_SL_EXCHANGE_PUB_KEYS, hif_keys_indication },
+ { HIF_IND_ID_EVENT, hif_event_indication },
+ { HIF_IND_ID_GENERIC, hif_generic_indication },
+ { HIF_IND_ID_ERROR, hif_error_indication },
+ { HIF_IND_ID_EXCEPTION, hif_exception_indication },
+ // FIXME: allocate skb_p from hif_receive_indication and make it generic
+ //{ HIF_IND_ID_RX, hif_receive_indication },
+};
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ int i;
+ struct hif_msg *hif = (struct hif_msg *) skb->data;
+ int hif_id = hif->id;
+
+ if (hif_id == HIF_IND_ID_RX) {
+ // hif_receive_indication() takes care of the skb lifetime
+ hif_receive_indication(wdev, hif, hif->body, skb);
+ return;
+ }
+ // Note: mutex_is_locked() causes an implicit memory barrier that
+ // protects buf_send
+ if (mutex_is_locked(&wdev->hif_cmd.lock)
+ && wdev->hif_cmd.buf_send
+ && wdev->hif_cmd.buf_send->id == hif_id) {
+ hif_generic_confirm(wdev, hif, hif->body);
+ goto free;
+ }
+ for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) {
+ if (hif_handlers[i].msg_id == hif_id) {
+ if (hif_handlers[i].handler)
+ hif_handlers[i].handler(wdev, hif, hif->body);
+ goto free;
+ }
+ }
+ dev_err(wdev->dev, "unsupported HIF ID %02x\n", hif_id);
+free:
+ dev_kfree_skb(skb);
+}
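+
+/*
+ * Sketch (illustration only, not part of the driver): support for a new
+ * indication is added by writing a handler with the common prototype and
+ * listing it in hif_handlers[] above, e.g. for a hypothetical HIF_IND_ID_FOO:
+ *
+ *   static int hif_foo_indication(struct wfx_dev *wdev,
+ *                                 struct hif_msg *hif, void *buf)
+ *   {
+ *       return 0;
+ *   }
+ *
+ * plus the table entry: { HIF_IND_ID_FOO, hif_foo_indication },
+ */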
diff --git a/drivers/staging/wfx/hif_rx.h b/drivers/staging/wfx/hif_rx.h
new file mode 100644
index 000000000000..f07c10c8c6bd
--- /dev/null
+++ b/drivers/staging/wfx/hif_rx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the chip-to-host events (aka indications) of the WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_RX_H
+#define WFX_HIF_RX_H
+
+struct wfx_dev;
+struct sk_buff;
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
new file mode 100644
index 000000000000..cb7cddcb9815
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the host-to-chip commands (aka requests/confirmations)
+ * of the WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "hwio.h"
+#include "debug.h"
+#include "sta.h"
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
+{
+ init_completion(&hif_cmd->ready);
+ init_completion(&hif_cmd->done);
+ mutex_init(&hif_cmd->lock);
+ mutex_init(&hif_cmd->key_renew_lock);
+}
+
+static void wfx_fill_header(struct hif_msg *hif, int if_id, unsigned int cmd,
+ size_t size)
+{
+ if (if_id == -1)
+ if_id = 2;
+
+ WARN(cmd > 0x3f, "invalid WSM command %#.2x", cmd);
+ WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size);
+ WARN(if_id > 0x3, "invalid interface ID %d", if_id);
+
+ hif->len = cpu_to_le16(size + 4);
+ hif->id = cmd;
+ hif->interface = if_id;
+}
+
+static void *wfx_alloc_hif(size_t body_len, struct hif_msg **hif)
+{
+ *hif = kzalloc(sizeof(struct hif_msg) + body_len, GFP_KERNEL);
+ if (*hif)
+ return (*hif)->body;
+ else
+ return NULL;
+}
+
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
+ size_t reply_len, bool async)
+{
+ const char *mib_name = "";
+ const char *mib_sep = "";
+ int cmd = request->id;
+ int vif = request->interface;
+ int ret;
+
+ WARN(wdev->hif_cmd.buf_recv && wdev->hif_cmd.async, "API usage error");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return -ETIMEDOUT;
+
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_lock(&wdev->hif_cmd.key_renew_lock);
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ WARN(wdev->hif_cmd.buf_send, "data locking error");
+
+ // Note: the call to complete() below has an implicit memory barrier
+ // that hopefully protects buf_send
+ wdev->hif_cmd.buf_send = request;
+ wdev->hif_cmd.buf_recv = reply;
+ wdev->hif_cmd.len_recv = reply_len;
+ wdev->hif_cmd.async = async;
+ complete(&wdev->hif_cmd.ready);
+
+ wfx_bh_request_tx(wdev);
+
+ // NOTE: no timeout is caught when async is enabled
+ if (async)
+ return 0;
+
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
+ if (!ret) {
+ dev_err(wdev->dev, "chip is abnormally long to answer\n");
+ reinit_completion(&wdev->hif_cmd.ready);
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ);
+ }
+ if (!ret) {
+ dev_err(wdev->dev, "chip did not answer\n");
+ wfx_pending_dump_old_frames(wdev, 3000);
+ wdev->chip_frozen = 1;
+ reinit_completion(&wdev->hif_cmd.done);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = wdev->hif_cmd.ret;
+ }
+
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+
+ if (ret &&
+ (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
+ mib_name = get_mib_name(((u16 *) request)[2]);
+ mib_sep = "/";
+ }
+ if (ret < 0)
+ dev_err(wdev->dev,
+ "WSM request %s%s%s (%#.2x) on vif %d returned error %d\n",
+ get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+ if (ret > 0)
+ dev_warn(wdev->dev,
+ "WSM request %s%s%s (%#.2x) on vif %d returned status %d\n",
+ get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_unlock(&wdev->hif_cmd.key_renew_lock);
+ return ret;
+}
+
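+/*
+ * All the hif_*() wrappers below follow the same pattern: allocate the
+ * message with wfx_alloc_hif(), fill the body (swapping multi-byte fields to
+ * little endian), stamp the header with wfx_fill_header(), send it with
+ * wfx_cmd_send() and free it.
+ */
+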
+// This function is special. After HIF_REQ_ID_SHUT_DOWN, the chip won't reply
+// to any request anymore. We need to slightly hack struct wfx_hif_cmd for that
+// job. Be careful to only call this function during device unregister.
+int hif_shutdown(struct wfx_dev *wdev)
+{
+ int ret;
+ struct hif_msg *hif;
+
+ wfx_alloc_hif(0, &hif);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
+ // After this command, the chip won't reply. Be sure to give the bh
+ // enough time to send the buffer:
+ msleep(100);
+ wdev->hif_cmd.buf_send = NULL;
+ if (wdev->pdata.gpio_wakeup)
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+ else
+ control_reg_write(wdev, 0);
+ mutex_unlock(&wdev->hif_cmd.lock);
+ kfree(hif);
+ return ret;
+}
+
+int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
+{
+ int ret;
+ size_t buf_len = sizeof(struct hif_req_configuration) + len;
+ struct hif_msg *hif;
+ struct hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
+
+ body->length = cpu_to_le16(len);
+ memcpy(body->pds_data, conf, len);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_reset(struct wfx_vif *wvif, bool reset_stat)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ body->reset_flags.reset_stat = reset_stat;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
+ size_t val_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_cnf_read_mib) + val_len;
+ struct hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
+ struct hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
+
+ body->mib_id = cpu_to_le16(mib_id);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
+
+ if (!ret && mib_id != reply->mib_id) {
+ dev_warn(wdev->dev,
+ "%s: confirmation mismatch request\n", __func__);
+ ret = -EIO;
+ }
+ if (ret == -ENOMEM)
+ dev_err(wdev->dev,
+ "buffer is too small to receive %s (%zu < %d)\n",
+ get_mib_name(mib_id), val_len, reply->length);
+ if (!ret)
+ memcpy(val, &reply->mib_data, reply->length);
+ else
+ memset(val, 0xFF, val_len);
+ kfree(hif);
+ kfree(reply);
+ return ret;
+}
+
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
+ size_t val_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_req_write_mib) + val_len;
+ struct hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
+
+ body->mib_id = cpu_to_le16(mib_id);
+ body->length = cpu_to_le16(val_len);
+ memcpy(&body->mib_data, val, val_len);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg)
+{
+ int ret, i;
+ struct hif_msg *hif;
+ struct hif_ssid_def *ssids;
+ size_t buf_len = sizeof(struct hif_req_start_scan) +
+ arg->scan_req.num_of_channels * sizeof(u8) +
+ arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
+ struct hif_req_start_scan *body = wfx_alloc_hif(buf_len, &hif);
+ u8 *ptr = (u8 *) body + sizeof(*body);
+
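+ /*
+ * Resulting message layout (variable length):
+ * [struct hif_req_start_scan][num_of_ssi_ds x struct hif_ssid_def]
+ * [num_of_channels x u8]
+ */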
+ WARN(arg->scan_req.num_of_channels > HIF_API_MAX_NB_CHANNELS,
+ "invalid params");
+ WARN(arg->scan_req.num_of_ssi_ds > 2, "invalid params");
+ WARN(arg->scan_req.band > 1, "invalid params");
+
+ // FIXME: This API is unnecessarily complex; fixing NumOfChannels and
+ // adding a SsidDef member at the end of struct hif_req_start_scan
+ // would simplify it a lot.
+ memcpy(body, &arg->scan_req, sizeof(*body));
+ cpu_to_le32s(&body->min_channel_time);
+ cpu_to_le32s(&body->max_channel_time);
+ cpu_to_le32s(&body->tx_power_level);
+ memcpy(ptr, arg->ssids,
+ arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def));
+ ssids = (struct hif_ssid_def *) ptr;
+ for (i = 0; i < body->num_of_ssi_ds; ++i)
+ cpu_to_le32s(&ssids[i].ssid_length);
+ ptr += arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
+ memcpy(ptr, arg->ch, arg->scan_req.num_of_channels * sizeof(u8));
+ ptr += arg->scan_req.num_of_channels * sizeof(u8);
+ WARN(buf_len != ptr - (u8 *) body, "allocation size mismatch");
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_stop_scan(struct wfx_vif *wvif)
+{
+ int ret;
+ struct hif_msg *hif;
+ // body associated to HIF_REQ_ID_STOP_SCAN is empty
+ wfx_alloc_hif(0, &hif);
+
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(struct hif_req_join));
+ cpu_to_le16s(&body->channel_number);
+ cpu_to_le16s(&body->atim_window);
+ cpu_to_le32s(&body->ssid_length);
+ cpu_to_le32s(&body->beacon_interval);
+ cpu_to_le32s(&body->basic_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_bss_params(struct wfx_vif *wvif,
+ const struct hif_req_set_bss_params *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->aid);
+ cpu_to_le32s(&body->operational_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ // FIXME: only send necessary bits
+ struct hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ // FIXME: swap bytes as necessary in body
+ memcpy(body, arg, sizeof(*body));
+ if (wfx_api_older_than(wdev, 1, 5))
+ // Legacy firmware expects add_key to be sent on the right
+ // interface.
+ wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY,
+ sizeof(*body));
+ else
+ wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_remove_key(struct wfx_dev *wdev, int idx)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ body->entry_index = idx;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_edca_queue_params(struct wfx_vif *wvif,
+ const struct hif_req_edca_queue_params *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ // NOTE: queue numbering differs between WFx and Linux
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->cw_min);
+ cpu_to_le16s(&body->cw_max);
+ cpu_to_le16s(&body->tx_op_limit);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->channel_number);
+ cpu_to_le32s(&body->beacon_interval);
+ cpu_to_le32s(&body->basic_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable_beaconing)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ body->enable_beaconing = enable_beaconing ? 1 : 0;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (mac_addr)
+ ether_addr_copy(body->mac_addr, mac_addr);
+ body->map_link_flags = *(struct hif_map_link_flags *) &flags;
+ body->peer_sta_id = sta_id;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
+ const u8 *ies, size_t ies_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
+ struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
+
+ memcpy(&body->ie_flags, target_frame, sizeof(struct hif_ie_flags));
+ body->num_i_es = cpu_to_le16(1);
+ memcpy(body->ie, ies, ies_len);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_sl_send_pub_keys(struct wfx_dev *wdev, const u8 *pubkey,
+ const u8 *pubkey_hmac)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_sl_exchange_pub_keys *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ body->algorithm = HIF_SL_CURVE25519;
+ memcpy(body->host_pub_key, pubkey, sizeof(body->host_pub_key));
+ memcpy(body->host_pub_key_mac, pubkey_hmac,
+ sizeof(body->host_pub_key_mac));
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ // Compatibility with legacy secure link
+ if (ret == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ ret = 0;
+ return ret;
+}
+
+int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_sl_configure *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body->encr_bmp, bitmap, sizeof(body->encr_bmp));
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SL_CONFIGURE, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
+ int destination)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_sl_mac_key *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ memcpy(body->key_value, slk_key, sizeof(body->key_value));
+ body->otp_or_ram = destination;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SET_SL_MAC_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ // Compatibility with legacy secure link
+ if (ret == SL_MAC_KEY_STATUS_SUCCESS)
+ ret = 0;
+ return ret;
+}
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
new file mode 100644
index 000000000000..f61ae7b0d41c
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the host-to-chip commands (aka requests/confirmations)
+ * of the WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_H
+#define WFX_HIF_TX_H
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_scan_params {
+ struct hif_req_start_scan scan_req;
+ struct hif_ssid_def *ssids;
+ u8 *ch;
+};
+
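+/*
+ * Context of the single in-flight host-to-chip command. wfx_cmd_send() takes
+ * @lock, installs @buf_send, signals @ready so the bottom half sends the
+ * message, then waits on @done for the confirmation; the reply is copied to
+ * @buf_recv and the status is stored in @ret.
+ */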
+struct wfx_hif_cmd {
+ struct mutex lock;
+ struct mutex key_renew_lock;
+ struct completion ready;
+ struct completion done;
+ bool async;
+ struct hif_msg *buf_send;
+ void *buf_recv;
+ size_t len_recv;
+ int ret;
+};
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *wfx_hif_cmd);
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
+ void *reply, size_t reply_len, bool async);
+
+int hif_shutdown(struct wfx_dev *wdev);
+int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len);
+int hif_reset(struct wfx_vif *wvif, bool reset_stat);
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *buf, size_t buf_size);
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *buf, size_t buf_size);
+int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg);
+int hif_stop_scan(struct wfx_vif *wvif);
+int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg);
+int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int hif_set_bss_params(struct wfx_vif *wvif,
+ const struct hif_req_set_bss_params *arg);
+int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
+int hif_remove_key(struct wfx_dev *wdev, int idx);
+int hif_set_edca_queue_params(struct wfx_vif *wvif,
+ const struct hif_req_edca_queue_params *arg);
+int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg);
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
+int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
+int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
+ const u8 *ies, size_t ies_len);
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
+ int destination);
+int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
+int hif_sl_send_pub_keys(struct wfx_dev *wdev,
+ const u8 *pubkey, const u8 *pubkey_hmac);
+
+#endif
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
new file mode 100644
index 000000000000..bb091e395ff5
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the host-to-chip MIBs of the WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_MIB_H
+#define WFX_HIF_TX_MIB_H
+
+#include <linux/etherdevice.h>
+
+#include "wfx.h"
+#include "hif_tx.h"
+#include "hif_api_mib.h"
+
+static inline int hif_set_output_power(struct wfx_vif *wvif, int power_level)
+{
+ __le32 val = cpu_to_le32(power_level);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval)
+{
+ struct hif_mib_beacon_wake_up_period val = {
+ .wakeup_period_min = dtim_interval,
+ .receive_dtim = 0,
+ .wakeup_period_max = listen_interval,
+ };
+
+ // both fields are u8, so reject out-of-range values
+ if (dtim_interval > 0xFF || listen_interval > 0xFF)
+ return -EINVAL;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ struct hif_mib_rcpi_rssi_threshold *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, arg, sizeof(*arg));
+}
+
+static inline int hif_get_counters_table(struct wfx_dev *wdev,
+ struct hif_mib_extended_count_table *arg)
+{
+ if (wfx_api_older_than(wdev, 1, 3)) {
+ // extended_count_table is wider than count_table
+ memset(arg, 0xFF, sizeof(*arg));
+ return hif_read_mib(wdev, 0, HIF_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(struct hif_mib_count_table));
+ } else {
+ return hif_read_mib(wdev, 0,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
+ sizeof(struct hif_mib_extended_count_table));
+ }
+}
+
+static inline int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
+{
+ struct hif_mib_mac_address msg = { };
+
+ if (mac)
+ ether_addr_copy(msg.mac_addr, mac);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
+ &msg, sizeof(msg));
+}
+
+static inline int hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid,
+ bool fwd_probe_req)
+{
+ struct hif_mib_rx_filter val = { };
+
+ if (filter_bssid)
+ val.bssid_filter = 1;
+ if (fwd_probe_req)
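+ /*
+ * In the synchronous case, wfx_cmd_send() still owns the request and
+ * releases the locks itself; in the asynchronous case nobody waits
+ * for the confirmation, so clean up here.
+ */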
+ val.fwd_probe_req = 1;
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_beacon_filter_table(struct wfx_vif *wvif,
+ int tbl_len,
+ struct hif_ie_table_entry *tbl)
+{
+ int ret;
+ struct hif_mib_bcn_filter_table *val;
+ int buf_len = struct_size(val, ie_table, tbl_len);
+
+ val = kzalloc(buf_len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ val->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
+ kfree(val);
+ return ret;
+}
+
+static inline int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count)
+{
+ struct hif_mib_bcn_filter_enable arg = {
+ .enable = cpu_to_le32(enable),
+ .bcn_count = cpu_to_le32(beacon_count),
+ };
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_operational_mode(struct wfx_dev *wdev,
+ enum hif_op_power_mode mode)
+{
+ struct hif_mib_gl_operational_power_mode val = {
+ .power_mode = mode,
+ .wup_ind_activation = 1,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_template_frame(struct wfx_vif *wvif,
+ struct hif_mib_template_frame *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
+{
+ struct hif_mib_protected_mgmt_policy val = { };
+
+ WARN(required && !capable, "incoherent arguments");
+ if (capable) {
+ val.pmf_enable = 1;
+ val.host_enc_auth_frames = 1;
+ }
+ if (!required)
+ val.unpmf_allowed = 1;
+ cpu_to_le32s((u32 *) &val);
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy)
+{
+ struct hif_mib_block_ack_policy val = {
+ .block_ack_tx_tid_policy = tx_tid_policy,
+ .block_ack_rx_tid_policy = rx_tid_policy,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_association_mode(struct wfx_vif *wvif,
+ struct hif_mib_set_association_mode *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, arg, sizeof(*arg));
+}
+
+static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ struct hif_mib_set_tx_rate_retry_policy *arg)
+{
+ size_t size = struct_size(arg, tx_rate_retry_policy,
+ arg->num_tx_rate_policies);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+}
+
+static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ struct hif_mib_mac_addr_data_frame_condition *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
+ struct hif_mib_uc_mc_bc_data_frame_condition *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_config_data_filter(struct wfx_vif *wvif,
+ struct hif_mib_config_data_filter *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CONFIG_DATA_FILTER, arg, sizeof(*arg));
+}
+
+static inline int hif_set_data_filtering(struct wfx_vif *wvif,
+ struct hif_mib_set_data_filtering *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_DATA_FILTERING, arg, sizeof(*arg));
+}
+
+static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
+{
+ struct hif_mib_keep_alive_period arg = {
+ .keep_alive_period = cpu_to_le16(period),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif,
+ struct hif_mib_arp_ip_addr_table *fp)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev,
+ bool enabled)
+{
+ __le32 arg = enabled ? cpu_to_le32(1) : 0;
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_uapsd_info(struct wfx_vif *wvif,
+ struct hif_mib_set_uapsd_information *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
+{
+ __le32 arg = enable ? cpu_to_le32(1) : 0;
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
+}
+
+static inline int hif_slot_time(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val);
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
+{
+ struct hif_mib_set_ht_protection arg = {
+ .dual_cts_prot = val,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val > 0 ? val : 0xFFFF);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
+}
+
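+/*
+ * Usage sketch (hypothetical caller): the helpers above are thin wrappers
+ * around hif_write_mib()/hif_read_mib(), so configuring a vif might look
+ * like:
+ *
+ *   hif_slot_time(wvif, 9);
+ *   hif_erp_use_protection(wvif, false);
+ *   hif_rts_threshold(wvif, -1); // values <= 0 disable RTS (0xFFFF)
+ */
+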
+#endif
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
new file mode 100644
index 000000000000..47e04c59ed93
--- /dev/null
+++ b/drivers/staging/wfx/hwio.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level I/O functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "hwio.h"
+#include "wfx.h"
+#include "bus.h"
+#include "traces.h"
+
+/*
+ * Internal helpers.
+ *
+ * About CONFIG_VMAP_STACK:
+ * When CONFIG_VMAP_STACK is enabled, it is not possible to run DMA on stack
+ * allocated data. Functions below that work with registers (aka functions
+ * ending with "32") automatically reallocate buffers with kmalloc. However,
+ * functions that work with arbitrary length buffers leave it to the caller
+ * to handle the memory location. If in doubt, enable CONFIG_DEBUG_SG to
+ * detect badly located buffers.
+ */
+
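+/*
+ * Illustration of the constraint above (hypothetical caller): a kmalloc'ed
+ * buffer is DMA-safe,
+ *
+ *   buf = kmalloc(len, GFP_KERNEL);
+ *   sram_buf_read(wdev, addr, buf, len);
+ *
+ * while a buffer on the stack may not be when CONFIG_VMAP_STACK is enabled:
+ *
+ *   u8 buf[64];
+ *   sram_buf_read(wdev, addr, buf, sizeof(buf)); // may fail
+ */
+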
+static int read32(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ *val = ~0; // Never return undefined value
+ if (!tmp)
+ return -ENOMEM;
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp,
+ sizeof(u32));
+ if (ret >= 0)
+ *val = le32_to_cpu(*tmp);
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int write32(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp,
+ sizeof(u32));
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int read32_locked(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = read32(wdev, reg, val);
+ _trace_io_read32(reg, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int write32_locked(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = write32(wdev, reg, val);
+ _trace_io_write32(reg, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val)
+{
+ int ret;
+ u32 val_r, val_w;
+
+ WARN_ON(~mask & val);
+ val &= mask;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = read32(wdev, reg, &val_r);
+ _trace_io_read32(reg, val_r);
+ if (ret < 0)
+ goto err;
+ val_w = (val_r & ~mask) | val;
+ if (val_w != val_r) {
+ ret = write32(wdev, reg, val_w);
+ _trace_io_write32(reg, val_w);
+ }
+err:
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
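+
+/*
+ * Example (hypothetical): since write32_bits_locked() is a read-modify-write,
+ * setting the data IRQ enable bit without touching the other CONFIG bits
+ * reduces to:
+ *
+ *   config_reg_write_bits(wdev, CFG_IRQ_ENABLE_DATA, CFG_IRQ_ENABLE_DATA);
+ */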
+
+static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf,
+ size_t len)
+{
+ int ret;
+ int i;
+ u32 cfg;
+ u32 prefetch;
+
+ WARN_ON(len >= 0x2000);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+
+ if (reg == WFX_REG_AHB_DPORT)
+ prefetch = CFG_PREFETCH_AHB;
+ else if (reg == WFX_REG_SRAM_DPORT)
+ prefetch = CFG_PREFETCH_SRAM;
+ else
+ return -ENODEV;
+
+ ret = write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ goto err;
+
+ ret = read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+
+ ret = write32(wdev, WFX_REG_CONFIG, cfg | prefetch);
+ if (ret < 0)
+ goto err;
+
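+ /* The device clears the prefetch bit once the data is ready */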
+ for (i = 0; i < 20; i++) {
+ ret = read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+ if (!(cfg & prefetch))
+ break;
+ udelay(200);
+ }
+ if (i == 20) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len);
+
+err:
+ if (ret < 0)
+ memset(buf, 0xFF, len); // Never return undefined value
+ return ret;
+}
+
+static int indirect_write(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ WARN_ON(len >= 0x2000);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+ ret = write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ return ret;
+
+ return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len);
+}
+
+static int indirect_read_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_read(wdev, reg, addr, buf, len);
+ _trace_io_ind_read(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_write(wdev, reg, addr, buf, len);
+ _trace_io_ind_write(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_read(wdev, reg, addr, tmp, sizeof(u32));
+ *val = le32_to_cpu(*tmp);
+ _trace_io_ind_read32(reg, addr, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+static int indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_write(wdev, reg, addr, tmp, sizeof(u32));
+ _trace_io_ind_write32(reg, addr, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
+{
+ int ret;
+
+ WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv,
+ WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
+{
+ int ret;
+
+ WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv,
+ WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+int sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int config_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return read32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int config_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val);
+}
+
+int control_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return read32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int control_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val);
+}
+
+int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val)
+{
+ int ret;
+
+ *val = ~0; // Never return undefined value
+ ret = write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24);
+ if (ret)
+ return ret;
+ ret = read32_locked(wdev, WFX_REG_SET_GEN_R_W, val);
+ if (ret)
+ return ret;
+ *val &= IGPR_VALUE;
+ return ret;
+}
+
+int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val);
+}
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
new file mode 100644
index 000000000000..b2c1a66de963
--- /dev/null
+++ b/drivers/staging/wfx/hwio.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low-level API.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_HWIO_H
+#define WFX_HWIO_H
+
+#include <linux/types.h>
+
+struct wfx_dev;
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len);
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len);
+
+int sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+#define CFG_ERR_SPI_FRAME 0x00000001 // only with SPI
+#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 // only with SDIO
+#define CFG_ERR_BUF_UNDERRUN 0x00000002
+#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004
+#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008
+#define CFG_ERR_BUF_OVERRUN 0x00000010
+#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020
+#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
+#define CFG_ERR_HOST_CRC_MISS 0x00000080 // only with SDIO
+#define CFG_SPI_IGNORE_CS 0x00000080 // only with SPI
+/* Byte ordering (only writable over SPI): */
+#define CFG_WORD_MODE_MASK 0x00000300
+/*
+ * B1,B0,B3,B2 (In SPI, register address and
+ * CONFIG data always use this mode)
+ */
+#define CFG_WORD_MODE0 0x00000000
+#define CFG_WORD_MODE1 0x00000100 // B3,B2,B1,B0
+#define CFG_WORD_MODE2 0x00000200 // B0,B1,B2,B3 (SDIO)
+#define CFG_DIRECT_ACCESS_MODE 0x00000400 // Direct or queue access mode
+#define CFG_PREFETCH_AHB 0x00000800
+#define CFG_DISABLE_CPU_CLK 0x00001000
+#define CFG_PREFETCH_SRAM 0x00002000
+#define CFG_CPU_RESET 0x00004000
+#define CFG_SDIO_DISABLE_IRQ 0x00008000 // only with SDIO
+#define CFG_IRQ_ENABLE_DATA 0x00010000
+#define CFG_IRQ_ENABLE_WRDY 0x00020000
+#define CFG_CLK_RISE_EDGE 0x00040000
+#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 // only with SDIO
+#define CFG_RESERVED 0x00F00000
+#define CFG_DEVICE_ID_MAJOR 0x07000000
+#define CFG_DEVICE_ID_RESERVED 0x78000000
+#define CFG_DEVICE_ID_TYPE 0x80000000
+int config_reg_read(struct wfx_dev *wdev, u32 *val);
+int config_reg_write(struct wfx_dev *wdev, u32 val);
+int config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
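+/*
+ * Example (hypothetical): select the B0,B1,B2,B3 byte ordering without
+ * touching the other CONFIG bits:
+ *
+ *   config_reg_write_bits(wdev, CFG_WORD_MODE_MASK, CFG_WORD_MODE2);
+ */
+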
+#define CTRL_NEXT_LEN_MASK 0x00000FFF
+#define CTRL_WLAN_WAKEUP 0x00001000
+#define CTRL_WLAN_READY 0x00002000
+int control_reg_read(struct wfx_dev *wdev, u32 *val);
+int control_reg_write(struct wfx_dev *wdev, u32 val);
+int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define IGPR_RW 0x80000000
+#define IGPR_INDEX 0x7F000000
+#define IGPR_VALUE 0x00FFFFFF
+int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val);
+int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val);
+
+#endif /* WFX_HWIO_H */
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
new file mode 100644
index 000000000000..96adfa330604
--- /dev/null
+++ b/drivers/staging/wfx/key.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Key management related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "key.h"
+#include "wfx.h"
+#include "hif_tx_mib.h"
+
+static int wfx_alloc_key(struct wfx_dev *wdev)
+{
+ int idx;
+
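+ /* Find the first free slot in the key_map bitmap */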
+ idx = ffs(~wdev->key_map) - 1;
+ if (idx < 0 || idx >= MAX_KEY_ENTRIES)
+ return -1;
+
+ wdev->key_map |= BIT(idx);
+ wdev->keys[idx].entry_index = idx;
+ return idx;
+}
+
+static void wfx_free_key(struct wfx_dev *wdev, int idx)
+{
+ WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
+ memset(&wdev->keys[idx], 0, sizeof(wdev->keys[idx]));
+ wdev->key_map &= ~BIT(idx);
+}
+
+static u8 fill_wep_pair(struct hif_wep_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_WEP_PAIRWISE;
+}
+
+static u8 fill_wep_group(struct hif_wep_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_id = key->keyidx;
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_WEP_DEFAULT;
+}
+
+static u8 fill_tkip_pair(struct hif_tkip_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data)
+ + sizeof(msg->tx_mic_key)
+ + sizeof(msg->rx_mic_key), "inconsistent data");
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key));
+ keybuf += sizeof(msg->tx_mic_key);
+ memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key));
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_TKIP_PAIRWISE;
+}
+
+static u8 fill_tkip_group(struct hif_tkip_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq,
+ enum nl80211_iftype iftype)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data)
+ + 2 * sizeof(msg->rx_mic_key), "inconsistent data");
+ msg->key_id = key->keyidx;
+ memcpy(msg->rx_sequence_counter,
+ &seq->tkip.iv16, sizeof(seq->tkip.iv16));
+ memcpy(msg->rx_sequence_counter + sizeof(u16),
+ &seq->tkip.iv32, sizeof(seq->tkip.iv32));
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ if (iftype == NL80211_IFTYPE_AP)
+ // Use Tx MIC Key
+ memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key));
+ else
+ // Use Rx MIC Key
+ memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key));
+ return HIF_KEY_TYPE_TKIP_GROUP;
+}
+
+static u8 fill_ccmp_pair(struct hif_aes_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_AES_PAIRWISE;
+}
+
+static u8 fill_ccmp_group(struct hif_aes_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn));
+ memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_AES_GROUP;
+}
+
+static u8 fill_sms4_pair(struct hif_wapi_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data)
+ + sizeof(msg->mic_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_PAIRWISE;
+}
+
+static u8 fill_sms4_group(struct hif_wapi_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data)
+ + sizeof(msg->mic_key_data), "inconsistent data");
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_GROUP;
+}
+
+static u8 fill_aes_cmac_group(struct hif_igtk_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data");
+ memcpy(msg->igtk_key_data, key->key, key->keylen);
+ memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn));
+ memreverse(msg->ipn, sizeof(seq->aes_cmac.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_IGTK_GROUP;
+}
+
+static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret;
+ struct hif_req_add_key *k;
+ struct ieee80211_key_seq seq;
+ struct wfx_dev *wdev = wvif->wdev;
+ int idx = wfx_alloc_key(wvif->wdev);
+ bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+
+ WARN(pairwise && !sta, "inconsistent data");
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+ if (idx < 0)
+ return -EINVAL;
+ k = &wdev->keys[idx];
+ k->int_id = wvif->id;
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ if (pairwise)
+ k->type = fill_wep_pair(&k->key.wep_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_wep_group(&k->key.wep_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ if (pairwise)
+ k->type = fill_tkip_pair(&k->key.tkip_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_tkip_group(&k->key.tkip_group_key, key,
+ &seq, wvif->vif->type);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
+ if (pairwise)
+ k->type = fill_ccmp_pair(&k->key.aes_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_ccmp_group(&k->key.aes_group_key, key,
+ &seq);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
+ if (pairwise)
+ k->type = fill_sms4_pair(&k->key.wapi_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_sms4_group(&k->key.wapi_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ k->type = fill_aes_cmac_group(&k->key.igtk_group_key, key,
+ &seq);
+ } else {
+ dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ ret = hif_add_key(wdev, k);
+ if (ret) {
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+ key->hw_key_idx = idx;
+ return 0;
+}
+
+static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key)
+{
+ WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx");
+ wfx_free_key(wvif->wdev, key->hw_key_idx);
+ return hif_remove_key(wvif->wdev, key->hw_key_idx);
+}
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret = -EOPNOTSUPP;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ if (cmd == SET_KEY)
+ ret = wfx_add_key(wvif, sta, key);
+ if (cmd == DISABLE_KEY)
+ ret = wfx_remove_key(wvif, key);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return ret;
+}
+
+int wfx_upload_keys(struct wfx_vif *wvif)
+{
+ int i;
+ struct hif_req_add_key *key;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->keys); i++) {
+ if (wdev->key_map & BIT(i)) {
+ key = &wdev->keys[i];
+ if (key->int_id == wvif->id)
+ hif_add_key(wdev, key);
+ }
+ }
+ return 0;
+}
+
+void wfx_wep_key_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, wep_key_work);
+
+ wfx_tx_flush(wvif->wdev);
+ hif_wep_default_key_id(wvif, wvif->wep_default_key_id);
+ wfx_pending_requeue(wvif->wdev, wvif->wep_pending_skb);
+ wvif->wep_pending_skb = NULL;
+ wfx_tx_unlock(wvif->wdev);
+}
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
new file mode 100644
index 000000000000..9436ccdf4d3b
--- /dev/null
+++ b/drivers/staging/wfx/key.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_KEY_H
+#define WFX_KEY_H
+
+#include <net/mac80211.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int wfx_upload_keys(struct wfx_vif *wvif);
+void wfx_wep_key_work(struct work_struct *work);
+
+#endif /* WFX_KEY_H */
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
new file mode 100644
index 000000000000..986a2ef678b9
--- /dev/null
+++ b/drivers/staging/wfx/main.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+
+#include "main.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bus.h"
+#include "bh.h"
+#include "sta.h"
+#include "key.h"
+#include "debug.h"
+#include "data_tx.h"
+#include "secure_link.h"
+#include "hif_tx_mib.h"
+#include "hif_api_cmd.h"
+
+#define WFX_PDS_MAX_SIZE 1500
+
+MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WFx");
+MODULE_AUTHOR("Jérôme Pouiller <jerome.pouiller@silabs.com>");
+MODULE_LICENSE("GPL");
+
+static int gpio_wakeup = -2;
+module_param(gpio_wakeup, int, 0644);
+MODULE_PARM_DESC(gpio_wakeup, "gpio number for wakeup. -1 for none.");
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+}
+
+static struct ieee80211_rate wfx_rates[] = {
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(60, 6, 0),
+ RATETAB_ENT(90, 7, 0),
+ RATETAB_ENT(120, 8, 0),
+ RATETAB_ENT(180, 9, 0),
+ RATETAB_ENT(240, 10, 0),
+ RATETAB_ENT(360, 11, 0),
+ RATETAB_ENT(480, 12, 0),
+ RATETAB_ENT(540, 13, 0),
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_channel wfx_2ghz_chantable[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_supported_band wfx_band_2ghz = {
+ .channels = wfx_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(wfx_2ghz_chantable),
+ .bitrates = wfx_rates,
+ .n_bitrates = ARRAY_SIZE(wfx_rates),
+ .ht_cap = {
+ // Receive caps
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask = { 0xFF }, // MCS0 to MCS7
+ .rx_highest = 65,
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static const struct ieee80211_iface_limit wdev_iface_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 2,
+ .limits = wdev_iface_limits,
+ .n_limits = ARRAY_SIZE(wdev_iface_limits),
+ }
+};
+
+static const struct ieee80211_ops wfx_ops = {
+ .start = wfx_start,
+ .stop = wfx_stop,
+ .add_interface = wfx_add_interface,
+ .remove_interface = wfx_remove_interface,
+ .config = wfx_config,
+ .tx = wfx_tx,
+ .conf_tx = wfx_conf_tx,
+ .hw_scan = wfx_hw_scan,
+ .sta_add = wfx_sta_add,
+ .sta_remove = wfx_sta_remove,
+ .sta_notify = wfx_sta_notify,
+ .set_tim = wfx_set_tim,
+ .set_key = wfx_set_key,
+ .set_rts_threshold = wfx_set_rts_threshold,
+ .bss_info_changed = wfx_bss_info_changed,
+ .prepare_multicast = wfx_prepare_multicast,
+ .configure_filter = wfx_configure_filter,
+ .ampdu_action = wfx_ampdu_action,
+ .flush = wfx_flush,
+ .add_chanctx = wfx_add_chanctx,
+ .remove_chanctx = wfx_remove_chanctx,
+ .change_chanctx = wfx_change_chanctx,
+ .assign_vif_chanctx = wfx_assign_vif_chanctx,
+ .unassign_vif_chanctx = wfx_unassign_vif_chanctx,
+};
+
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
+{
+ if (wdev->hw_caps.api_version_major < major)
+ return true;
+ if (wdev->hw_caps.api_version_major > major)
+ return false;
+ if (wdev->hw_caps.api_version_minor < minor)
+ return true;
+ return false;
+}
+
+struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
+ const char *label)
+{
+ struct gpio_desc *ret;
+ char label_buf[256];
+
+ if (override >= 0) {
+ snprintf(label_buf, sizeof(label_buf), "wfx_%s", label);
+ ret = ERR_PTR(devm_gpio_request_one(dev, override,
+ GPIOF_OUT_INIT_LOW,
+ label_buf));
+ if (!ret)
+ ret = gpio_to_desc(override);
+ } else if (override == -1) {
+ ret = NULL;
+ } else {
+ ret = devm_gpiod_get(dev, label, GPIOD_OUT_LOW);
+ }
+ if (IS_ERR(ret) || !ret) {
+ if (!ret || PTR_ERR(ret) == -ENOENT)
+ dev_warn(dev, "gpio %s is not defined\n", label);
+ else
+ dev_warn(dev,
+ "error while requesting gpio %s\n", label);
+ ret = NULL;
+ } else {
+ dev_dbg(dev,
+ "using gpio %d for %s\n", desc_to_gpio(ret), label);
+ }
+ return ret;
+}
+
+/* NOTE: wfx_send_pds() destroys buf */
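+/*
+ * Illustrative example (assumed PDS syntax): a compressed PDS string
+ * such as "{a:{b:1},c:{d:2}}" is split on its top-level closing braces
+ * and sent as two self-contained chunks, "{a:{b:1}}" then "{c:{d:2}}",
+ * each of which must fit within WFX_PDS_MAX_SIZE.
+ */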
+int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
+{
+ int ret;
+ int start, brace_level, i;
+
+ start = 0;
+ brace_level = 0;
+ if (buf[0] != '{') {
+ dev_err(wdev->dev, "valid PDS start with '{'. Did you forget to compress it?\n");
+ return -EINVAL;
+ }
+ for (i = 1; i < len - 1; i++) {
+ if (buf[i] == '{')
+ brace_level++;
+ if (buf[i] == '}')
+ brace_level--;
+ if (buf[i] == '}' && !brace_level) {
+ i++;
+ if (i - start + 1 > WFX_PDS_MAX_SIZE)
+ return -EFBIG;
+ buf[start] = '{';
+ buf[i] = 0;
+ dev_dbg(wdev->dev, "send PDS '%s}'\n", buf + start);
+ buf[i] = '}';
+ ret = hif_configuration(wdev, buf + start,
+ i - start + 1);
+ if (ret == HIF_STATUS_FAILURE) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n", start, i);
+ return -EINVAL;
+ }
+ if (ret == -ETIMEDOUT) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n", start, i);
+ return ret;
+ }
+ if (ret) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n", start, i);
+ return -EIO;
+ }
+ buf[i] = ',';
+ start = i;
+ }
+ }
+ return 0;
+}
+
+static int wfx_send_pdata_pds(struct wfx_dev *wdev)
+{
+ int ret = 0;
+ const struct firmware *pds;
+ unsigned char *tmp_buf;
+
+ ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load PDS file %s\n",
+ wdev->pdata.file_pds);
+ return ret;
+ }
+ tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL);
+ if (!tmp_buf) {
+ release_firmware(pds);
+ return -ENOMEM;
+ }
+ ret = wfx_send_pds(wdev, tmp_buf, pds->size);
+ kfree(tmp_buf);
+ release_firmware(pds);
+ return ret;
+}
+
+struct wfx_dev *wfx_init_common(struct device *dev,
+ const struct wfx_platform_data *pdata,
+ const struct hwbus_ops *hwbus_ops,
+ void *hwbus_priv)
+{
+ struct ieee80211_hw *hw;
+ struct wfx_dev *wdev;
+
+ hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops);
+ if (!hw)
+ return NULL;
+
+ SET_IEEE80211_DEV(hw, dev);
+
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->vif_data_size = sizeof(struct wfx_vif);
+ hw->sta_data_size = sizeof(struct wfx_sta_priv);
+ hw->queues = 4;
+ hw->max_rates = 8;
+ hw->max_rate_tries = 15;
+ hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) +
+ sizeof(struct hif_msg)
+ + sizeof(struct hif_req_tx)
+ + 4 /* alignment */ + 8 /* TKIP IV */;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
+ hw->wiphy->max_scan_ssids = 2;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
+ hw->wiphy->iface_combinations = wfx_iface_combinations;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL);
+ if (!hw->wiphy->bands[NL80211_BAND_2GHZ]) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+ // FIXME: also copy wfx_rates and wfx_2ghz_chantable
+ memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz,
+ sizeof(wfx_band_2ghz));
+
+ wdev = hw->priv;
+ wdev->hw = hw;
+ wdev->dev = dev;
+ wdev->hwbus_ops = hwbus_ops;
+ wdev->hwbus_priv = hwbus_priv;
+ memcpy(&wdev->pdata, pdata, sizeof(*pdata));
+ of_property_read_string(dev->of_node, "config-file",
+ &wdev->pdata.file_pds);
+ wdev->pdata.gpio_wakeup = wfx_get_gpio(dev, gpio_wakeup, "wakeup");
+ wfx_sl_fill_pdata(dev, &wdev->pdata);
+
+ mutex_init(&wdev->conf_mutex);
+ mutex_init(&wdev->rx_stats_lock);
+ init_completion(&wdev->firmware_ready);
+ wfx_init_hif_cmd(&wdev->hif_cmd);
+ wfx_tx_queues_init(wdev);
+
+ return wdev;
+}
+
+void wfx_free_common(struct wfx_dev *wdev)
+{
+ mutex_destroy(&wdev->rx_stats_lock);
+ mutex_destroy(&wdev->conf_mutex);
+ wfx_tx_queues_deinit(wdev);
+ ieee80211_free_hw(wdev->hw);
+}
+
+int wfx_probe(struct wfx_dev *wdev)
+{
+ int i;
+ int err;
+ const void *macaddr;
+ struct gpio_desc *gpio_saved;
+
+ // During the first part of boot, gpio_wakeup cannot be used yet, so
+ // prevent bh() from touching it.
+ gpio_saved = wdev->pdata.gpio_wakeup;
+ wdev->pdata.gpio_wakeup = NULL;
+
+ wfx_bh_register(wdev);
+
+ err = wfx_init_device(wdev);
+ if (err)
+ goto err1;
+
+ err = wait_for_completion_interruptible_timeout(&wdev->firmware_ready,
+ 10 * HZ);
+ if (err <= 0) {
+ if (err == 0) {
+ dev_err(wdev->dev, "timeout while waiting for startup indication. IRQ configuration error?\n");
+ err = -ETIMEDOUT;
+ } else if (err == -ERESTARTSYS) {
+ dev_info(wdev->dev, "probe interrupted by user\n");
+ }
+ goto err1;
+ }
+
+ // FIXME: fill wiphy::hw_version
+ dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
+ wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
+ wdev->hw_caps.api_version_major,
+ wdev->hw_caps.api_version_minor,
+ wdev->keyset, *((u32 *) &wdev->hw_caps.capabilities));
+ snprintf(wdev->hw->wiphy->fw_version,
+ sizeof(wdev->hw->wiphy->fw_version),
+ "%d.%d.%d",
+ wdev->hw_caps.firmware_major,
+ wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build);
+
+ if (wfx_api_older_than(wdev, 1, 0)) {
+ dev_err(wdev->dev,
+ "unsupported firmware API version (expect 1 while firmware returns %d)\n",
+ wdev->hw_caps.api_version_major);
+ err = -ENOTSUPP;
+ goto err1;
+ }
+
+ err = wfx_sl_init(wdev);
+ if (err && wdev->hw_caps.capabilities.link_mode == SEC_LINK_ENFORCED) {
+ dev_err(wdev->dev,
+ "chip require secure_link, but can't negociate it\n");
+ goto err1;
+ }
+
+ if (wdev->hw_caps.regul_sel_mode_info.region_sel_mode) {
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |= IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ dev_dbg(wdev->dev, "sending configuration file %s\n",
+ wdev->pdata.file_pds);
+ err = wfx_send_pdata_pds(wdev);
+ if (err < 0)
+ goto err1;
+
+ wdev->pdata.gpio_wakeup = gpio_saved;
+ if (wdev->pdata.gpio_wakeup) {
+ dev_dbg(wdev->dev,
+ "enable 'quiescent' power mode with gpio %d and PDS file %s\n",
+ desc_to_gpio(wdev->pdata.gpio_wakeup),
+ wdev->pdata.file_pds);
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ control_reg_write(wdev, 0);
+ hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT);
+ } else {
+ hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
+ }
+
+ hif_use_multi_tx_conf(wdev, true);
+
+ for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
+ eth_zero_addr(wdev->addresses[i].addr);
+ macaddr = of_get_mac_address(wdev->dev->of_node);
+ if (!IS_ERR_OR_NULL(macaddr)) {
+ ether_addr_copy(wdev->addresses[i].addr, macaddr);
+ wdev->addresses[i].addr[ETH_ALEN - 1] += i;
+ } else {
+ ether_addr_copy(wdev->addresses[i].addr,
+ wdev->hw_caps.mac_addr[i]);
+ }
+ if (!is_valid_ether_addr(wdev->addresses[i].addr)) {
+ dev_warn(wdev->dev, "using random MAC address\n");
+ eth_random_addr(wdev->addresses[i].addr);
+ }
+ dev_info(wdev->dev, "MAC address %d: %pM\n", i,
+ wdev->addresses[i].addr);
+ }
+ wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses);
+ wdev->hw->wiphy->addresses = wdev->addresses;
+
+ err = ieee80211_register_hw(wdev->hw);
+ if (err)
+ goto err1;
+
+ err = wfx_debug_init(wdev);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ ieee80211_unregister_hw(wdev->hw);
+ ieee80211_free_hw(wdev->hw);
+err1:
+ wfx_bh_unregister(wdev);
+ return err;
+}
+
+void wfx_release(struct wfx_dev *wdev)
+{
+ ieee80211_unregister_hw(wdev->hw);
+ hif_shutdown(wdev);
+ wfx_bh_unregister(wdev);
+ wfx_sl_deinit(wdev);
+}
+
+static int __init wfx_core_init(void)
+{
+ int ret = 0;
+
+ if (IS_ENABLED(CONFIG_SPI))
+ ret = spi_register_driver(&wfx_spi_driver);
+ if (IS_ENABLED(CONFIG_MMC) && !ret)
+ ret = sdio_register_driver(&wfx_sdio_driver);
+ return ret;
+}
+module_init(wfx_core_init);
+
+static void __exit wfx_core_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MMC))
+ sdio_unregister_driver(&wfx_sdio_driver);
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&wfx_spi_driver);
+}
+module_exit(wfx_core_exit);
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
new file mode 100644
index 000000000000..875f8c227803
--- /dev/null
+++ b/drivers/staging/wfx/main.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_MAIN_H
+#define WFX_MAIN_H
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+
+#include "bus.h"
+#include "hif_api_general.h"
+
+struct wfx_dev;
+
+struct wfx_platform_data {
+ /* Keyset and ".sec" extension will be appended to this string */
+ const char *file_fw;
+ const char *file_pds;
+ struct gpio_desc *gpio_wakeup;
+ /*
+ * if true, HIF D_out is sampled on the rising edge of the clock
+ * (intended for 50MHz SDIO)
+ */
+ bool use_rising_clk;
+};
+
+struct wfx_dev *wfx_init_common(struct device *dev,
+ const struct wfx_platform_data *pdata,
+ const struct hwbus_ops *hwbus_ops,
+ void *hwbus_priv);
+void wfx_free_common(struct wfx_dev *wdev);
+
+int wfx_probe(struct wfx_dev *wdev);
+void wfx_release(struct wfx_dev *wdev);
+
+struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
+ const char *label);
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
+int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len);
+
+#endif
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
new file mode 100644
index 000000000000..c7ee90888f69
--- /dev/null
+++ b/drivers/staging/wfx/queue.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wfx.h"
+#include "sta.h"
+#include "data_tx.h"
+
+void wfx_tx_lock(struct wfx_dev *wdev)
+{
+ atomic_inc(&wdev->tx_lock);
+}
+
+void wfx_tx_unlock(struct wfx_dev *wdev)
+{
+ int tx_lock = atomic_dec_return(&wdev->tx_lock);
+
+ WARN(tx_lock < 0, "inconsistent tx_lock value");
+ if (!tx_lock)
+ wfx_bh_request_tx(wdev);
+}
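+
+/*
+ * Note (derived from the two helpers above): tx_lock behaves as a
+ * reference count rather than a mutex; wfx_tx_lock() may nest freely,
+ * and only the final call to wfx_tx_unlock() kicks the bottom half to
+ * resume transmission.
+ */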
+
+void wfx_tx_flush(struct wfx_dev *wdev)
+{
+ int ret;
+
+ WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return;
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
+ !wdev->hif.tx_buffers_used,
+ msecs_to_jiffies(3000));
+ if (!ret) {
+ dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
+ wdev->hif.tx_buffers_used);
+ wfx_pending_dump_old_frames(wdev, 3000);
+ // FIXME: drop pending frames here
+ wdev->chip_frozen = 1;
+ }
+ mutex_unlock(&wdev->hif_cmd.lock);
+}
+
+void wfx_tx_lock_flush(struct wfx_dev *wdev)
+{
+ wfx_tx_lock(wdev);
+ wfx_tx_flush(wdev);
+}
+
+void wfx_tx_queues_lock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ if (queue->tx_locked_cnt++ == 0)
+ ieee80211_stop_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+void wfx_tx_queues_unlock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ WARN(!queue->tx_locked_cnt, "queue already unlocked");
+ if (--queue->tx_locked_cnt == 0)
+ ieee80211_wake_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+/* If successful, LOCKS the TX queue! */
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
+{
+ int i;
+ bool done;
+ struct wfx_queue *queue;
+ struct sk_buff *item;
+ struct wfx_dev *wdev = wvif->wdev;
+ struct hif_msg *hif;
+
+ if (wvif->wdev->chip_frozen) {
+ wfx_tx_lock_flush(wdev);
+ wfx_tx_queues_clear(wdev);
+ return;
+ }
+
+ do {
+ done = true;
+ wfx_tx_lock_flush(wdev);
+ for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ hif = (struct hif_msg *) item->data;
+ if (hif->interface == wvif->id)
+ done = false;
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ }
+ if (!done) {
+ wfx_tx_unlock(wdev);
+ msleep(20);
+ }
+ } while (!done);
+}
+
+static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff_head *gc_list)
+{
+ int i;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&queue->queue.lock);
+ while ((item = __skb_dequeue(&queue->queue)) != NULL)
+ skb_queue_head(gc_list, item);
+ spin_lock_bh(&stats->pending.lock);
+ for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
+ stats->link_map_cache[i] -= queue->link_map_cache[i];
+ queue->link_map_cache[i] = 0;
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+void wfx_tx_queues_clear(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff *item;
+ struct sk_buff_head gc_list;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ skb_queue_head_init(&gc_list);
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
+ wake_up(&stats->wait_link_id_empty);
+ while ((item = skb_dequeue(&gc_list)) != NULL)
+ wfx_skb_dtor(wdev, item);
+}
+
+void wfx_tx_queues_init(struct wfx_dev *wdev)
+{
+ int i;
+
+ memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
+ memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
+ skb_queue_head_init(&wdev->tx_queue_stats.pending);
+ init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ wdev->tx_queue[i].queue_id = i;
+ skb_queue_head_init(&wdev->tx_queue[i].queue);
+ }
+}
+
+void wfx_tx_queues_deinit(struct wfx_dev *wdev)
+{
+ WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
+ wfx_tx_queues_clear(wdev);
+}
+
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ size_t ret;
+ int i, bit;
+
+ if (!link_id_map)
+ return 0;
+
+ spin_lock_bh(&queue->queue.lock);
+ if (link_id_map == (u32)-1) {
+ ret = skb_queue_len(&queue->queue);
+ } else {
+ ret = 0;
+ for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
+ ++i, bit <<= 1) {
+ if (link_id_map & bit)
+ ret += queue->link_map_cache[i];
+ }
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ return ret;
+}
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
+ spin_lock_bh(&queue->queue.lock);
+ __skb_queue_tail(&queue->queue, skb);
+
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
+ struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ struct sk_buff *skb = NULL;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv;
+ bool wakeup_stats = false;
+
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ tx_priv = wfx_skb_tx_priv(item);
+ if (link_id_map & BIT(tx_priv->link_id)) {
+ skb = item;
+ break;
+ }
+ }
+ WARN_ON(!skb);
+ if (skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ __skb_unlink(skb, &queue->queue);
+ --queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_queue_tail(&stats->pending, skb);
+ if (!--stats->link_map_cache[tx_priv->link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->pending.lock);
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+ return skb;
+}
+
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ spin_lock_bh(&queue->queue.lock);
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ __skb_queue_tail(&queue->queue, skb);
+ spin_unlock_bh(&queue->queue.lock);
+ return 0;
+}
+
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ wfx_skb_dtor(wdev, skb);
+
+ return 0;
+}
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+{
+ struct sk_buff *skb;
+ struct hif_req_tx *req;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ req = wfx_skb_txreq(skb);
+ if (req->packet_id == packet_id) {
+ spin_unlock_bh(&stats->pending.lock);
+ return skb;
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ WARN(1, "cannot find packet in pending queue");
+ return NULL;
+}
+
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv;
+ struct hif_req_tx *req;
+ struct sk_buff *skb;
+ bool first = true;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ req = wfx_skb_txreq(skb);
+ if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
+ limit_ms))) {
+ if (first) {
+ dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
+ limit_ms);
+ first = false;
+ }
+ dev_info(wdev->dev, " id %08x sent %lldms ago\n",
+ req->packet_id,
+ ktime_ms_delta(now, tx_priv->xmit_timestamp));
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+}
+
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
+ struct sk_buff *skb)
+{
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ return ktime_us_delta(now, tx_priv->xmit_timestamp);
+}
+
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff_head *queue;
+ bool ret = true;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ queue = &wdev->tx_queue[i].queue;
+ spin_lock_bh(&queue->lock);
+ if (!skb_queue_empty(queue))
+ ret = false;
+ spin_unlock_bh(&queue->lock);
+ }
+ return ret;
+}
+
+static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct wfx_queue *queue)
+{
+ bool handled = false;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct hif_req_tx *req = wfx_skb_txreq(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+
+ enum {
+ do_probe,
+ do_drop,
+ do_wep,
+ do_tx,
+ } action = do_tx;
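+
+ /* Summary of the decision below (derived from the code): frames
+ * are dropped while the interface state cannot transmit, diverted
+ * to the WEP work queue when they use a WEP key other than the
+ * current default (the default key-id must be switched first), and
+ * sent unchanged otherwise.
+ */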
+
+ switch (wvif->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (wvif->state < WFX_STATE_PRE_STA)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!wvif->state) {
+ action = do_drop;
+ } else if (!(BIT(tx_priv->raw_link_id) &
+ (BIT(0) | wvif->link_id_map))) {
+ dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
+ action = do_drop;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (wvif->state != WFX_STATE_IBSS)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ default:
+ action = do_drop;
+ break;
+ }
+
+ if (action == do_tx) {
+ if (ieee80211_is_nullfunc(frame->frame_control)) {
+ mutex_lock(&wvif->bss_loss_lock);
+ if (wvif->bss_loss_state) {
+ wvif->bss_loss_confirm_id = req->packet_id;
+ req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
+ }
+ mutex_unlock(&wvif->bss_loss_lock);
+ } else if (ieee80211_has_protected(frame->frame_control) &&
+ tx_priv->hw_key &&
+ tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
+ (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ action = do_wep;
+ }
+ }
+
+ switch (action) {
+ case do_drop:
+ wfx_pending_remove(wvif->wdev, skb);
+ handled = true;
+ break;
+ case do_wep:
+ wfx_tx_lock(wvif->wdev);
+ wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
+ wvif->wep_pending_skb = skb;
+ if (!schedule_work(&wvif->wep_key_work))
+ wfx_tx_unlock(wvif->wdev);
+ handled = true;
+ break;
+ case do_tx:
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ return handled;
+}
+
+static int wfx_get_prio_queue(struct wfx_vif *wvif,
+ u32 tx_allowed_mask, int *total)
+{
+ static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
+ BIT(WFX_LINK_ID_UAPSD);
+ struct hif_req_edca_queue_params *edca;
+ unsigned int score, best = -1;
+ int winner = -1;
+ int i;
+
+ /* search for a winner using edca params */
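+ /* Sketch of the rule implemented below: for each AC,
+ * score = ((AIFSN + CWmin) << 16) + (CWmax - CWmin) * rand16,
+ * so the queue with the smallest contention parameters, jittered
+ * by a random backoff, wins; a lower score means a higher priority.
+ */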
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ int queued;
+
+ edca = &wvif->edca.params[i];
+ queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ tx_allowed_mask);
+ if (!queued)
+ continue;
+ *total += queued;
+ score = ((edca->aifsn + edca->cw_min) << 16) +
+ ((edca->cw_max - edca->cw_min) *
+ (get_random_int() & 0xFFFF));
+ if (score < best && (winner < 0 || i != 3)) {
+ best = score;
+ winner = i;
+ }
+ }
+
+ /* override winner if bursting */
+ if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
+ winner != wvif->wdev->tx_burst_idx &&
+ !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
+ tx_allowed_mask & urgent) &&
+ wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
+ winner = wvif->wdev->tx_burst_idx;
+
+ return winner;
+}
+
+static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
+ struct wfx_queue **queue_p,
+ u32 *tx_allowed_mask_p,
+ bool *more)
+{
+ int idx;
+ u32 tx_allowed_mask;
+ int total = 0;
+
+ /* Search for a queue with multicast frames buffered */
+ if (wvif->mcast_tx) {
+ tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx >= 0) {
+ *more = total > 1;
+ goto found;
+ }
+ }
+
+ /* Search for unicast traffic */
+ tx_allowed_mask = ~wvif->sta_asleep_mask;
+ tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
+ if (wvif->sta_asleep_mask) {
+ tx_allowed_mask |= wvif->pspoll_mask;
+ tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
+ } else {
+ tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
+ }
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx < 0)
+ return -ENOENT;
+
+found:
+ *queue_p = &wvif->wdev->tx_queue[idx];
+ *tx_allowed_mask_p = tx_allowed_mask;
+ return 0;
+}
+
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif = NULL;
+ struct hif_req_tx *req = NULL;
+ struct wfx_queue *queue = NULL;
+ struct wfx_queue *vif_queue = NULL;
+ u32 tx_allowed_mask = 0;
+ u32 vif_tx_allowed_mask = 0;
+ const struct wfx_tx_priv *tx_priv = NULL;
+ struct wfx_vif *wvif;
+ /* More is used only for broadcasts. */
+ bool more = false;
+ bool vif_more = false;
+ int not_found;
+ int burst;
+
+ for (;;) {
+ int ret = -ENOENT;
+ int queue_num;
+ struct ieee80211_hdr *hdr;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ spin_lock_bh(&wvif->ps_state_lock);
+
+ not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
+ &vif_tx_allowed_mask,
+ &vif_more);
+
+ if (wvif->mcast_buffered && (not_found || !vif_more) &&
+ (wvif->mcast_tx ||
+ !wvif->sta_asleep_mask)) {
+ wvif->mcast_buffered = false;
+ if (wvif->mcast_tx) {
+ wvif->mcast_tx = false;
+ schedule_work(&wvif->mcast_stop_work);
+ }
+ }
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (vif_more) {
+ more = true;
+ tx_allowed_mask = vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ break;
+ } else if (!not_found) {
+ if (queue && queue != vif_queue)
+ dev_info(wdev->dev, "vifs disagree about queue priority\n");
+ tx_allowed_mask |= vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ return NULL;
+
+ queue_num = queue - wdev->tx_queue;
+
+ skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
+ if (!skb)
+ continue;
+ tx_priv = wfx_skb_tx_priv(skb);
+ hif = (struct hif_msg *) skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ WARN_ON(!wvif);
+
+ if (hif_handle_tx_data(wvif, skb, queue))
+ continue; /* Handled by WSM */
+
+ wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
+
+ /* allow bursting if txop is set */
+ if (wvif->edca.params[queue_num].tx_op_limit)
+ burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+ else
+ burst = 1;
+
+ /* store index of bursting queue */
+ if (burst > 1)
+ wdev->tx_burst_idx = queue_num;
+ else
+ wdev->tx_burst_idx = -1;
+
+ /* more buffered multicast/broadcast frames
+ * ==> set MoreData flag in IEEE 802.11 header
+ * to inform PS STAs
+ */
+ if (more) {
+ req = (struct hif_req_tx *) hif->body;
+ hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+ return hif;
+ }
+}
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
new file mode 100644
index 000000000000..21566e48b2c2
--- /dev/null
+++ b/drivers/staging/wfx/queue.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_QUEUE_H
+#define WFX_QUEUE_H
+
+#include <linux/skbuff.h>
+
+#include "hif_api_cmd.h"
+
+#define WFX_MAX_STA_IN_AP_MODE 14
+#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
+#define WFX_LINK_ID_UAPSD (WFX_MAX_STA_IN_AP_MODE + 2)
+#define WFX_LINK_ID_MAX (WFX_MAX_STA_IN_AP_MODE + 3)
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_queue {
+ struct sk_buff_head queue;
+ int tx_locked_cnt;
+ int link_map_cache[WFX_LINK_ID_MAX];
+ u8 queue_id;
+};
+
+struct wfx_queue_stats {
+ int link_map_cache[WFX_LINK_ID_MAX];
+ struct sk_buff_head pending;
+ wait_queue_head_t wait_link_id_empty;
+};
+
+void wfx_tx_lock(struct wfx_dev *wdev);
+void wfx_tx_unlock(struct wfx_dev *wdev);
+void wfx_tx_flush(struct wfx_dev *wdev);
+void wfx_tx_lock_flush(struct wfx_dev *wdev);
+
+void wfx_tx_queues_init(struct wfx_dev *wdev);
+void wfx_tx_queues_deinit(struct wfx_dev *wdev);
+void wfx_tx_queues_lock(struct wfx_dev *wdev);
+void wfx_tx_queues_unlock(struct wfx_dev *wdev);
+void wfx_tx_queues_clear(struct wfx_dev *wdev);
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff *skb);
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
+ struct sk_buff *skb);
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
+
+#endif /* WFX_QUEUE_H */
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
new file mode 100644
index 000000000000..35fcf9119f96
--- /dev/null
+++ b/drivers/staging/wfx/scan.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "scan.h"
+#include "wfx.h"
+#include "sta.h"
+#include "hif_tx_mib.h"
+
+static void __ieee80211_scan_completed_compat(struct ieee80211_hw *hw,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ ieee80211_scan_completed(hw, &info);
+}
+
+static void wfx_scan_restart_delayed(struct wfx_vif *wvif)
+{
+ if (wvif->delayed_unjoin) {
+ wvif->delayed_unjoin = false;
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else if (wvif->delayed_link_loss) {
+ wvif->delayed_link_loss = 0;
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ }
+}
+
+static int wfx_scan_start(struct wfx_vif *wvif, struct wfx_scan_params *scan)
+{
+ int ret;
+ int tmo = 500;
+
+ if (wvif->state == WFX_STATE_PRE_STA)
+ return -EBUSY;
+
+ tmo += scan->scan_req.num_of_channels *
+ ((20 * (scan->scan_req.max_channel_time)) + 10);
+ atomic_set(&wvif->scan.in_progress, 1);
+ atomic_set(&wvif->wdev->scan_in_progress, 1);
+
+ schedule_delayed_work(&wvif->scan.timeout, msecs_to_jiffies(tmo));
+ ret = hif_scan(wvif, scan);
+ if (ret) {
+ wfx_scan_failed_cb(wvif);
+ atomic_set(&wvif->scan.in_progress, 0);
+ atomic_set(&wvif->wdev->scan_in_progress, 0);
+ cancel_delayed_work_sync(&wvif->scan.timeout);
+ wfx_scan_restart_delayed(wvif);
+ }
+ return ret;
+}
+
+int wfx_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct sk_buff *skb;
+ int i, ret;
+ struct hif_mib_template_frame *p;
+
+ if (!wvif)
+ return -EINVAL;
+
+ if (wvif->state == WFX_STATE_AP)
+ return -EOPNOTSUPP;
+
+ if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+ req->n_ssids = 0;
+
+ if (req->n_ssids > HIF_API_MAX_NB_SSIDS)
+ return -EINVAL;
+
+ skb = ieee80211_probereq_get(hw, wvif->vif->addr, NULL, 0, req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (req->ie_len)
+ memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+
+ mutex_lock(&wdev->conf_mutex);
+
+ p = (struct hif_mib_template_frame *)skb_push(skb, 4);
+ p->frame_type = HIF_TMPLT_PRBREQ;
+ p->frame_length = cpu_to_le16(skb->len - 4);
+ ret = hif_set_template_frame(wvif, p);
+ skb_pull(skb, 4);
+
+ if (!ret)
+ /* Host wants to be the probe responder. */
+ ret = wfx_fwd_probe_req(wvif, true);
+ if (ret) {
+ mutex_unlock(&wdev->conf_mutex);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ wfx_tx_lock_flush(wdev);
+
+ WARN(wvif->scan.req, "unexpected concurrent scan");
+ wvif->scan.req = req;
+ wvif->scan.n_ssids = 0;
+ wvif->scan.status = 0;
+ wvif->scan.begin = &req->channels[0];
+ wvif->scan.curr = wvif->scan.begin;
+ wvif->scan.end = &req->channels[req->n_channels];
+ wvif->scan.output_power = wdev->output_power;
+
+ for (i = 0; i < req->n_ssids; ++i) {
+ struct hif_ssid_def *dst = &wvif->scan.ssids[wvif->scan.n_ssids];
+
+ memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
+ dst->ssid_length = req->ssids[i].ssid_len;
+ ++wvif->scan.n_ssids;
+ }
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ dev_kfree_skb(skb);
+ schedule_work(&wvif->scan.work);
+ return 0;
+}
+
+void wfx_scan_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan.work);
+ struct ieee80211_channel **it;
+ struct wfx_scan_params scan = {
+ .scan_req.scan_type.type = 0, /* Foreground */
+ };
+ struct ieee80211_channel *first;
+ bool first_run = (wvif->scan.begin == wvif->scan.curr &&
+ wvif->scan.begin != wvif->scan.end);
+ int i;
+
+ down(&wvif->scan.lock);
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ if (first_run) {
+ if (wvif->state == WFX_STATE_STA &&
+ !(wvif->powersave_mode.pm_mode.enter_psm)) {
+ struct hif_req_set_pm_mode pm = wvif->powersave_mode;
+
+ pm.pm_mode.enter_psm = 1;
+ wfx_set_pm(wvif, &pm);
+ }
+ }
+
+ if (!wvif->scan.req || wvif->scan.curr == wvif->scan.end) {
+ if (wvif->scan.output_power != wvif->wdev->output_power)
+ hif_set_output_power(wvif,
+ wvif->wdev->output_power * 10);
+
+ if (wvif->scan.status < 0)
+ dev_warn(wvif->wdev->dev, "scan failed\n");
+ else if (wvif->scan.req)
+ dev_dbg(wvif->wdev->dev, "scan completed\n");
+ else
+ dev_dbg(wvif->wdev->dev, "scan canceled\n");
+
+ wvif->scan.req = NULL;
+ wfx_scan_restart_delayed(wvif);
+ wfx_tx_unlock(wvif->wdev);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ __ieee80211_scan_completed_compat(wvif->wdev->hw,
+ wvif->scan.status ? 1 : 0);
+ up(&wvif->scan.lock);
+ if (wvif->state == WFX_STATE_STA &&
+ !(wvif->powersave_mode.pm_mode.enter_psm))
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ return;
+ }
+ first = *wvif->scan.curr;
+
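+ /* Note (describing the loop below): consecutive channels are
+ * batched together when they share the same band, the same
+ * IEEE80211_CHAN_NO_IR setting and, for active scans, the same
+ * max_power, so that a single HIF scan request covers as many
+ * channels as possible.
+ */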
+ for (it = wvif->scan.curr + 1, i = 1;
+ it != wvif->scan.end && i < HIF_API_MAX_NB_CHANNELS;
+ ++it, ++i) {
+ if ((*it)->band != first->band)
+ break;
+ if (((*it)->flags ^ first->flags) &
+ IEEE80211_CHAN_NO_IR)
+ break;
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
+ (*it)->max_power != first->max_power)
+ break;
+ }
+ scan.scan_req.band = first->band;
+
+ if (wvif->scan.req->no_cck)
+ scan.scan_req.max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+ else
+ scan.scan_req.max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+ scan.scan_req.num_of_probe_requests =
+ (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
+ scan.scan_req.num_of_ssi_ds = wvif->scan.n_ssids;
+ scan.ssids = &wvif->scan.ssids[0];
+ scan.scan_req.num_of_channels = it - wvif->scan.curr;
+ scan.scan_req.probe_delay = 100;
+ // FIXME: Check if FW can do active scan while joined.
+ if (wvif->state == WFX_STATE_STA) {
+ scan.scan_req.scan_type.type = 1;
+ scan.scan_req.scan_flags.fbg = 1;
+ }
+
+ scan.ch = kcalloc(scan.scan_req.num_of_channels,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!scan.ch) {
+ wvif->scan.status = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < scan.scan_req.num_of_channels; ++i)
+ scan.ch[i] = wvif->scan.curr[i]->hw_value;
+
+ if (wvif->scan.curr[0]->flags & IEEE80211_CHAN_NO_IR) {
+ scan.scan_req.min_channel_time = 50;
+ scan.scan_req.max_channel_time = 150;
+ } else {
+ scan.scan_req.min_channel_time = 10;
+ scan.scan_req.max_channel_time = 50;
+ }
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
+ wvif->scan.output_power != first->max_power) {
+ wvif->scan.output_power = first->max_power;
+ hif_set_output_power(wvif, wvif->scan.output_power * 10);
+ }
+ wvif->scan.status = wfx_scan_start(wvif, &scan);
+ kfree(scan.ch);
+ if (wvif->scan.status)
+ goto fail;
+ wvif->scan.curr = it;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return;
+
+fail:
+ wvif->scan.curr = wvif->scan.end;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ up(&wvif->scan.lock);
+ schedule_work(&wvif->scan.work);
+}
+
+static void wfx_scan_complete(struct wfx_vif *wvif)
+{
+ up(&wvif->scan.lock);
+ atomic_set(&wvif->wdev->scan_in_progress, 0);
+
+ wfx_scan_work(&wvif->scan.work);
+}
+
+void wfx_scan_failed_cb(struct wfx_vif *wvif)
+{
+ if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
+ wvif->scan.status = -EIO;
+ schedule_work(&wvif->scan.timeout.work);
+ }
+}
+
+void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg)
+{
+ if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
+ wvif->scan.status = 1;
+ schedule_work(&wvif->scan.timeout.work);
+ }
+}
+
+void wfx_scan_timeout(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ scan.timeout.work);
+
+ if (atomic_xchg(&wvif->scan.in_progress, 0)) {
+ if (wvif->scan.status > 0) {
+ wvif->scan.status = 0;
+ } else if (!wvif->scan.status) {
+ dev_warn(wvif->wdev->dev, "timeout waiting for scan complete notification\n");
+ wvif->scan.status = -ETIMEDOUT;
+ wvif->scan.curr = wvif->scan.end;
+ hif_stop_scan(wvif);
+ }
+ wfx_scan_complete(wvif);
+ }
+}
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
new file mode 100644
index 000000000000..b4ddd0771a9b
--- /dev/null
+++ b/drivers/staging/wfx/scan.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_SCAN_H
+#define WFX_SCAN_H
+
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_scan {
+ struct semaphore lock;
+ struct work_struct work;
+ struct delayed_work timeout;
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel **begin;
+ struct ieee80211_channel **curr;
+ struct ieee80211_channel **end;
+ struct hif_ssid_def ssids[HIF_API_MAX_NB_SSIDS];
+ int output_power;
+ int n_ssids;
+ int status;
+ atomic_t in_progress;
+};
+
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void wfx_scan_work(struct work_struct *work);
+void wfx_scan_timeout(struct work_struct *work);
+void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg);
+void wfx_scan_failed_cb(struct wfx_vif *wvif);
+
+#endif /* WFX_SCAN_H */
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
new file mode 100644
index 000000000000..666b26e5308d
--- /dev/null
+++ b/drivers/staging/wfx/secure_link.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, Silicon Laboratories, Inc.
+ */
+#ifndef WFX_SECURE_LINK_H
+#define WFX_SECURE_LINK_H
+
+#include <linux/of.h>
+
+#include "hif_api_general.h"
+
+struct wfx_dev;
+
+struct sl_context {
+};
+
+static inline bool wfx_is_secure_command(struct wfx_dev *wdev, int cmd_id)
+{
+ return false;
+}
+
+static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
+{
+ return -EIO;
+}
+
+static inline int wfx_sl_encode(struct wfx_dev *wdev, struct hif_msg *input,
+ struct hif_sl_msg *output)
+{
+ return -EIO;
+}
+
+static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev, u8 *ncp_pubkey,
+ u8 *ncp_pubmac)
+{
+ return -EIO;
+}
+
+static inline void wfx_sl_fill_pdata(struct device *dev,
+ struct wfx_platform_data *pdata)
+{
+ if (of_find_property(dev->of_node, "slk_key", NULL))
+ dev_err(dev, "secure link is not supported by this driver, ignoring provided key\n");
+}
+
+static inline int wfx_sl_init(struct wfx_dev *wdev)
+{
+ return -EIO;
+}
+
+static inline void wfx_sl_deinit(struct wfx_dev *wdev)
+{
+}
+
+#endif
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
new file mode 100644
index 000000000000..29848a202ab4
--- /dev/null
+++ b/drivers/staging/wfx/sta.c
@@ -0,0 +1,1684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "sta.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "bh.h"
+#include "key.h"
+#include "scan.h"
+#include "debug.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define TXOP_UNIT 32
+#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
+
+static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+{
+ int i;
+ u32 ret = 0;
+ // WFx only supports 2GHz
+ struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ // Walk all bits of the mask so that rates beyond the supported
+ // table are caught by the warning below instead of being ignored
+ for (i = 0; i < 32; i++) {
+ if (rates & BIT(i)) {
+ if (i >= sband->n_bitrates)
+ dev_warn(wdev->dev, "unsupported basic rate\n");
+ else
+ ret |= BIT(sband->bitrates[i].hw_value);
+ }
+ }
+ return ret;
+}
+
+static void __wfx_free_event_queue(struct list_head *list)
+{
+ struct wfx_hif_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, list, link) {
+ list_del(&event->link);
+ kfree(event);
+ }
+}
+
+static void wfx_free_event_queue(struct wfx_vif *wvif)
+{
+ LIST_HEAD(list);
+
+ spin_lock(&wvif->event_queue_lock);
+ list_splice_init(&wvif->event_queue, &list);
+ spin_unlock(&wvif->event_queue_lock);
+
+ __wfx_free_event_queue(&list);
+}
+
+void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
+{
+ int tx = 0;
+
+ mutex_lock(&wvif->bss_loss_lock);
+ wvif->delayed_link_loss = 0;
+ cancel_work_sync(&wvif->bss_params_work);
+
+ /* If we have a pending unjoin */
+ if (wvif->delayed_unjoin)
+ goto end;
+
+ if (init) {
+ schedule_delayed_work(&wvif->bss_loss_work, HZ);
+ wvif->bss_loss_state = 0;
+
+ if (!atomic_read(&wvif->wdev->tx_lock))
+ tx = 1;
+ } else if (good) {
+ cancel_delayed_work_sync(&wvif->bss_loss_work);
+ wvif->bss_loss_state = 0;
+ schedule_work(&wvif->bss_params_work);
+ } else if (bad) {
+ /* FIXME Should we just keep going until we time out? */
+ if (wvif->bss_loss_state < 3)
+ tx = 1;
+ } else {
+ cancel_delayed_work_sync(&wvif->bss_loss_work);
+ wvif->bss_loss_state = 0;
+ }
+
+ /* Spit out a NULL packet to our AP if necessary */
+ // FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
+ if (tx) {
+ struct sk_buff *skb;
+
+ wvif->bss_loss_state++;
+
+ skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
+ if (!skb)
+ goto end;
+ memset(IEEE80211_SKB_CB(skb), 0,
+ sizeof(*IEEE80211_SKB_CB(skb)));
+ IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
+ IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
+ IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
+ IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
+ wfx_tx(wvif->wdev->hw, NULL, skb);
+ }
+end:
+ mutex_unlock(&wvif->bss_loss_lock);
+}
+
+static int wfx_set_uapsd_param(struct wfx_vif *wvif,
+ const struct wfx_edca_params *arg)
+{
+ /* Here's the mapping AC [queue, bit]
+ * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
+ */
+
+ if (arg->uapsd_enable[IEEE80211_AC_VO])
+ wvif->uapsd_info.trig_voice = 1;
+ else
+ wvif->uapsd_info.trig_voice = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_VI])
+ wvif->uapsd_info.trig_video = 1;
+ else
+ wvif->uapsd_info.trig_video = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_BE])
+ wvif->uapsd_info.trig_be = 1;
+ else
+ wvif->uapsd_info.trig_be = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_BK])
+ wvif->uapsd_info.trig_bckgrnd = 1;
+ else
+ wvif->uapsd_info.trig_bckgrnd = 0;
+
+ /* Currently pseudo U-APSD operation is not supported, so setting
+ * MinAutoTriggerInterval, MaxAutoTriggerInterval and
+ * AutoTriggerStep to 0
+ */
+ wvif->uapsd_info.min_auto_trigger_interval = 0;
+ wvif->uapsd_info.max_auto_trigger_interval = 0;
+ wvif->uapsd_info.auto_trigger_step = 0;
+
+ return hif_set_uapsd_info(wvif, &wvif->uapsd_info);
+}
+
+int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
+{
+ wvif->fwd_probe_req = enable;
+ return hif_set_rx_filter(wvif, wvif->filter_bssid,
+ wvif->fwd_probe_req);
+}
+
+static int wfx_set_mcast_filter(struct wfx_vif *wvif,
+ struct wfx_grp_addr_table *fp)
+{
+ int i, ret;
+ struct hif_mib_config_data_filter config = { };
+ struct hif_mib_set_data_filtering filter_data = { };
+ struct hif_mib_mac_addr_data_frame_condition filter_addr_val = { };
+ struct hif_mib_uc_mc_bc_data_frame_condition filter_addr_type = { };
+
+ // Temporary workaround: keep data filtering disabled. The code
+ // below is retained for later use but is currently unreachable.
+ return hif_set_data_filtering(wvif, &filter_data);
+
+ if (!fp->enable) {
+ filter_data.enable = 0;
+ return hif_set_data_filtering(wvif, &filter_data);
+ }
+
+ // A1 Address match on list
+ for (i = 0; i < fp->num_addresses; i++) {
+ filter_addr_val.condition_idx = i;
+ filter_addr_val.address_type = HIF_MAC_ADDR_A1;
+ ether_addr_copy(filter_addr_val.mac_address,
+ fp->address_list[i]);
+ ret = hif_set_mac_addr_condition(wvif,
+ &filter_addr_val);
+ if (ret)
+ return ret;
+ config.mac_cond |= 1 << i;
+ }
+
+ // Accept unicast and broadcast
+ filter_addr_type.condition_idx = 0;
+ filter_addr_type.param.bits.type_unicast = 1;
+ filter_addr_type.param.bits.type_broadcast = 1;
+ ret = hif_set_uc_mc_bc_condition(wvif, &filter_addr_type);
+ if (ret)
+ return ret;
+
+ config.uc_mc_bc_cond = 1;
+ config.filter_idx = 0; // TODO #define MULTICAST_FILTERING 0
+ config.enable = 1;
+ ret = hif_set_config_data_filter(wvif, &config);
+ if (ret)
+ return ret;
+
+ // discard all data frames except match filter
+ filter_data.enable = 1;
+ filter_data.default_filter = 1; // discard all
+ ret = hif_set_data_filtering(wvif, &filter_data);
+
+ return ret;
+}
+
+void wfx_update_filtering(struct wfx_vif *wvif)
+{
+ int ret;
+	bool is_sta = wvif->vif && wvif->vif->type == NL80211_IFTYPE_STATION;
+ bool filter_bssid = wvif->filter_bssid;
+ bool fwd_probe_req = wvif->fwd_probe_req;
+ struct hif_mib_bcn_filter_enable bf_ctrl;
+ struct hif_ie_table_entry filter_ies[] = {
+ {
+ .ie_id = WLAN_EID_VENDOR_SPECIFIC,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ .oui = { 0x50, 0x6F, 0x9A },
+ }, {
+ .ie_id = WLAN_EID_HT_OPERATION,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }, {
+ .ie_id = WLAN_EID_ERP_INFO,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }
+ };
+ int n_filter_ies;
+
+ if (wvif->state == WFX_STATE_PASSIVE)
+ return;
+
+ if (wvif->disable_beacon_filter) {
+ bf_ctrl.enable = 0;
+ bf_ctrl.bcn_count = 1;
+ n_filter_ies = 0;
+ } else if (!is_sta) {
+ bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE |
+ HIF_BEACON_FILTER_AUTO_ERP;
+ bf_ctrl.bcn_count = 0;
+ n_filter_ies = 2;
+ } else {
+ bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE;
+ bf_ctrl.bcn_count = 0;
+ n_filter_ies = 3;
+ }
+
+ ret = hif_set_rx_filter(wvif, filter_bssid, fwd_probe_req);
+ if (!ret)
+ ret = hif_set_beacon_filter_table(wvif, n_filter_ies,
+ filter_ies);
+ if (!ret)
+ ret = hif_beacon_filter_control(wvif, bf_ctrl.enable,
+ bf_ctrl.bcn_count);
+ if (!ret)
+ ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
+ if (ret)
+ dev_err(wvif->wdev->dev, "update filtering failed: %d\n", ret);
+}
+
+static void wfx_update_filtering_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ update_filtering_work);
+
+ wfx_update_filtering(wvif);
+}
+
+u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ int i;
+ struct netdev_hw_addr *ha;
+ struct wfx_vif *wvif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+ int count = netdev_hw_addr_list_count(mc_list);
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ memset(&wvif->mcast_filter, 0x00, sizeof(wvif->mcast_filter));
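+		// No addresses, or too many to fit in the table: leave the
+		// freshly zeroed filter disabled (accept all multicast)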
+ if (!count ||
+ count > ARRAY_SIZE(wvif->mcast_filter.address_list))
+ continue;
+
+ i = 0;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ ether_addr_copy(wvif->mcast_filter.address_list[i],
+ ha->addr);
+ i++;
+ }
+ wvif->mcast_filter.enable = true;
+ wvif->mcast_filter.num_addresses = count;
+ }
+
+ return 0;
+}
+
+void wfx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 unused)
+{
+ struct wfx_vif *wvif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+
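+	// Report back to mac80211 the only filter flags we support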
+ *total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ down(&wvif->scan.lock);
+ wvif->filter_bssid = (*total_flags &
+ (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
+ wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
+ wfx_fwd_probe_req(wvif, true);
+ wfx_update_filtering(wvif);
+ up(&wvif->scan.lock);
+ }
+}
+
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int ret = 0;
+ /* To prevent re-applying PM request OID again and again*/
+ u16 old_uapsd_flags, new_uapsd_flags;
+ struct hif_req_edca_queue_params *edca;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ if (queue < hw->queues) {
+ old_uapsd_flags = *((u16 *) &wvif->uapsd_info);
+ edca = &wvif->edca.params[queue];
+
+ wvif->edca.uapsd_enable[queue] = params->uapsd;
+ edca->aifsn = params->aifs;
+ edca->cw_min = params->cw_min;
+ edca->cw_max = params->cw_max;
+ edca->tx_op_limit = params->txop * TXOP_UNIT;
+ edca->allowed_medium_time = 0;
+ ret = hif_set_edca_queue_params(wvif, edca);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (wvif->vif->type == NL80211_IFTYPE_STATION) {
+ ret = wfx_set_uapsd_param(wvif, &wvif->edca);
+ new_uapsd_flags = *((u16 *) &wvif->uapsd_info);
+ if (!ret && wvif->setbssparams_done &&
+ wvif->state == WFX_STATE_STA &&
+ old_uapsd_flags != new_uapsd_flags)
+ ret = wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&wdev->conf_mutex);
+ return ret;
+}
+
+int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+{
+ struct hif_req_set_pm_mode pm = *arg;
+ u16 uapsd_flags;
+ int ret;
+
+ if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
+ return 0;
+
+ memcpy(&uapsd_flags, &wvif->uapsd_info, sizeof(uapsd_flags));
+
+ if (uapsd_flags != 0)
+ pm.pm_mode.fast_psm = 0;
+
+	// The kernel disables power save when multiple vifs are in use. On
+	// the contrary, it is absolutely necessary to enable power save for
+	// the WF200
+ if (wvif_count(wvif->wdev) > 1) {
+ pm.pm_mode.enter_psm = 1;
+ pm.pm_mode.fast_psm = 0;
+ }
+
+ if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
+ msecs_to_jiffies(300)))
+ dev_warn(wvif->wdev->dev,
+			 "timeout while waiting for set_pm_mode_complete\n");
+ ret = hif_set_pm(wvif, &pm);
+	// FIXME: why?
+ if (-ETIMEDOUT == wvif->scan.status)
+ wvif->scan.status = 1;
+ return ret;
+}
+
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = NULL;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ hif_rts_threshold(wvif, value);
+ return 0;
+}
+
+/* If successful, LOCKS the TX queue! */
+static int __wfx_flush(struct wfx_dev *wdev, bool drop)
+{
+ int ret;
+
+ for (;;) {
+ if (drop) {
+ wfx_tx_queues_clear(wdev);
+ } else {
+ ret = wait_event_timeout(
+ wdev->tx_queue_stats.wait_link_id_empty,
+ wfx_tx_queues_is_empty(wdev),
+ 2 * HZ);
+ }
+
+ if (!drop && ret <= 0) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ ret = 0;
+
+ wfx_tx_lock_flush(wdev);
+ if (!wfx_tx_queues_is_empty(wdev)) {
+ /* Highly unlikely: WSM requeued frames. */
+ wfx_tx_unlock(wdev);
+ continue;
+ }
+ break;
+ }
+ return ret;
+}
+
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+
+ if (vif) {
+ wvif = (struct wfx_vif *) vif->drv_priv;
+ if (wvif->vif->type == NL80211_IFTYPE_MONITOR)
+ drop = true;
+ if (wvif->vif->type == NL80211_IFTYPE_AP &&
+ !wvif->enable_beacon)
+ drop = true;
+ }
+
+ // FIXME: only flush requested vif
+ if (!__wfx_flush(wdev, drop))
+ wfx_tx_unlock(wdev);
+}
+
+/* WSM callbacks */
+
+static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
+{
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
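+	/* e.g. a raw RCPI of 60 yields 60 / 2 - 110 = -80 dBm */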
+ int rcpi_rssi;
+ int cqm_evt;
+
+ rcpi_rssi = raw_rcpi_rssi / 2 - 110;
+ if (rcpi_rssi <= wvif->cqm_rssi_thold)
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
+}
+
+static void wfx_event_handler_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, event_handler_work);
+ struct wfx_hif_event *event;
+
+ LIST_HEAD(list);
+
+ spin_lock(&wvif->event_queue_lock);
+ list_splice_init(&wvif->event_queue, &list);
+ spin_unlock(&wvif->event_queue_lock);
+
+ list_for_each_entry(event, &list, link) {
+ switch (event->evt.event_id) {
+ case HIF_EVENT_IND_BSSLOST:
+ cancel_work_sync(&wvif->unjoin_work);
+ if (!down_trylock(&wvif->scan.lock)) {
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ up(&wvif->scan.lock);
+ } else {
+ /* Scan is in progress. Delay reporting.
+				 * Scan completion will trigger bss_loss_work
+ */
+ wvif->delayed_link_loss = 1;
+ /* Also start a watchdog. */
+ schedule_delayed_work(&wvif->bss_loss_work,
+ 5 * HZ);
+ }
+ break;
+ case HIF_EVENT_IND_BSSREGAINED:
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+ break;
+ case HIF_EVENT_IND_RCPI_RSSI:
+ wfx_event_report_rssi(wvif,
+ event->evt.event_data.rcpi_rssi);
+ break;
+ case HIF_EVENT_IND_PS_MODE_ERROR:
+ dev_warn(wvif->wdev->dev,
+ "error while processing power save request\n");
+ break;
+ default:
+ dev_warn(wvif->wdev->dev,
+ "unhandled event indication: %.2x\n",
+ event->evt.event_id);
+ break;
+ }
+ }
+ __wfx_free_event_queue(&list);
+}
+
+static void wfx_bss_loss_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ bss_loss_work.work);
+
+ ieee80211_connection_loss(wvif->vif);
+}
+
+static void wfx_bss_params_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ bss_params_work);
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ wvif->bss_params.bss_flags.lost_count_only = 1;
+ hif_set_bss_params(wvif, &wvif->bss_params);
+ wvif->bss_params.bss_flags.lost_count_only = 0;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+}
+
+static void wfx_set_beacon_wakeup_period_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ set_beacon_wakeup_period_work);
+
+ hif_set_beacon_wakeup_period(wvif, wvif->dtim_period,
+ wvif->dtim_period);
+}
+
+static void wfx_do_unjoin(struct wfx_vif *wvif)
+{
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ if (atomic_read(&wvif->scan.in_progress)) {
+ if (wvif->delayed_unjoin)
+ dev_dbg(wvif->wdev->dev,
+ "delayed unjoin is already scheduled\n");
+ else
+ wvif->delayed_unjoin = true;
+ goto done;
+ }
+
+ wvif->delayed_link_loss = false;
+
+ if (!wvif->state)
+ goto done;
+
+ if (wvif->state == WFX_STATE_AP)
+ goto done;
+
+ cancel_work_sync(&wvif->update_filtering_work);
+ cancel_work_sync(&wvif->set_beacon_wakeup_period_work);
+ wvif->state = WFX_STATE_PASSIVE;
+
+ /* Unjoin is a reset. */
+ wfx_tx_flush(wvif->wdev);
+ hif_keep_alive_period(wvif, 0);
+ hif_reset(wvif, false);
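+	// the HIF seemingly takes the output power in tenths of dBm, hence
+	// the * 10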
+ hif_set_output_power(wvif, wvif->wdev->output_power * 10);
+ wvif->dtim_period = 0;
+ hif_set_macaddr(wvif, wvif->vif->addr);
+ wfx_free_event_queue(wvif);
+ cancel_work_sync(&wvif->event_handler_work);
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+
+ /* Disable Block ACKs */
+ hif_set_block_ack_policy(wvif, 0, 0);
+
+ wvif->disable_beacon_filter = false;
+ wfx_update_filtering(wvif);
+ memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
+ wvif->setbssparams_done = false;
+ memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+
+done:
+ mutex_unlock(&wvif->wdev->conf_mutex);
+}
+
+static void wfx_set_mfp(struct wfx_vif *wvif,
+ struct cfg80211_bss *bss)
+{
+ const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ const int akm_suite_size = 4 / sizeof(u16);
+ const u16 *ptr = NULL;
+ bool mfpc = false;
+ bool mfpr = false;
+
+ /* 802.11w protected mgmt frames */
+
+ /* retrieve MFPC and MFPR flags from beacon or PBRSP */
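+	/* ptr starts at the element ID; the 8 byte offset skips the element
+	 * header (2), the RSN version (2) and the group cipher suite (4) to
+	 * reach the pairwise cipher suite count. Each pairwise and AKM suite
+	 * is 4 bytes, so the walk below lands on the RSN capabilities field,
+	 * where bit 6 is MFPR and bit 7 is MFPC.
+	 */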
+
+ rcu_read_lock();
+ if (bss)
+ ptr = (const u16 *) ieee80211_bss_get_ie(bss,
+ WLAN_EID_RSN);
+
+ if (ptr) {
+ ptr += pairwise_cipher_suite_count_offset;
+ ptr += 1 + pairwise_cipher_suite_size * *ptr;
+ ptr += 1 + akm_suite_size * *ptr;
+ mfpr = *ptr & BIT(6);
+ mfpc = *ptr & BIT(7);
+ }
+ rcu_read_unlock();
+
+ hif_set_mfp(wvif, mfpc, mfpr);
+}
+
+/* MUST be called with tx_lock held! It will be unlocked for us. */
+static void wfx_do_join(struct wfx_vif *wvif)
+{
+ const u8 *bssid;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct cfg80211_bss *bss = NULL;
+ struct hif_req_join join = {
+ .mode = conf->ibss_joined ? HIF_MODE_IBSS : HIF_MODE_BSS,
+ .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
+ .probe_for_join = 1,
+ .atim_window = 0,
+ .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
+ conf->basic_rates),
+ };
+
+ if (wvif->channel->flags & IEEE80211_CHAN_NO_IR)
+ join.probe_for_join = 0;
+
+ if (wvif->state)
+ wfx_do_unjoin(wvif);
+
+ bssid = wvif->vif->bss_conf.bssid;
+
+ bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
+ bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (!bss && !conf->ibss_joined) {
+ wfx_tx_unlock(wvif->wdev);
+ return;
+ }
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ /* Under the conf lock: check scan status and
+ * bail out if it is in progress.
+ */
+ if (atomic_read(&wvif->scan.in_progress)) {
+ wfx_tx_unlock(wvif->wdev);
+ goto done_put;
+ }
+
+ /* Sanity check basic rates */
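+	// 0b111: fall back to the three lowest (B) rates, 1, 2 and 5.5 Mbps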
+ if (!join.basic_rate_set)
+ join.basic_rate_set = 7;
+
+ /* Sanity check beacon interval */
+ if (!wvif->beacon_int)
+ wvif->beacon_int = 1;
+
+ join.beacon_interval = wvif->beacon_int;
+
+	// DTIM period will be set on the first beacon
+ wvif->dtim_period = 0;
+
+ join.channel_number = wvif->channel->hw_value;
+ memcpy(join.bssid, bssid, sizeof(join.bssid));
+
+ if (!conf->ibss_joined) {
+ const u8 *ssidie;
+
+ rcu_read_lock();
+ ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssidie) {
+ join.ssid_length = ssidie[1];
+ memcpy(join.ssid, &ssidie[2], join.ssid_length);
+ }
+ rcu_read_unlock();
+ }
+
+ wfx_tx_flush(wvif->wdev);
+
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+
+ wfx_set_mfp(wvif, bss);
+
+ /* Perform actual join */
+ wvif->wdev->tx_burst_idx = -1;
+ if (hif_join(wvif, &join)) {
+ ieee80211_connection_loss(wvif->vif);
+ wvif->join_complete_status = -1;
+ /* Tx lock still held, unjoin will clear it. */
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ wvif->join_complete_status = 0;
+ if (wvif->vif->type == NL80211_IFTYPE_ADHOC)
+ wvif->state = WFX_STATE_IBSS;
+ else
+ wvif->state = WFX_STATE_PRE_STA;
+ wfx_tx_unlock(wvif->wdev);
+
+ /* Upload keys */
+ wfx_upload_keys(wvif);
+
+		/* Due to beacon filtering it is possible that the
+		 * AP's beacon is not known to the mac80211 stack.
+		 * Disable filtering temporarily to make sure the stack
+		 * receives at least one
+		 */
+ wvif->disable_beacon_filter = true;
+ }
+ wfx_update_filtering(wvif);
+
+done_put:
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ if (bss)
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
+}
+
+static void wfx_unjoin_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, unjoin_work);
+
+ wfx_do_unjoin(wvif);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_link_entry *entry;
+ struct sk_buff *skb;
+
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+
+ sta_priv->vif_id = wvif->id;
+ sta_priv->link_id = wfx_find_link_id(wvif, sta->addr);
+ if (!sta_priv->link_id) {
+		dev_warn(wdev->dev, "no more link-id available\n");
+ return -ENOENT;
+ }
+
+ entry = &wvif->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&wvif->ps_state_lock);
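+	// If U-APSD is enabled on all ACs, consider the station asleep from
+	// the start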
+ if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
+ entry->status = WFX_LINK_HARD;
+ while ((skb = skb_dequeue(&entry->rx_queue)))
+ ieee80211_rx_irqsafe(wdev->hw, skb);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return 0;
+}
+
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_link_entry *entry;
+
+ if (wvif->vif->type != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ return 0;
+
+ entry = &wvif->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&wvif->ps_state_lock);
+ entry->status = WFX_LINK_RESERVE;
+ entry->timestamp = jiffies;
+ wfx_tx_lock(wdev);
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ flush_work(&wvif->link_id_work);
+ return 0;
+}
+
+static void wfx_set_cts_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_cts_work);
+ u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ erp_ie[2] = wvif->erp_info;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+
+ hif_erp_use_protection(wvif, erp_ie[2] & WLAN_ERP_USE_PROTECTION);
+
+ if (wvif->vif->type != NL80211_IFTYPE_STATION)
+ hif_update_ie(wvif, &target_frame, erp_ie, sizeof(erp_ie));
+}
+
+static int wfx_start_ap(struct wfx_vif *wvif)
+{
+ int ret;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct hif_req_start start = {
+ .channel_number = wvif->channel->hw_value,
+ .beacon_interval = conf->beacon_int,
+ .dtim_period = conf->dtim_period,
+ .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
+ .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
+ conf->basic_rates),
+ };
+
+ memset(start.ssid, 0, sizeof(start.ssid));
+ if (!conf->hidden_ssid) {
+ start.ssid_length = conf->ssid_len;
+ memcpy(start.ssid, conf->ssid, start.ssid_length);
+ }
+
+ wvif->beacon_int = conf->beacon_int;
+ wvif->dtim_period = conf->dtim_period;
+
+ memset(&wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+
+ wvif->wdev->tx_burst_idx = -1;
+ ret = hif_start(wvif, &start);
+ if (!ret)
+ ret = wfx_upload_keys(wvif);
+ if (!ret) {
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wvif->state = WFX_STATE_AP;
+ wfx_update_filtering(wvif);
+ }
+ return ret;
+}
+
+static int wfx_update_beaconing(struct wfx_vif *wvif)
+{
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+
+ if (wvif->vif->type == NL80211_IFTYPE_AP) {
+ /* TODO: check if changed channel, band */
+ if (wvif->state != WFX_STATE_AP ||
+ wvif->beacon_int != conf->beacon_int) {
+ wfx_tx_lock_flush(wvif->wdev);
+ if (wvif->state != WFX_STATE_PASSIVE)
+ hif_reset(wvif, false);
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_start_ap(wvif);
+ wfx_tx_unlock(wvif->wdev);
+		}
+ }
+ return 0;
+}
+
+static int wfx_upload_beacon(struct wfx_vif *wvif)
+{
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ struct ieee80211_mgmt *mgmt;
+ struct hif_mib_template_frame *p;
+
+ if (wvif->vif->type == NL80211_IFTYPE_STATION ||
+ wvif->vif->type == NL80211_IFTYPE_MONITOR ||
+ wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
+ goto done;
+
+ skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
+
+ if (!skb)
+ return -ENOMEM;
+
+ p = (struct hif_mib_template_frame *) skb_push(skb, 4);
+ p->frame_type = HIF_TMPLT_BCN;
+ p->init_rate = API_RATE_INDEX_B_1MBPS; /* 1Mbps DSSS */
+ p->frame_length = cpu_to_le16(skb->len - 4);
+
+ ret = hif_set_template_frame(wvif, p);
+
+ skb_pull(skb, 4);
+
+ if (ret)
+ goto done;
+ /* TODO: Distill probe resp; remove TIM and any other beacon-specific
+ * IEs
+ */
+ mgmt = (void *)skb->data;
+ mgmt->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
+
+ p->frame_type = HIF_TMPLT_PRBRES;
+
+ ret = hif_set_template_frame(wvif, p);
+ wfx_fwd_probe_req(wvif, false);
+
+done:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wfx_is_ht(const struct wfx_ht_info *ht_info)
+{
+ return ht_info->channel_type != NL80211_CHAN_NO_HT;
+}
+
+static int wfx_ht_greenfield(const struct wfx_ht_info *ht_info)
+{
+ return wfx_is_ht(ht_info) &&
+ (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ht_info->operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+}
+
+static int wfx_ht_ampdu_density(const struct wfx_ht_info *ht_info)
+{
+ if (!wfx_is_ht(ht_info))
+ return 0;
+ return ht_info->ht_cap.ampdu_density;
+}
+
+static void wfx_join_finalize(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct hif_mib_set_association_mode association_mode = { };
+
+ if (info->dtim_period)
+ wvif->dtim_period = info->dtim_period;
+ wvif->beacon_int = info->beacon_int;
+
+ rcu_read_lock();
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(wvif->vif, info->bssid);
+ if (sta) {
+ wvif->ht_info.ht_cap = sta->ht_cap;
+ wvif->bss_params.operational_rate_set =
+ wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
+ wvif->ht_info.operation_mode = info->ht_operation_mode;
+ } else {
+ memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+ wvif->bss_params.operational_rate_set = -1;
+ }
+ rcu_read_unlock();
+
+ /* Non Greenfield stations present */
+ if (wvif->ht_info.operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+ hif_dual_cts_protection(wvif, true);
+ else
+ hif_dual_cts_protection(wvif, false);
+
+ association_mode.preambtype_use = 1;
+ association_mode.mode = 1;
+ association_mode.rateset = 1;
+ association_mode.spacing = 1;
+ association_mode.preamble_type = info->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG;
+ association_mode.basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates));
+ association_mode.mixed_or_greenfield_type = wfx_ht_greenfield(&wvif->ht_info);
+ association_mode.mpdu_start_spacing = wfx_ht_ampdu_density(&wvif->ht_info);
+
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+
+ wvif->bss_params.beacon_lost_count = 20;
+ wvif->bss_params.aid = info->aid;
+
+ if (wvif->dtim_period < 1)
+ wvif->dtim_period = 1;
+
+ hif_set_association_mode(wvif, &association_mode);
+
+ if (!info->ibss_joined) {
+ hif_keep_alive_period(wvif, 30 /* sec */);
+ hif_set_bss_params(wvif, &wvif->bss_params);
+ wvif->setbssparams_done = true;
+ wfx_set_beacon_wakeup_period_work(&wvif->set_beacon_wakeup_period_work);
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+}
+
+void wfx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ bool do_join = false;
+ int i;
+ int nb_arp_addr;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ /* TODO: BSS_CHANGED_QOS */
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ struct hif_mib_arp_ip_addr_table filter = { };
+
+ nb_arp_addr = info->arp_addr_cnt;
+ if (nb_arp_addr <= 0 || nb_arp_addr > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ nb_arp_addr = 0;
+
+ for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
+ filter.condition_idx = i;
+ if (i < nb_arp_addr) {
+ // Caution: type of arp_addr_list[i] is __be32
+ memcpy(filter.ipv4_address,
+ &info->arp_addr_list[i],
+ sizeof(filter.ipv4_address));
+ filter.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ } else {
+ filter.arp_enable = HIF_ARP_NS_FILTERING_DISABLE;
+ }
+ hif_set_arp_ipv4_filter(wvif, &filter);
+ }
+ }
+
+ if (changed &
+ (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP |
+ BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) {
+ wvif->beacon_int = info->beacon_int;
+ wfx_update_beaconing(wvif);
+ wfx_upload_beacon(wvif);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED &&
+ wvif->state != WFX_STATE_IBSS) {
+ if (wvif->enable_beacon != info->enable_beacon) {
+ hif_beacon_transmit(wvif, info->enable_beacon);
+ wvif->enable_beacon = info->enable_beacon;
+ }
+ }
+
+ /* assoc/disassoc, or maybe AID changed */
+ if (changed & BSS_CHANGED_ASSOC) {
+ wfx_tx_lock_flush(wdev);
+ wvif->wep_default_key_id = -1;
+ wfx_tx_unlock(wdev);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC && !info->assoc &&
+ (wvif->state == WFX_STATE_STA || wvif->state == WFX_STATE_IBSS)) {
+		/* Schedule unjoin work */
+ wfx_tx_lock(wdev);
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wdev);
+ } else {
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ if (info->ibss_joined)
+ do_join = true;
+ else if (wvif->state == WFX_STATE_AP)
+ wfx_update_beaconing(wvif);
+ }
+
+ if (changed & BSS_CHANGED_BSSID)
+ do_join = true;
+
+ if (changed &
+ (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID |
+ BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_HT)) {
+ if (info->assoc) {
+ if (wvif->state < WFX_STATE_PRE_STA) {
+ ieee80211_connection_loss(vif);
+ mutex_unlock(&wdev->conf_mutex);
+ return;
+ } else if (wvif->state == WFX_STATE_PRE_STA) {
+ wvif->state = WFX_STATE_STA;
+ }
+ } else {
+ do_join = true;
+ }
+
+ if (info->assoc || info->ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else
+ memset(&wvif->bss_params, 0,
+ sizeof(wvif->bss_params));
+ }
+ }
+
+ /* ERP Protection */
+ if (changed & (BSS_CHANGED_ASSOC |
+ BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_ERP_PREAMBLE)) {
+ u32 prev_erp_info = wvif->erp_info;
+
+ if (info->use_cts_prot)
+ wvif->erp_info |= WLAN_ERP_USE_PROTECTION;
+ else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+ wvif->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+ if (info->use_short_preamble)
+ wvif->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+ else
+ wvif->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+ if (prev_erp_info != wvif->erp_info)
+ schedule_work(&wvif->set_cts_work);
+ }
+
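+	// 802.11 slot time: 9 us with short slots, 20 us otherwise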
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT))
+ hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+ struct hif_mib_rcpi_rssi_threshold th = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ wvif->cqm_rssi_thold = info->cqm_rssi_thold;
+
+ if (!info->cqm_rssi_thold && !info->cqm_rssi_hyst) {
+ th.upperthresh = 1;
+ th.lowerthresh = 1;
+ } else {
+			/* FIXME This is not the correct way of setting the
+			 * threshold. Upper and lower should be set equal
+			 * here and adjusted in the callback. However, the
+			 * current implementation is much more reliable and
+			 * stable.
+			 */
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
+ th.upper_threshold = info->cqm_rssi_thold + info->cqm_rssi_hyst;
+ th.upper_threshold = (th.upper_threshold + 110) * 2;
+ th.lower_threshold = info->cqm_rssi_thold;
+ th.lower_threshold = (th.lower_threshold + 110) * 2;
+ }
+ hif_set_rcpi_rssi_threshold(wvif, &th);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER &&
+ info->txpower != wdev->output_power) {
+ wdev->output_power = info->txpower;
+ hif_set_output_power(wvif, wdev->output_power * 10);
+ }
+ mutex_unlock(&wdev->conf_mutex);
+
+ if (do_join) {
+ wfx_tx_lock_flush(wdev);
+ wfx_do_join(wvif); /* Will unlock it for us */
+ }
+}
+
+static void wfx_ps_notify(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd,
+ int link_id)
+{
+ u32 bit, prev;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ /* Zero link id means "for all link IDs" */
+ if (link_id) {
+ bit = BIT(link_id);
+ } else if (notify_cmd != STA_NOTIFY_AWAKE) {
+ dev_warn(wvif->wdev->dev, "unsupported notify command\n");
+ bit = 0;
+ } else {
+ bit = wvif->link_id_map;
+ }
+ prev = wvif->sta_asleep_mask & bit;
+
+ switch (notify_cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (!prev) {
+ if (wvif->mcast_buffered && !wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ wvif->sta_asleep_mask |= bit;
+ }
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (prev) {
+ wvif->sta_asleep_mask &= ~bit;
+ wvif->pspoll_mask &= ~bit;
+ if (link_id && !wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_stop_work);
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ break;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+
+ wfx_ps_notify(wvif, notify_cmd, sta_priv->link_id);
+}
+
+static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+{
+ struct sk_buff *skb;
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+ u16 tim_offset, tim_length;
+ u8 *tim_ptr;
+
+ skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
+ &tim_offset, &tim_length);
+ if (!skb) {
+ if (!__wfx_flush(wvif->wdev, true))
+ wfx_tx_unlock(wvif->wdev);
+ return -ENOENT;
+ }
+ tim_ptr = skb->data + tim_offset;
+
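+	/* TIM element layout: tim_ptr[0] = id, [1] = length, [2] = DTIM
+	 * count, [3] = DTIM period, [4] = bitmap control (bit 0 carries
+	 * the multicast/AID0 indication)
+	 */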
+ if (tim_offset && tim_length >= 6) {
+ /* Ignore DTIM count from mac80211:
+ * firmware handles DTIM internally.
+ */
+ tim_ptr[2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (aid0_bit_set)
+ tim_ptr[4] |= 1;
+ else
+ tim_ptr[4] &= ~1;
+ }
+
+ hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void wfx_set_tim_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_tim_work);
+
+ wfx_set_tim_impl(wvif, wvif->aid0_bit_set);
+}
+
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
+
+ schedule_work(&wvif->set_tim_work);
+ return 0;
+}
+
+static void wfx_mcast_start_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ mcast_start_work);
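+	// timeout of roughly one DTIM interval: dtim_period beacons of
+	// (beacon_int + 20) TU each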
+ long tmo = wvif->dtim_period * TU_TO_JIFFIES(wvif->beacon_int + 20);
+
+ cancel_work_sync(&wvif->mcast_stop_work);
+ if (!wvif->aid0_bit_set) {
+ wfx_tx_lock_flush(wvif->wdev);
+ wfx_set_tim_impl(wvif, true);
+ wvif->aid0_bit_set = true;
+ mod_timer(&wvif->mcast_timeout, jiffies + tmo);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_stop_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ mcast_stop_work);
+
+ if (wvif->aid0_bit_set) {
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->aid0_bit_set = false;
+ wfx_set_tim_impl(wvif, false);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_timeout(struct timer_list *t)
+{
+ struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
+
+ dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
+ spin_lock_bh(&wvif->ps_state_lock);
+ wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
+ if (wvif->mcast_tx)
+ wfx_bh_request_tx(wvif->wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
+int wfx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ /* Aggregation is implemented fully in firmware,
+ * including block ack negotiation. Do not allow
+	 * the mac80211 stack to do anything: it interferes with
+ * the firmware.
+ */
+
+ /* Note that we still need this function stubbed. */
+
+ return -ENOTSUPP;
+}
+
+void wfx_suspend_resume(struct wfx_vif *wvif,
+ struct hif_ind_suspend_resume_tx *arg)
+{
+ if (arg->suspend_resume_flags.bc_mc_only) {
+ bool cancel_tmo = false;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (!arg->suspend_resume_flags.resume)
+ wvif->mcast_tx = false;
+ else
+ wvif->mcast_tx = wvif->aid0_bit_set &&
+ wvif->mcast_buffered;
+ if (wvif->mcast_tx) {
+ cancel_tmo = true;
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (cancel_tmo)
+ del_timer_sync(&wvif->mcast_timeout);
+ } else if (arg->suspend_resume_flags.resume) {
+ // FIXME: should change each station status independently
+ wfx_ps_notify(wvif, STA_NOTIFY_AWAKE, 0);
+ wfx_bh_request_tx(wvif->wdev);
+ } else {
+ // FIXME: should change each station status independently
+ wfx_ps_notify(wvif, STA_NOTIFY_SLEEP, 0);
+ }
+}
+
+int wfx_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ return 0;
+}
+
+void wfx_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+}
+
+void wfx_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed)
+{
+}
+
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel, "channel overwrite");
+ wvif->channel = ch;
+ wvif->ht_info.channel_type = cfg80211_get_chandef_type(&conf->def);
+
+ return 0;
+}
+
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel != ch, "channel mismatch");
+ wvif->channel = NULL;
+}
+
+int wfx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ int ret = 0;
+ struct wfx_dev *wdev = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct wfx_vif *wvif;
+
+	// FIXME: Interface id should not be hardcoded
+ wvif = wdev_to_wvif(wdev, 0);
+ if (!wvif) {
+ WARN(1, "interface 0 does not exist anymore");
+ return 0;
+ }
+
+ down(&wvif->scan.lock);
+ mutex_lock(&wdev->conf_mutex);
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ wdev->output_power = conf->power_level;
+ hif_set_output_power(wvif, wdev->output_power * 10);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ memset(&wvif->powersave_mode, 0,
+ sizeof(wvif->powersave_mode));
+ if (conf->flags & IEEE80211_CONF_PS) {
+ wvif->powersave_mode.pm_mode.enter_psm = 1;
+ if (conf->dynamic_ps_timeout > 0) {
+ wvif->powersave_mode.pm_mode.fast_psm = 1;
+					/* Firmware does not support idle
+					 * periods longer than 128 ms (the
+					 * field seems to be in 0.5 ms
+					 * units, hence the clamp to 255)
+					 */
+ wvif->powersave_mode.fast_psm_idle_period =
+ min(conf->dynamic_ps_timeout *
+ 2, 255);
+ }
+ }
+ if (wvif->state == WFX_STATE_STA && wvif->bss_params.aid)
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ wvif = wdev_to_wvif(wdev, 0);
+ }
+
+ mutex_unlock(&wdev->conf_mutex);
+ up(&wvif->scan.lock);
+ return ret;
+}
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ int i;
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+	// FIXME: the parameters are set by the kernel just after
+	// interface_add. Keep struct hif_req_edca_queue_params blank?
+ struct hif_req_edca_queue_params default_edca_params[] = {
+ [IEEE80211_AC_VO] = {
+ .queue_id = HIF_QUEUE_ID_VOICE,
+ .aifsn = 2,
+ .cw_min = 3,
+ .cw_max = 7,
+ .tx_op_limit = TXOP_UNIT * 47,
+ },
+ [IEEE80211_AC_VI] = {
+ .queue_id = HIF_QUEUE_ID_VIDEO,
+ .aifsn = 2,
+ .cw_min = 7,
+ .cw_max = 15,
+ .tx_op_limit = TXOP_UNIT * 94,
+ },
+ [IEEE80211_AC_BE] = {
+ .queue_id = HIF_QUEUE_ID_BESTEFFORT,
+ .aifsn = 3,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ [IEEE80211_AC_BK] = {
+ .queue_id = HIF_QUEUE_ID_BACKGROUND,
+ .aifsn = 7,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
+ if (wfx_api_older_than(wdev, 2, 0)) {
+ default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
+ default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ if (!wdev->vif[i]) {
+ wdev->vif[i] = vif;
+ wvif->id = i;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(wdev->vif)) {
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+	// FIXME: prefer using container_of() to get the vif
+ wvif->vif = vif;
+ wvif->wdev = wdev;
+
+ INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
+ INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
+
+ spin_lock_init(&wvif->ps_state_lock);
+ INIT_WORK(&wvif->set_tim_work, wfx_set_tim_work);
+
+ INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
+ INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
+ timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+
+ wvif->setbssparams_done = false;
+ mutex_init(&wvif->bss_loss_lock);
+ INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
+
+ wvif->wep_default_key_id = -1;
+ INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
+
+ sema_init(&wvif->scan.lock, 1);
+ INIT_WORK(&wvif->scan.work, wfx_scan_work);
+ INIT_DELAYED_WORK(&wvif->scan.timeout, wfx_scan_timeout);
+
+ spin_lock_init(&wvif->event_queue_lock);
+ INIT_LIST_HEAD(&wvif->event_queue);
+ INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
+
+ init_completion(&wvif->set_pm_mode_complete);
+ complete(&wvif->set_pm_mode_complete);
+ INIT_WORK(&wvif->set_beacon_wakeup_period_work,
+ wfx_set_beacon_wakeup_period_work);
+ INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
+ INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
+ INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
+ INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ hif_set_macaddr(wvif, vif->addr);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ memcpy(&wvif->edca.params[i], &default_edca_params[i],
+ sizeof(default_edca_params[i]));
+ wvif->edca.uapsd_enable[i] = false;
+ hif_set_edca_queue_params(wvif, &wvif->edca.params[i]);
+ }
+ wfx_set_uapsd_param(wvif, &wvif->edca);
+
+ wfx_tx_policy_init(wvif);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+		// Combo mode does not support Block Acks. Re-enable them
+		// only when a single interface is in use
+ if (wvif_count(wdev) == 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ hif_set_block_ack_policy(wvif, 0x00, 0x00);
+		// Combo mode forces power save; re-apply the mode now that
+		// the number of interfaces has changed
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ return 0;
+}
+
+void wfx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int i;
+
+	// If a scan is in progress, wait for it to complete
+ while (down_trylock(&wvif->scan.lock))
+ schedule();
+ up(&wvif->scan.lock);
+ wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
+
+ mutex_lock(&wdev->conf_mutex);
+ switch (wvif->state) {
+ case WFX_STATE_PRE_STA:
+ case WFX_STATE_STA:
+ case WFX_STATE_IBSS:
+ wfx_tx_lock_flush(wdev);
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wdev);
+ break;
+ case WFX_STATE_AP:
+ for (i = 0; wvif->link_id_map; ++i) {
+ if (wvif->link_id_map & BIT(i)) {
+ wfx_unmap_link(wvif, i);
+ wvif->link_id_map &= ~BIT(i);
+ }
+ }
+ memset(wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+ wvif->sta_asleep_mask = 0;
+ wvif->enable_beacon = false;
+ wvif->mcast_tx = false;
+ wvif->aid0_bit_set = false;
+ wvif->mcast_buffered = false;
+ wvif->pspoll_mask = 0;
+ /* reset.link_id = 0; */
+ hif_reset(wvif, false);
+ break;
+ default:
+ break;
+ }
+
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_tx_queues_wait_empty_vif(wvif);
+ wfx_tx_unlock(wdev);
+
+	/* FIXME: in addition to resetting the MAC address, try to reset the
+	 * interface
+	 */
+ hif_set_macaddr(wvif, NULL);
+
+ cancel_delayed_work_sync(&wvif->scan.timeout);
+
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+ cancel_delayed_work_sync(&wvif->link_id_gc_work);
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_free_event_queue(wvif);
+
+ wdev->vif[wvif->id] = NULL;
+ wvif->vif = NULL;
+
+ mutex_unlock(&wdev->conf_mutex);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+		// Combo mode does not support Block Acks. Re-enable them
+		// only when a single interface is in use
+ if (wvif_count(wdev) == 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ hif_set_block_ack_policy(wvif, 0x00, 0x00);
+		// Combo mode forces power save; re-apply the mode now that
+		// the number of interfaces has changed
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+}
+
+int wfx_start(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+
+void wfx_stop(struct ieee80211_hw *hw)
+{
+ struct wfx_dev *wdev = hw->priv;
+
+ wfx_tx_lock_flush(wdev);
+ mutex_lock(&wdev->conf_mutex);
+ wfx_tx_queues_clear(wdev);
+ mutex_unlock(&wdev->conf_mutex);
+ wfx_tx_unlock(wdev);
+ WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
+}
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
new file mode 100644
index 000000000000..4ccf1b17632b
--- /dev/null
+++ b/drivers/staging/wfx/sta.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_STA_H
+#define WFX_STA_H
+
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_state {
+ WFX_STATE_PASSIVE = 0,
+ WFX_STATE_PRE_STA,
+ WFX_STATE_STA,
+ WFX_STATE_IBSS,
+ WFX_STATE_AP,
+};
+
+struct wfx_ht_info {
+ struct ieee80211_sta_ht_cap ht_cap;
+ enum nl80211_channel_type channel_type;
+ u16 operation_mode;
+};
+
+struct wfx_hif_event {
+ struct list_head link;
+ struct hif_ind_event evt;
+};
+
+struct wfx_edca_params {
+ /* NOTE: index is a linux queue id. */
+ struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
+ bool uapsd_enable[IEEE80211_NUM_ACS];
+};
+
+struct wfx_grp_addr_table {
+ bool enable;
+ int num_addresses;
+ u8 address_list[8][ETH_ALEN];
+};
+
+struct wfx_sta_priv {
+ int link_id;
+ int vif_id;
+};
+
+// mac80211 interface
+int wfx_start(struct ieee80211_hw *hw);
+void wfx_stop(struct ieee80211_hw *hw);
+int wfx_config(struct ieee80211_hw *hw, u32 changed);
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused);
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed);
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta);
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int wfx_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf, u32 changed);
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf);
+
+// WSM Callbacks
+void wfx_suspend_resume(struct wfx_vif *wvif,
+ struct hif_ind_suspend_resume_tx *arg);
+
+// Other Helpers
+void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
+void wfx_update_filtering(struct wfx_vif *wvif);
+int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+
+#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
new file mode 100644
index 000000000000..3f6198ab2235
--- /dev/null
+++ b/drivers/staging/wfx/traces.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints definitions.
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wfx
+
+#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _WFX_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+
+#include "bus.h"
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+/* The hell below needs some explanation. For each symbolic number, we need
+ * to define it with TRACE_DEFINE_ENUM() and list it for __print_symbolic().
+ *
+ * 1. Define a new macro that calls TRACE_DEFINE_ENUM():
+ *
+ *          #define xxx_name(sym) TRACE_DEFINE_ENUM(sym);
+ *
+ * 2. Define the list of all symbols:
+ *
+ *          #define list_names     \
+ *                 ...             \
+ *                 xxx_name(XXX)   \
+ *                 ...
+ *
+ * 3. Instantiate that list_names:
+ *
+ *          list_names
+ *
+ * 4. Redefine xxx_name() as an entry of the array for __print_symbolic():
+ *
+ *          #undef xxx_name
+ *          #define xxx_name(msg) { msg, #msg },
+ *
+ * 5. list_names can now nearly be used with __print_symbolic(), but
+ *    __print_symbolic() dislikes the trailing comma of the list. So we
+ *    define a new list with a dummy element:
+ *
+ *          #define list_for_print_symbolic list_names { -1, NULL }
+ */
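+/* As a hypothetical illustration, a list defined as
+ *
+ *          #define list_names xxx_name(FOO) xxx_name(BAR)
+ *
+ * first expands into two TRACE_DEFINE_ENUM() calls and, after step 4,
+ * list_for_print_symbolic expands to { FOO, "FOO" }, { BAR, "BAR" },
+ * { -1, NULL }, which is a valid table for __print_symbolic()
+ */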
+
+#define _hif_msg_list \
+ hif_cnf_name(ADD_KEY) \
+ hif_cnf_name(BEACON_TRANSMIT) \
+ hif_cnf_name(EDCA_QUEUE_PARAMS) \
+ hif_cnf_name(JOIN) \
+ hif_cnf_name(MAP_LINK) \
+ hif_cnf_name(READ_MIB) \
+ hif_cnf_name(REMOVE_KEY) \
+ hif_cnf_name(RESET) \
+ hif_cnf_name(SET_BSS_PARAMS) \
+ hif_cnf_name(SET_PM_MODE) \
+ hif_cnf_name(START) \
+ hif_cnf_name(START_SCAN) \
+ hif_cnf_name(STOP_SCAN) \
+ hif_cnf_name(TX) \
+ hif_cnf_name(MULTI_TRANSMIT) \
+ hif_cnf_name(UPDATE_IE) \
+ hif_cnf_name(WRITE_MIB) \
+ hif_cnf_name(CONFIGURATION) \
+ hif_cnf_name(CONTROL_GPIO) \
+ hif_cnf_name(PREVENT_ROLLBACK) \
+ hif_cnf_name(SET_SL_MAC_KEY) \
+ hif_cnf_name(SL_CONFIGURE) \
+ hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_cnf_name(SHUT_DOWN) \
+ hif_ind_name(EVENT) \
+ hif_ind_name(JOIN_COMPLETE) \
+ hif_ind_name(RX) \
+ hif_ind_name(SCAN_CMPL) \
+ hif_ind_name(SET_PM_MODE_CMPL) \
+ hif_ind_name(SUSPEND_RESUME_TX) \
+ hif_ind_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_ind_name(ERROR) \
+ hif_ind_name(EXCEPTION) \
+ hif_ind_name(GENERIC) \
+ hif_ind_name(WAKEUP) \
+ hif_ind_name(STARTUP)
+
+#define hif_msg_list_enum _hif_msg_list
+
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg);
+#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg);
+hif_msg_list_enum
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg },
+#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg },
+#define hif_msg_list hif_msg_list_enum { -1, NULL }
+
+#define _hif_mib_list \
+ hif_mib_name(ARP_IP_ADDRESSES_TABLE) \
+ hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \
+ hif_mib_name(BEACON_FILTER_ENABLE) \
+ hif_mib_name(BEACON_FILTER_TABLE) \
+ hif_mib_name(BEACON_WAKEUP_PERIOD) \
+ hif_mib_name(BLOCK_ACK_POLICY) \
+ hif_mib_name(CONFIG_DATA_FILTER) \
+ hif_mib_name(COUNTERS_TABLE) \
+ hif_mib_name(CURRENT_TX_POWER_LEVEL) \
+ hif_mib_name(DOT11_MAC_ADDRESS) \
+ hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \
+ hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \
+ hif_mib_name(DOT11_RTS_THRESHOLD) \
+ hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \
+ hif_mib_name(GL_BLOCK_ACK_INFO) \
+ hif_mib_name(GL_OPERATIONAL_POWER_MODE) \
+ hif_mib_name(GL_SET_MULTI_MSG) \
+ hif_mib_name(INACTIVITY_TIMER) \
+ hif_mib_name(INTERFACE_PROTECTION) \
+ hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(KEEP_ALIVE_PERIOD) \
+ hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(NON_ERP_PROTECTION) \
+ hif_mib_name(NS_IP_ADDRESSES_TABLE) \
+ hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \
+ hif_mib_name(PROTECTED_MGMT_POLICY) \
+ hif_mib_name(RX_FILTER) \
+ hif_mib_name(RCPI_RSSI_THRESHOLD) \
+ hif_mib_name(SET_ASSOCIATION_MODE) \
+ hif_mib_name(SET_DATA_FILTERING) \
+ hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
+ hif_mib_name(SET_HT_PROTECTION) \
+ hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
+ hif_mib_name(SET_TX_RATE_RETRY_POLICY) \
+ hif_mib_name(SET_UAPSD_INFORMATION) \
+ hif_mib_name(PORT_DATAFRAME_CONDITION) \
+ hif_mib_name(SLOT_TIME) \
+ hif_mib_name(STATISTICS_TABLE) \
+ hif_mib_name(TEMPLATE_FRAME) \
+ hif_mib_name(TSF_COUNTER) \
+ hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION)
+
+#define hif_mib_list_enum _hif_mib_list
+
+#undef hif_mib_name
+#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib);
+hif_mib_list_enum
+#undef hif_mib_name
+#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib },
+#define hif_mib_list hif_mib_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(hif_data,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv),
+ TP_STRUCT__entry(
+ __field(int, tx_fill_level)
+ __field(int, msg_id)
+ __field(const char *, msg_type)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __field(int, if_id)
+ __field(int, mib)
+ __array(u8, buf, 128)
+ ),
+ TP_fast_assign(
+ int header_len;
+
+ __entry->tx_fill_level = tx_fill_level;
+ __entry->msg_len = hif->len;
+ __entry->msg_id = hif->id;
+ __entry->if_id = hif->interface;
+ if (is_recv)
+ __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF";
+ else
+ __entry->msg_type = "REQ";
+ if (!is_recv &&
+ (__entry->msg_id == HIF_REQ_ID_READ_MIB ||
+ __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
+ __entry->mib = le16_to_cpup((u16 *) hif->body);
+ header_len = 4;
+ } else {
+ __entry->mib = -1;
+ header_len = 0;
+ }
+ __entry->buf_len = min_t(int, __entry->msg_len,
+ sizeof(__entry->buf))
+ - sizeof(struct hif_msg) - header_len;
+ memcpy(__entry->buf, hif->body + header_len, __entry->buf_len);
+ ),
+ TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)",
+ __entry->tx_fill_level,
+ __entry->if_id,
+ __print_symbolic(__entry->msg_id, hif_msg_list),
+ __entry->msg_type,
+ __entry->mib != -1 ? "/" : "",
+ __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(hif_data, hif_send,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_send(hif, tx_fill_level)\
+ trace_hif_send(hif, tx_fill_level, false)
+DEFINE_EVENT(hif_data, hif_recv,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_recv(hif, tx_fill_level)\
+ trace_hif_recv(hif, tx_fill_level, true)
+
+#define wfx_reg_list_enum \
+ wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \
+ wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \
+ wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \
+ wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \
+ wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \
+ wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \
+ wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \
+ wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT")
+
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym);
+wfx_reg_list_enum
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) { sym, name },
+#define wfx_reg_list wfx_reg_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(io_data,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __array(u8, buf, 32)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->msg_len = len;
+ __entry->buf_len = min_t(int, sizeof(__entry->buf),
+ __entry->msg_len);
+ memcpy(__entry->buf, io_buf, __entry->buf_len);
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %s%s (%d bytes)",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(io_data, io_write,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_write(reg, addr, io_buf, len)\
+ trace_io_write(reg, addr, io_buf, len)
+#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len)
+DEFINE_EVENT(io_data, io_read,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_read(reg, addr, io_buf, len)\
+ trace_io_read(reg, addr, io_buf, len)
+#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len)
+
+DECLARE_EVENT_CLASS(io_data32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, val)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->val = val;
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %08x",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __entry->val
+ )
+);
+DEFINE_EVENT(io_data32, io_write32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val)
+#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val)
+DEFINE_EVENT(io_data32, io_read32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val)
+#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val)
+
+DECLARE_EVENT_CLASS(piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored),
+ TP_STRUCT__entry(
+ __field(int, val)
+ __field(bool, ignored)
+ ),
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->ignored = ignored;
+ ),
+ TP_printk("CONTROL: %08x%s",
+ __entry->val,
+ __entry->ignored ? " (ignored)" : ""
+ )
+);
+DEFINE_EVENT(piggyback, piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored));
+#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored)
+
+TRACE_EVENT(bh_stats,
+ TP_PROTO(int ind, int req, int cnf, int busy, bool release),
+ TP_ARGS(ind, req, cnf, busy, release),
+ TP_STRUCT__entry(
+ __field(int, ind)
+ __field(int, req)
+ __field(int, cnf)
+ __field(int, busy)
+ __field(bool, release)
+ ),
+ TP_fast_assign(
+ __entry->ind = ind;
+ __entry->req = req;
+ __entry->cnf = cnf;
+ __entry->busy = busy;
+ __entry->release = release;
+ ),
+ TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s",
+ __entry->ind,
+ __entry->req,
+ __entry->cnf,
+ __entry->busy,
+ __entry->release ? "release" : "keep"
+ )
+);
+#define _trace_bh_stats(ind, req, cnf, busy, release)\
+ trace_bh_stats(ind, req, cnf, busy, release)
+
+TRACE_EVENT(tx_stats,
+ TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+ TP_ARGS(tx_cnf, skb, delay),
+ TP_STRUCT__entry(
+ __field(int, pkt_id)
+ __field(int, delay_media)
+ __field(int, delay_queue)
+ __field(int, delay_fw)
+ __field(int, ack_failures)
+ __field(int, flags)
+ __array(int, rate, 4)
+ __array(int, tx_count, 4)
+ ),
+ TP_fast_assign(
+		// Keep in sync with the wfx_rates definition in main.c
+ static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
+ 10, 11, 12, 13 };
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ int i;
+
+ __entry->pkt_id = tx_cnf->packet_id;
+ __entry->delay_media = tx_cnf->media_delay;
+ __entry->delay_queue = tx_cnf->tx_queue_delay;
+ __entry->delay_fw = delay;
+ __entry->ack_failures = tx_cnf->ack_failures;
+ if (!tx_cnf->status || __entry->ack_failures)
+ __entry->ack_failures += 1;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->rate[i] = rates[i].idx;
+ else
+ __entry->rate[i] = hw_rate[rates[i].idx];
+ __entry->tx_count[i] = rates[i].count;
+ }
+ __entry->flags = 0;
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->flags |= 0x01;
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ __entry->flags |= 0x02;
+ if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ __entry->flags |= 0x04;
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ __entry->flags |= 0x08;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ __entry->flags |= 0x10;
+ if (tx_cnf->status)
+ __entry->flags |= 0x20;
+ if (tx_cnf->status == HIF_REQUEUE)
+ __entry->flags |= 0x40;
+ ),
+ TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
+ __entry->pkt_id,
+ __print_flags(__entry->flags, NULL,
+ { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" },
+ { 0x08, "R" }, { 0x10, "D" }, { 0x20, "F" },
+ { 0x40, "Q" }),
+ __entry->rate[0],
+ __entry->tx_count[0],
+ __entry->rate[1],
+ __entry->tx_count[1],
+ __entry->rate[2],
+ __entry->tx_count[2],
+ __entry->rate[3],
+ __entry->tx_count[3],
+ __entry->ack_failures,
+ __entry->delay_media,
+ __entry->delay_queue,
+ __entry->delay_fw
+ )
+);
+#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay)
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE traces
+
+#include <trace/define_trace.h>
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
new file mode 100644
index 000000000000..781a8c8ba982
--- /dev/null
+++ b/drivers/staging/wfx/wfx.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common private data for Silicon Labs WFx chips.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_H
+#define WFX_H
+
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "data_tx.h"
+#include "main.h"
+#include "queue.h"
+#include "secure_link.h"
+#include "sta.h"
+#include "scan.h"
+#include "hif_tx.h"
+#include "hif_api_general.h"
+
+struct hwbus_ops;
+
+struct wfx_dev {
+ struct wfx_platform_data pdata;
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif[2];
+ struct mac_address addresses[2];
+ const struct hwbus_ops *hwbus_ops;
+ void *hwbus_priv;
+
+ u8 keyset;
+ struct completion firmware_ready;
+ struct hif_ind_startup hw_caps;
+ struct wfx_hif hif;
+ struct sl_context sl;
+ int chip_frozen;
+ struct mutex conf_mutex;
+
+ struct wfx_hif_cmd hif_cmd;
+ struct wfx_queue tx_queue[4];
+ struct wfx_queue_stats tx_queue_stats;
+ int tx_burst_idx;
+ atomic_t tx_lock;
+
+ u32 key_map;
+ struct hif_req_add_key keys[MAX_KEY_ENTRIES];
+
+ struct hif_rx_stats rx_stats;
+ struct mutex rx_stats_lock;
+
+ int output_power;
+ atomic_t scan_in_progress;
+};
+
+struct wfx_vif {
+ struct wfx_dev *wdev;
+ struct ieee80211_vif *vif;
+ struct ieee80211_channel *channel;
+ int id;
+ enum wfx_state state;
+
+ int delayed_link_loss;
+ int bss_loss_state;
+ u32 bss_loss_confirm_id;
+ struct mutex bss_loss_lock;
+ struct delayed_work bss_loss_work;
+
+ u32 link_id_map;
+ struct wfx_link_entry link_id_db[WFX_MAX_STA_IN_AP_MODE];
+ struct delayed_work link_id_gc_work;
+ struct work_struct link_id_work;
+
+ bool aid0_bit_set;
+ bool mcast_tx;
+ bool mcast_buffered;
+ struct wfx_grp_addr_table mcast_filter;
+ struct timer_list mcast_timeout;
+ struct work_struct mcast_start_work;
+ struct work_struct mcast_stop_work;
+
+ s8 wep_default_key_id;
+ struct sk_buff *wep_pending_skb;
+ struct work_struct wep_key_work;
+
+ struct tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+
+ u32 sta_asleep_mask;
+ u32 pspoll_mask;
+ spinlock_t ps_state_lock;
+ struct work_struct set_tim_work;
+
+ int dtim_period;
+ int beacon_int;
+ bool enable_beacon;
+ struct work_struct set_beacon_wakeup_period_work;
+
+ bool filter_bssid;
+ bool fwd_probe_req;
+ bool disable_beacon_filter;
+ struct work_struct update_filtering_work;
+
+ u32 erp_info;
+ int cqm_rssi_thold;
+ bool setbssparams_done;
+ struct wfx_ht_info ht_info;
+ struct wfx_edca_params edca;
+ struct hif_mib_set_uapsd_information uapsd_info;
+ struct hif_req_set_bss_params bss_params;
+ struct work_struct bss_params_work;
+ struct work_struct set_cts_work;
+
+ int join_complete_status;
+ bool delayed_unjoin;
+ struct work_struct unjoin_work;
+
+ struct wfx_scan scan;
+
+ struct hif_req_set_pm_mode powersave_mode;
+ struct completion set_pm_mode_complete;
+
+ struct list_head event_queue;
+ spinlock_t event_queue_lock;
+ struct work_struct event_handler_work;
+};
+
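+/*
+ * Map a vif index (as carried in HIF messages) to the corresponding
+ * wfx_vif. The index comes from the device, hence the
+ * array_index_nospec() hardening before it is used to access the array.
+ */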
+static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
+{
+ if (vif_id >= ARRAY_SIZE(wdev->vif)) {
+ dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id);
+ return NULL;
+ }
+ vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
+ if (!wdev->vif[vif_id]) {
+ dev_dbg(wdev->dev, "requesting non-allocated vif: %d\n",
+ vif_id);
+ return NULL;
+ }
+ return (struct wfx_vif *) wdev->vif[vif_id]->drv_priv;
+}
+
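+/*
+ * Iterate over instantiated vifs: pass NULL to get the first one, then
+ * pass the previous return value to get the next. Returns NULL once the
+ * list is exhausted.
+ */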
+static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev,
+ struct wfx_vif *cur)
+{
+ int i;
+ int mark = 0;
+ struct wfx_vif *tmp;
+
+ if (!cur)
+ mark = 1;
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ tmp = wdev_to_wvif(wdev, i);
+ if (mark && tmp)
+ return tmp;
+ if (tmp == cur)
+ mark = 1;
+ }
+ return NULL;
+}
+
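+/* Number of currently instantiated vifs. */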
+static inline int wvif_count(struct wfx_dev *wdev)
+{
+ int i;
+ int ret = 0;
+ struct wfx_vif *wvif;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ wvif = wdev_to_wvif(wdev, i);
+ if (wvif)
+ ret++;
+ }
+ return ret;
+}
+
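+/* Reverse the 'length' bytes at 'src' in place. */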
+static inline void memreverse(u8 *src, u8 length)
+{
+ u8 *lo = src;
+ u8 *hi = src + length - 1;
+ u8 swap;
+
+ while (lo < hi) {
+ swap = *lo;
+ *lo++ = *hi;
+ *hi-- = swap;
+ }
+}
+
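+/*
+ * Return non-zero if any of the 'size' bytes at 'src' is non-zero. Only
+ * the first byte needs an explicit test: the memcmp() compares the
+ * buffer against itself shifted by one byte.
+ */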
+static inline int memzcmp(void *src, unsigned int size)
+{
+ u8 *buf = src;
+
+ if (!size)
+ return 0;
+ if (*buf)
+ return 1;
+ return memcmp(buf, buf + 1, size - 1);
+}
+
+#endif /* WFX_H */
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index a5a8e806b98e..a3305a0a888a 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -4,11 +4,11 @@ obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
-wilc1000-objs := wilc_wfi_cfgoperations.o wilc_netdev.o wilc_mon.o \
- wilc_hif.o wilc_wlan_cfg.o wilc_wlan.o
+wilc1000-objs := cfg80211.o netdev.o mon.o \
+ hif.o wlan_cfg.o wlan.o
obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += wilc_sdio.o
+wilc1000-sdio-objs += sdio.o
obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += wilc_spi.o
+wilc1000-spi-objs += spi.o
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/cfg80211.c
index 22f21831649b..4863e516ff13 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/cfg80211.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_cfgoperations.h"
+#include "cfg80211.h"
#define FRAME_TYPE_ID 0
#define ACTION_CAT_ID 24
@@ -137,6 +137,7 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
struct wilc *wl = vif->wilc;
struct host_if_drv *wfi_drv = priv->hif_drv;
struct wilc_conn_info *conn_info = &wfi_drv->conn_info;
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
vif->connecting = false;
@@ -158,12 +159,16 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
memcpy(priv->associated_bss, conn_info->bssid,
ETH_ALEN);
- cfg80211_connect_result(dev, conn_info->bssid,
- conn_info->req_ies,
- conn_info->req_ies_len,
- conn_info->resp_ies,
- conn_info->resp_ies_len, connect_status,
- GFP_KERNEL);
+ cfg80211_ref_bss(wiphy, vif->bss);
+ cfg80211_connect_bss(dev, conn_info->bssid, vif->bss,
+ conn_info->req_ies,
+ conn_info->req_ies_len,
+ conn_info->resp_ies,
+ conn_info->resp_ies_len,
+ connect_status, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
+
+ vif->bss = NULL;
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
u16 reason = 0;
@@ -186,15 +191,15 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
}
}
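+/* Callers must hold the SRCU read lock (wl->srcu) or wl->vif_mutex. */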
-static struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl)
+struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl)
{
- int i;
+ struct wilc_vif *vif;
- for (i = 0; i < wl->vif_num; i++)
- if (wl->vif[i])
- return wl->vif[i];
+ vif = list_first_or_null_rcu(&wl->vif_list, typeof(*vif), list);
+ if (!vif)
+ return ERR_PTR(-EINVAL);
- return ERR_PTR(-EINVAL);
+ return vif;
}
static int set_channel(struct wiphy *wiphy,
@@ -204,11 +209,12 @@ static int set_channel(struct wiphy *wiphy,
struct wilc_vif *vif;
u32 channelnum;
int result;
+ int srcu_idx;
- mutex_lock(&wl->vif_mutex);
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return PTR_ERR(vif);
}
@@ -219,7 +225,7 @@ static int set_channel(struct wiphy *wiphy,
if (result)
netdev_err(vif->ndev, "Error in setting channel\n");
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return result;
}
@@ -405,6 +411,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
goto out_put_bss;
}
kfree(join_params);
+ vif->bss = bss;
cfg80211_put_bss(wiphy, bss);
return 0;
@@ -450,6 +457,8 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
ret = -EINVAL;
}
+ vif->bss = NULL;
+
return ret;
}
@@ -620,29 +629,26 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
bool pairwise,
const u8 *mac_addr)
{
- struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
- if (netdev == wl->vif[0]->ndev) {
- if (priv->wilc_gtk[key_index]) {
- kfree(priv->wilc_gtk[key_index]->key);
- priv->wilc_gtk[key_index]->key = NULL;
- kfree(priv->wilc_gtk[key_index]->seq);
- priv->wilc_gtk[key_index]->seq = NULL;
+ if (priv->wilc_gtk[key_index]) {
+ kfree(priv->wilc_gtk[key_index]->key);
+ priv->wilc_gtk[key_index]->key = NULL;
+ kfree(priv->wilc_gtk[key_index]->seq);
+ priv->wilc_gtk[key_index]->seq = NULL;
- kfree(priv->wilc_gtk[key_index]);
- priv->wilc_gtk[key_index] = NULL;
- }
+ kfree(priv->wilc_gtk[key_index]);
+ priv->wilc_gtk[key_index] = NULL;
+ }
- if (priv->wilc_ptk[key_index]) {
- kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = NULL;
- kfree(priv->wilc_ptk[key_index]->seq);
- priv->wilc_ptk[key_index]->seq = NULL;
- kfree(priv->wilc_ptk[key_index]);
- priv->wilc_ptk[key_index] = NULL;
- }
+ if (priv->wilc_ptk[key_index]) {
+ kfree(priv->wilc_ptk[key_index]->key);
+ priv->wilc_ptk[key_index]->key = NULL;
+ kfree(priv->wilc_ptk[key_index]->seq);
+ priv->wilc_ptk[key_index]->seq = NULL;
+ kfree(priv->wilc_ptk[key_index]);
+ priv->wilc_ptk[key_index] = NULL;
}
if (key_index <= 3 && priv->wep_key_len[key_index]) {
@@ -752,33 +758,19 @@ static int change_bss(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-struct wilc_vif *wilc_get_interface(struct wilc *wl)
-{
- int i;
- struct wilc_vif *vif = NULL;
-
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- if (wl->vif[i]) {
- vif = wl->vif[i];
- break;
- }
- }
- mutex_unlock(&wl->vif_mutex);
- return vif;
-}
-
static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- int ret;
+ int ret = -EINVAL;
struct cfg_param_attr cfg_param_val;
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
struct wilc_priv *priv;
+ int srcu_idx;
- vif = wilc_get_interface(wl);
- if (!vif)
- return -EINVAL;
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_wl_to_vif(wl);
+ if (IS_ERR(vif))
+ goto out;
priv = &vif->priv;
cfg_param_val.flag = 0;
@@ -808,7 +800,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
} else {
netdev_err(vif->ndev,
"Fragmentation threshold out of range\n");
- return -EINVAL;
+ goto out;
}
}
@@ -821,7 +813,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
cfg_param_val.rts_threshold = wiphy->rts_threshold;
} else {
netdev_err(vif->ndev, "RTS threshold out of range\n");
- return -EINVAL;
+ goto out;
}
}
@@ -829,6 +821,8 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (ret)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
+out:
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1144,7 +1138,7 @@ static int remain_on_channel(struct wiphy *wiphy,
cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL);
mod_timer(&vif->hif_drv->remain_on_ch_timer,
- jiffies + msecs_to_jiffies(duration));
+ jiffies + msecs_to_jiffies(duration + 1000));
return ret;
}
@@ -1419,8 +1413,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE)
wilc_wfi_deinit_mon_interface(wl, true);
vif->iftype = WILC_STATION_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_STATION_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
memset(priv->assoc_stainfo.sta_associated_bss, 0,
WILC_MAX_NUM_STA * ETH_ALEN);
@@ -1432,8 +1428,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
priv->wdev.iftype = type;
vif->monitor_flag = 0;
vif->iftype = WILC_CLIENT_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_STATION_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
break;
case NL80211_IFTYPE_AP:
@@ -1450,8 +1448,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
dev->ieee80211_ptr->iftype = type;
priv->wdev.iftype = type;
vif->iftype = WILC_GO_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_AP_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_AP_MODE, vif->idx);
break;
default:
@@ -1557,20 +1557,16 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int wilc_get_vif_from_type(struct wilc *wl, int type)
+static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
- int i;
+ struct wilc_vif *vif;
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- if (wl->vif[i]->iftype == type) {
- mutex_unlock(&wl->vif_mutex);
- return i;
- }
+ list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ if (vif->iftype == type)
+ return vif;
}
- mutex_unlock(&wl->vif_mutex);
- return -EINVAL;
+ return NULL;
}
static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
@@ -1583,29 +1579,36 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
struct wilc_vif *vif;
struct wireless_dev *wdev;
int iftype;
- int ret;
if (type == NL80211_IFTYPE_MONITOR) {
struct net_device *ndev;
- int ap_index = wilc_get_vif_from_type(wl, WILC_AP_MODE);
-
- if (ap_index < 0) {
- ap_index = wilc_get_vif_from_type(wl, WILC_GO_MODE);
- if (ap_index < 0)
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
+ if (!vif) {
+ vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
+ if (!vif) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
+ }
}
- vif = wl->vif[ap_index];
- if (vif->monitor_flag)
+ if (vif->monitor_flag) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
+ }
ndev = wilc_wfi_init_mon_interface(wl, name, vif->ndev);
- if (ndev)
+ if (ndev) {
vif->monitor_flag = 1;
- else
+ } else {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ERR_PTR(-EINVAL);
+ }
wdev = &vif->priv.wdev;
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return wdev;
}
@@ -1613,9 +1616,10 @@ validate_interface:
mutex_lock(&wl->vif_mutex);
if (wl->vif_num == WILC_NUM_CONCURRENT_IFC) {
pr_err("Reached maximum number of interfaces\n");
- ret = -EINVAL;
- goto out_err;
+ mutex_unlock(&wl->vif_mutex);
+ return ERR_PTR(-EINVAL);
}
+ mutex_unlock(&wl->vif_mutex);
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -1625,30 +1629,20 @@ validate_interface:
iftype = WILC_AP_MODE;
break;
default:
- ret = -EOPNOTSUPP;
- goto out_err;
+ return ERR_PTR(-EOPNOTSUPP);
}
vif = wilc_netdev_ifc_init(wl, name, iftype, type, true);
- if (IS_ERR(vif)) {
- ret = PTR_ERR(vif);
- goto out_err;
- }
-
- mutex_unlock(&wl->vif_mutex);
+ if (IS_ERR(vif))
+ return ERR_CAST(vif);
return &vif->priv.wdev;
-
-out_err:
- mutex_unlock(&wl->vif_mutex);
- return ERR_PTR(ret);
}
static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
- int i;
if (wdev->iftype == NL80211_IFTYPE_AP ||
wdev->iftype == NL80211_IFTYPE_P2P_GO)
@@ -1658,22 +1652,12 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- mutex_lock(&wl->vif_mutex);
wilc_set_operation_mode(vif, 0, 0, 0);
- for (i = vif->idx; i < wl->vif_num; i++) {
- if ((i + 1) >= wl->vif_num) {
- wl->vif[i] = NULL;
- } else {
- vif = wl->vif[i + 1];
- vif->idx = i;
- wl->vif[i] = vif;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- vif->iftype, vif->idx);
- }
- }
+ mutex_lock(&wl->vif_mutex);
+ list_del_rcu(&vif->list);
wl->vif_num--;
mutex_unlock(&wl->vif_mutex);
-
+ synchronize_srcu(&wl->srcu);
return 0;
}
@@ -1698,25 +1682,39 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
+ int srcu_idx;
- mutex_lock(&wl->vif_mutex);
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return;
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
int ret;
+ int srcu_idx;
s32 tx_power = MBM_TO_DBM(mbm);
- struct wilc_vif *vif = netdev_priv(wdev->netdev);
+ struct wilc *wl = wiphy_priv(wiphy);
+ struct wilc_vif *vif;
+
+ if (!wl->initialized)
+ return -EIO;
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_wl_to_vif(wl);
+ if (IS_ERR(vif)) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+ return -EINVAL;
+ }
+
+ netdev_info(vif->ndev, "Setting tx power %d\n", tx_power);
if (tx_power < 0)
tx_power = 0;
else if (tx_power > 18)
@@ -1724,6 +1722,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = wilc_set_tx_power(vif, tx_power);
if (ret)
netdev_err(vif->ndev, "Failed to set tx power\n");
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1803,6 +1802,17 @@ static void wlan_init_locks(struct wilc *wl)
init_completion(&wl->cfg_event);
init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
+ init_srcu_struct(&wl->srcu);
+}
+
+void wlan_deinit_locks(struct wilc *wilc)
+{
+ mutex_destroy(&wilc->hif_cs);
+ mutex_destroy(&wilc->rxq_cs);
+ mutex_destroy(&wilc->cfg_cmd_lock);
+ mutex_destroy(&wilc->txq_add_to_head_cs);
+ mutex_destroy(&wilc->vif_mutex);
+ cleanup_srcu_struct(&wilc->srcu);
}
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
@@ -1816,6 +1826,8 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
if (!wl)
return -EINVAL;
+ wlan_init_locks(wl);
+
ret = wilc_wlan_cfg_init(wl);
if (ret)
goto free_wl;
@@ -1826,6 +1838,7 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
wl->chip_ps_state = WILC_CHIP_WAKEDUP;
INIT_LIST_HEAD(&wl->txq_head.list);
INIT_LIST_HEAD(&wl->rxq_head.list);
+ INIT_LIST_HEAD(&wl->vif_list);
wl->hif_workqueue = create_singlethread_workqueue("WILC_wq");
if (!wl->hif_workqueue) {
@@ -1839,8 +1852,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
goto free_hq;
}
- wlan_init_locks(wl);
-
return 0;
free_hq:
@@ -1850,6 +1861,7 @@ free_cfg:
wilc_wlan_cfg_deinit(wl);
free_wl:
+ wlan_deinit_locks(wl);
wiphy_unregister(wl->wiphy);
wiphy_free(wl->wiphy);
return ret;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/cfg80211.h
index 234faaabdb82..5e5d63f70df2 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/cfg80211.h
@@ -4,9 +4,9 @@
* All rights reserved.
*/
-#ifndef NM_WFI_CFGOPERATIONS
-#define NM_WFI_CFGOPERATIONS
-#include "wilc_wfi_netdevice.h"
+#ifndef WILC_CFG80211_H
+#define WILC_CFG80211_H
+#include "netdev.h"
struct wiphy *wilc_cfg_alloc(void);
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
@@ -24,4 +24,6 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
u16 frame_type, bool reg);
struct wilc_vif *wilc_get_interface(struct wilc *wl);
+struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl);
+void wlan_deinit_locks(struct wilc *wilc);
#endif
diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/hif.c
index f2b7d5a1be17..349e45d58ec9 100644
--- a/drivers/staging/wilc1000/wilc_hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_netdevice.h"
+#include "netdev.h"
#define WILC_HIF_SCAN_TIMEOUT_MS 5000
#define WILC_HIF_CONNECT_TIMEOUT_MS 9500
@@ -32,7 +32,7 @@ struct wilc_op_mode {
};
struct wilc_reg_frame {
- bool reg;
+ u8 reg;
u8 reg_id;
__le16 frame_type;
} __packed;
@@ -183,11 +183,17 @@ int wilc_get_vif_idx(struct wilc_vif *vif)
static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
{
int index = idx - 1;
+ struct wilc_vif *vif;
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- return wilc->vif[index];
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->idx == index)
+ return vif;
+ }
+
+ return NULL;
}
static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
@@ -473,20 +479,27 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
if (rates_ie) {
rates_len = rates_ie[1];
+ if (rates_len > WILC_MAX_RATES_SUPPORTED)
+ rates_len = WILC_MAX_RATES_SUPPORTED;
param->supp_rates[0] = rates_len;
memcpy(&param->supp_rates[1], rates_ie + 2, rates_len);
}
- supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data,
- ies->len);
- if (supp_rates_ie) {
- if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len))
- param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
- else
- param->supp_rates[0] += supp_rates_ie[1];
+ if (rates_len < WILC_MAX_RATES_SUPPORTED) {
+ supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+ ies->data, ies->len);
+ if (supp_rates_ie) {
+ u8 ext_rates = supp_rates_ie[1];
- memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2,
- (param->supp_rates[0] - rates_len));
+ if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len))
+ param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
+ else
+ param->supp_rates[0] += ext_rates;
+
+ memcpy(&param->supp_rates[rates_len + 1],
+ supp_rates_ie + 2,
+ (param->supp_rates[0] - rates_len));
+ }
}
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
@@ -544,7 +557,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->mode_802_11i = 2;
param->rsn_found = true;
- //extract RSN capabilities
+ /* extract RSN capabilities */
offset += (rsn_ie[offset] * 4) + 2;
offset += (rsn_ie[offset] * 4) + 2;
memcpy(param->rsn_cap, &rsn_ie[offset], 2);
@@ -1776,7 +1789,9 @@ void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
wid.val = (u8 *)&reg_frame;
memset(&reg_frame, 0x0, sizeof(reg_frame));
- reg_frame.reg = reg;
+
+ if (reg)
+ reg_frame.reg = 1;
switch (frame_type) {
case IEEE80211_STYPE_ACTION:
diff --git a/drivers/staging/wilc1000/wilc_hif.h b/drivers/staging/wilc1000/hif.h
index ac5fe57f872b..22ee6fffd599 100644
--- a/drivers/staging/wilc1000/wilc_hif.h
+++ b/drivers/staging/wilc1000/hif.h
@@ -4,10 +4,10 @@
* All rights reserved.
*/
-#ifndef HOST_INT_H
-#define HOST_INT_H
+#ifndef WILC_HIF_H
+#define WILC_HIF_H
#include <linux/ieee80211.h>
-#include "wilc_wlan_if.h"
+#include "wlan_if.h"
enum {
WILC_IDLE_MODE = 0x0,
diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/mon.c
index d6f14f69ad64..48ac33f06f63 100644
--- a/drivers/staging/wilc1000/wilc_mon.c
+++ b/drivers/staging/wilc1000/mon.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_cfgoperations.h"
+#include "cfg80211.h"
struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -220,7 +220,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
{
struct wilc_wfi_mon_priv *priv;
- /*If monitor interface is already initialized, return it*/
+ /* If monitor interface is already initialized, return it */
if (wl->monitor_dev)
return wl->monitor_dev;
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/netdev.c
index 508acb8bb089..d2c0b0f7cf63 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -10,8 +10,8 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_cfg.h"
+#include "cfg80211.h"
+#include "wlan_cfg.h"
#define WILC_MULTICAST_TABLE_SIZE 8
@@ -97,29 +97,25 @@ void wilc_mac_indicate(struct wilc *wilc)
static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
{
u8 *bssid, *bssid1;
- int i = 0;
struct net_device *ndev = NULL;
+ struct wilc_vif *vif;
bssid = mac_header + 10;
bssid1 = mac_header + 4;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i]->mode == WILC_STATION_MODE)
- if (ether_addr_equal_unaligned(bssid,
- wilc->vif[i]->bssid)) {
- ndev = wilc->vif[i]->ndev;
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->mode == WILC_STATION_MODE)
+ if (ether_addr_equal_unaligned(bssid, vif->bssid)) {
+ ndev = vif->ndev;
goto out;
}
- if (wilc->vif[i]->mode == WILC_AP_MODE)
- if (ether_addr_equal_unaligned(bssid1,
- wilc->vif[i]->bssid)) {
- ndev = wilc->vif[i]->ndev;
+ if (vif->mode == WILC_AP_MODE)
+ if (ether_addr_equal_unaligned(bssid1, vif->bssid)) {
+ ndev = vif->ndev;
goto out;
}
}
out:
- mutex_unlock(&wilc->vif_mutex);
return ndev;
}
@@ -137,13 +133,16 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
{
- u8 i = 0;
+ int srcu_idx;
u8 ret_val = 0;
+ struct wilc_vif *vif;
- for (i = 0; i < wilc->vif_num; i++)
- if (!is_zero_ether_addr(wilc->vif[i]->bssid))
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (!is_zero_ether_addr(vif->bssid))
ret_val++;
-
+ }
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return ret_val;
}
@@ -167,16 +166,16 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int i;
+ int srcu_idx;
struct wilc_vif *ifc;
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- ifc = wl->vif[i];
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ list_for_each_entry_rcu(ifc, &wl->vif_list,
+ list) {
if (ifc->mac_opened && ifc->ndev)
netif_wake_queue(ifc->ndev);
}
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
} while (ret == -ENOBUFS && !wl->close);
}
@@ -424,18 +423,6 @@ fail:
return -1;
}
-static void wlan_deinit_locks(struct net_device *dev)
-{
- struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
-
- mutex_destroy(&wilc->hif_cs);
- mutex_destroy(&wilc->rxq_cs);
- mutex_destroy(&wilc->cfg_cmd_lock);
- mutex_destroy(&wilc->txq_add_to_head_cs);
- mutex_destroy(&wilc->vif_mutex);
-}
-
static void wlan_deinitialize_threads(struct net_device *dev)
{
struct wilc_vif *vif = netdev_priv(dev);
@@ -477,7 +464,6 @@ static void wilc_wlan_deinitialize(struct net_device *dev)
wilc_wlan_stop(wl, vif);
wilc_wlan_cleanup(dev);
- wlan_deinit_locks(dev);
wl->initialized = false;
@@ -738,14 +724,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
- int i;
+ int srcu_idx;
+ struct wilc_vif *vif;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i]->mac_opened)
- netif_stop_queue(wilc->vif[i]->ndev);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->mac_opened)
+ netif_stop_queue(vif->ndev);
}
- mutex_unlock(&wilc->vif_mutex);
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
return 0;
@@ -823,26 +810,22 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
{
- int i = 0;
+ int srcu_idx;
struct wilc_vif *vif;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
u16 type = le16_to_cpup((__le16 *)buff);
- vif = netdev_priv(wilc->vif[i]->ndev);
- if ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
- (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)) {
+ if (vif->priv.p2p_listen_state &&
+ ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
+ (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)))
wilc_wfi_p2p_rx(vif, buff, size);
- break;
- }
- if (vif->monitor_flag) {
+ if (vif->monitor_flag)
wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
- break;
- }
}
- mutex_unlock(&wilc->vif_mutex);
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
static const struct net_device_ops wilc_netdev_ops = {
@@ -856,7 +839,8 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- int i;
+ struct wilc_vif *vif;
+ int srcu_idx;
if (!wilc)
return;
@@ -866,21 +850,57 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i] && wilc->vif[i]->ndev)
- unregister_netdev(wilc->vif[i]->ndev);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->ndev)
+ unregister_netdev(vif->ndev);
}
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
+
+ do {
+ mutex_lock(&wilc->vif_mutex);
+ if (wilc->vif_num <= 0) {
+ mutex_unlock(&wilc->vif_mutex);
+ break;
+ }
+ vif = wilc_get_wl_to_vif(wilc);
+ if (!IS_ERR(vif))
+ list_del_rcu(&vif->list);
+
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
+ } while (1);
+
wilc_wlan_cfg_deinit(wilc);
+ wlan_deinit_locks(wilc);
kfree(wilc->bus_data);
wiphy_unregister(wilc->wiphy);
wiphy_free(wilc->wiphy);
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
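+/*
+ * Only two concurrent interfaces are supported: pick whichever index
+ * (0 or 1) is not taken by an existing vif.
+ */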
+static u8 wilc_get_available_idx(struct wilc *wl)
+{
+ int idx = 0;
+ struct wilc_vif *vif;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ if (vif->idx == 0)
+ idx = 1;
+ else
+ idx = 0;
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+ return idx;
+}
+
struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
int vif_type, enum nl80211_iftype type,
bool rtnl_locked)
@@ -921,10 +941,14 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
- vif->wilc->vif[wl->vif_num] = vif;
- vif->idx = wl->vif_num;
- wl->vif_num += 1;
+ vif->idx = wilc_get_available_idx(wl);
vif->mac_opened = 0;
+ mutex_lock(&wl->vif_mutex);
+ list_add_tail_rcu(&vif->list, &wl->vif_list);
+ wl->vif_num += 1;
+ mutex_unlock(&wl->vif_mutex);
+ synchronize_srcu(&wl->srcu);
+
return vif;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/netdev.h
index 978a8bdbfc40..cd8f0d72caaa 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -4,8 +4,8 @@
* All rights reserved.
*/
-#ifndef WILC_WFI_NETDEVICE
-#define WILC_WFI_NETDEVICE
+#ifndef WILC_NETDEV_H
+#define WILC_NETDEV_H
#include <linux/tcp.h>
#include <linux/ieee80211.h>
@@ -14,9 +14,9 @@
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
-#include "wilc_hif.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_cfg.h"
+#include "hif.h"
+#include "wlan.h"
+#include "wlan_cfg.h"
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
@@ -60,7 +60,7 @@ struct sta_info {
u8 sta_associated_bss[WILC_MAX_NUM_STA][ETH_ALEN];
};
-/*Parameters needed for host interface for remaining on channel*/
+/* Parameters needed for host interface for remaining on channel */
struct wilc_wfi_p2p_listen_params {
struct ieee80211_channel *listen_ch;
u32 listen_duration;
@@ -145,11 +145,13 @@ struct wilc_priv {
struct wilc_pmkid_attr pmkid_list;
u8 wep_key[4][WLAN_KEY_LEN_WEP104];
u8 wep_key_len[4];
+
/* The real interface that the monitor is on */
struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[WILC_MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[WILC_MAX_NUM_STA];
u8 wilc_groupkey;
+
/* mutexes */
struct mutex scan_req_lock;
bool p2p_listen_state;
@@ -208,6 +210,8 @@ struct wilc_vif {
struct tcp_ack_filter ack_filter;
bool connecting;
struct wilc_priv priv;
+ struct list_head list;
+ struct cfg80211_bss *bss;
};
struct wilc {
@@ -221,16 +225,22 @@ struct wilc {
int dev_irq_num;
int close;
u8 vif_num;
- struct wilc_vif *vif[WILC_NUM_CONCURRENT_IFC];
- /*protect vif list*/
+ struct list_head vif_list;
+
+ /* protect vif list */
struct mutex vif_mutex;
+ struct srcu_struct srcu;
u8 open_ifcs;
- /*protect head of transmit queue*/
+
+ /* protect head of transmit queue */
struct mutex txq_add_to_head_cs;
- /*protect txq_entry_t transmit queue*/
+
+ /* protect txq_entry_t transmit queue */
spinlock_t txq_spinlock;
- /*protect rxq_entry_t receiver queue*/
+
+ /* protect rxq_entry_t receiver queue */
struct mutex rxq_cs;
+
/* lock to protect hif access */
struct mutex hif_cs;
@@ -242,6 +252,7 @@ struct wilc {
struct task_struct *txq_thread;
int quit;
+
/* lock to protect issue of wid command to firmware */
struct mutex cfg_cmd_lock;
struct wilc_cfg_frame cfg_frame;
@@ -268,6 +279,7 @@ struct wilc {
struct wilc_cfg cfg;
void *bus_data;
struct net_device *monitor_dev;
+
/* deinit lock */
struct mutex deinit_lock;
u8 sta_ch;
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/sdio.c
index c787c5da8f2b..319e039380b0 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -8,8 +8,8 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/host.h>
-#include "wilc_wfi_netdevice.h"
-#include "wilc_wfi_cfgoperations.h"
+#include "netdev.h"
+#include "cfg80211.h"
#define SDIO_MODALIAS "wilc1000_sdio"
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/spi.c
index 3c1ae9e9f9aa..55f8757325f0 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -4,10 +4,11 @@
* All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/spi/spi.h>
-#include "wilc_wfi_netdevice.h"
-#include "wilc_wfi_cfgoperations.h"
+#include "netdev.h"
+#include "cfg80211.h"
struct wilc_spi {
int crc_off;
@@ -132,6 +133,12 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->bus_data = spi_priv;
wilc->gpio_irq = gpio;
+ wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
return 0;
}
@@ -142,6 +149,10 @@ static int wilc_bus_remove(struct spi_device *spi)
/* free the GPIO in module remove */
if (wilc->gpio_irq)
gpiod_put(wilc->gpio_irq);
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
wilc_netdev_cleanup(wilc);
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wlan.c
index 771d8cb68dc1..d3de76126b78 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -6,8 +6,8 @@
#include <linux/if_ether.h>
#include <linux/ip.h>
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_cfg.h"
+#include "cfg80211.h"
+#include "wlan_cfg.h"
static inline bool is_wilc1000(u32 id)
{
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wlan.h
index 7469fa47d588..1f6957cf2e9c 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -190,7 +190,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
-/*time for expiring the completion of cfg packets*/
+/* time for expiring the completion of cfg packets */
#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
#define IS_MANAGMEMENT 0x100
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wlan_cfg.c
index 3f53807cee0f..6f6b286788d1 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wlan_cfg.c
@@ -4,10 +4,10 @@
* All rights reserved.
*/
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_cfg.h"
-#include "wilc_wfi_netdevice.h"
+#include "wlan_if.h"
+#include "wlan.h"
+#include "wlan_cfg.h"
+#include "netdev.h"
enum cfg_cmd_type {
CFG_BYTE_CMD = 0,
@@ -44,6 +44,11 @@ static const struct wilc_cfg_str g_cfg_str[] = {
{WID_NIL, NULL}
};
+#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
+#define WILC_RESP_MSG_TYPE_STATUS_INFO 'I'
+#define WILC_RESP_MSG_TYPE_NETWORK_INFO 'N'
+#define WILC_RESP_MSG_TYPE_SCAN_COMPLETE 'S'
+
/********************************************
*
* Configuration Functions
@@ -360,33 +365,26 @@ void wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
size -= 4;
rsp->type = 0;
- /*
- * The valid types of response messages are
- * 'R' (Response),
- * 'I' (Information), and
- * 'N' (Network Information)
- */
-
switch (msg_type) {
- case 'R':
+ case WILC_RESP_MSG_TYPE_CONFIG_REPLY:
wilc_wlan_parse_response_frame(wilc, frame, size);
rsp->type = WILC_CFG_RSP;
rsp->seq_no = msg_id;
break;
- case 'I':
+ case WILC_RESP_MSG_TYPE_STATUS_INFO:
wilc_wlan_parse_info_frame(wilc, frame);
rsp->type = WILC_CFG_RSP_STATUS;
rsp->seq_no = msg_id;
- /*call host interface info parse as well*/
+ /* call host interface info parse as well */
wilc_gnrl_async_info_received(wilc, frame - 4, size + 4);
break;
- case 'N':
+ case WILC_RESP_MSG_TYPE_NETWORK_INFO:
wilc_network_info_received(wilc, frame - 4, size + 4);
break;
- case 'S':
+ case WILC_RESP_MSG_TYPE_SCAN_COMPLETE:
wilc_scan_complete_received(wilc, frame - 4, size + 4);
break;
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wlan_cfg.h
index 614c5673f232..614c5673f232 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wlan_cfg.h
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wlan_if.h
index 70eac586f80c..7c7ee66c35f5 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wlan_if.h
@@ -750,10 +750,10 @@ enum {
WID_REMOVE_KEY = 0x301E,
WID_ASSOC_REQ_INFO = 0x301F,
WID_ASSOC_RES_INFO = 0x3020,
- WID_MANUFACTURER = 0x3026, /*Added for CAPI tool */
- WID_MODEL_NAME = 0x3027, /*Added for CAPI tool */
- WID_MODEL_NUM = 0x3028, /*Added for CAPI tool */
- WID_DEVICE_NAME = 0x3029, /*Added for CAPI tool */
+ WID_MANUFACTURER = 0x3026, /* Added for CAPI tool */
+ WID_MODEL_NAME = 0x3027, /* Added for CAPI tool */
+ WID_MODEL_NUM = 0x3028, /* Added for CAPI tool */
+ WID_DEVICE_NAME = 0x3029, /* Added for CAPI tool */
/* NMAC String WID list */
WID_SET_OPERATION_MODE = 0x3079,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 5ff740a8837d..bdd7f414fdbb 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1181,8 +1181,6 @@ struct hfa384x_usbctlx {
ctlx_cmdcb_t cmdcb; /* Async command callback */
ctlx_usercb_t usercb; /* Async user callback, */
void *usercb_data; /* at CTLX completion */
-
- int variant; /* Identifies cmd variant */
};
struct hfa384x_usbctlxq {
@@ -1337,7 +1335,9 @@ struct hfa384x {
* interface
*/
- struct hfa384x_caplevel cap_act_sta_mfi; /* sta f/w to modem interface */
+ struct hfa384x_caplevel cap_act_sta_mfi; /*
+ * sta f/w to modem interface
+ */
struct hfa384x_caplevel cap_act_ap_cfi; /*
* ap f/w to controller
@@ -1359,7 +1359,9 @@ struct hfa384x {
struct hfa384x_inf_frame *scanresults;
- struct prism2sta_authlist authlist; /* Authenticated station list. */
+ struct prism2sta_authlist authlist; /*
+ * Authenticated station list.
+ */
unsigned int accessmode; /* Access mode. */
struct prism2sta_accesslist allow; /* Allowed station list. */
struct prism2sta_accesslist deny; /* Denied station list. */
@@ -1370,12 +1372,13 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb);
void hfa384x_destroy(struct hfa384x *hw);
int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime,
- int genesis);
+ int genesis);
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw);
int hfa384x_drvr_flashdl_disable(struct hfa384x *hw);
-int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf,
+ u32 len);
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr);
int hfa384x_drvr_ramdl_disable(struct hfa384x *hw);
@@ -1383,7 +1386,8 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len);
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
-static inline int hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
+static inline int
+hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
{
int result = 0;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 28d372a0663a..b71756ab0394 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -293,13 +293,11 @@ void dbprint_urb(struct urb *urb)
pr_debug("urb->transfer_buffer_length=0x%08x\n",
urb->transfer_buffer_length);
pr_debug("urb->actual_length=0x%08x\n", urb->actual_length);
- pr_debug("urb->bandwidth=0x%08x\n", urb->bandwidth);
pr_debug("urb->setup_packet(ctl)=0x%08x\n",
(unsigned int)urb->setup_packet);
pr_debug("urb->start_frame(iso/irq)=0x%08x\n", urb->start_frame);
pr_debug("urb->interval(irq)=0x%08x\n", urb->interval);
pr_debug("urb->error_count(iso)=0x%08x\n", urb->error_count);
- pr_debug("urb->timeout=0x%08x\n", urb->timeout);
pr_debug("urb->context=0x%08x\n", (unsigned int)urb->context);
pr_debug("urb->complete=0x%08x\n", (unsigned int)urb->complete);
}
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 8bd92bba0ac1..51d917c8cdc8 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -49,6 +49,7 @@
/*================================================================*/
/* System Includes */
+#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/random.h>
@@ -61,61 +62,6 @@
#define WEP_KEY(x) (((x) & 0xC0) >> 6)
-static const u32 wep_crc32_table[256] = {
- 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
- 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
- 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
- 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
- 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
- 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
- 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
- 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
- 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
- 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
- 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
- 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
- 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
- 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
- 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
- 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
- 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
- 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
- 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
- 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
- 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
- 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
- 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
- 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
- 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
- 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
- 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
- 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
- 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
- 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
- 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
- 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
- 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
- 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
- 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
- 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
- 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
- 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
- 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
- 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
- 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
- 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
- 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
- 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
- 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
- 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
- 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
- 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
- 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
- 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
- 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
- 0x2d02ef8dL
-};
-
/* keylen in bytes! */
int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
@@ -184,7 +130,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
}
/* Apply the RC4 to the data, update the CRC32 */
- crc = ~0;
i = 0;
j = 0;
for (k = 0; k < len; k++) {
@@ -192,9 +137,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
j = (j + s[i]) & 0xff;
swap(i, j);
buf[k] ^= s[(s[i] + s[j]) & 0xff];
- crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
}
- crc = ~crc;
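+ /* crc32_le() seeded with ~0 plus final inversion yields the WEP ICV CRC-32 */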
+ crc = ~crc32_le(~0, buf, len);
/* now let's check the crc */
c_crc[0] = crc;
@@ -257,17 +201,15 @@ int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
}
/* Update CRC32 then apply RC4 to the data */
- crc = ~0;
i = 0;
j = 0;
for (k = 0; k < len; k++) {
- crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
i = (i + 1) & 0xff;
j = (j + s[i]) & 0xff;
swap(i, j);
dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff];
}
- crc = ~crc;
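+ /* same CRC-32 computation as in wep_decrypt() */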
+ crc = ~crc32_le(~0, buf, len);
/* now let's encrypt the crc */
icv[0] = crc;
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index b5ba176004c1..352556f6870a 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -137,7 +137,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
{
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (wlandev) {
LIST_HEAD(cleanlist);
struct hfa384x_usbctlx *ctlx, *temp;
@@ -222,7 +222,7 @@ static int prism2sta_suspend(struct usb_interface *interface,
struct hfa384x *hw = NULL;
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
@@ -245,7 +245,7 @@ static int prism2sta_resume(struct usb_interface *interface)
struct hfa384x *hw = NULL;
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 8bf8e031f0bc..fdd77bb4628d 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -33,9 +33,9 @@ static int tb_port_enable_tmu(struct tb_port *port, bool enable)
* Legacy devices need to have TMU access enabled before port
* space can be fully accessed.
*/
- if (tb_switch_is_lr(sw))
+ if (tb_switch_is_light_ridge(sw))
offset = 0x26;
- else if (tb_switch_is_er(sw))
+ else if (tb_switch_is_eagle_ridge(sw))
offset = 0x2a;
else
return 0;
@@ -60,7 +60,7 @@ static void tb_port_dummy_read(struct tb_port *port)
* reading stale data on next read perform one dummy read after
* port capabilities are walked.
*/
- if (tb_switch_is_lr(port->sw)) {
+ if (tb_switch_is_light_ridge(port->sw)) {
u32 dummy;
tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2ec1af8f7968..d97813e80e5f 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -962,8 +962,8 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
+ route, space, offset);
break;
default:
@@ -988,8 +988,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
+ route, space, offset);
break;
default:
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index ee5196479854..8dd7de0cc826 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -514,17 +514,6 @@ int tb_drom_read(struct tb_switch *sw)
* no entries). Hardcode the configuration here.
*/
tb_drom_read_uid_only(sw, &sw->uid);
-
- sw->ports[1].link_nr = 0;
- sw->ports[2].link_nr = 1;
- sw->ports[1].dual_link_port = &sw->ports[2];
- sw->ports[2].dual_link_port = &sw->ports[1];
-
- sw->ports[3].link_nr = 0;
- sw->ports[4].link_nr = 1;
- sw->ports[3].dual_link_port = &sw->ports[4];
- sw->ports[4].dual_link_port = &sw->ports[3];
-
return 0;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 245588f691e7..13e88109742e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
@@ -43,6 +44,10 @@
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
+static bool start_icm;
+module_param(start_icm, bool, 0444);
+MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
+
/**
* struct icm - Internal connection manager private data
* @request_lock: Makes sure only one message is send to ICM at time
@@ -147,6 +152,17 @@ static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
return NULL;
}
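+/*
+ * Parse the Intel VSS from the endpoint name and report whether the
+ * device advertises RTD3 (runtime D3) support.
+ */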
+static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
+{
+ const struct intel_vss *vss;
+
+ vss = parse_intel_vss(ep_name, size);
+ if (vss)
+ return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+ return false;
+}
+
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
@@ -339,6 +355,14 @@ static void icm_veto_end(struct tb *tb)
}
}
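+/* Report whether the ICM firmware is already running on this NHI. */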
+static bool icm_firmware_running(const struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_ICM_EN);
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -562,58 +586,42 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
-static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, const u8 *ep_name,
- size_t ep_name_size, u8 connection_id,
- u8 connection_key, u8 link, u8 depth,
- enum tb_security_level security_level,
- bool authorized, bool boot)
+static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid)
{
- const struct intel_vss *vss;
+ struct tb *tb = parent_sw->tb;
struct tb_switch *sw;
- int ret;
-
- pm_runtime_get_sync(&parent_sw->dev);
- sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
- if (IS_ERR(sw))
- goto out;
+ sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+ if (IS_ERR(sw)) {
+ tb_warn(tb, "failed to allocate switch at %llx\n", route);
+ return sw;
+ }
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
if (!sw->uuid) {
- tb_sw_warn(sw, "cannot allocate memory for switch\n");
tb_switch_put(sw);
- goto out;
+ return ERR_PTR(-ENOMEM);
}
- sw->connection_id = connection_id;
- sw->connection_key = connection_key;
- sw->link = link;
- sw->depth = depth;
- sw->authorized = authorized;
- sw->security_level = security_level;
- sw->boot = boot;
+
init_completion(&sw->rpm_complete);
+ return sw;
+}
- vss = parse_intel_vss(ep_name, ep_name_size);
- if (vss)
- sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
+{
+ u64 route = tb_route(sw);
+ int ret;
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
ret = tb_switch_add(sw);
- if (ret) {
+ if (ret)
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
- tb_switch_put(sw);
- sw = ERR_PTR(ret);
- }
-
-out:
- pm_runtime_mark_last_busy(&parent_sw->dev);
- pm_runtime_put_autosuspend(&parent_sw->dev);
- return sw;
+ return ret;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -697,11 +705,11 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
(const struct icm_fr_event_device_connected *)hdr;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
+ bool boot, dual_lane, speed_gen3;
struct icm *icm = tb_priv(tb);
bool authorized = false;
struct tb_xdomain *xd;
u8 link, depth;
- bool boot;
u64 route;
int ret;
@@ -714,6 +722,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
@@ -811,10 +821,27 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id,
- pkg->connection_key, link, depth, security_level,
- authorized, boot);
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1142,10 +1169,10 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
+ bool authorized, boot, dual_lane, speed_gen3;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
struct tb_xdomain *xd;
- bool authorized, boot;
u64 route;
icm_postpone_rescan(tb);
@@ -1163,6 +1190,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
@@ -1205,11 +1234,27 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
return;
}
- sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
- security_level, authorized, boot);
- if (!IS_ERR(sw) && force_rtd3)
- sw->rpm = true;
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = force_rtd3;
+ if (!sw->rpm)
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+ sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1349,9 +1394,12 @@ static bool icm_ar_is_supported(struct tb *tb)
/*
* Starting from Alpine Ridge we can use ICM on Apple machines
* as well. We just need to reset and re-enable it first.
+ * However, only start it if explicitly asked by the user.
*/
- if (!x86_apple_machine)
+ if (icm_firmware_running(tb->nhi))
return true;
+ if (!start_icm)
+ return false;
/*
* Find the upstream PCIe port in case we need to do reset
@@ -1704,8 +1752,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
u32 val;
/* Check if the ICM firmware is already running */
- val = ioread32(nhi->iobase + REG_FW_STS);
- if (val & REG_FW_STS_ICM_EN)
+ if (icm_firmware_running(nhi))
return 0;
dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
@@ -1893,14 +1940,12 @@ static int icm_suspend(struct tb *tb)
*/
static void icm_unplug_children(struct tb_switch *sw)
{
- unsigned int i;
+ struct tb_port *port;
if (tb_route(sw))
sw->is_unplugged = true;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain)
port->xdomain->is_unplugged = true;
else if (tb_port_has_remote(port))
@@ -1936,11 +1981,9 @@ static void remove_unplugged_switch(struct tb_switch *sw)
static void icm_free_unplugged_children(struct tb_switch *sw)
{
- unsigned int i;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain && port->xdomain->is_unplugged) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
@@ -2216,7 +2259,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
- icm->is_supported = icm_ar_is_supported;
+ icm->is_supported = icm_fr_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
icm->device_connected = icm_icl_device_connected;
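
Both device_connected handlers above derive the initial link attributes from bits in the ICM packet header before the switch is registered. A minimal standalone sketch of that decode, using the bit positions added to tb_msgs.h later in this patch (the helper name is illustrative, not part of the driver):

#include <stdio.h>

#define ICM_FLAGS_DUAL_LANE	(1u << 5)
#define ICM_FLAGS_SPEED_GEN3	(1u << 7)

/*
 * Mirrors the handlers above: Gen3 links run at 20 Gb/s per lane,
 * everything else at 10 Gb/s; a dual-lane link reports two lanes.
 */
static void icm_decode_link(unsigned int flags, unsigned int *speed,
			    unsigned int *width)
{
	*speed = (flags & ICM_FLAGS_SPEED_GEN3) ? 20 : 10;
	*width = (flags & ICM_FLAGS_DUAL_LANE) ? 2 : 1;
}

int main(void)
{
	unsigned int speed, width;

	icm_decode_link(ICM_FLAGS_DUAL_LANE | ICM_FLAGS_SPEED_GEN3,
			&speed, &width);
	printf("%u Gb/s x%u\n", speed, width);	/* prints "20 Gb/s x2" */
	return 0;
}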
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index ae1e92611c3e..bd44d50246d2 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -94,7 +94,7 @@ int tb_lc_configure_link(struct tb_switch *sw)
struct tb_port *up, *down;
int ret;
- if (!sw->config.enabled || !tb_route(sw))
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
@@ -124,7 +124,7 @@ void tb_lc_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
+ if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
return;
up = tb_upstream_port(sw);
@@ -177,3 +177,192 @@ int tb_lc_set_sleep(struct tb_switch *sw)
return 0;
}
+
+/**
+ * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
+ * @sw: Switch to check
+ *
+ * Checks whether the conditions for lane bonding from the parent to
+ * @sw are met.
+ */
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int cap, ret;
+ u32 val;
+
+ if (sw->generation < 2)
+ return false;
+
+ up = tb_upstream_port(sw);
+ cap = find_port_lc_cap(up);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_PORT_ATTR_BE);
+}
+
+static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
+ struct tb_port *in)
+{
+ struct tb_port *port;
+
+ /* The first DP IN port is sink 0 and the second is sink 1 */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_dpin(port))
+ return in != port;
+ }
+
+ return -EINVAL;
+}
+
+static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
+{
+ u32 val, alloc;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Sink is available for CM/SW to use if the allocation value is
+ * either 0 or 1.
+ */
+ if (!sink) {
+ alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
+ return 0;
+ } else {
+ alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
+ * @sw: Switch whose DP sink is queried
+ * @in: DP IN port to check
+ *
+ * Queries through LC SNK_ALLOCATION registers whether DP sink is available
+ * for the given DP IN port or not.
+ */
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
+{
+ int sink;
+
+ /*
+ * For older generations sink is always available as there is no
+ * allocation mechanism.
+ */
+ if (sw->generation < 3)
+ return true;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return false;
+
+ return !tb_lc_dp_sink_available(sw, sink);
+}
+
+/**
+ * tb_lc_dp_sink_alloc() - Allocate DP sink
+ * @sw: Switch whose DP sink is allocated
+ * @in: DP IN port the DP sink is allocated for
+ *
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
+ * resource is available and allocation is successful, returns %0. In all
+ * other cases returns negative errno. In particular, %-EBUSY is returned if
+ * the resource was not available.
+ */
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink) {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
+ } else {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ }
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d allocated\n", sink);
+ return 0;
+}
+
+/**
+ * tb_lc_dp_sink_dealloc() - De-allocate DP sink
+ * @sw: Switch whose DP sink is de-allocated
+ * @in: DP IN port whose DP sink is de-allocated
+ *
+ * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ /* Needs to be owned by CM/SW */
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink)
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ else
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d de-allocated\n", sink);
+ return 0;
+}
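
The SNK_ALLOCATION register packs both sinks into one dword: sink 0 occupies bits 3:0 and sink 1 bits 7:4, and a nibble value of 0 (unallocated) or 1 (owned by CM/SW) means the sink is usable. A standalone sketch of the availability test, with the mask values from the tb_regs.h hunk below written out (the function name is illustrative):

#include <stdbool.h>

#define SNK0_MASK	0x0fu	/* GENMASK(3, 0) */
#define SNK1_MASK	0xf0u	/* GENMASK(7, 4) */
#define SNK1_SHIFT	4
#define SNK_CM		0x1u	/* allocated to CM/SW */

/*
 * Mirrors tb_lc_dp_sink_available(): a sink is free for the
 * connection manager if its allocation nibble is 0 or 1.
 */
static bool lc_sink_available(unsigned int val, int sink)
{
	unsigned int alloc;

	if (!sink)
		alloc = val & SNK0_MASK;
	else
		alloc = (val & SNK1_MASK) >> SNK1_SHIFT;

	return !alloc || alloc == SNK_CM;
}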
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index afe5f8391ebf..ad58559ea88e 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -220,7 +220,8 @@ err:
* Creates path between two ports starting with given @src_hopid. Reserves
* HopIDs for each port (they can be different from @src_hopid depending on
* how many HopIDs each port already have reserved). If there are dual
- * links on the path, prioritizes using @link_nr.
+ * links on the path, prioritizes using @link_nr but takes into account
+ * that the lanes may be bonded.
*
* Return: Returns a tb_path on success or NULL on failure.
*/
@@ -259,7 +260,9 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!in_port)
goto err;
- if (in_port->dual_link_port && in_port->link_nr != link_nr)
+ /* When lanes are bonded primary link must be used */
+ if (!in_port->bonded && in_port->dual_link_port &&
+ in_port->link_nr != link_nr)
in_port = in_port->dual_link_port;
ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
@@ -271,8 +274,27 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!out_port)
goto err;
- if (out_port->dual_link_port && out_port->link_nr != link_nr)
- out_port = out_port->dual_link_port;
+ /*
+ * Pick up right port when going from non-bonded to
+ * bonded or from bonded to non-bonded.
+ */
+ if (out_port->dual_link_port) {
+ if (!in_port->bonded && out_port->bonded &&
+ out_port->link_nr) {
+ /*
+ * Use primary link when going from
+ * non-bonded to bonded.
+ */
+ out_port = out_port->dual_link_port;
+ } else if (!out_port->bonded &&
+ out_port->link_nr != link_nr) {
+ /*
+ * If out port is not bonded follow
+ * link_nr.
+ */
+ out_port = out_port->dual_link_port;
+ }
+ }
if (i == num_hops - 1)
ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
@@ -535,3 +557,25 @@ bool tb_path_is_invalid(struct tb_path *path)
}
return false;
}
+
+/**
+ * tb_path_switch_on_path() - Does the path go through certain switch
+ * @path: Path to check
+ * @sw: Switch to check
+ *
+ * Goes over all hops on the path and checks whether @sw is one of them.
+ * Direction does not matter.
+ */
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port->sw == sw ||
+ path->hops[i].out_port->sw == sw)
+ return true;
+ }
+
+ return false;
+}
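
The egress selection in tb_path_alloc() above collapses to a simple rule: entering a bonded link must happen on the primary lane, while a non-bonded link just follows the requested @link_nr. A condensed sketch of that rule (the struct is a simplified stand-in for struct tb_port):

#include <stdbool.h>

struct port {
	struct port *dual_link_port;
	int link_nr;		/* 0 = primary lane, 1 = secondary */
	bool bonded;
};

static struct port *pick_out_port(const struct port *in, struct port *out,
				  int link_nr)
{
	if (!out->dual_link_port)
		return out;			/* single-lane adapter */
	if (!in->bonded && out->bonded && out->link_nr)
		return out->dual_link_port;	/* must use primary lane */
	if (!out->bonded && out->link_nr != link_nr)
		return out->dual_link_port;	/* follow requested lane */
	return out;
}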
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5ea8db667e83..ca86a8e09c77 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw)
static int nvm_authenticate_host(struct tb_switch *sw)
{
- int ret;
+ int ret = 0;
/*
* Root switch NVM upgrade requires that we disconnect the
@@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* already).
*/
if (!sw->safe_mode) {
+ u32 status;
+
ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
@@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* everything goes well so getting timeout is expected.
*/
ret = dma_port_flash_update_auth(sw->dma_port);
- return ret == -ETIMEDOUT ? 0 : ret;
+ if (!ret || ret == -ETIMEDOUT)
+ return 0;
+
+ /*
+ * Any error from update auth operation requires power
+ * cycling of the host router.
+ */
+ tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
+ if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
+ nvm_set_auth_status(sw, status);
}
/*
@@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* switch.
*/
dma_port_power_cycle(sw->dma_port);
- return 0;
+ return ret;
}
static int nvm_authenticate_device(struct tb_switch *sw)
@@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw)
int ret, retries = 10;
ret = dma_port_flash_update_auth(sw->dma_port);
- if (ret && ret != -ETIMEDOUT)
+ switch (ret) {
+ case 0:
+ case -ETIMEDOUT:
+ case -EACCES:
+ case -EINVAL:
+ /* Power cycle is required */
+ break;
+ default:
return ret;
+ }
/*
* Poll here for the authentication status. It takes some time
@@ -553,17 +572,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged)
return 0;
- nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits;
- tb_port_dbg(port, "adding %d NFC credits to %lu",
- credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+ tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
+ port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
- port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+ port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
port->config.nfc_credits |= nfc_credits;
return tb_port_write(port, &port->config.nfc_credits,
- TB_CFG_PORT, 4, 1);
+ TB_CFG_PORT, ADP_CS_4, 1);
}
/**
@@ -578,14 +597,14 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
- data &= ~TB_PORT_LCA_MASK;
- data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+ data &= ~ADP_CS_5_LCA_MASK;
+ data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
- return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+ return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
@@ -645,6 +664,7 @@ static int tb_init_port(struct tb_port *port)
ida_init(&port->out_hopids);
}
+ INIT_LIST_HEAD(&port->list);
return 0;
}
@@ -775,6 +795,132 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
return next;
}
+static int tb_port_get_link_speed(struct tb_port *port)
+{
+ u32 val, speed;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+ LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+ return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+}
+
+static int tb_port_get_link_width(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
+ LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+}
+
+static bool tb_port_is_width_supported(struct tb_port *port, int width)
+{
+ u32 phy, widths;
+ int ret;
+
+ if (!port->cap_phy)
+ return false;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+
+ return !!(widths & width);
+}
+
+static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
+ switch (width) {
+ case 1:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ case 2:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= LANE_ADP_CS_1_LB;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_lane_bonding_enable(struct tb_port *port)
+{
+ int ret;
+
+ /*
+ * Enable lane bonding on both links if not already enabled by,
+ * for example, the boot firmware.
+ */
+ ret = tb_port_get_link_width(port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port, 2);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_port_get_link_width(port->dual_link_port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port->dual_link_port, 2);
+ if (ret) {
+ tb_port_set_link_width(port, 1);
+ return ret;
+ }
+ }
+
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
+
+ return 0;
+}
+
+static void tb_port_lane_bonding_disable(struct tb_port *port)
+{
+ port->dual_link_port->bonded = false;
+ port->bonded = false;
+
+ tb_port_set_link_width(port->dual_link_port, 1);
+ tb_port_set_link_width(port, 1);
+}
+
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
@@ -803,10 +949,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
{
u32 data;
- if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1))
return false;
- return !!(data & TB_PCI_EN);
+ return !!(data & ADP_PCIE_CS_0_PE);
}
/**
@@ -816,10 +963,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
*/
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
- u32 word = enable ? TB_PCI_EN : 0x0;
+ u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
if (!port->cap_adap)
return -ENXIO;
- return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1);
}
/**
@@ -833,11 +981,12 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
- return !!(data & TB_DP_HDP);
+ return !!(data & ADP_DP_CS_2_HDP);
}
/**
@@ -851,12 +1000,14 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
if (ret)
return ret;
- data |= TB_DP_HPDC;
- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ data |= ADP_DP_CS_3_HDPC;
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
}
/**
@@ -874,20 +1025,23 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
- data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
- data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
- data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
- data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
- data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+ ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+ ADP_DP_CS_1_AUX_RX_HOPID_MASK;
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/**
@@ -898,11 +1052,11 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
{
u32 data[2];
- if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
ARRAY_SIZE(data)))
return false;
- return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+ return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
/**
@@ -918,18 +1072,18 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
if (enable)
- data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
else
- data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+ data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/* switch utility functions */
@@ -986,7 +1140,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
- if (!sw->config.enabled)
+ if (tb_switch_is_icm(sw))
return 0;
sw->config.plug_events_delay = 0xff;
@@ -1114,6 +1268,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(device_name);
+static ssize_t
+generation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->generation);
+}
+static DEVICE_ATTR_RO(generation);
+
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1166,6 +1329,36 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+}
+
+/*
+ * Currently all lanes must run at the same speed but we expose here
+ * both directions to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->link_width);
+}
+
+/*
+ * Currently link has same amount of lanes both directions (1 or 2) but
+ * expose them separately to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+
static void nvm_authenticate_start(struct tb_switch *sw)
{
struct pci_dev *root_port;
@@ -1246,8 +1439,6 @@ static ssize_t nvm_authenticate_store(struct device *dev,
*/
nvm_authenticate_start(sw);
ret = nvm_authenticate_host(sw);
- if (ret)
- nvm_authenticate_complete(sw);
} else {
ret = nvm_authenticate_device(sw);
}
@@ -1319,9 +1510,14 @@ static struct attribute *switch_attrs[] = {
&dev_attr_boot.attr,
&dev_attr_device.attr,
&dev_attr_device_name.attr,
+ &dev_attr_generation.attr,
&dev_attr_key.attr,
&dev_attr_nvm_authenticate.attr,
&dev_attr_nvm_version.attr,
+ &dev_attr_rx_speed.attr,
+ &dev_attr_rx_lanes.attr,
+ &dev_attr_tx_speed.attr,
+ &dev_attr_tx_lanes.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
&dev_attr_unique_id.attr,
@@ -1352,6 +1548,13 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
+ } else if (attr == &dev_attr_rx_speed.attr ||
+ attr == &dev_attr_rx_lanes.attr ||
+ attr == &dev_attr_tx_speed.attr ||
+ attr == &dev_attr_tx_lanes.attr) {
+ if (tb_route(sw))
+ return attr->mode;
+ return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
if (sw->dma_port && !sw->no_nvm_upgrade)
return attr->mode;
@@ -1382,14 +1585,14 @@ static const struct attribute_group *switch_groups[] = {
static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
- int i;
+ struct tb_port *port;
dma_port_free(sw->dma_port);
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (!sw->ports[i].disabled) {
- ida_destroy(&sw->ports[i].in_hopids);
- ida_destroy(&sw->ports[i].out_hopids);
+ tb_switch_for_each_port(sw, port) {
+ if (!port->disabled) {
+ ida_destroy(&port->in_hopids);
+ ida_destroy(&port->out_hopids);
}
}
@@ -1690,13 +1893,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
int ret;
switch (sw->generation) {
- case 3:
- break;
-
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
return 0;
+
+ /* fallthrough */
+ case 3:
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
break;
default:
@@ -1710,7 +1916,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
}
/* Root switch DMA port requires running firmware */
- if (!tb_route(sw) && sw->config.enabled)
+ if (!tb_route(sw) && !tb_switch_is_icm(sw))
return 0;
sw->dma_port = dma_port_alloc(sw);
@@ -1721,6 +1927,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return 0;
/*
+ * If the status is already set then authentication failed when
+ * dma_port_flash_update_auth() returned. Power cycling is not
+ * needed (it was done already), so the only thing to do here is
+ * to unblock runtime PM of the root port.
+ */
+ nvm_get_auth_status(sw, &status);
+ if (status) {
+ if (!tb_route(sw))
+ nvm_authenticate_complete(sw);
+ return 0;
+ }
+
+ /*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
* it functional again.
@@ -1735,9 +1954,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
- ret = tb_switch_set_uuid(sw);
- if (ret)
- return ret;
nvm_set_auth_status(sw, status);
}
@@ -1751,6 +1967,153 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return -ESHUTDOWN;
}
+static void tb_switch_default_link_ports(struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i += 2) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_port *subordinate;
+
+ if (!tb_port_is_null(port))
+ continue;
+
+ /* Check for the subordinate port */
+ if (i == sw->config.max_port_number ||
+ !tb_port_is_null(&sw->ports[i + 1]))
+ continue;
+
+ /* Link them if not already done so (by DROM) */
+ subordinate = &sw->ports[i + 1];
+ if (!port->dual_link_port && !subordinate->dual_link_port) {
+ port->link_nr = 0;
+ port->dual_link_port = subordinate;
+ subordinate->link_nr = 1;
+ subordinate->dual_link_port = port;
+
+ tb_sw_dbg(sw, "linked ports %d <-> %d\n",
+ port->port, subordinate->port);
+ }
+ }
+}
+
+static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ const struct tb_port *up = tb_upstream_port(sw);
+
+ if (!up->dual_link_port || !up->dual_link_port->remote)
+ return false;
+
+ return tb_lc_lane_bonding_possible(sw);
+}
+
+static int tb_switch_update_link_attributes(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ bool change = false;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+
+ ret = tb_port_get_link_speed(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_speed != ret)
+ change = true;
+ sw->link_speed = ret;
+
+ ret = tb_port_get_link_width(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_width != ret)
+ change = true;
+ sw->link_width = ret;
+
+ /* Notify userspace that there is possible link attribute change */
+ if (device_is_registered(&sw->dev) && change)
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+ *
+ * The connection manager can call this function to enable lane bonding
+ * of a switch. If the conditions are met and both switches support the
+ * feature, the lanes are bonded. It is safe to call this for any switch.
+ */
+int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+ u64 route = tb_route(sw);
+ int ret;
+
+ if (!route)
+ return 0;
+
+ if (!tb_switch_lane_bonding_possible(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(route, parent);
+
+ if (!tb_port_is_width_supported(up, 2) ||
+ !tb_port_is_width_supported(down, 2))
+ return 0;
+
+ ret = tb_port_lane_bonding_enable(up);
+ if (ret) {
+ tb_port_warn(up, "failed to enable lane bonding\n");
+ return ret;
+ }
+
+ ret = tb_port_lane_bonding_enable(down);
+ if (ret) {
+ tb_port_warn(down, "failed to enable lane bonding\n");
+ tb_port_lane_bonding_disable(up);
+ return ret;
+ }
+
+ tb_switch_update_link_attributes(sw);
+
+ tb_sw_dbg(sw, "lane bonding enabled\n");
+ return ret;
+}
+
+/**
+ * tb_switch_lane_bonding_disable() - Disable lane bonding
+ * @sw: Switch whose lane bonding to disable
+ *
+ * Disables lane bonding between @sw and parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+
+ if (!tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (!up->bonded)
+ return;
+
+ down = tb_port_at(tb_route(sw), parent);
+
+ tb_port_lane_bonding_disable(up);
+ tb_port_lane_bonding_disable(down);
+
+ tb_switch_update_link_attributes(sw);
+ tb_sw_dbg(sw, "lane bonding disabled\n");
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -1775,21 +2138,25 @@ int tb_switch_add(struct tb_switch *sw)
* configuration based mailbox.
*/
ret = tb_switch_add_dma_port(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add DMA port\n");
return ret;
+ }
if (!sw->safe_mode) {
/* read drom */
ret = tb_drom_read(sw);
if (ret) {
- tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ dev_err(&sw->dev, "reading DROM failed\n");
return ret;
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
ret = tb_switch_set_uuid(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to set UUID\n");
return ret;
+ }
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
@@ -1797,14 +2164,24 @@ int tb_switch_add(struct tb_switch *sw)
continue;
}
ret = tb_init_port(&sw->ports[i]);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to initialize port %d\n", i);
return ret;
+ }
}
+
+ tb_switch_default_link_ports(sw);
+
+ ret = tb_switch_update_link_attributes(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add device: %d\n", ret);
return ret;
+ }
if (tb_route(sw)) {
dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
@@ -1816,6 +2193,7 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_nvm_add(sw);
if (ret) {
+ dev_err(&sw->dev, "failed to add NVM devices\n");
device_del(&sw->dev);
return ret;
}
@@ -1842,7 +2220,7 @@ int tb_switch_add(struct tb_switch *sw)
*/
void tb_switch_remove(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
@@ -1850,13 +2228,13 @@ void tb_switch_remove(struct tb_switch *sw)
}
/* port 0 is the switch itself and never has a remote */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i])) {
- tb_switch_remove(sw->ports[i].remote->sw);
- sw->ports[i].remote = NULL;
- } else if (sw->ports[i].xdomain) {
- tb_xdomain_remove(sw->ports[i].xdomain);
- sw->ports[i].xdomain = NULL;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
}
}
@@ -1876,7 +2254,8 @@ void tb_switch_remove(struct tb_switch *sw)
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
+
if (sw == sw->tb->root_switch) {
tb_sw_WARN(sw, "cannot unplug root switch\n");
return;
@@ -1886,17 +2265,19 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
return;
}
sw->is_unplugged = true;
- for (i = 0; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_sw_set_unplugged(sw->ports[i].remote->sw);
- else if (sw->ports[i].xdomain)
- sw->ports[i].xdomain->is_unplugged = true;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
}
}
int tb_switch_resume(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
tb_sw_dbg(sw, "resuming switch\n");
/*
@@ -1944,9 +2325,7 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
/* check for surviving downstream switches */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;
@@ -1970,19 +2349,64 @@ int tb_switch_resume(struct tb_switch *sw)
void tb_switch_suspend(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
err = tb_plug_events_active(sw, false);
if (err)
return;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_switch_suspend(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_switch_suspend(port->remote->sw);
}
tb_lc_set_sleep(sw);
}
+/**
+ * tb_switch_query_dp_resource() - Query availability of DP resource
+ * @sw: Switch whose DP resource is queried
+ * @in: DP IN port
+ *
+ * Queries availability of a DP resource for DP tunneling using
+ * switch-specific means. Returns %true if the resource is available.
+ */
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_query(sw, in);
+}
+
+/**
+ * tb_switch_alloc_dp_resource() - Allocate available DP resource
+ * @sw: Switch whose DP resource is allocated
+ * @in: DP IN port
+ *
+ * Allocates DP resource for DP tunneling. The resource must be
+ * available for this to succeed (see tb_switch_query_dp_resource()).
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_alloc(sw, in);
+}
+
+/**
+ * tb_switch_dealloc_dp_resource() - De-allocate DP resource
+ * @sw: Switch whose DP resource is de-allocated
+ * @in: DP IN port
+ *
+ * De-allocates DP resource that was previously allocated for DP
+ * tunneling.
+ */
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ if (tb_lc_dp_sink_dealloc(sw, in)) {
+ tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
+ in->port);
+ }
+}
+
struct tb_sw_lookup {
struct tb *tb;
u8 link;
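
tb_port_set_link_width() above does a single read-modify-write of LANE_ADP_CS_1: it clears the target-width field, writes SINGLE (0x1) or DUAL (0x3), and sets the lane-bonding request bit. A worked sketch of that register update with the masks from the tb_regs.h hunk spelled out (names shortened for illustration):

#include <stdint.h>

#define TARGET_WIDTH_MASK	0x000003f0u	/* GENMASK(9, 4) */
#define TARGET_WIDTH_SHIFT	4
#define TARGET_WIDTH_SINGLE	0x1u
#define TARGET_WIDTH_DUAL	0x3u
#define LANE_BONDING		(1u << 15)	/* LANE_ADP_CS_1_LB */

/* Returns the new LANE_ADP_CS_1 value for the requested width (1 or 2) */
static uint32_t lane_adp_cs_1_for_width(uint32_t val, unsigned int width)
{
	val &= ~TARGET_WIDTH_MASK;
	val |= (width == 2 ? TARGET_WIDTH_DUAL : TARGET_WIDTH_SINGLE) <<
		TARGET_WIDTH_SHIFT;
	return val | LANE_BONDING;
}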
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1f7a9e1cc09c..ea8727f769d6 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
@@ -18,6 +17,7 @@
/**
* struct tb_cm - Simple Thunderbolt connection manager
* @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
* @hotplug_active: tb_handle_hotplug will stop progressing plug
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain wq
@@ -25,6 +25,7 @@
*/
struct tb_cm {
struct list_head tunnel_list;
+ struct list_head dp_resources;
bool hotplug_active;
};
@@ -56,17 +57,51 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
/* enumeration & hot plug handling */
+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (!tb_switch_query_dp_resource(sw, port))
+ continue;
+
+ list_add_tail(&port->list, &tcm->dp_resources);
+ tb_port_dbg(port, "DP IN resource available\n");
+ }
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port, *tmp;
+
+ /* Clear children resources first */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_remove_dp_resources(port->remote->sw);
+ }
+
+ list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+ if (port->sw == sw) {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ list_del_init(&port->list);
+ }
+ }
+}
+
static void tb_discover_tunnels(struct tb_switch *sw)
{
struct tb *tb = sw->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port;
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
+ tb_switch_for_each_port(sw, port) {
struct tb_tunnel *tunnel = NULL;
- port = &sw->ports[i];
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port);
@@ -95,9 +130,9 @@ static void tb_discover_tunnels(struct tb_switch *sw)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
}
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_discover_tunnels(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_discover_tunnels(port->remote->sw);
}
}
@@ -130,9 +165,10 @@ static void tb_scan_port(struct tb_port *port);
*/
static void tb_scan_switch(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- tb_scan_port(&sw->ports[i]);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_scan_port(port);
}
/**
@@ -217,11 +253,16 @@ static void tb_scan_port(struct tb_port *port)
upstream_port->dual_link_port->remote = port->dual_link_port;
}
+ /* Enable lane bonding if supported */
+ if (tb_switch_lane_bonding_enable(sw))
+ tb_sw_warn(sw, "failed to enable lane bonding\n");
+
tb_scan_switch(sw);
}
-static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
- struct tb_port *src_port, struct tb_port *dst_port)
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
@@ -230,14 +271,32 @@ static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
if (tunnel->type == type &&
((src_port && src_port == tunnel->src_port) ||
(dst_port && dst_port == tunnel->dst_port))) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- return 0;
+ return tunnel;
}
}
- return -ENODEV;
+ return NULL;
+}
+
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+{
+ if (!tunnel)
+ return;
+
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+
+ /*
+ * In case of DP tunnel make sure the DP IN resource is deallocated
+ * properly.
+ */
+ if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+
+ tb_switch_dealloc_dp_resource(in->sw, in);
+ }
+
+ tb_tunnel_free(tunnel);
}
/**
@@ -250,11 +309,8 @@ static void tb_free_invalid_tunnels(struct tb *tb)
struct tb_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_invalid(tunnel)) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- }
+ if (tb_tunnel_is_invalid(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
}
}
@@ -263,14 +319,15 @@ static void tb_free_invalid_tunnels(struct tb *tb)
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
if (port->remote->sw->is_unplugged) {
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -289,10 +346,13 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
static struct tb_port *tb_find_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- if (sw->ports[i].config.type == type)
- return &sw->ports[i];
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
return NULL;
}
@@ -304,18 +364,18 @@ static struct tb_port *tb_find_port(struct tb_switch *sw,
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_is_upstream_port(&sw->ports[i]))
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
continue;
- if (sw->ports[i].config.type != type)
+ if (port->config.type != type)
continue;
- if (!sw->ports[i].cap_adap)
+ if (!port->cap_adap)
continue;
- if (tb_port_is_enabled(&sw->ports[i]))
+ if (tb_port_is_enabled(port))
continue;
- return &sw->ports[i];
+ return port;
}
return NULL;
}
@@ -336,10 +396,13 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
* Hard-coded Thunderbolt port to PCIe down port mapping
* per controller.
*/
- if (tb_switch_is_cr(sw))
+ if (tb_switch_is_cactus_ridge(sw) ||
+ tb_switch_is_alpine_ridge(sw))
index = !phy_port ? 6 : 7;
- else if (tb_switch_is_fr(sw))
+ else if (tb_switch_is_falcon_ridge(sw))
index = !phy_port ? 6 : 8;
+ else if (tb_switch_is_titan_ridge(sw))
+ index = !phy_port ? 8 : 9;
else
goto out;
@@ -358,42 +421,162 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
+static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
{
- struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw = out->sw;
struct tb_tunnel *tunnel;
- struct tb_port *in;
+ int bw, available_bw = 40000;
- if (tb_port_is_enabled(out))
- return 0;
+ while (sw && sw != in->sw) {
+ bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
+ /* Leave 10% guard band */
+ bw -= bw / 10;
+
+ /*
+ * Check for any active DP tunnels that go through this
+ * switch and subtract their consumed bandwidth from the
+ * available bandwidth.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int consumed_bw;
+
+ if (!tb_tunnel_switch_on_path(tunnel, sw))
+ continue;
+
+ consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
+ if (consumed_bw < 0)
+ return consumed_bw;
+
+ bw -= consumed_bw;
+ }
- do {
- sw = tb_to_switch(sw->dev.parent);
- if (!sw)
- return 0;
- in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
- } while (!in);
+ if (bw < available_bw)
+ available_bw = bw;
- tunnel = tb_tunnel_alloc_dp(tb, in, out);
+ sw = tb_switch_parent(sw);
+ }
+
+ return available_bw;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+ struct tb_tunnel *tunnel;
+ int available_bw;
+
+ /*
+ * Find a pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "in use\n");
+ continue;
+ }
+
+ tb_port_dbg(port, "available\n");
+
+ if (!in && tb_port_is_dpin(port))
+ in = port;
+ else if (!out && tb_port_is_dpout(port))
+ out = port;
+ }
+
+ if (!in) {
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+ return;
+ }
+ if (!out) {
+ tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+ return;
+ }
+
+ if (tb_switch_alloc_dp_resource(in->sw, in)) {
+ tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+ return;
+ }
+
+ /* Calculate available bandwidth between in and out */
+ available_bw = tb_available_bw(tcm, in, out);
+ if (available_bw < 0) {
+ tb_warn(tb, "failed to determine available bandwidth\n");
+ goto dealloc_dp;
+ }
+
+ tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
+ available_bw);
+
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
if (!tunnel) {
- tb_port_dbg(out, "DP tunnel allocation failed\n");
- return -ENOMEM;
+ tb_port_dbg(out, "could not allocate DP tunnel\n");
+ goto dealloc_dp;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
tb_tunnel_free(tunnel);
- return -EIO;
+ goto dealloc_dp;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- return 0;
+ return;
+
+dealloc_dp:
+ tb_switch_dealloc_dp_resource(in->sw, in);
}
-static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
- tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ if (tb_port_is_dpin(port)) {
+ tb_port_dbg(port, "DP IN resource unavailable\n");
+ in = port;
+ out = NULL;
+ } else {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ in = NULL;
+ out = port;
+ }
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+ tb_deactivate_and_free_tunnel(tunnel);
+ list_del_init(&port->list);
+
+ /*
+ * See if there is another DP OUT port that can be used to
+ * create another tunnel.
+ */
+ tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ if (tb_port_is_enabled(port))
+ return;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+
+ /* Look for suitable DP IN <-> DP OUT pairs now */
+ tb_tunnel_dp(tb);
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
@@ -468,6 +651,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
struct tb_port *dst_port;
+ struct tb_tunnel *tunnel;
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
@@ -478,7 +662,8 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
* case of cable disconnect) so it is fine if we cannot find it
* here anymore.
*/
- tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
@@ -533,10 +718,14 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_port_dbg(port, "switch unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
+ /* Maybe we can create another DP tunnel */
+ tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@@ -553,8 +742,8 @@ static void tb_handle_hotplug(struct work_struct *work)
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd);
tb_xdomain_put(xd);
- } else if (tb_port_is_dpout(port)) {
- tb_teardown_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_unavailable(tb, port);
} else {
tb_port_dbg(port,
"got unplug event for disconnected port, ignoring\n");
@@ -567,8 +756,8 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_scan_port(port);
if (!port->remote)
tb_port_dbg(port, "hotplug: no switch found\n");
- } else if (tb_port_is_dpout(port)) {
- tb_tunnel_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_available(tb, port);
}
}
@@ -681,6 +870,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /* Add DP IN resources for the root switch */
+ tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -702,6 +893,21 @@ static int tb_suspend_noirq(struct tb *tb)
return 0;
}
+static void tb_restore_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (tb_switch_lane_bonding_enable(port->remote->sw))
+ dev_warn(&sw->dev, "failed to restore lane bonding\n");
+
+ tb_restore_children(port->remote->sw);
+ }
+}
+
static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
@@ -715,6 +921,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
if (!list_empty(&tcm->tunnel_list)) {
@@ -734,11 +941,10 @@ static int tb_resume_noirq(struct tb *tb)
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
- int i, ret = 0;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ int ret = 0;
+ tb_switch_for_each_port(sw, port) {
if (tb_is_upstream_port(port))
continue;
if (port->xdomain && port->xdomain->is_unplugged) {
@@ -783,9 +989,6 @@ struct tb *tb_probe(struct tb_nhi *nhi)
struct tb_cm *tcm;
struct tb *tb;
- if (!x86_apple_machine)
- return NULL;
-
tb = tb_domain_alloc(nhi, sizeof(*tcm));
if (!tb)
return NULL;
@@ -795,6 +998,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tcm = tb_priv(tb);
INIT_LIST_HEAD(&tcm->tunnel_list);
+ INIT_LIST_HEAD(&tcm->dp_resources);
return tb;
}
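
tb_available_bw() above walks from the DP OUT switch up toward the DP IN switch, and at each hop caps the result by that link's raw bandwidth minus a 10% guard band and minus whatever the existing tunnels on the hop already consume. The per-hop arithmetic as a standalone sketch:

/*
 * One hop of the tb_available_bw() calculation: raw bandwidth in Mb/s
 * is link_speed (Gb/s) * link_width (lanes) * 1000, less a 10% guard
 * band. A bonded Gen3 link (20 Gb/s x 2) therefore starts from
 * 40000 - 4000 = 36000 Mb/s before tunnel consumption is subtracted.
 */
static int hop_available_bw(unsigned int link_speed, unsigned int link_width,
			    int consumed_bw)
{
	int bw = link_speed * link_width * 1000;

	bw -= bw / 10;		/* guard band */
	return bw - consumed_bw;
}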
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 6407d529871d..ec851f20c571 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -61,6 +61,8 @@ struct tb_switch_nvm {
* @device: Device ID of the switch
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
* @cap_lc: Offset to the link controller capability (%0 if not found)
@@ -97,6 +99,8 @@ struct tb_switch {
u16 device;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
unsigned int generation;
int cap_plug_events;
int cap_lc;
@@ -127,11 +131,13 @@ struct tb_switch {
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
+ * @bonded: true if the port is bonded (two lanes combined as one)
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
* @link_nr: Is this primary or secondary port on the dual_link.
* @in_hopids: Currently allocated input HopIDs
* @out_hopids: Currently allocated output HopIDs
+ * @list: Used to link ports to DP resources list
*/
struct tb_port {
struct tb_regs_port_header config;
@@ -142,10 +148,12 @@ struct tb_port {
int cap_adap;
u8 port;
bool disabled;
+ bool bonded;
struct tb_port *dual_link_port;
u8 link_nr:1;
struct ida in_hopids;
struct ida out_hopids;
+ struct list_head list;
};
/**
@@ -399,7 +407,7 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
length);
}
-static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
if (sw->is_unplugged)
@@ -530,6 +538,17 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+/**
+ * tb_switch_for_each_port() - Iterate over each switch port
+ * @sw: Switch whose ports to iterate
+ * @p: Port used as iterator
+ *
+ * Iterates over each switch port skipping the control port (port %0).
+ */
+#define tb_switch_for_each_port(sw, p) \
+ for ((p) = &(sw)->ports[1]; \
+ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
+
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
if (sw)
@@ -559,17 +578,17 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
return tb_to_switch(sw->dev.parent);
}
-static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}
-static inline bool tb_switch_is_er(const struct tb_switch *sw)
+static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}
-static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
@@ -580,7 +599,7 @@ static inline bool tb_switch_is_cr(const struct tb_switch *sw)
}
}
-static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
@@ -591,6 +610,52 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw)
}
}
+static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * tb_switch_is_icm() - Is the switch handled by ICM firmware
+ * @sw: Switch to check
+ *
+ * Call this function when there is a need to differentiate whether the
+ * ICM firmware or the SW CM is handling @sw. It is valid to call this
+ * after tb_switch_alloc() and tb_switch_configure() have been called
+ * (the latter only in the SW CM case).
+ */
+static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+{
+ return !sw->config.enabled;
+}
+
+int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
@@ -626,6 +691,8 @@ void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw);
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
@@ -634,6 +701,10 @@ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_link(struct tb_switch *sw);
void tb_lc_unconfigure_link(struct tb_switch *sw);
int tb_lc_set_sleep(struct tb_switch *sw);
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
static inline int tb_route_length(u64 route)
{
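
The tb_switch_for_each_port() iterator added above replaces the open-coded index loops used throughout the driver; note the inclusive upper bound and that ports[0] (the switch's own control adapter) is skipped. A standalone illustration of the bounds (the structs are simplified stand-ins for the driver's types):

#include <stdio.h>

struct fake_port { int port; };
struct fake_switch {
	struct { int max_port_number; } config;
	struct fake_port ports[8];
};

#define tb_switch_for_each_port(sw, p) \
	for ((p) = &(sw)->ports[1]; \
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)

int main(void)
{
	struct fake_switch sw = { .config = { .max_port_number = 4 } };
	struct fake_port *p;
	int i;

	for (i = 0; i < 8; i++)
		sw.ports[i].port = i;

	tb_switch_for_each_port(&sw, p)
		printf("port %d\n", p->port);	/* visits ports 1..4 */
	return 0;
}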
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 4b641e4ee0c5..3705057723b6 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -122,6 +122,8 @@ struct icm_pkg_header {
#define ICM_FLAGS_NO_KEY BIT(1)
#define ICM_FLAGS_SLEVEL_SHIFT 3
#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3)
+#define ICM_FLAGS_DUAL_LANE BIT(5)
+#define ICM_FLAGS_SPEED_GEN3 BIT(7)
#define ICM_FLAGS_WRITE BIT(7)
struct icm_pkg_driver_ready {
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index deb9d4a977b9..7ee45b73c7f7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -211,37 +211,71 @@ struct tb_regs_port_header {
} __packed;
-/* DWORD 4 */
-#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
-#define TB_PORT_MAX_CREDITS_SHIFT 20
-#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
-/* DWORD 5 */
-#define TB_PORT_LCA_SHIFT 22
-#define TB_PORT_LCA_MASK GENMASK(28, 22)
+/* Basic adapter configuration registers */
+#define ADP_CS_4 0x04
+#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
+#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_5 0x05
+#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
+#define ADP_CS_5_LCA_SHIFT 22
+
+/* Lane adapter registers */
+#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
+#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_LB BIT(15)
+#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
+#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
/* Display Port adapter registers */
-
-/* DWORD 0 */
-#define TB_DP_VIDEO_HOPID_SHIFT 16
-#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16)
-#define TB_DP_AUX_EN BIT(30)
-#define TB_DP_VIDEO_EN BIT(31)
-/* DWORD 1 */
-#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0)
-#define TB_DP_AUX_RX_HOPID_SHIFT 11
-#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11)
-/* DWORD 2 */
-#define TB_DP_HDP BIT(6)
-/* DWORD 3 */
-#define TB_DP_HPDC BIT(9)
-/* DWORD 4 */
-#define TB_DP_LOCAL_CAP 0x4
-/* DWORD 5 */
-#define TB_DP_REMOTE_CAP 0x5
+#define ADP_DP_CS_0 0x00
+#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
+#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16
+#define ADP_DP_CS_0_AE BIT(30)
+#define ADP_DP_CS_0_VE BIT(31)
+#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0)
+#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
+#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
+#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_3 0x03
+#define ADP_DP_CS_3_HDPC BIT(9)
+#define DP_LOCAL_CAP 0x04
+#define DP_REMOTE_CAP 0x05
+#define DP_STATUS_CTRL 0x06
+#define DP_STATUS_CTRL_CMHS BIT(25)
+#define DP_STATUS_CTRL_UF BIT(26)
+#define DP_COMMON_CAP 0x07
+/*
+ * The DP_COMMON_CAP offsets also apply to DP_LOCAL_CAP and DP_REMOTE_CAP,
+ * with the exception of DPRX done.
+ */
+#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8)
+#define DP_COMMON_CAP_RATE_SHIFT 8
+#define DP_COMMON_CAP_RATE_RBR 0x0
+#define DP_COMMON_CAP_RATE_HBR 0x1
+#define DP_COMMON_CAP_RATE_HBR2 0x2
+#define DP_COMMON_CAP_RATE_HBR3 0x3
+#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12)
+#define DP_COMMON_CAP_LANES_SHIFT 12
+#define DP_COMMON_CAP_1_LANE 0x0
+#define DP_COMMON_CAP_2_LANES 0x1
+#define DP_COMMON_CAP_4_LANES 0x2
+#define DP_COMMON_CAP_DPRX_DONE BIT(31)
/* PCIe adapter registers */
-
-#define TB_PCI_EN BIT(31)
+#define ADP_PCIE_CS_0 0x00
+#define ADP_PCIE_CS_0_PE BIT(31)
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
@@ -278,8 +312,17 @@ struct tb_regs_hop {
#define TB_LC_DESC_PORT_SIZE_SHIFT 16
#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
/* Link controller registers */
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
#define TB_LC_SX_CTRL 0x96
#define TB_LC_SX_CTRL_L1C BIT(16)
#define TB_LC_SX_CTRL_L2C BIT(20)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 5a99234826e7..0d3463c4e24a 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -6,6 +6,7 @@
* Copyright (C) 2019, Intel Corporation
*/
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -90,6 +91,22 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
return 0;
}
+static int tb_initial_credits(const struct tb_switch *sw)
+{
+ /* If the path is complete, sw is not NULL */
+ if (sw) {
+ /* More credits for faster link */
+ switch (sw->link_speed * sw->link_width) {
+ case 40:
+ return 32;
+ case 20:
+ return 24;
+ }
+ }
+
+ return 16;
+}
+
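The credit scaling above keys off the product of link speed (Gb/s) and width: a product of 40 earns 32 credits and 20 earns 24, with 16 as the fallback. A standalone sketch of that mapping (the speed/width pairings in the comments are my reading of the cases, not spelled out in the patch):

#include <assert.h>

/* Illustration only; mirrors tb_initial_credits() for a complete path. */
static int initial_credits(unsigned int link_speed, unsigned int link_width)
{
	switch (link_speed * link_width) {
	case 40:
		return 32;	/* e.g. 20 Gb/s x2 (bonded lanes) */
	case 20:
		return 24;	/* e.g. 10 Gb/s x2 or 20 Gb/s x1 */
	default:
		return 16;	/* slower link, or path not complete */
	}
}

int main(void)
{
	assert(initial_credits(20, 2) == 32);
	assert(initial_credits(10, 2) == 24);
	assert(initial_credits(10, 1) == 16);
	return 0;
}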
static void tb_pci_init_path(struct tb_path *path)
{
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@@ -101,7 +118,8 @@ static void tb_pci_init_path(struct tb_path *path)
path->drop_packages = 0;
path->nfc_credits = 0;
path->hops[0].initial_credits = 7;
- path->hops[1].initial_credits = 16;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
}
/**
@@ -225,11 +243,174 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ /* Both ends need to support this */
+ if (!tb_switch_is_titan_ridge(in->sw) ||
+ !tb_switch_is_titan_ridge(out->sw))
+ return 0;
+
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
+
+ ret = tb_port_write(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+ if (!(val & DP_STATUS_CTRL_CMHS))
+ return 0;
+ usleep_range(10, 100);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static inline u32 tb_dp_cap_get_rate(u32 val)
+{
+ u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
+
+ switch (rate) {
+ case DP_COMMON_CAP_RATE_RBR:
+ return 1620;
+ case DP_COMMON_CAP_RATE_HBR:
+ return 2700;
+ case DP_COMMON_CAP_RATE_HBR2:
+ return 5400;
+ case DP_COMMON_CAP_RATE_HBR3:
+ return 8100;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
+{
+ val &= ~DP_COMMON_CAP_RATE_MASK;
+ switch (rate) {
+ default:
+ WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
+ /* Fallthrough */
+ case 1620:
+ val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 2700:
+ val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 5400:
+ val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 8100:
+ val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static inline u32 tb_dp_cap_get_lanes(u32 val)
+{
+ u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
+
+ switch (lanes) {
+ case DP_COMMON_CAP_1_LANE:
+ return 1;
+ case DP_COMMON_CAP_2_LANES:
+ return 2;
+ case DP_COMMON_CAP_4_LANES:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
+{
+ val &= ~DP_COMMON_CAP_LANES_MASK;
+ switch (lanes) {
+ default:
+ WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
+ lanes);
+ /* Fallthrough */
+ case 1:
+ val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
+{
+ /* Tunneling removes the DP 8b/10b encoding */
+ return rate * lanes * 8 / 10;
+}
+
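A worked instance of the 8b/10b adjustment as a standalone sketch; the outputs line up with the Mb/s figures annotated in the dp_bw[] table that follows:

#include <stdio.h>

static unsigned int dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	return rate * lanes * 8 / 10;	/* strip the 8b/10b line coding */
}

int main(void)
{
	/* HBR2 x4: 5400 * 4 = 21600 Mb/s raw, 17280 Mb/s of payload */
	printf("%u Mb/s\n", dp_bandwidth(5400, 4));
	/* HBR3 x1: 8100 * 1 = 8100 Mb/s raw, 6480 Mb/s of payload */
	printf("%u Mb/s\n", dp_bandwidth(8100, 1));
	return 0;
}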
+static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
+ u32 out_rate, u32 out_lanes, u32 *new_rate,
+ u32 *new_lanes)
+{
+ static const u32 dp_bw[][2] = {
+ /* Mb/s, lanes */
+ { 8100, 4 }, /* 25920 Mb/s */
+ { 5400, 4 }, /* 17280 Mb/s */
+ { 8100, 2 }, /* 12960 Mb/s */
+ { 2700, 4 }, /* 8640 Mb/s */
+ { 5400, 2 }, /* 8640 Mb/s */
+ { 8100, 1 }, /* 6480 Mb/s */
+ { 1620, 4 }, /* 5184 Mb/s */
+ { 5400, 1 }, /* 4320 Mb/s */
+ { 2700, 2 }, /* 4320 Mb/s */
+ { 1620, 2 }, /* 2592 Mb/s */
+ { 2700, 1 }, /* 2160 Mb/s */
+ { 1620, 1 }, /* 1296 Mb/s */
+ };
+ unsigned int i;
+
+ /*
+ * Find a combination that can fit into max_bw and does not
+ * exceed the maximum rate and lanes supported by the DP OUT and
+ * DP IN adapters.
+ */
+ for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
+ if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
+ continue;
+
+ if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
+ continue;
+
+ if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
+ *new_rate = dp_bw[i][0];
+ *new_lanes = dp_bw[i][1];
+ return 0;
+ }
+ }
+
+ return -ENOSR;
+}
+
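To see the selection order in action, take a hypothetical max_bw of 10000 Mb/s with both adapters capable of HBR2 x4; the scan proceeds top-down through dp_bw[]:

	/* Hypothetical walk, max_bw = 10000 Mb/s, caps HBR2 (5400) x4:
	 *   {8100, 4}: rate exceeds the 5400 Mb/s cap, skip
	 *   {5400, 4}: 17280 Mb/s > 10000 Mb/s, skip
	 *   {8100, 2}: rate exceeds the cap, skip
	 *   {2700, 4}: 8640 Mb/s <= 10000 Mb/s, fits
	 * -> *new_rate = 2700, *new_lanes = 4 ({5400, 2} ties on
	 *    bandwidth but comes later in the table)
	 */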
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
+ u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- u32 in_dp_cap, out_dp_cap;
int ret;
/*
@@ -239,25 +420,71 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
if (in->sw->generation < 2 || out->sw->generation < 2)
return 0;
+ /*
+ * Perform connection manager handshake between IN and OUT ports
+ * before capabilities exchange can take place.
+ */
+ ret = tb_dp_cm_handshake(in, out);
+ if (ret)
+ return ret;
+
/* Read both DP_LOCAL_CAP registers */
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_LOCAL_CAP, 1);
+ in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_LOCAL_CAP, 1);
+ out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
/* Write IN local caps to OUT remote caps */
ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_REMOTE_CAP, 1);
+ out->cap_adap + DP_REMOTE_CAP, 1);
if (ret)
return ret;
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+ /*
+ * If the tunnel bandwidth is limited (max_bw is set), see if we
+ * need to reduce the bandwidth to make it fit.
+ */
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+ bw = tb_dp_bandwidth(out_rate, out_lanes);
+ tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
+
+ if (tunnel->max_bw && bw > tunnel->max_bw) {
+ u32 new_rate, new_lanes, new_bw;
+
+ ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
+ out_rate, out_lanes, &new_rate,
+ &new_lanes);
+ if (ret) {
+ tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ return ret;
+ }
+
+ new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+ tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
+
+ /*
+ * Set new rate and number of lanes before writing it to
+ * the IN port remote caps.
+ */
+ out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
+ out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
+ }
+
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_REMOTE_CAP, 1);
+ in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
@@ -297,6 +524,56 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
+static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+ const struct tb_switch *sw = in->sw;
+ u32 val, rate = 0, lanes = 0;
+ int ret;
+
+ if (tb_switch_is_titan_ridge(sw)) {
+ int timeout = 10;
+
+ /*
+ * Wait for DPRX done. Normally it should already be set
+ * for an active tunnel.
+ */
+ do {
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ break;
+ }
+ msleep(250);
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+ } else if (sw->generation >= 2) {
+ /*
+ * Read from the copied remote cap so that we take into
+ * account whether capabilities were reduced during the exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ } else {
+ /* No bandwidth management for legacy devices */
+ return 0;
+ }
+
+ return tb_dp_bandwidth(rate, lanes);
+}
+
static void tb_dp_init_aux_path(struct tb_path *path)
{
int i;
@@ -324,12 +601,12 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover)
path->weight = 1;
if (discover) {
- path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
} else {
u32 max_credits;
- max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
/* Leave some credits for AUX path */
path->nfc_credits = min(max_credits - 2, 12U);
}
@@ -361,6 +638,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
@@ -419,6 +697,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
*
* Allocates a tunnel between @in and @out that is capable of tunneling
* Display Port traffic.
@@ -426,7 +705,7 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out)
+ struct tb_port *out, int max_bw)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -441,8 +720,10 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
+ tunnel->max_bw = max_bw;
paths = tunnel->paths;
@@ -478,8 +759,8 @@ static u32 tb_dma_credits(struct tb_port *nhi)
{
u32 max_credits;
- max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
return min(max_credits, 13U);
}
@@ -689,3 +970,62 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
tb_path_deactivate(tunnel->paths[i]);
}
}
+
+/**
+ * tb_tunnel_switch_on_path() - Does the tunnel go through the switch
+ * @tunnel: Tunnel to check
+ * @sw: Switch to check
+ *
+ * Returns true if @tunnel goes through @sw (direction does not matter),
+ * false otherwise.
+ */
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ continue;
+ if (tb_path_switch_on_path(tunnel->paths[i], sw))
+ return true;
+ }
+
+ return false;
+}
+
+static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ return false;
+ if (!tunnel->paths[i]->activated)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
+ * @tunnel: Tunnel to check
+ *
+ * Returns the bandwidth currently consumed by @tunnel, or %0 if the
+ * tunnel is not active or does not consume bandwidth.
+ */
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return 0;
+
+ if (tunnel->consumed_bandwidth) {
+ int ret = tunnel->consumed_bandwidth(tunnel);
+
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
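A minimal sketch of how the two new helpers compose, e.g. totalling the bandwidth consumed by tunnels that cross a given switch; the tunnel list, its locking and the error handling are placeholders here, not part of this patch:

/* Hypothetical caller, for illustration only. */
static int consumed_through_switch(struct list_head *tunnels,
				   const struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	int ret, bw = 0;

	list_for_each_entry(tunnel, tunnels, list) {
		if (!tb_tunnel_switch_on_path(tunnel, sw))
			continue;
		ret = tb_tunnel_consumed_bandwidth(tunnel);
		if (ret > 0)	/* negative return values are errors */
			bw += ret;
	}
	return bw;
}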
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index c68bbcd3a62c..ba888da005f5 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -27,8 +27,11 @@ enum tb_tunnel_type {
* @npaths: Number of paths in @paths
* @init: Optional tunnel specific initialization
* @activate: Optional tunnel specific activation/deactivation
+ * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP).
+ * Only set if the bandwidth needs to be limited.
*/
struct tb_tunnel {
struct tb *tb;
@@ -38,8 +41,10 @@ struct tb_tunnel {
size_t npaths;
int (*init)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*consumed_bandwidth)(struct tb_tunnel *tunnel);
struct list_head list;
enum tb_tunnel_type type;
+ unsigned int max_bw;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
@@ -47,7 +52,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out);
+ struct tb_port *out, int max_bw);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
@@ -58,6 +63,9 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel);
int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw);
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel);
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 4e17a7c7bf0a..880d784398a3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1404,10 +1404,9 @@ struct tb_xdomain_lookup {
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
const struct tb_xdomain_lookup *lookup)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ tb_switch_for_each_port(sw, port) {
struct tb_xdomain *xd;
if (port->xdomain) {
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4d22b911111f..4487a6b9acc8 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -89,7 +89,7 @@ config HVC_DCC
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV
+ depends on RISCV_SBI
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 8df89e9cd254..e985f344b2dd 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -174,3 +174,4 @@ MODULE_AUTHOR("Michael Moese <michael.moese@men.de");
MODULE_ALIAS("mcb:16z125");
MODULE_ALIAS("mcb:16z025");
MODULE_ALIAS("mcb:16z057");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67a9eb3f94ce..540142c5b7b3 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -88,7 +88,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
- depends on RISCV
+ depends on RISCV_SBI
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index e5d3ebab6dae..4f53a4caabf6 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -930,3 +930,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MEN 16z135 High Speed UART");
MODULE_ALIAS("mcb:16z135");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 22e5d4e13714..58bf9d496ba5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -54,6 +54,7 @@
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
+#include <asm/platform_early.h>
#endif
#include "serial_mctrl_gpio.h"
@@ -3090,6 +3091,7 @@ static struct console serial_console = {
.data = &sci_uart_driver,
};
+#ifdef CONFIG_SUPERH
static struct console early_serial_console = {
.name = "early_ttySC",
.write = serial_console_write,
@@ -3118,6 +3120,7 @@ static int sci_probe_earlyprintk(struct platform_device *pdev)
register_console(&early_serial_console);
return 0;
}
+#endif
#define SCI_CONSOLE (&serial_console)
@@ -3318,8 +3321,10 @@ static int sci_probe(struct platform_device *dev)
* the special early probe. We don't have sufficient device state
* to make it beyond this yet.
*/
- if (is_early_platform_device(dev))
+#ifdef CONFIG_SUPERH
+ if (is_sh_early_platform_device(dev))
return sci_probe_earlyprintk(dev);
+#endif
if (dev->dev.of_node) {
p = sci_parse_dt(dev, &dev_id);
@@ -3414,8 +3419,8 @@ static void __exit sci_exit(void)
uart_unregister_driver(&sci_uart_driver);
}
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-early_platform_init_buffer("earlyprintk", &sci_driver,
+#if defined(CONFIG_SUPERH) && defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
+sh_early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 60ff236a3d63..ce8291053af3 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -303,7 +303,7 @@ static int __ldsem_down_read_nested(struct ld_semaphore *sem,
if (count <= 0) {
lock_contended(&sem->dep_map, _RET_IP_);
if (!down_read_failed(sem, count, timeout)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return 0;
}
}
@@ -322,7 +322,7 @@ static int __ldsem_down_write_nested(struct ld_semaphore *sem,
if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
lock_contended(&sem->dep_map, _RET_IP_);
if (!down_write_failed(sem, count, timeout)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return 0;
}
}
@@ -390,7 +390,7 @@ void ldsem_up_read(struct ld_semaphore *sem)
{
long count;
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
@@ -404,7 +404,7 @@ void ldsem_up_write(struct ld_semaphore *sem)
{
long count;
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
if (count < 0)
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index ebcf1434e296..81c88f7bbbcb 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -151,8 +151,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
int i;
if (pdev->dev.of_node) {
- int irq;
-
/* alloc uioinfo for one device */
uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
if (!uioinfo) {
@@ -163,13 +161,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
uioinfo->version = "devicetree";
-
- /* Multiple IRQs are not supported */
- irq = platform_get_irq(pdev, 0);
- if (irq == -ENXIO)
- uioinfo->irq = UIO_IRQ_NONE;
- else
- uioinfo->irq = irq;
}
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
@@ -199,8 +190,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
mutex_init(&priv->alloc_lock);
if (!uioinfo->irq) {
+ /* Multiple IRQs are not supported */
ret = platform_get_irq(pdev, 0);
- if (ret < 0)
+ if (ret == -ENXIO && pdev->dev.of_node)
+ ret = UIO_IRQ_NONE;
+ else if (ret < 0)
goto bad1;
uioinfo->irq = ret;
}
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index d0331613a355..2a1e89d12ed9 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -43,4 +43,14 @@ config USB_CDNS3_PCI_WRAP
If you choose to build this driver as module it will
be dynamically linked and module will be called cdns3-pci.ko
+config USB_CDNS3_TI
+ tristate "Cadence USB3 support on TI platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_CDNS3
+ help
+ Say 'Y' or 'M' here if you are building for Texas Instruments
+ platforms that contain a Cadence USB3 controller core.
+
+ e.g. J721e.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index a703547350bb..948e6b88d1a9 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -14,3 +14,4 @@ endif
cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
new file mode 100644
index 000000000000..c6a79ca15858
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cdns3-ti.c - TI-specific glue layer for the Cadence USB controller
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+/* USB Wrapper register offsets */
+#define USBSS_PID 0x0
+#define USBSS_W1 0x4
+#define USBSS_STATIC_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_DEBUG_CTRL 0x10
+#define USBSS_DEBUG_INFO 0x14
+#define USBSS_DEBUG_LINK_STATE 0x18
+#define USBSS_DEVICE_CTRL 0x1c
+
+/* Wrapper 1 register bits */
+#define USBSS_W1_PWRUP_RST BIT(0)
+#define USBSS_W1_OVERCURRENT_SEL BIT(8)
+#define USBSS_W1_MODESTRAP_SEL BIT(9)
+#define USBSS_W1_OVERCURRENT BIT(16)
+#define USBSS_W1_MODESTRAP_MASK GENMASK(18, 17)
+#define USBSS_W1_MODESTRAP_SHIFT 17
+#define USBSS_W1_USB2_ONLY BIT(19)
+
+/* Static config register bits */
+#define USBSS1_STATIC_PLL_REF_SEL_MASK GENMASK(8, 5)
+#define USBSS1_STATIC_PLL_REF_SEL_SHIFT 5
+#define USBSS1_STATIC_LOOPBACK_MODE_MASK GENMASK(4, 3)
+#define USBSS1_STATIC_LOOPBACK_MODE_SHIFT 3
+#define USBSS1_STATIC_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS1_STATIC_VBUS_SEL_SHIFT 1
+#define USBSS1_STATIC_LANE_REVERSE BIT(0)
+
+/* Modestrap modes */
+enum modestrap_mode { USBSS_MODESTRAP_MODE_NONE,
+ USBSS_MODESTRAP_MODE_HOST,
+ USBSS_MODESTRAP_MODE_PERIPHERAL};
+
+struct cdns_ti {
+ struct device *dev;
+ void __iomem *usbss;
+ int usb2_only:1;
+ int vbus_divider:1;
+ struct clk *usb2_refclk;
+ struct clk *lpm_clk;
+};
+
+static const int cdns_ti_rate_table[] = { /* in KHZ */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
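The probe path below scans this table and programs the matching index as the PLL reference-select code, which is presumably why the entries stay in hardware-code order rather than sorted by rate (note 58000 preceding 50000). A standalone sketch of the lookup, assuming a 19200 kHz reference clock:

#include <stdio.h>

static const int rate_table[] = {	/* kHz; the index doubles as the code */
	9600, 10000, 12000, 19200, 20000, 24000, 25000,
	26000, 38400, 40000, 58000, 50000, 52000,
};

int main(void)
{
	unsigned long rate = 19200;	/* hypothetical usb2_refclk, in kHz */
	unsigned int i;

	for (i = 0; i < sizeof(rate_table) / sizeof(rate_table[0]); i++)
		if (rate_table[i] == (int)rate)
			break;
	printf("rate_code = %u\n", i);	/* 3, written to PLL_REF_SEL */
	return 0;
}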
+static inline u32 cdns_ti_readl(struct cdns_ti *data, u32 offset)
+{
+ return readl(data->usbss + offset);
+}
+
+static inline void cdns_ti_writel(struct cdns_ti *data, u32 offset, u32 value)
+{
+ writel(value, data->usbss + offset);
+}
+
+static int cdns_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct cdns_ti *data;
+ int error;
+ u32 reg;
+ int rate_code, i;
+ unsigned long rate;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->dev = dev;
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ data->lpm_clk = devm_clk_get(dev, "lpm");
+ if (IS_ERR(data->lpm_clk)) {
+ dev_err(dev, "can't get lpm_clk\n");
+ return PTR_ERR(data->lpm_clk);
+ }
+
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; /* To KHz */
+ for (i = 0; i < ARRAY_SIZE(cdns_ti_rate_table); i++) {
+ if (cdns_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cdns_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ return -EINVAL;
+ }
+
+ rate_code = i;
+
+ pm_runtime_enable(dev);
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
+ goto err_get;
+ }
+
+ /* assert RESET */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ reg &= ~USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* set static config */
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+ reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
+ reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
+
+ reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ if (data->vbus_divider)
+ reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
+
+ cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+
+ /* set USB2_ONLY mode if requested */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
+ if (data->usb2_only)
+ reg |= USBSS_W1_USB2_ONLY;
+
+ /* set default modestrap */
+ reg |= USBSS_W1_MODESTRAP_SEL;
+ reg &= ~USBSS_W1_MODESTRAP_MASK;
+ reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* de-assert RESET */
+ reg |= USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ error = of_platform_populate(node, NULL, NULL, dev);
+ if (error) {
+ dev_err(dev, "failed to create children: %d\n", error);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_put_sync(data->dev);
+err_get:
+ pm_runtime_disable(data->dev);
+
+ return error;
+}
+
+static int cdns_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_ti_remove_core);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_ti_of_match[] = {
+ { .compatible = "ti,j721e-usb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
+
+static struct platform_driver cdns_ti_driver = {
+ .probe = cdns_ti_probe,
+ .remove = cdns_ti_remove,
+ .driver = {
+ .name = "cdns3-ti",
+ .of_match_table = cdns_ti_of_match,
+ },
+};
+
+module_platform_driver(cdns_ti_driver);
+
+MODULE_ALIAS("platform:cdns3-ti");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 TI Glue Layer");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index df8812c30640..d8e7eb2f97b9 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -274,11 +274,14 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
switch (event) {
case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
- ret = pinctrl_select_state(data->pinctrl,
- data->pinctrl_hsic_active);
- if (ret)
- dev_err(dev, "hsic_active select failed, err=%d\n",
- ret);
+ if (data->pinctrl) {
+ ret = pinctrl_select_state(data->pinctrl,
+ data->pinctrl_hsic_active);
+ if (ret)
+ dev_err(dev,
+ "hsic_active select failed, err=%d\n",
+ ret);
+ }
break;
case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
ret = imx_usbmisc_hsic_set_connect(data->usbmisc_data);
@@ -306,7 +309,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct pinctrl_state *pinctrl_hsic_idle;
of_id = of_match_device(ci_hdrc_imx_dt_ids, dev);
if (!of_id)
@@ -330,12 +332,42 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.flags |= CI_HDRC_IMX_IS_HSIC;
data->usbmisc_data->hsic = 1;
data->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(data->pinctrl)) {
- dev_err(dev, "pinctrl get failed, err=%ld\n",
+ if (PTR_ERR(data->pinctrl) == -ENODEV)
+ data->pinctrl = NULL;
+ else if (IS_ERR(data->pinctrl)) {
+ if (PTR_ERR(data->pinctrl) != -EPROBE_DEFER)
+ dev_err(dev, "pinctrl get failed, err=%ld\n",
PTR_ERR(data->pinctrl));
return PTR_ERR(data->pinctrl);
}
+ data->hsic_pad_regulator =
+ devm_regulator_get_optional(dev, "hsic");
+ if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
+ /* no pad regulator is needed */
+ data->hsic_pad_regulator = NULL;
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ if (PTR_ERR(data->hsic_pad_regulator) != -EPROBE_DEFER)
+ dev_err(dev,
+ "Get HSIC pad regulator error: %ld\n",
+ PTR_ERR(data->hsic_pad_regulator));
+ return PTR_ERR(data->hsic_pad_regulator);
+ }
+
+ if (data->hsic_pad_regulator) {
+ ret = regulator_enable(data->hsic_pad_regulator);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable HSIC pad regulator\n");
+ return ret;
+ }
+ }
+ }
+
+ /* HSIC pinctrl handling */
+ if (data->pinctrl) {
+ struct pinctrl_state *pinctrl_hsic_idle;
+
pinctrl_hsic_idle = pinctrl_lookup_state(data->pinctrl, "idle");
if (IS_ERR(pinctrl_hsic_idle)) {
dev_err(dev,
@@ -358,27 +390,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
PTR_ERR(data->pinctrl_hsic_active));
return PTR_ERR(data->pinctrl_hsic_active);
}
-
- data->hsic_pad_regulator = devm_regulator_get(dev, "hsic");
- if (PTR_ERR(data->hsic_pad_regulator) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
- /* no pad regualator is needed */
- data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator)) {
- dev_err(dev, "Get HSIC pad regulator error: %ld\n",
- PTR_ERR(data->hsic_pad_regulator));
- return PTR_ERR(data->hsic_pad_regulator);
- }
-
- if (data->hsic_pad_regulator) {
- ret = regulator_enable(data->hsic_pad_regulator);
- if (ret) {
- dev_err(dev,
- "Failed to enable HSIC pad regulator\n");
- return ret;
- }
- }
}
if (pdata.flags & CI_HDRC_PMQOS)
@@ -433,6 +444,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
goto err_clk;
}
+ if (data->usbmisc_data) {
+ if (!IS_ERR(pdata.id_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_id = 1;
+
+ if (!IS_ERR(pdata.vbus_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_vbus = 1;
+ }
+
ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc post failed, ret=%d\n", ret);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c842e03f8767..de2aac9a2868 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -22,6 +22,8 @@ struct imx_usbmisc_data {
unsigned int evdo:1; /* set external vbus divider option */
unsigned int ulpi:1; /* connected to an ULPI phy */
unsigned int hsic:1; /* HSIC controller */
+ unsigned int ext_id:1; /* ID from external event */
+ unsigned int ext_vbus:1; /* Vbus from external event */
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 12025358bb3c..0c9911d44ee5 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -24,35 +24,23 @@ struct tegra_udc_soc_info {
unsigned long flags;
};
-static const struct tegra_udc_soc_info tegra20_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra30_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra114_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra124_udc_soc_info = {
+static const struct tegra_udc_soc_info tegra_udc_soc_info = {
.flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct of_device_id tegra_udc_of_match[] = {
{
.compatible = "nvidia,tegra20-udc",
- .data = &tegra20_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra30-udc",
- .data = &tegra30_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra114-udc",
- .data = &tegra114_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra124-udc",
- .data = &tegra124_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
/* sentinel */
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 98ee575ee500..dce5db41501c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -683,7 +683,7 @@ static int ci_get_platdata(struct device *dev,
if (platdata->dr_mode != USB_DR_MODE_PERIPHERAL) {
/* Get the vbus regulator */
- platdata->reg_vbus = devm_regulator_get(dev, "vbus");
+ platdata->reg_vbus = devm_regulator_get_optional(dev, "vbus");
if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index fcc91a338875..e0376ee646ad 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -342,7 +342,7 @@ DEFINE_SHOW_ATTRIBUTE(ci_registers);
*/
void dbg_create_files(struct ci_hdrc *ci)
{
- ci->debugfs = debugfs_create_dir(dev_name(ci->dev), NULL);
+ ci->debugfs = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);
debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
&ci_device_fops);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8f18e7b6cadf..ffaf46f5d062 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1524,42 +1524,53 @@ static const struct usb_ep_ops usb_ep_ops = {
/******************************************************************************
* GADGET block
*****************************************************************************/
+/**
+ * ci_hdrc_gadget_connect: caller makes sure the gadget driver is bound
+ */
+static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+ if (is_active) {
+ pm_runtime_get_sync(&_gadget->dev);
+ hw_device_reset(ci);
+ spin_lock_irqsave(&ci->lock, flags);
+ if (ci->driver) {
+ hw_device_state(ci, ci->ep0out->qh.dma);
+ usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+ usb_udc_vbus_handler(_gadget, true);
+ }
+ spin_unlock_irqrestore(&ci->lock, flags);
+ } else {
+ usb_udc_vbus_handler(_gadget, false);
+ if (ci->driver)
+ ci->driver->disconnect(&ci->gadget);
+ hw_device_state(ci, 0);
+ if (ci->platdata->notify_event)
+ ci->platdata->notify_event(ci,
+ CI_HDRC_CONTROLLER_STOPPED_EVENT);
+ _gadget_stop_activity(&ci->gadget);
+ pm_runtime_put_sync(&_gadget->dev);
+ usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
+ }
+}
+
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
- int gadget_ready = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
- if (ci->driver)
- gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
if (ci->usb_phy)
usb_phy_set_charger_state(ci->usb_phy, is_active ?
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
- if (gadget_ready) {
- if (is_active) {
- pm_runtime_get_sync(&_gadget->dev);
- hw_device_reset(ci);
- hw_device_state(ci, ci->ep0out->qh.dma);
- usb_gadget_set_state(_gadget, USB_STATE_POWERED);
- usb_udc_vbus_handler(_gadget, true);
- } else {
- usb_udc_vbus_handler(_gadget, false);
- if (ci->driver)
- ci->driver->disconnect(&ci->gadget);
- hw_device_state(ci, 0);
- if (ci->platdata->notify_event)
- ci->platdata->notify_event(ci,
- CI_HDRC_CONTROLLER_STOPPED_EVENT);
- _gadget_stop_activity(&ci->gadget);
- pm_runtime_put_sync(&_gadget->dev);
- usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
- }
- }
+ if (ci->driver)
+ ci_hdrc_gadget_connect(_gadget, is_active);
return 0;
}
@@ -1612,7 +1623,7 @@ static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
@@ -1785,18 +1796,10 @@ static int ci_udc_start(struct usb_gadget *gadget,
return retval;
}
- pm_runtime_get_sync(&ci->gadget.dev);
- if (ci->vbus_active) {
- hw_device_reset(ci);
- } else {
+ if (ci->vbus_active)
+ ci_hdrc_gadget_connect(gadget, 1);
+ else
usb_udc_vbus_handler(&ci->gadget, false);
- pm_runtime_put_sync(&ci->gadget.dev);
- return retval;
- }
-
- retval = hw_device_state(ci, ci->ep0out->qh.dma);
- if (retval)
- pm_runtime_put_sync(&ci->gadget.dev);
return retval;
}
@@ -1826,6 +1829,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
+ ci->driver = NULL;
if (ci->vbus_active) {
hw_device_state(ci, 0);
@@ -1838,7 +1842,6 @@ static int ci_udc_stop(struct usb_gadget *gadget)
pm_runtime_put(&ci->gadget.dev);
}
- ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
ci_udc_stop_for_otg_fsm(ci);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 078c1fdce493..e81e33c26e6c 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -100,6 +100,9 @@
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
+#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+ MX6_BM_ID_WAKEUP)
+
struct usbmisc_ops {
/* It's called once when probing a usb device */
int (*init)(struct imx_usbmisc_data *data);
@@ -330,14 +333,25 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
return 0;
}
+static u32 usbmisc_wakeup_setting(struct imx_usbmisc_data *data)
+{
+ u32 wakeup_setting = MX6_USB_OTG_WAKEUP_BITS;
+
+ if (data->ext_id)
+ wakeup_setting &= ~MX6_BM_ID_WAKEUP;
+
+ if (data->ext_vbus)
+ wakeup_setting &= ~MX6_BM_VBUS_WAKEUP;
+
+ return wakeup_setting;
+}
+
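Expanding the helper against the mask definitions above, for the case where only the ID signal comes from an external source (a worked expansion, not code from the patch):

	/* ext_id = 1, ext_vbus = 0:
	 *   MX6_USB_OTG_WAKEUP_BITS & ~MX6_BM_ID_WAKEUP
	 *	== MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP
	 * so an ID-pin change no longer arms a wakeup once the ID
	 * state is reported via extcon or a role switch instead.
	 */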
static int usbmisc_imx6q_set_wakeup
(struct imx_usbmisc_data *data, bool enabled)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
int ret = 0;
if (data->index > 3)
@@ -346,11 +360,12 @@ static int usbmisc_imx6q_set_wakeup
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + data->index * 4);
if (enabled) {
- val |= wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
} else {
if (val & MX6_BM_WAKEUP_INTR)
pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
- val &= ~wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
}
writel(val, usbmisc->base + data->index * 4);
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -547,17 +562,17 @@ static int usbmisc_imx7d_set_wakeup
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base);
if (enabled) {
- writel(val | wakeup_setting, usbmisc->base);
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
+ writel(val, usbmisc->base);
} else {
if (val & MX6_BM_WAKEUP_INTR)
dev_dbg(data->dev, "wakeup int\n");
- writel(val & ~wakeup_setting, usbmisc->base);
+ writel(val & ~MX6_USB_OTG_WAKEUP_BITS, usbmisc->base);
}
spin_unlock_irqrestore(&usbmisc->lock, flags);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1ac1095bfeac..5f40117e68e7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -805,10 +805,10 @@ int usb_get_configuration(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
int ncfg = dev->descriptor.bNumConfigurations;
- int result = -ENOMEM;
unsigned int cfgno, length;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
+ int result;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
@@ -824,16 +824,16 @@ int usb_get_configuration(struct usb_device *dev)
length = ncfg * sizeof(struct usb_host_config);
dev->config = kzalloc(length, GFP_KERNEL);
if (!dev->config)
- goto err2;
+ return -ENOMEM;
length = ncfg * sizeof(char *);
dev->rawdescriptors = kzalloc(length, GFP_KERNEL);
if (!dev->rawdescriptors)
- goto err2;
+ return -ENOMEM;
desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
if (!desc)
- goto err2;
+ return -ENOMEM;
for (cfgno = 0; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
@@ -895,9 +895,7 @@ int usb_get_configuration(struct usb_device *dev)
err:
kfree(desc);
dev->descriptor.bNumConfigurations = cfgno;
-err2:
- if (result == -ENOMEM)
- dev_err(ddev, "out of memory\n");
+
return result;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3f899552f6e3..879d03f5127c 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -764,8 +764,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
- else
+ else {
+ unsigned int old_suppress;
+
+ /* suppress uevents while claiming interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ }
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
@@ -785,7 +792,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
+ unsigned int old_suppress;
+
+ /* suppress uevents while releasing interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
@@ -1550,10 +1563,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
uurb->buffer_length = le16_to_cpu(dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
- is_in = 1;
+ is_in = true;
uurb->endpoint |= USB_DIR_IN;
} else {
- is_in = 0;
+ is_in = false;
uurb->endpoint &= ~USB_DIR_IN;
}
if (is_in)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 236313f41f4a..1709895387b9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4930,6 +4930,91 @@ hub_power_remaining(struct usb_hub *hub)
return remaining;
}
+
+static int descriptors_changed(struct usb_device *udev,
+ struct usb_device_descriptor *old_device_descriptor,
+ struct usb_host_bos *old_bos)
+{
+ int changed = 0;
+ unsigned index;
+ unsigned serial_len = 0;
+ unsigned len;
+ unsigned old_length;
+ int length;
+ char *buf;
+
+ if (memcmp(&udev->descriptor, old_device_descriptor,
+ sizeof(*old_device_descriptor)) != 0)
+ return 1;
+
+ if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+ return 1;
+ if (udev->bos) {
+ len = le16_to_cpu(udev->bos->desc->wTotalLength);
+ if (len != le16_to_cpu(old_bos->desc->wTotalLength))
+ return 1;
+ if (memcmp(udev->bos->desc, old_bos->desc, len))
+ return 1;
+ }
+
+ /* Since the idVendor, idProduct, and bcdDevice values in the
+ * device descriptor haven't changed, we will assume the
+ * Manufacturer and Product strings haven't changed either.
+ * But the SerialNumber string could be different (e.g., a
+ * different flash card of the same brand).
+ */
+ if (udev->serial)
+ serial_len = strlen(udev->serial) + 1;
+
+ len = serial_len;
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ len = max(len, old_length);
+ }
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (!buf)
+ /* assume the worst */
+ return 1;
+
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
+ old_length);
+ if (length != old_length) {
+ dev_dbg(&udev->dev, "config index %d, error %d\n",
+ index, length);
+ changed = 1;
+ break;
+ }
+ if (memcmp(buf, udev->rawdescriptors[index], old_length)
+ != 0) {
+ dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
+ index,
+ ((struct usb_config_descriptor *) buf)->
+ bConfigurationValue);
+ changed = 1;
+ break;
+ }
+ }
+
+ if (!changed && serial_len) {
+ length = usb_string(udev, udev->descriptor.iSerialNumber,
+ buf, serial_len);
+ if (length + 1 != serial_len) {
+ dev_dbg(&udev->dev, "serial string error %d\n",
+ length);
+ changed = 1;
+ } else if (memcmp(buf, udev->serial, length) != 0) {
+ dev_dbg(&udev->dev, "serial string changed\n");
+ changed = 1;
+ }
+ }
+
+ kfree(buf);
+ return changed;
+}
+
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
@@ -5167,7 +5252,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
+ struct usb_device_descriptor descriptor;
int status = -ENODEV;
+ int retval;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
@@ -5188,7 +5275,30 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
if (portstatus & USB_PORT_STAT_ENABLE) {
- status = 0; /* Nothing to do */
+ /*
+ * USB-3 connections are initialized automatically by
+ * the host controller hardware. Therefore check for
+ * changed device descriptors before resuscitating the
+ * device.
+ */
+ descriptor = udev->descriptor;
+ retval = usb_get_device_descriptor(udev,
+ sizeof(udev->descriptor));
+ if (retval < 0) {
+ dev_dbg(&udev->dev,
+ "can't read device descriptor %d\n",
+ retval);
+ } else {
+ if (descriptors_changed(udev, &descriptor,
+ udev->bos)) {
+ dev_dbg(&udev->dev,
+ "device descriptor has changed\n");
+ /* for disconnect() calls */
+ udev->descriptor = descriptor;
+ } else {
+ status = 0; /* Nothing to do */
+ }
+ }
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
udev->persist_enabled) {
@@ -5550,90 +5660,6 @@ void usb_hub_cleanup(void)
usb_deregister(&hub_driver);
} /* usb_hub_cleanup() */
-static int descriptors_changed(struct usb_device *udev,
- struct usb_device_descriptor *old_device_descriptor,
- struct usb_host_bos *old_bos)
-{
- int changed = 0;
- unsigned index;
- unsigned serial_len = 0;
- unsigned len;
- unsigned old_length;
- int length;
- char *buf;
-
- if (memcmp(&udev->descriptor, old_device_descriptor,
- sizeof(*old_device_descriptor)) != 0)
- return 1;
-
- if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
- return 1;
- if (udev->bos) {
- len = le16_to_cpu(udev->bos->desc->wTotalLength);
- if (len != le16_to_cpu(old_bos->desc->wTotalLength))
- return 1;
- if (memcmp(udev->bos->desc, old_bos->desc, len))
- return 1;
- }
-
- /* Since the idVendor, idProduct, and bcdDevice values in the
- * device descriptor haven't changed, we will assume the
- * Manufacturer and Product strings haven't changed either.
- * But the SerialNumber string could be different (e.g., a
- * different flash card of the same brand).
- */
- if (udev->serial)
- serial_len = strlen(udev->serial) + 1;
-
- len = serial_len;
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- len = max(len, old_length);
- }
-
- buf = kmalloc(len, GFP_NOIO);
- if (!buf)
- /* assume the worst */
- return 1;
-
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
- old_length);
- if (length != old_length) {
- dev_dbg(&udev->dev, "config index %d, error %d\n",
- index, length);
- changed = 1;
- break;
- }
- if (memcmp(buf, udev->rawdescriptors[index], old_length)
- != 0) {
- dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
- index,
- ((struct usb_config_descriptor *) buf)->
- bConfigurationValue);
- changed = 1;
- break;
- }
- }
-
- if (!changed && serial_len) {
- length = usb_string(udev, udev->descriptor.iSerialNumber,
- buf, serial_len);
- if (length + 1 != serial_len) {
- dev_dbg(&udev->dev, "serial string error %d\n",
- length);
- changed = 1;
- } else if (memcmp(buf, udev->serial, length) != 0) {
- dev_dbg(&udev->dev, "serial string changed\n");
- changed = 1;
- }
- }
-
- kfree(buf);
- return changed;
-}
-
/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
@@ -5814,7 +5840,7 @@ re_enumerate_no_bos:
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
- * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
+ * @udev: device to reset (not in NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
@@ -5842,8 +5868,7 @@ int usb_reset_device(struct usb_device *udev)
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
- if (udev->state == USB_STATE_NOTATTACHED ||
- udev->state == USB_STATE_SUSPENDED) {
+ if (udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8e41d70fd298..78a4925aa118 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
__func__);
return -EBUSY;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index d08d070a0fb6..968e03b89d04 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -134,7 +134,7 @@ struct dwc2_hsotg_req;
* @target_frame: Targeted frame num to setup next ISOC transfer
* @frame_overrun: Indicates SOF number overrun in DSTS
*
- * This is the driver's state for each registered enpoint, allowing it
+ * This is the driver's state for each registered endpoint, allowing it
* to keep track of transactions that need doing. Each endpoint has a
* lock to protect the state, to try and avoid using an overall lock
* for the host controller as much as possible.
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 7f62f4cdc265..b8f2790abf91 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -770,7 +770,7 @@ int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
int ret;
struct dentry *root;
- root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+ root = debugfs_create_dir(dev_name(hsotg->dev), usb_debug_root);
hsotg->debug_root = root;
debugfs_create_file("params", 0444, root, hsotg, &params_fops);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 556a876c7896..206caa0ea1c6 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -97,24 +97,24 @@ config USB_DWC3_KEYSTONE
Say 'Y' or 'M' here if you have one such device
config USB_DWC3_MESON_G12A
- tristate "Amlogic Meson G12A Platforms"
- depends on OF && COMMON_CLK
- depends on ARCH_MESON || COMPILE_TEST
- default USB_DWC3
- select USB_ROLE_SWITCH
+ tristate "Amlogic Meson G12A Platforms"
+ depends on OF && COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ default USB_DWC3
+ select USB_ROLE_SWITCH
select REGMAP_MMIO
- help
- Support USB2/3 functionality in Amlogic G12A platforms.
- Say 'Y' or 'M' if you have one such device.
+ help
+ Support USB2/3 functionality in Amlogic G12A platforms.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_OF_SIMPLE
- tristate "Generic OF Simple Glue Layer"
- depends on OF && COMMON_CLK
- default USB_DWC3
- help
- Support USB2/3 functionality in simple SoC integrations.
- Currently supports Xilinx and Qualcomm DWC USB3 IP.
- Say 'Y' or 'M' if you have one such device.
+ tristate "Generic OF Simple Glue Layer"
+ depends on OF && COMMON_CLK
+ default USB_DWC3
+ help
+ Support USB2/3 functionality in simple SoC integrations.
+ Currently supports Xilinx and Qualcomm DWC USB3 IP.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 97d6ae3c4df2..f561c6c9e8a9 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -566,8 +566,11 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
*/
static int dwc3_phy_setup(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
@@ -585,6 +588,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
if (dwc->u2ss_inp3_quirk)
reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
@@ -668,6 +679,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ /*
+ * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
if (dwc->dis_u2_susphy_quirk)
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -902,9 +921,12 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*/
static int dwc3_core_init(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
int ret;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
/*
* Write Linux Version Code to our GUID register so it's easy to figure
* out which kernel version a bug was found.
@@ -940,6 +962,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0a;
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+ dwc->revision > DWC3_REVISION_194A) {
+ if (!dwc->dis_u3_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ }
+
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+ }
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
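
Taken together, the three hunks above enforce an ordering on dual-role (DRD) controllers: dwc3_phy_setup() clears GUSB3PIPECTL.SUSPHY and GUSB2PHYCFG.SUSPHY so PHY suspend stays off across the device soft-reset, and dwc3_core_init() sets them again (quirks permitting) once initialization is complete. A small userspace model of that clear-then-set sequencing; the bit position and names are placeholders, not the real register map.

	#include <stdint.h>
	#include <stdio.h>

	#define SUSPHY	(1u << 6)		/* placeholder suspend-enable bit */

	static uint32_t pipectl;		/* models GUSB3PIPECTL(0) */

	static void phy_setup(int is_drd)
	{
		uint32_t reg = pipectl | SUSPHY; /* default on newer cores */

		if (is_drd)
			reg &= ~SUSPHY;		/* DRD: must stay clear until init ends */
		pipectl = reg;
	}

	static void core_init_done(int is_drd)
	{
		if (is_drd)
			pipectl |= SUSPHY;	/* safe to let the PHY suspend now */
	}

	int main(void)
	{
		phy_setup(1);
		printf("during init: SUSPHY=%u\n", !!(pipectl & SUSPHY));
		core_init_done(1);
		printf("after init:  SUSPHY=%u\n", !!(pipectl & SUSPHY));
		return 0;
	}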
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 9baabed87d61..e56beb9d1e36 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -112,7 +112,7 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
@@ -141,7 +141,7 @@ dwc3_gadget_hs_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 1c792710348f..4fe8b1e1485c 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -916,7 +916,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
- root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->root = root;
debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index bdac3e7d7b18..e64754be47b4 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -110,12 +110,9 @@ err_resetc_put:
return ret;
}
-static int dwc3_of_simple_remove(struct platform_device *pdev)
+static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
{
- struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
-
- of_platform_depopulate(dev);
+ of_platform_depopulate(simple->dev);
clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
clk_bulk_put_all(simple->num_clocks, simple->clks);
@@ -126,13 +123,27 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
reset_control_put(simple->resets);
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
- pm_runtime_set_suspended(dev);
+ pm_runtime_disable(simple->dev);
+ pm_runtime_put_noidle(simple->dev);
+ pm_runtime_set_suspended(simple->dev);
+}
+
+static int dwc3_of_simple_remove(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
return 0;
}
+static void dwc3_of_simple_shutdown(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
+}
+
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -190,6 +201,7 @@ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
static struct platform_driver dwc3_of_simple_driver = {
.probe = dwc3_of_simple_probe,
.remove = dwc3_of_simple_remove,
+ .shutdown = dwc3_of_simple_shutdown,
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
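
The dwc3-of-simple hunks factor the probe-undo path into one helper and call it from both .remove and the newly added .shutdown, so a reboot quiesces clocks, resets, and runtime PM exactly like a driver unbind. A kernel-style sketch of the pattern (names hypothetical, in-tree build only):

	#include <linux/platform_device.h>

	struct mydev { int dummy; /* driver state */ };

	static void __mydev_teardown(struct mydev *d)
	{
		/* undo probe: depopulate children, drop clocks/resets, disable PM */
	}

	static int mydev_remove(struct platform_device *pdev)
	{
		__mydev_teardown(platform_get_drvdata(pdev));
		return 0;
	}

	static void mydev_shutdown(struct platform_device *pdev)
	{
		__mydev_teardown(platform_get_drvdata(pdev));
	}

	static struct platform_driver mydev_driver = {
		.remove   = mydev_remove,
		.shutdown = mydev_shutdown,
		.driver   = { .name = "mydev" },
	};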
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ec54b69c29c..3b4f67000315 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -794,9 +794,9 @@ static int set_config(struct usb_composite_dev *cdev,
result = 0;
}
- INFO(cdev, "%s config #%d: %s\n",
- usb_speed_string(gadget->speed),
- number, c ? c->label : "unconfigured");
+ DBG(cdev, "%s config #%d: %s\n",
+ usb_speed_string(gadget->speed),
+ number, c ? c->label : "unconfigured");
if (!c)
goto done;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 33852c2b29d1..ab9ac48a751a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1544,6 +1544,7 @@ static struct config_group *gadgets_make(
gi->composite.resume = NULL;
gi->composite.max_speed = USB_SPEED_SUPER;
+ spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 9fc98de83624..7c152c28b26c 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -771,6 +771,24 @@ static struct configfs_item_operations acm_item_ops = {
.release = acm_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_acm_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_acm_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_acm_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -779,6 +797,9 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_acm_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_acm_attr_console,
+#endif
&f_acm_attr_port_num,
NULL,
};
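
The f_acm hunk registers a read-write "console" attribute through CONFIGFS_ATTR(), whose naming contract is easy to trip over: CONFIGFS_ATTR(f_acm_, console) expects f_acm_console_show()/f_acm_console_store() to exist and emits f_acm_attr_console for the attrs[] table. A sketch with hypothetical "demo_" names (in-tree build only):

	#include <linux/configfs.h>
	#include <linux/kernel.h>
	#include <linux/module.h>

	static ssize_t demo_flag_show(struct config_item *item, char *page)
	{
		return sprintf(page, "%d\n", 1);
	}

	static ssize_t demo_flag_store(struct config_item *item,
				       const char *page, size_t count)
	{
		return count;	/* accept and ignore, for illustration only */
	}

	CONFIGFS_ATTR(demo_, flag);	/* defines demo_attr_flag */

	static struct configfs_attribute *demo_attrs[] = {
		&demo_attr_flag,
		NULL,
	};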
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index 55b7f57d2dc7..ab26d84ed95e 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -432,7 +432,7 @@ static struct usb_function_instance *obex_alloc_inst(void)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = obex_free_inst;
- ret = gserial_alloc_line(&opts->port_num);
+ ret = gserial_alloc_line_no_console(&opts->port_num);
if (ret) {
kfree(opts);
return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index c860f30a0ea2..1406255d0865 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -266,6 +266,24 @@ static struct configfs_item_operations serial_item_ops = {
.release = serial_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_serial_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_serial_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_serial_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -274,6 +292,9 @@ static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_serial_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_serial_attr_console,
+#endif
&f_serial_attr_port_num,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7f01f78b1d23..36504931b2d1 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -846,7 +846,7 @@ static void uasp_set_alt(struct f_uas *fu)
fu->flags = USBG_IS_UAS;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed >= USB_SPEED_SUPER)
fu->flags |= USBG_USE_STREAMS;
config_ep_by_speed(gadget, f, fu->ep_in);
@@ -2093,6 +2093,16 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
usb_composite_setup_continue(fu->function.config->cdev);
}
+static int tcm_get_alt(struct usb_function *f, unsigned intf)
+{
+ if (intf == bot_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_BBB;
+ if (intf == uasp_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_UAS;
+
+ return -EOPNOTSUPP;
+}
+
static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
@@ -2300,6 +2310,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
+ fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 56906d15fb55..7ec6a996af26 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -585,7 +585,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 65f634ec7fc2..f986e5c55974 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -82,14 +82,13 @@
#define GS_CONSOLE_BUF_SIZE 8192
/* console info */
-struct gscons_info {
- struct gs_port *port;
- struct task_struct *console_thread;
- struct kfifo con_buf;
- /* protect the buf and busy flag */
- spinlock_t con_lock;
- int req_busy;
- struct usb_request *console_req;
+struct gs_console {
+ struct console console;
+ struct work_struct work;
+ spinlock_t lock;
+ struct usb_request *req;
+ struct kfifo buf;
+ size_t missed;
};
/*
@@ -101,8 +100,10 @@ struct gs_port {
spinlock_t port_lock; /* guard port_* access */
struct gserial *port_usb;
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ struct gs_console *console;
+#endif
- bool openclose; /* open/close in progress */
u8 port_num;
struct list_head read_pool;
@@ -586,82 +587,45 @@ static int gs_open(struct tty_struct *tty, struct file *file)
{
int port_num = tty->index;
struct gs_port *port;
- int status;
-
- do {
- mutex_lock(&ports[port_num].lock);
- port = ports[port_num].port;
- if (!port)
- status = -ENODEV;
- else {
- spin_lock_irq(&port->port_lock);
-
- /* already open? Great. */
- if (port->port.count) {
- status = 0;
- port->port.count++;
-
- /* currently opening/closing? wait ... */
- } else if (port->openclose) {
- status = -EBUSY;
-
- /* ... else we do the work */
- } else {
- status = -EAGAIN;
- port->openclose = true;
- }
- spin_unlock_irq(&port->port_lock);
- }
- mutex_unlock(&ports[port_num].lock);
+ int status = 0;
- switch (status) {
- default:
- /* fully handled */
- return status;
- case -EAGAIN:
- /* must do the work */
- break;
- case -EBUSY:
- /* wait for EAGAIN task to finish */
- msleep(1);
- /* REVISIT could have a waitchannel here, if
- * concurrent open performance is important
- */
- break;
- }
- } while (status != -EAGAIN);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+ if (!port) {
+ status = -ENODEV;
+ goto out;
+ }
- /* Do the "real open" */
spin_lock_irq(&port->port_lock);
/* allocate circular buffer on first open */
if (!kfifo_initialized(&port->port_write_buf)) {
spin_unlock_irq(&port->port_lock);
+
+ /*
+ * portmaster's mutex still protects from simultaneous open(),
+ * and close() can't happen, yet.
+ */
+
status = kfifo_alloc(&port->port_write_buf,
WRITE_BUF_SIZE, GFP_KERNEL);
- spin_lock_irq(&port->port_lock);
-
if (status) {
pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
- port->port_num, tty, file);
- port->openclose = false;
- goto exit_unlock_port;
+ port_num, tty, file);
+ goto out;
}
- }
- /* REVISIT if REMOVED (ports[].port NULL), abort the open
- * to let rmmod work faster (but this way isn't wrong).
- */
+ spin_lock_irq(&port->port_lock);
+ }
- /* REVISIT maybe wait for "carrier detect" */
+ /* already open? Great. */
+ if (port->port.count++)
+ goto exit_unlock_port;
tty->driver_data = port;
port->port.tty = tty;
- port->port.count = 1;
- port->openclose = false;
-
/* if connected, start the I/O stream */
if (port->port_usb) {
struct gserial *gser = port->port_usb;
@@ -675,20 +639,21 @@ static int gs_open(struct tty_struct *tty, struct file *file)
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
- status = 0;
-
exit_unlock_port:
spin_unlock_irq(&port->port_lock);
+out:
+ mutex_unlock(&ports[port_num].lock);
return status;
}
-static int gs_writes_finished(struct gs_port *p)
+static int gs_close_flush_done(struct gs_port *p)
{
int cond;
- /* return true on disconnect or empty buffer */
+ /* return true on disconnect or empty buffer or if raced with open() */
spin_lock_irq(&p->port_lock);
- cond = (p->port_usb == NULL) || !kfifo_len(&p->port_write_buf);
+ cond = p->port_usb == NULL || !kfifo_len(&p->port_write_buf) ||
+ p->port.count > 1;
spin_unlock_irq(&p->port_lock);
return cond;
@@ -702,6 +667,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
if (port->port.count != 1) {
+raced_with_open:
if (port->port.count == 0)
WARN_ON(1);
else
@@ -711,12 +677,6 @@ static void gs_close(struct tty_struct *tty, struct file *file)
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
- /* mark port as closing but in use; we can drop port lock
- * and sleep if necessary
- */
- port->openclose = true;
- port->port.count = 0;
-
gser = port->port_usb;
if (gser && gser->disconnect)
gser->disconnect(gser);
@@ -727,9 +687,13 @@ static void gs_close(struct tty_struct *tty, struct file *file)
if (kfifo_len(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
- gs_writes_finished(port),
+ gs_close_flush_done(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+
+ if (port->port.count != 1)
+ goto raced_with_open;
+
gser = port->port_usb;
}
@@ -742,10 +706,9 @@ static void gs_close(struct tty_struct *tty, struct file *file)
else
kfifo_reset(&port->port_write_buf);
+ port->port.count = 0;
port->port.tty = NULL;
- port->openclose = false;
-
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
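
With the openclose flag gone, gs_open() runs entirely under the portmaster mutex, and gs_close() handles the remaining race differently: it drops the port lock to wait for the write FIFO to drain, then re-checks port.count on return and bails out if an open() slipped in meanwhile. A userspace model of that drop-lock, sleep, revalidate shape (all names illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int count = 1;		/* models port->port.count */
	static int fifo_len;		/* models kfifo_len(&port_write_buf) */

	static void port_close(void)
	{
		pthread_mutex_lock(&lock);
		if (fifo_len > 0) {
			pthread_mutex_unlock(&lock);
			/* wait_event_interruptible_timeout() sleeps here */
			pthread_mutex_lock(&lock);
			if (count != 1) {	/* raced with open(): keep port alive */
				count--;
				pthread_mutex_unlock(&lock);
				return;
			}
		}
		count = 0;			/* real teardown happens here */
		pthread_mutex_unlock(&lock);
		printf("closed\n");
	}

	int main(void)
	{
		port_close();
		return 0;
	}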
@@ -889,36 +852,9 @@ static struct tty_driver *gs_tty_driver;
#ifdef CONFIG_U_SERIAL_CONSOLE
-static struct gscons_info gscons_info;
-static struct console gserial_cons;
-
-static struct usb_request *gs_request_new(struct usb_ep *ep)
-{
- struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (!req)
- return NULL;
-
- req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- return NULL;
- }
-
- return req;
-}
-
-static void gs_request_free(struct usb_request *req, struct usb_ep *ep)
+static void gs_console_complete_out(struct usb_ep *ep, struct usb_request *req)
{
- if (!req)
- return;
-
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
-static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
-{
- struct gscons_info *info = &gscons_info;
+ struct gs_console *cons = req->context;
switch (req->status) {
default:
@@ -927,12 +863,12 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
/* fall through */
case 0:
/* normal completion */
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
-
- wake_up_process(info->console_thread);
+ spin_lock(&cons->lock);
+ req->length = 0;
+ schedule_work(&cons->work);
+ spin_unlock(&cons->lock);
break;
+ case -ECONNRESET:
case -ESHUTDOWN:
/* disconnect */
pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
@@ -940,190 +876,250 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
}
}
-static int gs_console_connect(int port_num)
+static void __gs_console_push(struct gs_console *cons)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct usb_request *req = cons->req;
struct usb_ep *ep;
+ size_t size;
- if (port_num != gserial_cons.index) {
- pr_err("%s: port num [%d] is not support console\n",
- __func__, port_num);
- return -ENXIO;
- }
+ if (!req)
+ return; /* disconnected */
- port = ports[port_num].port;
- ep = port->port_usb->in;
- if (!info->console_req) {
- info->console_req = gs_request_new(ep);
- if (!info->console_req)
- return -ENOMEM;
- info->console_req->complete = gs_complete_out;
+ if (req->length)
+ return; /* busy */
+
+ ep = cons->console.data;
+ size = kfifo_out(&cons->buf, req->buf, ep->maxpacket);
+ if (!size)
+ return;
+
+ if (cons->missed && ep->maxpacket >= 64) {
+ char buf[64];
+ size_t len;
+
+ len = sprintf(buf, "\n[missed %zu bytes]\n", cons->missed);
+ kfifo_in(&cons->buf, buf, len);
+ cons->missed = 0;
}
- info->port = port;
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
- pr_vdebug("port[%d] console connect!\n", port_num);
- return 0;
+ req->length = size;
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ req->length = 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_work(struct work_struct *work)
{
- struct gscons_info *info = &gscons_info;
- struct usb_request *req = info->console_req;
+ struct gs_console *cons = container_of(work, struct gs_console, work);
+
+ spin_lock_irq(&cons->lock);
- gs_request_free(req, ep);
- info->console_req = NULL;
+ __gs_console_push(cons);
+
+ spin_unlock_irq(&cons->lock);
}
-static int gs_console_thread(void *data)
+static void gs_console_write(struct console *co,
+ const char *buf, unsigned count)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct gs_console *cons = container_of(co, struct gs_console, console);
+ unsigned long flags;
+ size_t n;
+
+ spin_lock_irqsave(&cons->lock, flags);
+
+ n = kfifo_in(&cons->buf, buf, count);
+ if (n < count)
+ cons->missed += count - n;
+
+ if (cons->req && !cons->req->length)
+ schedule_work(&cons->work);
+
+ spin_unlock_irqrestore(&cons->lock, flags);
+}
+
+static struct tty_driver *gs_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return gs_tty_driver;
+}
+
+static int gs_console_connect(struct gs_port *port)
+{
+ struct gs_console *cons = port->console;
struct usb_request *req;
struct usb_ep *ep;
- int xfer, ret, count, size;
-
- do {
- port = info->port;
- set_current_state(TASK_INTERRUPTIBLE);
- if (!port || !port->port_usb
- || !port->port_usb->in || !info->console_req)
- goto sched;
-
- req = info->console_req;
- ep = port->port_usb->in;
-
- spin_lock_irq(&info->con_lock);
- count = kfifo_len(&info->con_buf);
- size = ep->maxpacket;
-
- if (count > 0 && !info->req_busy) {
- set_current_state(TASK_RUNNING);
- if (count < size)
- size = count;
-
- xfer = kfifo_out(&info->con_buf, req->buf, size);
- req->length = xfer;
-
- spin_unlock(&info->con_lock);
- ret = usb_ep_queue(ep, req, GFP_ATOMIC);
- spin_lock(&info->con_lock);
- if (ret < 0)
- info->req_busy = 0;
- else
- info->req_busy = 1;
-
- spin_unlock_irq(&info->con_lock);
- } else {
- spin_unlock_irq(&info->con_lock);
-sched:
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- schedule();
- }
- } while (1);
+
+ if (!cons)
+ return 0;
+
+ ep = port->port_usb->in;
+ req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->complete = gs_console_complete_out;
+ req->context = cons;
+ req->length = 0;
+
+ spin_lock(&cons->lock);
+ cons->req = req;
+ cons->console.data = ep;
+ spin_unlock(&cons->lock);
+
+ pr_debug("ttyGS%d: console connected!\n", port->port_num);
+
+ schedule_work(&cons->work);
return 0;
}
-static int gs_console_setup(struct console *co, char *options)
+static void gs_console_disconnect(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- int status;
+ struct gs_console *cons = port->console;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ if (!cons)
+ return;
- info->port = NULL;
- info->console_req = NULL;
- info->req_busy = 0;
- spin_lock_init(&info->con_lock);
+ spin_lock(&cons->lock);
- status = kfifo_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
- if (status) {
- pr_err("%s: allocate console buffer failed\n", __func__);
- return status;
- }
+ req = cons->req;
+ ep = cons->console.data;
+ cons->req = NULL;
+
+ spin_unlock(&cons->lock);
- info->console_thread = kthread_create(gs_console_thread,
- co, "gs_console");
- if (IS_ERR(info->console_thread)) {
- pr_err("%s: cannot create console thread\n", __func__);
- kfifo_free(&info->con_buf);
- return PTR_ERR(info->console_thread);
+ if (!req)
+ return;
+
+ usb_ep_dequeue(ep, req);
+ gs_free_req(ep, req);
+}
+
+static int gs_console_init(struct gs_port *port)
+{
+ struct gs_console *cons;
+ int err;
+
+ if (port->console)
+ return 0;
+
+ cons = kzalloc(sizeof(*port->console), GFP_KERNEL);
+ if (!cons)
+ return -ENOMEM;
+
+ strcpy(cons->console.name, "ttyGS");
+ cons->console.write = gs_console_write;
+ cons->console.device = gs_console_device;
+ cons->console.flags = CON_PRINTBUFFER;
+ cons->console.index = port->port_num;
+
+ INIT_WORK(&cons->work, gs_console_work);
+ spin_lock_init(&cons->lock);
+
+ err = kfifo_alloc(&cons->buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
+ if (err) {
+ pr_err("ttyGS%d: allocate console buffer failed\n", port->port_num);
+ kfree(cons);
+ return err;
}
- wake_up_process(info->console_thread);
+
+ port->console = cons;
+ register_console(&cons->console);
+
+ spin_lock_irq(&port->port_lock);
+ if (port->port_usb)
+ gs_console_connect(port);
+ spin_unlock_irq(&port->port_lock);
return 0;
}
-static void gs_console_write(struct console *co,
- const char *buf, unsigned count)
+static void gs_console_exit(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- unsigned long flags;
+ struct gs_console *cons = port->console;
+
+ if (!cons)
+ return;
+
+ unregister_console(&cons->console);
- spin_lock_irqsave(&info->con_lock, flags);
- kfifo_in(&info->con_buf, buf, count);
- spin_unlock_irqrestore(&info->con_lock, flags);
+ spin_lock_irq(&port->port_lock);
+ if (cons->req)
+ gs_console_disconnect(port);
+ spin_unlock_irq(&port->port_lock);
- wake_up_process(info->console_thread);
+ cancel_work_sync(&cons->work);
+ kfifo_free(&cons->buf);
+ kfree(cons);
+ port->console = NULL;
}
-static struct tty_driver *gs_console_device(struct console *co, int *index)
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count)
{
- struct tty_driver **p = (struct tty_driver **)co->data;
+ struct gs_port *port;
+ bool enable;
+ int ret;
- if (!*p)
- return NULL;
+ ret = strtobool(page, &enable);
+ if (ret)
+ return ret;
- *index = co->index;
- return *p;
-}
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
-static struct console gserial_cons = {
- .name = "ttyGS",
- .write = gs_console_write,
- .device = gs_console_device,
- .setup = gs_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &gs_tty_driver,
-};
+ if (WARN_ON(port == NULL)) {
+ ret = -ENXIO;
+ goto out;
+ }
-static void gserial_console_init(void)
-{
- register_console(&gserial_cons);
+ if (enable)
+ ret = gs_console_init(port);
+ else
+ gs_console_exit(port);
+out:
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret < 0 ? ret : count;
}
+EXPORT_SYMBOL_GPL(gserial_set_console);
-static void gserial_console_exit(void)
+ssize_t gserial_get_console(unsigned char port_num, char *page)
{
- struct gscons_info *info = &gscons_info;
+ struct gs_port *port;
+ ssize_t ret;
- unregister_console(&gserial_cons);
- if (!IS_ERR_OR_NULL(info->console_thread))
- kthread_stop(info->console_thread);
- kfifo_free(&info->con_buf);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+
+ if (WARN_ON(port == NULL))
+ ret = -ENXIO;
+ else
+ ret = sprintf(page, "%u\n", !!port->console);
+
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(gserial_get_console);
#else
-static int gs_console_connect(int port_num)
+static int gs_console_connect(struct gs_port *port)
{
return 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_disconnect(struct gs_port *port)
{
}
-static void gserial_console_init(void)
+static int gs_console_init(struct gs_port *port)
{
+ return -ENOSYS;
}
-static void gserial_console_exit(void)
+static void gs_console_exit(struct gs_port *port)
{
}
@@ -1172,8 +1168,9 @@ static int gs_closed(struct gs_port *port)
int cond;
spin_lock_irq(&port->port_lock);
- cond = (port->port.count == 0) && !port->openclose;
+ cond = port->port.count == 0;
spin_unlock_irq(&port->port_lock);
+
return cond;
}
@@ -1197,18 +1194,19 @@ void gserial_free_line(unsigned char port_num)
return;
}
port = ports[port_num].port;
+ gs_console_exit(port);
ports[port_num].port = NULL;
mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
tty_unregister_device(gs_tty_driver, port_num);
- gserial_console_exit();
}
EXPORT_SYMBOL_GPL(gserial_free_line);
-int gserial_alloc_line(unsigned char *line_num)
+int gserial_alloc_line_no_console(unsigned char *line_num)
{
struct usb_cdc_line_coding coding;
+ struct gs_port *port;
struct device *tty_dev;
int ret;
int port_num;
@@ -1231,24 +1229,35 @@ int gserial_alloc_line(unsigned char *line_num)
/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
- tty_dev = tty_port_register_device(&ports[port_num].port->port,
+ port = ports[port_num].port;
+ tty_dev = tty_port_register_device(&port->port,
gs_tty_driver, port_num, NULL);
if (IS_ERR(tty_dev)) {
- struct gs_port *port;
pr_err("%s: failed to register tty for port %d, err %ld\n",
__func__, port_num, PTR_ERR(tty_dev));
ret = PTR_ERR(tty_dev);
- port = ports[port_num].port;
+ mutex_lock(&ports[port_num].lock);
ports[port_num].port = NULL;
+ mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
goto err;
}
*line_num = port_num;
- gserial_console_init();
err:
return ret;
}
+EXPORT_SYMBOL_GPL(gserial_alloc_line_no_console);
+
+int gserial_alloc_line(unsigned char *line_num)
+{
+ int ret = gserial_alloc_line_no_console(line_num);
+
+ if (!ret && !*line_num)
+ gs_console_init(ports[*line_num].port);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(gserial_alloc_line);
/**
@@ -1327,7 +1336,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
gser->disconnect(gser);
}
- status = gs_console_connect(port_num);
+ status = gs_console_connect(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -1359,12 +1368,14 @@ void gserial_disconnect(struct gserial *gser)
/* tell the TTY glue not to do I/O here any more */
spin_lock_irqsave(&port->port_lock, flags);
+ gs_console_disconnect(port);
+
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
port->port_usb = NULL;
gser->ioport = NULL;
- if (port->port.count > 0 || port->openclose) {
+ if (port->port.count > 0) {
wake_up_interruptible(&port->drain_wait);
if (port->port.tty)
tty_hangup(port->port.tty);
@@ -1377,7 +1388,7 @@ void gserial_disconnect(struct gserial *gser)
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port.count == 0 && !port->openclose)
+ if (port->port.count == 0)
kfifo_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
@@ -1386,7 +1397,6 @@ void gserial_disconnect(struct gserial *gser)
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
- gs_console_disconnect(gser->in);
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
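
The net effect of the u_serial.c rework: the kthread-based console is replaced by a per-port gs_console whose ->write() only copies into a kfifo under a spinlock (console writes may arrive from atomic context) and counts what did not fit in "missed"; a work item then drains one maxpacket's worth into a single reusable USB request. A compact userspace model of that producer/drain split (sizes and names illustrative):

	#include <stdio.h>
	#include <string.h>

	#define RING_SZ 16
	static char ring[RING_SZ];
	static size_t head, missed;

	static void console_write(const char *buf, size_t n)	/* any context */
	{
		size_t fit = n < RING_SZ - head ? n : RING_SZ - head;

		memcpy(ring + head, buf, fit);
		head += fit;
		missed += n - fit;	/* dropped bytes are reported later */
	}

	static void drain_work(void)	/* process context, one packet at a time */
	{
		printf("queue %zu bytes (missed so far: %zu)\n", head, missed);
		head = 0;		/* request completion would re-arm the work */
	}

	int main(void)
	{
		console_write("hello, gadget console!", 22);
		drain_work();
		return 0;
	}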
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 9acaac1cbb75..e5b08ab8cf7a 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -54,9 +54,17 @@ struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
void gs_free_req(struct usb_ep *, struct usb_request *req);
/* management of individual TTY ports */
+int gserial_alloc_line_no_console(unsigned char *port_line);
int gserial_alloc_line(unsigned char *port_line);
void gserial_free_line(unsigned char port_line);
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count);
+ssize_t gserial_get_console(unsigned char port_num, char *page);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 69ff7f8c86f5..119a4e47681f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -149,21 +149,21 @@ config USB_ETH_RNDIS
is given in comments found in that info file.
config USB_ETH_EEM
- bool "Ethernet Emulation Model (EEM) support"
- depends on USB_ETH
+ bool "Ethernet Emulation Model (EEM) support"
+ depends on USB_ETH
select USB_LIBCOMPOSITE
select USB_F_EEM
- help
- CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
- and therefore can be supported by more hardware. Technically ECM and
- EEM are designed for different applications. The ECM model extends
- the network interface to the target (e.g. a USB cable modem), and the
- EEM model is for mobile devices to communicate with hosts using
- ethernet over USB. For Linux gadgets, however, the interface with
- the host is the same (a usbX device), so the differences are minimal.
-
- If you say "y" here, the Ethernet gadget driver will use the EEM
- protocol rather than ECM. If unsure, say "n".
+ help
+ CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+ and therefore can be supported by more hardware. Technically ECM and
+ EEM are designed for different applications. The ECM model extends
+ the network interface to the target (e.g. a USB cable modem), and the
+ EEM model is for mobile devices to communicate with hosts using
+ ethernet over USB. For Linux gadgets, however, the interface with
+ the host is the same (a usbX device), so the differences are minimal.
+
+ If you say "y" here, the Ethernet gadget driver will use the EEM
+ protocol rather than ECM. If unsure, say "n".
config USB_G_NCM
tristate "Network Control Model (NCM) support"
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index af16672d5118..59be2d8417c9 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -105,7 +105,6 @@ static struct usb_function *f_msg;
*/
static int acm_ms_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_acm = usb_get_function(f_acm_inst);
if (IS_ERR(f_acm))
return PTR_ERR(f_acm);
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index fd5595ac5bf7..f18f77584fc2 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -105,7 +105,6 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static int msg_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int msg_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_msg = usb_get_function(fi_msg);
if (IS_ERR(f_msg))
return PTR_ERR(f_msg);
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index de30d7628eef..da44f89f5e73 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -97,6 +97,36 @@ static unsigned n_ports = 1;
module_param(n_ports, uint, 0);
MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
+static bool enable = true;
+
+static int switch_gserial_enable(bool do_enable);
+
+static int enable_set(const char *s, const struct kernel_param *kp)
+{
+ bool do_enable;
+ int ret;
+
+ if (!s) /* called for no-arg enable == default */
+ return 0;
+
+ ret = strtobool(s, &do_enable);
+ if (ret || enable == do_enable)
+ return ret;
+
+ ret = switch_gserial_enable(do_enable);
+ if (!ret)
+ enable = do_enable;
+
+ return ret;
+}
+
+static const struct kernel_param_ops enable_ops = {
+ .set = enable_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(enable, &enable_ops, &enable, 0644);
+
/*-------------------------------------------------------------------------*/
static struct usb_configuration serial_config_driver = {
@@ -240,6 +270,19 @@ static struct usb_composite_driver gserial_driver = {
.unbind = gs_unbind,
};
+static int switch_gserial_enable(bool do_enable)
+{
+ if (!serial_config_driver.label)
+ /* init() was not called, yet */
+ return 0;
+
+ if (do_enable)
+ return usb_composite_probe(&gserial_driver);
+
+ usb_composite_unregister(&gserial_driver);
+ return 0;
+}
+
static int __init init(void)
{
/* We *could* export two configs; that'd be much cleaner...
@@ -266,12 +309,16 @@ static int __init init(void)
}
strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;
+ if (!enable)
+ return 0;
+
return usb_composite_probe(&gserial_driver);
}
module_init(init);
static void __exit cleanup(void)
{
- usb_composite_unregister(&gserial_driver);
+ if (enable)
+ usb_composite_unregister(&gserial_driver);
}
module_exit(cleanup);
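
The serial.c hunks make g_serial's "enable" parameter live: writing it through sysfs goes through enable_set(), which registers or unregisters the composite driver before committing the new value. A kernel-style sketch of a writable parameter with side effects (in-tree build only; do_switch() stands in for usb_composite_probe()/usb_composite_unregister()):

	#include <linux/module.h>
	#include <linux/moduleparam.h>
	#include <linux/string.h>

	static bool enabled = true;

	static int do_switch(bool on)		/* placeholder for the real work */
	{
		return 0;
	}

	static int enabled_set(const char *val, const struct kernel_param *kp)
	{
		bool want;
		int ret = strtobool(val, &want);

		if (ret || want == enabled)
			return ret;
		ret = do_switch(want);
		if (!ret)
			enabled = want;		/* commit only if the switch worked */
		return ret;
	}

	static const struct kernel_param_ops enabled_ops = {
		.set = enabled_set,
		.get = param_get_bool,
	};
	module_param_cb(enabled, &enabled_ops, &enabled, 0644);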
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d354036ff6c8..ae70ce29d5e4 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -120,10 +120,10 @@ config USB_FOTG210_UDC
dynamically linked module called "fotg210_udc".
config USB_GR_UDC
- tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
- depends on HAS_DMA
- help
- Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
+ tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
+ depends on HAS_DMA
+ help
+ Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
VHDL IP core library.
config USB_OMAP
@@ -441,6 +441,17 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_TEGRA_XUDC
+ tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on PHY_TEGRA_XUSB
+ help
+ Enables NVIDIA Tegra USB 3.0 device mode controller driver.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "tegra_xudc" and force all
+ gadget drivers to also be dynamically linked.
+
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 897f648f3cf1..f6777e654a8e 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
+obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 194ffb1ed462..1b2b548c59a0 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1808,7 +1808,6 @@ static int at91udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct at91_udc *udc;
int retval;
- struct resource *res;
struct at91_ep *ep;
int i;
@@ -1839,8 +1838,7 @@ static int at91udc_probe(struct platform_device *pdev)
ep->is_pingpong = 1;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr))
return PTR_ERR(udc->udp_baseaddr);
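
This at91 hunk, and the matching ones in bcm63xx, bdc, gr_udc, lpc32xx, pxa25x/27x, r8a66597, renesas_usb3, and s3c-hsudc below, all fold the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which looks up, requests, and maps the indexed MEM resource in one call with the same error semantics. A sketch of a probe using it (hypothetical driver name):

	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int mydrv_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* one call: fetch IORESOURCE_MEM index 0, request and ioremap it */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}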
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 1d0d8952a74b..8a42768e3213 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ctype.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/delay.h>
@@ -226,7 +227,7 @@ static void usba_init_debugfs(struct usba_udc *udc)
struct dentry *root;
struct resource *regs_resource;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 97b16463f3ef..54501814dc3f 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2248,7 +2248,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
return;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
@@ -2282,7 +2282,6 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
struct bcm63xx_udc *udc;
- struct resource *res;
int rc = -ENOMEM, i, irq;
udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
@@ -2298,13 +2297,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->usbd_regs = devm_ioremap_resource(dev, res);
+ udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->usbd_regs))
return PTR_ERR(udc->usbd_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- udc->iudma_regs = devm_ioremap_resource(dev, res);
+ udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(udc->iudma_regs))
return PTR_ERR(udc->iudma_regs);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index cc4a16e253ac..02a3a774670b 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -480,7 +480,6 @@ static void bdc_phy_exit(struct bdc *bdc)
static int bdc_probe(struct platform_device *pdev)
{
struct bdc *bdc;
- struct resource *res;
int ret = -ENOMEM;
int irq;
u32 temp;
@@ -508,8 +507,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdc->regs = devm_ioremap_resource(dev, res);
+ bdc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdc->regs)) {
dev_err(dev, "ioremap error\n");
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7bfd58c846f7..248426a3e88a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -195,7 +195,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc)
break;
case BDC_LINK_STATE_U0:
if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
- bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
if (bdc->gadget.speed == USB_SPEED_SUPER) {
bdc_function_wake_fh(bdc, 0);
bdc->devstatus |= FUNC_WAKE_ISSUED;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 3d499d93c083..4c9d1e49d5ed 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1321,7 +1321,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
u32 this_sg;
bool next_sg;
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
rbuf = req->req.buf + req->req.actual;
if (!urb->num_sgs) {
@@ -1409,7 +1409,7 @@ top:
/* FIXME update emulated data toggle too */
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
if (unlikely(len == 0))
is_short = 1;
else {
@@ -1830,7 +1830,7 @@ restart:
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
- if (usb_pipein(urb->pipe))
+ if (usb_urb_dir_in(urb))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
@@ -2385,7 +2385,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
s = "?";
break;
} s; }),
- ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
case PIPE_CONTROL: \
@@ -2725,7 +2725,7 @@ static struct platform_driver dummy_hcd_driver = {
};
/*-------------------------------------------------------------------------*/
-#define MAX_NUM_UDC 2
+#define MAX_NUM_UDC 32
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
index 2c537a904ee7..53ca0ff7c2cb 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.h
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -333,8 +333,8 @@ struct qe_udc {
u32 resume_state; /* USB state to resume*/
u32 usb_state; /* USB current state */
u32 usb_next_state; /* USB next state */
- u32 ep0_state; /* Enpoint zero state */
- u32 ep0_dir; /* Enpoint zero direction: can be
+ u32 ep0_state; /* Endpoint zero state */
+ u32 ep0_dir; /* Endpoint zero direction: can be
USB_DIR_IN or USB_DIR_OUT*/
u32 usb_sof_count; /* SOF count */
u32 errors; /* USB ERRORs count */
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 9a05863b2876..ec6eda426223 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1052,10 +1052,11 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
u32 bitmask;
struct ep_queue_head *qh;
- ep = container_of(_ep, struct fsl_ep, ep);
- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0))
+ if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF))
return -ENODEV;
+ ep = container_of(_ep, struct fsl_ep, ep);
+
udc = (struct fsl_udc *)ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
@@ -1208,7 +1209,7 @@ static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int fsl_pullup(struct usb_gadget *gadget, int is_on)
{
@@ -1595,14 +1596,13 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
struct fsl_req *curr_req)
{
struct ep_td_struct *curr_td;
- int td_complete, actual, remaining_length, j, tmp;
+ int actual, remaining_length, j, tmp;
int status = 0;
int errors = 0;
struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
int direction = pipe % 2;
curr_td = curr_req->head;
- td_complete = 0;
actual = curr_req->req.length;
for (j = 0; j < curr_req->dtd_count; j++) {
@@ -1647,11 +1647,9 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
status = -EPROTO;
break;
} else {
- td_complete++;
break;
}
} else {
- td_complete++;
VDBG("dTD transmitted successful");
}
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 7a0e9a58c2d8..64d80c65bb96 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
@@ -208,7 +209,7 @@ static void gr_dfs_create(struct gr_udc *dev)
{
const char *name = "gr_udc_state";
- dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
+ dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
}
@@ -2118,7 +2119,6 @@ static int gr_request_irq(struct gr_udc *dev, int irq)
static int gr_probe(struct platform_device *pdev)
{
struct gr_udc *dev;
- struct resource *res;
struct gr_regs __iomem *regs;
int retval;
u32 status;
@@ -2128,8 +2128,7 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
dev->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index bf6c81e2f8cc..d14b2bb3f67c 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3000,7 +3000,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct lpc32xx_udc *udc;
int retval, i;
- struct resource *res;
dma_addr_t dma_handle;
struct device_node *isp1301_node;
@@ -3048,9 +3047,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
* IORESOURCE_IRQ, USB device interrupt number
* IORESOURCE_IRQ, USB transceiver interrupt number
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
spin_lock_init(&udc->lock);
@@ -3061,7 +3057,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
return udc->udp_irq[i];
}
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr)) {
dev_err(udc->dev, "IO map failure\n");
return PTR_ERR(udc->udp_baseaddr);
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
index 982625b7197a..66b84f792f64 100644
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -138,7 +138,7 @@ struct mv_u3d_op_regs {
u32 doorbell; /* doorbell register */
};
-/* control enpoint enable registers */
+/* control endpoint enable registers */
struct epxcr {
u32 epxoutcr0; /* ep out control 0 register */
u32 epxoutcr1; /* ep out control 1 register */
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 265dab2bbfac..3344fb8c4181 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1519,7 +1519,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
dma_pool_free(dev->data_requests, td, addr);
- td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index d4be53559f2e..cfafdd92c2a8 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2321,7 +2321,6 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
struct pxa25x_udc *dev = &memory;
int retval, irq;
u32 chiprev;
- struct resource *res;
pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
@@ -2367,8 +2366,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 014233252299..78902d13fc27 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -207,7 +207,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
{
struct dentry *root;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
@@ -2356,7 +2356,6 @@ MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
*/
static int pxa_udc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct pxa_udc *udc = &memory;
int retval = 0, gpio;
struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
@@ -2378,8 +2377,7 @@ static int pxa_udc_probe(struct platform_device *pdev)
udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->regs))
return PTR_ERR(udc->regs);
udc->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 11e25a3f4f1f..582a16165ea9 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1838,7 +1838,7 @@ static int r8a66597_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
char clk_name[8];
- struct resource *res, *ires;
+ struct resource *ires;
int irq;
void __iomem *reg = NULL;
struct r8a66597 *r8a66597 = NULL;
@@ -1846,8 +1846,7 @@ static int r8a66597_probe(struct platform_device *pdev)
int i;
unsigned long irq_trigger;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 33703140233a..c5c3c14df67a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -775,6 +775,18 @@ static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3)
usb3_transition_to_default_state(usb3, false);
}
+static void usb3_irq_epc_int_1_suspend(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_SPND);
+
+ if (usb3->gadget.speed != USB_SPEED_UNKNOWN &&
+ usb3->gadget.state != USB_STATE_NOTATTACHED) {
+ if (usb3->driver && usb3->driver->suspend)
+ usb3->driver->suspend(&usb3->gadget);
+ usb_gadget_set_state(&usb3->gadget, USB_STATE_SUSPENDED);
+ }
+}
+
static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3)
{
usb3_stop_usb3_connection(usb3);
@@ -860,6 +872,9 @@ static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1)
if (int_sta_1 & USB_INT_1_B2_RSUM)
usb3_irq_epc_int_1_resume(usb3);
+ if (int_sta_1 & USB_INT_1_B2_SPND)
+ usb3_irq_epc_int_1_suspend(usb3);
+
if (int_sta_1 & USB_INT_1_SPEED)
usb3_irq_epc_int_1_speed(usb3);
@@ -2536,7 +2551,7 @@ static const struct file_operations renesas_usb3_b_device_fops = {
static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
struct device *dev)
{
- usb3->dentry = debugfs_create_dir(dev_name(dev), NULL);
+ usb3->dentry = debugfs_create_dir(dev_name(dev), usb_debug_root);
debugfs_create_file("b_device", 0644, usb3->dentry, usb3,
&renesas_usb3_b_device_fops);
@@ -2733,7 +2748,6 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- struct resource *res;
int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2752,8 +2766,7 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (!usb3)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- usb3->reg = devm_ioremap_resource(&pdev->dev, res);
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usb3->reg))
return PTR_ERR(usb3->reg);
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 858993c73442..21252fbc0319 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1263,7 +1263,6 @@ static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
static int s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
int ret, i;
@@ -1290,9 +1289,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
goto err_supplies;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hsudc->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsudc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsudc->regs)) {
ret = PTR_ERR(hsudc->regs);
goto err_res;
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index f82208fbc249..0507a2ca0f55 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1978,7 +1978,8 @@ static int __init udc_init(void)
dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
- s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
+ s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name,
+ usb_debug_root);
retval = platform_driver_register(&udc_driver_24x0);
if (retval)
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
new file mode 100644
index 000000000000..634c2c19a176
--- /dev/null
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -0,0 +1,3810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra XUSB device mode controller
+ *
+ * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/role.h>
+#include <linux/workqueue.h>
+
+/* XUSB_DEV registers */
+#define SPARAM 0x000
+#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
+#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
+#define DB 0x004
+#define DB_TARGET_MASK GENMASK(15, 8)
+#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
+#define DB_STREAMID_MASK GENMASK(31, 16)
+#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
+#define ERSTSZ 0x008
+#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
+#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
+#define ERSTXBALO(x) (0x010 + 8 * (x))
+#define ERSTXBAHI(x) (0x014 + 8 * (x))
+#define ERDPLO 0x020
+#define ERDPLO_EHB BIT(3)
+#define ERDPHI 0x024
+#define EREPLO 0x028
+#define EREPLO_ECS BIT(0)
+#define EREPLO_SEGI BIT(1)
+#define EREPHI 0x02c
+#define CTRL 0x030
+#define CTRL_RUN BIT(0)
+#define CTRL_LSE BIT(1)
+#define CTRL_IE BIT(4)
+#define CTRL_SMI_EVT BIT(5)
+#define CTRL_SMI_DSE BIT(6)
+#define CTRL_EWE BIT(7)
+#define CTRL_DEVADDR_MASK GENMASK(30, 24)
+#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
+#define CTRL_ENABLE BIT(31)
+#define ST 0x034
+#define ST_RC BIT(0)
+#define ST_IP BIT(4)
+#define RT_IMOD 0x038
+#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
+#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
+#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
+#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
+#define PORTSC 0x03c
+#define PORTSC_CCS BIT(0)
+#define PORTSC_PED BIT(1)
+#define PORTSC_PR BIT(4)
+#define PORTSC_PLS_SHIFT 5
+#define PORTSC_PLS_MASK GENMASK(8, 5)
+#define PORTSC_PLS_U0 0x0
+#define PORTSC_PLS_U2 0x2
+#define PORTSC_PLS_U3 0x3
+#define PORTSC_PLS_DISABLED 0x4
+#define PORTSC_PLS_RXDETECT 0x5
+#define PORTSC_PLS_INACTIVE 0x6
+#define PORTSC_PLS_RESUME 0xf
+#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
+#define PORTSC_PS_SHIFT 10
+#define PORTSC_PS_MASK GENMASK(13, 10)
+#define PORTSC_PS_UNDEFINED 0x0
+#define PORTSC_PS_FS 0x1
+#define PORTSC_PS_LS 0x2
+#define PORTSC_PS_HS 0x3
+#define PORTSC_PS_SS 0x4
+#define PORTSC_LWS BIT(16)
+#define PORTSC_CSC BIT(17)
+#define PORTSC_WRC BIT(19)
+#define PORTSC_PRC BIT(21)
+#define PORTSC_PLC BIT(22)
+#define PORTSC_CEC BIT(23)
+#define PORTSC_WPR BIT(30)
+#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
+ PORTSC_PLC | PORTSC_CEC)
+#define ECPLO 0x040
+#define ECPHI 0x044
+#define MFINDEX 0x048
+#define MFINDEX_FRAME_SHIFT 3
+#define MFINDEX_FRAME_MASK GENMASK(13, 3)
+#define PORTPM 0x04c
+#define PORTPM_L1S_MASK GENMASK(1, 0)
+#define PORTPM_L1S_DROP 0x0
+#define PORTPM_L1S_ACCEPT 0x1
+#define PORTPM_L1S_NYET 0x2
+#define PORTPM_L1S_STALL 0x3
+#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
+#define PORTPM_RWE BIT(3)
+#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
+#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
+#define PORTPM_FLA BIT(24)
+#define PORTPM_VBA BIT(25)
+#define PORTPM_WOC BIT(26)
+#define PORTPM_WOD BIT(27)
+#define PORTPM_U1E BIT(28)
+#define PORTPM_U2E BIT(29)
+#define PORTPM_FRWE BIT(30)
+#define PORTPM_PNG_CYA BIT(31)
+#define EP_HALT 0x050
+#define EP_PAUSE 0x054
+#define EP_RELOAD 0x058
+#define EP_STCHG 0x05c
+#define DEVNOTIF_LO 0x064
+#define DEVNOTIF_LO_TRIG BIT(0)
+#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
+#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
+#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
+#define DEVNOTIF_HI 0x068
+#define PORTHALT 0x06c
+#define PORTHALT_HALT_LTSSM BIT(0)
+#define PORTHALT_HALT_REJECT BIT(1)
+#define PORTHALT_STCHG_REQ BIT(20)
+#define PORTHALT_STCHG_INTR_EN BIT(24)
+#define PORT_TM 0x070
+#define EP_THREAD_ACTIVE 0x074
+#define EP_STOPPED 0x078
+#define HSFSPI_COUNT0 0x100
+#define HSFSPI_COUNT13 0x134
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
+ HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
+#define BLCG 0x840
+#define SSPX_CORE_CNT0 0x610
+#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
+#define SSPX_CORE_CNT30 0x688
+#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
+ SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
+#define SSPX_CORE_CNT32 0x690
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
+ SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_PADCTL4 0x750
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
+ SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
+#define BLCG_DFPCI BIT(0)
+#define BLCG_UFPCI BIT(1)
+#define BLCG_FE BIT(2)
+#define BLCG_COREPLL_PWRDN BIT(8)
+#define BLCG_IOPLL_0_PWRDN BIT(9)
+#define BLCG_IOPLL_1_PWRDN BIT(10)
+#define BLCG_IOPLL_2_PWRDN BIT(11)
+#define BLCG_ALL 0x1ff
+#define CFG_DEV_SSPI_XFER 0x858
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
+ CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
+#define CFG_DEV_FE 0x85c
+#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
+#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
+#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
+#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
+#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
+
+/* FPCI registers */
+#define XUSB_DEV_CFG_1 0x004
+#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
+#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
+#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
+#define XUSB_DEV_CFG_4 0x010
+#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
+#define XUSB_DEV_CFG_5 0x014
+
+/* IPFS registers */
+#define XUSB_DEV_CONFIGURATION_0 0x180
+#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
+#define XUSB_DEV_INTR_MASK_0 0x188
+#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
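+
+/*
+ * The controller is accessed through up to three register apertures:
+ * the XUSB_DEV core registers ("base"), an FPCI configuration space
+ * and, on SoCs with has_ipfs set, an IPFS wrapper. See the fpci_,
+ * ipfs_ and xudc_ prefixed read/write helpers below.
+ */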
+
+struct tegra_xudc_ep_context {
+ __le32 info0;
+ __le32 info1;
+ __le32 deq_lo;
+ __le32 deq_hi;
+ __le32 tx_info;
+ __le32 rsvd[11];
+};
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+
+#define EP_TYPE_INVALID 0
+#define EP_TYPE_ISOCH_OUT 1
+#define EP_TYPE_BULK_OUT 2
+#define EP_TYPE_INTERRUPT_OUT 3
+#define EP_TYPE_CONTROL 4
+#define EP_TYPE_ISOCH_IN 5
+#define EP_TYPE_BULK_IN 6
+#define EP_TYPE_INTERRUPT_IN 7
+
+#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
+static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
+{ \
+ return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
+} \
+static inline void \
+ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ ctx->member = cpu_to_le32(tmp); \
+}
+
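+/*
+ * Each invocation below expands to a read/write accessor pair for one
+ * bit-field of the endpoint context; for example,
+ * BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7) yields ep_ctx_read_state()
+ * and ep_ctx_write_state(), which read-modify-write bits [2:0] of
+ * info0 with the appropriate little-endian conversions.
+ */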
+BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
+BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
+BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
+BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
+BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
+BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
+BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
+BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
+BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
+BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
+BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
+BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
+BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
+BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
+BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
+BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
+BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
+BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
+BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
+BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
+
+static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
+{
+ return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
+ (ep_ctx_read_deq_lo(ctx) << 4);
+}
+
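+/*
+ * Note: deq_lo holds bits [31:4] of the dequeue pointer (see the
+ * accessor definitions above), so addresses passed here must be
+ * 16-byte aligned; the low four bits are discarded by the shift.
+ */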
+static inline void
+ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
+{
+ ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
+ ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
+}
+
+struct tegra_xudc_trb {
+ __le32 data_lo;
+ __le32 data_hi;
+ __le32 status;
+ __le32 control;
+};
+
+#define TRB_TYPE_RSVD 0
+#define TRB_TYPE_NORMAL 1
+#define TRB_TYPE_SETUP_STAGE 2
+#define TRB_TYPE_DATA_STAGE 3
+#define TRB_TYPE_STATUS_STAGE 4
+#define TRB_TYPE_ISOCH 5
+#define TRB_TYPE_LINK 6
+#define TRB_TYPE_TRANSFER_EVENT 32
+#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
+#define TRB_TYPE_STREAM 48
+#define TRB_TYPE_SETUP_PACKET_EVENT 63
+
+#define TRB_CMPL_CODE_INVALID 0
+#define TRB_CMPL_CODE_SUCCESS 1
+#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
+#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
+#define TRB_CMPL_CODE_USB_TRANS_ERR 4
+#define TRB_CMPL_CODE_TRB_ERR 5
+#define TRB_CMPL_CODE_STALL 6
+#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
+#define TRB_CMPL_CODE_SHORT_PACKET 13
+#define TRB_CMPL_CODE_RING_UNDERRUN 14
+#define TRB_CMPL_CODE_RING_OVERRUN 15
+#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
+#define TRB_CMPL_CODE_STOPPED 26
+#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
+#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
+#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
+#define TRB_CMPL_CODE_HOST_REJECTED 221
+#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
+#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
+
+#define BUILD_TRB_RW(name, member, shift, mask) \
+static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
+{ \
+ return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
+} \
+static inline void \
+trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ trb->member = cpu_to_le32(tmp); \
+}
+
+BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
+BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
+BUILD_TRB_RW(seq_num, status, 0, 0xffff)
+BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
+BUILD_TRB_RW(td_size, status, 17, 0x1f)
+BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
+BUILD_TRB_RW(cycle, control, 0, 0x1)
+BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
+BUILD_TRB_RW(isp, control, 2, 0x1)
+BUILD_TRB_RW(chain, control, 4, 0x1)
+BUILD_TRB_RW(ioc, control, 5, 0x1)
+BUILD_TRB_RW(type, control, 10, 0x3f)
+BUILD_TRB_RW(stream_id, control, 16, 0xffff)
+BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
+BUILD_TRB_RW(tlbpc, control, 16, 0xf)
+BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
+BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
+BUILD_TRB_RW(sia, control, 31, 0x1)
+
+static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
+{
+ return ((u64)trb_read_data_hi(trb) << 32) |
+ trb_read_data_lo(trb);
+}
+
+static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
+{
+ trb_write_data_lo(trb, lower_32_bits(addr));
+ trb_write_data_hi(trb, upper_32_bits(addr));
+}
+
+struct tegra_xudc_request {
+ struct usb_request usb_req;
+
+ size_t buf_queued;
+ unsigned int trbs_queued;
+ unsigned int trbs_needed;
+ bool need_zlp;
+
+ struct tegra_xudc_trb *first_trb;
+ struct tegra_xudc_trb *last_trb;
+
+ struct list_head list;
+};
+
+struct tegra_xudc_ep {
+ struct tegra_xudc *xudc;
+ struct usb_ep usb_ep;
+ unsigned int index;
+ char name[8];
+
+ struct tegra_xudc_ep_context *context;
+
+#define XUDC_TRANSFER_RING_SIZE 64
+ struct tegra_xudc_trb *transfer_ring;
+ dma_addr_t transfer_ring_phys;
+
+ unsigned int enq_ptr;
+ unsigned int deq_ptr;
+ bool pcs;
+ bool ring_full;
+ bool stream_rejected;
+
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+};
+
+struct tegra_xudc_sel_timing {
+ __u8 u1sel;
+ __u8 u1pel;
+ __le16 u2sel;
+ __le16 u2pel;
+};
+
+enum tegra_xudc_setup_state {
+ WAIT_FOR_SETUP,
+ DATA_STAGE_XFER,
+ DATA_STAGE_RECV,
+ STATUS_STAGE_XFER,
+ STATUS_STAGE_RECV,
+};
+
+struct tegra_xudc_setup_packet {
+ struct usb_ctrlrequest ctrl_req;
+ unsigned int seq_num;
+};
+
+struct tegra_xudc_save_regs {
+ u32 ctrl;
+ u32 portpm;
+};
+
+struct tegra_xudc {
+ struct device *dev;
+ const struct tegra_xudc_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+
+ spinlock_t lock;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+#define XUDC_NR_EVENT_RINGS 2
+#define XUDC_EVENT_RING_SIZE 4096
+ struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
+ dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
+ unsigned int event_ring_index;
+ unsigned int event_ring_deq_ptr;
+ bool ccs;
+
+#define XUDC_NR_EPS 32
+ struct tegra_xudc_ep ep[XUDC_NR_EPS];
+ struct tegra_xudc_ep_context *ep_context;
+ dma_addr_t ep_context_phys;
+
+ struct device *genpd_dev_device;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_device;
+ struct device_link *genpd_dl_ss;
+
+ struct dma_pool *transfer_ring_pool;
+
+ bool queued_setup_packet;
+ struct tegra_xudc_setup_packet setup_packet;
+ enum tegra_xudc_setup_state setup_state;
+ u16 setup_seq_num;
+
+ u16 dev_addr;
+ u16 isoch_delay;
+ struct tegra_xudc_sel_timing sel_timing;
+ u8 test_mode_pattern;
+ u16 status_buf;
+ struct tegra_xudc_request *ep0_req;
+
+ bool pullup;
+
+ unsigned int nr_enabled_eps;
+ unsigned int nr_isoch_eps;
+
+ unsigned int device_state;
+ unsigned int resume_state;
+
+ int irq;
+
+ void __iomem *base;
+ resource_size_t phys_base;
+ void __iomem *ipfs;
+ void __iomem *fpci;
+
+ struct regulator_bulk_data *supplies;
+
+ struct clk_bulk_data *clks;
+
+ enum usb_role device_mode;
+ struct usb_role_switch *usb_role_sw;
+ struct work_struct usb_role_sw_work;
+
+ struct phy *usb3_phy;
+ struct phy *utmi_phy;
+
+ struct tegra_xudc_save_regs saved_regs;
+ bool suspended;
+ bool powergated;
+
+ struct completion disconnect_complete;
+
+ bool selfpowered;
+
+#define TOGGLE_VBUS_WAIT_MS 100
+ struct delayed_work plc_reset_work;
+ bool wait_csc;
+
+ struct delayed_work port_reset_war_work;
+ bool wait_for_sec_prc;
+};
+
+#define XUDC_TRB_MAX_BUFFER_SIZE 65536
+#define XUDC_MAX_ISOCH_EPS 4
+#define XUDC_INTERRUPT_MODERATION_US 0
+
+static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
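+/*
+ * Note: the 64-byte wMaxPacketSize above matches full-/high-speed
+ * control endpoints; SuperSpeed mandates a 512-byte control maxpacket,
+ * so this descriptor cannot be used unmodified on a SuperSpeed
+ * connection.
+ */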
+
+struct tegra_xudc_soc {
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ const char * const *clock_names;
+ unsigned int num_clks;
+ bool u1_enable;
+ bool u2_enable;
+ bool lpm_enable;
+ bool invalid_seq_num;
+ bool pls_quirk;
+ bool port_reset_quirk;
+ bool has_ipfs;
+};
+
+static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->fpci + offset);
+}
+
+static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->fpci + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->ipfs + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->ipfs + offset);
+}
+
+static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->base + offset);
+}
+
+static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->base + offset);
+}
+
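+/*
+ * Poll a device register until (value & mask) == val, sampling every
+ * 1 us for at most 100 us. The atomic poll variant is used because
+ * these helpers may be called with the xudc spinlock held.
+ */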
+static inline int xudc_readl_poll(struct tegra_xudc *xudc,
+ unsigned int offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ return readl_poll_timeout_atomic(xudc->base + offset, regval,
+ (regval & mask) == val, 1, 100);
+}
+
+static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct tegra_xudc, gadget);
+}
+
+static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct tegra_xudc_ep, usb_ep);
+}
+
+static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
+{
+ return container_of(req, struct tegra_xudc_request, usb_req);
+}
+
+static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(xudc->dev,
+ "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
+ type, trb, trb->data_lo, trb->data_hi, trb->status,
+ trb->control);
+}
+
+static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
+{
+ int err;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ err = phy_power_on(xudc->utmi_phy);
+ if (err < 0)
+		dev_err(xudc->dev, "utmi phy power on failed %d\n", err);
+
+ err = phy_power_on(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);
+
+ dev_dbg(xudc->dev, "device mode on\n");
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+
+ xudc->device_mode = USB_ROLE_DEVICE;
+}
+
+static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
+{
+ bool connected = false;
+ u32 pls, val;
+ int err;
+
+ dev_dbg(xudc->dev, "device mode off\n");
+
+ connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
+
+ reinit_completion(&xudc->disconnect_complete);
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, false);
+
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ /* Direct link to U0 if disconnected in RESUME or U2. */
+ if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
+ (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
+ val = xudc_readl(xudc, PORTPM);
+ val |= PORTPM_FRWE;
+ xudc_writel(xudc, val, PORTPM);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ xudc->device_mode = USB_ROLE_NONE;
+
+ /* Wait for disconnect event. */
+ if (connected)
+ wait_for_completion(&xudc->disconnect_complete);
+
+ /* Make sure interrupt handler has completed before powergating. */
+ synchronize_irq(xudc->irq);
+
+ err = phy_power_off(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);
+
+ err = phy_power_off(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);
+
+ pm_runtime_put(xudc->dev);
+}
+
+static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
+{
+ struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
+ usb_role_sw_work);
+
+ if (!xudc->usb_role_sw ||
+ usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE)
+ tegra_xudc_device_mode_on(xudc);
+ else
+ tegra_xudc_device_mode_off(xudc);
+}
+
+static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ dev_dbg(dev, "%s role is %d\n", __func__, role);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (!xudc->suspended)
+ schedule_work(&xudc->usb_role_sw_work);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return 0;
+}
+
+static void tegra_xudc_plc_reset_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
+ plc_reset_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->wait_csc) {
+ u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ if (pls == PORTSC_PLS_INACTIVE) {
+ dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl,
+ false);
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+ xudc->wait_csc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static void tegra_xudc_port_reset_war_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc =
+ container_of(dwork, struct tegra_xudc, port_reset_war_work);
+ unsigned long flags;
+ u32 pls;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+	if ((xudc->device_mode == USB_ROLE_DEVICE) &&
+	    xudc->wait_for_sec_prc) {
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+ dev_dbg(xudc->dev, "pls = %x\n", pls);
+
+ if (pls == PORTSC_PLS_DISABLED) {
+ dev_dbg(xudc->dev, "toggle vbus\n");
+			/* PRC didn't complete within 100 ms; toggle VBUS */
+ ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy);
+ if (ret == 1)
+				xudc->wait_for_sec_prc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ unsigned int index;
+
+ index = trb - ep->transfer_ring;
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return 0;
+
+ return (ep->transfer_ring_phys + index * sizeof(*trb));
+}
+
+static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
+ dma_addr_t addr)
+{
+ struct tegra_xudc_trb *trb;
+ unsigned int index;
+
+ index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return NULL;
+
+ trb = &ep->transfer_ring[index];
+
+ return trb;
+}
+
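+/*
+ * Force the controller to re-read an endpoint's context: write the
+ * endpoint's bit to EP_RELOAD and wait for the hardware to clear it.
+ */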
+static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_writel(xudc, BIT(ep), EP_RELOAD);
+ xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
+}
+
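+/*
+ * The pause/halt helpers below all follow the same handshake: update
+ * EP_PAUSE or EP_HALT, wait for the state-change bit(s) to latch in
+ * EP_STCHG, then acknowledge by writing the bit(s) back to EP_STCHG.
+ */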
+static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+
+ xudc_writel(xudc, 0, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!val)
+ return;
+ xudc_writel(xudc, 0, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
+ xudc_writel(xudc, BIT(ep), EP_STOPPED);
+}
+
+static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
+}
+
+static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req, int status)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
+ req, ep->index, status);
+
+ if (likely(req->usb_req.status == -EINPROGRESS))
+ req->usb_req.status = status;
+
+ list_del_init(&req->list);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ spin_unlock(&xudc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&xudc->lock);
+}
+
+static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
+{
+ struct tegra_xudc_request *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ tegra_xudc_req_done(ep, req, status);
+ }
+}
+
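+/*
+ * Number of TRB slots that can still be enqueued. One slot is always
+ * kept unused so that a full ring can be distinguished from an empty
+ * one; when the free space wraps past the end of the ring, a second
+ * slot is subtracted for the link TRB occupying the last entry (see
+ * setup_link_trb()).
+ */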
+static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
+{
+ if (ep->ring_full)
+ return 0;
+
+ if (ep->deq_ptr > ep->enq_ptr)
+ return ep->deq_ptr - ep->enq_ptr - 1;
+
+ return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
+}
+
+static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb,
+ bool ioc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ dma_addr_t buf_addr;
+ size_t len;
+
+ len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
+ req->buf_queued);
+ if (len > 0)
+ buf_addr = req->usb_req.dma + req->buf_queued;
+ else
+ buf_addr = 0;
+
+ trb_write_data_ptr(trb, buf_addr);
+
+ trb_write_transfer_len(trb, len);
+ trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
+
+ if (req->trbs_queued == req->trbs_needed - 1 ||
+ (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
+ trb_write_chain(trb, 0);
+ else
+ trb_write_chain(trb, 1);
+
+ trb_write_ioc(trb, ioc);
+
+ if (usb_endpoint_dir_out(ep->desc) ||
+ (usb_endpoint_xfer_control(ep->desc) &&
+ (xudc->setup_state == DATA_STAGE_RECV)))
+ trb_write_isp(trb, 1);
+ else
+ trb_write_isp(trb, 0);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == DATA_STAGE_RECV)
+ trb_write_type(trb, TRB_TYPE_DATA_STAGE);
+ else
+ trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
+
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == STATUS_STAGE_XFER)
+ trb_write_data_stage_dir(trb, 1);
+ else
+ trb_write_data_stage_dir(trb, 0);
+ } else if (usb_endpoint_xfer_isoc(ep->desc)) {
+ trb_write_type(trb, TRB_TYPE_ISOCH);
+ trb_write_sia(trb, 1);
+ trb_write_frame_id(trb, 0);
+ trb_write_tlbpc(trb, 0);
+ } else if (usb_ss_max_streams(ep->comp_desc)) {
+ trb_write_type(trb, TRB_TYPE_STREAM);
+ trb_write_stream_id(trb, req->usb_req.stream_id);
+ } else {
+ trb_write_type(trb, TRB_TYPE_NORMAL);
+ trb_write_stream_id(trb, 0);
+ }
+
+ trb_write_cycle(trb, ep->pcs);
+
+ req->trbs_queued++;
+ req->buf_queued += len;
+
+ dump_trb(xudc, "TRANSFER", trb);
+}
+
+static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ unsigned int i, count, available;
+ bool wait_td = false;
+
+ available = ep_available_trbs(ep);
+ count = req->trbs_needed - req->trbs_queued;
+ if (available < count) {
+ count = available;
+ ep->ring_full = true;
+ }
+
+ /*
+	 * To generate a zero-length packet on the USB bus, SW needs to
+	 * schedule a standalone zero-length TD. According to the HW's
+	 * behavior, SW needs to schedule TDs in different ways for
+	 * different endpoint types.
+	 *
+	 * For control endpoints:
+	 *	- Data stage TD (IOC = 1, CH = 0)
+	 *	- Ring doorbell and wait for the transfer event
+ * - Data stage TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ *
+ * For bulk and interrupt endpoints:
+ * - Normal transfer TD (IOC = 0, CH = 0)
+ * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ */
+
+ if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
+ wait_td = true;
+
+ if (!req->first_trb)
+ req->first_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ for (i = 0; i < count; i++) {
+ struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
+ bool ioc = false;
+
+ if ((i == count - 1) || (wait_td && i == count - 2))
+ ioc = true;
+
+ tegra_xudc_queue_one_trb(ep, req, trb, ioc);
+ req->last_trb = trb;
+
+ ep->enq_ptr++;
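+		/*
+		 * Reaching the last slot means we have hit the link TRB:
+		 * give it the current cycle bit, toggle the producer cycle
+		 * state and wrap back to the start of the ring.
+		 */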
+ if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
+ trb = &ep->transfer_ring[ep->enq_ptr];
+ trb_write_cycle(trb, ep->pcs);
+ ep->pcs = !ep->pcs;
+ ep->enq_ptr = 0;
+ }
+
+ if (ioc)
+ break;
+ }
+
+ return count;
+}
+
+static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ u32 val;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ val = DB_TARGET(ep->index);
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ val |= DB_STREAMID(xudc->setup_seq_num);
+ } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
+ struct tegra_xudc_request *req;
+
+ /* Don't ring doorbell if the stream has been rejected. */
+ if (ep->stream_rejected)
+ return;
+
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ val |= DB_STREAMID(req->usb_req.stream_id);
+ }
+
+ dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
+ xudc_writel(xudc, val, DB);
+}
+
+static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc_request *req;
+ bool trbs_queued = false;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (ep->ring_full)
+ break;
+
+ if (tegra_xudc_queue_trbs(ep, req) > 0)
+ trbs_queued = true;
+ }
+
+ if (trbs_queued)
+ tegra_xudc_ep_ring_doorbell(ep);
+}
+
+static int
+__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ int err;
+
+ if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "control EP has pending transfers\n");
+ return -EINVAL;
+ }
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to map request: %d\n", err);
+ return err;
+ }
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ req->need_zlp = false;
+ req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
+ XUDC_TRB_MAX_BUFFER_SIZE);
+ if (req->usb_req.length == 0)
+ req->trbs_needed++;
+
+ if (!usb_endpoint_xfer_isoc(ep->desc) &&
+ req->usb_req.zero && req->usb_req.length &&
+ ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
+ req->trbs_needed++;
+ req->need_zlp = true;
+ }
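+
+	/*
+	 * Illustrative example of the accounting above: a 100 KiB
+	 * (102400-byte) bulk IN request with usb_req.zero set and a
+	 * 1024-byte maxpacket needs DIV_ROUND_UP(102400, 65536) = 2 data
+	 * TRBs and, since 102400 is a multiple of 1024, one more TRB for
+	 * the trailing ZLP, i.e. trbs_needed = 3.
+	 */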
+
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ list_add_tail(&req->list, &ep->queue);
+
+ tegra_xudc_ep_kick_queue(ep);
+
+ return 0;
+}
+
+static int
+tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
+ gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_queue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc_trb *trb = req->first_trb;
+ bool pcs_enq = trb_read_cycle(trb);
+ bool pcs;
+
+ /*
+	 * Clear out all the TRBs that are part of or follow the cancelled
+	 * request, and restore each TRB's cycle bit to its un-enqueued
+	 * state.
+ */
+ while (trb != &ep->transfer_ring[ep->enq_ptr]) {
+ pcs = trb_read_cycle(trb);
+ memset(trb, 0, sizeof(*trb));
+ trb_write_cycle(trb, !pcs);
+ trb++;
+
+ if (trb_read_type(trb) == TRB_TYPE_LINK)
+ trb = ep->transfer_ring;
+ }
+
+ /* Requests will be re-queued at the start of the cancelled request. */
+ ep->enq_ptr = req->first_trb - ep->transfer_ring;
+ /*
+ * Retrieve the correct cycle bit state from the first trb of
+ * the cancelled request.
+ */
+ ep->pcs = pcs_enq;
+ ep->ring_full = false;
+ list_for_each_entry_continue(req, &ep->queue, list) {
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ }
+}
+
+/*
+ * Determine if the given TRB is in the range [first trb, last trb] for the
+ * given request.
+ */
+static bool trb_in_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
+ req->first_trb, req->last_trb, trb);
+
+ if (trb >= req->first_trb && (trb <= req->last_trb ||
+ req->last_trb < req->first_trb))
+ return true;
+
+ if (trb < req->first_trb && trb <= req->last_trb &&
+ req->last_trb < req->first_trb)
+ return true;
+
+ return false;
+}
+
+/*
+ * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
+ * for the given endpoint and request.
+ */
+static bool trb_before_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
+ __func__, req->first_trb, req->last_trb, enq_trb, trb);
+
+ if (trb < req->first_trb && (enq_trb <= trb ||
+ req->first_trb < enq_trb))
+ return true;
+
+ if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
+ return true;
+
+ return false;
+}
+
+static int
+__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ struct tegra_xudc_request *r;
+ struct tegra_xudc_trb *deq_trb;
+ bool busy, kick_queue = false;
+ int ret = 0;
+
+ /* Make sure the request is actually queued to this endpoint. */
+ list_for_each_entry(r, &ep->queue, list) {
+ if (r == req)
+ break;
+ }
+
+ if (r != req)
+ return -EINVAL;
+
+ /* Request hasn't been queued in the transfer ring yet. */
+ if (!req->trbs_queued) {
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ return 0;
+ }
+
+	/* Halt DMA for this endpoint. */
+ if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
+ ep_pause(xudc, ep->index);
+ ep_wait_for_inactive(xudc, ep->index);
+ }
+
+ deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
+ /* Is the hardware processing the TRB at the dequeue pointer? */
+ busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));
+
+ if (trb_in_request(ep, req, deq_trb) && busy) {
+ /*
+ * Request has been partially completed or it hasn't
+ * started processing yet.
+ */
+ dma_addr_t deq_ptr;
+
+ squeeze_transfer_ring(ep, req);
+
+ req->usb_req.actual = ep_ctx_read_edtla(ep->context);
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+
+ /* EDTLA is > 0: request has been partially completed */
+ if (req->usb_req.actual > 0) {
+ /*
+ * Abort the pending transfer and update the dequeue
+ * pointer
+ */
+ ep_ctx_write_edtla(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_data_offset(ep->context, 0);
+
+ deq_ptr = trb_virt_to_phys(ep,
+ &ep->transfer_ring[ep->enq_ptr]);
+
+ if (dma_mapping_error(xudc->dev, deq_ptr)) {
+ ret = -EINVAL;
+ } else {
+ ep_ctx_write_deq_ptr(ep->context, deq_ptr);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+ ep_reload(xudc, ep->index);
+ }
+ }
+ } else if (trb_before_request(ep, req, deq_trb) && busy) {
+ /* Request hasn't started processing yet. */
+ squeeze_transfer_ring(ep, req);
+
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+ } else {
+ /*
+ * Request has completed, but we haven't processed the
+ * completion event yet.
+ */
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ ret = -EINVAL;
+ }
+
+ /* Resume the endpoint. */
+ ep_unpause(xudc, ep->index);
+
+ if (kick_queue)
+ tegra_xudc_ep_kick_queue(ep);
+
+ return ret;
+}
+
+static int
+tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_dequeue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (!ep->desc)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ dev_err(xudc->dev, "can't halt isoc EP\n");
+ return -ENOTSUPP;
+ }
+
+ if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
+ dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
+ halt ? "halted" : "not halted");
+ return 0;
+ }
+
+ if (halt) {
+ ep_halt(xudc, ep->index);
+ } else {
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_seq_num(ep->context, 0);
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ tegra_xudc_ep_ring_doorbell(ep);
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ if (value && usb_endpoint_dir_in(ep->desc) &&
+ !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "can't halt EP with requests pending\n");
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_set_halt(ep, value);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
+{
+ const struct usb_endpoint_descriptor *desc = ep->desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+ struct tegra_xudc *xudc = ep->xudc;
+ u16 maxpacket, maxburst = 0, esit = 0;
+ u32 val;
+
+ maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (!usb_endpoint_xfer_control(desc))
+ maxburst = comp_desc->bMaxBurst;
+
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
+ esit = le16_to_cpu(comp_desc->wBytesPerInterval);
+ } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc))) {
+ if (xudc->gadget.speed == USB_SPEED_HIGH) {
+ maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
+ if (maxburst == 0x3) {
+ dev_warn(xudc->dev,
+ "invalid endpoint maxburst\n");
+ maxburst = 0x2;
+ }
+ }
+ esit = maxpacket * (maxburst + 1);
+ }
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_interval(ep->context, desc->bInterval);
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (usb_endpoint_xfer_isoc(desc)) {
+ ep_ctx_write_mult(ep->context,
+ comp_desc->bmAttributes & 0x3);
+ }
+
+ if (usb_endpoint_xfer_bulk(desc)) {
+ ep_ctx_write_max_pstreams(ep->context,
+ comp_desc->bmAttributes &
+ 0x1f);
+ ep_ctx_write_lsa(ep->context, 1);
+ }
+ }
+
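+	/*
+	 * EP_TYPE_* values for IN endpoints are offset from their OUT
+	 * counterparts by EP_TYPE_CONTROL (4), so adding it maps an OUT
+	 * transfer type onto the corresponding IN type; control endpoints
+	 * map onto EP_TYPE_CONTROL itself.
+	 */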
+ if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
+ val = usb_endpoint_type(desc);
+ else
+ val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
+
+ ep_ctx_write_type(ep->context, val);
+ ep_ctx_write_cerr(ep->context, 0x3);
+ ep_ctx_write_max_packet_size(ep->context, maxpacket);
+ ep_ctx_write_max_burst_size(ep->context, maxburst);
+
+ ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+
+ /* Select a reasonable average TRB length based on endpoint type. */
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ val = 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ val = 1024;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_ISOC:
+ default:
+ val = 3072;
+ break;
+ }
+
+ ep_ctx_write_avg_trb_len(ep->context, val);
+ ep_ctx_write_max_esit_payload(ep->context, esit);
+
+ ep_ctx_write_cerrcnt(ep->context, 0x3);
+}
+
+static void setup_link_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ trb_write_data_ptr(trb, ep->transfer_ring_phys);
+ trb_write_type(trb, TRB_TYPE_LINK);
+ trb_write_toggle_cycle(trb, 1);
+}
+
+static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_err(xudc->dev, "endpoint %u already disabled\n",
+ ep->index);
+ return -EINVAL;
+ }
+
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
+
+ xudc->nr_enabled_eps--;
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ xudc->nr_isoch_eps--;
+
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+ if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
+ xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
+
+ /*
+ * If this is the last endpoint disabled in a de-configure request,
+ * switch back to address state.
+ */
+ if ((xudc->device_state == USB_STATE_CONFIGURED) &&
+ (xudc->nr_enabled_eps == 1)) {
+ u32 val;
+
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ dev_info(xudc->dev, "ep %u disabled\n", ep->index);
+
+ return 0;
+}
+
+static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_disable(ep);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ unsigned int i;
+ u32 val;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER &&
+ !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
+ return -EINVAL;
+
+ /* Disable the EP if it is not disabled */
+ if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
+ __tegra_xudc_ep_disable(ep);
+
+ ep->desc = desc;
+ ep->comp_desc = ep->usb_ep.comp_desc;
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+		if (xudc->nr_isoch_eps >= XUDC_MAX_ISOCH_EPS) {
+ dev_err(xudc->dev, "too many isoch endpoints\n");
+ return -EBUSY;
+ }
+ xudc->nr_isoch_eps++;
+ }
+
+ memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
+ sizeof(*ep->transfer_ring));
+ setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
+
+ ep->enq_ptr = 0;
+ ep->deq_ptr = 0;
+ ep->pcs = true;
+ ep->ring_full = false;
+ xudc->nr_enabled_eps++;
+
+ tegra_xudc_ep_context_setup(ep);
+
+ /*
+ * No need to reload and un-halt EP0. This will be done automatically
+ * once a valid SETUP packet is received.
+ */
+ if (usb_endpoint_xfer_control(desc))
+ goto out;
+
+ /*
+ * Transition to configured state once the first non-control
+ * endpoint is enabled.
+ */
+ if (xudc->device_state == USB_STATE_ADDRESS) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+
+ xudc->device_state = USB_STATE_CONFIGURED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ /*
+ * Pause all bulk endpoints when enabling an isoch endpoint
+ * to ensure the isoch endpoint is allocated enough bandwidth.
+ */
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_pause(xudc, i);
+ }
+ }
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_unpause(xudc, i);
+ }
+ }
+
+out:
+ dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
+ usb_ep_type_string(usb_endpoint_type(ep->desc)),
+ usb_endpoint_dir_in(ep->desc) ? "in" : "out");
+
+ return 0;
+}
+
+static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_enable(ep, desc);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *
+tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+
+ req = kzalloc(sizeof(*req), gfp);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->list);
+
+ return &req->usb_req;
+}
+
+static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
+ struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req = to_xudc_req(usb_req);
+
+ kfree(req);
+}
+
+static const struct usb_ep_ops tegra_xudc_ep_ops = {
+ .enable = tegra_xudc_ep_enable,
+ .disable = tegra_xudc_ep_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EBUSY;
+}
+
+static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
+{
+ return -EBUSY;
+}
+
+static const struct usb_ep_ops tegra_xudc_ep0_ops = {
+ .enable = tegra_xudc_ep0_enable,
+ .disable = tegra_xudc_ep0_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
+ MFINDEX_FRAME_SHIFT;
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ ep_unpause_all(xudc);
+
+ /* Direct link to U0. */
+ val = xudc_readl(xudc, PORTSC);
+ if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ if (xudc->device_state == USB_STATE_SUSPENDED) {
+ xudc->device_state = xudc->resume_state;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ xudc->resume_state = 0;
+ }
+
+ /*
+ * Doorbells may be dropped if they are sent too soon (< ~200ns)
+ * after unpausing the endpoint. Wait for 500ns just to be safe.
+ */
+ ndelay(500);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
+}
+
+static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+ val = xudc_readl(xudc, PORTPM);
+ dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
+ val, gadget->speed);
+
+ if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
+ (val & PORTPM_RWE)) ||
+ ((xudc->gadget.speed == USB_SPEED_SUPER) &&
+ (val & PORTPM_FRWE))) {
+ tegra_xudc_resume_device_state(xudc);
+
+ /* Send Device Notification packet. */
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
+ | DEVNOTIF_LO_TRIG;
+ xudc_writel(xudc, 0, DEVNOTIF_HI);
+ xudc_writel(xudc, val, DEVNOTIF_LO);
+ }
+ }
+
+unlock:
+	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (is_on != xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ if (is_on)
+ val |= CTRL_ENABLE;
+ else
+ val &= ~CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->pullup = is_on;
+	dev_dbg(xudc->dev, "%s: pullup: %d\n", __func__, is_on);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (!driver)
+ return -EINVAL;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
+ if (ret < 0)
+ goto unlock;
+
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_IE | CTRL_LSE;
+ xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, PORTHALT);
+ val |= PORTHALT_STCHG_INTR_EN;
+ xudc_writel(xudc, val, PORTHALT);
+
+ if (xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->driver = driver;
+unlock:
+	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_IE | CTRL_ENABLE);
+ xudc_writel(xudc, val, CTRL);
+
+ __tegra_xudc_ep_disable(&xudc->ep[0]);
+
+ xudc->driver = NULL;
+	dev_dbg(xudc->dev, "gadget stopped\n");
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
+ xudc->selfpowered = !!is_on;
+
+ return 0;
+}
+
+static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
+ .get_frame = tegra_xudc_gadget_get_frame,
+ .wakeup = tegra_xudc_gadget_wakeup,
+ .pullup = tegra_xudc_gadget_pullup,
+ .udc_start = tegra_xudc_gadget_start,
+ .udc_stop = tegra_xudc_gadget_stop,
+ .set_selfpowered = tegra_xudc_set_selfpowered,
+};
+
+static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
+static int
+tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = NULL;
+ xudc->ep0_req->usb_req.dma = 0;
+ xudc->ep0_req->usb_req.length = 0;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static int
+tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = buf;
+ xudc->ep0_req->usb_req.length = len;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
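+/*
+ * Advance the control transfer state machine once the EP0 request has
+ * completed: a finished data stage queues the opposite-direction
+ * status stage, and anything else returns to WAIT_FOR_SETUP.
+ */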
+static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
+{
+ switch (xudc->setup_state) {
+ case DATA_STAGE_XFER:
+ xudc->setup_state = STATUS_STAGE_RECV;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ case DATA_STAGE_RECV:
+ xudc->setup_state = STATUS_STAGE_XFER;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ default:
+ xudc->setup_state = WAIT_FOR_SETUP;
+ break;
+ }
+}
+
+static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&xudc->lock);
+ ret = xudc->driver->setup(&xudc->gadget, ctrl);
+ spin_lock(&xudc->lock);
+
+ return ret;
+}
+
+static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if (xudc->test_mode_pattern) {
+ xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
+ xudc->test_mode_pattern = 0;
+ }
+}
+
+static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ u32 feature = le16_to_cpu(ctrl->wValue);
+ u32 index = le16_to_cpu(ctrl->wIndex);
+ u32 val, ep;
+ int ret;
+
+ if (le16_to_cpu(ctrl->wLength) != 0)
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (feature) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
+ (xudc->device_state == USB_STATE_DEFAULT))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if (set)
+ val |= PORTPM_RWE;
+ else
+ val &= ~PORTPM_RWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ if ((xudc->device_state != USB_STATE_CONFIGURED) ||
+ (xudc->gadget.speed != USB_SPEED_SUPER))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if ((feature == USB_DEVICE_U1_ENABLE) &&
+ xudc->soc->u1_enable) {
+ if (set)
+ val |= PORTPM_U1E;
+ else
+ val &= ~PORTPM_U1E;
+ }
+
+ if ((feature == USB_DEVICE_U2_ENABLE) &&
+ xudc->soc->u2_enable) {
+ if (set)
+ val |= PORTPM_U2E;
+ else
+ val &= ~PORTPM_U2E;
+ }
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (xudc->gadget.speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ if (!set)
+ return -EINVAL;
+
+ xudc->test_mode_pattern = index >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->device_state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ switch (feature) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (set) {
+ val = xudc_readl(xudc, PORTPM);
+
+ if (index & USB_INTRF_FUNC_SUSPEND_RW)
+ val |= PORTPM_FRWE;
+ else
+ val &= ~PORTPM_FRWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ return tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) ||
+ ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (index != 0)))
+ return -EINVAL;
+
+ ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
+}
+
+static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep_context *ep_ctx;
+ u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
+ u16 status = 0;
+
+ if (!(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 2))
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ val = xudc_readl(xudc, PORTPM);
+
+ if (xudc->selfpowered)
+ status |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (val & PORTPM_RWE))
+ status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (val & PORTPM_U1E)
+ status |= BIT(USB_DEV_STAT_U1_ENABLED);
+ if (val & PORTPM_U2E)
+ status |= BIT(USB_DEV_STAT_U2_ENABLED);
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ status |= USB_INTRF_STAT_FUNC_RW_CAP;
+ val = xudc_readl(xudc, PORTPM);
+ if (val & PORTPM_FRWE)
+ status |= USB_INTRF_STAT_FUNC_RW;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+ ep_ctx = &xudc->ep_context[ep];
+
+ if ((xudc->device_state != USB_STATE_CONFIGURED) &&
+ ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
+ return -EINVAL;
+
+ if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
+ return -EINVAL;
+
+ if (xudc_readl(xudc, EP_HALT) & BIT(ep))
+ status |= BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xudc->status_buf = cpu_to_le16(status);
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
+ sizeof(xudc->status_buf),
+ no_op_complete);
+}
+
+static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* No further processing is needed for the received SEL values. */
+}
+
+static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 6))
+ return -EINVAL;
+
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
+ sizeof(xudc->sel_timing),
+ set_sel_complete);
+}
+
+static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* No further processing is needed for the isoch delay value. */
+}
+
+static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ u32 delay = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ xudc->isoch_delay = delay;
+
+ return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
+}
+
+static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) &&
+ (xudc->dev_addr != 0)) {
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (xudc->dev_addr == 0)) {
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+}
+
+static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u32 val, addr = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
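+	/*
+	 * SET_ADDRESS is not a valid request in the Configured state
+	 * (USB 2.0, section 9.4.6).
+	 */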
+ if (xudc->device_state == USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ dev_dbg(xudc->dev, "set address: %u\n", addr);
+
+ xudc->dev_addr = addr;
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_DEVADDR_MASK);
+ val |= CTRL_DEVADDR(addr);
+ xudc_writel(xudc, val, CTRL);
+
+ ep_ctx_write_devaddr(ep0->context, addr);
+
+ return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
+}
+
+static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
+ ret = tegra_xudc_ep0_get_status(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = tegra_xudc_ep0_set_address(xudc, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
+ ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
+ ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
+ ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ /*
+		 * In theory we need to clear the RUN bit before the status
+		 * stage of the deconfigure request is sent, but doing so
+		 * seems to cause problems. Clear RUN once all endpoints are
+		 * disabled instead.
+ */
+ fallthrough;
+ default:
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl,
+ u16 seq_num)
+{
+ int ret;
+
+ xudc->setup_seq_num = seq_num;
+
+ /* Ensure EP0 is unhalted. */
+ ep_unhalt(xudc, 0);
+
+ /*
+ * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
+ * are invalid. Halt EP0 until we get a valid packet.
+ */
+ if (xudc->soc->invalid_seq_num &&
+ (seq_num == 0xfffe || seq_num == 0xffff)) {
+ dev_warn(xudc->dev, "invalid sequence number detected\n");
+ ep_halt(xudc, 0);
+ return;
+ }
+
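+	/*
+	 * A non-zero wLength means a data stage follows in the direction
+	 * given by bRequestType; otherwise go straight to the status stage.
+	 */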
+ if (ctrl->wLength)
+ xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
+ DATA_STAGE_XFER : DATA_STAGE_RECV;
+ else
+ xudc->setup_state = STATUS_STAGE_XFER;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
+ else
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+
+ if (ret < 0) {
+ dev_warn(xudc->dev, "setup request failed: %d\n", ret);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ ep_halt(xudc, 0);
+ }
+}
+
+static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
+ u16 seq_num = trb_read_seq_num(event);
+
+ if (xudc->setup_state != WAIT_FOR_SETUP) {
+ /*
+ * The controller is in the process of handling another
+ * setup request. Queue subsequent requests and handle
+ * the last one once the controller reports a sequence
+ * number error.
+ */
+ memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
+ xudc->setup_packet.seq_num = seq_num;
+ xudc->queued_setup_packet = true;
+ } else {
+ tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
+ }
+}
+
+static struct tegra_xudc_request *
+trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_request *req;
+
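+	/*
+	 * Requests are queued and mapped onto the ring in order, so stop at
+	 * the first request that has no TRBs on the ring yet.
+	 */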
+ list_for_each_entry(req, &ep->queue, list) {
+ if (!req->trbs_queued)
+ break;
+
+ if (trb_in_request(ep, req, trb))
+ return req;
+ }
+
+ return NULL;
+}
+
+static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
+ struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *event)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_trb *trb;
+ bool short_packet;
+
+ short_packet = (trb_read_cmpl_code(event) ==
+ TRB_CMPL_CODE_SHORT_PACKET);
+
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ req = trb_to_request(ep, trb);
+
+ /*
+ * TDs are complete on short packet or when the completed TRB is the
+ * last TRB in the TD (the CHAIN bit is unset).
+ */
+ if (req && (short_packet || (!trb_read_chain(trb) &&
+ (req->trbs_needed == req->trbs_queued)))) {
+ struct tegra_xudc_trb *last = req->last_trb;
+ unsigned int residual;
+
+ residual = trb_read_transfer_len(event);
+ req->usb_req.actual = req->usb_req.length - residual;
+
+ dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
+ req->usb_req.actual, req->usb_req.length);
+
+ tegra_xudc_req_done(ep, req, 0);
+
+ if (ep->desc && usb_endpoint_xfer_control(ep->desc))
+ tegra_xudc_ep0_req_done(xudc);
+
+ /*
+ * Advance the dequeue pointer past the end of the current TD
+ * on short packet completion.
+ */
+ if (short_packet) {
+ ep->deq_ptr = (last - ep->transfer_ring) + 1;
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ }
+ } else if (!req) {
+ dev_warn(xudc->dev, "transfer event on dequeued request\n");
+ }
+
+ if (ep->desc)
+ tegra_xudc_ep_kick_queue(ep);
+}
+
+static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ unsigned int ep_index = trb_read_endpoint_id(event);
+ struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
+ struct tegra_xudc_trb *trb;
+ u16 comp_code;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
+ ep_index);
+ return;
+ }
+
+ /* Update transfer ring dequeue pointer. */
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ comp_code = trb_read_cmpl_code(event);
+ if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
+ ep->deq_ptr = (trb - ep->transfer_ring) + 1;
+
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ ep->ring_full = false;
+ }
+
+ switch (comp_code) {
+ case TRB_CMPL_CODE_SUCCESS:
+ case TRB_CMPL_CODE_SHORT_PACKET:
+ tegra_xudc_handle_transfer_completion(xudc, ep, event);
+ break;
+ case TRB_CMPL_CODE_HOST_REJECTED:
+ dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
+
+ ep->stream_rejected = true;
+ break;
+ case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
+ dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
+
+ if (ep->stream_rejected) {
+ ep->stream_rejected = false;
+ /*
+ * An EP is stopped when a stream is rejected. Wait
+ * for the EP to report that it is stopped and then
+ * un-stop it.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ }
+ tegra_xudc_ep_ring_doorbell(ep);
+ break;
+ case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
+ /*
+ * Wait for the EP to be stopped so the controller stops
+ * processing doorbells.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ ep->enq_ptr = ep->deq_ptr;
+ tegra_xudc_ep_nuke(ep, -EIO);
+		fallthrough;
+ case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
+ case TRB_CMPL_CODE_CTRL_DIR_ERR:
+ case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
+ case TRB_CMPL_CODE_RING_UNDERRUN:
+ case TRB_CMPL_CODE_RING_OVERRUN:
+ case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
+ case TRB_CMPL_CODE_USB_TRANS_ERR:
+ case TRB_CMPL_CODE_TRB_ERR:
+ dev_err(xudc->dev, "completion error %#x on EP %u\n",
+ comp_code, ep_index);
+
+ ep_halt(xudc, ep_index);
+ break;
+ case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
+ dev_info(xudc->dev, "sequence number error\n");
+
+ /*
+ * Kill any queued control request and skip to the last
+ * setup packet we received.
+ */
+ tegra_xudc_ep_nuke(ep, -EINVAL);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ if (!xudc->queued_setup_packet)
+ break;
+
+ tegra_xudc_handle_ep0_setup_packet(xudc,
+ &xudc->setup_packet.ctrl_req,
+ xudc->setup_packet.seq_num);
+ xudc->queued_setup_packet = false;
+ break;
+ case TRB_CMPL_CODE_STOPPED:
+ dev_dbg(xudc->dev, "stop completion code on EP %u\n",
+ ep_index);
+
+ /* Disconnected. */
+ tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
+ break;
+ default:
+ dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
+ comp_code, ep_index);
+ break;
+ }
+}
+
+static void tegra_xudc_reset(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ dma_addr_t deq_ptr;
+ unsigned int i;
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ep_unpause_all(xudc);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
+
+ /*
+ * Reset sequence number and dequeue pointer to flush the transfer
+ * ring.
+ */
+ ep0->deq_ptr = ep0->enq_ptr;
+ ep0->ring_full = false;
+
+ xudc->setup_seq_num = 0;
+ xudc->queued_setup_packet = false;
+
+ ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
+
+ deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
+
+ if (!dma_mapping_error(xudc->dev, deq_ptr)) {
+ ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
+ ep_ctx_write_dcs(ep0->context, ep0->pcs);
+ }
+
+ ep_unhalt_all(xudc);
+ ep_reload(xudc, 0);
+ ep_unpause(xudc, 0);
+}
+
+static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u16 maxpacket;
+ u32 val;
+
+ val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
+ switch (val) {
+ case PORTSC_PS_LS:
+ xudc->gadget.speed = USB_SPEED_LOW;
+ break;
+ case PORTSC_PS_FS:
+ xudc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSC_PS_HS:
+ xudc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case PORTSC_PS_SS:
+ xudc->gadget.speed = USB_SPEED_SUPER;
+ break;
+ default:
+ xudc->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+
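+	/* EP0 uses 512-byte packets in SuperSpeed and 64-byte packets otherwise. */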
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ maxpacket = 512;
+ else
+ maxpacket = 64;
+
+ ep_ctx_write_max_packet_size(ep0->context, maxpacket);
+ tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
+ usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
+
+ if (!xudc->soc->u1_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U1TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (!xudc->soc->u2_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U2TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (xudc->gadget.speed <= USB_SPEED_HIGH) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_L1S_MASK);
+ if (xudc->soc->lpm_enable)
+ val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
+ else
+ val |= PORTPM_L1S(PORTPM_L1S_NYET);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
+}
+
+static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver && xudc->driver->disconnect) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->disconnect(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+
+ xudc->device_state = USB_STATE_NOTATTACHED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ complete(&xudc->disconnect_complete);
+}
+
+static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver) {
+ spin_unlock(&xudc->lock);
+ usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
+ spin_lock(&xudc->lock);
+ }
+
+ tegra_xudc_port_connect(xudc);
+}
+
+static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port suspend\n");
+
+ xudc->resume_state = xudc->device_state;
+ xudc->device_state = USB_STATE_SUSPENDED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ if (xudc->driver->suspend) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->suspend(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port resume\n");
+
+ tegra_xudc_resume_device_state(xudc);
+
+ if (xudc->driver->resume) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->resume(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
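+/*
+ * The PORTSC change bits are write-one-to-clear: mask them all out and set
+ * only the flag being acknowledged so that other pending changes are not
+ * cleared by accident.
+ */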
+static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~PORTSC_CHANGE_MASK;
+ val |= flag;
+ xudc_writel(xudc, val, PORTSC);
+}
+
+static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ u32 portsc, porthalt;
+
+ porthalt = xudc_readl(xudc, PORTHALT);
+ if ((porthalt & PORTHALT_STCHG_REQ) &&
+ (porthalt & PORTHALT_HALT_LTSSM)) {
+ dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
+ porthalt &= ~PORTHALT_HALT_LTSSM;
+ xudc_writel(xudc, porthalt, PORTHALT);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+#define TOGGLE_VBUS_WAIT_MS 100
+ if (xudc->soc->port_reset_quirk) {
+ schedule_delayed_work(&xudc->port_reset_war_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_for_sec_prc = 1;
+ }
+ }
+
+ if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+ tegra_xudc_port_reset(xudc);
+ cancel_delayed_work(&xudc->port_reset_war_work);
+ xudc->wait_for_sec_prc = 0;
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_WRC) {
+ dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
+ if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
+ tegra_xudc_port_reset(xudc);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_CSC) {
+ dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CSC);
+
+ if (portsc & PORTSC_CCS)
+ tegra_xudc_port_connect(xudc);
+ else
+ tegra_xudc_port_disconnect(xudc);
+
+ if (xudc->wait_csc) {
+ cancel_delayed_work(&xudc->plc_reset_work);
+ xudc->wait_csc = false;
+ }
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_PLC) {
+ u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
+
+ dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PLC);
+ switch (pls) {
+ case PORTSC_PLS_U3:
+ tegra_xudc_port_suspend(xudc);
+ break;
+ case PORTSC_PLS_U0:
+ if (xudc->gadget.speed < USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_RESUME:
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_INACTIVE:
+ schedule_delayed_work(&xudc->plc_reset_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_csc = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (portsc & PORTSC_CEC) {
+ dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CEC);
+ }
+
+ dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
+}
+
+static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
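+	/*
+	 * Handling one change can trigger another, so keep re-reading PORTSC
+	 * and PORTHALT until no change bits or state-change requests remain.
+	 */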
+ while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
+ (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
+ __tegra_xudc_handle_port_status(xudc);
+}
+
+static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ u32 type = trb_read_type(event);
+
+ dump_trb(xudc, "EVENT", event);
+
+ switch (type) {
+ case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
+ tegra_xudc_handle_port_status(xudc);
+ break;
+ case TRB_TYPE_TRANSFER_EVENT:
+ tegra_xudc_handle_transfer_event(xudc, event);
+ break;
+ case TRB_TYPE_SETUP_PACKET_EVENT:
+ tegra_xudc_handle_ep0_event(xudc, event);
+ break;
+ default:
+ dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
+ break;
+ }
+}
+
+static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_trb *event;
+ dma_addr_t erdp;
+
+ while (true) {
+ event = xudc->event_ring[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr;
+
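+		/*
+		 * A TRB whose cycle bit differs from the consumer cycle state
+		 * has not been written by the controller yet.
+		 */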
+ if (trb_read_cycle(event) != xudc->ccs)
+ break;
+
+ tegra_xudc_handle_event(xudc, event);
+
+ xudc->event_ring_deq_ptr++;
+ if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
+ xudc->event_ring_deq_ptr = 0;
+ xudc->event_ring_index++;
+ }
+
+ if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
+ xudc->event_ring_index = 0;
+ xudc->ccs = !xudc->ccs;
+ }
+ }
+
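+	/*
+	 * Write the final dequeue position back to the controller and clear
+	 * the event handler busy flag.
+	 */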
+ erdp = xudc->event_ring_phys[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr * sizeof(*event);
+
+ xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
+ xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
+}
+
+static irqreturn_t tegra_xudc_irq(int irq, void *data)
+{
+ struct tegra_xudc *xudc = data;
+ unsigned long flags;
+ u32 val;
+
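+	/*
+	 * ST_IP is write-one-to-clear; acknowledge the interrupt before
+	 * draining the event ring.
+	 */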
+ val = xudc_readl(xudc, ST);
+ if (!(val & ST_IP))
+ return IRQ_NONE;
+ xudc_writel(xudc, ST_IP, ST);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ tegra_xudc_process_event_ring(xudc);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ ep->xudc = xudc;
+ ep->index = index;
+ ep->context = &xudc->ep_context[index];
+ INIT_LIST_HEAD(&ep->queue);
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return 0;
+
+ ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
+ GFP_KERNEL,
+ &ep->transfer_ring_phys);
+ if (!ep->transfer_ring)
+ return -ENOMEM;
+
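+	/*
+	 * Physical endpoints come in pairs: even indices are OUT endpoints
+	 * and odd indices are the IN endpoints with the same number.
+	 */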
+ if (index) {
+ snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
+ (index % 2 == 0) ? "out" : "in");
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 16;
+ ep->usb_ep.ops = &tegra_xudc_ep_ops;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
+ if (index & 1)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+ list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
+ } else {
+		strscpy(ep->name, "ep0", sizeof(ep->name));
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
+ ep->usb_ep.ops = &tegra_xudc_ep0_ops;
+ ep->usb_ep.caps.type_control = true;
+ ep->usb_ep.caps.dir_in = true;
+ ep->usb_ep.caps.dir_out = true;
+ }
+
+ return 0;
+}
+
+static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return;
+
+ dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
+ ep->transfer_ring_phys);
+}
+
+static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
+{
+ struct usb_request *req;
+ unsigned int i;
+ int err;
+
+ xudc->ep_context =
+ dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
+ sizeof(*xudc->ep_context),
+ &xudc->ep_context_phys, GFP_KERNEL);
+ if (!xudc->ep_context)
+ return -ENOMEM;
+
+ xudc->transfer_ring_pool =
+ dmam_pool_create(dev_name(xudc->dev), xudc->dev,
+ XUDC_TRANSFER_RING_SIZE *
+ sizeof(struct tegra_xudc_trb),
+ sizeof(struct tegra_xudc_trb), 0);
+ if (!xudc->transfer_ring_pool) {
+ err = -ENOMEM;
+ goto free_ep_context;
+ }
+
+ INIT_LIST_HEAD(&xudc->gadget.ep_list);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ err = tegra_xudc_alloc_ep(xudc, i);
+ if (err < 0)
+ goto free_eps;
+ }
+
+ req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ goto free_eps;
+ }
+ xudc->ep0_req = to_xudc_req(req);
+
+ return 0;
+
+free_eps:
+ for (; i > 0; i--)
+ tegra_xudc_free_ep(xudc, i - 1);
+free_ep_context:
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+ return err;
+}
+
+static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
+{
+ xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
+ xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
+}
+
+static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
+ &xudc->ep0_req->usb_req);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_free_ep(xudc, i);
+
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+}
+
+static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ xudc->event_ring[i] =
+ dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ &xudc->event_ring_phys[i],
+ GFP_KERNEL);
+ if (!xudc->event_ring[i])
+ goto free_dma;
+ }
+
+ return 0;
+
+free_dma:
+ for (; i > 0; i--) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i - 1]),
+ xudc->event_ring[i - 1],
+ xudc->event_ring_phys[i - 1]);
+ }
+ return -ENOMEM;
+}
+
+static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ val = xudc_readl(xudc, SPARAM);
+ val &= ~(SPARAM_ERSTMAX_MASK);
+ val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
+ xudc_writel(xudc, val, SPARAM);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]));
+
+ val = xudc_readl(xudc, ERSTSZ);
+ val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
+ val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
+ xudc_writel(xudc, val, ERSTSZ);
+
+ xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBALO(i));
+ xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBAHI(i));
+ }
+
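+	/*
+	 * Point both the dequeue (ERDP) and enqueue (EREP) pointers at the
+	 * start of the first segment, with the enqueue cycle state set.
+	 */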
+ val = lower_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPLO);
+ val |= EREPLO_ECS;
+ xudc_writel(xudc, val, EREPLO);
+
+ val = upper_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPHI);
+ xudc_writel(xudc, val, EREPHI);
+
+ xudc->ccs = true;
+ xudc->event_ring_index = 0;
+ xudc->event_ring_deq_ptr = 0;
+}
+
+static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ xudc->event_ring[i],
+ xudc->event_ring_phys[i]);
+ }
+}
+
+static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ if (xudc->soc->has_ipfs) {
+ val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
+ val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
+ ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
+ usleep_range(10, 15);
+ }
+
+ /* Enable bus master */
+ val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
+ XUSB_DEV_CFG_1_BUS_MASTER_EN;
+ fpci_writel(xudc, val, XUSB_DEV_CFG_1);
+
+ /* Program BAR0 space */
+ val = fpci_readl(xudc, XUSB_DEV_CFG_4);
+ val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+ val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+
+ fpci_writel(xudc, val, XUSB_DEV_CFG_4);
+ fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
+
+ usleep_range(100, 200);
+
+ if (xudc->soc->has_ipfs) {
+ /* Enable interrupt assertion */
+ val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
+ val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
+ ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
+ }
+}
+
+static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+{
+ u32 val, imod;
+
+ if (xudc->soc->has_ipfs) {
+ val = xudc_readl(xudc, BLCG);
+ val |= BLCG_ALL;
+ val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
+ BLCG_COREPLL_PWRDN);
+ val |= BLCG_IOPLL_0_PWRDN;
+ val |= BLCG_IOPLL_1_PWRDN;
+ val |= BLCG_IOPLL_2_PWRDN;
+
+ xudc_writel(xudc, val, BLCG);
+ }
+
+ /* Set a reasonable U3 exit timer value. */
+ val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
+ val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
+ val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
+ xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
+
+ /* Default ping LFPS tBurst is too large. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT0);
+ val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
+ val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
+ xudc_writel(xudc, val, SSPX_CORE_CNT0);
+
+ /* Default tPortConfiguration timeout is too small. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT30);
+ val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
+ val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
+ xudc_writel(xudc, val, SSPX_CORE_CNT30);
+
+ if (xudc->soc->lpm_enable) {
+ /* Set L1 resume duration to 95 us. */
+ val = xudc_readl(xudc, HSFSPI_COUNT13);
+ val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
+ val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
+ xudc_writel(xudc, val, HSFSPI_COUNT13);
+ }
+
+ /*
+	 * The compliance suite appears to violate the polling LFPS tBurst
+	 * maximum of 1.4 us. Send 1.45 us instead.
+ */
+ val = xudc_readl(xudc, SSPX_CORE_CNT32);
+ val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
+ val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT32);
+
+ /* Direct HS/FS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Direct SS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Restore port instance. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /*
+ * Enable INFINITE_SS_RETRY to prevent device from entering
+ * Disabled.Error when attached to buggy SuperSpeed hubs.
+ */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val |= CFG_DEV_FE_INFINITE_SS_RETRY;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+	/*
+	 * Set interrupt moderation; the interval appears to be programmed
+	 * in 250 ns units, hence the factor of four below.
+	 */
+ imod = XUDC_INTERRUPT_MODERATION_US * 4;
+ val = xudc_readl(xudc, RT_IMOD);
+ val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
+ val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
+ xudc_writel(xudc, val, RT_IMOD);
+
+	/* Increase the SSPI transaction timeout from 32 us to 512 us. */
+ val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
+ val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
+ val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
+ xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
+}
+
+static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+{
+ int err;
+
+ err = phy_init(xudc->utmi_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(xudc->usb3_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
+ goto exit_utmi_phy;
+ }
+
+ return 0;
+
+exit_utmi_phy:
+ phy_exit(xudc->utmi_phy);
+ return err;
+}
+
+static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
+{
+ phy_exit(xudc->usb3_phy);
+ phy_exit(xudc->utmi_phy);
+}
+
+static const char * const tegra210_xudc_supply_names[] = {
+ "hvdd-usb",
+ "avddio-usb",
+};
+
+static const char * const tegra210_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "hs_src",
+ "fs_src",
+};
+
+static const char * const tegra186_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "fs_src",
+};
+
+static struct tegra_xudc_soc tegra210_xudc_soc_data = {
+ .supply_names = tegra210_xudc_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
+ .clock_names = tegra210_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
+ .u1_enable = false,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = true,
+ .pls_quirk = true,
+ .port_reset_quirk = true,
+ .has_ipfs = true,
+};
+
+static struct tegra_xudc_soc tegra186_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .has_ipfs = false,
+};
+
+static const struct of_device_id tegra_xudc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-xudc",
+ .data = &tegra210_xudc_soc_data
+ },
+ {
+ .compatible = "nvidia,tegra186-xudc",
+ .data = &tegra186_xudc_soc_data
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
+
+static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
+{
+ if (xudc->genpd_dl_ss)
+ device_link_del(xudc->genpd_dl_ss);
+ if (xudc->genpd_dl_device)
+ device_link_del(xudc->genpd_dl_device);
+ if (xudc->genpd_dev_ss)
+ dev_pm_domain_detach(xudc->genpd_dev_ss, true);
+ if (xudc->genpd_dev_device)
+ dev_pm_domain_detach(xudc->genpd_dev_device, true);
+}
+
+static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
+{
+ struct device *dev = xudc->dev;
+ int err;
+
+ xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
+ "dev");
+ if (IS_ERR(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device);
+ dev_err(dev, "failed to get dev pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
+ if (IS_ERR(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_device) {
+		dev_err(dev, "failed to add \"dev\" pm-domain device link\n");
+ return -ENODEV;
+ }
+
+ xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_ss) {
+		dev_err(dev, "failed to add superspeed pm-domain device link\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_probe(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc;
+ struct resource *res;
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+ unsigned int i;
+ int err;
+
+	xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
+ if (!xudc)
+ return -ENOMEM;
+
+ xudc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xudc);
+
+ xudc->soc = of_device_get_match_data(&pdev->dev);
+ if (!xudc->soc)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ xudc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->base))
+ return PTR_ERR(xudc->base);
+ xudc->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci");
+ xudc->fpci = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->fpci))
+ return PTR_ERR(xudc->fpci);
+
+ if (xudc->soc->has_ipfs) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipfs");
+ xudc->ipfs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->ipfs))
+ return PTR_ERR(xudc->ipfs);
+ }
+
+ xudc->irq = platform_get_irq(pdev, 0);
+ if (xudc->irq < 0) {
+ dev_err(xudc->dev, "failed to get IRQ: %d\n",
+ xudc->irq);
+ return xudc->irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
+ dev_name(&pdev->dev), xudc);
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
+ err);
+ return err;
+ }
+
+ xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
+ sizeof(*xudc->clks), GFP_KERNEL);
+ if (!xudc->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_clks; i++)
+ xudc->clks[i].id = xudc->soc->clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
+ xudc->clks);
+ if (err) {
+		dev_err(xudc->dev, "failed to request clocks: %d\n", err);
+ return err;
+ }
+
+ xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
+ sizeof(*xudc->supplies), GFP_KERNEL);
+ if (!xudc->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_supplies; i++)
+ xudc->supplies[i].supply = xudc->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err) {
+		dev_err(xudc->dev, "failed to request regulators: %d\n", err);
+ return err;
+ }
+
+ xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
+ if (IS_ERR(xudc->padctl))
+ return PTR_ERR(xudc->padctl);
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
+ if (err) {
+		dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
+ goto put_padctl;
+ }
+
+ xudc->usb3_phy = devm_phy_optional_get(&pdev->dev, "usb3");
+ if (IS_ERR(xudc->usb3_phy)) {
+ err = PTR_ERR(xudc->usb3_phy);
+ dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2");
+ if (IS_ERR(xudc->utmi_phy)) {
+ err = PTR_ERR(xudc->utmi_phy);
+ dev_err(xudc->dev, "failed to get usb2 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ err = tegra_xudc_powerdomain_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_phy_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_alloc_event_ring(xudc);
+ if (err)
+ goto disable_phy;
+
+ err = tegra_xudc_alloc_eps(xudc);
+ if (err)
+ goto free_event_ring;
+
+ spin_lock_init(&xudc->lock);
+
+ init_completion(&xudc->disconnect_complete);
+
+ INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
+
+ INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
+
+ INIT_DELAYED_WORK(&xudc->port_reset_war_work,
+ tegra_xudc_port_reset_war_work);
+
+ if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) {
+ role_sx_desc.set = tegra_xudc_usb_role_sw_set;
+ role_sx_desc.fwnode = dev_fwnode(xudc->dev);
+
+ xudc->usb_role_sw = usb_role_switch_register(xudc->dev,
+ &role_sx_desc);
+ if (IS_ERR(xudc->usb_role_sw)) {
+ err = PTR_ERR(xudc->usb_role_sw);
+			dev_err(xudc->dev, "failed to register USB role switch: %d\n",
+				err);
+ goto free_eps;
+ }
+ } else {
+		/* Default to device mode; this keeps the PHY always on. */
+		dev_info(xudc->dev, "defaulting to device mode\n");
+ schedule_work(&xudc->usb_role_sw_work);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ xudc->gadget.ops = &tegra_xudc_gadget_ops;
+ xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
+ xudc->gadget.name = "tegra-xudc";
+ xudc->gadget.max_speed = USB_SPEED_SUPER;
+
+ err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
+ goto free_eps;
+ }
+
+ return 0;
+
+free_eps:
+ tegra_xudc_free_eps(xudc);
+free_event_ring:
+ tegra_xudc_free_event_ring(xudc);
+disable_phy:
+ tegra_xudc_phy_exit(xudc);
+put_powerdomains:
+ tegra_xudc_powerdomain_remove(xudc);
+disable_regulator:
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+put_padctl:
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return err;
+}
+
+static int tegra_xudc_remove(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(xudc->dev);
+
+ cancel_delayed_work(&xudc->plc_reset_work);
+
+ if (xudc->usb_role_sw) {
+ usb_role_switch_unregister(xudc->usb_role_sw);
+ cancel_work_sync(&xudc->usb_role_sw_work);
+ }
+
+ usb_del_gadget_udc(&xudc->gadget);
+
+ tegra_xudc_free_eps(xudc);
+ tegra_xudc_free_event_ring(xudc);
+
+ tegra_xudc_powerdomain_remove(xudc);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ phy_power_off(xudc->utmi_phy);
+ phy_power_off(xudc->usb3_phy);
+
+ tegra_xudc_phy_exit(xudc);
+
+ pm_runtime_disable(xudc->dev);
+ pm_runtime_put(xudc->dev);
+
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+
+ dev_dbg(xudc->dev, "entering ELPG\n");
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ xudc->powergated = true;
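+	/* Save CTRL and PORTPM so tegra_xudc_unpowergate() can restore them. */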
+ xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
+ xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
+ xudc_writel(xudc, 0, CTRL);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ dev_dbg(xudc->dev, "entering ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+ int err;
+
+ dev_dbg(xudc->dev, "exiting ELPG\n");
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err < 0)
+ return err;
+
+ err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
+ if (err < 0)
+ return err;
+
+ tegra_xudc_fpci_ipfs_init(xudc);
+
+ tegra_xudc_device_params_init(xudc);
+
+ tegra_xudc_init_event_ring(xudc);
+
+ tegra_xudc_init_eps(xudc);
+
+ xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
+ xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->powergated = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ dev_dbg(xudc->dev, "exiting ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = true;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ flush_work(&xudc->usb_role_sw_work);
+
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
+
+ if (!pm_runtime_status_suspended(dev))
+ tegra_xudc_powergate(xudc);
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ err = tegra_xudc_unpowergate(xudc);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ schedule_work(&xudc->usb_role_sw_work);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_powergate(xudc);
+}
+
+static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_unpowergate(xudc);
+}
+
+static const struct dev_pm_ops tegra_xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
+ SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
+ tegra_xudc_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra_xudc_driver = {
+ .probe = tegra_xudc_probe,
+ .remove = tegra_xudc_remove,
+ .driver = {
+ .name = "tegra-xudc",
+ .pm = &tegra_xudc_pm_ops,
+ .of_match_table = tegra_xudc_of_match,
+ },
+};
+module_platform_driver(tegra_xudc_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
+MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 79b2e79dddd0..8d730180db06 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -38,9 +38,9 @@ config USB_XHCI_DBGCAP
before enabling this option. If unsure, say 'N'.
config USB_XHCI_PCI
- tristate
- depends on USB_PCI
- default y
+ tristate
+ depends on USB_PCI
+ default y
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
@@ -220,12 +220,12 @@ config USB_EHCI_HCD_ORION
Marvell PXA/MMP USB controller" for those.
config USB_EHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
@@ -237,21 +237,21 @@ config USB_EHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_EHCI_HCD_AT91
- tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- Atmel chips.
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ Atmel chips.
config USB_EHCI_TEGRA
- tristate "NVIDIA Tegra HCD support"
- depends on ARCH_TEGRA
- select USB_EHCI_ROOT_HUB_TT
- select USB_TEGRA_PHY
- help
- This driver enables support for the internal USB Host Controllers
- found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+ tristate "NVIDIA Tegra HCD support"
+ depends on ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_TEGRA_PHY
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
@@ -269,10 +269,10 @@ config USB_EHCI_SH
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
- help
- Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
@@ -409,12 +409,12 @@ config USB_OHCI_HCD_OMAP1
Enables support for the OHCI controller on OMAP1/2 chips.
config USB_OHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
@@ -426,12 +426,12 @@ config USB_OHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_OHCI_HCD_S3C2410
- tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- S3C24xx/S3C64xx chips.
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
@@ -440,8 +440,8 @@ config USB_OHCI_HCD_LPC32XX
depends on USB_ISP1301
default y
---help---
- Enables support for the on-chip OHCI controller on
- NXP chips.
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
config USB_OHCI_HCD_PXA27X
tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
@@ -456,8 +456,8 @@ config USB_OHCI_HCD_AT91
depends on USB_OHCI_HCD && ARCH_AT91 && OF
default y
---help---
- Enables support for the on-chip OHCI controller on
- Atmel chips.
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
@@ -545,7 +545,7 @@ config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS
help
- Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
@@ -609,8 +609,8 @@ config USB_UHCI_PLATFORM
default y if (ARCH_VT8500 || ARCH_ASPEED)
config USB_UHCI_ASPEED
- bool
- default y if ARCH_ASPEED
+ bool
+ default y if ARCH_ASPEED
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
@@ -713,14 +713,14 @@ config USB_RENESAS_USBHS_HCD
module will be called renesas-usbhs.
config USB_IMX21_HCD
- tristate "i.MX21 HCD support"
- depends on ARM && ARCH_MXC
- help
- This driver enables support for the on-chip USB host in the
- i.MX21 processor.
-
- To compile this driver as a module, choose M here: the
- module will be called "imx21-hcd".
+ tristate "i.MX21 HCD support"
+ depends on ARM && ARCH_MXC
+ help
+ This driver enables support for the on-chip USB host in the
+ i.MX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
config USB_HCD_BCMA
tristate "BCMA usb host driver"
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 2400a826397a..652fa29beb27 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -406,9 +406,12 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node)
+ if (core->dev.of_node) {
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
GPIOD_OUT_HIGH);
+ if (IS_ERR(usb_dev->gpio_desc))
+ return PTR_ERR(usb_dev->gpio_desc);
+ }
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 9e0c98d6bdb0..f967adf2d8df 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5646,8 +5646,10 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
return retval;
failed_dis_clk:
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
@@ -5665,8 +5667,10 @@ static int fotg210_hcd_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 7fcf1d9dd7f3..02a1344fbd6a 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -419,7 +419,7 @@ static void create_debug_files(struct imx21 *imx21)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ root = debugfs_create_dir(dev_name(imx21->dev), usb_debug_root);
imx21->debug_root = root;
debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 96f8daa11f25..4a3a2852523f 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2627,7 +2627,7 @@ static int isp1362_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp1362_hcd *isp1362_hcd;
- struct resource *addr, *data, *irq_res;
+ struct resource *data, *irq_res;
void __iomem *addr_reg;
void __iomem *data_reg;
int irq;
@@ -2651,8 +2651,7 @@ static int isp1362_probe(struct platform_device *pdev)
irq = irq_res->start;
- addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- addr_reg = devm_ioremap_resource(&pdev->dev, addr);
+ addr_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr_reg))
return PTR_ERR(addr_reg);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index fc35a7993b7b..b635c6a1b1a9 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -115,7 +115,6 @@ static void at91_start_hc(struct platform_device *pdev)
static void at91_stop_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_regs __iomem *regs = hcd->regs;
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
@@ -123,7 +122,7 @@ static void at91_stop_hc(struct platform_device *pdev)
/*
* Put the USB host controller into reset.
*/
- writel(0, &regs->control);
+ usb_hcd_platform_shutdown(pdev);
/*
* Stop the USB clocks.
@@ -628,6 +627,7 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
+ msleep(1);
at91_stop_clock(ohci_at91);
}
@@ -642,8 +642,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
-
- at91_start_clock(ohci_at91);
+ else
+ at91_start_clock(ohci_at91);
ohci_resume(hcd, false);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index c561881d0e79..85878e8ad331 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -150,7 +150,7 @@ static void ohci_nxp_stop_hc(void)
static int ohci_hcd_nxp_probe(struct platform_device *pdev)
{
- struct usb_hcd *hcd = 0;
+ struct usb_hcd *hcd = NULL;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e67242e437ed..fe09b8626329 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -676,12 +676,12 @@ static int oxu_hub_control(struct usb_hcd *hcd,
*/
/* Low level read/write registers functions */
-static inline u32 oxu_readl(void *base, u32 reg)
+static inline u32 oxu_readl(void __iomem *base, u32 reg)
{
return readl(base + reg);
}
-static inline void oxu_writel(void *base, u32 reg, u32 val)
+static inline void oxu_writel(void __iomem *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
@@ -4063,7 +4063,7 @@ static const struct hc_driver oxu_hc_driver = {
* Module stuff
*/
-static void oxu_configuration(struct platform_device *pdev, void *base)
+static void oxu_configuration(struct platform_device *pdev, void __iomem *base)
{
u32 tmp;
@@ -4093,7 +4093,7 @@ static void oxu_configuration(struct platform_device *pdev, void *base)
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
-static int oxu_verify_id(struct platform_device *pdev, void *base)
+static int oxu_verify_id(struct platform_device *pdev, void __iomem *base)
{
u32 id;
static const char * const bo[] = {
@@ -4121,7 +4121,7 @@ static int oxu_verify_id(struct platform_device *pdev, void *base)
static const struct hc_driver oxu_hc_driver;
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq, int otg)
+ void __iomem *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
@@ -4158,7 +4158,7 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq)
+ void __iomem *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
@@ -4207,7 +4207,7 @@ error_create_otg:
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
- void *base;
+ void __iomem *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 4efee34f154f..e9209e3e6248 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -71,7 +71,7 @@ INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
static bool distrust_firmware = true;
module_param(distrust_firmware, bool, 0);
-MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
-"t setup");
+MODULE_PARM_DESC(distrust_firmware,
+		"true to distrust firmware power/overcurrent setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1e0236e90687..a0025d23b257 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
+#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -212,7 +213,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7aab31fd9a5..6475c3d3b43b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -280,6 +280,9 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
return;
xhci_dbg(xhci, "// Ding dong!\n");
+
+ trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
+
writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
readl(&xhci->dba->doorbell[0]);
@@ -401,6 +404,9 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
return;
+
+ trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
+
writel(DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
@@ -651,10 +657,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock(&xhci->lock);
trace_xhci_urb_giveback(urb);
usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&xhci->lock);
}
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
@@ -2741,6 +2745,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
}
/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ union xhci_trb *event_ring_deq)
+{
+ u64 temp_64;
+ dma_addr_t deq;
+
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != xhci->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall
+ * always advance the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
+ ((u64) deq & (u64) ~ERST_PTR_MASK))
+ return;
+
+ /* Update HC event ring dequeue pointer */
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp_64 |= ERST_EHB;
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+}
+
+/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
@@ -2751,9 +2791,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
- dma_addr_t deq;
u64 temp_64;
u32 status;
+ int event_loop = 0;
spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -2807,24 +2847,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- while (xhci_handle_event(xhci) > 0) {}
-
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != xhci->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event "
- "ring dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp_64 &= ERST_PTR_MASK;
- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
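+	/*
+	 * Update the dequeue pointer every half segment of processed events
+	 * so a long burst cannot fill the event ring.
+	 */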
+ while (xhci_handle_event(xhci) > 0) {
+ if (event_loop++ < TRBS_PER_SEGMENT / 2)
+ continue;
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_loop = 0;
}
- /* Clear the event handler busy flag (RW1C); event ring is empty. */
- temp_64 |= ERST_EHB;
- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
ret = IRQ_HANDLED;
out:
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 2ff7c911fbd0..bf9065438320 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -42,19 +42,18 @@
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
-#define XUSB_CFG_ARU_MBOX_CMD 0x0e4
+/* XUSB_CFG_ARU_MBOX_CMD */
#define MBOX_DEST_FALC BIT(27)
#define MBOX_DEST_PME BIT(28)
#define MBOX_DEST_SMI BIT(29)
#define MBOX_DEST_XHCI BIT(30)
#define MBOX_INT_EN BIT(31)
-#define XUSB_CFG_ARU_MBOX_DATA_IN 0x0e8
+/* XUSB_CFG_ARU_MBOX_DATA_IN and XUSB_CFG_ARU_MBOX_DATA_OUT */
#define CMD_DATA_SHIFT 0
#define CMD_DATA_MASK 0xffffff
#define CMD_TYPE_SHIFT 24
#define CMD_TYPE_MASK 0xff
-#define XUSB_CFG_ARU_MBOX_DATA_OUT 0x0ec
-#define XUSB_CFG_ARU_MBOX_OWNER 0x0f0
+/* XUSB_CFG_ARU_MBOX_OWNER */
#define MBOX_OWNER_NONE 0
#define MBOX_OWNER_FW 1
#define MBOX_OWNER_SW 2
@@ -146,6 +145,13 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
+struct tegra_xusb_mbox_regs {
+ u16 cmd;
+ u16 data_in;
+ u16 data_out;
+ u16 owner;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
@@ -160,6 +166,8 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
+ struct tegra_xusb_mbox_regs mbox;
+
bool scale_ss_clock;
bool has_ipfs;
};
@@ -395,15 +403,15 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
- fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
@@ -413,17 +421,17 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
value = tegra_xusb_mbox_pack(msg);
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+ fpci_writel(tegra, value, tegra->soc->mbox.data_in);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
@@ -431,7 +439,7 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
if (time_after(jiffies, timeout))
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
@@ -598,16 +606,16 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
mutex_lock(&tegra->lock);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+ value = fpci_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
- fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
@@ -755,7 +763,6 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- tegra_xusb_phy_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_clk_disable(tegra);
@@ -779,16 +786,8 @@ static int tegra_xusb_runtime_resume(struct device *dev)
goto disable_clk;
}
- err = tegra_xusb_phy_enable(tegra);
- if (err < 0) {
- dev_err(dev, "failed to enable PHYs: %d\n", err);
- goto disable_regulator;
- }
-
return 0;
-disable_regulator:
- regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
return err;
@@ -970,7 +969,7 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *res, *regs;
+ struct resource *regs;
struct tegra_xusb *tegra;
struct xhci_hcd *xhci;
unsigned int i, j, k;
@@ -992,14 +991,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->fpci_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->fpci_base))
return PTR_ERR(tegra->fpci_base);
if (tegra->soc->has_ipfs) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
}
@@ -1128,8 +1125,9 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
tegra->supplies);
@@ -1181,6 +1179,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
*/
platform_set_drvdata(pdev, tegra);
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
+ goto put_hcd;
+ }
+
pm_runtime_enable(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
err = pm_runtime_get_sync(&pdev->dev);
@@ -1189,7 +1193,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_rpm;
+ goto disable_phy;
}
tegra_xusb_config(tegra, regs);
@@ -1275,9 +1279,11 @@ remove_usb2:
put_rpm:
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_xusb_runtime_suspend(&pdev->dev);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
+put_hcd:
usb_put_hcd(tegra->hcd);
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+ pm_runtime_disable(&pdev->dev);
put_powerdomains:
if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
@@ -1314,6 +1320,8 @@ static int tegra_xusb_remove(struct platform_device *pdev)
tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
}
+ tegra_xusb_phy_disable(tegra);
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
@@ -1375,6 +1383,12 @@ static const struct tegra_xusb_soc tegra124_soc = {
},
.scale_ss_clock = true,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -1407,6 +1421,12 @@ static const struct tegra_xusb_soc tegra210_soc = {
},
.scale_ss_clock = false,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
@@ -1432,12 +1452,48 @@ static const struct tegra_xusb_soc tegra186_soc = {
},
.scale_ss_clock = false,
.has_ipfs = false,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
+};
+
+static const char * const tegra194_supply_names[] = {
+};
+
+static const struct tegra_xusb_phy_type tegra194_phy_types[] = {
+ { .name = "usb3", .num = 4, },
+ { .name = "usb2", .num = 4, },
+};
+
+static const struct tegra_xusb_soc tegra194_soc = {
+ .firmware = "nvidia/tegra194/xusb.bin",
+ .supply_names = tegra194_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_supply_names),
+ .phy_types = tegra194_phy_types,
+ .num_types = ARRAY_SIZE(tegra194_phy_types),
+ .ports = {
+ .usb3 = { .offset = 0, .count = 4, },
+ .usb2 = { .offset = 4, .count = 4, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+ .mbox = {
+ .cmd = 0x68,
+ .data_in = 0x6c,
+ .data_out = 0x70,
+ .owner = 0x74,
+ },
};
+MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
+ { .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
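Taken together, the xhci-tegra.c hunks swap the fixed XUSB_CFG_ARU_MBOX_* addresses for per-SoC offsets carried in the new mbox member, because Tegra194 places the FPCI mailbox block at 0x68..0x74 rather than 0xe4..0xf0. A minimal userspace sketch of the data-driven lookup (the offsets are the patch's values, everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

struct mbox_regs {                      /* mirrors tegra_xusb_mbox_regs */
        uint16_t cmd, data_in, data_out, owner;
};

struct soc_info {
        const char *name;
        struct mbox_regs mbox;
};

static const struct soc_info tegra210 = {
        .name = "tegra210",
        .mbox = { .cmd = 0xe4, .data_in = 0xe8, .data_out = 0xec, .owner = 0xf0 },
};

static const struct soc_info tegra194 = {
        .name = "tegra194",
        .mbox = { .cmd = 0x68, .data_in = 0x6c, .data_out = 0x70, .owner = 0x74 },
};

/* models fpci_readl(tegra, tegra->soc->mbox.owner): the offset is data,
 * not a #define, so one driver binary serves both register layouts */
static void show_owner_offset(const struct soc_info *soc)
{
        printf("%s: MBOX_OWNER at FPCI offset 0x%02x\n",
               soc->name, (unsigned int)soc->mbox.owner);
}

int main(void)
{
        show_owner_offset(&tegra210);
        show_owner_offset(&tegra194);
        return 0;
}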
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 052a269d86f2..56eb867803a6 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -560,6 +560,32 @@ DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
TP_ARGS(portnum, portsc)
);
+DECLARE_EVENT_CLASS(xhci_log_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell),
+ TP_STRUCT__entry(
+ __field(u32, slot)
+ __field(u32, doorbell)
+ ),
+ TP_fast_assign(
+ __entry->slot = slot;
+ __entry->doorbell = doorbell;
+ ),
+ TP_printk("Ring doorbell for %s",
+ xhci_decode_doorbell(__entry->slot, __entry->doorbell)
+ )
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
TP_PROTO(struct dbc_request *req),
TP_ARGS(req),
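The new xhci_log_doorbell class uses the standard tracepoint split: DECLARE_EVENT_CLASS defines the record layout and formatting once, and each DEFINE_EVENT stamps out its own trace_<name>() hook. A hedged kernel-side sketch of firing one (the actual call sites are in hunks not shown here):

/* Illustrative only: DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
 * ...) generates this hook. Only the raw slot/doorbell words are stored in
 * the trace buffer; decoding to text happens lazily on read-out, when
 * TP_printk() calls xhci_decode_doorbell(). */
trace_xhci_ring_ep_doorbell(slot_id, db_value);

At runtime the events can be toggled through tracefs, presumably under events/xhci-hcd/ given this header's TRACE_SYSTEM.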
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6c17e3fe181a..6721d059f58a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -5301,7 +5301,8 @@ static const struct hc_driver xhci_hc_driver = {
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
+ HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f9f88626a57a..dc6f62a4b197 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2580,6 +2580,35 @@ static inline const char *xhci_decode_portsc(u32 portsc)
return str;
}
+static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell)
+{
+ static char str[256];
+ u8 ep;
+ u16 stream;
+ int ret;
+
+ ep = (doorbell & 0xff);
+ stream = doorbell >> 16;
+
+ if (slot == 0) {
+ sprintf(str, "Command Ring %d", doorbell);
+ return str;
+ }
+ ret = sprintf(str, "Slot %d ", slot);
+ if (ep > 0 && ep < 32)
+ ret += sprintf(str + ret, "ep%d%s",
+ ep / 2,
+ ep % 2 ? "in" : "out");
+ else if (ep == 0 || ep < 248)
+ ret += sprintf(str + ret, "Reserved %d", ep);
+ else
+ ret += sprintf(str + ret, "Vendor Defined %d", ep);
+ if (stream)
+ ret += sprintf(str + ret, " Stream %d", stream);
+
+ return str;
+}
+
static inline const char *xhci_ep_state_string(u8 state)
{
switch (state) {
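xhci_decode_doorbell() splits the doorbell word along the xHCI register layout: bits 7:0 select the DB target (1 is EP0, 2..31 the remaining endpoints in out/in pairs, 248 and up vendor defined) and bits 31:16 carry the stream ID. A userspace re-implementation for sanity-checking the layout, using the accumulating ret += writes:

#include <stdio.h>

static const char *decode_doorbell(unsigned int slot, unsigned int doorbell,
                                   char *str)
{
        unsigned int ep = doorbell & 0xff;
        unsigned int stream = doorbell >> 16;
        int ret;

        if (slot == 0) {
                sprintf(str, "Command Ring %u", doorbell);
                return str;
        }
        ret = sprintf(str, "Slot %u ", slot);
        if (ep > 0 && ep < 32)
                ret += sprintf(str + ret, "ep%u%s", ep / 2,
                               ep % 2 ? "in" : "out");
        else if (ep == 0 || ep < 248)
                ret += sprintf(str + ret, "Reserved %u", ep);
        else
                ret += sprintf(str + ret, "Vendor Defined %u", ep);
        if (stream)
                ret += sprintf(str + ret, " Stream %u", stream);
        return str;
}

int main(void)
{
        char buf[256];

        /* DB target 3 = endpoint 1, IN direction; stream ID 5 */
        puts(decode_doorbell(1, 0x00050003, buf)); /* Slot 1 ep1in Stream 5 */
        puts(decode_doorbell(0, 0, buf));          /* Command Ring 0 */
        return 0;
}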
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 7a6b122c833f..360416680e82 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -566,7 +566,6 @@ static int
mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
- int err = 0;
int res;
MTS_DEBUG_GOT_HERE();
@@ -613,7 +612,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback
}
out:
- return err;
+ return 0;
}
static DEF_SCSI_QCMD(mts_scsi_queuecommand)
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 320fc4739835..579a21bd70ad 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1032,8 +1032,6 @@ static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
urb->status = -EOVERFLOW;
else if (FROM_DW3_CERR(ptd->dw3))
urb->status = -EPIPE; /* Stall */
- else if (ptd->dw3 & DW3_ERROR_BIT)
- urb->status = -EPROTO; /* XactErr */
else
urb->status = -EPROTO; /* Unknown */
/*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 9bce583aada3..834b2494da73 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -181,8 +181,8 @@ config USB_TEST
including sample test device firmware and "how to use it".
config USB_EHSET_TEST_FIXTURE
- tristate "USB EHSET Test Fixture driver"
- help
+ tristate "USB EHSET Test Fixture driver"
+ help
Say Y here if you want to support the special test fixture device
used for the USB-IF Embedded Host High-Speed Electrical Test procedure.
@@ -233,17 +233,17 @@ config USB_HUB_USB251XB
Say Y or M here if you need to configure such a device via SMBus.
config USB_HSIC_USB3503
- tristate "USB3503 HSIC to USB20 Driver"
- depends on I2C
- select REGMAP_I2C
- help
- This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
+ tristate "USB3503 HSIC to USB20 Driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
config USB_HSIC_USB4604
- tristate "USB4604 HSIC to USB20 Driver"
- depends on I2C
- help
- This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
+ tristate "USB4604 HSIC to USB20 Driver"
+ depends on I2C
+ help
+ This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
config USB_LINK_LAYER_TEST
tristate "USB Link Layer Test driver"
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ac92725458b5..ba1eaabc7796 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
- brightness = pdata->msgdata[1];
+ if (retval < 2) {
+ if (retval >= 0)
+ retval = -EMSGSIZE;
+ } else {
+ brightness = pdata->msgdata[1];
+ }
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
@@ -299,6 +304,7 @@ error:
if (pdata) {
if (pdata->urb) {
usb_kill_urb(pdata->urb);
+ cancel_delayed_work_sync(&pdata->work);
if (pdata->urbdata)
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
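The appledisplay fix rests on usb_control_msg() returning either a negative errno or the number of bytes actually transferred: a short reply used to leave retval non-negative, so stale buffer contents were returned as a valid brightness. A standalone model of the corrected triage (all values made up):

#include <errno.h>
#include <stdio.h>

static int fake_control_msg(int behaviour)      /* byte count or -errno */
{
        return behaviour;
}

static int get_brightness(int behaviour)
{
        unsigned char msgdata[2] = { 0x00, 0x42 };
        int retval = fake_control_msg(behaviour);

        if (retval < 2) {
                /* short transfers are not errnos; promote them to one */
                if (retval >= 0)
                        retval = -EMSGSIZE;
                return retval;
        }
        return msgdata[1];      /* trust the buffer only after a full reply */
}

int main(void)
{
        printf("full reply:  %d\n", get_brightness(2));      /* 66 */
        printf("short reply: %d\n", get_brightness(1));      /* -EMSGSIZE */
        printf("stall:       %d\n", get_brightness(-EPIPE)); /* -EPIPE */
        return 0;
}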
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 34e6cd6f40d3..87067c3d6109 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
!dev->reading,
(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
- if (result < 0)
+ if (result < 0) {
+ usb_kill_urb(dev->urb);
goto out;
+ }
- if (result == 0)
+ if (result == 0) {
result = -ETIMEDOUT;
- else
+ usb_kill_urb(dev->urb);
+ } else {
result = dev->valid;
+ }
out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
static int chaoskey_resume(struct usb_interface *interface)
{
+ struct chaoskey *dev;
+ struct usb_device *udev = interface_to_usbdev(interface);
+
usb_dbg(interface, "resume");
+ dev = usb_get_intfdata(interface);
+
+ /*
+ * We may have lost power. In that case a device that
+ * needs a long time for its first requests needs the
+ * extended timeout again.
+ */
+ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
+ dev->reads_started = false;
+
return 0;
}
#else
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cdee3af33ad7..8a3d9c0c8d8b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -333,7 +333,8 @@ static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi)
*respond->result = -ESHUTDOWN;
*respond->value = 0;
complete(&respond->wait_completion);
- } mutex_unlock(&ftdi->u132_lock);
+ }
+ mutex_unlock(&ftdi->u132_lock);
}
static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi)
@@ -763,7 +764,8 @@ static int ftdi_elan_total_command_size(struct usb_ftdi *ftdi, int command_size)
struct u132_command *command = &ftdi->command[COMMAND_MASK &
i++];
total_size += 5 + command->follows;
- } return total_size;
+ }
+ return total_size;
}
static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 20b0f91a5d9b..4afb5ddfd361 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -56,11 +56,10 @@ static const struct usb_device_id idmouse_table[] = {
#define FTIP_SCROLL 0x24
#define ftip_command(dev, command, value, index) \
- usb_control_msg (dev->udev, usb_sndctrlpipe (dev->udev, 0), command, \
+ usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)
MODULE_DEVICE_TABLE(usb, idmouse_table);
-static DEFINE_MUTEX(open_disc_mutex);
/* structure to hold all of our device specific stuff */
struct usb_idmouse {
@@ -158,8 +157,8 @@ static int idmouse_create_image(struct usb_idmouse *dev)
/* loop over a blocking bulk read to get data from the device */
while (bytes_read < IMGSIZE) {
- result = usb_bulk_msg (dev->udev,
- usb_rcvbulkpipe (dev->udev, dev->bulk_in_endpointAddr),
+ result = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
dev->bulk_in_buffer + bytes_read,
dev->bulk_in_size, &bulk_read, 5000);
if (result < 0) {
@@ -223,21 +222,17 @@ static int idmouse_open(struct inode *inode, struct file *file)
int result;
/* get the interface from minor number and driver information */
- interface = usb_find_interface (&idmouse_driver, iminor (inode));
+ interface = usb_find_interface(&idmouse_driver, iminor(inode));
if (!interface)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&open_disc_mutex);
+ if (!dev)
return -ENODEV;
- }
/* lock this device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* check if already open */
if (dev->open) {
@@ -251,7 +246,7 @@ static int idmouse_open(struct inode *inode, struct file *file)
result = usb_autopm_get_interface(interface);
if (result)
goto error;
- result = idmouse_create_image (dev);
+ result = idmouse_create_image(dev);
usb_autopm_put_interface(interface);
if (result)
goto error;
@@ -280,27 +275,17 @@ static int idmouse_release(struct inode *inode, struct file *file)
if (dev == NULL)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* lock our device */
mutex_lock(&dev->lock);
- /* are we really open? */
- if (dev->open <= 0) {
- mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
- return -ENODEV;
- }
-
--dev->open;
if (!dev->present) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
idmouse_delete(dev);
} else {
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
}
return 0;
}
@@ -379,7 +364,6 @@ static int idmouse_probe(struct usb_interface *interface,
if (result) {
/* something prevented us from registering this device */
dev_err(&interface->dev, "Unable to allocate minor number.\n");
- usb_set_intfdata(interface, NULL);
idmouse_delete(dev);
return result;
}
@@ -392,19 +376,13 @@ static int idmouse_probe(struct usb_interface *interface,
static void idmouse_disconnect(struct usb_interface *interface)
{
- struct usb_idmouse *dev;
-
- /* get device structure */
- dev = usb_get_intfdata(interface);
+ struct usb_idmouse *dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata(interface, NULL);
/* lock the device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* prevent device read, write and ioctl */
dev->present = 0;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 23061f1526b4..ab4b98b04115 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -157,19 +157,19 @@ MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms");
#define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD
struct tower_reset_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
-} __attribute__ ((packed));
+};
struct tower_get_version_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
__u8 major;
__u8 minor;
- __le16 build_no; /* little-endian */
-} __attribute__ ((packed));
+ __le16 build_no;
+};
/* table of devices that work with this driver */
@@ -178,7 +178,7 @@ static const struct usb_device_id tower_table[] = {
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, tower_table);
+MODULE_DEVICE_TABLE(usb, tower_table);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -186,13 +186,13 @@ MODULE_DEVICE_TABLE (usb, tower_table);
/* Structure to hold all of our device specific stuff */
struct lego_usb_tower {
struct mutex lock; /* locks this structure */
- struct usb_device* udev; /* save off the usb device pointer */
+ struct usb_device *udev; /* save off the usb device pointer */
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
unsigned long disconnected:1;
- char* read_buffer;
+ char *read_buffer;
size_t read_buffer_length; /* this much came in */
size_t read_packet_length; /* this much will be returned on read */
spinlock_t read_buffer_lock;
@@ -202,16 +202,15 @@ struct lego_usb_tower {
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
- char* interrupt_in_buffer;
- struct usb_endpoint_descriptor* interrupt_in_endpoint;
- struct urb* interrupt_in_urb;
+ char *interrupt_in_buffer;
+ struct usb_endpoint_descriptor *interrupt_in_endpoint;
+ struct urb *interrupt_in_urb;
int interrupt_in_interval;
- int interrupt_in_running;
int interrupt_in_done;
- char* interrupt_out_buffer;
- struct usb_endpoint_descriptor* interrupt_out_endpoint;
- struct urb* interrupt_out_urb;
+ char *interrupt_out_buffer;
+ struct usb_endpoint_descriptor *interrupt_out_endpoint;
+ struct urb *interrupt_out_urb;
int interrupt_out_interval;
int interrupt_out_busy;
@@ -219,21 +218,20 @@ struct lego_usb_tower {
/* local function prototypes */
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos);
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
-static inline void tower_delete (struct lego_usb_tower *dev);
-static int tower_open (struct inode *inode, struct file *file);
-static int tower_release (struct inode *inode, struct file *file);
-static __poll_t tower_poll (struct file *file, poll_table *wait);
-static loff_t tower_llseek (struct file *file, loff_t off, int whence);
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos);
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
+static inline void tower_delete(struct lego_usb_tower *dev);
+static int tower_open(struct inode *inode, struct file *file);
+static int tower_release(struct inode *inode, struct file *file);
+static __poll_t tower_poll(struct file *file, poll_table *wait);
+static loff_t tower_llseek(struct file *file, loff_t off, int whence);
-static void tower_abort_transfers (struct lego_usb_tower *dev);
-static void tower_check_for_read_packet (struct lego_usb_tower *dev);
-static void tower_interrupt_in_callback (struct urb *urb);
-static void tower_interrupt_out_callback (struct urb *urb);
+static void tower_check_for_read_packet(struct lego_usb_tower *dev);
+static void tower_interrupt_in_callback(struct urb *urb);
+static void tower_interrupt_out_callback(struct urb *urb);
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id);
-static void tower_disconnect (struct usb_interface *interface);
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id);
+static void tower_disconnect(struct usb_interface *interface);
/* file operations needed when we register this driver */
@@ -288,23 +286,23 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
/**
* tower_delete
*/
-static inline void tower_delete (struct lego_usb_tower *dev)
+static inline void tower_delete(struct lego_usb_tower *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
- kfree (dev->read_buffer);
- kfree (dev->interrupt_in_buffer);
- kfree (dev->interrupt_out_buffer);
+ kfree(dev->read_buffer);
+ kfree(dev->interrupt_in_buffer);
+ kfree(dev->interrupt_out_buffer);
usb_put_dev(dev->udev);
- kfree (dev);
+ kfree(dev);
}
/**
* tower_open
*/
-static int tower_open (struct inode *inode, struct file *file)
+static int tower_open(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev = NULL;
int subminor;
@@ -314,7 +312,6 @@ static int tower_open (struct inode *inode, struct file *file)
int result;
reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
-
if (!reset_reply) {
retval = -ENOMEM;
goto exit;
@@ -323,8 +320,7 @@ static int tower_open (struct inode *inode, struct file *file)
nonseekable_open(inode, file);
subminor = iminor(inode);
- interface = usb_find_interface (&tower_driver, subminor);
-
+ interface = usb_find_interface(&tower_driver, subminor);
if (!interface) {
pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
@@ -351,15 +347,15 @@ static int tower_open (struct inode *inode, struct file *file)
}
/* reset the tower */
- result = usb_control_msg (dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- LEGO_USB_TOWER_REQUEST_RESET,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- reset_reply,
- sizeof(*reset_reply),
- 1000);
+ result = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ LEGO_USB_TOWER_REQUEST_RESET,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ reset_reply,
+ sizeof(*reset_reply),
+ 1000);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
@@ -370,24 +366,22 @@ static int tower_open (struct inode *inode, struct file *file)
/* initialize in direction */
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
- usb_fill_int_urb (dev->interrupt_in_urb,
- dev->udev,
- usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
- dev->interrupt_in_buffer,
- usb_endpoint_maxp(dev->interrupt_in_endpoint),
- tower_interrupt_in_callback,
- dev,
- dev->interrupt_in_interval);
-
- dev->interrupt_in_running = 1;
+ usb_fill_int_urb(dev->interrupt_in_urb,
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
+ dev->interrupt_in_buffer,
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
+ tower_interrupt_in_callback,
+ dev,
+ dev->interrupt_in_interval);
+
dev->interrupt_in_done = 0;
mb();
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
- dev->interrupt_in_running = 0;
goto unlock_exit;
}
@@ -407,13 +401,12 @@ exit:
/**
* tower_release
*/
-static int tower_release (struct inode *inode, struct file *file)
+static int tower_release(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev;
int retval = 0;
dev = file->private_data;
-
if (dev == NULL) {
retval = -ENODEV;
goto exit;
@@ -421,56 +414,32 @@ static int tower_release (struct inode *inode, struct file *file)
mutex_lock(&dev->lock);
- if (dev->open_count != 1) {
- dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
- __func__);
- retval = -ENODEV;
- goto unlock_exit;
- }
-
if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy) {
- wait_event_interruptible_timeout (dev->write_wait, !dev->interrupt_out_busy, 2 * HZ);
+ wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy,
+ 2 * HZ);
}
- tower_abort_transfers (dev);
+
+ /* shutdown transfers */
+ usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
+
dev->open_count = 0;
-unlock_exit:
mutex_unlock(&dev->lock);
exit:
return retval;
}
-
-/**
- * tower_abort_transfers
- * aborts transfers and frees associated data structures
- */
-static void tower_abort_transfers (struct lego_usb_tower *dev)
-{
- if (dev == NULL)
- return;
-
- /* shutdown transfer */
- if (dev->interrupt_in_running) {
- dev->interrupt_in_running = 0;
- mb();
- usb_kill_urb(dev->interrupt_in_urb);
- }
- if (dev->interrupt_out_busy)
- usb_kill_urb(dev->interrupt_out_urb);
-}
-
-
/**
* tower_check_for_read_packet
*
@@ -479,23 +448,23 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
* until it has been there unchanged for at least
* dev->packet_timeout_jiffies, or until the buffer is full.
*/
-static void tower_check_for_read_packet (struct lego_usb_tower *dev)
+static void tower_check_for_read_packet(struct lego_usb_tower *dev)
{
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
if (!packet_timeout
|| time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies)
|| dev->read_buffer_length == read_buffer_size) {
dev->read_packet_length = dev->read_buffer_length;
}
dev->interrupt_in_done = 0;
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
}
/**
* tower_poll
*/
-static __poll_t tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll(struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
__poll_t mask = 0;
@@ -509,12 +478,10 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
poll_wait(file, &dev->write_wait, wait);
tower_check_for_read_packet(dev);
- if (dev->read_packet_length > 0) {
+ if (dev->read_packet_length > 0)
mask |= EPOLLIN | EPOLLRDNORM;
- }
- if (!dev->interrupt_out_busy) {
+ if (!dev->interrupt_out_busy)
mask |= EPOLLOUT | EPOLLWRNORM;
- }
return mask;
}
@@ -523,7 +490,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
/**
* tower_llseek
*/
-static loff_t tower_llseek (struct file *file, loff_t off, int whence)
+static loff_t tower_llseek(struct file *file, loff_t off, int whence)
{
return -ESPIPE; /* unseekable */
}
@@ -532,7 +499,7 @@ static loff_t tower_llseek (struct file *file, loff_t off, int whence)
/**
* tower_read
*/
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_read;
@@ -551,7 +518,6 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -561,21 +527,19 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
goto unlock_exit;
}
- if (read_timeout) {
+ if (read_timeout)
timeout = jiffies + msecs_to_jiffies(read_timeout);
- }
/* wait for data */
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
while (dev->read_packet_length == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies);
- if (retval < 0) {
+ if (retval < 0)
goto unlock_exit;
- }
/* reset read timeout during read or write activity */
if (read_timeout
@@ -583,28 +547,27 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* check for read timeout */
- if (read_timeout && time_after (jiffies, timeout)) {
+ if (read_timeout && time_after(jiffies, timeout)) {
retval = -ETIMEDOUT;
goto unlock_exit;
}
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
}
/* copy the data from read_buffer into userspace */
bytes_to_read = min(count, dev->read_packet_length);
- if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) {
+ if (copy_to_user(buffer, dev->read_buffer, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
dev->read_buffer_length -= bytes_to_read;
dev->read_packet_length -= bytes_to_read;
- for (i=0; i<dev->read_buffer_length; i++) {
+ for (i = 0; i < dev->read_buffer_length; i++)
dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read];
- }
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
retval = bytes_to_read;
@@ -620,7 +583,7 @@ exit:
/**
* tower_write
*/
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_write;
@@ -637,7 +600,6 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -653,10 +615,10 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
retval = -EAGAIN;
goto unlock_exit;
}
- retval = wait_event_interruptible (dev->write_wait, !dev->interrupt_out_busy);
- if (retval) {
+ retval = wait_event_interruptible(dev->write_wait,
+ !dev->interrupt_out_busy);
+ if (retval)
goto unlock_exit;
- }
}
/* write the data into interrupt_out_buffer from userspace */
@@ -664,7 +626,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n",
__func__, count, bytes_to_write);
- if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) {
+ if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
goto unlock_exit;
}
@@ -682,7 +644,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev->interrupt_out_busy = 1;
wmb();
- retval = usb_submit_urb (dev->interrupt_out_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval) {
dev->interrupt_out_busy = 0;
dev_err(&dev->udev->dev,
@@ -703,7 +665,7 @@ exit:
/**
* tower_interrupt_in_callback
*/
-static void tower_interrupt_in_callback (struct urb *urb)
+static void tower_interrupt_in_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -729,9 +691,9 @@ static void tower_interrupt_in_callback (struct urb *urb)
if (urb->actual_length > 0) {
spin_lock_irqsave(&dev->read_buffer_lock, flags);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
- memcpy (dev->read_buffer + dev->read_buffer_length,
- dev->interrupt_in_buffer,
- urb->actual_length);
+ memcpy(dev->read_buffer + dev->read_buffer_length,
+ dev->interrupt_in_buffer,
+ urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev->read_last_arrival = jiffies;
dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
@@ -744,25 +706,21 @@ static void tower_interrupt_in_callback (struct urb *urb)
}
resubmit:
- /* resubmit if we're still running */
- if (dev->interrupt_in_running) {
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
- if (retval)
- dev_err(&dev->udev->dev,
- "%s: usb_submit_urb failed (%d)\n",
- __func__, retval);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
+ if (retval) {
+ dev_err(&dev->udev->dev, "%s: usb_submit_urb failed (%d)\n",
+ __func__, retval);
}
-
exit:
dev->interrupt_in_done = 1;
- wake_up_interruptible (&dev->read_wait);
+ wake_up_interruptible(&dev->read_wait);
}
/**
* tower_interrupt_out_callback
*/
-static void tower_interrupt_out_callback (struct urb *urb)
+static void tower_interrupt_out_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -790,48 +748,27 @@ static void tower_interrupt_out_callback (struct urb *urb)
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
- struct lego_usb_tower *dev = NULL;
+ struct lego_usb_tower *dev;
struct tower_get_version_reply *get_version_reply = NULL;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
-
- dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
-
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
-
dev->udev = usb_get_dev(udev);
- dev->open_count = 0;
- dev->disconnected = 0;
-
- dev->read_buffer = NULL;
- dev->read_buffer_length = 0;
- dev->read_packet_length = 0;
- spin_lock_init (&dev->read_buffer_lock);
+ spin_lock_init(&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
-
- init_waitqueue_head (&dev->read_wait);
- init_waitqueue_head (&dev->write_wait);
-
- dev->interrupt_in_buffer = NULL;
- dev->interrupt_in_endpoint = NULL;
- dev->interrupt_in_urb = NULL;
- dev->interrupt_in_running = 0;
- dev->interrupt_in_done = 0;
-
- dev->interrupt_out_buffer = NULL;
- dev->interrupt_out_endpoint = NULL;
- dev->interrupt_out_urb = NULL;
- dev->interrupt_out_busy = 0;
+ init_waitqueue_head(&dev->read_wait);
+ init_waitqueue_head(&dev->write_wait);
result = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
@@ -843,16 +780,16 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
goto error;
}
- dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
+ dev->read_buffer = kmalloc(read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
- dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
+ dev->interrupt_in_buffer = kmalloc(usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
- dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
+ dev->interrupt_out_buffer = kmalloc(write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -862,22 +799,21 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
-
if (!get_version_reply) {
retval = -ENOMEM;
goto error;
}
/* get the firmware version and log it */
- result = usb_control_msg (udev,
- usb_rcvctrlpipe(udev, 0),
- LEGO_USB_TOWER_REQUEST_GET_VERSION,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- get_version_reply,
- sizeof(*get_version_reply),
- 1000);
+ result = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ LEGO_USB_TOWER_REQUEST_GET_VERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ get_version_reply,
+ sizeof(*get_version_reply),
+ 1000);
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
@@ -892,10 +828,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
le16_to_cpu(get_version_reply->build_no));
/* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
+ usb_set_intfdata(interface, dev);
+ retval = usb_register_dev(interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
@@ -924,17 +859,17 @@ error:
*
* Called by the usb core when the device is removed from the system.
*/
-static void tower_disconnect (struct usb_interface *interface)
+static void tower_disconnect(struct usb_interface *interface)
{
struct lego_usb_tower *dev;
int minor;
- dev = usb_get_intfdata (interface);
+ dev = usb_get_intfdata(interface);
minor = dev->minor;
/* give back our minor and prevent further open() */
- usb_deregister_dev (interface, &tower_class);
+ usb_deregister_dev(interface, &tower_class);
/* stop I/O */
usb_poison_urb(dev->interrupt_in_urb);
@@ -945,7 +880,7 @@ static void tower_disconnect (struct usb_interface *interface)
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
} else {
dev->disconnected = 1;
/* wake up pollers */
@@ -962,6 +897,4 @@ module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
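Dropping the interrupt_in_running flag works because tower_disconnect() uses usb_poison_urb(): unlike usb_kill_urb(), poisoning makes every subsequent usb_submit_urb() fail outright, so a completion handler that resubmits unconditionally winds itself down. A userspace model of that semantic (not kernel code):

#include <errno.h>
#include <stdio.h>

struct urb_model {
        int poisoned;
};

static int submit(struct urb_model *u)          /* models usb_submit_urb() */
{
        return u->poisoned ? -EPERM : 0;
}

static void completion_cb(struct urb_model *u, int *resubmits)
{
        if (submit(u) == 0)                     /* resubmit unconditionally */
                (*resubmits)++;
        /* on failure the real callback just logs and returns */
}

int main(void)
{
        struct urb_model urb = { 0 };
        int resubmits = 0;

        completion_cb(&urb, &resubmits);        /* normal cycle: resubmitted */
        urb.poisoned = 1;                       /* disconnect(): poison */
        completion_cb(&urb, &resubmits);        /* now fails cleanly */
        printf("resubmits: %d\n", resubmits);   /* prints 1 */
        return 0;
}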
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 9b632ab24f03..c16121276a21 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -4,7 +4,7 @@ config USB_SISUSBVGA
tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
depends on (USB_MUSB_HDRC || USB_EHCI_HCD)
select FONT_SUPPORT if USB_SISUSBVGA_CON
- ---help---
+ ---help---
Say Y here if you intend to attach a USB2VGA dongle based on a
Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 6ca9111d150a..10c9e7f6273e 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
@@ -26,10 +27,6 @@
#define USB251XB_ADDR_PRODUCT_ID_LSB 0x02
#define USB251XB_ADDR_PRODUCT_ID_MSB 0x03
-#define USB251XB_DEF_PRODUCT_ID_12 0x2512 /* USB2512B/12Bi */
-#define USB251XB_DEF_PRODUCT_ID_13 0x2513 /* USB2513B/13Bi */
-#define USB251XB_DEF_PRODUCT_ID_14 0x2514 /* USB2514B/14Bi */
-#define USB251XB_DEF_PRODUCT_ID_17 0x2517 /* USB2517/17i */
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
@@ -74,7 +71,6 @@
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
-#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi/7i"
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
@@ -116,6 +112,7 @@
struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
+ struct regulator *vdd;
u8 skip_config;
struct gpio_desc *gpio_reset;
u16 vendor_id;
@@ -159,6 +156,14 @@ struct usb251xb_data {
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
+static const struct usb251xb_data usb2422_data = {
+ .product_id = 0x2422,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
+ .product_str = "USB2422",
+};
+
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
.port_cnt = 2,
@@ -261,20 +266,19 @@ static int usb251x_check_gpio_chip(struct usb251xb *hub)
}
#endif
-static void usb251xb_reset(struct usb251xb *hub, int state)
+static void usb251xb_reset(struct usb251xb *hub)
{
if (!hub->gpio_reset)
return;
i2c_lock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
- gpiod_set_value_cansleep(hub->gpio_reset, state);
+ gpiod_set_value_cansleep(hub->gpio_reset, 1);
+ usleep_range(1, 10); /* >=1us RESET_N asserted */
+ gpiod_set_value_cansleep(hub->gpio_reset, 0);
/* wait for hub recovery/stabilization */
- if (!state)
- usleep_range(500, 750); /* >=500us at power on */
- else
- usleep_range(1, 10); /* >=1us at power down */
+ usleep_range(500, 750); /* >=500us after RESET_N deasserted */
i2c_unlock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
}
@@ -292,7 +296,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
@@ -342,7 +346,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
@@ -420,6 +424,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return err;
}
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -593,6 +601,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
static const struct of_device_id usb251xb_of_match[] = {
{
+ .compatible = "microchip,usb2422",
+ .data = &usb2422_data,
+ }, {
.compatible = "microchip,usb2512b",
.data = &usb2512b_data,
}, {
@@ -665,6 +676,10 @@ static int usb251xb_probe(struct usb251xb *hub)
if (err)
return err;
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
@@ -692,7 +707,31 @@ static int usb251xb_i2c_probe(struct i2c_client *i2c,
return usb251xb_probe(hub);
}
+static int __maybe_unused usb251xb_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+
+ return regulator_disable(hub->vdd);
+}
+
+static int __maybe_unused usb251xb_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+ int err;
+
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
+ return usb251xb_connect(hub);
+}
+
+static SIMPLE_DEV_PM_OPS(usb251xb_pm_ops, usb251xb_suspend, usb251xb_resume);
+
static const struct i2c_device_id usb251xb_id[] = {
+ { "usb2422", 0 },
{ "usb2512b", 0 },
{ "usb2512bi", 0 },
{ "usb2513b", 0 },
@@ -709,6 +748,7 @@ static struct i2c_driver usb251xb_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(usb251xb_of_match),
+ .pm = &usb251xb_pm_ops,
},
.probe = usb251xb_i2c_probe,
.id_table = usb251xb_id,
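The new PM hooks cut VDD over system sleep and rebuild the hub's register file on resume through usb251xb_connect(), since the part loses its configuration without power. A plain devm_regulator_get() hands back a dummy supply when the device tree names no vdd-supply, so the enable/disable pair stays balanced and harmless on boards without a switchable rail. A condensed sketch of the pattern (names from the patch, error handling trimmed):

static int example_suspend(struct device *dev)
{
        struct usb251xb *hub = i2c_get_clientdata(to_i2c_client(dev));

        return regulator_disable(hub->vdd);     /* hub core loses power */
}

static int example_resume(struct device *dev)
{
        struct usb251xb *hub = i2c_get_clientdata(to_i2c_client(dev));
        int err = regulator_enable(hub->vdd);

        if (err)
                return err;
        /* configuration was lost with VDD: rewrite it and re-attach */
        return usb251xb_connect(hub);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);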
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 4da216c99726..2be182bd793a 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -153,6 +153,15 @@ static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
}
+static void ep0_do_status_stage(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+}
+
static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
@@ -297,8 +306,7 @@ static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
ep0_load_test_packet(mtu);
/* send status before entering test mode. */
- value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
- mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+ ep0_do_status_stage(mtu);
/* wait for ACK status sent by host */
readl_poll_timeout_atomic(mbase + U3D_EP0CSR, value,
@@ -632,7 +640,6 @@ __acquires(mtu->lock)
{
struct usb_ctrlrequest setup;
struct mtu3_request *mreq;
- void __iomem *mbase = mtu->mac_base;
int handled = 0;
ep0_read_setup(mtu, &setup);
@@ -664,14 +671,19 @@ finish:
if (mtu->test_mode) {
; /* nothing to do */
} else if (handled == USB_GADGET_DELAYED_STATUS) {
- /* handle the delay STATUS phase till receive ep_queue on ep0 */
- mtu->delayed_status = true;
- } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
- mtu3_writel(mbase, U3D_EP0CSR,
- (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
- | EP0_SETUPPKTRDY | EP0_DATAEND);
+ mreq = next_ep0_request(mtu);
+ if (mreq) {
+ /* already asked us to continue delayed status */
+ ep0_do_status_stage(mtu);
+ ep0_req_giveback(mtu, &mreq->request);
+ } else {
+ /* defer the STATUS stage until an ep0_queue() arrives */
+ mtu->delayed_status = true;
+ }
+ } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+ ep0_do_status_stage(mtu);
/* complete zlp request directly */
mreq = next_ep0_request(mtu);
if (mreq && !mreq->request.length)
@@ -802,12 +814,9 @@ static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
}
if (mtu->delayed_status) {
- u32 csr;
mtu->delayed_status = false;
- csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
- csr |= EP0_SETUPPKTRDY | EP0_DATAEND;
- mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+ ep0_do_status_stage(mtu);
/* needn't giveback the request for handling delay STATUS */
return 0;
}
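ep0_do_status_stage() folds three copies of one read-modify-write into a helper, and the mask step is the subtle part: U3D_EP0CSR carries write-1-to-clear status bits, so writing back the 1s just read would acknowledge pending events as a side effect. The & EP0_W1C_BITS filter (the mtu3 headers define it to strip exactly those bits) keeps the RMW inert. A generic userspace model of the hazard:

#include <stdint.h>
#include <stdio.h>

#define W1C_STATUS 0xf0u        /* hypothetical write-1-to-clear field */
#define SETUPRDY   0x01u        /* hypothetical command bits */
#define DATAEND    0x02u

static uint8_t csr = 0x30;      /* two status events currently pending */

static void hw_write(uint8_t v) /* hardware W1C semantics */
{
        csr = (uint8_t)((csr & ~(v & W1C_STATUS)) | (v & ~W1C_STATUS));
}

int main(void)
{
        /* safe RMW: strip the W1C field before or-ing in command bits */
        hw_write((uint8_t)((csr & ~W1C_STATUS) | SETUPRDY | DATAEND));

        printf("pending events survive: 0x%02x\n",
               (unsigned int)(csr & W1C_STATUS));       /* 0x30 */
        printf("command bits set:       0x%02x\n",
               (unsigned int)(csr & 0x0fu));            /* 0x03 */
        return 0;
}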
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bd63450af76a..15cca912c53e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2431,14 +2431,12 @@ static int musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq_byname(pdev, "mc");
- struct resource *iomem;
void __iomem *base;
if (irq <= 0)
return -ENODEV;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, iomem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
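This musb hunk, like the phy-keystone, phy-mxs and renesas_usbhs hunks below and the xhci-tegra one above, folds the platform_get_resource()/devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which the driver core defines as exactly that two-call sequence. The equivalence:

/* before: fetch the MEM resource, then map it with devres tracking */
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base = devm_ioremap_resource(&pdev->dev, iomem);

/* after: one call, same semantics, same ERR_PTR() on failure */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
        return PTR_ERR(base);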
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index f42858e2b54c..7b6281ab62ed 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -325,7 +325,7 @@ void musb_init_debugfs(struct musb *musb)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(musb->controller), NULL);
+ root = debugfs_create_dir(dev_name(musb->controller), usb_debug_root);
musb->debugfs_root = root;
debugfs_create_file("regdump", S_IRUGO, root, musb, &musb_regdump_fops);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 327d4f7baaf7..88923175f71e 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -411,7 +411,7 @@ static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
char buf[128];
sprintf(buf, "%s.dsps", dev_name(musb->controller));
- root = debugfs_create_dir(buf, NULL);
+ root = debugfs_create_dir(buf, usb_debug_root);
glue->dbgfs_root = root;
glue->regset.regs = dsps_musb_regs;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ffe462a657b1..f62ffaede1ab 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1085,7 +1085,6 @@ static int musb_gadget_disable(struct usb_ep *ep)
u8 epnum;
struct musb_ep *musb_ep;
void __iomem *epio;
- int status = 0;
musb_ep = to_musb_ep(ep);
musb = musb_ep->musb;
@@ -1118,7 +1117,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
musb_dbg(musb, "%s", musb_ep->end_point.name);
- return status;
+ return 0;
}
/*
@@ -1316,7 +1315,7 @@ done:
}
/*
- * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
* data but will queue requests.
*
* exported to ep0 code
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 19871266312d..110e6e9ad621 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -66,15 +66,13 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_usbphy *k_phy;
- struct resource *res;
int ret;
k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
if (!k_phy)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- k_phy->phy_ctrl = devm_ioremap_resource(dev, res);
+ k_phy->phy_ctrl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 70b8c8248caf..67b39dc62b37 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -710,7 +710,6 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
static int mxs_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *base;
struct clk *clk;
struct mxs_phy *mxs_phy;
@@ -723,8 +722,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index a3c30b609433..d438b7871446 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -590,7 +590,7 @@ static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
- struct resource *res, *irq_res;
+ struct resource *irq_res;
struct device *dev = &pdev->dev;
int ret, gpio;
u32 tmp;
@@ -619,8 +619,7 @@ static int usbhs_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 0824099b905e..ef1735d014da 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -161,11 +161,12 @@ struct usbhs_priv;
#define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
#define VALID (1 << 3) /* USB Request Receive */
-#define DVSQ_MASK (0x3 << 4) /* Device State */
+#define DVSQ_MASK (0x7 << 4) /* Device State */
#define POWER_STATE (0 << 4)
#define DEFAULT_STATE (1 << 4)
#define ADDRESS_STATE (2 << 4)
#define CONFIGURATION_STATE (3 << 4)
+#define SUSPENDED_STATE (4 << 4)
#define CTSQ_MASK (0x7) /* Control Transfer Stage */
#define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 86637cd066cf..01c6a48c41bc 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1273,11 +1273,11 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
*/
snprintf(name, sizeof(name), "ch%d", channel);
if (channel & 1) {
- fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->tx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->tx_chan))
fifo->tx_chan = NULL;
} else {
- fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->rx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->rx_chan))
fifo->rx_chan = NULL;
}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 10fc65596014..b98112cefaa4 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -169,17 +169,7 @@ void usbhs_mod_remove(struct usbhs_priv *priv)
*/
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state)
{
- int state = irq_state->intsts0 & DVSQ_MASK;
-
- switch (state) {
- case POWER_STATE:
- case DEFAULT_STATE:
- case ADDRESS_STATE:
- case CONFIGURATION_STATE:
- return state;
- }
-
- return -EIO;
+ return (int)irq_state->intsts0 & DVSQ_MASK;
}
int usbhs_status_get_ctrl_stage(struct usbhs_irq_state *irq_state)
@@ -348,10 +338,6 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
* usbhs_interrupt
*/
- /*
- * it don't enable DVSE (intenb0) here
- * but "mod->irq_dev_state" will be called.
- */
if (info->irq_vbus)
intenb0 |= VBSE;
@@ -362,6 +348,9 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_ctrl_stage)
intenb0 |= CTRE;
+ if (mod->irq_dev_state)
+ intenb0 |= DVSE;
+
if (mod->irq_empty && mod->irq_bempsts) {
usbhs_write(priv, BEMPENB, mod->irq_bempsts);
intenb0 |= BEMPE;
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index cd38d74b3223..53489cafecc1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -457,12 +457,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
- dev_dbg(dev, "state = %x : speed : %d\n",
- usbhs_status_get_device_state(irq_state),
- gpriv->gadget.speed);
+ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
+
+ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
+ (state & SUSPENDED_STATE)) {
+ if (gpriv->driver && gpriv->driver->suspend)
+ gpriv->driver->suspend(&gpriv->gadget);
+ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
+ }
return 0;
}
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 94b4e7db2b94..8273126ffdf4 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -175,6 +175,27 @@ void usb_role_switch_put(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
+/**
+ * usb_role_switch_find_by_fwnode - Find USB role switch with its fwnode
+ * @fwnode: fwnode of the USB Role Switch
+ *
+ * Finds and returns the role switch registered with @fwnode. The reference
+ * count for the found switch is incremented.
+ */
+struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = class_find_device_by_fwnode(role_class, fwnode);
+
+ return dev ? to_role_switch(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
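A hypothetical consumer of the new lookup might pair it with usb_role_switch_put() like this (a sketch; dev stands for a device whose fwnode was registered with the switch):

	struct usb_role_switch *sw;

	sw = usb_role_switch_find_by_fwnode(dev_fwnode(dev));
	if (sw) {
		usb_role_switch_set_role(sw, USB_ROLE_DEVICE);
		usb_role_switch_put(sw);	/* drop the lookup reference */
	}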
+
static umode_t
usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 67279c6bce33..ed4a18b435a0 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -269,19 +269,19 @@ config USB_SERIAL_F8153X
config USB_SERIAL_GARMIN
- tristate "USB Garmin GPS driver"
- help
- Say Y here if you want to connect to your Garmin GPS.
- Should work with most Garmin GPS devices which have a native USB port.
+ tristate "USB Garmin GPS driver"
+ help
+ Say Y here if you want to connect to your Garmin GPS.
+ Should work with most Garmin GPS devices which have a native USB port.
- See <http://sourceforge.net/projects/garmin-gps> for the latest
- version of the driver.
+ See <http://sourceforge.net/projects/garmin-gps> for the latest
+ version of the driver.
- To compile this driver as a module, choose M here: the
- module will be called garmin_gps.
+ To compile this driver as a module, choose M here: the
+ module will be called garmin_gps.
config USB_SERIAL_IPW
- tristate "USB IPWireless (3G UMTS TDD) Driver"
+ tristate "USB IPWireless (3G UMTS TDD) Driver"
select USB_SERIAL_WWAN
help
Say Y here if you want to use an IPWireless USB modem such as
@@ -341,20 +341,20 @@ config USB_SERIAL_KLSI
module will be called kl5kusb105.
config USB_SERIAL_KOBIL_SCT
- tristate "USB KOBIL chipcard reader"
- ---help---
- Say Y here if you want to use one of the following KOBIL USB chipcard
- readers:
-
- - USB TWIN
- - KAAN Standard Plus
- - KAAN SIM
- - SecOVID Reader Plus
- - B1 Professional
- - KAAN Professional
-
- Note that you need a current CT-API.
- To compile this driver as a module, choose M here: the
+ tristate "USB KOBIL chipcard reader"
+ ---help---
+ Say Y here if you want to use one of the following KOBIL USB chipcard
+ readers:
+
+ - USB TWIN
+ - KAAN Standard Plus
+ - KAAN SIM
+ - SecOVID Reader Plus
+ - B1 Professional
+ - KAAN Professional
+
+ Note that you need a current CT-API.
+ To compile this driver as a module, choose M here: the
module will be called kobil_sct.
config USB_SERIAL_MCT_U232
@@ -458,7 +458,7 @@ config USB_SERIAL_OTI6858
tristate "USB Ours Technology Inc. OTi-6858 USB To RS232 Bridge Controller"
help
Say Y here if you want to use the OTi-6858 single port USB to serial
- converter device.
+ converter device.
To compile this driver as a module, choose M here: the
module will be called oti6858.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 3bb1fff02bed..df582fe855f0 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -48,12 +48,6 @@
#define CH341_BIT_DCD 0x08
#define CH341_BITS_MODEM_STAT 0x0f /* all bits */
-/*******************************/
-/* baudrate calculation factor */
-/*******************************/
-#define CH341_BAUDBASE_FACTOR 1532620800
-#define CH341_BAUDBASE_DIVMAX 3
-
/* Break support - the information used to implement this was gleaned from
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
@@ -144,37 +138,96 @@ static int ch341_control_in(struct usb_device *dev,
return 0;
}
+#define CH341_CLKRATE 48000000
+#define CH341_CLK_DIV(ps, fact) (1 << (12 - 3 * (ps) - (fact)))
+#define CH341_MIN_RATE(ps) (CH341_CLKRATE / (CH341_CLK_DIV((ps), 1) * 512))
+
+static const speed_t ch341_min_rates[] = {
+ CH341_MIN_RATE(0),
+ CH341_MIN_RATE(1),
+ CH341_MIN_RATE(2),
+ CH341_MIN_RATE(3),
+};
+
+/*
+ * The device line speed is given by the following equation:
+ *
+ * baudrate = 48000000 / (2^(12 - 3 * ps - fact) * div), where
+ *
+ * 0 <= ps <= 3,
+ * 0 <= fact <= 1,
+ * 2 <= div <= 256 if fact = 0, or
+ * 9 <= div <= 256 if fact = 1
+ */
+static int ch341_get_divisor(speed_t speed)
+{
+ unsigned int fact, div, clk_div;
+ int ps;
+
+ /*
+ * Clamp to supported range, this makes the (ps < 0) and (div < 2)
+ * sanity checks below redundant.
+ */
+ speed = clamp(speed, 46U, 3000000U);
+
+ /*
+ * Start with highest possible base clock (fact = 1) that will give a
+ * divisor strictly less than 512.
+ */
+ fact = 1;
+ for (ps = 3; ps >= 0; ps--) {
+ if (speed > ch341_min_rates[ps])
+ break;
+ }
+
+ if (ps < 0)
+ return -EINVAL;
+
+ /* Determine corresponding divisor, rounding down. */
+ clk_div = CH341_CLK_DIV(ps, fact);
+ div = CH341_CLKRATE / (clk_div * speed);
+
+ /* Halve base clock (fact = 0) if required. */
+ if (div < 9 || div > 255) {
+ div /= 2;
+ clk_div *= 2;
+ fact = 0;
+ }
+
+ if (div < 2)
+ return -EINVAL;
+
+ /*
+ * Pick next divisor if resulting rate is closer to the requested one,
+ * scale up to avoid rounding errors on low rates.
+ */
+ if (16 * CH341_CLKRATE / (clk_div * div) - 16 * speed >=
+ 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
+ div++;
+
+ return (0x100 - div) << 8 | fact << 2 | ps;
+}
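A worked example of the search above, for a requested rate of 115200 (a standalone sketch, not part of the patch):

	/* 115200 > CH341_MIN_RATE(3) = 23437, so ps = 3 with fact = 1 */
	unsigned int clk_div = 1 << (12 - 3 * 3 - 1);	/* = 4 */
	unsigned int div = 48000000 / (4 * 115200);	/* = 104, within [9, 255] */

	/*
	 * 48000000 / (4 * 104) = 115384 is closer to 115200 than div = 105
	 * would give, so the divisor stays 104 and the register value is
	 * (0x100 - 104) << 8 | 1 << 2 | 3 = 0x9807.
	 */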
+
static int ch341_set_baudrate_lcr(struct usb_device *dev,
struct ch341_private *priv, u8 lcr)
{
- short a;
+ int val;
int r;
- unsigned long factor;
- short divisor;
if (!priv->baud_rate)
return -EINVAL;
- factor = (CH341_BAUDBASE_FACTOR / priv->baud_rate);
- divisor = CH341_BAUDBASE_DIVMAX;
-
- while ((factor > 0xfff0) && divisor) {
- factor >>= 3;
- divisor--;
- }
- if (factor > 0xfff0)
+ val = ch341_get_divisor(priv->baud_rate);
+ if (val < 0)
return -EINVAL;
- factor = 0x10000 - factor;
- a = (factor & 0xff00) | divisor;
-
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
*/
- a |= BIT(7);
+ val |= BIT(7);
- r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, val);
if (r)
return r;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 979bef9bfb6b..f5143eedbc48 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 25e81faf4c24..9ad44a96dfe3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* U-Blox devices */
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 22d66217cb41..e8373528264c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1558,3 +1558,10 @@
*/
#define UNJO_VID 0x22B7
#define UNJO_ISODEBUG_V1_PID 0x150D
+
+/*
+ * U-Blox products (http://www.u-blox.com).
+ */
+#define UBLOX_VID 0x1546
+#define UBLOX_C099F9P_ZED_PID 0x0502
+#define UBLOX_C099F9P_ODIN_PID 0x0503
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 18110225d506..2ec4eeacebc7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial)
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
-
if (product == MOSCHIP_DEVICE_ID_7715) {
struct urb *urb = serial->port[0]->interrupt_in_urb;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a698d46ba773..23f91d658cb4 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -89,17 +89,10 @@
/* For higher baud Rates use TIOCEXBAUD */
#define TIOCEXBAUD 0x5462
-/* vendor id and device id defines */
-
-/* The native mos7840/7820 component */
-#define USB_VENDOR_ID_MOSCHIP 0x9710
-#define MOSCHIP_DEVICE_ID_7840 0x7840
-#define MOSCHIP_DEVICE_ID_7843 0x7843
-#define MOSCHIP_DEVICE_ID_7820 0x7820
-#define MOSCHIP_DEVICE_ID_7810 0x7810
-/* The native component can have its vendor/device id's overridden
- * in vendor-specific implementations. Such devices can be handled
- * by making a change here, in id_table.
+/*
+ * Vendor id and device id defines
+ *
+ * NOTE: Do not add new defines, add entries directly to the id_table instead.
*/
#define USB_VENDOR_ID_BANDB 0x0856
#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
@@ -116,14 +109,6 @@
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
-/* This driver also supports
- * ATEN UC2324 device using Moschip MCS7840
- * ATEN UC2322 device using Moschip MCS7820
- */
-#define USB_VENDOR_ID_ATENINTL 0x0557
-#define ATENINTL_DEVICE_ID_UC2324 0x2011
-#define ATENINTL_DEVICE_ID_UC2322 0x7820
-
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
@@ -171,30 +156,37 @@
#define LED_OFF_MS 500
enum mos7840_flag {
- MOS7840_FLAG_CTRL_BUSY,
MOS7840_FLAG_LED_BUSY,
};
+#define MCS_PORT_MASK GENMASK(2, 0)
+#define MCS_PORTS(nr) ((nr) & MCS_PORT_MASK)
+#define MCS_LED BIT(3)
+
+#define MCS_DEVICE(vid, pid, flags) \
+ USB_DEVICE((vid), (pid)), .driver_info = (flags)
+
static const struct usb_device_id id_table[] = {
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7843)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
+ { MCS_DEVICE(0x0557, 0x2011, MCS_PORTS(4)) }, /* ATEN UC2324 */
+ { MCS_DEVICE(0x0557, 0x7820, MCS_PORTS(2)) }, /* ATEN UC2322 */
+ { MCS_DEVICE(0x110a, 0x2210, MCS_PORTS(2)) }, /* Moxa UPort 2210 */
+ { MCS_DEVICE(0x9710, 0x7810, MCS_PORTS(1) | MCS_LED) }, /* ASIX MCS7810 */
+ { MCS_DEVICE(0x9710, 0x7820, MCS_PORTS(2)) }, /* MosChip MCS7820 */
+ { MCS_DEVICE(0x9710, 0x7840, MCS_PORTS(4)) }, /* MosChip MCS7840 */
+ { MCS_DEVICE(0x9710, 0x7843, MCS_PORTS(3)) }, /* ASIX MCS7840 3 port */
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
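The port count and LED capability packed by MCS_DEVICE() are unpacked later during probe; roughly (sketch):

	unsigned long device_flags = id->driver_info;
	int num_ports = MCS_PORTS(device_flags);	/* bits 2:0: 1..4 ports */
	bool has_led = device_flags & MCS_LED;		/* bit 3: MCS7810 only */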
@@ -206,19 +198,12 @@ struct moschip_port {
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
- char open;
- char open_ports;
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
__u8 SpRegOffset;
__u8 ControlRegOffset;
__u8 DcrRegOffset;
- /* for processing control URBS in interrupt context */
- struct urb *control_urb;
- struct usb_ctrlrequest *dr;
- char *ctrl_buf;
- int MsrLsr;
spinlock_t pool_lock;
struct urb *write_urb_pool[NUM_URBS];
@@ -360,150 +345,11 @@ static void mos7840_dump_serial_port(struct usb_serial_port *port,
/************************************************************************/
/************************************************************************/
-/* I N T E R F A C E F U N C T I O N S */
-/* I N T E R F A C E F U N C T I O N S */
-/************************************************************************/
-/************************************************************************/
-
-static inline void mos7840_set_port_private(struct usb_serial_port *port,
- struct moschip_port *data)
-{
- usb_set_serial_port_data(port, (void *)data);
-}
-
-static inline struct moschip_port *mos7840_get_port_private(struct
- usb_serial_port
- *port)
-{
- return (struct moschip_port *)usb_get_serial_port_data(port);
-}
-
-static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
-{
- struct moschip_port *mos7840_port;
- struct async_icount *icount;
- mos7840_port = port;
- if (new_msr &
- (MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR | MOS_MSR_DELTA_RI |
- MOS_MSR_DELTA_CD)) {
- icount = &mos7840_port->port->icount;
-
- /* update input line counters */
- if (new_msr & MOS_MSR_DELTA_CTS)
- icount->cts++;
- if (new_msr & MOS_MSR_DELTA_DSR)
- icount->dsr++;
- if (new_msr & MOS_MSR_DELTA_CD)
- icount->dcd++;
- if (new_msr & MOS_MSR_DELTA_RI)
- icount->rng++;
-
- wake_up_interruptible(&port->port->port.delta_msr_wait);
- }
-}
-
-static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
-{
- struct async_icount *icount;
-
- if (new_lsr & SERIAL_LSR_BI) {
- /*
- * Parity and Framing errors only count if they
- * occur exclusive of a break being
- * received.
- */
- new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
- }
-
- /* update input line counters */
- icount = &port->port->icount;
- if (new_lsr & SERIAL_LSR_BI)
- icount->brk++;
- if (new_lsr & SERIAL_LSR_OE)
- icount->overrun++;
- if (new_lsr & SERIAL_LSR_PE)
- icount->parity++;
- if (new_lsr & SERIAL_LSR_FE)
- icount->frame++;
-}
-
-/************************************************************************/
-/************************************************************************/
/* U S B C A L L B A C K F U N C T I O N S */
/* U S B C A L L B A C K F U N C T I O N S */
/************************************************************************/
/************************************************************************/
-static void mos7840_control_callback(struct urb *urb)
-{
- unsigned char *data;
- struct moschip_port *mos7840_port;
- struct device *dev = &urb->dev->dev;
- __u8 regval = 0x0;
- int status = urb->status;
-
- mos7840_port = urb->context;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
- goto out;
- default:
- dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- goto out;
- }
-
- dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
- if (urb->actual_length < 1)
- goto out;
-
- dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
- mos7840_port->MsrLsr, mos7840_port->port_num);
- data = urb->transfer_buffer;
- regval = (__u8) data[0];
- dev_dbg(dev, "%s data is %x\n", __func__, regval);
- if (mos7840_port->MsrLsr == 0)
- mos7840_handle_new_msr(mos7840_port, regval);
- else if (mos7840_port->MsrLsr == 1)
- mos7840_handle_new_lsr(mos7840_port, regval);
-out:
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
-}
-
-static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
- __u16 *val)
-{
- struct usb_device *dev = mcs->port->serial->dev;
- struct usb_ctrlrequest *dr = mcs->dr;
- unsigned char *buffer = mcs->ctrl_buf;
- int ret;
-
- if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
- return -EBUSY;
-
- dr->bRequestType = MCS_RD_RTYPE;
- dr->bRequest = MCS_RDREQ;
- dr->wValue = cpu_to_le16(Wval); /* 0 */
- dr->wIndex = cpu_to_le16(reg);
- dr->wLength = cpu_to_le16(2);
-
- usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
- (unsigned char *)dr, buffer, 2,
- mos7840_control_callback, mcs);
- mcs->control_urb->transfer_buffer_length = 2;
- ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
- if (ret)
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
-
- return ret;
-}
-
static void mos7840_set_led_callback(struct urb *urb)
{
switch (urb->status) {
@@ -580,146 +426,6 @@ static void mos7840_led_activity(struct usb_serial_port *port)
}
/*****************************************************************************
- * mos7840_interrupt_callback
- * this is the callback function for when we have received data on the
- * interrupt endpoint.
- *****************************************************************************/
-
-static void mos7840_interrupt_callback(struct urb *urb)
-{
- int result;
- int length;
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
- __u16 Data;
- unsigned char *data;
- __u8 sp[5];
- int i, rv = 0;
- __u16 wval, wreg = 0;
- int status = urb->status;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
- __func__, status);
- return;
- default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
- __func__, status);
- goto exit;
- }
-
- length = urb->actual_length;
- data = urb->transfer_buffer;
-
- serial = urb->context;
-
- /* Moschip get 5 bytes
- * Byte 1 IIR Port 1 (port.number is 0)
- * Byte 2 IIR Port 2 (port.number is 1)
- * Byte 3 IIR Port 3 (port.number is 2)
- * Byte 4 IIR Port 4 (port.number is 3)
- * Byte 5 FIFO status for both */
-
- if (length > 5) {
- dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n");
- return;
- }
-
- sp[0] = (__u8) data[0];
- sp[1] = (__u8) data[1];
- sp[2] = (__u8) data[2];
- sp[3] = (__u8) data[3];
-
- for (i = 0; i < serial->num_ports; i++) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
- wval = ((__u16)serial->port[i]->port_number + 1) << 8;
- if (mos7840_port->open) {
- if (sp[i] & 0x01) {
- dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
- } else {
- switch (sp[i] & 0x0f) {
- case SERIAL_IIR_RLS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Receiver status error or \n", i);
- dev_dbg(&urb->dev->dev, "address bit detected in 9-bit mode\n");
- mos7840_port->MsrLsr = 1;
- wreg = LINE_STATUS_REGISTER;
- break;
- case SERIAL_IIR_MS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Modem status change\n", i);
- mos7840_port->MsrLsr = 0;
- wreg = MODEM_STATUS_REGISTER;
- break;
- }
- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
- }
- }
- }
- if (!(rv < 0))
- /* the completion handler for the control urb will resubmit */
- return;
-exit:
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result) {
- dev_err(&urb->dev->dev,
- "%s - Error %d submitting interrupt urb\n",
- __func__, result);
- }
-}
-
-static int mos7840_port_paranoia_check(struct usb_serial_port *port,
- const char *function)
-{
- if (!port) {
- pr_debug("%s - port == NULL\n", function);
- return -1;
- }
- if (!port->serial) {
- pr_debug("%s - port->serial == NULL\n", function);
- return -1;
- }
-
- return 0;
-}
-
-/* Inline functions to check the sanity of a pointer that is passed to us */
-static int mos7840_serial_paranoia_check(struct usb_serial *serial,
- const char *function)
-{
- if (!serial) {
- pr_debug("%s - serial == NULL\n", function);
- return -1;
- }
- if (!serial->type) {
- pr_debug("%s - serial->type == NULL!\n", function);
- return -1;
- }
-
- return 0;
-}
-
-static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
- const char *function)
-{
- /* if no port was specified, or it fails a paranoia check */
- if (!port ||
- mos7840_port_paranoia_check(port, function) ||
- mos7840_serial_paranoia_check(port->serial, function)) {
- /* then say that we don't have a valid usb_serial thing,
- * which will end up genrating -ENODEV return values */
- return NULL;
- }
-
- return port->serial;
-}
-
-/*****************************************************************************
* mos7840_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -727,35 +433,18 @@ static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
static void mos7840_bulk_in_callback(struct urb *urb)
{
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int retval;
unsigned char *data;
- struct usb_serial *serial;
- struct usb_serial_port *port;
- struct moschip_port *mos7840_port;
int status = urb->status;
- mos7840_port = urb->context;
- if (!mos7840_port)
- return;
-
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
mos7840_port->read_urb_busy = false;
return;
}
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__)) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
data = urb->transfer_buffer;
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
@@ -767,12 +456,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
}
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!\n");
- mos7840_port->read_urb_busy = false;
- return;
- }
-
if (mos7840_port->has_led)
mos7840_led_activity(port);
@@ -793,14 +476,12 @@ static void mos7840_bulk_in_callback(struct urb *urb)
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
- struct moschip_port *mos7840_port;
- struct usb_serial_port *port;
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int status = urb->status;
unsigned long flags;
int i;
- mos7840_port = urb->context;
- port = mos7840_port->port;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
@@ -815,11 +496,7 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
return;
}
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port->open)
- tty_port_tty_wakeup(&port->port);
+ tty_port_tty_wakeup(&port->port);
}
@@ -836,32 +513,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int response;
int j;
- struct usb_serial *serial;
struct urb *urb;
__u16 Data;
int status;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -ENODEV;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -ENODEV;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- port0->open_ports++;
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
@@ -1012,41 +673,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
- /* Check to see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the structures *
- * were not set up at that time.) */
- if (port0->open_ports == 1) {
- /* FIXME: Buffer never NULL, so URB is not submitted. */
- if (serial->port[0]->interrupt_in_buffer == NULL) {
- /* set up interrupt urb */
- usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
- serial->dev,
- usb_rcvintpipe(serial->dev,
- serial->port[0]->interrupt_in_endpointAddress),
- serial->port[0]->interrupt_in_buffer,
- serial->port[0]->interrupt_in_urb->
- transfer_buffer_length,
- mos7840_interrupt_callback,
- serial,
- serial->port[0]->interrupt_in_urb->interval);
-
- /* start interrupt read for mos7840 */
- response =
- usb_submit_urb(serial->port[0]->interrupt_in_urb,
- GFP_KERNEL);
- if (response) {
- dev_err(&port->dev, "%s - Error %d submitting "
- "interrupt urb\n", __func__, response);
- }
-
- }
-
- }
-
- /* see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the *
- * structures were not set up at that time.) */
-
dev_dbg(&port->dev, "port number is %d\n", port->port_number);
dev_dbg(&port->dev, "minor number is %d\n", port->minor);
dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
@@ -1086,9 +712,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
- /* send a open port command */
- mos7840_port->open = 1;
- /* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
err:
@@ -1115,17 +738,10 @@ err:
static int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int chars = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return 0;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return 0;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1147,25 +763,10 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
static void mos7840_close(struct usb_serial_port *port)
{
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int j;
__u16 Data;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return;
-
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7840_port->write_urb_pool[j]);
@@ -1180,22 +781,11 @@ static void mos7840_close(struct usb_serial_port *port)
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
- port0->open_ports--;
- dev_dbg(&port->dev, "%s in close%d\n", __func__, port0->open_ports);
- if (port0->open_ports == 0) {
- if (serial->port[0]->interrupt_in_urb) {
- dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
- usb_kill_urb(serial->port[0]->interrupt_in_urb);
- }
- }
-
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
-
- mos7840_port->open = 0;
}
/*****************************************************************************
@@ -1205,21 +795,8 @@ static void mos7840_close(struct usb_serial_port *port)
static void mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned char data;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
@@ -1244,17 +821,10 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
static int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int room = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1280,29 +850,17 @@ static int mos7840_write_room(struct tty_struct *tty)
static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int status;
int i;
int bytes_sent = 0;
int transfer_size;
unsigned long flags;
-
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
struct urb *urb;
/* __u16 Data; */
const unsigned char *current_position = data;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- serial = port->serial;
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
-
/* try to find a free urb in the list */
urb = NULL;
@@ -1383,22 +941,9 @@ exit:
static void mos7840_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s", "port not opened\n");
- return;
- }
-
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
@@ -1425,19 +970,8 @@ static void mos7840_throttle(struct tty_struct *tty)
static void mos7840_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
@@ -1460,15 +994,10 @@ static void mos7840_unthrottle(struct tty_struct *tty)
static int mos7840_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
unsigned int result;
__u16 msr;
__u16 mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
if (status < 0)
@@ -1493,15 +1022,10 @@ static int mos7840_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned int mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
-
/* FIXME: What locks the port registers ? */
mcr = mos7840_port->shadowMCR;
if (clear & TIOCM_RTS)
@@ -1578,21 +1102,11 @@ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
int baudRate)
{
+ struct usb_serial_port *port = mos7840_port->port;
int divisor = 0;
int status;
__u16 Data;
__u16 clk_sel_val;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return -1;
-
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return -1;
dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate);
/* reset clk_uart_sel in spregOffset */
@@ -1681,6 +1195,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
static void mos7840_change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7840_port, struct ktermios *old_termios)
{
+ struct usb_serial_port *port = mos7840_port->port;
int baud;
unsigned cflag;
__u8 lData;
@@ -1688,23 +1203,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
__u8 lStop;
int status;
__u16 Data;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return;
-
- port = mos7840_port->port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
@@ -1839,37 +1337,13 @@ static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!!!\n");
- return;
- }
-
if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1916,7 +1390,7 @@ static int mos7840_get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
ss->type = PORT_16550A;
ss->line = mos7840_port->port->minor;
@@ -1939,15 +1413,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -1;
switch (cmd) {
/* return number of bytes available */
@@ -1962,6 +1427,13 @@ static int mos7840_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+/*
+ * Check if GPO (pin 42) is connected to GPI (pin 33) as recommended by ASIX
+ * for MCS7810 by bit-banging a 16-bit word.
+ *
+ * Note that GPO is really RTS of the third port so this will toggle RTS of
+ * port two or three on two- and four-port devices.
+ */
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
@@ -2019,16 +1491,12 @@ static int mos7810_check(struct usb_serial *serial)
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ unsigned long device_flags = id->driver_info;
u8 *buf;
- int device_type;
- if (product == MOSCHIP_DEVICE_ID_7810 ||
- product == MOSCHIP_DEVICE_ID_7820 ||
- product == MOSCHIP_DEVICE_ID_7843) {
- device_type = product;
+ /* Skip device-type detection if we already have device flags. */
+ if (device_flags)
goto out;
- }
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
@@ -2040,15 +1508,15 @@ static int mos7840_probe(struct usb_serial *serial,
/* For a MCS7840 device GPIO0 must be set to 1 */
if (buf[0] & 0x01)
- device_type = MOSCHIP_DEVICE_ID_7840;
+ device_flags = MCS_PORTS(4);
else if (mos7810_check(serial))
- device_type = MOSCHIP_DEVICE_ID_7810;
+ device_flags = MCS_PORTS(1) | MCS_LED;
else
- device_type = MOSCHIP_DEVICE_ID_7820;
+ device_flags = MCS_PORTS(2);
kfree(buf);
out:
- usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2056,19 +1524,10 @@ out:
static int mos7840_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
- int device_type = (unsigned long)usb_get_serial_data(serial);
- int num_ports;
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
+ int num_ports = MCS_PORTS(device_flags);
- if (device_type == MOSCHIP_DEVICE_ID_7843)
- num_ports = 3;
- else
- num_ports = (device_type >> 4) & 0x000F;
-
- /*
- * num_ports is currently never zero as device_type is one of
- * MOSCHIP_DEVICE_ID_78{1,2,4}0.
- */
- if (num_ports == 0)
+ if (num_ports == 0 || num_ports > 4)
return -ENODEV;
if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) {
@@ -2079,10 +1538,27 @@ static int mos7840_calc_num_ports(struct usb_serial *serial,
return num_ports;
}
+static int mos7840_attach(struct usb_serial *serial)
+{
+ struct device *dev = &serial->interface->dev;
+ int status;
+ u16 val;
+
+ /* Zero Length flag enable */
+ val = 0x0f;
+ status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val);
+ if (status < 0)
+ dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
+ else
+ dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status);
+
+ return status;
+}
+
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- int device_type = (unsigned long)usb_get_serial_data(serial);
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
@@ -2103,7 +1579,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
* common to all port */
mos7840_port->port = port;
- mos7840_set_port_private(port, mos7840_port);
spin_lock_init(&mos7840_port->pool_lock);
/* minor is not initialised until later by
@@ -2129,14 +1604,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->DcrRegOffset = 0x16 + 3 * (phy_num - 2);
}
mos7840_dump_serial_port(port, mos7840_port);
- mos7840_set_port_private(port, mos7840_port);
+ usb_set_serial_port_data(port, mos7840_port);
/* enable rx_disable bit in control register */
status = mos7840_get_reg_sync(port,
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
Data |= 0x08; /* setting driver done bit */
@@ -2148,7 +1623,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
@@ -2159,7 +1634,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
@@ -2168,7 +1643,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
@@ -2177,7 +1652,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
@@ -2186,7 +1661,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
@@ -2203,7 +1678,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
@@ -2217,7 +1692,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
} else {
@@ -2229,27 +1704,16 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
}
- mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
- mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
- mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
- if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
- !mos7840_port->dr) {
- status = -ENOMEM;
- goto error;
- }
- mos7840_port->has_led = false;
+ mos7840_port->has_led = device_flags & MCS_LED;
/* Initialize LED timers */
- if (device_type == MOSCHIP_DEVICE_ID_7810) {
- mos7840_port->has_led = true;
-
+ if (mos7840_port->has_led) {
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
GFP_KERNEL);
@@ -2269,29 +1733,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
-out:
- if (pnum == serial->num_ports - 1) {
- /* Zero Length flag enable */
- Data = 0x0f;
- status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
- if (status < 0) {
- dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
- goto error;
- } else
- dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
- MOS_WDR_TIMEOUT);
- }
return 0;
error:
kfree(mos7840_port->led_dr);
usb_free_urb(mos7840_port->led_urb);
- kfree(mos7840_port->dr);
- kfree(mos7840_port->ctrl_buf);
- usb_free_urb(mos7840_port->control_urb);
kfree(mos7840_port);
return status;
@@ -2299,9 +1745,7 @@ error:
static int mos7840_port_remove(struct usb_serial_port *port)
{
- struct moschip_port *mos7840_port;
-
- mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (mos7840_port->has_led) {
/* Turn off LED */
@@ -2314,10 +1758,7 @@ static int mos7840_port_remove(struct usb_serial_port *port)
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->led_dr);
}
- usb_kill_urb(mos7840_port->control_urb);
- usb_free_urb(mos7840_port->control_urb);
- kfree(mos7840_port->ctrl_buf);
- kfree(mos7840_port->dr);
+
kfree(mos7840_port);
return 0;
@@ -2340,18 +1781,17 @@ static struct usb_serial_driver moschip7840_4port_device = {
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
+ .attach = mos7840_attach,
.ioctl = mos7840_ioctl,
.get_serial = mos7840_get_serial_info,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
- .read_int_callback = mos7840_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 06ab016be0b6..e9491d400a24 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define DELL_PRODUCT_5821E 0x81d7
+#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
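RSVD(n), defined earlier in option.c, sets a bit in driver_info so the driver refuses to bind interface n (typically because another driver such as qmi_wwan owns it). The probe-time test is roughly the following sketch of the existing option_probe() logic:

	if (device_flags & RSVD(iface_desc->bInterfaceNumber))
		return -ENODEV;	/* reserved interface, leave it alone */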
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9d27b76c5c6e..aab737e1e7b6 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -47,6 +47,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GC) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GT) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GL) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GE) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GS) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
@@ -130,9 +136,11 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define VENDOR_WRITE_REQUEST_TYPE 0x40
#define VENDOR_WRITE_REQUEST 0x01
+#define VENDOR_WRITE_NREQUEST 0x80
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
+#define VENDOR_READ_NREQUEST 0x81
#define UART_STATE_INDEX 8
#define UART_STATE_MSR_MASK 0x8b
@@ -148,11 +156,24 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define PL2303_FLOWCTRL_MASK 0xf0
+#define PL2303_READ_TYPE_HX_STATUS 0x8080
+
+#define PL2303_HXN_RESET_REG 0x07
+#define PL2303_HXN_RESET_UPSTREAM_PIPE 0x02
+#define PL2303_HXN_RESET_DOWNSTREAM_PIPE 0x01
+
+#define PL2303_HXN_FLOWCTRL_REG 0x0a
+#define PL2303_HXN_FLOWCTRL_MASK 0x1c
+#define PL2303_HXN_FLOWCTRL_NONE 0x1c
+#define PL2303_HXN_FLOWCTRL_RTS_CTS 0x18
+#define PL2303_HXN_FLOWCTRL_XON_XOFF 0x0c
+
static void pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
TYPE_01, /* Type 0 and 1 (difference unknown) */
TYPE_HX, /* HX version of the pl2303 chip */
+ TYPE_HXN, /* HXN version of the pl2303 chip */
TYPE_COUNT
};
@@ -184,16 +205,26 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
[TYPE_HX] = {
.max_baud_rate = 12000000,
},
+ [TYPE_HXN] = {
+ .max_baud_rate = 12000000,
+ },
};
static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
unsigned char buf[1])
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_READ_NREQUEST;
+ else
+ request = VENDOR_READ_REQUEST;
+
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ request, VENDOR_READ_REQUEST_TYPE,
value, 0, buf, 1, 100);
if (res != 1) {
dev_err(dev, "%s - failed to read [%04x]: %d\n", __func__,
@@ -211,13 +242,20 @@ static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, index);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_WRITE_NREQUEST;
+ else
+ request = VENDOR_WRITE_REQUEST;
+
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- VENDOR_WRITE_REQUEST, VENDOR_WRITE_REQUEST_TYPE,
+ request, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
if (res) {
dev_err(dev, "%s - failed to write [%04x]: %d\n", __func__,
@@ -230,6 +268,7 @@ static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int ret = 0;
u8 *buf;
@@ -237,7 +276,11 @@ static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
if (!buf)
return -ENOMEM;
- ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ ret = pl2303_vendor_read(serial, reg, buf);
+ else
+ ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+
if (ret)
goto out_free;
@@ -320,6 +363,7 @@ static int pl2303_startup(struct usb_serial *serial)
struct pl2303_serial_private *spriv;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
+ int res;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
@@ -341,26 +385,37 @@ static int pl2303_startup(struct usb_serial *serial)
type = TYPE_01; /* type 1 */
dev_dbg(&serial->interface->dev, "device type: %d\n", type);
+ if (type == TYPE_HX) {
+ res = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ PL2303_READ_TYPE_HX_STATUS, 0, buf, 1, 100);
+ if (res != 1)
+ type = TYPE_HXN;
+ }
+
spriv->type = &pl2303_type_data[type];
spriv->quirks = (unsigned long)usb_get_serial_data(serial);
spriv->quirks |= spriv->type->quirks;
usb_set_serial_data(serial, spriv);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 0);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 1);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_write(serial, 0, 1);
- pl2303_vendor_write(serial, 1, 0);
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
- pl2303_vendor_write(serial, 2, 0x24);
- else
- pl2303_vendor_write(serial, 2, 0x44);
+ if (type != TYPE_HXN) {
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 0);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 1);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_write(serial, 0, 1);
+ pl2303_vendor_write(serial, 1, 0);
+ if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ pl2303_vendor_write(serial, 2, 0x24);
+ else
+ pl2303_vendor_write(serial, 2, 0x44);
+ }
kfree(buf);
@@ -719,14 +774,31 @@ static void pl2303_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty)) {
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ if (spriv->quirks & PL2303_QUIRK_LEGACY) {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x40);
- else
+ } else if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_RTS_CTS);
+ } else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x60);
+ }
} else if (pl2303_enable_xonxoff(tty, spriv->type)) {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_XON_XOFF);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ }
} else {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_NONE);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ }
}
kfree(buf);
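The three-way split above repeats the HXN type test in every branch; an equivalent consolidation could look like this (a sketch only, reusing the existing pl2303_enable_xonxoff() helper, not part of the patch):

	static void pl2303_set_flowctrl(struct usb_serial *serial,
					struct tty_struct *tty)
	{
		struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
		bool hxn = spriv->type == &pl2303_type_data[TYPE_HXN];
		u8 reg = hxn ? PL2303_HXN_FLOWCTRL_REG : 0;
		u8 mask = hxn ? PL2303_HXN_FLOWCTRL_MASK : PL2303_FLOWCTRL_MASK;
		u8 val;

		if (C_CRTSCTS(tty))
			val = hxn ? PL2303_HXN_FLOWCTRL_RTS_CTS :
			      (spriv->quirks & PL2303_QUIRK_LEGACY) ? 0x40 : 0x60;
		else if (pl2303_enable_xonxoff(tty, spriv->type))
			val = hxn ? PL2303_HXN_FLOWCTRL_XON_XOFF : 0xc0;
		else
			val = hxn ? PL2303_HXN_FLOWCTRL_NONE : 0;

		pl2303_update_reg(serial, reg, mask, val);
	}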
@@ -767,8 +839,14 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
- pl2303_vendor_write(serial, 8, 0);
- pl2303_vendor_write(serial, 9, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_vendor_write(serial, PL2303_HXN_RESET_REG,
+ PL2303_HXN_RESET_UPSTREAM_PIPE |
+ PL2303_HXN_RESET_DOWNSTREAM_PIPE);
+ } else {
+ pl2303_vendor_write(serial, 8, 0);
+ pl2303_vendor_write(serial, 9, 0);
+ }
}
/* Setup termios */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index b0175f17d1a2..a019ea7e6e0e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -9,6 +9,12 @@
#define PL2303_VENDOR_ID 0x067b
#define PL2303_PRODUCT_ID 0x2303
#define PL2303_PRODUCT_ID_TB 0x2304
+#define PL2303_PRODUCT_ID_GC 0x23a3
+#define PL2303_PRODUCT_ID_GB 0x23b3
+#define PL2303_PRODUCT_ID_GT 0x23c3
+#define PL2303_PRODUCT_ID_GL 0x23d3
+#define PL2303_PRODUCT_ID_GE 0x23e3
+#define PL2303_PRODUCT_ID_GS 0x23f3
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 54a3c8195c96..66a4dcbbb1fc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -369,8 +369,8 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* check for state-transition errors */
if (us->srb != NULL) {
- printk(KERN_ERR "usb-storage: Error in %s: us->srb = %p\n",
- __func__, us->srb);
+ dev_err(&us->pusb_intf->dev,
+ "Error in %s: us->srb = %p\n", __func__, us->srb);
return SCSI_MLQUEUE_HOST_BUSY;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 34538253f12c..475b9c692827 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -825,6 +825,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->wce_default_on = 1;
}
+ /* Some disks cannot handle READ_CAPACITY_16 */
+ if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
+ sdev->no_read_capacity_16 = 1;
+
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
@@ -834,6 +838,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->fix_capacity = 1;
/*
+ * In some cases we have to guess the capacity.
+ */
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d0bdebd87ce3..1b23741036ee 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -87,12 +87,15 @@ UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
-/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+/*
+ * Initially Reported-by: Takeo Nakayama <javhera@gmx.com>
+ * UAS Ignore Reported-by: Steven Ellis <sellis@redhat.com>
+ */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
"JMS566",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 895e2418de53..b4f2aac7ae8a 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -50,6 +50,17 @@ source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
+config TYPEC_HD3SS3220
+ tristate "TI HD3SS3220 Type-C DRP Port controller driver"
+ depends on I2C
+ depends on USB_ROLE_SWITCH
+ help
+ Say Y or M here if your system has a TI HD3SS3220 Type-C DRP Port
+ controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hd3ss3220.
+
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 6696b7263d61..7753a5c3cd46 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -4,5 +4,6 @@ typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
+obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 94a3eda62add..7ece6ca6e690 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -53,6 +53,7 @@ struct typec_port {
struct typec_mux *mux;
const struct typec_capability *cap;
+ const struct typec_operations *ops;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
@@ -955,7 +956,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EOPNOTSUPP;
}
- if (!port->cap->try_role) {
+ if (!port->ops || !port->ops->try_role) {
dev_dbg(dev, "Setting preferred role not supported\n");
return -EOPNOTSUPP;
}
@@ -968,7 +969,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ret = port->cap->try_role(port->cap, role);
+ ret = port->ops->try_role(port, role);
if (ret)
return ret;
@@ -999,7 +1000,7 @@ static ssize_t data_role_store(struct device *dev,
struct typec_port *port = to_typec_port(dev);
int ret;
- if (!port->cap->dr_set) {
+ if (!port->ops || !port->ops->dr_set) {
dev_dbg(dev, "data role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1014,7 +1015,7 @@ static ssize_t data_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->dr_set(port->cap, ret);
+ ret = port->ops->dr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1049,7 +1050,7 @@ static ssize_t power_role_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->pr_set) {
+ if (!port->ops || !port->ops->pr_set) {
dev_dbg(dev, "power role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1071,7 +1072,7 @@ static ssize_t power_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->pr_set(port->cap, ret);
+ ret = port->ops->pr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1102,7 +1103,8 @@ port_type_store(struct device *dev, struct device_attribute *attr,
int ret;
enum typec_port_type type;
- if (!port->cap->port_type_set || port->cap->type != TYPEC_PORT_DRP) {
+ if (port->cap->type != TYPEC_PORT_DRP ||
+ !port->ops || !port->ops->port_type_set) {
dev_dbg(dev, "changing port type not supported\n");
return -EOPNOTSUPP;
}
@@ -1119,7 +1121,7 @@ port_type_store(struct device *dev, struct device_attribute *attr,
goto unlock_and_ret;
}
- ret = port->cap->port_type_set(port->cap, type);
+ ret = port->ops->port_type_set(port, type);
if (ret)
goto unlock_and_ret;
@@ -1175,7 +1177,7 @@ static ssize_t vconn_source_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->vconn_set) {
+ if (!port->ops || !port->ops->vconn_set) {
dev_dbg(dev, "VCONN swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1184,7 +1186,7 @@ static ssize_t vconn_source_store(struct device *dev,
if (ret)
return ret;
- ret = port->cap->vconn_set(port->cap, (enum typec_role)source);
+ ret = port->ops->vconn_set(port, (enum typec_role)source);
if (ret)
return ret;
@@ -1278,6 +1280,7 @@ static void typec_release(struct device *dev)
ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
+ kfree(port->cap);
kfree(port);
}
@@ -1487,6 +1490,16 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
/* --------------------------------------- */
/**
+ * typec_get_drvdata - Return private driver data pointer
+ * @port: USB Type-C port
+ */
+void *typec_get_drvdata(struct typec_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(typec_get_drvdata);
+
+/**
* typec_port_register_altmode - Register USB Type-C Port Alternate Mode
* @port: USB Type-C Port that supports the alternate mode
* @desc: Description of the alternate mode
@@ -1579,7 +1592,7 @@ struct typec_port *typec_register_port(struct device *parent,
mutex_init(&port->port_type_lock);
port->id = id;
- port->cap = cap;
+ port->ops = cap->ops;
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
@@ -1589,6 +1602,13 @@ struct typec_port *typec_register_port(struct device *parent,
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
+ dev_set_drvdata(&port->dev, cap->driver_data);
+
+ port->cap = kmemdup(cap, sizeof(*cap), GFP_KERNEL);
+ if (!port->cap) {
+ put_device(&port->dev);
+ return ERR_PTR(-ENOMEM);
+ }
port->sw = typec_switch_get(&port->dev);
if (IS_ERR(port->sw)) {
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
new file mode 100644
index 000000000000..323dfa8160ab
--- /dev/null
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TI HD3SS3220 Type-C DRP Port Controller Driver
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/usb/role.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/delay.h>
+
+#define HD3SS3220_REG_CN_STAT_CTRL 0x09
+#define HD3SS3220_REG_GEN_CTRL 0x0A
+#define HD3SS3220_REG_DEV_REV 0xA0
+
+/* Register HD3SS3220_REG_CN_STAT_CTRL */
+#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_UFP BIT(7)
+#define HD3SS3220_REG_CN_STAT_CTRL_TO_ACCESSORY (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
+
+/* Register HD3SS3220_REG_GEN_CTRL */
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
+
+struct hd3ss3220 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_role_switch *role_sw;
+ struct typec_port *port;
+};
+
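+/* Select the DRP source preference (default / Try.SNK / Try.SRC) in GEN_CTRL */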
+static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
+{
+ return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
+ src_pref);
+}
+
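+/* Map the ATTACHED_STATE field of CN_STAT_CTRL to a usb_role (or a negative errno) */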
+static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
+{
+ unsigned int reg_val;
+ enum usb_role attached_state;
+ int ret;
+
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
+ &reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (reg_val & HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK) {
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_DFP:
+ attached_state = USB_ROLE_HOST;
+ break;
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_UFP:
+ attached_state = USB_ROLE_DEVICE;
+ break;
+ default:
+ attached_state = USB_ROLE_NONE;
+ break;
+ }
+
+ return attached_state;
+}
+
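+/*
+ * typec .dr_set callback: bias the DRP toggle towards the requested data
+ * role, then propagate the role to the role switch and the typec class.
+ */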
+static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
+{
+ struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
+ enum usb_role role_val;
+ int pref, ret = 0;
+
+ if (role == TYPEC_HOST) {
+ role_val = USB_ROLE_HOST;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
+ } else {
+ role_val = USB_ROLE_DEVICE;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
+ }
+
+ ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
+ usleep_range(10, 100);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
+ typec_set_data_role(hd3ss3220->port, role);
+
+ return ret;
+}
+
+static const struct typec_operations hd3ss3220_ops = {
+ .dr_set = hd3ss3220_dr_set
+};
+
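+/* Sync the role switch and typec data role with the current attached state */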
+static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
+{
+ enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
+ if (role_state == USB_ROLE_NONE)
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+
+ switch (role_state) {
+ case USB_ROLE_HOST:
+ typec_set_data_role(hd3ss3220->port, TYPEC_HOST);
+ break;
+ case USB_ROLE_DEVICE:
+ typec_set_data_role(hd3ss3220->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
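+/* Handle an attach/detach event and clear the interrupt status bit */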
+static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
+{
+ int err;
+
+ hd3ss3220_set_role(hd3ss3220);
+ err = regmap_update_bits_base(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ NULL, false, true);
+ if (err < 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
+{
+ struct i2c_client *client = to_i2c_client(data);
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ return hd3ss3220_irq(hd3ss3220);
+}
+
+static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0A,
+};
+
+static int hd3ss3220_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct typec_capability typec_cap = { };
+ struct hd3ss3220 *hd3ss3220;
+ struct fwnode_handle *connector;
+ int ret;
+ unsigned int data;
+
+ hd3ss3220 = devm_kzalloc(&client->dev, sizeof(struct hd3ss3220),
+ GFP_KERNEL);
+ if (!hd3ss3220)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, hd3ss3220);
+
+ hd3ss3220->dev = &client->dev;
+ hd3ss3220->regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(hd3ss3220->regmap))
+ return PTR_ERR(hd3ss3220->regmap);
+
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+ connector = device_get_named_child_node(hd3ss3220->dev, "connector");
+ if (!connector)
+ return -ENODEV;
+
+ hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
+ if (IS_ERR(hd3ss3220->role_sw)) {
+ ret = PTR_ERR(hd3ss3220->role_sw);
+ goto err_put_fwnode;
+ }
+
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = hd3ss3220;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.ops = &hd3ss3220_ops;
+ typec_cap.fwnode = connector;
+
+ hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
+ if (IS_ERR(hd3ss3220->port)) {
+ ret = PTR_ERR(hd3ss3220->port);
+ goto err_put_role;
+ }
+
+ hd3ss3220_set_role(hd3ss3220);
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ if (data & HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS) {
+ ret = regmap_write(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ data | HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
+ if (ret < 0)
+ goto err_unreg_port;
+ }
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ hd3ss3220_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hd3ss3220", &client->dev);
+ if (ret)
+ goto err_unreg_port;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ fwnode_handle_put(connector);
+
+ dev_info(&client->dev, "probed revision=0x%x\n", ret);
+
+ return 0;
+err_unreg_port:
+ typec_unregister_port(hd3ss3220->port);
+err_put_role:
+ usb_role_switch_put(hd3ss3220->role_sw);
+err_put_fwnode:
+ fwnode_handle_put(connector);
+
+ return ret;
+}
+
+static int hd3ss3220_remove(struct i2c_client *client)
+{
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ typec_unregister_port(hd3ss3220->port);
+ usb_role_switch_put(hd3ss3220->role_sw);
+
+ return 0;
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "ti,hd3ss3220"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver hd3ss3220_driver = {
+ .driver = {
+ .name = "hd3ss3220",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = hd3ss3220_probe,
+ .remove = hd3ss3220_remove,
+};
+
+module_i2c_driver(hd3ss3220_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das@bp.renesas.com>");
+MODULE_DESCRIPTION("TI HD3SS3220 DRP Port Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 5f61d9977a15..56fc356bc55c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -380,9 +380,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SNK_UNATTACHED;
else if (port->try_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- else if (port->tcpc->config &&
- port->tcpc->config->default_role == TYPEC_SINK)
- return SNK_UNATTACHED;
/* Fall through to return SRC_UNATTACHED */
} else if (port->port_type == TYPEC_PORT_SNK) {
return SNK_UNATTACHED;
@@ -390,12 +387,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SRC_UNATTACHED;
}
-static inline
-struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
-{
- return container_of(cap, struct tcpm_port, typec_caps);
-}
-
static bool tcpm_port_is_disconnected(struct tcpm_port *port)
{
return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
@@ -3970,10 +3961,9 @@ void tcpm_pd_hard_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
-static int tcpm_dr_set(const struct typec_capability *cap,
- enum typec_data_role data)
+static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4038,10 +4028,9 @@ swap_unlock:
return ret;
}
-static int tcpm_pr_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4082,10 +4071,9 @@ swap_unlock:
return ret;
}
-static int tcpm_vconn_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4122,16 +4110,16 @@ swap_unlock:
return ret;
}
-static int tcpm_try_role(const struct typec_capability *cap, int role)
+static int tcpm_try_role(struct typec_port *p, int role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
struct tcpc_dev *tcpc = port->tcpc;
int ret = 0;
mutex_lock(&port->lock);
if (tcpc->try_role)
ret = tcpc->try_role(tcpc, role);
- if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
+ if (!ret)
port->try_role = role;
port->try_src_count = 0;
port->try_snk_count = 0;
@@ -4331,10 +4319,9 @@ static void tcpm_init(struct tcpm_port *port)
tcpm_set_state(port, PORT_RESET, 0);
}
-static int tcpm_port_type_set(const struct typec_capability *cap,
- enum typec_port_type type)
+static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
mutex_lock(&port->lock);
if (type == port->port_type)
@@ -4359,6 +4346,14 @@ port_unlock:
return 0;
}
+static const struct typec_operations tcpm_ops = {
+ .try_role = tcpm_try_role,
+ .dr_set = tcpm_dr_set,
+ .pr_set = tcpm_pr_set,
+ .vconn_set = tcpm_vconn_set,
+ .port_type_set = tcpm_port_type_set
+};
+
void tcpm_tcpc_reset(struct tcpm_port *port)
{
mutex_lock(&port->lock);
@@ -4368,34 +4363,6 @@ void tcpm_tcpc_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
-static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
- unsigned int nr_pdo)
-{
- unsigned int i;
-
- if (nr_pdo > PDO_MAX_OBJECTS)
- nr_pdo = PDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_pdo; i++)
- dest_pdo[i] = src_pdo[i];
-
- return nr_pdo;
-}
-
-static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
- unsigned int nr_vdo)
-{
- unsigned int i;
-
- if (nr_vdo > VDO_MAX_OBJECTS)
- nr_vdo = VDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_vdo; i++)
- dest_vdo[i] = src_vdo[i];
-
- return nr_vdo;
-}
-
static int tcpm_fw_get_caps(struct tcpm_port *port,
struct fwnode_handle *fwnode)
{
@@ -4698,35 +4665,10 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
return PTR_ERR_OR_ZERO(port->psy);
}
-static int tcpm_copy_caps(struct tcpm_port *port,
- const struct tcpc_config *tcfg)
-{
- if (tcpm_validate_caps(port, tcfg->src_pdo, tcfg->nr_src_pdo) ||
- tcpm_validate_caps(port, tcfg->snk_pdo, tcfg->nr_snk_pdo))
- return -EINVAL;
-
- port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcfg->src_pdo,
- tcfg->nr_src_pdo);
- port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcfg->snk_pdo,
- tcfg->nr_snk_pdo);
-
- port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcfg->snk_vdo,
- tcfg->nr_snk_vdo);
-
- port->operating_snk_mw = tcfg->operating_snk_mw;
-
- port->typec_caps.prefer_role = tcfg->default_role;
- port->typec_caps.type = tcfg->type;
- port->typec_caps.data = tcfg->data;
- port->self_powered = tcfg->self_powered;
-
- return 0;
-}
-
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
- int i, err;
+ int err;
if (!dev || !tcpc ||
!tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
@@ -4759,24 +4701,16 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
tcpm_debugfs_init(port);
err = tcpm_fw_get_caps(port, tcpc->fwnode);
- if ((err < 0) && tcpc->config)
- err = tcpm_copy_caps(port, tcpc->config);
if (err < 0)
goto out_destroy_wq;
- if (!tcpc->config || !tcpc->config->try_role_hw)
- port->try_role = port->typec_caps.prefer_role;
- else
- port->try_role = TYPEC_NO_PREFERRED_ROLE;
+ port->try_role = port->typec_caps.prefer_role;
port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
- port->typec_caps.dr_set = tcpm_dr_set;
- port->typec_caps.pr_set = tcpm_pr_set;
- port->typec_caps.vconn_set = tcpm_vconn_set;
- port->typec_caps.try_role = tcpm_try_role;
- port->typec_caps.port_type_set = tcpm_port_type_set;
+ port->typec_caps.driver_data = port;
+ port->typec_caps.ops = &tcpm_ops;
port->partner_desc.identity = &port->partner_ident;
port->port_type = port->typec_caps.type;
@@ -4797,29 +4731,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_role_sw_put;
}
- if (tcpc->config && tcpc->config->alt_modes) {
- const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
-
- i = 0;
- while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
- struct typec_altmode *alt;
-
- alt = typec_port_register_altmode(port->typec_port,
- paltmode);
- if (IS_ERR(alt)) {
- tcpm_log(port,
- "%s: failed to register port alternate mode 0x%x",
- dev_name(dev), paltmode->svid);
- break;
- }
- typec_altmode_set_drvdata(alt, port);
- alt->ops = &tcpm_altmode_ops;
- port->port_altmode[i] = alt;
- i++;
- paltmode++;
- }
- }
-
mutex_lock(&port->lock);
tcpm_init(port);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index a38d1409f15b..0698addd1185 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -94,7 +94,6 @@ struct tps6598x {
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
- struct typec_capability typec_cap;
};
/*
@@ -307,11 +306,10 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return 0;
}
-static int
-tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role)
+static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -338,11 +336,10 @@ out_unlock:
return ret;
}
-static int
-tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role)
+static int tps6598x_pr_set(struct typec_port *port, enum typec_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -369,6 +366,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations tps6598x_ops = {
+ .dr_set = tps6598x_dr_set,
+ .pr_set = tps6598x_pr_set,
+};
+
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
@@ -448,6 +450,7 @@ static const struct regmap_config tps6598x_regmap_config = {
static int tps6598x_probe(struct i2c_client *client)
{
+ struct typec_capability typec_cap = { };
struct tps6598x *tps;
u32 status;
u32 conf;
@@ -492,40 +495,40 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- tps->typec_cap.revision = USB_TYPEC_REV_1_2;
- tps->typec_cap.pd_revision = 0x200;
- tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
- tps->typec_cap.pr_set = tps6598x_pr_set;
- tps->typec_cap.dr_set = tps6598x_dr_set;
+ typec_cap.revision = USB_TYPEC_REV_1_2;
+ typec_cap.pd_revision = 0x200;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = tps;
+ typec_cap.ops = &tps6598x_ops;
switch (TPS_SYSCONF_PORTINFO(conf)) {
case TPS_PORTINFO_SINK_ACCESSORY:
case TPS_PORTINFO_SINK:
- tps->typec_cap.type = TYPEC_PORT_SNK;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_SNK;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_UFP_DRD:
case TPS_PORTINFO_DRP_DFP_DRD:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
break;
case TPS_PORTINFO_DRP_UFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_DFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
case TPS_PORTINFO_SOURCE:
- tps->typec_cap.type = TYPEC_PORT_SRC;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_SRC;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
default:
return -ENODEV;
}
- tps->port = typec_register_port(&client->dev, &tps->typec_cap);
+ tps->port = typec_register_port(&client->dev, &typec_cap);
if (IS_ERR(tps->port))
return PTR_ERR(tps->port);
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d99700cb4dca..d4d5189edfb8 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -48,7 +48,8 @@ struct ucsi_dp {
static int ucsi_displayport_enter(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ struct ucsi *ucsi = dp->con->ucsi;
+ u64 command;
u8 cur = 0;
int ret;
@@ -59,25 +60,21 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dev_warn(&p->dev,
"firmware doesn't support alternate mode overriding\n");
- mutex_unlock(&dp->con->lock);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto err_unlock;
}
- UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
+ ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (dp->con->ucsi->ppm->data->version > 0x0100) {
- mutex_unlock(&dp->con->lock);
- return ret;
- }
+ if (ucsi->version > 0x0100)
+ goto err_unlock;
cur = 0xff;
}
if (cur != 0xff) {
- mutex_unlock(&dp->con->lock);
- if (dp->con->port_altmode[cur] == alt)
- return 0;
- return -EBUSY;
+ ret = dp->con->port_altmode[cur] == alt ? 0 : -EBUSY;
+ goto err_unlock;
}
/*
@@ -94,16 +91,17 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dp->vdo_size = 1;
schedule_work(&dp->work);
-
+ ret = 0;
+err_unlock:
mutex_unlock(&dp->con->lock);
- return 0;
+ return ret;
}
static int ucsi_displayport_exit(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ u64 command;
int ret = 0;
mutex_lock(&dp->con->lock);
@@ -117,8 +115,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
goto out_unlock;
}
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
+ ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
if (ret < 0)
goto out_unlock;
@@ -172,14 +170,14 @@ static int ucsi_displayport_status_update(struct ucsi_dp *dp)
static int ucsi_displayport_configure(struct ucsi_dp *dp)
{
u32 pins = DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
- struct ucsi_control ctrl;
+ u64 command;
if (!dp->override)
return 0;
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
- return ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
}
static int ucsi_displayport_vdm(struct typec_altmode *alt,
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 1dabafb74320..48ad1dc1b1b2 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -33,17 +33,6 @@ const char *ucsi_cmd_str(u64 raw_cmd)
return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
}
-static const char * const ucsi_ack_strs[] = {
- [0] = "",
- [UCSI_ACK_EVENT] = "event",
- [UCSI_ACK_CMD] = "command",
-};
-
-const char *ucsi_ack_str(u8 ack)
-{
- return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
-}
-
const char *ucsi_cci_str(u32 cci)
{
if (cci & GENMASK(7, 0)) {
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index 783ec9c72055..a0d3a934d3d9 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -10,54 +10,18 @@
#include <linux/usb/typec_altmode.h>
const char *ucsi_cmd_str(u64 raw_cmd);
-const char *ucsi_ack_str(u8 ack);
const char *ucsi_cci_str(u32 cci);
const char *ucsi_recipient_str(u8 recipient);
-DECLARE_EVENT_CLASS(ucsi_log_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack),
- TP_STRUCT__entry(
- __field(u8, ack)
- ),
- TP_fast_assign(
- __entry->ack = ack;
- ),
- TP_printk("ACK %s", ucsi_ack_str(__entry->ack))
-);
-
-DEFINE_EVENT(ucsi_log_ack, ucsi_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_control,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl),
- TP_STRUCT__entry(
- __field(u64, ctrl)
- ),
- TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
- ),
- TP_printk("control=%08llx (%s)", __entry->ctrl,
- ucsi_cmd_str(__entry->ctrl))
-);
-
-DEFINE_EVENT(ucsi_log_control, ucsi_command,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl)
-);
-
DECLARE_EVENT_CLASS(ucsi_log_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret),
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret),
TP_STRUCT__entry(
__field(u64, ctrl)
__field(int, ret)
),
TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
+ __entry->ctrl = command;
__entry->ret = ret;
),
TP_printk("%s -> %s (err=%d)", ucsi_cmd_str(__entry->ctrl),
@@ -66,30 +30,13 @@ DECLARE_EVENT_CLASS(ucsi_log_command,
);
DEFINE_EVENT(ucsi_log_command, ucsi_run_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_cci,
- TP_PROTO(u32 cci),
- TP_ARGS(cci),
- TP_STRUCT__entry(
- __field(u32, cci)
- ),
- TP_fast_assign(
- __entry->cci = cci;
- ),
- TP_printk("CCI=%08x %s", __entry->cci, ucsi_cci_str(__entry->cci))
-);
-
-DEFINE_EVENT(ucsi_log_cci, ucsi_notify,
- TP_PROTO(u32 cci),
- TP_ARGS(cci)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DECLARE_EVENT_CLASS(ucsi_log_connector_status,
@@ -109,13 +56,13 @@ DECLARE_EVENT_CLASS(ucsi_log_connector_status,
TP_fast_assign(
__entry->port = port - 1;
__entry->change = status->change;
- __entry->opmode = status->pwr_op_mode;
- __entry->connected = status->connected;
- __entry->pwr_dir = status->pwr_dir;
- __entry->partner_flags = status->partner_flags;
- __entry->partner_type = status->partner_type;
+ __entry->opmode = UCSI_CONSTAT_PWR_OPMODE(status->flags);
+ __entry->connected = !!(status->flags & UCSI_CONSTAT_CONNECTED);
+ __entry->pwr_dir = !!(status->flags & UCSI_CONSTAT_PWR_DIR);
+ __entry->partner_flags = UCSI_CONSTAT_PARTNER_FLAGS(status->flags);
+ __entry->partner_type = UCSI_CONSTAT_PARTNER_TYPE(status->flags);
__entry->request_data_obj = status->request_data_obj;
- __entry->bc_status = status->bc_status;
+ __entry->bc_status = UCSI_CONSTAT_BC_STATUS(status->pwr_status);
),
TP_printk("port%d status: change=%04x, opmode=%x, connected=%d, "
"sourcing=%d, partner_flags=%x, partner_type=%x, "
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index ba288b964dc8..4459bc68aa33 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -17,9 +17,6 @@
#include "ucsi.h"
#include "trace.h"
-#define to_ucsi_connector(_cap_) container_of(_cap_, struct ucsi_connector, \
- typec_cap)
-
/*
* UCSI_TIMEOUT_MS - PPM communication timeout
*
@@ -39,169 +36,148 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
-static inline int ucsi_sync(struct ucsi *ucsi)
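+/* Tell the PPM that the completed command has been handled */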
+static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
- if (ucsi->ppm && ucsi->ppm->sync)
- return ucsi->ppm->sync(ucsi->ppm);
- return 0;
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_COMMAND_COMPLETE;
+
+ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+}
+
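+/* Acknowledge a connector change event using the non-blocking write path */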
+static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
+{
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
+
+ return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
}
-static int ucsi_command(struct ucsi *ucsi, struct ucsi_control *ctrl)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
+
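+/* Read GET_ERROR_STATUS after a failed command and map it to an errno */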
+static int ucsi_read_error(struct ucsi *ucsi)
{
+ u16 error;
int ret;
- trace_ucsi_command(ctrl);
+ /* Acknowledge the command that failed */
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- set_bit(COMMAND_PENDING, &ucsi->flags);
+ ret = ucsi_exec_command(ucsi, UCSI_GET_ERROR_STATUS);
+ if (ret < 0)
+ return ret;
- ret = ucsi->ppm->cmd(ucsi->ppm, ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
if (ret)
- goto err_clear_flag;
+ return ret;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS))) {
- dev_warn(ucsi->dev, "PPM NOT RESPONDING\n");
- ret = -ETIMEDOUT;
+ switch (error) {
+ case UCSI_ERROR_INCOMPATIBLE_PARTNER:
+ return -EOPNOTSUPP;
+ case UCSI_ERROR_CC_COMMUNICATION_ERR:
+ return -ECOMM;
+ case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
+ return -EPROTO;
+ case UCSI_ERROR_DEAD_BATTERY:
+ dev_warn(ucsi->dev, "Dead battery condition!\n");
+ return -EPERM;
+ case UCSI_ERROR_INVALID_CON_NUM:
+ case UCSI_ERROR_UNREGONIZED_CMD:
+ case UCSI_ERROR_INVALID_CMD_ARGUMENT:
+ dev_err(ucsi->dev, "possible UCSI driver bug %u\n", error);
+ return -EINVAL;
+ case UCSI_ERROR_OVERCURRENT:
+ dev_warn(ucsi->dev, "Overcurrent condition\n");
+ break;
+ case UCSI_ERROR_PARTNER_REJECTED_SWAP:
+ dev_warn(ucsi->dev, "Partner rejected swap\n");
+ break;
+ case UCSI_ERROR_HARD_RESET:
+ dev_warn(ucsi->dev, "Hard reset occurred\n");
+ break;
+ case UCSI_ERROR_PPM_POLICY_CONFLICT:
+ dev_warn(ucsi->dev, "PPM Policy conflict\n");
+ break;
+ case UCSI_ERROR_SWAP_REJECTED:
+ dev_warn(ucsi->dev, "Swap rejected\n");
+ break;
+ case UCSI_ERROR_UNDEFINED:
+ default:
+ dev_err(ucsi->dev, "unknown error %u\n", error);
+ break;
}
-err_clear_flag:
- clear_bit(COMMAND_PENDING, &ucsi->flags);
-
- return ret;
+ return -EIO;
}
-static int ucsi_ack(struct ucsi *ucsi, u8 ack)
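+/* Write a single command to the PPM and interpret the resulting CCI value */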
+static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
{
- struct ucsi_control ctrl;
+ u32 cci;
int ret;
- trace_ucsi_ack(ack);
-
- set_bit(ACK_PENDING, &ucsi->flags);
+ ret = ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- UCSI_CMD_ACK(ctrl, ack);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
if (ret)
- goto out_clear_bit;
+ return ret;
- /* Waiting for ACK with ACK CMD, but not with EVENT for now */
- if (ack == UCSI_ACK_EVENT)
- goto out_clear_bit;
+ if (cci & UCSI_CCI_BUSY)
+ return -EBUSY;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ return -EIO;
-out_clear_bit:
- clear_bit(ACK_PENDING, &ucsi->flags);
+ if (cci & UCSI_CCI_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
- if (ret)
- dev_err(ucsi->dev, "%s: failed\n", __func__);
+ if (cci & UCSI_CCI_ERROR) {
+ if (cmd == UCSI_GET_ERROR_STATUS)
+ return -EIO;
+ return ucsi_read_error(ucsi);
+ }
- return ret;
+ return UCSI_CCI_LENGTH(cci);
}
-static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+static int ucsi_run_command(struct ucsi *ucsi, u64 command,
void *data, size_t size)
{
- struct ucsi_control _ctrl;
- u8 data_length;
- u16 error;
+ u8 length;
int ret;
- ret = ucsi_command(ucsi, ctrl);
- if (ret)
- goto err;
-
- switch (ucsi->status) {
- case UCSI_IDLE:
- ret = ucsi_sync(ucsi);
- if (ret)
- dev_warn(ucsi->dev, "%s: sync failed\n", __func__);
-
- if (data)
- memcpy(data, ucsi->ppm->data->message_in, size);
-
- data_length = ucsi->ppm->data->cci.data_length;
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (!ret)
- ret = data_length;
- break;
- case UCSI_BUSY:
- /* The caller decides whether to cancel or not */
- ret = -EBUSY;
- break;
- case UCSI_ERROR:
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (ret)
- break;
-
- _ctrl.raw_cmd = 0;
- _ctrl.cmd.cmd = UCSI_GET_ERROR_STATUS;
- ret = ucsi_command(ucsi, &_ctrl);
- if (ret) {
- dev_err(ucsi->dev, "reading error failed!\n");
- break;
- }
+ ret = ucsi_exec_command(ucsi, command);
+ if (ret < 0)
+ return ret;
- memcpy(&error, ucsi->ppm->data->message_in, sizeof(error));
+ length = ret;
- /* Something has really gone wrong */
- if (WARN_ON(ucsi->status == UCSI_ERROR)) {
- ret = -ENODEV;
- break;
- }
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ if (data) {
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
if (ret)
- break;
-
- switch (error) {
- case UCSI_ERROR_INCOMPATIBLE_PARTNER:
- ret = -EOPNOTSUPP;
- break;
- case UCSI_ERROR_CC_COMMUNICATION_ERR:
- ret = -ECOMM;
- break;
- case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
- ret = -EPROTO;
- break;
- case UCSI_ERROR_DEAD_BATTERY:
- dev_warn(ucsi->dev, "Dead battery condition!\n");
- ret = -EPERM;
- break;
- /* The following mean a bug in this driver */
- case UCSI_ERROR_INVALID_CON_NUM:
- case UCSI_ERROR_UNREGONIZED_CMD:
- case UCSI_ERROR_INVALID_CMD_ARGUMENT:
- dev_warn(ucsi->dev,
- "%s: possible UCSI driver bug - error 0x%x\n",
- __func__, error);
- ret = -EINVAL;
- break;
- default:
- dev_warn(ucsi->dev,
- "%s: error without status\n", __func__);
- ret = -EIO;
- break;
- }
- break;
+ return ret;
}
-err:
- trace_ucsi_run_command(ctrl, ret);
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- return ret;
+ return length;
}
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size)
{
int ret;
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, ctrl, retval, size);
+ ret = ucsi_run_command(ucsi, command, retval, size);
mutex_unlock(&ucsi->ppm_lock);
return ret;
@@ -210,11 +186,12 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
int ucsi_resume(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command;
/* Restore UCSI notification enable mask after system resume */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- return ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+
+ return ucsi_send_command(ucsi, command, NULL, 0);
}
EXPORT_SYMBOL_GPL(ucsi_resume);
/* -------------------------------------------------------------------------- */
@@ -222,15 +199,15 @@ EXPORT_SYMBOL_GPL(ucsi_resume);
void ucsi_altmode_update_active(struct ucsi_connector *con)
{
const struct typec_altmode *altmode = NULL;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
u8 cur;
int i;
- UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num);
- ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (con->ucsi->ppm->data->version > 0x0100) {
+ if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
"GET_CURRENT_CAM command failed\n");
return;
@@ -346,7 +323,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
int max_altmodes = UCSI_MAX_ALTMODES;
struct typec_altmode_desc desc;
struct ucsi_altmode alt[2];
- struct ucsi_control ctrl;
+ u64 command;
int num = 1;
int ret;
int len;
@@ -364,8 +341,11 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
for (i = 0; i < max_altmodes;) {
memset(alt, 0, sizeof(alt));
- UCSI_CMD_GET_ALTERNATE_MODES(ctrl, recipient, con->num, i, 1);
- len = ucsi_run_command(con->ucsi, &ctrl, alt, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
if (len <= 0)
return len;
@@ -427,7 +407,7 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
- switch (con->status.pwr_op_mode) {
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
case UCSI_CONSTAT_PWR_OPMODE_PD:
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
break;
@@ -445,6 +425,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
static int ucsi_register_partner(struct ucsi_connector *con)
{
+ u8 pwr_opmode = UCSI_CONSTAT_PWR_OPMODE(con->status.flags);
struct typec_partner_desc desc;
struct typec_partner *partner;
@@ -453,7 +434,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
memset(&desc, 0, sizeof(desc));
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
desc.accessory = TYPEC_ACCESSORY_DEBUG;
break;
@@ -464,7 +445,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
- desc.usb_pd = con->status.pwr_op_mode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -496,7 +477,7 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (!con->partner)
return;
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -521,29 +502,33 @@ static void ucsi_partner_change(struct ucsi_connector *con)
ucsi_altmode_update_active(con);
}
-static void ucsi_connector_change(struct work_struct *work)
+static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
work);
struct ucsi *ucsi = con->ucsi;
- struct ucsi_control ctrl;
+ enum typec_role role;
+ u64 command;
int ret;
mutex_lock(&con->lock);
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_send_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
goto out_unlock;
}
+ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
ucsi_pwr_opmode_change(con);
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
/* Complete pending power role swap */
if (!completion_done(&con->complete))
@@ -551,9 +536,9 @@ static void ucsi_connector_change(struct work_struct *work)
}
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -564,7 +549,7 @@ static void ucsi_connector_change(struct work_struct *work)
break;
}
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED)
ucsi_register_partner(con);
else
ucsi_unregister_partner(con);
@@ -576,14 +561,15 @@ static void ucsi_connector_change(struct work_struct *work)
* Running GET_CAM_SUPPORTED command just to make sure the PPM
* does not get stuck in case it assumes we do so.
*/
- UCSI_CMD_GET_CAM_SUPPORTED(ctrl, con->num);
- ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_GET_CAM_SUPPORTED;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ucsi_run_command(con->ucsi, command, NULL, 0);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
ucsi_partner_change(con);
- ret = ucsi_ack(ucsi, UCSI_ACK_EVENT);
+ ret = ucsi_acknowledge_connector_change(ucsi);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -595,117 +581,83 @@ out_unlock:
}
/**
- * ucsi_notify - PPM notification handler
- * @ucsi: Source UCSI Interface for the notifications
- *
- * Handle notifications from PPM of @ucsi.
+ * ucsi_connector_change - Process Connector Change Event
+ * @ucsi: UCSI Interface
+ * @num: Connector number
*/
-void ucsi_notify(struct ucsi *ucsi)
+void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
- struct ucsi_cci *cci;
-
- /* There is no requirement to sync here, but no harm either. */
- ucsi_sync(ucsi);
-
- cci = &ucsi->ppm->data->cci;
-
- if (cci->error)
- ucsi->status = UCSI_ERROR;
- else if (cci->busy)
- ucsi->status = UCSI_BUSY;
- else
- ucsi->status = UCSI_IDLE;
+ struct ucsi_connector *con = &ucsi->connector[num - 1];
- if (cci->cmd_complete && test_bit(COMMAND_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->ack_complete && test_bit(ACK_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->connector_change) {
- struct ucsi_connector *con;
-
- con = &ucsi->connector[cci->connector_change - 1];
-
- if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
- schedule_work(&con->work);
- }
-
- trace_ucsi_notify(ucsi->ppm->data->raw_cci);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ schedule_work(&con->work);
}
-EXPORT_SYMBOL_GPL(ucsi_notify);
+EXPORT_SYMBOL_GPL(ucsi_connector_change);
/* -------------------------------------------------------------------------- */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
- struct ucsi_control ctrl;
+ u64 command;
- UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard);
+ command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0;
- return ucsi_send_command(con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(con->ucsi, command, NULL, 0);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command = UCSI_PPM_RESET;
unsigned long tmo;
+ u32 cci;
int ret;
- ctrl.raw_cmd = 0;
- ctrl.cmd.cmd = UCSI_PPM_RESET;
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- /* Here sync is critical. */
- ret = ucsi_sync(ucsi);
- if (ret)
- goto err;
+ if (time_is_before_jiffies(tmo))
+ return -ETIMEDOUT;
- if (ucsi->ppm->data->cci.reset_complete)
- break;
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return ret;
/* If the PPM is still doing something else, reset it again. */
- if (ucsi->ppm->data->raw_cci) {
- dev_warn_ratelimited(ucsi->dev,
- "Failed to reset PPM! Trying again..\n");
-
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ if (cci & ~UCSI_CCI_RESET_COMPLETE) {
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL,
+ &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
}
- /* Letting the PPM settle down. */
msleep(20);
+ } while (!(cci & UCSI_CCI_RESET_COMPLETE));
- ret = -ETIMEDOUT;
- } while (time_is_after_jiffies(tmo));
-
-err:
- trace_ucsi_reset_ppm(&ctrl, ret);
-
- return ret;
+ return 0;
}
-static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
+static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
int ret;
- ret = ucsi_send_command(con->ucsi, ctrl, NULL, 0);
+ ret = ucsi_send_command(con->ucsi, command, NULL, 0);
if (ret == -ETIMEDOUT) {
- struct ucsi_control c;
+ u64 c;
/* PPM most likely stopped responding. Resetting everything. */
mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL);
- ucsi_send_command(con->ucsi, &c, NULL, 0);
+ c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
}
@@ -713,11 +665,11 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
return ret;
}
-static int
-ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
+static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ u8 partner_type;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -727,14 +679,17 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
goto out_unlock;
}
- if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
+ partner_type = UCSI_CONSTAT_PARTNER_TYPE(con->status.flags);
+ if ((partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
role == TYPEC_DEVICE) ||
- (con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
+ (partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
role == TYPEC_HOST))
goto out_unlock;
- UCSI_CMD_SET_UOR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_UOR_ROLE(role);
+ command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -748,11 +703,11 @@ out_unlock:
return ret < 0 ? ret : 0;
}
-static int
-ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
+static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ enum typec_role cur_role;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -762,11 +717,15 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
goto out_unlock;
}
- if (con->status.pwr_dir == role)
+ cur_role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
+ if (cur_role == role)
goto out_unlock;
- UCSI_CMD_SET_PDR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_PDR_ROLE(role);
+ command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -777,7 +736,8 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
}
/* Something has gone wrong while swapping the role */
- if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) {
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_reset_connector(con, true);
ret = -EPROTO;
}
@@ -788,6 +748,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations ucsi_ops = {
+ .dr_set = ucsi_dr_swap,
+ .pr_set = ucsi_pr_swap
+};
+
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
struct fwnode_handle *fwnode;
@@ -804,18 +769,19 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
- INIT_WORK(&con->work, ucsi_connector_change);
+ INIT_WORK(&con->work, ucsi_handle_connector_change);
init_completion(&con->complete);
mutex_init(&con->lock);
con->num = index + 1;
con->ucsi = ucsi;
/* Get connector capability */
- UCSI_CMD_GET_CONNECTOR_CAPABILITY(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->cap, sizeof(con->cap));
+ command = UCSI_GET_CONNECTOR_CAPABILITY;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
return ret;
@@ -826,11 +792,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP)
cap->data = TYPEC_PORT_UFP;
- if (con->cap.provider && con->cap.consumer)
+ if ((con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) &&
+ (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER))
cap->type = TYPEC_PORT_DRP;
- else if (con->cap.provider)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER)
cap->type = TYPEC_PORT_SRC;
- else if (con->cap.consumer)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER)
cap->type = TYPEC_PORT_SNK;
cap->revision = ucsi->cap.typec_version;
@@ -843,8 +810,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
*accessory = TYPEC_ACCESSORY_DEBUG;
cap->fwnode = ucsi_find_fwnode(con);
- cap->dr_set = ucsi_dr_swap;
- cap->pr_set = ucsi_pr_swap;
+ cap->driver_data = con;
+ cap->ops = &ucsi_ops;
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
@@ -858,17 +825,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num);
/* Get the status */
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
return 0;
}
- ucsi_pwr_opmode_change(con);
- typec_set_pwr_role(con->port, con->status.pwr_dir);
-
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -880,8 +845,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
}
/* Check if there is already something connected */
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
+ typec_set_pwr_role(con->port,
+ !!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
+ ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ }
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
@@ -898,11 +867,16 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
return 0;
}
-static void ucsi_init(struct work_struct *work)
+/**
+ * ucsi_init - Initialize UCSI interface
+ * @ucsi: UCSI to be initialized
+ *
+ * Registers all ports @ucsi has and enables all notification events.
+ */
+int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi *ucsi = container_of(work, struct ucsi, work);
struct ucsi_connector *con;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
int i;
@@ -916,15 +890,15 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable basic notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE |
- UCSI_ENABLE_NTFY_ERROR);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
- UCSI_CMD_GET_CAPABILITY(ctrl);
- ret = ucsi_run_command(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
+ command = UCSI_GET_CAPABILITY;
+ ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
if (ret < 0)
goto err_reset;
@@ -949,14 +923,14 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable all notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
mutex_unlock(&ucsi->ppm_lock);
- return;
+ return 0;
err_unregister:
for (con = ucsi->connector; con->port; con++) {
@@ -970,59 +944,115 @@ err_reset:
ucsi_reset_ppm(ucsi);
err:
mutex_unlock(&ucsi->ppm_lock);
- dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ucsi_init);
+
+static void ucsi_init_work(struct work_struct *work)
+{
+ struct ucsi *ucsi = container_of(work, struct ucsi, work);
+ int ret;
+
+ ret = ucsi_init(ucsi);
+ if (ret)
+ dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
}
/**
- * ucsi_register_ppm - Register UCSI PPM Interface
- * @dev: Device interface to the PPM
- * @ppm: The PPM interface
- *
- * Allocates UCSI instance, associates it with @ppm and returns it to the
- * caller, and schedules initialization of the interface.
+ * ucsi_get_drvdata - Return private driver data pointer
+ * @ucsi: UCSI interface
+ */
+void *ucsi_get_drvdata(struct ucsi *ucsi)
+{
+ return ucsi->driver_data;
+}
+EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
+
+/**
+ * ucsi_set_drvdata - Assign private driver data pointer
+ * @ucsi: UCSI interface
+ * @data: Private data pointer
*/
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm)
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
+{
+ ucsi->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
+
+/**
+ * ucsi_create - Allocate UCSI instance
+ * @dev: Device interface to the PPM (Platform Policy Manager)
+ * @ops: I/O routines
+ */
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
{
struct ucsi *ucsi;
+ if (!ops || !ops->read || !ops->sync_write || !ops->async_write)
+ return ERR_PTR(-EINVAL);
+
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
if (!ucsi)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&ucsi->work, ucsi_init);
- init_completion(&ucsi->complete);
+ INIT_WORK(&ucsi->work, ucsi_init_work);
mutex_init(&ucsi->ppm_lock);
-
ucsi->dev = dev;
- ucsi->ppm = ppm;
+ ucsi->ops = ops;
+
+ return ucsi;
+}
+EXPORT_SYMBOL_GPL(ucsi_create);
+
+/**
+ * ucsi_destroy - Free UCSI instance
+ * @ucsi: UCSI instance to be freed
+ */
+void ucsi_destroy(struct ucsi *ucsi)
+{
+ kfree(ucsi);
+}
+EXPORT_SYMBOL_GPL(ucsi_destroy);
+
+/**
+ * ucsi_register - Register UCSI interface
+ * @ucsi: UCSI instance
+ */
+int ucsi_register(struct ucsi *ucsi)
+{
+ int ret;
+
+ ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ucsi->version,
+ sizeof(ucsi->version));
+ if (ret)
+ return ret;
+
+ if (!ucsi->version)
+ return -ENODEV;
- /*
- * Communication with the PPM takes a lot of time. It is not reasonable
- * to initialize the driver here. Using a work for now.
- */
queue_work(system_long_wq, &ucsi->work);
- return ucsi;
+ return 0;
}
-EXPORT_SYMBOL_GPL(ucsi_register_ppm);
+EXPORT_SYMBOL_GPL(ucsi_register);
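Taken together, the new API gives glue drivers a probe/remove shape like the sketch below; it is distilled from the ucsi_acpi conversion further down, and "foo" and foo_ops are illustrative assumptions, not symbols added by this patch:

	/* Sketch of a glue driver probe, assuming a driver-private
	 * struct foo and a struct ucsi_operations foo_ops. */
	foo->ucsi = ucsi_create(dev, &foo_ops);
	if (IS_ERR(foo->ucsi))
		return PTR_ERR(foo->ucsi);

	ucsi_set_drvdata(foo->ucsi, foo);

	ret = ucsi_register(foo->ucsi);	/* reads UCSI_VERSION, queues init */
	if (ret) {
		ucsi_destroy(foo->ucsi);
		return ret;
	}

	/* ...and on remove, in reverse: */
	ucsi_unregister(foo->ucsi);
	ucsi_destroy(foo->ucsi);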
/**
- * ucsi_unregister_ppm - Unregister UCSI PPM Interface
- * @ucsi: struct ucsi associated with the PPM
+ * ucsi_unregister - Unregister UCSI interface
+ * @ucsi: UCSI interface to be unregistered
*
- * Unregister UCSI PPM that was created with ucsi_register().
+ * Unregister UCSI interface that was registered with ucsi_register().
*/
-void ucsi_unregister_ppm(struct ucsi *ucsi)
+void ucsi_unregister(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_work_sync(&ucsi->work);
- /* Disable everything except command complete notification */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE)
- ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ /* Disable notifications */
+ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
@@ -1032,12 +1062,9 @@ void ucsi_unregister_ppm(struct ucsi *ucsi)
typec_unregister_port(ucsi->connector[i].port);
}
- ucsi_reset_ppm(ucsi);
-
kfree(ucsi->connector);
- kfree(ucsi);
}
-EXPORT_SYMBOL_GPL(ucsi_unregister_ppm);
+EXPORT_SYMBOL_GPL(ucsi_unregister);
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index de87d0b8319d..8569bbd3762f 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,177 +10,55 @@
/* -------------------------------------------------------------------------- */
-/* Command Status and Connector Change Indication (CCI) data structure */
-struct ucsi_cci {
- u8:1; /* reserved */
- u8 connector_change:7;
- u8 data_length;
- u16:9; /* reserved */
- u16 not_supported:1;
- u16 cancel_complete:1;
- u16 reset_complete:1;
- u16 busy:1;
- u16 ack_complete:1;
- u16 error:1;
- u16 cmd_complete:1;
-} __packed;
-
-/* Default fields in CONTROL data structure */
-struct ucsi_command {
- u8 cmd;
- u8 length;
- u64 data:48;
-} __packed;
-
-/* ACK Command structure */
-struct ucsi_ack_cmd {
- u8 cmd;
- u8 length;
- u8 cci_ack:1;
- u8 cmd_ack:1;
- u8:6; /* reserved */
-} __packed;
-
-/* Connector Reset Command structure */
-struct ucsi_con_rst {
- u8 cmd;
- u8 length;
- u8 con_num:7;
- u8 hard_reset:1;
-} __packed;
-
-/* Set USB Operation Mode Command structure */
-struct ucsi_uor_cmd {
- u8 cmd;
- u8 length;
- u16 con_num:7;
- u16 role:3;
-#define UCSI_UOR_ROLE_DFP BIT(0)
-#define UCSI_UOR_ROLE_UFP BIT(1)
-#define UCSI_UOR_ROLE_DRP BIT(2)
- u16:6; /* reserved */
-} __packed;
-
-/* Get Alternate Modes Command structure */
-struct ucsi_altmode_cmd {
- u8 cmd;
- u8 length;
- u8 recipient;
-#define UCSI_RECIPIENT_CON 0
-#define UCSI_RECIPIENT_SOP 1
-#define UCSI_RECIPIENT_SOP_P 2
-#define UCSI_RECIPIENT_SOP_PP 3
- u8 con_num;
- u8 offset;
- u8 num_altmodes;
-} __packed;
+struct ucsi;
-struct ucsi_control {
- union {
- u64 raw_cmd;
- struct ucsi_command cmd;
- struct ucsi_uor_cmd uor;
- struct ucsi_ack_cmd ack;
- struct ucsi_con_rst con_rst;
- struct ucsi_altmode_cmd alt;
- };
+/* UCSI offsets (Bytes) */
+#define UCSI_VERSION 0
+#define UCSI_CCI 4
+#define UCSI_CONTROL 8
+#define UCSI_MESSAGE_IN 16
+#define UCSI_MESSAGE_OUT 32
+
+/* Command Status and Connector Change Indication (CCI) bits */
+#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1)
+#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8)
+#define UCSI_CCI_NOT_SUPPORTED BIT(25)
+#define UCSI_CCI_CANCEL_COMPLETE BIT(26)
+#define UCSI_CCI_RESET_COMPLETE BIT(27)
+#define UCSI_CCI_BUSY BIT(28)
+#define UCSI_CCI_ACK_COMPLETE BIT(29)
+#define UCSI_CCI_ERROR BIT(30)
+#define UCSI_CCI_COMMAND_COMPLETE BIT(31)
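The connector-change number occupies bits 1-7 of the CCI word, hence the mask-and-shift in UCSI_CCI_CONNECTOR(). A worked example with an illustrative value:

	u32 cci = 0x00000004;			/* bit 2 set */
	u8 conn = UCSI_CCI_CONNECTOR(cci);	/* (0x04 & 0xff) >> 1 == 2 */
	/* connector 2 has a pending change; zero means no connector change */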
+
+/**
+ * struct ucsi_operations - UCSI I/O operations
+ * @read: Read operation
+ * @sync_write: Blocking write operation
+ * @async_write: Non-blocking write operation
+ *
+ * Read and write routines for UCSI interface. @sync_write must wait for the
+ * Command Completion Event from the PPM before returning, and @async_write must
+ * return immediately after sending the data to the PPM.
+ */
+struct ucsi_operations {
+ int (*read)(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len);
+ int (*sync_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
+ int (*async_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
};
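As a sketch of what these callbacks look like for a memory-mapped PPM — "foo" is a hypothetical glue driver, not part of this patch, and the completion handling mirrors the ucsi_acpi implementation converted below:

	static int foo_read(struct ucsi *ucsi, unsigned int offset,
			    void *val, size_t val_len)
	{
		struct foo *foo = ucsi_get_drvdata(ucsi);

		memcpy_fromio(val, foo->base + offset, val_len);
		return 0;
	}

	static int foo_async_write(struct ucsi *ucsi, unsigned int offset,
				   const void *val, size_t val_len)
	{
		struct foo *foo = ucsi_get_drvdata(ucsi);

		memcpy_toio(foo->base + offset, val, val_len);
		return 0;
	}

	static int foo_sync_write(struct ucsi *ucsi, unsigned int offset,
				  const void *val, size_t val_len)
	{
		struct foo *foo = ucsi_get_drvdata(ucsi);
		int ret;

		/* foo->complete is fired from the IRQ handler when the
		 * CCI reports COMMAND_COMPLETE or ACK_COMPLETE. */
		reinit_completion(&foo->complete);

		ret = foo_async_write(ucsi, offset, val, val_len);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&foo->complete,
						 msecs_to_jiffies(5000)))
			return -ETIMEDOUT;

		return 0;
	}

	static const struct ucsi_operations foo_ops = {
		.read = foo_read,
		.sync_write = foo_sync_write,
		.async_write = foo_async_write,
	};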
-#define __UCSI_CMD(_ctrl_, _cmd_) \
-{ \
- (_ctrl_).raw_cmd = 0; \
- (_ctrl_).cmd.cmd = _cmd_; \
-}
-
-/* Helper for preparing ucsi_control for CONNECTOR_RESET command. */
-#define UCSI_CMD_CONNECTOR_RESET(_ctrl_, _con_, _hard_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_CONNECTOR_RESET) \
- (_ctrl_).con_rst.con_num = (_con_)->num; \
- (_ctrl_).con_rst.hard_reset = _hard_; \
-}
-
-/* Helper for preparing ucsi_control for ACK_CC_CI command. */
-#define UCSI_CMD_ACK(_ctrl_, _ack_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_ACK_CC_CI) \
- (_ctrl_).ack.cci_ack = ((_ack_) == UCSI_ACK_EVENT); \
- (_ctrl_).ack.cmd_ack = ((_ack_) == UCSI_ACK_CMD); \
-}
-
-/* Helper for preparing ucsi_control for SET_NOTIFY_ENABLE command. */
-#define UCSI_CMD_SET_NTFY_ENABLE(_ctrl_, _ntfys_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_SET_NOTIFICATION_ENABLE) \
- (_ctrl_).cmd.data = _ntfys_; \
-}
-
-/* Helper for preparing ucsi_control for GET_CAPABILITY command. */
-#define UCSI_CMD_GET_CAPABILITY(_ctrl_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CAPABILITY) \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_CAPABILITY command. */
-#define UCSI_CMD_GET_CONNECTOR_CAPABILITY(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_CAPABILITY) \
- (_ctrl_).cmd.data = _con_; \
-}
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
+void ucsi_destroy(struct ucsi *ucsi);
+int ucsi_register(struct ucsi *ucsi);
+void ucsi_unregister(struct ucsi *ucsi);
+void *ucsi_get_drvdata(struct ucsi *ucsi);
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
-/* Helper for preparing ucsi_control for GET_ALTERNATE_MODES command. */
-#define UCSI_CMD_GET_ALTERNATE_MODES(_ctrl_, _r_, _con_num_, _o_, _num_)\
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_ALTERNATE_MODES) \
- _ctrl_.alt.recipient = (_r_); \
- _ctrl_.alt.con_num = (_con_num_); \
- _ctrl_.alt.offset = (_o_); \
- _ctrl_.alt.num_altmodes = (_num_) - 1; \
-}
+void ucsi_connector_change(struct ucsi *ucsi, u8 num);
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CAM_SUPPORTED(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CAM_SUPPORTED) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CURRENT_CAM(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CURRENT_CAM) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */
-#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_STATUS) \
- (_ctrl_).cmd.data = _con_; \
-}
-
-#define __UCSI_ROLE(_ctrl_, _cmd_, _con_num_) \
-{ \
- __UCSI_CMD(_ctrl_, _cmd_) \
- (_ctrl_).uor.con_num = _con_num_; \
- (_ctrl_).uor.role = UCSI_UOR_ROLE_DRP; \
-}
-
-/* Helper for preparing ucsi_control for SET_UOR command. */
-#define UCSI_CMD_SET_UOR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_UOR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_HOST ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
-
-/* Helper for preparing ucsi_control for SET_PDR command. */
-#define UCSI_CMD_SET_PDR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_PDR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_SOURCE ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
+/* -------------------------------------------------------------------------- */
/* Commands */
#define UCSI_PPM_RESET 0x01
@@ -203,24 +81,49 @@ struct ucsi_control {
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
-/* ACK_CC_CI commands */
-#define UCSI_ACK_EVENT 1
-#define UCSI_ACK_CMD 2
-
-/* Bits for SET_NOTIFICATION_ENABLE command */
-#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0)
-#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1)
-#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(2)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(5)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(6)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(7)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(8)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(9)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(11)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(12)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(14)
-#define UCSI_ENABLE_NTFY_ERROR BIT(15)
-#define UCSI_ENABLE_NTFY_ALL 0xdbe7
+#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+
+/* CONNECTOR_RESET command bits */
+#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
+
+/* ACK_CC_CI bits */
+#define UCSI_ACK_CONNECTOR_CHANGE BIT(16)
+#define UCSI_ACK_COMMAND_COMPLETE BIT(17)
+
+/* SET_NOTIFICATION_ENABLE command bits */
+#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
+#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
+#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
+#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
+
+/* SET_UOR command bits */
+#define UCSI_SET_UOR_ROLE(_r_) (((_r_) == TYPEC_HOST ? 1 : 2) << 23)
+#define UCSI_SET_UOR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* SET_PDR command bits */
+#define UCSI_SET_PDR_ROLE(_r_) (((_r_) == TYPEC_SOURCE ? 1 : 2) << 23)
+#define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25)
+
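A hedged sketch of composing a power-role swap with the flat command encoding; con, ucsi and the surrounding error handling are assumed context, not shown in this hunk:

	/* Request a swap to source on connector con->num. */
	u64 command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);

	command |= UCSI_SET_PDR_ROLE(TYPEC_SOURCE);
	command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
	ret = ucsi_send_command(ucsi, command, NULL, 0);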
+/* GET_ALTERNATE_MODES command bits */
+#define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16)
+#define UCSI_RECIPIENT_CON 0
+#define UCSI_RECIPIENT_SOP 1
+#define UCSI_RECIPIENT_SOP_P 2
+#define UCSI_RECIPIENT_SOP_PP 3
+#define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24)
+#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
+#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
+
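GET_ALTERNATE_MODES likewise packs every parameter into the upper bits of the 64-bit command. A sketch for fetching the i-th partner (SOP) mode of a connector, with i, con and ucsi assumed for illustration:

	struct ucsi_altmode alt;
	u64 command = UCSI_GET_ALTERNATE_MODES;

	command |= UCSI_GET_ALTMODE_RECIPIENT(UCSI_RECIPIENT_SOP);
	command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
	command |= UCSI_GET_ALTMODE_OFFSET(i);
	ret = ucsi_send_command(ucsi, command, &alt, sizeof(alt));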
+/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
#define UCSI_ERROR_UNREGONIZED_CMD BIT(0)
@@ -230,6 +133,12 @@ struct ucsi_control {
#define UCSI_ERROR_CC_COMMUNICATION_ERR BIT(4)
#define UCSI_ERROR_DEAD_BATTERY BIT(5)
#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL BIT(6)
+#define UCSI_ERROR_OVERCURRENT BIT(7)
+#define UCSI_ERROR_UNDEFINED BIT(8)
+#define UCSI_ERROR_PARTNER_REJECTED_SWAP BIT(9)
+#define UCSI_ERROR_HARD_RESET BIT(10)
+#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
+#define UCSI_ERROR_SWAP_REJECTED BIT(12)
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
@@ -241,8 +150,8 @@ struct ucsi_capability {
#define UCSI_CAP_ATTR_POWER_AC_SUPPLY BIT(8)
#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
- u32 num_connectors:8;
- u32 features:24;
+ u8 num_connectors;
+ u8 features;
#define UCSI_CAP_SET_UOM BIT(0)
#define UCSI_CAP_SET_PDM BIT(1)
#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
@@ -251,8 +160,9 @@ struct ucsi_capability {
#define UCSI_CAP_CABLE_DETAILS BIT(5)
#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
#define UCSI_CAP_PD_RESET BIT(7)
+ u16 reserved_1;
u8 num_alt_modes;
- u8 reserved;
+ u8 reserved_2;
u16 bc_version;
u16 pd_version;
u16 typec_version;
@@ -269,9 +179,9 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 provider:1;
- u8 consumer:1;
- u8:6; /* reserved */
+ u8 flags;
+#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
+#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
} __packed;
struct ucsi_altmode {
@@ -283,18 +193,17 @@ struct ucsi_altmode {
struct ucsi_cable_property {
u16 speed_supported;
u8 current_capability;
- u8 vbus_in_cable:1;
- u8 active_cable:1;
- u8 directionality:1;
- u8 plug_type:2;
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
-#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
- u8 mode_support:1;
- u8:2; /* reserved */
- u8 latency:4;
- u8:4; /* reserved */
+ u8 flags;
+#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
+#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
+#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+ u8 latency;
} __packed;
/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
@@ -311,83 +220,47 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12)
#define UCSI_CONSTAT_CONNECT_CHANGE BIT(14)
#define UCSI_CONSTAT_ERROR BIT(15)
- u16 pwr_op_mode:3;
-#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
-#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
-#define UCSI_CONSTAT_PWR_OPMODE_BC 2
-#define UCSI_CONSTAT_PWR_OPMODE_PD 3
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
- u16 connected:1;
- u16 pwr_dir:1;
- u16 partner_flags:8;
-#define UCSI_CONSTAT_PARTNER_FLAG_USB BIT(0)
-#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE BIT(1)
- u16 partner_type:3;
-#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
-#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
-#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
+ u16 flags;
+#define UCSI_CONSTAT_PWR_OPMODE(_f_) ((_f_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
+#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
+#define UCSI_CONSTAT_PWR_OPMODE_BC 2
+#define UCSI_CONSTAT_PWR_OPMODE_PD 3
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
+#define UCSI_CONSTAT_CONNECTED BIT(3)
+#define UCSI_CONSTAT_PWR_DIR BIT(4)
+#define UCSI_CONSTAT_PARTNER_FLAGS(_f_) (((_f_) & GENMASK(12, 5)) >> 5)
+#define UCSI_CONSTAT_PARTNER_FLAG_USB 1
+#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE 2
+#define UCSI_CONSTAT_PARTNER_TYPE(_f_) (((_f_) & GENMASK(15, 13)) >> 13)
+#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
+#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
+#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 bc_status:2;
-#define UCSI_CONSTAT_BC_NOT_CHARGING 0
-#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
-#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
-#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
- u8 provider_cap_limit_reason:4;
-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
- u8:2; /* reserved */
+ u8 pwr_status;
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_BC_NOT_CHARGING 0
+#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
+#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
+#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
+#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
} __packed;
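With the bitfields replaced by a flat flags word, consumers decode the status through the accessor macros; a condensed sketch of how ucsi_register_port() in this patch uses them:

	u16 flags = con->status.flags;

	if (flags & UCSI_CONSTAT_CONNECTED) {
		typec_set_pwr_role(con->port,
				   !!(flags & UCSI_CONSTAT_PWR_DIR));

		switch (UCSI_CONSTAT_PARTNER_TYPE(flags)) {
		case UCSI_CONSTAT_PARTNER_TYPE_UFP:
			typec_set_data_role(con->port, TYPEC_HOST);
			break;
		case UCSI_CONSTAT_PARTNER_TYPE_DFP:
			typec_set_data_role(con->port, TYPEC_DEVICE);
			break;
		}
	}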
/* -------------------------------------------------------------------------- */
-struct ucsi;
-
-struct ucsi_data {
- u16 version;
- u16 reserved;
- union {
- u32 raw_cci;
- struct ucsi_cci cci;
- };
- struct ucsi_control ctrl;
- u32 message_in[4];
- u32 message_out[4];
-} __packed;
-
-/*
- * struct ucsi_ppm - Interface to UCSI Platform Policy Manager
- * @data: memory location to the UCSI data structures
- * @cmd: UCSI command execution routine
- * @sync: Refresh UCSI mailbox (the data structures)
- */
-struct ucsi_ppm {
- struct ucsi_data *data;
- int (*cmd)(struct ucsi_ppm *, struct ucsi_control *);
- int (*sync)(struct ucsi_ppm *);
-};
-
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm);
-void ucsi_unregister_ppm(struct ucsi *ucsi);
-void ucsi_notify(struct ucsi *ucsi);
-
-/* -------------------------------------------------------------------------- */
-
-enum ucsi_status {
- UCSI_IDLE = 0,
- UCSI_BUSY,
- UCSI_ERROR,
-};
-
struct ucsi {
+ u16 version;
struct device *dev;
- struct ucsi_ppm *ppm;
+ struct driver_data *driver_data;
+
+ const struct ucsi_operations *ops;
- enum ucsi_status status;
- struct completion complete;
struct ucsi_capability cap;
struct ucsi_connector *connector;
@@ -426,7 +299,7 @@ struct ucsi_connector {
struct ucsi_connector_capability cap;
};
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size);
void ucsi_altmode_update_active(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index a18112a83fae..3f1786170098 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -19,7 +19,9 @@
struct ucsi_acpi {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
+ void __iomem *base;
+ struct completion complete;
+ unsigned long flags;
guid_t guid;
};
@@ -39,27 +41,73 @@ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
return 0;
}
-static int ucsi_acpi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ memcpy(val, (const void __force *)(ua->base + offset), val_len);
+
+ return 0;
+}
+
+static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ memcpy((void __force *)(ua->base + offset), val, val_len);
return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
}
-static int ucsi_acpi_sync(struct ucsi_ppm *ppm)
+static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ set_bit(COMMAND_PENDING, &ua->flags);
+
+ ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto out_clear_bit;
- return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
+
+out_clear_bit:
+ clear_bit(COMMAND_PENDING, &ua->flags);
+
+ return ret;
}
+static const struct ucsi_operations ucsi_acpi_ops = {
+ .read = ucsi_acpi_read,
+ .sync_write = ucsi_acpi_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ucsi_acpi *ua = data;
+ u32 cci;
+ int ret;
+
+ ret = ucsi_acpi_read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return;
- ucsi_notify(ua->ucsi);
+ if (test_bit(COMMAND_PENDING, &ua->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&ua->complete);
+ else if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
}
static int ucsi_acpi_probe(struct platform_device *pdev)
@@ -90,35 +138,39 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
* it can not be requested here, and we can not use
* devm_ioremap_resource().
*/
- ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ua->ppm.data)
+ ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ua->base)
return -ENOMEM;
- if (!ua->ppm.data->version)
- return -ENODEV;
-
ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
if (ret)
return ret;
- ua->ppm.cmd = ucsi_acpi_cmd;
- ua->ppm.sync = ucsi_acpi_sync;
+ init_completion(&ua->complete);
ua->dev = &pdev->dev;
+ ua->ucsi = ucsi_create(&pdev->dev, &ucsi_acpi_ops);
+ if (IS_ERR(ua->ucsi))
+ return PTR_ERR(ua->ucsi);
+
+ ucsi_set_drvdata(ua->ucsi, ua);
+
status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify, ua);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "failed to install notify handler\n");
+ ucsi_destroy(ua->ucsi);
return -ENODEV;
}
- ua->ucsi = ucsi_register_ppm(&pdev->dev, &ua->ppm);
- if (IS_ERR(ua->ucsi)) {
+ ret = ucsi_register(ua->ucsi);
+ if (ret) {
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
- return PTR_ERR(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
+ return ret;
}
platform_set_drvdata(pdev, ua);
@@ -130,7 +182,8 @@ static int ucsi_acpi_remove(struct platform_device *pdev)
{
struct ucsi_acpi *ua = platform_get_drvdata(pdev);
- ucsi_unregister_ppm(ua->ucsi);
+ ucsi_unregister(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index d772fce51905..3370b3fc37b1 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -176,8 +176,8 @@ struct ccg_resp {
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
struct i2c_client *client;
+
struct ccg_dev_info info;
/* version info for boot, primary and secondary */
struct version_info version[FW2 + 1];
@@ -196,6 +196,8 @@ struct ucsi_ccg {
/* fw build with vendor information */
u16 fw_build;
struct work_struct pm_work;
+
+ struct completion complete;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -243,7 +245,7 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
return 0;
}
-static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
struct i2c_client *client = uc->client;
unsigned char *buf;
@@ -317,88 +319,85 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
-static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
- status = ccg_write(uc, rab, ppm +
- offsetof(struct ucsi_data, message_out),
- sizeof(uc->ppm.data->message_out));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
- return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
- sizeof(uc->ppm.data->ctrl));
+ return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
- status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
- sizeof(uc->ppm.data->cci));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
- return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
- sizeof(uc->ppm.data->message_in));
+ return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- int status;
- unsigned char data;
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
- status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
- if (status < 0)
- return status;
+ mutex_lock(&uc->lock);
+ pm_runtime_get_sync(uc->dev);
+ set_bit(DEV_CMD_PENDING, &uc->flags);
- return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
-}
+ ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto err_clear_bit;
-static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
- int status;
+ if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
- status = ucsi_ccg_recv_data(uc);
- if (status < 0)
- return status;
+err_clear_bit:
+ clear_bit(DEV_CMD_PENDING, &uc->flags);
+ pm_runtime_put_sync(uc->dev);
+ mutex_unlock(&uc->lock);
- /* ack interrupt to allow next command to run */
- return ucsi_ccg_ack_interrupt(uc);
+ return ret;
}
-static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
-
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
- return ucsi_ccg_send_data(uc);
-}
+static const struct ucsi_operations ucsi_ccg_ops = {
+ .read = ucsi_ccg_read,
+ .sync_write = ucsi_ccg_sync_write,
+ .async_write = ucsi_ccg_async_write
+};
static irqreturn_t ccg_irq_handler(int irq, void *data)
{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
+ u8 intr_reg;
+ u32 cci;
+ int ret;
+
+ ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (ret)
+ return ret;
+
+ ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
+ if (ret)
+ goto err_clear_irq;
+
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+ if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
- ucsi_notify(uc->ucsi);
+err_clear_irq:
+ ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
return IRQ_HANDLED;
}
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
- struct ucsi_ccg *uc = container_of(pm_work, struct ucsi_ccg, pm_work);
-
- ucsi_notify(uc->ucsi);
+ ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
}
static int get_fw_info(struct ucsi_ccg *uc)
@@ -1027,10 +1026,10 @@ static int ccg_restart(struct ucsi_ccg *uc)
return status;
}
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
+ status = ucsi_register(uc->ucsi);
+ if (status) {
+ dev_err(uc->dev, "failed to register the interface\n");
+ return status;
}
return 0;
@@ -1047,7 +1046,7 @@ static void ccg_update_firmware(struct work_struct *work)
return;
if (flash_mode != FLASH_NOT_NEEDED) {
- ucsi_unregister_ppm(uc->ucsi);
+ ucsi_unregister(uc->ucsi);
free_irq(uc->irq, uc);
ccg_fw_update(uc, flash_mode);
@@ -1091,21 +1090,15 @@ static int ucsi_ccg_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ucsi_ccg *uc;
int status;
- u16 rab;
uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
if (!uc)
return -ENOMEM;
- uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
- if (!uc->ppm.data)
- return -ENOMEM;
-
- uc->ppm.cmd = ucsi_ccg_cmd;
- uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
mutex_init(&uc->lock);
+ init_completion(&uc->complete);
INIT_WORK(&uc->work, ccg_update_firmware);
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1133,30 +1126,25 @@ static int ucsi_ccg_probe(struct i2c_client *client,
if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
uc->port_num++;
+ uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
+ if (IS_ERR(uc->ucsi))
+ return PTR_ERR(uc->ucsi);
+
+ ucsi_set_drvdata(uc->ucsi, uc);
+
status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
dev_name(dev), uc);
if (status < 0) {
dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
- return status;
+ goto out_ucsi_destroy;
}
uc->irq = client->irq;
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
- }
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
- status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
- offsetof(struct ucsi_data, version),
- sizeof(uc->ppm.data->version));
- if (status < 0) {
- ucsi_unregister_ppm(uc->ucsi);
- return status;
- }
+ status = ucsi_register(uc->ucsi);
+ if (status)
+ goto out_free_irq;
i2c_set_clientdata(client, uc);
@@ -1167,6 +1155,13 @@ static int ucsi_ccg_probe(struct i2c_client *client,
pm_runtime_idle(uc->dev);
return 0;
+
+out_free_irq:
+ free_irq(uc->irq, uc);
+out_ucsi_destroy:
+ ucsi_destroy(uc->ucsi);
+
+ return status;
}
static int ucsi_ccg_remove(struct i2c_client *client)
@@ -1175,8 +1170,9 @@ static int ucsi_ccg_remove(struct i2c_client *client)
cancel_work_sync(&uc->pm_work);
cancel_work_sync(&uc->work);
- ucsi_unregister_ppm(uc->ucsi);
pm_runtime_disable(uc->dev);
+ ucsi_unregister(uc->ucsi);
+ ucsi_destroy(uc->ucsi);
free_irq(uc->irq, uc);
return 0;
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index 2f86b28fa3da..7bbae7a08642 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -4,6 +4,7 @@ config USBIP_CORE
tristate "USB/IP support"
depends on NET
select USB_COMMON
+ select SGL_ALLOC
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 66edfeea68fe..e2b019532234 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
if (pipe == -1)
return;
+ /*
+ * Smatch reported the error case where use_sg is true and buf_len is 0.
+ * In this case, it adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
+ * released by the stub event handler and the connection will be shut down.
+ */
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
+ if (use_sg && !buf_len) {
+ dev_err(&udev->dev, "sg buffer with zero length\n");
+ goto err_malloc;
+ }
+
/* allocate urb transfer buffer, if needed */
if (buf_len) {
if (use_sg) {
sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
if (!sgl)
goto err_malloc;
+
+ /* Check if the server's HCD supports SG */
+ if (!udev->bus->sg_tablesize) {
+ /*
+ * If the server's HCD doesn't support SG, break
+ * a single SG request into several URBs and map
+ * each SG list entry to corresponding URB
+ * buffer. The previously allocated SG list is
+ * stored in priv->sgl (If the server's HCD
+ * support SG, SG list is stored only in
+ * urb->sg) and it is used as an indicator that
+ * the server split single SG request into
+ * several URBs. Later, priv->sgl is used by
+ * stub_complete() and stub_send_ret_submit() to
+ * reassemble the divied URBs.
+ */
+ support_sg = 0;
+ num_urbs = nents;
+ priv->completed_urbs = 0;
+ pdu->u.cmd_submit.transfer_flags &=
+ ~URB_DMA_MAP_SG;
+ }
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
if (!buffer)
@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
}
- /* Check if the server's HCD supports SG */
- if (use_sg && !udev->bus->sg_tablesize) {
- /*
- * If the server's HCD doesn't support SG, break a single SG
- * request into several URBs and map each SG list entry to
- * corresponding URB buffer. The previously allocated SG
- * list is stored in priv->sgl (If the server's HCD support SG,
- * SG list is stored only in urb->sg) and it is used as an
- * indicator that the server split single SG request into
- * several URBs. Later, priv->sgl is used by stub_complete() and
- * stub_send_ret_submit() to reassemble the divied URBs.
- */
- support_sg = 0;
- num_urbs = nents;
- priv->completed_urbs = 0;
- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
- }
-
/* allocate urb array */
priv->num_urbs = num_urbs;
priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 36010a82b359..b1c2f6781cb3 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -291,7 +291,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
kfree(iov);
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_TCP);
- return -1;
+ return -1;
}
}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 9f57736fe15e..dde392b91bb3 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -384,6 +384,49 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .module = THIS_MODULE,
+
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
+
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -438,7 +481,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
/* Only accept correctly addressed packets */
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
- virtio_transport_recv_pkt(pkt);
+ virtio_transport_recv_pkt(&vhost_transport, pkt);
else
virtio_transport_free_pkt(pkt);
@@ -675,6 +718,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
if (guest_cid > U32_MAX)
return -EINVAL;
+ /* Refuse if CID is assigned to the guest->host transport (i.e. nested
+ * VM), to make the loopback work.
+ */
+ if (vsock_find_cid(guest_cid))
+ return -EADDRINUSE;
+
/* Refuse if CID is already in use */
mutex_lock(&vhost_vsock_mutex);
other = vhost_vsock_get(guest_cid);
@@ -786,57 +835,12 @@ static struct miscdevice vhost_vsock_misc = {
.fops = &vhost_vsock_fops,
};
-static struct virtio_transport vhost_transport = {
- .transport = {
- .get_local_cid = vhost_transport_get_local_cid,
-
- .init = virtio_transport_do_socket_init,
- .destruct = virtio_transport_destruct,
- .release = virtio_transport_release,
- .connect = virtio_transport_connect,
- .shutdown = virtio_transport_shutdown,
- .cancel_pkt = vhost_transport_cancel_pkt,
-
- .dgram_enqueue = virtio_transport_dgram_enqueue,
- .dgram_dequeue = virtio_transport_dgram_dequeue,
- .dgram_bind = virtio_transport_dgram_bind,
- .dgram_allow = virtio_transport_dgram_allow,
-
- .stream_enqueue = virtio_transport_stream_enqueue,
- .stream_dequeue = virtio_transport_stream_dequeue,
- .stream_has_data = virtio_transport_stream_has_data,
- .stream_has_space = virtio_transport_stream_has_space,
- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
- .stream_is_active = virtio_transport_stream_is_active,
- .stream_allow = virtio_transport_stream_allow,
-
- .notify_poll_in = virtio_transport_notify_poll_in,
- .notify_poll_out = virtio_transport_notify_poll_out,
- .notify_recv_init = virtio_transport_notify_recv_init,
- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
- .notify_send_init = virtio_transport_notify_send_init,
- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
- },
-
- .send_pkt = vhost_transport_send_pkt,
-};
-
static int __init vhost_vsock_init(void)
{
int ret;
- ret = vsock_core_init(&vhost_transport.transport);
+ ret = vsock_core_register(&vhost_transport.transport,
+ VSOCK_TRANSPORT_F_H2G);
if (ret < 0)
return ret;
return misc_register(&vhost_vsock_misc);
@@ -845,7 +849,7 @@ static int __init vhost_vsock_init(void)
static void __exit vhost_vsock_exit(void)
{
misc_deregister(&vhost_vsock_misc);
- vsock_core_exit();
+ vsock_core_unregister(&vhost_transport.transport);
};
module_init(vhost_vsock_init);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index c6b3bdbbdbc9..de7b8382aba9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -113,9 +113,9 @@ static int __init text_mode(char *str)
{
vgacon_text_mode_force = true;
- pr_warning("You have booted with nomodeset. This means your GPU drivers are DISABLED\n");
- pr_warning("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n");
- pr_warning("Unless you actually understand what nomodeset does, you should reboot without enabling it\n");
+ pr_warn("You have booted with nomodeset. This means your GPU drivers are DISABLED\n");
+ pr_warn("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n");
+ pr_warn("Unless you actually understand what nomodeset does, you should reboot without enabling it\n");
return 1;
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index e6a1c805064f..95c32952fa8a 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1758,21 +1758,19 @@ EXPORT_SYMBOL(remove_conflicting_framebuffers);
/**
* remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
* @pdev: PCI device
- * @res_id: index of PCI BAR configuring framebuffer memory
* @name: requesting driver name
*
* This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for @pdev's BAR @res_id.
+ * using the memory ranges configured for any of @pdev's memory BARs.
*
* The function assumes that PCI device with shadowed ROM drives a primary
* display and so kicks out vga16fb.
*/
-int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name)
+int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
{
struct apertures_struct *ap;
bool primary = false;
int err, idx, bar;
- bool res_id_found = false;
for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
@@ -1789,16 +1787,11 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
continue;
ap->ranges[idx].base = pci_resource_start(pdev, bar);
ap->ranges[idx].size = pci_resource_len(pdev, bar);
- pci_info(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
- (unsigned long)pci_resource_start(pdev, bar),
- (unsigned long)pci_resource_end(pdev, bar));
+ pci_dbg(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
+ (unsigned long)pci_resource_start(pdev, bar),
+ (unsigned long)pci_resource_end(pdev, bar));
idx++;
- if (res_id == bar)
- res_id_found = true;
}
- if (!res_id_found)
- pci_warn(pdev, "%s: passed res_id (%d) is not a memory bar\n",
- __func__, res_id);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index ae2bcfee338a..81ad3aa1ca06 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -968,19 +968,6 @@ static void sa1100fb_task(struct work_struct *w)
#ifdef CONFIG_CPU_FREQ
/*
- * Calculate the minimum DMA period over all displays that we own.
- * This, together with the SDRAM bandwidth defines the slowest CPU
- * frequency that can be selected.
- */
-static unsigned int sa1100fb_min_dma_period(struct sa1100fb_info *fbi)
-{
- /*
- * FIXME: we need to verify _all_ consoles.
- */
- return sa1100fb_display_dma_period(&fbi->fb.var);
-}
-
-/*
* CPU clock speed change handler. We need to adjust the LCD timing
* parameters when the CPU clock is adjusted by the power management
* subsystem.
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index b939bc28d886..9c82e2a0a411 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -1576,12 +1576,12 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (ptr[0] & 0x10)
frame->active_aspect = ptr[1] & 0xf;
if (ptr[0] & 0x8) {
- frame->top_bar = (ptr[5] << 8) + ptr[6];
- frame->bottom_bar = (ptr[7] << 8) + ptr[8];
+ frame->top_bar = (ptr[6] << 8) | ptr[5];
+ frame->bottom_bar = (ptr[8] << 8) | ptr[7];
}
if (ptr[0] & 0x4) {
- frame->left_bar = (ptr[9] << 8) + ptr[10];
- frame->right_bar = (ptr[11] << 8) + ptr[12];
+ frame->left_bar = (ptr[10] << 8) | ptr[9];
+ frame->right_bar = (ptr[12] << 8) | ptr[11];
}
frame->scan_mode = ptr[0] & 0x3;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 226fbb995fb0..e05679c478e2 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -772,6 +772,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
}
+static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
+ unsigned long pages_to_free)
+{
+ return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
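leak_balloon() works in balloon pages (4 KiB units) while the shrinker works in kernel pages, so the helper converts on the way in and out. A worked example, assuming a 64 KiB kernel page size so that VIRTIO_BALLOON_PAGES_PER_PAGE == 16:

	/* Freeing 2 kernel pages: leak_balloon(vb, 2 * 16) deflates up to
	 * 32 balloon pages, and the result divided by 16 reports at most
	 * 2 freed kernel pages back to the shrinker core. */
	pages_freed = leak_balloon_pages(vb, 2);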
+
static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
unsigned long pages_to_free)
{
@@ -782,11 +789,10 @@ static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
* VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
* multiple times to deflate pages till reaching pages_to_free.
*/
- while (vb->num_pages && pages_to_free) {
- pages_freed += leak_balloon(vb, pages_to_free) /
- VIRTIO_BALLOON_PAGES_PER_PAGE;
- pages_to_free -= pages_freed;
- }
+ while (vb->num_pages && pages_freed < pages_to_free)
+ pages_freed += leak_balloon_pages(vb,
+ pages_to_free - pages_freed);
+
update_balloon_size(vb);
return pages_freed;
@@ -799,7 +805,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+ pages_to_free = sc->nr_to_scan;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
pages_freed = shrink_free_pages(vb, pages_to_free);
@@ -820,7 +826,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
unsigned long count;
count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
return count;
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a8041e451e9e..867c7ebd3f10 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -583,7 +583,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
@@ -1085,7 +1085,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
static inline int virtqueue_add_packed(struct virtqueue *_vq,
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index 1b2d96b945be..e8c7fa68d3cc 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -77,15 +77,13 @@ static int sgi_w1_probe(struct platform_device *pdev)
{
struct sgi_w1_device *sdev;
struct sgi_w1_platform_data *pdata;
- struct resource *res;
sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
GFP_KERNEL);
if (!sdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
+ sdev->mcr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mcr))
return PTR_ERR(sdev->mcr);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index b7847636501d..687753889c34 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -74,6 +74,14 @@ config W1_SLAVE_DS2805
organized as 7 pages of 16 bytes each with 64bit
unique number. Requires OverDrive Speed to talk to.
+config W1_SLAVE_DS2430
+ tristate "256b EEPROM family support (DS2430)"
+ help
+ Say Y here if you want to use a 1-wire 256bit EEPROM
+ family device (DS2430).
+ This EEPROM is organized as one page of 32 bytes for random
+ access.
+
config W1_SLAVE_DS2431
tristate "1kb EEPROM family support (DS2431)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 8e9655eaa478..278bcf2a9bfd 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
+obj-$(CONFIG_W1_SLAVE_DS2430) += w1_ds2430.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
new file mode 100644
index 000000000000..6fb0563fb2ae
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * w1_ds2430.c - w1 family 14 (DS2430) driver
+ **
+ * Copyright (c) 2019 Angelo Dureghello <angelo.dureghello@timesys.com>
+ *
+ * Cloned and modified from ds2431
+ * Copyright (c) 2008 Bernhard Weirich <bernhard.weirich@riedel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2430 0x14
+
+#define W1_F14_EEPROM_SIZE 32
+#define W1_F14_PAGE_COUNT 1
+#define W1_F14_PAGE_BITS 5
+#define W1_F14_PAGE_SIZE (1 << W1_F14_PAGE_BITS)
+#define W1_F14_PAGE_MASK 0x1F
+
+#define W1_F14_SCRATCH_BITS 5
+#define W1_F14_SCRATCH_SIZE (1 << W1_F14_SCRATCH_BITS)
+#define W1_F14_SCRATCH_MASK (W1_F14_SCRATCH_SIZE-1)
+
+#define W1_F14_READ_EEPROM 0xF0
+#define W1_F14_WRITE_SCRATCH 0x0F
+#define W1_F14_READ_SCRATCH 0xAA
+#define W1_F14_COPY_SCRATCH 0x55
+#define W1_F14_VALIDATION_KEY 0xa5
+
+#define W1_F14_TPROG_MS 11
+#define W1_F14_READ_RETRIES 10
+#define W1_F14_READ_MAXLEN W1_F14_SCRATCH_SIZE
+
+/*
+ * Check the file size bounds and adjusts count as needed.
+ * This would not be needed if the file size didn't reset to 0 after a write.
+ */
+static inline size_t w1_f14_fix_count(loff_t off, size_t count, size_t size)
+{
+ if (off > size)
+ return 0;
+
+ if ((off + count) > size)
+ return size - off;
+
+ return count;
+}
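Worked examples against the 32-byte EEPROM (values are illustrative):

	/*
	 *   w1_f14_fix_count(0, 16, 32)  == 16  (fully in range)
	 *   w1_f14_fix_count(30, 10, 32) == 2   (clamped at the end)
	 *   w1_f14_fix_count(40, 10, 32) == 0   (offset past the end)
	 */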
+
+/*
+ * Reads a block from the W1 ROM twice and compares the results.
+ * If the two reads match, the data is returned; otherwise the read
+ * is repeated up to W1_F14_READ_RETRIES times.
+ *
+ * count must not exceed W1_F14_READ_MAXLEN.
+ */
+static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
+{
+ u8 wrbuf[2];
+ u8 cmp[W1_F14_READ_MAXLEN];
+ int tries = W1_F14_READ_RETRIES;
+
+ do {
+ wrbuf[0] = W1_F14_READ_EEPROM;
+ wrbuf[1] = off & 0xff;
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, buf, count);
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, cmp, count);
+
+ if (!memcmp(cmp, buf, count))
+ return 0;
+ } while (--tries);
+
+ dev_err(&sl->dev, "proof reading failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+}
+
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int todo = count;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* read directly from the EEPROM in chunks of W1_F14_READ_MAXLEN */
+ while (todo > 0) {
+ int block_read;
+
+ if (todo >= W1_F14_READ_MAXLEN)
+ block_read = W1_F14_READ_MAXLEN;
+ else
+ block_read = todo;
+
+ if (w1_f14_readblock(sl, off, block_read, buf) < 0)
+ count = -EIO;
+
+ todo -= W1_F14_READ_MAXLEN;
+ buf += W1_F14_READ_MAXLEN;
+ off += W1_F14_READ_MAXLEN;
+ }
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+/*
+ * Writes to the scratchpad and reads it back for verification.
+ * Then copies the scratchpad to EEPROM.
+ * The data must be aligned at W1_F14_SCRATCH_SIZE bytes and
+ * must be W1_F14_SCRATCH_SIZE bytes long.
+ * The master must be locked.
+ *
+ * @param sl The slave structure
+ * @param addr Address for the write
+ * @param len length must be <= (W1_F14_PAGE_SIZE - (addr & W1_F14_PAGE_MASK))
+ * @param data The data to write
+ * @return 0=Success -1=failure
+ */
+static int w1_f14_write(struct w1_slave *sl, int addr, int len, const u8 *data)
+{
+ int tries = W1_F14_READ_RETRIES;
+ u8 wrbuf[2];
+ u8 rdbuf[W1_F14_SCRATCH_SIZE + 3];
+
+retry:
+
+ /* Write the data to the scratchpad */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_WRITE_SCRATCH;
+ wrbuf[1] = addr & 0xff;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_write_block(sl->master, data, len);
+
+ /* Read the scratchpad and verify */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_8(sl->master, W1_F14_READ_SCRATCH);
+ w1_read_block(sl->master, rdbuf, len + 2);
+
+ /*
+ * Compare what was read against the data written.
+ * Note: on read scratchpad, the device returns 2 leading 0xff
+ * bytes, which must be discarded.
+ */
+ if ((memcmp(data, &rdbuf[2], len) != 0)) {
+
+ if (--tries)
+ goto retry;
+
+ dev_err(&sl->dev,
+ "could not write to eeprom, scratchpad compare failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+ }
+
+ /* Copy the scratchpad to EEPROM */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_COPY_SCRATCH;
+ wrbuf[1] = W1_F14_VALIDATION_KEY;
+ w1_write_block(sl->master, wrbuf, 2);
+
+ /* Sleep for tprog ms to wait for the write to complete */
+ msleep(W1_F14_TPROG_MS);
+
+ /* Reset the bus to wake up the EEPROM */
+ w1_reset_bus(sl->master);
+
+ return 0;
+}
+
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int addr, len;
+ int copy;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Can only write data in blocks of the size of the scratchpad */
+ addr = off;
+ len = count;
+ while (len > 0) {
+
+ /* if len too short or addr not aligned */
+ if (len < W1_F14_SCRATCH_SIZE || addr & W1_F14_SCRATCH_MASK) {
+ char tmp[W1_F14_SCRATCH_SIZE];
+
+ /* read the block and update the parts to be written */
+ if (w1_f14_readblock(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp)) {
+ count = -EIO;
+ goto out_up;
+ }
+
+ /* copy at most to the boundary of the PAGE or len */
+ copy = W1_F14_SCRATCH_SIZE -
+ (addr & W1_F14_SCRATCH_MASK);
+
+ if (copy > len)
+ copy = len;
+
+ memcpy(&tmp[addr & W1_F14_SCRATCH_MASK], buf, copy);
+ if (w1_f14_write(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ } else {
+
+ copy = W1_F14_SCRATCH_SIZE;
+ if (w1_f14_write(sl, addr, copy, buf) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ }
+ buf += copy;
+ addr += copy;
+ len -= copy;
+ }
+
+out_up:
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
+
+static struct bin_attribute *w1_f14_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL,
+};
+
+static const struct attribute_group w1_f14_group = {
+ .bin_attrs = w1_f14_bin_attrs,
+};
+
+static const struct attribute_group *w1_f14_groups[] = {
+ &w1_f14_group,
+ NULL,
+};
+
+static struct w1_family_ops w1_f14_fops = {
+ .groups = w1_f14_groups,
+};
+
+static struct w1_family w1_family_14 = {
+ .fid = W1_EEPROM_DS2430,
+ .fops = &w1_f14_fops,
+};
+module_w1_family(w1_family_14);
+
+MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
+MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256b EEPROM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index ed18238c5407..8973f98bc6a5 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -168,3 +168,4 @@ module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 79cc75096f42..61212fc7f0c7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -106,27 +106,27 @@ config XENFS
If in doubt, say yes.
config XEN_COMPAT_XENFS
- bool "Create compatibility mount point /proc/xen"
- depends on XENFS
- default y
- help
- The old xenstore userspace tools expect to find "xenbus"
- under /proc/xen, but "xenbus" is now found at the root of the
- xenfs filesystem. Selecting this causes the kernel to create
- the compatibility mount point /proc/xen if it is running on
- a xen platform.
- If in doubt, say yes.
+ bool "Create compatibility mount point /proc/xen"
+ depends on XENFS
+ default y
+ help
+ The old xenstore userspace tools expect to find "xenbus"
+ under /proc/xen, but "xenbus" is now found at the root of the
+ xenfs filesystem. Selecting this causes the kernel to create
+ the compatibility mount point /proc/xen if it is running on
+ a xen platform.
+ If in doubt, say yes.
config XEN_SYS_HYPERVISOR
- bool "Create xen entries under /sys/hypervisor"
- depends on SYSFS
- select SYS_HYPERVISOR
- default y
- help
- Create entries under /sys/hypervisor describing the Xen
- hypervisor environment. When running native or in another
- virtual environment, /sys/hypervisor will still be present,
- but will have no xen contents.
+ bool "Create xen entries under /sys/hypervisor"
+ depends on SYSFS
+ select SYS_HYPERVISOR
+ default y
+ help
+ Create entries under /sys/hypervisor describing the Xen
+ hypervisor environment. When running native or in another
+ virtual environment, /sys/hypervisor will still be present,
+ but will have no xen contents.
config XEN_XENBUS_FRONTEND
tristate
@@ -141,7 +141,8 @@ config XEN_GNTDEV
config XEN_GNTDEV_DMABUF
bool "Add support for dma-buf grant access device driver extension"
- depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
+ depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
+ select DMA_SHARED_BUFFER
help
Allows userspace processes and kernel modules to use Xen backed
dma-buf implementation. With this extension grant references to
@@ -270,7 +271,7 @@ config XEN_ACPI_PROCESSOR
	depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
	default m
	help
-	This ACPI processor uploads Power Management information to the Xen
+	  This ACPI processor uploads Power Management information to the Xen
	  hypervisor.

	  To do that the driver parses the Power Management data and uploads
@@ -279,19 +280,19 @@ config XEN_ACPI_PROCESSOR
	  SMM so that other drivers (such as ACPI cpufreq scaling driver) will
	  not load.

-	To compile this driver as a module, choose M here: the module will be
+	  To compile this driver as a module, choose M here: the module will be
	  called xen_acpi_processor. If you do not know what to choose, select
	  M here. If the CPUFREQ drivers are built in, select Y here.
config XEN_MCE_LOG
bool "Xen platform mcelog"
- depends on XEN_DOM0 && X86_64 && X86_MCE
+ depends on XEN_DOM0 && X86_MCE
help
	  Allow the kernel to fetch MCE errors from the Xen platform and
	  convert them into the Linux mcelog format for the mcelog tools.
config XEN_HAVE_PVMMU
- bool
+	bool
config XEN_EFI
def_bool y
@@ -308,15 +309,15 @@ config XEN_ACPI
depends on X86 && ACPI
config XEN_SYMS
- bool "Xen symbols"
- depends on X86 && XEN_DOM0 && XENFS
- default y if KALLSYMS
- help
- Exports hypervisor symbols (along with their types and addresses) via
- /proc/xen/xensyms file, similar to /proc/kallsyms
+ bool "Xen symbols"
+ depends on X86 && XEN_DOM0 && XENFS
+ default y if KALLSYMS
+ help
+ Exports hypervisor symbols (along with their types and addresses) via
+ /proc/xen/xensyms file, similar to /proc/kallsyms
config XEN_HAVE_VPMU
- bool
+	bool
config XEN_FRONT_PGDIR_SHBUF
tristate
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index b8bf61abb65b..e9ac3b8c4167 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -222,7 +222,7 @@ static int convert_log(struct mc_info *mi)
	struct mcinfo_global *mc_global;
	struct mcinfo_bank *mc_bank;
	struct xen_mce m;
-	uint32_t i;
+	unsigned int i, j;

	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
@@ -248,7 +248,17 @@ static int convert_log(struct mc_info *mi)
	m.socketid = g_physinfo[i].mc_chipid;
	m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
	m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
-	m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;
+	for (j = 0; j < g_physinfo[i].mc_nmsrvals; ++j)
+		switch (g_physinfo[i].mc_msrvalues[j].reg) {
+		case MSR_IA32_MCG_CAP:
+			m.mcgcap = g_physinfo[i].mc_msrvalues[j].value;
+			break;
+
+		case MSR_PPIN:
+		case MSR_AMD_PPIN:
+			m.ppin = g_physinfo[i].mc_msrvalues[j].value;
+			break;
+		}
	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
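The hunk above drops the assumption that MCG_CAP always occupies the fixed __MC_MSR_MCGCAP slot and instead scans mc_msrvalues[] by MSR number, which is also what lets it capture the protected processor inventory number (MSR_PPIN on Intel, MSR_AMD_PPIN on AMD) into m.ppin. Factored into a reusable helper, the same pattern might look like the sketch below; find_msr_value() is hypothetical, and struct mcinfo_msr merely mirrors the reg/value pair layout of the Xen interface.

#include <stdbool.h>
#include <stdint.h>

/* Mirror of the Xen reg/value pair (illustrative, not the real header). */
struct mcinfo_msr {
	uint64_t reg;	/* MSR index */
	uint64_t value;	/* MSR contents */
};

/* Linear scan for one MSR by register number; returns false if the
 * hypervisor did not report that register for this CPU. */
static bool find_msr_value(const struct mcinfo_msr *msrs, unsigned int n,
			   uint64_t reg, uint64_t *value)
{
	for (unsigned int j = 0; j < n; j++) {
		if (msrs[j].reg == reg) {
			*value = msrs[j].value;
			return true;
		}
	}
	return false;
}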